summaryrefslogtreecommitdiffstats
path: root/src/3rdparty/v8/src
diff options
context:
space:
mode:
Diffstat (limited to 'src/3rdparty/v8/src')
-rwxr-xr-xsrc/3rdparty/v8/src/SConscript413
-rw-r--r--src/3rdparty/v8/src/accessors.cc907
-rw-r--r--src/3rdparty/v8/src/accessors.h128
-rw-r--r--src/3rdparty/v8/src/allocation-inl.h49
-rw-r--r--src/3rdparty/v8/src/allocation.cc123
-rw-r--r--src/3rdparty/v8/src/allocation.h142
-rw-r--r--src/3rdparty/v8/src/api.cc6933
-rw-r--r--src/3rdparty/v8/src/api.h594
-rw-r--r--src/3rdparty/v8/src/apinatives.js122
-rw-r--r--src/3rdparty/v8/src/apiutils.h78
-rw-r--r--src/3rdparty/v8/src/arguments.h130
-rw-r--r--src/3rdparty/v8/src/arm/assembler-arm-inl.h549
-rw-r--r--src/3rdparty/v8/src/arm/assembler-arm.cc3052
-rw-r--r--src/3rdparty/v8/src/arm/assembler-arm.h1518
-rw-r--r--src/3rdparty/v8/src/arm/builtins-arm.cc1901
-rw-r--r--src/3rdparty/v8/src/arm/code-stubs-arm.cc8166
-rw-r--r--src/3rdparty/v8/src/arm/code-stubs-arm.h800
-rw-r--r--src/3rdparty/v8/src/arm/codegen-arm.cc708
-rw-r--r--src/3rdparty/v8/src/arm/codegen-arm.h115
-rw-r--r--src/3rdparty/v8/src/arm/constants-arm.cc154
-rw-r--r--src/3rdparty/v8/src/arm/constants-arm.h789
-rw-r--r--src/3rdparty/v8/src/arm/cpu-arm.cc134
-rw-r--r--src/3rdparty/v8/src/arm/debug-arm.cc345
-rw-r--r--src/3rdparty/v8/src/arm/deoptimizer-arm.cc1106
-rw-r--r--src/3rdparty/v8/src/arm/disasm-arm.cc1572
-rw-r--r--src/3rdparty/v8/src/arm/frames-arm.cc45
-rw-r--r--src/3rdparty/v8/src/arm/frames-arm.h172
-rw-r--r--src/3rdparty/v8/src/arm/full-codegen-arm.cc4622
-rw-r--r--src/3rdparty/v8/src/arm/ic-arm.cc1685
-rw-r--r--src/3rdparty/v8/src/arm/lithium-arm.cc2515
-rw-r--r--src/3rdparty/v8/src/arm/lithium-arm.h2742
-rw-r--r--src/3rdparty/v8/src/arm/lithium-codegen-arm.cc6408
-rw-r--r--src/3rdparty/v8/src/arm/lithium-codegen-arm.h513
-rw-r--r--src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc318
-rw-r--r--src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.h83
-rw-r--r--src/3rdparty/v8/src/arm/macro-assembler-arm.cc4012
-rw-r--r--src/3rdparty/v8/src/arm/macro-assembler-arm.h1439
-rw-r--r--src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc1429
-rw-r--r--src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h259
-rw-r--r--src/3rdparty/v8/src/arm/simulator-arm.cc3475
-rw-r--r--src/3rdparty/v8/src/arm/simulator-arm.h468
-rw-r--r--src/3rdparty/v8/src/arm/stub-cache-arm.cc4091
-rw-r--r--src/3rdparty/v8/src/array.js1570
-rw-r--r--src/3rdparty/v8/src/assembler.cc1617
-rw-r--r--src/3rdparty/v8/src/assembler.h1000
-rw-r--r--src/3rdparty/v8/src/ast.cc1134
-rw-r--r--src/3rdparty/v8/src/ast.h2946
-rw-r--r--src/3rdparty/v8/src/atomicops.h182
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_arm_gcc.h145
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_arm_qnx.h121
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_mips_gcc.h181
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_tsan.h335
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_x86_gcc.cc133
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_x86_gcc.h287
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_x86_macosx.h301
-rw-r--r--src/3rdparty/v8/src/atomicops_internals_x86_msvc.h209
-rw-r--r--src/3rdparty/v8/src/bignum-dtoa.cc658
-rw-r--r--src/3rdparty/v8/src/bignum-dtoa.h81
-rw-r--r--src/3rdparty/v8/src/bignum.cc767
-rw-r--r--src/3rdparty/v8/src/bignum.h141
-rw-r--r--src/3rdparty/v8/src/bootstrapper.cc2512
-rw-r--r--src/3rdparty/v8/src/bootstrapper.h192
-rw-r--r--src/3rdparty/v8/src/builtins.cc1876
-rw-r--r--src/3rdparty/v8/src/builtins.h413
-rw-r--r--src/3rdparty/v8/src/bytecodes-irregexp.h104
-rw-r--r--src/3rdparty/v8/src/cached-powers.cc179
-rw-r--r--src/3rdparty/v8/src/cached-powers.h64
-rw-r--r--src/3rdparty/v8/src/char-predicates-inl.h94
-rw-r--r--src/3rdparty/v8/src/char-predicates.h69
-rw-r--r--src/3rdparty/v8/src/checks.cc111
-rw-r--r--src/3rdparty/v8/src/checks.h295
-rw-r--r--src/3rdparty/v8/src/circular-queue-inl.h53
-rw-r--r--src/3rdparty/v8/src/circular-queue.cc126
-rw-r--r--src/3rdparty/v8/src/circular-queue.h103
-rw-r--r--src/3rdparty/v8/src/code-stubs-hydrogen.cc366
-rw-r--r--src/3rdparty/v8/src/code-stubs.cc644
-rw-r--r--src/3rdparty/v8/src/code-stubs.h1623
-rw-r--r--src/3rdparty/v8/src/code.h70
-rw-r--r--src/3rdparty/v8/src/codegen.cc229
-rw-r--r--src/3rdparty/v8/src/codegen.h130
-rw-r--r--src/3rdparty/v8/src/collection.js287
-rw-r--r--src/3rdparty/v8/src/compilation-cache.cc516
-rw-r--r--src/3rdparty/v8/src/compilation-cache.h303
-rw-r--r--src/3rdparty/v8/src/compiler-intrinsics.h110
-rw-r--r--src/3rdparty/v8/src/compiler.cc1144
-rw-r--r--src/3rdparty/v8/src/compiler.h548
-rw-r--r--src/3rdparty/v8/src/contexts.cc396
-rw-r--r--src/3rdparty/v8/src/contexts.h477
-rw-r--r--src/3rdparty/v8/src/conversions-inl.h678
-rw-r--r--src/3rdparty/v8/src/conversions.cc432
-rw-r--r--src/3rdparty/v8/src/conversions.h151
-rw-r--r--src/3rdparty/v8/src/counters.cc102
-rw-r--r--src/3rdparty/v8/src/counters.h282
-rw-r--r--src/3rdparty/v8/src/cpu-profiler-inl.h79
-rw-r--r--src/3rdparty/v8/src/cpu-profiler.cc616
-rw-r--r--src/3rdparty/v8/src/cpu-profiler.h288
-rw-r--r--src/3rdparty/v8/src/cpu.h69
-rw-r--r--src/3rdparty/v8/src/d8-debug.cc370
-rw-r--r--src/3rdparty/v8/src/d8-debug.h157
-rw-r--r--src/3rdparty/v8/src/d8-posix.cc688
-rw-r--r--src/3rdparty/v8/src/d8-readline.cc171
-rw-r--r--src/3rdparty/v8/src/d8-windows.cc42
-rw-r--r--src/3rdparty/v8/src/d8.cc1971
-rw-r--r--src/3rdparty/v8/src/d8.gyp113
-rw-r--r--src/3rdparty/v8/src/d8.h415
-rw-r--r--src/3rdparty/v8/src/d8.js2196
-rw-r--r--src/3rdparty/v8/src/data-flow.cc66
-rw-r--r--src/3rdparty/v8/src/data-flow.h260
-rw-r--r--src/3rdparty/v8/src/date.cc384
-rw-r--r--src/3rdparty/v8/src/date.h260
-rw-r--r--src/3rdparty/v8/src/date.js832
-rw-r--r--src/3rdparty/v8/src/dateparser-inl.h334
-rw-r--r--src/3rdparty/v8/src/dateparser.cc212
-rw-r--r--src/3rdparty/v8/src/dateparser.h409
-rw-r--r--src/3rdparty/v8/src/debug-agent.cc462
-rw-r--r--src/3rdparty/v8/src/debug-agent.h132
-rw-r--r--src/3rdparty/v8/src/debug-debugger.js2638
-rw-r--r--src/3rdparty/v8/src/debug.cc3803
-rw-r--r--src/3rdparty/v8/src/debug.h1056
-rw-r--r--src/3rdparty/v8/src/deoptimizer.cc2375
-rw-r--r--src/3rdparty/v8/src/deoptimizer.h888
-rw-r--r--src/3rdparty/v8/src/disasm.h80
-rw-r--r--src/3rdparty/v8/src/disassembler.cc358
-rw-r--r--src/3rdparty/v8/src/disassembler.h58
-rw-r--r--src/3rdparty/v8/src/diy-fp.cc59
-rw-r--r--src/3rdparty/v8/src/diy-fp.h117
-rw-r--r--src/3rdparty/v8/src/double.h232
-rw-r--r--src/3rdparty/v8/src/dtoa.cc106
-rw-r--r--src/3rdparty/v8/src/dtoa.h85
-rw-r--r--src/3rdparty/v8/src/elements-kind.cc139
-rw-r--r--src/3rdparty/v8/src/elements-kind.h229
-rw-r--r--src/3rdparty/v8/src/elements.cc2073
-rw-r--r--src/3rdparty/v8/src/elements.h208
-rw-r--r--src/3rdparty/v8/src/execution.cc972
-rw-r--r--src/3rdparty/v8/src/execution.h318
-rw-r--r--src/3rdparty/v8/src/extensions/externalize-string-extension.cc141
-rw-r--r--src/3rdparty/v8/src/extensions/externalize-string-extension.h50
-rw-r--r--src/3rdparty/v8/src/extensions/gc-extension.cc57
-rw-r--r--src/3rdparty/v8/src/extensions/gc-extension.h49
-rw-r--r--src/3rdparty/v8/src/extensions/statistics-extension.cc153
-rw-r--r--src/3rdparty/v8/src/extensions/statistics-extension.h49
-rw-r--r--src/3rdparty/v8/src/factory.cc1496
-rw-r--r--src/3rdparty/v8/src/factory.h533
-rw-r--r--src/3rdparty/v8/src/fast-dtoa.cc738
-rw-r--r--src/3rdparty/v8/src/fast-dtoa.h83
-rw-r--r--src/3rdparty/v8/src/fixed-dtoa.cc407
-rw-r--r--src/3rdparty/v8/src/fixed-dtoa.h55
-rw-r--r--src/3rdparty/v8/src/flag-definitions.h764
-rw-r--r--src/3rdparty/v8/src/flags.cc547
-rw-r--r--src/3rdparty/v8/src/flags.h82
-rw-r--r--src/3rdparty/v8/src/frames-inl.h338
-rw-r--r--src/3rdparty/v8/src/frames.cc1502
-rw-r--r--src/3rdparty/v8/src/frames.h978
-rw-r--r--src/3rdparty/v8/src/full-codegen.cc1584
-rw-r--r--src/3rdparty/v8/src/full-codegen.h863
-rw-r--r--src/3rdparty/v8/src/func-name-inferrer.cc107
-rw-r--r--src/3rdparty/v8/src/func-name-inferrer.h131
-rw-r--r--src/3rdparty/v8/src/gdb-jit.cc2173
-rw-r--r--src/3rdparty/v8/src/gdb-jit.h144
-rw-r--r--src/3rdparty/v8/src/global-handles.cc863
-rw-r--r--src/3rdparty/v8/src/global-handles.h284
-rw-r--r--src/3rdparty/v8/src/globals.h409
-rw-r--r--src/3rdparty/v8/src/handles-inl.h202
-rw-r--r--src/3rdparty/v8/src/handles.cc935
-rw-r--r--src/3rdparty/v8/src/handles.h375
-rw-r--r--src/3rdparty/v8/src/hashmap.h364
-rw-r--r--src/3rdparty/v8/src/heap-inl.h901
-rw-r--r--src/3rdparty/v8/src/heap-profiler.cc233
-rw-r--r--src/3rdparty/v8/src/heap-profiler.h113
-rw-r--r--src/3rdparty/v8/src/heap-snapshot-generator-inl.h88
-rw-r--r--src/3rdparty/v8/src/heap-snapshot-generator.cc2703
-rw-r--r--src/3rdparty/v8/src/heap-snapshot-generator.h697
-rw-r--r--src/3rdparty/v8/src/heap.cc7842
-rw-r--r--src/3rdparty/v8/src/heap.h3009
-rw-r--r--src/3rdparty/v8/src/hydrogen-instructions.cc3277
-rw-r--r--src/3rdparty/v8/src/hydrogen-instructions.h6186
-rw-r--r--src/3rdparty/v8/src/hydrogen.cc10851
-rw-r--r--src/3rdparty/v8/src/hydrogen.h1703
-rw-r--r--src/3rdparty/v8/src/ia32/assembler-ia32-inl.h503
-rw-r--r--src/3rdparty/v8/src/ia32/assembler-ia32.cc2696
-rw-r--r--src/3rdparty/v8/src/ia32/assembler-ia32.h1281
-rw-r--r--src/3rdparty/v8/src/ia32/builtins-ia32.cc1869
-rw-r--r--src/3rdparty/v8/src/ia32/code-stubs-ia32.cc7936
-rw-r--r--src/3rdparty/v8/src/ia32/code-stubs-ia32.h646
-rw-r--r--src/3rdparty/v8/src/ia32/codegen-ia32.cc967
-rw-r--r--src/3rdparty/v8/src/ia32/codegen-ia32.h107
-rw-r--r--src/3rdparty/v8/src/ia32/cpu-ia32.cc89
-rw-r--r--src/3rdparty/v8/src/ia32/debug-ia32.cc362
-rw-r--r--src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc1184
-rw-r--r--src/3rdparty/v8/src/ia32/disasm-ia32.cc1728
-rw-r--r--src/3rdparty/v8/src/ia32/frames-ia32.cc45
-rw-r--r--src/3rdparty/v8/src/ia32/frames-ia32.h137
-rw-r--r--src/3rdparty/v8/src/ia32/full-codegen-ia32.cc4595
-rw-r--r--src/3rdparty/v8/src/ia32/ic-ia32.cc1675
-rw-r--r--src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc6266
-rw-r--r--src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h475
-rw-r--r--src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc494
-rw-r--r--src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h110
-rw-r--r--src/3rdparty/v8/src/ia32/lithium-ia32.cc2604
-rw-r--r--src/3rdparty/v8/src/ia32/lithium-ia32.h2849
-rw-r--r--src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc3101
-rw-r--r--src/3rdparty/v8/src/ia32/macro-assembler-ia32.h1018
-rw-r--r--src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc1420
-rw-r--r--src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h224
-rw-r--r--src/3rdparty/v8/src/ia32/simulator-ia32.cc30
-rw-r--r--src/3rdparty/v8/src/ia32/simulator-ia32.h74
-rw-r--r--src/3rdparty/v8/src/ia32/stub-cache-ia32.cc3833
-rw-r--r--src/3rdparty/v8/src/ic-inl.h144
-rw-r--r--src/3rdparty/v8/src/ic.cc2655
-rw-r--r--src/3rdparty/v8/src/ic.h850
-rw-r--r--src/3rdparty/v8/src/incremental-marking-inl.h145
-rw-r--r--src/3rdparty/v8/src/incremental-marking.cc1012
-rw-r--r--src/3rdparty/v8/src/incremental-marking.h285
-rw-r--r--src/3rdparty/v8/src/interface.cc244
-rw-r--r--src/3rdparty/v8/src/interface.h240
-rw-r--r--src/3rdparty/v8/src/interpreter-irregexp.cc641
-rw-r--r--src/3rdparty/v8/src/interpreter-irregexp.h49
-rw-r--r--src/3rdparty/v8/src/isolate-inl.h73
-rw-r--r--src/3rdparty/v8/src/isolate.cc2335
-rw-r--r--src/3rdparty/v8/src/isolate.h1494
-rw-r--r--src/3rdparty/v8/src/json-parser.h708
-rw-r--r--src/3rdparty/v8/src/json-stringifier.h788
-rw-r--r--src/3rdparty/v8/src/json.js226
-rw-r--r--src/3rdparty/v8/src/jsregexp-inl.h106
-rw-r--r--src/3rdparty/v8/src/jsregexp.cc6150
-rw-r--r--src/3rdparty/v8/src/jsregexp.h1624
-rw-r--r--src/3rdparty/v8/src/lazy-instance.h263
-rw-r--r--src/3rdparty/v8/src/list-inl.h274
-rw-r--r--src/3rdparty/v8/src/list.h218
-rw-r--r--src/3rdparty/v8/src/lithium-allocator-inl.h164
-rw-r--r--src/3rdparty/v8/src/lithium-allocator.cc2133
-rw-r--r--src/3rdparty/v8/src/lithium-allocator.h622
-rw-r--r--src/3rdparty/v8/src/lithium.cc495
-rw-r--r--src/3rdparty/v8/src/lithium.h722
-rw-r--r--src/3rdparty/v8/src/liveedit-debugger.js1137
-rw-r--r--src/3rdparty/v8/src/liveedit.cc2128
-rw-r--r--src/3rdparty/v8/src/liveedit.h183
-rw-r--r--src/3rdparty/v8/src/log-inl.h55
-rw-r--r--src/3rdparty/v8/src/log-utils.cc312
-rw-r--r--src/3rdparty/v8/src/log-utils.h151
-rw-r--r--src/3rdparty/v8/src/log.cc1912
-rw-r--r--src/3rdparty/v8/src/log.h554
-rw-r--r--src/3rdparty/v8/src/macro-assembler.h173
-rw-r--r--src/3rdparty/v8/src/macros.py242
-rw-r--r--src/3rdparty/v8/src/mark-compact-inl.h100
-rw-r--r--src/3rdparty/v8/src/mark-compact.cc4132
-rw-r--r--src/3rdparty/v8/src/mark-compact.h911
-rw-r--r--src/3rdparty/v8/src/marking-thread.cc85
-rw-r--r--src/3rdparty/v8/src/marking-thread.h71
-rw-r--r--src/3rdparty/v8/src/math.js283
-rw-r--r--src/3rdparty/v8/src/messages.cc200
-rw-r--r--src/3rdparty/v8/src/messages.h115
-rw-r--r--src/3rdparty/v8/src/messages.js1311
-rw-r--r--src/3rdparty/v8/src/mips/assembler-mips-inl.h425
-rw-r--r--src/3rdparty/v8/src/mips/assembler-mips.cc2305
-rw-r--r--src/3rdparty/v8/src/mips/assembler-mips.h1282
-rw-r--r--src/3rdparty/v8/src/mips/builtins-mips.cc1941
-rw-r--r--src/3rdparty/v8/src/mips/code-stubs-mips.cc8292
-rw-r--r--src/3rdparty/v8/src/mips/code-stubs-mips.h794
-rw-r--r--src/3rdparty/v8/src/mips/codegen-mips.cc729
-rw-r--r--src/3rdparty/v8/src/mips/codegen-mips.h117
-rw-r--r--src/3rdparty/v8/src/mips/constants-mips.cc355
-rw-r--r--src/3rdparty/v8/src/mips/constants-mips.h803
-rw-r--r--src/3rdparty/v8/src/mips/cpu-mips.cc100
-rw-r--r--src/3rdparty/v8/src/mips/debug-mips.cc337
-rw-r--r--src/3rdparty/v8/src/mips/deoptimizer-mips.cc1120
-rw-r--r--src/3rdparty/v8/src/mips/disasm-mips.cc1064
-rw-r--r--src/3rdparty/v8/src/mips/frames-mips.cc47
-rw-r--r--src/3rdparty/v8/src/mips/frames-mips.h231
-rw-r--r--src/3rdparty/v8/src/mips/full-codegen-mips.cc4645
-rw-r--r--src/3rdparty/v8/src/mips/ic-mips.cc1682
-rw-r--r--src/3rdparty/v8/src/mips/lithium-codegen-mips.cc6106
-rw-r--r--src/3rdparty/v8/src/mips/lithium-codegen-mips.h512
-rw-r--r--src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc323
-rw-r--r--src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.h83
-rw-r--r--src/3rdparty/v8/src/mips/lithium-mips.cc2398
-rw-r--r--src/3rdparty/v8/src/mips/lithium-mips.h2683
-rw-r--r--src/3rdparty/v8/src/mips/macro-assembler-mips.cc5553
-rw-r--r--src/3rdparty/v8/src/mips/macro-assembler-mips.h1583
-rw-r--r--src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc1397
-rw-r--r--src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.h261
-rw-r--r--src/3rdparty/v8/src/mips/simulator-mips.cc2908
-rw-r--r--src/3rdparty/v8/src/mips/simulator-mips.h445
-rw-r--r--src/3rdparty/v8/src/mips/stub-cache-mips.cc4149
-rw-r--r--src/3rdparty/v8/src/mirror-debugger.js2626
-rw-r--r--src/3rdparty/v8/src/misc-intrinsics.h89
-rw-r--r--src/3rdparty/v8/src/mksnapshot.cc426
-rw-r--r--src/3rdparty/v8/src/natives.h67
-rw-r--r--src/3rdparty/v8/src/object-observe.js235
-rw-r--r--src/3rdparty/v8/src/objects-debug.cc1077
-rw-r--r--src/3rdparty/v8/src/objects-inl.h6007
-rw-r--r--src/3rdparty/v8/src/objects-printer.cc1169
-rw-r--r--src/3rdparty/v8/src/objects-visiting-inl.h725
-rw-r--r--src/3rdparty/v8/src/objects-visiting.cc184
-rw-r--r--src/3rdparty/v8/src/objects-visiting.h462
-rw-r--r--src/3rdparty/v8/src/objects.cc14119
-rw-r--r--src/3rdparty/v8/src/objects.h9150
-rw-r--r--src/3rdparty/v8/src/once.cc77
-rw-r--r--src/3rdparty/v8/src/once.h123
-rw-r--r--src/3rdparty/v8/src/optimizing-compiler-thread.cc157
-rw-r--r--src/3rdparty/v8/src/optimizing-compiler-thread.h111
-rw-r--r--src/3rdparty/v8/src/parser.cc5980
-rw-r--r--src/3rdparty/v8/src/parser.h886
-rw-r--r--src/3rdparty/v8/src/platform-cygwin.cc796
-rw-r--r--src/3rdparty/v8/src/platform-freebsd.cc918
-rw-r--r--src/3rdparty/v8/src/platform-linux.cc1393
-rw-r--r--src/3rdparty/v8/src/platform-macos.cc942
-rw-r--r--src/3rdparty/v8/src/platform-nullos.cc549
-rw-r--r--src/3rdparty/v8/src/platform-openbsd.cc975
-rw-r--r--src/3rdparty/v8/src/platform-posix.cc559
-rw-r--r--src/3rdparty/v8/src/platform-posix.h39
-rw-r--r--src/3rdparty/v8/src/platform-qnx.cc1086
-rw-r--r--src/3rdparty/v8/src/platform-solaris.cc893
-rw-r--r--src/3rdparty/v8/src/platform-tls-mac.h62
-rw-r--r--src/3rdparty/v8/src/platform-tls-win32.h62
-rw-r--r--src/3rdparty/v8/src/platform-tls.h50
-rw-r--r--src/3rdparty/v8/src/platform-win32.cc2271
-rw-r--r--src/3rdparty/v8/src/platform.h828
-rw-r--r--src/3rdparty/v8/src/preparse-data-format.h62
-rw-r--r--src/3rdparty/v8/src/preparse-data.cc183
-rw-r--r--src/3rdparty/v8/src/preparse-data.h231
-rw-r--r--src/3rdparty/v8/src/preparser-api.cc214
-rw-r--r--src/3rdparty/v8/src/preparser.cc1789
-rw-r--r--src/3rdparty/v8/src/preparser.h672
-rw-r--r--src/3rdparty/v8/src/prettyprinter.cc1136
-rw-r--r--src/3rdparty/v8/src/prettyprinter.h121
-rw-r--r--src/3rdparty/v8/src/profile-generator-inl.h100
-rw-r--r--src/3rdparty/v8/src/profile-generator.cc945
-rw-r--r--src/3rdparty/v8/src/profile-generator.h452
-rw-r--r--src/3rdparty/v8/src/property-details.h147
-rw-r--r--src/3rdparty/v8/src/property.cc122
-rw-r--r--src/3rdparty/v8/src/property.h482
-rw-r--r--src/3rdparty/v8/src/proxy.js194
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler-irregexp-inl.h88
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler-irregexp.cc499
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler-irregexp.h150
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler-tracer.cc449
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler-tracer.h111
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler.cc292
-rw-r--r--src/3rdparty/v8/src/regexp-macro-assembler.h270
-rw-r--r--src/3rdparty/v8/src/regexp-stack.cc112
-rw-r--r--src/3rdparty/v8/src/regexp-stack.h148
-rw-r--r--src/3rdparty/v8/src/regexp.js481
-rw-r--r--src/3rdparty/v8/src/rewriter.cc284
-rw-r--r--src/3rdparty/v8/src/rewriter.h50
-rw-r--r--src/3rdparty/v8/src/runtime-profiler.cc482
-rw-r--r--src/3rdparty/v8/src/runtime-profiler.h154
-rw-r--r--src/3rdparty/v8/src/runtime.cc13380
-rw-r--r--src/3rdparty/v8/src/runtime.h727
-rw-r--r--src/3rdparty/v8/src/runtime.js667
-rw-r--r--src/3rdparty/v8/src/safepoint-table.cc238
-rw-r--r--src/3rdparty/v8/src/safepoint-table.h253
-rw-r--r--src/3rdparty/v8/src/scanner-character-streams.cc323
-rw-r--r--src/3rdparty/v8/src/scanner-character-streams.h129
-rwxr-xr-xsrc/3rdparty/v8/src/scanner.cc1094
-rw-r--r--src/3rdparty/v8/src/scanner.h570
-rw-r--r--src/3rdparty/v8/src/scopeinfo.cc569
-rw-r--r--src/3rdparty/v8/src/scopeinfo.h196
-rw-r--r--src/3rdparty/v8/src/scopes.cc1478
-rw-r--r--src/3rdparty/v8/src/scopes.h648
-rw-r--r--src/3rdparty/v8/src/serialize.cc1661
-rw-r--r--src/3rdparty/v8/src/serialize.h663
-rw-r--r--src/3rdparty/v8/src/simulator.h43
-rw-r--r--src/3rdparty/v8/src/small-pointer-list.h198
-rw-r--r--src/3rdparty/v8/src/smart-pointers.h149
-rw-r--r--src/3rdparty/v8/src/snapshot-common.cc132
-rw-r--r--src/3rdparty/v8/src/snapshot-empty.cc60
-rw-r--r--src/3rdparty/v8/src/snapshot.h98
-rw-r--r--src/3rdparty/v8/src/spaces-inl.h381
-rw-r--r--src/3rdparty/v8/src/spaces.cc3131
-rw-r--r--src/3rdparty/v8/src/spaces.h2836
-rw-r--r--src/3rdparty/v8/src/splay-tree-inl.h311
-rw-r--r--src/3rdparty/v8/src/splay-tree.h218
-rw-r--r--src/3rdparty/v8/src/store-buffer-inl.h79
-rw-r--r--src/3rdparty/v8/src/store-buffer.cc726
-rw-r--r--src/3rdparty/v8/src/store-buffer.h253
-rw-r--r--src/3rdparty/v8/src/string-search.cc41
-rw-r--r--src/3rdparty/v8/src/string-search.h588
-rw-r--r--src/3rdparty/v8/src/string-stream.cc594
-rw-r--r--src/3rdparty/v8/src/string-stream.h192
-rw-r--r--src/3rdparty/v8/src/string.js1047
-rw-r--r--src/3rdparty/v8/src/strtod.cc442
-rw-r--r--src/3rdparty/v8/src/strtod.h40
-rw-r--r--src/3rdparty/v8/src/stub-cache.cc1856
-rw-r--r--src/3rdparty/v8/src/stub-cache.h1014
-rw-r--r--src/3rdparty/v8/src/sweeper-thread.cc103
-rw-r--r--src/3rdparty/v8/src/sweeper-thread.h75
-rw-r--r--src/3rdparty/v8/src/symbol.js39
-rw-r--r--src/3rdparty/v8/src/third_party/valgrind/valgrind.h4033
-rw-r--r--src/3rdparty/v8/src/token.cc63
-rw-r--r--src/3rdparty/v8/src/token.h301
-rw-r--r--src/3rdparty/v8/src/transitions-inl.h220
-rw-r--r--src/3rdparty/v8/src/transitions.cc160
-rw-r--r--src/3rdparty/v8/src/transitions.h207
-rw-r--r--src/3rdparty/v8/src/type-info.cc755
-rw-r--r--src/3rdparty/v8/src/type-info.h345
-rw-r--r--src/3rdparty/v8/src/unbound-queue-inl.h95
-rw-r--r--src/3rdparty/v8/src/unbound-queue.h69
-rw-r--r--src/3rdparty/v8/src/unicode-inl.h205
-rw-r--r--src/3rdparty/v8/src/unicode.cc1861
-rw-r--r--src/3rdparty/v8/src/unicode.h279
-rw-r--r--src/3rdparty/v8/src/uri.h309
-rw-r--r--src/3rdparty/v8/src/uri.js452
-rw-r--r--src/3rdparty/v8/src/utils-inl.h48
-rw-r--r--src/3rdparty/v8/src/utils.cc107
-rw-r--r--src/3rdparty/v8/src/utils.h1086
-rw-r--r--src/3rdparty/v8/src/v8-counters.cc105
-rw-r--r--src/3rdparty/v8/src/v8-counters.h428
-rw-r--r--src/3rdparty/v8/src/v8.cc289
-rw-r--r--src/3rdparty/v8/src/v8.h158
-rw-r--r--src/3rdparty/v8/src/v8checks.h64
-rw-r--r--src/3rdparty/v8/src/v8conversions.cc132
-rw-r--r--src/3rdparty/v8/src/v8conversions.h60
-rw-r--r--src/3rdparty/v8/src/v8dll-main.cc44
-rw-r--r--src/3rdparty/v8/src/v8globals.h579
-rw-r--r--src/3rdparty/v8/src/v8memory.h86
-rw-r--r--src/3rdparty/v8/src/v8natives.js1732
-rw-r--r--src/3rdparty/v8/src/v8preparserdll-main.cc39
-rw-r--r--src/3rdparty/v8/src/v8threads.cc493
-rw-r--r--src/3rdparty/v8/src/v8threads.h172
-rw-r--r--src/3rdparty/v8/src/v8utils.cc282
-rw-r--r--src/3rdparty/v8/src/v8utils.h283
-rw-r--r--src/3rdparty/v8/src/variables.cc102
-rw-r--r--src/3rdparty/v8/src/variables.h196
-rw-r--r--src/3rdparty/v8/src/version.cc116
-rw-r--r--src/3rdparty/v8/src/version.h68
-rw-r--r--src/3rdparty/v8/src/vm-state-inl.h108
-rw-r--r--src/3rdparty/v8/src/vm-state.h60
-rw-r--r--src/3rdparty/v8/src/win32-headers.h116
-rw-r--r--src/3rdparty/v8/src/win32-math.cc106
-rw-r--r--src/3rdparty/v8/src/win32-math.h61
-rw-r--r--src/3rdparty/v8/src/x64/assembler-x64-inl.h521
-rw-r--r--src/3rdparty/v8/src/x64/assembler-x64.cc3064
-rw-r--r--src/3rdparty/v8/src/x64/assembler-x64.h1678
-rw-r--r--src/3rdparty/v8/src/x64/builtins-x64.cc1884
-rw-r--r--src/3rdparty/v8/src/x64/code-stubs-x64.cc6940
-rw-r--r--src/3rdparty/v8/src/x64/code-stubs-x64.h623
-rw-r--r--src/3rdparty/v8/src/x64/codegen-x64.cc785
-rw-r--r--src/3rdparty/v8/src/x64/codegen-x64.h108
-rw-r--r--src/3rdparty/v8/src/x64/cpu-x64.cc89
-rw-r--r--src/3rdparty/v8/src/x64/debug-x64.cc354
-rw-r--r--src/3rdparty/v8/src/x64/deoptimizer-x64.cc1076
-rw-r--r--src/3rdparty/v8/src/x64/disasm-x64.cc1869
-rw-r--r--src/3rdparty/v8/src/x64/frames-x64.cc45
-rw-r--r--src/3rdparty/v8/src/x64/frames-x64.h122
-rw-r--r--src/3rdparty/v8/src/x64/full-codegen-x64.cc4594
-rw-r--r--src/3rdparty/v8/src/x64/ic-x64.cc1690
-rw-r--r--src/3rdparty/v8/src/x64/lithium-codegen-x64.cc5846
-rw-r--r--src/3rdparty/v8/src/x64/lithium-codegen-x64.h450
-rw-r--r--src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc322
-rw-r--r--src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.h74
-rw-r--r--src/3rdparty/v8/src/x64/lithium-x64.cc2438
-rw-r--r--src/3rdparty/v8/src/x64/lithium-x64.h2641
-rw-r--r--src/3rdparty/v8/src/x64/macro-assembler-x64.cc4637
-rw-r--r--src/3rdparty/v8/src/x64/macro-assembler-x64.h1508
-rw-r--r--src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc1553
-rw-r--r--src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h304
-rw-r--r--src/3rdparty/v8/src/x64/simulator-x64.cc27
-rw-r--r--src/3rdparty/v8/src/x64/simulator-x64.h72
-rw-r--r--src/3rdparty/v8/src/x64/stub-cache-x64.cc3613
-rw-r--r--src/3rdparty/v8/src/zone-inl.h126
-rw-r--r--src/3rdparty/v8/src/zone.cc216
-rw-r--r--src/3rdparty/v8/src/zone.h273
463 files changed, 0 insertions, 482144 deletions
diff --git a/src/3rdparty/v8/src/SConscript b/src/3rdparty/v8/src/SConscript
deleted file mode 100755
index 772ac4e..0000000
--- a/src/3rdparty/v8/src/SConscript
+++ /dev/null
@@ -1,413 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import sys
-from os.path import join, dirname, abspath
-root_dir = dirname(File('SConstruct').rfile().abspath)
-sys.path.append(join(root_dir, 'tools'))
-import js2c
-Import('context')
-Import('tools')
-Import('d8_env')
-
-
-SOURCES = {
- 'all': Split("""
- accessors.cc
- allocation.cc
- api.cc
- assembler.cc
- ast.cc
- atomicops_internals_x86_gcc.cc
- bignum-dtoa.cc
- bignum.cc
- bootstrapper.cc
- builtins.cc
- cached-powers.cc
- checks.cc
- circular-queue.cc
- code-stubs.cc
- codegen.cc
- compilation-cache.cc
- compiler.cc
- contexts.cc
- conversions.cc
- counters.cc
- cpu-profiler.cc
- data-flow.cc
- date.cc
- dateparser.cc
- debug-agent.cc
- debug.cc
- deoptimizer.cc
- disassembler.cc
- diy-fp.cc
- dtoa.cc
- elements-kind.cc
- elements.cc
- execution.cc
- extensions/externalize-string-extension.cc
- extensions/gc-extension.cc
- extensions/statistics-extension.cc
- factory.cc
- fast-dtoa.cc
- fixed-dtoa.cc
- flags.cc
- frames.cc
- full-codegen.cc
- func-name-inferrer.cc
- gdb-jit.cc
- global-handles.cc
- handles.cc
- heap-profiler.cc
- heap-snapshot-generator.cc
- heap.cc
- hydrogen-instructions.cc
- hydrogen.cc
- ic.cc
- incremental-marking.cc
- interface.cc
- interpreter-irregexp.cc
- isolate.cc
- jsregexp.cc
- lithium-allocator.cc
- lithium.cc
- liveedit.cc
- log-utils.cc
- log.cc
- mark-compact.cc
- messages.cc
- objects-printer.cc
- objects-visiting.cc
- objects.cc
- once.cc
- optimizing-compiler-thread.cc
- parser.cc
- preparse-data.cc
- preparser.cc
- profile-generator.cc
- property.cc
- regexp-macro-assembler-irregexp.cc
- regexp-macro-assembler.cc
- regexp-stack.cc
- rewriter.cc
- runtime-profiler.cc
- runtime.cc
- safepoint-table.cc
- scanner-character-streams.cc
- scanner.cc
- scopeinfo.cc
- scopes.cc
- serialize.cc
- snapshot-common.cc
- spaces.cc
- store-buffer.cc
- string-search.cc
- string-stream.cc
- strtod.cc
- stub-cache.cc
- token.cc
- transitions.cc
- type-info.cc
- unicode.cc
- utils.cc
- v8-counters.cc
- v8.cc
- v8conversions.cc
- v8threads.cc
- v8utils.cc
- variables.cc
- version.cc
- zone.cc
- """),
- 'arch:arm': Split("""
- arm/builtins-arm.cc
- arm/code-stubs-arm.cc
- arm/codegen-arm.cc
- arm/constants-arm.cc
- arm/cpu-arm.cc
- arm/debug-arm.cc
- arm/deoptimizer-arm.cc
- arm/disasm-arm.cc
- arm/frames-arm.cc
- arm/full-codegen-arm.cc
- arm/ic-arm.cc
- arm/lithium-arm.cc
- arm/lithium-codegen-arm.cc
- arm/lithium-gap-resolver-arm.cc
- arm/macro-assembler-arm.cc
- arm/regexp-macro-assembler-arm.cc
- arm/stub-cache-arm.cc
- arm/assembler-arm.cc
- """),
- 'arch:mips': Split("""
- mips/assembler-mips.cc
- mips/builtins-mips.cc
- mips/code-stubs-mips.cc
- mips/codegen-mips.cc
- mips/constants-mips.cc
- mips/cpu-mips.cc
- mips/debug-mips.cc
- mips/deoptimizer-mips.cc
- mips/disasm-mips.cc
- mips/frames-mips.cc
- mips/full-codegen-mips.cc
- mips/ic-mips.cc
- mips/lithium-codegen-mips.cc
- mips/lithium-gap-resolver-mips.cc
- mips/lithium-mips.cc
- mips/macro-assembler-mips.cc
- mips/regexp-macro-assembler-mips.cc
- mips/stub-cache-mips.cc
- """),
- 'arch:ia32': Split("""
- ia32/assembler-ia32.cc
- ia32/builtins-ia32.cc
- ia32/code-stubs-ia32.cc
- ia32/codegen-ia32.cc
- ia32/cpu-ia32.cc
- ia32/debug-ia32.cc
- ia32/deoptimizer-ia32.cc
- ia32/disasm-ia32.cc
- ia32/frames-ia32.cc
- ia32/full-codegen-ia32.cc
- ia32/ic-ia32.cc
- ia32/lithium-codegen-ia32.cc
- ia32/lithium-gap-resolver-ia32.cc
- ia32/lithium-ia32.cc
- ia32/macro-assembler-ia32.cc
- ia32/regexp-macro-assembler-ia32.cc
- ia32/stub-cache-ia32.cc
- """),
- 'arch:x64': Split("""
- x64/assembler-x64.cc
- x64/builtins-x64.cc
- x64/code-stubs-x64.cc
- x64/codegen-x64.cc
- x64/cpu-x64.cc
- x64/debug-x64.cc
- x64/deoptimizer-x64.cc
- x64/disasm-x64.cc
- x64/frames-x64.cc
- x64/full-codegen-x64.cc
- x64/ic-x64.cc
- x64/lithium-codegen-x64.cc
- x64/lithium-gap-resolver-x64.cc
- x64/lithium-x64.cc
- x64/macro-assembler-x64.cc
- x64/regexp-macro-assembler-x64.cc
- x64/stub-cache-x64.cc
- """),
- 'simulator:arm': ['arm/simulator-arm.cc'],
- 'simulator:mips': ['mips/simulator-mips.cc'],
- 'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
- 'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
- 'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
- 'os:android': ['platform-linux.cc', 'platform-posix.cc'],
- 'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
- 'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
- 'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'],
- 'os:nullos': ['platform-nullos.cc'],
- 'os:win32': ['platform-win32.cc', 'win32-math.cc'],
- 'mode:release': [],
- 'mode:debug': [
- 'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
- ]
-}
-
-
-PREPARSER_SOURCES = {
- 'all': Split("""
- allocation.cc
- bignum.cc
- bignum-dtoa.cc
- cached-powers.cc
- conversions.cc
- diy-fp.cc
- dtoa.cc
- fast-dtoa.cc
- fixed-dtoa.cc
- preparse-data.cc
- preparser.cc
- preparser-api.cc
- scanner.cc
- strtod.cc
- token.cc
- unicode.cc
- utils.cc
- """),
- 'os:win32': ['win32-math.cc']
-}
-
-
-D8_LIGHT_FILES = {
- 'all': [
- 'd8.cc'
- ]
-}
-
-
-D8_FULL_FILES = {
- 'all': [
- 'd8.cc', 'd8-debug.cc'
- ],
- 'os:linux': [
- 'd8-posix.cc'
- ],
- 'os:macos': [
- 'd8-posix.cc'
- ],
- 'os:android': [
- 'd8-posix.cc'
- ],
- 'os:freebsd': [
- 'd8-posix.cc'
- ],
- 'os:openbsd': [
- 'd8-posix.cc'
- ],
- 'os:solaris': [
- 'd8-posix.cc'
- ],
- 'os:cygwin': [
- 'd8-posix.cc'
- ],
- 'os:win32': [
- 'd8-windows.cc'
- ],
- 'os:nullos': [
- 'd8-windows.cc' # Empty implementation at the moment.
- ],
- 'console:readline': [
- 'd8-readline.cc'
- ]
-}
-
-
-LIBRARY_FILES = '''
-runtime.js
-v8natives.js
-array.js
-string.js
-uri.js
-math.js
-messages.js
-apinatives.js
-date.js
-regexp.js
-json.js
-liveedit-debugger.js
-mirror-debugger.js
-debug-debugger.js
-'''.split()
-
-
-EXPERIMENTAL_LIBRARY_FILES = '''
-symbol.js
-proxy.js
-collection.js
-'''.split()
-
-
-def Abort(message):
- print message
- sys.exit(1)
-
-
-def ConfigureObjectFiles():
- env = Environment(tools=tools)
- env.Replace(**context.flags['v8'])
- context.ApplyEnvOverrides(env)
- env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
- env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
-
- def BuildJS2CEnv(type):
- js2c_env = { 'TYPE': type, 'COMPRESSION': 'off' }
- if 'COMPRESS_STARTUP_DATA_BZ2' in env['CPPDEFINES']:
- js2c_env['COMPRESSION'] = 'bz2'
- return js2c_env
-
- # Build the standard platform-independent source files.
- source_files = context.GetRelevantSources(SOURCES)
- d8_js = env.JS2C('d8-js.cc', 'd8.js', **{'TYPE': 'D8', 'COMPRESSION': 'off'})
- d8_js_obj = context.ConfigureObject(env, d8_js, CPPPATH=['.'])
- if context.options['library'] == 'shared':
- d8_files = context.GetRelevantSources(D8_LIGHT_FILES)
- d8_objs = []
- else:
- d8_files = context.GetRelevantSources(D8_FULL_FILES)
- d8_objs = [d8_js_obj]
- d8_objs.append(context.ConfigureObject(d8_env, [d8_files]))
-
- # Combine the JavaScript library files into a single C++ file and
- # compile it.
- library_files = [s for s in LIBRARY_FILES]
- library_files.append('macros.py')
- libraries_src = env.JS2C(
- ['libraries.cc'], library_files, **BuildJS2CEnv('CORE'))
- libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])
-
- # Combine the experimental JavaScript library files into a C++ file
- # and compile it.
- experimental_library_files = [ s for s in EXPERIMENTAL_LIBRARY_FILES ]
- experimental_library_files.append('macros.py')
- experimental_libraries_src = env.JS2C(['experimental-libraries.cc'],
- experimental_library_files,
- **BuildJS2CEnv('EXPERIMENTAL'))
- experimental_libraries_obj = context.ConfigureObject(env, experimental_libraries_src, CPPPATH=['.'])
-
- source_objs = context.ConfigureObject(env, source_files)
- non_snapshot_files = [source_objs]
-
- preparser_source_files = context.GetRelevantSources(PREPARSER_SOURCES)
- preparser_objs = context.ConfigureObject(env, preparser_source_files)
-
- # Create snapshot if necessary. For cross compilation you should either
- # do without snapshots and take the performance hit or you should build a
- # host VM with the simulator=arm and snapshot=on options and then take the
- # resulting snapshot.cc file from obj/release and put it in the src
- # directory. Then rebuild the VM with the cross compiler and specify
- # snapshot=nobuild on the scons command line.
- empty_snapshot_obj = context.ConfigureObject(env, 'snapshot-empty.cc')
- mksnapshot_env = env.Copy()
- mksnapshot_env.Replace(**context.flags['mksnapshot'])
- mksnapshot_src = 'mksnapshot.cc'
- mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, experimental_libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
- if context.use_snapshot:
- if context.build_snapshot:
- snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
- else:
- snapshot_cc = 'snapshot.cc'
- snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
- else:
- snapshot_obj = empty_snapshot_obj
- library_objs = [non_snapshot_files, libraries_obj, experimental_libraries_obj, snapshot_obj]
- return (library_objs, d8_objs, [mksnapshot], preparser_objs)
-
-
-(library_objs, d8_objs, mksnapshot, preparser_objs) = ConfigureObjectFiles()
-Return('library_objs d8_objs mksnapshot preparser_objs')
diff --git a/src/3rdparty/v8/src/accessors.cc b/src/3rdparty/v8/src/accessors.cc
deleted file mode 100644
index 57062be..0000000
--- a/src/3rdparty/v8/src/accessors.cc
+++ /dev/null
@@ -1,907 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "accessors.h"
-
-#include "contexts.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "factory.h"
-#include "frames-inl.h"
-#include "isolate.h"
-#include "list-inl.h"
-#include "property-details.h"
-
-namespace v8 {
-namespace internal {
-
-
-template <class C>
-static C* FindInstanceOf(Isolate* isolate, Object* obj) {
- for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype(isolate)) {
- if (Is<C>(cur)) return C::cast(cur);
- }
- return NULL;
-}
-
-
-// Entry point that never should be called.
-MaybeObject* Accessors::IllegalSetter(JSObject*, Object*, void*) {
- UNREACHABLE();
- return NULL;
-}
-
-
-Object* Accessors::IllegalGetAccessor(Object* object, void*) {
- UNREACHABLE();
- return object;
-}
-
-
-MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
- // According to ECMA-262, section 8.6.2.2, page 28, setting
- // read-only properties must be silently ignored.
- return value;
-}
-
-
-//
-// Accessors::ArrayLength
-//
-
-
-MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
- // Traverse the prototype chain until we reach an array.
- JSArray* holder = FindInstanceOf<JSArray>(Isolate::Current(), object);
- return holder == NULL ? Smi::FromInt(0) : holder->length();
-}
-
-
-// The helper function will 'flatten' Number objects.
-Object* Accessors::FlattenNumber(Object* value) {
- if (value->IsNumber() || !value->IsJSValue()) return value;
- JSValue* wrapper = JSValue::cast(value);
- ASSERT(Isolate::Current()->context()->native_context()->number_function()->
- has_initial_map());
- Map* number_map = Isolate::Current()->context()->native_context()->
- number_function()->initial_map();
- if (wrapper->map() == number_map) return wrapper->value();
- return value;
-}
-
-
-MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
- Isolate* isolate = object->GetIsolate();
-
- // This means one of the object's prototypes is a JSArray and the
- // object does not have a 'length' property. Calling SetProperty
- // causes an infinite loop.
- if (!object->IsJSArray()) {
- return object->SetLocalPropertyIgnoreAttributes(
- isolate->heap()->length_string(), value, NONE);
- }
-
- value = FlattenNumber(value);
-
- // Need to call methods that may trigger GC.
- HandleScope scope(isolate);
-
- // Protect raw pointers.
- Handle<JSArray> array_handle(JSArray::cast(object), isolate);
- Handle<Object> value_handle(value, isolate);
-
- bool has_exception;
- Handle<Object> uint32_v = Execution::ToUint32(value_handle, &has_exception);
- if (has_exception) return Failure::Exception();
- Handle<Object> number_v = Execution::ToNumber(value_handle, &has_exception);
- if (has_exception) return Failure::Exception();
-
- if (uint32_v->Number() == number_v->Number()) {
- return array_handle->SetElementsLength(*uint32_v);
- }
- return isolate->Throw(
- *isolate->factory()->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
-}
-
-
-const AccessorDescriptor Accessors::ArrayLength = {
- ArrayGetLength,
- ArraySetLength,
- 0
-};
-
-
-//
-// Accessors::StringLength
-//
-
-
-MaybeObject* Accessors::StringGetLength(Object* object, void*) {
- Object* value = object;
- if (object->IsJSValue()) value = JSValue::cast(object)->value();
- if (value->IsString()) return Smi::FromInt(String::cast(value)->length());
- // If object is not a string we return 0 to be compatible with WebKit.
- // Note: Firefox returns the length of ToString(object).
- return Smi::FromInt(0);
-}
-
-
-const AccessorDescriptor Accessors::StringLength = {
- StringGetLength,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptSource
-//
-
-
-MaybeObject* Accessors::ScriptGetSource(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->source();
-}
-
-
-const AccessorDescriptor Accessors::ScriptSource = {
- ScriptGetSource,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptName
-//
-
-
-MaybeObject* Accessors::ScriptGetName(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->name();
-}
-
-
-const AccessorDescriptor Accessors::ScriptName = {
- ScriptGetName,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptId
-//
-
-
-MaybeObject* Accessors::ScriptGetId(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->id();
-}
-
-
-const AccessorDescriptor Accessors::ScriptId = {
- ScriptGetId,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptLineOffset
-//
-
-
-MaybeObject* Accessors::ScriptGetLineOffset(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->line_offset();
-}
-
-
-const AccessorDescriptor Accessors::ScriptLineOffset = {
- ScriptGetLineOffset,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptColumnOffset
-//
-
-
-MaybeObject* Accessors::ScriptGetColumnOffset(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->column_offset();
-}
-
-
-const AccessorDescriptor Accessors::ScriptColumnOffset = {
- ScriptGetColumnOffset,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptData
-//
-
-
-MaybeObject* Accessors::ScriptGetData(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->data();
-}
-
-
-const AccessorDescriptor Accessors::ScriptData = {
- ScriptGetData,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptType
-//
-
-
-MaybeObject* Accessors::ScriptGetType(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->type();
-}
-
-
-const AccessorDescriptor Accessors::ScriptType = {
- ScriptGetType,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptCompilationType
-//
-
-
-MaybeObject* Accessors::ScriptGetCompilationType(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->compilation_type();
-}
-
-
-const AccessorDescriptor Accessors::ScriptCompilationType = {
- ScriptGetCompilationType,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptGetLineEnds
-//
-
-
-MaybeObject* Accessors::ScriptGetLineEnds(Object* object, void*) {
- JSValue* wrapper = JSValue::cast(object);
- Isolate* isolate = wrapper->GetIsolate();
- HandleScope scope(isolate);
- Handle<Script> script(Script::cast(wrapper->value()), isolate);
- InitScriptLineEnds(script);
- ASSERT(script->line_ends()->IsFixedArray());
- Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
- // We do not want anyone to modify this array from JS.
- ASSERT(*line_ends == isolate->heap()->empty_fixed_array() ||
- line_ends->map() == isolate->heap()->fixed_cow_array_map());
- Handle<JSArray> js_array =
- isolate->factory()->NewJSArrayWithElements(line_ends);
- return *js_array;
-}
-
-
-const AccessorDescriptor Accessors::ScriptLineEnds = {
- ScriptGetLineEnds,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptGetContextData
-//
-
-
-MaybeObject* Accessors::ScriptGetContextData(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->context_data();
-}
-
-
-const AccessorDescriptor Accessors::ScriptContextData = {
- ScriptGetContextData,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptGetEvalFromScript
-//
-
-
-MaybeObject* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- if (!Script::cast(script)->eval_from_shared()->IsUndefined()) {
- Handle<SharedFunctionInfo> eval_from_shared(
- SharedFunctionInfo::cast(Script::cast(script)->eval_from_shared()));
-
- if (eval_from_shared->script()->IsScript()) {
- Handle<Script> eval_from_script(Script::cast(eval_from_shared->script()));
- return *GetScriptWrapper(eval_from_script);
- }
- }
- return HEAP->undefined_value();
-}
-
-
-const AccessorDescriptor Accessors::ScriptEvalFromScript = {
- ScriptGetEvalFromScript,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptGetEvalFromScriptPosition
-//
-
-
-MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) {
- Script* raw_script = Script::cast(JSValue::cast(object)->value());
- HandleScope scope(raw_script->GetIsolate());
- Handle<Script> script(raw_script);
-
- // If this is not a script compiled through eval there is no eval position.
- int compilation_type = Smi::cast(script->compilation_type())->value();
- if (compilation_type != Script::COMPILATION_TYPE_EVAL) {
- return script->GetHeap()->undefined_value();
- }
-
- // Get the function from where eval was called and find the source position
- // from the instruction offset.
- Handle<Code> code(SharedFunctionInfo::cast(
- script->eval_from_shared())->code());
- return Smi::FromInt(code->SourcePosition(code->instruction_start() +
- script->eval_from_instructions_offset()->value()));
-}
-
-
-const AccessorDescriptor Accessors::ScriptEvalFromScriptPosition = {
- ScriptGetEvalFromScriptPosition,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptGetEvalFromFunctionName
-//
-
-
-MaybeObject* Accessors::ScriptGetEvalFromFunctionName(Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(
- Script::cast(script)->eval_from_shared()));
-
-
- // Find the name of the function calling eval.
- if (!shared->name()->IsUndefined()) {
- return shared->name();
- } else {
- return shared->inferred_name();
- }
-}
-
-
-const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
- ScriptGetEvalFromFunctionName,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::FunctionPrototype
-//
-
-
-MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
- JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
- if (function == NULL) return isolate->heap()->undefined_value();
- while (!function->should_have_prototype()) {
- function = FindInstanceOf<JSFunction>(isolate, function->GetPrototype());
- // There has to be one because we hit the getter.
- ASSERT(function != NULL);
- }
-
- if (!function->has_prototype()) {
- Object* prototype;
- { MaybeObject* maybe_prototype
- = isolate->heap()->AllocateFunctionPrototype(function);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- }
- Object* result;
- { MaybeObject* maybe_result = function->SetPrototype(prototype);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return function->prototype();
-}
-
-
-MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
- Object* value_raw,
- void*) {
- Isolate* isolate = object->GetIsolate();
- Heap* heap = isolate->heap();
- JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
- if (function_raw == NULL) return heap->undefined_value();
- if (!function_raw->should_have_prototype()) {
- // Since we hit this accessor, object will have no prototype property.
- return object->SetLocalPropertyIgnoreAttributes(heap->prototype_string(),
- value_raw,
- NONE);
- }
-
- HandleScope scope(isolate);
- Handle<JSFunction> function(function_raw, isolate);
- Handle<Object> value(value_raw, isolate);
-
- Handle<Object> old_value;
- bool is_observed =
- FLAG_harmony_observation &&
- *function == object &&
- function->map()->is_observed();
- if (is_observed) {
- if (function->has_prototype())
- old_value = handle(function->prototype(), isolate);
- else
- old_value = isolate->factory()->NewFunctionPrototype(function);
- }
-
- Handle<Object> result;
- MaybeObject* maybe_result = function->SetPrototype(*value);
- if (!maybe_result->ToHandle(&result, isolate)) return maybe_result;
- ASSERT(function->prototype() == *value);
-
- if (is_observed && !old_value->SameValue(*value)) {
- JSObject::EnqueueChangeRecord(
- function, "updated", isolate->factory()->prototype_string(), old_value);
- }
-
- return *function;
-}
-
-
-const AccessorDescriptor Accessors::FunctionPrototype = {
- FunctionGetPrototype,
- FunctionSetPrototype,
- 0
-};
-
-
-//
-// Accessors::FunctionLength
-//
-
-
-MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
- JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
- if (function == NULL) return Smi::FromInt(0);
- // Check if already compiled.
- if (function->shared()->is_compiled()) {
- return Smi::FromInt(function->shared()->length());
- }
- // If the function isn't compiled yet, the length is not computed correctly
- // yet. Compile it now and return the right length.
- HandleScope scope(isolate);
- Handle<JSFunction> handle(function);
- if (JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
- return Smi::FromInt(handle->shared()->length());
- }
- return Failure::Exception();
-}
-
-
-const AccessorDescriptor Accessors::FunctionLength = {
- FunctionGetLength,
- ReadOnlySetAccessor,
- 0
-};
-
-
-//
-// Accessors::FunctionName
-//
-
-
-MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
- JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
- return holder == NULL
- ? isolate->heap()->undefined_value()
- : holder->shared()->name();
-}
-
-
-const AccessorDescriptor Accessors::FunctionName = {
- FunctionGetName,
- ReadOnlySetAccessor,
- 0
-};
-
-
-//
-// Accessors::FunctionArguments
-//
-
-
-static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
- JavaScriptFrame* frame,
- Handle<JSFunction> inlined_function,
- int inlined_frame_index) {
- Isolate* isolate = inlined_function->GetIsolate();
- Factory* factory = isolate->factory();
- Vector<SlotRef> args_slots =
- SlotRef::ComputeSlotMappingForArguments(
- frame,
- inlined_frame_index,
- inlined_function->shared()->formal_parameter_count());
- int args_count = args_slots.length();
- Handle<JSObject> arguments =
- factory->NewArgumentsObject(inlined_function, args_count);
- Handle<FixedArray> array = factory->NewFixedArray(args_count);
- for (int i = 0; i < args_count; ++i) {
- Handle<Object> value = args_slots[i].GetValue(isolate);
- array->set(i, *value);
- }
- arguments->set_elements(*array);
- args_slots.Dispose();
-
- // Return the freshly allocated arguments object.
- return *arguments;
-}
-
-
-MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
- HandleScope scope(isolate);
- JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
- if (holder == NULL) return isolate->heap()->undefined_value();
- Handle<JSFunction> function(holder, isolate);
-
- if (function->shared()->native()) return isolate->heap()->null_value();
- // Find the top invocation of the function by traversing frames.
- List<JSFunction*> functions(2);
- for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- frame->GetFunctions(&functions);
- for (int i = functions.length() - 1; i >= 0; i--) {
- // Skip all frames that aren't invocations of the given function.
- if (functions[i] != *function) continue;
-
- if (i > 0) {
- // The function in question was inlined. Inlined functions have the
- // correct number of arguments and no allocated arguments object, so
- // we can construct a fresh one by interpreting the function's
- // deoptimization input data.
- return ConstructArgumentsObjectForInlinedFunction(frame, function, i);
- }
-
- if (!frame->is_optimized()) {
- // If there is an arguments variable in the stack, we return that.
- Handle<ScopeInfo> scope_info(function->shared()->scope_info());
- int index = scope_info->StackSlotIndex(
- isolate->heap()->arguments_string());
- if (index >= 0) {
- Handle<Object> arguments(frame->GetExpression(index), isolate);
- if (!arguments->IsArgumentsMarker()) return *arguments;
- }
- }
-
- // If there is no arguments variable in the stack or we have an
- // optimized frame, we find the frame that holds the actual arguments
- // passed to the function.
- it.AdvanceToArgumentsFrame();
- frame = it.frame();
-
- // Get the number of arguments and construct an arguments object
- // mirror for the right frame.
- const int length = frame->ComputeParametersCount();
- Handle<JSObject> arguments = isolate->factory()->NewArgumentsObject(
- function, length);
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
-
- // Copy the parameters to the arguments object.
- ASSERT(array->length() == length);
- for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
- arguments->set_elements(*array);
-
- // Return the freshly allocated arguments object.
- return *arguments;
- }
- functions.Rewind(0);
- }
-
- // No frame corresponding to the given function found. Return null.
- return isolate->heap()->null_value();
-}
-
-
-const AccessorDescriptor Accessors::FunctionArguments = {
- FunctionGetArguments,
- ReadOnlySetAccessor,
- 0
-};
-
-
-//
-// Accessors::FunctionCaller
-//
-
-
-class FrameFunctionIterator {
- public:
- FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise)
- : frame_iterator_(isolate),
- functions_(2),
- index_(0) {
- GetFunctions();
- }
- JSFunction* next() {
- if (functions_.length() == 0) return NULL;
- JSFunction* next_function = functions_[index_];
- index_--;
- if (index_ < 0) {
- GetFunctions();
- }
- return next_function;
- }
-
- // Iterate through functions until the first occurence of 'function'.
- // Returns true if 'function' is found, and false if the iterator ends
- // without finding it.
- bool Find(JSFunction* function) {
- JSFunction* next_function;
- do {
- next_function = next();
- if (next_function == function) return true;
- } while (next_function != NULL);
- return false;
- }
-
- private:
- void GetFunctions() {
- functions_.Rewind(0);
- if (frame_iterator_.done()) return;
- JavaScriptFrame* frame = frame_iterator_.frame();
- frame->GetFunctions(&functions_);
- ASSERT(functions_.length() > 0);
- frame_iterator_.Advance();
- index_ = functions_.length() - 1;
- }
- JavaScriptFrameIterator frame_iterator_;
- List<JSFunction*> functions_;
- int index_;
-};
-
-
-MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
- HandleScope scope(isolate);
- AssertNoAllocation no_alloc;
- JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
- if (holder == NULL) return isolate->heap()->undefined_value();
- if (holder->shared()->native()) return isolate->heap()->null_value();
- Handle<JSFunction> function(holder, isolate);
-
- FrameFunctionIterator it(isolate, no_alloc);
-
- // Find the function from the frames.
- if (!it.Find(*function)) {
- // No frame corresponding to the given function found. Return null.
- return isolate->heap()->null_value();
- }
-
- // Find previously called non-toplevel function.
- JSFunction* caller;
- do {
- caller = it.next();
- if (caller == NULL) return isolate->heap()->null_value();
- } while (caller->shared()->is_toplevel());
-
- // If caller is a built-in function and caller's caller is also built-in,
- // use that instead.
- JSFunction* potential_caller = caller;
- while (potential_caller != NULL && potential_caller->IsBuiltin()) {
- caller = potential_caller;
- potential_caller = it.next();
- }
- if (!caller->shared()->native() && potential_caller != NULL) {
- caller = potential_caller;
- }
- // If caller is bound, return null. This is compatible with JSC, and
- // allows us to make bound functions use the strict function map
- // and its associated throwing caller and arguments.
- if (caller->shared()->bound()) {
- return isolate->heap()->null_value();
- }
- // Censor if the caller is not a classic mode function.
- // Change from ES5, which used to throw, see:
- // https://bugs.ecmascript.org/show_bug.cgi?id=310
- if (!caller->shared()->is_classic_mode()) {
- return isolate->heap()->null_value();
- }
-
- return caller;
-}
-
-
-const AccessorDescriptor Accessors::FunctionCaller = {
- FunctionGetCaller,
- ReadOnlySetAccessor,
- 0
-};
-
-
-//
-// Accessors::ObjectPrototype
-//
-
-
-static inline Object* GetPrototypeSkipHiddenPrototypes(Isolate* isolate,
- Object* receiver) {
- Object* current = receiver->GetPrototype(isolate);
- while (current->IsJSObject() &&
- JSObject::cast(current)->map()->is_hidden_prototype()) {
- current = current->GetPrototype(isolate);
- }
- return current;
-}
-
-
-MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
- return GetPrototypeSkipHiddenPrototypes(Isolate::Current(), receiver);
-}
-
-
-MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver_raw,
- Object* value_raw,
- void*) {
- const bool kSkipHiddenPrototypes = true;
- // To be consistent with other Set functions, return the value.
- if (!(FLAG_harmony_observation && receiver_raw->map()->is_observed()))
- return receiver_raw->SetPrototype(value_raw, kSkipHiddenPrototypes);
-
- Isolate* isolate = receiver_raw->GetIsolate();
- HandleScope scope(isolate);
- Handle<JSObject> receiver(receiver_raw);
- Handle<Object> value(value_raw, isolate);
- Handle<Object> old_value(GetPrototypeSkipHiddenPrototypes(isolate, *receiver),
- isolate);
-
- MaybeObject* result = receiver->SetPrototype(*value, kSkipHiddenPrototypes);
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- Handle<Object> new_value(GetPrototypeSkipHiddenPrototypes(isolate, *receiver),
- isolate);
- if (!new_value->SameValue(*old_value)) {
- JSObject::EnqueueChangeRecord(receiver, "prototype",
- isolate->factory()->proto_string(),
- old_value);
- }
- return *hresult;
-}
-
-
-const AccessorDescriptor Accessors::ObjectPrototype = {
- ObjectGetPrototype,
- ObjectSetPrototype,
- 0
-};
-
-
-//
-// Accessors::MakeModuleExport
-//
-
-static v8::Handle<v8::Value> ModuleGetExport(
- v8::Local<v8::String> property,
- const v8::AccessorInfo& info) {
- JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
- Context* context = Context::cast(instance->context());
- ASSERT(context->IsModuleContext());
- int slot = info.Data()->Int32Value();
- Object* value = context->get(slot);
- Isolate* isolate = instance->GetIsolate();
- if (value->IsTheHole()) {
- Handle<String> name = v8::Utils::OpenHandle(*property);
- isolate->ScheduleThrow(
- *isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1)));
- return v8::Handle<v8::Value>();
- }
- return v8::Utils::ToLocal(Handle<Object>(value, isolate));
-}
-
-
-static void ModuleSetExport(
- v8::Local<v8::String> property,
- v8::Local<v8::Value> value,
- const v8::AccessorInfo& info) {
- JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
- Context* context = Context::cast(instance->context());
- ASSERT(context->IsModuleContext());
- int slot = info.Data()->Int32Value();
- Object* old_value = context->get(slot);
- if (old_value->IsTheHole()) {
- Handle<String> name = v8::Utils::OpenHandle(*property);
- Isolate* isolate = instance->GetIsolate();
- isolate->ScheduleThrow(
- *isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1)));
- return;
- }
- context->set(slot, *v8::Utils::OpenHandle(*value));
-}
-
-
-Handle<AccessorInfo> Accessors::MakeModuleExport(
- Handle<String> name,
- int index,
- PropertyAttributes attributes) {
- Factory* factory = name->GetIsolate()->factory();
- Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
- info->set_property_attributes(attributes);
- info->set_all_can_read(true);
- info->set_all_can_write(true);
- info->set_name(*name);
- info->set_data(Smi::FromInt(index));
- Handle<Object> getter = v8::FromCData(&ModuleGetExport);
- Handle<Object> setter = v8::FromCData(&ModuleSetExport);
- info->set_getter(*getter);
- if (!(attributes & ReadOnly)) info->set_setter(*setter);
- return info;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/accessors.h b/src/3rdparty/v8/src/accessors.h
deleted file mode 100644
index 250f742..0000000
--- a/src/3rdparty/v8/src/accessors.h
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ACCESSORS_H_
-#define V8_ACCESSORS_H_
-
-#include "allocation.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-// The list of accessor descriptors. This is a second-order macro
-// taking a macro to be applied to all accessor descriptor names.
-#define ACCESSOR_DESCRIPTOR_LIST(V) \
- V(FunctionPrototype) \
- V(FunctionLength) \
- V(FunctionName) \
- V(FunctionArguments) \
- V(FunctionCaller) \
- V(ArrayLength) \
- V(StringLength) \
- V(ScriptSource) \
- V(ScriptName) \
- V(ScriptId) \
- V(ScriptLineOffset) \
- V(ScriptColumnOffset) \
- V(ScriptData) \
- V(ScriptType) \
- V(ScriptCompilationType) \
- V(ScriptLineEnds) \
- V(ScriptContextData) \
- V(ScriptEvalFromScript) \
- V(ScriptEvalFromScriptPosition) \
- V(ScriptEvalFromFunctionName) \
- V(ObjectPrototype)
-
-// Accessors contains all predefined proxy accessors.
-
-class Accessors : public AllStatic {
- public:
- // Accessor descriptors.
-#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
- static const AccessorDescriptor name;
- ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
-#undef ACCESSOR_DESCRIPTOR_DECLARATION
-
- enum DescriptorId {
-#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
- k##name,
- ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
-#undef ACCESSOR_DESCRIPTOR_DECLARATION
- descriptorCount
- };
-
- // Accessor functions called directly from the runtime system.
- MUST_USE_RESULT static MaybeObject* FunctionGetPrototype(Object* object,
- void*);
- MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object,
- Object* value,
- void*);
- static MaybeObject* FunctionGetArguments(Object* object, void*);
-
- // Accessor infos.
- static Handle<AccessorInfo> MakeModuleExport(
- Handle<String> name, int index, PropertyAttributes attributes);
-
- private:
- // Accessor functions only used through the descriptor.
- static MaybeObject* FunctionGetLength(Object* object, void*);
- static MaybeObject* FunctionGetName(Object* object, void*);
- static MaybeObject* FunctionGetCaller(Object* object, void*);
- MUST_USE_RESULT static MaybeObject* ArraySetLength(JSObject* object,
- Object* value, void*);
- static MaybeObject* ArrayGetLength(Object* object, void*);
- static MaybeObject* StringGetLength(Object* object, void*);
- static MaybeObject* ScriptGetName(Object* object, void*);
- static MaybeObject* ScriptGetId(Object* object, void*);
- static MaybeObject* ScriptGetSource(Object* object, void*);
- static MaybeObject* ScriptGetLineOffset(Object* object, void*);
- static MaybeObject* ScriptGetColumnOffset(Object* object, void*);
- static MaybeObject* ScriptGetData(Object* object, void*);
- static MaybeObject* ScriptGetType(Object* object, void*);
- static MaybeObject* ScriptGetCompilationType(Object* object, void*);
- static MaybeObject* ScriptGetLineEnds(Object* object, void*);
- static MaybeObject* ScriptGetContextData(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromScript(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromScriptPosition(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromFunctionName(Object* object, void*);
- static MaybeObject* ObjectGetPrototype(Object* receiver, void*);
- static MaybeObject* ObjectSetPrototype(JSObject* receiver,
- Object* value,
- void*);
-
- // Helper functions.
- static Object* FlattenNumber(Object* value);
- static MaybeObject* IllegalSetter(JSObject*, Object*, void*);
- static Object* IllegalGetAccessor(Object* object, void*);
- static MaybeObject* ReadOnlySetAccessor(JSObject*, Object* value, void*);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ACCESSORS_H_
diff --git a/src/3rdparty/v8/src/allocation-inl.h b/src/3rdparty/v8/src/allocation-inl.h
deleted file mode 100644
index d32db4b..0000000
--- a/src/3rdparty/v8/src/allocation-inl.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ALLOCATION_INL_H_
-#define V8_ALLOCATION_INL_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-
-void* PreallocatedStorageAllocationPolicy::New(size_t size) {
- return Isolate::Current()->PreallocatedStorageNew(size);
-}
-
-
-void PreallocatedStorageAllocationPolicy::Delete(void* p) {
- return Isolate::Current()->PreallocatedStorageDelete(p);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ALLOCATION_INL_H_
diff --git a/src/3rdparty/v8/src/allocation.cc b/src/3rdparty/v8/src/allocation.cc
deleted file mode 100644
index 6c7a08c..0000000
--- a/src/3rdparty/v8/src/allocation.cc
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "allocation.h"
-
-#include <stdlib.h> // For free, malloc.
-#include <string.h> // For memcpy.
-#include "checks.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-void* Malloced::New(size_t size) {
- void* result = malloc(size);
- if (result == NULL) {
- v8::internal::FatalProcessOutOfMemory("Malloced operator new");
- }
- return result;
-}
-
-
-void Malloced::Delete(void* p) {
- free(p);
-}
-
-
-void Malloced::FatalProcessOutOfMemory() {
- v8::internal::FatalProcessOutOfMemory("Out of memory");
-}
-
-
-#ifdef DEBUG
-
-static void* invalid = static_cast<void*>(NULL);
-
-void* Embedded::operator new(size_t size) {
- UNREACHABLE();
- return invalid;
-}
-
-
-void Embedded::operator delete(void* p) {
- UNREACHABLE();
-}
-
-
-void* AllStatic::operator new(size_t size) {
- UNREACHABLE();
- return invalid;
-}
-
-
-void AllStatic::operator delete(void* p) {
- UNREACHABLE();
-}
-
-#endif
-
-
-char* StrDup(const char* str) {
- int length = StrLength(str);
- char* result = NewArray<char>(length + 1);
- memcpy(result, str, length);
- result[length] = '\0';
- return result;
-}
-
-
-char* StrNDup(const char* str, int n) {
- int length = StrLength(str);
- if (n < length) length = n;
- char* result = NewArray<char>(length + 1);
- memcpy(result, str, length);
- result[length] = '\0';
- return result;
-}
-
-
-void PreallocatedStorage::LinkTo(PreallocatedStorage* other) {
- next_ = other->next_;
- other->next_->previous_ = this;
- previous_ = other;
- other->next_ = this;
-}
-
-
-void PreallocatedStorage::Unlink() {
- next_->previous_ = previous_;
- previous_->next_ = next_;
-}
-
-
-PreallocatedStorage::PreallocatedStorage(size_t size)
- : size_(size) {
- previous_ = next_ = this;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/allocation.h b/src/3rdparty/v8/src/allocation.h
deleted file mode 100644
index 45bde4c..0000000
--- a/src/3rdparty/v8/src/allocation.h
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ALLOCATION_H_
-#define V8_ALLOCATION_H_
-
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-// Called when allocation routines fail to allocate.
-// This function should not return, but should terminate the current
-// processing.
-void FatalProcessOutOfMemory(const char* message);
-
-// Superclass for classes managed with new & delete.
-class Malloced {
- public:
- void* operator new(size_t size) { return New(size); }
- void operator delete(void* p) { Delete(p); }
-
- static void FatalProcessOutOfMemory();
- static void* New(size_t size);
- static void Delete(void* p);
-};
-
-
-// A macro is used for defining the base class used for embedded instances.
-// The reason is some compilers allocate a minimum of one word for the
-// superclass. The macro prevents the use of new & delete in debug mode.
-// In release mode we are not willing to pay this overhead.
-
-#ifdef DEBUG
-// Superclass for classes with instances allocated inside stack
-// activations or inside other objects.
-class Embedded {
- public:
- void* operator new(size_t size);
- void operator delete(void* p);
-};
-#define BASE_EMBEDDED : public Embedded
-#else
-#define BASE_EMBEDDED
-#endif
-
-
-// Superclass for classes only using statics.
-class AllStatic {
-#ifdef DEBUG
- public:
- void* operator new(size_t size);
- void operator delete(void* p);
-#endif
-};
-
-
-template <typename T>
-T* NewArray(size_t size) {
- T* result = new T[size];
- if (result == NULL) Malloced::FatalProcessOutOfMemory();
- return result;
-}
-
-
-template <typename T>
-void DeleteArray(T* array) {
- delete[] array;
-}
-
-
-// The normal strdup functions use malloc. These versions of StrDup
-// and StrNDup uses new and calls the FatalProcessOutOfMemory handler
-// if allocation fails.
-char* StrDup(const char* str);
-char* StrNDup(const char* str, int n);
-
-
-// Allocation policy for allocating in the C free store using malloc
-// and free. Used as the default policy for lists.
-class FreeStoreAllocationPolicy {
- public:
- INLINE(void* New(size_t size)) { return Malloced::New(size); }
- INLINE(static void Delete(void* p)) { Malloced::Delete(p); }
-};
-
-
-// Allocation policy for allocating in preallocated space.
-// Used as an allocation policy for ScopeInfo when generating
-// stack traces.
-class PreallocatedStorage {
- public:
- explicit PreallocatedStorage(size_t size);
- size_t size() { return size_; }
-
- private:
- size_t size_;
- PreallocatedStorage* previous_;
- PreallocatedStorage* next_;
-
- void LinkTo(PreallocatedStorage* other);
- void Unlink();
-
- friend class Isolate;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(PreallocatedStorage);
-};
-
-
-struct PreallocatedStorageAllocationPolicy {
- INLINE(void* New(size_t size));
- INLINE(static void Delete(void* ptr));
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ALLOCATION_H_
diff --git a/src/3rdparty/v8/src/api.cc b/src/3rdparty/v8/src/api.cc
deleted file mode 100644
index 1804a50..0000000
--- a/src/3rdparty/v8/src/api.cc
+++ /dev/null
@@ -1,6933 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "api.h"
-
-#include <math.h> // For isnan.
-#include <string.h> // For memcpy, strlen.
-#include "../include/v8-debug.h"
-#include "../include/v8-profiler.h"
-#include "../include/v8-testing.h"
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "compiler.h"
-#include "conversions-inl.h"
-#include "counters.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "heap-profiler.h"
-#include "heap-snapshot-generator-inl.h"
-#include "messages.h"
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-#include "natives.h"
-#endif
-#include "parser.h"
-#include "platform.h"
-#include "profile-generator-inl.h"
-#include "property-details.h"
-#include "property.h"
-#include "runtime-profiler.h"
-#include "scanner-character-streams.h"
-#include "snapshot.h"
-#include "unicode-inl.h"
-#include "v8threads.h"
-#include "version.h"
-#include "vm-state-inl.h"
-
-
-#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
-
-#define ENTER_V8(isolate) \
- ASSERT((isolate)->IsInitialized()); \
- i::VMState __state__((isolate), i::OTHER)
-#define LEAVE_V8(isolate) \
- i::VMState __state__((isolate), i::EXTERNAL)
-
-namespace v8 {
-
-#define ON_BAILOUT(isolate, location, code) \
- if (IsDeadCheck(isolate, location) || \
- IsExecutionTerminatingCheck(isolate)) { \
- code; \
- UNREACHABLE(); \
- }
-
-
-#define EXCEPTION_PREAMBLE(isolate) \
- (isolate)->handle_scope_implementer()->IncrementCallDepth(); \
- ASSERT(!(isolate)->external_caught_exception()); \
- bool has_pending_exception = false
-
-
-#define EXCEPTION_BAILOUT_CHECK_GENERIC(isolate, value, do_callback) \
- do { \
- i::HandleScopeImplementer* handle_scope_implementer = \
- (isolate)->handle_scope_implementer(); \
- handle_scope_implementer->DecrementCallDepth(); \
- if (has_pending_exception) { \
- if (handle_scope_implementer->CallDepthIsZero() && \
- (isolate)->is_out_of_memory()) { \
- if (!(isolate)->ignore_out_of_memory()) \
- i::V8::FatalProcessOutOfMemory(NULL); \
- } \
- bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero(); \
- (isolate)->OptionalRescheduleException(call_depth_is_zero); \
- do_callback \
- return value; \
- } \
- do_callback \
- } while (false)
-
-
-#define EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, value) \
- EXCEPTION_BAILOUT_CHECK_GENERIC( \
- isolate, value, i::V8::FireCallCompletedCallback(isolate);)
-
-
-#define EXCEPTION_BAILOUT_CHECK(isolate, value) \
- EXCEPTION_BAILOUT_CHECK_GENERIC(isolate, value, ;)
-
-
-#define API_ENTRY_CHECK(isolate, msg) \
- do { \
- if (v8::Locker::IsActive()) { \
- ApiCheck(isolate->thread_manager()->IsLockedByCurrentThread(), \
- msg, \
- "Entering the V8 API without proper locking in place"); \
- } \
- } while (false)
-
-
-// --- E x c e p t i o n B e h a v i o r ---
-
-
-static void DefaultFatalErrorHandler(const char* location,
- const char* message) {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->IsInitialized()) {
- i::VMState __state__(isolate, i::OTHER);
- API_Fatal(location, message);
- } else {
- API_Fatal(location, message);
- }
-}
-
-
-static FatalErrorCallback GetFatalErrorHandler() {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->exception_behavior() == NULL) {
- isolate->set_exception_behavior(DefaultFatalErrorHandler);
- }
- return isolate->exception_behavior();
-}
-
-
-void i::FatalProcessOutOfMemory(const char* location) {
- i::V8::FatalProcessOutOfMemory(location, false);
-}
-
-
-// When V8 cannot allocated memory FatalProcessOutOfMemory is called.
-// The default fatal error handler is called and execution is stopped.
-void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
- i::HeapStats heap_stats;
- int start_marker;
- heap_stats.start_marker = &start_marker;
- int new_space_size;
- heap_stats.new_space_size = &new_space_size;
- int new_space_capacity;
- heap_stats.new_space_capacity = &new_space_capacity;
- intptr_t old_pointer_space_size;
- heap_stats.old_pointer_space_size = &old_pointer_space_size;
- intptr_t old_pointer_space_capacity;
- heap_stats.old_pointer_space_capacity = &old_pointer_space_capacity;
- intptr_t old_data_space_size;
- heap_stats.old_data_space_size = &old_data_space_size;
- intptr_t old_data_space_capacity;
- heap_stats.old_data_space_capacity = &old_data_space_capacity;
- intptr_t code_space_size;
- heap_stats.code_space_size = &code_space_size;
- intptr_t code_space_capacity;
- heap_stats.code_space_capacity = &code_space_capacity;
- intptr_t map_space_size;
- heap_stats.map_space_size = &map_space_size;
- intptr_t map_space_capacity;
- heap_stats.map_space_capacity = &map_space_capacity;
- intptr_t cell_space_size;
- heap_stats.cell_space_size = &cell_space_size;
- intptr_t cell_space_capacity;
- heap_stats.cell_space_capacity = &cell_space_capacity;
- intptr_t lo_space_size;
- heap_stats.lo_space_size = &lo_space_size;
- int global_handle_count;
- heap_stats.global_handle_count = &global_handle_count;
- int weak_global_handle_count;
- heap_stats.weak_global_handle_count = &weak_global_handle_count;
- int pending_global_handle_count;
- heap_stats.pending_global_handle_count = &pending_global_handle_count;
- int near_death_global_handle_count;
- heap_stats.near_death_global_handle_count = &near_death_global_handle_count;
- int free_global_handle_count;
- heap_stats.free_global_handle_count = &free_global_handle_count;
- intptr_t memory_allocator_size;
- heap_stats.memory_allocator_size = &memory_allocator_size;
- intptr_t memory_allocator_capacity;
- heap_stats.memory_allocator_capacity = &memory_allocator_capacity;
- int objects_per_type[LAST_TYPE + 1] = {0};
- heap_stats.objects_per_type = objects_per_type;
- int size_per_type[LAST_TYPE + 1] = {0};
- heap_stats.size_per_type = size_per_type;
- int os_error;
- heap_stats.os_error = &os_error;
- int end_marker;
- heap_stats.end_marker = &end_marker;
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->heap()->HasBeenSetUp()) {
- // BUG(1718): Don't use the take_snapshot since we don't support
- // HeapIterator here without doing a special GC.
- isolate->heap()->RecordStats(&heap_stats, false);
- }
- i::V8::SetFatalError();
- FatalErrorCallback callback = GetFatalErrorHandler();
- const char* message = "Allocation failed - process out of memory";
- {
- if (isolate->IsInitialized()) {
- LEAVE_V8(isolate);
- callback(location, message);
- } else {
- callback(location, message);
- }
- }
- // If the callback returns, we stop execution.
- UNREACHABLE();
-}
-
-
-bool Utils::ReportApiFailure(const char* location, const char* message) {
- FatalErrorCallback callback = GetFatalErrorHandler();
- callback(location, message);
- i::V8::SetFatalError();
- return false;
-}
-
-
-bool V8::IsDead() {
- return i::V8::IsDead();
-}
-
-
-static inline bool ApiCheck(bool condition,
- const char* location,
- const char* message) {
- return condition ? true : Utils::ReportApiFailure(location, message);
-}
-
-
-static bool ReportV8Dead(const char* location) {
- FatalErrorCallback callback = GetFatalErrorHandler();
- callback(location, "V8 is no longer usable");
- return true;
-}
-
-
-static bool ReportEmptyHandle(const char* location) {
- FatalErrorCallback callback = GetFatalErrorHandler();
- callback(location, "Reading from empty handle");
- return true;
-}
-
-
-/**
- * IsDeadCheck checks that the vm is usable. If, for instance, the vm has been
- * out of memory at some point this check will fail. It should be called on
- * entry to all methods that touch anything in the heap, except destructors
- * which you sometimes can't avoid calling after the vm has crashed. Functions
- * that call EnsureInitialized or ON_BAILOUT don't have to also call
- * IsDeadCheck. ON_BAILOUT has the advantage over EnsureInitialized that you
- * can arrange to return if the VM is dead. This is needed to ensure that no VM
- * heap allocations are attempted on a dead VM. EnsureInitialized has the
- * advantage over ON_BAILOUT that it actually initializes the VM if this has not
- * yet been done.
- */
-static inline bool IsDeadCheck(i::Isolate* isolate, const char* location) {
- return !isolate->IsInitialized()
- && i::V8::IsDead() ? ReportV8Dead(location) : false;
-}
-
-
-static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
- if (!isolate->IsInitialized()) return false;
- if (isolate->has_scheduled_exception()) {
- return isolate->scheduled_exception() ==
- isolate->heap()->termination_exception();
- }
- return false;
-}
-
-
-static inline bool EmptyCheck(const char* location, v8::Handle<v8::Data> obj) {
- return obj.IsEmpty() ? ReportEmptyHandle(location) : false;
-}
-
-
-static inline bool EmptyCheck(const char* location, const v8::Data* obj) {
- return (obj == 0) ? ReportEmptyHandle(location) : false;
-}
-
-// --- S t a t i c s ---
-
-
-static bool InitializeHelper() {
- if (i::Snapshot::Initialize()) return true;
- return i::V8::Initialize(NULL);
-}
-
-
-static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
- const char* location) {
- if (IsDeadCheck(isolate, location)) return false;
- if (isolate != NULL) {
- if (isolate->IsInitialized()) return true;
- }
- ASSERT(isolate == i::Isolate::Current());
- return ApiCheck(InitializeHelper(), location, "Error initializing V8");
-}
-
-// Some initializing API functions are called early and may be
-// called on a thread different from static initializer thread.
-// If Isolate API is used, Isolate::Enter() will initialize TLS so
-// Isolate::Current() works. If it's a legacy case, then the thread
-// may not have TLS initialized yet. However, in initializing APIs it
-// may be too early to call EnsureInitialized() - some pre-init
-// parameters still have to be configured.
-static inline i::Isolate* EnterIsolateIfNeeded() {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate != NULL)
- return isolate;
-
- i::Isolate::EnterDefaultIsolate();
- isolate = i::Isolate::Current();
- return isolate;
-}
-
-
-StartupDataDecompressor::StartupDataDecompressor()
- : raw_data(i::NewArray<char*>(V8::GetCompressedStartupDataCount())) {
- for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
- raw_data[i] = NULL;
- }
-}
-
-
-StartupDataDecompressor::~StartupDataDecompressor() {
- for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
- i::DeleteArray(raw_data[i]);
- }
- i::DeleteArray(raw_data);
-}
-
-
-int StartupDataDecompressor::Decompress() {
- int compressed_data_count = V8::GetCompressedStartupDataCount();
- StartupData* compressed_data =
- i::NewArray<StartupData>(compressed_data_count);
- V8::GetCompressedStartupData(compressed_data);
- for (int i = 0; i < compressed_data_count; ++i) {
- char* decompressed = raw_data[i] =
- i::NewArray<char>(compressed_data[i].raw_size);
- if (compressed_data[i].compressed_size != 0) {
- int result = DecompressData(decompressed,
- &compressed_data[i].raw_size,
- compressed_data[i].data,
- compressed_data[i].compressed_size);
- if (result != 0) return result;
- } else {
- ASSERT_EQ(0, compressed_data[i].raw_size);
- }
- compressed_data[i].data = decompressed;
- }
- V8::SetDecompressedStartupData(compressed_data);
- i::DeleteArray(compressed_data);
- return 0;
-}
-
-
-StartupData::CompressionAlgorithm V8::GetCompressedStartupDataAlgorithm() {
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- return StartupData::kBZip2;
-#else
- return StartupData::kUncompressed;
-#endif
-}
-
-
-enum CompressedStartupDataItems {
- kSnapshot = 0,
- kSnapshotContext,
- kLibraries,
- kExperimentalLibraries,
- kCompressedStartupDataCount
-};
-
-int V8::GetCompressedStartupDataCount() {
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- return kCompressedStartupDataCount;
-#else
- return 0;
-#endif
-}
-
-
-void V8::GetCompressedStartupData(StartupData* compressed_data) {
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- compressed_data[kSnapshot].data =
- reinterpret_cast<const char*>(i::Snapshot::data());
- compressed_data[kSnapshot].compressed_size = i::Snapshot::size();
- compressed_data[kSnapshot].raw_size = i::Snapshot::raw_size();
-
- compressed_data[kSnapshotContext].data =
- reinterpret_cast<const char*>(i::Snapshot::context_data());
- compressed_data[kSnapshotContext].compressed_size =
- i::Snapshot::context_size();
- compressed_data[kSnapshotContext].raw_size = i::Snapshot::context_raw_size();
-
- i::Vector<const i::byte> libraries_source = i::Natives::GetScriptsSource();
- compressed_data[kLibraries].data =
- reinterpret_cast<const char*>(libraries_source.start());
- compressed_data[kLibraries].compressed_size = libraries_source.length();
- compressed_data[kLibraries].raw_size = i::Natives::GetRawScriptsSize();
-
- i::Vector<const i::byte> exp_libraries_source =
- i::ExperimentalNatives::GetScriptsSource();
- compressed_data[kExperimentalLibraries].data =
- reinterpret_cast<const char*>(exp_libraries_source.start());
- compressed_data[kExperimentalLibraries].compressed_size =
- exp_libraries_source.length();
- compressed_data[kExperimentalLibraries].raw_size =
- i::ExperimentalNatives::GetRawScriptsSize();
-#endif
-}
-
-
-void V8::SetDecompressedStartupData(StartupData* decompressed_data) {
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- ASSERT_EQ(i::Snapshot::raw_size(), decompressed_data[kSnapshot].raw_size);
- i::Snapshot::set_raw_data(
- reinterpret_cast<const i::byte*>(decompressed_data[kSnapshot].data));
-
- ASSERT_EQ(i::Snapshot::context_raw_size(),
- decompressed_data[kSnapshotContext].raw_size);
- i::Snapshot::set_context_raw_data(
- reinterpret_cast<const i::byte*>(
- decompressed_data[kSnapshotContext].data));
-
- ASSERT_EQ(i::Natives::GetRawScriptsSize(),
- decompressed_data[kLibraries].raw_size);
- i::Vector<const char> libraries_source(
- decompressed_data[kLibraries].data,
- decompressed_data[kLibraries].raw_size);
- i::Natives::SetRawScriptsSource(libraries_source);
-
- ASSERT_EQ(i::ExperimentalNatives::GetRawScriptsSize(),
- decompressed_data[kExperimentalLibraries].raw_size);
- i::Vector<const char> exp_libraries_source(
- decompressed_data[kExperimentalLibraries].data,
- decompressed_data[kExperimentalLibraries].raw_size);
- i::ExperimentalNatives::SetRawScriptsSource(exp_libraries_source);
-#endif
-}
-
-
-void V8::SetFatalErrorHandler(FatalErrorCallback that) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
- isolate->set_exception_behavior(that);
-}
-
-
-void V8::SetAllowCodeGenerationFromStringsCallback(
- AllowCodeGenerationFromStringsCallback callback) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
- isolate->set_allow_code_gen_callback(callback);
-}
-
-
-#ifdef DEBUG
-void ImplementationUtilities::ZapHandleRange(i::Object** begin,
- i::Object** end) {
- i::HandleScope::ZapRange(begin, end);
-}
-#endif
-
-
-void V8::SetFlagsFromString(const char* str, int length) {
- i::FlagList::SetFlagsFromString(str, length);
-}
-
-
-void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) {
- i::FlagList::SetFlagsFromCommandLine(argc, argv, remove_flags);
-}
-
-
-v8::Handle<Value> ThrowException(v8::Handle<v8::Value> value) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::ThrowException()")) {
- return v8::Handle<Value>();
- }
- ENTER_V8(isolate);
- // If we're passed an empty handle, we throw an undefined exception
- // to deal more gracefully with out of memory situations.
- if (value.IsEmpty()) {
- isolate->ScheduleThrow(isolate->heap()->undefined_value());
- } else {
- isolate->ScheduleThrow(*Utils::OpenHandle(*value));
- }
- return v8::Undefined();
-}
-
-
-RegisteredExtension* RegisteredExtension::first_extension_ = NULL;
-
-
-RegisteredExtension::RegisteredExtension(Extension* extension)
- : extension_(extension) { }
-
-
-void RegisteredExtension::Register(RegisteredExtension* that) {
- that->next_ = first_extension_;
- first_extension_ = that;
-}
-
-
-void RegisteredExtension::UnregisterAll() {
- RegisteredExtension* re = first_extension_;
- while (re != NULL) {
- RegisteredExtension* next = re->next();
- delete re;
- re = next;
- }
-}
-
-
-void RegisterExtension(Extension* that) {
- RegisteredExtension* extension = new RegisteredExtension(that);
- RegisteredExtension::Register(extension);
-}
-
-
-Extension::Extension(const char* name,
- const char* source,
- int dep_count,
- const char** deps,
- int source_length)
- : name_(name),
- source_length_(source_length >= 0 ?
- source_length :
- (source ? static_cast<int>(strlen(source)) : 0)),
- source_(source, source_length_),
- dep_count_(dep_count),
- deps_(deps),
- auto_enable_(false) {
- CHECK(source != NULL || source_length_ == 0);
-}
-
-
-v8::Handle<Primitive> Undefined() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::Undefined()")) {
- return v8::Handle<v8::Primitive>();
- }
- return v8::Handle<Primitive>(ToApi<Primitive>(
- isolate->factory()->undefined_value()));
-}
-
-
-v8::Handle<Primitive> Null() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::Null()")) {
- return v8::Handle<v8::Primitive>();
- }
- return v8::Handle<Primitive>(
- ToApi<Primitive>(isolate->factory()->null_value()));
-}
-
-
-v8::Handle<Boolean> True() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::True()")) {
- return v8::Handle<Boolean>();
- }
- return v8::Handle<Boolean>(
- ToApi<Boolean>(isolate->factory()->true_value()));
-}
-
-
-v8::Handle<Boolean> False() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::False()")) {
- return v8::Handle<Boolean>();
- }
- return v8::Handle<Boolean>(
- ToApi<Boolean>(isolate->factory()->false_value()));
-}
-
-
-ResourceConstraints::ResourceConstraints()
- : max_young_space_size_(0),
- max_old_space_size_(0),
- max_executable_size_(0),
- stack_limit_(NULL) { }
-
-
-bool SetResourceConstraints(ResourceConstraints* constraints) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
-
- int young_space_size = constraints->max_young_space_size();
- int old_gen_size = constraints->max_old_space_size();
- int max_executable_size = constraints->max_executable_size();
- if (young_space_size != 0 || old_gen_size != 0 || max_executable_size != 0) {
- // After initialization it's too late to change Heap constraints.
- ASSERT(!isolate->IsInitialized());
- bool result = isolate->heap()->ConfigureHeap(young_space_size / 2,
- old_gen_size,
- max_executable_size);
- if (!result) return false;
- }
- if (constraints->stack_limit() != NULL) {
- uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
- isolate->stack_guard()->SetStackLimit(limit);
- }
- return true;
-}
-
-
-i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
- if (IsDeadCheck(isolate, "V8::Persistent::New")) return NULL;
- LOG_API(isolate, "Persistent::New");
- i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
- return result.location();
-}
-
-
-void V8::MakeWeak(i::Isolate* isolate,
- i::Object** object,
- void* parameters,
- WeakReferenceCallback weak_reference_callback,
- NearDeathCallback near_death_callback) {
- ASSERT(isolate == i::Isolate::Current());
- LOG_API(isolate, "MakeWeak");
- isolate->global_handles()->MakeWeak(object,
- parameters,
- weak_reference_callback,
- near_death_callback);
-}
-
-
-void V8::ClearWeak(i::Isolate* isolate, i::Object** obj) {
- LOG_API(isolate, "ClearWeak");
- isolate->global_handles()->ClearWeakness(obj);
-}
-
-
-void V8::DisposeGlobal(i::Isolate* isolate, i::Object** obj) {
- ASSERT(isolate == i::Isolate::Current());
- LOG_API(isolate, "DisposeGlobal");
- if (!isolate->IsInitialized()) return;
- isolate->global_handles()->Destroy(obj);
-}
-
-// --- H a n d l e s ---
-
-
-HandleScope::HandleScope() {
- i::Isolate* isolate = i::Isolate::Current();
- API_ENTRY_CHECK(isolate, "HandleScope::HandleScope");
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
- isolate_ = isolate;
- prev_next_ = current->next;
- prev_limit_ = current->limit;
- is_closed_ = false;
- current->level++;
-}
-
-
-HandleScope::~HandleScope() {
- if (!is_closed_) {
- Leave();
- }
-}
-
-
-void HandleScope::Leave() {
- ASSERT(isolate_ == i::Isolate::Current());
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- current->level--;
- ASSERT(current->level >= 0);
- current->next = prev_next_;
- if (current->limit != prev_limit_) {
- current->limit = prev_limit_;
- i::HandleScope::DeleteExtensions(isolate_);
- }
-
-#ifdef DEBUG
- i::HandleScope::ZapRange(prev_next_, prev_limit_);
-#endif
-}
-
-
-int HandleScope::NumberOfHandles() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "HandleScope::NumberOfHandles")) {
- return 0;
- }
- return i::HandleScope::NumberOfHandles(isolate);
-}
-
-
-i::Object** HandleScope::CreateHandle(i::Object* value) {
- return i::HandleScope::CreateHandle(i::Isolate::Current(), value);
-}
-
-
-i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
- ASSERT(isolate == i::Isolate::Current());
- return i::HandleScope::CreateHandle(isolate, value);
-}
-
-
-i::Object** HandleScope::CreateHandle(i::HeapObject* value) {
- ASSERT(value->IsHeapObject());
- return reinterpret_cast<i::Object**>(
- i::HandleScope::CreateHandle(value->GetIsolate(), value));
-}
-
-
-void Context::Enter() {
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Isolate* isolate = env->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Context::Enter()")) return;
- ENTER_V8(isolate);
-
- isolate->handle_scope_implementer()->EnterContext(env);
-
- isolate->handle_scope_implementer()->SaveContext(isolate->context());
- isolate->set_context(*env);
-}
-
-
-void Context::Exit() {
- // Exit is essentially a static function and doesn't use the
- // receiver, so we have to get the current isolate from the thread
- // local.
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return;
-
- if (!ApiCheck(isolate->handle_scope_implementer()->LeaveLastContext(),
- "v8::Context::Exit()",
- "Cannot exit non-entered context")) {
- return;
- }
-
- // Content of 'last_context' could be NULL.
- i::Context* last_context =
- isolate->handle_scope_implementer()->RestoreContext();
- isolate->set_context(last_context);
- isolate->set_context_exit_happened(true);
-}
-
-
-static void* DecodeSmiToAligned(i::Object* value, const char* location) {
- ApiCheck(value->IsSmi(), location, "Not a Smi");
- return reinterpret_cast<void*>(value);
-}
-
-
-static i::Smi* EncodeAlignedAsSmi(void* value, const char* location) {
- i::Smi* smi = reinterpret_cast<i::Smi*>(value);
- ApiCheck(smi->IsSmi(), location, "Pointer is not aligned");
- return smi;
-}
-
-
-static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
- int index,
- bool can_grow,
- const char* location) {
- i::Handle<i::Context> env = Utils::OpenHandle(context);
- bool ok = !IsDeadCheck(env->GetIsolate(), location) &&
- ApiCheck(env->IsNativeContext(), location, "Not a native context") &&
- ApiCheck(index >= 0, location, "Negative index");
- if (!ok) return i::Handle<i::FixedArray>();
- i::Handle<i::FixedArray> data(env->embedder_data());
- if (index < data->length()) return data;
- if (!can_grow) {
- Utils::ReportApiFailure(location, "Index too large");
- return i::Handle<i::FixedArray>();
- }
- int new_size = i::Max(index, data->length() << 1) + 1;
- data = env->GetIsolate()->factory()->CopySizeFixedArray(data, new_size);
- env->set_embedder_data(*data);
- return data;
-}
-
-
-v8::Local<v8::Value> Context::SlowGetEmbedderData(int index) {
- const char* location = "v8::Context::GetEmbedderData()";
- i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
- if (data.is_null()) return Local<Value>();
- i::Handle<i::Object> result(data->get(index), data->GetIsolate());
- return Utils::ToLocal(result);
-}
-
-
-void Context::SetEmbedderData(int index, v8::Handle<Value> value) {
- const char* location = "v8::Context::SetEmbedderData()";
- i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
- if (data.is_null()) return;
- i::Handle<i::Object> val = Utils::OpenHandle(*value);
- data->set(index, *val);
- ASSERT_EQ(*Utils::OpenHandle(*value),
- *Utils::OpenHandle(*GetEmbedderData(index)));
-}
-
-
-void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
- const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()";
- i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
- if (data.is_null()) return NULL;
- return DecodeSmiToAligned(data->get(index), location);
-}
-
-
-void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
- const char* location = "v8::Context::SetAlignedPointerInEmbedderData()";
- i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
- data->set(index, EncodeAlignedAsSmi(value, location));
- ASSERT_EQ(value, GetAlignedPointerFromEmbedderData(index));
-}
-
-
-i::Object** v8::HandleScope::RawClose(i::Object** value) {
- if (!ApiCheck(!is_closed_,
- "v8::HandleScope::Close()",
- "Local scope has already been closed")) {
- return 0;
- }
- LOG_API(isolate_, "CloseHandleScope");
-
- // Read the result before popping the handle block.
- i::Object* result = NULL;
- if (value != NULL) {
- result = *value;
- }
- is_closed_ = true;
- Leave();
-
- if (value == NULL) {
- return NULL;
- }
-
- // Allocate a new handle on the previous handle block.
- i::Handle<i::Object> handle(result, isolate_);
- return handle.location();
-}
-
-
-// --- N e a n d e r ---
-
-
-// A constructor cannot easily return an error value, therefore it is necessary
-// to check for a dead VM with ON_BAILOUT before constructing any Neander
-// objects. To remind you about this there is no HandleScope in the
-// NeanderObject constructor. When you add one to the site calling the
-// constructor you should check that you ensured the VM was not dead first.
-NeanderObject::NeanderObject(int size) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Nowhere");
- ENTER_V8(isolate);
- value_ = isolate->factory()->NewNeanderObject();
- i::Handle<i::FixedArray> elements = isolate->factory()->NewFixedArray(size);
- value_->set_elements(*elements);
-}
-
-
-int NeanderObject::size() {
- return i::FixedArray::cast(value_->elements())->length();
-}
-
-
-NeanderArray::NeanderArray() : obj_(2) {
- obj_.set(0, i::Smi::FromInt(0));
-}
-
-
-int NeanderArray::length() {
- return i::Smi::cast(obj_.get(0))->value();
-}
-
-
-i::Object* NeanderArray::get(int offset) {
- ASSERT(0 <= offset);
- ASSERT(offset < length());
- return obj_.get(offset + 1);
-}
-
-
-// This method cannot easily return an error value, therefore it is necessary
-// to check for a dead VM with ON_BAILOUT before calling it. To remind you
-// about this there is no HandleScope in this method. When you add one to the
-// site calling this method you should check that you ensured the VM was not
-// dead first.
-void NeanderArray::add(i::Handle<i::Object> value) {
- int length = this->length();
- int size = obj_.size();
- if (length == size - 1) {
- i::Handle<i::FixedArray> new_elms = FACTORY->NewFixedArray(2 * size);
- for (int i = 0; i < length; i++)
- new_elms->set(i + 1, get(i));
- obj_.value()->set_elements(*new_elms);
- }
- obj_.set(length + 1, *value);
- obj_.set(0, i::Smi::FromInt(length + 1));
-}
-
-
-void NeanderArray::set(int index, i::Object* value) {
- if (index < 0 || index >= this->length()) return;
- obj_.set(index + 1, value);
-}
-
-
-// --- T e m p l a t e ---
-
-
-static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
- that->set_tag(i::Smi::FromInt(type));
-}
-
-
-void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
- v8::PropertyAttribute attribute) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Template::Set()")) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list(), isolate);
- if (list->IsUndefined()) {
- list = NeanderArray().value();
- Utils::OpenHandle(this)->set_property_list(*list);
- }
- NeanderArray array(list);
- array.add(Utils::OpenHandle(*name));
- array.add(Utils::OpenHandle(*value));
- array.add(Utils::OpenHandle(*v8::Integer::New(attribute)));
-}
-
-
-// --- F u n c t i o n T e m p l a t e ---
-static void InitializeFunctionTemplate(
- i::Handle<i::FunctionTemplateInfo> info) {
- info->set_tag(i::Smi::FromInt(Consts::FUNCTION_TEMPLATE));
- info->set_flag(0);
-}
-
-
-Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::PrototypeTemplate()")) {
- return Local<ObjectTemplate>();
- }
- ENTER_V8(isolate);
- i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
- isolate);
- if (result->IsUndefined()) {
- result = Utils::OpenHandle(*ObjectTemplate::New());
- Utils::OpenHandle(this)->set_prototype_template(*result);
- }
- return Local<ObjectTemplate>(ToApi<ObjectTemplate>(result));
-}
-
-
-void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::Inherit()")) return;
- ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_parent_template(*Utils::OpenHandle(*value));
-}
-
-
-Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
- v8::Handle<Value> data, v8::Handle<Signature> signature, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
- LOG_API(isolate, "FunctionTemplate::New");
- ENTER_V8(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
- i::Handle<i::FunctionTemplateInfo> obj =
- i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
- InitializeFunctionTemplate(obj);
- int next_serial_number = isolate->next_serial_number();
- isolate->set_next_serial_number(next_serial_number + 1);
- obj->set_serial_number(i::Smi::FromInt(next_serial_number));
- if (callback != 0) {
- if (data.IsEmpty()) data = v8::Undefined();
- Utils::ToLocal(obj)->SetCallHandler(callback, data);
- }
- obj->set_length(length);
- obj->set_undetectable(false);
- obj->set_needs_access_check(false);
-
- if (!signature.IsEmpty())
- obj->set_signature(*Utils::OpenHandle(*signature));
- return Utils::ToLocal(obj);
-}
-
-
-Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
- int argc, Handle<FunctionTemplate> argv[]) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Signature::New()");
- LOG_API(isolate, "Signature::New");
- ENTER_V8(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::SIGNATURE_INFO_TYPE);
- i::Handle<i::SignatureInfo> obj =
- i::Handle<i::SignatureInfo>::cast(struct_obj);
- if (!receiver.IsEmpty()) obj->set_receiver(*Utils::OpenHandle(*receiver));
- if (argc > 0) {
- i::Handle<i::FixedArray> args = isolate->factory()->NewFixedArray(argc);
- for (int i = 0; i < argc; i++) {
- if (!argv[i].IsEmpty())
- args->set(i, *Utils::OpenHandle(*argv[i]));
- }
- obj->set_args(*args);
- }
- return Utils::ToLocal(obj);
-}
-
-
-Local<AccessorSignature> AccessorSignature::New(
- Handle<FunctionTemplate> receiver) {
- return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver));
-}
-
-
-Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
- Handle<FunctionTemplate> types[1] = { type };
- return TypeSwitch::New(1, types);
-}
-
-
-Local<TypeSwitch> TypeSwitch::New(int argc, Handle<FunctionTemplate> types[]) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::TypeSwitch::New()");
- LOG_API(isolate, "TypeSwitch::New");
- ENTER_V8(isolate);
- i::Handle<i::FixedArray> vector = isolate->factory()->NewFixedArray(argc);
- for (int i = 0; i < argc; i++)
- vector->set(i, *Utils::OpenHandle(*types[i]));
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::TYPE_SWITCH_INFO_TYPE);
- i::Handle<i::TypeSwitchInfo> obj =
- i::Handle<i::TypeSwitchInfo>::cast(struct_obj);
- obj->set_types(*vector);
- return Utils::ToLocal(obj);
-}
-
-
-int TypeSwitch::match(v8::Handle<Value> value) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "TypeSwitch::match");
- USE(isolate);
- i::Handle<i::Object> obj = Utils::OpenHandle(*value);
- i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
- i::FixedArray* types = i::FixedArray::cast(info->types());
- for (int i = 0; i < types->length(); i++) {
- if (obj->IsInstanceOf(i::FunctionTemplateInfo::cast(types->get(i))))
- return i + 1;
- }
- return 0;
-}
-
-
-#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
- i::Handle<i::Object> foreign = FromCData(cdata); \
- (obj)->setter(*foreign); \
- } while (false)
-
-
-void FunctionTemplate::SetCallHandler(InvocationCallback callback,
- v8::Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
- i::Handle<i::CallHandlerInfo> obj =
- i::Handle<i::CallHandlerInfo>::cast(struct_obj);
- SET_FIELD_WRAPPED(obj, set_callback, callback);
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_call_code(*obj);
-}
-
-
-static i::Handle<i::AccessorInfo> MakeAccessorInfo(
- v8::Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- v8::AccessControl settings,
- v8::PropertyAttribute attributes,
- v8::Handle<AccessorSignature> signature) {
- i::Handle<i::ExecutableAccessorInfo> obj =
- FACTORY->NewExecutableAccessorInfo();
- SET_FIELD_WRAPPED(obj, set_getter, getter);
- SET_FIELD_WRAPPED(obj, set_setter, setter);
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- obj->set_name(*Utils::OpenHandle(*name));
- if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
- if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
- if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
- obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
- if (!signature.IsEmpty()) {
- obj->set_expected_receiver_type(*Utils::OpenHandle(*signature));
- }
- return obj;
-}
-
-
-void FunctionTemplate::AddInstancePropertyAccessor(
- v8::Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- v8::AccessControl settings,
- v8::PropertyAttribute attributes,
- v8::Handle<AccessorSignature> signature) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
-
- i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name, getter, setter, data,
- settings, attributes,
- signature);
- i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors(),
- isolate);
- if (list->IsUndefined()) {
- list = NeanderArray().value();
- Utils::OpenHandle(this)->set_property_accessors(*list);
- }
- NeanderArray array(list);
- array.add(obj);
-}
-
-
-Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::InstanceTemplate()")
- || EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
- return Local<ObjectTemplate>();
- ENTER_V8(isolate);
- if (Utils::OpenHandle(this)->instance_template()->IsUndefined()) {
- Local<ObjectTemplate> templ =
- ObjectTemplate::New(v8::Handle<FunctionTemplate>(this));
- Utils::OpenHandle(this)->set_instance_template(*Utils::OpenHandle(*templ));
- }
- i::Handle<i::ObjectTemplateInfo> result(i::ObjectTemplateInfo::cast(
- Utils::OpenHandle(this)->instance_template()));
- return Utils::ToLocal(result);
-}
-
-
-void FunctionTemplate::SetLength(int length) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetLength()")) return;
- ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_length(length);
-}
-
-
-void FunctionTemplate::SetClassName(Handle<String> name) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetClassName()")) return;
- ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_class_name(*Utils::OpenHandle(*name));
-}
-
-
-void FunctionTemplate::SetHiddenPrototype(bool value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetHiddenPrototype()")) {
- return;
- }
- ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_hidden_prototype(value);
-}
-
-
-void FunctionTemplate::ReadOnlyPrototype() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::ReadOnlyPrototype()")) {
- return;
- }
- ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_read_only_prototype(true);
-}
-
-
-void FunctionTemplate::SetNamedInstancePropertyHandler(
- NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- bool is_fallback,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
- i::Handle<i::InterceptorInfo> obj =
- i::Handle<i::InterceptorInfo>::cast(struct_obj);
-
- if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
- if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
- if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
- if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
- if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
- obj->set_is_fallback(i::Smi::FromInt(is_fallback));
-
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_named_property_handler(*obj);
-}
-
-
-void FunctionTemplate::SetIndexedInstancePropertyHandler(
- IndexedPropertyGetter getter,
- IndexedPropertySetter setter,
- IndexedPropertyQuery query,
- IndexedPropertyDeleter remover,
- IndexedPropertyEnumerator enumerator,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetIndexedInstancePropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
- i::Handle<i::InterceptorInfo> obj =
- i::Handle<i::InterceptorInfo>::cast(struct_obj);
-
- if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
- if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
- if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
- if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
- if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_indexed_property_handler(*obj);
-}
-
-
-void FunctionTemplate::SetInstanceCallAsFunctionHandler(
- InvocationCallback callback,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
- i::Handle<i::CallHandlerInfo> obj =
- i::Handle<i::CallHandlerInfo>::cast(struct_obj);
- SET_FIELD_WRAPPED(obj, set_callback, callback);
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_instance_call_handler(*obj);
-}
-
-
-// --- O b j e c t T e m p l a t e ---
-
-
-Local<ObjectTemplate> ObjectTemplate::New() {
- return New(Local<FunctionTemplate>());
-}
-
-
-Local<ObjectTemplate> ObjectTemplate::New(
- v8::Handle<FunctionTemplate> constructor) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::New()")) {
- return Local<ObjectTemplate>();
- }
- EnsureInitializedForIsolate(isolate, "v8::ObjectTemplate::New()");
- LOG_API(isolate, "ObjectTemplate::New");
- ENTER_V8(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
- i::Handle<i::ObjectTemplateInfo> obj =
- i::Handle<i::ObjectTemplateInfo>::cast(struct_obj);
- InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
- if (!constructor.IsEmpty())
- obj->set_constructor(*Utils::OpenHandle(*constructor));
- obj->set_internal_field_count(i::Smi::FromInt(0));
- return Utils::ToLocal(obj);
-}
-
-
-// Ensure that the object template has a constructor. If no
-// constructor is available we create one.
-static void EnsureConstructor(ObjectTemplate* object_template) {
- if (Utils::OpenHandle(object_template)->constructor()->IsUndefined()) {
- Local<FunctionTemplate> templ = FunctionTemplate::New();
- i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
- constructor->set_instance_template(*Utils::OpenHandle(object_template));
- Utils::OpenHandle(object_template)->set_constructor(*constructor);
- }
-}
-
-
-void ObjectTemplate::SetAccessor(v8::Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- AccessControl settings,
- PropertyAttribute attribute,
- v8::Handle<AccessorSignature> signature) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->AddInstancePropertyAccessor(name,
- getter,
- setter,
- data,
- settings,
- attribute,
- signature);
-}
-
-
-void ObjectTemplate::SetNamedPropertyHandler(
- NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
- setter,
- query,
- remover,
- enumerator,
- false,
- data);
-}
-
-
-void ObjectTemplate::SetFallbackPropertyHandler(
- NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::ObjectTemplate::SetFallbackPropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
- setter,
- query,
- remover,
- enumerator,
- true,
- data);
-}
-
-
-void ObjectTemplate::MarkAsUndetectable() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::MarkAsUndetectable()")) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- cons->set_undetectable(true);
-}
-
-
-void ObjectTemplate::SetAccessCheckCallbacks(
- NamedSecurityCallback named_callback,
- IndexedSecurityCallback indexed_callback,
- Handle<Value> data,
- bool turned_on_by_default) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessCheckCallbacks()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
-
- i::Handle<i::Struct> struct_info =
- isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE);
- i::Handle<i::AccessCheckInfo> info =
- i::Handle<i::AccessCheckInfo>::cast(struct_info);
-
- SET_FIELD_WRAPPED(info, set_named_callback, named_callback);
- SET_FIELD_WRAPPED(info, set_indexed_callback, indexed_callback);
-
- if (data.IsEmpty()) data = v8::Undefined();
- info->set_data(*Utils::OpenHandle(*data));
-
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- cons->set_access_check_info(*info);
- cons->set_needs_access_check(turned_on_by_default);
-}
-
-
-void ObjectTemplate::SetIndexedPropertyHandler(
- IndexedPropertyGetter getter,
- IndexedPropertySetter setter,
- IndexedPropertyQuery query,
- IndexedPropertyDeleter remover,
- IndexedPropertyEnumerator enumerator,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetIndexedPropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetIndexedInstancePropertyHandler(getter,
- setter,
- query,
- remover,
- enumerator,
- data);
-}
-
-
-void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::ObjectTemplate::SetCallAsFunctionHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetInstanceCallAsFunctionHandler(callback, data);
-}
-
-
-int ObjectTemplate::InternalFieldCount() {
- if (IsDeadCheck(Utils::OpenHandle(this)->GetIsolate(),
- "v8::ObjectTemplate::InternalFieldCount()")) {
- return 0;
- }
- return i::Smi::cast(Utils::OpenHandle(this)->internal_field_count())->value();
-}
-
-
-void ObjectTemplate::SetInternalFieldCount(int value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetInternalFieldCount()")) {
- return;
- }
- if (!ApiCheck(i::Smi::IsValid(value),
- "v8::ObjectTemplate::SetInternalFieldCount()",
- "Invalid internal field count")) {
- return;
- }
- ENTER_V8(isolate);
- if (value > 0) {
- // The internal field count is set by the constructor function's
- // construct code, so we ensure that there is a constructor
- // function to do the setting.
- EnsureConstructor(this);
- }
- Utils::OpenHandle(this)->set_internal_field_count(i::Smi::FromInt(value));
-}
-
-
-bool ObjectTemplate::HasExternalResource() {
- if (IsDeadCheck(Utils::OpenHandle(this)->GetIsolate(),
- "v8::ObjectTemplate::HasExternalResource()")) {
- return 0;
- }
- return !Utils::OpenHandle(this)->has_external_resource()->IsUndefined();
-}
-
-
-void ObjectTemplate::SetHasExternalResource(bool value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetHasExternalResource()")) {
- return;
- }
- ENTER_V8(isolate);
- if (value) {
- EnsureConstructor(this);
- Utils::OpenHandle(this)->set_has_external_resource(i::Smi::FromInt(1));
- } else {
- Utils::OpenHandle(this)->set_has_external_resource(
- Utils::OpenHandle(this)->GetHeap()->undefined_value());
- }
-}
-
-
-void ObjectTemplate::MarkAsUseUserObjectComparison() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::ObjectTemplate::MarkAsUseUserObjectComparison()")) {
- return;
- }
- ENTER_V8(isolate);
- EnsureConstructor(this);
- Utils::OpenHandle(this)->set_use_user_object_comparison(i::Smi::FromInt(1));
-}
-
-// --- S c r i p t D a t a ---
-
-
-ScriptData* ScriptData::PreCompile(const char* input, int length) {
- i::Utf8ToUtf16CharacterStream stream(
- reinterpret_cast<const unsigned char*>(input), length);
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
-}
-
-
-ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- if (str->IsExternalTwoByteString()) {
- i::ExternalTwoByteStringUtf16CharacterStream stream(
- i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
- } else {
- i::GenericStringUtf16CharacterStream stream(str, 0, str->length());
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
- }
-}
-
-
-ScriptData* ScriptData::New(const char* data, int length) {
- // Return an empty ScriptData if the length is obviously invalid.
- if (length % sizeof(unsigned) != 0) {
- return new i::ScriptDataImpl();
- }
-
- // Copy the data to ensure it is properly aligned.
- int deserialized_data_length = length / sizeof(unsigned);
- // If aligned, don't create a copy of the data.
- if (reinterpret_cast<intptr_t>(data) % sizeof(unsigned) == 0) {
- return new i::ScriptDataImpl(data, length);
- }
- // Copy the data to align it.
- unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
- i::OS::MemCopy(deserialized_data, data, length);
-
- return new i::ScriptDataImpl(
- i::Vector<unsigned>(deserialized_data, deserialized_data_length));
-}
-
-
-// --- S c r i p t ---
-
-
-Local<Script> Script::New(v8::Handle<String> source,
- v8::ScriptOrigin* origin,
- v8::ScriptData* pre_data,
- v8::Handle<String> script_data,
- v8::Script::CompileFlags compile_flags) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
- LOG_API(isolate, "Script::New");
- ENTER_V8(isolate);
- i::SharedFunctionInfo* raw_result = NULL;
- { i::HandleScope scope(isolate);
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- i::Handle<i::Object> name_obj;
- int line_offset = 0;
- int column_offset = 0;
- if (origin != NULL) {
- if (!origin->ResourceName().IsEmpty()) {
- name_obj = Utils::OpenHandle(*origin->ResourceName());
- }
- if (!origin->ResourceLineOffset().IsEmpty()) {
- line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
- }
- if (!origin->ResourceColumnOffset().IsEmpty()) {
- column_offset =
- static_cast<int>(origin->ResourceColumnOffset()->Value());
- }
- }
- EXCEPTION_PREAMBLE(isolate);
- i::ScriptDataImpl* pre_data_impl =
- static_cast<i::ScriptDataImpl*>(pre_data);
- // We assert that the pre-data is sane, even though we can actually
- // handle it if it turns out not to be in release mode.
- ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
- // If the pre-data isn't sane we simply ignore it
- if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
- pre_data_impl = NULL;
- }
- i::Handle<i::SharedFunctionInfo> result =
- i::Compiler::Compile(str,
- name_obj,
- line_offset,
- column_offset,
- isolate->global_context(),
- NULL,
- pre_data_impl,
- Utils::OpenHandle(*script_data, true),
- i::NOT_NATIVES_CODE,
- compile_flags);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
- raw_result = *result;
- }
- i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
- return Local<Script>(ToApi<Script>(result));
-}
-
-
-Local<Script> Script::New(v8::Handle<String> source,
- v8::Handle<Value> file_name,
- v8::Script::CompileFlags compile_flags) {
- ScriptOrigin origin(file_name);
- return New(source, &origin, 0, Handle<String>(), compile_flags);
-}
-
-
-Local<Script> Script::Compile(v8::Handle<String> source,
- v8::ScriptOrigin* origin,
- v8::ScriptData* pre_data,
- v8::Handle<String> script_data,
- v8::Script::CompileFlags compile_flags) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>());
- LOG_API(isolate, "Script::Compile");
- ENTER_V8(isolate);
- Local<Script> generic = New(source,
- origin,
- pre_data,
- script_data,
- compile_flags);
- if (generic.IsEmpty())
- return generic;
- i::Handle<i::Object> obj = Utils::OpenHandle(*generic);
- i::Handle<i::SharedFunctionInfo> function =
- i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
- i::Handle<i::JSFunction> result =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- function,
- isolate->global_context());
- return Local<Script>(ToApi<Script>(result));
-}
-
-
-Local<Script> Script::Compile(v8::Handle<String> source,
- v8::Handle<Value> file_name,
- v8::Handle<String> script_data,
- v8::Script::CompileFlags compile_flags) {
- ScriptOrigin origin(file_name);
- return Compile(source, &origin, 0, script_data, compile_flags);
-}
-
-
-Local<Value> Script::Run() {
- return Run(Handle<Object>());
-}
-
-Local<Value> Script::Run(Handle<Object> qml) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
- LOG_API(isolate, "Script::Run");
- ENTER_V8(isolate);
- i::Logger::TimerEventScope timer_scope(
- isolate, i::Logger::TimerEventScope::v8_execute);
- i::Object* raw_result = NULL;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSFunction> fun;
- if (obj->IsSharedFunctionInfo()) {
- i::Handle<i::SharedFunctionInfo>
- function_info(i::SharedFunctionInfo::cast(*obj), isolate);
- fun = isolate->factory()->NewFunctionFromSharedFunctionInfo(
- function_info, isolate->global_context());
- } else {
- fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj), isolate);
- }
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> qmlglobal = Utils::OpenHandle(*qml, true);
- i::Handle<i::Object> receiver(
- isolate->context()->global_proxy(), isolate);
- i::Handle<i::Object> result = i::Execution::Call(fun,
- receiver,
- 0,
- NULL,
- &has_pending_exception,
- false,
- qmlglobal);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
- raw_result = *result;
- }
- i::Handle<i::Object> result(raw_result, isolate);
- return Utils::ToLocal(result);
-}
-
-
-static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) {
- i::Handle<i::Object> obj = Utils::OpenHandle(script);
- i::Handle<i::SharedFunctionInfo> result;
- if (obj->IsSharedFunctionInfo()) {
- result =
- i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
- } else {
- result =
- i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared());
- }
- return result;
-}
-
-
-Local<Value> Script::Id() {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Script::Id()", return Local<Value>());
- LOG_API(isolate, "Script::Id");
- i::Object* raw_id = NULL;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- i::Handle<i::Object> id(script->id(), isolate);
- raw_id = *id;
- }
- i::Handle<i::Object> id(raw_id, isolate);
- return Utils::ToLocal(id);
-}
-
-
-void Script::SetData(v8::Handle<String> data) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Script::SetData()", return);
- LOG_API(isolate, "Script::SetData");
- {
- i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
- i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- script->set_data(*raw_data);
- }
-}
-
-
-// --- E x c e p t i o n s ---
-
-
-v8::TryCatch::TryCatch()
- : isolate_(i::Isolate::Current()),
- next_(isolate_->try_catch_handler_address()),
- exception_(isolate_->heap()->the_hole_value()),
- message_(i::Smi::FromInt(0)),
- is_verbose_(false),
- can_continue_(true),
- capture_message_(true),
- rethrow_(false) {
- isolate_->RegisterTryCatchHandler(this);
-}
-
-
-v8::TryCatch::~TryCatch() {
- ASSERT(isolate_ == i::Isolate::Current());
- if (rethrow_) {
- v8::HandleScope scope;
- v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception());
- isolate_->UnregisterTryCatchHandler(this);
- v8::ThrowException(exc);
- } else {
- isolate_->UnregisterTryCatchHandler(this);
- }
-}
-
-
-bool v8::TryCatch::HasCaught() const {
- return !reinterpret_cast<i::Object*>(exception_)->IsTheHole();
-}
-
-
-bool v8::TryCatch::CanContinue() const {
- return can_continue_;
-}
-
-
-v8::Handle<v8::Value> v8::TryCatch::ReThrow() {
- if (!HasCaught()) return v8::Local<v8::Value>();
- rethrow_ = true;
- return v8::Undefined();
-}
-
-
-v8::Local<Value> v8::TryCatch::Exception() const {
- ASSERT(isolate_ == i::Isolate::Current());
- if (HasCaught()) {
- // Check for out of memory exception.
- i::Object* exception = reinterpret_cast<i::Object*>(exception_);
- return v8::Utils::ToLocal(i::Handle<i::Object>(exception, isolate_));
- } else {
- return v8::Local<Value>();
- }
-}
-
-
-v8::Local<Value> v8::TryCatch::StackTrace() const {
- ASSERT(isolate_ == i::Isolate::Current());
- if (HasCaught()) {
- i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
- if (!raw_obj->IsJSObject()) return v8::Local<Value>();
- i::HandleScope scope(isolate_);
- i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
- i::Handle<i::String> name = isolate_->factory()->stack_string();
- if (!obj->HasProperty(*name)) return v8::Local<Value>();
- i::Handle<i::Object> value = i::GetProperty(isolate_, obj, name);
- if (value.is_null()) return v8::Local<Value>();
- return v8::Utils::ToLocal(scope.CloseAndEscape(value));
- } else {
- return v8::Local<Value>();
- }
-}
-
-
-v8::Local<v8::Message> v8::TryCatch::Message() const {
- ASSERT(isolate_ == i::Isolate::Current());
- if (HasCaught() && message_ != i::Smi::FromInt(0)) {
- i::Object* message = reinterpret_cast<i::Object*>(message_);
- return v8::Utils::MessageToLocal(i::Handle<i::Object>(message, isolate_));
- } else {
- return v8::Local<v8::Message>();
- }
-}
-
-
-void v8::TryCatch::Reset() {
- ASSERT(isolate_ == i::Isolate::Current());
- exception_ = isolate_->heap()->the_hole_value();
- message_ = i::Smi::FromInt(0);
-}
-
-
-void v8::TryCatch::SetVerbose(bool value) {
- is_verbose_ = value;
-}
-
-
-void v8::TryCatch::SetCaptureMessage(bool value) {
- capture_message_ = value;
-}
-
-
-// --- M e s s a g e ---
-
-
-Local<String> Message::Get() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Message::Get()", return Local<String>());
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(isolate, obj);
- Local<String> result = Utils::ToLocal(raw_result);
- return scope.Close(result);
-}
-
-
-v8::Handle<Value> Message::GetScriptResourceName() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceName()")) {
- return Local<String>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- // Return this.script.name.
- i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
- isolate));
- i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name(),
- isolate);
- return scope.Close(Utils::ToLocal(resource_name));
-}
-
-
-v8::Handle<Value> Message::GetScriptData() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceData()")) {
- return Local<Value>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- // Return this.script.data.
- i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
- isolate));
- i::Handle<i::Object> data(i::Script::cast(script->value())->data(), isolate);
- return scope.Close(Utils::ToLocal(data));
-}
-
-
-v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStackTrace()")) {
- return Local<v8::StackTrace>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::Object> stackFramesObj(message->stack_frames(), isolate);
- if (!stackFramesObj->IsJSArray()) return v8::Handle<v8::StackTrace>();
- i::Handle<i::JSArray> stackTrace =
- i::Handle<i::JSArray>::cast(stackFramesObj);
- return scope.Close(Utils::StackTraceToLocal(stackTrace));
-}
-
-
-static i::Handle<i::Object> CallV8HeapFunction(const char* name,
- i::Handle<i::Object> recv,
- int argc,
- i::Handle<i::Object> argv[],
- bool* has_pending_exception) {
- i::Isolate* isolate = i::Isolate::Current();
- i::Handle<i::String> fmt_str =
- isolate->factory()->InternalizeUtf8String(name);
- i::Object* object_fun =
- isolate->js_builtins_object()->GetPropertyNoExceptionThrown(*fmt_str);
- i::Handle<i::JSFunction> fun =
- i::Handle<i::JSFunction>(i::JSFunction::cast(object_fun));
- i::Handle<i::Object> value =
- i::Execution::Call(fun, recv, argc, argv, has_pending_exception);
- return value;
-}
-
-
-static i::Handle<i::Object> CallV8HeapFunction(const char* name,
- i::Handle<i::Object> data,
- bool* has_pending_exception) {
- i::Handle<i::Object> argv[] = { data };
- return CallV8HeapFunction(name,
- i::Isolate::Current()->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- has_pending_exception);
-}
-
-
-int Message::GetLineNumber() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Message::GetLineNumber()", return kNoLineNumberInfo);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
-
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = CallV8HeapFunction("GetLineNumber",
- Utils::OpenHandle(this),
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- return static_cast<int>(result->Number());
-}
-
-
-int Message::GetStartPosition() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStartPosition()")) return 0;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- return message->start_position();
-}
-
-
-int Message::GetEndPosition() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetEndPosition()")) return 0;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- return message->end_position();
-}
-
-
-int Message::GetStartColumn() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStartColumn()")) {
- return kNoColumnInfo;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
- "GetPositionInLine",
- data_obj,
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- return static_cast<int>(start_col_obj->Number());
-}
-
-
-int Message::GetEndColumn() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetEndColumn()")) return kNoColumnInfo;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
- "GetPositionInLine",
- data_obj,
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(data_obj);
- int start = message->start_position();
- int end = message->end_position();
- return static_cast<int>(start_col_obj->Number()) + (end - start);
-}
-
-
-Local<String> Message::GetSourceLine() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Message::GetSourceLine()", return Local<String>());
- ENTER_V8(isolate);
- HandleScope scope;
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = CallV8HeapFunction("GetSourceLine",
- Utils::OpenHandle(this),
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::String>());
- if (result->IsString()) {
- return scope.Close(Utils::ToLocal(i::Handle<i::String>::cast(result)));
- } else {
- return Local<String>();
- }
-}
-
-
-void Message::PrintCurrentStackTrace(FILE* out) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Message::PrintCurrentStackTrace()")) return;
- ENTER_V8(isolate);
- isolate->PrintCurrentStackTrace(out);
-}
-
-
-// --- S t a c k T r a c e ---
-
-Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::GetFrame()")) {
- return Local<StackFrame>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSArray> self = Utils::OpenHandle(this);
- i::Object* raw_object = self->GetElementNoExceptionThrown(index);
- i::Handle<i::JSObject> obj(i::JSObject::cast(raw_object));
- return scope.Close(Utils::StackFrameToLocal(obj));
-}
-
-
-int StackTrace::GetFrameCount() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::GetFrameCount()")) return -1;
- ENTER_V8(isolate);
- return i::Smi::cast(Utils::OpenHandle(this)->length())->value();
-}
-
-
-Local<Array> StackTrace::AsArray() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::AsArray()")) Local<Array>();
- ENTER_V8(isolate);
- return Utils::ToLocal(Utils::OpenHandle(this));
-}
-
-
-Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
- StackTraceOptions options) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StackTrace::CurrentStackTrace()")) {
- Local<StackTrace>();
- }
- ENTER_V8(isolate);
- i::Handle<i::JSArray> stackTrace =
- isolate->CaptureCurrentStackTrace(frame_limit, options);
- return Utils::StackTraceToLocal(stackTrace);
-}
-
-
-// --- S t a c k F r a m e ---
-
-int StackFrame::GetLineNumber() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetLineNumber()")) {
- return Message::kNoLineNumberInfo;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> line = GetProperty(self, "lineNumber");
- if (!line->IsSmi()) {
- return Message::kNoLineNumberInfo;
- }
- return i::Smi::cast(*line)->value();
-}
-
-
-int StackFrame::GetColumn() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetColumn()")) {
- return Message::kNoColumnInfo;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> column = GetProperty(self, "column");
- if (!column->IsSmi()) {
- return Message::kNoColumnInfo;
- }
- return i::Smi::cast(*column)->value();
-}
-
-
-Local<String> StackFrame::GetScriptName() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptName()")) {
- return Local<String>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> name = GetProperty(self, "scriptName");
- if (!name->IsString()) {
- return Local<String>();
- }
- return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
-}
-
-
-Local<String> StackFrame::GetScriptNameOrSourceURL() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptNameOrSourceURL()")) {
- return Local<String>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> name = GetProperty(self, "scriptNameOrSourceURL");
- if (!name->IsString()) {
- return Local<String>();
- }
- return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
-}
-
-
-Local<String> StackFrame::GetFunctionName() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetFunctionName()")) {
- return Local<String>();
- }
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> name = GetProperty(self, "functionName");
- if (!name->IsString()) {
- return Local<String>();
- }
- return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
-}
-
-
-bool StackFrame::IsEval() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::IsEval()")) return false;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> is_eval = GetProperty(self, "isEval");
- return is_eval->IsTrue();
-}
-
-
-bool StackFrame::IsConstructor() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::IsConstructor()")) return false;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> is_constructor = GetProperty(self, "isConstructor");
- return is_constructor->IsTrue();
-}
-
-
-// --- D a t a ---
-
-bool Value::FullIsUndefined() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUndefined()")) {
- return false;
- }
- bool result = Utils::OpenHandle(this)->IsUndefined();
- ASSERT_EQ(result, QuickIsUndefined());
- return result;
-}
-
-
-bool Value::FullIsNull() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNull()")) return false;
- bool result = Utils::OpenHandle(this)->IsNull();
- ASSERT_EQ(result, QuickIsNull());
- return result;
-}
-
-
-bool Value::IsTrue() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsTrue()")) return false;
- return Utils::OpenHandle(this)->IsTrue();
-}
-
-
-bool Value::IsFalse() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFalse()")) return false;
- return Utils::OpenHandle(this)->IsFalse();
-}
-
-
-bool Value::IsFunction() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFunction()")) {
- return false;
- }
- return Utils::OpenHandle(this)->IsJSFunction();
-}
-
-
-bool Value::FullIsString() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsString()")) return false;
- bool result = Utils::OpenHandle(this)->IsString();
- ASSERT_EQ(result, QuickIsString());
- return result;
-}
-
-
-bool Value::IsArray() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArray()")) return false;
- return Utils::OpenHandle(this)->IsJSArray();
-}
-
-
-bool Value::IsObject() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsObject()")) return false;
- return Utils::OpenHandle(this)->IsJSObject();
-}
-
-
-bool Value::IsNumber() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNumber()")) return false;
- return Utils::OpenHandle(this)->IsNumber();
-}
-
-
-bool Value::IsBoolean() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsBoolean()")) {
- return false;
- }
- return Utils::OpenHandle(this)->IsBoolean();
-}
-
-
-bool Value::IsExternal() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
- return false;
- }
- return Utils::OpenHandle(this)->IsExternal();
-}
-
-
-bool Value::IsInt32() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsInt32()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) return true;
- if (obj->IsNumber()) {
- double value = obj->Number();
- static const i::DoubleRepresentation minus_zero(-0.0);
- i::DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) {
- return false;
- }
- return i::FastI2D(i::FastD2I(value)) == value;
- }
- return false;
-}
-
-
-bool Value::IsUint32() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUint32()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
- if (obj->IsNumber()) {
- double value = obj->Number();
- static const i::DoubleRepresentation minus_zero(-0.0);
- i::DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) {
- return false;
- }
- return i::FastUI2D(i::FastD2UI(value)) == value;
- }
- return false;
-}
-
-
-bool Value::IsDate() const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsDate()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(isolate->heap()->Date_string());
-}
-
-
-bool Value::IsStringObject() const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsStringObject()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(isolate->heap()->String_string());
-}
-
-
-bool Value::IsNumberObject() const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsNumberObject()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(isolate->heap()->Number_string());
-}
-
-
-static i::Object* LookupBuiltin(i::Isolate* isolate,
- const char* builtin_name) {
- i::Handle<i::String> string =
- isolate->factory()->InternalizeUtf8String(builtin_name);
- i::Handle<i::JSBuiltinsObject> builtins = isolate->js_builtins_object();
- return builtins->GetPropertyNoExceptionThrown(*string);
-}
-
-
-static bool CheckConstructor(i::Isolate* isolate,
- i::Handle<i::JSObject> obj,
- const char* class_name) {
- i::Object* constr = obj->map()->constructor();
- if (!constr->IsJSFunction()) return false;
- i::JSFunction* func = i::JSFunction::cast(constr);
- return func->shared()->native() &&
- constr == LookupBuiltin(isolate, class_name);
-}
-
-
-bool Value::IsNativeError() const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsNativeError()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsJSObject()) {
- i::Handle<i::JSObject> js_obj(i::JSObject::cast(*obj));
- return CheckConstructor(isolate, js_obj, "$Error") ||
- CheckConstructor(isolate, js_obj, "$EvalError") ||
- CheckConstructor(isolate, js_obj, "$RangeError") ||
- CheckConstructor(isolate, js_obj, "$ReferenceError") ||
- CheckConstructor(isolate, js_obj, "$SyntaxError") ||
- CheckConstructor(isolate, js_obj, "$TypeError") ||
- CheckConstructor(isolate, js_obj, "$URIError");
- } else {
- return false;
- }
-}
-
-
-bool Value::IsBooleanObject() const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsBooleanObject()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(isolate->heap()->Boolean_string());
-}
-
-
-bool Value::IsRegExp() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsRegExp()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->IsJSRegExp();
-}
-
-bool Value::IsError() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsError()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(HEAP->Error_string());
-}
-
-
-Local<String> Value::ToString() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> str;
- if (obj->IsString()) {
- str = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToString()")) {
- return Local<String>();
- }
- LOG_API(isolate, "ToString");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- str = i::Execution::ToString(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
- }
- return Local<String>(ToApi<String>(str));
-}
-
-
-Local<String> Value::ToDetailString() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> str;
- if (obj->IsString()) {
- str = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToDetailString()")) {
- return Local<String>();
- }
- LOG_API(isolate, "ToDetailString");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- str = i::Execution::ToDetailString(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
- }
- return Local<String>(ToApi<String>(str));
-}
-
-
-Local<v8::Object> Value::ToObject() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> val;
- if (obj->IsJSObject()) {
- val = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToObject()")) {
- return Local<v8::Object>();
- }
- LOG_API(isolate, "ToObject");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- val = i::Execution::ToObject(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
- }
- return Local<v8::Object>(ToApi<Object>(val));
-}
-
-
-Local<Boolean> Value::ToBoolean() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsBoolean()) {
- return Local<Boolean>(ToApi<Boolean>(obj));
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToBoolean()")) {
- return Local<Boolean>();
- }
- LOG_API(isolate, "ToBoolean");
- ENTER_V8(isolate);
- i::Handle<i::Object> val = i::Execution::ToBoolean(isolate, obj);
- return Local<Boolean>(ToApi<Boolean>(val));
- }
-}
-
-
-Local<Number> Value::ToNumber() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsNumber()) {
- num = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToNumber()")) {
- return Local<Number>();
- }
- LOG_API(isolate, "ToNumber");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToNumber(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Number>());
- }
- return Local<Number>(ToApi<Number>(num));
-}
-
-
-Local<Integer> Value::ToInteger() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsSmi()) {
- num = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToInteger()")) return Local<Integer>();
- LOG_API(isolate, "ToInteger");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInteger(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Integer>());
- }
- return Local<Integer>(ToApi<Integer>(num));
-}
-
-
-void External::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
- ApiCheck(Utils::OpenHandle(that)->IsExternal(),
- "v8::External::Cast()",
- "Could not convert to external");
-}
-
-
-void v8::Object::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Object::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSObject(),
- "v8::Object::Cast()",
- "Could not convert to object");
-}
-
-
-void v8::Function::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Function::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSFunction(),
- "v8::Function::Cast()",
- "Could not convert to function");
-}
-
-
-void v8::String::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::String::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsString(),
- "v8::String::Cast()",
- "Could not convert to string");
-}
-
-
-void v8::Number::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsNumber(),
- "v8::Number::Cast()",
- "Could not convert to number");
-}
-
-
-void v8::Integer::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsNumber(),
- "v8::Integer::Cast()",
- "Could not convert to number");
-}
-
-
-void v8::Array::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Array::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSArray(),
- "v8::Array::Cast()",
- "Could not convert to array");
-}
-
-
-void v8::Date::CheckCast(v8::Value* that) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_string()),
- "v8::Date::Cast()",
- "Could not convert to date");
-}
-
-
-void v8::StringObject::CheckCast(v8::Value* that) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StringObject::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_string()),
- "v8::StringObject::Cast()",
- "Could not convert to StringObject");
-}
-
-
-void v8::NumberObject::CheckCast(v8::Value* that) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::NumberObject::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_string()),
- "v8::NumberObject::Cast()",
- "Could not convert to NumberObject");
-}
-
-
-void v8::BooleanObject::CheckCast(v8::Value* that) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::BooleanObject::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
- "v8::BooleanObject::Cast()",
- "Could not convert to BooleanObject");
-}
-
-
-void v8::RegExp::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSRegExp(),
- "v8::RegExp::Cast()",
- "Could not convert to regular expression");
-}
-
-
-bool Value::BooleanValue() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsBoolean()) {
- return obj->IsTrue();
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::BooleanValue()")) return false;
- LOG_API(isolate, "BooleanValue");
- ENTER_V8(isolate);
- i::Handle<i::Object> value = i::Execution::ToBoolean(isolate, obj);
- return value->IsTrue();
- }
-}
-
-
-double Value::NumberValue() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsNumber()) {
- num = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::NumberValue()")) {
- return i::OS::nan_value();
- }
- LOG_API(isolate, "NumberValue");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToNumber(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, i::OS::nan_value());
- }
- return num->Number();
-}
-
-
-int64_t Value::IntegerValue() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsNumber()) {
- num = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IntegerValue()")) return 0;
- LOG_API(isolate, "IntegerValue");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInteger(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- }
- if (num->IsSmi()) {
- return i::Smi::cast(*num)->value();
- } else {
- return static_cast<int64_t>(num->Number());
- }
-}
-
-
-Local<Int32> Value::ToInt32() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsSmi()) {
- num = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToInt32()")) return Local<Int32>();
- LOG_API(isolate, "ToInt32");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInt32(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Int32>());
- }
- return Local<Int32>(ToApi<Int32>(num));
-}
-
-
-Local<Uint32> Value::ToUint32() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsSmi()) {
- num = obj;
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToUint32()")) return Local<Uint32>();
- LOG_API(isolate, "ToUInt32");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToUint32(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
- }
- return Local<Uint32>(ToApi<Uint32>(num));
-}
-
-
-Local<Uint32> Value::ToArrayIndex() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- if (i::Smi::cast(*obj)->value() >= 0) return Utils::Uint32ToLocal(obj);
- return Local<Uint32>();
- }
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToArrayIndex()")) return Local<Uint32>();
- LOG_API(isolate, "ToArrayIndex");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> string_obj =
- i::Execution::ToString(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
- i::Handle<i::String> str = i::Handle<i::String>::cast(string_obj);
- uint32_t index;
- if (str->AsArrayIndex(&index)) {
- i::Handle<i::Object> value;
- if (index <= static_cast<uint32_t>(i::Smi::kMaxValue)) {
- value = i::Handle<i::Object>(i::Smi::FromInt(index), isolate);
- } else {
- value = isolate->factory()->NewNumber(index);
- }
- return Utils::Uint32ToLocal(value);
- }
- return Local<Uint32>();
-}
-
-
-int32_t Value::Int32Value() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Int32Value()")) return 0;
- LOG_API(isolate, "Int32Value (slow)");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> num =
- i::Execution::ToInt32(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- if (num->IsSmi()) {
- return i::Smi::cast(*num)->value();
- } else {
- return static_cast<int32_t>(num->Number());
- }
- }
-}
-
-
-bool Value::Equals(Handle<Value> that) const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Equals()")
- || EmptyCheck("v8::Value::Equals()", this)
- || EmptyCheck("v8::Value::Equals()", that)) {
- return false;
- }
- LOG_API(isolate, "Equals");
- ENTER_V8(isolate);
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> other = Utils::OpenHandle(*that);
- // If both obj and other are JSObjects, we'd better compare by identity
- // immediately when going into JS builtin. The reason is Invoke
- // would overwrite global object receiver with global proxy.
- if (obj->IsJSObject() && other->IsJSObject()) {
- return *obj == *other;
- }
- i::Handle<i::Object> args[] = { other };
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result =
- CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args,
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return *result == i::Smi::FromInt(i::EQUAL);
-}
-
-
-bool Value::StrictEquals(Handle<Value> that) const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::StrictEquals()")
- || EmptyCheck("v8::Value::StrictEquals()", this)
- || EmptyCheck("v8::Value::StrictEquals()", that)) {
- return false;
- }
- LOG_API(isolate, "StrictEquals");
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> other = Utils::OpenHandle(*that);
- // Must check HeapNumber first, since NaN !== NaN.
- if (obj->IsHeapNumber()) {
- if (!other->IsNumber()) return false;
- double x = obj->Number();
- double y = other->Number();
- // Must check explicitly for NaN:s on Windows, but -0 works fine.
- return x == y && !isnan(x) && !isnan(y);
- } else if (*obj == *other) { // Also covers Booleans.
- return true;
- } else if (obj->IsSmi()) {
- return other->IsNumber() && obj->Number() == other->Number();
- } else if (obj->IsString()) {
- return other->IsString() &&
- i::String::cast(*obj)->Equals(i::String::cast(*other));
- } else if (obj->IsUndefined() || obj->IsUndetectableObject()) {
- return other->IsUndefined() || other->IsUndetectableObject();
- } else {
- return false;
- }
-}
-
-
-uint32_t Value::Uint32Value() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Uint32Value()")) return 0;
- LOG_API(isolate, "Uint32Value");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> num =
- i::Execution::ToUint32(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- if (num->IsSmi()) {
- return i::Smi::cast(*num)->value();
- } else {
- return static_cast<uint32_t>(num->Number());
- }
- }
-}
-
-
-bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
- v8::PropertyAttribute attribs) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Set()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Object> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::SetProperty(
- isolate,
- self,
- key_obj,
- value_obj,
- static_cast<PropertyAttributes>(attribs),
- i::kNonStrictMode);
- has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
-}
-
-
-bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Set()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::JSObject::SetElement(
- self,
- index,
- value_obj,
- NONE,
- i::kNonStrictMode);
- has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
-}
-
-
-bool v8::Object::ForceSet(v8::Handle<Value> key,
- v8::Handle<Value> value,
- v8::PropertyAttribute attribs) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::ForceSet()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::ForceSetProperty(
- self,
- key_obj,
- value_obj,
- static_cast<PropertyAttributes>(attribs));
- has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
-}
-
-
-bool v8::Object::ForceDelete(v8::Handle<Value> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::ForceDelete()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
-
- // When deleting a property on the global object using ForceDelete
- // deoptimize all functions as optimized code does not check for the hole
- // value with DontDelete properties. We have to deoptimize all contexts
- // because of possible cross-context inlined functions.
- if (self->IsJSGlobalProxy() || self->IsGlobalObject()) {
- i::Deoptimizer::DeoptimizeAll();
- }
-
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);
- has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return obj->IsTrue();
-}
-
-
-Local<Value> v8::Object::Get(v8::Handle<Value> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Get()", return Local<v8::Value>());
- ENTER_V8(isolate);
- i::Handle<i::Object> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::GetProperty(isolate, self, key_obj);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return Utils::ToLocal(result);
-}
-
-
-Local<Value> v8::Object::Get(uint32_t index) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Get()", return Local<v8::Value>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::Object::GetElement(self, index);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return Utils::ToLocal(result);
-}
-
-
-PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetPropertyAttribute()",
- return static_cast<PropertyAttribute>(NONE));
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- if (!key_obj->IsString()) {
- EXCEPTION_PREAMBLE(isolate);
- key_obj = i::Execution::ToString(key_obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
- }
- i::Handle<i::String> key_string = i::Handle<i::String>::cast(key_obj);
- PropertyAttributes result = self->GetPropertyAttribute(*key_string);
- if (result == ABSENT) return static_cast<PropertyAttribute>(NONE);
- return static_cast<PropertyAttribute>(result);
-}
-
-
-Local<Value> v8::Object::GetPrototype() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetPrototype()",
- return Local<v8::Value>());
- ENTER_V8(isolate);
- i::Handle<i::Object> self = Utils::OpenHandle(this);
- i::Handle<i::Object> result(self->GetPrototype(isolate), isolate);
- return Utils::ToLocal(result);
-}
-
-
-bool v8::Object::SetPrototype(Handle<Value> value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::SetPrototype()", return false);
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- // We do not allow exceptions thrown while setting the prototype
- // to propagate outside.
- TryCatch try_catch;
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::SetPrototype(self, value_obj);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
-}
-
-
-Local<Object> v8::Object::FindInstanceInPrototypeChain(
- v8::Handle<FunctionTemplate> tmpl) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate,
- "v8::Object::FindInstanceInPrototypeChain()",
- return Local<v8::Object>());
- ENTER_V8(isolate);
- i::JSObject* object = *Utils::OpenHandle(this);
- i::FunctionTemplateInfo* tmpl_info = *Utils::OpenHandle(*tmpl);
- while (!object->IsInstanceOf(tmpl_info)) {
- i::Object* prototype = object->GetPrototype();
- if (!prototype->IsJSObject()) return Local<Object>();
- object = i::JSObject::cast(prototype);
- }
- return Utils::ToLocal(i::Handle<i::JSObject>(object));
-}
-
-
-Local<Array> v8::Object::GetPropertyNames() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetPropertyNames()",
- return Local<v8::Array>());
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- bool threw = false;
- i::Handle<i::FixedArray> value =
- i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS, &threw);
- if (threw) return Local<v8::Array>();
- // Because we use caching to speed up enumeration it is important
- // to never change the result of the basic enumeration function so
- // we clone the result.
- i::Handle<i::FixedArray> elms = isolate->factory()->CopyFixedArray(value);
- i::Handle<i::JSArray> result =
- isolate->factory()->NewJSArrayWithElements(elms);
- return Utils::ToLocal(scope.CloseAndEscape(result));
-}
-
-
-Local<Array> v8::Object::GetOwnPropertyNames() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetOwnPropertyNames()",
- return Local<v8::Array>());
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- bool threw = false;
- i::Handle<i::FixedArray> value =
- i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY, &threw);
- if (threw) return Local<v8::Array>();
- // Because we use caching to speed up enumeration it is important
- // to never change the result of the basic enumeration function so
- // we clone the result.
- i::Handle<i::FixedArray> elms = isolate->factory()->CopyFixedArray(value);
- i::Handle<i::JSArray> result =
- isolate->factory()->NewJSArrayWithElements(elms);
- return Utils::ToLocal(scope.CloseAndEscape(result));
-}
-
-
-Local<String> v8::Object::ObjectProtoToString() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::ObjectProtoToString()",
- return Local<v8::String>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-
- i::Handle<i::Object> name(self->class_name(), isolate);
-
- // Native implementation of Object.prototype.toString (v8natives.js):
- // var c = %ClassOf(this);
- // if (c === 'Arguments') c = 'Object';
- // return "[object " + c + "]";
-
- if (!name->IsString()) {
- return v8::String::New("[object ]");
-
- } else {
- i::Handle<i::String> class_name = i::Handle<i::String>::cast(name);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Arguments"))) {
- return v8::String::New("[object Object]");
-
- } else {
- const char* prefix = "[object ";
- Local<String> str = Utils::ToLocal(class_name);
- const char* postfix = "]";
-
- int prefix_len = i::StrLength(prefix);
- int str_len = str->Length();
- int postfix_len = i::StrLength(postfix);
-
- int buf_len = prefix_len + str_len + postfix_len;
- i::ScopedVector<char> buf(buf_len);
-
- // Write prefix.
- char* ptr = buf.start();
- memcpy(ptr, prefix, prefix_len * v8::internal::kCharSize);
- ptr += prefix_len;
-
- // Write real content.
- str->WriteAscii(ptr, 0, str_len);
- ptr += str_len;
-
- // Write postfix.
- memcpy(ptr, postfix, postfix_len * v8::internal::kCharSize);
-
- // Copy the buffer into a heap-allocated string and return it.
- Local<String> result = v8::String::New(buf.start(), buf_len);
- return result;
- }
- }
-}
-
-
-Local<Value> v8::Object::GetConstructor() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetConstructor()",
- return Local<v8::Function>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> constructor(self->GetConstructor(), isolate);
- return Utils::ToLocal(constructor);
-}
-
-
-Local<String> v8::Object::GetConstructorName() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetConstructorName()",
- return Local<v8::String>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> name(self->constructor_name());
- return Utils::ToLocal(name);
-}
-
-
-bool v8::Object::Delete(v8::Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Delete()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- return i::JSObject::DeleteProperty(self, key_obj)->IsTrue();
-}
-
-
-bool v8::Object::Has(v8::Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Has()", return false);
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- return self->HasProperty(*key_obj);
-}
-
-
-bool v8::Object::Delete(uint32_t index) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::DeleteProperty()",
- return false);
- ENTER_V8(isolate);
- HandleScope scope;
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return i::JSObject::DeleteElement(self, index)->IsTrue();
-}
-
-
-bool v8::Object::Has(uint32_t index) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasProperty()", return false);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return self->HasElement(index);
-}
-
-
-bool Object::SetAccessor(Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- AccessControl settings,
- PropertyAttribute attributes) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- v8::Handle<AccessorSignature> signature;
- i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name, getter, setter, data,
- settings, attributes,
- signature);
- bool fast = Utils::OpenHandle(this)->HasFastProperties();
- i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
- if (result.is_null() || result->IsUndefined()) return false;
- if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(this), 0);
- return true;
-}
-
-
-bool v8::Object::HasOwnProperty(Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
- return false);
- return Utils::OpenHandle(this)->HasLocalProperty(
- *Utils::OpenHandle(*key));
-}
-
-
-bool v8::Object::HasRealNamedProperty(Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
- return false);
- return Utils::OpenHandle(this)->HasRealNamedProperty(
- *Utils::OpenHandle(*key));
-}
-
-
-bool v8::Object::HasRealIndexedProperty(uint32_t index) {
- ON_BAILOUT(Utils::OpenHandle(this)->GetIsolate(),
- "v8::Object::HasRealIndexedProperty()",
- return false);
- return Utils::OpenHandle(this)->HasRealElementProperty(index);
-}
-
-
-bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate,
- "v8::Object::HasRealNamedCallbackProperty()",
- return false);
- ENTER_V8(isolate);
- return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
- *Utils::OpenHandle(*key));
-}
-
-
-bool v8::Object::HasNamedLookupInterceptor() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasNamedLookupInterceptor()",
- return false);
- return Utils::OpenHandle(this)->HasNamedInterceptor();
-}
-
-
-bool v8::Object::HasIndexedLookupInterceptor() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasIndexedLookupInterceptor()",
- return false);
- return Utils::OpenHandle(this)->HasIndexedInterceptor();
-}
-
-
-static Local<Value> GetPropertyByLookup(i::Isolate* isolate,
- i::Handle<i::JSObject> receiver,
- i::Handle<i::String> name,
- i::LookupResult* lookup) {
- if (!lookup->IsProperty()) {
- // No real property was found.
- return Local<Value>();
- }
-
- // If the property being looked up is a callback, it can throw
- // an exception.
- EXCEPTION_PREAMBLE(isolate);
- PropertyAttributes ignored;
- i::Handle<i::Object> result =
- i::Object::GetProperty(receiver, receiver, lookup, name,
- &ignored);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
-
- return Utils::ToLocal(result);
-}
-
-
-Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
- Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate,
- "v8::Object::GetRealNamedPropertyInPrototypeChain()",
- return Local<Value>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::LookupResult lookup(isolate);
- self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
- return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
-}
-
-
-Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetRealNamedProperty()",
- return Local<Value>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::LookupResult lookup(isolate);
- self_obj->LookupRealNamedProperty(*key_obj, &lookup);
- return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
-}
-
-
-// Turns on access checks by copying the map and setting the check flag.
-// Because the object gets a new map, existing inline cache caching
-// the old map of this object will fail.
-void v8::Object::TurnOnAccessCheck() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::TurnOnAccessCheck()", return);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
-
- // When turning on access checks for a global object deoptimize all functions
- // as optimized code does not always handle access checks.
- i::Deoptimizer::DeoptimizeGlobalObject(*obj);
-
- i::Handle<i::Map> new_map =
- isolate->factory()->CopyMap(i::Handle<i::Map>(obj->map()));
- new_map->set_is_access_check_needed(true);
- obj->set_map(*new_map);
-}
-
-
-bool v8::Object::IsDirty() {
- return Utils::OpenHandle(this)->IsDirty();
-}
-
-
-Local<v8::Object> v8::Object::Clone() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Clone()", return Local<Object>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::JSObject> result = i::Copy(self);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
- return Utils::ToLocal(result);
-}
-
-
-static i::Context* GetCreationContext(i::JSObject* object) {
- i::Object* constructor = object->map()->constructor();
- i::JSFunction* function;
- if (!constructor->IsJSFunction()) {
- // Functions have null as a constructor,
- // but any JSFunction knows its context immediately.
- ASSERT(object->IsJSFunction());
- function = i::JSFunction::cast(object);
- } else {
- function = i::JSFunction::cast(constructor);
- }
- return function->context()->native_context();
-}
-
-
-Local<v8::Context> v8::Object::CreationContext() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate,
- "v8::Object::CreationContext()", return Local<v8::Context>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Context* context = GetCreationContext(*self);
- return Utils::ToLocal(i::Handle<i::Context>(context));
-}
-
-
-int v8::Object::GetIdentityHash() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetIdentityHash()", return 0);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return i::JSObject::GetIdentityHash(self);
-}
-
-
-bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
- v8::Handle<v8::Value> value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::SetHiddenValue()", return false);
- if (value.IsEmpty()) return DeleteHiddenValue(key);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_string =
- isolate->factory()->InternalizeString(key_obj);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- i::Handle<i::Object> result =
- i::JSObject::SetHiddenProperty(self, key_string, value_obj);
- return *result == *self;
-}
-
-
-v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetHiddenValue()",
- return Local<v8::Value>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_string = FACTORY->InternalizeString(key_obj);
- i::Handle<i::Object> result(self->GetHiddenProperty(*key_string), isolate);
- if (result->IsUndefined()) return v8::Local<v8::Value>();
- return Utils::ToLocal(result);
-}
-
-
-bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::DeleteHiddenValue()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_string = FACTORY->InternalizeString(key_obj);
- self->DeleteHiddenProperty(*key_string);
- return true;
-}
-
-
-namespace {
-
-static i::ElementsKind GetElementsKindFromExternalArrayType(
- ExternalArrayType array_type) {
- switch (array_type) {
- case kExternalByteArray:
- return i::EXTERNAL_BYTE_ELEMENTS;
- break;
- case kExternalUnsignedByteArray:
- return i::EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
- break;
- case kExternalShortArray:
- return i::EXTERNAL_SHORT_ELEMENTS;
- break;
- case kExternalUnsignedShortArray:
- return i::EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
- break;
- case kExternalIntArray:
- return i::EXTERNAL_INT_ELEMENTS;
- break;
- case kExternalUnsignedIntArray:
- return i::EXTERNAL_UNSIGNED_INT_ELEMENTS;
- break;
- case kExternalFloatArray:
- return i::EXTERNAL_FLOAT_ELEMENTS;
- break;
- case kExternalDoubleArray:
- return i::EXTERNAL_DOUBLE_ELEMENTS;
- break;
- case kExternalPixelArray:
- return i::EXTERNAL_PIXEL_ELEMENTS;
- break;
- }
- UNREACHABLE();
- return i::DICTIONARY_ELEMENTS;
-}
-
-
-void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
- void* data,
- ExternalArrayType array_type,
- int length) {
- i::Isolate* isolate = object->GetIsolate();
- i::Handle<i::ExternalArray> array =
- isolate->factory()->NewExternalArray(length, array_type, data);
-
- i::Handle<i::Map> external_array_map =
- isolate->factory()->GetElementsTransitionMap(
- object,
- GetElementsKindFromExternalArrayType(array_type));
-
- object->set_map(*external_array_map);
- object->set_elements(*array);
-}
-
-} // namespace
-
-
-void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::SetElementsToPixelData()", return);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- if (!ApiCheck(length >= 0 && length <= i::ExternalPixelArray::kMaxLength,
- "v8::Object::SetIndexedPropertiesToPixelData()",
- "length exceeds max acceptable value")) {
- return;
- }
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!ApiCheck(!self->IsJSArray(),
- "v8::Object::SetIndexedPropertiesToPixelData()",
- "JSArray is not supported")) {
- return;
- }
- PrepareExternalArrayElements(self, data, kExternalPixelArray, length);
-}
-
-
-bool v8::Object::HasIndexedPropertiesInPixelData() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(), "v8::HasIndexedPropertiesInPixelData()",
- return false);
- return self->HasExternalPixelElements();
-}
-
-
-uint8_t* v8::Object::GetIndexedPropertiesPixelData() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(), "v8::GetIndexedPropertiesPixelData()",
- return NULL);
- if (self->HasExternalPixelElements()) {
- return i::ExternalPixelArray::cast(self->elements())->
- external_pixel_pointer();
- } else {
- return NULL;
- }
-}
-
-
-int v8::Object::GetIndexedPropertiesPixelDataLength() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(), "v8::GetIndexedPropertiesPixelDataLength()",
- return -1);
- if (self->HasExternalPixelElements()) {
- return i::ExternalPixelArray::cast(self->elements())->length();
- } else {
- return -1;
- }
-}
-
-
-void v8::Object::SetIndexedPropertiesToExternalArrayData(
- void* data,
- ExternalArrayType array_type,
- int length) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::SetIndexedPropertiesToExternalArrayData()", return);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- if (!ApiCheck(length >= 0 && length <= i::ExternalArray::kMaxLength,
- "v8::Object::SetIndexedPropertiesToExternalArrayData()",
- "length exceeds max acceptable value")) {
- return;
- }
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!ApiCheck(!self->IsJSArray(),
- "v8::Object::SetIndexedPropertiesToExternalArrayData()",
- "JSArray is not supported")) {
- return;
- }
- PrepareExternalArrayElements(self, data, array_type, length);
-}
-
-
-bool v8::Object::HasIndexedPropertiesInExternalArrayData() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(),
- "v8::HasIndexedPropertiesInExternalArrayData()",
- return false);
- return self->HasExternalArrayElements();
-}
-
-
-void* v8::Object::GetIndexedPropertiesExternalArrayData() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(),
- "v8::GetIndexedPropertiesExternalArrayData()",
- return NULL);
- if (self->HasExternalArrayElements()) {
- return i::ExternalArray::cast(self->elements())->external_pointer();
- } else {
- return NULL;
- }
-}
-
-
-ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(),
- "v8::GetIndexedPropertiesExternalArrayDataType()",
- return static_cast<ExternalArrayType>(-1));
- switch (self->elements()->map()->instance_type()) {
- case i::EXTERNAL_BYTE_ARRAY_TYPE:
- return kExternalByteArray;
- case i::EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return kExternalUnsignedByteArray;
- case i::EXTERNAL_SHORT_ARRAY_TYPE:
- return kExternalShortArray;
- case i::EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return kExternalUnsignedShortArray;
- case i::EXTERNAL_INT_ARRAY_TYPE:
- return kExternalIntArray;
- case i::EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return kExternalUnsignedIntArray;
- case i::EXTERNAL_FLOAT_ARRAY_TYPE:
- return kExternalFloatArray;
- case i::EXTERNAL_DOUBLE_ARRAY_TYPE:
- return kExternalDoubleArray;
- case i::EXTERNAL_PIXEL_ARRAY_TYPE:
- return kExternalPixelArray;
- default:
- return static_cast<ExternalArrayType>(-1);
- }
-}
-
-
-int v8::Object::GetIndexedPropertiesExternalArrayDataLength() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(),
- "v8::GetIndexedPropertiesExternalArrayDataLength()",
- return 0);
- if (self->HasExternalArrayElements()) {
- return i::ExternalArray::cast(self->elements())->length();
- } else {
- return -1;
- }
-}
-
-
-bool v8::Object::IsCallable() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::IsCallable()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (obj->IsJSFunction()) return true;
- return i::Execution::GetFunctionDelegate(obj)->IsJSFunction();
-}
-
-
-Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
- int argc,
- v8::Handle<v8::Value> argv[]) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::CallAsFunction()",
- return Local<v8::Value>());
- LOG_API(isolate, "Object::CallAsFunction");
- ENTER_V8(isolate);
- i::Logger::TimerEventScope timer_scope(
- isolate, i::Logger::TimerEventScope::v8_execute);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
- STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
- i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>();
- if (obj->IsJSFunction()) {
- fun = i::Handle<i::JSFunction>::cast(obj);
- } else {
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> delegate =
- i::Execution::TryGetFunctionDelegate(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- fun = i::Handle<i::JSFunction>::cast(delegate);
- recv_obj = obj;
- }
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
- return Utils::ToLocal(scope.CloseAndEscape(returned));
-}
-
-
-Local<v8::Value> Object::CallAsConstructor(int argc,
- v8::Handle<v8::Value> argv[]) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::CallAsConstructor()",
- return Local<v8::Object>());
- LOG_API(isolate, "Object::CallAsConstructor");
- ENTER_V8(isolate);
- i::Logger::TimerEventScope timer_scope(
- isolate, i::Logger::TimerEventScope::v8_execute);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
- if (obj->IsJSFunction()) {
- i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::New(fun, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
- return Utils::ToLocal(scope.CloseAndEscape(
- i::Handle<i::JSObject>::cast(returned)));
- }
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> delegate =
- i::Execution::TryGetConstructorDelegate(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
- if (!delegate->IsUndefined()) {
- i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(delegate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, obj, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
- ASSERT(!delegate->IsUndefined());
- return Utils::ToLocal(scope.CloseAndEscape(returned));
- }
- return Local<v8::Object>();
-}
-
-
-Local<v8::Object> Function::NewInstance() const {
- return NewInstance(0, NULL);
-}
-
-
-Local<v8::Object> Function::NewInstance(int argc,
- v8::Handle<v8::Value> argv[]) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Function::NewInstance()",
- return Local<v8::Object>());
- LOG_API(isolate, "Function::NewInstance");
- ENTER_V8(isolate);
- i::Logger::TimerEventScope timer_scope(
- isolate, i::Logger::TimerEventScope::v8_execute);
- HandleScope scope;
- i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
- STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::New(function, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
- return scope.Close(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
-}
-
-
-Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
- v8::Handle<v8::Value> argv[]) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
- LOG_API(isolate, "Function::Call");
- ENTER_V8(isolate);
- i::Logger::TimerEventScope timer_scope(
- isolate, i::Logger::TimerEventScope::v8_execute);
- i::Object* raw_result = NULL;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
- i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
- STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
- raw_result = *returned;
- }
- i::Handle<i::Object> result(raw_result, isolate);
- return Utils::ToLocal(result);
-}
-
-
-void Function::SetName(v8::Handle<v8::String> name) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- USE(isolate);
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- func->shared()->set_name(*Utils::OpenHandle(*name));
-}
-
-
-Handle<Value> Function::GetName() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name(),
- func->GetIsolate()));
-}
-
-
-Handle<Value> Function::GetInferredName() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name(),
- func->GetIsolate()));
-}
-
-
-ScriptOrigin Function::GetScriptOrigin() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (func->shared()->script()->IsScript()) {
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- i::Handle<i::Object> scriptName = GetScriptNameOrSourceURL(script);
- v8::ScriptOrigin origin(
- Utils::ToLocal(scriptName),
- v8::Integer::New(script->line_offset()->value()),
- v8::Integer::New(script->column_offset()->value()));
- return origin;
- }
- return v8::ScriptOrigin(Handle<Value>());
-}
-
-
-const int Function::kLineOffsetNotFound = -1;
-
-
-int Function::GetScriptLineNumber() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (func->shared()->script()->IsScript()) {
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return i::GetScriptLineNumber(script, func->shared()->start_position());
- }
- return kLineOffsetNotFound;
-}
-
-
-int Function::GetScriptColumnNumber() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (func->shared()->script()->IsScript()) {
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return i::GetScriptColumnNumber(script, func->shared()->start_position());
- }
- return kLineOffsetNotFound;
-}
-
-Handle<Value> Function::GetScriptId() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (!func->shared()->script()->IsScript())
- return v8::Undefined();
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return Utils::ToLocal(i::Handle<i::Object>(script->id(), func->GetIsolate()));
-}
-
-int String::Length() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
- return str->length();
-}
-
-bool String::MayContainNonAscii() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) {
- return false;
- }
- return !str->HasOnlyAsciiChars();
-}
-
-
-bool String::IsOneByte() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::IsOneByte()")) {
- return false;
- }
- return str->IsOneByteConvertible();
-}
-
-
-class Utf8LengthVisitor {
- public:
- explicit Utf8LengthVisitor()
- : utf8_length_(0),
- last_character_(unibrow::Utf16::kNoPreviousCharacter) {}
-
- inline int GetLength() {
- return utf8_length_;
- }
-
- template<typename Char>
- inline void Visit(const Char* chars, unsigned length) {
- ASSERT(length > 0);
- // TODO(dcarney) Add back ascii fast path.
- int utf8_length = 0;
- int last_character = last_character_;
- for (unsigned i = 0; i < length; i++) {
- uint16_t c = chars[i];
- utf8_length += unibrow::Utf8::Length(c, last_character);
- last_character = c;
- }
- last_character_ = last_character;
- utf8_length_ += utf8_length;
- }
-
- inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
- Visit(chars, length);
- }
-
- inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
- Visit(chars, length);
- }
-
- private:
- int utf8_length_;
- int last_character_;
- DISALLOW_COPY_AND_ASSIGN(Utf8LengthVisitor);
-};
-
-
-static int Utf8Length(i::String* str, i::Isolate* isolate) {
- unsigned length = static_cast<unsigned>(str->length());
- if (length == 0) return 0;
- int32_t type = str->map()->instance_type();
- Utf8LengthVisitor visitor;
- // Non ConsString branch.
- if ((type & i::kStringRepresentationMask) != i::kConsStringTag) {
- i::ConsStringNullOp null_op;
- i::String::Visit(str, 0, visitor, null_op, type, length);
- return visitor.GetLength();
- }
- i::ConsStringIteratorOp* op = isolate->write_iterator();
- unsigned offset = 0;
- i::String* leaf = op->Operate(str, &offset, &type, &length);
- ASSERT(leaf != NULL);
- while (leaf != NULL) {
- i::ConsStringNullOp null_op;
- ASSERT(offset == 0);
- i::String::Visit(leaf, 0, visitor, null_op, type, length);
- leaf = op->ContinueOperation(&type, &length);
- }
- return visitor.GetLength();
-}
-
-
-int String::Utf8Length() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- i::Isolate* isolate = str->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::Utf8Length()")) return 0;
- return v8::Utf8Length(*str, isolate);
-}
-
-
-class Utf8WriterVisitor {
- public:
- Utf8WriterVisitor(char* buffer, int capacity)
- : early_termination_(false),
- last_character_(unibrow::Utf16::kNoPreviousCharacter),
- buffer_(buffer),
- start_(buffer),
- capacity_(capacity),
- utf16_chars_read_(0) {
- }
-
- static int WriteEndCharacter(uint16_t character,
- int last_character,
- int remaining,
- char* const buffer) {
- using namespace unibrow;
- ASSERT(remaining > 0);
- // We can't use a local buffer here because Encode needs to modify
- // previous characters in the stream. We know, however, that
- // exactly one character will be advanced.
- if (Utf16::IsTrailSurrogate(character) &&
- Utf16::IsLeadSurrogate(last_character)) {
- int written = Utf8::Encode(buffer, character, last_character);
- ASSERT(written == 1);
- return written;
- }
- // Use a scratch buffer to check the required characters.
- char temp_buffer[Utf8::kMaxEncodedSize];
- // Can't encode using last_character as gcc has array bounds issues.
- int written = Utf8::Encode(temp_buffer,
- character,
- unibrow::Utf16::kNoPreviousCharacter);
- // Won't fit.
- if (written > remaining) return 0;
- // Copy over the character from temp_buffer.
- for (int j = 0; j < written; j++) {
- buffer[j] = temp_buffer[j];
- }
- return written;
- }
-
- template<typename Char>
- void Visit(const Char* chars, const int length) {
- using namespace unibrow;
- // TODO(dcarney): Add back ascii fast path.
- ASSERT(!early_termination_);
- ASSERT(length > 0);
- // Copy state to stack.
- char* buffer = buffer_;
- int last_character = last_character_;
- int i = 0;
- // Do a fast loop where there is no exit capacity check.
- while (true) {
- int fast_length;
- if (capacity_ == -1) {
- fast_length = length;
- } else {
- int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
- // Need enough space to write everything but one character.
- STATIC_ASSERT(Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit == 3);
- int writable_length = (remaining_capacity - 3)/3;
- // Need to drop into slow loop.
- if (writable_length <= 0) break;
- fast_length = i + writable_length;
- if (fast_length > length) fast_length = length;
- }
- // Write the characters to the stream.
- for (; i < fast_length; i++) {
- uint16_t character = *chars++;
- buffer += Utf8::Encode(buffer, character, last_character);
- last_character = character;
- ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_);
- }
- // Array is fully written. Exit.
- if (fast_length == length) {
- // Write state back out to object.
- last_character_ = last_character;
- buffer_ = buffer;
- utf16_chars_read_ += i;
- return;
- }
- }
- ASSERT(capacity_ != -1);
- // Slow loop. Must check capacity on each iteration.
- int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
- ASSERT(remaining_capacity >= 0);
- for (; i < length && remaining_capacity > 0; i++) {
- uint16_t character = *chars++;
- int written = WriteEndCharacter(character,
- last_character,
- remaining_capacity,
- buffer);
- if (written == 0) {
- early_termination_ = true;
- break;
- }
- buffer += written;
- remaining_capacity -= written;
- last_character = character;
- }
- // Write state back out to object.
- last_character_ = last_character;
- buffer_ = buffer;
- utf16_chars_read_ += i;
- }
-
- inline bool IsDone() {
- return early_termination_;
- }
-
- inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
- Visit(chars, static_cast<int>(length));
- }
-
- inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
- Visit(chars, static_cast<int>(length));
- }
-
- inline int CompleteWrite(bool write_null, int* utf16_chars_read_out) {
- // Write out number of utf16 characters written to the stream.
- if (utf16_chars_read_out != NULL) {
- *utf16_chars_read_out = utf16_chars_read_;
- }
- // Only null terminate if all of the string was written and there's space.
- if (write_null &&
- !early_termination_ &&
- (capacity_ == -1 || (buffer_ - start_) < capacity_)) {
- *buffer_++ = '\0';
- }
- return static_cast<int>(buffer_ - start_);
- }
-
- private:
- bool early_termination_;
- int last_character_;
- char* buffer_;
- char* const start_;
- int capacity_;
- int utf16_chars_read_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(Utf8WriterVisitor);
-};
-
-
-uint32_t String::Hash() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Hash()")) return 0;
- return str->Hash();
-}
-
-
-String::CompleteHashData String::CompleteHash() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::CompleteHash()")) {
- return CompleteHashData();
- }
- CompleteHashData result;
- result.length = str->length();
- result.hash = str->Hash();
- if (str->IsSeqOneByteString() && str->IsSymbol())
- result.symbol_id = i::SeqString::cast(*str)->symbol_id();
- return result;
-}
-
-
-uint32_t String::ComputeHash(uint16_t *string, int length) {
- return i::StringHasher::HashSequentialString<i::uc16>(string, length, i::kZeroHashSeed) >>
- i::String::kHashShift;
-}
-
-
-uint32_t String::ComputeHash(char *string, int length) {
- return i::StringHasher::HashSequentialString<char>(string, length, i::kZeroHashSeed) >>
- i::String::kHashShift;
-}
-
-
-int String::WriteUtf8(char* buffer,
- int capacity,
- int* nchars_ref,
- int options) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
- LOG_API(isolate, "String::WriteUtf8");
- ENTER_V8(isolate);
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (options & HINT_MANY_WRITES_EXPECTED) {
- FlattenString(str); // Flatten the string for efficiency.
- }
- Utf8WriterVisitor writer(buffer, capacity);
- i::ConsStringIteratorOp* op = isolate->write_iterator();
- op->Reset();
- int32_t type = str->map()->instance_type();
- unsigned str_length = static_cast<unsigned>(str->length());
- if (str_length != 0) {
- i::String::Visit(*str, 0, writer, *op, type, str_length);
- while (!writer.IsDone()) {
- unsigned length_out;
- i::String* next = op->ContinueOperation(&type, &length_out);
- if (next == NULL) break;
- // TODO(dcarney): need an asserting null op.
- i::ConsStringNullOp null_op;
- i::String::Visit(next, 0, writer, null_op, type, length_out);
- }
- }
- return writer.CompleteWrite(!(options & NO_NULL_TERMINATION), nchars_ref);
-}
-
-
-int String::WriteAscii(char* buffer,
- int start,
- int length,
- int options) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::WriteAscii()")) return 0;
- LOG_API(isolate, "String::WriteAscii");
- ENTER_V8(isolate);
- ASSERT(start >= 0 && length >= -1);
- i::Handle<i::String> str = Utils::OpenHandle(this);
- isolate->string_tracker()->RecordWrite(str);
- if (options & HINT_MANY_WRITES_EXPECTED) {
- FlattenString(str); // Flatten the string for efficiency.
- }
-
- if (str->HasOnlyAsciiChars()) {
- // WriteToFlat is faster than using the StringCharacterStream.
- if (length == -1) length = str->length() + 1;
- int len = i::Min(length, str->length() - start);
- i::String::WriteToFlat(*str,
- reinterpret_cast<uint8_t*>(buffer),
- start,
- start + len);
- if (!(options & PRESERVE_ASCII_NULL)) {
- for (int i = 0; i < len; i++) {
- if (buffer[i] == '\0') buffer[i] = ' ';
- }
- }
- if (!(options & NO_NULL_TERMINATION) && length > len) {
- buffer[len] = '\0';
- }
- return len;
- }
-
- int end = length;
- if ((length == -1) || (length > str->length() - start)) {
- end = str->length() - start;
- }
- if (end < 0) return 0;
- i::StringCharacterStream write_stream(*str, isolate->write_iterator(), start);
- int i;
- for (i = 0; i < end; i++) {
- char c = static_cast<char>(write_stream.GetNext());
- if (c == '\0' && !(options & PRESERVE_ASCII_NULL)) c = ' ';
- buffer[i] = c;
- }
- if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length)) {
- buffer[i] = '\0';
- }
- return i;
-}
-
-
-template<typename CharType>
-static inline int WriteHelper(const String* string,
- CharType* buffer,
- int start,
- int length,
- int options) {
- i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::Write()")) return 0;
- LOG_API(isolate, "String::Write");
- ENTER_V8(isolate);
- ASSERT(start >= 0 && length >= -1);
- i::Handle<i::String> str = Utils::OpenHandle(string);
- isolate->string_tracker()->RecordWrite(str);
- if (options & String::HINT_MANY_WRITES_EXPECTED) {
- // Flatten the string for efficiency. This applies whether we are
- // using StringCharacterStream or Get(i) to access the characters.
- FlattenString(str);
- }
- int end = start + length;
- if ((length == -1) || (length > str->length() - start) )
- end = str->length();
- if (end < 0) return 0;
- i::String::WriteToFlat(*str, buffer, start, end);
- if (!(options & String::NO_NULL_TERMINATION) &&
- (length == -1 || end - start < length)) {
- buffer[end - start] = '\0';
- }
- return end - start;
-}
-
-
-int String::WriteOneByte(uint8_t* buffer,
- int start,
- int length,
- int options) const {
- return WriteHelper(this, buffer, start, length, options);
-}
-
-
-int String::Write(uint16_t* buffer,
- int start,
- int length,
- int options) const {
- return WriteHelper(this, buffer, start, length, options);
-}
-
-
-bool v8::String::IsExternal() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternal()")) {
- return false;
- }
- EnsureInitializedForIsolate(str->GetIsolate(), "v8::String::IsExternal()");
- return i::StringShape(*str).IsExternalTwoByte();
-}
-
-
-bool v8::String::IsExternalAscii() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternalAscii()")) {
- return false;
- }
- return i::StringShape(*str).IsExternalAscii();
-}
-
-
-void v8::String::VerifyExternalStringResource(
- v8::String::ExternalStringResource* value) const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- const v8::String::ExternalStringResource* expected;
- if (i::StringShape(*str).IsExternalTwoByte()) {
- const void* resource =
- i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
- expected = reinterpret_cast<const ExternalStringResource*>(resource);
- } else {
- expected = NULL;
- }
- CHECK_EQ(expected, value);
-}
-
-void v8::String::VerifyExternalStringResourceBase(
- v8::String::ExternalStringResourceBase* value, Encoding encoding) const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- const v8::String::ExternalStringResourceBase* expected;
- Encoding expectedEncoding;
- if (i::StringShape(*str).IsExternalAscii()) {
- const void* resource =
- i::Handle<i::ExternalAsciiString>::cast(str)->resource();
- expected = reinterpret_cast<const ExternalStringResourceBase*>(resource);
- expectedEncoding = ASCII_ENCODING;
- } else if (i::StringShape(*str).IsExternalTwoByte()) {
- const void* resource =
- i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
- expected = reinterpret_cast<const ExternalStringResourceBase*>(resource);
- expectedEncoding = TWO_BYTE_ENCODING;
- } else {
- expected = NULL;
- expectedEncoding = str->IsOneByteRepresentation() ? ASCII_ENCODING
- : TWO_BYTE_ENCODING;
- }
- CHECK_EQ(expected, value);
- CHECK_EQ(expectedEncoding, encoding);
-}
-
-const v8::String::ExternalAsciiStringResource*
- v8::String::GetExternalAsciiStringResource() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(),
- "v8::String::GetExternalAsciiStringResource()")) {
- return NULL;
- }
- if (i::StringShape(*str).IsExternalAscii()) {
- const void* resource =
- i::Handle<i::ExternalAsciiString>::cast(str)->resource();
- return reinterpret_cast<const ExternalAsciiStringResource*>(resource);
- } else {
- return NULL;
- }
-}
-
-
-double Number::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Value()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->Number();
-}
-
-
-bool Boolean::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Boolean::Value()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->IsTrue();
-}
-
-
-int64_t Integer::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Value()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
- } else {
- return static_cast<int64_t>(obj->Number());
- }
-}
-
-
-int32_t Int32::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Int32::Value()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
- } else {
- return static_cast<int32_t>(obj->Number());
- }
-}
-
-
-uint32_t Uint32::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Uint32::Value()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
- } else {
- return static_cast<uint32_t>(obj->Number());
- }
-}
-
-
-int v8::Object::InternalFieldCount() {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (IsDeadCheck(obj->GetIsolate(), "v8::Object::InternalFieldCount()")) {
- return 0;
- }
- return obj->GetInternalFieldCount();
-}
-
-
-static bool InternalFieldOK(i::Handle<i::JSObject> obj,
- int index,
- const char* location) {
- return !IsDeadCheck(obj->GetIsolate(), location) &&
- ApiCheck(index < obj->GetInternalFieldCount(),
- location,
- "Internal field out of bounds");
-}
-
-
-Local<Value> v8::Object::SlowGetInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- const char* location = "v8::Object::GetInternalField()";
- if (!InternalFieldOK(obj, index, location)) return Local<Value>();
- i::Handle<i::Object> value(obj->GetInternalField(index), obj->GetIsolate());
- return Utils::ToLocal(value);
-}
-
-
-void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- const char* location = "v8::Object::SetInternalField()";
- if (!InternalFieldOK(obj, index, location)) return;
- i::Handle<i::Object> val = Utils::OpenHandle(*value);
- obj->SetInternalField(index, *val);
- ASSERT_EQ(value, GetInternalField(index));
-}
-
-
-void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- const char* location = "v8::Object::GetAlignedPointerFromInternalField()";
- if (!InternalFieldOK(obj, index, location)) return NULL;
- return DecodeSmiToAligned(obj->GetInternalField(index), location);
-}
-
-
-void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- const char* location = "v8::Object::SetAlignedPointerInInternalField()";
- if (!InternalFieldOK(obj, index, location)) return;
- obj->SetInternalField(index, EncodeAlignedAsSmi(value, location));
- ASSERT_EQ(value, GetAlignedPointerFromInternalField(index));
-}
-
-
-static void* ExternalValue(i::Object* obj) {
- // Obscure semantics for undefined, but somehow checked in our unit tests...
- if (obj->IsUndefined()) return NULL;
- i::Object* foreign = i::JSObject::cast(obj)->GetInternalField(0);
- return i::Foreign::cast(foreign)->foreign_address();
-}
-
-
-void v8::Object::SetExternalResource(v8::Object::ExternalResource* resource) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (resource != NULL) {
- obj->SetExternalResourceObject(
- *isolate->factory()->NewForeign(
- reinterpret_cast<i::Address>(resource)));
- } else {
- obj->SetExternalResourceObject(0);
- }
- if (!obj->IsSymbol()) {
- isolate->heap()->external_string_table()->AddObject(*obj);
- }
-}
-
-
-v8::Object::ExternalResource* v8::Object::GetExternalResource() {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Object* value = obj->GetExternalResourceObject();
- if (value->IsForeign()) {
- return reinterpret_cast<v8::Object::ExternalResource*>(
- i::Foreign::cast(value)->foreign_address());
- } else {
- return NULL;
- }
-}
-
-
-// --- E n v i r o n m e n t ---
-
-
-bool v8::V8::Initialize() {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate != NULL && isolate->IsInitialized()) {
- return true;
- }
- return InitializeHelper();
-}
-
-
-void v8::V8::SetEntropySource(EntropySource source) {
- i::V8::SetEntropySource(source);
-}
-
-
-void v8::V8::SetReturnAddressLocationResolver(
- ReturnAddressLocationResolver return_address_resolver) {
- i::V8::SetReturnAddressLocationResolver(return_address_resolver);
-}
-
-
-bool v8::V8::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
- return i::ProfileEntryHookStub::SetFunctionEntryHook(entry_hook);
-}
-
-
-void v8::V8::SetJitCodeEventHandler(
- JitCodeEventOptions options, JitCodeEventHandler event_handler) {
- i::Isolate* isolate = i::Isolate::Current();
- // Ensure that logging is initialized for our isolate.
- isolate->InitializeLoggingAndCounters();
- isolate->logger()->SetCodeEventHandler(options, event_handler);
-}
-
-
-bool v8::V8::Dispose() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
- "v8::V8::Dispose()",
- "Use v8::Isolate::Dispose() for a non-default isolate.")) {
- return false;
- }
- i::V8::TearDown();
- return true;
-}
-
-
-HeapStatistics::HeapStatistics(): total_heap_size_(0),
- total_heap_size_executable_(0),
- total_physical_size_(0),
- used_heap_size_(0),
- heap_size_limit_(0) { }
-
-
-void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized()) {
- // Isolate is unitialized thus heap is not configured yet.
- heap_statistics->total_heap_size_ = 0;
- heap_statistics->total_heap_size_executable_ = 0;
- heap_statistics->total_physical_size_ = 0;
- heap_statistics->used_heap_size_ = 0;
- heap_statistics->heap_size_limit_ = 0;
- return;
- }
- Isolate* ext_isolate = reinterpret_cast<Isolate*>(isolate);
- return ext_isolate->GetHeapStatistics(heap_statistics);
-}
-
-
-void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::V8::VisitExternalResources");
- isolate->heap()->VisitExternalResources(visitor);
-}
-
-
-class VisitorAdapter : public i::ObjectVisitor {
- public:
- explicit VisitorAdapter(PersistentHandleVisitor* visitor)
- : visitor_(visitor) {}
- virtual void VisitPointers(i::Object** start, i::Object** end) {
- UNREACHABLE();
- }
- virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
- visitor_->VisitPersistentHandle(ToApi<Value>(i::Handle<i::Object>(p)),
- class_id);
- }
- private:
- PersistentHandleVisitor* visitor_;
-};
-
-
-void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId");
-
- i::AssertNoAllocation no_allocation;
-
- VisitorAdapter visitor_adapter(visitor);
- isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter);
-}
-
-
-void v8::V8::VisitHandlesForPartialDependence(
- Isolate* exported_isolate, PersistentHandleVisitor* visitor) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
- ASSERT(isolate == i::Isolate::Current());
- IsDeadCheck(isolate, "v8::V8::VisitHandlesForPartialDependence");
-
- i::AssertNoAllocation no_allocation;
-
- VisitorAdapter visitor_adapter(visitor);
- isolate->global_handles()->IterateAllRootsInNewSpaceWithClassIds(
- &visitor_adapter);
-}
-
-
-bool v8::V8::IdleNotification(int hint) {
- // Returning true tells the caller that it need not
- // continue to call IdleNotification.
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate == NULL || !isolate->IsInitialized()) return true;
- return i::V8::IdleNotification(hint);
-}
-
-
-void v8::V8::LowMemoryNotification() {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate == NULL || !isolate->IsInitialized()) return;
- isolate->heap()->CollectAllAvailableGarbage("low memory notification");
-}
-
-
-int v8::V8::ContextDisposedNotification() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return 0;
- return isolate->heap()->NotifyContextDisposed();
-}
-
-
-const char* v8::V8::GetVersion() {
- return i::Version::GetVersion();
-}
-
-
-static i::Handle<i::FunctionTemplateInfo>
- EnsureConstructor(i::Handle<i::ObjectTemplateInfo> templ) {
- if (templ->constructor()->IsUndefined()) {
- Local<FunctionTemplate> constructor = FunctionTemplate::New();
- Utils::OpenHandle(*constructor)->set_instance_template(*templ);
- templ->set_constructor(*Utils::OpenHandle(*constructor));
- }
- return i::Handle<i::FunctionTemplateInfo>(
- i::FunctionTemplateInfo::cast(templ->constructor()));
-}
-
-
-Persistent<Context> v8::Context::New(
- v8::ExtensionConfiguration* extensions,
- v8::Handle<ObjectTemplate> global_template,
- v8::Handle<Value> global_object) {
- i::Isolate::EnsureDefaultIsolate();
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Context::New()");
- LOG_API(isolate, "Context::New");
- ON_BAILOUT(isolate, "v8::Context::New()", return Persistent<Context>());
-
- // Enter V8 via an ENTER_V8 scope.
- i::Handle<i::Context> env;
- {
- ENTER_V8(isolate);
- v8::Handle<ObjectTemplate> proxy_template = global_template;
- i::Handle<i::FunctionTemplateInfo> proxy_constructor;
- i::Handle<i::FunctionTemplateInfo> global_constructor;
-
- if (!global_template.IsEmpty()) {
- // Make sure that the global_template has a constructor.
- global_constructor =
- EnsureConstructor(Utils::OpenHandle(*global_template));
-
- // Create a fresh template for the global proxy object.
- proxy_template = ObjectTemplate::New();
- proxy_constructor =
- EnsureConstructor(Utils::OpenHandle(*proxy_template));
-
- // Set the global template to be the prototype template of
- // global proxy template.
- proxy_constructor->set_prototype_template(
- *Utils::OpenHandle(*global_template));
-
- // Migrate security handlers from global_template to
- // proxy_template. Temporarily removing access check
- // information from the global template.
- if (!global_constructor->access_check_info()->IsUndefined()) {
- proxy_constructor->set_access_check_info(
- global_constructor->access_check_info());
- proxy_constructor->set_needs_access_check(
- global_constructor->needs_access_check());
- global_constructor->set_needs_access_check(false);
- global_constructor->set_access_check_info(
- isolate->heap()->undefined_value());
- }
- }
-
- // Create the environment.
- env = isolate->bootstrapper()->CreateEnvironment(
- Utils::OpenHandle(*global_object, true),
- proxy_template,
- extensions);
-
- // Restore the access check info on the global template.
- if (!global_template.IsEmpty()) {
- ASSERT(!global_constructor.is_null());
- ASSERT(!proxy_constructor.is_null());
- global_constructor->set_access_check_info(
- proxy_constructor->access_check_info());
- global_constructor->set_needs_access_check(
- proxy_constructor->needs_access_check());
- }
- isolate->runtime_profiler()->Reset();
- }
- // Leave V8.
-
- if (env.is_null()) {
- return Persistent<Context>();
- }
- return Persistent<Context>(Utils::ToLocal(env));
-}
-
-
-void v8::Context::SetSecurityToken(Handle<Value> token) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::SetSecurityToken()")) {
- return;
- }
- ENTER_V8(isolate);
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
- env->set_security_token(*token_handle);
-}
-
-
-void v8::Context::UseDefaultSecurityToken() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::UseDefaultSecurityToken()")) {
- return;
- }
- ENTER_V8(isolate);
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- env->set_security_token(env->global_object());
-}
-
-
-Handle<Value> v8::Context::GetSecurityToken() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetSecurityToken()")) {
- return Handle<Value>();
- }
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Object* security_token = env->security_token();
- i::Handle<i::Object> token_handle(security_token, isolate);
- return Utils::ToLocal(token_handle);
-}
-
-
-bool Context::HasOutOfMemoryException() {
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- return env->has_out_of_memory();
-}
-
-
-bool Context::InContext() {
- return i::Isolate::Current()->context() != NULL;
-}
-
-
-v8::Isolate* Context::GetIsolate() {
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- return reinterpret_cast<Isolate*>(env->GetIsolate());
-}
-
-
-v8::Local<v8::Context> Context::GetEntered() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::Context::GetEntered()")) {
- return Local<Context>();
- }
- i::Handle<i::Object> last =
- isolate->handle_scope_implementer()->LastEnteredContext();
- if (last.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(last);
- return Utils::ToLocal(context);
-}
-
-
-v8::Local<v8::Context> Context::GetCurrent() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCurrent()")) {
- return Local<Context>();
- }
- i::Handle<i::Object> current = isolate->native_context();
- if (current.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
- return Utils::ToLocal(context);
-}
-
-
-v8::Local<v8::Context> Context::GetCalling() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCalling()")) {
- return Local<Context>();
- }
- i::Handle<i::Object> calling =
- isolate->GetCallingNativeContext();
- if (calling.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
- return Utils::ToLocal(context);
-}
-
-
-v8::Local<v8::Object> Context::GetCallingQmlGlobal() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCallingQmlGlobal()")) {
- return Local<Object>();
- }
-
- i::Context *context = isolate->context();
- i::JavaScriptFrameIterator it(isolate);
- if (it.done()) return Local<Object>();
- context = i::Context::cast(it.frame()->context());
- if (!context->qml_global_object()->IsUndefined()) {
- i::Handle<i::Object> qmlglobal(context->qml_global_object(), isolate);
- return Utils::ToLocal(i::Handle<i::JSObject>::cast(qmlglobal));
- } else {
- return Local<Object>();
- }
-}
-
-
-v8::Local<v8::Value> Context::GetCallingScriptData() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCallingScriptData()")) {
- return Local<Object>();
- }
-
- i::JavaScriptFrameIterator it(isolate);
- if (it.done()) return Local<Object>();
- i::Handle<i::JSFunction> function =
- i::Handle<i::JSFunction>(i::JSFunction::cast(it.frame()->function()));
- i::Handle<i::Script> script(i::Script::cast(function->shared()->script()));
- return Utils::ToLocal(i::Handle<i::Object>(script->data(), isolate));
-}
-
-
-v8::Local<v8::Object> Context::Global() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::Global()")) {
- return Local<v8::Object>();
- }
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Handle<i::Object> global(context->global_proxy(), isolate);
- return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
-}
-
-
-void Context::DetachGlobal() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::DetachGlobal()")) return;
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- isolate->bootstrapper()->DetachGlobal(context);
-}
-
-
-void Context::ReattachGlobal(Handle<Object> global_object) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::ReattachGlobal()")) return;
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Handle<i::JSGlobalProxy> global_proxy =
- i::Handle<i::JSGlobalProxy>::cast(Utils::OpenHandle(*global_object));
- isolate->bootstrapper()->ReattachGlobal(context, global_proxy);
-}
-
-
-void Context::AllowCodeGenerationFromStrings(bool allow) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::AllowCodeGenerationFromStrings()")) {
- return;
- }
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- context->set_allow_code_gen_from_strings(
- allow ? isolate->heap()->true_value() : isolate->heap()->false_value());
-}
-
-
-bool Context::IsCodeGenerationFromStringsAllowed() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::IsCodeGenerationFromStringsAllowed()")) {
- return false;
- }
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- return !context->allow_code_gen_from_strings()->IsFalse();
-}
-
-
-void Context::SetErrorMessageForCodeGenerationFromStrings(
- Handle<String> error) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::SetErrorMessageForCodeGenerationFromStrings()")) {
- return;
- }
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Handle<i::String> error_handle = Utils::OpenHandle(*error);
- context->set_error_message_for_code_gen_from_strings(*error_handle);
-}
-
-
-Local<v8::Object> ObjectTemplate::NewInstance() {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()",
- return Local<v8::Object>());
- LOG_API(isolate, "ObjectTemplate::NewInstance");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj =
- i::Execution::InstantiateObject(Utils::OpenHandle(this),
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
- return Utils::ToLocal(i::Handle<i::JSObject>::cast(obj));
-}
-
-
-Local<v8::Function> FunctionTemplate::GetFunction() {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::FunctionTemplate::GetFunction()",
- return Local<v8::Function>());
- LOG_API(isolate, "FunctionTemplate::GetFunction");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj =
- i::Execution::InstantiateFunction(Utils::OpenHandle(this),
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Function>());
- return Utils::ToLocal(i::Handle<i::JSFunction>::cast(obj));
-}
-
-
-bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
- ON_BAILOUT(i::Isolate::Current(), "v8::FunctionTemplate::HasInstanceOf()",
- return false);
- i::Object* obj = *Utils::OpenHandle(*value);
- return obj->IsInstanceOf(*Utils::OpenHandle(this));
-}
-
-
-Local<External> v8::External::New(void* value) {
- STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::External::New()");
- LOG_API(isolate, "External::New");
- ENTER_V8(isolate);
- i::Handle<i::JSObject> external = isolate->factory()->NewExternal(value);
- return Utils::ExternalToLocal(external);
-}
-
-
-void* External::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return NULL;
- return ExternalValue(*Utils::OpenHandle(this));
-}
-
-
-Local<String> v8::String::Empty() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::String::Empty()")) {
- return v8::Local<String>();
- }
- LOG_API(isolate, "String::Empty()");
- return Utils::ToLocal(isolate->factory()->empty_string());
-}
-
-
-Local<String> v8::String::New(const char* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::New()");
- LOG_API(isolate, "String::New(char)");
- if (length == 0) return Empty();
- ENTER_V8(isolate);
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> result =
- isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- return Utils::ToLocal(result);
-}
-
-
-Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
- i::Handle<i::String> left_string = Utils::OpenHandle(*left);
- i::Isolate* isolate = left_string->GetIsolate();
- EnsureInitializedForIsolate(isolate, "v8::String::New()");
- LOG_API(isolate, "String::New(char)");
- ENTER_V8(isolate);
- i::Handle<i::String> right_string = Utils::OpenHandle(*right);
- i::Handle<i::String> result = isolate->factory()->NewConsString(left_string,
- right_string);
- return Utils::ToLocal(result);
-}
-
-
-Local<String> v8::String::NewUndetectable(const char* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()");
- LOG_API(isolate, "String::NewUndetectable(char)");
- ENTER_V8(isolate);
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> result =
- isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- result->MarkAsUndetectable();
- return Utils::ToLocal(result);
-}
-
-
-static int TwoByteStringLength(const uint16_t* data) {
- int length = 0;
- while (data[length] != '\0') length++;
- return length;
-}
-
-
-Local<String> v8::String::New(const uint16_t* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::New()");
- LOG_API(isolate, "String::New(uint16_)");
- if (length == 0) return Empty();
- ENTER_V8(isolate);
- if (length == -1) length = TwoByteStringLength(data);
- i::Handle<i::String> result =
- isolate->factory()->NewStringFromTwoByte(
- i::Vector<const uint16_t>(data, length));
- return Utils::ToLocal(result);
-}
-
-
-Local<String> v8::String::NewUndetectable(const uint16_t* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()");
- LOG_API(isolate, "String::NewUndetectable(uint16_)");
- ENTER_V8(isolate);
- if (length == -1) length = TwoByteStringLength(data);
- i::Handle<i::String> result =
- isolate->factory()->NewStringFromTwoByte(
- i::Vector<const uint16_t>(data, length));
- result->MarkAsUndetectable();
- return Utils::ToLocal(result);
-}
-
-
-i::Handle<i::String> NewExternalStringHandle(i::Isolate* isolate,
- v8::String::ExternalStringResource* resource) {
- i::Handle<i::String> result =
- isolate->factory()->NewExternalStringFromTwoByte(resource);
- return result;
-}
-
-
-i::Handle<i::String> NewExternalAsciiStringHandle(i::Isolate* isolate,
- v8::String::ExternalAsciiStringResource* resource) {
- i::Handle<i::String> result =
- isolate->factory()->NewExternalStringFromAscii(resource);
- return result;
-}
-
-
-Local<String> v8::String::NewExternal(
- v8::String::ExternalStringResource* resource) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
- LOG_API(isolate, "String::NewExternal");
- ENTER_V8(isolate);
- CHECK(resource && resource->data());
- i::Handle<i::String> result = NewExternalStringHandle(isolate, resource);
- isolate->heap()->external_string_table()->AddString(*result);
- return Utils::ToLocal(result);
-}
-
-
-bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
- i::Handle<i::String> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
- if (i::StringShape(*obj).IsExternalTwoByte()) {
- return false; // Already an external string.
- }
- ENTER_V8(isolate);
- if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
- return false;
- }
- if (isolate->heap()->IsInGCPostProcessing()) {
- return false;
- }
- CHECK(resource && resource->data());
- bool result = obj->MakeExternal(resource);
- if (result && !obj->IsInternalizedString()) {
- isolate->heap()->external_string_table()->AddString(*obj);
- }
- return result;
-}
-
-
-Local<String> v8::String::NewExternal(
- v8::String::ExternalAsciiStringResource* resource) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
- LOG_API(isolate, "String::NewExternal");
- ENTER_V8(isolate);
- CHECK(resource && resource->data());
- i::Handle<i::String> result = NewExternalAsciiStringHandle(isolate, resource);
- isolate->heap()->external_string_table()->AddString(*result);
- return Utils::ToLocal(result);
-}
-
-
-bool v8::String::MakeExternal(
- v8::String::ExternalAsciiStringResource* resource) {
- i::Handle<i::String> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
- if (i::StringShape(*obj).IsExternalTwoByte()) {
- return false; // Already an external string.
- }
- ENTER_V8(isolate);
- if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
- return false;
- }
- if (isolate->heap()->IsInGCPostProcessing()) {
- return false;
- }
- CHECK(resource && resource->data());
- bool result = obj->MakeExternal(resource);
- if (result && !obj->IsInternalizedString()) {
- isolate->heap()->external_string_table()->AddString(*obj);
- }
- return result;
-}
-
-
-bool v8::String::CanMakeExternal() {
- if (!internal::FLAG_clever_optimizations) return false;
- i::Handle<i::String> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
- if (isolate->string_tracker()->IsFreshUnusedString(obj)) return false;
- int size = obj->Size(); // Byte size of the original string.
- if (size < i::ExternalString::kShortSize) return false;
- i::StringShape shape(*obj);
- return !shape.IsExternal();
-}
-
-
-Local<v8::Object> v8::Object::New() {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Object::New()");
- LOG_API(isolate, "Object::New");
- ENTER_V8(isolate);
- i::Handle<i::JSObject> obj =
- isolate->factory()->NewJSObject(isolate->object_function());
- return Utils::ToLocal(obj);
-}
-
-
-Local<v8::Value> v8::NumberObject::New(double value) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::NumberObject::New()");
- LOG_API(isolate, "NumberObject::New");
- ENTER_V8(isolate);
- i::Handle<i::Object> number = isolate->factory()->NewNumber(value);
- i::Handle<i::Object> obj = isolate->factory()->ToObject(number);
- return Utils::ToLocal(obj);
-}
-
-
-double v8::NumberObject::NumberValue() const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::NumberObject::NumberValue()")) return 0;
- LOG_API(isolate, "NumberObject::NumberValue");
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- return jsvalue->value()->Number();
-}
-
-
-Local<v8::Value> v8::BooleanObject::New(bool value) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::BooleanObject::New()");
- LOG_API(isolate, "BooleanObject::New");
- ENTER_V8(isolate);
- i::Handle<i::Object> boolean(value
- ? isolate->heap()->true_value()
- : isolate->heap()->false_value(),
- isolate);
- i::Handle<i::Object> obj = isolate->factory()->ToObject(boolean);
- return Utils::ToLocal(obj);
-}
-
-
-bool v8::BooleanObject::BooleanValue() const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::BooleanObject::BooleanValue()")) return 0;
- LOG_API(isolate, "BooleanObject::BooleanValue");
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- return jsvalue->value()->IsTrue();
-}
-
-
-Local<v8::Value> v8::StringObject::New(Handle<String> value) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::StringObject::New()");
- LOG_API(isolate, "StringObject::New");
- ENTER_V8(isolate);
- i::Handle<i::Object> obj =
- isolate->factory()->ToObject(Utils::OpenHandle(*value));
- return Utils::ToLocal(obj);
-}
-
-
-Local<v8::String> v8::StringObject::StringValue() const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StringObject::StringValue()")) {
- return Local<v8::String>();
- }
- LOG_API(isolate, "StringObject::StringValue");
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- return Utils::ToLocal(
- i::Handle<i::String>(i::String::cast(jsvalue->value())));
-}
-
-
-Local<v8::Value> v8::Date::New(double time) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Date::New()");
- LOG_API(isolate, "Date::New");
- if (isnan(time)) {
- // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
- time = i::OS::nan_value();
- }
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj =
- i::Execution::NewDate(time, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Value>());
- return Utils::ToLocal(obj);
-}
-
-
-double v8::Date::NumberValue() const {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Date::NumberValue()")) return 0;
- LOG_API(isolate, "Date::NumberValue");
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
- return jsdate->value()->Number();
-}
-
-
-void v8::Date::DateTimeConfigurationChangeNotification() {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Date::DateTimeConfigurationChangeNotification()",
- return);
- LOG_API(isolate, "Date::DateTimeConfigurationChangeNotification");
- ENTER_V8(isolate);
-
- isolate->date_cache()->ResetDateCache();
-
- i::HandleScope scope(isolate);
- // Get the function ResetDateCache (defined in date.js).
- i::Handle<i::String> func_name_str =
- isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("ResetDateCache"));
- i::MaybeObject* result =
- isolate->js_builtins_object()->GetProperty(*func_name_str);
- i::Object* object_func;
- if (!result->ToObject(&object_func)) {
- return;
- }
-
- if (object_func->IsJSFunction()) {
- i::Handle<i::JSFunction> func =
- i::Handle<i::JSFunction>(i::JSFunction::cast(object_func));
-
- // Call ResetDateCache(0 but expect no exceptions:
- bool caught_exception = false;
- i::Execution::TryCall(func,
- isolate->js_builtins_object(),
- 0,
- NULL,
- &caught_exception);
- }
-}
-
-
-static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
- uint8_t flags_buf[3];
- int num_flags = 0;
- if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g';
- if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
- if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
- ASSERT(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf)));
- return FACTORY->InternalizeOneByteString(
- i::Vector<const uint8_t>(flags_buf, num_flags));
-}
-
-
-Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern,
- Flags flags) {
- i::Isolate* isolate = Utils::OpenHandle(*pattern)->GetIsolate();
- EnsureInitializedForIsolate(isolate, "v8::RegExp::New()");
- LOG_API(isolate, "RegExp::New");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::JSRegExp> obj = i::Execution::NewJSRegExp(
- Utils::OpenHandle(*pattern),
- RegExpFlagsToString(flags),
- &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::RegExp>());
- return Utils::ToLocal(i::Handle<i::JSRegExp>::cast(obj));
-}
-
-
-Local<v8::String> v8::RegExp::GetSource() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::RegExp::GetSource()")) {
- return Local<v8::String>();
- }
- i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
- return Utils::ToLocal(i::Handle<i::String>(obj->Pattern()));
-}
-
-
-// Assert that the static flags cast in GetFlags is valid.
-#define REGEXP_FLAG_ASSERT_EQ(api_flag, internal_flag) \
- STATIC_ASSERT(static_cast<int>(v8::RegExp::api_flag) == \
- static_cast<int>(i::JSRegExp::internal_flag))
-REGEXP_FLAG_ASSERT_EQ(kNone, NONE);
-REGEXP_FLAG_ASSERT_EQ(kGlobal, GLOBAL);
-REGEXP_FLAG_ASSERT_EQ(kIgnoreCase, IGNORE_CASE);
-REGEXP_FLAG_ASSERT_EQ(kMultiline, MULTILINE);
-#undef REGEXP_FLAG_ASSERT_EQ
-
-v8::RegExp::Flags v8::RegExp::GetFlags() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::GetFlags()")) {
- return v8::RegExp::kNone;
- }
- i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
- return static_cast<RegExp::Flags>(obj->GetFlags().value());
-}
-
-
-Local<v8::Array> v8::Array::New(int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Array::New()");
- LOG_API(isolate, "Array::New");
- ENTER_V8(isolate);
- int real_length = length > 0 ? length : 0;
- i::Handle<i::JSArray> obj = isolate->factory()->NewJSArray(real_length);
- i::Handle<i::Object> length_obj =
- isolate->factory()->NewNumberFromInt(real_length);
- obj->set_length(*length_obj);
- return Utils::ToLocal(obj);
-}
-
-
-uint32_t v8::Array::Length() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Array::Length()")) return 0;
- i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
- i::Object* length = obj->length();
- if (length->IsSmi()) {
- return i::Smi::cast(length)->value();
- } else {
- return static_cast<uint32_t>(length->Number());
- }
-}
-
-
-Local<Object> Array::CloneElementAt(uint32_t index) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local<Object>());
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!self->HasFastObjectElements()) {
- return Local<Object>();
- }
- i::FixedArray* elms = i::FixedArray::cast(self->elements());
- i::Object* paragon = elms->get(index);
- if (!paragon->IsJSObject()) {
- return Local<Object>();
- }
- i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
- EXCEPTION_PREAMBLE(isolate);
- ENTER_V8(isolate);
- i::Handle<i::JSObject> result = i::Copy(paragon_handle);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
- return Utils::ToLocal(result);
-}
-
-
-Local<String> v8::String::NewSymbol(const char* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewSymbol()");
- LOG_API(isolate, "String::NewSymbol(char)");
- ENTER_V8(isolate);
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> result = isolate->factory()->InternalizeUtf8String(
- i::Vector<const char>(data, length));
- return Utils::ToLocal(result);
-}
-
-
-Local<Number> v8::Number::New(double value) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Number::New()");
- if (isnan(value)) {
- // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
- value = i::OS::nan_value();
- }
- ENTER_V8(isolate);
- i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
- return Utils::NumberToLocal(result);
-}
-
-
-Local<Integer> v8::Integer::New(int32_t value) {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
- return v8::Integer::New(value, reinterpret_cast<Isolate*>(isolate));
-}
-
-
-Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Integer::NewFromUnsigned()");
- return Integer::NewFromUnsigned(value, reinterpret_cast<Isolate*>(isolate));
-}
-
-
-Local<Integer> v8::Integer::New(int32_t value, Isolate* isolate) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ASSERT(internal_isolate->IsInitialized());
- if (i::Smi::IsValid(value)) {
- return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
- internal_isolate));
- }
- ENTER_V8(internal_isolate);
- i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
- return Utils::IntegerToLocal(result);
-}
-
-
-Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ASSERT(internal_isolate->IsInitialized());
- bool fits_into_int32_t = (value & (1 << 31)) == 0;
- if (fits_into_int32_t) {
- return Integer::New(static_cast<int32_t>(value), isolate);
- }
- ENTER_V8(internal_isolate);
- i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
- return Utils::IntegerToLocal(result);
-}
-
-
-void V8::IgnoreOutOfMemoryException() {
- EnterIsolateIfNeeded()->set_ignore_out_of_memory(true);
-}
-
-
-bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()");
- ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- NeanderArray listeners(isolate->factory()->message_listeners());
- NeanderObject obj(2);
- obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
- obj.set(1, data.IsEmpty() ? isolate->heap()->undefined_value()
- : *Utils::OpenHandle(*data));
- listeners.add(obj.value());
- return true;
-}
-
-
-void V8::RemoveMessageListeners(MessageCallback that) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::V8::RemoveMessageListener()");
- ON_BAILOUT(isolate, "v8::V8::RemoveMessageListeners()", return);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- NeanderArray listeners(isolate->factory()->message_listeners());
- for (int i = 0; i < listeners.length(); i++) {
- if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones
-
- NeanderObject listener(i::JSObject::cast(listeners.get(i)));
- i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
- if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
- listeners.set(i, isolate->heap()->undefined_value());
- }
- }
-}
-
-
-void V8::SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options) {
- i::Isolate::Current()->SetCaptureStackTraceForUncaughtExceptions(
- capture,
- frame_limit,
- options);
-}
-
-
-void V8::SetCounterFunction(CounterLookupCallback callback) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetCounterFunction()")) return;
- isolate->stats_table()->SetCounterFunction(callback);
-}
-
-void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetCreateHistogramFunction()")) return;
- isolate->stats_table()->SetCreateHistogramFunction(callback);
- isolate->InitializeLoggingAndCounters();
- isolate->counters()->ResetHistograms();
-}
-
-void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetAddHistogramSampleFunction()")) return;
- isolate->stats_table()->
- SetAddHistogramSampleFunction(callback);
-}
-
-void V8::SetFailedAccessCheckCallbackFunction(
- FailedAccessCheckCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetFailedAccessCheckCallbackFunction()")) {
- return;
- }
- isolate->SetFailedAccessCheckCallback(callback);
-}
-
-
-void V8::SetUserObjectComparisonCallbackFunction(
- UserObjectComparisonCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::V8::SetUserObjectComparisonCallbackFunction()")) {
- return;
- }
- isolate->SetUserObjectComparisonCallback(callback);
-}
-
-
-void V8::AddObjectGroup(Persistent<Value>* objects,
- size_t length,
- RetainedObjectInfo* info) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
- STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- isolate->global_handles()->AddObjectGroup(
- reinterpret_cast<i::Object***>(objects), length, info);
-}
-
-
-void V8::AddObjectGroup(Isolate* exported_isolate,
- Persistent<Value>* objects,
- size_t length,
- RetainedObjectInfo* info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
- ASSERT(isolate == i::Isolate::Current());
- if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
- STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- isolate->global_handles()->AddObjectGroup(
- reinterpret_cast<i::Object***>(objects), length, info);
-}
-
-
-void V8::AddImplicitReferences(Persistent<Object> parent,
- Persistent<Value>* children,
- size_t length) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddImplicitReferences()")) return;
- STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- isolate->global_handles()->AddImplicitReferences(
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*parent)).location(),
- reinterpret_cast<i::Object***>(children), length);
-}
-
-
-intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() ||
- IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
- return 0;
- }
- return isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
- change_in_bytes);
-}
-
-
-void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;
- isolate->heap()->SetGlobalGCPrologueCallback(callback);
-}
-
-
-void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCEpilogueCallback()")) return;
- isolate->heap()->SetGlobalGCEpilogueCallback(callback);
-}
-
-
-void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddGCPrologueCallback()")) return;
- isolate->heap()->AddGCPrologueCallback(callback, gc_type);
-}
-
-
-void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveGCPrologueCallback()")) return;
- isolate->heap()->RemoveGCPrologueCallback(callback);
-}
-
-
-void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddGCEpilogueCallback()")) return;
- isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
-}
-
-
-void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveGCEpilogueCallback()")) return;
- isolate->heap()->RemoveGCEpilogueCallback(callback);
-}
-
-
-void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddMemoryAllocationCallback()")) return;
- isolate->memory_allocator()->AddMemoryAllocationCallback(
- callback, space, action);
-}
-
-
-void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveMemoryAllocationCallback()")) return;
- isolate->memory_allocator()->RemoveMemoryAllocationCallback(
- callback);
-}
-
-
-void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
- if (callback == NULL) return;
- i::Isolate::EnsureDefaultIsolate();
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddLeaveScriptCallback()")) return;
- i::V8::AddCallCompletedCallback(callback);
-}
-
-
-void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
- i::Isolate::EnsureDefaultIsolate();
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveLeaveScriptCallback()")) return;
- i::V8::RemoveCallCompletedCallback(callback);
-}
-
-
-void V8::PauseProfiler() {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->logger()->PauseProfiler();
-}
-
-
-void V8::ResumeProfiler() {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->logger()->ResumeProfiler();
-}
-
-
-bool V8::IsProfilerPaused() {
- i::Isolate* isolate = i::Isolate::Current();
- return isolate->logger()->IsProfilerPaused();
-}
-
-
-int V8::GetCurrentThreadId() {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "V8::GetCurrentThreadId()");
- return isolate->thread_id().ToInteger();
-}
-
-
-void V8::TerminateExecution(int thread_id) {
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return;
- API_ENTRY_CHECK(isolate, "V8::TerminateExecution()");
- // If the thread_id identifies the current thread just terminate
- // execution right away. Otherwise, ask the thread manager to
- // terminate the thread with the given id if any.
- i::ThreadId internal_tid = i::ThreadId::FromInteger(thread_id);
- if (isolate->thread_id().Equals(internal_tid)) {
- isolate->stack_guard()->TerminateExecution();
- } else {
- isolate->thread_manager()->TerminateExecution(internal_tid);
- }
-}
-
-
-void V8::TerminateExecution(Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->TerminateExecution();
- } else {
- i::Isolate::GetDefaultIsolateStackGuard()->TerminateExecution();
- }
-}
-
-
-bool V8::IsExecutionTerminating(Isolate* isolate) {
- i::Isolate* i_isolate = isolate != NULL ?
- reinterpret_cast<i::Isolate*>(isolate) : i::Isolate::Current();
- return IsExecutionTerminatingCheck(i_isolate);
-}
-
-
-Isolate* Isolate::GetCurrent() {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- return reinterpret_cast<Isolate*>(isolate);
-}
-
-
-Isolate* Isolate::New() {
- i::Isolate* isolate = new i::Isolate();
- return reinterpret_cast<Isolate*>(isolate);
-}
-
-
-void Isolate::Dispose() {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- if (!ApiCheck(!isolate->IsInUse(),
- "v8::Isolate::Dispose()",
- "Disposing the isolate that is entered by a thread.")) {
- return;
- }
- isolate->TearDown();
-}
-
-
-void Isolate::Enter() {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->Enter();
-}
-
-
-void Isolate::Exit() {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->Exit();
-}
-
-
-void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- i::Heap* heap = isolate->heap();
- heap_statistics->total_heap_size_ = heap->CommittedMemory();
- heap_statistics->total_heap_size_executable_ =
- heap->CommittedMemoryExecutable();
- heap_statistics->total_physical_size_ = heap->CommittedPhysicalMemory();
- heap_statistics->used_heap_size_ = heap->SizeOfObjects();
- heap_statistics->heap_size_limit_ = heap->MaxReserved();
-}
-
-
-String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
- : str_(NULL), length_(0) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::Utf8Value::Utf8Value()")) return;
- if (obj.IsEmpty()) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- TryCatch try_catch;
- Handle<String> str = obj->ToString();
- if (str.IsEmpty()) return;
- i::Handle<i::String> i_str = Utils::OpenHandle(*str);
- length_ = v8::Utf8Length(*i_str, isolate);
- str_ = i::NewArray<char>(length_ + 1);
- str->WriteUtf8(str_);
-}
-
-
-String::Utf8Value::~Utf8Value() {
- i::DeleteArray(str_);
-}
-
-
-String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj)
- : str_(NULL), length_(0) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::AsciiValue::AsciiValue()")) return;
- if (obj.IsEmpty()) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- TryCatch try_catch;
- Handle<String> str = obj->ToString();
- if (str.IsEmpty()) return;
- length_ = str->Length();
- str_ = i::NewArray<char>(length_ + 1);
- str->WriteAscii(str_);
-}
-
-
-String::AsciiValue::~AsciiValue() {
- i::DeleteArray(str_);
-}
-
-
-String::Value::Value(v8::Handle<v8::Value> obj)
- : str_(NULL), length_(0) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::Value::Value()")) return;
- if (obj.IsEmpty()) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- TryCatch try_catch;
- Handle<String> str = obj->ToString();
- if (str.IsEmpty()) return;
- length_ = str->Length();
- str_ = i::NewArray<uint16_t>(length_ + 1);
- str->Write(str_);
-}
-
-
-String::Value::~Value() {
- i::DeleteArray(str_);
-}
-
-Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "RangeError");
- ON_BAILOUT(isolate, "v8::Exception::RangeError()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = isolate->factory()->NewRangeError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error, isolate);
- return Utils::ToLocal(result);
-}
-
-Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "ReferenceError");
- ON_BAILOUT(isolate, "v8::Exception::ReferenceError()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result =
- isolate->factory()->NewReferenceError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error, isolate);
- return Utils::ToLocal(result);
-}
-
-Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "SyntaxError");
- ON_BAILOUT(isolate, "v8::Exception::SyntaxError()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = isolate->factory()->NewSyntaxError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error, isolate);
- return Utils::ToLocal(result);
-}
-
-Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "TypeError");
- ON_BAILOUT(isolate, "v8::Exception::TypeError()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = isolate->factory()->NewTypeError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error, isolate);
- return Utils::ToLocal(result);
-}
-
-Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "Error");
- ON_BAILOUT(isolate, "v8::Exception::Error()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = isolate->factory()->NewError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error, isolate);
- return Utils::ToLocal(result);
-}
-
-
-// --- D e b u g S u p p o r t ---
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-static void EventCallbackWrapper(const v8::Debug::EventDetails& event_details) {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->debug_event_callback() != NULL) {
- isolate->debug_event_callback()(event_details.GetEvent(),
- event_details.GetExecutionState(),
- event_details.GetEventData(),
- event_details.GetCallbackData());
- }
-}
-
-
-bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener()");
- ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
- ENTER_V8(isolate);
-
- isolate->set_debug_event_callback(that);
-
- i::HandleScope scope(isolate);
- i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
- if (that != NULL) {
- foreign =
- isolate->factory()->NewForeign(FUNCTION_ADDR(EventCallbackWrapper));
- }
- isolate->debugger()->SetEventListener(foreign,
- Utils::OpenHandle(*data, true));
- return true;
-}
-
-
-bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener2()");
- ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener2()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
- if (that != NULL) {
- foreign = isolate->factory()->NewForeign(FUNCTION_ADDR(that));
- }
- isolate->debugger()->SetEventListener(foreign,
- Utils::OpenHandle(*data, true));
- return true;
-}
-
-
-bool Debug::SetDebugEventListener(v8::Handle<v8::Object> that,
- Handle<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
- ENTER_V8(isolate);
- isolate->debugger()->SetEventListener(Utils::OpenHandle(*that),
- Utils::OpenHandle(*data, true));
- return true;
-}
-
-
-void Debug::DebugBreak(Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->DebugBreak();
- } else {
- i::Isolate::GetDefaultIsolateStackGuard()->DebugBreak();
- }
-}
-
-
-void Debug::CancelDebugBreak(Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->stack_guard()->Continue(i::DEBUGBREAK);
- } else {
- i::Isolate::GetDefaultIsolateStackGuard()->Continue(i::DEBUGBREAK);
- }
-}
-
-
-void Debug::DebugBreakForCommand(ClientData* data, Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->debugger()->EnqueueDebugCommand(data);
- } else {
- i::Isolate::GetDefaultIsolateDebugger()->EnqueueDebugCommand(data);
- }
-}
-
-
-static void MessageHandlerWrapper(const v8::Debug::Message& message) {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->message_handler()) {
- v8::String::Value json(message.GetJSON());
- (isolate->message_handler())(*json, json.length(), message.GetClientData());
- }
-}
-
-
-void Debug::SetMessageHandler(v8::Debug::MessageHandler handler,
- bool message_handler_thread) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
- ENTER_V8(isolate);
-
- // Message handler thread not supported any more. Parameter temporally left in
- // the API for client compatibility reasons.
- CHECK(!message_handler_thread);
-
- // TODO(sgjesse) support the old message handler API through a simple wrapper.
- isolate->set_message_handler(handler);
- if (handler != NULL) {
- isolate->debugger()->SetMessageHandler(MessageHandlerWrapper);
- } else {
- isolate->debugger()->SetMessageHandler(NULL);
- }
-}
-
-
-void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
- ENTER_V8(isolate);
- isolate->debugger()->SetMessageHandler(handler);
-}
-
-
-void Debug::SendCommand(const uint16_t* command, int length,
- ClientData* client_data,
- Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->debugger()->ProcessCommand(
- i::Vector<const uint16_t>(command, length), client_data);
- } else {
- i::Isolate::GetDefaultIsolateDebugger()->ProcessCommand(
- i::Vector<const uint16_t>(command, length), client_data);
- }
-}
-
-
-void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
- int period) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetHostDispatchHandler");
- ENTER_V8(isolate);
- isolate->debugger()->SetHostDispatchHandler(handler, period);
-}
-
-
-void Debug::SetDebugMessageDispatchHandler(
- DebugMessageDispatchHandler handler, bool provide_locker) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate,
- "v8::Debug::SetDebugMessageDispatchHandler");
- ENTER_V8(isolate);
- isolate->debugger()->SetDebugMessageDispatchHandler(
- handler, provide_locker);
-}
-
-
-Local<Value> Debug::Call(v8::Handle<v8::Function> fun,
- v8::Handle<v8::Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return Local<Value>();
- ON_BAILOUT(isolate, "v8::Debug::Call()", return Local<Value>());
- ENTER_V8(isolate);
- i::Handle<i::Object> result;
- EXCEPTION_PREAMBLE(isolate);
- if (data.IsEmpty()) {
- result = isolate->debugger()->Call(Utils::OpenHandle(*fun),
- isolate->factory()->undefined_value(),
- &has_pending_exception);
- } else {
- result = isolate->debugger()->Call(Utils::OpenHandle(*fun),
- Utils::OpenHandle(*data),
- &has_pending_exception);
- }
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return Utils::ToLocal(result);
-}
-
-
-Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return Local<Value>();
- ON_BAILOUT(isolate, "v8::Debug::GetMirror()", return Local<Value>());
- ENTER_V8(isolate);
- v8::HandleScope scope;
- i::Debug* isolate_debug = isolate->debug();
- isolate_debug->Load();
- i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
- i::Handle<i::String> name = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("MakeMirror"));
- i::Handle<i::Object> fun_obj = i::GetProperty(isolate, debug, name);
- i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
- v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun);
- const int kArgc = 1;
- v8::Handle<v8::Value> argv[kArgc] = { obj };
- EXCEPTION_PREAMBLE(isolate);
- v8::Handle<v8::Value> result = v8_fun->Call(Utils::ToLocal(debug),
- kArgc,
- argv);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return scope.Close(result);
-}
-
-
-bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
- return i::Isolate::Current()->debugger()->StartAgent(name, port,
- wait_for_connection);
-}
-
-
-void Debug::DisableAgent() {
- return i::Isolate::Current()->debugger()->StopAgent();
-}
-
-
-void Debug::ProcessDebugMessages() {
- i::Execution::ProcessDebugMessages(true);
-}
-
-
-Local<Context> Debug::GetDebugContext() {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::GetDebugContext()");
- ENTER_V8(isolate);
- return Utils::ToLocal(i::Isolate::Current()->debugger()->GetDebugContext());
-}
-
-
-void Debug::SetLiveEditEnabled(bool enable, Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- i::Debugger* debugger;
- if (isolate != NULL) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- debugger = internal_isolate->debugger();
- } else {
- debugger = i::Isolate::GetDefaultIsolateDebugger();
- }
- debugger->set_live_edit_enabled(enable);
-}
-
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-Handle<String> CpuProfileNode::GetFunctionName() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetFunctionName");
- const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
- const i::CodeEntry* entry = node->entry();
- if (!entry->has_name_prefix()) {
- return Handle<String>(ToApi<String>(
- isolate->factory()->InternalizeUtf8String(entry->name())));
- } else {
- return Handle<String>(ToApi<String>(isolate->factory()->NewConsString(
- isolate->factory()->InternalizeUtf8String(entry->name_prefix()),
- isolate->factory()->InternalizeUtf8String(entry->name()))));
- }
-}
-
-
-Handle<String> CpuProfileNode::GetScriptResourceName() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName");
- const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
- return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String(
- node->entry()->resource_name())));
-}
-
-
-int CpuProfileNode::GetLineNumber() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetLineNumber");
- return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number();
-}
-
-
-double CpuProfileNode::GetTotalTime() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalTime");
- return reinterpret_cast<const i::ProfileNode*>(this)->GetTotalMillis();
-}
-
-
-double CpuProfileNode::GetSelfTime() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfTime");
- return reinterpret_cast<const i::ProfileNode*>(this)->GetSelfMillis();
-}
-
-
-double CpuProfileNode::GetTotalSamplesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalSamplesCount");
- return reinterpret_cast<const i::ProfileNode*>(this)->total_ticks();
-}
-
-
-double CpuProfileNode::GetSelfSamplesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfSamplesCount");
- return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
-}
-
-
-unsigned CpuProfileNode::GetCallUid() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetCallUid");
- return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid();
-}
-
-
-int CpuProfileNode::GetChildrenCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetChildrenCount");
- return reinterpret_cast<const i::ProfileNode*>(this)->children()->length();
-}
-
-
-const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetChild");
- const i::ProfileNode* child =
- reinterpret_cast<const i::ProfileNode*>(this)->children()->at(index);
- return reinterpret_cast<const CpuProfileNode*>(child);
-}
-
-
-void CpuProfile::Delete() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::Delete");
- i::CpuProfiler::DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
- if (i::CpuProfiler::GetProfilesCount() == 0 &&
- !i::CpuProfiler::HasDetachedProfiles()) {
- // If this was the last profile, clean up all accessory data as well.
- i::CpuProfiler::DeleteAllProfiles();
- }
-}
-
-
-unsigned CpuProfile::GetUid() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetUid");
- return reinterpret_cast<const i::CpuProfile*>(this)->uid();
-}
-
-
-Handle<String> CpuProfile::GetTitle() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetTitle");
- const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String(
- profile->title())));
-}
-
-
-const CpuProfileNode* CpuProfile::GetBottomUpRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetBottomUpRoot");
- const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return reinterpret_cast<const CpuProfileNode*>(profile->bottom_up()->root());
-}
-
-
-const CpuProfileNode* CpuProfile::GetTopDownRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetTopDownRoot");
- const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root());
-}
-
-
-int CpuProfiler::GetProfilesCount() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::GetProfilesCount");
- return i::CpuProfiler::GetProfilesCount();
-}
-
-
-const CpuProfile* CpuProfiler::GetProfile(int index,
- Handle<Value> security_token) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::GetProfile");
- return reinterpret_cast<const CpuProfile*>(
- i::CpuProfiler::GetProfile(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- index));
-}
-
-
-const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
- Handle<Value> security_token) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::FindProfile");
- return reinterpret_cast<const CpuProfile*>(
- i::CpuProfiler::FindProfile(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- uid));
-}
-
-
-void CpuProfiler::StartProfiling(Handle<String> title) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::StartProfiling");
- i::CpuProfiler::StartProfiling(*Utils::OpenHandle(*title));
-}
-
-
-const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
- Handle<Value> security_token) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::StopProfiling");
- return reinterpret_cast<const CpuProfile*>(
- i::CpuProfiler::StopProfiling(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- *Utils::OpenHandle(*title)));
-}
-
-
-void CpuProfiler::DeleteAllProfiles() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::DeleteAllProfiles");
- i::CpuProfiler::DeleteAllProfiles();
-}
-
-
-static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
- return const_cast<i::HeapGraphEdge*>(
- reinterpret_cast<const i::HeapGraphEdge*>(edge));
-}
-
-
-HeapGraphEdge::Type HeapGraphEdge::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetType");
- return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
-}
-
-
-Handle<Value> HeapGraphEdge::GetName() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetName");
- i::HeapGraphEdge* edge = ToInternal(this);
- switch (edge->type()) {
- case i::HeapGraphEdge::kContextVariable:
- case i::HeapGraphEdge::kInternal:
- case i::HeapGraphEdge::kProperty:
- case i::HeapGraphEdge::kShortcut:
- return Handle<String>(ToApi<String>(
- isolate->factory()->InternalizeUtf8String(edge->name())));
- case i::HeapGraphEdge::kElement:
- case i::HeapGraphEdge::kHidden:
- return Handle<Number>(ToApi<Number>(
- isolate->factory()->NewNumberFromInt(edge->index())));
- default: UNREACHABLE();
- }
- return v8::Undefined();
-}
-
-
-const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode");
- const i::HeapEntry* from = ToInternal(this)->from();
- return reinterpret_cast<const HeapGraphNode*>(from);
-}
-
-
-const HeapGraphNode* HeapGraphEdge::GetToNode() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetToNode");
- const i::HeapEntry* to = ToInternal(this)->to();
- return reinterpret_cast<const HeapGraphNode*>(to);
-}
-
-
-static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
- return const_cast<i::HeapEntry*>(
- reinterpret_cast<const i::HeapEntry*>(entry));
-}
-
-
-HeapGraphNode::Type HeapGraphNode::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetType");
- return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
-}
-
-
-Handle<String> HeapGraphNode::GetName() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetName");
- return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String(
- ToInternal(this)->name())));
-}
-
-
-SnapshotObjectId HeapGraphNode::GetId() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
- return ToInternal(this)->id();
-}
-
-
-int HeapGraphNode::GetSelfSize() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetSelfSize");
- return ToInternal(this)->self_size();
-}
-
-
-int HeapGraphNode::GetChildrenCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
- return ToInternal(this)->children().length();
-}
-
-
-const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
- return reinterpret_cast<const HeapGraphEdge*>(
- ToInternal(this)->children()[index]);
-}
-
-
-v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue");
- i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject();
- return v8::Handle<Value>(!object.is_null() ?
- ToApi<Value>(object) : ToApi<Value>(
- isolate->factory()->undefined_value()));
-}
-
-
-static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
- return const_cast<i::HeapSnapshot*>(
- reinterpret_cast<const i::HeapSnapshot*>(snapshot));
-}
-
-
-void HeapSnapshot::Delete() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::Delete");
- if (i::HeapProfiler::GetSnapshotsCount() > 1) {
- ToInternal(this)->Delete();
- } else {
- // If this is the last snapshot, clean up all accessory data as well.
- i::HeapProfiler::DeleteAllSnapshots();
- }
-}
-
-
-HeapSnapshot::Type HeapSnapshot::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetType");
- return static_cast<HeapSnapshot::Type>(ToInternal(this)->type());
-}
-
-
-unsigned HeapSnapshot::GetUid() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetUid");
- return ToInternal(this)->uid();
-}
-
-
-Handle<String> HeapSnapshot::GetTitle() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle");
- return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String(
- ToInternal(this)->title())));
-}
-
-
-const HeapGraphNode* HeapSnapshot::GetRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetHead");
- return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
-}
-
-
-const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
- return reinterpret_cast<const HeapGraphNode*>(
- ToInternal(this)->GetEntryById(id));
-}
-
-
-int HeapSnapshot::GetNodesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
- return ToInternal(this)->entries().length();
-}
-
-
-const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
- return reinterpret_cast<const HeapGraphNode*>(
- &ToInternal(this)->entries().at(index));
-}
-
-
-SnapshotObjectId HeapSnapshot::GetMaxSnapshotJSObjectId() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetMaxSnapshotJSObjectId");
- return ToInternal(this)->max_snapshot_js_object_id();
-}
-
-
-void HeapSnapshot::Serialize(OutputStream* stream,
- HeapSnapshot::SerializationFormat format) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::Serialize");
- ApiCheck(format == kJSON,
- "v8::HeapSnapshot::Serialize",
- "Unknown serialization format");
- ApiCheck(stream->GetOutputEncoding() == OutputStream::kAscii,
- "v8::HeapSnapshot::Serialize",
- "Unsupported output encoding");
- ApiCheck(stream->GetChunkSize() > 0,
- "v8::HeapSnapshot::Serialize",
- "Invalid stream chunk size");
- i::HeapSnapshotJSONSerializer serializer(ToInternal(this));
- serializer.Serialize(stream);
-}
-
-
-int HeapProfiler::GetSnapshotsCount() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotsCount");
- return i::HeapProfiler::GetSnapshotsCount();
-}
-
-
-const HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshot");
- return reinterpret_cast<const HeapSnapshot*>(
- i::HeapProfiler::GetSnapshot(index));
-}
-
-
-const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::FindSnapshot");
- return reinterpret_cast<const HeapSnapshot*>(
- i::HeapProfiler::FindSnapshot(uid));
-}
-
-
-SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotObjectId");
- i::Handle<i::Object> obj = Utils::OpenHandle(*value);
- return i::HeapProfiler::GetSnapshotObjectId(obj);
-}
-
-
-const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
- HeapSnapshot::Type type,
- ActivityControl* control,
- ObjectNameResolver* resolver) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
- i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
- switch (type) {
- case HeapSnapshot::kFull:
- internal_type = i::HeapSnapshot::kFull;
- break;
- default:
- UNREACHABLE();
- }
- return reinterpret_cast<const HeapSnapshot*>(
- i::HeapProfiler::TakeSnapshot(
- *Utils::OpenHandle(*title), internal_type, control, resolver));
-}
-
-
-void HeapProfiler::StartHeapObjectsTracking() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::StartHeapObjectsTracking");
- i::HeapProfiler::StartHeapObjectsTracking();
-}
-
-
-void HeapProfiler::StopHeapObjectsTracking() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::StopHeapObjectsTracking");
- i::HeapProfiler::StopHeapObjectsTracking();
-}
-
-
-SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::PushHeapObjectsStats");
- return i::HeapProfiler::PushHeapObjectsStats(stream);
-}
-
-
-void HeapProfiler::DeleteAllSnapshots() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots");
- i::HeapProfiler::DeleteAllSnapshots();
-}
-
-
-void HeapProfiler::DefineWrapperClass(uint16_t class_id,
- WrapperInfoCallback callback) {
- i::Isolate::Current()->heap_profiler()->DefineWrapperClass(class_id,
- callback);
-}
-
-
-int HeapProfiler::GetPersistentHandleCount() {
- i::Isolate* isolate = i::Isolate::Current();
- return isolate->global_handles()->NumberOfGlobalHandles();
-}
-
-
-size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
- return i::HeapProfiler::GetMemorySizeUsedByProfiler();
-}
-
-
-v8::Testing::StressType internal::Testing::stress_type_ =
- v8::Testing::kStressTypeOpt;
-
-
-void Testing::SetStressRunType(Testing::StressType type) {
- internal::Testing::set_stress_type(type);
-}
-
-int Testing::GetStressRuns() {
- if (internal::FLAG_stress_runs != 0) return internal::FLAG_stress_runs;
-#ifdef DEBUG
- // In debug mode the code runs much slower so stressing will only make two
- // runs.
- return 2;
-#else
- return 5;
-#endif
-}
-
-
-static void SetFlagsFromString(const char* flags) {
- V8::SetFlagsFromString(flags, i::StrLength(flags));
-}
-
-
-void Testing::PrepareStressRun(int run) {
- static const char* kLazyOptimizations =
- "--prepare-always-opt "
- "--max-inlined-source-size=999999 "
- "--max-inlined-nodes=999999 "
- "--max-inlined-nodes-cumulative=999999 "
- "--noalways-opt";
- static const char* kForcedOptimizations = "--always-opt";
-
- // If deoptimization stressed turn on frequent deoptimization. If no value
- // is spefified through --deopt-every-n-times use a default default value.
- static const char* kDeoptEvery13Times = "--deopt-every-n-times=13";
- if (internal::Testing::stress_type() == Testing::kStressTypeDeopt &&
- internal::FLAG_deopt_every_n_times == 0) {
- SetFlagsFromString(kDeoptEvery13Times);
- }
-
-#ifdef DEBUG
- // As stressing in debug mode only make two runs skip the deopt stressing
- // here.
- if (run == GetStressRuns() - 1) {
- SetFlagsFromString(kForcedOptimizations);
- } else {
- SetFlagsFromString(kLazyOptimizations);
- }
-#else
- if (run == GetStressRuns() - 1) {
- SetFlagsFromString(kForcedOptimizations);
- } else if (run != GetStressRuns() - 2) {
- SetFlagsFromString(kLazyOptimizations);
- }
-#endif
-}
-
-
-// TODO(svenpanne) Deprecate this.
-void Testing::DeoptimizeAll() {
- i::Isolate* isolate = i::Isolate::Current();
- i::HandleScope scope(isolate);
- internal::Deoptimizer::DeoptimizeAll();
-}
-
-
-namespace internal {
-
-
-void HandleScopeImplementer::FreeThreadResources() {
- Free();
-}
-
-
-char* HandleScopeImplementer::ArchiveThread(char* storage) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- handle_scope_data_ = *current;
- memcpy(storage, this, sizeof(*this));
-
- ResetAfterArchive();
- current->Initialize();
-
- return storage + ArchiveSpacePerThread();
-}
-
-
-int HandleScopeImplementer::ArchiveSpacePerThread() {
- return sizeof(HandleScopeImplementer);
-}
-
-
-char* HandleScopeImplementer::RestoreThread(char* storage) {
- memcpy(this, storage, sizeof(*this));
- *isolate_->handle_scope_data() = handle_scope_data_;
- return storage + ArchiveSpacePerThread();
-}
-
-
-void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
-#ifdef DEBUG
- bool found_block_before_deferred = false;
-#endif
- // Iterate over all handles in the blocks except for the last.
- for (int i = blocks()->length() - 2; i >= 0; --i) {
- Object** block = blocks()->at(i);
- if (last_handle_before_deferred_block_ != NULL &&
- (last_handle_before_deferred_block_ < &block[kHandleBlockSize]) &&
- (last_handle_before_deferred_block_ >= block)) {
- v->VisitPointers(block, last_handle_before_deferred_block_);
- ASSERT(!found_block_before_deferred);
-#ifdef DEBUG
- found_block_before_deferred = true;
-#endif
- } else {
- v->VisitPointers(block, &block[kHandleBlockSize]);
- }
- }
-
- ASSERT(last_handle_before_deferred_block_ == NULL ||
- found_block_before_deferred);
-
- // Iterate over live handles in the last block (if any).
- if (!blocks()->is_empty()) {
- v->VisitPointers(blocks()->last(), handle_scope_data_.next);
- }
-
- if (!saved_contexts_.is_empty()) {
- Object** start = reinterpret_cast<Object**>(&saved_contexts_.first());
- v->VisitPointers(start, start + saved_contexts_.length());
- }
-}
-
-
-void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- handle_scope_data_ = *current;
- IterateThis(v);
-}
-
-
-char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
- HandleScopeImplementer* scope_implementer =
- reinterpret_cast<HandleScopeImplementer*>(storage);
- scope_implementer->IterateThis(v);
- return storage + ArchiveSpacePerThread();
-}
-
-
-DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) {
- DeferredHandles* deferred =
- new DeferredHandles(isolate()->handle_scope_data()->next, isolate());
-
- while (!blocks_.is_empty()) {
- Object** block_start = blocks_.last();
- Object** block_limit = &block_start[kHandleBlockSize];
- // We should not need to check for NoHandleAllocation here. Assert
- // this.
- ASSERT(prev_limit == block_limit ||
- !(block_start <= prev_limit && prev_limit <= block_limit));
- if (prev_limit == block_limit) break;
- deferred->blocks_.Add(blocks_.last());
- blocks_.RemoveLast();
- }
-
- // deferred->blocks_ now contains the blocks installed on the
- // HandleScope stack since BeginDeferredScope was called, but in
- // reverse order.
-
- ASSERT(prev_limit == NULL || !blocks_.is_empty());
-
- ASSERT(!blocks_.is_empty() && prev_limit != NULL);
- ASSERT(last_handle_before_deferred_block_ != NULL);
- last_handle_before_deferred_block_ = NULL;
- return deferred;
-}
-
-
-void HandleScopeImplementer::BeginDeferredScope() {
- ASSERT(last_handle_before_deferred_block_ == NULL);
- last_handle_before_deferred_block_ = isolate()->handle_scope_data()->next;
-}
-
-
-DeferredHandles::~DeferredHandles() {
- isolate_->UnlinkDeferredHandles(this);
-
- for (int i = 0; i < blocks_.length(); i++) {
-#ifdef DEBUG
- HandleScope::ZapRange(blocks_[i], &blocks_[i][kHandleBlockSize]);
-#endif
- isolate_->handle_scope_implementer()->ReturnBlock(blocks_[i]);
- }
-}
-
-
-void DeferredHandles::Iterate(ObjectVisitor* v) {
- ASSERT(!blocks_.is_empty());
-
- ASSERT((first_block_limit_ >= blocks_.first()) &&
- (first_block_limit_ <= &(blocks_.first())[kHandleBlockSize]));
-
- v->VisitPointers(blocks_.first(), first_block_limit_);
-
- for (int i = 1; i < blocks_.length(); i++) {
- v->VisitPointers(blocks_[i], &blocks_[i][kHandleBlockSize]);
- }
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/api.h b/src/3rdparty/v8/src/api.h
deleted file mode 100644
index ca2240b..0000000
--- a/src/3rdparty/v8/src/api.h
+++ /dev/null
@@ -1,594 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_API_H_
-#define V8_API_H_
-
-#include "v8.h"
-
-#include "../include/v8-testing.h"
-#include "apiutils.h"
-#include "contexts.h"
-#include "factory.h"
-#include "isolate.h"
-#include "list-inl.h"
-
-namespace v8 {
-
-// Constants used in the implementation of the API. The most natural thing
-// would usually be to place these with the classes that use them, but
-// we want to keep them out of v8.h because it is an externally
-// visible file.
-class Consts {
- public:
- enum TemplateType {
- FUNCTION_TEMPLATE = 0,
- OBJECT_TEMPLATE = 1
- };
-};
-
-
-// Utilities for working with neander-objects, primitive
-// env-independent JSObjects used by the api.
-class NeanderObject {
- public:
- explicit NeanderObject(int size);
- explicit inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
- explicit inline NeanderObject(v8::internal::Object* obj);
- inline v8::internal::Object* get(int index);
- inline void set(int index, v8::internal::Object* value);
- inline v8::internal::Handle<v8::internal::JSObject> value() { return value_; }
- int size();
- private:
- v8::internal::Handle<v8::internal::JSObject> value_;
-};
-
-
-// Utilities for working with neander-arrays, a simple extensible
-// array abstraction built on neander-objects.
-class NeanderArray {
- public:
- NeanderArray();
- explicit inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
- inline v8::internal::Handle<v8::internal::JSObject> value() {
- return obj_.value();
- }
-
- void add(v8::internal::Handle<v8::internal::Object> value);
-
- int length();
-
- v8::internal::Object* get(int index);
- // Change the value at an index to undefined value. If the index is
- // out of bounds, the request is ignored. Returns the old value.
- void set(int index, v8::internal::Object* value);
- private:
- NeanderObject obj_;
-};
-
-
-NeanderObject::NeanderObject(v8::internal::Handle<v8::internal::Object> obj)
- : value_(v8::internal::Handle<v8::internal::JSObject>::cast(obj)) { }
-
-
-NeanderObject::NeanderObject(v8::internal::Object* obj)
- : value_(v8::internal::Handle<v8::internal::JSObject>(
- v8::internal::JSObject::cast(obj))) { }
-
-
-NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
- : obj_(obj) { }
-
-
-v8::internal::Object* NeanderObject::get(int offset) {
- ASSERT(value()->HasFastObjectElements());
- return v8::internal::FixedArray::cast(value()->elements())->get(offset);
-}
-
-
-void NeanderObject::set(int offset, v8::internal::Object* value) {
- ASSERT(value_->HasFastObjectElements());
- v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
-}
-
-
-template <typename T> inline T ToCData(v8::internal::Object* obj) {
- STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
- return reinterpret_cast<T>(
- reinterpret_cast<intptr_t>(
- v8::internal::Foreign::cast(obj)->foreign_address()));
-}
-
-
-template <typename T>
-inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
- STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
- return FACTORY->NewForeign(
- reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
-}
-
-
-class ApiFunction {
- public:
- explicit ApiFunction(v8::internal::Address addr) : addr_(addr) { }
- v8::internal::Address address() { return addr_; }
- private:
- v8::internal::Address addr_;
-};
-
-
-
-class RegisteredExtension {
- public:
- explicit RegisteredExtension(Extension* extension);
- static void Register(RegisteredExtension* that);
- static void UnregisterAll();
- Extension* extension() { return extension_; }
- RegisteredExtension* next() { return next_; }
- RegisteredExtension* next_auto() { return next_auto_; }
- static RegisteredExtension* first_extension() { return first_extension_; }
- private:
- Extension* extension_;
- RegisteredExtension* next_;
- RegisteredExtension* next_auto_;
- static RegisteredExtension* first_extension_;
-};
-
-
-#define OPEN_HANDLE_LIST(V) \
- V(Template, TemplateInfo) \
- V(FunctionTemplate, FunctionTemplateInfo) \
- V(ObjectTemplate, ObjectTemplateInfo) \
- V(Signature, SignatureInfo) \
- V(AccessorSignature, FunctionTemplateInfo) \
- V(TypeSwitch, TypeSwitchInfo) \
- V(Data, Object) \
- V(RegExp, JSRegExp) \
- V(Object, JSObject) \
- V(Array, JSArray) \
- V(String, String) \
- V(Script, Object) \
- V(Function, JSFunction) \
- V(Message, JSObject) \
- V(Context, Context) \
- V(External, Foreign) \
- V(StackTrace, JSArray) \
- V(StackFrame, JSObject)
-
-
-class Utils {
- public:
- static bool ReportApiFailure(const char* location, const char* message);
-
- static Local<FunctionTemplate> ToFunctionTemplate(NeanderObject obj);
- static Local<ObjectTemplate> ToObjectTemplate(NeanderObject obj);
-
- static inline Local<Context> ToLocal(
- v8::internal::Handle<v8::internal::Context> obj);
- static inline Local<Value> ToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<Function> ToLocal(
- v8::internal::Handle<v8::internal::JSFunction> obj);
- static inline Local<String> ToLocal(
- v8::internal::Handle<v8::internal::String> obj);
- static inline Local<RegExp> ToLocal(
- v8::internal::Handle<v8::internal::JSRegExp> obj);
- static inline Local<Object> ToLocal(
- v8::internal::Handle<v8::internal::JSObject> obj);
- static inline Local<Array> ToLocal(
- v8::internal::Handle<v8::internal::JSArray> obj);
- static inline Local<Message> MessageToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<StackTrace> StackTraceToLocal(
- v8::internal::Handle<v8::internal::JSArray> obj);
- static inline Local<StackFrame> StackFrameToLocal(
- v8::internal::Handle<v8::internal::JSObject> obj);
- static inline Local<Number> NumberToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<Integer> IntegerToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<Uint32> Uint32ToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<FunctionTemplate> ToLocal(
- v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
- static inline Local<ObjectTemplate> ToLocal(
- v8::internal::Handle<v8::internal::ObjectTemplateInfo> obj);
- static inline Local<Signature> ToLocal(
- v8::internal::Handle<v8::internal::SignatureInfo> obj);
- static inline Local<AccessorSignature> AccessorSignatureToLocal(
- v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
- static inline Local<TypeSwitch> ToLocal(
- v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
- static inline Local<External> ExternalToLocal(
- v8::internal::Handle<v8::internal::JSObject> obj);
-
-#define DECLARE_OPEN_HANDLE(From, To) \
- static inline v8::internal::Handle<v8::internal::To> \
- OpenHandle(const From* that, bool allow_empty_handle = false);
-
-OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
-
-#undef DECLARE_OPEN_HANDLE
-};
-
-
-template <class T>
-inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
- return reinterpret_cast<T*>(obj.location());
-}
-
-
-template <class T>
-v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
- v8::HandleScope* scope) {
- v8::internal::Handle<T> handle;
- if (!is_null()) {
- handle = *this;
- }
- return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle)), true);
-}
-
-
-// Implementations of ToLocal
-
-#define MAKE_TO_LOCAL(Name, From, To) \
- Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
- ASSERT(obj.is_null() || !obj->IsTheHole()); \
- return Local<To>(reinterpret_cast<To*>(obj.location())); \
- }
-
-MAKE_TO_LOCAL(ToLocal, Context, Context)
-MAKE_TO_LOCAL(ToLocal, Object, Value)
-MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
-MAKE_TO_LOCAL(ToLocal, String, String)
-MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
-MAKE_TO_LOCAL(ToLocal, JSObject, Object)
-MAKE_TO_LOCAL(ToLocal, JSArray, Array)
-MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
-MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
-MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
-MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
-MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
-MAKE_TO_LOCAL(MessageToLocal, Object, Message)
-MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
-MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame)
-MAKE_TO_LOCAL(NumberToLocal, Object, Number)
-MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
-MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
-MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
-
-#undef MAKE_TO_LOCAL
-
-
-// Implementations of OpenHandle
-
-#define MAKE_OPEN_HANDLE(From, To) \
- v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
- const v8::From* that, bool allow_empty_handle) { \
- EXTRA_CHECK(allow_empty_handle || that != NULL); \
- return v8::internal::Handle<v8::internal::To>( \
- reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
- }
-
-OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
-
-#undef MAKE_OPEN_HANDLE
-#undef OPEN_HANDLE_LIST
-
-
-namespace internal {
-
-// Tracks string usage to help make better decisions when
-// externalizing strings.
-//
-// Implementation note: internally this class only tracks fresh
-// strings and keeps a single use counter for them.
-class StringTracker {
- public:
- // Records that the given string's characters were copied to some
- // external buffer. If this happens often we should honor
- // externalization requests for the string.
- void RecordWrite(Handle<String> string) {
- Address address = reinterpret_cast<Address>(*string);
- Address top = isolate_->heap()->NewSpaceTop();
- if (IsFreshString(address, top)) {
- IncrementUseCount(top);
- }
- }
-
- // Estimates freshness and use frequency of the given string based
- // on how close it is to the new space top and the recorded usage
- // history.
- inline bool IsFreshUnusedString(Handle<String> string) {
- Address address = reinterpret_cast<Address>(*string);
- Address top = isolate_->heap()->NewSpaceTop();
- return IsFreshString(address, top) && IsUseCountLow(top);
- }
-
- private:
- StringTracker() : use_count_(0), last_top_(NULL), isolate_(NULL) { }
-
- static inline bool IsFreshString(Address string, Address top) {
- return top - kFreshnessLimit <= string && string <= top;
- }
-
- inline bool IsUseCountLow(Address top) {
- if (last_top_ != top) return true;
- return use_count_ < kUseLimit;
- }
-
- inline void IncrementUseCount(Address top) {
- if (last_top_ != top) {
- use_count_ = 0;
- last_top_ = top;
- }
- ++use_count_;
- }
-
- // Single use counter shared by all fresh strings.
- int use_count_;
-
- // Last new space top when the use count above was valid.
- Address last_top_;
-
- Isolate* isolate_;
-
- // How close to the new space top a fresh string has to be.
- static const int kFreshnessLimit = 1024;
-
- // The number of uses required to consider a string useful.
- static const int kUseLimit = 32;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(StringTracker);
-};
-
-
-class DeferredHandles {
- public:
- ~DeferredHandles();
-
- private:
- DeferredHandles(Object** first_block_limit, Isolate* isolate)
- : next_(NULL),
- previous_(NULL),
- first_block_limit_(first_block_limit),
- isolate_(isolate) {
- isolate->LinkDeferredHandles(this);
- }
-
- void Iterate(ObjectVisitor* v);
-
- List<Object**> blocks_;
- DeferredHandles* next_;
- DeferredHandles* previous_;
- Object** first_block_limit_;
- Isolate* isolate_;
-
- friend class HandleScopeImplementer;
- friend class Isolate;
-};
-
-
-// This class is here in order to be able to declare it a friend of
-// HandleScope. Moving these methods to be members of HandleScope would be
-// neat in some ways, but it would expose internal implementation details in
-// our public header file, which is undesirable.
-//
-// An isolate has a single instance of this class to hold the current thread's
-// data. In multithreaded V8 programs this data is copied in and out of storage
-// so that the currently executing thread always has its own copy of this
-// data.
-class HandleScopeImplementer {
- public:
- explicit HandleScopeImplementer(Isolate* isolate)
- : isolate_(isolate),
- blocks_(0),
- entered_contexts_(0),
- saved_contexts_(0),
- spare_(NULL),
- call_depth_(0),
- last_handle_before_deferred_block_(NULL) { }
-
- ~HandleScopeImplementer() {
- DeleteArray(spare_);
- }
-
- // Threading support for handle data.
- static int ArchiveSpacePerThread();
- char* RestoreThread(char* from);
- char* ArchiveThread(char* to);
- void FreeThreadResources();
-
- // Garbage collection support.
- void Iterate(v8::internal::ObjectVisitor* v);
- static char* Iterate(v8::internal::ObjectVisitor* v, char* data);
-
-
- inline internal::Object** GetSpareOrNewBlock();
- inline void DeleteExtensions(internal::Object** prev_limit);
-
- inline void IncrementCallDepth() {call_depth_++;}
- inline void DecrementCallDepth() {call_depth_--;}
- inline bool CallDepthIsZero() { return call_depth_ == 0; }
-
- inline void EnterContext(Handle<Object> context);
- inline bool LeaveLastContext();
-
- // Returns the last entered context or an empty handle if no
- // contexts have been entered.
- inline Handle<Object> LastEnteredContext();
-
- inline void SaveContext(Context* context);
- inline Context* RestoreContext();
- inline bool HasSavedContexts();
-
- inline List<internal::Object**>* blocks() { return &blocks_; }
- Isolate* isolate() const { return isolate_; }
-
- void ReturnBlock(Object** block) {
- ASSERT(block != NULL);
- if (spare_ != NULL) DeleteArray(spare_);
- spare_ = block;
- }
-
- private:
- void ResetAfterArchive() {
- blocks_.Initialize(0);
- entered_contexts_.Initialize(0);
- saved_contexts_.Initialize(0);
- spare_ = NULL;
- last_handle_before_deferred_block_ = NULL;
- call_depth_ = 0;
- }
-
- void Free() {
- ASSERT(blocks_.length() == 0);
- ASSERT(entered_contexts_.length() == 0);
- ASSERT(saved_contexts_.length() == 0);
- blocks_.Free();
- entered_contexts_.Free();
- saved_contexts_.Free();
- if (spare_ != NULL) {
- DeleteArray(spare_);
- spare_ = NULL;
- }
- ASSERT(call_depth_ == 0);
- }
-
- void BeginDeferredScope();
- DeferredHandles* Detach(Object** prev_limit);
-
- Isolate* isolate_;
- List<internal::Object**> blocks_;
- // Used as a stack to keep track of entered contexts.
- List<Handle<Object> > entered_contexts_;
- // Used as a stack to keep track of saved contexts.
- List<Context*> saved_contexts_;
- Object** spare_;
- int call_depth_;
- Object** last_handle_before_deferred_block_;
- // This is only used for threading support.
- v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
-
- void IterateThis(ObjectVisitor* v);
- char* RestoreThreadHelper(char* from);
- char* ArchiveThreadHelper(char* to);
-
- friend class DeferredHandles;
- friend class DeferredHandleScope;
-
- DISALLOW_COPY_AND_ASSIGN(HandleScopeImplementer);
-};
-
-
-const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page
-
-
-void HandleScopeImplementer::SaveContext(Context* context) {
- saved_contexts_.Add(context);
-}
-
-
-Context* HandleScopeImplementer::RestoreContext() {
- return saved_contexts_.RemoveLast();
-}
-
-
-bool HandleScopeImplementer::HasSavedContexts() {
- return !saved_contexts_.is_empty();
-}
-
-
-void HandleScopeImplementer::EnterContext(Handle<Object> context) {
- entered_contexts_.Add(context);
-}
-
-
-bool HandleScopeImplementer::LeaveLastContext() {
- if (entered_contexts_.is_empty()) return false;
- entered_contexts_.RemoveLast();
- return true;
-}
-
-
-Handle<Object> HandleScopeImplementer::LastEnteredContext() {
- if (entered_contexts_.is_empty()) return Handle<Object>::null();
- return entered_contexts_.last();
-}
-
-
-// If there's a spare block, use it for growing the current scope.
-internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
- internal::Object** block = (spare_ != NULL) ?
- spare_ :
- NewArray<internal::Object*>(kHandleBlockSize);
- spare_ = NULL;
- return block;
-}
-
-
-void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
- while (!blocks_.is_empty()) {
- internal::Object** block_start = blocks_.last();
- internal::Object** block_limit = block_start + kHandleBlockSize;
-#ifdef DEBUG
- // NoHandleAllocation may make the prev_limit to point inside the block.
- if (block_start <= prev_limit && prev_limit <= block_limit) break;
-#else
- if (prev_limit == block_limit) break;
-#endif
-
- blocks_.RemoveLast();
-#ifdef DEBUG
- v8::ImplementationUtilities::ZapHandleRange(block_start, block_limit);
-#endif
- if (spare_ != NULL) {
- DeleteArray(spare_);
- }
- spare_ = block_start;
- }
- ASSERT((blocks_.is_empty() && prev_limit == NULL) ||
- (!blocks_.is_empty() && prev_limit != NULL));
-}
-
-
-class Testing {
- public:
- static v8::Testing::StressType stress_type() { return stress_type_; }
- static void set_stress_type(v8::Testing::StressType stress_type) {
- stress_type_ = stress_type;
- }
-
- private:
- static v8::Testing::StressType stress_type_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_API_H_
diff --git a/src/3rdparty/v8/src/apinatives.js b/src/3rdparty/v8/src/apinatives.js
deleted file mode 100644
index 79b41dd..0000000
--- a/src/3rdparty/v8/src/apinatives.js
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file contains infrastructure used by the API. See
-// v8natives.js for an explanation of these files are processed and
-// loaded.
-
-
-function CreateDate(time) {
- var date = new $Date();
- date.setTime(time);
- return date;
-}
-
-
-var kApiFunctionCache = {};
-var functionCache = kApiFunctionCache;
-
-
-function Instantiate(data, name) {
- if (!%IsTemplate(data)) return data;
- var tag = %GetTemplateField(data, kApiTagOffset);
- switch (tag) {
- case kFunctionTag:
- return InstantiateFunction(data, name);
- case kNewObjectTag:
- var Constructor = %GetTemplateField(data, kApiConstructorOffset);
- // Note: Do not directly use a function template as a condition, our
- // internal ToBoolean doesn't handle that!
- var result = typeof Constructor === 'undefined' ?
- {} : new (Instantiate(Constructor))();
- ConfigureTemplateInstance(result, data);
- result = %ToFastProperties(result);
- return result;
- default:
- throw 'Unknown API tag <' + tag + '>';
- }
-}
-
-
-function InstantiateFunction(data, name) {
- // We need a reference to kApiFunctionCache in the stack frame
- // if we need to bail out from a stack overflow.
- var cache = kApiFunctionCache;
- var serialNumber = %GetTemplateField(data, kApiSerialNumberOffset);
- var isFunctionCached =
- (serialNumber in cache) && (cache[serialNumber] != kUninitialized);
- if (!isFunctionCached) {
- try {
- cache[serialNumber] = null;
- var fun = %CreateApiFunction(data);
- if (name) %FunctionSetName(fun, name);
- cache[serialNumber] = fun;
- var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
- var flags = %GetTemplateField(data, kApiFlagOffset);
- // Note: Do not directly use an object template as a condition, our
- // internal ToBoolean doesn't handle that!
- fun.prototype = typeof prototype === 'undefined' ?
- {} : Instantiate(prototype);
- if (flags & (1 << kReadOnlyPrototypeBit)) {
- %FunctionSetReadOnlyPrototype(fun);
- }
- %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
- var parent = %GetTemplateField(data, kApiParentTemplateOffset);
- // Note: Do not directly use a function template as a condition, our
- // internal ToBoolean doesn't handle that!
- if (!(typeof parent === 'undefined')) {
- var parent_fun = Instantiate(parent);
- fun.prototype.__proto__ = parent_fun.prototype;
- }
- ConfigureTemplateInstance(fun, data);
- } catch (e) {
- cache[serialNumber] = kUninitialized;
- throw e;
- }
- }
- return cache[serialNumber];
-}
-
-
-function ConfigureTemplateInstance(obj, data) {
- var properties = %GetTemplateField(data, kApiPropertyListOffset);
- if (properties) {
- // Disable access checks while instantiating the object.
- var requires_access_checks = %DisableAccessChecks(obj);
- try {
- for (var i = 0; i < properties[0]; i += 3) {
- var name = properties[i + 1];
- var prop_data = properties[i + 2];
- var attributes = properties[i + 3];
- var value = Instantiate(prop_data, name);
- %SetProperty(obj, name, value, attributes);
- }
- } finally {
- if (requires_access_checks) %EnableAccessChecks(obj);
- }
- }
-}
diff --git a/src/3rdparty/v8/src/apiutils.h b/src/3rdparty/v8/src/apiutils.h
deleted file mode 100644
index 71c0e1c..0000000
--- a/src/3rdparty/v8/src/apiutils.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_APIUTILS_H_
-#define V8_APIUTILS_H_
-
-namespace v8 {
-class ImplementationUtilities {
- public:
- static int GetNameCount(ExtensionConfiguration* that) {
- return that->name_count_;
- }
-
- static const char** GetNames(ExtensionConfiguration* that) {
- return that->names_;
- }
-
- // Packs additional parameters for the NewArguments function. |implicit_args|
- // is a pointer to the last element of 4-elements array controlled by GC.
- static void PrepareArgumentsData(internal::Object** implicit_args,
- internal::Isolate* isolate,
- internal::Object* data,
- internal::JSFunction* callee,
- internal::Object* holder) {
- implicit_args[v8::Arguments::kDataIndex] = data;
- implicit_args[v8::Arguments::kCalleeIndex] = callee;
- implicit_args[v8::Arguments::kHolderIndex] = holder;
- implicit_args[v8::Arguments::kIsolateIndex] =
- reinterpret_cast<internal::Object*>(isolate);
- }
-
- static v8::Arguments NewArguments(internal::Object** implicit_args,
- internal::Object** argv, int argc,
- bool is_construct_call) {
- ASSERT(implicit_args[v8::Arguments::kCalleeIndex]->IsJSFunction());
- ASSERT(implicit_args[v8::Arguments::kHolderIndex]->IsHeapObject());
- // The implicit isolate argument is not tagged and looks like a SMI.
- ASSERT(implicit_args[v8::Arguments::kIsolateIndex]->IsSmi());
-
- return v8::Arguments(implicit_args, argv, argc, is_construct_call);
- }
-
- // Introduce an alias for the handle scope data to allow non-friends
- // to access the HandleScope data.
- typedef v8::HandleScope::Data HandleScopeData;
-
-#ifdef DEBUG
- static void ZapHandleRange(internal::Object** begin, internal::Object** end);
-#endif
-};
-
-} // namespace v8
-
-#endif // V8_APIUTILS_H_
diff --git a/src/3rdparty/v8/src/arguments.h b/src/3rdparty/v8/src/arguments.h
deleted file mode 100644
index f8fb00c..0000000
--- a/src/3rdparty/v8/src/arguments.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARGUMENTS_H_
-#define V8_ARGUMENTS_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// Arguments provides access to runtime call parameters.
-//
-// It uses the fact that the instance fields of Arguments
-// (length_, arguments_) are "overlayed" with the parameters
-// (no. of parameters, and the parameter pointer) passed so
-// that inside the C++ function, the parameters passed can
-// be accessed conveniently:
-//
-// Object* Runtime_function(Arguments args) {
-// ... use args[i] here ...
-// }
-
-class Arguments BASE_EMBEDDED {
- public:
- Arguments(int length, Object** arguments)
- : length_(length), arguments_(arguments) { }
-
- Object*& operator[] (int index) {
- ASSERT(0 <= index && index < length_);
- return arguments_[-index];
- }
-
- template <class S> Handle<S> at(int index) {
- Object** value = &((*this)[index]);
- // This cast checks that the object we're accessing does indeed have the
- // expected type.
- S::cast(*value);
- return Handle<S>(reinterpret_cast<S**>(value));
- }
-
- int smi_at(int index) {
- return Smi::cast((*this)[index])->value();
- }
-
- double number_at(int index) {
- return (*this)[index]->Number();
- }
-
- // Get the total number of arguments including the receiver.
- int length() const { return length_; }
-
- Object** arguments() { return arguments_; }
-
- private:
- int length_;
- Object** arguments_;
-};
-
-
-// Custom arguments replicate a small segment of stack that can be
-// accessed through an Arguments object the same way the actual stack
-// can.
-class CustomArguments : public Relocatable {
- public:
- inline CustomArguments(Isolate* isolate,
- Object* data,
- Object* self,
- JSObject* holder) : Relocatable(isolate) {
- ASSERT(reinterpret_cast<Object*>(isolate)->IsSmi());
- values_[3] = self;
- values_[2] = holder;
- values_[1] = data;
- values_[0] = reinterpret_cast<Object*>(isolate);
- }
-
- inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) {
-#ifdef DEBUG
- for (size_t i = 0; i < ARRAY_SIZE(values_); i++) {
- values_[i] = reinterpret_cast<Object*>(kZapValue);
- }
-#endif
- }
-
- void IterateInstance(ObjectVisitor* v);
- Object** end() { return values_ + ARRAY_SIZE(values_) - 1; }
-
- private:
- Object* values_[4];
-};
-
-
-#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
-Type Name(Arguments args, Isolate* isolate)
-
-
-#define RUNTIME_FUNCTION(Type, Name) \
-Type Name(Arguments args, Isolate* isolate)
-
-
-#define RUNTIME_ARGUMENTS(isolate, args) args, isolate
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARGUMENTS_H_
diff --git a/src/3rdparty/v8/src/arm/assembler-arm-inl.h b/src/3rdparty/v8/src/arm/assembler-arm-inl.h
deleted file mode 100644
index af29bb8..0000000
--- a/src/3rdparty/v8/src/arm/assembler-arm-inl.h
+++ /dev/null
@@ -1,549 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
-#define V8_ARM_ASSEMBLER_ARM_INL_H_
-
-#include "arm/assembler-arm.h"
-
-#include "cpu.h"
-#include "debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-int Register::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(VFP2)) {
- return kMaxNumAllocatableRegisters;
- } else {
- return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
- }
-}
-
-
-int DwVfpRegister::NumRegisters() {
- if (CpuFeatures::IsSupported(VFP2)) {
- return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
- } else {
- return 1;
- }
-}
-
-
-int DwVfpRegister::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(VFP2)) {
- return NumRegisters() - kNumReservedRegisters;
- } else {
- return 1;
- }
-}
-
-
-int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
- ASSERT(!reg.is(kDoubleRegZero));
- ASSERT(!reg.is(kScratchDoubleReg));
- if (reg.code() > kDoubleRegZero.code()) {
- return reg.code() - kNumReservedRegisters;
- }
- return reg.code();
-}
-
-
-DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
- kNumReservedRegisters - 1);
- if (index >= kDoubleRegZero.code()) {
- return from_code(index + kNumReservedRegisters);
- }
- return from_code(index);
-}
-
-
-void RelocInfo::apply(intptr_t delta) {
- if (RelocInfo::IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p += delta; // relocate entry
- }
- // We do not use pc relative addressing on ARM, so there is
- // nothing else to do.
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
- || rmode_ == EMBEDDED_OBJECT
- || rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
-}
-
-
-int RelocInfo::target_address_size() {
- return kPointerSize;
-}
-
-
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_pointer_at(pc_)));
-}
-
-
-Object** RelocInfo::target_object_address() {
- // Provide a "natural pointer" to the embedded object,
- // which can be de-referenced during heap iteration.
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- reconstructed_obj_ptr_ =
- reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
- return &reconstructed_obj_ptr_;
-}
-
-
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
- if (mode == UPDATE_WRITE_BARRIER &&
- host() != NULL &&
- target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
- }
-}
-
-
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == EXTERNAL_REFERENCE);
- reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
- return &reconstructed_adr_ptr_;
-}
-
-
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
-}
-
-
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
-}
-
-
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
- Memory::Address_at(pc_) = address;
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
- }
-}
-
-
-static const int kNoCodeAgeSequenceLength = 3;
-
-Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- return Code::GetCodeFromTargetAddress(
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)) =
- stub->instruction_start();
-}
-
-
-Address RelocInfo::call_address() {
- // The 2 instructions offset assumes patched debug break slot or return
- // sequence.
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
- if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
-#ifdef USE_BLX
- // A patched return sequence is:
- // ldr ip, [pc, #0]
- // blx ip
- return ((current_instr & kLdrPCMask) == kLdrPCPattern)
- && ((next_instr & kBlxRegMask) == kBlxRegPattern);
-#else
- // A patched return sequence is:
- // mov lr, pc
- // ldr pc, [pc, #-4]
- return (current_instr == kMovLrPc)
- && ((next_instr & kLdrPCMask) == kLdrPCPattern);
-#endif
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
- visitor->VisitDebugTarget(this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- visitor->VisitRuntimeEntry(this);
- }
-}
-
-
-template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(heap, this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
-
-Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
- rm_ = no_reg;
- imm32_ = immediate;
- rmode_ = rmode;
-}
-
-
-Operand::Operand(const ExternalReference& f) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(f.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Operand::Operand(Smi* value) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE32;
-}
-
-
-Operand::Operand(Register rm) {
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = LSL;
- shift_imm_ = 0;
-}
-
-
-bool Operand::is_reg() const {
- return rm_.is_valid() &&
- rs_.is(no_reg) &&
- shift_op_ == LSL &&
- shift_imm_ == 0;
-}
-
-
-void Assembler::CheckBuffer() {
- if (buffer_space() <= kGap) {
- GrowBuffer();
- }
- if (pc_offset() >= next_buffer_check_) {
- CheckConstPool(false, true);
- }
-}
-
-
-void Assembler::emit(Instr x) {
- CheckBuffer();
- *reinterpret_cast<Instr*>(pc_) = x;
- pc_ += kInstrSize;
-}
-
-
-Address Assembler::target_pointer_address_at(Address pc) {
- Address target_pc = pc;
- Instr instr = Memory::int32_at(target_pc);
- // If we have a bx instruction, the instruction before the bx is
- // what we need to patch.
- static const int32_t kBxInstMask = 0x0ffffff0;
- static const int32_t kBxInstPattern = 0x012fff10;
- if ((instr & kBxInstMask) == kBxInstPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
-
-#ifdef USE_BLX
- // If we have a blx instruction, the instruction before it is
- // what needs to be patched.
- if ((instr & kBlxRegMask) == kBlxRegPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
-#endif
-
- ASSERT(IsLdrPcImmediateOffset(instr));
- int offset = instr & 0xfff; // offset_12 is unsigned
- if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
- // Verify that the constant pool comes after the instruction referencing it.
- ASSERT(offset >= -4);
- return target_pc + offset + 8;
-}
-
-
-Address Assembler::target_pointer_at(Address pc) {
- if (IsMovW(Memory::int32_at(pc))) {
- ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- Instruction* instr = Instruction::At(pc);
- Instruction* next_instr = Instruction::At(pc + kInstrSize);
- return reinterpret_cast<Address>(
- (next_instr->ImmedMovwMovtValue() << 16) |
- instr->ImmedMovwMovtValue());
- }
- return Memory::Address_at(target_pointer_address_at(pc));
-}
-
-
-Address Assembler::target_address_from_return_address(Address pc) {
- // Returns the address of the call target from the return address that will
- // be returned to after a call.
-#ifdef USE_BLX
- // Call sequence on V7 or later is :
- // movw ip, #... @ call address low 16
- // movt ip, #... @ call address high 16
- // blx ip
- // @ return address
- // Or pre-V7 or cases that need frequent patching:
- // ldr ip, [pc, #...] @ call address
- // blx ip
- // @ return address
- Address candidate = pc - 2 * Assembler::kInstrSize;
- Instr candidate_instr(Memory::int32_at(candidate));
- if (IsLdrPcImmediateOffset(candidate_instr)) {
- return candidate;
- }
- candidate = pc - 3 * Assembler::kInstrSize;
- ASSERT(IsMovW(Memory::int32_at(candidate)) &&
- IsMovT(Memory::int32_at(candidate + kInstrSize)));
- return candidate;
-#else
- // Call sequence is:
- // mov lr, pc
- // ldr pc, [pc, #...] @ call address
- // @ return address
- return pc - kInstrSize;
-#endif
-}
-
-
-Address Assembler::return_address_from_call_start(Address pc) {
-#ifdef USE_BLX
- if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
- return pc + kInstrSize * 2;
- } else {
- ASSERT(IsMovW(Memory::int32_at(pc)));
- ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- return pc + kInstrSize * 3;
- }
-#else
- return pc + kInstrSize;
-#endif
-}
-
-
-void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Address target) {
- Memory::Address_at(constant_pool_entry) = target;
-}
-
-
-void Assembler::set_external_target_at(Address constant_pool_entry,
- Address target) {
- Memory::Address_at(constant_pool_entry) = target;
-}
-
-
-static Instr EncodeMovwImmediate(uint32_t immediate) {
- ASSERT(immediate < 0x10000);
- return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
-}
-
-
-void Assembler::set_target_pointer_at(Address pc, Address target) {
- if (IsMovW(Memory::int32_at(pc))) {
- ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
- uint32_t immediate = reinterpret_cast<uint32_t>(target);
- uint32_t intermediate = instr_ptr[0];
- intermediate &= ~EncodeMovwImmediate(0xFFFF);
- intermediate |= EncodeMovwImmediate(immediate & 0xFFFF);
- instr_ptr[0] = intermediate;
- intermediate = instr_ptr[1];
- intermediate &= ~EncodeMovwImmediate(0xFFFF);
- intermediate |= EncodeMovwImmediate(immediate >> 16);
- instr_ptr[1] = intermediate;
- ASSERT(IsMovW(Memory::int32_at(pc)));
- ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- CPU::FlushICache(pc, 2 * kInstrSize);
- } else {
- ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
- Memory::Address_at(target_pointer_address_at(pc)) = target;
- // Intuitively, we would think it is necessary to always flush the
- // instruction cache after patching a target address in the code as follows:
- // CPU::FlushICache(pc, sizeof(target));
- // However, on ARM, no instruction is actually patched in the case
- // of embedded constants of the form:
- // ldr ip, [pc, #...]
- // since the instruction accessing this address in the constant pool remains
- // unchanged.
- }
-}
-
-
-Address Assembler::target_address_at(Address pc) {
- return target_pointer_at(pc);
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- set_target_pointer_at(pc, target);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
diff --git a/src/3rdparty/v8/src/arm/assembler-arm.cc b/src/3rdparty/v8/src/arm/assembler-arm.cc
deleted file mode 100644
index a8c32d9..0000000
--- a/src/3rdparty/v8/src/arm/assembler-arm.cc
+++ /dev/null
@@ -1,3052 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "arm/assembler-arm-inl.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_ = 0;
-
-
-ExternalReference ExternalReference::cpu_features() {
- ASSERT(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-// Get the CPU features enabled by the build. For cross compilation the
-// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
-// can be defined to enable ARMv7 and VFPv3 instructions when building the
-// snapshot.
-static unsigned CpuFeaturesImpliedByCompiler() {
- unsigned answer = 0;
-#ifdef CAN_USE_ARMV7_INSTRUCTIONS
- answer |= 1u << ARMv7;
-#endif // CAN_USE_ARMV7_INSTRUCTIONS
-#ifdef CAN_USE_VFP3_INSTRUCTIONS
- answer |= 1u << VFP3 | 1u << VFP2 | 1u << ARMv7;
-#endif // CAN_USE_VFP3_INSTRUCTIONS
-#ifdef CAN_USE_VFP2_INSTRUCTIONS
- answer |= 1u << VFP2;
-#endif // CAN_USE_VFP2_INSTRUCTIONS
-#ifdef CAN_USE_VFP32DREGS
- answer |= 1u << VFP32DREGS;
-#endif // CAN_USE_VFP32DREGS
-
-#ifdef __arm__
- // If the compiler is allowed to use VFP then we can use VFP too in our code
- // generation even when generating snapshots. ARMv7 and hardware floating
- // point support implies VFPv3, see ARM DDI 0406B, page A1-6.
-#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
- && !defined(__SOFTFP__)
- answer |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
-#endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
- // && !defined(__SOFTFP__)
-#endif // _arm__
- if (answer & (1u << ARMv7)) {
- answer |= 1u << UNALIGNED_ACCESSES;
- }
-
- return answer;
-}
-
-
-const char* DwVfpRegister::AllocationIndexToString(int index) {
- if (CpuFeatures::IsSupported(VFP2)) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
- kNumReservedRegisters - 1);
- if (index >= kDoubleRegZero.code())
- index += kNumReservedRegisters;
-
- return VFPRegisters::Name(index, true);
- } else {
- ASSERT(index == 0);
- return "sfpd0";
- }
-}
-
-
-void CpuFeatures::Probe() {
- unsigned standard_features = static_cast<unsigned>(
- OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
- ASSERT(supported_ == 0 || supported_ == standard_features);
-#ifdef DEBUG
- initialized_ = true;
-#endif
-
- // Get the features implied by the OS and the compiler settings. This is the
- // minimal set of features which is also alowed for generated code in the
- // snapshot.
- supported_ |= standard_features;
-
- if (Serializer::enabled()) {
- // No probing for features if we might serialize (generate snapshot).
- return;
- }
-
-#ifndef __arm__
- // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
- // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
- if (FLAG_enable_vfp3) {
- supported_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
- }
- // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
- if (FLAG_enable_armv7) {
- supported_ |= 1u << ARMv7;
- }
-
- if (FLAG_enable_sudiv) {
- supported_ |= 1u << SUDIV;
- }
-
- if (FLAG_enable_movw_movt) {
- supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
- }
-
- if (FLAG_enable_32dregs) {
- supported_ |= 1u << VFP32DREGS;
- }
-
-#else // __arm__
- // Probe for additional features not already known to be available.
- if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
- // This implementation also sets the VFP flags if runtime
- // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
- // 0406B, page A1-6.
- found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
- } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
- found_by_runtime_probing_ |= 1u << VFP2;
- }
-
- if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
- found_by_runtime_probing_ |= 1u << ARMv7;
- }
-
- if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) {
- found_by_runtime_probing_ |= 1u << SUDIV;
- }
-
- if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) {
- found_by_runtime_probing_ |= 1u << UNALIGNED_ACCESSES;
- }
-
- if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER &&
- OS::ArmCpuHasFeature(ARMv7)) {
- found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
- }
-
- if (!IsSupported(VFP32DREGS) && OS::ArmCpuHasFeature(VFP32DREGS)) {
- found_by_runtime_probing_ |= 1u << VFP32DREGS;
- }
-
- supported_ |= found_by_runtime_probing_;
-#endif
-
- // Assert that VFP3 implies VFP2 and ARMv7.
- ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-const int RelocInfo::kApplyMask = 0;
-
-
-bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on ARM means that it is a movw/movt instruction. We don't
- // generate those yet.
- return false;
-}
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Patch the code at the current address with a call to the target.
- UNIMPLEMENTED();
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand and MemOperand
-// See assembler-arm-inl.h for inlined constructors
-
-Operand::Operand(Handle<Object> handle) {
- rm_ = no_reg;
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- ASSERT(!HEAP->InNewSpace(obj));
- if (obj->IsHeapObject()) {
- imm32_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // no relocation needed
- imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE32;
- }
-}
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
- ASSERT(is_uint5(shift_imm));
- ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = shift_op;
- shift_imm_ = shift_imm & 31;
- if (shift_op == RRX) {
- // encoded as ROR with shift_imm == 0
- ASSERT(shift_imm == 0);
- shift_op_ = ROR;
- shift_imm_ = 0;
- }
-}
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
- ASSERT(shift_op != RRX);
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = shift_op;
- rs_ = rs;
-}
-
-
-MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
- rn_ = rn;
- rm_ = no_reg;
- offset_ = offset;
- am_ = am;
-}
-
-MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
- rn_ = rn;
- rm_ = rm;
- shift_op_ = LSL;
- shift_imm_ = 0;
- am_ = am;
-}
-
-
-MemOperand::MemOperand(Register rn, Register rm,
- ShiftOp shift_op, int shift_imm, AddrMode am) {
- ASSERT(is_uint5(shift_imm));
- rn_ = rn;
- rm_ = rm;
- shift_op_ = shift_op;
- shift_imm_ = shift_imm & 31;
- am_ = am;
-}
-
-
-// -----------------------------------------------------------------------------
-// Specific instructions, constants, and masks.
-
-// add(sp, sp, 4) instruction (aka Pop())
-const Instr kPopInstruction =
- al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
- kRegister_sp_Code * B12;
-// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
-// register r is not encoded.
-const Instr kPushRegPattern =
- al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
-// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
-// register r is not encoded.
-const Instr kPopRegPattern =
- al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
-// mov lr, pc
-const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
-// ldr rd, [pc, #offset]
-const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
-// vldr dd, [pc, #offset]
-const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
-const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
-// blxcc rm
-const Instr kBlxRegMask =
- 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
-const Instr kBlxRegPattern =
- B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
-const Instr kBlxIp = al | kBlxRegPattern | ip.code();
-const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
-const Instr kMovMvnPattern = 0xd * B21;
-const Instr kMovMvnFlip = B22;
-const Instr kMovLeaveCCMask = 0xdff * B16;
-const Instr kMovLeaveCCPattern = 0x1a0 * B16;
-const Instr kMovwMask = 0xff * B20;
-const Instr kMovwPattern = 0x30 * B20;
-const Instr kMovwLeaveCCFlip = 0x5 * B21;
-const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
-const Instr kCmpCmnPattern = 0x15 * B20;
-const Instr kCmpCmnFlip = B21;
-const Instr kAddSubFlip = 0x6 * B21;
-const Instr kAndBicFlip = 0xe * B21;
-
-// A mask for the Rd register for push, pop, ldr, str instructions.
-const Instr kLdrRegFpOffsetPattern =
- al | B26 | L | Offset | kRegister_fp_Code * B16;
-const Instr kStrRegFpOffsetPattern =
- al | B26 | Offset | kRegister_fp_Code * B16;
-const Instr kLdrRegFpNegOffsetPattern =
- al | B26 | L | NegOffset | kRegister_fp_Code * B16;
-const Instr kStrRegFpNegOffsetPattern =
- al | B26 | NegOffset | kRegister_fp_Code * B16;
-const Instr kLdrStrInstrTypeMask = 0xffff0000;
-const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
-const Instr kLdrStrOffsetMask = 0x00000fff;
-
-
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
- recorded_ast_id_(TypeFeedbackId::None()),
- positions_recorder_(this) {
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
- num_pending_reloc_info_ = 0;
- num_pending_64_bit_reloc_info_ = 0;
- next_buffer_check_ = 0;
- const_pool_blocked_nesting_ = 0;
- no_const_pool_before_ = 0;
- first_const_pool_use_ = -1;
- last_bound_pos_ = 0;
- ClearRecordedAstId();
-}
-
-
-Assembler::~Assembler() {
- ASSERT(const_pool_blocked_nesting_ == 0);
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- // Emit constant pool if necessary.
- CheckConstPool(true, false);
- ASSERT(num_pending_reloc_info_ == 0);
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
-
- // Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(m >= 4 && IsPowerOf2(m));
- while ((pc_offset() & (m - 1)) != 0) {
- nop();
- }
-}
-
-
-void Assembler::CodeTargetAlign() {
- // Preferred alignment of jump targets on some ARM chips.
- Align(8);
-}
-
-
-Condition Assembler::GetCondition(Instr instr) {
- return Instruction::ConditionField(instr);
-}
-
-
-bool Assembler::IsBranch(Instr instr) {
- return (instr & (B27 | B25)) == (B27 | B25);
-}
-
-
-int Assembler::GetBranchOffset(Instr instr) {
- ASSERT(IsBranch(instr));
- // Take the jump offset in the lower 24 bits, sign extend it and multiply it
- // with 4 to get the offset in bytes.
- return ((instr & kImm24Mask) << 8) >> 6;
-}
-
-
-bool Assembler::IsLdrRegisterImmediate(Instr instr) {
- return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
-}
-
-
-bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
- return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
-}
-
-
-int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
- ASSERT(IsLdrRegisterImmediate(instr));
- bool positive = (instr & B23) == B23;
- int offset = instr & kOff12Mask; // Zero extended offset.
- return positive ? offset : -offset;
-}
-
-
-int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
- ASSERT(IsVldrDRegisterImmediate(instr));
- bool positive = (instr & B23) == B23;
- int offset = instr & kOff8Mask; // Zero extended offset.
- offset <<= 2;
- return positive ? offset : -offset;
-}
-
-
-Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsLdrRegisterImmediate(instr));
- bool positive = offset >= 0;
- if (!positive) offset = -offset;
- ASSERT(is_uint12(offset));
- // Set bit indicating whether the offset should be added.
- instr = (instr & ~B23) | (positive ? B23 : 0);
- // Set the actual offset.
- return (instr & ~kOff12Mask) | offset;
-}
-
-
-Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsVldrDRegisterImmediate(instr));
- ASSERT((offset & ~3) == offset); // Must be 64-bit aligned.
- bool positive = offset >= 0;
- if (!positive) offset = -offset;
- ASSERT(is_uint10(offset));
- // Set bit indicating whether the offset should be added.
- instr = (instr & ~B23) | (positive ? B23 : 0);
- // Set the actual offset. Its bottom 2 bits are zero.
- return (instr & ~kOff8Mask) | (offset >> 2);
-}
-
-
-bool Assembler::IsStrRegisterImmediate(Instr instr) {
- return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
-}
-
-
-Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsStrRegisterImmediate(instr));
- bool positive = offset >= 0;
- if (!positive) offset = -offset;
- ASSERT(is_uint12(offset));
- // Set bit indicating whether the offset should be added.
- instr = (instr & ~B23) | (positive ? B23 : 0);
- // Set the actual offset.
- return (instr & ~kOff12Mask) | offset;
-}
-
-
-bool Assembler::IsAddRegisterImmediate(Instr instr) {
- return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
-}
-
-
-Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsAddRegisterImmediate(instr));
- ASSERT(offset >= 0);
- ASSERT(is_uint12(offset));
- // Set the offset.
- return (instr & ~kOff12Mask) | offset;
-}
-
-
-Register Assembler::GetRd(Instr instr) {
- Register reg;
- reg.code_ = Instruction::RdValue(instr);
- return reg;
-}
-
-
-Register Assembler::GetRn(Instr instr) {
- Register reg;
- reg.code_ = Instruction::RnValue(instr);
- return reg;
-}
-
-
-Register Assembler::GetRm(Instr instr) {
- Register reg;
- reg.code_ = Instruction::RmValue(instr);
- return reg;
-}
-
-
-bool Assembler::IsPush(Instr instr) {
- return ((instr & ~kRdMask) == kPushRegPattern);
-}
-
-
-bool Assembler::IsPop(Instr instr) {
- return ((instr & ~kRdMask) == kPopRegPattern);
-}
-
-
-bool Assembler::IsStrRegFpOffset(Instr instr) {
- return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
-}
-
-
-bool Assembler::IsLdrRegFpOffset(Instr instr) {
- return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
-}
-
-
-bool Assembler::IsStrRegFpNegOffset(Instr instr) {
- return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
-}
-
-
-bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
- return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
-}
-
-
-bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
- // Check the instruction is indeed a
- // ldr<cond> <Rd>, [pc +/- offset_12].
- return (instr & kLdrPCMask) == kLdrPCPattern;
-}
-
-
-bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
- // Check the instruction is indeed a
- // vldr<cond> <Dd>, [pc +/- offset_10].
- return (instr & kVldrDPCMask) == kVldrDPCPattern;
-}
-
-
-bool Assembler::IsTstImmediate(Instr instr) {
- return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
- (I | TST | S);
-}
-
-
-bool Assembler::IsCmpRegister(Instr instr) {
- return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
- (CMP | S);
-}
-
-
-bool Assembler::IsCmpImmediate(Instr instr) {
- return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
- (I | CMP | S);
-}
-
-
-Register Assembler::GetCmpImmediateRegister(Instr instr) {
- ASSERT(IsCmpImmediate(instr));
- return GetRn(instr);
-}
-
-
-int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
- ASSERT(IsCmpImmediate(instr));
- return instr & kOff12Mask;
-}
-
-// Labels refer to positions in the (to be) generated code.
-// There are bound, linked, and unused labels.
-//
-// Bound labels refer to known positions in the already
-// generated code. pos() is the position the label refers to.
-//
-// Linked labels refer to unknown positions in the code
-// to be generated; pos() is the position of the last
-// instruction using the label.
-
-
-// The link chain is terminated by a negative code position (must be aligned)
-const int kEndOfChain = -4;
-
-
-int Assembler::target_at(int pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
- // Emitted label constant, not part of a branch.
- return instr - (Code::kHeaderSize - kHeapObjectTag);
- }
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- int imm26 = ((instr & kImm24Mask) << 8) >> 6;
- if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
- ((instr & B24) != 0)) {
- // blx uses bit 24 to encode bit 2 of imm26
- imm26 += 2;
- }
- return pos + kPcLoadDelta + imm26;
-}
-
-
-void Assembler::target_at_put(int pos, int target_pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
- ASSERT(target_pos == kEndOfChain || target_pos >= 0);
- // Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- return;
- }
- int imm26 = target_pos - (pos + kPcLoadDelta);
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- if (Instruction::ConditionField(instr) == kSpecialCondition) {
- // blx uses bit 24 to encode bit 2 of imm26
- ASSERT((imm26 & 1) == 0);
- instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
- } else {
- ASSERT((imm26 & 3) == 0);
- instr &= ~kImm24Mask;
- }
- int imm24 = imm26 >> 2;
- ASSERT(is_int24(imm24));
- instr_at_put(pos, instr | (imm24 & kImm24Mask));
-}
-
-
-void Assembler::print(Label* L) {
- if (L->is_unused()) {
- PrintF("unused label\n");
- } else if (L->is_bound()) {
- PrintF("bound label to %d\n", L->pos());
- } else if (L->is_linked()) {
- Label l = *L;
- PrintF("unbound label");
- while (l.is_linked()) {
- PrintF("@ %d ", l.pos());
- Instr instr = instr_at(l.pos());
- if ((instr & ~kImm24Mask) == 0) {
- PrintF("value\n");
- } else {
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
- Condition cond = Instruction::ConditionField(instr);
- const char* b;
- const char* c;
- if (cond == kSpecialCondition) {
- b = "blx";
- c = "";
- } else {
- if ((instr & B24) != 0)
- b = "bl";
- else
- b = "b";
-
- switch (cond) {
- case eq: c = "eq"; break;
- case ne: c = "ne"; break;
- case hs: c = "hs"; break;
- case lo: c = "lo"; break;
- case mi: c = "mi"; break;
- case pl: c = "pl"; break;
- case vs: c = "vs"; break;
- case vc: c = "vc"; break;
- case hi: c = "hi"; break;
- case ls: c = "ls"; break;
- case ge: c = "ge"; break;
- case lt: c = "lt"; break;
- case gt: c = "gt"; break;
- case le: c = "le"; break;
- case al: c = ""; break;
- default:
- c = "";
- UNREACHABLE();
- }
- }
- PrintF("%s%s\n", b, c);
- }
- next(&l);
- }
- } else {
- PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
- }
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
- while (L->is_linked()) {
- int fixup_pos = L->pos();
- next(L); // call next before overwriting link with target at fixup_pos
- target_at_put(fixup_pos, pos);
- }
- L->bind_to(pos);
-
- // Keep track of the last bound label so we don't eliminate any instructions
- // before a bound label.
- if (pos > last_bound_pos_)
- last_bound_pos_ = pos;
-}
-
-
-void Assembler::link_to(Label* L, Label* appendix) {
- if (appendix->is_linked()) {
- if (L->is_linked()) {
- // Append appendix to L's list.
- int fixup_pos;
- int link = L->pos();
- do {
- fixup_pos = link;
- link = target_at(fixup_pos);
- } while (link > 0);
- ASSERT(link == kEndOfChain);
- target_at_put(fixup_pos, appendix->pos());
- } else {
- // L is empty, simply use appendix.
- *L = *appendix;
- }
- }
- appendix->Unuse(); // appendix should not be used anymore
-}
-
-
-void Assembler::bind(Label* L) {
- ASSERT(!L->is_bound()); // label can only be bound once
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::next(Label* L) {
- ASSERT(L->is_linked());
- int link = target_at(L->pos());
- if (link == kEndOfChain) {
- L->Unuse();
- } else {
- ASSERT(link >= 0);
- L->link_to(link);
- }
-}
-
-
-// Low-level code emission routines depending on the addressing mode.
-// If this returns true then you have to use the rotate_imm and immed_8
-// that it returns, because it may have already changed the instruction
-// to match them!
-static bool fits_shifter(uint32_t imm32,
- uint32_t* rotate_imm,
- uint32_t* immed_8,
- Instr* instr) {
- // imm32 must be unsigned.
- for (int rot = 0; rot < 16; rot++) {
- uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
- if ((imm8 <= 0xff)) {
- *rotate_imm = rot;
- *immed_8 = imm8;
- return true;
- }
- }
- // If the opcode is one with a complementary version and the complementary
- // immediate fits, change the opcode.
- if (instr != NULL) {
- if ((*instr & kMovMvnMask) == kMovMvnPattern) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
- *instr ^= kMovMvnFlip;
- return true;
- } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
- if (CpuFeatures::IsSupported(ARMv7)) {
- if (imm32 < 0x10000) {
- *instr ^= kMovwLeaveCCFlip;
- *instr |= EncodeMovwImmediate(imm32);
- *rotate_imm = *immed_8 = 0; // Not used for movw.
- return true;
- }
- }
- }
- } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
- if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
- *instr ^= kCmpCmnFlip;
- return true;
- }
- } else {
- Instr alu_insn = (*instr & kALUMask);
- if (alu_insn == ADD ||
- alu_insn == SUB) {
- if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
- *instr ^= kAddSubFlip;
- return true;
- }
- } else if (alu_insn == AND ||
- alu_insn == BIC) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
- *instr ^= kAndBicFlip;
- return true;
- }
- }
- }
- }
- return false;
-}
-
-
-// We have to use the temporary register for things that can be relocated even
-// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
-// space. There is no guarantee that the relocated location can be similarly
-// encoded.
-bool Operand::must_output_reloc_info(const Assembler* assembler) const {
- if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif // def DEBUG
- if (assembler != NULL && assembler->predictable_code_size()) return true;
- return Serializer::enabled();
- } else if (RelocInfo::IsNone(rmode_)) {
- return false;
- }
- return true;
-}
-
-
-static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
- if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
- return true;
- }
- if (x.must_output_reloc_info(assembler)) {
- return false;
- }
- return CpuFeatures::IsSupported(ARMv7);
-}
-
-
-bool Operand::is_single_instruction(const Assembler* assembler,
- Instr instr) const {
- if (rm_.is_valid()) return true;
- uint32_t dummy1, dummy2;
- if (must_output_reloc_info(assembler) ||
- !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
- // The immediate operand cannot be encoded as a shifter operand, or use of
- // constant pool is required. For a mov instruction not setting the
- // condition code additional instruction conventions can be used.
- if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- return !use_movw_movt(*this, assembler);
- } else {
- // If this is not a mov or mvn instruction there will always an additional
- // instructions - either mov or ldr. The mov might actually be two
- // instructions mov or movw followed by movt so including the actual
- // instruction two or three instructions will be generated.
- return false;
- }
- } else {
- // No use of constant pool and the immediate operand can be encoded as a
- // shifter operand.
- return true;
- }
-}
-
-
-void Assembler::move_32_bit_immediate(Condition cond,
- Register rd,
- SBit s,
- const Operand& x) {
- if (rd.code() != pc.code() && s == LeaveCC) {
- if (use_movw_movt(x, this)) {
- if (x.must_output_reloc_info(this)) {
- RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
- // Make sure the movw/movt doesn't get separated.
- BlockConstPoolFor(2);
- }
- emit(cond | 0x30*B20 | rd.code()*B12 |
- EncodeMovwImmediate(x.imm32_ & 0xffff));
- movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
- return;
- }
- }
-
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(rd, MemOperand(pc, 0), cond);
-}
-
-
-void Assembler::addrmod1(Instr instr,
- Register rn,
- Register rd,
- const Operand& x) {
- CheckBuffer();
- ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
- if (!x.rm_.is_valid()) {
- // Immediate.
- uint32_t rotate_imm;
- uint32_t immed_8;
- if (x.must_output_reloc_info(this) ||
- !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
- // The immediate operand cannot be encoded as a shifter operand, so load
- // it first to register ip and change the original instruction to use ip.
- // However, if the original instruction is a 'mov rd, x' (not setting the
- // condition code), then replace it with a 'ldr rd, [pc]'.
- CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
- Condition cond = Instruction::ConditionField(instr);
- if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- move_32_bit_immediate(cond, rd, LeaveCC, x);
- } else {
- if ((instr & kMovMvnMask) == kMovMvnPattern) {
- // Moves need to use a constant pool entry.
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(ip, MemOperand(pc, 0), cond);
- } else if (x.must_output_reloc_info(this)) {
- // Otherwise, use most efficient form of fetching from constant pool.
- move_32_bit_immediate(cond, ip, LeaveCC, x);
- } else {
- // If this is not a mov or mvn instruction we may still be able to
- // avoid a constant pool entry by using mvn or movw.
- mov(ip, x, LeaveCC, cond);
- }
- addrmod1(instr, rn, rd, Operand(ip));
- }
- return;
- }
- instr |= I | rotate_imm*B8 | immed_8;
- } else if (!x.rs_.is_valid()) {
- // Immediate shift.
- instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
- } else {
- // Register shift.
- ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
- instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
- }
- emit(instr | rn.code()*B16 | rd.code()*B12);
- if (rn.is(pc) || x.rm_.is(pc)) {
- // Block constant pool emission for one instruction after reading pc.
- BlockConstPoolFor(1);
- }
-}
-
-
-void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(kCondMask | B | L)) == B26);
- int am = x.am_;
- if (!x.rm_.is_valid()) {
- // Immediate offset.
- int offset_12 = x.offset_;
- if (offset_12 < 0) {
- offset_12 = -offset_12;
- am ^= U;
- }
- if (!is_uint12(offset_12)) {
- // Immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
- addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- }
- ASSERT(offset_12 >= 0); // no masking needed
- instr |= offset_12;
- } else {
- // Register offset (shift_imm_ and shift_op_ are 0) or scaled
- // register offset the constructors make sure than both shift_imm_
- // and shift_op_ are initialized.
- ASSERT(!x.rm_.is(pc));
- instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
- }
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
-
-
-void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
- ASSERT(x.rn_.is_valid());
- int am = x.am_;
- if (!x.rm_.is_valid()) {
- // Immediate offset.
- int offset_8 = x.offset_;
- if (offset_8 < 0) {
- offset_8 = -offset_8;
- am ^= U;
- }
- if (!is_uint8(offset_8)) {
- // Immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
- addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- }
- ASSERT(offset_8 >= 0); // no masking needed
- instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
- } else if (x.shift_imm_ != 0) {
- // Scaled register offset not supported, load index first
- // rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
- Instruction::ConditionField(instr));
- addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- } else {
- // Register offset.
- ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
- instr |= x.rm_.code();
- }
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
-
-
-void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
- ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
- ASSERT(rl != 0);
- ASSERT(!rn.is(pc));
- emit(instr | rn.code()*B16 | rl);
-}
-
-
-void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
- // Unindexed addressing is not encoded by this function.
- ASSERT_EQ((B27 | B26),
- (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
- ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
- int am = x.am_;
- int offset_8 = x.offset_;
- ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
- offset_8 >>= 2;
- if (offset_8 < 0) {
- offset_8 = -offset_8;
- am ^= U;
- }
- ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
-
- // Post-indexed addressing requires W == 1; different than in addrmod2/3.
- if ((am & P) == 0)
- am |= W;
-
- ASSERT(offset_8 >= 0); // no masking needed
- emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
-}
-
-
-int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link
- } else {
- target_pos = kEndOfChain;
- }
- L->link_to(pc_offset());
- }
-
- // Block the emission of the constant pool, since the branch instruction must
- // be emitted at the pc offset recorded by the label.
- BlockConstPoolFor(1);
- return target_pos - (pc_offset() + kPcLoadDelta);
-}
-
-
-void Assembler::label_at_put(Label* L, int at_offset) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link
- } else {
- target_pos = kEndOfChain;
- }
- L->link_to(at_offset);
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- }
-}
-
-
-// Branch instructions.
-void Assembler::b(int branch_offset, Condition cond) {
- ASSERT((branch_offset & 3) == 0);
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | (imm24 & kImm24Mask));
-
- if (cond == al) {
- // Dead code is a good location to emit the constant pool.
- CheckConstPool(false, false);
- }
-}
-
-
-void Assembler::bl(int branch_offset, Condition cond) {
- positions_recorder()->WriteRecordedPositions();
- ASSERT((branch_offset & 3) == 0);
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
-}
-
-
-void Assembler::blx(int branch_offset) { // v5 and above
- positions_recorder()->WriteRecordedPositions();
- ASSERT((branch_offset & 1) == 0);
- int h = ((branch_offset & 2) >> 1)*B24;
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
-}
-
-
-void Assembler::blx(Register target, Condition cond) { // v5 and above
- positions_recorder()->WriteRecordedPositions();
- ASSERT(!target.is(pc));
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
-}
-
-
-void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
- positions_recorder()->WriteRecordedPositions();
- ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
-}
-
-
-// Data-processing instructions.
-
-void Assembler::and_(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | AND | s, src1, dst, src2);
-}
-
-
-void Assembler::eor(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | EOR | s, src1, dst, src2);
-}
-
-
-void Assembler::sub(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | SUB | s, src1, dst, src2);
-}
-
-
-void Assembler::rsb(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | RSB | s, src1, dst, src2);
-}
-
-
-void Assembler::add(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | ADD | s, src1, dst, src2);
-}
-
-
-void Assembler::adc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | ADC | s, src1, dst, src2);
-}
-
-
-void Assembler::sbc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | SBC | s, src1, dst, src2);
-}
-
-
-void Assembler::rsc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | RSC | s, src1, dst, src2);
-}
-
-
-void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | TST | S, src1, r0, src2);
-}
-
-
-void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | TEQ | S, src1, r0, src2);
-}
-
-
-void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | CMP | S, src1, r0, src2);
-}
-
-
-void Assembler::cmp_raw_immediate(
- Register src, int raw_immediate, Condition cond) {
- ASSERT(is_uint12(raw_immediate));
- emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
-}
-
-
-void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | CMN | S, src1, r0, src2);
-}
-
-
-void Assembler::orr(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | ORR | s, src1, dst, src2);
-}
-
-
-void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
- if (dst.is(pc)) {
- positions_recorder()->WriteRecordedPositions();
- }
- // Don't allow nop instructions in the form mov rn, rn to be generated using
- // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
- // or MarkCode(int/NopMarkerTypes) pseudo instructions.
- ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
- addrmod1(cond | MOV | s, r0, dst, src);
-}
-
-
-void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
- ASSERT(immediate < 0x10000);
- // May use movw if supported, but on unsupported platforms will try to use
- // equivalent rotated immed_8 value and other tricks before falling back to a
- // constant pool load.
- mov(reg, Operand(immediate), LeaveCC, cond);
-}
-
-
-void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
- emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
-}
-
-
-void Assembler::bic(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | BIC | s, src1, dst, src2);
-}
-
-
-void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
- addrmod1(cond | MVN | s, r0, dst, src);
-}
-
-
-// Multiply instructions.
-void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
- SBit s, Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
- emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
- Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
- emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::sdiv(Register dst, Register src1, Register src2,
- Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
- emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
- src2.code()*B8 | B4 | src1.code());
-}
-
-
-void Assembler::mul(Register dst, Register src1, Register src2,
- SBit s, Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
- // dst goes in bits 16-19 for this instruction!
- emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smlal(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smull(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umlal(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umull(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-// Miscellaneous arithmetic instructions.
-void Assembler::clz(Register dst, Register src, Condition cond) {
- // v5 and above.
- ASSERT(!dst.is(pc) && !src.is(pc));
- emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
- 15*B8 | CLZ | src.code());
-}
-
-
-// Saturating instructions.
-
-// Unsigned saturate.
-void Assembler::usat(Register dst,
- int satpos,
- const Operand& src,
- Condition cond) {
- // v6 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.rm_.is(pc));
- ASSERT((satpos >= 0) && (satpos <= 31));
- ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
- ASSERT(src.rs_.is(no_reg));
-
- int sh = 0;
- if (src.shift_op_ == ASR) {
- sh = 1;
- }
-
- emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
- src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
-}
-
-
-// Bitfield manipulation instructions.
-
-// Unsigned bit field extract.
-// Extracts #width adjacent bits from position #lsb in a register, and
-// writes them to the low bits of a destination register.
-// ubfx dst, src, #lsb, #width
-void Assembler::ubfx(Register dst,
- Register src,
- int lsb,
- int width,
- Condition cond) {
- // v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
- emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
- lsb*B7 | B6 | B4 | src.code());
-}
-
-
-// Signed bit field extract.
-// Extracts #width adjacent bits from position #lsb in a register, and
-// writes them to the low bits of a destination register. The extracted
-// value is sign extended to fill the destination register.
-// sbfx dst, src, #lsb, #width
-void Assembler::sbfx(Register dst,
- Register src,
- int lsb,
- int width,
- Condition cond) {
- // v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
- emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
- lsb*B7 | B6 | B4 | src.code());
-}
-
-
-// Bit field clear.
-// Sets #width adjacent bits at position #lsb in the destination register
-// to zero, preserving the value of the other bits.
-// bfc dst, #lsb, #width
-void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
- // v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
- int msb = lsb + width - 1;
- emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
-}
-
-
-// Bit field insert.
-// Inserts #width adjacent bits from the low bits of the source register
-// into position #lsb of the destination register.
-// bfi dst, src, #lsb, #width
-void Assembler::bfi(Register dst,
- Register src,
- int lsb,
- int width,
- Condition cond) {
- // v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
- int msb = lsb + width - 1;
- emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
- src.code());
-}
-
-
-// Status register access instructions.
-void Assembler::mrs(Register dst, SRegister s, Condition cond) {
- ASSERT(!dst.is(pc));
- emit(cond | B24 | s | 15*B16 | dst.code()*B12);
-}
-
-
-void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
- Condition cond) {
- ASSERT(fields >= B16 && fields < B20); // at least one field set
- Instr instr;
- if (!src.rm_.is_valid()) {
- // Immediate.
- uint32_t rotate_imm;
- uint32_t immed_8;
- if (src.must_output_reloc_info(this) ||
- !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
- // Immediate operand cannot be encoded, load it first to register ip.
- RecordRelocInfo(src.rmode_, src.imm32_);
- ldr(ip, MemOperand(pc, 0), cond);
- msr(fields, Operand(ip), cond);
- return;
- }
- instr = I | rotate_imm*B8 | immed_8;
- } else {
- ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
- instr = src.rm_.code();
- }
- emit(cond | instr | B24 | B21 | fields | 15*B12);
-}
-
-
-// Load/Store instructions.
-void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
- if (dst.is(pc)) {
- positions_recorder()->WriteRecordedPositions();
- }
- addrmod2(cond | B26 | L, dst, src);
-}
-
-
-void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
- addrmod2(cond | B26, src, dst);
-}
-
-
-void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
- addrmod2(cond | B26 | B | L, dst, src);
-}
-
-
-void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
- addrmod2(cond | B26 | B, src, dst);
-}
-
-
-void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | H | B4, dst, src);
-}
-
-
-void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
- addrmod3(cond | B7 | H | B4, src, dst);
-}
-
-
-void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | S6 | B4, dst, src);
-}
-
-
-void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
-}
-
-
-void Assembler::ldrd(Register dst1, Register dst2,
- const MemOperand& src, Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(ARMv7));
- ASSERT(src.rm().is(no_reg));
- ASSERT(!dst1.is(lr)); // r14.
- ASSERT_EQ(0, dst1.code() % 2);
- ASSERT_EQ(dst1.code() + 1, dst2.code());
- addrmod3(cond | B7 | B6 | B4, dst1, src);
-}
-
-
-void Assembler::strd(Register src1, Register src2,
- const MemOperand& dst, Condition cond) {
- ASSERT(dst.rm().is(no_reg));
- ASSERT(!src1.is(lr)); // r14.
- ASSERT_EQ(0, src1.code() % 2);
- ASSERT_EQ(src1.code() + 1, src2.code());
- ASSERT(CpuFeatures::IsEnabled(ARMv7));
- addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
-}
-
-// Load/Store multiple instructions.
-void Assembler::ldm(BlockAddrMode am,
- Register base,
- RegList dst,
- Condition cond) {
- // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
- ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
-
- addrmod4(cond | B27 | am | L, base, dst);
-
- // Emit the constant pool after a function return implemented by ldm ..{..pc}.
- if (cond == al && (dst & pc.bit()) != 0) {
- // There is a slight chance that the ldm instruction was actually a call,
- // in which case it would be wrong to return into the constant pool; we
- // recognize this case by checking if the emission of the pool was blocked
- // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
- // the case, we emit a jump over the pool.
- CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
- }
-}
-
-
-void Assembler::stm(BlockAddrMode am,
- Register base,
- RegList src,
- Condition cond) {
- addrmod4(cond | B27 | am, base, src);
-}
-
-
-// Exception-generating instructions and debugging support.
-// Stops with a non-negative code less than kNumOfWatchedStops support
-// enabling/disabling and a counter feature. See simulator-arm.h .
-void Assembler::stop(const char* msg, Condition cond, int32_t code) {
-#ifndef __arm__
- ASSERT(code >= kDefaultStopCode);
- {
- // The Simulator will handle the stop instruction and get the message
- // address. It expects to find the address just after the svc instruction.
- BlockConstPoolScope block_const_pool(this);
- if (code >= 0) {
- svc(kStopCode + code, cond);
- } else {
- svc(kStopCode + kMaxStopCode, cond);
- }
- emit(reinterpret_cast<Instr>(msg));
- }
-#else // def __arm__
-#ifdef CAN_USE_ARMV5_INSTRUCTIONS
- if (cond != al) {
- Label skip;
- b(&skip, NegateCondition(cond));
- bkpt(0);
- bind(&skip);
- } else {
- bkpt(0);
- }
-#else // ndef CAN_USE_ARMV5_INSTRUCTIONS
- svc(0x9f0001, cond);
-#endif // ndef CAN_USE_ARMV5_INSTRUCTIONS
-#endif // def __arm__
-}
-
-
-void Assembler::bkpt(uint32_t imm16) { // v5 and above
- ASSERT(is_uint16(imm16));
- emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
-}
-
-
-void Assembler::svc(uint32_t imm24, Condition cond) {
- ASSERT(is_uint24(imm24));
- emit(cond | 15*B24 | imm24);
-}
-
-
-// Coprocessor instructions.
-void Assembler::cdp(Coprocessor coproc,
- int opcode_1,
- CRegister crd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
- crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
-}
-
-
-void Assembler::cdp2(Coprocessor coproc,
- int opcode_1,
- CRegister crd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
-}
-
-
-void Assembler::mcr(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
- rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mcr2(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
-}
-
-
-void Assembler::mrc(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
- rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mrc2(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
- CRegister crd,
- const MemOperand& src,
- LFlag l,
- Condition cond) {
- addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l,
- Condition cond) {
- // Unindexed addressing.
- ASSERT(is_uint8(option));
- emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
- coproc*B8 | (option & 255));
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
- CRegister crd,
- const MemOperand& src,
- LFlag l) { // v5 and above
- ldc(coproc, crd, src, l, kSpecialCondition);
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l) { // v5 and above
- ldc(coproc, crd, rn, option, l, kSpecialCondition);
-}
-
-
-// Support for VFP.
-
-void Assembler::vldr(const DwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond) {
- // Ddst = MEM(Rbase + offset).
- // Instruction details available in ARM DDI 0406C.b, A8-924.
- // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
- // Vd(15-12) | 1011(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int u = 1;
- if (offset < 0) {
- offset = -offset;
- u = 0;
- }
- int vd, d;
- dst.split_code(&vd, &d);
-
- ASSERT(offset >= 0);
- if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
- 0xB*B8 | ((offset / 4) & 255));
- } else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- ASSERT(!base.is(ip));
- if (u == 1) {
- add(ip, base, Operand(offset));
- } else {
- sub(ip, base, Operand(offset));
- }
- emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
- }
-}
-
-
-void Assembler::vldr(const DwVfpRegister dst,
- const MemOperand& operand,
- const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vldr(dst, operand.rn(), operand.offset(), cond);
-}
-
-
-void Assembler::vldr(const SwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond) {
- // Sdst = MEM(Rbase + offset).
- // Instruction details available in ARM DDI 0406A, A8-628.
- // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
- // Vdst(15-12) | 1010(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int u = 1;
- if (offset < 0) {
- offset = -offset;
- u = 0;
- }
- int sd, d;
- dst.split_code(&sd, &d);
- ASSERT(offset >= 0);
-
- if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | ((offset / 4) & 255));
- } else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- ASSERT(!base.is(ip));
- if (u == 1) {
- add(ip, base, Operand(offset));
- } else {
- sub(ip, base, Operand(offset));
- }
- emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
- }
-}
-
-
-void Assembler::vldr(const SwVfpRegister dst,
- const MemOperand& operand,
- const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vldr(dst, operand.rn(), operand.offset(), cond);
-}
-
-
-void Assembler::vstr(const DwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond) {
- // MEM(Rbase + offset) = Dsrc.
- // Instruction details available in ARM DDI 0406C.b, A8-1082.
- // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
- // Vd(15-12) | 1011(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int u = 1;
- if (offset < 0) {
- offset = -offset;
- u = 0;
- }
- ASSERT(offset >= 0);
- int vd, d;
- src.split_code(&vd, &d);
-
- if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
- ((offset / 4) & 255));
- } else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- ASSERT(!base.is(ip));
- if (u == 1) {
- add(ip, base, Operand(offset));
- } else {
- sub(ip, base, Operand(offset));
- }
- emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
- }
-}
-
-
-void Assembler::vstr(const DwVfpRegister src,
- const MemOperand& operand,
- const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vstr(src, operand.rn(), operand.offset(), cond);
-}
-
-
-void Assembler::vstr(const SwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond) {
- // MEM(Rbase + offset) = SSrc.
- // Instruction details available in ARM DDI 0406A, A8-786.
- // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
- // Vdst(15-12) | 1010(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int u = 1;
- if (offset < 0) {
- offset = -offset;
- u = 0;
- }
- int sd, d;
- src.split_code(&sd, &d);
- ASSERT(offset >= 0);
- if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | ((offset / 4) & 255));
- } else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- ASSERT(!base.is(ip));
- if (u == 1) {
- add(ip, base, Operand(offset));
- } else {
- sub(ip, base, Operand(offset));
- }
- emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
- }
-}
-
-
-void Assembler::vstr(const SwVfpRegister src,
- const MemOperand& operand,
- const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vstr(src, operand.rn(), operand.offset(), cond);
-}
-
-
-void Assembler::vldm(BlockAddrMode am,
- Register base,
- DwVfpRegister first,
- DwVfpRegister last,
- Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-922.
- // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
- // first(15-12) | 1011(11-8) | (count * 2)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT_LE(first.code(), last.code());
- ASSERT(am == ia || am == ia_w || am == db_w);
- ASSERT(!base.is(pc));
-
- int sd, d;
- first.split_code(&sd, &d);
- int count = last.code() - first.code() + 1;
- ASSERT(count <= 16);
- emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
- 0xB*B8 | count*2);
-}
-
-
-void Assembler::vstm(BlockAddrMode am,
- Register base,
- DwVfpRegister first,
- DwVfpRegister last,
- Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-1080.
- // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
- // first(15-12) | 1011(11-8) | (count * 2)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT_LE(first.code(), last.code());
- ASSERT(am == ia || am == ia_w || am == db_w);
- ASSERT(!base.is(pc));
-
- int sd, d;
- first.split_code(&sd, &d);
- int count = last.code() - first.code() + 1;
- ASSERT(count <= 16);
- emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
- 0xB*B8 | count*2);
-}
-
-void Assembler::vldm(BlockAddrMode am,
- Register base,
- SwVfpRegister first,
- SwVfpRegister last,
- Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-626.
- // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
- // first(15-12) | 1010(11-8) | (count/2)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT_LE(first.code(), last.code());
- ASSERT(am == ia || am == ia_w || am == db_w);
- ASSERT(!base.is(pc));
-
- int sd, d;
- first.split_code(&sd, &d);
- int count = last.code() - first.code() + 1;
- emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | count);
-}
-
-
-void Assembler::vstm(BlockAddrMode am,
- Register base,
- SwVfpRegister first,
- SwVfpRegister last,
- Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
- // first(15-12) | 1011(11-8) | (count/2)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT_LE(first.code(), last.code());
- ASSERT(am == ia || am == ia_w || am == db_w);
- ASSERT(!base.is(pc));
-
- int sd, d;
- first.split_code(&sd, &d);
- int count = last.code() - first.code() + 1;
- emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
- 0xA*B8 | count);
-}
-
-static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
- uint64_t i;
- memcpy(&i, &d, 8);
-
- *lo = i & 0xffffffff;
- *hi = i >> 32;
-}
-
-// Only works for little endian floating point formats.
-// We don't support VFP on the mixed endian floating point platform.
-static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
- ASSERT(CpuFeatures::IsSupported(VFP3));
-
- // VMOV can accept an immediate of the form:
- //
- // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
- //
- // The immediate is encoded using an 8-bit quantity, comprised of two
- // 4-bit fields. For an 8-bit immediate of the form:
- //
- // [abcdefgh]
- //
- // where a is the MSB and h is the LSB, an immediate 64-bit double can be
- // created of the form:
- //
- // [aBbbbbbb,bbcdefgh,00000000,00000000,
- // 00000000,00000000,00000000,00000000]
- //
- // where B = ~b.
- //
-
- uint32_t lo, hi;
- DoubleAsTwoUInt32(d, &lo, &hi);
-
- // The most obvious constraint is the long block of zeroes.
- if ((lo != 0) || ((hi & 0xffff) != 0)) {
- return false;
- }
-
- // Bits 62:55 must be all clear or all set.
- if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
- return false;
- }
-
- // Bit 63 must be NOT bit 62.
- if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
- return false;
- }
-
- // Create the encoded immediate in the form:
- // [00000000,0000abcd,00000000,0000efgh]
- *encoding = (hi >> 16) & 0xf; // Low nybble.
- *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
- *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
-
- return true;
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
- double imm,
- const Register scratch) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
-
- uint32_t enc;
- if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
- // The double can be encoded in the instruction.
- //
- // Dd = immediate
- // Instruction details available in ARM DDI 0406C.b, A8-936.
- // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
- int vd, d;
- dst.split_code(&vd, &d);
- emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (FLAG_enable_vldr_imm) {
- // TODO(jfb) Temporarily turned off until we have constant blinding or
- // some equivalent mitigation: an attacker can otherwise control
- // generated data which also happens to be executable, a Very Bad
- // Thing indeed.
- // Blinding gets tricky because we don't have xor, we probably
- // need to add/subtract without losing precision, which requires a
- // cookie value that Lithium is probably better positioned to
- // choose.
- // We could also add a few peepholes here like detecting 0.0 and
- // -0.0 and doing a vmov from the sequestered d14, forcing denorms
- // to zero (we set flush-to-zero), and normalizing NaN values.
- // We could also detect redundant values.
- // The code could also randomize the order of values, though
- // that's tricky because vldr has a limited reach. Furthermore
- // it breaks load locality.
- RecordRelocInfo(imm);
- vldr(dst, MemOperand(pc, 0));
- } else {
- // Synthesise the double from ARM immediates.
- uint32_t lo, hi;
- DoubleAsTwoUInt32(imm, &lo, &hi);
-
- if (scratch.is(no_reg)) {
- if (dst.code() < 16) {
- // Move the low part of the double into the lower of the corresponsing S
- // registers of D register dst.
- mov(ip, Operand(lo));
- vmov(dst.low(), ip);
-
- // Move the high part of the double into the higher of the
- // corresponsing S registers of D register dst.
- mov(ip, Operand(hi));
- vmov(dst.high(), ip);
- } else {
- // D16-D31 does not have S registers, so move the low and high parts
- // directly to the D register using vmov.32.
- // Note: This may be slower, so we only do this when we have to.
- mov(ip, Operand(lo));
- vmov(dst, VmovIndexLo, ip);
- mov(ip, Operand(hi));
- vmov(dst, VmovIndexHi, ip);
- }
- } else {
- // Move the low and high parts of the double to a D register in one
- // instruction.
- mov(ip, Operand(lo));
- mov(scratch, Operand(hi));
- vmov(dst, ip, scratch);
- }
- }
-}
-
-
-void Assembler::vmov(const SwVfpRegister dst,
- const SwVfpRegister src,
- const Condition cond) {
- // Sd = Sm
- // Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int sd, d, sm, m;
- dst.split_code(&sd, &d);
- src.split_code(&sm, &m);
- emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
- // Dd = Dm
- // Instruction details available in ARM DDI 0406C.b, A8-938.
- // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
- // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vm, m;
- src.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
- vm);
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
- const VmovIndex index,
- const Register src,
- const Condition cond) {
- // Dd[index] = Rt
- // Instruction details available in ARM DDI 0406C.b, A8-940.
- // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
- // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT(index.index == 0 || index.index == 1);
- int vd, d;
- dst.split_code(&vd, &d);
- emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
- d*B7 | B4);
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
- const Condition cond) {
- // Dm = <Rt,Rt2>.
- // Instruction details available in ARM DDI 0406C.b, A8-948.
- // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
- // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT(!src1.is(pc) && !src2.is(pc));
- int vm, m;
- dst.split_code(&vm, &m);
- emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
- src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
-}
-
-
-void Assembler::vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
- const Condition cond) {
- // <Rt,Rt2> = Dm.
- // Instruction details available in ARM DDI 0406C.b, A8-948.
- // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
- // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT(!dst1.is(pc) && !dst2.is(pc));
- int vm, m;
- src.split_code(&vm, &m);
- emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
- dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
-}
-
-
-void Assembler::vmov(const SwVfpRegister dst,
- const Register src,
- const Condition cond) {
- // Sn = Rt.
- // Instruction details available in ARM DDI 0406A, A8-642.
- // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
- // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT(!src.is(pc));
- int sn, n;
- dst.split_code(&sn, &n);
- emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
-}
-
-
-void Assembler::vmov(const Register dst,
- const SwVfpRegister src,
- const Condition cond) {
- // Rt = Sn.
- // Instruction details available in ARM DDI 0406A, A8-642.
- // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
- // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT(!dst.is(pc));
- int sn, n;
- src.split_code(&sn, &n);
- emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
-}
-
-
-// Type of data to read from or write to VFP register.
-// Used as specifier in generic vcvt instruction.
-enum VFPType { S32, U32, F32, F64 };
-
-
-static bool IsSignedVFPType(VFPType type) {
- switch (type) {
- case S32:
- return true;
- case U32:
- return false;
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
-static bool IsIntegerVFPType(VFPType type) {
- switch (type) {
- case S32:
- case U32:
- return true;
- case F32:
- case F64:
- return false;
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
-static bool IsDoubleVFPType(VFPType type) {
- switch (type) {
- case F32:
- return false;
- case F64:
- return true;
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
-// Split five bit reg_code based on size of reg_type.
-// 32-bit register codes are Vm:M
-// 64-bit register codes are M:Vm
-// where Vm is four bits, and M is a single bit.
-static void SplitRegCode(VFPType reg_type,
- int reg_code,
- int* vm,
- int* m) {
- ASSERT((reg_code >= 0) && (reg_code <= 31));
- if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
- // 32 bit type.
- *m = reg_code & 0x1;
- *vm = reg_code >> 1;
- } else {
- // 64 bit type.
- *m = (reg_code & 0x10) >> 4;
- *vm = reg_code & 0x0F;
- }
-}
-
-
-// Encode vcvt.src_type.dst_type instruction.
-static Instr EncodeVCVT(const VFPType dst_type,
- const int dst_code,
- const VFPType src_type,
- const int src_code,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(src_type != dst_type);
- int D, Vd, M, Vm;
- SplitRegCode(src_type, src_code, &Vm, &M);
- SplitRegCode(dst_type, dst_code, &Vd, &D);
-
- if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
- // Conversion between IEEE floating point and 32-bit integer.
- // Instruction details available in ARM DDI 0406B, A8.6.295.
- // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
- // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
-
- int sz, opc2, op;
-
- if (IsIntegerVFPType(dst_type)) {
- opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
- sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
- op = mode;
- } else {
- ASSERT(IsIntegerVFPType(src_type));
- opc2 = 0x0;
- sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
- op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
- }
-
- return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
- Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
- } else {
- // Conversion between IEEE double and single precision.
- // Instruction details available in ARM DDI 0406B, A8.6.298.
- // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
- return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
- Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
- }
-}
-
-
-void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
-}
-
-
-void Assembler::vneg(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-968.
- // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
- // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vm, m;
- src.split_code(&vm, &m);
-
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
- m*B5 | vm);
-}
-
-
-void Assembler::vabs(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-524.
- // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
- // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vm, m;
- src.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
- m*B5 | vm);
-}
-
-
-void Assembler::vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vadd(Dn, Dm) double precision floating point addition.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406C.b, A8-830.
- // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
- n*B7 | m*B5 | vm);
-}
-
-
-void Assembler::vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vsub(Dn, Dm) double precision floating point subtraction.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406C.b, A8-1086.
- // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
- n*B7 | B6 | m*B5 | vm);
-}
-
-
-void Assembler::vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vmul(Dn, Dm) double precision floating point multiplication.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406C.b, A8-960.
- // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
- n*B7 | m*B5 | vm);
-}
-
-
-void Assembler::vmla(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-932.
- // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
- vm);
-}
-
-
-void Assembler::vmls(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-932.
- // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
- m*B5 | vm);
-}
-
-
-void Assembler::vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vdiv(Dn, Dm) double precision floating point division.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406C.b, A8-882.
- // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
- vm);
-}
-
-
-void Assembler::vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406C.b, A8-864.
- // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- src1.split_code(&vd, &d);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
- m*B5 | vm);
-}
-
-
-void Assembler::vcmp(const DwVfpRegister src1,
- const double src2,
- const Condition cond) {
- // vcmp(Dd, #0.0) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406C.b, A8-864.
- // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT(src2 == 0.0);
- int vd, d;
- src1.split_code(&vd, &d);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
-}
-
-
-void Assembler::vmsr(Register dst, Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-652.
- // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
- // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0xE*B20 | B16 |
- dst.code()*B12 | 0xA*B8 | B4);
-}
-
-
-void Assembler::vmrs(Register dst, Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-652.
- // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
- // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0xF*B20 | B16 |
- dst.code()*B12 | 0xA*B8 | B4);
-}
-
-
-void Assembler::vsqrt(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-1058.
- // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vm, m;
- src.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
- m*B5 | vm);
-}
-
-
-// Pseudo instructions.
-void Assembler::nop(int type) {
- // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
- // some of the CPU's pipeline and has to issue. Older ARM chips simply used
- // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
- // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
- // a type.
- ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
- emit(al | 13*B21 | type*B12 | type);
-}
-
-
-bool Assembler::IsMovT(Instr instr) {
- instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
- ((kNumRegisters-1)*B12) | // mask out register
- EncodeMovwImmediate(0xFFFF)); // mask out immediate value
- return instr == 0x34*B20;
-}
-
-
-bool Assembler::IsMovW(Instr instr) {
- instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
- ((kNumRegisters-1)*B12) | // mask out destination
- EncodeMovwImmediate(0xFFFF)); // mask out immediate value
- return instr == 0x30*B20;
-}
-
-
-bool Assembler::IsNop(Instr instr, int type) {
- ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
- // Check for mov rx, rx where x = type.
- return instr == (al | 13*B21 | type*B12 | type);
-}
-
-
-bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
- uint32_t dummy1;
- uint32_t dummy2;
- return fits_shifter(imm32, &dummy1, &dummy2, NULL);
-}
-
-
-// Debugging.
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-void Assembler::RecordConstPool(int size) {
- // We only need this for debugger support, to correctly compute offsets in the
- // code.
-#ifdef ENABLE_DEBUGGER_SUPPORT
- RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
-#endif
-}
-
-void Assembler::GrowBuffer() {
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else if (buffer_size_ < 1*MB) {
- desc.buffer_size = 2*buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1*MB;
- }
- CHECK_GT(desc.buffer_size, 0); // no overflow
-
- // Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
-
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-
- // Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
-
- // Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // None of our relocation types are pc relative pointing outside the code
- // buffer nor pc absolute pointing inside the code buffer, so there is no need
- // to relocate any emitted relocation entries.
-
- // Relocate pending relocation entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION);
- if (rinfo.rmode() != RelocInfo::JS_RETURN) {
- rinfo.set_pc(rinfo.pc() + pc_delta);
- }
- }
-}
-
-
-void Assembler::db(uint8_t data) {
- // No relocation info should be pending while using db. db is used
- // to write pure data with no pointers and the constant pool should
- // be emitted before using db.
- ASSERT(num_pending_reloc_info_ == 0);
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
- CheckBuffer();
- *reinterpret_cast<uint8_t*>(pc_) = data;
- pc_ += sizeof(uint8_t);
-}
-
-
-void Assembler::dd(uint32_t data) {
- // No relocation info should be pending while using dd. dd is used
- // to write pure data with no pointers and the constant pool should
- // be emitted before using dd.
- ASSERT(num_pending_reloc_info_ == 0);
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
- CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) = data;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
- UseConstantPoolMode mode) {
- // We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, rmode, data, NULL);
- if (((rmode >= RelocInfo::JS_RETURN) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
- (rmode == RelocInfo::CONST_POOL) ||
- mode == DONT_USE_CONSTANT_POOL) {
- // Adjust code for new modes.
- ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode)
- || RelocInfo::IsConstPool(rmode)
- || mode == DONT_USE_CONSTANT_POOL);
- // These modes do not need an entry in the constant pool.
- } else {
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
- }
- if (!RelocInfo::IsNone(rinfo.rmode())) {
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
- }
- ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_,
- rmode,
- RecordedAstId().ToInt(),
- NULL);
- ClearRecordedAstId();
- reloc_info_writer.Write(&reloc_info_with_ast_id);
- } else {
- reloc_info_writer.Write(&rinfo);
- }
- }
-}
-
-void Assembler::RecordRelocInfo(double data) {
- // We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, data);
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
-}
-
-
-void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
- ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
- if (num_pending_reloc_info_ == 0) {
- first_const_pool_use_ = pc_offset();
- }
- pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
- if (rinfo.rmode() == RelocInfo::NONE64) {
- ++num_pending_64_bit_reloc_info_;
- }
- ASSERT(num_pending_64_bit_reloc_info_ <= num_pending_reloc_info_);
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
-}
-
-
-void Assembler::BlockConstPoolFor(int instructions) {
- int pc_limit = pc_offset() + instructions * kInstrSize;
- if (no_const_pool_before_ < pc_limit) {
- // If there are some pending entries, the constant pool cannot be blocked
- // further than constant pool instruction's reach.
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_limit - first_const_pool_use_ < kMaxDistToIntPool));
- // TODO(jfb) Also check 64-bit entries are in range (requires splitting
- // them up from 32-bit entries).
- no_const_pool_before_ = pc_limit;
- }
-
- if (next_buffer_check_ < no_const_pool_before_) {
- next_buffer_check_ = no_const_pool_before_;
- }
-}
-
-
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- // Some short sequence of instruction mustn't be broken up by constant pool
- // emission, such sequences are protected by calls to BlockConstPoolFor and
- // BlockConstPoolScope.
- if (is_const_pool_blocked()) {
- // Something is wrong if emission is forced and blocked at the same time.
- ASSERT(!force_emit);
- return;
- }
-
- // There is nothing to do if there are no pending constant pool entries.
- if (num_pending_reloc_info_ == 0) {
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
- // Calculate the offset of the next check.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
- return;
- }
-
- // Check that the code buffer is large enough before emitting the constant
- // pool (include the jump over the pool and the constant pool marker and
- // the gap to the relocation information).
- // Note 64-bit values are wider, and the first one needs to be 64-bit aligned.
- int jump_instr = require_jump ? kInstrSize : 0;
- int size_up_to_marker = jump_instr + kInstrSize;
- int size_after_marker = num_pending_reloc_info_ * kPointerSize;
- bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
- // 64-bit values must be 64-bit aligned.
- // We'll start emitting at PC: branch+marker, then 32-bit values, then
- // 64-bit values which might need to be aligned.
- bool require_64_bit_align = has_fp_values &&
- (((uintptr_t)pc_ + size_up_to_marker + size_after_marker) & 0x3);
- if (require_64_bit_align) {
- size_after_marker += kInstrSize;
- }
- // num_pending_reloc_info_ also contains 64-bit entries, the above code
- // therefore already counted half of the size for 64-bit entries. Add the
- // remaining size.
- STATIC_ASSERT(kPointerSize == kDoubleSize / 2);
- size_after_marker += num_pending_64_bit_reloc_info_ * (kDoubleSize / 2);
-
- int size = size_up_to_marker + size_after_marker;
-
- // We emit a constant pool when:
- // * requested to do so by parameter force_emit (e.g. after each function).
- // * the distance from the first instruction accessing the constant pool to
- // any of the constant pool entries will exceed its limit the next
- // time the pool is checked. This is overly restrictive, but we don't emit
- // constant pool entries in-order so it's conservatively correct.
- // * the instruction doesn't require a jump after itself to jump over the
- // constant pool, and we're getting close to running out of range.
- if (!force_emit) {
- ASSERT((first_const_pool_use_ >= 0) && (num_pending_reloc_info_ > 0));
- int dist = pc_offset() + size - first_const_pool_use_;
- if (has_fp_values) {
- if ((dist < kMaxDistToFPPool - kCheckPoolInterval) &&
- (require_jump || (dist < kMaxDistToFPPool / 2))) {
- return;
- }
- } else {
- if ((dist < kMaxDistToIntPool - kCheckPoolInterval) &&
- (require_jump || (dist < kMaxDistToIntPool / 2))) {
- return;
- }
- }
- }
-
- int needed_space = size + kGap;
- while (buffer_space() <= needed_space) GrowBuffer();
-
- {
- // Block recursive calls to CheckConstPool.
- BlockConstPoolScope block_const_pool(this);
- RecordComment("[ Constant Pool");
- RecordConstPool(size);
-
- // Emit jump over constant pool if necessary.
- Label after_pool;
- if (require_jump) {
- b(&after_pool);
- }
-
- // Put down constant pool marker "Undefined instruction".
- // The data size helps disassembly know what to print.
- emit(kConstantPoolMarker | EncodeConstantPoolLength(size_after_marker));
-
- if (require_64_bit_align) {
- emit(kConstantPoolMarker);
- }
-
- // Emit 64-bit constant pool entries first: their range is smaller than
- // 32-bit entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
-
- if (rinfo.rmode() != RelocInfo::NONE64) {
- // 32-bit values emitted later.
- continue;
- }
-
- ASSERT(!((uintptr_t)pc_ & 0x3)); // Check 64-bit alignment.
-
- Instr instr = instr_at(rinfo.pc());
- // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
- ASSERT((IsVldrDPcImmediateOffset(instr) &&
- GetVldrDRegisterImmediateOffset(instr) == 0));
-
- int delta = pc_ - rinfo.pc() - kPcLoadDelta;
- ASSERT(is_uint10(delta));
-
- instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
-
- const double double_data = rinfo.data64();
- uint64_t uint_data = 0;
- memcpy(&uint_data, &double_data, sizeof(double_data));
- emit(uint_data & 0xFFFFFFFF);
- emit(uint_data >> 32);
- }
-
- // Emit 32-bit constant pool entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
- rinfo.rmode() != RelocInfo::CONST_POOL);
-
- if (rinfo.rmode() == RelocInfo::NONE64) {
- // 64-bit values emitted earlier.
- continue;
- }
-
- Instr instr = instr_at(rinfo.pc());
-
- // 64-bit loads shouldn't get here.
- ASSERT(!IsVldrDPcImmediateOffset(instr));
-
- int delta = pc_ - rinfo.pc() - kPcLoadDelta;
- // 0 is the smallest delta:
- // ldr rd, [pc, #0]
- // constant pool marker
- // data
-
- if (IsLdrPcImmediateOffset(instr) &&
- GetLdrRegisterImmediateOffset(instr) == 0) {
- ASSERT(is_uint12(delta));
- instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
- emit(rinfo.data());
- } else {
- ASSERT(IsMovW(instr));
- emit(rinfo.data());
- }
- }
-
- num_pending_reloc_info_ = 0;
- num_pending_64_bit_reloc_info_ = 0;
- first_const_pool_use_ = -1;
-
- RecordComment("]");
-
- if (after_pool.is_linked()) {
- bind(&after_pool);
- }
- }
-
- // Since a constant pool was just emitted, move the check offset forward by
- // the standard interval.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/assembler-arm.h b/src/3rdparty/v8/src/arm/assembler-arm.h
deleted file mode 100644
index 12cee54..0000000
--- a/src/3rdparty/v8/src/arm/assembler-arm.h
+++ /dev/null
@@ -1,1518 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-// A light-weight ARM Assembler
-// Generates user mode instructions for the ARM architecture up to version 5
-
-#ifndef V8_ARM_ASSEMBLER_ARM_H_
-#define V8_ARM_ASSEMBLER_ARM_H_
-#include <stdio.h>
-#include "assembler.h"
-#include "constants-arm.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == VFP3 && !FLAG_enable_vfp3) return false;
- if (f == VFP2 && !FLAG_enable_vfp2) return false;
- if (f == SUDIV && !FLAG_enable_sudiv) return false;
- if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
- return false;
- }
- if (f == VFP32DREGS && !FLAG_enable_32dregs) return false;
- return (supported_ & (1u << f)) != 0;
- }
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
- return (enabled & (1u << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- unsigned mask = 1u << f;
- // VFP2 and ARMv7 are implied by VFP3.
- if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- unsigned old_enabled_;
-#else
-
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (1u << f);
- }
- }
-
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
-
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
-
- const unsigned old_supported_;
- };
-
- private:
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static unsigned supported_;
- static unsigned found_by_runtime_probing_;
-
- friend class ExternalReference;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-
-// Core register
-struct Register {
- static const int kNumRegisters = 16;
- static const int kMaxNumAllocatableRegisters = 8;
- static const int kSizeInBytes = 4;
- static const int kGPRsPerNonVFP2Double = 2;
-
- inline static int NumAllocatableRegisters();
-
- static int ToAllocationIndex(Register reg) {
- ASSERT(reg.code() < kMaxNumAllocatableRegisters);
- return reg.code();
- }
-
- static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index);
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "r0",
- "r1",
- "r2",
- "r3",
- "r4",
- "r5",
- "r6",
- "r7",
- };
- return names[index];
- }
-
- static Register from_code(int code) {
- Register r = { code };
- return r;
- }
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- void set_code(int code) {
- code_ = code;
- ASSERT(is_valid());
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-// These constants are used in several locations, including static initializers
-const int kRegister_no_reg_Code = -1;
-const int kRegister_r0_Code = 0;
-const int kRegister_r1_Code = 1;
-const int kRegister_r2_Code = 2;
-const int kRegister_r3_Code = 3;
-const int kRegister_r4_Code = 4;
-const int kRegister_r5_Code = 5;
-const int kRegister_r6_Code = 6;
-const int kRegister_r7_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_fp_Code = 11;
-const int kRegister_ip_Code = 12;
-const int kRegister_sp_Code = 13;
-const int kRegister_lr_Code = 14;
-const int kRegister_pc_Code = 15;
-
-const Register no_reg = { kRegister_no_reg_Code };
-
-const Register r0 = { kRegister_r0_Code };
-const Register r1 = { kRegister_r1_Code };
-const Register r2 = { kRegister_r2_Code };
-const Register r3 = { kRegister_r3_Code };
-const Register r4 = { kRegister_r4_Code };
-const Register r5 = { kRegister_r5_Code };
-const Register r6 = { kRegister_r6_Code };
-const Register r7 = { kRegister_r7_Code };
-// Used as context register.
-const Register r8 = { kRegister_r8_Code };
-// Used as lithium codegen scratch register.
-const Register r9 = { kRegister_r9_Code };
-// Used as roots register.
-const Register r10 = { kRegister_r10_Code };
-const Register fp = { kRegister_fp_Code };
-const Register ip = { kRegister_ip_Code };
-const Register sp = { kRegister_sp_Code };
-const Register lr = { kRegister_lr_Code };
-const Register pc = { kRegister_pc_Code };
-
-// Single word VFP register.
-struct SwVfpRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 32; }
- bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
- void split_code(int* vm, int* m) const {
- ASSERT(is_valid());
- *m = code_ & 0x1;
- *vm = code_ >> 1;
- }
-
- int code_;
-};
-
-
-// Double word VFP register.
-struct DwVfpRegister {
- static const int kMaxNumRegisters = 32;
- // A few double registers are reserved: one as a scratch register and one to
- // hold 0.0, that does not fit in the immediate field of vmov instructions.
- // d14: 0.0
- // d15: scratch register.
- static const int kNumReservedRegisters = 2;
- static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
- kNumReservedRegisters;
-
- // Note: the number of registers can be different at snapshot and run-time.
- // Any code included in the snapshot must be able to run both with 16 or 32
- // registers.
- inline static int NumRegisters();
- inline static int NumAllocatableRegisters();
-
- inline static int ToAllocationIndex(DwVfpRegister reg);
- static const char* AllocationIndexToString(int index);
- inline static DwVfpRegister FromAllocationIndex(int index);
-
- static DwVfpRegister from_code(int code) {
- DwVfpRegister r = { code };
- return r;
- }
-
- bool is_valid() const {
- return 0 <= code_ && code_ < kMaxNumRegisters;
- }
- bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
- SwVfpRegister low() const {
- ASSERT(code_ < 16);
- SwVfpRegister reg;
- reg.code_ = code_ * 2;
-
- ASSERT(reg.is_valid());
- return reg;
- }
- SwVfpRegister high() const {
- ASSERT(code_ < 16);
- SwVfpRegister reg;
- reg.code_ = (code_ * 2) + 1;
-
- ASSERT(reg.is_valid());
- return reg;
- }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
- void split_code(int* vm, int* m) const {
- ASSERT(is_valid());
- *m = (code_ & 0x10) >> 4;
- *vm = code_ & 0x0F;
- }
-
- int code_;
-};
-
-
-typedef DwVfpRegister DoubleRegister;
-
-
-// Support for the VFP registers s0 to s31 (d0 to d15).
-// Note that "s(N):s(N+1)" is the same as "d(N/2)".
-const SwVfpRegister s0 = { 0 };
-const SwVfpRegister s1 = { 1 };
-const SwVfpRegister s2 = { 2 };
-const SwVfpRegister s3 = { 3 };
-const SwVfpRegister s4 = { 4 };
-const SwVfpRegister s5 = { 5 };
-const SwVfpRegister s6 = { 6 };
-const SwVfpRegister s7 = { 7 };
-const SwVfpRegister s8 = { 8 };
-const SwVfpRegister s9 = { 9 };
-const SwVfpRegister s10 = { 10 };
-const SwVfpRegister s11 = { 11 };
-const SwVfpRegister s12 = { 12 };
-const SwVfpRegister s13 = { 13 };
-const SwVfpRegister s14 = { 14 };
-const SwVfpRegister s15 = { 15 };
-const SwVfpRegister s16 = { 16 };
-const SwVfpRegister s17 = { 17 };
-const SwVfpRegister s18 = { 18 };
-const SwVfpRegister s19 = { 19 };
-const SwVfpRegister s20 = { 20 };
-const SwVfpRegister s21 = { 21 };
-const SwVfpRegister s22 = { 22 };
-const SwVfpRegister s23 = { 23 };
-const SwVfpRegister s24 = { 24 };
-const SwVfpRegister s25 = { 25 };
-const SwVfpRegister s26 = { 26 };
-const SwVfpRegister s27 = { 27 };
-const SwVfpRegister s28 = { 28 };
-const SwVfpRegister s29 = { 29 };
-const SwVfpRegister s30 = { 30 };
-const SwVfpRegister s31 = { 31 };
-
-const DwVfpRegister no_dreg = { -1 };
-const DwVfpRegister d0 = { 0 };
-const DwVfpRegister d1 = { 1 };
-const DwVfpRegister d2 = { 2 };
-const DwVfpRegister d3 = { 3 };
-const DwVfpRegister d4 = { 4 };
-const DwVfpRegister d5 = { 5 };
-const DwVfpRegister d6 = { 6 };
-const DwVfpRegister d7 = { 7 };
-const DwVfpRegister d8 = { 8 };
-const DwVfpRegister d9 = { 9 };
-const DwVfpRegister d10 = { 10 };
-const DwVfpRegister d11 = { 11 };
-const DwVfpRegister d12 = { 12 };
-const DwVfpRegister d13 = { 13 };
-const DwVfpRegister d14 = { 14 };
-const DwVfpRegister d15 = { 15 };
-const DwVfpRegister d16 = { 16 };
-const DwVfpRegister d17 = { 17 };
-const DwVfpRegister d18 = { 18 };
-const DwVfpRegister d19 = { 19 };
-const DwVfpRegister d20 = { 20 };
-const DwVfpRegister d21 = { 21 };
-const DwVfpRegister d22 = { 22 };
-const DwVfpRegister d23 = { 23 };
-const DwVfpRegister d24 = { 24 };
-const DwVfpRegister d25 = { 25 };
-const DwVfpRegister d26 = { 26 };
-const DwVfpRegister d27 = { 27 };
-const DwVfpRegister d28 = { 28 };
-const DwVfpRegister d29 = { 29 };
-const DwVfpRegister d30 = { 30 };
-const DwVfpRegister d31 = { 31 };
-
-const Register sfpd_lo = { kRegister_r6_Code };
-const Register sfpd_hi = { kRegister_r7_Code };
-
-// Aliases for double registers. Defined using #define instead of
-// "static const DwVfpRegister&" because Clang complains otherwise when a
-// compilation unit that includes this header doesn't use the variables.
-#define kFirstCalleeSavedDoubleReg d8
-#define kLastCalleeSavedDoubleReg d15
-#define kDoubleRegZero d14
-#define kScratchDoubleReg d15
-
-
-// Coprocessor register
-struct CRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(CRegister creg) const { return code_ == creg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-
-const CRegister no_creg = { -1 };
-
-const CRegister cr0 = { 0 };
-const CRegister cr1 = { 1 };
-const CRegister cr2 = { 2 };
-const CRegister cr3 = { 3 };
-const CRegister cr4 = { 4 };
-const CRegister cr5 = { 5 };
-const CRegister cr6 = { 6 };
-const CRegister cr7 = { 7 };
-const CRegister cr8 = { 8 };
-const CRegister cr9 = { 9 };
-const CRegister cr10 = { 10 };
-const CRegister cr11 = { 11 };
-const CRegister cr12 = { 12 };
-const CRegister cr13 = { 13 };
-const CRegister cr14 = { 14 };
-const CRegister cr15 = { 15 };
-
-
-// Coprocessor number
-enum Coprocessor {
- p0 = 0,
- p1 = 1,
- p2 = 2,
- p3 = 3,
- p4 = 4,
- p5 = 5,
- p6 = 6,
- p7 = 7,
- p8 = 8,
- p9 = 9,
- p10 = 10,
- p11 = 11,
- p12 = 12,
- p13 = 13,
- p14 = 14,
- p15 = 15
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-// Class Operand represents a shifter operand in data processing instructions
-class Operand BASE_EMBEDDED {
- public:
- // immediate
- INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE32));
- INLINE(static Operand Zero()) {
- return Operand(static_cast<int32_t>(0));
- }
- INLINE(explicit Operand(const ExternalReference& f));
- explicit Operand(Handle<Object> handle);
- INLINE(explicit Operand(Smi* value));
-
- // rm
- INLINE(explicit Operand(Register rm));
-
- // rm <shift_op> shift_imm
- explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
-
- // rm <shift_op> rs
- explicit Operand(Register rm, ShiftOp shift_op, Register rs);
-
- // Return true if this is a register operand.
- INLINE(bool is_reg() const);
-
- // Return true if this operand fits in one instruction so that no
- // 2-instruction solution with a load into the ip register is necessary. If
- // the instruction this operand is used for is a MOV or MVN instruction the
- // actual instruction to use is required for this calculation. For other
- // instructions instr is ignored.
- bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
- bool must_output_reloc_info(const Assembler* assembler) const;
-
- inline int32_t immediate() const {
- ASSERT(!rm_.is_valid());
- return imm32_;
- }
-
- Register rm() const { return rm_; }
- Register rs() const { return rs_; }
- ShiftOp shift_op() const { return shift_op_; }
-
- private:
- Register rm_;
- Register rs_;
- ShiftOp shift_op_;
- int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
- int32_t imm32_; // valid if rm_ == no_reg
- RelocInfo::Mode rmode_;
-
- friend class Assembler;
-};
-
-
-// Class MemOperand represents a memory operand in load and store instructions
-class MemOperand BASE_EMBEDDED {
- public:
- // [rn +/- offset] Offset/NegOffset
- // [rn +/- offset]! PreIndex/NegPreIndex
- // [rn], +/- offset PostIndex/NegPostIndex
- // offset is any signed 32-bit value; offset is first loaded to register ip if
- // it does not fit the addressing mode (12-bit unsigned and sign bit)
- explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
-
- // [rn +/- rm] Offset/NegOffset
- // [rn +/- rm]! PreIndex/NegPreIndex
- // [rn], +/- rm PostIndex/NegPostIndex
- explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
-
- // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
- // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
- // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
- explicit MemOperand(Register rn, Register rm,
- ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
-
- void set_offset(int32_t offset) {
- ASSERT(rm_.is(no_reg));
- offset_ = offset;
- }
-
- uint32_t offset() const {
- ASSERT(rm_.is(no_reg));
- return offset_;
- }
-
- Register rn() const { return rn_; }
- Register rm() const { return rm_; }
- AddrMode am() const { return am_; }
-
- bool OffsetIsUint12Encodable() const {
- return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
- }
-
- private:
- Register rn_; // base
- Register rm_; // register offset
- int32_t offset_; // valid if rm_ == no_reg
- ShiftOp shift_op_;
- int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
- AddrMode am_; // bits P, U, and W
-
- friend class Assembler;
-};
-
-extern const Instr kMovLrPc;
-extern const Instr kLdrPCMask;
-extern const Instr kLdrPCPattern;
-extern const Instr kBlxRegMask;
-extern const Instr kBlxRegPattern;
-extern const Instr kBlxIp;
-
-extern const Instr kMovMvnMask;
-extern const Instr kMovMvnPattern;
-extern const Instr kMovMvnFlip;
-
-extern const Instr kMovLeaveCCMask;
-extern const Instr kMovLeaveCCPattern;
-extern const Instr kMovwMask;
-extern const Instr kMovwPattern;
-extern const Instr kMovwLeaveCCFlip;
-
-extern const Instr kCmpCmnMask;
-extern const Instr kCmpCmnPattern;
-extern const Instr kCmpCmnFlip;
-extern const Instr kAddSubFlip;
-extern const Instr kAndBicFlip;
-
-struct VmovIndex {
- unsigned char index;
-};
-const VmovIndex VmovIndexLo = { 0 };
-const VmovIndex VmovIndexHi = { 1 };
-
-class Assembler : public AssemblerBase {
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~Assembler();
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Label operations & relative jumps (PPUM Appendix D)
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
-
- void bind(Label* L); // binds an unbound label L to the current code position
-
- // Returns the branch offset to the given label from the current code position
- // Links the label to the current position if it is still unbound
- // Manages the jump elimination optimization if the second parameter is true.
- int branch_offset(Label* L, bool jump_elimination_allowed);
-
- // Puts a labels target address at the given position.
- // The high 8 bits are set to zero.
- void label_at_put(Label* L, int at_offset);
-
- // Return the address in the constant pool of the code target address used by
- // the branch/call instruction at pc, or the object in a mov.
- INLINE(static Address target_pointer_address_at(Address pc));
-
- // Read/Modify the pointer in the branch/call/move instruction at pc.
- INLINE(static Address target_pointer_at(Address pc));
- INLINE(static void set_target_pointer_at(Address pc, Address target));
-
- // Read/Modify the code target address in the branch/call instruction at pc.
- INLINE(static Address target_address_at(Address pc));
- INLINE(static void set_target_address_at(Address pc, Address target));
-
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- INLINE(static Address target_address_from_return_address(Address pc));
-
- // Given the address of the beginning of a call, return the address
- // in the instruction stream that the call will return from.
- INLINE(static Address return_address_from_call_start(Address pc));
-
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches within generated code.
- inline static void deserialization_set_special_target_at(
- Address constant_pool_entry, Address target);
-
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address constant_pool_entry,
- Address target);
-
- // Here we are patching the address in the constant pool, not the actual call
- // instruction. The address in the constant pool is the same size as a
- // pointer.
- static const int kSpecialTargetSize = kPointerSize;
-
- // Size of an instruction.
- static const int kInstrSize = sizeof(Instr);
-
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
-#ifdef USE_BLX
- // Patched return sequence is:
- // ldr ip, [pc, #0] @ emited address and start
- // blx ip
- static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
-#else
- // Patched return sequence is:
- // mov lr, pc @ start of sequence
- // ldr pc, [pc, #-4] @ emited address
- static const int kPatchReturnSequenceAddressOffset = kInstrSize;
-#endif
-
- // Distance between start of patched debug break slot and the emitted address
- // to jump to.
-#ifdef USE_BLX
- // Patched debug break slot code is:
- // ldr ip, [pc, #0] @ emited address and start
- // blx ip
- static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
-#else
- // Patched debug break slot code is:
- // mov lr, pc @ start of sequence
- // ldr pc, [pc, #-4] @ emited address
- static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
-#endif
-
-#ifdef USE_BLX
- static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize;
-#else
- static const int kPatchDebugBreakSlotReturnOffset = kInstrSize;
-#endif
-
- // Difference between address of current opcode and value read from pc
- // register.
- static const int kPcLoadDelta = 8;
-
- static const int kJSReturnSequenceInstructions = 4;
- static const int kDebugBreakSlotInstructions = 3;
- static const int kDebugBreakSlotLength =
- kDebugBreakSlotInstructions * kInstrSize;
-
- // ---------------------------------------------------------------------------
- // Code generation
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m. m must be a power of 2 (>= 4).
- void Align(int m);
- // Aligns code to something that's optimal for a jump target for the platform.
- void CodeTargetAlign();
-
- // Branch instructions
- void b(int branch_offset, Condition cond = al);
- void bl(int branch_offset, Condition cond = al);
- void blx(int branch_offset); // v5 and above
- void blx(Register target, Condition cond = al); // v5 and above
- void bx(Register target, Condition cond = al); // v5 and above, plus v4t
-
- // Convenience branch instructions using labels
- void b(Label* L, Condition cond = al) {
- b(branch_offset(L, cond == al), cond);
- }
- void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); }
- void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); }
- void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); }
- void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
-
- // Data-processing instructions
-
- void and_(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void eor(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void sub(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void sub(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al) {
- sub(dst, src1, Operand(src2), s, cond);
- }
-
- void rsb(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void add(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void add(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al) {
- add(dst, src1, Operand(src2), s, cond);
- }
-
- void adc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void sbc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void rsc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void tst(Register src1, const Operand& src2, Condition cond = al);
- void tst(Register src1, Register src2, Condition cond = al) {
- tst(src1, Operand(src2), cond);
- }
-
- void teq(Register src1, const Operand& src2, Condition cond = al);
-
- void cmp(Register src1, const Operand& src2, Condition cond = al);
- void cmp(Register src1, Register src2, Condition cond = al) {
- cmp(src1, Operand(src2), cond);
- }
- void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
-
- void cmn(Register src1, const Operand& src2, Condition cond = al);
-
- void orr(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void orr(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al) {
- orr(dst, src1, Operand(src2), s, cond);
- }
-
- void mov(Register dst, const Operand& src,
- SBit s = LeaveCC, Condition cond = al);
- void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
- mov(dst, Operand(src), s, cond);
- }
-
- // ARMv7 instructions for loading a 32 bit immediate in two instructions.
- // This may actually emit a different mov instruction, but on an ARMv7 it
- // is guaranteed to only emit one instruction.
- void movw(Register reg, uint32_t immediate, Condition cond = al);
- // The constant for movt should be in the range 0-0xffff.
- void movt(Register reg, uint32_t immediate, Condition cond = al);
-
- void bic(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void mvn(Register dst, const Operand& src,
- SBit s = LeaveCC, Condition cond = al);
-
- // Multiply instructions
-
- void mla(Register dst, Register src1, Register src2, Register srcA,
- SBit s = LeaveCC, Condition cond = al);
-
- void mls(Register dst, Register src1, Register src2, Register srcA,
- Condition cond = al);
-
- void sdiv(Register dst, Register src1, Register src2,
- Condition cond = al);
-
- void mul(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void smlal(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void smull(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void umlal(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void umull(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- // Miscellaneous arithmetic instructions
-
- void clz(Register dst, Register src, Condition cond = al); // v5 and above
-
- // Saturating instructions. v6 and above.
-
- // Unsigned saturate.
- //
- // Saturate an optionally shifted signed value to an unsigned range.
- //
- // usat dst, #satpos, src
- // usat dst, #satpos, src, lsl #sh
- // usat dst, #satpos, src, asr #sh
- //
- // Register dst will contain:
- //
- // 0, if s < 0
- // (1 << satpos) - 1, if s > ((1 << satpos) - 1)
- // s, otherwise
- //
- // where s is the contents of src after shifting (if used.)
- void usat(Register dst, int satpos, const Operand& src, Condition cond = al);
-
- // Bitfield manipulation instructions. v7 and above.
-
- void ubfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
-
- void sbfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
-
- void bfc(Register dst, int lsb, int width, Condition cond = al);
-
- void bfi(Register dst, Register src, int lsb, int width,
- Condition cond = al);
-
- // Status register access instructions
-
- void mrs(Register dst, SRegister s, Condition cond = al);
- void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
-
- // Load/Store instructions
- void ldr(Register dst, const MemOperand& src, Condition cond = al);
- void str(Register src, const MemOperand& dst, Condition cond = al);
- void ldrb(Register dst, const MemOperand& src, Condition cond = al);
- void strb(Register src, const MemOperand& dst, Condition cond = al);
- void ldrh(Register dst, const MemOperand& src, Condition cond = al);
- void strh(Register src, const MemOperand& dst, Condition cond = al);
- void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
- void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
- void ldrd(Register dst1,
- Register dst2,
- const MemOperand& src, Condition cond = al);
- void strd(Register src1,
- Register src2,
- const MemOperand& dst, Condition cond = al);
-
- // Load/Store multiple instructions
- void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
- void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
-
- // Exception-generating instructions and debugging support
- void stop(const char* msg,
- Condition cond = al,
- int32_t code = kDefaultStopCode);
-
- void bkpt(uint32_t imm16); // v5 and above
- void svc(uint32_t imm24, Condition cond = al);
-
- // Coprocessor instructions
-
- void cdp(Coprocessor coproc, int opcode_1,
- CRegister crd, CRegister crn, CRegister crm,
- int opcode_2, Condition cond = al);
-
- void cdp2(Coprocessor coproc, int opcode_1,
- CRegister crd, CRegister crn, CRegister crm,
- int opcode_2); // v5 and above
-
- void mcr(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0, Condition cond = al);
-
- void mcr2(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0); // v5 and above
-
- void mrc(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0, Condition cond = al);
-
- void mrc2(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0); // v5 and above
-
- void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
- LFlag l = Short, Condition cond = al);
- void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short, Condition cond = al);
-
- void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
- LFlag l = Short); // v5 and above
- void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short); // v5 and above
-
- // Support for VFP.
- // All these APIs support S0 to S31 and D0 to D31.
-
- void vldr(const DwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond = al);
- void vldr(const DwVfpRegister dst,
- const MemOperand& src,
- const Condition cond = al);
-
- void vldr(const SwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond = al);
- void vldr(const SwVfpRegister dst,
- const MemOperand& src,
- const Condition cond = al);
-
- void vstr(const DwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond = al);
- void vstr(const DwVfpRegister src,
- const MemOperand& dst,
- const Condition cond = al);
-
- void vstr(const SwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond = al);
- void vstr(const SwVfpRegister src,
- const MemOperand& dst,
- const Condition cond = al);
-
- void vldm(BlockAddrMode am,
- Register base,
- DwVfpRegister first,
- DwVfpRegister last,
- Condition cond = al);
-
- void vstm(BlockAddrMode am,
- Register base,
- DwVfpRegister first,
- DwVfpRegister last,
- Condition cond = al);
-
- void vldm(BlockAddrMode am,
- Register base,
- SwVfpRegister first,
- SwVfpRegister last,
- Condition cond = al);
-
- void vstm(BlockAddrMode am,
- Register base,
- SwVfpRegister first,
- SwVfpRegister last,
- Condition cond = al);
-
- void vmov(const DwVfpRegister dst,
- double imm,
- const Register scratch = no_reg);
- void vmov(const SwVfpRegister dst,
- const SwVfpRegister src,
- const Condition cond = al);
- void vmov(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
- void vmov(const DwVfpRegister dst,
- const VmovIndex index,
- const Register src,
- const Condition cond = al);
- void vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
- const Condition cond = al);
- void vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
- const Condition cond = al);
- void vmov(const SwVfpRegister dst,
- const Register src,
- const Condition cond = al);
- void vmov(const Register dst,
- const SwVfpRegister src,
- const Condition cond = al);
- void vcvt_f64_s32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_f32_s32(const SwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_f64_u32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_s32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_u32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_f64_f32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_f32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
-
- void vneg(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
- void vabs(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
- void vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vmla(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vmls(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vcmp(const DwVfpRegister src1,
- const double src2,
- const Condition cond = al);
- void vmrs(const Register dst,
- const Condition cond = al);
- void vmsr(const Register dst,
- const Condition cond = al);
- void vsqrt(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
-
- // Pseudo instructions
-
- // Different nop operations are used by the code generator to detect certain
- // states of the generated code.
- enum NopMarkerTypes {
- NON_MARKING_NOP = 0,
- DEBUG_BREAK_NOP,
- // IC markers.
- PROPERTY_ACCESS_INLINED,
- PROPERTY_ACCESS_INLINED_CONTEXT,
- PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
- // Helper values.
- LAST_CODE_MARKER,
- FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
- };
-
- void nop(int type = 0); // 0 is the default non-marking type.
-
- void push(Register src, Condition cond = al) {
- str(src, MemOperand(sp, 4, NegPreIndex), cond);
- }
-
- void pop(Register dst, Condition cond = al) {
- ldr(dst, MemOperand(sp, 4, PostIndex), cond);
- }
-
- void pop() {
- add(sp, sp, Operand(kPointerSize));
- }
-
- // Jump unconditionally to given label.
- void jmp(Label* L) { b(L, al); }
-
- static bool use_immediate_embedded_pointer_loads(
- const Assembler* assembler) {
-#ifdef USE_BLX
- return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
- (assembler == NULL || !assembler->predictable_code_size());
-#else
- // If not using BLX, all loads from the constant pool cannot be immediate,
- // because the ldr pc, [pc + #xxxx] used for calls must be a single
- // instruction and cannot be easily distinguished out of context from
- // other loads that could use movw/movt.
- return false;
-#endif
- }
-
- // Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* label) {
- return pc_offset() - label->pos();
- }
-
- // Check the number of instructions generated from label to here.
- int InstructionsGeneratedSince(Label* label) {
- return SizeOfCodeGeneratedSince(label) / kInstrSize;
- }
-
- // Check whether an immediate fits an addressing mode 1 instruction.
- bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
-
- // Class for scoping postponing the constant pool generation.
- class BlockConstPoolScope {
- public:
- explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
- assem_->StartBlockConstPool();
- }
- ~BlockConstPoolScope() {
- assem_->EndBlockConstPool();
- }
-
- private:
- Assembler* assem_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
- };
-
- // Debugging
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot();
-
- // Record the AST id of the CallIC being compiled, so that it can be placed
- // in the relocation information.
- void SetRecordedAstId(TypeFeedbackId ast_id) {
- ASSERT(recorded_ast_id_.IsNone());
- recorded_ast_id_ = ast_id;
- }
-
- TypeFeedbackId RecordedAstId() {
- ASSERT(!recorded_ast_id_.IsNone());
- return recorded_ast_id_;
- }
-
- void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
- // Record the emission of a constant pool.
- //
- // The emission of constant pool depends on the size of the code generated and
- // the number of RelocInfo recorded.
- // The Debug mechanism needs to map code offsets between two versions of a
- // function, compiled with and without debugger support (see for example
- // Debug::PrepareForBreakPoints()).
- // Compiling functions with debugger support generates additional code
- // (Debug::GenerateSlot()). This may affect the emission of the constant
- // pools and cause the version of the code with debugger support to have
- // constant pools generated in different places.
- // Recording the position and size of emitted constant pools allows to
- // correctly compute the offset mappings between the different versions of a
- // function in all situations.
- //
- // The parameter indicates the size of the constant pool (in bytes), including
- // the marker and branch over the data.
- void RecordConstPool(int size);
-
- // Writes a single byte or word of data in the code stream. Used
- // for inline tables, e.g., jump-tables. The constant pool should be
- // emitted before any use of db and dd to ensure that constant pools
- // are not emitted as part of the tables generated.
- void db(uint8_t data);
- void dd(uint32_t data);
-
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
-
- // Read/patch instructions
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
- void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
- }
- static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
- static void instr_at_put(byte* pc, Instr instr) {
- *reinterpret_cast<Instr*>(pc) = instr;
- }
- static Condition GetCondition(Instr instr);
- static bool IsBranch(Instr instr);
- static int GetBranchOffset(Instr instr);
- static bool IsLdrRegisterImmediate(Instr instr);
- static bool IsVldrDRegisterImmediate(Instr instr);
- static int GetLdrRegisterImmediateOffset(Instr instr);
- static int GetVldrDRegisterImmediateOffset(Instr instr);
- static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
- static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset);
- static bool IsStrRegisterImmediate(Instr instr);
- static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
- static bool IsAddRegisterImmediate(Instr instr);
- static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
- static Register GetRd(Instr instr);
- static Register GetRn(Instr instr);
- static Register GetRm(Instr instr);
- static bool IsPush(Instr instr);
- static bool IsPop(Instr instr);
- static bool IsStrRegFpOffset(Instr instr);
- static bool IsLdrRegFpOffset(Instr instr);
- static bool IsStrRegFpNegOffset(Instr instr);
- static bool IsLdrRegFpNegOffset(Instr instr);
- static bool IsLdrPcImmediateOffset(Instr instr);
- static bool IsVldrDPcImmediateOffset(Instr instr);
- static bool IsTstImmediate(Instr instr);
- static bool IsCmpRegister(Instr instr);
- static bool IsCmpImmediate(Instr instr);
- static Register GetCmpImmediateRegister(Instr instr);
- static int GetCmpImmediateRawImmediate(Instr instr);
- static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
- static bool IsMovT(Instr instr);
- static bool IsMovW(Instr instr);
-
- // Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
- // PC-relative loads, thereby defining a maximum distance between the
- // instruction and the accessed constant.
- static const int kMaxDistToIntPool = 4*KB;
- static const int kMaxDistToFPPool = 1*KB;
- // All relocations could be integer, it therefore acts as the limit.
- static const int kMaxNumPendingRelocInfo = kMaxDistToIntPool/kInstrSize;
-
- // Postpone the generation of the constant pool for the specified number of
- // instructions.
- void BlockConstPoolFor(int instructions);
-
- // Check if is time to emit a constant pool.
- void CheckConstPool(bool force_emit, bool require_jump);
-
- protected:
- // Relocation for a type-recording IC has the AST id added to it. This
- // member variable is a way to pass the information from the call site to
- // the relocation info.
- TypeFeedbackId recorded_ast_id_;
-
- int buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
- // Decode branch instruction at pos and return branch target pos
- int target_at(int pos);
-
- // Patch branch instruction at pos to branch to given branch target pos
- void target_at_put(int pos, int target_pos);
-
- // Prevent contant pool emission until EndBlockConstPool is called.
- // Call to this function can be nested but must be followed by an equal
- // number of call to EndBlockConstpool.
- void StartBlockConstPool() {
- if (const_pool_blocked_nesting_++ == 0) {
- // Prevent constant pool checks happening by setting the next check to
- // the biggest possible offset.
- next_buffer_check_ = kMaxInt;
- }
- }
-
- // Resume constant pool emission. Need to be called as many time as
- // StartBlockConstPool to have an effect.
- void EndBlockConstPool() {
- if (--const_pool_blocked_nesting_ == 0) {
- // Check the constant pool hasn't been blocked for too long.
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToIntPool)));
- ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToFPPool)));
- // Two cases:
- // * no_const_pool_before_ >= next_buffer_check_ and the emission is
- // still blocked
- // * no_const_pool_before_ < next_buffer_check_ and the next emit will
- // trigger a check.
- next_buffer_check_ = no_const_pool_before_;
- }
- }
-
- bool is_const_pool_blocked() const {
- return (const_pool_blocked_nesting_ > 0) ||
- (pc_offset() < no_const_pool_before_);
- }
-
- private:
- int next_buffer_check_; // pc offset of next buffer check
-
- // Code generation
- // The relocation writer's position is at least kGap bytes below the end of
- // the generated instructions. This is so that multi-instruction sequences do
- // not have to check for overflow. The same is true for writes of large
- // relocation info entries.
- static const int kGap = 32;
-
- // Constant pool generation
- // Pools are emitted in the instruction stream, preferably after unconditional
- // jumps or after returns from functions (in dead code locations).
- // If a long code sequence does not contain unconditional jumps, it is
- // necessary to emit the constant pool before the pool gets too far from the
- // location it is accessed from. In this case, we emit a jump over the emitted
- // constant pool.
- // Constants in the pool may be addresses of functions that gets relocated;
- // if so, a relocation info entry is associated to the constant pool entry.
-
- // Repeated checking whether the constant pool should be emitted is rather
- // expensive. By default we only check again once a number of instructions
- // has been generated. That also means that the sizing of the buffers is not
- // an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckPoolIntervalInst = 32;
- static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
-
-
- // Emission of the constant pool may be blocked in some code sequences.
- int const_pool_blocked_nesting_; // Block emission if this is not zero.
- int no_const_pool_before_; // Block emission before this pc offset.
-
- // Keep track of the first instruction requiring a constant pool entry
- // since the previous constant pool was emitted.
- int first_const_pool_use_;
-
- // Relocation info generation
- // Each relocation is encoded as a variable size value
- static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
- RelocInfoWriter reloc_info_writer;
-
- // Relocation info records are also used during code generation as temporary
- // containers for constants and code target addresses until they are emitted
- // to the constant pool. These pending relocation info records are temporarily
- // stored in a separate buffer until a constant pool is emitted.
- // If every instruction in a long sequence is accessing the pool, we need one
- // pending relocation entry per instruction.
-
- // the buffer of pending relocation info
- RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
- // number of pending reloc info entries in the buffer
- int num_pending_reloc_info_;
- // Number of pending reloc info entries included above which also happen to
- // be 64-bit.
- int num_pending_64_bit_reloc_info_;
-
- // The bound position, before this we cannot do instruction elimination.
- int last_bound_pos_;
-
- // Code emission
- inline void CheckBuffer();
- void GrowBuffer();
- inline void emit(Instr x);
-
- // 32-bit immediate values
- void move_32_bit_immediate(Condition cond,
- Register rd,
- SBit s,
- const Operand& x);
-
- // Instruction generation
- void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
- void addrmod2(Instr instr, Register rd, const MemOperand& x);
- void addrmod3(Instr instr, Register rd, const MemOperand& x);
- void addrmod4(Instr instr, Register rn, RegList rl);
- void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
-
- // Labels
- void print(Label* L);
- void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
- void next(Label* L);
-
- enum UseConstantPoolMode {
- USE_CONSTANT_POOL,
- DONT_USE_CONSTANT_POOL
- };
-
- // Record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
- UseConstantPoolMode mode = USE_CONSTANT_POOL);
- void RecordRelocInfo(double data);
- void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
-
- friend class RegExpMacroAssemblerARM;
- friend class RelocInfo;
- friend class CodePatcher;
- friend class BlockConstPoolScope;
-
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
- friend class EnsureSpace;
-};
-
-
-class EnsureSpace BASE_EMBEDDED {
- public:
- explicit EnsureSpace(Assembler* assembler) {
- assembler->CheckBuffer();
- }
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_ASSEMBLER_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/builtins-arm.cc b/src/3rdparty/v8/src/arm/builtins-arm.cc
deleted file mode 100644
index 466c890..0000000
--- a/src/3rdparty/v8/src/arm/builtins-arm.cc
+++ /dev/null
@@ -1,1901 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments excluding receiver
- // -- r1 : called function (only guaranteed when
- // extra_args requires it)
- // -- cp : context
- // -- sp[0] : last argument
- // -- ...
- // -- sp[4 * (argc - 1)] : first argument (argc == r0)
- // -- sp[4 * argc] : receiver
- // -----------------------------------
-
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ push(r1);
- } else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- }
-
- // JumpToExternalReference expects r0 to contain the number of arguments
- // including the receiver and the extra arguments.
- __ add(r0, r0, Operand(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
-
-
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the native context.
-
- __ ldr(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- // Load the InternalArray function from the native context.
- __ ldr(result,
- MemOperand(result,
- Context::SlotOffset(
- Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
-}
-
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
-
- __ ldr(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- // Load the Array function from the native context.
- __ ldr(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. An elements backing store is allocated with size initial_capacity
-// and filled with the hole values.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(scratch3, Operand::Zero());
- __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
-
- if (initial_capacity == 0) {
- __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ add(scratch1, result, Operand(JSArray::kSize));
- __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array (untagged)
- // scratch2: start of next object
- __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
- STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- __ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
- STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- static const int kLoopUnfoldLimit = 4;
- if (initial_capacity <= kLoopUnfoldLimit) {
- for (int i = 0; i < initial_capacity; i++) {
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- }
- } else {
- Label loop, entry;
- __ add(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
- __ b(&entry);
- __ bind(&loop);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(scratch1, scratch2);
- __ b(lt, &loop);
- }
-}
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array_storage and elements_array_end
-// (see below for when that is not the case). If the parameter fill_with_holes
-// is true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array_storage is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array_storage,
- Register elements_array_end,
- Register scratch1,
- Register scratch2,
- bool fill_with_hole,
- Label* gc_required) {
- // Load the initial map from the array function.
- __ LoadInitialArrayMap(array_function, scratch2,
- elements_array_storage, fill_with_hole);
-
- if (FLAG_debug_code) { // Assert that array size is not zero.
- __ tst(array_size, array_size);
- __ Assert(ne, "array size is unexpectedly 0");
- }
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested number of elements.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ mov(elements_array_end,
- Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
- __ add(elements_array_end,
- elements_array_end,
- Operand(array_size, ASR, kSmiTagSize));
- __ AllocateInNewSpace(
- elements_array_end,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array_storage: initial map
- // array_size: size of array (smi)
- __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
- __ str(elements_array_storage,
- FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // array_size: size of array (smi)
- __ add(elements_array_storage, result, Operand(JSArray::kSize));
- __ str(elements_array_storage,
- FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- STATIC_ASSERT(kSmiTag == 0);
- __ sub(elements_array_storage,
- elements_array_storage,
- Operand(kHeapObjectTag));
- // Initialize the fixed array and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // elements_array_storage: elements array (untagged)
- // array_size: size of array (smi)
- __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
- __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ str(array_size,
- MemOperand(elements_array_storage, kPointerSize, PostIndex));
-
- // Calculate elements array and elements array end.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // array_size: smi-tagged size of elements array
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(elements_array_end,
- elements_array_storage,
- Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
- __ jmp(&entry);
- __ bind(&loop);
- __ str(scratch1,
- MemOperand(elements_array_storage, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(elements_array_storage, elements_array_end);
- __ b(lt, &loop);
- }
-}
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// r0: argc
-// r1: constructor (built-in Array function)
-// lr: return address
-// sp[0]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in r1 needs to be preserved for
-// entering the generic code. In both cases argc in r0 needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// construct call and normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
- Label* call_generic_code) {
- Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
- has_non_smi_element, finish, cant_transition_map, not_double;
-
- // Check for array construction with zero arguments or one.
- __ cmp(r0, Operand::Zero());
- __ b(ne, &argc_one_or_more);
-
- // Handle construction of an empty array.
- __ bind(&empty_array);
- AllocateEmptyJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
- // Set up return value, remove receiver from stack and return.
- __ mov(r0, r2);
- __ add(sp, sp, Operand(kPointerSize));
- __ Jump(lr);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmp(r0, Operand(1));
- __ b(ne, &argc_two_or_more);
- STATIC_ASSERT(kSmiTag == 0);
- __ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
- __ tst(r2, r2);
- __ b(ne, &not_empty_array);
- __ Drop(1); // Adjust stack.
- __ mov(r0, Operand::Zero()); // Treat this as a call with argc of zero.
- __ b(&empty_array);
-
- __ bind(&not_empty_array);
- __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
- __ b(ne, call_generic_code);
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is too large to actually allocate an elements array.
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
- __ b(ge, call_generic_code);
-
- // r0: argc
- // r1: constructor
- // r2: array_size (smi)
- // sp[0]: argument
- AllocateJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- r6,
- r7,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r2, r4);
- // Set up return value, remove receiver and argument from stack and return.
- __ mov(r0, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Jump(lr);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ mov(r2, Operand(r0, LSL, kSmiTagSize)); // Convet argc to a smi.
-
- // r0: argc
- // r1: constructor
- // r2: array_size (smi)
- // sp[0]: last argument
- AllocateJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- r6,
- r7,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r2, r6);
-
- // Fill arguments as array elements. Copy from the top of the stack (last
- // element) to the array backing store filling it backwards. Note:
- // elements_array_end points after the backing store therefore PreIndex is
- // used when filling the backing store.
- // r0: argc
- // r3: JSArray
- // r4: elements_array storage start (untagged)
- // r5: elements_array_end (untagged)
- // sp[0]: last argument
- Label loop, entry;
- __ mov(r7, sp);
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(r2, &has_non_smi_element);
- }
- __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
- __ bind(&entry);
- __ cmp(r4, r5);
- __ b(lt, &loop);
-
- __ bind(&finish);
- __ mov(sp, r7);
-
- // Remove caller arguments and receiver from the stack, setup return value and
- // return.
- // r0: argc
- // r3: JSArray
- // sp[0]: receiver
- __ add(sp, sp, Operand(kPointerSize));
- __ mov(r0, r3);
- __ Jump(lr);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(
- r2, r9, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- __ UndoAllocationInNewSpace(r3, r4);
- __ b(call_generic_code);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- // r3: JSArray
- __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- r2,
- r9,
- &cant_transition_map);
- __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ RecordWriteField(r3,
- HeapObject::kMapOffset,
- r2,
- r9,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- Label loop2;
- __ sub(r7, r7, Operand(kPointerSize));
- __ bind(&loop2);
- __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
- __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
- __ cmp(r4, r5);
- __ b(lt, &loop2);
- __ b(&finish);
-}
-
-
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, r1);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin InternalArray functions should be maps.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for InternalArray function");
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for InternalArray function");
- }
-
- // Run the native code for the InternalArray function called as a normal
- // function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle the
- // construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
- // Get the Array function.
- GenerateLoadArrayFunction(masm, r1);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function");
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- r2 : type info cell
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
- // Initial map for the builtin Array function should be a map.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r3, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function");
- __ CompareObjectType(r1, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
-
- if (FLAG_optimize_constructed_arrays) {
- // We should either have undefined in r2 or a valid jsglobalpropertycell
- Label okay_here;
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- Handle<Map> global_property_cell_map(
- masm->isolate()->heap()->global_property_cell_map());
- __ cmp(r2, Operand(undefined_sentinel));
- __ b(eq, &okay_here);
- __ ldr(r3, FieldMemOperand(r2, 0));
- __ cmp(r3, Operand(global_property_cell_map));
- __ Assert(eq, "Expected property cell in register ebx");
- __ bind(&okay_here);
- }
- }
-
- if (FLAG_optimize_constructed_arrays) {
- Label not_zero_case, not_one_case;
- __ tst(r0, r0);
- __ b(ne, &not_zero_case);
- ArrayNoArgumentConstructorStub no_argument_stub;
- __ TailCallStub(&no_argument_stub);
-
- __ bind(&not_zero_case);
- __ cmp(r0, Operand(1));
- __ b(gt, &not_one_case);
- ArraySingleArgumentConstructorStub single_argument_stub;
- __ TailCallStub(&single_argument_stub);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub n_argument_stub;
- __ TailCallStub(&n_argument_stub);
- } else {
- Label generic_constructor;
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1, r2, r3);
-
- Register function = r1;
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r2);
- __ cmp(function, Operand(r2));
- __ Assert(eq, "Unexpected String function");
- }
-
- // Load the first arguments in r0 and get rid of the rest.
- Label no_arguments;
- __ cmp(r0, Operand::Zero());
- __ b(eq, &no_arguments);
- // First args = sp[(argc - 1) * 4].
- __ sub(r0, r0, Operand(1));
- __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
- // sp now point to args[0], drop args[0] + receiver.
- __ Drop(2);
-
- Register argument = r2;
- Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- r0, // Input.
- argument, // Result.
- r3, // Scratch.
- r4, // Scratch.
- r5, // Scratch.
- false, // Is it a Smi?
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
- __ bind(&argument_is_string);
-
- // ----------- S t a t e -------------
- // -- r2 : argument converted to string
- // -- r1 : constructor function
- // -- lr : return address
- // -----------------------------------
-
- Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- r0, // Result.
- r3, // Scratch.
- r4, // Scratch.
- &gc_required,
- TAG_OBJECT);
-
- // Initialising the String Object.
- Register map = r3;
- __ LoadGlobalFunctionInitialMap(function, map, r4);
- if (FLAG_debug_code) {
- __ ldrb(r4, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(eq, "Unexpected string wrapper instance size");
- __ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ cmp(r4, Operand::Zero());
- __ Assert(eq, "Unexpected unused properties of string wrapper");
- }
- __ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
-
- __ str(argument, FieldMemOperand(r0, JSValue::kValueOffset));
-
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-
- __ Ret();
-
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- __ JumpIfSmi(r0, &convert_argument);
-
- // Is it a String?
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ tst(r3, Operand(kIsNotStringMask));
- __ b(ne, &convert_argument);
- __ mov(argument, r0);
- __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
- __ b(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into r2.
- __ bind(&convert_argument);
- __ push(function); // Preserve the function.
- __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- }
- __ pop(function);
- __ mov(argument, r0);
- __ b(&argument_is_string);
-
- // Load the empty string into r2, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kempty_stringRootIndex);
- __ Drop(1);
- __ b(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- }
- __ Ret();
-}
-
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mov(pc, r2);
-}
-
-
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- __ push(r1); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(r5);
- // Restore receiver.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
- GenerateTailCallToSharedCode(masm);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
-
- Isolate* isolate = masm->isolate();
-
- // Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
-
- // Preserve the two incoming parameters on the stack.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ push(r0); // Smi-tagged arguments count.
- __ push(r1); // Constructor function.
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ mov(r2, Operand(debug_step_in_fp));
- __ ldr(r2, MemOperand(r2));
- __ tst(r2, r2);
- __ b(ne, &rt_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &rt_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ b(ne, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // r1: constructor function
- // r2: initial map
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
- __ b(eq, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
- __ ldrb(r4, constructor_count);
- __ sub(r4, r4, Operand(1), SetCC);
- __ strb(r4, constructor_count);
- __ b(ne, &allocate);
-
- __ Push(r1, r2);
-
- __ push(r1); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(r2);
- __ pop(r1);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // r1: constructor function
- // r2: initial map
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // r1: constructor function
- // r2: initial map
- // r3: object size
- // r4: JSObject (not tagged)
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Fill all the in-object properties with the appropriate filler.
- // r1: constructor function
- // r2: initial map
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- if (count_constructions) {
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
- // r0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(r0, r6);
- __ Assert(le, "Unexpected number of pre-allocated property fields.");
- }
- __ InitializeFieldsWithFiller(r5, r0, r7);
- // To allow for truncation.
- __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
- }
- __ InitializeFieldsWithFiller(r5, r6, r7);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- __ add(r4, r4, Operand(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed. Continue with
- // allocated object if not fall through to runtime call if it is.
- // r1: constructor function
- // r4: JSObject
- // r5: start of next object (not tagged)
- __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields
- // and in-object properties.
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ add(r3, r3, Operand(r6));
- __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
- kBitsPerByte);
- __ sub(r3, r3, Operand(r6), SetCC);
-
- // Done if no extra properties are to be allocated.
- __ b(eq, &allocated);
- __ Assert(pl, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: start of next object
- __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
- r0,
- r5,
- r6,
- r2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
- __ mov(r2, r5);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ mov(r0, Operand(r3, LSL, kSmiTagSize));
- __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
-
- // Initialize the fields to undefined.
- // r1: constructor function
- // r2: First element of FixedArray (not tagged)
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r7, r8);
- __ Assert(eq, "Undefined value not loaded.");
- }
- __ b(&entry);
- __ bind(&loop);
- __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(r2, r6);
- __ b(lt, &loop);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // r1: constructor function
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
-
- // Continue with JSObject being successfully allocated
- // r1: constructor function
- // r4: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // r4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(r4, r5);
- }
-
- // Allocate the new receiver object using the runtime call.
- // r1: constructor function
- __ bind(&rt_call);
- __ push(r1); // argument for Runtime_NewObject
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(r4, r0);
-
- // Receiver for constructor call allocated.
- // r4: JSObject
- __ bind(&allocated);
- __ push(r4);
- __ push(r4);
-
- // Reload the number of arguments and the constructor from the stack.
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
-
- // Set up pointer to last argument.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Set up number of arguments for function call below
- __ mov(r0, Operand(r3, LSR, kSmiTagSize));
-
- // Copy arguments and receiver to the expression stack.
- // r0: number of arguments
- // r1: constructor function
- // r2: address of last argument (caller sp)
- // r3: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
- Label loop, entry;
- __ b(&entry);
- __ bind(&loop);
- __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
- __ push(ip);
- __ bind(&entry);
- __ sub(r3, r3, Operand(2), SetCC);
- __ b(ge, &loop);
-
- // Call the function.
- // r0: number of arguments
- // r1: constructor function
- if (is_api_function) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
-
- // Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context from the frame.
- // r0: result
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(r0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r1, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &exit);
-
- // Symbols are "objects".
- __ CompareInstanceType(r1, r3, SYMBOL_TYPE);
- __ b(eq, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ldr(r0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-
- // Leave construct frame.
- }
-
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
- __ add(sp, sp, Operand(kPointerSize));
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
- __ Jump(lr);
-}
-
-
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
-}
-
-
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- // Called from Generate_JS_Entry
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- // r5-r7, cp may be clobbered
-
- // Clear the context before we push it when entering the internal frame.
- __ mov(cp, Operand::Zero());
-
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Set up the context from the function argument.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- __ InitializeRootRegister();
-
- // Push the function and the receiver onto the stack.
- __ push(r1);
- __ push(r2);
-
- // Copy arguments to the stack in a loop.
- // r1: function
- // r3: argc
- // r4: argv, i.e. points to first arg
- Label loop, entry;
- __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
- // r2 points past last arg.
- __ b(&entry);
- __ bind(&loop);
- __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
- __ ldr(r0, MemOperand(r0)); // dereference handle
- __ push(r0); // push parameter
- __ bind(&entry);
- __ cmp(r4, r2);
- __ b(ne, &loop);
-
- // Initialize all JavaScript callee-saved registers, since they will be seen
- // by the garbage collector as part of handlers.
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ mov(r5, Operand(r4));
- __ mov(r6, Operand(r4));
- __ mov(r7, Operand(r4));
- if (kR9Available == 1) {
- __ mov(r9, Operand(r4));
- }
-
- // Invoke the code and pass argc as r0.
- __ mov(r0, Operand(r3));
- if (is_construct) {
- // No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ mov(r2, Operand(undefined_sentinel));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
- // Exit the JS frame and remove the parameters (except function), and
- // return.
- // Respect ABI stack constraint.
- }
- __ Jump(lr);
-
- // r0: result
-}
-
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
-}
-
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
-}
-
-
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ Jump(r2);
-}
-
-
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ Jump(r2);
-}
-
-
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- // The following registers must be saved and restored when calling through to
- // the runtime:
- // r0 - contains return address (beginning of patch sequence)
- // r1 - function object
- FrameScope scope(masm, StackFrame::MANUAL);
- __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(1, 0, r1);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
- __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
- __ mov(pc, r0);
-}
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
-
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
- // Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
- __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
- }
-
- __ add(sp, sp, Operand(kPointerSize)); // Ignore state
- __ mov(pc, lr); // Jump to miss handler
-}
-
-
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the function and deoptimization type to the runtime system.
- __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(r0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- }
-
- // Get the full codegen state from the stack and untag it -> r6.
- __ ldr(r6, MemOperand(sp, 0 * kPointerSize));
- __ SmiUntag(r6);
- // Switch on the state.
- Label with_tos_register, unknown_state;
- __ cmp(r6, Operand(FullCodeGenerator::NO_REGISTERS));
- __ b(ne, &with_tos_register);
- __ add(sp, sp, Operand(1 * kPointerSize)); // Remove state.
- __ Ret();
-
- __ bind(&with_tos_register);
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ cmp(r6, Operand(FullCodeGenerator::TOS_REG));
- __ b(ne, &unknown_state);
- __ add(sp, sp, Operand(2 * kPointerSize)); // Remove state.
- __ Ret();
-
- __ bind(&unknown_state);
- __ stop("no cases left");
-}
-
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- __ Ret();
-}
-
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- CpuFeatures::TryForceFeatureScope scope(VFP3);
- if (!CPU::SupportsCrankshaft()) {
- __ Abort("Unreachable code: Cannot optimize without VFP3 support.");
- return;
- }
-
- // Lookup the function in the JavaScript frame and push it as an
- // argument to the on-stack replacement function.
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- }
-
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
- Label skip;
- __ cmp(r0, Operand(Smi::FromInt(-1)));
- __ b(ne, &skip);
- __ Ret();
-
- __ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiUntag(r0);
- __ push(r0);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- // 1. Make sure we have at least one argument.
- // r0: actual number of arguments
- { Label done;
- __ cmp(r0, Operand::Zero());
- __ b(ne, &done);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ push(r2);
- __ add(r0, r0, Operand(1));
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- // r0: actual number of arguments
- Label slow, non_function;
- __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ JumpIfSmi(r1, &non_function);
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- // 3a. Patch the first argument if necessary when calling a function.
- // r0: actual number of arguments
- // r1: function
- Label shift_arguments;
- __ mov(r4, Operand::Zero()); // indicate regular JS_FUNCTION
- { Label convert_to_object, use_global_receiver, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, &shift_arguments);
-
- // Do not transform the receiver for native (Compilerhints already in r3).
- __ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &shift_arguments);
-
- // Compute the receiver in non-strict mode.
- __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ ldr(r2, MemOperand(r2, -kPointerSize));
- // r0: actual number of arguments
- // r1: function
- // r2: first argument
- __ JumpIfSmi(r2, &convert_to_object);
-
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ cmp(r2, r3);
- __ b(eq, &use_global_receiver);
- __ LoadRoot(r3, Heap::kNullValueRootIndex);
- __ cmp(r2, r3);
- __ b(eq, &use_global_receiver);
-
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &shift_arguments);
-
- __ bind(&convert_to_object);
-
- {
- // Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
- __ push(r0);
-
- __ push(r2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(r2, r0);
-
- __ pop(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-
- // Exit the internal frame.
- }
-
- // Restore the function to r1, and the flag to r4.
- __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ mov(r4, Operand::Zero());
- __ jmp(&patch_receiver);
-
- // Use the global receiver object from the called function as the
- // receiver.
- __ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
- __ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
-
- __ bind(&patch_receiver);
- __ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ str(r2, MemOperand(r3, -kPointerSize));
-
- __ jmp(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ mov(r4, Operand(1, RelocInfo::NONE32)); // indicate function proxy
- __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(eq, &shift_arguments);
- __ bind(&non_function);
- __ mov(r4, Operand(2, RelocInfo::NONE32)); // indicate non-function
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- // r0: actual number of arguments
- // r1: function
- // r4: call type (0: JS function, 1: function proxy, 2: non-function)
- __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ str(r1, MemOperand(r2, -kPointerSize));
-
- // 4. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // r0: actual number of arguments
- // r1: function
- // r4: call type (0: JS function, 1: function proxy, 2: non-function)
- __ bind(&shift_arguments);
- { Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
-
- __ bind(&loop);
- __ ldr(ip, MemOperand(r2, -kPointerSize));
- __ str(ip, MemOperand(r2));
- __ sub(r2, r2, Operand(kPointerSize));
- __ cmp(r2, sp);
- __ b(ne, &loop);
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ sub(r0, r0, Operand(1));
- __ pop();
- }
-
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- // r0: actual number of arguments
- // r1: function
- // r4: call type (0: JS function, 1: function proxy, 2: non-function)
- { Label function, non_proxy;
- __ tst(r4, r4);
- __ b(eq, &function);
- // Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ mov(r2, Operand::Zero());
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ cmp(r4, Operand(1));
- __ b(ne, &non_proxy);
-
- __ push(r1); // re-add proxy object as additional argument
- __ add(r0, r0, Operand(1));
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- // r0: actual number of arguments
- // r1: function
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2,
- FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ cmp(r2, r0); // Check formal and actual parameter counts.
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET,
- ne);
-
- ParameterCount expected(0);
- __ InvokeCode(r3, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset = -5 * kPointerSize;
- const int kLimitOffset = -4 * kPointerSize;
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
-
- __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r0);
- __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
- // Make r2 the space we have left. The stack might already be overflowed
- // here which will cause r2 to become negative.
- __ sub(r2, sp, r2);
- // Check if the arguments will overflow the stack.
- __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ b(gt, &okay); // Signed comparison.
-
- // Out of stack space.
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ push(r1);
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
-
- // Push current limit and index.
- __ bind(&okay);
- __ push(r0); // limit
- __ mov(r1, Operand::Zero()); // initial index
- __ push(r1);
-
- // Get the receiver.
- __ ldr(r0, MemOperand(fp, kRecvOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in r1.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Do not transform the receiver for strict mode functions.
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(r0, &call_to_object);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
-
- // Check if the receiver is already a JavaScript object.
- // r0: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &push_receiver);
-
- // Convert the receiver to a regular object.
- // r0: receiver
- __ bind(&call_to_object);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ b(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
- __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- // r0: receiver
- __ bind(&push_receiver);
- __ push(r0);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ b(&entry);
-
- // Load the current argument from the arguments array and push it to the
- // stack.
- // r0: current argument index
- __ bind(&loop);
- __ ldr(r1, MemOperand(fp, kArgsOffset));
- __ push(r1);
- __ push(r0);
-
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(r0);
-
- // Use inline caching to access the arguments.
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ add(r0, r0, Operand(1 << kSmiTagSize));
- __ str(r0, MemOperand(fp, kIndexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ ldr(r1, MemOperand(fp, kLimitOffset));
- __ cmp(r0, r1);
- __ b(ne, &loop);
-
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &call_proxy);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-
- frame_scope.GenerateLeaveFrame();
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Jump(lr);
-
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(r1); // add function proxy as last argument
- __ add(r0, r0, Operand(1));
- __ mov(r2, Operand::Zero());
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- // Tear down the internal frame and remove function, receiver and args.
- }
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Jump(lr);
-}
-
-
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(3 * kPointerSize));
-}
-
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : result being passed through
- // -----------------------------------
- // Get the number of arguments passed (as a smi), tear down the frame and
- // then tear down the parameters.
- __ ldr(r1, MemOperand(fp, -3 * kPointerSize));
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
-}
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : actual number of arguments
- // -- r1 : function (passed through to callee)
- // -- r2 : expected number of arguments
- // -- r3 : code entry to call
- // -- r5 : call kind information
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments;
-
- Label enough, too_few;
- __ cmp(r0, r2);
- __ b(lt, &too_few);
- __ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- __ b(eq, &dont_adapt_arguments);
-
- { // Enough parameters: actual >= expected
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
-
- // Calculate copy start address into r0 and copy end address into r2.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: code entry to call
- __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- // adjust for return address and receiver
- __ add(r0, r0, Operand(2 * kPointerSize));
- __ sub(r2, r0, Operand(r2, LSL, kPointerSizeLog2));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: copy end address
- // r3: code entry to call
-
- Label copy;
- __ bind(&copy);
- __ ldr(ip, MemOperand(r0, 0));
- __ push(ip);
- __ cmp(r0, r2); // Compare before moving to next argument.
- __ sub(r0, r0, Operand(kPointerSize));
- __ b(ne, &copy);
-
- __ b(&invoke);
- }
-
- { // Too few parameters: Actual < expected
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
-
- // Calculate copy start address into r0 and copy end address is fp.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: code entry to call
- __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: expected number of arguments
- // r3: code entry to call
- Label copy;
- __ bind(&copy);
- // Adjust load for return address and receiver.
- __ ldr(ip, MemOperand(r0, 2 * kPointerSize));
- __ push(ip);
- __ cmp(r0, fp); // Compare before moving to next argument.
- __ sub(r0, r0, Operand(kPointerSize));
- __ b(ne, &copy);
-
- // Fill the remaining expected arguments with undefined.
- // r1: function
- // r2: expected number of arguments
- // r3: code entry to call
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ sub(r2, fp, Operand(r2, LSL, kPointerSizeLog2));
- __ sub(r2, r2, Operand(4 * kPointerSize)); // Adjust for frame.
-
- Label fill;
- __ bind(&fill);
- __ push(ip);
- __ cmp(sp, r2);
- __ b(ne, &fill);
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ Call(r3);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Jump(lr);
-
-
- // -------------------------------------------
- // Dont adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ Jump(r3);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/code-stubs-arm.cc b/src/3rdparty/v8/src/arm/code-stubs-arm.cc
deleted file mode 100644
index e7a8489..0000000
--- a/src/3rdparty/v8/src/arm/code-stubs-arm.cc
+++ /dev/null
@@ -1,8166 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r3, r2, r1, r0 };
- descriptor->register_param_count_ = 4;
- descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r1, r0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r0, r1 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- Address entry =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
-}
-
-
-static void InitializeArrayConstructorDescriptor(Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // register state
- // r1 -- constructor function
- // r2 -- type info cell with elements kind
- // r0 -- number of arguments to the constructor function
- static Register registers[] = { r1, r2 };
- descriptor->register_param_count_ = 2;
- // stack param count needs (constructor pointer, and single argument)
- descriptor->stack_parameter_count_ = &r0;
- descriptor->register_params_ = registers;
- descriptor->extra_expression_stack_count_ = 1;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ArrayConstructor_StubFailure);
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cond);
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* lhs_not_nan,
- Label* slow,
- bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs);
-
-
-// Check if the operand is a heap number.
-static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
- Register scratch1, Register scratch2,
- Label* not_a_heap_number) {
- __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
- __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch1, scratch2);
- __ b(ne, not_a_heap_number);
-}
-
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(r0, &check_heap_number);
- __ Ret();
-
- __ bind(&check_heap_number);
- EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);
- __ Ret();
-
- __ bind(&call_builtin);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in cp.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
-
- // Pop the function info from the stack.
- __ pop(r3);
-
- // Attempt to allocate new JSFunction in new space.
- __ AllocateInNewSpace(JSFunction::kSize,
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
-
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
- __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
- __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
- __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
- __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ ldr(r1,
- FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ tst(r1, r1);
- __ b(ne, &check_optimized);
- }
- __ bind(&install_unoptimized);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
- __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
-
- // r2 holds native context, r1 points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into r4.
- __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
- __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
- __ cmp(r2, r5);
- __ b(eq, &install_optimized);
-
- // Iterate through the rest of map backwards. r4 holds an index as a Smi.
- Label loop;
- __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double check first entry.
-
- __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ b(eq, &install_unoptimized);
- __ sub(r4, r4, Operand(
- Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
- __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r5, MemOperand(r5));
- __ cmp(r2, r5);
- __ b(ne, &loop);
- // Hit: fetch the optimized code.
- __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(r5, r5, Operand(kPointerSize));
- __ ldr(r4, MemOperand(r5));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(),
- 1, r6, r7);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-
- // Now link a function into a list of optimized functions.
- __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
- // No need for write barrier as JSFunction (eax) is in the new space.
-
- __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Store JSFunction (eax) into edx before issuing write barrier as
- // it clobbers all the registers passed.
- __ mov(r4, r0);
- __ RecordWriteContextSlot(
- r2,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- r4,
- r1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ LoadRoot(r4, Heap::kFalseValueRootIndex);
- __ Push(cp, r3, r4);
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-
- // Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Set up the object header.
- __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(r1, Operand(Smi::FromInt(0)));
- __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-
- // Copy the qml global object from the surrounding context.
- __ ldr(r1,
- MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
- __ str(r1,
- MemOperand(r0, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ pop();
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: function.
- // [sp + kPointerSize]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0, r1, r2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Load the serialized scope info from the stack.
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(r3, &after_sentinel);
- if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ cmp(r3, Operand::Zero());
- __ Assert(eq, message);
- }
- __ ldr(r3, GlobalObjectOperand());
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
- __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
- __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
- __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
- __ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX));
-
- // Copy the qml global object from the surrounding context.
- __ ldr(r1, ContextOperand(cp, Context::QML_GLOBAL_OBJECT_INDEX));
- __ str(r1, ContextOperand(r0, Context::QML_GLOBAL_OBJECT_INDEX));
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- AllocationSiteMode allocation_site_mode,
- Label* fail) {
- // Registers on entry:
- //
- // r3: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
-
- int size = JSArray::kSize;
- int allocation_info_start = size;
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- size += AllocationSiteInfo::kSize;
- }
- size += elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- AllocationFlags flags = TAG_OBJECT;
- if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
- flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
- }
- __ AllocateInNewSpace(size, r0, r1, r2, fail, flags);
-
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ mov(r2, Operand(Handle<Map>(masm->isolate()->heap()->
- allocation_site_info_map())));
- __ str(r2, FieldMemOperand(r0, allocation_info_start));
- __ str(r3, FieldMemOperand(r0, allocation_info_start + kPointerSize));
- }
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ add(r2, r0, Operand(JSArray::kSize + AllocationSiteInfo::kSize));
- } else {
- __ add(r2, r0, Operand(JSArray::kSize));
- }
- __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
-
- // Copy the elements array.
- ASSERT((elements_size % kPointerSize) == 0);
- __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
- }
-}
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: constant elements.
- // [sp + kPointerSize]: literal index.
- // [sp + (2 * kPointerSize)]: literals array.
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &slow_case);
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
- __ b(ne, &check_fast_elements);
- GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&check_fast_elements);
- __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &double_elements);
- GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(r3);
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ CompareRoot(r3, expected_map_index);
- __ Assert(eq, message);
- __ pop(r3);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode,
- allocation_site_mode_,
- &slow_case);
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
-// scratch register. Destroys the source register. No GC occurs during this
-// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public PlatformCodeStub {
- public:
- ConvertToDoubleStub(Register result_reg_1,
- Register result_reg_2,
- Register source_reg,
- Register scratch_reg)
- : result1_(result_reg_1),
- result2_(result_reg_2),
- source_(source_reg),
- zeros_(scratch_reg) { }
-
- private:
- Register result1_;
- Register result2_;
- Register source_;
- Register zeros_;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
-
- Major MajorKey() { return ConvertToDouble; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return result1_.code() +
- (result2_.code() << 4) +
- (source_.code() << 8) +
- (zeros_.code() << 12);
- }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
- Register exponent = result1_;
- Register mantissa = result2_;
-
- Label not_special;
- // Convert from Smi to integer.
- __ mov(source_, Operand(source_, ASR, kSmiTagSize));
- // Move sign bit from source to destination. This works because the sign bit
- // in the exponent word of the double has the same position and polarity as
- // the 2's complement sign bit in a Smi.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
- // Subtract from 0 if source was negative.
- __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register source_ contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(source_, Operand(1));
- __ b(gt, &not_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- const uint32_t exponent_word_for_1 =
- HeapNumber::kExponentBias << HeapNumber::kExponentShift;
- __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
- // 1, 0 and -1 all have 0 for the second word.
- __ mov(mantissa, Operand::Zero());
- __ Ret();
-
- __ bind(&not_special);
- // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
- // Gets the wrong answer for 0, but we already checked for that case above.
- __ CountLeadingZeros(zeros_, source_, mantissa);
- // Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here. Use a fudge factor to
- // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
- // that fit in the ARM's constant field.
- int fudge = 0x400;
- __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
- __ add(mantissa, mantissa, Operand(fudge));
- __ orr(exponent,
- exponent,
- Operand(mantissa, LSL, HeapNumber::kExponentShift));
- // Shift up the source chopping the top bit off.
- __ add(zeros_, zeros_, Operand(1));
- // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
- __ mov(source_, Operand(source_, LSL, zeros_));
- // Compute lower part of fraction (last 12 bits).
- __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
- // And the top (top 20 bits).
- __ orr(exponent,
- exponent,
- Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
- __ Ret();
-}
-
-
-void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
- __ vmov(d7.high(), scratch1);
- __ vcvt_f64_s32(d7, d7.high());
- __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
- __ vmov(d6.high(), scratch1);
- __ vcvt_f64_s32(d6, d6.high());
- if (destination == kCoreRegisters) {
- __ vmov(r2, r3, d7);
- __ vmov(r0, r1, d6);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(scratch1, Operand(r0));
- ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
- __ push(lr);
- __ Call(stub1.GetCode(masm->isolate()));
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(scratch1, Operand(r1));
- ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
- __ Call(stub2.GetCode(masm->isolate()));
- __ pop(lr);
- }
-}
-
-
-void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
- Destination destination,
- Register object,
- DwVfpRegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
-
- Label is_smi, done;
-
- // Smi-check
- __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
- // Heap number check
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
-
- // Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(VFP2) &&
- destination == kVFPRegisters) {
- CpuFeatures::Scope scope(VFP2);
- // Load the double from tagged HeapNumber to double register.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(dst, scratch1, HeapNumber::kValueOffset);
- } else {
- ASSERT(destination == kCoreRegisters);
- // Load the double from heap number to dst1 and dst2 in double format.
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
- }
- __ jmp(&done);
-
- // Handle loading a double from a smi.
- __ bind(&is_smi);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Convert smi to double using VFP instructions.
- __ vmov(dst.high(), scratch1);
- __ vcvt_f64_s32(dst, dst.high());
- if (destination == kCoreRegisters) {
- // Load the converted smi to dst1 and dst2 in double format.
- __ vmov(dst1, dst2, dst);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write smi to dst1 and dst2 double format.
- __ mov(scratch1, Operand(object));
- ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
- __ push(lr);
- __ Call(stub.GetCode(masm->isolate()));
- __ pop(lr);
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- Label done;
- Label not_in_int32_range;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
- __ cmp(scratch1, heap_number_map);
- __ b(ne, not_number);
- __ ConvertToInt32(object,
- dst,
- scratch1,
- scratch2,
- double_scratch,
- &not_in_int32_range);
- __ jmp(&done);
-
- __ bind(&not_in_int32_range);
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- __ EmitOutOfInt32RangeTruncate(dst,
- scratch1,
- scratch2,
- scratch3);
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst_mantissa,
- Register dst_exponent,
- Register scratch2,
- SwVfpRegister single_scratch) {
- ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst_mantissa));
- ASSERT(!int_scratch.is(dst_exponent));
-
- Label done;
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(single_scratch, int_scratch);
- __ vcvt_f64_s32(double_dst, single_scratch);
- if (destination == kCoreRegisters) {
- __ vmov(dst_mantissa, dst_exponent, double_dst);
- }
- } else {
- Label fewer_than_20_useful_bits;
- // Expected output:
- // | dst_exponent | dst_mantissa |
- // | s | exp | mantissa |
-
- // Check for zero.
- __ cmp(int_scratch, Operand::Zero());
- __ mov(dst_exponent, int_scratch);
- __ mov(dst_mantissa, int_scratch);
- __ b(eq, &done);
-
- // Preload the sign of the value.
- __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
- // Get the absolute value of the object (as an unsigned integer).
- __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
-
- // Get mantissa[51:20].
-
- // Get the position of the first set bit.
- __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2);
- __ rsb(dst_mantissa, dst_mantissa, Operand(31));
-
- // Set the exponent.
- __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
- __ Bfi(dst_exponent, scratch2, scratch2,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-
- // Clear the first non null bit.
- __ mov(scratch2, Operand(1));
- __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa));
-
- __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
- // Get the number of bits to set in the lower part of the mantissa.
- __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord),
- SetCC);
- __ b(mi, &fewer_than_20_useful_bits);
- // Set the higher 20 bits of the mantissa.
- __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2));
- __ rsb(scratch2, scratch2, Operand(32));
- __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2));
- __ b(&done);
-
- __ bind(&fewer_than_20_useful_bits);
- __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
- __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
- __ orr(dst_exponent, dst_exponent, scratch2);
- // Set dst1 to 0.
- __ mov(dst_mantissa, Operand::Zero());
- }
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DwVfpRegister double_dst,
- DwVfpRegister double_scratch,
- Register dst_mantissa,
- Register dst_exponent,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- SwVfpRegister single_scratch,
- Label* not_int32) {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!heap_number_map.is(object) &&
- !heap_number_map.is(scratch1) &&
- !heap_number_map.is(scratch2));
-
- Label done, obj_is_not_smi;
-
- __ JumpIfNotSmi(object, &obj_is_not_smi);
- __ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
- dst_exponent, scratch2, single_scratch);
- __ b(&done);
-
- __ bind(&obj_is_not_smi);
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Load the number.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Load the double value.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
-
- __ EmitVFPTruncate(kRoundToZero,
- scratch1,
- double_dst,
- scratch2,
- double_scratch,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ b(ne, not_int32);
-
- if (destination == kCoreRegisters) {
- __ vmov(dst_mantissa, dst_exponent, double_dst);
- }
-
- } else {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers.
- bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
- if (save_registers) {
- // Save both output registers, because the other one probably holds
- // an important value too.
- __ Push(dst_exponent, dst_mantissa);
- }
- __ Ldrd(dst_mantissa, dst_exponent,
- FieldMemOperand(object, HeapNumber::kValueOffset));
-
- // Check for 0 and -0.
- Label zero;
- __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask));
- __ orr(scratch1, scratch1, Operand(dst_mantissa));
- __ cmp(scratch1, Operand::Zero());
- __ b(eq, &zero);
-
- // Check that the value can be exactly represented by a 32-bit integer.
- // Jump to not_int32 if that's not the case.
- Label restore_input_and_miss;
- DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
- &restore_input_and_miss);
-
- // dst_* were trashed. Reload the double value.
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ Ldrd(dst_mantissa, dst_exponent,
- FieldMemOperand(object, HeapNumber::kValueOffset));
- __ b(&done);
-
- __ bind(&restore_input_and_miss);
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ b(not_int32);
-
- __ bind(&zero);
- if (save_registers) {
- __ Drop(2);
- }
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
- ASSERT(!scratch1.is(scratch2) &&
- !scratch1.is(scratch3) &&
- !scratch2.is(scratch3));
-
- Label done, maybe_undefined;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
-
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
-
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
-
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
-
- // Load the double value.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset);
-
- __ EmitVFPTruncate(kRoundToZero,
- dst,
- double_scratch0,
- scratch1,
- double_scratch1,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ b(ne, not_int32);
- } else {
- // Load the double value in the destination registers.
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- // Check for 0 and -0.
- __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
- __ orr(dst, scratch2, Operand(dst));
- __ cmp(dst, Operand::Zero());
- __ b(eq, &done);
-
- DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
-
- // Registers state after DoubleIs32BitInteger.
- // dst: mantissa[51:20].
- // scratch2: 1
-
- // Shift back the higher bits of the mantissa.
- __ mov(dst, Operand(dst, LSR, scratch3));
- // Set the implicit first bit.
- __ rsb(scratch3, scratch3, Operand(32));
- __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
- // Set the sign.
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
- }
- __ b(&done);
-
- __ bind(&maybe_undefined);
- __ CompareRoot(object, Heap::kUndefinedValueRootIndex);
- __ b(ne, not_int32);
- // |undefined| is truncated to 0.
- __ mov(dst, Operand(Smi::FromInt(0)));
- // Fall through.
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src_exponent,
- Register src_mantissa,
- Register dst,
- Register scratch,
- Label* not_int32) {
- // Get exponent alone in scratch.
- __ Ubfx(scratch,
- src_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Substract the bias from the exponent.
- __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
-
- // src1: higher (exponent) part of the double value.
- // src2: lower (mantissa) part of the double value.
- // scratch: unbiased exponent.
-
- // Fast cases. Check for obvious non 32-bit integer values.
- // Negative exponent cannot yield 32-bit integers.
- __ b(mi, not_int32);
- // Exponent greater than 31 cannot yield 32-bit integers.
- // Also, a positive value with an exponent equal to 31 is outside of the
- // signed 32-bit integer range.
- // Another way to put it is that if (exponent - signbit) > 30 then the
- // number cannot be represented as an int32.
- Register tmp = dst;
- __ sub(tmp, scratch, Operand(src_exponent, LSR, 31));
- __ cmp(tmp, Operand(30));
- __ b(gt, not_int32);
- // - Bits [21:0] in the mantissa are not null.
- __ tst(src_mantissa, Operand(0x3fffff));
- __ b(ne, not_int32);
-
- // Otherwise the exponent needs to be big enough to shift left all the
- // non zero bits left. So we need the (30 - exponent) last bits of the
- // 31 higher bits of the mantissa to be null.
- // Because bits [21:0] are null, we can check instead that the
- // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
-
- // Get the 32 higher bits of the mantissa in dst.
- __ Ubfx(dst,
- src_mantissa,
- HeapNumber::kMantissaBitsInTopWord,
- 32 - HeapNumber::kMantissaBitsInTopWord);
- __ orr(dst,
- dst,
- Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord));
-
- // Create the mask and test the lower bits (of the higher bits).
- __ rsb(scratch, scratch, Operand(32));
- __ mov(src_mantissa, Operand(1));
- __ mov(src_exponent, Operand(src_mantissa, LSL, scratch));
- __ sub(src_exponent, src_exponent, Operand(1));
- __ tst(dst, src_exponent);
- __ b(ne, not_int32);
-}
-
-
-void FloatingPointHelper::CallCCodeForDoubleOperation(
- MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Using core registers:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
-
- // Assert that heap_number_result is callee-saved.
- // We currently always use r5 to pass it.
- ASSERT(heap_number_result.is(r5));
-
- // Push the current return address before the C call. Return will be
- // through pop(pc) below.
- __ push(lr);
- __ PrepareCallCFunction(0, 2, scratch);
- if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(d0, r0, r1);
- __ vmov(d1, r2, r3);
- }
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number. Double returned in
- // registers r0 and r1 or in d0.
- if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP2);
- __ vstr(d0,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- } else {
- __ Strd(r0, r1, FieldMemOperand(heap_number_result,
- HeapNumber::kValueOffset));
- }
- // Place heap_number_result in r0 and return to the pushed return address.
- __ mov(r0, Operand(heap_number_result));
- __ pop(pc);
-}
-
-
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
- // These variants are compiled ahead of time. See next method.
- if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
- return true;
- }
- if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
- return true;
- }
- // Other register combinations are generated as and when they are needed,
- // so it is unsafe to call them from stubs (we can't generate a stub while
- // we are generating a stub).
- return false;
-}
-
-
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
- WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-// See comment for class.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
- Label max_negative_int;
- // the_int_ has the answer which is a signed int32 but not a Smi.
- // We test for the special value that has a different exponent. This test
- // has the neat side effect of setting the flags according to the sign.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ cmp(the_int_, Operand(0x80000000u));
- __ b(eq, &max_negative_int);
- // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
- uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ mov(scratch_, Operand(non_smi_exponent));
- // Set the sign bit in scratch_ if the value was negative.
- __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
- // Subtract from 0 if the value was negative.
- __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
- // We should be masking the implict first digit of the mantissa away here,
- // but it just ends up combining harmlessly with the last digit of the
- // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
- // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
- ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kExponentOffset));
- __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kMantissaOffset));
- __ Ret();
-
- __ bind(&max_negative_int);
- // The max negative int32 is stored as a positive number in the mantissa of
- // a double because it uses a sign bit instead of using two's complement.
- // The actual mantissa bits stored are all 0 because the implicit most
- // significant 1 bit is not stored.
- non_smi_exponent += 1 << HeapNumber::kExponentShift;
- __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
- __ mov(ip, Operand::Zero());
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
- __ Ret();
-}
-
-
-// Handle the case where the lhs and rhs are the same object.
-// Equality is almost reflexive (everything but NaN), so this is a test
-// for "identity and not NaN".
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cond) {
- Label not_identical;
- Label heap_number, return_equal;
- __ cmp(r0, r1);
- __ b(ne, &not_identical);
-
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cond == le || cond == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r2);
- __ b(ne, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ mov(r0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
- }
- }
- }
-
- __ bind(&return_equal);
- if (cond == lt) {
- __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
- } else if (cond == gt) {
- __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
- } else {
- __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
- }
- __ Ret();
-
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cond != lt && cond != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r3, Operand(-1));
- __ b(ne, &return_equal);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cond != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cond == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
- }
- }
- __ Ret();
- }
- // No fall through here.
-
- __ bind(&not_identical);
-}
-
-
-// See comment at call site.
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* lhs_not_nan,
- Label* slow,
- bool strict) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- Label rhs_is_smi;
- __ JumpIfSmi(rhs, &rhs_is_smi);
-
- // Lhs is a Smi. Check whether the rhs is a heap number.
- __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
- if (strict) {
- // If rhs is not a number and lhs is a Smi then strict equality cannot
- // succeed. Return non-equal
- // If rhs is r0 then there is already a non zero value in it.
- if (!rhs.is(r0)) {
- __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
- }
- __ Ret(ne);
- } else {
- // Smi compared non-strictly with a non-Smi non-heap-number. Call
- // the runtime.
- __ b(ne, slow);
- }
-
- // Lhs is a smi, rhs is a number.
- if (CpuFeatures::IsSupported(VFP2)) {
- // Convert lhs to a double in d7.
- CpuFeatures::Scope scope(VFP2);
- __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
- // Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- __ push(lr);
- // Convert lhs to a double in r2, r3.
- __ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode(masm->isolate()));
- // Load rhs to a double in r0, r1.
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ pop(lr);
- }
-
- // We now have both loaded as doubles but we can skip the lhs nan check
- // since it's a smi.
- __ jmp(lhs_not_nan);
-
- __ bind(&rhs_is_smi);
- // Rhs is a smi. Check whether the non-smi lhs is a heap number.
- __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
- if (strict) {
- // If lhs is not a number and rhs is a smi then strict equality cannot
- // succeed. Return non-equal.
- // If lhs is r0 then there is already a non zero value in it.
- if (!lhs.is(r0)) {
- __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
- }
- __ Ret(ne);
- } else {
- // Smi compared non-strictly with a non-smi non-heap-number. Call
- // the runtime.
- __ b(ne, slow);
- }
-
- // Rhs is a smi, lhs is a heap number.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- // Convert rhs to a double in d6 .
- __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
- } else {
- __ push(lr);
- // Load lhs to a double in r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- // Convert rhs to a double in r0, r1.
- __ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(masm->isolate()));
- __ pop(lr);
- }
- // Fall through to both_loaded_as_doubles.
-}
-
-
-void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
- Label one_is_nan, neither_is_nan;
-
- __ Sbfx(r4,
- lhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, lhs_not_nan);
- __ mov(r4,
- Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(lhs_mantissa, Operand::Zero());
- __ b(ne, &one_is_nan);
-
- __ bind(lhs_not_nan);
- __ Sbfx(r4,
- rhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, &neither_is_nan);
- __ mov(r4,
- Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(rhs_mantissa, Operand::Zero());
- __ b(eq, &neither_is_nan);
-
- __ bind(&one_is_nan);
- // NaN comparisons always fail.
- // Load whatever we need in r0 to make the comparison fail.
- if (cond == lt || cond == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
-
- __ bind(&neither_is_nan);
-}
-
-
-// See comment at call site.
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
- Condition cond) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
-
- // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
- if (cond == eq) {
- // Doubles are not equal unless they have the same bit pattern.
- // Exception: 0 and -0.
- __ cmp(rhs_mantissa, Operand(lhs_mantissa));
- __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
- // Return non-zero if the numbers are unequal.
- __ Ret(ne);
-
- __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
- // If exponents are equal then return 0.
- __ Ret(eq);
-
- // Exponents are unequal. The only way we can return that the numbers
- // are equal is if one is -0 and the other is 0. We already dealt
- // with the case where both are -0 or both are 0.
- // We start by seeing if the mantissas (that are equal) or the bottom
- // 31 bits of the rhs exponent are non-zero. If so we return not
- // equal.
- __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
- __ mov(r0, Operand(r4), LeaveCC, ne);
- __ Ret(ne);
- // Now they are equal if and only if the lhs exponent is zero in its
- // low 31 bits.
- __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
- __ Ret();
- } else {
- // Call a native function to do a comparison between two non-NaNs.
- // Call C routine that may not cause GC or other trouble.
- __ push(lr);
- __ PrepareCallCFunction(0, 2, r5);
- if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(d0, r0, r1);
- __ vmov(d1, r2, r3);
- }
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
- 0, 2);
- __ pop(pc); // Return.
- }
-}
-
-
-// See comment at call site.
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- // If either operand is a JS object or an oddball value, then they are
- // not equal since their pointers are different.
- // There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
- Label first_non_object;
- // Get the type of the first operand into r2 and compare it with
- // FIRST_SPEC_OBJECT_TYPE.
- __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, &first_non_object);
-
- // Return non-zero (r0 is not zero)
- Label return_not_equal;
- __ bind(&return_not_equal);
- __ Ret();
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ cmp(r2, Operand(ODDBALL_TYPE));
- __ b(eq, &return_not_equal);
-
- __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ cmp(r3, Operand(ODDBALL_TYPE));
- __ b(eq, &return_not_equal);
-
- // Now that we have the types we might as well check for
- // internalized-internalized.
- // Ensure that no non-strings have the internalized bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(r2, r2, Operand(r3));
- __ tst(r2, Operand(kIsInternalizedMask));
- __ b(ne, &return_not_equal);
-}
-
-
-// See comment at call site.
-static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* both_loaded_as_doubles,
- Label* not_heap_numbers,
- Label* slow) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
- __ b(ne, not_heap_numbers);
- __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ cmp(r2, r3);
- __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
-
- // Both are heap numbers. Load them up then jump to the code we have
- // for that.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- }
- __ jmp(both_loaded_as_doubles);
-}
-
-
-// Fast negative check for internalized-to-internalized equality.
-static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* possible_strings,
- Label* not_both_strings) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- // r2 is object type of rhs.
- // Ensure that no non-strings have the internalized bit set.
- Label object_test;
- STATIC_ASSERT(kInternalizedTag != 0);
- __ tst(r2, Operand(kIsNotStringMask));
- __ b(ne, &object_test);
- __ tst(r2, Operand(kIsInternalizedMask));
- __ b(eq, possible_strings);
- __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, not_both_strings);
- __ tst(r3, Operand(kIsInternalizedMask));
- __ b(eq, possible_strings);
-
- // Both are internalized. We already checked they weren't the same pointer
- // so they are not equal.
- __ mov(r0, Operand(NOT_EQUAL));
- __ Ret();
-
- __ bind(&object_test);
- __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(lt, not_both_strings);
- __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, not_both_strings);
- // If both objects are undetectable, they are equal. Otherwise, they
- // are not equal, since they are different objects and an object is not
- // equal to undefined.
- __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ and_(r0, r2, Operand(r3));
- __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ Ret();
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
- __ sub(mask, mask, Operand(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ add(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
- __ eor(scratch1, scratch1, Operand(scratch2));
- __ and_(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch1,
- number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- __ ldr(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ sub(scratch2, object, Operand(kHeapObjectTag));
- __ vldr(d0, scratch2, HeapNumber::kValueOffset);
- __ sub(probe, probe, Operand(kHeapObjectTag));
- __ vldr(d1, probe, HeapNumber::kValueOffset);
- __ VFPCompareAndSetFlags(d0, d1);
- __ b(ne, not_found); // The cache did not contain this value.
- __ b(&load_result_from_cache);
- } else {
- __ b(not_found);
- }
- }
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ and_(scratch, mask, Operand(object, ASR, 1));
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch,
- number_string_cache,
- Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- Register probe = mask;
- __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ cmp(object, probe);
- __ b(ne, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ ldr(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ ldr(r1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
- __ add(sp, sp, Operand(1 * kPointerSize));
- __ Ret();
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
- Register scratch,
- CompareIC::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareIC::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
- DONT_DO_SMI_CHECK);
- }
- // We could be strict about internalized/non-internalized here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
-
-// On entry r1 and r2 are the values to be compared.
-// On exit r0 is 0, positive or negative to indicate the result of
-// the comparison.
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
- Register lhs = r1;
- Register rhs = r0;
- Condition cc = GetCondition();
-
- Label miss;
- ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
-
- Label slow; // Call builtin.
- Label not_smis, both_loaded_as_doubles, lhs_not_nan;
-
- Label not_two_smis, smi_done;
- __ orr(r2, r1, r0);
- __ JumpIfNotSmi(r2, &not_two_smis);
- __ mov(r1, Operand(r1, ASR, 1));
- __ sub(r0, r1, Operand(r0, ASR, 1));
- __ Ret();
- __ bind(&not_two_smis);
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- {
- Label not_user_equal, user_equal;
- __ and_(r2, r1, Operand(r0));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &not_user_equal);
-
- __ CompareObjectType(r0, r2, r4, JS_OBJECT_TYPE);
- __ b(ne, &not_user_equal);
-
- __ CompareObjectType(r1, r3, r4, JS_OBJECT_TYPE);
- __ b(ne, &not_user_equal);
-
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
- __ b(eq, &user_equal);
-
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitField2Offset));
- __ and_(r3, r3, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r3, Operand(1 << Map::kUseUserObjectComparison));
- __ b(ne, &not_user_equal);
-
- __ bind(&user_equal);
-
- __ Push(r0, r1);
- __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
- __ bind(&not_user_equal);
- }
-
-
- // Handle the case where the objects are identical. Either returns the answer
- // or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc);
-
- // If either is a Smi (we know that not both are), then they can only
- // be strictly equal if the other is a HeapNumber.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, lhs, Operand(rhs));
- __ JumpIfNotSmi(r2, &not_smis);
- // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
- // 1) Return the answer.
- // 2) Go to slow.
- // 3) Fall through to both_loaded_as_doubles.
- // 4) Jump to lhs_not_nan.
- // In cases 3 and 4 we have found out we were dealing with a number-number
- // comparison. If VFP3 is supported the double values of the numbers have
- // been loaded into d7 and d6. Otherwise, the double values have been loaded
- // into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
-
- __ bind(&both_loaded_as_doubles);
- // The arguments have been converted to doubles and stored in d6 and d7, if
- // VFP3 is supported, or in r0, r1, r2, and r3.
- Isolate* isolate = masm->isolate();
- if (CpuFeatures::IsSupported(VFP2)) {
- __ bind(&lhs_not_nan);
- CpuFeatures::Scope scope(VFP2);
- Label no_nan;
- // ARMv7 VFP3 instructions to implement double precision comparison.
- __ VFPCompareAndSetFlags(d7, d6);
- Label nan;
- __ b(vs, &nan);
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
-
- __ bind(&nan);
- // If one of the sides was a NaN then the v flag is set. Load r0 with
- // whatever it takes to make the comparison fail, since comparisons with NaN
- // always fail.
- if (cc == lt || cc == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
- } else {
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc);
- // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
- // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc);
- }
-
- __ bind(&not_smis);
- // At this point we know we are dealing with two different objects,
- // and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict()) {
- // This returns non-equal for some object types, or falls through if it
- // was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
- }
-
- Label check_for_internalized_strings;
- Label flat_string_check;
- // Check for heap-number-heap-number comparison. Can jump to slow case,
- // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
- // that case. If the inputs are not doubles then jumps to
- // check_for_internalized_strings.
- // In this case r2 will contain the type of rhs_. Never falls through.
- EmitCheckForTwoHeapNumbers(masm,
- lhs,
- rhs,
- &both_loaded_as_doubles,
- &check_for_internalized_strings,
- &flat_string_check);
-
- __ bind(&check_for_internalized_strings);
- // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
- // internalized strings.
- if (cc == eq && !strict()) {
- // Returns an answer for two internalized strings or two detectable objects.
- // Otherwise jumps to string case or not both strings case.
- // Assumes that r2 is the type of rhs_ on entry.
- EmitCheckForInternalizedStringsOrObjects(
- masm, lhs, rhs, &flat_string_check, &slow);
- }
-
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
- __ bind(&flat_string_check);
-
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
-
- __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
- if (cc == eq) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs,
- rhs,
- r2,
- r3,
- r4);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs,
- rhs,
- r2,
- r3,
- r4,
- r5);
- }
- // Never falls through to here.
-
- __ bind(&slow);
-
- __ Push(lhs, rhs);
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- if (cc == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- native = Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc == lt || cc == le) {
- ncr = GREATER;
- } else {
- ASSERT(cc == gt || cc == ge); // remaining cases
- ncr = LESS;
- }
- __ mov(r0, Operand(Smi::FromInt(ncr)));
- __ push(r0);
- }
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-// The stub expects its argument in the tos_ register and returns its result in
-// it, too: zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- Label patch;
- const Register map = r9.is(tos_) ? r7 : r9;
- const Register temp = map;
-
- // undefined -> false.
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value.
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- __ tst(tos_, Operand(kSmiTagMask));
- // tos_ contains the correct return value already
- __ Ret(eq);
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(tos_, &patch);
- }
-
- if (types_.NeedsMap()) {
- __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- // Undetectable -> false.
- __ mov(tos_, Operand::Zero(), LeaveCC, ne);
- __ Ret(ne);
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // Spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
- // tos_ contains the correct non-zero return value already.
- __ Ret(ge);
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
- __ Ret(lt); // the string length is OK as the return value
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // Heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &not_heap_number);
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
-
- __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ VFPCompareAndSetFlags(d1, 0.0);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN
- } else {
- Label done, not_nan, not_zero;
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
- // -0 maps to false:
- __ bic(
- temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE32), SetCC);
- __ b(ne, &not_zero);
- // If exponent word is zero then the answer depends on the mantissa word.
- __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
- __ jmp(&done);
-
- // Check for NaN.
- __ bind(&not_zero);
- // We already zeroed the sign bit, now shift out the mantissa so we only
- // have the exponent left.
- __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
- unsigned int shifted_exponent_mask =
- HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
- __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE32));
- __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN.
-
- // Reload exponent word.
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
- __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE32));
- // If mantissa is not zero then we have a NaN, so return 0.
- __ mov(tos_, Operand::Zero(), LeaveCC, ne);
- __ b(ne, &done);
-
- // Load mantissa word.
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
- __ cmp(temp, Operand::Zero());
- // If mantissa is not zero then we have a NaN, so return 0.
- __ mov(tos_, Operand::Zero(), LeaveCC, ne);
- __ b(ne, &done);
-
- __ bind(&not_nan);
- __ mov(tos_, Operand(1, RelocInfo::NONE32));
- __ bind(&done);
- }
- __ Ret();
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
-
-
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- __ LoadRoot(ip, value);
- __ cmp(tos_, ip);
- // The value of a root is never NULL, so we can avoid loading a non-null
- // value into tos_ when we want to return 'true'.
- if (!result) {
- __ mov(tos_, Operand::Zero(), LeaveCC, eq);
- }
- __ Ret(eq);
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- if (!tos_.is(r3)) {
- __ mov(r3, Operand(tos_));
- }
- __ mov(r2, Operand(Smi::FromInt(tos_.code())));
- __ mov(r1, Operand(Smi::FromInt(types_.ToByte())));
- __ Push(r3, r2, r1);
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ stm(db_w, sp, kCallerSaved | lr.bit());
-
- const Register scratch = r1;
-
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP2);
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(scratch);
-
- __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
- for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
- }
- }
- const int argument_count = 1;
- const int fp_argument_count = 0;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ mov(r0, Operand(ExternalReference::isolate_address()));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- argument_count);
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP2);
-
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(scratch);
-
- for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
- }
- __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
- }
- __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
-}
-
-
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ mov(r3, Operand(r0)); // the operand
- __ mov(r2, Operand(Smi::FromInt(op_)));
- __ mov(r1, Operand(Smi::FromInt(mode_)));
- __ mov(r0, Operand(Smi::FromInt(operand_type_)));
- __ Push(r3, r2, r1, r0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow) {
- __ JumpIfNotSmi(r0, non_smi);
-
- // The result of negating zero or the smallest negative smi is not a smi.
- __ bic(ip, r0, Operand(0x80000000), SetCC);
- __ b(eq, slow);
-
- // Return '0 - value'.
- __ rsb(r0, r0, Operand::Zero());
- __ Ret();
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi) {
- __ JumpIfNotSmi(r0, non_smi);
-
- // Flip bits and revert inverted smi-tag.
- __ mvn(r0, Operand(r0));
- __ bic(r0, r0, Operand(kSmiTagMask));
- __ Ret();
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
- Label non_smi, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (mode_ == UNARY_OVERWRITE) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- } else {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r1, Operand(r0));
- __ pop(r0);
- }
-
- __ bind(&heapnumber_allocated);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
- }
- __ Ret();
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(
- MacroAssembler* masm, Label* slow) {
- Label impossible;
-
- EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
- // Convert the heap number is r0 to an untagged integer in r1.
- __ ConvertToInt32(r0, r1, r2, r3, d0, slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ mvn(r1, Operand(r1));
- __ add(r2, r1, Operand(0x40000000), SetCC);
- __ b(mi, &try_float);
-
- // Tag the result as a smi and we're done.
- __ mov(r0, Operand(r1, LSL, kSmiTagSize));
- __ Ret();
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (mode_ == UNARY_NO_OVERWRITE) {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- // Allocate a new heap number without zapping r0, which we need if it fails.
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r2, r0); // Move the new heap number into r2.
- // Get the heap number into r0, now that the new heap number is in r2.
- __ pop(r0);
- }
-
- // Convert the heap number in r0 to an untagged integer in r1.
- // This can't go slow-case because it's the same number we already
- // converted once again.
- __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
- __ mvn(r1, Operand(r1));
-
- __ bind(&heapnumber_allocated);
- __ mov(r0, r2); // Move newly allocated heap number to r0.
- }
-
- if (CpuFeatures::IsSupported(VFP2)) {
- // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
- CpuFeatures::Scope scope(VFP2);
- __ vmov(s0, r1);
- __ vcvt_f64_s32(d0, s0);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- }
-
- __ bind(&impossible);
- if (FLAG_debug_code) {
- __ stop("Incorrect assumption in bit-not stub");
- }
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the JavaScript builtin.
- __ push(r0);
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(VFP2);
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(r1, r0);
-
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ push(r2);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
-
- ASSERT(right.is(r0));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op) {
- case Token::ADD:
- __ add(right, left, Operand(right), SetCC); // Add optimistically.
- __ Ret(vc);
- __ sub(right, right, Operand(left)); // Revert optimistic add.
- break;
- case Token::SUB:
- __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
- __ Ret(vc);
- __ sub(right, left, Operand(right)); // Revert optimistic subtract.
- break;
- case Token::MUL:
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(ip, right);
- // Do multiplication
- // scratch1 = lower 32 bits of ip * left.
- // scratch2 = higher 32 bits of ip * left.
- __ smull(scratch1, scratch2, left, ip);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &not_smi_result);
- // Go slow on zero result to handle -0.
- __ cmp(scratch1, Operand::Zero());
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ Ret(ne);
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ Ret(pl); // Return smi 0 if the non-zero one was positive.
- // We fall through here if we multiplied a negative number with 0, because
- // that would mean we should produce -0.
- break;
- case Token::DIV: {
- Label div_with_sdiv;
-
- // Check for 0 divisor.
- __ cmp(right, Operand::Zero());
- __ b(eq, &not_smi_result);
-
- // Check for power of two on the right hand side.
- __ sub(scratch1, right, Operand(1));
- __ tst(scratch1, right);
- if (CpuFeatures::IsSupported(SUDIV)) {
- __ b(ne, &div_with_sdiv);
- // Check for no remainder.
- __ tst(left, scratch1);
- __ b(ne, &not_smi_result);
- // Check for positive left hand side.
- __ cmp(left, Operand::Zero());
- __ b(mi, &div_with_sdiv);
- } else {
- __ b(ne, &not_smi_result);
- // Check for positive and no remainder.
- __ orr(scratch2, scratch1, Operand(0x80000000u));
- __ tst(left, scratch2);
- __ b(ne, &not_smi_result);
- }
-
- // Perform division by shifting.
- __ CountLeadingZeros(scratch1, scratch1, scratch2);
- __ rsb(scratch1, scratch1, Operand(31));
- __ mov(right, Operand(left, LSR, scratch1));
- __ Ret();
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- Label result_not_zero;
-
- __ bind(&div_with_sdiv);
- // Do division.
- __ sdiv(scratch1, left, right);
- // Check that the remainder is zero.
- __ mls(scratch2, scratch1, right, left);
- __ cmp(scratch2, Operand::Zero());
- __ b(ne, &not_smi_result);
- // Check for negative zero result.
- __ cmp(scratch1, Operand::Zero());
- __ b(ne, &result_not_zero);
- __ cmp(right, Operand::Zero());
- __ b(lt, &not_smi_result);
- __ bind(&result_not_zero);
- // Check for the corner case of dividing the most negative smi by -1.
- __ cmp(scratch1, Operand(0x40000000));
- __ b(eq, &not_smi_result);
- // Tag and return the result.
- __ SmiTag(right, scratch1);
- __ Ret();
- }
- break;
- }
- case Token::MOD: {
- Label modulo_with_sdiv;
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- // Check for x % 0.
- __ cmp(right, Operand::Zero());
- __ b(eq, &not_smi_result);
-
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u));
- __ b(ne, &modulo_with_sdiv);
-
- // Check for power of two on the right hand side.
- __ sub(scratch1, right, Operand(1));
- __ tst(scratch1, right);
- __ b(ne, &modulo_with_sdiv);
- } else {
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u));
- __ b(ne, &not_smi_result);
-
- // Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
- }
-
- // Perform modulus by masking (scratch1 contains right - 1).
- __ and_(right, left, Operand(scratch1));
- __ Ret();
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- __ bind(&modulo_with_sdiv);
- __ mov(scratch2, right);
- // Perform modulus with sdiv and mls.
- __ sdiv(scratch1, left, right);
- __ mls(right, scratch1, right, left);
- // Return if the result is not 0.
- __ cmp(right, Operand::Zero());
- __ Ret(ne);
- // The result is 0, check for -0 case.
- __ cmp(left, Operand::Zero());
- __ Ret(pl);
- // This is a -0 case, restore the value of right.
- __ mov(right, scratch2);
- // We fall through here to not_smi_result to produce -0.
- }
- break;
- }
- case Token::BIT_OR:
- __ orr(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_AND:
- __ and_(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
- __ Ret();
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- // Smi tag result.
- __ bic(right, right, Operand(kSmiTagMask));
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, &not_smi_result);
- // Smi tag result.
- __ SmiTag(right, scratch1);
- __ Ret();
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- // Check that the signed result fits in a Smi.
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- __ b(mi, &not_smi_result);
- __ SmiTag(right, scratch1);
- __ Ret();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&not_smi_result);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required,
- Label* miss,
- Token::Value op,
- OverwriteMode mode) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
- Register scratch3 = r4;
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands) {
- __ AssertSmi(left);
- __ AssertSmi(right);
- }
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, miss);
- }
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, miss);
- }
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
- // depending on whether VFP3 is available or not.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP2) &&
- op != Token::MOD ?
- FloatingPointHelper::kVFPRegisters :
- FloatingPointHelper::kCoreRegisters;
-
- // Allocate new heap number for result.
- Register result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
-
- // Load the operands.
- if (smi_operands) {
- FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
- } else {
- // Load right operand to d7 or r2/r3.
- if (right_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, right, destination, d7, d8, r2, r3, heap_number_map,
- scratch1, scratch2, s0, miss);
- } else {
- Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, right, d7, r2, r3, heap_number_map,
- scratch1, scratch2, fail);
- }
- // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it
- // jumps to |miss|.
- if (left_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, left, destination, d6, d8, r0, r1, heap_number_map,
- scratch1, scratch2, s0, miss);
- } else {
- Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, left, d6, r0, r1, heap_number_map,
- scratch1, scratch2, fail);
- }
- }
-
- // Calculate the result.
- if (destination == FloatingPointHelper::kVFPRegisters) {
- // Using VFP registers:
- // d6: Left value
- // d7: Right value
- CpuFeatures::Scope scope(VFP2);
- switch (op) {
- case Token::ADD:
- __ vadd(d5, d6, d7);
- break;
- case Token::SUB:
- __ vsub(d5, d6, d7);
- break;
- case Token::MUL:
- __ vmul(d5, d6, d7);
- break;
- case Token::DIV:
- __ vdiv(d5, d6, d7);
- break;
- default:
- UNREACHABLE();
- }
-
- __ sub(r0, result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op,
- result,
- scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(r3, left);
- __ SmiUntag(r2, right);
- } else {
- // Convert operands to 32-bit integers. Right in r2 and left in r3.
- FloatingPointHelper::ConvertNumberToInt32(masm,
- left,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- not_numbers);
- FloatingPointHelper::ConvertNumberToInt32(masm,
- right,
- r2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- not_numbers);
- }
-
- Label result_not_a_smi;
- switch (op) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int so we go to slow case if we
- // hit this case.
- if (CpuFeatures::IsSupported(VFP2)) {
- __ b(mi, &result_not_a_smi);
- } else {
- __ b(mi, not_numbers);
- }
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check that the *signed* result fits in a smi.
- __ add(r3, r2, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- __ SmiTag(r0, r2);
- __ Ret();
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- Register result = r5;
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required,
- mode);
- }
-
- // r2: Answer as signed int32.
- // r5: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to r0, which is the
- // result.
- __ mov(r0, Operand(r5));
-
- if (CpuFeatures::IsSupported(VFP2)) {
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
- // mentioned above SHR needs to always produce a positive result.
- CpuFeatures::Scope scope(VFP2);
- __ vmov(s0, r2);
- if (op == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-// Generate the smi code. If the operation on smis are successful this return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- Token::Value op,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode) {
- Label not_smis;
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
-
- // Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(scratch1, &not_smis);
-
- // If the smi-smi operation results in a smi return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op);
-
- // If heap number results are possible generate the result in an allocated
- // heap number.
- if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
- BinaryOpStub_GenerateFPOperation(
- masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode);
- }
- __ bind(&not_smis);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label not_smis, call_runtime;
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
- mode_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
- DwVfpRegister double_scratch = d0;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
- // Both label are currently actually bound to the same position. We use two
- // different label to differentiate the cause leading to type transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ orr(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, &transition);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, &transition);
- }
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers r0 and r1 (right
- // and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
- ? FloatingPointHelper::kVFPRegisters
- : FloatingPointHelper::kCoreRegisters;
-
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- d7,
- d8,
- r2,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- d6,
- d8,
- r4,
- r5,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
-
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP2);
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ vadd(d5, d6, d7);
- break;
- case Token::SUB:
- __ vsub(d5, d6, d7);
- break;
- case Token::MUL:
- __ vmul(d5, d6, d7);
- break;
- case Token::DIV:
- __ vdiv(d5, d6, d7);
- break;
- default:
- UNREACHABLE();
- }
-
- if (op_ != Token::DIV) {
- // These operations produce an integer result.
- // Try to return a smi if we can.
- // Otherwise return a heap number if allowed, or jump to type
- // transition.
-
- __ EmitVFPTruncate(kRoundToZero,
- scratch1,
- d5,
- scratch2,
- d8);
-
- if (result_type_ <= BinaryOpIC::INT32) {
- // If the ne condition is set, result does
- // not fit in a 32-bit integer.
- __ b(ne, &transition);
- }
-
- // Check if the result fits in a smi.
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- // If not try to return a heap number.
- __ b(mi, &return_heap_number);
- // Check for minus zero. Return heap number for minus zero.
- Label not_zero;
- __ cmp(scratch1, Operand::Zero());
- __ b(ne, &not_zero);
- __ vmov(scratch2, d5.high());
- __ tst(scratch2, Operand(HeapNumber::kSignMask));
- __ b(ne, &return_heap_number);
- __ bind(&not_zero);
-
- // Tag the result and return.
- __ SmiTag(r0, scratch1);
- __ Ret();
- } else {
- // DIV just falls through to allocating a heap number.
- }
-
- __ bind(&return_heap_number);
- // Return a heap number, or fall through to type transition or runtime
- // call if we can't.
- if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::NUMBER
- : BinaryOpIC::INT32)) {
- // We are using vfp registers so r5 is available.
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
- }
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- // We preserved r0 and r1 to be able to call runtime.
- // Save the left value on the stack.
- __ Push(r5, r4);
-
- Label pop_and_call_runtime;
-
- // Allocate a heap number to store the result.
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime,
- mode_);
-
- // Load the left value from the value saved on the stack.
- __ Pop(r1, r0);
-
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(
- masm, op_, heap_number_result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-
- __ bind(&pop_and_call_runtime);
- __ Drop(2);
- __ b(&call_runtime);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- Register scratch3 = r5;
- // Convert operands to 32-bit integers. Right in r2 and left in r3. The
- // registers r0 and r1 (right and left) are preserved for the runtime
- // call.
- FloatingPointHelper::LoadNumberAsInt32(masm,
- left,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- d1,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32(masm,
- right,
- r2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- d1,
- &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (r2) is 0.
- // This result cannot be respresented as a signed 32-bit integer, try
- // to return a heap number if we can.
- // The non vfp2 code does not support this special case, so jump to
- // runtime if we don't support it.
- if (CpuFeatures::IsSupported(VFP2)) {
- __ b(mi, (result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number);
- } else {
- __ b(mi, (result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &call_runtime);
- }
- break;
- case Token::SHL:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi.
- __ add(scratch1, r2, Operand(0x40000000), SetCC);
- // If not try to return a heap number. (We know the result is an int32.)
- __ b(mi, &return_heap_number);
- // Tag the result and return.
- __ SmiTag(r0, r2);
- __ Ret();
-
- __ bind(&return_heap_number);
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_s32(double_scratch, double_scratch.low());
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_u32(double_scratch, double_scratch.low());
- }
-
- // Store the result.
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- __ mov(r0, r5);
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- // We never expect DIV to yield an integer result, so we always generate
- // type transition code for DIV operations expecting an integer result: the
- // code will fall through to this type transition.
- if (transition.is_linked() ||
- ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
- __ b(ne, &check);
- if (Token::IsBitOp(op_)) {
- __ mov(r1, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r1, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(ne, &done);
- if (Token::IsBitOp(op_)) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime, transition;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &transition, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime, transition;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
-
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = r1;
- Register right = r0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &left_not_string);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode) {
- // Code below will scratch result if allocation fails. To keep both arguments
- // intact for the runtime call result cannot be one of these.
- ASSERT(!result.is(r0) && !result.is(r1));
-
- if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
- Label skip_allocation, allocated;
- Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
- // If the overwritable operand is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
- // Allocate a heap number for the result.
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- __ b(&allocated);
- __ bind(&skip_allocation);
- // Use object holding the overwritable operand for result.
- __ mov(result, Operand(overwritable_operand));
- __ bind(&allocated);
- } else {
- ASSERT(mode == NO_OVERWRITE);
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ Push(r1, r0);
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Untagged case: double input in d2, double result goes
- // into d2.
- // Tagged case: tagged input on top of stack and in r0,
- // tagged result (heap number) goes into r0.
-
- Label input_not_smi;
- Label loaded;
- Label calculate;
- Label invalid_cache;
- const Register scratch0 = r9;
- const Register scratch1 = r7;
- const Register cache_entry = r0;
- const bool tagged = (argument_type_ == TAGGED);
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- if (tagged) {
- // Argument is a number and is on stack and in r0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(r0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into r2, r3.
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
- __ b(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(r0,
- r1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Load it to a double register and store the
- // low and high words into r2, r3.
- __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(r2, r3, d0);
- } else {
- // Input is untagged double in d2. Output goes to d2.
- __ vmov(r2, r3, d2);
- }
- __ bind(&loaded);
- // r2 = low 32 bits of double value
- // r3 = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ eor(r1, r2, Operand(r3));
- __ eor(r1, r1, Operand(r1, ASR, 16));
- __ eor(r1, r1, Operand(r1, ASR, 8));
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // r2 = low 32 bits of double value.
- // r3 = high 32 bits of double value.
- // r1 = TranscendentalCache::hash(double value).
- Isolate* isolate = masm->isolate();
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(isolate);
- __ mov(cache_entry, Operand(cache_array));
- // cache_entry points to cache array.
- int cache_array_index
- = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
- __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
- // r0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ cmp(cache_entry, Operand::Zero());
- __ b(eq, &invalid_cache);
-
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
-
- // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
- __ add(r1, r1, Operand(r1, LSL, 1));
- __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
- __ cmp(r2, r4);
- __ cmp(r3, r5, eq);
- __ b(ne, &calculate);
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into r0.
- __ pop();
- __ mov(r0, Operand(r6));
- } else {
- // Load result into d2.
- __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- }
- __ Ret();
- } // if (CpuFeatures::IsSupported(VFP3))
-
- __ bind(&calculate);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_miss(), 1, scratch0, scratch1);
- if (tagged) {
- __ bind(&invalid_cache);
- ExternalReference runtime_function =
- ExternalReference(RuntimeFunction(), masm->isolate());
- __ TailCallExternalReference(runtime_function, 1, 1);
- } else {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatures::Scope scope(VFP2);
-
- Label no_update;
- Label skip_cache;
-
- // Call C function to calculate the result and update the cache.
- // r0: precalculated cache entry address.
- // r2 and r3: parts of the double value.
- // Store r0, r2 and r3 on stack for later before calling C function.
- __ Push(r3, r2, cache_entry);
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(d2);
-
- // Try to update the cache. If we cannot allocate a
- // heap number, we return the result without updating.
- __ Pop(r3, r2, cache_entry);
- __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
- __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
- __ Ret();
-
- __ bind(&invalid_cache);
- // The cache is invalid. Call runtime which will recreate the
- // cache.
- __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
- __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ Ret();
-
- __ bind(&skip_cache);
- // Call C function to calculate the result and answer directly
- // without updating the cache.
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(d2);
- __ bind(&no_update);
-
- // We return the value in d2 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ mov(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-}
-
-
-void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
- Register scratch) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- Isolate* isolate = masm->isolate();
-
- __ push(lr);
- __ PrepareCallCFunction(0, 1, scratch);
- if (masm->use_eabi_hardfloat()) {
- __ vmov(d0, d2);
- } else {
- __ vmov(r0, r1, d2);
- }
- AllowExternalCallThatCantCauseGC scope(masm);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::COS:
- __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::TAN:
- __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::LOG:
- __ CallCFunction(ExternalReference::math_log_double_function(isolate),
- 0, 1);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- __ pop(lr);
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
-void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatures::Scope vfp2_scope(VFP2);
- const Register base = r1;
- const Register exponent = r2;
- const Register heapnumbermap = r5;
- const Register heapnumber = r0;
- const DwVfpRegister double_base = d1;
- const DwVfpRegister double_exponent = d2;
- const DwVfpRegister double_result = d3;
- const DwVfpRegister double_scratch = d0;
- const SwVfpRegister single_scratch = s0;
- const Register scratch = r9;
- const Register scratch2 = r7;
-
- Label call_runtime, done, int_exponent;
- if (exponent_type_ == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack to double registers.
- __ ldr(base, MemOperand(sp, 1 * kPointerSize));
- __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
-
- __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
-
- __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
- __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
- __ cmp(scratch, heapnumbermap);
- __ b(ne, &call_runtime);
-
- __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
- __ jmp(&unpack_exponent);
-
- __ bind(&base_is_smi);
- __ vmov(single_scratch, scratch);
- __ vcvt_f64_s32(double_base, single_scratch);
- __ bind(&unpack_exponent);
-
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
-
- __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
- __ cmp(scratch, heapnumbermap);
- __ b(ne, &call_runtime);
- __ vldr(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
- // Base is already in double_base.
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
-
- __ vldr(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type_ != INTEGER) {
- Label int_exponent_convert;
- // Detect integer exponents stored as double.
- __ vcvt_u32_f64(single_scratch, double_exponent);
- // We do not check for NaN or Infinity here because comparing numbers on
- // ARM correctly distinguishes NaNs. We end up calling the built-in.
- __ vcvt_f64_u32(double_scratch, single_scratch);
- __ VFPCompareAndSetFlags(double_scratch, double_exponent);
- __ b(eq, &int_exponent_convert);
-
- if (exponent_type_ == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label not_plus_half;
-
- // Test for 0.5.
- __ vmov(double_scratch, 0.5, scratch);
- __ VFPCompareAndSetFlags(double_exponent, double_scratch);
- __ b(ne, &not_plus_half);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY, scratch);
- __ VFPCompareAndSetFlags(double_base, double_scratch);
- __ vneg(double_result, double_scratch, eq);
- __ b(eq, &done);
-
- // Add +0 to convert -0 to +0.
- __ vadd(double_scratch, double_base, kDoubleRegZero);
- __ vsqrt(double_result, double_scratch);
- __ jmp(&done);
-
- __ bind(&not_plus_half);
- __ vmov(double_scratch, -0.5, scratch);
- __ VFPCompareAndSetFlags(double_exponent, double_scratch);
- __ b(ne, &call_runtime);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY, scratch);
- __ VFPCompareAndSetFlags(double_base, double_scratch);
- __ vmov(double_result, kDoubleRegZero, eq);
- __ b(eq, &done);
-
- // Add +0 to convert -0 to +0.
- __ vadd(double_scratch, double_base, kDoubleRegZero);
- __ vmov(double_result, 1.0, scratch);
- __ vsqrt(double_scratch, double_scratch);
- __ vdiv(double_result, double_result, double_scratch);
- __ jmp(&done);
- }
-
- __ push(lr);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0, 2);
- }
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
- __ jmp(&done);
-
- __ bind(&int_exponent_convert);
- __ vcvt_u32_f64(single_scratch, double_exponent);
- __ vmov(scratch, single_scratch);
- }
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
-
- // Get two copies of exponent in the registers scratch and exponent.
- if (exponent_type_ == INTEGER) {
- __ mov(scratch, exponent);
- } else {
- // Exponent has previously been stored into scratch as untagged integer.
- __ mov(exponent, scratch);
- }
- __ vmov(double_scratch, double_base); // Back up base.
- __ vmov(double_result, 1.0, scratch2);
-
- // Get absolute value of exponent.
- __ cmp(scratch, Operand::Zero());
- __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
- __ sub(scratch, scratch2, scratch, LeaveCC, mi);
-
- Label while_true;
- __ bind(&while_true);
- __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
- __ vmul(double_result, double_result, double_scratch, cs);
- __ vmul(double_scratch, double_scratch, double_scratch, ne);
- __ b(ne, &while_true);
-
- __ cmp(exponent, Operand::Zero());
- __ b(ge, &done);
- __ vmov(double_scratch, 1.0, scratch);
- __ vdiv(double_result, double_scratch, double_result);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ VFPCompareAndSetFlags(double_result, 0.0);
- __ b(ne, &done);
- // double_exponent may not containe the exponent value if the input was a
- // smi. We set it with exponent value before bailing out.
- __ vmov(single_scratch, exponent);
- __ vcvt_f64_s32(double_exponent, single_scratch);
-
- // Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
- if (exponent_type_ == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in exponent.
- __ bind(&done);
- __ AllocateHeapNumber(
- heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
- __ vstr(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- ASSERT(heapnumber.is(r0));
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
- __ Ret(2);
- } else {
- __ push(lr);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0, 2);
- }
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
-
- __ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
- __ Ret();
- }
-}
-
-
-bool CEntryStub::NeedsImmovableCode() {
- return true;
-}
-
-
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- CEntryStub::GenerateAheadOfTime(isolate);
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
-}
-
-
-void CodeStub::GenerateFPStubs(Isolate* isolate) {
- SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
- ? kSaveFPRegs
- : kDontSaveFPRegs;
- CEntryStub save_doubles(1, mode);
- StoreBufferOverflowStub stub(mode);
- // These stubs might already be in the snapshot, detect that and don't
- // regenerate, which would lead to code stub initialization state being messed
- // up.
- Code* save_doubles_code = NULL;
- Code* store_buffer_overflow_code = NULL;
- if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope2(VFP2);
- save_doubles_code = *save_doubles.GetCode(isolate);
- store_buffer_overflow_code = *stub.GetCode(isolate);
- } else {
- save_doubles_code = *save_doubles.GetCode(isolate);
- store_buffer_overflow_code = *stub.GetCode(isolate);
- }
- save_doubles_code->set_is_pregenerated(true);
- store_buffer_overflow_code->set_is_pregenerated(true);
- }
- ISOLATE->set_fp_stubs_generated(true);
-}
-
-
-void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode(isolate);
- code->set_is_pregenerated(true);
-}
-
-
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, value, Operand(0xf));
- __ cmp(scratch, Operand(0xf));
- __ b(eq, oom_label);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate) {
- // r0: result parameter for PerformGC, if any
- // r4: number of arguments including receiver (C callee-saved)
- // r5: pointer to builtin function (C callee-saved)
- // r6: pointer to the first argument (C callee-saved)
- Isolate* isolate = masm->isolate();
-
- if (do_gc) {
- // Passing r0.
- __ PrepareCallCFunction(1, 0, r1);
- __ CallCFunction(ExternalReference::perform_gc_function(isolate),
- 1, 0);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(isolate);
- if (always_allocate) {
- __ mov(r0, Operand(scope_depth));
- __ ldr(r1, MemOperand(r0));
- __ add(r1, r1, Operand(1));
- __ str(r1, MemOperand(r0));
- }
-
- // Call C built-in.
- // r0 = argc, r1 = argv
- __ mov(r0, Operand(r4));
- __ mov(r1, Operand(r6));
-
-#if defined(V8_HOST_ARCH_ARM)
- int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (FLAG_debug_code) {
- if (frame_alignment > kPointerSize) {
- Label alignment_as_expected;
- ASSERT(IsPowerOf2(frame_alignment));
- __ tst(sp, Operand(frame_alignment_mask));
- __ b(eq, &alignment_as_expected);
- // Don't use Check here, as it will call Runtime_Abort re-entering here.
- __ stop("Unexpected alignment");
- __ bind(&alignment_as_expected);
- }
- }
-#endif
-
- __ mov(r2, Operand(ExternalReference::isolate_address()));
-
- // To let the GC traverse the return address of the exit frames, we need to
- // know where the return address is. The CEntryStub is unmovable, so
- // we can store the address on the stack to be able to find it again and
- // we never have to restore it, because it will not change.
- // Compute the return address in lr to return to after the jump below. Pc is
- // already at '+ 8' from the current instruction but return is after three
- // instructions so add another 4 to pc to get the return address.
- {
- // Prevent literal pool emission before return address.
- Assembler::BlockConstPoolScope block_const_pool(masm);
- masm->add(lr, pc, Operand(4));
- __ str(lr, MemOperand(sp, 0));
- masm->Jump(r5);
- }
-
- if (always_allocate) {
- // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
- // though (contain the result).
- __ mov(r2, Operand(scope_depth));
- __ ldr(r3, MemOperand(r2));
- __ sub(r3, r3, Operand(1));
- __ str(r3, MemOperand(r2));
- }
-
- // check for failure result
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- // Lower 2 bits of r2 are 0 iff r0 has failure tag.
- __ add(r2, r0, Operand(1));
- __ tst(r2, Operand(kFailureTagMask));
- __ b(eq, &failure_returned);
-
- // Exit C frame and return.
- // r0:r1: result
- // sp: stack pointer
- // fp: frame pointer
- // Callee-saved register r4 still holds argc.
- __ LeaveExitFrame(save_doubles_, r4);
- __ mov(pc, lr);
-
- // check if we should retry or throw exception
- Label retry;
- __ bind(&failure_returned);
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ b(eq, &retry);
-
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- __ mov(r3, Operand(isolate->factory()->the_hole_value()));
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ ldr(r0, MemOperand(ip));
- __ str(r3, MemOperand(ip));
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ cmp(r0, Operand(isolate->factory()->termination_exception()));
- __ b(eq, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // Called from JavaScript; parameters are on stack as if calling JS function
- // r0: number of arguments including receiver
- // r1: pointer to builtin function
- // fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's sp after C call)
- // cp: current context (C callee-saved)
-
- // Result returned in r0 or r0+r1 by default.
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Compute the argv pointer in a callee-saved register.
- __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ sub(r6, r6, Operand(kPointerSize));
-
- // Enter the exit frame that transitions from JavaScript to C++.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_);
-
- // Set up argc and the builtin function in callee-saved registers.
- __ mov(r4, Operand(r0));
- __ mov(r5, Operand(r1));
-
- // r4: number of arguments (C callee-saved)
- // r5: pointer to builtin function (C callee-saved)
- // r6: pointer to first argument (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ mov(r0, Operand(false, RelocInfo::NONE32));
- __ mov(r2, Operand(external_caught));
- __ str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Label already_have_failure;
- JumpIfOOM(masm, r0, ip, &already_have_failure);
- Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
- __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ bind(&already_have_failure);
- __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ str(r0, MemOperand(r2));
- // Fall through to the next label.
-
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(r0);
-
- __ bind(&throw_normal_exception);
- __ Throw(r0);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // [sp+0]: argv
-
- Label invoke, handler_entry, exit;
-
- // Called from C, so do not pop argc and args on exit (preserve sp)
- // No need to save register-passed args
- // Save callee-saved registers (incl. cp and fp), sp, and lr
- __ stm(db_w, sp, kCalleeSaved | lr.bit());
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Save callee-saved vfp registers.
- __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
- // Set up the reserved register for 0.0.
- __ vmov(kDoubleRegZero, 0.0);
- }
-
- // Get address of argv, see stm above.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
-
- // Set up argv in r4.
- int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- if (CpuFeatures::IsSupported(VFP2)) {
- offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
- }
- __ ldr(r4, MemOperand(sp, offset_to_argv));
-
- // Push a frame with special values setup to mark it as an entry frame.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- Isolate* isolate = masm->isolate();
- __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ mov(r7, Operand(Smi::FromInt(marker)));
- __ mov(r6, Operand(Smi::FromInt(marker)));
- __ mov(r5,
- Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
- __ ldr(r5, MemOperand(r5));
- __ Push(r8, r7, r6, r5);
-
- // Set up frame pointer for the frame to be pushed.
- __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // If this is the outermost JS call, set js_entry_sp value.
- Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
- __ mov(r5, Operand(ExternalReference(js_entry_sp)));
- __ ldr(r6, MemOperand(r5));
- __ cmp(r6, Operand::Zero());
- __ b(ne, &non_outermost_js);
- __ str(fp, MemOperand(r5));
- __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- Label cont;
- __ b(&cont);
- __ bind(&non_outermost_js);
- __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
- __ bind(&cont);
- __ push(ip);
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
-
- // Block literal pool emission whilst taking the position of the handler
- // entry. This avoids making the assumption that literal pools are always
- // emitted after an instruction is emitted, rather than before.
- {
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- }
- __ str(r0, MemOperand(ip));
- __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
- __ b(&exit);
-
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
- __ bind(&invoke);
- // Must preserve r0-r4, r5-r7 are available.
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the bl(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Clear any pending exceptions.
- __ mov(r5, Operand(isolate->factory()->the_hole_value()));
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ str(r5, MemOperand(ip));
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Expected registers by Builtins::JSEntryTrampoline
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- if (is_construct) {
- ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate);
- __ mov(ip, Operand(construct_entry));
- } else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
- __ mov(ip, Operand(entry));
- }
- __ ldr(ip, MemOperand(ip)); // deref address
-
- // Branch and link to JSEntryTrampoline. We don't use the double underscore
- // macro for the add instruction because we don't want the coverage tool
- // inserting instructions here after we read the pc. We block literal pool
- // emission for the same reason.
- {
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ mov(lr, Operand(pc));
- masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
-
- // Unlink this frame from the handler chain.
- __ PopTryHandler();
-
- __ bind(&exit); // r0 holds result
- // Check if the current stack frame is marked as the outermost JS frame.
- Label non_outermost_js_2;
- __ pop(r5);
- __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- __ b(ne, &non_outermost_js_2);
- __ mov(r6, Operand::Zero());
- __ mov(r5, Operand(ExternalReference(js_entry_sp)));
- __ str(r6, MemOperand(r5));
- __ bind(&non_outermost_js_2);
-
- // Restore the top frame descriptors from the stack.
- __ pop(r3);
- __ mov(ip,
- Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
- __ str(r3, MemOperand(ip));
-
- // Reset the stack to the callee saved registers.
- __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // Restore callee-saved registers and return.
-#ifdef DEBUG
- if (FLAG_debug_code) {
- __ mov(lr, Operand(pc));
- }
-#endif
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Restore callee-saved vfp registers.
- __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
- }
-
- __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
-}
-
-
-// Uses registers r0 to r4.
-// Expected input (depending on whether args are in registers or on the stack):
-// * object: r0 or at sp + 1 * kPointerSize.
-// * function: r1 or at sp.
-//
-// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register r4.
-// (See LCodeGen::DoInstanceOfKnownGlobal)
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
- // ReturnTrueFalse is only implemented for inlined call sites.
- ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub:
- const Register object = r0; // Object (lhs).
- Register map = r3; // Map of the object.
- const Register function = r1; // Function (rhs).
- const Register prototype = r4; // Prototype of the function.
- const Register inline_site = r9;
- const Register scratch = r2;
-
- const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
-
- Label slow, loop, is_instance, is_not_instance, not_js_object;
-
- if (!HasArgsInRegisters()) {
- __ ldr(object, MemOperand(sp, 1 * kPointerSize));
- __ ldr(function, MemOperand(sp, 0));
- }
-
- // Check that the left hand is a JS object and load map.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
- Label miss;
- __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ b(ne, &miss);
- __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
- __ b(ne, &miss);
- __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&miss);
- }
-
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
-
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- } else {
- ASSERT(HasArgsInRegisters());
- // Patch the (relocated) inlined map check.
-
- // The offset was stored in r4 safepoint slot.
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
- __ LoadFromSafepointRegisterSlot(scratch, r4);
- __ sub(inline_site, lr, scratch);
- // Get the map location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ ldr(scratch, MemOperand(scratch));
- __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- }
-
- // Register mapping: r3 is object map and r4 is function prototype.
- // Get prototype of object into r2.
- __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
-
- // We don't need map any more. Use it as a scratch register.
- Register scratch2 = map;
- map = no_reg;
-
- // Loop through the prototype chain looking for the function prototype.
- __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
- __ bind(&loop);
- __ cmp(scratch, Operand(prototype));
- __ b(eq, &is_instance);
- __ cmp(scratch, scratch2);
- __ b(eq, &is_not_instance);
- __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Patch the call site to return true.
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ str(r0, MemOperand(scratch));
-
- if (!ReturnTrueFalseObject()) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- }
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Patch the call site to return false.
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ str(r0, MemOperand(scratch));
-
- if (!ReturnTrueFalseObject()) {
- __ mov(r0, Operand(Smi::FromInt(1)));
- }
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before null, smi and string value checks, check that the rhs is a function
- // as for a non-function rhs an exception needs to be thrown.
- __ JumpIfSmi(function, &slow);
- __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- // Null is not instance of anything.
- __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
- __ b(ne, &object_not_null);
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null);
- // Smi values are not instances of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi);
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- __ IsObjectJSStringType(object, scratch, &slow);
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- // Slow-case. Tail call builtin.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- if (HasArgsInRegisters()) {
- __ Push(r0, r1);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(r0, r1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- __ cmp(r0, Operand::Zero());
- __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
- __ Ret(HasArgsInRegisters() ? 0 : 2);
- }
-}
-
-
-void ArrayLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = r0;
- }
-
- StubCompiler::GenerateLoadArrayLength(masm, receiver, r3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string()));
- __ b(ne, &miss);
- receiver = r1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = r0;
- }
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = r0;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss,
- support_wrapper_);
-
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
- Label miss;
-
- Register receiver;
- Register value;
- if (kind() == Code::KEYED_STORE_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -----------------------------------
- __ cmp(r1, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r2;
- value = r0;
- } else {
- ASSERT(kind() == Code::STORE_IC);
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : key
- // -----------------------------------
- receiver = r1;
- value = r0;
- }
- Register scratch = r3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
- __ b(eq, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::GenerateStoreMiss(masm, kind());
-}
-
-
-Register InstanceofStub::left() { return r0; }
-
-
-Register InstanceofStub::right() { return r1; }
-
-
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, r0, reg_, inobject_, index_);
- __ Ret();
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The displacement is the offset of the last parameter (if any)
- // relative to the frame pointer.
- const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(r1, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register r0. Use unsigned comparison to get negative
- // check for free.
- __ cmp(r1, r0);
- __ b(hs, &slow);
-
- // Read the argument from the stack and return it.
- __ sub(r3, r0, r1);
- __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(r3, kDisplacement));
- __ Jump(lr);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(r1, r0);
- __ b(cs, &slow);
-
- // Read the argument from the adaptor frame and return it.
- __ sub(r3, r0, r1);
- __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(r3, kDisplacement));
- __ Jump(lr);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ push(r1);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
- __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &runtime);
-
- // Patch the arguments.length and the parameters pointer in the current frame.
- __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ str(r2, MemOperand(sp, 0 * kPointerSize));
- __ add(r3, r3, Operand(r2, LSL, 1));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 1 * kPointerSize));
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
- // Stack layout:
- // sp[0] : number of parameters (tagged)
- // sp[4] : address of receiver argument
- // sp[8] : function
- // Registers used over whole function:
- // r6 : allocated object (tagged)
- // r9 : mapped parameter count (tagged)
-
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
- // r1 = parameter count (tagged)
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
- __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor_frame);
-
- // No adaptor, parameter count = argument count.
- __ mov(r2, r1);
- __ b(&try_allocate);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ add(r3, r3, Operand(r2, LSL, 1));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 1 * kPointerSize));
-
- // r1 = parameter count (tagged)
- // r2 = argument count (tagged)
- // Compute the mapped parameter count = min(r1, r2) in r1.
- __ cmp(r1, Operand(r2));
- __ mov(r1, Operand(r2), LeaveCC, gt);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- __ cmp(r1, Operand(Smi::FromInt(0)));
- __ mov(r9, Operand::Zero(), LeaveCC, eq);
- __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
- __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
-
- // 2. Backing store.
- __ add(r9, r9, Operand(r2, LSL, 1));
- __ add(r9, r9, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);
-
- // r0 = address of new object(s) (tagged)
- // r2 = argument count (tagged)
- // Get the arguments boilerplate from the current native context into r4.
- const int kNormalOffset =
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
-
- __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
- __ cmp(r1, Operand::Zero());
- __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
- __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
-
- // r0 = address of new object (tagged)
- // r1 = mapped parameter count (tagged)
- // r2 = argument count (tagged)
- // r4 = address of boilerplate object (tagged)
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ ldr(r3, FieldMemOperand(r4, i));
- __ str(r3, FieldMemOperand(r0, i));
- }
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ str(r3, FieldMemOperand(r0, kCalleeOffset));
-
- // Use the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset = JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize;
- __ str(r2, FieldMemOperand(r0, kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, r4 will point there, otherwise
- // it will point to the backing store.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
-
- // r0 = address of new object (tagged)
- // r1 = mapped parameter count (tagged)
- // r2 = argument count (tagged)
- // r4 = address of parameter map or backing store (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ cmp(r1, Operand(Smi::FromInt(0)));
- // Move backing store address to r3, because it is
- // expected there when filling in the unmapped arguments.
- __ mov(r3, r4, LeaveCC, eq);
- __ b(eq, &skip_parameter_map);
-
- __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
- __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
- __ add(r6, r1, Operand(Smi::FromInt(2)));
- __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ add(r6, r4, Operand(r1, LSL, 1));
- __ add(r6, r6, Operand(kParameterMapHeaderSize));
- __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameter thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ mov(r6, r1);
- __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
- __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ sub(r9, r9, Operand(r1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
- __ add(r3, r4, Operand(r6, LSL, 1));
- __ add(r3, r3, Operand(kParameterMapHeaderSize));
-
- // r6 = loop variable (tagged)
- // r1 = mapping index (tagged)
- // r3 = address of backing store (tagged)
- // r4 = address of parameter map (tagged)
- // r5 = temporary scratch (a.o., for address calculation)
- // r7 = the hole value
- __ jmp(&parameters_test);
-
- __ bind(&parameters_loop);
- __ sub(r6, r6, Operand(Smi::FromInt(1)));
- __ mov(r5, Operand(r6, LSL, 1));
- __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ str(r9, MemOperand(r4, r5));
- __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ str(r7, MemOperand(r3, r5));
- __ add(r9, r9, Operand(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ cmp(r6, Operand(Smi::FromInt(0)));
- __ b(ne, &parameters_loop);
-
- __ bind(&skip_parameter_map);
- // r2 = argument count (tagged)
- // r3 = address of backing store (tagged)
- // r5 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
- __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
- __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
-
- Label arguments_loop, arguments_test;
- __ mov(r9, r1);
- __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
- __ sub(r4, r4, Operand(r9, LSL, 1));
- __ jmp(&arguments_test);
-
- __ bind(&arguments_loop);
- __ sub(r4, r4, Operand(kPointerSize));
- __ ldr(r6, MemOperand(r4, 0));
- __ add(r5, r3, Operand(r9, LSL, 1));
- __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
- __ add(r9, r9, Operand(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ cmp(r9, Operand(r2));
- __ b(lt, &arguments_loop);
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // r2 = argument count (tagged)
- __ bind(&runtime);
- __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor_frame);
-
- // Get the length from the frame.
- __ ldr(r1, MemOperand(sp, 0));
- __ b(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ str(r1, MemOperand(sp, 0));
- __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 1 * kPointerSize));
-
- // Try the new space allocation. Start out with computing the size
- // of the arguments object and the elements array in words.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ cmp(r1, Operand::Zero());
- __ b(eq, &add_arguments_object);
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
- __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ bind(&add_arguments_object);
- __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(r1,
- r0,
- r2,
- r3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT |
- SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current native context.
- __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
- __ ldr(r4, MemOperand(r4, Context::SlotOffset(
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
-
- // Copy the JS object part.
- __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
- __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
-
- // If there are no actual arguments, we're done.
- Label done;
- __ cmp(r1, Operand::Zero());
- __ b(eq, &done);
-
- // Get the parameters pointer from the stack.
- __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
- __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
- __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // Untag the length for the loop.
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
-
- // Copy the fixed array slots.
- Label loop;
- // Set up r4 to point to the first array slot.
- __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- // Pre-decrement r2 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
- // Post-increment r4 with kPointerSize on each iteration.
- __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
- __ sub(r1, r1, Operand(1));
- __ cmp(r1, Operand::Zero());
- __ b(ne, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time or if regexp entry in generated code is turned off runtime switch or
- // at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
-
- // Stack frame on entry.
- // sp[0]: last_match_info (expected JSArray)
- // sp[4]: previous index
- // sp[8]: subject string
- // sp[12]: JSRegExp object
-
- const int kLastMatchInfoOffset = 0 * kPointerSize;
- const int kPreviousIndexOffset = 1 * kPointerSize;
- const int kSubjectOffset = 2 * kPointerSize;
- const int kJSRegExpOffset = 3 * kPointerSize;
-
- Label runtime;
- // Allocation of registers for this function. These are in callee save
- // registers and will be preserved by the call to the native RegExp code, as
- // this code is called using the normal C calling convention. When calling
- // directly from generated code the native RegExp code will not do a GC and
- // therefore the content of these registers are safe to use after the call.
- Register subject = r4;
- Register regexp_data = r5;
- Register last_match_info_elements = r6;
-
- // Ensure that a RegExp stack is allocated.
- Isolate* isolate = masm->isolate();
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate);
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
- __ mov(r0, Operand(address_of_regexp_stack_memory_size));
- __ ldr(r0, MemOperand(r0, 0));
- __ cmp(r0, Operand::Zero());
- __ b(eq, &runtime);
-
- // Check that the first argument is a JSRegExp object.
- __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- __ b(ne, &runtime);
-
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ tst(regexp_data, Operand(kSmiTagMask));
- __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
- __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
- __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // regexp_data: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
- __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
- __ b(ne, &runtime);
-
- // regexp_data: RegExp data (FixedArray)
- // Check that the number of captures fit in the static offsets vector buffer.
- __ ldr(r2,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Check (number_of_captures + 1) * 2 <= offsets vector size
- // Or number_of_captures * 2 <= offsets vector size - 2
- // Multiplying by 2 comes for free since r2 is smi-tagged.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
- __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
- __ b(hi, &runtime);
-
- // Reset offset for possibly sliced string.
- __ mov(r9, Operand::Zero());
- __ ldr(subject, MemOperand(sp, kSubjectOffset));
- __ JumpIfSmi(subject, &runtime);
- __ mov(r3, subject); // Make a copy of the original subject string.
- __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // subject: subject string
- // r3: subject string
- // r0: subject string instance type
- // regexp_data: RegExp data (FixedArray)
- // Handle subject string according to its encoding and representation:
- // (1) Sequential string? If yes, go to (5).
- // (2) Anything but sequential or cons? If yes, go to (6).
- // (3) Cons string. If the string is flat, replace subject with first string.
- // Otherwise bailout.
- // (4) Is subject external? If yes, go to (7).
- // (5) Sequential string. Load regexp code according to encoding.
- // (E) Carry on.
- /// [...]
-
- // Deferred code at the end of the stub:
- // (6) Not a long external string? If yes, go to (8).
- // (7) External string. Make it, offset-wise, look like a sequential string.
- // Go to (5).
- // (8) Short external string or not a string? If yes, bail out to runtime.
- // (9) Sliced string. Replace subject with parent. Go to (4).
-
- Label seq_string /* 5 */, external_string /* 7 */,
- check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
- not_long_external /* 8 */;
-
- // (1) Sequential string? If yes, go to (5).
- __ and_(r1,
- r0,
- Operand(kIsNotStringMask |
- kStringRepresentationMask |
- kShortExternalStringMask),
- SetCC);
- STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ b(eq, &seq_string); // Go to (5).
-
- // (2) Anything but sequential or cons? If yes, go to (6).
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
- STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ cmp(r1, Operand(kExternalStringTag));
- __ b(ge, &not_seq_nor_cons); // Go to (6).
-
- // (3) Cons string. Check that it's flat.
- // Replace subject with first string and reload instance type.
- __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ CompareRoot(r0, Heap::kempty_stringRootIndex);
- __ b(ne, &runtime);
- __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
-
- // (4) Is subject external? If yes, go to (7).
- __ bind(&check_underlying);
- __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r0, Operand(kStringRepresentationMask));
- // The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
- __ b(ne, &external_string); // Go to (7).
-
- // (5) Sequential string. Load regexp code according to encoding.
- __ bind(&seq_string);
- // subject: sequential subject string (or look-alike, external string)
- // r3: original subject string
- // Load previous index and check range before r3 is overwritten. We have to
- // use r3 instead of subject here because subject might have been only made
- // to look like a sequential string when it actually is an external string.
- __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
- __ JumpIfNotSmi(r1, &runtime);
- __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
- __ cmp(r3, Operand(r1));
- __ b(ls, &runtime);
- __ mov(r1, Operand(r1, ASR, kSmiTagSize));
-
- STATIC_ASSERT(4 == kOneByteStringTag);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ and_(r0, r0, Operand(kStringEncodingMask));
- __ mov(r3, Operand(r0, ASR, 2), SetCC);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
-
- // (E) Carry on. String handling is done.
- // r7: irregexp code
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // a smi (code flushing support).
- __ JumpIfSmi(r7, &runtime);
-
- // r1: previous index
- // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r7: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
-
- // Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 9;
- const int kParameterRegisters = 4;
- __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
-
- // Stack pointer now points to cell where return address is to be written.
- // Arguments are before that on the stack or in registers.
-
- // Argument 9 (sp[20]): Pass current isolate address.
- __ mov(r0, Operand(ExternalReference::isolate_address()));
- __ str(r0, MemOperand(sp, 5 * kPointerSize));
-
- // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
- __ mov(r0, Operand(1));
- __ str(r0, MemOperand(sp, 4 * kPointerSize));
-
- // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
- __ mov(r0, Operand(address_of_regexp_stack_memory_address));
- __ ldr(r0, MemOperand(r0, 0));
- __ mov(r2, Operand(address_of_regexp_stack_memory_size));
- __ ldr(r2, MemOperand(r2, 0));
- __ add(r0, r0, Operand(r2));
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(r0, Operand::Zero());
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
-
- // Argument 5 (sp[4]): static offsets vector buffer.
- __ mov(r0,
- Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
- __ str(r0, MemOperand(sp, 1 * kPointerSize));
-
- // For arguments 4 and 3 get string length, calculate start of string data and
- // calculate the shift of the index (0 for ASCII and 1 for two byte).
- __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ eor(r3, r3, Operand(1));
- // Load the length from the original subject string from the previous stack
- // frame. Therefore we have to use fp, which points exactly to two pointer
- // sizes below the previous sp. (Because creating a new stack frame pushes
- // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
- __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
- // If slice offset is not 0, load the length from the original sliced string.
- // Argument 4, r3: End of string data
- // Argument 3, r2: Start of string data
- // Prepare start and end index of the input.
- __ add(r9, r8, Operand(r9, LSL, r3));
- __ add(r2, r9, Operand(r1, LSL, r3));
-
- __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
- __ mov(r8, Operand(r8, ASR, kSmiTagSize));
- __ add(r3, r9, Operand(r8, LSL, r3));
-
- // Argument 2 (r1): Previous index.
- // Already there
-
- // Argument 1 (r0): Subject string.
- __ mov(r0, subject);
-
- // Locate the code entry and call it.
- __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- DirectCEntryStub stub;
- stub.GenerateCall(masm, r7);
-
- __ LeaveExitFrame(false, no_reg);
-
- // r0: result
- // subject: subject string (callee saved)
- // regexp_data: RegExp data (callee saved)
- // last_match_info_elements: Last match info elements (callee saved)
- // Check the result.
- Label success;
- __ cmp(r0, Operand(1));
- // We expect exactly one result since we force the called regexp to behave
- // as non-global.
- __ b(eq, &success);
- Label failure;
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
- __ b(eq, &failure);
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
- // If not exception it can only be retry. Handle that in the runtime system.
- __ b(ne, &runtime);
- // Result must now be exception. If there is no pending exception already a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // haven't created the exception yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r1, Operand(isolate->factory()->the_hole_value()));
- __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ ldr(r0, MemOperand(r2, 0));
- __ cmp(r0, r1);
- __ b(eq, &runtime);
-
- __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
-
- // Check if the exception is a termination. If so, throw as uncatchable.
- __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
-
- Label termination_exception;
- __ b(eq, &termination_exception);
-
- __ Throw(r0);
-
- __ bind(&termination_exception);
- __ ThrowUncatchable(r0);
-
- __ bind(&failure);
- // For failure and exception return null.
- __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- // Process the result from the native regexp code.
- __ bind(&success);
- __ ldr(r1,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- // Multiplying by 2 comes for free since r1 is smi-tagged.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(r1, r1, Operand(2)); // r1 was a smi.
-
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
- __ b(ne, &runtime);
- // Check that the JSArray is in fast case.
- __ ldr(last_match_info_elements,
- FieldMemOperand(r0, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ ldr(r0,
- FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
- __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
- __ b(gt, &runtime);
-
- // r1: number of capture registers
- // r4: subject string
- // Store the capture count.
- __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
- __ str(r2, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset));
- // Store last subject and last input.
- __ str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset));
- __ mov(r2, subject);
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset,
- subject,
- r7,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- __ mov(subject, r2);
- __ str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset));
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastInputOffset,
- subject,
- r7,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate);
- __ mov(r2, Operand(address_of_static_offsets_vector));
-
- // r1: number of capture registers
- // r2: offsets vector
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
- __ add(r0,
- last_match_info_elements,
- Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
- __ bind(&next_capture);
- __ sub(r1, r1, Operand(1), SetCC);
- __ b(mi, &done);
- // Read the value from the static offsets vector buffer.
- __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
- // Store the smi value in the last match info.
- __ mov(r3, Operand(r3, LSL, kSmiTagSize));
- __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-
- // Deferred code for string handling.
- // (6) Not a long external string? If yes, go to (8).
- __ bind(&not_seq_nor_cons);
- // Compare flags are still set.
- __ b(gt, &not_long_external); // Go to (8).
-
- // (7) External string. Make it, offset-wise, look like a sequential string.
- __ bind(&external_string);
- __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ tst(r0, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found");
- }
- __ ldr(subject,
- FieldMemOperand(subject, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ sub(subject,
- subject,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ jmp(&seq_string); // Go to (5).
-
- // (8) Short external string or not a string? If yes, bail out to runtime.
- __ bind(&not_long_external);
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
- __ b(ne, &runtime);
-
- // (9) Sliced string. Replace subject with parent. Go to (4).
- // Load offset into r9 and replace subject string with parent.
- __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ mov(r9, Operand(r9, ASR, kSmiTagSize));
- __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
- __ jmp(&check_underlying); // Go to (4).
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- Factory* factory = masm->isolate()->factory();
-
- __ ldr(r1, MemOperand(sp, kPointerSize * 2));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ JumpIfNotSmi(r1, &slowcase);
- __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
- __ b(hi, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- // Allocate RegExpResult followed by FixedArray with size in ebx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- // Size of JSArray with two in-object properties and the header of a
- // FixedArray.
- int objects_size =
- (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
- __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
- __ add(r2, r5, Operand(objects_size));
- __ AllocateInNewSpace(
- r2, // In: Size, in words.
- r0, // Out: Start of allocation (tagged).
- r3, // Scratch register.
- r4, // Scratch register.
- &slowcase,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
- // r0: Start of allocated area, object-tagged.
- // r1: Number of elements in array, as smi.
- // r5: Number of elements, untagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ add(r3, r0, Operand(JSRegExpResult::kSize));
- __ mov(r4, Operand(factory->empty_fixed_array()));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
- __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Set input, index and length fields from arguments.
- __ ldr(r1, MemOperand(sp, kPointerSize * 0));
- __ ldr(r2, MemOperand(sp, kPointerSize * 1));
- __ ldr(r6, MemOperand(sp, kPointerSize * 2));
- __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
- __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
- __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
-
- // Fill out the elements FixedArray.
- // r0: JSArray, tagged.
- // r3: FixedArray, tagged.
- // r5: Number of elements in array, untagged.
-
- // Set map.
- __ mov(r2, Operand(factory->fixed_array_map()));
- __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- // Set FixedArray length.
- __ mov(r6, Operand(r5, LSL, kSmiTagSize));
- __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with undefined.
- // r0: JSArray, tagged.
- // r2: undefined.
- // r3: Start of elements in FixedArray.
- // r5: Number of elements to fill.
- Label loop;
- __ cmp(r5, Operand::Zero());
- __ bind(&loop);
- __ b(le, &done); // Jump if r5 is negative or zero.
- __ sub(r5, r5, Operand(1), SetCC);
- __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
- __ jmp(&loop);
-
- __ bind(&done);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // r1 : the function to call
- // r2 : cache cell for call target
- ASSERT(!FLAG_optimize_constructed_arrays);
- Label done;
-
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
-
- // Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmp(r3, r1);
- __ b(eq, &done);
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &done);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
- __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne);
-
- // An uninitialized cache is patched with the function.
- __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
-
-
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // r1 : the function to call
- // r2 : cache cell for call target
- ASSERT(FLAG_optimize_constructed_arrays);
- Label initialize, done, miss, megamorphic, not_array_function;
-
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
-
- // Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmp(r3, r1);
- __ b(eq, &done);
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &done);
-
- // Special handling of the Array() function, which caches not only the
- // monomorphic Array function but the initial ElementsKind with special
- // sentinels
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- LAST_FAST_ELEMENTS_KIND);
- __ cmp(r3, Operand(terminal_kind_sentinel));
- __ b(ne, &miss);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
- __ b(ne, &megamorphic);
- __ jmp(&done);
-
- __ bind(&miss);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- __ b(eq, &initialize);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ bind(&megamorphic);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
- __ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
- __ b(ne, &not_array_function);
-
- // The target function is the Array constructor, install a sentinel value in
- // the constructor's type info cell that will track the initial ElementsKind
- // that should be used for the array when its constructed.
- Handle<Object> initial_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- GetInitialFastElementsKind());
- __ mov(r3, Operand(initial_kind_sentinel));
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- __ b(&done);
-
- __ bind(&not_array_function);
- __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // r1 : the function to call
- // r2 : cache cell for call target
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
- // Call as function is indicated with the hole.
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- __ b(ne, &call);
- // Patch the receiver on the stack with the global receiver object.
- __ ldr(r3,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc_ * kPointerSize));
- __ bind(&call);
- }
-
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(r1, &non_function);
- // Get the map of the function object.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Fast-case: Invoke the function now.
- // r1: pushed function
- ParameterCount actual(argc_);
-
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- __ b(eq, &call_as_function);
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- }
- // Check for function proxy.
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(ne, &non_function);
- __ push(r1); // put proxy as additional argument
- __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
- __ mov(r2, Operand::Zero());
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(r5, CALL_AS_METHOD);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(&non_function);
- __ str(r1, MemOperand(sp, argc_ * kPointerSize));
- __ mov(r0, Operand(argc_)); // Set up the number of arguments.
- __ mov(r2, Operand::Zero());
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-void CallConstructStub::Generate(MacroAssembler* masm) {
- // r0 : number of arguments
- // r1 : the function to call
- // r2 : cache cell for call target
- Label slow, non_function_call;
-
- // Check that the function is not a smi.
- __ JumpIfSmi(r1, &non_function_call);
- // Check that the function is a JSFunction.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Jump to the function-specific construct stub.
- Register jmp_reg = FLAG_optimize_constructed_arrays ? r3 : r2;
- __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // r0: number of arguments
- // r1: called object
- // r3: object type
- Label do_call;
- __ bind(&slow);
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(ne, &non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand::Zero());
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-// StringCharCodeAtGenerator
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
- Label sliced_string;
-
- // If the receiver is a smi trigger the non-string case.
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ tst(result_, Operand(kIsNotStringMask));
- __ b(ne, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- __ JumpIfNotSmi(index_, &index_not_smi_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
- __ cmp(ip, Operand(index_));
- __ b(ls, index_out_of_range_);
-
- __ mov(index_, Operand(index_, ASR, kSmiTagSize));
-
- StringCharLoadGenerator::Generate(masm,
- object_,
- index_,
- result_,
- &call_runtime_);
-
- __ mov(result_, Operand(result_, LSL, kSmiTagSize));
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- result_,
- Heap::kHeapNumberMapRootIndex,
- index_not_number_,
- DONT_DO_SMI_CHECK);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ Move(index_, r0);
- __ pop(object_);
- // Reload the instance type.
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(index_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code of getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ mov(index_, Operand(index_, LSL, kSmiTagSize));
- __ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- __ Move(result_, r0);
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
- __ tst(code_,
- Operand(kSmiTagMask |
- ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
- __ b(ne, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ASCII char code.
- STATIC_ASSERT(kSmiTag == 0);
- __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
- __ b(eq, &slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- __ Move(result_, r0);
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- Label done;
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (!ascii) {
- __ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand::Zero());
- }
- __ b(eq, &done);
-
- __ bind(&loop);
- __ ldrb(scratch, MemOperand(src, 1, PostIndex));
- // Perform sub between load and dependent store to get the load time to
- // complete.
- __ sub(count, count, Operand(1), SetCC);
- __ strb(scratch, MemOperand(dest, 1, PostIndex));
- // last iteration.
- __ b(gt, &loop);
-
- __ bind(&done);
-}
-
-
-enum CopyCharactersFlags {
- COPY_ASCII = 1,
- DEST_ALWAYS_ALIGNED = 2
-};
-
-
-void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags) {
- bool ascii = (flags & COPY_ASCII) != 0;
- bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
-
- if (dest_always_aligned && FLAG_debug_code) {
- // Check that destination is actually word aligned if the flag says
- // that it is.
- __ tst(dest, Operand(kPointerAlignmentMask));
- __ Check(eq, "Destination of copy not aligned.");
- }
-
- const int kReadAlignment = 4;
- const int kReadAlignmentMask = kReadAlignment - 1;
- // Ensure that reading an entire aligned word containing the last character
- // of a string will not read outside the allocated area (because we pad up
- // to kObjectAlignment).
- STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
- // Assumes word reads and writes are little endian.
- // Nothing to do for zero characters.
- Label done;
- if (!ascii) {
- __ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand::Zero());
- }
- __ b(eq, &done);
-
- // Assume that you cannot read (or write) unaligned.
- Label byte_loop;
- // Must copy at least eight bytes, otherwise just do it one byte at a time.
- __ cmp(count, Operand(8));
- __ add(count, dest, Operand(count));
- Register limit = count; // Read until src equals this.
- __ b(lt, &byte_loop);
-
- if (!dest_always_aligned) {
- // Align dest by byte copying. Copies between zero and three bytes.
- __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
- Label dest_aligned;
- __ b(eq, &dest_aligned);
- __ cmp(scratch4, Operand(2));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
- __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
- __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
- __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
- __ bind(&dest_aligned);
- }
-
- Label simple_loop;
-
- __ sub(scratch4, dest, Operand(src));
- __ and_(scratch4, scratch4, Operand(0x03), SetCC);
- __ b(eq, &simple_loop);
- // Shift register is number of bits in a source word that
- // must be combined with bits in the next source word in order
- // to create a destination word.
-
- // Complex loop for src/dst that are not aligned the same way.
- {
- Label loop;
- __ mov(scratch4, Operand(scratch4, LSL, 3));
- Register left_shift = scratch4;
- __ and_(src, src, Operand(~3)); // Round down to load previous word.
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
- // Store the "shift" most significant bits of scratch in the least
- // signficant bits (i.e., shift down by (32-shift)).
- __ rsb(scratch2, left_shift, Operand(32));
- Register right_shift = scratch2;
- __ mov(scratch1, Operand(scratch1, LSR, right_shift));
-
- __ bind(&loop);
- __ ldr(scratch3, MemOperand(src, 4, PostIndex));
- __ sub(scratch5, limit, Operand(dest));
- __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
- __ mov(scratch1, Operand(scratch3, LSR, right_shift));
- // Loop if four or more bytes left to copy.
- // Compare to eight, because we did the subtract before increasing dst.
- __ sub(scratch5, scratch5, Operand(8), SetCC);
- __ b(ge, &loop);
- }
- // There is now between zero and three bytes left to copy (negative that
- // number is in scratch5), and between one and three bytes already read into
- // scratch1 (eight times that number in scratch4). We may have read past
- // the end of the string, but because objects are aligned, we have not read
- // past the end of the object.
- // Find the minimum of remaining characters to move and preloaded characters
- // and write those as bytes.
- __ add(scratch5, scratch5, Operand(4), SetCC);
- __ b(eq, &done);
- __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
- // Move minimum of bytes read and bytes left to copy to scratch4.
- __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
- // Between one and three (value in scratch5) characters already read into
- // scratch ready to write.
- __ cmp(scratch5, Operand(2));
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
- // Copy any remaining bytes.
- __ b(&byte_loop);
-
- // Simple loop.
- // Copy words from src to dst, until less than four bytes left.
- // Both src and dest are word aligned.
- __ bind(&simple_loop);
- {
- Label loop;
- __ bind(&loop);
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
- __ sub(scratch3, limit, Operand(dest));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
- // Compare to 8, not 4, because we do the substraction before increasing
- // dest.
- __ cmp(scratch3, Operand(8));
- __ b(ge, &loop);
- }
-
- // Copy bytes from src to dst until dst hits limit.
- __ bind(&byte_loop);
- __ cmp(dest, Operand(limit));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
- __ b(ge, &done);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ b(&byte_loop);
-
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits as such strings has a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ sub(scratch, c1, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
- __ b(hi, &not_array_index);
- __ sub(scratch, c2, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
-
- // If check failed combine both characters into single halfword.
- // This is required by the contract of the method: code at the
- // not_found branch expects this combination in c1 register
- __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
- __ b(ls, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- StringHelper::GenerateHashInit(masm, hash, c1);
- StringHelper::GenerateHashAddCharacter(masm, hash, c2);
- StringHelper::GenerateHashGetHash(masm, hash);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load string table
- // Load address of first element of the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ ldr(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
- __ mov(mask, Operand(mask, ASR, 1));
- __ sub(mask, mask, Operand(1));
-
- // Calculate untagged address of the first element of the string table.
- Register first_string_table_element = string_table;
- __ add(first_string_table_element, string_table,
- Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // mask: capacity mask
- // first_string_table_element: address of the first element of
- // the string table
- // undefined: the undefined object
- // scratch: -
-
- // Perform a number of probes in the string table.
- const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch5; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- if (i > 0) {
- __ add(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
- } else {
- __ mov(candidate, hash);
- }
-
- __ and_(candidate, candidate, Operand(mask));
-
- // Load the entry from the symble table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ ldr(candidate,
- MemOperand(first_string_table_element,
- candidate,
- LSL,
- kPointerSizeLog2));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
- __ b(ne, &is_string);
-
- __ cmp(undefined, candidate);
- __ b(eq, not_found);
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(ip, candidate);
- __ Assert(eq, "oddball in string table is not undefined or the hole");
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // Check that the candidate is a non-external ASCII string. The instance
- // type is still in the scratch register from the CompareObjectType
- // operation.
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ cmp(scratch, Operand(Smi::FromInt(2)));
- __ b(ne, &next_probe[i]);
-
- // Check if the two characters match.
- // Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
- __ cmp(chars, scratch);
- __ b(eq, &found_in_string_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- __ Move(r0, result);
-}
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash = character + (character << 10);
- __ LoadRoot(hash, Heap::kHashSeedRootIndex);
- // Untag smi seed and add the character.
- __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
- // hash += hash << 10;
- __ add(hash, hash, Operand(hash, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash += character;
- __ add(hash, hash, Operand(character));
- // hash += hash << 10;
- __ add(hash, hash, Operand(hash, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
- // hash += hash << 3;
- __ add(hash, hash, Operand(hash, LSL, 3));
- // hash ^= hash >> 11;
- __ eor(hash, hash, Operand(hash, LSR, 11));
- // hash += hash << 15;
- __ add(hash, hash, Operand(hash, LSL, 15));
-
- __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
-
- // if (hash == 0) hash = 27;
- __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
-}
-
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // lr: return address
- // sp[0]: to
- // sp[4]: from
- // sp[8]: string
-
- // This stub is called from the native-call %_SubString(...), so
- // nothing can be assumed about the arguments. It is tested that:
- // "string" is a sequential string,
- // both "from" and "to" are smis, and
- // 0 <= from <= to <= string.length.
- // If any of these assumptions fail, we call the runtime system.
-
- const int kToOffset = 0 * kPointerSize;
- const int kFromOffset = 1 * kPointerSize;
- const int kStringOffset = 2 * kPointerSize;
-
- __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
- STATIC_ASSERT(kFromOffset == kToOffset + 4);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-
- // Arithmetic shift right by one un-smi-tags. In this case we rotate right
- // instead because we bail out on non-smi values: ROR and ASR are equivalent
- // for smis but they set the flags in a way that's easier to optimize.
- __ mov(r2, Operand(r2, ROR, 1), SetCC);
- __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
- // If either to or from had the smi tag bit set, then C is set now, and N
- // has the same value: we rotated by 1, so the bottom bit is now the top bit.
- // We want to bailout to runtime here if From is negative. In that case, the
- // next instruction is not executed and we fall through to bailing out to
- // runtime.
- // Executed if both r2 and r3 are untagged integers.
- __ sub(r2, r2, Operand(r3), SetCC, cc);
- // One of the above un-smis or the above SUB could have set N==1.
- __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to.
-
- // Make sure first argument is a string.
- __ ldr(r0, MemOperand(sp, kStringOffset));
- STATIC_ASSERT(kSmiTag == 0);
- // Do a JumpIfSmi, but fold its jump into the subsequent string test.
- __ tst(r0, Operand(kSmiTagMask));
- Condition is_string = masm->IsObjectStringType(r0, r1, ne);
- ASSERT(is_string == eq);
- __ b(NegateCondition(is_string), &runtime);
-
- Label single_char;
- __ cmp(r2, Operand(1));
- __ b(eq, &single_char);
-
- // Short-cut for the case of trivial substring.
- Label return_r0;
- // r0: original string
- // r2: result string length
- __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
- __ cmp(r2, Operand(r4, ASR, 1));
- // Return original string.
- __ b(eq, &return_r0);
- // Longer than original string's length or negative: unsafe arguments.
- __ b(hi, &runtime);
- // Shorter than original string's length: an actual substring.
-
- // Deal with different string types: update the index if necessary
- // and put the underlying string into r5.
- // r0: original string
- // r1: instance type
- // r2: length
- // r3: from index (untagged)
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ tst(r1, Operand(kIsIndirectStringMask));
- __ b(eq, &seq_or_external_string);
-
- __ tst(r1, Operand(kSlicedNotConsMask));
- __ b(ne, &sliced_string);
- // Cons string. Check whether it is flat, then fetch first part.
- __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
- __ CompareRoot(r5, Heap::kempty_stringRootIndex);
- __ b(ne, &runtime);
- __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
- // Update instance type.
- __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
- __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
- __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
- // Update instance type.
- __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the expected register.
- __ mov(r5, r0);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // r5: underlying subject string
- // r1: instance type of underlying subject string
- // r2: length
- // r3: adjusted start index (untagged)
- __ cmp(r2, Operand(SlicedString::kMinLength));
- // Short slice. Copy instead of slicing.
- __ b(lt, &copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ tst(r1, Operand(kStringEncodingMask));
- __ b(eq, &two_byte_slice);
- __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
- __ jmp(&set_slice_header);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
- __ bind(&set_slice_header);
- __ mov(r3, Operand(r3, LSL, 1));
- __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
- __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
- __ jmp(&return_r0);
-
- __ bind(&copy_routine);
- }
-
- // r5: underlying subject string
- // r1: instance type of underlying subject string
- // r2: length
- // r3: adjusted start index (untagged)
- Label two_byte_sequential, sequential_string, allocate_result;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r1, Operand(kExternalStringTag));
- __ b(eq, &sequential_string);
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ tst(r1, Operand(kShortExternalStringTag));
- __ b(ne, &runtime);
- __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
- // r5 already points to the first character of underlying string.
- __ jmp(&allocate_result);
-
- __ bind(&sequential_string);
- // Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&allocate_result);
- // Sequential acii string. Allocate the result.
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ tst(r1, Operand(kStringEncodingMask));
- __ b(eq, &two_byte_sequential);
-
- // Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
-
- // Locate first character of substring to copy.
- __ add(r5, r5, r3);
- // Locate first character of result.
- __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // r0: result string
- // r1: first character of result string
- // r2: result string length
- // r5: first character of substring to copy
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
- COPY_ASCII | DEST_ALWAYS_ALIGNED);
- __ jmp(&return_r0);
-
- // Allocate and copy the resulting two-byte string.
- __ bind(&two_byte_sequential);
- __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
-
- // Locate first character of substring to copy.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ add(r5, r5, Operand(r3, LSL, 1));
- // Locate first character of result.
- __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // r0: result string.
- // r1: first character of result.
- // r2: result length.
- // r5: first character of substring to copy.
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(
- masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
-
- __ bind(&return_r0);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
- __ Drop(3);
- __ Ret();
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-
- __ bind(&single_char);
- // r0: original string
- // r1: instance type
- // r2: length
- // r3: from index (untagged)
- __ SmiTag(r3, r3);
- StringCharAtGenerator generator(
- r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- __ Drop(3);
- __ Ret();
- generator.SkipSlow(masm, &runtime);
-}
-
-
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- Register length = scratch1;
-
- // Compare lengths.
- Label strings_not_equal, check_zero_length;
- __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
- __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ cmp(length, scratch2);
- __ b(eq, &check_zero_length);
- __ bind(&strings_not_equal);
- __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
- __ Ret();
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(length, Operand::Zero());
- __ b(ne, &compare_chars);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
-
- // Compare characters.
- __ bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm,
- left, right, length, scratch2, scratch3,
- &strings_not_equal);
-
- // Characters are equal.
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- Label result_not_equal, compare_lengths;
- // Find minimum length and length difference.
- __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
- Register length_delta = scratch3;
- __ mov(scratch1, scratch2, LeaveCC, gt);
- Register min_length = scratch1;
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(min_length, Operand::Zero());
- __ b(eq, &compare_lengths);
-
- // Compare loop.
- GenerateAsciiCharsCompareLoop(masm,
- left, right, min_length, scratch2, scratch4,
- &result_not_equal);
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // Use length_delta as result if it's zero.
- __ mov(r0, Operand(length_delta), SetCC);
- __ bind(&result_not_equal);
- // Conditionally update the result based either on length_delta or
- // the last comparion performed in the loop above.
- __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
- __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
- __ Ret();
-}
-
-
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ add(scratch1, length,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ add(left, left, Operand(scratch1));
- __ add(right, right, Operand(scratch1));
- __ rsb(length, length, Operand::Zero());
- Register index = length; // index = -length;
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ ldrb(scratch1, MemOperand(left, index));
- __ ldrb(scratch2, MemOperand(right, index));
- __ cmp(scratch1, scratch2);
- __ b(ne, chars_not_equal);
- __ add(index, index, Operand(1), SetCC);
- __ b(ne, &loop);
-}
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- Counters* counters = masm->isolate()->counters();
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[4]: left string
- __ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1.
-
- Label not_same;
- __ cmp(r0, r1);
- __ b(ne, &not_same);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both objects are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
-
- // Compare flat ASCII strings natively. Remove arguments from stack first.
- __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- Counters* counters = masm->isolate()->counters();
-
- // Stack on entry:
- // sp[0]: second argument (right).
- // sp[4]: first argument (left).
-
- // Load the two arguments.
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfEitherSmi(r0, r1, &call_runtime);
- // Load instance types.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kStringTag == 0);
- // If either is not a string, go to runtime.
- __ tst(r4, Operand(kIsNotStringMask));
- __ tst(r5, Operand(kIsNotStringMask), eq);
- __ b(ne, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
- }
-
- // Both arguments are strings.
- // r0: first string
- // r1: second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- {
- Label strings_not_empty;
- // Check if either of the strings are empty. In that case return the other.
- __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
- __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
- __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
- STATIC_ASSERT(kSmiTag == 0);
- // Else test if second string is empty.
- __ cmp(r3, Operand(Smi::FromInt(0)), ne);
- __ b(ne, &strings_not_empty); // If either string was empty, return r0.
-
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&strings_not_empty);
- }
-
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ mov(r3, Operand(r3, ASR, kSmiTagSize));
- // Both strings are non-empty.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- // Adding two lengths can't overflow.
- STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
- __ add(r6, r2, Operand(r3));
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return a string here.
- __ cmp(r6, Operand(2));
- __ b(ne, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&make_two_character_string);
- // Resulting string has length 2 and first chars of two strings
- // are combined into single halfword in r2 register.
- // So we can fill resulting string without two loops by a single
- // halfword store instruction (which assumes that processor is
- // in a little endian mode)
- __ mov(r6, Operand(2));
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(r6, Operand(ConsString::kMinLength));
- __ b(lt, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- ASSERT(IsPowerOf2(String::kMaxLength + 1));
- // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
- __ cmp(r6, Operand(String::kMaxLength + 1));
- __ b(hs, &call_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ASCII the result is an ASCII cons string.
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- Label non_ascii, allocated, ascii_data;
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r4, Operand(kStringEncodingMask));
- __ tst(r5, Operand(kStringEncodingMask), ne);
- __ b(eq, &non_ascii);
-
- // Allocate an ASCII cons string.
- __ bind(&ascii_data);
- __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
- __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
- __ mov(r0, Operand(r7));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
- // r4: first instance type.
- // r5: second instance type.
- __ tst(r4, Operand(kAsciiDataHintMask));
- __ tst(r5, Operand(kAsciiDataHintMask), ne);
- __ b(ne, &ascii_data);
- __ eor(r4, r4, Operand(r5));
- STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(r4, r4, Operand(kOneByteStringTag | kAsciiDataHintTag));
- __ cmp(r4, Operand(kOneByteStringTag | kAsciiDataHintTag));
- __ b(eq, &ascii_data);
-
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r6: sum of lengths.
- Label first_prepared, second_prepared;
- __ bind(&string_add_flat_result);
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
-
- // Check whether both strings have same encoding
- __ eor(r7, r4, Operand(r5));
- __ tst(r7, Operand(kStringEncodingMask));
- __ b(ne, &call_runtime);
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r4, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r7,
- r0,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
- LeaveCC,
- eq);
- __ b(eq, &first_prepared);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(r4, Operand(kShortExternalStringMask));
- __ b(ne, &call_runtime);
- __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
- __ bind(&first_prepared);
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r5, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r1,
- r1,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
- LeaveCC,
- eq);
- __ b(eq, &second_prepared);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(r5, Operand(kShortExternalStringMask));
- __ b(ne, &call_runtime);
- __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // r7: first character of first string
- // r1: first character of second string
- // r2: length of first string.
- // r3: length of second string.
- // r6: sum of lengths.
- // Both strings have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r5, Operand(kStringEncodingMask));
- __ b(eq, &non_ascii_string_add_flat_result);
-
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // r0: result string.
- // r7: first character of first string.
- // r1: first character of second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii_string_add_flat_result);
- __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // r0: result string.
- // r7: first character of first string.
- // r1: first character of second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
- __ b(lt, &done);
-
- // Check the number to string cache.
- Label not_cached;
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- false,
- &not_cached);
- __ mov(arg, scratch1);
- __ str(arg, MemOperand(sp, stack_offset));
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CompareObjectType(
- arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
- __ b(ne, slow);
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ and_(scratch2,
- scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ cmp(scratch2,
- Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, slow);
- __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
- __ str(arg, MemOperand(sp, stack_offset));
-
- __ bind(&done);
-}
-
-
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
- Label miss;
- __ orr(r2, r1, r0);
- __ JumpIfNotSmi(r2, &miss);
-
- if (GetCondition() == eq) {
- // For equality we do not care about the sign of the result.
- __ sub(r0, r0, r1, SetCC);
- } else {
- // Untag before subtracting to avoid handling overflow.
- __ SmiUntag(r1);
- __ sub(r0, r1, SmiUntagOperand(r0));
- }
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::NUMBER);
-
- Label generic_stub;
- Label unordered, maybe_undefined1, maybe_undefined2;
- Label miss;
-
- if (left_ == CompareIC::SMI) {
- __ JumpIfNotSmi(r1, &miss);
- }
- if (right_ == CompareIC::SMI) {
- __ JumpIfNotSmi(r0, &miss);
- }
-
- // Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or VFP2 is unsupported.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
-
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(r0, &right_smi);
- __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vldr(d1, r2, HeapNumber::kValueOffset);
- __ b(&left);
- __ bind(&right_smi);
- __ SmiUntag(r2, r0); // Can't clobber r0 yet.
- SwVfpRegister single_scratch = d2.low();
- __ vmov(single_scratch, r2);
- __ vcvt_f64_s32(d1, single_scratch);
-
- __ bind(&left);
- __ JumpIfSmi(r1, &left_smi);
- __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
- __ sub(r2, r1, Operand(kHeapObjectTag));
- __ vldr(d0, r2, HeapNumber::kValueOffset);
- __ b(&done);
- __ bind(&left_smi);
- __ SmiUntag(r2, r1); // Can't clobber r1 yet.
- single_scratch = d3.low();
- __ vmov(single_scratch, r2);
- __ vcvt_f64_s32(d0, single_scratch);
-
- __ bind(&done);
- // Compare operands.
- __ VFPCompareAndSetFlags(d0, d1);
-
- // Don't base result on status bits when a NaN is involved.
- __ b(vs, &unordered);
-
- // Return a result of -1, 0, or 1, based on status bits.
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
- }
-
- __ bind(&unordered);
- __ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
-
- __ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(ne, &miss);
- __ JumpIfSmi(r1, &unordered);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined2);
- __ jmp(&unordered);
- }
-
- __ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
- __ b(eq, &unordered);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
- Label miss;
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
- Register tmp1 = r2;
- Register tmp2 = r3;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(left, right, &miss);
-
- // Check that both operands are internalized strings.
- __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp1, Operand(tmp2));
- __ tst(tmp1, Operand(kIsInternalizedMask));
- __ b(eq, &miss);
-
- // Internalized strings are compared by identity.
- __ cmp(left, right);
- // Make sure r0 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(r0));
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::UNIQUE_NAME);
- ASSERT(GetCondition() == eq);
- Label miss;
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
- Register tmp1 = r2;
- Register tmp2 = r3;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(left, right, &miss);
-
- // Check that both operands are unique names. This leaves the instance
- // types loaded in tmp1 and tmp2.
- STATIC_ASSERT(kInternalizedTag != 0);
- __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
-
- Label succeed1;
- __ tst(tmp1, Operand(kIsInternalizedMask));
- __ b(ne, &succeed1);
- __ cmp(tmp1, Operand(SYMBOL_TYPE));
- __ b(ne, &miss);
- __ bind(&succeed1);
-
- Label succeed2;
- __ tst(tmp2, Operand(kIsInternalizedMask));
- __ b(ne, &succeed2);
- __ cmp(tmp2, Operand(SYMBOL_TYPE));
- __ b(ne, &miss);
- __ bind(&succeed2);
-
- // Unique names are compared by identity.
- __ cmp(left, right);
- // Make sure r0 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(r0));
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
- Label miss;
-
- bool equality = Token::IsEqualityOp(op_);
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
- Register tmp1 = r2;
- Register tmp2 = r3;
- Register tmp3 = r4;
- Register tmp4 = r5;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(left, right, &miss);
-
- // Check that both operands are strings. This leaves the instance
- // types loaded in tmp1 and tmp2.
- __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ orr(tmp3, tmp1, tmp2);
- __ tst(tmp3, Operand(kIsNotStringMask));
- __ b(ne, &miss);
-
- // Fast check for identical strings.
- __ cmp(left, right);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
- __ Ret(eq);
-
- // Handle not identical strings.
-
- // Check that both strings are internalized strings. If they are, we're done
- // because we already know they are not identical.
- if (equality) {
- ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp3, tmp1, Operand(tmp2));
- __ tst(tmp3, Operand(kIsInternalizedMask));
- // Make sure r0 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(r0));
- __ Ret(ne);
- }
-
- // Check that both strings are sequential ASCII.
- Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(
- tmp1, tmp2, tmp3, tmp4, &runtime);
-
- // Compare flat ASCII strings. Returns when done.
- if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2, tmp3);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3, tmp4);
- }
-
- // Handle more complex cases in runtime.
- __ bind(&runtime);
- __ Push(left, right);
- if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
- } else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
- Label miss;
- __ and_(r2, r1, Operand(r0));
- __ JumpIfSmi(r2, &miss);
-
- __ CompareObjectType(r0, r2, r3, JS_OBJECT_TYPE);
- __ b(ne, &miss);
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
- __ b(eq, &miss);
- __ CompareObjectType(r1, r2, r3, JS_OBJECT_TYPE);
- __ b(ne, &miss);
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
- __ b(eq, &miss);
-
- ASSERT(GetCondition() == eq);
- __ sub(r0, r0, Operand(r1));
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
- Label miss;
- __ and_(r2, r1, Operand(r0));
- __ JumpIfSmi(r2, &miss);
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r2, Operand(known_map_));
- __ b(ne, &miss);
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
- __ b(eq, &miss);
- __ cmp(r3, Operand(known_map_));
- __ b(ne, &miss);
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitField2Offset));
- __ and_(r3, r3, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r3, Operand(1 << Map::kUseUserObjectComparison));
- __ b(eq, &miss);
-
- __ sub(r0, r0, Operand(r1));
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- {
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1, r0);
- __ push(lr);
- __ Push(r1, r0);
- __ mov(ip, Operand(Smi::FromInt(op_)));
- __ push(ip);
- __ CallExternalReference(miss, 3);
- // Compute the entry point of the rewritten stub.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore registers.
- __ pop(lr);
- __ pop(r0);
- __ pop(r1);
- }
-
- __ Jump(r2);
-}
-
-
-void DirectCEntryStub::Generate(MacroAssembler* masm) {
- __ ldr(pc, MemOperand(sp, 0));
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- ExternalReference function) {
- __ mov(r2, Operand(function));
- GenerateCall(masm, r2);
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- Register target) {
- intptr_t code =
- reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
- __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
-
- // Prevent literal pool emission during calculation of return address.
- Assembler::BlockConstPoolScope block_const_pool(masm);
-
- // Push return address (accessible to GC through exit frame pc).
- // Note that using pc with str is deprecated.
- Label start;
- __ bind(&start);
- __ add(ip, pc, Operand(Assembler::kInstrSize));
- __ str(ip, MemOperand(sp, 0));
- __ Jump(target); // Call the C++ function.
- ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta,
- masm->SizeOfCodeGeneratedSince(&start));
-}
-
-
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<String> name,
- Register scratch0) {
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
- __ sub(index, index, Operand(1));
- __ and_(index, index, Operand(
- Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- Register entity_name = scratch0;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- Register tmp = properties;
- __ add(tmp, properties, Operand(index, LSL, 1));
- __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- ASSERT(!tmp.is(entity_name));
- __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
- __ cmp(entity_name, tmp);
- __ b(eq, done);
-
- if (i != kInlinedProbes - 1) {
- // Load the hole ready for use below:
- __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
-
- // Stop if found the property.
- __ cmp(entity_name, Operand(Handle<String>(name)));
- __ b(eq, miss);
-
- Label the_hole;
- __ cmp(entity_name, tmp);
- __ b(eq, &the_hole);
-
- // Check if the entry name is not an internalized string.
- __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ ldrb(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ tst(entity_name, Operand(kIsInternalizedMask));
- __ b(eq, miss);
-
- __ bind(&the_hole);
-
- // Restore the properties.
- __ ldr(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- }
- }
-
- const int spill_mask =
- (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
- r2.bit() | r1.bit() | r0.bit());
-
- __ stm(db_w, sp, spill_mask);
- __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ mov(r1, Operand(Handle<String>(name)));
- StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- __ cmp(r0, Operand::Zero());
- __ ldm(ia_w, sp, spill_mask);
-
- __ b(eq, done);
- __ b(ne, miss);
-}
-
-
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- ASSERT(!elements.is(scratch1));
- ASSERT(!elements.is(scratch2));
- ASSERT(!name.is(scratch1));
- ASSERT(!name.is(scratch2));
-
- __ AssertString(name);
-
- // Compute the capacity mask.
- __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
- __ sub(scratch1, scratch1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
- __ add(scratch2, scratch2, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
- }
- __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
-
- // Scale the index by multiplying by the element size.
- ASSERT(StringDictionary::kEntrySize == 3);
- // scratch2 = scratch2 * 3.
- __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
- // Check if the key is identical to the name.
- __ add(scratch2, elements, Operand(scratch2, LSL, 2));
- __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
- __ cmp(name, Operand(ip));
- __ b(eq, done);
- }
-
- const int spill_mask =
- (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
- r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
- ~(scratch1.bit() | scratch2.bit());
-
- __ stm(db_w, sp, spill_mask);
- if (name.is(r0)) {
- ASSERT(!elements.is(r1));
- __ Move(r1, name);
- __ Move(r0, elements);
- } else {
- __ Move(r0, elements);
- __ Move(r1, name);
- }
- StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ cmp(r0, Operand::Zero());
- __ mov(scratch2, Operand(r2));
- __ ldm(ia_w, sp, spill_mask);
-
- __ b(ne, done);
- __ b(eq, miss);
-}
-
-
-void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Registers:
- // result: StringDictionary to probe
- // r1: key
- // : StringDictionary to probe.
- // index_: will hold an index of entry if lookup is successful.
- // might alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non zero otherwise.
-
- Register result = r0;
- Register dictionary = r0;
- Register key = r1;
- Register index = r2;
- Register mask = r3;
- Register hash = r4;
- Register undefined = r5;
- Register entry_key = r6;
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
- __ mov(mask, Operand(mask, ASR, kSmiTagSize));
- __ sub(mask, mask, Operand(1));
-
- __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
-
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- // Capacity is smi 2^n.
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
- __ add(index, hash, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
- } else {
- __ mov(index, Operand(hash));
- }
- __ and_(index, mask, Operand(index, LSR, String::kHashShift));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- ASSERT_EQ(kSmiTagSize, 1);
- __ add(index, dictionary, Operand(index, LSL, 2));
- __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
-
- // Having undefined at this place means the name is not contained.
- __ cmp(entry_key, Operand(undefined));
- __ b(eq, &not_in_dictionary);
-
- // Stop if found the property.
- __ cmp(entry_key, Operand(key));
- __ b(eq, &in_dictionary);
-
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // Check if the entry name is not an internalized string.
- __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ ldrb(entry_key,
- FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ tst(entry_key, Operand(kIsInternalizedMask));
- __ b(eq, &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
- __ mov(result, Operand::Zero());
- __ Ret();
- }
-
- __ bind(&in_dictionary);
- __ mov(result, Operand(1));
- __ Ret();
-
- __ bind(&not_in_dictionary);
- __ mov(result, Operand::Zero());
- __ Ret();
-}
-
-
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
- // Also used in KeyedStoreIC::GenerateGeneric.
- { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
- { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
- // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
- { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
- // StoreArrayLiteralElementStub::Generate
- { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
- // FastNewClosureStub::Generate
- { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-
-bool RecordWriteStub::IsPregenerated() {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
-bool StoreBufferOverflowStub::IsPregenerated() {
- return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return CpuFeatures::IsSupported(VFP2);
-}
-
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two instructions are generated with labels so as to get the
- // offset fixed up correctly by the bind(Label*) call. We patch it back and
- // forth between a compare instructions (a nop in this position) and the
- // real branch when we start and stop incremental heap marking.
- // See RecordWriteStub::Patch for details.
- {
- // Block literal pool emission, as the position of these two instructions
- // is assumed by the patching code.
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ b(&skip_to_incremental_noncompacting);
- __ b(&skip_to_incremental_compacting);
- }
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- }
- __ Ret();
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
- ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
- PatchBranchIntoNop(masm, 0);
- PatchBranchIntoNop(masm, Assembler::kInstrSize);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- ne,
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address =
- r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- ASSERT(!address.is(regs_.object()));
- ASSERT(!address.is(r0));
- __ Move(address, regs_.address());
- __ Move(r0, regs_.object());
- __ Move(r1, address);
- __ mov(r2, Operand(ExternalReference::isolate_address()));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label on_black;
- Label need_incremental;
- Label need_incremental_pop_scratch;
-
- __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
- __ ldr(regs_.scratch1(),
- MemOperand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
- __ str(regs_.scratch1(),
- MemOperand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ b(mi, &need_incremental);
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ bind(&on_black);
-
- // Get the value from the slot.
- __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- eq,
- &ensure_not_white);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- eq,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : element value to store
- // -- r1 : array literal
- // -- r2 : map of array literal
- // -- r3 : element index as smi
- // -- r4 : array literal index in function as smi
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- __ CheckFastElements(r2, r5, &double_elements);
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
- __ JumpIfSmi(r0, &smi_element);
- __ CheckFastSmiElements(r2, r5, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- // call.
- __ Push(r1, r3, r0);
- __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
- __ Push(r5, r4);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(r0, MemOperand(r6, 0));
- // Update the write barrier for the array store.
- __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
- __ Ret();
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3,
- // Overwrites all regs after this.
- r5, r6, r7, r9, r2,
- &slow_elements);
- __ Ret();
-}
-
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- ASSERT(!Serializer::enabled());
- bool save_fp_regs = CpuFeatures::IsSupported(VFP2);
- CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ ldr(r1, MemOperand(fp, parameter_count_offset));
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
- __ add(sp, sp, r1);
- __ Ret();
-}
-
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
- PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
- ProfileEntryHookStub stub;
- __ push(lr);
- __ CallStub(&stub);
- __ pop(lr);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // The entry hook is a "push lr" instruction, followed by a call.
- const int32_t kReturnAddressDistanceFromFunctionStart =
- 3 * Assembler::kInstrSize;
-
- // Save live volatile registers.
- __ Push(lr, r5, r1);
- const int32_t kNumSavedRegs = 3;
-
- // Compute the function's address for the first argument.
- __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
-
- // The caller's return address is above the saved temporaries.
- // Grab that for the second argument to the hook.
- __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
-
- // Align the stack if necessary.
- int frame_alignment = masm->ActivationFrameAlignment();
- if (frame_alignment > kPointerSize) {
- __ mov(r5, sp);
- ASSERT(IsPowerOf2(frame_alignment));
- __ and_(sp, sp, Operand(-frame_alignment));
- }
-
-#if defined(V8_HOST_ARCH_ARM)
- __ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
- __ ldr(ip, MemOperand(ip));
-#else
- // Under the simulator we need to indirect the entry hook through a
- // trampoline function at a known address.
- Address trampoline_address = reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(EntryHookTrampoline));
- ApiFunction dispatcher(trampoline_address);
- __ mov(ip, Operand(ExternalReference(&dispatcher,
- ExternalReference::BUILTIN_CALL,
- masm->isolate())));
-#endif
- __ Call(ip);
-
- // Restore the stack pointer if needed.
- if (frame_alignment > kPointerSize) {
- __ mov(sp, r5);
- }
-
- __ Pop(lr, r5, r1);
- __ Ret();
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/code-stubs-arm.h b/src/3rdparty/v8/src/arm/code-stubs-arm.h
deleted file mode 100644
index f952756..0000000
--- a/src/3rdparty/v8/src/arm/code-stubs-arm.h
+++ /dev/null
@@ -1,800 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_CODE_STUBS_ARM_H_
-#define V8_ARM_CODE_STUBS_ARM_H_
-
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) { }
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
- void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
-class UnaryOpStub: public PlatformCodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
- void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateNumberStubSub(MacroAssembler* masm);
- void GenerateNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
-
-
- // Probe the string table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in register r0.
- // Contents of both c1 and c2 registers are modified. At the exit c1 is
- // guaranteed to contain halfword with low and high bytes equal to
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow);
-
- const StringAddFlags flags_;
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- StringCompareStub() { }
-
- // Compares two flat ASCII strings and returns result in r0.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compares two flat ASCII strings for equality and returns result
- // in r0.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- virtual Major MajorKey() { return StringCompare; }
- virtual int MinorKey() { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal);
-};
-
-
-// This stub can convert a signed int32 to a heap number (double). It does
-// not work for int32s that are in Smi range! No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
- public:
- WriteInt32ToHeapNumberStub(Register the_int,
- Register the_heap_number,
- Register scratch)
- : the_int_(the_int),
- the_heap_number_(the_heap_number),
- scratch_(scratch) { }
-
- bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-
- private:
- Register the_int_;
- Register the_heap_number_;
- Register scratch_;
-
- // Minor key encoding in 16 bits.
- class IntRegisterBits: public BitField<int, 0, 4> {};
- class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
- class ScratchRegisterBits: public BitField<int, 8, 4> {};
-
- Major MajorKey() { return WriteInt32ToHeapNumber; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return IntRegisterBits::encode(the_int_.code())
- | HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code());
- }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- RecordWriteStub(Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- }
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
- ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
- }
-
- static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
- ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
- }
-
- static Mode GetMode(Code* stub) {
- Instr first_instruction = Assembler::instr_at(stub->instruction_start());
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- Assembler::kInstrSize);
-
- if (Assembler::IsBranch(first_instruction)) {
- return INCREMENTAL;
- }
-
- ASSERT(Assembler::IsTstImmediate(first_instruction));
-
- if (Assembler::IsBranch(second_instruction)) {
- return INCREMENTAL_COMPACTION;
- }
-
- ASSERT(Assembler::IsTstImmediate(second_instruction));
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL,
- stub->instruction_start(),
- stub->instruction_size());
- switch (mode) {
- case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- PatchBranchIntoNop(&masm, 0);
- PatchBranchIntoNop(&masm, Assembler::kInstrSize);
- break;
- case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 0);
- break;
- case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, Assembler::kInstrSize);
- break;
- }
- ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
- }
-
- private:
- // This is a helper class for freeing up 3 scratch registers. The input is
- // two registers that must be preserved and one scratch register provided by
- // the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
- : object_(object),
- address_(address),
- scratch0_(scratch0) {
- ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
- }
-
- void Save(MacroAssembler* masm) {
- ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
- // We don't have to save scratch0_ because it was given to us as
- // a scratch register.
- masm->push(scratch1_);
- }
-
- void Restore(MacroAssembler* masm) {
- masm->pop(scratch1_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The scratch registers
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
- if (mode == kSaveFPRegs) {
- // Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
- CpuFeatures::Scope scope(VFP2);
- masm->sub(sp,
- sp,
- Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
- // Save all VFP registers except d0.
- // TODO(hans): We should probably save d0 too. And maybe use vstm.
- for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
- }
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- // Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
- CpuFeatures::Scope scope(VFP2);
- // Restore all VFP registers except d0.
- // TODO(hans): We should probably restore d0 too. And maybe use vldm.
- for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
- }
- masm->add(sp,
- sp,
- Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
- }
- masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
-
- Register GetRegThatIsNotOneOf(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
- }
-
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
-
- class ObjectBits: public BitField<int, 0, 4> {};
- class ValueBits: public BitField<int, 4, 4> {};
- class AddressBits: public BitField<int, 8, 4> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
-
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
- Label slow_;
- RegisterAllocation regs_;
-};
-
-
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public PlatformCodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
-
-// Trampoline stub to call into native code. To call safely into native code
-// in the presence of compacting GC (which can move code objects) we need to
-// keep the code which called into native pinned in the memory. Currently the
-// simplest approach is to generate such stub early enough so it can never be
-// moved by GC
-class DirectCEntryStub: public PlatformCodeStub {
- public:
- DirectCEntryStub() {}
- void Generate(MacroAssembler* masm);
- void GenerateCall(MacroAssembler* masm, ExternalReference function);
- void GenerateCall(MacroAssembler* masm, Register target);
-
- private:
- Major MajorKey() { return DirectCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- enum Destination {
- kVFPRegisters,
- kCoreRegisters
- };
-
-
- // Loads smis from r0 and r1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
- // floating point registers VFP3 must be supported. If core registers are
- // requested when VFP3 is supported d6 and d7 will be scratched.
- static void LoadSmis(MacroAssembler* masm,
- Destination destination,
- Register scratch1,
- Register scratch2);
-
- // Convert the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1.
- static void ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_int32);
-
- // Converts the integer (untagged smi) in |int_scratch| to a double, storing
- // the result either in |double_dst| or |dst2:dst1|, depending on
- // |destination|.
- // Warning: The value in |int_scratch| will be changed in the process!
- static void ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
- Register scratch2,
- SwVfpRegister single_scratch);
-
- // Load the number from object into double_dst in the double format.
- // Control will jump to not_int32 if the value cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be loaded.
- static void LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DwVfpRegister double_dst,
- DwVfpRegister double_scratch,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- SwVfpRegister single_scratch,
- Label* not_int32);
-
- // Loads the number from object into dst as a 32-bit integer.
- // Control will jump to not_int32 if the object cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be converted.
- // scratch3 is not used when VFP3 is supported.
- static void LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
- Label* not_int32);
-
- // Generate non VFP3 code to check if a double can be exactly represented by a
- // 32-bit integer. This does not check for 0 or -0, which need
- // to be checked for separately.
- // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
- // through otherwise.
- // src1 and src2 will be cloberred.
- //
- // Expected input:
- // - src1: higher (exponent) part of the double value.
- // - src2: lower (mantissa) part of the double value.
- // Output status:
- // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
- // - src2: contains 1.
- // - other registers are clobbered.
- static void DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32);
-
- // Generates code to call a C function to do a double operation using core
- // registers. (Used when VFP3 is not supported.)
- // This code never falls through, but returns with a heap number containing
- // the result in r0.
- // Register heapnumber_result must be a heap number in which the
- // result of the operation will be stored.
- // Requires the following layout on entry:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch);
-
- // Loads the objects from |object| into floating point registers.
- // Depending on |destination| the value ends up either in |dst| or
- // in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3
- // must be supported. If kCoreRegisters are requested and VFP3 is
- // supported, |dst| will be scratched. If |object| is neither smi nor
- // heap number, |not_number| is jumped to with |object| still intact.
- static void LoadNumber(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register object,
- DwVfpRegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-};
-
-
-class StringDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<String> name,
- Register scratch0);
-
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- Major MajorKey() { return StringDictionaryLookup; }
-
- int MinorKey() {
- return LookupModeBits::encode(mode_);
- }
-
- class LookupModeBits: public BitField<LookupMode, 0, 1> {};
-
- LookupMode mode_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/codegen-arm.cc b/src/3rdparty/v8/src/arm/codegen-arm.cc
deleted file mode 100644
index 6e3c635..0000000
--- a/src/3rdparty/v8/src/arm/codegen-arm.cc
+++ /dev/null
@@ -1,708 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen.h"
-#include "macro-assembler.h"
-#include "simulator-arm.h"
-
-namespace v8 {
-namespace internal {
-
-
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- return NULL;
-}
-
-
-#define __ masm.
-
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_arm_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
- fast_exp_arm_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(VFP2)) return &exp;
- if (!FLAG_fast_math) return &exp;
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-
- {
- CpuFeatures::Scope use_vfp(VFP2);
- DwVfpRegister input = d0;
- DwVfpRegister result = d1;
- DwVfpRegister double_scratch1 = d2;
- DwVfpRegister double_scratch2 = d3;
- Register temp1 = r4;
- Register temp2 = r5;
- Register temp3 = r6;
-
- if (masm.use_eabi_hardfloat()) {
- // Input value is in d0 anyway, nothing to do.
- } else {
- __ vmov(input, r0, r1);
- }
- __ Push(temp3, temp2, temp1);
- MathExpGenerator::EmitMathExp(
- &masm, input, result, double_scratch1, double_scratch2,
- temp1, temp2, temp3);
- __ Pop(temp3, temp2, temp1);
- if (masm.use_eabi_hardfloat()) {
- __ vmov(d0, result);
- } else {
- __ vmov(r0, r1, result);
- }
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#else
- fast_exp_arm_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
-
-#undef __
-
-
-UnaryMathFunction CreateSqrtFunction() {
- return &sqrt;
-}
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
- masm->set_has_frame(true);
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
- masm->set_has_frame(false);
-}
-
-
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm, AllocationSiteMode mode,
- Label* allocation_site_info_found) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : target map, scratch for subsequent call
- // -- r4 : scratch (elements)
- // -----------------------------------
- if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_site_info_found != NULL);
- __ TestJSArrayForAllocationSiteInfo(r2, r4);
- __ b(eq, allocation_site_info_found);
- }
-
- // Set transitioned map.
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
- HeapObject::kMapOffset,
- r3,
- r9,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : target map, scratch for subsequent call
- // -- r4 : scratch (elements)
- // -----------------------------------
- Label loop, entry, convert_hole, gc_required, only_change_map, done;
- bool vfp2_supported = CpuFeatures::IsSupported(VFP2);
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(r2, r4);
- __ b(eq, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
- __ b(eq, &only_change_map);
-
- __ push(lr);
- __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // r4: source FixedArray
- // r5: number of elements (smi-tagged)
-
- // Allocate new FixedDoubleArray.
- // Use lr as a temporary register.
- __ mov(lr, Operand(r5, LSL, 2));
- __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
- __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
- // r6: destination FixedDoubleArray, not tagged as heap object.
-
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
- __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
- // Update receiver's map.
- __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
-
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
- HeapObject::kMapOffset,
- r3,
- r9,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ add(r3, r6, Operand(kHeapObjectTag));
- __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ RecordWriteField(r2,
- JSObject::kElementsOffset,
- r3,
- r9,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Prepare for conversion loop.
- __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
- __ add(r6, r7, Operand(r5, LSL, 2));
- __ mov(r4, Operand(kHoleNanLower32));
- __ mov(r5, Operand(kHoleNanUpper32));
- // r3: begin of source FixedArray element fields, not tagged
- // r4: kHoleNanLower32
- // r5: kHoleNanUpper32
- // r6: end of destination FixedDoubleArray, not tagged
- // r7: begin of FixedDoubleArray element fields, not tagged
- if (!vfp2_supported) __ Push(r1, r0);
-
- __ b(&entry);
-
- __ bind(&only_change_map);
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
- HeapObject::kMapOffset,
- r3,
- r9,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ b(&done);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ pop(lr);
- __ b(fail);
-
- // Convert and copy elements.
- __ bind(&loop);
- __ ldr(r9, MemOperand(r3, 4, PostIndex));
- // r9: current element
- __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
-
- // Normal smi, convert to double and store.
- if (vfp2_supported) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(s0, r9);
- __ vcvt_f64_s32(d0, s0);
- __ vstr(d0, r7, 0);
- __ add(r7, r7, Operand(8));
- } else {
- FloatingPointHelper::ConvertIntToDouble(masm,
- r9,
- FloatingPointHelper::kCoreRegisters,
- d0,
- r0,
- r1,
- lr,
- s0);
- __ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
- }
- __ b(&entry);
-
- // Hole found, store the-hole NaN.
- __ bind(&convert_hole);
- if (FLAG_debug_code) {
- // Restore a "smi-untagged" heap object.
- __ SmiTag(r9);
- __ orr(r9, r9, Operand(1));
- __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, "object found in smi-only array");
- }
- __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
-
- __ bind(&entry);
- __ cmp(r7, r6);
- __ b(lt, &loop);
-
- if (!vfp2_supported) __ Pop(r1, r0);
- __ pop(lr);
- __ bind(&done);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : target map, scratch for subsequent call
- // -- r4 : scratch (elements)
- // -----------------------------------
- Label entry, loop, convert_hole, gc_required, only_change_map;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(r2, r4);
- __ b(eq, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
- __ b(eq, &only_change_map);
-
- __ push(lr);
- __ Push(r3, r2, r1, r0);
- __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // r4: source FixedDoubleArray
- // r5: number of elements (smi-tagged)
-
- // Allocate new FixedArray.
- __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
- __ add(r0, r0, Operand(r5, LSL, 1));
- __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
- // r6: destination FixedArray, not tagged as heap object
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
- __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
- __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
-
- // Prepare for conversion loop.
- __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
- __ add(r3, r6, Operand(FixedArray::kHeaderSize));
- __ add(r6, r6, Operand(kHeapObjectTag));
- __ add(r5, r3, Operand(r5, LSL, 1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
- // Using offsetted addresses in r4 to fully take advantage of post-indexing.
- // r3: begin of destination FixedArray element fields, not tagged
- // r4: begin of source FixedDoubleArray element fields, not tagged, +4
- // r5: end of destination FixedArray, not tagged
- // r6: destination FixedArray
- // r7: the-hole pointer
- // r9: heap number map
- __ b(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ Pop(r3, r2, r1, r0);
- __ pop(lr);
- __ b(fail);
-
- __ bind(&loop);
- __ ldr(r1, MemOperand(r4, 8, PostIndex));
- // lr: current element's upper 32 bit
- // r4: address of next element's upper 32 bit
- __ cmp(r1, Operand(kHoleNanUpper32));
- __ b(eq, &convert_hole);
-
- // Non-hole double, copy value into a heap number.
- __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
- // r2: new heap number
- __ ldr(r0, MemOperand(r4, 12, NegOffset));
- __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
- __ mov(r0, r3);
- __ str(r2, MemOperand(r3, 4, PostIndex));
- __ RecordWrite(r6,
- r0,
- r2,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ b(&entry);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ str(r7, MemOperand(r3, 4, PostIndex));
-
- __ bind(&entry);
- __ cmp(r3, r5);
- __ b(lt, &loop);
-
- __ Pop(r3, r2, r1, r0);
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ RecordWriteField(r2,
- JSObject::kElementsOffset,
- r6,
- r9,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(lr);
-
- __ bind(&only_change_map);
- // Update receiver's map.
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
- HeapObject::kMapOffset,
- r3,
- r9,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- // Fetch the instance type of the receiver into result register.
- __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ tst(result, Operand(kIsIndirectStringMask));
- __ b(eq, &check_sequential);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ tst(result, Operand(kSlicedNotConsMask));
- __ b(eq, &cons_string);
-
- // Handle slices.
- Label indirect_string_loaded;
- __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ add(index, index, Operand(result, ASR, kSmiTagSize));
- __ jmp(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ CompareRoot(result, Heap::kempty_stringRootIndex);
- __ b(ne, call_runtime);
- // Get the first of the two strings and load its instance type.
- __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label external_string, check_encoding;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result, Operand(kStringRepresentationMask));
- __ b(ne, &external_string);
-
- // Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ add(string,
- string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ jmp(&check_encoding);
-
- // Handle external strings.
- __ bind(&external_string);
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ tst(result, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found");
- }
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ tst(result, Operand(kShortExternalStringMask));
- __ b(ne, call_runtime);
- __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
-
- Label ascii, done;
- __ bind(&check_encoding);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(result, Operand(kStringEncodingMask));
- __ b(ne, &ascii);
- // Two-byte string.
- __ ldrh(result, MemOperand(string, index, LSL, 1));
- __ jmp(&done);
- __ bind(&ascii);
- // Ascii string.
- __ ldrb(result, MemOperand(string, index));
- __ bind(&done);
-}
-
-
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ tst(index, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi index");
- __ tst(value, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi value");
-
- __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
- __ cmp(index, ip);
- __ Check(lt, "Index is too large");
-
- __ cmp(index, Operand(Smi::FromInt(0)));
- __ Check(ge, "Index is negative");
-
- __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
-
- __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, "Unexpected string type");
- }
-
- __ add(ip,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ SmiUntag(value, value);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- if (encoding == String::ONE_BYTE_ENCODING) {
- // Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline.
- __ strb(value, MemOperand(ip, index, LSR, 1));
- } else {
- // No need to untag a smi for two-byte addressing.
- __ strh(value, MemOperand(ip, index));
- }
-}
-
-
-static MemOperand ExpConstant(int index, Register base) {
- return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- DwVfpRegister input,
- DwVfpRegister result,
- DwVfpRegister double_scratch1,
- DwVfpRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3) {
- ASSERT(!input.is(result));
- ASSERT(!input.is(double_scratch1));
- ASSERT(!input.is(double_scratch2));
- ASSERT(!result.is(double_scratch1));
- ASSERT(!result.is(double_scratch2));
- ASSERT(!double_scratch1.is(double_scratch2));
- ASSERT(!temp1.is(temp2));
- ASSERT(!temp1.is(temp3));
- ASSERT(!temp2.is(temp3));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
- Label done;
-
- __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
- __ vldr(double_scratch1, ExpConstant(0, temp3));
- __ vmov(result, kDoubleRegZero);
- __ VFPCompareAndSetFlags(double_scratch1, input);
- __ b(ge, &done);
- __ vldr(double_scratch2, ExpConstant(1, temp3));
- __ VFPCompareAndSetFlags(input, double_scratch2);
- __ vldr(result, ExpConstant(2, temp3));
- __ b(ge, &done);
- __ vldr(double_scratch1, ExpConstant(3, temp3));
- __ vldr(result, ExpConstant(4, temp3));
- __ vmul(double_scratch1, double_scratch1, input);
- __ vadd(double_scratch1, double_scratch1, result);
- __ vmov(temp2, temp1, double_scratch1);
- __ vsub(double_scratch1, double_scratch1, result);
- __ vldr(result, ExpConstant(6, temp3));
- __ vldr(double_scratch2, ExpConstant(5, temp3));
- __ vmul(double_scratch1, double_scratch1, double_scratch2);
- __ vsub(double_scratch1, double_scratch1, input);
- __ vsub(result, result, double_scratch1);
- __ vmul(input, double_scratch1, double_scratch1);
- __ vmul(result, result, input);
- __ mov(temp1, Operand(temp2, LSR, 11));
- __ vldr(double_scratch2, ExpConstant(7, temp3));
- __ vmul(result, result, double_scratch2);
- __ vsub(result, result, double_scratch1);
- __ vldr(double_scratch2, ExpConstant(8, temp3));
- __ vadd(result, result, double_scratch2);
- __ movw(ip, 0x7ff);
- __ and_(temp2, temp2, Operand(ip));
- __ add(temp1, temp1, Operand(0x3ff));
- __ mov(temp1, Operand(temp1, LSL, 20));
-
- // Must not call ExpConstant() after overwriting temp3!
- __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ ldr(ip, MemOperand(temp3, temp2, LSL, 3));
- __ add(temp3, temp3, Operand(kPointerSize));
- __ ldr(temp2, MemOperand(temp3, temp2, LSL, 3));
- __ orr(temp1, temp1, temp2);
- __ vmov(input, ip, temp1);
- __ vmul(result, result, input);
- __ bind(&done);
-}
-
-#undef __
-
-// add(r0, pc, Operand(-8))
-static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
-
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found in FUNCTIONS
- static bool initialized = false;
- static uint32_t sequence[kNoCodeAgeSequenceLength];
- byte* byte_sequence = reinterpret_cast<byte*>(sequence);
- *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
- if (!initialized) {
- CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
- PredictableCodeSizeScope scope(patcher.masm(), *length);
- patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- patcher.masm()->LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
- initialized = true;
- }
- return byte_sequence;
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = !memcmp(sequence, young_sequence, young_length);
- ASSERT(result ||
- Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
- return result;
-}
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAge;
- *parity = NO_MARKING_PARITY;
- } else {
- Address target_address = Memory::Address_at(
- sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
- memcpy(sequence, young_sequence, young_length);
- CPU::FlushICache(sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
- patcher.masm()->add(r0, pc, Operand(-8));
- patcher.masm()->ldr(pc, MemOperand(pc, -4));
- patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/codegen-arm.h b/src/3rdparty/v8/src/arm/codegen-arm.h
deleted file mode 100644
index 75899a9..0000000
--- a/src/3rdparty/v8/src/arm/codegen-arm.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_CODEGEN_ARM_H_
-#define V8_ARM_CODEGEN_ARM_H_
-
-#include "ast.h"
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations
-class CompilationInfo;
-
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- CodeGenerator() {
- InitializeAstVisitor();
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Expression* type);
-
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- DwVfpRegister input,
- DwVfpRegister result,
- DwVfpRegister double_scratch1,
- DwVfpRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/constants-arm.cc b/src/3rdparty/v8/src/arm/constants-arm.cc
deleted file mode 100644
index cdca1f5..0000000
--- a/src/3rdparty/v8/src/arm/constants-arm.cc
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "constants-arm.h"
-
-
-namespace v8 {
-namespace internal {
-
-double Instruction::DoubleImmedVmov() const {
- // Reconstruct a double from the immediate encoded in the vmov instruction.
- //
- // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
- // double: [aBbbbbbb,bbcdefgh,00000000,00000000,
- // 00000000,00000000,00000000,00000000]
- //
- // where B = ~b. Only the high 16 bits are affected.
- uint64_t high16;
- high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
- high16 |= (0xff * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
- high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
- high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
-
- uint64_t imm = high16 << 48;
- double d;
- memcpy(&d, &imm, 8);
- return d;
-}
-
-
-// These register names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-const char* Registers::names_[kNumRegisters] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
-};
-
-
-// List of alias names which can be used when referring to ARM registers.
-const Registers::RegisterAlias Registers::aliases_[] = {
- {10, "sl"},
- {11, "r11"},
- {12, "r12"},
- {13, "r13"},
- {14, "r14"},
- {15, "r15"},
- {kNoRegister, NULL}
-};
-
-
-const char* Registers::Name(int reg) {
- const char* result;
- if ((0 <= reg) && (reg < kNumRegisters)) {
- result = names_[reg];
- } else {
- result = "noreg";
- }
- return result;
-}
-
-
-// Support for VFP registers s0 to s31 (d0 to d15) and d16-d31.
-// Note that "sN:sM" is the same as "dN/2" up to d15.
-// These register names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-const char* VFPRegisters::names_[kNumVFPRegisters] = {
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
- "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
- "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
- "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
- "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
- "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"
-};
-
-
-const char* VFPRegisters::Name(int reg, bool is_double) {
- ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
- return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
-}
-
-
-int VFPRegisters::Number(const char* name, bool* is_double) {
- for (int i = 0; i < kNumVFPRegisters; i++) {
- if (strcmp(names_[i], name) == 0) {
- if (i < kNumVFPSingleRegisters) {
- *is_double = false;
- return i;
- } else {
- *is_double = true;
- return i - kNumVFPSingleRegisters;
- }
- }
- }
-
- // No register with the requested name found.
- return kNoRegister;
-}
-
-
-int Registers::Number(const char* name) {
- // Look through the canonical names.
- for (int i = 0; i < kNumRegisters; i++) {
- if (strcmp(names_[i], name) == 0) {
- return i;
- }
- }
-
- // Look through the alias names.
- int i = 0;
- while (aliases_[i].reg != kNoRegister) {
- if (strcmp(aliases_[i].name, name) == 0) {
- return aliases_[i].reg;
- }
- i++;
- }
-
- // No register with the requested name found.
- return kNoRegister;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/constants-arm.h b/src/3rdparty/v8/src/arm/constants-arm.h
deleted file mode 100644
index 841df92..0000000
--- a/src/3rdparty/v8/src/arm/constants-arm.h
+++ /dev/null
@@ -1,789 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_CONSTANTS_ARM_H_
-#define V8_ARM_CONSTANTS_ARM_H_
-
-// ARM EABI is required.
-#if defined(__arm__) && !defined(__ARM_EABI__) && !defined(_WIN32_WCE)
-#error ARM EABI support is required.
-#endif
-
-// This means that interwork-compatible jump instructions are generated. We
-// want to generate them on the simulator too so it makes snapshots that can
-// be used on real hardware.
-#if defined(__THUMB_INTERWORK__) || !defined(__arm__) || defined(_WIN32_WCE)
-# define USE_THUMB_INTERWORK 1
-#endif
-
-#if defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7R__) || \
- defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV7_INSTRUCTIONS 1
-#endif
-
-#if defined(__ARM_ARCH_6__) || \
- defined(__ARM_ARCH_6J__) || \
- defined(__ARM_ARCH_6K__) || \
- defined(__ARM_ARCH_6Z__) || \
- defined(__ARM_ARCH_6ZK__) || \
- defined(__ARM_ARCH_6T2__) || \
- defined(CAN_USE_ARMV7_INSTRUCTIONS)
-# define CAN_USE_ARMV6_INSTRUCTIONS 1
-#endif
-
-#if defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || \
- defined(__ARM_ARCH_5TEJ__) || \
- defined(CAN_USE_ARMV6_INSTRUCTIONS)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
-# define CAN_USE_THUMB_INSTRUCTIONS 1
-#endif
-
-// Simulator should support ARM5 instructions and unaligned access by default.
-#if !defined(__arm__) || defined(_WIN32_WCE)
-# if !defined(_WIN32_WCE)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
-# endif
-# define CAN_USE_THUMB_INSTRUCTIONS 1
-
-# ifndef CAN_USE_UNALIGNED_ACCESSES
-# define CAN_USE_UNALIGNED_ACCESSES 1
-# endif
-
-#endif
-
-// Using blx may yield better code, so use it when required or when available
-#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
-#define USE_BLX 1
-#endif
-
-namespace v8 {
-namespace internal {
-
-// Constant pool marker.
-// Use UDF, the permanently undefined instruction.
-const int kConstantPoolMarkerMask = 0xfff000f0;
-const int kConstantPoolMarker = 0xe7f000f0;
-const int kConstantPoolLengthMaxMask = 0xffff;
-inline int EncodeConstantPoolLength(int length) {
- ASSERT((length & kConstantPoolLengthMaxMask) == length);
- return ((length & 0xfff0) << 4) | (length & 0xf);
-}
-inline int DecodeConstantPoolLength(int instr) {
- ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
- return ((instr >> 4) & 0xfff0) | (instr & 0xf);
-}
-
-// Number of registers in normal ARM mode.
-const int kNumRegisters = 16;
-
-// VFP support.
-const int kNumVFPSingleRegisters = 32;
-const int kNumVFPDoubleRegisters = 32;
-const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
-
-// PC is register 15.
-const int kPCRegister = 15;
-const int kNoRegister = -1;
-
-// -----------------------------------------------------------------------------
-// Conditions.
-
-// Defines constants and accessor classes to assemble, disassemble and
-// simulate ARM instructions.
-//
-// Section references in the code refer to the "ARM Architecture Reference
-// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf)
-//
-// Constants for specific fields are defined in their respective named enums.
-// General constants are in an anonymous enum in class Instr.
-
-// Values for the condition field as defined in section A3.2
-enum Condition {
- kNoCondition = -1,
-
- eq = 0 << 28, // Z set Equal.
- ne = 1 << 28, // Z clear Not equal.
- cs = 2 << 28, // C set Unsigned higher or same.
- cc = 3 << 28, // C clear Unsigned lower.
- mi = 4 << 28, // N set Negative.
- pl = 5 << 28, // N clear Positive or zero.
- vs = 6 << 28, // V set Overflow.
- vc = 7 << 28, // V clear No overflow.
- hi = 8 << 28, // C set, Z clear Unsigned higher.
- ls = 9 << 28, // C clear or Z set Unsigned lower or same.
- ge = 10 << 28, // N == V Greater or equal.
- lt = 11 << 28, // N != V Less than.
- gt = 12 << 28, // Z clear, N == V Greater than.
- le = 13 << 28, // Z set or N != V Less then or equal
- al = 14 << 28, // Always.
-
- kSpecialCondition = 15 << 28, // Special condition (refer to section A3.2.1).
- kNumberOfConditions = 16,
-
- // Aliases.
- hs = cs, // C set Unsigned higher or same.
- lo = cc // C clear Unsigned lower.
-};
-
-
-inline Condition NegateCondition(Condition cond) {
- ASSERT(cond != al);
- return static_cast<Condition>(cond ^ ne);
-}
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cond) {
- switch (cond) {
- case lo:
- return hi;
- case hi:
- return lo;
- case hs:
- return ls;
- case ls:
- return hs;
- case lt:
- return gt;
- case gt:
- return lt;
- case ge:
- return le;
- case le:
- return ge;
- default:
- return cond;
- };
-}
-
-
-// -----------------------------------------------------------------------------
-// Instructions encoding.
-
-// Instr is merely used by the Assembler to distinguish 32bit integers
-// representing instructions from usual 32 bit values.
-// Instruction objects are pointers to 32bit values, and provide methods to
-// access the various ISA fields.
-typedef int32_t Instr;
-
-
-// Opcodes for Data-processing instructions (instructions with a type 0 and 1)
-// as defined in section A3.4
-enum Opcode {
- AND = 0 << 21, // Logical AND.
- EOR = 1 << 21, // Logical Exclusive OR.
- SUB = 2 << 21, // Subtract.
- RSB = 3 << 21, // Reverse Subtract.
- ADD = 4 << 21, // Add.
- ADC = 5 << 21, // Add with Carry.
- SBC = 6 << 21, // Subtract with Carry.
- RSC = 7 << 21, // Reverse Subtract with Carry.
- TST = 8 << 21, // Test.
- TEQ = 9 << 21, // Test Equivalence.
- CMP = 10 << 21, // Compare.
- CMN = 11 << 21, // Compare Negated.
- ORR = 12 << 21, // Logical (inclusive) OR.
- MOV = 13 << 21, // Move.
- BIC = 14 << 21, // Bit Clear.
- MVN = 15 << 21 // Move Not.
-};
-
-
-// The bits for bit 7-4 for some type 0 miscellaneous instructions.
-enum MiscInstructionsBits74 {
- // With bits 22-21 01.
- BX = 1 << 4,
- BXJ = 2 << 4,
- BLX = 3 << 4,
- BKPT = 7 << 4,
-
- // With bits 22-21 11.
- CLZ = 1 << 4
-};
-
-
-// Instruction encoding bits and masks.
-enum {
- H = 1 << 5, // Halfword (or byte).
- S6 = 1 << 6, // Signed (or unsigned).
- L = 1 << 20, // Load (or store).
- S = 1 << 20, // Set condition code (or leave unchanged).
- W = 1 << 21, // Writeback base register (or leave unchanged).
- A = 1 << 21, // Accumulate in multiply instruction (or not).
- B = 1 << 22, // Unsigned byte (or word).
- N = 1 << 22, // Long (or short).
- U = 1 << 23, // Positive (or negative) offset/index.
- P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
- I = 1 << 25, // Immediate shifter operand (or not).
-
- B4 = 1 << 4,
- B5 = 1 << 5,
- B6 = 1 << 6,
- B7 = 1 << 7,
- B8 = 1 << 8,
- B9 = 1 << 9,
- B12 = 1 << 12,
- B16 = 1 << 16,
- B18 = 1 << 18,
- B19 = 1 << 19,
- B20 = 1 << 20,
- B21 = 1 << 21,
- B22 = 1 << 22,
- B23 = 1 << 23,
- B24 = 1 << 24,
- B25 = 1 << 25,
- B26 = 1 << 26,
- B27 = 1 << 27,
- B28 = 1 << 28,
-
- // Instruction bit masks.
- kCondMask = 15 << 28,
- kALUMask = 0x6f << 21,
- kRdMask = 15 << 12, // In str instruction.
- kCoprocessorMask = 15 << 8,
- kOpCodeMask = 15 << 21, // In data-processing instructions.
- kImm24Mask = (1 << 24) - 1,
- kOff12Mask = (1 << 12) - 1,
- kOff8Mask = (1 << 8) - 1
-};
-
-
-// -----------------------------------------------------------------------------
-// Addressing modes and instruction variants.
-
-// Condition code updating mode.
-enum SBit {
- SetCC = 1 << 20, // Set condition code.
- LeaveCC = 0 << 20 // Leave condition code unchanged.
-};
-
-
-// Status register selection.
-enum SRegister {
- CPSR = 0 << 22,
- SPSR = 1 << 22
-};
-
-
-// Shifter types for Data-processing operands as defined in section A5.1.2.
-enum ShiftOp {
- LSL = 0 << 5, // Logical shift left.
- LSR = 1 << 5, // Logical shift right.
- ASR = 2 << 5, // Arithmetic shift right.
- ROR = 3 << 5, // Rotate right.
-
- // RRX is encoded as ROR with shift_imm == 0.
- // Use a special code to make the distinction. The RRX ShiftOp is only used
- // as an argument, and will never actually be encoded. The Assembler will
- // detect it and emit the correct ROR shift operand with shift_imm == 0.
- RRX = -1,
- kNumberOfShifts = 4
-};
-
-
-// Status register fields.
-enum SRegisterField {
- CPSR_c = CPSR | 1 << 16,
- CPSR_x = CPSR | 1 << 17,
- CPSR_s = CPSR | 1 << 18,
- CPSR_f = CPSR | 1 << 19,
- SPSR_c = SPSR | 1 << 16,
- SPSR_x = SPSR | 1 << 17,
- SPSR_s = SPSR | 1 << 18,
- SPSR_f = SPSR | 1 << 19
-};
-
-// Status register field mask (or'ed SRegisterField enum values).
-typedef uint32_t SRegisterFieldMask;
-
-
-// Memory operand addressing mode.
-enum AddrMode {
- // Bit encoding P U W.
- Offset = (8|4|0) << 21, // Offset (without writeback to base).
- PreIndex = (8|4|1) << 21, // Pre-indexed addressing with writeback.
- PostIndex = (0|4|0) << 21, // Post-indexed addressing with writeback.
- NegOffset = (8|0|0) << 21, // Negative offset (without writeback to base).
- NegPreIndex = (8|0|1) << 21, // Negative pre-indexed with writeback.
- NegPostIndex = (0|0|0) << 21 // Negative post-indexed with writeback.
-};
-
-
-// Load/store multiple addressing mode.
-enum BlockAddrMode {
- // Bit encoding P U W .
- da = (0|0|0) << 21, // Decrement after.
- ia = (0|4|0) << 21, // Increment after.
- db = (8|0|0) << 21, // Decrement before.
- ib = (8|4|0) << 21, // Increment before.
- da_w = (0|0|1) << 21, // Decrement after with writeback to base.
- ia_w = (0|4|1) << 21, // Increment after with writeback to base.
- db_w = (8|0|1) << 21, // Decrement before with writeback to base.
- ib_w = (8|4|1) << 21, // Increment before with writeback to base.
-
- // Alias modes for comparison when writeback does not matter.
- da_x = (0|0|0) << 21, // Decrement after.
- ia_x = (0|4|0) << 21, // Increment after.
- db_x = (8|0|0) << 21, // Decrement before.
- ib_x = (8|4|0) << 21, // Increment before.
-
- kBlockAddrModeMask = (8|4|1) << 21
-};
-
-
-// Coprocessor load/store operand size.
-enum LFlag {
- Long = 1 << 22, // Long load/store coprocessor.
- Short = 0 << 22 // Short load/store coprocessor.
-};
-
-
-// -----------------------------------------------------------------------------
-// Supervisor Call (svc) specific support.
-
-// Special Software Interrupt codes when used in the presence of the ARM
-// simulator.
-// svc (formerly swi) provides a 24bit immediate value. Use bits 22:0 for
-// standard SoftwareInterrupCode. Bit 23 is reserved for the stop feature.
-enum SoftwareInterruptCodes {
- // transition to C code
- kCallRtRedirected= 0x10,
- // break point
- kBreakpoint= 0x20,
- // stop
- kStopCode = 1 << 23
-};
-const uint32_t kStopCodeMask = kStopCode - 1;
-const uint32_t kMaxStopCode = kStopCode - 1;
-const int32_t kDefaultStopCode = -1;
-
-
-// Type of VFP register. Determines register encoding.
-enum VFPRegPrecision {
- kSinglePrecision = 0,
- kDoublePrecision = 1
-};
-
-
-// VFP FPSCR constants.
-enum VFPConversionMode {
- kFPSCRRounding = 0,
- kDefaultRoundToZero = 1
-};
-
-// This mask does not include the "inexact" or "input denormal" cumulative
-// exceptions flags, because we usually don't want to check for it.
-const uint32_t kVFPExceptionMask = 0xf;
-const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
-const uint32_t kVFPOverflowExceptionBit = 1 << 2;
-const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
-const uint32_t kVFPInexactExceptionBit = 1 << 4;
-const uint32_t kVFPFlushToZeroMask = 1 << 24;
-
-const uint32_t kVFPNConditionFlagBit = 1 << 31;
-const uint32_t kVFPZConditionFlagBit = 1 << 30;
-const uint32_t kVFPCConditionFlagBit = 1 << 29;
-const uint32_t kVFPVConditionFlagBit = 1 << 28;
-
-
-// VFP rounding modes. See ARM DDI 0406B Page A2-29.
-enum VFPRoundingMode {
- RN = 0 << 22, // Round to Nearest.
- RP = 1 << 22, // Round towards Plus Infinity.
- RM = 2 << 22, // Round towards Minus Infinity.
- RZ = 3 << 22, // Round towards zero.
-
- // Aliases.
- kRoundToNearest = RN,
- kRoundToPlusInf = RP,
- kRoundToMinusInf = RM,
- kRoundToZero = RZ
-};
-
-const uint32_t kVFPRoundingModeMask = 3 << 22;
-
-enum CheckForInexactConversion {
- kCheckForInexactConversion,
- kDontCheckForInexactConversion
-};
-
-// -----------------------------------------------------------------------------
-// Hints.
-
-// Branch hints are not used on the ARM. They are defined so that they can
-// appear in shared function signatures, but will be ignored in ARM
-// implementations.
-enum Hint { no_hint };
-
-// Hints are not used on the arm. Negating is trivial.
-inline Hint NegateHint(Hint ignored) { return no_hint; }
-
-
-// -----------------------------------------------------------------------------
-// Specific instructions, constants, and masks.
-// These constants are declared in assembler-arm.cc, as they use named registers
-// and other constants.
-
-
-// add(sp, sp, 4) instruction (aka Pop())
-extern const Instr kPopInstruction;
-
-// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
-// register r is not encoded.
-extern const Instr kPushRegPattern;
-
-// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
-// register r is not encoded.
-extern const Instr kPopRegPattern;
-
-// mov lr, pc
-extern const Instr kMovLrPc;
-// ldr rd, [pc, #offset]
-extern const Instr kLdrPCMask;
-extern const Instr kLdrPCPattern;
-// vldr dd, [pc, #offset]
-extern const Instr kVldrDPCMask;
-extern const Instr kVldrDPCPattern;
-// blxcc rm
-extern const Instr kBlxRegMask;
-
-extern const Instr kBlxRegPattern;
-
-extern const Instr kMovMvnMask;
-extern const Instr kMovMvnPattern;
-extern const Instr kMovMvnFlip;
-extern const Instr kMovLeaveCCMask;
-extern const Instr kMovLeaveCCPattern;
-extern const Instr kMovwMask;
-extern const Instr kMovwPattern;
-extern const Instr kMovwLeaveCCFlip;
-extern const Instr kCmpCmnMask;
-extern const Instr kCmpCmnPattern;
-extern const Instr kCmpCmnFlip;
-extern const Instr kAddSubFlip;
-extern const Instr kAndBicFlip;
-
-// A mask for the Rd register for push, pop, ldr, str instructions.
-extern const Instr kLdrRegFpOffsetPattern;
-
-extern const Instr kStrRegFpOffsetPattern;
-
-extern const Instr kLdrRegFpNegOffsetPattern;
-
-extern const Instr kStrRegFpNegOffsetPattern;
-
-extern const Instr kLdrStrInstrTypeMask;
-extern const Instr kLdrStrInstrArgumentMask;
-extern const Instr kLdrStrOffsetMask;
-
-
-// -----------------------------------------------------------------------------
-// Instruction abstraction.
-
-// The class Instruction enables access to individual fields defined in the ARM
-// architecture instruction set encoding as described in figure A3-1.
-// Note that the Assembler uses typedef int32_t Instr.
-//
-// Example: Test whether the instruction at ptr does set the condition code
-// bits.
-//
-// bool InstructionSetsConditionCodes(byte* ptr) {
-// Instruction* instr = Instruction::At(ptr);
-// int type = instr->TypeValue();
-// return ((type == 0) || (type == 1)) && instr->HasS();
-// }
-//
-class Instruction {
- public:
- enum {
- kInstrSize = 4,
- kInstrSizeLog2 = 2,
- kPCReadOffset = 8
- };
-
- // Helper macro to define static accessors.
- // We use the cast to char* trick to bypass the strict anti-aliasing rules.
- #define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
- static inline return_type Name(Instr instr) { \
- char* temp = reinterpret_cast<char*>(&instr); \
- return reinterpret_cast<Instruction*>(temp)->Name(); \
- }
-
- #define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
-
- // Get the raw instruction bits.
- inline Instr InstructionBits() const {
- return *reinterpret_cast<const Instr*>(this);
- }
-
- // Set the raw instruction bits to value.
- inline void SetInstructionBits(Instr value) {
- *reinterpret_cast<Instr*>(this) = value;
- }
-
- // Read one particular bit out of the instruction bits.
- inline int Bit(int nr) const {
- return (InstructionBits() >> nr) & 1;
- }
-
- // Read a bit field's value out of the instruction bits.
- inline int Bits(int hi, int lo) const {
- return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
- }
-
- // Read a bit field out of the instruction bits.
- inline int BitField(int hi, int lo) const {
- return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
- }
-
- // Static support.
-
- // Read one particular bit out of the instruction bits.
- static inline int Bit(Instr instr, int nr) {
- return (instr >> nr) & 1;
- }
-
- // Read the value of a bit field out of the instruction bits.
- static inline int Bits(Instr instr, int hi, int lo) {
- return (instr >> lo) & ((2 << (hi - lo)) - 1);
- }
-
-
- // Read a bit field out of the instruction bits.
- static inline int BitField(Instr instr, int hi, int lo) {
- return instr & (((2 << (hi - lo)) - 1) << lo);
- }
-
-
- // Accessors for the different named fields used in the ARM encoding.
- // The naming of these accessor corresponds to figure A3-1.
- //
- // Two kind of accessors are declared:
- // - <Name>Field() will return the raw field, i.e. the field's bits at their
- // original place in the instruction encoding.
- // e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
- // 0xC0810002 ConditionField(instr) will return 0xC0000000.
- // - <Name>Value() will return the field value, shifted back to bit 0.
- // e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
- // 0xC0810002 ConditionField(instr) will return 0xC.
-
-
- // Generally applicable fields
- inline Condition ConditionValue() const {
- return static_cast<Condition>(Bits(31, 28));
- }
- inline Condition ConditionField() const {
- return static_cast<Condition>(BitField(31, 28));
- }
- DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionValue);
- DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
-
- inline int TypeValue() const { return Bits(27, 25); }
-
- inline int RnValue() const { return Bits(19, 16); }
- DECLARE_STATIC_ACCESSOR(RnValue);
- inline int RdValue() const { return Bits(15, 12); }
- DECLARE_STATIC_ACCESSOR(RdValue);
-
- inline int CoprocessorValue() const { return Bits(11, 8); }
- // Support for VFP.
- // Vn(19-16) | Vd(15-12) | Vm(3-0)
- inline int VnValue() const { return Bits(19, 16); }
- inline int VmValue() const { return Bits(3, 0); }
- inline int VdValue() const { return Bits(15, 12); }
- inline int NValue() const { return Bit(7); }
- inline int MValue() const { return Bit(5); }
- inline int DValue() const { return Bit(22); }
- inline int RtValue() const { return Bits(15, 12); }
- inline int PValue() const { return Bit(24); }
- inline int UValue() const { return Bit(23); }
- inline int Opc1Value() const { return (Bit(23) << 2) | Bits(21, 20); }
- inline int Opc2Value() const { return Bits(19, 16); }
- inline int Opc3Value() const { return Bits(7, 6); }
- inline int SzValue() const { return Bit(8); }
- inline int VLValue() const { return Bit(20); }
- inline int VCValue() const { return Bit(8); }
- inline int VAValue() const { return Bits(23, 21); }
- inline int VBValue() const { return Bits(6, 5); }
- inline int VFPNRegValue(VFPRegPrecision pre) {
- return VFPGlueRegValue(pre, 16, 7);
- }
- inline int VFPMRegValue(VFPRegPrecision pre) {
- return VFPGlueRegValue(pre, 0, 5);
- }
- inline int VFPDRegValue(VFPRegPrecision pre) {
- return VFPGlueRegValue(pre, 12, 22);
- }
-
- // Fields used in Data processing instructions
- inline int OpcodeValue() const {
- return static_cast<Opcode>(Bits(24, 21));
- }
- inline Opcode OpcodeField() const {
- return static_cast<Opcode>(BitField(24, 21));
- }
- inline int SValue() const { return Bit(20); }
- // with register
- inline int RmValue() const { return Bits(3, 0); }
- DECLARE_STATIC_ACCESSOR(RmValue);
- inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
- inline ShiftOp ShiftField() const {
- return static_cast<ShiftOp>(BitField(6, 5));
- }
- inline int RegShiftValue() const { return Bit(4); }
- inline int RsValue() const { return Bits(11, 8); }
- inline int ShiftAmountValue() const { return Bits(11, 7); }
- // with immediate
- inline int RotateValue() const { return Bits(11, 8); }
- inline int Immed8Value() const { return Bits(7, 0); }
- inline int Immed4Value() const { return Bits(19, 16); }
- inline int ImmedMovwMovtValue() const {
- return Immed4Value() << 12 | Offset12Value(); }
-
- // Fields used in Load/Store instructions
- inline int PUValue() const { return Bits(24, 23); }
- inline int PUField() const { return BitField(24, 23); }
- inline int BValue() const { return Bit(22); }
- inline int WValue() const { return Bit(21); }
- inline int LValue() const { return Bit(20); }
- // with register uses same fields as Data processing instructions above
- // with immediate
- inline int Offset12Value() const { return Bits(11, 0); }
- // multiple
- inline int RlistValue() const { return Bits(15, 0); }
- // extra loads and stores
- inline int SignValue() const { return Bit(6); }
- inline int HValue() const { return Bit(5); }
- inline int ImmedHValue() const { return Bits(11, 8); }
- inline int ImmedLValue() const { return Bits(3, 0); }
-
- // Fields used in Branch instructions
- inline int LinkValue() const { return Bit(24); }
- inline int SImmed24Value() const { return ((InstructionBits() << 8) >> 8); }
-
- // Fields used in Software interrupt instructions
- inline SoftwareInterruptCodes SvcValue() const {
- return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
- }
-
- // Test for special encodings of type 0 instructions (extra loads and stores,
- // as well as multiplications).
- inline bool IsSpecialType0() const { return (Bit(7) == 1) && (Bit(4) == 1); }
-
- // Test for miscellaneous instructions encodings of type 0 instructions.
- inline bool IsMiscType0() const { return (Bit(24) == 1)
- && (Bit(23) == 0)
- && (Bit(20) == 0)
- && ((Bit(7) == 0)); }
-
- // Test for a nop instruction, which falls under type 1.
- inline bool IsNopType1() const { return Bits(24, 0) == 0x0120F000; }
-
- // Test for a stop instruction.
- inline bool IsStop() const {
- return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);
- }
-
- // Special accessors that test for existence of a value.
- inline bool HasS() const { return SValue() == 1; }
- inline bool HasB() const { return BValue() == 1; }
- inline bool HasW() const { return WValue() == 1; }
- inline bool HasL() const { return LValue() == 1; }
- inline bool HasU() const { return UValue() == 1; }
- inline bool HasSign() const { return SignValue() == 1; }
- inline bool HasH() const { return HValue() == 1; }
- inline bool HasLink() const { return LinkValue() == 1; }
-
- // Decoding the double immediate in the vmov instruction.
- double DoubleImmedVmov() const;
-
- // Instructions are read of out a code stream. The only way to get a
- // reference to an instruction is to convert a pointer. There is no way
- // to allocate or create instances of class Instruction.
- // Use the At(pc) function to create references to Instruction.
- static Instruction* At(byte* pc) {
- return reinterpret_cast<Instruction*>(pc);
- }
-
-
- private:
- // Join split register codes, depending on single or double precision.
- // four_bit is the position of the least-significant bit of the four
- // bit specifier. one_bit is the position of the additional single bit
- // specifier.
- inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
- if (pre == kSinglePrecision) {
- return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
- }
- return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
- }
-
- // We need to prevent the creation of instances of class Instruction.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
-};
-
-
-// Helper functions for converting between register numbers and names.
-class Registers {
- public:
- // Return the name of the register.
- static const char* Name(int reg);
-
- // Lookup the register number for the name provided.
- static int Number(const char* name);
-
- struct RegisterAlias {
- int reg;
- const char* name;
- };
-
- private:
- static const char* names_[kNumRegisters];
- static const RegisterAlias aliases_[];
-};
-
-// Helper functions for converting between VFP register numbers and names.
-class VFPRegisters {
- public:
- // Return the name of the register.
- static const char* Name(int reg, bool is_double);
-
- // Lookup the register number for the name provided.
- // Set flag pointed by is_double to true if register
- // is double-precision.
- static int Number(const char* name, bool* is_double);
-
- private:
- static const char* names_[kNumVFPRegisters];
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_CONSTANTS_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/cpu-arm.cc b/src/3rdparty/v8/src/arm/cpu-arm.cc
deleted file mode 100644
index bed9503..0000000
--- a/src/3rdparty/v8/src/arm/cpu-arm.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// CPU specific code for arm independent of OS goes here.
-
-#include "v8.h"
-
-#if defined(__arm__) && !defined(_WIN32_WCE)
- #if !defined(__QNXNTO__)
- #include <sys/syscall.h> // for cache flushing.
- #else
- #include <sys/mman.h> // for cache flushing.
- #endif
-#endif
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "cpu.h"
-#include "macro-assembler.h"
-#include "simulator.h" // for cache flushing.
-
-namespace v8 {
-namespace internal {
-
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return CpuFeatures::IsSupported(VFP3);
-}
-
-
-void CPU::FlushICache(void* start, size_t size) {
- // Nothing to do flushing no instructions.
- if (size == 0) {
- return;
- }
-
-#if defined (USE_SIMULATOR)
- // Not generating ARM instructions for C-code. This means that we are
- // building an ARM emulator based target. We should notify the simulator
- // that the Icache was flushed.
- // None of this code ends up in the snapshot so there are no issues
- // around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
-#elif defined(__QNXNTO__)
- // The QNX kernel does not expose the symbol __ARM_NR_cacheflush so we
- // use the msync system call instead of the approach used on Linux
- msync(start, size, MS_SYNC|MS_INVALIDATE_ICACHE);
-#elif defined(_WIN32_WCE)
- // Windows CE compiler does not support the asm command, nor does it expose
- // __ARM_NR_cacheflush. As well as Windows CE does not support to flush a
- // region, so we need to flush the whole process.
- FlushInstructionCache(GetCurrentProcess(), NULL, NULL);
-#else
- // Ideally, we would call
- // syscall(__ARM_NR_cacheflush, start,
- // reinterpret_cast<intptr_t>(start) + size, 0);
- // however, syscall(int, ...) is not supported on all platforms, especially
- // not when using EABI, so we call the __ARM_NR_cacheflush syscall directly.
-
- register uint32_t beg asm("a1") = reinterpret_cast<uint32_t>(start);
- register uint32_t end asm("a2") =
- reinterpret_cast<uint32_t>(start) + size;
- register uint32_t flg asm("a3") = 0;
- #if defined (__arm__) && !defined(__thumb__)
- // __arm__ may be defined in thumb mode.
- register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
- asm volatile(
- "svc 0x0"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (scno));
- #else
- // r7 is reserved by the EABI in thumb mode.
- asm volatile(
- "@ Enter ARM Mode \n\t"
- "adr r3, 1f \n\t"
- "bx r3 \n\t"
- ".ALIGN 4 \n\t"
- ".ARM \n"
- "1: push {r7} \n\t"
- "mov r7, %4 \n\t"
- "svc 0x0 \n\t"
- "pop {r7} \n\t"
- "@ Enter THUMB Mode\n\t"
- "adr r3, 2f+1 \n\t"
- "bx r3 \n\t"
- ".THUMB \n"
- "2: \n\t"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
- : "r3");
- #endif
-#endif
-}
-
-
-void CPU::DebugBreak() {
-#if !defined (__arm__) || !defined(CAN_USE_ARMV5_INSTRUCTIONS)
- UNIMPLEMENTED(); // when building ARM emulator target
-#else
- asm volatile("bkpt 0");
-#endif
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/debug-arm.cc b/src/3rdparty/v8/src/arm/debug-arm.cc
deleted file mode 100644
index e9a65b2..0000000
--- a/src/3rdparty/v8/src/arm/debug-arm.cc
+++ /dev/null
@@ -1,345 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen.h"
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- // Patch the code changing the return from JS function sequence from
- // mov sp, fp
- // ldmia sp!, {fp, lr}
- // add sp, sp, #4
- // bx lr
- // to a call to the debug break return code.
- // #ifdef USE_BLX
- // ldr ip, [pc, #0]
- // blx ip
- // #else
- // mov lr, pc
- // ldr pc, [pc, #-4]
- // #endif
- // <debug break return code entry point address>
- // bktp 0
- CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
-#ifdef USE_BLX
- patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
- patcher.masm()->blx(v8::internal::ip);
-#else
- patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
- patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
-#endif
- patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry());
- patcher.masm()->bkpt(0);
-}
-
-
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceInstructions);
-}
-
-
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Patch the code changing the debug break slot code from
- // mov r2, r2
- // mov r2, r2
- // mov r2, r2
- // to a call to the debug break slot code.
- // #ifdef USE_BLX
- // ldr ip, [pc, #0]
- // blx ip
- // #else
- // mov lr, pc
- // ldr pc, [pc, #-4]
- // #endif
- // <debug break slot code entry point address>
- CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
-#ifdef USE_BLX
- patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
- patcher.masm()->blx(v8::internal::ip);
-#else
- patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
- patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
-#endif
- patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry());
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kDebugBreakSlotInstructions);
-}
-
-const bool Debug::FramePaddingLayout::kIsSupported = false;
-
-
-#define __ ACCESS_MASM(masm)
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ tst(reg, Operand(0xc0000000));
- __ Assert(eq, "Unable to encode value as smi");
- }
- __ mov(reg, Operand(reg, LSL, kSmiTagSize));
- }
- }
- __ stm(db_w, sp, object_regs | non_object_regs);
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ mov(r0, Operand::Zero()); // no arguments
- __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ ldm(ia_w, sp, object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ mov(reg, Operand(reg, LSR, kSmiTagSize));
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ mov(reg, Operand(kDebugZapValue));
- }
- }
- }
-
- // Leave the internal frame.
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
- __ mov(ip, Operand(after_break_target));
- __ ldr(ip, MemOperand(ip));
- __ Jump(ip);
-}
-
-
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC load (from ic-arm.cc).
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- [sp] : receiver
- // -----------------------------------
- // Registers r0 and r2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC store (from ic-arm.cc).
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- // Registers r0, r1, and r2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit(), 0);
-}
-
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC call (from ic-arm.cc)
- // ----------- S t a t e -------------
- // -- r2 : name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r2.bit(), 0);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // In places other than IC call sites it is expected that r0 is TOS which
- // is an object - this is not generally the case so this should be used with
- // care.
- Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
-}
-
-
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-arm.cc).
- // ----------- S t a t e -------------
- // -- r1 : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit(), 0);
-}
-
-
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-arm.cc).
- // ----------- S t a t e -------------
- // -- r1 : function
- // -- r2 : cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-arm.cc)
- // ----------- S t a t e -------------
- // -- r0 : number of arguments (not smi)
- // -- r1 : constructor function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
-}
-
-
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-arm.cc)
- // ----------- S t a t e -------------
- // -- r0 : number of arguments (not smi)
- // -- r1 : constructor function
- // -- r2 : cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
-}
-
-
-void Debug::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction. Avoid emitting
- // the constant pool in the debug break slot code.
- Assembler::BlockConstPoolScope block_const_pool(masm);
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
- __ nop(MacroAssembler::DEBUG_BREAK_NOP);
- }
- ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
- masm->InstructionsGeneratedSince(&check_codesize));
-}
-
-
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0);
-}
-
-
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on arm");
-}
-
-
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on arm");
-}
-
-const bool Debug::kFrameDropperSupported = false;
-
-#undef __
-
-
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/deoptimizer-arm.cc b/src/3rdparty/v8/src/arm/deoptimizer-arm.cc
deleted file mode 100644
index 2e1e3e3..0000000
--- a/src/3rdparty/v8/src/arm/deoptimizer-arm.cc
+++ /dev/null
@@ -1,1106 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-const int Deoptimizer::table_entry_size_ = 16;
-
-
-int Deoptimizer::patch_size() {
- const int kCallInstructionSizeInWords = 3;
- return kCallInstructionSizeInWords * Assembler::kInstrSize;
-}
-
-
-void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction* function) {
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
-
- ASSERT(function->IsOptimized());
- ASSERT(function->FunctionsInFunctionListShareSameCode());
-
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
- // Get the optimized code.
- Code* code = function->code();
- Address code_start_address = code->instruction_start();
-
- // Invalidate the relocation information, as it will become invalid by the
- // code patching below, and is not needed any more.
- code->InvalidateRelocation();
-
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
-#ifdef DEBUG
- Address prev_call_address = NULL;
-#endif
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- if (deopt_data->Pc(i)->value() == -1) continue;
- Address call_address = code_start_address + deopt_data->Pc(i)->value();
- Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
- // We need calls to have a predictable size in the unoptimized code, but
- // this is optimized code, so we don't have to have a predictable size.
- int call_size_in_bytes =
- MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
- RelocInfo::NONE32);
- int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
- ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
- ASSERT(call_size_in_bytes <= patch_size());
- CodePatcher patcher(call_address, call_size_in_words);
- patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
- ASSERT(prev_call_address == NULL ||
- call_address >= prev_call_address + patch_size());
- ASSERT(call_address + patch_size() <= code->instruction_end());
-#ifdef DEBUG
- prev_call_address = call_address;
-#endif
- }
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
- }
-}
-
-
-static const int32_t kBranchBeforeInterrupt = 0x5a000004;
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- // The back edge bookkeeping code matches the pattern:
- //
- // <decrement profiling counter>
- // 2a 00 00 01 bpl ok
- // e5 9f c? ?? ldr ip, [pc, <stack guard address>]
- // e1 2f ff 3c blx ip
- ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
-
- // We patch the code to the following form:
- //
- // <decrement profiling counter>
- // e1 a0 00 00 mov r0, r0 (NOP)
- // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
- // e1 2f ff 3c blx ip
- // and overwrite the constant containing the
- // address of the stack check stub.
-
- // Replace conditional jump with NOP.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->nop();
-
- // Replace the stack check address in the constant pool
- // with the entry address of the replacement code.
- uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address stack_check_address_pointer = pc_after + stack_check_address_offset;
- ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
- reinterpret_cast<uint32_t>(check_code->entry()));
- Memory::uint32_at(stack_check_address_pointer) =
- reinterpret_cast<uint32_t>(replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
-
- // Replace NOP with conditional jump.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->b(+16, pl);
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
-
- // Replace the stack check address in the constant pool
- // with the entry address of the replacement code.
- uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address stack_check_address_pointer = pc_after + stack_check_address_offset;
- ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
- reinterpret_cast<uint32_t>(replacement_code->entry()));
- Memory::uint32_at(stack_check_address_pointer) =
- reinterpret_cast<uint32_t>(check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, check_code);
-}
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
-
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
- output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
-void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
- int frame_index) {
- //
- // FROM TO
- // | .... | | .... |
- // +-------------------------+ +-------------------------+
- // | JSFunction continuation | | JSFunction continuation |
- // +-------------------------+ +-------------------------+
- // | | saved frame (fp) | | saved frame (fp) |
- // | +=========================+<-fp +=========================+<-fp
- // | | JSFunction context | | JSFunction context |
- // v +-------------------------+ +-------------------------|
- // | COMPILED_STUB marker | | STUB_FAILURE marker |
- // +-------------------------+ +-------------------------+
- // | | | caller args.arguments_ |
- // | ... | +-------------------------+
- // | | | caller args.length_ |
- // |-------------------------|<-sp +-------------------------+
- // | caller args pointer |
- // +-------------------------+
- // | caller stack param 1 |
- // parameters in registers +-------------------------+
- // and spilled to stack | .... |
- // +-------------------------+
- // | caller stack param n |
- // +-------------------------+<-sp
- // r0 = number of parameters
- // r1 = failure handler address
- // fp = saved frame
- // cp = JSFunction context
- //
-
- ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
- int major_key = compiled_code_->major_key();
- CodeStubInterfaceDescriptor* descriptor =
- isolate_->code_stub_interface_descriptor(major_key);
-
- // The output frame must have room for all pushed register parameters
- // and the standard stack frame slots. Include space for an argument
- // object to the callee and optionally the space to pass the argument
- // object to the stub failure handler.
- int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
- sizeof(Arguments) + kPointerSize;
- int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
- int input_frame_size = input_->GetFrameSize();
- int output_frame_size = height_in_bytes + fixed_frame_size;
- if (trace_) {
- PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
- CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
- height_in_bytes);
- }
-
- // The stub failure trampoline is a single frame.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, NULL);
- output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ASSERT(frame_index == 0);
- output_[frame_index] = output_frame;
-
- // The top address for the output frame can be computed from the input
- // frame pointer and the output frame's height. Subtract space for the
- // context and function slots.
- intptr_t top_address = input_->GetRegister(fp.code()) - (2 * kPointerSize) -
- height_in_bytes;
- output_frame->SetTop(top_address);
-
- // Read caller's PC (JSFunction continuation) from the input frame.
- intptr_t input_frame_offset = input_frame_size - kPointerSize;
- intptr_t output_frame_offset = output_frame_size - kPointerSize;
- intptr_t value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Read caller's FP from the input frame, and set this frame's FP.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- intptr_t frame_ptr = input_->GetRegister(fp.code());
- output_frame->SetRegister(fp.code(), frame_ptr);
- output_frame->SetFp(frame_ptr);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // The context can be gotten from the input frame.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetRegister(cp.code(), value);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_frame_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (stub fail sentinel)\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- int caller_arg_count = 0;
- if (descriptor->stack_parameter_count_ != NULL) {
- caller_arg_count =
- input_->GetRegister(descriptor->stack_parameter_count_->code());
- }
-
- // Build the Arguments object for the caller's parameters and a pointer to it.
- output_frame_offset -= kPointerSize;
- value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
- (caller_arg_count - 1) * kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.arguments\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = caller_arg_count;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.length\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = frame_ptr - (output_frame_size - output_frame_offset) -
- StandardFrameConstants::kMarkerOffset + kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args*\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Copy the register parameters to the failure frame.
- for (int i = 0; i < descriptor->register_param_count_; ++i) {
- output_frame_offset -= kPointerSize;
- DoTranslateCommand(iterator, 0, output_frame_offset);
- }
-
- ASSERT(0 == output_frame_offset);
-
- for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-
- ApiFunction function(descriptor->deoptimization_handler_);
- ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
- intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
- output_frame->SetRegister(r0.code(), params);
- output_frame->SetRegister(r1.code(), handler);
-
- // Compute this frame's PC, state, and continuation.
- Code* trampoline = NULL;
- int extra = descriptor->extra_expression_stack_count_;
- StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
- ASSERT(trampoline != NULL);
- output_frame->SetPc(reinterpret_cast<intptr_t>(
- trampoline->instruction_start()));
- output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
- Code* notify_failure =
- isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(notify_failure->entry()));
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = 8 * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
- // Construct stub can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The output frame reflects a JSConstructStubGeneric frame.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // Constructor function being invoked by the stub.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
-
- ASSERT(0 == output_offset);
-
- uint32_t pc = reinterpret_cast<uint32_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
-
-
-// This code is very similar to ia32 code, but relies on register names (fp, sp)
-// and how the frame is laid out.
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) {
- output_frame->SetRegister(fp.code(), fp_value);
- }
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(cp.code(), value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
- if (is_topmost) {
- output_frame->SetRegister(pc.code(), pc_value);
- }
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
-}
-
-
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers fp and sp are set to the correct values though.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
- }
-}
-
-
-#define __ masm()->
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::EntryGenerator::Generate() {
- GeneratePrologue();
-
- Isolate* isolate = masm()->isolate();
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- // Everything but pc, lr and ip which will be saved but not restored.
- RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
-
- const int kDoubleRegsSize =
- kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Save all allocatable VFP registers before messing with them.
- ASSERT(kDoubleRegZero.code() == 14);
- ASSERT(kScratchDoubleReg.code() == 15);
-
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(ip);
-
- // Push registers d0-d13, and possibly d16-d31, on the stack.
- // If d16-d31 are not pushed, decrease the stack pointer instead.
- __ vstm(db_w, sp, d16, d31, ne);
- __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
- __ vstm(db_w, sp, d0, d13);
- } else {
- __ sub(sp, sp, Operand(kDoubleRegsSize));
- }
-
- // Push all 16 registers (needed to populate FrameDescription::registers_).
- // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
- // handle this a bit differently.
- __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
-
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
-
- // Get the bailout id from the stack.
- __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
-
- // Get the address of the location in the code object if possible (r3) (return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register r4.
- if (type() == EAGER) {
- __ mov(r3, Operand::Zero());
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else if (type() == OSR) {
- __ mov(r3, lr);
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ mov(r3, lr);
- // Correct two words for bailout id and return address.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
- __ sub(r4, fp, r4);
-
- // Allocate a new deoptimizer object.
- // Pass four arguments in r0 to r3 and fifth argument on stack.
- __ PrepareCallCFunction(6, r5);
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(r1, Operand(type())); // bailout type,
- // r2: bailout id already loaded.
- // r3: code address or 0 already loaded.
- __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
- __ mov(r5, Operand(ExternalReference::isolate_address()));
- __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
- // Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
- }
-
- // Preserve "deoptimizer" object in register r0 and get the input
- // frame descriptor pointer to r1 (deoptimizer->input_);
- __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- ASSERT(Register::kNumRegisters == kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ ldr(r2, MemOperand(sp, i * kPointerSize));
- __ str(r2, MemOperand(r1, offset));
- }
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Copy VFP registers to
- // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ vldr(d0, sp, src_offset);
- __ vstr(d0, r1, dst_offset);
- }
- }
-
- // Remove the bailout id, eventually return address, and the saved registers
- // from the stack.
- if (type() == EAGER || type() == OSR) {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
-
- // Compute a pointer to the unwinding limit in register r2; that is
- // the first stack slot not part of the input frame.
- __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
- __ add(r2, r2, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
- Label pop_loop;
- Label pop_loop_header;
- __ b(&pop_loop_header);
- __ bind(&pop_loop);
- __ pop(r4);
- __ str(r4, MemOperand(r3, 0));
- __ add(r3, r3, Operand(sizeof(uint32_t)));
- __ bind(&pop_loop_header);
- __ cmp(r2, sp);
- __ b(ne, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(r0); // Preserve deoptimizer object across call.
- // r0: deoptimizer object; r1: scratch.
- __ PrepareCallCFunction(1, r1);
- // Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
- }
- __ pop(r0); // Restore deoptimizer object (class Deoptimizer).
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
- // Outer loop state: r4 = current "FrameDescription** output_",
- // r1 = one past the last FrameDescription**.
- __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
- __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
- __ add(r1, r4, Operand(r1, LSL, 2));
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
- __ ldr(r2, MemOperand(r4, 0)); // output_[ix]
- __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ sub(r3, r3, Operand(sizeof(uint32_t)));
- __ add(r6, r2, Operand(r3));
- __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
- __ push(r7);
- __ bind(&inner_loop_header);
- __ cmp(r3, Operand::Zero());
- __ b(ne, &inner_push_loop); // test for gt?
- __ add(r4, r4, Operand(kPointerSize));
- __ bind(&outer_loop_header);
- __ cmp(r4, r1);
- __ b(lt, &outer_push_loop);
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(ip);
-
- __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
- int src_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
- if (i == kDoubleRegZero.code()) continue;
- if (i == kScratchDoubleReg.code()) continue;
-
- const DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
- src_offset += kDoubleSize;
- }
- }
-
- // Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
- __ push(r6);
- }
-
- __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
- __ push(r6);
- __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
- __ push(r6);
-
- // Push the registers from the last output frame.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ ldr(r6, MemOperand(r2, offset));
- __ push(r6);
- }
-
- // Restore the registers from the stack.
- __ ldm(ia_w, sp, restored_regs); // all but pc registers.
- __ pop(ip); // remove sp
- __ pop(ip); // remove lr
-
- __ InitializeRootRegister();
-
- __ pop(ip); // remove pc
- __ pop(r7); // get continuation, leave pc on stack
- __ pop(lr);
- __ Jump(r7);
- __ stop("Unreachable.");
-}
-
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries. Note that any
- // registers may be still live.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- if (type() == EAGER) {
- __ nop();
- } else {
- // Emulate ia32 like call by pushing return address to stack.
- __ push(lr);
- }
- __ mov(ip, Operand(i));
- __ push(ip);
- __ b(&done);
- ASSERT(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
-}
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/arm/disasm-arm.cc b/src/3rdparty/v8/src/arm/disasm-arm.cc
deleted file mode 100644
index dec62b3..0000000
--- a/src/3rdparty/v8/src/arm/disasm-arm.cc
+++ /dev/null
@@ -1,1572 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// A Disassembler object is used to disassemble a block of code instruction by
-// instruction. The default implementation of the NameConverter object can be
-// overriden to modify register names or to do symbol lookup on addresses.
-//
-// The example below will disassemble a block of code and print it to stdout.
-//
-// NameConverter converter;
-// Disassembler d(converter);
-// for (byte* pc = begin; pc < end;) {
-// v8::internal::EmbeddedVector<char, 256> buffer;
-// byte* prev_pc = pc;
-// pc += d.InstructionDecode(buffer, pc);
-// printf("%p %08x %s\n",
-// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
-// }
-//
-// The Disassembler class also has a convenience method to disassemble a block
-// of code into a FILE*, meaning that the above functionality could also be
-// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
-
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-#ifndef WIN32
-#include <stdint.h>
-#endif
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "constants-arm.h"
-#include "disasm.h"
-#include "macro-assembler.h"
-#include "platform.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-//------------------------------------------------------------------------------
-
-// Decoder decodes and disassembles instructions into an output buffer.
-// It uses the converter to convert register names and call destinations into
-// more informative description.
-class Decoder {
- public:
- Decoder(const disasm::NameConverter& converter,
- Vector<char> out_buffer)
- : converter_(converter),
- out_buffer_(out_buffer),
- out_buffer_pos_(0) {
- out_buffer_[out_buffer_pos_] = '\0';
- }
-
- ~Decoder() {}
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(byte* instruction);
-
- static bool IsConstantPoolAt(byte* instr_ptr);
- static int ConstantPoolSizeAt(byte* instr_ptr);
-
- private:
- // Bottleneck functions to print into the out_buffer.
- void PrintChar(const char ch);
- void Print(const char* str);
-
- // Printing of common values.
- void PrintRegister(int reg);
- void PrintSRegister(int reg);
- void PrintDRegister(int reg);
- int FormatVFPRegister(Instruction* instr, const char* format);
- void PrintMovwMovt(Instruction* instr);
- int FormatVFPinstruction(Instruction* instr, const char* format);
- void PrintCondition(Instruction* instr);
- void PrintShiftRm(Instruction* instr);
- void PrintShiftImm(Instruction* instr);
- void PrintShiftSat(Instruction* instr);
- void PrintPU(Instruction* instr);
- void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
-
- // Handle formatting of instructions and their options.
- int FormatRegister(Instruction* instr, const char* option);
- int FormatOption(Instruction* instr, const char* option);
- void Format(Instruction* instr, const char* format);
- void Unknown(Instruction* instr);
-
- // Each of these functions decodes one particular instruction type, a 3-bit
- // field in the instruction encoding.
- // Types 0 and 1 are combined as they are largely the same except for the way
- // they interpret the shifter operand.
- void DecodeType01(Instruction* instr);
- void DecodeType2(Instruction* instr);
- void DecodeType3(Instruction* instr);
- void DecodeType4(Instruction* instr);
- void DecodeType5(Instruction* instr);
- void DecodeType6(Instruction* instr);
- // Type 7 includes special Debugger instructions.
- int DecodeType7(Instruction* instr);
- // For VFP support.
- void DecodeTypeVFP(Instruction* instr);
- void DecodeType6CoprocessorIns(Instruction* instr);
-
- void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
- void DecodeVCMP(Instruction* instr);
- void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
- void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
-
- const disasm::NameConverter& converter_;
- Vector<char> out_buffer_;
- int out_buffer_pos_;
-
- DISALLOW_COPY_AND_ASSIGN(Decoder);
-};
-
-
-// Support for assertions in the Decoder formatting functions.
-#define STRING_STARTS_WITH(string, compare_string) \
- (strncmp(string, compare_string, strlen(compare_string)) == 0)
-
-
-// Append the ch to the output buffer.
-void Decoder::PrintChar(const char ch) {
- out_buffer_[out_buffer_pos_++] = ch;
-}
-
-
-// Append the str to the output buffer.
-void Decoder::Print(const char* str) {
- char cur = *str++;
- while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
- PrintChar(cur);
- cur = *str++;
- }
- out_buffer_[out_buffer_pos_] = 0;
-}
-
-
-// These condition names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-static const char* cond_names[kNumberOfConditions] = {
- "eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" ,
- "hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
-};
-
-
-// Print the condition guarding the instruction.
-void Decoder::PrintCondition(Instruction* instr) {
- Print(cond_names[instr->ConditionValue()]);
-}
-
-
-// Print the register name according to the active name converter.
-void Decoder::PrintRegister(int reg) {
- Print(converter_.NameOfCPURegister(reg));
-}
-
-// Print the VFP S register name according to the active name converter.
-void Decoder::PrintSRegister(int reg) {
- Print(VFPRegisters::Name(reg, false));
-}
-
-// Print the VFP D register name according to the active name converter.
-void Decoder::PrintDRegister(int reg) {
- Print(VFPRegisters::Name(reg, true));
-}
-
-
-// These shift names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-static const char* const shift_names[kNumberOfShifts] = {
- "lsl", "lsr", "asr", "ror"
-};
-
-
-// Print the register shift operands for the instruction. Generally used for
-// data processing instructions.
-void Decoder::PrintShiftRm(Instruction* instr) {
- ShiftOp shift = instr->ShiftField();
- int shift_index = instr->ShiftValue();
- int shift_amount = instr->ShiftAmountValue();
- int rm = instr->RmValue();
-
- PrintRegister(rm);
-
- if ((instr->RegShiftValue() == 0) && (shift == LSL) && (shift_amount == 0)) {
- // Special case for using rm only.
- return;
- }
- if (instr->RegShiftValue() == 0) {
- // by immediate
- if ((shift == ROR) && (shift_amount == 0)) {
- Print(", RRX");
- return;
- } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
- shift_amount = 32;
- }
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[shift_index],
- shift_amount);
- } else {
- // by register
- int rs = instr->RsValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s ", shift_names[shift_index]);
- PrintRegister(rs);
- }
-}
-
-
-// Print the immediate operand for the instruction. Generally used for data
-// processing instructions.
-void Decoder::PrintShiftImm(Instruction* instr) {
- int rotate = instr->RotateValue() * 2;
- int immed8 = instr->Immed8Value();
- int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d", imm);
-}
-
-
-// Print the optional shift and immediate used by saturating instructions.
-void Decoder::PrintShiftSat(Instruction* instr) {
- int shift = instr->Bits(11, 7);
- if (shift > 0) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[instr->Bit(6) * 2],
- instr->Bits(11, 7));
- }
-}
-
-
-// Print PU formatting to reduce complexity of FormatOption.
-void Decoder::PrintPU(Instruction* instr) {
- switch (instr->PUField()) {
- case da_x: {
- Print("da");
- break;
- }
- case ia_x: {
- Print("ia");
- break;
- }
- case db_x: {
- Print("db");
- break;
- }
- case ib_x: {
- Print("ib");
- break;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
-// the FormatOption method.
-void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
- switch (svc) {
- case kCallRtRedirected:
- Print("call rt redirected");
- return;
- case kBreakpoint:
- Print("breakpoint");
- return;
- default:
- if (svc >= kStopCode) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d - 0x%x",
- svc & kStopCodeMask,
- svc & kStopCodeMask);
- } else {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- svc);
- }
- return;
- }
-}
-
-
-// Handle all register based formatting in this function to reduce the
-// complexity of FormatOption.
-int Decoder::FormatRegister(Instruction* instr, const char* format) {
- ASSERT(format[0] == 'r');
- if (format[1] == 'n') { // 'rn: Rn register
- int reg = instr->RnValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 'd') { // 'rd: Rd register
- int reg = instr->RdValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 's') { // 'rs: Rs register
- int reg = instr->RsValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 'm') { // 'rm: Rm register
- int reg = instr->RmValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 't') { // 'rt: Rt register
- int reg = instr->RtValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 'l') {
- // 'rlist: register list for load and store multiple instructions
- ASSERT(STRING_STARTS_WITH(format, "rlist"));
- int rlist = instr->RlistValue();
- int reg = 0;
- Print("{");
- // Print register list in ascending order, by scanning the bit mask.
- while (rlist != 0) {
- if ((rlist & 1) != 0) {
- PrintRegister(reg);
- if ((rlist >> 1) != 0) {
- Print(", ");
- }
- }
- reg++;
- rlist >>= 1;
- }
- Print("}");
- return 5;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-// Handle all VFP register based formatting in this function to reduce the
-// complexity of FormatOption.
-int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
- ASSERT((format[0] == 'S') || (format[0] == 'D'));
-
- VFPRegPrecision precision =
- format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
-
- int retval = 2;
- int reg = -1;
- if (format[1] == 'n') {
- reg = instr->VFPNRegValue(precision);
- } else if (format[1] == 'm') {
- reg = instr->VFPMRegValue(precision);
- } else if (format[1] == 'd') {
- if ((instr->TypeValue() == 7) &&
- (instr->Bit(24) == 0x0) &&
- (instr->Bits(11, 9) == 0x5) &&
- (instr->Bit(4) == 0x1)) {
- // vmov.32 has Vd in a different place.
- reg = instr->Bits(19, 16) | (instr->Bit(7) << 4);
- } else {
- reg = instr->VFPDRegValue(precision);
- }
-
- if (format[2] == '+') {
- int immed8 = instr->Immed8Value();
- if (format[0] == 'S') reg += immed8 - 1;
- if (format[0] == 'D') reg += (immed8 / 2 - 1);
- }
- if (format[2] == '+') retval = 3;
- } else {
- UNREACHABLE();
- }
-
- if (precision == kSinglePrecision) {
- PrintSRegister(reg);
- } else {
- PrintDRegister(reg);
- }
-
- return retval;
-}
-
-
-int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
- Print(format);
- return 0;
-}
-
-
-// Print the movw or movt instruction.
-void Decoder::PrintMovwMovt(Instruction* instr) {
- int imm = instr->ImmedMovwMovtValue();
- int rd = instr->RdValue();
- PrintRegister(rd);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", #%d", imm);
-}
-
-
-// FormatOption takes a formatting string and interprets it based on
-// the current instructions. The format string points to the first
-// character of the option string (the option escape has already been
-// consumed by the caller.) FormatOption returns the number of
-// characters that were consumed from the formatting string.
-int Decoder::FormatOption(Instruction* instr, const char* format) {
- switch (format[0]) {
- case 'a': { // 'a: accumulate multiplies
- if (instr->Bit(21) == 0) {
- Print("ul");
- } else {
- Print("la");
- }
- return 1;
- }
- case 'b': { // 'b: byte loads or stores
- if (instr->HasB()) {
- Print("b");
- }
- return 1;
- }
- case 'c': { // 'cond: conditional execution
- ASSERT(STRING_STARTS_WITH(format, "cond"));
- PrintCondition(instr);
- return 4;
- }
- case 'd': { // 'd: vmov double immediate.
- double d = instr->DoubleImmedVmov();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%g", d);
- return 1;
- }
- case 'f': { // 'f: bitfield instructions - v7 and above.
- uint32_t lsbit = instr->Bits(11, 7);
- uint32_t width = instr->Bits(20, 16) + 1;
- if (instr->Bit(21) == 0) {
- // BFC/BFI:
- // Bits 20-16 represent most-significant bit. Covert to width.
- width -= lsbit;
- ASSERT(width > 0);
- }
- ASSERT((width + lsbit) <= 32);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d, #%d", lsbit, width);
- return 1;
- }
- case 'h': { // 'h: halfword operation for extra loads and stores
- if (instr->HasH()) {
- Print("h");
- } else {
- Print("b");
- }
- return 1;
- }
- case 'i': { // 'i: immediate value from adjacent bits.
- // Expects tokens in the form imm%02d@%02d, i.e. imm05@07, imm10@16
- int width = (format[3] - '0') * 10 + (format[4] - '0');
- int lsb = (format[6] - '0') * 10 + (format[7] - '0');
-
- ASSERT((width >= 1) && (width <= 32));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width + lsb) <= 32);
-
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- instr->Bits(width + lsb - 1, lsb));
- return 8;
- }
- case 'l': { // 'l: branch and link
- if (instr->HasLink()) {
- Print("l");
- }
- return 1;
- }
- case 'm': {
- if (format[1] == 'w') {
- // 'mw: movt/movw instructions.
- PrintMovwMovt(instr);
- return 2;
- }
- if (format[1] == 'e') { // 'memop: load/store instructions.
- ASSERT(STRING_STARTS_WITH(format, "memop"));
- if (instr->HasL()) {
- Print("ldr");
- } else {
- if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0) &&
- (instr->Bits(7, 6) == 3) && (instr->Bit(4) == 1)) {
- if (instr->Bit(5) == 1) {
- Print("strd");
- } else {
- Print("ldrd");
- }
- return 5;
- }
- Print("str");
- }
- return 5;
- }
- // 'msg: for simulator break instructions
- ASSERT(STRING_STARTS_WITH(format, "msg"));
- byte* str =
- reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s", converter_.NameInCode(str));
- return 3;
- }
- case 'o': {
- if ((format[3] == '1') && (format[4] == '2')) {
- // 'off12: 12-bit offset for load and store instructions
- ASSERT(STRING_STARTS_WITH(format, "off12"));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", instr->Offset12Value());
- return 5;
- } else if (format[3] == '0') {
- // 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
- ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- (instr->Bits(19, 8) << 4) +
- instr->Bits(3, 0));
- return 15;
- }
- // 'off8: 8-bit offset for extra load and store instructions
- ASSERT(STRING_STARTS_WITH(format, "off8"));
- int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", offs8);
- return 4;
- }
- case 'p': { // 'pu: P and U bits for load and store instructions
- ASSERT(STRING_STARTS_WITH(format, "pu"));
- PrintPU(instr);
- return 2;
- }
- case 'r': {
- return FormatRegister(instr, format);
- }
- case 's': {
- if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
- if (format[6] == 'o') { // 'shift_op
- ASSERT(STRING_STARTS_WITH(format, "shift_op"));
- if (instr->TypeValue() == 0) {
- PrintShiftRm(instr);
- } else {
- ASSERT(instr->TypeValue() == 1);
- PrintShiftImm(instr);
- }
- return 8;
- } else if (format[6] == 's') { // 'shift_sat.
- ASSERT(STRING_STARTS_WITH(format, "shift_sat"));
- PrintShiftSat(instr);
- return 9;
- } else { // 'shift_rm
- ASSERT(STRING_STARTS_WITH(format, "shift_rm"));
- PrintShiftRm(instr);
- return 8;
- }
- } else if (format[1] == 'v') { // 'svc
- ASSERT(STRING_STARTS_WITH(format, "svc"));
- PrintSoftwareInterrupt(instr->SvcValue());
- return 3;
- } else if (format[1] == 'i') { // 'sign: signed extra loads and stores
- ASSERT(STRING_STARTS_WITH(format, "sign"));
- if (instr->HasSign()) {
- Print("s");
- }
- return 4;
- }
- // 's: S field of data processing instructions
- if (instr->HasS()) {
- Print("s");
- }
- return 1;
- }
- case 't': { // 'target: target of branch instructions
- ASSERT(STRING_STARTS_WITH(format, "target"));
- int off = (instr->SImmed24Value() << 2) + 8;
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%+d -> %s",
- off,
- converter_.NameOfAddress(
- reinterpret_cast<byte*>(instr) + off));
- return 6;
- }
- case 'u': { // 'u: signed or unsigned multiplies
- // The manual gets the meaning of bit 22 backwards in the multiply
- // instruction overview on page A3.16.2. The instructions that
- // exist in u and s variants are the following:
- // smull A4.1.87
- // umull A4.1.129
- // umlal A4.1.128
- // smlal A4.1.76
- // For these 0 means u and 1 means s. As can be seen on their individual
- // pages. The other 18 mul instructions have the bit set or unset in
- // arbitrary ways that are unrelated to the signedness of the instruction.
- // None of these 18 instructions exist in both a 'u' and an 's' variant.
-
- if (instr->Bit(22) == 0) {
- Print("u");
- } else {
- Print("s");
- }
- return 1;
- }
- case 'v': {
- return FormatVFPinstruction(instr, format);
- }
- case 'S':
- case 'D': {
- return FormatVFPRegister(instr, format);
- }
- case 'w': { // 'w: W field of load and store instructions
- if (instr->HasW()) {
- Print("!");
- }
- return 1;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-// Format takes a formatting string for a whole instruction and prints it into
-// the output buffer. All escaped options are handed to FormatOption to be
-// parsed further.
-void Decoder::Format(Instruction* instr, const char* format) {
- char cur = *format++;
- while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
- if (cur == '\'') { // Single quote is used as the formatting escape.
- format += FormatOption(instr, format);
- } else {
- out_buffer_[out_buffer_pos_++] = cur;
- }
- cur = *format++;
- }
- out_buffer_[out_buffer_pos_] = '\0';
-}
-
-
-// The disassembler may end up decoding data inlined in the code. We do not want
-// it to crash if the data does not ressemble any known instruction.
-#define VERIFY(condition) \
-if(!(condition)) { \
- Unknown(instr); \
- return; \
-}
-
-
-// For currently unimplemented decodings the disassembler calls Unknown(instr)
-// which will just print "unknown" of the instruction bits.
-void Decoder::Unknown(Instruction* instr) {
- Format(instr, "unknown");
-}
-
-
-void Decoder::DecodeType01(Instruction* instr) {
- int type = instr->TypeValue();
- if ((type == 0) && instr->IsSpecialType0()) {
- // multiply instruction or extra loads and stores
- if (instr->Bits(7, 4) == 9) {
- if (instr->Bit(24) == 0) {
- // multiply instructions
- if (instr->Bit(23) == 0) {
- if (instr->Bit(21) == 0) {
- // The MUL instruction description (A 4.1.33) refers to Rd as being
- // the destination for the operation, but it confusingly uses the
- // Rn field to encode it.
- Format(instr, "mul'cond's 'rn, 'rm, 'rs");
- } else {
- if (instr->Bit(22) == 0) {
- // The MLA instruction description (A 4.1.28) refers to the order
- // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
- // Rn field to encode the Rd register and the Rd field to encode
- // the Rn register.
- Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
- } else {
- // The MLS instruction description (A 4.1.29) refers to the order
- // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
- // Rn field to encode the Rd register and the Rd field to encode
- // the Rn register.
- Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
- }
- }
- } else {
- // The signed/long multiply instructions use the terms RdHi and RdLo
- // when referring to the target registers. They are mapped to the Rn
- // and Rd fields as follows:
- // RdLo == Rd field
- // RdHi == Rn field
- // The order of registers is: <RdLo>, <RdHi>, <Rm>, <Rs>
- Format(instr, "'um'al'cond's 'rd, 'rn, 'rm, 'rs");
- }
- } else {
- Unknown(instr); // not used by V8
- }
- } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
- // ldrd, strd
- switch (instr->PUField()) {
- case da_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond's 'rd, ['rn], -'rm");
- } else {
- Format(instr, "'memop'cond's 'rd, ['rn], #-'off8");
- }
- break;
- }
- case ia_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond's 'rd, ['rn], +'rm");
- } else {
- Format(instr, "'memop'cond's 'rd, ['rn], #+'off8");
- }
- break;
- }
- case db_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond's 'rd, ['rn, -'rm]'w");
- } else {
- Format(instr, "'memop'cond's 'rd, ['rn, #-'off8]'w");
- }
- break;
- }
- case ib_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond's 'rd, ['rn, +'rm]'w");
- } else {
- Format(instr, "'memop'cond's 'rd, ['rn, #+'off8]'w");
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
- } else {
- // extra load/store instructions
- switch (instr->PUField()) {
- case da_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
- } else {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
- }
- break;
- }
- case ia_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
- } else {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
- }
- break;
- }
- case db_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
- } else {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
- }
- break;
- }
- case ib_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
- } else {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
- return;
- }
- } else if ((type == 0) && instr->IsMiscType0()) {
- if (instr->Bits(22, 21) == 1) {
- switch (instr->BitField(7, 4)) {
- case BX:
- Format(instr, "bx'cond 'rm");
- break;
- case BLX:
- Format(instr, "blx'cond 'rm");
- break;
- case BKPT:
- Format(instr, "bkpt 'off0to3and8to19");
- break;
- default:
- Unknown(instr); // not used by V8
- break;
- }
- } else if (instr->Bits(22, 21) == 3) {
- switch (instr->BitField(7, 4)) {
- case CLZ:
- Format(instr, "clz'cond 'rd, 'rm");
- break;
- default:
- Unknown(instr); // not used by V8
- break;
- }
- } else {
- Unknown(instr); // not used by V8
- }
- } else if ((type == 1) && instr->IsNopType1()) {
- Format(instr, "nop'cond");
- } else {
- switch (instr->OpcodeField()) {
- case AND: {
- Format(instr, "and'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case EOR: {
- Format(instr, "eor'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case SUB: {
- Format(instr, "sub'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case RSB: {
- Format(instr, "rsb'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case ADD: {
- Format(instr, "add'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case ADC: {
- Format(instr, "adc'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case SBC: {
- Format(instr, "sbc'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case RSC: {
- Format(instr, "rsc'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case TST: {
- if (instr->HasS()) {
- Format(instr, "tst'cond 'rn, 'shift_op");
- } else {
- Format(instr, "movw'cond 'mw");
- }
- break;
- }
- case TEQ: {
- if (instr->HasS()) {
- Format(instr, "teq'cond 'rn, 'shift_op");
- } else {
- // Other instructions matching this pattern are handled in the
- // miscellaneous instructions part above.
- UNREACHABLE();
- }
- break;
- }
- case CMP: {
- if (instr->HasS()) {
- Format(instr, "cmp'cond 'rn, 'shift_op");
- } else {
- Format(instr, "movt'cond 'mw");
- }
- break;
- }
- case CMN: {
- if (instr->HasS()) {
- Format(instr, "cmn'cond 'rn, 'shift_op");
- } else {
- // Other instructions matching this pattern are handled in the
- // miscellaneous instructions part above.
- UNREACHABLE();
- }
- break;
- }
- case ORR: {
- Format(instr, "orr'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case MOV: {
- Format(instr, "mov'cond's 'rd, 'shift_op");
- break;
- }
- case BIC: {
- Format(instr, "bic'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case MVN: {
- Format(instr, "mvn'cond's 'rd, 'shift_op");
- break;
- }
- default: {
- // The Opcode field is a 4-bit field.
- UNREACHABLE();
- break;
- }
- }
- }
-}
-
-
-void Decoder::DecodeType2(Instruction* instr) {
- switch (instr->PUField()) {
- case da_x: {
- if (instr->HasW()) {
- Unknown(instr); // not used in V8
- return;
- }
- Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
- break;
- }
- case ia_x: {
- if (instr->HasW()) {
- Unknown(instr); // not used in V8
- return;
- }
- Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
- break;
- }
- case db_x: {
- Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
- break;
- }
- case ib_x: {
- Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void Decoder::DecodeType3(Instruction* instr) {
- switch (instr->PUField()) {
- case da_x: {
- VERIFY(!instr->HasW());
- Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
- break;
- }
- case ia_x: {
- if (instr->HasW()) {
- VERIFY(instr->Bits(5, 4) == 0x1);
- if (instr->Bit(22) == 0x1) {
- Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
- } else {
- UNREACHABLE(); // SSAT.
- }
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
- }
- break;
- }
- case db_x: {
- if (FLAG_enable_sudiv) {
- if (!instr->HasW()) {
- if (instr->Bits(5, 4) == 0x1) {
- if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
- // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
- Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
- break;
- }
- }
- }
- }
- Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
- break;
- }
- case ib_x: {
- if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
- uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
- uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
- uint32_t msbit = widthminus1 + lsbit;
- if (msbit <= 31) {
- if (instr->Bit(22)) {
- Format(instr, "ubfx'cond 'rd, 'rm, 'f");
- } else {
- Format(instr, "sbfx'cond 'rd, 'rm, 'f");
- }
- } else {
- UNREACHABLE();
- }
- } else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
- uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
- uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
- if (msbit >= lsbit) {
- if (instr->RmValue() == 15) {
- Format(instr, "bfc'cond 'rd, 'f");
- } else {
- Format(instr, "bfi'cond 'rd, 'rm, 'f");
- }
- } else {
- UNREACHABLE();
- }
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void Decoder::DecodeType4(Instruction* instr) {
- if (instr->Bit(22) != 0) {
- // Privileged mode currently not supported.
- Unknown(instr);
- } else {
- if (instr->HasL()) {
- Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
- } else {
- Format(instr, "stm'cond'pu 'rn'w, 'rlist");
- }
- }
-}
-
-
-void Decoder::DecodeType5(Instruction* instr) {
- Format(instr, "b'l'cond 'target");
-}
-
-
-void Decoder::DecodeType6(Instruction* instr) {
- DecodeType6CoprocessorIns(instr);
-}
-
-
-int Decoder::DecodeType7(Instruction* instr) {
- if (instr->Bit(24) == 1) {
- if (instr->SvcValue() >= kStopCode) {
- Format(instr, "stop'cond 'svc");
- // Also print the stop message. Its address is encoded
- // in the following 4 bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "\n %p %08x stop message: %s",
- reinterpret_cast<int32_t*>(instr
- + Instruction::kInstrSize),
- *reinterpret_cast<char**>(instr
- + Instruction::kInstrSize),
- *reinterpret_cast<char**>(instr
- + Instruction::kInstrSize));
- // We have decoded 2 * Instruction::kInstrSize bytes.
- return 2 * Instruction::kInstrSize;
- } else {
- Format(instr, "svc'cond 'svc");
- }
- } else {
- DecodeTypeVFP(instr);
- }
- return Instruction::kInstrSize;
-}
-
-
-// void Decoder::DecodeTypeVFP(Instruction* instr)
-// vmov: Sn = Rt
-// vmov: Rt = Sn
-// vcvt: Dd = Sm
-// vcvt: Sd = Dm
-// Dd = vabs(Dm)
-// Dd = vneg(Dm)
-// Dd = vadd(Dn, Dm)
-// Dd = vsub(Dn, Dm)
-// Dd = vmul(Dn, Dm)
-// Dd = vmla(Dn, Dm)
-// Dd = vmls(Dn, Dm)
-// Dd = vdiv(Dn, Dm)
-// vcmp(Dd, Dm)
-// vmrs
-// vmsr
-// Dd = vsqrt(Dm)
-void Decoder::DecodeTypeVFP(Instruction* instr) {
- VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
- VERIFY(instr->Bits(11, 9) == 0x5);
-
- if (instr->Bit(4) == 0) {
- if (instr->Opc1Value() == 0x7) {
- // Other data processing instructions
- if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
- // vmov register to register.
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmov'cond.f64 'Dd, 'Dm");
- } else {
- Format(instr, "vmov'cond.f32 'Sd, 'Sm");
- }
- } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
- // vabs
- Format(instr, "vabs'cond.f64 'Dd, 'Dm");
- } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
- // vneg
- Format(instr, "vneg'cond.f64 'Dd, 'Dm");
- } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
- DecodeVCVTBetweenDoubleAndSingle(instr);
- } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
- DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Value() >> 1) == 0x6) &&
- (instr->Opc3Value() & 0x1)) {
- DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
- (instr->Opc3Value() & 0x1)) {
- DecodeVCMP(instr);
- } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
- Format(instr, "vsqrt'cond.f64 'Dd, 'Dm");
- } else if (instr->Opc3Value() == 0x0) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmov'cond.f64 'Dd, 'd");
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if (instr->Opc1Value() == 0x3) {
- if (instr->SzValue() == 0x1) {
- if (instr->Opc3Value() & 0x1) {
- Format(instr, "vsub'cond.f64 'Dd, 'Dn, 'Dm");
- } else {
- Format(instr, "vadd'cond.f64 'Dd, 'Dn, 'Dm");
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmul'cond.f64 'Dd, 'Dn, 'Dm");
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmla'cond.f64 'Dd, 'Dn, 'Dm");
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if ((instr->Opc1Value() == 0x0) && (instr->Opc3Value() & 0x1)) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmls'cond.f64 'Dd, 'Dn, 'Dm");
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vdiv'cond.f64 'Dd, 'Dn, 'Dm");
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else {
- if ((instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0)) {
- DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
- } else if ((instr->VLValue() == 0x0) &&
- (instr->VCValue() == 0x1) &&
- (instr->Bit(23) == 0x0)) {
- if (instr->Bit(21) == 0x0) {
- Format(instr, "vmov'cond.32 'Dd[0], 'rt");
- } else {
- Format(instr, "vmov'cond.32 'Dd[1], 'rt");
- }
- } else if ((instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x7) &&
- (instr->Bits(19, 16) == 0x1)) {
- if (instr->VLValue() == 0) {
- if (instr->Bits(15, 12) == 0xF) {
- Format(instr, "vmsr'cond FPSCR, APSR");
- } else {
- Format(instr, "vmsr'cond FPSCR, 'rt");
- }
- } else {
- if (instr->Bits(15, 12) == 0xF) {
- Format(instr, "vmrs'cond APSR, FPSCR");
- } else {
- Format(instr, "vmrs'cond 'rt, FPSCR");
- }
- }
- }
- }
-}
-
-
-void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
- Instruction* instr) {
- VERIFY((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0));
-
- bool to_arm_register = (instr->VLValue() == 0x1);
-
- if (to_arm_register) {
- Format(instr, "vmov'cond 'rt, 'Sn");
- } else {
- Format(instr, "vmov'cond 'Sn, 'rt");
- }
-}
-
-
-void Decoder::DecodeVCMP(Instruction* instr) {
- VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- VERIFY(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
- (instr->Opc3Value() & 0x1));
-
- // Comparison.
- bool dp_operation = (instr->SzValue() == 1);
- bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
-
- if (dp_operation && !raise_exception_for_qnan) {
- if (instr->Opc2Value() == 0x4) {
- Format(instr, "vcmp'cond.f64 'Dd, 'Dm");
- } else if (instr->Opc2Value() == 0x5) {
- Format(instr, "vcmp'cond.f64 'Dd, #0.0");
- } else {
- Unknown(instr); // invalid
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
-}
-
-
-void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
- VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- VERIFY((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
-
- bool double_to_single = (instr->SzValue() == 1);
-
- if (double_to_single) {
- Format(instr, "vcvt'cond.f32.f64 'Sd, 'Dm");
- } else {
- Format(instr, "vcvt'cond.f64.f32 'Dd, 'Sm");
- }
-}
-
-
-void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
- VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- VERIFY(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
- (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
-
- bool to_integer = (instr->Bit(18) == 1);
- bool dp_operation = (instr->SzValue() == 1);
- if (to_integer) {
- bool unsigned_integer = (instr->Bit(16) == 0);
-
- if (dp_operation) {
- if (unsigned_integer) {
- Format(instr, "vcvt'cond.u32.f64 'Sd, 'Dm");
- } else {
- Format(instr, "vcvt'cond.s32.f64 'Sd, 'Dm");
- }
- } else {
- if (unsigned_integer) {
- Format(instr, "vcvt'cond.u32.f32 'Sd, 'Sm");
- } else {
- Format(instr, "vcvt'cond.s32.f32 'Sd, 'Sm");
- }
- }
- } else {
- bool unsigned_integer = (instr->Bit(7) == 0);
-
- if (dp_operation) {
- if (unsigned_integer) {
- Format(instr, "vcvt'cond.f64.u32 'Dd, 'Sm");
- } else {
- Format(instr, "vcvt'cond.f64.s32 'Dd, 'Sm");
- }
- } else {
- if (unsigned_integer) {
- Format(instr, "vcvt'cond.f32.u32 'Sd, 'Sm");
- } else {
- Format(instr, "vcvt'cond.f32.s32 'Sd, 'Sm");
- }
- }
- }
-}
-
-
-// Decode Type 6 coprocessor instructions.
-// Dm = vmov(Rt, Rt2)
-// <Rt, Rt2> = vmov(Dm)
-// Ddst = MEM(Rbase + 4*offset).
-// MEM(Rbase + 4*offset) = Dsrc.
-void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
- VERIFY(instr->TypeValue() == 6);
-
- if (instr->CoprocessorValue() == 0xA) {
- switch (instr->OpcodeValue()) {
- case 0x8:
- case 0xA:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Sd, ['rn - 4*'imm08@00]");
- } else {
- Format(instr, "vstr'cond 'Sd, ['rn - 4*'imm08@00]");
- }
- break;
- case 0xC:
- case 0xE:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Sd, ['rn + 4*'imm08@00]");
- } else {
- Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]");
- }
- break;
- case 0x4:
- case 0x5:
- case 0x6:
- case 0x7:
- case 0x9:
- case 0xB: {
- bool to_vfp_register = (instr->VLValue() == 0x1);
- if (to_vfp_register) {
- Format(instr, "vldm'cond'pu 'rn'w, {'Sd-'Sd+}");
- } else {
- Format(instr, "vstm'cond'pu 'rn'w, {'Sd-'Sd+}");
- }
- break;
- }
- default:
- Unknown(instr); // Not used by V8.
- }
- } else if (instr->CoprocessorValue() == 0xB) {
- switch (instr->OpcodeValue()) {
- case 0x2:
- // Load and store double to two GP registers
- if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
- Unknown(instr); // Not used by V8.
- } else if (instr->HasL()) {
- Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
- } else {
- Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
- }
- break;
- case 0x8:
- case 0xA:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]");
- } else {
- Format(instr, "vstr'cond 'Dd, ['rn - 4*'imm08@00]");
- }
- break;
- case 0xC:
- case 0xE:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]");
- } else {
- Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]");
- }
- break;
- case 0x4:
- case 0x5:
- case 0x6:
- case 0x7:
- case 0x9:
- case 0xB: {
- bool to_vfp_register = (instr->VLValue() == 0x1);
- if (to_vfp_register) {
- Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
- } else {
- Format(instr, "vstm'cond'pu 'rn'w, {'Dd-'Dd+}");
- }
- break;
- }
- default:
- Unknown(instr); // Not used by V8.
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
-}
-
-#undef VERIFIY
-
-bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
- int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- return (instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker;
-}
-
-
-int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
- if (IsConstantPoolAt(instr_ptr)) {
- int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- return DecodeConstantPoolLength(instruction_bits);
- } else {
- return -1;
- }
-}
-
-
-// Disassemble the instruction at *instr_ptr into the output buffer.
-int Decoder::InstructionDecode(byte* instr_ptr) {
- Instruction* instr = Instruction::At(instr_ptr);
- // Print raw instruction bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
- instr->InstructionBits());
- if (instr->ConditionField() == kSpecialCondition) {
- Unknown(instr);
- return Instruction::kInstrSize;
- }
- int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "constant pool begin (length %d)",
- DecodeConstantPoolLength(instruction_bits));
- return Instruction::kInstrSize;
- }
- switch (instr->TypeValue()) {
- case 0:
- case 1: {
- DecodeType01(instr);
- break;
- }
- case 2: {
- DecodeType2(instr);
- break;
- }
- case 3: {
- DecodeType3(instr);
- break;
- }
- case 4: {
- DecodeType4(instr);
- break;
- }
- case 5: {
- DecodeType5(instr);
- break;
- }
- case 6: {
- DecodeType6(instr);
- break;
- }
- case 7: {
- return DecodeType7(instr);
- }
- default: {
- // The type field is 3-bits in the ARM encoding.
- UNREACHABLE();
- break;
- }
- }
- return Instruction::kInstrSize;
-}
-
-
-} } // namespace v8::internal
-
-
-
-//------------------------------------------------------------------------------
-
-namespace disasm {
-
-
-const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfConstant(byte* addr) const {
- return NameOfAddress(addr);
-}
-
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::Registers::Name(reg);
-}
-
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
- UNREACHABLE(); // ARM does not have the concept of a byte register
- return "nobytereg";
-}
-
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
- UNREACHABLE(); // ARM does not have any XMM registers
- return "noxmmreg";
-}
-
-
-const char* NameConverter::NameInCode(byte* addr) const {
- // The default name converter is called for unknown code. So we will not try
- // to access any memory.
- return "";
-}
-
-
-//------------------------------------------------------------------------------
-
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte* instruction) {
- v8::internal::Decoder d(converter_, buffer);
- return d.InstructionDecode(instruction);
-}
-
-
-int Disassembler::ConstantPoolSizeAt(byte* instruction) {
- return v8::internal::Decoder::ConstantPoolSizeAt(instruction);
-}
-
-
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
- NameConverter converter;
- Disassembler d(converter);
- for (byte* pc = begin; pc < end;) {
- v8::internal::EmbeddedVector<char, 128> buffer;
- buffer[0] = '\0';
- byte* prev_pc = pc;
- pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p %08x %s\n",
- prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
- }
-}
-
-
-} // namespace disasm
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/frames-arm.cc b/src/3rdparty/v8/src/arm/frames-arm.cc
deleted file mode 100644
index a805d28..0000000
--- a/src/3rdparty/v8/src/arm/frames-arm.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/frames-arm.h b/src/3rdparty/v8/src/arm/frames-arm.h
deleted file mode 100644
index ee9fc0e..0000000
--- a/src/3rdparty/v8/src/arm/frames-arm.h
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_FRAMES_ARM_H_
-#define V8_ARM_FRAMES_ARM_H_
-
-namespace v8 {
-namespace internal {
-
-
-// The ARM ABI does not specify the usage of register r9, which may be reserved
-// as the static base or thread register on some platforms, in which case we
-// leave it alone. Adjust the value of kR9Available accordingly:
-const int kR9Available = 1; // 1 if available to us, 0 if reserved
-
-
-// Register list in load/store instructions
-// Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 16;
-
-
-// Caller-saved/arguments registers
-const RegList kJSCallerSaved =
- 1 << 0 | // r0 a1
- 1 << 1 | // r1 a2
- 1 << 2 | // r2 a3
- 1 << 3; // r3 a4
-
-const int kNumJSCallerSaved = 4;
-
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
-// Return the code of the n-th caller-saved register available to JavaScript
-// e.g. JSCallerSavedReg(0) returns r0.code() == 0
-int JSCallerSavedCode(int n);
-
-
-// Callee-saved registers preserved when switching from C to JavaScript
-const RegList kCalleeSaved =
- 1 << 4 | // r4 v1
- 1 << 5 | // r5 v2
- 1 << 6 | // r6 v3
- 1 << 7 | // r7 v4
- 1 << 8 | // r8 v5 (cp in JavaScript code)
- kR9Available << 9 | // r9 v6
- 1 << 10 | // r10 v7
- 1 << 11; // r11 v8 (fp in JavaScript code)
-
-// When calling into C++ (only for C++ calls that can't cause a GC).
-// The call code will take care of lr, fp, etc.
-const RegList kCallerSaved =
- 1 << 0 | // r0
- 1 << 1 | // r1
- 1 << 2 | // r2
- 1 << 3 | // r3
- 1 << 9; // r9
-
-
-const int kNumCalleeSaved = 7 + kR9Available;
-
-// Double registers d8 to d15 are callee-saved.
-const int kNumDoubleCalleeSaved = 8;
-
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
-const int kNumSafepointRegisters = 16;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
-// ----------------------------------------------------
-
-
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
-class EntryFrameConstants : public AllStatic {
- public:
- static const int kCallerFPOffset = -3 * kPointerSize;
-};
-
-
-class ExitFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
-
- // The caller fields are below the frame pointer on the stack.
- static const int kCallerFPOffset = 0 * kPointerSize;
- // The calling JS function is below FP.
- static const int kCallerPCOffset = 1 * kPointerSize;
-
- // FP-relative displacement of the caller's SP. It points just
- // below the saved PC.
- static const int kCallerSPDisplacement = 2 * kPointerSize;
-};
-
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
-
- // Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
-};
-
-
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_FRAMES_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/full-codegen-arm.cc b/src/3rdparty/v8/src/arm/full-codegen-arm.cc
deleted file mode 100644
index 36580c7..0000000
--- a/src/3rdparty/v8/src/arm/full-codegen-arm.cc
+++ /dev/null
@@ -1,4622 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-#include "arm/code-stubs-arm.h"
-#include "arm/macro-assembler-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-
-// A patch site is a location in the code which it is possible to patch. This
-// class has a number of methods to emit the code which is patchable and the
-// method EmitPatchInfo to record a marker back to the patchable code. This
-// marker is a cmp rx, #yyy instruction, and x * 0x00000fff + yyy (raw 12 bit
-// immediate value is used) is the delta from the pc to the first instruction of
-// the patchable code.
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- ASSERT(patch_site_.is_bound() == info_emitted_);
- }
-
- // When initially emitting this ensure that a jump is always generated to skip
- // the inlined smi code.
- void EmitJumpIfNotSmi(Register reg, Label* target) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ bind(&patch_site_);
- __ cmp(reg, Operand(reg));
- __ b(eq, target); // Always taken before patched.
- }
-
- // When initially emitting this ensure that a jump is never generated to skip
- // the inlined smi code.
- void EmitJumpIfSmi(Register reg, Label* target) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ bind(&patch_site_);
- __ cmp(reg, Operand(reg));
- __ b(ne, target); // Never taken before patched.
- }
-
- void EmitPatchInfo() {
- // Block literal pool emission whilst recording patch site information.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- if (patch_site_.is_bound()) {
- int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
- Register reg;
- reg.set_code(delta_to_patch_site / kOff12Mask);
- __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- } else {
- __ nop(); // Signals no inlined code.
- }
- }
-
- private:
- MacroAssembler* masm_;
- Label patch_site_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
-
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right. The actual
-// argument count matches the formal parameter count expected by the
-// function.
-//
-// The live registers are:
-// o r1: the JS function object being called (i.e., ourselves)
-// o cp: our context
-// o fp: our caller's frame pointer
-// o sp: stack pointer
-// o lr: return address
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-arm.h for its layout.
-void FullCodeGenerator::Generate() {
- CompilationInfo* info = info_;
- handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
- Comment cmnt(masm_, "[ function compiled by full code generator");
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop-at");
- }
-#endif
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
- Label ok;
- __ cmp(r5, Operand::Zero());
- __ b(eq, &ok);
- int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
- }
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- int locals_count = info->scope()->num_stack_slots();
-
- info->set_prologue_offset(masm_->pc_offset());
- {
- PredictableCodeSizeScope predictible_code_size_scope(
- masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- }
-
- { Comment cmnt(masm_, "[ Allocate locals");
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
- // Argument to NewContext is the function, which is still in r1.
- Comment cmnt(masm_, "[ Allocate context");
- __ push(r1);
- if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
- __ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- function_in_register = false;
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
- __ str(r0, target);
-
- // Update the write barrier.
- __ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- }
- }
-
- Variable* arguments = scope()->arguments();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
- // Load this again, if it's used by the local context below.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(r3, r1);
- }
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ add(r2, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r1, Operand(Smi::FromInt(num_parameters)));
- __ Push(r3, r2, r1);
-
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
- }
- ArgumentsAccessStub stub(type);
- __ CallStub(&stub);
-
- SetVar(arguments, r0, r1, r2);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ Declarations");
- scope()->VisitIllegalRedeclaration(this);
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
- ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
- VisitVariableDeclaration(function);
- }
- VisitDeclarations(scope()->declarations());
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
- }
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- }
- EmitReturnSequence();
-
- // Force emit the constant pool, so it doesn't get emitted in the middle
- // of the stack check table.
- masm()->CheckConstPool(true, false);
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- __ mov(r0, Operand(Smi::FromInt(0)));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ mov(r2, Operand(profiling_counter_));
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- __ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC);
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
- int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing: if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- if (isolate()->IsDebuggerActive()) {
- // Detect debug break requests as soon as possible.
- reset_value = FLAG_interrupt_budget >> 4;
- }
- __ mov(r2, Operand(profiling_counter_));
- __ mov(r3, Operand(Smi::FromInt(reset_value)));
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-}
-
-
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Back edge bookkeeping");
- // Block literal pools whilst emitting stack check code.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- Label ok;
-
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ b(pl, &ok);
- InterruptStub stub;
- __ CallStub(&stub);
-
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
-
- EmitProfilingCounterReset();
-
- __ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ b(&return_label_);
- } else {
- __ bind(&return_label_);
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ b(pl, &ok);
- __ push(r0);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(r2);
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- InterruptStub stub;
- __ CallStub(&stub);
- }
- __ pop(r0);
- EmitProfilingCounterReset();
- __ bind(&ok);
- }
-
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- // Make sure that the constant pool is not emitted inside of the return
- // sequence.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
- int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
- // TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
- PredictableCodeSizeScope predictable(masm_, -1);
- __ RecordJSReturn();
- masm_->mov(sp, fp);
- masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
- masm_->add(sp, sp, Operand(sp_delta));
- masm_->Jump(lr);
- }
-
-#ifdef DEBUG
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceInstructions <=
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (index == Heap::kUndefinedValueRootIndex ||
- index == Heap::kNullValueRootIndex ||
- index == Heap::kFalseValueRootIndex) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else if (index == Heap::kTrueValueRootIndex) {
- if (true_label_ != fall_through_) __ b(true_label_);
- } else {
- __ LoadRoot(result_register(), index);
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- __ mov(result_register(), Operand(lit));
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- // Immediates cannot be pushed directly.
- __ mov(result_register(), Operand(lit));
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ b(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else {
- if (true_label_ != fall_through_) __ b(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else {
- if (true_label_ != fall_through_) __ b(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ mov(result_register(), Operand(lit));
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp, 0));
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == materialize_false);
- __ bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(materialize_false);
- __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(&done);
- __ bind(materialize_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == true_label_);
- ASSERT(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(result_register(), value_root_index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(ip, value_root_index);
- __ push(ip);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) __ b(true_label_);
- } else {
- if (false_label_ != fall_through_) __ b(false_label_);
- }
-}
-
-
-void FullCodeGenerator::DoTest(Expression* condition,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- ToBooleanStub stub(result_register());
- __ CallStub(&stub, condition->test_id());
- __ tst(result_register(), result_register());
- Split(ne, if_true, if_false, fall_through);
-}
-
-
-void FullCodeGenerator::Split(Condition cond,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ b(cond, if_true);
- } else if (if_true == fall_through) {
- __ b(NegateCondition(cond), if_false);
- } else {
- __ b(cond, if_true);
- __ b(if_false);
- }
-}
-
-
-MemOperand FullCodeGenerator::StackOperand(Variable* var) {
- ASSERT(var->IsStackAllocated());
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -var->index() * kPointerSize;
- // Adjust by a (parameter or local) base offset.
- if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
- } else {
- offset += JavaScriptFrameConstants::kLocal0Offset;
- }
- return MemOperand(fp, offset);
-}
-
-
-MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- if (var->IsContextSlot()) {
- int context_chain_length = scope()->ContextChainLength(var->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
- } else {
- return StackOperand(var);
- }
-}
-
-
-void FullCodeGenerator::GetVar(Register dest, Variable* var) {
- // Use destination as scratch.
- MemOperand location = VarOperand(var, dest);
- __ ldr(dest, location);
-}
-
-
-void FullCodeGenerator::SetVar(Variable* var,
- Register src,
- Register scratch0,
- Register scratch1) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- ASSERT(!scratch0.is(src));
- ASSERT(!scratch0.is(scratch1));
- ASSERT(!scratch1.is(src));
- MemOperand location = VarOperand(var, scratch0);
- __ str(src, location);
-
- // Emit the write barrier code if the location is in the heap.
- if (var->IsContextSlot()) {
- __ RecordWriteContextSlot(scratch0,
- location.offset(),
- src,
- scratch1,
- kLRHasBeenSaved,
- kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
-
- Label skip;
- if (should_normalize) __ b(&skip);
- PrepareForBailout(expr, TOS_REG);
- if (should_normalize) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
-void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current function
- // context.
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
- // Check that we're not inside a with or catch context.
- __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
- __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
- __ Check(ne, "Declaration in with context.");
- __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
- __ Check(ne, "Declaration in catch context.");
- }
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(
- VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
- VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
- Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- break;
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, StackOperand(variable));
- }
- break;
-
- case Variable::CONTEXT:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, ContextOperand(cp, variable->index()));
- // No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- }
- break;
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ mov(r2, Operand(variable->name()));
- // Declaration nodes are always introduced in one of four modes.
- ASSERT(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ mov(r1, Operand(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- __ Push(cp, r2, r1, r0);
- } else {
- __ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value.
- __ Push(cp, r2, r1, r0);
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitFunctionDeclaration(
- FunctionDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script());
- // Check for stack-overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals_->Add(function, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- VisitForAccumulatorValue(declaration->fun());
- __ str(result_register(), StackOperand(variable));
- break;
- }
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- VisitForAccumulatorValue(declaration->fun());
- __ str(result_register(), ContextOperand(cp, variable->index()));
- int offset = Context::SlotOffset(variable->index());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(cp,
- offset,
- result_register(),
- r2,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- break;
- }
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- __ mov(r2, Operand(variable->name()));
- __ mov(r1, Operand(Smi::FromInt(NONE)));
- __ Push(cp, r2, r1);
- // Push initial value for function declaration.
- VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
-
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
-
- // Load instance object.
- __ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope()));
- __ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
- __ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
-
- // Assign it.
- __ str(r1, ContextOperand(cp, variable->index()));
- // We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(cp,
- Context::SlotOffset(variable->index()),
- r1,
- r3,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
-
- // Traverse into body.
- Visit(declaration->module());
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- // The context is the first argument.
- __ mov(r1, Operand(pairs));
- __ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
- __ Push(cp, r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- Comment cmnt(masm_, "[ SwitchStatement");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- // Keep the switch value on the stack until a case matches.
- VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- CaseClause* default_clause = NULL; // Can occur anywhere in the list.
-
- Label next_test; // Recycled for each test.
- // Compile all the tests with branches to their bodies.
- for (int i = 0; i < clauses->length(); i++) {
- CaseClause* clause = clauses->at(i);
- clause->body_target()->Unuse();
-
- // The default is not a test, but remember it as final fall through.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- __ bind(&next_test);
- next_test.Unuse();
-
- // Compile the label expression.
- VisitForAccumulatorValue(clause->label());
-
- // Perform the comparison as if via '==='.
- __ ldr(r1, MemOperand(sp, 0)); // Switch value.
- bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ orr(r2, r1, r0);
- patch_site.EmitJumpIfNotSmi(r2, &slow_case);
-
- __ cmp(r1, r0);
- __ b(ne, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target());
- __ bind(&slow_case);
- }
-
- // Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
- patch_site.EmitPatchInfo();
-
- __ cmp(r0, Operand::Zero());
- __ b(ne, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target());
- }
-
- // Discard the test value and jump to the default if present, otherwise to
- // the end of the statement.
- __ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
- if (default_clause == NULL) {
- __ b(nested_statement.break_label());
- } else {
- __ b(default_clause->body_target());
- }
-
- // Compile all the case bodies.
- for (int i = 0; i < clauses->length(); i++) {
- Comment cmnt(masm_, "[ Case body");
- CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
- VisitStatements(clause->statements());
- }
-
- __ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- Comment cmnt(masm_, "[ ForInStatement");
- SetStatementPosition(stmt);
-
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
- VisitForAccumulatorValue(stmt->enumerable());
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, &exit);
- Register null_value = r5;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmp(r0, null_value);
- __ b(eq, &exit);
-
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
- // Convert the object to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(r0, &convert);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &done_convert);
- __ bind(&convert);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ bind(&done_convert);
- __ push(r0);
-
- // Check for proxies.
- Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- __ b(le, &call_runtime);
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- __ CheckEnumCache(null_value, &call_runtime);
-
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- Label use_cache;
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ b(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(r0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- Label fixed_array;
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r2, ip);
- __ b(ne, &fixed_array);
-
- // We got a map in register r0. Get the enumeration cache from it.
- Label no_descriptors;
- __ bind(&use_cache);
-
- __ EnumLength(r1, r0);
- __ cmp(r1, Operand(Smi::FromInt(0)));
- __ b(eq, &no_descriptors);
-
- __ LoadInstanceDescriptors(r0, r2);
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheOffset));
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- // Set up the four remaining stack slots.
- __ push(r0); // Map.
- __ mov(r0, Operand(Smi::FromInt(0)));
- // Push enumeration cache, enumeration cache length (as smi) and zero.
- __ Push(r2, r1, r0);
- __ jmp(&loop);
-
- __ bind(&no_descriptors);
- __ Drop(1);
- __ jmp(&exit);
-
- // We got a fixed array in register r0. Iterate through that.
- Label non_proxy;
- __ bind(&fixed_array);
-
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(r1, cell);
- __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ str(r2, FieldMemOperand(r1, JSGlobalPropertyCell::kValueOffset));
-
- __ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
- __ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r2, r3, r3, LAST_JS_PROXY_TYPE);
- __ b(gt, &non_proxy);
- __ mov(r1, Operand(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
- __ Push(r1, r0); // Smi and array
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ Push(r1, r0); // Fixed array length (as smi) and initial index.
-
- // Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- __ bind(&loop);
- // Load the current count to r0, load the length to r1.
- __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
- __ cmp(r0, r1); // Compare to the array length.
- __ b(hs, loop_statement.break_label());
-
- // Get the current entry of the array into register r3.
- __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
- __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Get the expected map from the stack or a smi in the
- // permanent slow case into register r2.
- __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
-
- // Check if the expected map still matches that of the enumerable.
- // If not, we may have to filter the key.
- Label update_each;
- __ ldr(r1, MemOperand(sp, 4 * kPointerSize));
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r4, Operand(r2));
- __ b(eq, &update_each);
-
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- __ cmp(r2, Operand(Smi::FromInt(0)));
- __ b(eq, &update_each);
-
- // Convert the entry to a string or (smi) 0 if it isn't a property
- // any more. If the property has been removed while iterating, we
- // just skip it.
- __ push(r1); // Enumerable.
- __ push(r3); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ mov(r3, Operand(r0), SetCC);
- __ b(eq, loop_statement.continue_label());
-
- // Update the 'each' property or variable from the possibly filtered
- // entry in register r3.
- __ bind(&update_each);
- __ mov(result_register(), r3);
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitAssignment(stmt->each());
- }
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Generate code for the going to the next element by incrementing
- // the index (smi) stored on top of the stack.
- __ bind(loop_statement.continue_label());
- __ pop(r0);
- __ add(r0, r0, Operand(Smi::FromInt(1)));
- __ push(r0);
-
- EmitBackEdgeBookkeeping(stmt, &loop);
- __ b(&loop);
-
- // Remove the pointers stored on the stack.
- __ bind(loop_statement.break_label());
- __ Drop(5);
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(&exit);
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
- __ mov(r0, Operand(info));
- __ push(r0);
- __ CallStub(&stub);
- } else {
- __ mov(r0, Operand(info));
- __ LoadRoot(r1, pretenure ? Heap::kTrueValueRootIndex
- : Heap::kFalseValueRootIndex);
- __ Push(cp, r0, r1);
- __ CallRuntime(Runtime::kNewClosure, 3);
- }
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
- TypeofState typeof_state,
- Label* slow) {
- Register current = cp;
- Register next = r1;
- Register temp = r2;
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- }
- // Load next context in chain.
- __ ldr(next, ContextOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- Label loop, fast;
- if (!current.is(next)) {
- __ Move(next, current);
- }
- __ bind(&loop);
- // Terminate at native context.
- __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- __ cmp(temp, ip);
- __ b(eq, &fast);
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- // Load next context in chain.
- __ ldr(next, ContextOperand(next, Context::PREVIOUS_INDEX));
- __ b(&loop);
- __ bind(&fast);
- }
-
- __ ldr(r0, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(r2, Operand(var->name()));
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- ASSERT(var->IsContextSlot());
- Register context = cp;
- Register next = r3;
- Register temp = r4;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- }
- __ ldr(next, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- context = next;
- }
- }
- // Check that last extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return an cp-based operand (the write barrier cannot be allowed to
- // destroy the cp register).
- return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
- __ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (local->mode() == CONST) {
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- } else { // LET || CONST_HARMONY
- __ b(ne, done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- }
- }
- __ jmp(done);
- }
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
- // Record position before possible IC call.
- SetSourcePosition(proxy->position());
- Variable* var = proxy->var();
-
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
- switch (var->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in r2 and the global
- // object (receiver) in r0.
- __ ldr(r0, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(r2, Operand(var->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- context()->Plug(r0);
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP.
- // always holds.
- ASSERT(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else {
- // Check that we always have valid source position.
- ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
- ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- GetVar(r0, var);
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- Label done;
- __ b(ne, &done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&done);
- } else {
- // Uninitalized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- }
- context()->Plug(r0);
- break;
- }
- }
- context()->Plug(var);
- break;
- }
-
- case Variable::LOOKUP: {
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
- __ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
- __ mov(r1, Operand(var->name()));
- __ Push(cp, r1); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ bind(&done);
- context()->Plug(r0);
- }
- }
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // r5 = materialized value (RegExp literal)
- // r4 = JS function, literals array
- // r3 = literal index
- // r2 = RegExp pattern
- // r1 = RegExp flags
- // r0 = RegExp literal clone
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ ldr(r5, FieldMemOperand(r4, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r5, ip);
- __ b(ne, &materialized);
-
- // Create regexp literal using runtime function.
- // Result will be in r0.
- __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r2, Operand(expr->pattern()));
- __ mov(r1, Operand(expr->flags()));
- __ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(r5, r0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(r5);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ push(r0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(r5);
-
- __ bind(&allocated);
- // After this, registers are used as follows:
- // r0: Newly allocated regexp.
- // r5: Materialized regexp.
- // r2: temp.
- __ CopyFields(r0, r5, r2.bit(), size / kPointerSize);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
- if (expression == NULL) {
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ push(r1);
- } else {
- VisitForStackValue(expression);
- }
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(constant_properties));
- int flags = expr->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= expr->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- __ mov(r0, Operand(Smi::FromInt(flags)));
- int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
- __ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- } else {
- FastCloneShallowObjectStub stub(properties_count);
- __ CallStub(&stub);
- }
-
- // If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in r0.
- bool result_saved = false;
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
- AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(r0); // Save result on stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsInternalizedString()) {
- if (property->emit_store()) {
- VisitForAccumulatorValue(value);
- __ mov(r2, Operand(key->handle()));
- __ ldr(r1, MemOperand(sp));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
- } else {
- VisitForEffect(value);
- }
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- // Duplicate receiver on stack.
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- VisitForStackValue(key);
- VisitForStackValue(value);
- if (property->emit_store()) {
- __ mov(r0, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ push(r0);
- __ CallRuntime(Runtime::kSetProperty, 4);
- } else {
- __ Drop(3);
- }
- break;
- case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
- break;
- case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
- break;
- }
- }
-
- // Emit code to define accessors, using only a single call to the runtime for
- // each pair of corresponding getters and setters.
- for (AccessorTable::Iterator it = accessor_table.begin();
- it != accessor_table.end();
- ++it) {
- __ ldr(r0, MemOperand(sp)); // Duplicate receiver.
- __ push(r0);
- VisitForStackValue(it->first);
- EmitAccessor(it->second->getter);
- EmitAccessor(it->second->setter);
- __ mov(r0, Operand(Smi::FromInt(NONE)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
- }
-
- if (expr->has_function()) {
- ASSERT(result_saved);
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(r0);
- }
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
- Handle<FixedArray> constant_elements = expr->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(constant_elements->get(1)));
-
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(constant_elements));
- __ Push(r3, r2, r1);
- if (has_fast_elements && constant_elements_values->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
- length);
- __ CallStub(&stub);
- __ IncrementCounter(
- isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
- } else if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
-
- if (has_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
-
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- __ CallStub(&stub);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(r0);
- result_saved = true;
- }
- VisitForAccumulatorValue(subexpr);
-
- if (IsFastObjectElementsKind(constant_elements_kind)) {
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ ldr(r6, MemOperand(sp)); // Copy of array literal.
- __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
- __ str(result_register(), FieldMemOperand(r1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(r1, offset, result_register(), r2,
- kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ ldr(r1, MemOperand(sp)); // Copy of array literal.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
- __ mov(r3, Operand(Smi::FromInt(i)));
- __ mov(r4, Operand(Smi::FromInt(expr->literal_index())));
- StoreArrayLiteralElementStub stub;
- __ CallStub(&stub);
- }
-
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(r0);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* property = expr->target()->AsProperty();
- if (property != NULL) {
- assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in the accumulator.
- VisitForAccumulatorValue(property->obj());
- __ push(result_register());
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case KEYED_PROPERTY:
- if (expr->is_compound()) {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- __ ldr(r1, MemOperand(sp, 0));
- __ push(r0);
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- break;
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- { AccumulatorValueContext context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- __ push(r0); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
- SetSourcePosition(expr->position() + 1);
- AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- mode,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- __ mov(r2, Operand(key->handle()));
- // Call load IC. It has arguments receiver and property name r0 and r2.
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- // Call keyed load IC. It has arguments key and receiver in r0 and r1.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left_expr,
- Expression* right_expr) {
- Label done, smi_case, stub_call;
-
- Register scratch1 = r2;
- Register scratch2 = r3;
-
- // Get the arguments.
- Register left = r1;
- Register right = r0;
- __ pop(left);
-
- // Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(scratch1, &smi_case);
-
- __ bind(&stub_call);
- BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done);
-
- __ bind(&smi_case);
- // Smi case. This code works the same way as the smi-smi case in the type
- // recording binary operation stub, see
- // BinaryOpStub::GenerateSmiSmiOperation for comments.
- switch (op) {
- case Token::SAR:
- __ b(&stub_call);
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- __ bic(right, right, Operand(kSmiTagMask));
- break;
- case Token::SHL: {
- __ b(&stub_call);
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- __ b(mi, &stub_call);
- __ SmiTag(right, scratch1);
- break;
- }
- case Token::SHR: {
- __ b(&stub_call);
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, &stub_call);
- __ SmiTag(right, scratch1);
- break;
- }
- case Token::ADD:
- __ add(scratch1, left, Operand(right), SetCC);
- __ b(vs, &stub_call);
- __ mov(right, scratch1);
- break;
- case Token::SUB:
- __ sub(scratch1, left, Operand(right), SetCC);
- __ b(vs, &stub_call);
- __ mov(right, scratch1);
- break;
- case Token::MUL: {
- __ SmiUntag(ip, right);
- __ smull(scratch1, scratch2, left, ip);
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &stub_call);
- __ cmp(scratch1, Operand::Zero());
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ b(ne, &done);
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ b(mi, &stub_call);
- break;
- }
- case Token::BIT_OR:
- __ orr(right, left, Operand(right));
- break;
- case Token::BIT_AND:
- __ and_(right, left, Operand(right));
- break;
- case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
- __ pop(r1);
- BinaryOpStub stub(op, mode);
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
- break;
- }
- case NAMED_PROPERTY: {
- __ push(r0); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- __ mov(r1, r0);
- __ pop(r0); // Restore value.
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- case KEYED_PROPERTY: {
- __ push(r0); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(r1, r0);
- __ pop(r2);
- __ pop(r0); // Restore value.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- }
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- if (var->IsUnallocated()) {
- // Global var, const, or let.
- __ mov(r2, Operand(var->name()));
- __ ldr(r1, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
-
- } else if (op == Token::INIT_CONST) {
- // Const initializers need a write barrier.
- ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ ldr(r1, StackOperand(var));
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
- __ b(ne, &skip);
- __ str(result_register(), StackOperand(var));
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(r0);
- __ mov(r0, Operand(var->name()));
- __ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- }
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
- // Non-initializing assignment to let variable needs a write barrier.
- if (var->IsLookupSlot()) {
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, r1);
- __ ldr(r3, location);
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- __ b(ne, &assign);
- __ mov(r3, Operand(var->name()));
- __ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- // Perform the assignment.
- __ bind(&assign);
- __ str(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(r3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- }
-
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
- // Assignment to var or initializing assignment to let/const
- // in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
- MemOperand location = VarOperand(var, r1);
- if (generate_debug_code_ && op == Token::INIT_LET) {
- // Check for an uninitialized let binding.
- __ ldr(r2, location);
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ Check(eq, "Let binding re-initialization.");
- }
- // Perform the assignment.
- __ str(r0, location);
- if (var->IsContextSlot()) {
- __ mov(r3, r0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- }
- // Non-initializing assignments to consts are ignored.
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- __ pop(r1);
-
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ pop(r1); // Key.
- __ pop(r2);
-
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- EmitNamedPropertyLoad(expr);
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(r0);
- } else {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ pop(r1);
- EmitKeyedPropertyLoad(expr);
- context()->Plug(r0);
- }
-}
-
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- // All calls must have a predictable size in full-codegen code to ensure that
- // the debugger can patch them correctly.
- __ Call(code, rmode, ast_id, al, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- __ mov(r2, Operand(name));
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(r1);
- __ push(r0);
- __ push(r1);
-
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0); // Drop the key still on the stack.
-}
-
-
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
-
- // Record call targets in unoptimized code.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ mov(r2, Operand(cell));
-
- CallFunctionStub stub(arg_count, flags);
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- } else {
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- }
- __ push(r1);
-
- // Push the receiver of the enclosing function.
- int receiver_offset = 2 + info_->scope()->num_parameters();
- __ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
- __ push(r1);
- // Push the language mode.
- __ mov(r1, Operand(Smi::FromInt(language_mode())));
- __ push(r1);
-
- // Push the start position of the scope the calls resides in.
- __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
- __ push(r1);
-
- // Push the qml mode flag.
- __ mov(r1, Operand(Smi::FromInt(is_qml_mode())));
- __ push(r1);
-
- // Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
-
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ push(r2); // Reserved receiver slot.
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(r1);
- EmitResolvePossiblyDirectEval(arg_count);
-
- // The runtime call returns a pair of values in r0 (function) and
- // r1 (receiver). Touch up the stack with the right values.
- __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ str(r1, MemOperand(sp, arg_count * kPointerSize));
- }
-
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Push global object as receiver for the call IC.
- __ ldr(r0, proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ push(r0);
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- // Call to a lookup slot (dynamically introduced variable).
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
- }
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in r0)
- // and the object holding it (returned in edx).
- __ push(context_register());
- __ mov(r2, Operand(proxy->name()));
- __ push(r2);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ Push(r0, r1); // Function, receiver.
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ b(&call);
- __ bind(&done);
- // Push function.
- __ push(r0);
- // The receiver is implicitly the global receiver. Indicate this
- // by passing the hole to the call function stub.
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
- __ push(r1);
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found
- // by LoadContextSlot. That object could be the hole if the
- // receiver is implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
- RelocInfo::CODE_TARGET);
- } else {
- EmitKeyedCallWithIC(expr, property->key());
- }
- } else {
- // Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- }
- // Load global receiver object.
- __ ldr(r1, GlobalObjectOperand());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ push(r1);
- // Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
- }
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- ASSERT(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function and argument count into r1 and r0.
- __ mov(r0, Operand(arg_count));
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
-
- // Record call targets in unoptimized code.
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ mov(r2, Operand(cell));
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ tst(r0, Operand(kSmiTagMask));
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ tst(r0, Operand(kSmiTagMask | 0x80000000));
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, if_true);
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- __ b(ne, if_false);
- __ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ b(lt, if_false);
- __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(le, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ge, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ne, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(r0);
-
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
- __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, if_true);
-
- // Check for fast case object. Generate false result for slow case object.
- __ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r2, ip);
- __ b(eq, if_false);
-
- // Look for valueOf name in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(r3, r1);
- __ cmp(r3, Operand::Zero());
- __ b(eq, &done);
-
- __ LoadInstanceDescriptors(r1, r4);
- // r4: descriptor array.
- // r3: valid entries in the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ mov(ip, Operand(DescriptorArray::kDescriptorSize));
- __ mul(r3, r3, ip);
- // Calculate location of the first key name.
- __ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
- // Calculate the end of the descriptor array.
- __ mov(r2, r4);
- __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Loop through all the keys in the descriptor array. If one of these is the
- // string "valueOf" the result is false.
- // The use of ip to store the valueOf string assumes that it is not otherwise
- // used in the loop below.
- __ mov(ip, Operand(FACTORY->value_of_string()));
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(r3, MemOperand(r4, 0));
- __ cmp(r3, ip);
- __ b(eq, if_false);
- __ add(r4, r4, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ cmp(r4, Operand(r2));
- __ b(ne, &loop);
-
- __ bind(&done);
- // If a valueOf property is not found on the object check that its
- // prototype is the un-modified String prototype. If not result is false.
- __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
- __ JumpIfSmi(r2, if_false);
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
- __ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ cmp(r2, r3);
- __ b(ne, if_false);
-
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ jmp(if_true);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r2, SYMBOL_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &check_frame_marker);
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(r1);
- __ cmp(r0, r1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in r0.
- VisitForAccumulatorValue(args->at(0));
- __ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
- Label exit;
- // Get the number of formal parameters.
- __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is a smi, we return null.
- __ JumpIfSmi(r0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
- // Map is now in r0.
- __ b(lt, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ b(eq, &function);
-
- __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ b(eq, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
- // Check if the constructor in the map is a JS function.
- __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- __ b(ne, &non_function_constructor);
-
- // r0 now contains the constructor function. Grab the
- // instance class name from there.
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
- __ b(&done);
-
- // Functions have class 'Function'.
- __ bind(&function);
- __ LoadRoot(r0, Heap::kfunction_class_stringRootIndex);
- __ jmp(&done);
-
- // Objects with a non-function constructor have class 'Object'.
- __ bind(&non_function_constructor);
- __ LoadRoot(r0, Heap::kObject_stringRootIndex);
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ LoadRoot(r0, Heap::kNullValueRootIndex);
-
- // All done.
- __ bind(&done);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
-
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r4, Operand(r0));
-
- __ bind(&heapnumber_allocated);
-
- // Convert 32 random bits in r0 to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- if (CpuFeatures::IsSupported(VFP2)) {
- __ PrepareCallCFunction(1, r0);
- __ ldr(r0,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- CpuFeatures::Scope scope(VFP2);
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
- // Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand::Zero());
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
- __ sub(r0, r4, Operand(kHeapObjectTag));
- __ vstr(d7, r0, HeapNumber::kValueOffset);
- __ mov(r0, r4);
- } else {
- __ PrepareCallCFunction(2, r0);
- __ ldr(r1,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ mov(r0, Operand(r4));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
- __ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
- }
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(r0, &done);
- // If the object is not a value type, return the object.
- __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
- __ b(ne, &done);
- __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label runtime, done, not_date_object;
- Register object = r0;
- Register result = r0;
- Register scratch0 = r9;
- Register scratch1 = r1;
-
- __ JumpIfSmi(object, &not_date_object);
- __ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
- __ b(ne, &not_date_object);
-
- if (index->value() == 0) {
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch1, Operand(stamp));
- __ ldr(scratch1, MemOperand(scratch1));
- __ ldr(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ cmp(scratch1, scratch0);
- __ b(ne, &runtime);
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch1);
- __ mov(r1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ jmp(&done);
- }
-
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(r2);
- __ pop(r1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(r2);
- __ pop(r1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
- context()->Plug(r0);
-}
-
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- if (CpuFeatures::IsSupported(VFP2)) {
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kMath_pow, 2);
- }
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(r1); // r0 = value. r1 = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(r1, &done);
-
- // If the object is not a value type, return the value.
- __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
- __ b(ne, &done);
-
- // Store the value.
- __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ mov(r2, r0);
- __ RecordWriteField(
- r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
-
- NumberToStringStub stub;
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(r0, r1);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(r1);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = r1;
- Register index = r0;
- Register result = r3;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result, Heap::kNanValueRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Load the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = r1;
- Register index = r0;
- Register scratch = r3;
- Register result = r0;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ mov(result, Operand(Smi::FromInt(0)));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- __ b(ne, &runtime);
-
- // InvokeFunction requires the function in r1. Move it in there.
- __ mov(r1, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(r0);
- __ CallRuntime(Runtime::kCall, args->length());
- __ bind(&done);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- context()->Plug(r0);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = r0;
- Register cache = r1;
- __ ldr(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
- __ ldr(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ ldr(cache,
- FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
-
- Label done, not_found;
- // tmp now holds finger offset as a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
- // r2 now holds finger offset as a smi.
- __ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // r3 now points to the start of fixed array elements.
- __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
- // Note side effect of PreIndex: r3 now points to the key of the pair.
- __ cmp(key, r2);
- __ b(ne, &not_found);
-
- __ ldr(r0, MemOperand(r3, kPointerSize));
- __ b(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = r0;
- Register left = r1;
- Register tmp = r2;
- Register tmp2 = r3;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmp(left, Operand(right));
- __ b(eq, &ok);
- // Fail if either is a non-HeapObject.
- __ and_(tmp, left, Operand(right));
- __ JumpIfSmi(tmp, &fail);
- __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
- __ b(ne, &fail);
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ cmp(tmp, Operand(tmp2));
- __ b(ne, &fail);
- __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
- __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
- __ cmp(tmp, tmp2);
- __ b(eq, &ok);
- __ bind(&fail);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&ok);
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- __ bind(&done);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
- __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(r0);
-
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
- __ IndexFromHash(r0, r0);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
- one_char_separator_loop_entry, long_separator_loop;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(0));
-
- // All aliases of the same register have disjoint lifetimes.
- Register array = r0;
- Register elements = no_reg; // Will be r0.
- Register result = no_reg; // Will be r0.
- Register separator = r1;
- Register array_length = r2;
- Register result_pos = no_reg; // Will be r2
- Register string_length = r3;
- Register string = r4;
- Register element = r5;
- Register elements_end = r6;
- Register scratch1 = r7;
- Register scratch2 = r9;
-
- // Separator operand is on the stack.
- __ pop(separator);
-
- // Check that the array is a JSArray.
- __ JumpIfSmi(array, &bailout);
- __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
- __ b(ne, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch1, scratch2, &bailout);
-
- // If the array has length zero, return the empty string.
- __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length, SetCC);
- __ b(ne, &non_trivial_array);
- __ LoadRoot(r0, Heap::kempty_stringRootIndex);
- __ b(&done);
-
- __ bind(&non_trivial_array);
-
- // Get the FixedArray containing array's elements.
- elements = array;
- __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
- array = no_reg; // End of array's live range.
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ mov(string_length, Operand::Zero());
- __ add(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- // Loop condition: while (element < elements_end).
- // Live values in registers:
- // elements: Fixed array of strings.
- // array_length: Length of the fixed array of strings (not smi)
- // separator: Separator string
- // string_length: Accumulated sum of string lengths (smi).
- // element: Current array element.
- // elements_end: Array end.
- if (generate_debug_code_) {
- __ cmp(array_length, Operand::Zero());
- __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
- }
- __ bind(&loop);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ JumpIfSmi(string, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ add(string_length, string_length, Operand(scratch1), SetCC);
- __ b(vs, &bailout);
- __ cmp(element, elements_end);
- __ b(lt, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmp(array_length, Operand(1));
- __ b(ne, &not_size_one_array);
- __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ b(&done);
-
- __ bind(&not_size_one_array);
-
- // Live values in registers:
- // separator: Separator string
- // array_length: Length of the array.
- // string_length: Sum of string lengths (smi).
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat ASCII string.
- __ JumpIfSmi(separator, &bailout);
- __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
-
- // Add (separator length times array_length) - separator length to the
- // string_length to get the length of the result string. array_length is not
- // smi but the other values are, so the result is a smi
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ sub(string_length, string_length, Operand(scratch1));
- __ smull(scratch2, ip, array_length, scratch1);
- // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
- // zero.
- __ cmp(ip, Operand::Zero());
- __ b(ne, &bailout);
- __ tst(scratch2, Operand(0x80000000));
- __ b(ne, &bailout);
- __ add(string_length, string_length, Operand(scratch2), SetCC);
- __ b(vs, &bailout);
- __ SmiUntag(string_length);
-
- // Get first element in the array to free up the elements register to be used
- // for the result.
- __ add(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- result = elements; // End of live range for elements.
- elements = no_reg;
- // Live values in registers:
- // element: First array element
- // separator: Separator string
- // string_length: Length of result string (not smi)
- // array_length: Length of the array.
- __ AllocateAsciiString(result,
- string_length,
- scratch1,
- scratch2,
- elements_end,
- &bailout);
- // Prepare for looping. Set up elements_end to end of the array. Set
- // result_pos to the position of the result where to write the first
- // character.
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- result_pos = array_length; // End of live range for array_length.
- array_length = no_reg;
- __ add(result_pos,
- result,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // Check the length of the separator.
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(1)));
- __ b(eq, &one_char_separator);
- __ b(gt, &long_separator);
-
- // Empty separator case
- __ bind(&empty_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
-
- // Copy next array element to the result.
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
- __ cmp(element, elements_end);
- __ b(lt, &empty_separator_loop); // End while (element < elements_end).
- ASSERT(result.is(r0));
- __ b(&done);
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its ASCII character value.
- __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&one_char_separator_loop_entry);
-
- __ bind(&one_char_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Single separator ASCII char (in lower byte).
-
- // Copy the separator character to the result.
- __ strb(separator, MemOperand(result_pos, 1, PostIndex));
-
- // Copy next array element to the result.
- __ bind(&one_char_separator_loop_entry);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
- __ cmp(element, elements_end);
- __ b(lt, &one_char_separator_loop); // End while (element < elements_end).
- ASSERT(result.is(r0));
- __ b(&done);
-
- // Long separator case (separator is more than one character). Entry is at the
- // label long_separator below.
- __ bind(&long_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Separator string.
-
- // Copy the separator to the result.
- __ ldr(string_length, FieldMemOperand(separator, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- separator,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
-
- __ bind(&long_separator);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
- __ cmp(element, elements_end);
- __ b(lt, &long_separator_loop); // End while (element < elements_end).
- ASSERT(result.is(r0));
- __ b(&done);
-
- __ bind(&bailout);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ ldr(r0, GlobalObjectOperand());
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
- __ push(r0);
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function.
- __ mov(r2, Operand(expr->name()));
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- } else {
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- }
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* property = expr->expression()->AsProperty();
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
-
- if (property != NULL) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag)));
- __ push(r1);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(r0);
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
- if (var->IsUnallocated()) {
- __ ldr(r2, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
- __ Push(r2, r1, r0);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(r0);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(var->is_this());
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ mov(r2, Operand(var->name()));
- __ push(r2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(r0);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
- break;
- }
-
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(Heap::kUndefinedValueRootIndex);
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else if (context()->IsTest()) {
- const TestContext* test = TestContext::cast(context());
- // The labels are swapped for the recursive call.
- VisitForControl(expr->expression(),
- test->false_label(),
- test->true_label(),
- test->fall_through());
- context()->Plug(test->true_label(), test->false_label());
- } else {
- // We handle value contexts explicitly rather than simply visiting
- // for control and plugging the control flow into the context,
- // because we need to prepare a pair of extra administrative AST ids
- // for the optimizing compiler.
- ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
- Label materialize_true, materialize_false, done;
- VisitForControl(expr->expression(),
- &materialize_false,
- &materialize_true,
- &materialize_true);
- __ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- if (context()->IsStackValue()) __ push(r0);
- __ jmp(&done);
- __ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- if (context()->IsStackValue()) __ push(r0);
- __ bind(&done);
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- { StackValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ CallRuntime(Runtime::kTypeof, 1);
- context()->Plug(r0);
- break;
- }
-
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ JumpIfSmi(result_register(), &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- // TODO(svenpanne): Allowing format strings in Comment would be nice here...
- Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
- // UnaryOpStub expects the argument to be in the
- // accumulator register r0.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
-
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
- // Expression can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- __ mov(ip, Operand(Smi::FromInt(0)));
- __ push(ip);
- }
- if (assign_type == NAMED_PROPERTY) {
- // Put the object both on the stack and in the accumulator.
- VisitForAccumulatorValue(prop->obj());
- __ push(r0);
- EmitNamedPropertyLoad(prop);
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ ldr(r1, MemOperand(sp, 0));
- __ push(r0);
- EmitKeyedPropertyLoad(prop);
- }
- }
-
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load my have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
- } else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
- }
-
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
- __ JumpIfSmi(r0, &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(r0);
- break;
- case NAMED_PROPERTY:
- __ str(r0, MemOperand(sp, kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
- break;
- }
- }
- }
-
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
- __ b(vs, &stub_call);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(r0, &done);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
- }
- __ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(count_value)));
-
- // Record position before stub call.
- SetSourcePosition(expr->position());
-
- BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
-
- // Store the value returned in r0.
- switch (assign_type) {
- case VARIABLE:
- if (expr->is_postfix()) {
- { EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(r0);
- }
- // For all contexts except EffectConstant We have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
- }
- break;
- case NAMED_PROPERTY: {
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- __ pop(r1);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r0);
- }
- break;
- }
- case KEYED_PROPERTY: {
- __ pop(r1); // Key.
- __ pop(r2); // Receiver.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r0);
- }
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- ASSERT(!context()->IsEffect());
- ASSERT(!context()->IsTest());
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
- __ ldr(r0, proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(r2, Operand(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallIC(ic);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(r0);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ mov(r0, Operand(proxy->name()));
- __ Push(cp, r0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(r0);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Expression* sub_expr,
- Handle<String> check) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(sub_expr);
- }
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
- if (check->Equals(isolate()->heap()->number_string())) {
- __ JumpIfSmi(r0, if_true);
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
- __ JumpIfSmi(r0, if_false);
- // Check for undetectable objects => false.
- __ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
- __ b(ge, if_false);
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
- __ CompareRoot(r0, Heap::kTrueValueRootIndex);
- __ b(eq, if_true);
- __ CompareRoot(r0, Heap::kFalseValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
- } else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
- __ CompareRoot(r0, Heap::kNullValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(eq, if_true);
- __ JumpIfSmi(r0, if_false);
- // Check for undetectable objects => true.
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(ne, if_true, if_false, fall_through);
-
- } else if (check->Equals(isolate()->heap()->function_string())) {
- __ JumpIfSmi(r0, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
- __ b(eq, if_true);
- __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
- __ JumpIfSmi(r0, if_false);
- if (!FLAG_harmony_typeof) {
- __ CompareRoot(r0, Heap::kNullValueRootIndex);
- __ b(eq, if_true);
- }
- if (FLAG_harmony_symbols) {
- __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
- __ b(eq, if_true);
- }
- // Check for JS objects => true.
- __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(lt, if_false);
- __ CompareInstanceType(r0, r1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(gt, if_false);
- // Check for undetectable objects => false.
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
- } else {
- if (if_false != fall_through) __ jmp(if_false);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
-
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr)) return;
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Token::Value op = expr->op();
- VisitForStackValue(expr->left());
- switch (op) {
- case Token::IN:
- VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- // The stub returns 0 for true.
- __ tst(r0, r0);
- Split(eq, if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- Condition cond = CompareIC::ComputeCondition(op);
- __ pop(r1);
-
- bool inline_smi_code = ShouldInlineSmiCase(op);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ orr(r2, r0, Operand(r1));
- patch_site.EmitJumpIfNotSmi(r2, &slow_case);
- __ cmp(r1, r0);
- Split(cond, if_true, if_false, NULL);
- __ bind(&slow_case);
- }
-
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
- patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ cmp(r0, Operand::Zero());
- Split(cond, if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(r1, nil_value);
- __ cmp(r0, r1);
- if (expr->op() == Token::EQ_STRICT) {
- Split(eq, if_true, if_false, fall_through);
- } else {
- Heap::RootListIndex other_nil_value = nil == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- __ b(eq, if_true);
- __ LoadRoot(r1, other_nil_value);
- __ cmp(r0, r1);
- __ b(eq, if_true);
- __ JumpIfSmi(r0, if_false);
- // It can be an undetectable object.
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
- __ cmp(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(r0);
-}
-
-
-Register FullCodeGenerator::result_register() {
- return r0;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return cp;
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ str(value, MemOperand(fp, frame_offset));
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ ldr(dst, ContextOperand(cp, context_index));
-}
-
-
-void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_global_scope() ||
- declaration_scope->is_module_scope()) {
- // Contexts nested in the native context have a canonical empty function
- // as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ mov(ip, Operand(Smi::FromInt(0)));
- } else if (declaration_scope->is_eval_scope()) {
- // Contexts created by a call to eval have the same closure as the
- // context calling eval, not the anonymous closure containing the eval
- // code. Fetch it from the context.
- __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
- } else {
- ASSERT(declaration_scope->is_function_scope());
- __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
- __ push(ip);
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- ASSERT(!result_register().is(r1));
- // Store result register while executing finally block.
- __ push(result_register());
- // Cook return address in link register to stack (smi encoded Code* delta)
- __ sub(r1, lr, Operand(masm_->CodeObject()));
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r1, r1, Operand(r1)); // Convert to smi.
-
- // Store result register while executing finally block.
- __ push(r1);
-
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(ip, Operand(pending_message_obj));
- __ ldr(r1, MemOperand(ip));
- __ push(r1);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(ip, Operand(has_pending_message));
- __ ldr(r1, MemOperand(ip));
- __ SmiTag(r1);
- __ push(r1);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(ip, Operand(pending_message_script));
- __ ldr(r1, MemOperand(ip));
- __ push(r1);
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(r1));
- // Restore pending message from stack.
- __ pop(r1);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(ip, Operand(pending_message_script));
- __ str(r1, MemOperand(ip));
-
- __ pop(r1);
- __ SmiUntag(r1);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(ip, Operand(has_pending_message));
- __ str(r1, MemOperand(ip));
-
- __ pop(r1);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(ip, Operand(pending_message_obj));
- __ str(r1, MemOperand(ip));
-
- // Restore result register from stack.
- __ pop(r1);
-
- // Uncook return address and return.
- __ pop(result_register());
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
- __ add(pc, r1, Operand(masm_->CodeObject()));
-}
-
-
-#undef __
-
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ ldr(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ PopTryHandler();
- __ bl(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/ic-arm.cc b/src/3rdparty/v8/src/arm/ic-arm.cc
deleted file mode 100644
index e8d0fab..0000000
--- a/src/3rdparty/v8/src/arm/ic-arm.cc
+++ /dev/null
@@ -1,1685 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "assembler-arm.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "disasm.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ b(eq, global_object);
- __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(eq, global_object);
- __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
- __ b(eq, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register elements,
- Register t0,
- Register t1,
- Label* miss) {
- // Register usage:
- // receiver: holds the receiver on entry and is unchanged.
- // elements: holds the property dictionary on fall through.
- // Scratch registers:
- // t0: used to holds the receiver map.
- // t1: used to holds the receiver instance type, receiver bit mask and
- // elements map.
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the receiver is a valid JS object.
- __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, miss);
-
- // If this assert fails, we have to check upper bound too.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
- GenerateGlobalInstanceTypeCheck(masm, t1, miss);
-
- // Check that the global object does not require access checks.
- __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasNamedInterceptor)));
- __ b(ne, miss);
-
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(t1, ip);
- __ b(ne, miss);
-}
-
-
-// Helper function used from LoadIC/CallIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done
-// result: Register for the result. It is only updated if a jump to the miss
-// label is not done. Can be the same as elements or name clobbering
-// one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry check that the value is a normal
- // property.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
- __ b(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- __ ldr(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done
-// value: The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register value,
- Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
- __ b(ne, miss);
-
- // Store the value at the masked, scaled index and return.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
- __ str(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- Register scratch,
- int interceptor_bit,
- Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ b(ne, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects work as intended.
- ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(JS_OBJECT_TYPE));
- __ b(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // elements - holds the elements of the receiver on exit.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // scratch1 - used to hold elements map and elements length.
- // Holds the elements map if not_fast_array branch is taken.
- //
- // scratch2 - used to hold the loaded value.
-
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch1, ip);
- __ b(ne, not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
- // Check that the key (index) is within bounds.
- __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch1));
- __ b(hs, out_of_range);
- // Fast case: Do the load.
- __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(scratch2,
- MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, ip);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ b(eq, out_of_range);
- __ mov(result, scratch2);
-}
-
-
-// Checks whether a key is an array index string or an internalized string.
-// Falls through if a key is an internalized string.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_internalized) {
- // The key is not a smi.
- // Is it a string?
- __ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE);
- __ b(ge, not_internalized);
-
- // Is the string an array index, with cached numeric value?
- __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
- __ tst(hash, Operand(String::kContainsCachedArrayIndexMask));
- __ b(eq, index_string);
-
- // Is the string internalized?
- // map: key map
- __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ tst(hash, Operand(kIsInternalizedMask));
- __ b(eq, not_internalized);
-}
-
-
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- r1 : receiver
- // -- r2 : name
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5, r6);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(r1, &number);
- __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
- __ b(ne, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, r1);
- __ b(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ cmp(r3, Operand(FIRST_NONSTRING_TYPE));
- __ b(hs, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, r1);
- __ b(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &boolean);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, r1);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5, r6);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss,
- Register scratch) {
- // r1: function
-
- // Check that the value isn't a smi.
- __ JumpIfSmi(r1, miss);
-
- // Check that the value is a JSFunction.
- __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- GenerateStringDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
-
- // r0: elements
- // Search the dictionary - put result in register r1.
- GenerateDictionaryLoad(masm, &miss, r0, r2, r1, r3, r4);
-
- GenerateFunctionTailCall(masm, argc, &miss, r4);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(isolate->counters()->call_miss(), 1, r3, r4);
- } else {
- __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, r3, r4);
- }
-
- // Get the receiver of the function from the stack.
- __ ldr(r3, MemOperand(sp, argc * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ Push(r3, r2);
-
- // Call the entry.
- __ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-
- // Move result to r1 and leave the internal frame.
- __ mov(r1, Operand(r0));
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
- __ JumpIfSmi(r2, &invoke);
- __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
- __ b(eq, &global);
- __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(ne, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
- __ str(r2, MemOperand(sp, argc * kPointerSize));
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(r2, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, r0, r3);
-
- __ bind(&do_call);
- // receiver in r1 is not used after this point.
- // r2: key
- // r1: function
- GenerateFunctionTailCall(masm, argc, &slow_call, r0);
-
- __ bind(&check_number_dictionary);
- // r2: key
- // r3: elements map
- // r4: elements
- // Check whether the elements is a number dictionary.
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &slow_load);
- __ mov(r0, Operand(r2, ASR, kSmiTagSize));
- // r0: untagged index
- __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r2); // save the key
- __ Push(r1, r2); // pass the receiver and the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(r2); // restore the key
- }
- __ mov(r1, r0);
- __ jmp(&do_call);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, r2, r0, r3, &index_string, &slow_call);
-
- // The key is known to be internalized.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, r1, r0, r3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ ldr(r0, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &lookup_monomorphic_cache);
-
- GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, r0, r3);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, r0, r3);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor an internalized string,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1, r0, r3);
- GenerateMiss(masm, argc);
-
- __ bind(&index_string);
- __ IndexFromHash(r3, r2);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Check if the name is a string.
- Label miss;
- __ JumpIfSmi(r2, &miss);
- __ IsObjectJSStringType(r2, r0, &miss);
-
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
-// Defined in ic.cc.
-Object* LoadIC_Miss(Arguments args);
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::LOAD_IC, MONOMORPHIC, Code::HANDLER_FRAGMENT);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r0, r2, r3, r4, r5, r6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
-
- // r1: elements
- GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
- __ Ret();
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
-
- __ mov(r3, r0);
- __ Push(r3, r2);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the map check
- // later, we do not need to check for interceptors or whether it
- // requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
- __ b(lt, slow_case);
-
- // Check that the key is a positive smi.
- __ tst(key, Operand(0x80000001));
- __ b(ne, slow_case);
-
- // Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
- __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
- __ cmp(key, Operand(scratch2));
- __ b(cs, unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kOffset =
- FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
- __ mov(scratch3, Operand(kPointerSize >> 1));
- __ mul(scratch3, key, scratch3);
- __ add(scratch3, scratch3, Operand(kOffset));
-
- __ ldr(scratch2, MemOperand(scratch1, scratch3));
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, scratch3);
- __ b(eq, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ mov(scratch3, Operand(kPointerSize >> 1));
- __ mul(scratch3, scratch2, scratch3);
- __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
- return MemOperand(scratch1, scratch3);
-}
-
-
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map. The parameter_map register
- // must be loaded with the parameter map of the arguments object and is
- // overwritten.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
- DONT_DO_SMI_CHECK);
- __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch));
- __ b(cs, slow_case);
- __ mov(scratch, Operand(kPointerSize >> 1));
- __ mul(scratch, key, scratch);
- __ add(scratch,
- scratch,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- return MemOperand(backing_store, scratch);
-}
-
-
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow, notin;
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, &notin, &slow);
- __ ldr(r0, mapped_location);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r2.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow);
- __ ldr(r2, unmapped_location);
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, r3);
- __ b(eq, &slow);
- __ mov(r0, r2);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, notin;
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
- __ str(r0, mapped_location);
- __ add(r6, r3, r5);
- __ mov(r9, r0);
- __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
- __ str(r0, unmapped_location);
- __ add(r6, r3, r4);
- __ mov(r9, r0);
- __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label slow, notin;
- // Load receiver.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, r1, r2, r3, r4, r5, &notin, &slow);
- __ ldr(r1, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow, r3);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, r2, r3, r4, &slow);
- __ ldr(r1, unmapped_location);
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ cmp(r1, r3);
- __ b(eq, &slow);
- GenerateFunctionTailCall(masm, argc, &slow, r3);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
-
- __ Push(r1, r0);
-
- // Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = r0;
- Register receiver = r1;
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(r2, r3, &check_number_dictionary);
-
- GenerateFastArrayLoad(
- masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r2, r3);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
-
- // Check whether the elements is a number dictionary.
- // r0: key
- // r3: elements map
- // r4: elements
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &slow);
- __ mov(r2, Operand(r0, ASR, kSmiTagSize));
- __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
- __ Ret();
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
- 1, r2, r3);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
- __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &probe_dictionary);
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
- __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
- __ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ And(r3, r3, Operand(mask));
-
- // Load the key (consisting of map and internalized string) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
-
- __ mov(r4, Operand(cache_keys));
- __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- // Load map and move r4 to next entry.
- __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
- __ cmp(r2, r5);
- __ b(ne, &try_next_entry);
- __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load string
- __ cmp(r0, r5);
- __ b(eq, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- // Last entry: Load map and move r4 to string.
- __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
- __ cmp(r2, r5);
- __ b(ne, &slow);
- __ ldr(r5, MemOperand(r4));
- __ cmp(r0, r5);
- __ b(ne, &slow);
-
- // Get field offset.
- // r0 : key
- // r1 : receiver
- // r2 : receiver's map
- // r3 : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- __ mov(r4, Operand(cache_field_offsets));
- if (i != 0) {
- __ add(r3, r3, Operand(i));
- }
- __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
- __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
- __ sub(r5, r5, r6, SetCC);
- __ b(ge, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ add(r6, r6, r5); // Index from start of object.
- __ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
- __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, r2, r3);
- __ Ret();
-
- // Load property array property.
- __ bind(&property_array_property);
- __ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, r2, r3);
- __ Ret();
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // r1: receiver
- // r0: key
- // r3: elements
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
- // Load the property to r0.
- GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
- __ IncrementCounter(
- isolate->counters()->keyed_load_generic_symbol(), 1, r2, r3);
- __ Ret();
-
- __ bind(&index_string);
- __ IndexFromHash(r3, key);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key (index)
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- Register receiver = r1;
- Register index = r0;
- Register scratch = r3;
- Register result = r0;
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r1, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
- __ b(ne, &slow);
-
- // Get the map of the receiver.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
- __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
- __ b(ne, &slow);
-
- // Everything is fine, call runtime.
- __ Push(r1, r0); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
- masm->isolate()),
- 2,
- 1);
-
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
-
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r2 : receiver
- // -- r3 : target map
- // -- lr : return address
- // -----------------------------------
- // Must return the modified receiver in r0.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
- __ mov(r0, r2);
- __ Ret();
- __ bind(&fail);
- }
-
- __ push(r2);
- __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
- MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r2 : receiver
- // -- r3 : target map
- // -- lr : return address
- // -----------------------------------
- // Must return the modified receiver in r0.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
- __ mov(r0, r2);
- __ Ret();
- __ bind(&fail);
- }
-
- __ push(r2);
- __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
-
- __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length,
- Register value,
- Register key,
- Register receiver,
- Register receiver_map,
- Register elements_map,
- Register elements) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
-
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- Register scratch_value = r4;
- Register address = r5;
- if (check_map == kCheckMap) {
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ b(ne, fast_double);
- }
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
-
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(address));
- __ Ret();
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(address));
- // Update write barrier for the elements array address.
- __ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- scratch_value,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
- __ b(ne, slow);
- }
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- elements, // Overwritten.
- r3, // Scratch regs...
- r4,
- r5,
- r6,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Ret();
-
- __ bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- r4,
- slow);
- ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- r4,
- slow);
- ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
- slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
- // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- r4,
- slow);
- ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array;
-
- // Register usage.
- Register value = r0;
- Register key = r1;
- Register receiver = r2;
- Register receiver_map = r3;
- Register elements_map = r6;
- Register elements = r7; // Elements array of the receiver.
- // r4 and r5 are used as general scratch registers.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &slow);
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
- // Get the map of the object.
- __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &slow);
- // Check if the object is a JS array or not.
- __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
- __ cmp(r4, Operand(JS_ARRAY_TYPE));
- __ b(eq, &array);
- // Check that the object is some kind of JSObject.
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(lt, &slow);
-
- // Object case: Check key against length in the elements array.
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(lo, &fast_object);
-
- // Slow case, handle jump to runtime.
- __ bind(&slow);
- // Entry registers are intact.
- // r0: value.
- // r1: key.
- // r2: receiver.
- GenerateRuntimeSetProperty(masm, strict_mode);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // Condition code from comparing key and array length is still available.
- __ b(ne, &slow); // Only support writing to writing to array[array.length].
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(hs, &slow);
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ b(ne, &check_if_double_array);
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ b(ne, &slow);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is the length is always a smi.
- __ bind(&array);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(hs, &extra);
-
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
-
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5, r6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- __ Push(r1, r2, r0);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
-
- GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(),
- 1, r4, r5);
- __ Ret();
-
- __ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- __ Push(r1, r2, r0);
-
- __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ mov(r0, Operand(Smi::FromInt(strict_mode)));
- __ Push(r1, r0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address cmp_instruction_address =
- Assembler::return_address_from_call_start(address);
-
- // If the instruction following the call is not a cmp rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(cmp_instruction_address);
- return Assembler::IsCmpImmediate(instr);
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
- Address cmp_instruction_address =
- Assembler::return_address_from_call_start(address);
-
- // If the instruction following the call is not a cmp rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(cmp_instruction_address);
- if (!Assembler::IsCmpImmediate(instr)) {
- return;
- }
-
- // The delta to the start of the map check instruction and the
- // condition code uses at the patched jump.
- int delta = Assembler::GetCmpImmediateRawImmediate(instr);
- delta +=
- Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
- // If the delta is 0 the instruction is cmp r0, #0 which also signals that
- // nothing was inlined.
- if (delta == 0) {
- return;
- }
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
- address, cmp_instruction_address, delta);
- }
-#endif
-
- Address patch_address =
- cmp_instruction_address - delta * Instruction::kInstrSize;
- Instr instr_at_patch = Assembler::instr_at(patch_address);
- Instr branch_instr =
- Assembler::instr_at(patch_address + Instruction::kInstrSize);
- // This is patching a conditional "jump if not smi/jump if smi" site.
- // Enabling by changing from
- // cmp rx, rx
- // b eq/ne, <target>
- // to
- // tst rx, #kSmiTagMask
- // b ne/eq, <target>
- // and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
- Register reg = Assembler::GetRn(instr_at_patch);
- if (check == ENABLE_INLINED_SMI_CHECK) {
- ASSERT(Assembler::IsCmpRegister(instr_at_patch));
- ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
- Assembler::GetRm(instr_at_patch).code());
- patcher.masm()->tst(reg, Operand(kSmiTagMask));
- } else {
- ASSERT(check == DISABLE_INLINED_SMI_CHECK);
- ASSERT(Assembler::IsTstImmediate(instr_at_patch));
- patcher.masm()->cmp(reg, reg);
- }
- ASSERT(Assembler::IsBranch(branch_instr));
- if (Assembler::GetCondition(branch_instr) == eq) {
- patcher.EmitCondition(ne);
- } else {
- ASSERT(Assembler::GetCondition(branch_instr) == ne);
- patcher.EmitCondition(eq);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/lithium-arm.cc b/src/3rdparty/v8/src/arm/lithium-arm.cc
deleted file mode 100644
index 3385b43..0000000
--- a/src/3rdparty/v8/src/arm/lithium-arm.cc
+++ /dev/null
@@ -1,2515 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "lithium-allocator-inl.h"
-#include "arm/lithium-arm.h"
-#include "arm/lithium-codegen-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Inputs operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- ASSERT(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "shl-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d",
- *hydrogen()->class_name(),
- true_block_id(),
- false_block_id());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
- true_block_id(), false_block_id());
-}
-
-
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
-}
-
-
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[r2] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ASSERT(hydrogen()->property_cell()->value()->IsSmi());
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(hydrogen()->property_cell()->value())->value());
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
- stream->Add(" length ");
- length()->PrintTo(stream);
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
- } else {
- stream->Add("] <- ");
- }
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
- // Skip a slot if for a double-width slot.
- if (is_double) spill_slot_count_++;
- return spill_slot_count_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- ASSERT(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-void LChunkBuilder::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr, int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(
- LTemplateInstruction<1, I, T>* instr, Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- if (hinstr->HasObservableSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_.IsNone());
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = sim->ast_id();
- }
-
- // If instruction does not have side-effects lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach environment to this call even if
- // call sequence can not deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- operand->set_virtual_register(allocator_->GetVirtualRegister());
- if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseRegisterAtStart(right_value);
- }
-
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
- }
- }
-
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
- HValue* left = instr->left();
- HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- LOperand* left_operand = UseFixed(left, r1);
- LOperand* right_operand = UseFixed(right, r0);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- ASSERT(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- ASSERT(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- ASSERT(last_environment != NULL);
- // Only copy the environment, if it is later used again.
- if (pred->end()->SecondSuccessor() == NULL) {
- ASSERT(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- ASSERT(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment, it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
-
- if (instr != NULL) {
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- instr->set_hydrogen_value(current);
- chunk_->AddInstruction(instr, current_block_);
- }
- current_instruction_ = old_current;
-}
-
-
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
- }
-
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
-
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment.
- Representation rep = value->representation();
- HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
- return AssignEnvironment(result);
- }
- return result;
-}
-
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LCmpMapAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
- LOperand* value = UseRegister(instr->value());
- return DefineAsRegister(new(zone()) LArgumentsLength(value));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LInstanceOf* result =
- new(zone()) LInstanceOf(UseFixed(instr->left(), r0),
- UseFixed(instr->right(), r1));
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), r0),
- FixedTemp(r4));
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegisterAtStart(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
- LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), r1);
- LOperand* receiver = UseFixed(instr->receiver(), r0);
- LOperand* length = UseFixed(instr->length(), r2);
- LOperand* elements = UseFixed(instr->elements(), r3);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
- LOperand* argument = Use(instr->argument());
- return new(zone()) LPushArgument(argument);
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses()
- ? NULL
- : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- // If there is a non-return use, the context must be allocated in a register.
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- return DefineAsRegister(new(zone()) LContext);
- }
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context,
- instr->qml_global()));
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
-}
-
-
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* function = UseFixed(instr->function(), r1);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
- return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
- LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
- return DefineAsRegister(result);
- } else if (op == kMathPowHalf) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LOperand* temp = FixedTemp(d3);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- return DefineFixedDouble(result, d2);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
-
- LOperand* temp = (op == kMathRound) ? FixedTemp(d3) : NULL;
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathFloor:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathSqrt:
- return DefineAsRegister(result);
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- argument_count_ -= instr->argument_count();
- LOperand* key = UseFixed(instr->key(), r2);
- return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- LCallGlobal* result = new(zone()) LCallGlobal(instr->qml_global());
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* constructor = UseFixed(instr->constructor(), r1);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(constructor);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- LOperand* constructor = UseFixed(instr->constructor(), r1);
- argument_count_ -= instr->argument_count();
- LCallNewArray* result = new(zone()) LCallNewArray(constructor);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* function = UseFixed(instr->function(), r1);
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), r0),
- instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineAsRegister(new(zone()) LBitI(left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LBitNotI(value));
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
- // TODO(1042) The fixed register allocation
- // is needed because we call TypeRecordingBinaryOpStub from
- // the generated code, which requires registers r0
- // and r1 to be used. We should remove that
- // when we provide a native implementation.
- LOperand* dividend = UseFixed(instr->left(), r0);
- LOperand* divisor = UseFixed(instr->right(), r1);
- return AssignEnvironment(AssignPointerMap(
- DefineFixed(new(zone()) LDivI(dividend, divisor), r0)));
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
- uint32_t divisor_abs = abs(divisor);
- // Dividing by 0, 1, and powers of 2 is easy.
- // Note that IsPowerOf2(0) returns true;
- ASSERT(IsPowerOf2(0) == true);
- if (IsPowerOf2(divisor_abs)) return true;
-
- // We have magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer’s Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
-
- return false;
-}
-
-
-HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
- }
- return NULL;
-}
-
-
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- if (CpuFeatures::IsSupported(SUDIV)) {
- // A value with an integer representation does not need to be transformed.
- if (divisor->representation().IsInteger32()) {
- return divisor;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (divisor->IsChange() &&
- HChange::cast(divisor)->from().IsInteger32()) {
- return HChange::cast(divisor)->value();
- }
- }
-
- if (divisor->IsConstant() && HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- int32_t int32_val = constant_val->Integer32Value();
- if (LChunkBuilder::HasMagicNumberForDivisor(int32_val) ||
- CpuFeatures::IsSupported(SUDIV)) {
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
- }
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
- ? UseRegister(right)
- : UseOrConstant(right);
- LOperand* remainder = TempRegister();
- ASSERT(CpuFeatures::IsSupported(SUDIV) ||
- (right->IsConstant() &&
- HConstant::cast(right)->HasInteger32Value() &&
- HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value())));
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LModI* mod;
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
- } else {
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- mod = new(zone()) LModI(dividend,
- divisor,
- TempRegister(),
- FixedTemp(d10),
- FixedTemp(d11));
- }
-
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero)) {
- return AssignEnvironment(DefineAsRegister(mod));
- } else {
- return DefineAsRegister(mod);
- }
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
- } else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = UseFixedDouble(instr->right(), d2);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, d1), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left;
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
- (instr->CheckFlag(HValue::kCanOverflow) ||
- !right->IsConstantOperand())) {
- left = UseRegister(instr->LeastConstantOperand());
- temp = TempRegister();
- } else {
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- }
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- AssignEnvironment(mul);
- }
- return DefineAsRegister(mul);
-
- } else if (instr->representation().IsDouble()) {
- if (instr->UseCount() == 1 && (instr->uses().value()->IsAdd() ||
- instr->uses().value()->IsSub())) {
- HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value());
-
- if (use->IsAdd() && instr == use->left()) {
- // This mul is the lhs of an add. The add and mul will be folded into a
- // multiply-add in DoAdd.
- return NULL;
- }
- if (instr == use->right() && use->IsAdd() && !use->left()->IsMul()) {
- // This mul is the rhs of an add, where the lhs is not another mul.
- // The add and mul will be folded into a multiply-add in DoAdd.
- return NULL;
- }
- if (instr == use->right() && use->IsSub()) {
- // This mul is the rhs of a sub. The sub and mul will be folded into a
- // multiply-sub in DoSub.
- return NULL;
- }
- }
-
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- if (instr->left()->IsConstant()) {
- // If lhs is constant, do reverse subtraction instead.
- return DoRSub(instr);
- }
-
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new(zone()) LSubI(left, right);
- LInstruction* result = DefineAsRegister(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- if (instr->right()->IsMul()) {
- return DoMultiplySub(instr->left(), HMul::cast(instr->right()));
- }
-
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- // Note: The lhs of the subtraction becomes the rhs of the
- // reverse-subtraction.
- LOperand* left = UseRegisterAtStart(instr->right());
- LOperand* right = UseOrConstantAtStart(instr->left());
- LRSubI* rsb = new(zone()) LRSubI(left, right);
- LInstruction* result = DefineAsRegister(rsb);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
- LOperand* multiplier_op = UseRegisterAtStart(mul->left());
- LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
- LOperand* addend_op = UseRegisterAtStart(addend);
- return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
- multiplicand_op));
-}
-
-
-LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
- LOperand* minuend_op = UseRegisterAtStart(minuend);
- LOperand* multiplier_op = UseRegisterAtStart(mul->left());
- LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
-
- return DefineSameAsFirst(new(zone()) LMultiplySubD(minuend_op,
- multiplier_op,
- multiplicand_op));
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineAsRegister(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- if (instr->left()->IsMul()) {
- return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
- }
-
- if (instr->right()->IsMul()) {
- ASSERT(!instr->left()->IsMul());
- return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
- }
-
- return DoArithmeticD(Token::ADD, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
- } else {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return DefineAsRegister(new(zone()) LMathMinMax(left, right));
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), d2) :
- UseFixed(instr->right(), r2);
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, d3),
- instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), r0);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, d7), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LCmpT* result = new(zone()) LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
- } else {
- ASSERT(r.IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpConstantEqAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsObjectAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasInstanceTypeAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
- return new(zone()) LClassOfTestAndBranch(value, TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), r0);
- LDateField* result =
- new(zone()) LDateField(object, FixedTemp(r1), instr->index());
- return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- LOperand* value = UseRegister(instr->value());
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
- HInductionVariableAnnotation* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = UseRegister(instr->length());
- return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* value = UseFixed(instr->value(), r0);
- return MarkAsCall(new(zone()) LThrow(value), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* res = NULL;
- if (instr->value()->type().IsSmi()) {
- res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
- } else {
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
- : NULL;
- LOperand* temp3 = FixedTemp(d11);
- res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
- temp1,
- temp2,
- temp3));
- res = AssignEnvironment(res);
- }
- return res;
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
-
- // Make sure that the temp and result_temp registers are
- // different.
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
- Define(result, result_temp);
- return AssignPointerMap(result);
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
- LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
- return AssignEnvironment(DefineAsRegister(res));
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegisterAtStart(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineAsRegister(new(zone()) LSmiTag(value));
- } else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- }
- } else {
- ASSERT(to.IsDouble());
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value())));
- } else {
- return DefineAsRegister(
- new(zone()) LInteger32ToDouble(Use(instr->value())));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(Define(result, temp1));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckMaps(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(d11)));
- } else if (input_rep.IsInteger32()) {
- return DefineAsRegister(new(zone()) LClampIToUint8(reg));
- } else {
- ASSERT(input_rep.IsTagged());
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve d1 explicitly.
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(d11));
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new(zone()) LReturn(UseFixed(instr->value(), r0));
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- return DefineAsRegister(new(zone()) LConstantD);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), r0);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* value = UseRegister(instr->value());
- // Use a temp to check the value in the cell in the case where we perform
- // a hole check.
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
- : new(zone()) LStoreGlobalCell(value, NULL);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), r1);
- LOperand* value = UseFixed(instr->value(), r0);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
- value = UseTempRegister(instr->value());
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- return DefineAsRegister(
- new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), r0);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, r0), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), r0);
- LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
-
- if (!instr->is_external()) {
- LOperand* obj = NULL;
- if (instr->representation().IsDouble()) {
- obj = UseTempRegister(instr->elements());
- } else {
- ASSERT(instr->representation().IsTagged());
- obj = UseRegisterAtStart(instr->elements());
- }
- result = new(zone()) LLoadKeyed(obj, key);
- } else {
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- // float->double conversion on non-VFP2 requires an extra scratch
- // register. For convenience, just mark the elements register as "UseTemp"
- // so that it can be used as a temp during the float->double conversion
- // after it's no longer needed after the float load.
- bool needs_temp =
- !CpuFeatures::IsSupported(VFP2) &&
- (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
- LOperand* external_pointer = needs_temp
- ? UseTempRegister(instr->elements())
- : UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
- }
-
- DefineAsRegister(result);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- return can_deoptimize ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), r1);
- LOperand* key = UseFixed(instr->key(), r0);
-
- LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), r0);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
-
- if (!instr->is_external()) {
- ASSERT(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* key = NULL;
- LOperand* val = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- } else {
- ASSERT(instr->value()->representation().IsTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = needs_write_barrier ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- }
-
- return new(zone()) LStoreKeyed(object, key, val);
- }
-
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(external_pointer, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* obj = UseFixed(instr->object(), r2);
- LOperand* key = UseFixed(instr->key(), r1);
- LOperand* val = UseFixed(instr->value(), r0);
-
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsTagged());
-
- return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* new_map_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
- return DefineSameAsFirst(result);
- } else if (FLAG_compiled_transitions) {
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL, NULL);
- return AssignPointerMap(result);
- } else {
- LOperand* object = UseFixed(instr->object(), r0);
- LOperand* fixed_object_reg = FixedTemp(r2);
- LOperand* new_map_reg = FixedTemp(r3);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object,
- new_map_reg,
- fixed_object_reg);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
- instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = instr->is_in_object()
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else {
- obj = needs_write_barrier_for_map
- ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
-
- // We need a temporary register for write barrier of the map field.
- LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
-
- return new(zone()) LStoreNamedField(obj, val, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* obj = UseFixed(instr->object(), r1);
- LOperand* val = UseFixed(instr->value(), r0);
-
- LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), r0),
- instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LAllocateObject* result =
- new(zone()) LAllocateObject(TempRegister(), TempRegister());
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* size = UseTempRegister(instr->size());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseFixed(instr->object(), r0);
- LOperand* key = UseFixed(instr->key(), r1);
- LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- ASSERT(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- ASSERT(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
- Register reg = descriptor->register_params_[instr->index()];
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = UseRegister(instr->index());
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), r0);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), r0));
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
-
- // If there is an instruction pending deoptimization environment create a
- // lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LInstruction* result = new(zone()) LLazyBailout;
- result = AssignEnvironment(result);
- // Store the lazy deopt environment with the instruction if needed. Right
- // now it is only used for LInstanceOfKnownGlobal.
- instruction_pending_deoptimization_environment_->
- SetDeferredLazyDeoptimizationEnvironment(result->environment());
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = BailoutId::None();
- return result;
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
- } else {
- ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
- }
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
- }
-
- HEnvironment* outer = current_block_->last_environment()->
- DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
-
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* key = UseRegisterAtStart(instr->key());
- LOperand* object = UseRegisterAtStart(instr->object());
- LIn* result = new(zone()) LIn(key, object);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* object = UseFixed(instr->enumerable(), r0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
- return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseRegister(instr->index());
- return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/arm/lithium-arm.h b/src/3rdparty/v8/src/arm/lithium-arm.h
deleted file mode 100644
index c654400..0000000
--- a/src/3rdparty/v8/src/arm/lithium-arm.h
+++ /dev/null
@@ -1,2742 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_LITHIUM_ARM_H_
-#define V8_ARM_LITHIUM_ARM_H_
-
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(AllocateObject) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(ArrayLiteral) \
- V(BitI) \
- V(BitNotI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallConstantFunction) \
- V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
- V(CallNew) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CallStub) \
- V(CheckFunction) \
- V(CheckInstanceType) \
- V(CheckNonSmi) \
- V(CheckMaps) \
- V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CmpConstantEqAndBranch) \
- V(CmpIDAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantI) \
- V(ConstantT) \
- V(Context) \
- V(DeclareGlobals) \
- V(DeleteProperty) \
- V(Deoptimize) \
- V(DivI) \
- V(DoubleToI) \
- V(DummyUse) \
- V(ElementsKind) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
- V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(In) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(Uint32ToDouble) \
- V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
- V(IsObjectAndBranch) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(JSArrayLength) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadElements) \
- V(LoadExternalArrayPointer) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyed) \
- V(LoadKeyedGeneric) \
- V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
- V(LoadNamedGeneric) \
- V(MapEnumLength) \
- V(MathExp) \
- V(MathFloorOfDiv) \
- V(MathMinMax) \
- V(ModI) \
- V(MulI) \
- V(MultiplyAddD) \
- V(MultiplySubD) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(ObjectLiteral) \
- V(OsrEntry) \
- V(OuterContext) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(Random) \
- V(RegExpLiteral) \
- V(Return) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
- V(StoreKeyed) \
- V(StoreKeyedGeneric) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(StringLength) \
- V(SubI) \
- V(RSubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(Throw) \
- V(ToFastProperties) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop)
-
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction: public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false) { }
- virtual ~LInstruction() { }
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
- void MarkAsCall() { is_call_ = true; }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return is_call_; }
- bool ClobbersRegisters() const { return is_call_; }
- bool ClobbersDoubleRegisters() const { return is_call_; }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- private:
- // Iterator support.
- friend class InputIterator;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- bool is_call_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
-
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
-};
-
-
-class LGap: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
- static LGap* cast(LInstruction* instr) {
- ASSERT(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap: public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
-
- int block_id() const { return block_id_; }
-
- private:
- int block_id_;
-};
-
-
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-};
-
-
-class LLabel: public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- virtual void PrintDataTo(StringStream* stream);
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
-};
-
-
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
- virtual bool IsControl() const { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-};
-
-
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
- public:
- LWrapReceiver(LOperand* receiver, LOperand* function) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-};
-
-
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-};
-
-
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LModI: public LTemplateInstruction<1, 2, 3> {
- public:
- // Used when the right hand is a constant power of 2.
- LModI(LOperand* left,
- LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = NULL;
- temps_[1] = NULL;
- temps_[2] = NULL;
- }
-
- // Used for the standard case.
- LModI(LOperand* left,
- LOperand* right,
- LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivI: public LTemplateInstruction<1, 2, 0> {
- public:
- LDivI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-};
-
-
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
- public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulI: public LTemplateInstruction<1, 2, 1> {
- public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplyAddD(LOperand* addend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = addend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* addend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
-};
-
-
-// Instruction for computing minuend - multiplier * multiplicand.
-class LMultiplySubD: public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplySubD(LOperand* minuend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = minuend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* minuend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
-};
-
-
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
- public:
- LUnaryMathOperation(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
-};
-
-
-class LMathExp: public LTemplateInstruction<1, 1, 3> {
- public:
- LMathExp(LOperand* value,
- LOperand* double_temp,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = double_temp;
- ExternalReference::InitializeMathExpData();
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* double_temp() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
-};
-
-
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
- }
-
- LOperand* left() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
-class LIsNilAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsNilAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
-
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
- public:
- explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
- public:
- LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasInstanceTypeAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
- "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
- public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
- public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
- lazy_deopt_env_ = env;
- }
-
- private:
- LEnvironment* lazy_deopt_env_;
-};
-
-
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI: public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI: public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LRSubI: public LTemplateInstruction<1, 2, 0> {
- public:
- LRSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantD: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value() const { return hydrogen()->handle(); }
-};
-
-
-class LBranch: public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
- public:
- LCmpMapAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
-};
-
-
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
-class LDateField: public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-
- private:
- Smi* index_;
-};
-
-
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
- public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
- inputs_[0] = string;
- inputs_[1] = index;
- inputs_[2] = value;
- }
-
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
-class LThrow: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower: public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LRandom: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRandom(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- Token::Value op() const { return op_; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LReturn(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadFunctionPrototype(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
- }
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- bool is_external() const {
- return hydrogen()->is_external();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedGeneric(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-};
-
-
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
-};
-
-
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
- public:
- LStoreGlobalCell(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
- }
-
- LOperand* global_object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalObject(LOperand* context, bool qml_global) {
- inputs_[0] = context;
- qml_global_ = qml_global;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
- LOperand* context() { return inputs_[0]; }
- bool qml_global() { return qml_global_; }
-
- private:
- bool qml_global_;
-};
-
-
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
-
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> function() { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
-};
-
-
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
- }
-
- LOperand* key() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- explicit LCallGlobal(bool qml_global) : qml_global_(qml_global) {}
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
-};
-
-
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> target() const { return hydrogen()->target(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
- }
-
- LOperand* constructor() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNewArray(LOperand* constructor) {
- inputs_[0] = constructor;
- }
-
- LOperand* constructor() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
-};
-
-
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagU(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
- public:
- LDoubleToI(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
- public:
- LTaggedToI(LOperand* value,
- LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
-};
-
-
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
- bool needs_check() const { return needs_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
-};
-
-
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- bool is_external() const { return hydrogen()->is_external(); }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* new_map_temp,
- LOperand* fixed_object_temp) {
- inputs_[0] = object;
- temps_[0] = new_map_temp;
- temps_[1] = fixed_object_temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
- LOperand* temp() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
- public:
- LTrapAllocationMemento(LOperand* object,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
- "trap-allocation-memento")
-};
-
-
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
- }
-
- LOperand* char_code() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
- }
-
- LOperand* string() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckFunction(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
-};
-
-
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMaps(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
- public:
- LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- ZoneList<Handle<JSObject> >* prototypes() const {
- return hydrogen()->prototypes();
- }
- ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-};
-
-
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
- public:
- LClampDToUint8(LOperand* unclamped, LOperand* temp) {
- inputs_[0] = unclamped;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* unclamped, LOperand* temp) {
- inputs_[0] = unclamped;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
- public:
- LAllocateObject(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
-class LAllocate: public LTemplateInstruction<1, 2, 2> {
- public:
- LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
- inputs_[1] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* size() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
-};
-
-
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
- public:
- LDeleteProperty(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry();
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
- LOperand* double_register_spills_[
- DoubleRegister::kMaxNumAllocatableRegisters];
-};
-
-
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LIn: public LTemplateInstruction<1, 2, 0> {
- public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
- inputs_[1] = object;
- }
-
- LOperand* key() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk: public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
-
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
-};
-
-
-class LChunkBuilder BASE_EMBEDDED {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
- info_(info),
- graph_(graph),
- zone_(graph->zone()),
- status_(UNUSED),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
- LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
- LInstruction* DoRSub(HSub* instr);
-
- static bool HasMagicNumberForDivisor(int32_t divisor);
- static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
-
- private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(const char* reason);
-
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- DoubleRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // Operand created by UseRegister is guaranteed to be live until the end of
- // instruction. This means that register allocator will not reuse it's
- // register for any other operand inside instruction.
- // Operand created by UseRegisterAtStart is guaranteed to be live only at
- // instruction start. Register allocator is free to assign the same register
- // to some other operand used inside instruction (i.e. temporary or
- // output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- DoubleRegister reg);
- LInstruction* AssignEnvironment(LInstruction* instr);
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // By default we assume that instruction sequences generated for calls
- // cannot deoptimize eagerly and we do not attach environment to this
- // instruction.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
-
- void VisitInstruction(HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
-
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Zone* zone_;
- Status status_;
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- int argument_count_;
- LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_LITHIUM_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc b/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
deleted file mode 100644
index f0b0e96..0000000
--- a/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
+++ /dev/null
@@ -1,6408 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "arm/lithium-codegen-arm.h"
-#include "arm/lithium-gap-resolver-arm.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-class SafepointGenerator : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
-
- virtual void BeforeCall(int call_size) const { }
-
- virtual void AfterCall() const {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
- ASSERT(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // NONE indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::NONE);
-
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateDeoptJumpTable() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
- code->set_stack_slots(GetStackSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
- PopulateDeoptimizationData(code);
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
-}
-
-
-void LCodeGen::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
-#endif
-
- // r1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ cmp(r5, Operand::Zero());
- __ b(eq, &ok);
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
- }
- }
-
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- if (info()->IsStub()) {
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- __ Push(Smi::FromInt(StackFrame::STUB));
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- } else {
- PredictableCodeSizeScope predictible_code_size_scope(
- masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- }
- frame_is_built_ = true;
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- if (FLAG_debug_code) {
- __ sub(sp, sp, Operand(slots * kPointerSize));
- __ push(r0);
- __ push(r1);
- __ add(r0, sp, Operand(slots * kPointerSize));
- __ mov(r1, Operand(kSlotsZapValue));
- Label loop;
- __ bind(&loop);
- __ sub(r0, r0, Operand(kPointerSize));
- __ str(r1, MemOperand(r0, 2 * kPointerSize));
- __ cmp(r0, sp);
- __ b(ne, &loop);
- __ pop(r1);
- __ pop(r0);
- } else {
- __ sub(sp, sp, Operand(slots * kPointerSize));
- }
- }
-
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
- }
-
- // Possibly allocate a local context.
- int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope() != NULL && scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment(";;; Allocate local context");
- // Argument to NewContext is the function, which is in r1.
- __ push(r1);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
- __ str(r0, target);
- // Update the write barrier. This clobbers r3 and r0.
- __ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
- }
- }
- Comment(";;; End allocate local context");
- }
-
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
-
- if (emit_instructions) {
- if (FLAG_code_comments) {
- HValue* hydrogen = instr->hydrogen_value();
- if (hydrogen != NULL) {
- if (hydrogen->IsChange()) {
- HValue* changed_value = HChange::cast(hydrogen)->value();
- int use_id = 0;
- const char* use_mnemo = "dead";
- if (hydrogen->UseCount() >= 1) {
- HValue* use_value = hydrogen->uses().value();
- use_id = use_value->id();
- use_mnemo = use_value->Mnemonic();
- }
- Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
- current_instruction_, instr->Mnemonic(),
- changed_value->id(), changed_value->Mnemonic(),
- use_id, use_mnemo);
- } else {
- Comment(";;; @%d: %s. <#%d>", current_instruction_,
- instr->Mnemonic(), hydrogen->id());
- }
- } else {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- }
- }
- instr->CompileToNative(this);
- }
- }
- EnsureSpaceForLazyDeopt();
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred build frame",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(!frame_is_built_);
- ASSERT(info()->IsStub());
- frame_is_built_ = true;
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
- }
- Comment(";;; Deferred code @%d: %s.",
- code->instruction_index(),
- code->instr()->Mnemonic());
- code->Generate();
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred destroy frame",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(frame_is_built_);
- __ pop(ip);
- __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
- frame_is_built_ = false;
- }
- __ jmp(code->exit());
- }
- }
-
- // Force constant pool emission at the end of the deferred code to make
- // sure that no constant pools are emitted after.
- masm()->CheckConstPool(true, false);
-
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeoptJumpTable() {
- // Check that the jump table is accessible from everywhere in the function
- // code, i.e. that offsets to the table can be encoded in the 24bit signed
- // immediate of a branch instruction.
- // To simplify we consider the code size from the first instruction to the
- // end of the jump table. We also don't consider the pc load delta.
- // Each entry in the jump table generates one instruction and inlines one
- // 32bit data after it.
- if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 7)) {
- Abort("Generated code is too large");
- }
-
- __ RecordComment("[ Deoptimisation jump table");
- Label table_start;
- __ bind(&table_start);
- Label needs_frame_not_call;
- Label needs_frame_is_call;
- for (int i = 0; i < deopt_jump_table_.length(); i++) {
- __ bind(&deopt_jump_table_[i].label);
- Address entry = deopt_jump_table_[i].address;
- bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt;
- Deoptimizer::BailoutType type =
- is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- int id = Deoptimizer::GetDeoptimizationId(entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- if (deopt_jump_table_[i].needs_frame) {
- __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
- if (is_lazy_deopt) {
- if (needs_frame_is_call.is_bound()) {
- __ b(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
- __ mov(lr, Operand(pc), LeaveCC, al);
- __ mov(pc, ip);
- }
- } else {
- if (needs_frame_not_call.is_bound()) {
- __ b(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
- __ mov(pc, ip);
- }
- }
- } else {
- if (is_lazy_deopt) {
- __ mov(lr, Operand(pc), LeaveCC, al);
- __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
- } else {
- __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
- }
- }
- masm()->CheckConstPool(false, false);
- }
- __ RecordComment("]");
-
- // Force constant pool emission at the end of the deopt jump table to make
- // sure that no constant pools are emitted after.
- masm()->CheckConstPool(true, false);
-
- // The deoptimization jump table is the last part of the instruction
- // sequence. Mark the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
- safepoints_.Emit(masm(), GetStackSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
-}
-
-
-DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
- return DwVfpRegister::FromAllocationIndex(index);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- ASSERT(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
- if (op->IsRegister()) {
- return ToRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
- } else if (r.IsDouble()) {
- Abort("EmitLoadRegister: Unsupported double immediate.");
- } else {
- ASSERT(r.IsTagged());
- if (literal->IsSmi()) {
- __ mov(scratch, Operand(literal));
- } else {
- __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
- }
- }
- return scratch;
- } else if (op->IsStackSlot() || op->IsArgument()) {
- __ ldr(scratch, ToMemOperand(op));
- return scratch;
- }
- UNREACHABLE();
- return scratch;
-}
-
-
-DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DwVfpRegister dbl_scratch) {
- if (op->IsDoubleRegister()) {
- return ToDoubleRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
- __ vmov(flt_scratch, ip);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
- return dbl_scratch;
- } else if (r.IsDouble()) {
- Abort("unsupported double immediate");
- } else if (r.IsTagged()) {
- Abort("unsupported tagged immediate");
- }
- } else if (op->IsStackSlot() || op->IsArgument()) {
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(dbl_scratch, ToMemOperand(op));
- MemOperand mem_op = ToMemOperand(op);
- __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
- return dbl_scratch;
- }
- UNREACHABLE();
- return dbl_scratch;
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return constant->handle();
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsInteger32();
-}
-
-
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) {
- if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk()->LookupConstant(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(constant->HasInteger32Value());
- return Operand(constant->Integer32Value());
- } else if (r.IsDouble()) {
- Abort("ToOperand Unsupported double immediate.");
- }
- ASSERT(r.IsTagged());
- return Operand(constant->handle());
- } else if (op->IsRegister()) {
- return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
- Abort("ToOperand IsDoubleRegister unimplemented");
- return Operand::Zero();
- }
- // Stack slots not implemented, use ToMemOperand instead.
- UNREACHABLE();
- return Operand::Zero();
-}
-
-
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- ASSERT(!op->IsRegister());
- ASSERT(!op->IsDoubleRegister());
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()));
-}
-
-
-MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
- ASSERT(op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* pushed_arguments_index,
- int* pushed_arguments_count) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
-
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *pushed_arguments_index = -environment->parameter_count();
- *pushed_arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- pushed_arguments_index,
- pushed_arguments_count);
- bool has_closure_id = !info()->closure().is_null() &&
- *info()->closure() != *environment->closure();
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
-
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- ASSERT(translation_size == 1);
- ASSERT(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- ASSERT(translation_size == 2);
- ASSERT(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- }
-
- // Inlined frames which push their arguments cause the index to be
- // bumped and another stack area to be used for materialization,
- // otherwise actual argument values are unknown for inlined frames.
- bool arguments_known = true;
- int arguments_index = *pushed_arguments_index;
- int arguments_count = *pushed_arguments_count;
- if (environment->entry() != NULL) {
- arguments_known = environment->entry()->arguments_pushed();
- arguments_index = arguments_index < 0
- ? GetStackSlotCount() : arguments_index + arguments_count;
- arguments_count = environment->entry()->arguments_count() + 1;
- if (environment->entry()->arguments_pushed()) {
- *pushed_arguments_index = arguments_index;
- *pushed_arguments_count = arguments_count;
- }
- }
-
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- arguments_known,
- arguments_index,
- arguments_count);
- }
- }
-
- AddToTranslation(translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- }
-}
-
-
-void LCodeGen::AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(
- arguments_known, arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
- if (is_tagged) {
- translation->StoreStackSlot(op->index());
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
- } else {
- translation->StoreInt32StackSlot(op->index());
- }
- } else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- DoubleRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- TargetAddressStorageMode storage_mode) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode,
- TargetAddressStorageMode storage_mode) {
- ASSERT(instr != NULL);
- // Block literal pool emission to ensure nop indicating no inlined smi code
- // is in the correct position.
- Assembler::BlockConstPoolScope block_const_pool(masm());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- if (code->kind() == Code::BINARY_OP_IC ||
- code->kind() == Code::COMPARE_IC) {
- __ nop();
- }
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr) {
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- ASSERT(pointers != NULL);
- RecordPosition(pointers->position());
-
- __ CallRuntime(function, num_arguments);
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr) {
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode) {
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- ASSERT(info()->IsOptimizing() || info()->IsStub());
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort("bailout was not prepared");
- return;
- }
-
- ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
- if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- return;
- }
-
- if (FLAG_trap_on_deopt) {
- __ stop("trap_on_deopt", cc);
- }
-
- ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
- if (cc == al && frame_is_built_) {
- if (needs_lazy_deopt) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- }
- } else {
- // We often have several deopts to the same entry, reuse the last
- // jump entry if this is the case.
- if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry) ||
- (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
- (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
- JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
- deopt_jump_table_.Add(table_entry, zone());
- }
- __ b(cc, &deopt_jump_table_.last().label);
- }
-}
-
-
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
- maps.Add(map, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
-}
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
-
- Handle<ByteArray> translations = translations_.CreateByteArray();
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
- }
-
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
- LInstruction* instr, SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- ASSERT(expected_safepoint_kind_ == kind);
-
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp, zone());
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
- RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(
- pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegistersAndDoubles(
- LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(
- pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
-}
-
-
-void LCodeGen::RecordPosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- __ ldr(r0, MemOperand(sp, 0));
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
-
- if (divisor < 0) divisor = -divisor;
-
- Label positive_dividend, done;
- __ cmp(dividend, Operand::Zero());
- __ b(pl, &positive_dividend);
- __ rsb(result, dividend, Operand::Zero());
- __ and_(result, result, Operand(divisor - 1), SetCC);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
- }
- __ rsb(result, result, Operand::Zero());
- __ b(&done);
- __ bind(&positive_dividend);
- __ and_(result, dividend, Operand(divisor - 1));
- __ bind(&done);
- return;
- }
-
- // These registers hold untagged 32 bit values.
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
- Register result = ToRegister(instr->result());
- Label done;
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatures::Scope scope(SUDIV);
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
-
- // Check for (kMinInt % -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left, Operand(kMinInt));
- __ b(ne, &left_not_min_int);
- __ cmp(right, Operand(-1));
- DeoptimizeIf(eq, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // For r3 = r1 % r2; we can have the following ARM code
- // sdiv r3, r1, r2
- // mls r3, r3, r2, r1
-
- __ sdiv(result, left, right);
- __ mls(result, result, right, left);
- __ cmp(result, Operand::Zero());
- __ b(ne, &done);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(lt, instr->environment());
- }
- } else {
- Register scratch = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DwVfpRegister dividend = ToDoubleRegister(instr->temp2());
- DwVfpRegister divisor = ToDoubleRegister(instr->temp3());
- DwVfpRegister quotient = double_scratch0();
-
- ASSERT(!dividend.is(divisor));
- ASSERT(!dividend.is(quotient));
- ASSERT(!divisor.is(quotient));
- ASSERT(!scratch.is(left));
- ASSERT(!scratch.is(right));
- ASSERT(!scratch.is(result));
-
- Label vfp_modulo, both_positive, right_negative;
-
- CpuFeatures::Scope scope(VFP2);
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
-
- __ Move(result, left);
-
- // (0 % x) must yield 0 (if x is finite, which is the case here).
- __ cmp(left, Operand::Zero());
- __ b(eq, &done);
- // Preload right in a vfp register.
- __ vmov(divisor.low(), right);
- __ b(lt, &vfp_modulo);
-
- __ cmp(left, Operand(right));
- __ b(lt, &done);
-
- // Check for (positive) power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
- scratch,
- &right_negative,
- &both_positive);
- // Perform modulo operation (scratch contains right - 1).
- __ and_(result, scratch, Operand(left));
- __ b(&done);
-
- __ bind(&right_negative);
- // Negate right. The sign of the divisor does not matter.
- __ rsb(right, right, Operand::Zero());
-
- __ bind(&both_positive);
- const int kUnfolds = 3;
- // If the right hand side is smaller than the (nonnegative)
- // left hand side, the left hand side is the result.
- // Else try a few subtractions of the left hand side.
- __ mov(scratch, left);
- for (int i = 0; i < kUnfolds; i++) {
- // Check if the left hand side is less or equal than the
- // the right hand side.
- __ cmp(scratch, Operand(right));
- __ mov(result, scratch, LeaveCC, lt);
- __ b(lt, &done);
- // If not, reduce the left hand side by the right hand
- // side and check again.
- if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
- }
-
- __ bind(&vfp_modulo);
- // Load the arguments in VFP registers.
- // The divisor value is preloaded before. Be careful that 'right'
- // is only live on entry.
- __ vmov(dividend.low(), left);
- // From here on don't use right as it may have been reallocated
- // (for example to scratch2).
- right = no_reg;
-
- __ vcvt_f64_s32(dividend, dividend.low());
- __ vcvt_f64_s32(divisor, divisor.low());
-
- // We do not care about the sign of the divisor.
- __ vabs(divisor, divisor);
- // Compute the quotient and round it to a 32bit integer.
- __ vdiv(quotient, dividend, divisor);
- __ vcvt_s32_f64(quotient.low(), quotient);
- __ vcvt_f64_s32(quotient, quotient.low());
-
- // Compute the remainder in result.
- DwVfpRegister double_scratch = dividend;
- __ vmul(double_scratch, divisor, quotient);
- __ vcvt_s32_f64(double_scratch.low(), double_scratch);
- __ vmov(scratch, double_scratch.low());
-
- if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ sub(result, left, scratch);
- } else {
- Label ok;
- // Check for -0.
- __ sub(scratch2, left, scratch, SetCC);
- __ b(ne, &ok);
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
- __ bind(&ok);
- // Load the result and we are done.
- __ mov(result, scratch2);
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitSignedIntegerDivisionByConstant(
- Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment) {
- ASSERT(!AreAliased(dividend, scratch, ip));
- ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
-
- uint32_t divisor_abs = abs(divisor);
-
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
-
- switch (divisor_abs) {
- case 0:
- DeoptimizeIf(al, environment);
- return;
-
- case 1:
- if (divisor > 0) {
- __ Move(result, dividend);
- } else {
- __ rsb(result, dividend, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, environment);
- }
- // Compute the remainder.
- __ mov(remainder, Operand::Zero());
- return;
-
- default:
- if (IsPowerOf2(divisor_abs)) {
- // Branch and condition free code for integer division by a power
- // of two.
- int32_t power = WhichPowerOf2(divisor_abs);
- if (power > 1) {
- __ mov(scratch, Operand(dividend, ASR, power - 1));
- }
- __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
- __ mov(result, Operand(scratch, ASR, power));
- // Negate if necessary.
- // We don't need to check for overflow because the case '-1' is
- // handled separately.
- if (divisor < 0) {
- ASSERT(divisor != -1);
- __ rsb(result, result, Operand::Zero());
- }
- // Compute the remainder.
- if (divisor > 0) {
- __ sub(remainder, dividend, Operand(result, LSL, power));
- } else {
- __ add(remainder, dividend, Operand(result, LSL, power));
- }
- return;
- } else {
- // Use magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer’s Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- // Branch and condition free code for integer division by a power
- // of two.
- const int32_t M = magic_numbers.M;
- const int32_t s = magic_numbers.s + power_of_2_factor;
-
- __ mov(ip, Operand(M));
- __ smull(ip, scratch, dividend, ip);
- if (M < 0) {
- __ add(scratch, scratch, Operand(dividend));
- }
- if (s > 0) {
- __ mov(scratch, Operand(scratch, ASR, s));
- }
- __ add(result, scratch, Operand(dividend, LSR, 31));
- if (divisor < 0) __ rsb(result, result, Operand::Zero());
- // Compute the remainder.
- __ mov(ip, Operand(divisor));
- // This sequence could be replaced with 'mls' when
- // it gets implemented.
- __ mul(scratch, result, ip);
- __ sub(remainder, dividend, scratch);
- }
- }
-}
-
-
-void LCodeGen::DoDivI(LDivI* instr) {
- class DeferredDivI: public LDeferredCode {
- public:
- DeferredDivI(LCodeGen* codegen, LDivI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredBinaryOpStub(instr_->pointer_map(),
- instr_->left(),
- instr_->right(),
- Token::DIV);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LDivI* instr_;
- };
-
- const Register left = ToRegister(instr->left());
- const Register right = ToRegister(instr->right());
- const Register scratch = scratch0();
- const Register result = ToRegister(instr->result());
-
- // Check for x / 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ cmp(left, Operand::Zero());
- __ b(ne, &left_not_zero);
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
- __ bind(&left_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left, Operand(kMinInt));
- __ b(ne, &left_not_min_int);
- __ cmp(right, Operand(-1));
- DeoptimizeIf(eq, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- Label done, deoptimize;
- // Test for a few common cases first.
- __ cmp(right, Operand(1));
- __ mov(result, left, LeaveCC, eq);
- __ b(eq, &done);
-
- __ cmp(right, Operand(2));
- __ tst(left, Operand(1), eq);
- __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
- __ b(eq, &done);
-
- __ cmp(right, Operand(4));
- __ tst(left, Operand(3), eq);
- __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
- __ b(eq, &done);
-
- // Call the stub. The numbers in r0 and r1 have
- // to be tagged to Smis. If that is not possible, deoptimize.
- DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
-
- __ TrySmiTag(left, &deoptimize, scratch);
- __ TrySmiTag(right, &deoptimize, scratch);
-
- __ b(al, deferred->entry());
- __ bind(deferred->exit());
-
- // If the result in r0 is a Smi, untag it, else deoptimize.
- __ JumpIfNotSmi(result, &deoptimize);
- __ SmiUntag(result);
- __ b(&done);
-
- __ bind(&deoptimize);
- DeoptimizeIf(al, instr->environment());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
- DwVfpRegister addend = ToDoubleRegister(instr->addend());
- DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
- DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
-
- // This is computed in-place.
- ASSERT(addend.is(ToDoubleRegister(instr->result())));
-
- __ vmla(addend, multiplier, multiplicand);
-}
-
-
-void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
- DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
- DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
- DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
-
- // This is computed in-place.
- ASSERT(minuend.is(ToDoubleRegister(instr->result())));
-
- __ vmls(minuend, multiplier, multiplicand);
-}
-
-
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- const Register result = ToRegister(instr->result());
- const Register left = ToRegister(instr->left());
- const Register remainder = ToRegister(instr->temp());
- const Register scratch = scratch0();
-
- if (!CpuFeatures::IsSupported(SUDIV)) {
- // If the CPU doesn't support sdiv instruction, we only optimize when we
- // have magic numbers for the divisor. The standard integer division routine
- // is usually slower than transitionning to VFP.
- ASSERT(instr->right()->IsConstantOperand());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
- if (divisor < 0) {
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
- EmitSignedIntegerDivisionByConstant(result,
- left,
- divisor,
- remainder,
- scratch,
- instr->environment());
- // We performed a truncating division. Correct the result if necessary.
- __ cmp(remainder, Operand::Zero());
- __ teq(remainder, Operand(divisor), ne);
- __ sub(result, result, Operand(1), LeaveCC, mi);
- } else {
- CpuFeatures::Scope scope(SUDIV);
- const Register right = ToRegister(instr->right());
-
- // Check for x / 0.
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
-
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left, Operand(kMinInt));
- __ b(ne, &left_not_min_int);
- __ cmp(right, Operand(-1));
- DeoptimizeIf(eq, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(right, Operand::Zero());
- __ cmp(left, Operand::Zero(), mi);
- // "right" can't be null because the code would have already been
- // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
- // In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr->environment());
- }
-
- Label done;
- __ sdiv(result, left, right);
- // If both operands have the same sign then we are done.
- __ eor(remainder, left, Operand(right), SetCC);
- __ b(pl, &done);
-
- // Check if the result needs to be corrected.
- __ mls(remainder, result, right, left);
- __ cmp(remainder, Operand::Zero());
- __ sub(result, result, Operand(1), LeaveCC, ne);
-
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
- LOperand* left_argument,
- LOperand* right_argument,
- Token::Value op) {
- CpuFeatures::Scope vfp_scope(VFP2);
- Register left = ToRegister(left_argument);
- Register right = ToRegister(right_argument);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
- // Move left to r1 and right to r0 for the stub call.
- if (left.is(r1)) {
- __ Move(r0, right);
- } else if (left.is(r0) && right.is(r1)) {
- __ Swap(r0, r1, r2);
- } else if (left.is(r0)) {
- ASSERT(!right.is(r1));
- __ mov(r1, r0);
- __ mov(r0, right);
- } else {
- ASSERT(!left.is(r0) && !right.is(r0));
- __ mov(r0, right);
- __ mov(r1, left);
- }
- BinaryOpStub stub(op, OVERWRITE_LEFT);
- __ CallStub(&stub);
- RecordSafepointWithRegistersAndDoubles(pointer_map,
- 0,
- Safepoint::kNoLazyDeopt);
- // Overwrite the stored value of r0 with the result of the stub.
- __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register scratch = scratch0();
- Register result = ToRegister(instr->result());
- // Note that result may alias left.
- Register left = ToRegister(instr->left());
- LOperand* right_op = instr->right();
-
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
-
- if (right_op->IsConstantOperand() && !can_overflow) {
- // Use optimized code for specific constants.
- int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
-
- if (bailout_on_minus_zero && (constant < 0)) {
- // The case of a null constant will be handled separately.
- // If constant is negative and left is null, the result should be -0.
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
-
- switch (constant) {
- case -1:
- __ rsb(result, left, Operand::Zero());
- break;
- case 0:
- if (bailout_on_minus_zero) {
- // If left is strictly negative and the constant is null, the
- // result is -0. Deoptimize if required, otherwise return 0.
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
- }
- __ mov(result, Operand::Zero());
- break;
- case 1:
- __ Move(result, left);
- break;
- default:
- // Multiplying by powers of two and powers of two plus or minus
- // one can be done faster with shifted operands.
- // For other constants we emit standard code.
- int32_t mask = constant >> 31;
- uint32_t constant_abs = (constant + mask) ^ mask;
-
- if (IsPowerOf2(constant_abs) ||
- IsPowerOf2(constant_abs - 1) ||
- IsPowerOf2(constant_abs + 1)) {
- if (IsPowerOf2(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ mov(result, Operand(left, LSL, shift));
- } else if (IsPowerOf2(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ add(result, left, Operand(left, LSL, shift));
- } else if (IsPowerOf2(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ rsb(result, left, Operand(left, LSL, shift));
- }
-
- // Correct the sign of the result is the constant is negative.
- if (constant < 0) __ rsb(result, result, Operand::Zero());
-
- } else {
- // Generate standard code.
- __ mov(ip, Operand(constant));
- __ mul(result, left, ip);
- }
- }
-
- } else {
- Register right = EmitLoadRegister(right_op, scratch);
- if (bailout_on_minus_zero) {
- __ orr(ToRegister(instr->temp()), left, right);
- }
-
- if (can_overflow) {
- // scratch:result = left * right.
- __ smull(result, scratch, left, right);
- __ cmp(scratch, Operand(result, ASR, 31));
- DeoptimizeIf(ne, instr->environment());
- } else {
- __ mul(result, left, right);
- }
-
- if (bailout_on_minus_zero) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- __ cmp(result, Operand::Zero());
- __ b(ne, &done);
- __ cmp(ToRegister(instr->temp()), Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
- __ bind(&done);
- }
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left_op = instr->left();
- LOperand* right_op = instr->right();
- ASSERT(left_op->IsRegister());
- Register left = ToRegister(left_op);
- Register result = ToRegister(instr->result());
- Operand right(no_reg);
-
- if (right_op->IsStackSlot() || right_op->IsArgument()) {
- right = Operand(EmitLoadRegister(right_op, ip));
- } else {
- ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
- right = ToOperand(right_op);
- }
-
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(result, left, right);
- break;
- case Token::BIT_OR:
- __ orr(result, left, right);
- break;
- case Token::BIT_XOR:
- __ eor(result, left, right);
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
- // result may alias either of them.
- LOperand* right_op = instr->right();
- Register left = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- if (right_op->IsRegister()) {
- // Mask the right_op operand.
- __ and_(scratch, ToRegister(right_op), Operand(0x1F));
- switch (instr->op()) {
- case Token::ROR:
- __ mov(result, Operand(left, ROR, scratch));
- break;
- case Token::SAR:
- __ mov(result, Operand(left, ASR, scratch));
- break;
- case Token::SHR:
- if (instr->can_deopt()) {
- __ mov(result, Operand(left, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr->environment());
- } else {
- __ mov(result, Operand(left, LSR, scratch));
- }
- break;
- case Token::SHL:
- __ mov(result, Operand(left, LSL, scratch));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- // Mask the right_op operand.
- int value = ToInteger32(LConstantOperand::cast(right_op));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ mov(result, Operand(left, ROR, shift_count));
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ mov(result, Operand(left, ASR, shift_count));
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SHR:
- if (shift_count != 0) {
- __ mov(result, Operand(left, LSR, shift_count));
- } else {
- if (instr->can_deopt()) {
- __ tst(left, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment());
- }
- __ Move(result, left);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- __ mov(result, Operand(left, LSL, shift_count));
- } else {
- __ Move(result, left);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- SBit set_cond = can_overflow ? SetCC : LeaveCC;
-
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, ip);
- __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
- } else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
- }
-
- if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
- }
-}
-
-
-void LCodeGen::DoRSubI(LRSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- SBit set_cond = can_overflow ? SetCC : LeaveCC;
-
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, ip);
- __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
- } else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
- }
-
- if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
- __ mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- CpuFeatures::Scope scope(VFP2);
- double v = instr->value();
- __ Vmov(result, v, scratch0());
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
- if (value->IsSmi()) {
- __ mov(ToRegister(instr->result()), Operand(value));
- } else {
- __ LoadHeapObject(ToRegister(instr->result()),
- Handle<HeapObject>::cast(value));
- }
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
-}
-
-
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temp());
- Label done;
-
- // If the object is a smi return the object.
- __ tst(input, Operand(kSmiTagMask));
- __ Move(result, input, eq);
- __ b(eq, &done);
-
- // If the object is not a value type, return the object.
- __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
- __ Move(result, input, ne);
- __ b(ne, &done);
- __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- Label runtime, done;
- ASSERT(object.is(result));
- ASSERT(object.is(r0));
- ASSERT(!scratch.is(scratch0()));
- ASSERT(!scratch.is(object));
-
- __ tst(object, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
- __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr->environment());
-
- if (index->value() == 0) {
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand(stamp));
- __ ldr(scratch, MemOperand(scratch));
- __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ cmp(scratch, scratch0());
- __ b(ne, &runtime);
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(r1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
-}
-
-
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ mvn(result, Operand(input));
-}
-
-
-void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->value(), ip);
- __ push(input_reg);
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- SBit set_cond = can_overflow ? SetCC : LeaveCC;
-
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, ip);
- __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
- } else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
- }
-
- if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
- if (instr->hydrogen()->representation().IsInteger32()) {
- Register left_reg = ToRegister(left);
- Operand right_op = (right->IsRegister() || right->IsConstantOperand())
- ? ToOperand(right)
- : Operand(EmitLoadRegister(right, ip));
- Register result_reg = ToRegister(instr->result());
- __ cmp(left_reg, right_op);
- if (!result_reg.is(left_reg)) {
- __ mov(result_reg, left_reg, LeaveCC, condition);
- }
- __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
- } else {
- ASSERT(instr->hydrogen()->representation().IsDouble());
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister left_reg = ToDoubleRegister(left);
- DwVfpRegister right_reg = ToDoubleRegister(right);
- DwVfpRegister result_reg = ToDoubleRegister(instr->result());
- Label check_nan_left, check_zero, return_left, return_right, done;
- __ VFPCompareAndSetFlags(left_reg, right_reg);
- __ b(vs, &check_nan_left);
- __ b(eq, &check_zero);
- __ b(condition, &return_left);
- __ b(al, &return_right);
-
- __ bind(&check_zero);
- __ VFPCompareAndSetFlags(left_reg, 0.0);
- __ b(ne, &return_left); // left == right != 0.
- // At this point, both left and right are either 0 or -0.
- if (operation == HMathMinMax::kMathMin) {
- // We could use a single 'vorr' instruction here if we had NEON support.
- __ vneg(left_reg, left_reg);
- __ vsub(result_reg, left_reg, right_reg);
- __ vneg(result_reg, result_reg);
- } else {
- // Since we operate on +0 and/or -0, vadd and vand have the same effect;
- // the decision for vadd is easy because vand is a NEON instruction.
- __ vadd(result_reg, left_reg, right_reg);
- }
- __ b(al, &done);
-
- __ bind(&check_nan_left);
- __ VFPCompareAndSetFlags(left_reg, left_reg);
- __ b(vs, &return_left); // left == NaN.
- __ bind(&return_right);
- if (!right_reg.is(result_reg)) {
- __ vmov(result_reg, right_reg);
- }
- __ b(al, &done);
-
- __ bind(&return_left);
- if (!left_reg.is(result_reg)) {
- __ vmov(result_reg, left_reg);
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister left = ToDoubleRegister(instr->left());
- DwVfpRegister right = ToDoubleRegister(instr->right());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- switch (instr->op()) {
- case Token::ADD:
- __ vadd(result, left, right);
- break;
- case Token::SUB:
- __ vsub(result, left, right);
- break;
- case Token::MUL:
- __ vmul(result, left, right);
- break;
- case Token::DIV:
- __ vdiv(result, left, right);
- break;
- case Token::MOD: {
- // Save r0-r3 on the stack.
- __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
-
- __ PrepareCallCFunction(0, 2, scratch0());
- __ SetCallCDoubleArguments(left, right);
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
- 0, 2);
- // Move the result in the double result register.
- __ GetCFunctionDoubleResult(result);
-
- // Restore r0-r3.
- __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->left()).is(r1));
- ASSERT(ToRegister(instr->right()).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
-
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- // Block literal pool emission to ensure nop indicating no inlined smi code
- // is in the correct position.
- Assembler::BlockConstPoolScope block_const_pool(masm());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ nop(); // Signals no inlined code.
-}
-
-
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
-
-
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
-
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ b(cc, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ b(cc, chunk_->GetAssemblyLabel(left_block));
- __ b(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
- Register reg = ToRegister(instr->value());
- __ cmp(reg, Operand::Zero());
- EmitBranch(true_block, false_block, ne);
- } else if (r.IsDouble()) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister reg = ToDoubleRegister(instr->value());
- Register scratch = scratch0();
-
- // Test the double value. Zero and NaN are false.
- __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
- __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
- EmitBranch(true_block, false_block, eq);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(true_block, false_block, eq);
- } else if (type.IsSmi()) {
- __ cmp(reg, Operand::Zero());
- EmitBranch(true_block, false_block, ne);
- } else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
-
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
- // undefined -> false.
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ b(eq, false_label);
- }
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
- // Boolean -> its value.
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ b(eq, true_label);
- __ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ b(eq, false_label);
- }
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
- // 'null' -> false.
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- __ b(eq, false_label);
- }
-
- if (expected.Contains(ToBooleanStub::SMI)) {
- // Smis: 0 -> false, all other -> true.
- __ cmp(reg, Operand::Zero());
- __ b(eq, false_label);
- __ JumpIfSmi(reg, true_label);
- } else if (expected.NeedsMap()) {
- // If we need a map later and have a Smi -> deopt.
- __ tst(reg, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
- }
-
- const Register map = scratch0();
- if (expected.NeedsMap()) {
- __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
-
- if (expected.CanBeUndetectable()) {
- // Undetectable -> false.
- __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- __ b(ne, false_label);
- }
- }
-
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
- // spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, true_label);
- }
-
- if (expected.Contains(ToBooleanStub::STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_string);
- __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
- __ cmp(ip, Operand::Zero());
- __ b(ne, true_label);
- __ b(false_label);
- __ bind(&not_string);
- }
-
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
- CpuFeatures::Scope scope(VFP2);
- // heap number -> false iff +0, -0, or NaN.
- DwVfpRegister dbl_scratch = double_scratch0();
- Label not_heap_number;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &not_heap_number);
- __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
- __ b(vs, false_label); // NaN -> false.
- __ b(eq, false_label); // +0, -0 -> false.
- __ b(true_label);
- __ bind(&not_heap_number);
- }
-
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(al, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = kNoCondition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = eq;
- break;
- case Token::LT:
- cond = is_unsigned ? lo : lt;
- break;
- case Token::GT:
- cond = is_unsigned ? hi : gt;
- break;
- case Token::LTE:
- cond = is_unsigned ? ls : le;
- break;
- case Token::GTE:
- cond = is_unsigned ? hs : ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Condition cond = TokenToCondition(instr->op(), false);
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- CpuFeatures::Scope scope(VFP2);
- // Compare left and right operands as doubles and load the
- // resulting flags into the normal status register.
- __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
- // If a NaN is involved, i.e. the result is unordered (V set),
- // jump to false block label.
- __ b(vs, chunk_->GetAssemblyLabel(false_block));
- } else {
- if (right->IsConstantOperand()) {
- __ cmp(ToRegister(left),
- Operand(ToInteger32(LConstantOperand::cast(right))));
- } else if (left->IsConstantOperand()) {
- __ cmp(ToRegister(right),
- Operand(ToInteger32(LConstantOperand::cast(left))));
- // We transposed the operands. Reverse the condition.
- cond = ReverseCondition(cond);
- } else {
- __ cmp(ToRegister(left), ToRegister(right));
- }
- }
- EmitBranch(true_block, false_block, cond);
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmp(left, Operand(right));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ cmp(left, Operand(instr->hydrogen()->right()));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register scratch = scratch0();
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
- return;
- }
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(ip, nil_value);
- __ cmp(reg, ip);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, eq);
- } else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ b(eq, true_label);
- __ LoadRoot(ip, other_nil_value);
- __ cmp(reg, ip);
- __ b(eq, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne);
- }
-}
-
-
-Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object) {
- Register temp2 = scratch0();
- __ JumpIfSmi(input, is_not_object);
-
- __ LoadRoot(temp2, Heap::kNullValueRootIndex);
- __ cmp(input, temp2);
- __ b(eq, is_object);
-
- // Load map.
- __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
- __ tst(temp2, Operand(1 << Map::kIsUndetectable));
- __ b(ne, is_not_object);
-
- // Load instance type and check that it is in object type range.
- __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ b(lt, is_not_object);
- __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- return le;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond =
- EmitIsObject(reg, temp1, false_label, true_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
- __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
-
- return lt;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond =
- EmitIsString(reg, temp1, false_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Register input_reg = EmitLoadRegister(instr->value(), ip);
- __ tst(input_reg, Operand(kSmiTagMask));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
- __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
- __ tst(temp, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne);
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // This instruction also signals no smi code inlined.
- __ cmp(r0, Operand::Zero());
-
- Condition condition = ComputeCompareCondition(op);
-
- EmitBranch(true_block, false_block, condition);
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- ASSERT(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return eq;
- if (to == LAST_TYPE) return hs;
- if (from == FIRST_TYPE) return ls;
- UNREACHABLE();
- return eq;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register scratch = scratch0();
- Register input = ToRegister(instr->value());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
-
- __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
-}
-
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ ldr(scratch,
- FieldMemOperand(input, String::kHashFieldOffset));
- __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-// Branches to a label or falls through with the answer in flags. Trashes
-// the temp registers, but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true,
- Label* is_false,
- Handle<String>class_name,
- Register input,
- Register temp,
- Register temp2) {
- ASSERT(!input.is(temp));
- ASSERT(!input.is(temp2));
- ASSERT(!temp.is(temp2));
-
- __ JumpIfSmi(input, is_false);
-
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, is_false);
- __ b(eq, is_true);
- __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
- __ b(eq, is_true);
- } else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
- __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ b(gt, is_false);
- }
-
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
- // Check if the constructor in the map is a function.
- __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
-
- // Objects with a non-function constructor have class 'Object'.
- __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
- __ b(ne, is_true);
- } else {
- __ b(ne, is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(temp, FieldMemOperand(temp,
- SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- __ cmp(temp, Operand(class_name));
- // End with the answer in flags.
-}
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = scratch0();
- Register temp2 = ToRegister(instr->temp());
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
-
- __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(temp, Operand(instr->map()));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
- ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
-
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-
- __ cmp(r0, Operand::Zero());
- __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
- __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
- }
- virtual LInstruction* instr() { return instr_; }
- Label* map_check() { return &map_check_; }
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
-
- ASSERT(object.is(r0));
- ASSERT(result.is(r0));
-
- // A Smi is not instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- Register map = temp;
- __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- {
- // Block constant pool emission to ensure the positions of instructions are
- // as expected by the patcher. See InstanceofStub::Generate().
- Assembler::BlockConstPoolScope block_const_pool(masm());
- __ bind(deferred->map_check()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch with
- // the cached map.
- PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ mov(ip, Operand(Handle<Object>(cell)));
- __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- __ cmp(map, Operand(ip));
- __ b(ne, &cache_miss);
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch
- // with true or false.
- __ mov(result, Operand(factory()->the_hole_value()));
- }
- __ b(&done);
-
- // The inlined call site cache did not match. Check null and string before
- // calling the deferred code.
- __ bind(&cache_miss);
- // Null is not instance of anything.
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(object, Operand(ip));
- __ b(eq, &false_result);
-
- // String values is not instance of anything.
- Condition is_string = masm_->IsObjectStringType(object, temp);
- __ b(is_string, &false_result);
-
- // Go to the deferred code.
- __ b(deferred->entry());
-
- __ bind(&false_result);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
-
- // Here result has either true or false. Deferred code also produces true or
- // false object.
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- Register result = ToRegister(instr->result());
- ASSERT(result.is(r0));
-
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(flags);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- // Get the temp register reserved by the instruction. This needs to be r4 as
- // its slot of the pushing of safepoint registers is used to communicate the
- // offset to the location of the map check.
- Register temp = ToRegister(instr->temp());
- ASSERT(temp.is(r4));
- __ LoadHeapObject(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 5;
- // Make sure that code size is predicable, since we use specific constants
- // offsets in the code to find embedded values..
- PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
- int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
- Label before_push_delta;
- __ bind(&before_push_delta);
- __ BlockConstPoolFor(kAdditionalDelta);
- __ mov(temp, Operand(delta * kPointerSize));
- // The mov above can generate one or two instructions. The delta was computed
- // for two instructions, so we need to pad here in case of one instruction.
- if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
- ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
- __ nop();
- }
- __ StoreToSafepointRegisterSlot(temp, temp);
- CallCodeGeneric(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Put the result value into the result register slot and
- // restore all registers.
- __ StoreToSafepointRegisterSlot(result, result);
-}
-
-
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // This instruction also signals no smi code inlined.
- __ cmp(r0, Operand::Zero());
-
- Condition condition = ComputeCompareCondition(op);
- __ LoadRoot(ToRegister(instr->result()),
- Heap::kTrueValueRootIndex,
- condition);
- __ LoadRoot(ToRegister(instr->result()),
- Heap::kFalseValueRootIndex,
- NegateCondition(condition));
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- ASSERT(NeedsEagerFrame());
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
- }
- if (NeedsEagerFrame()) {
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- if (!info()->IsStub()) {
- __ add(sp, sp, Operand(sp_delta));
- }
- }
- __ Jump(lr);
-}
-
-
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
- __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
-
- __ mov(r2, Operand(instr->name()));
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Register cell = scratch0();
-
- // Load the cell.
- __ mov(cell, Operand(instr->hydrogen()->cell()));
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- // We use a temp to check the payload (CompareRoot might clobber ip).
- Register payload = ToRegister(instr->temp());
- __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
- __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment());
- }
-
- // Store the value.
- __ str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-}
-
-
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
-
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(context, instr->slot_index()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
- } else {
- __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
- Register scratch = scratch0();
- MemOperand target = ContextOperand(context, instr->slot_index());
-
- Label skip_assignment;
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, target);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
- } else {
- __ b(ne, &skip_assignment);
- }
- }
-
- __ str(value, target);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- __ RecordWriteContextSlot(context,
- target.offset(),
- value,
- scratch,
- kLRHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
- } else {
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
- __ cmp(result, Operand(Handle<Map>(current->map())));
- DeoptimizeIf(ne, env);
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- }
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- Register object_map = scratch0();
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(al, instr->environment());
- return;
- }
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMap(
- object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
- if (last && !need_generic) {
- DeoptimizeIf(ne, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- __ b(ne, &next);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ b(&done);
- __ bind(&next);
- }
- }
- if (need_generic) {
- __ mov(r2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
-
- // Name is always in r2.
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register scratch = scratch0();
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // Check that the function really is a function. Load map into the
- // result register.
- __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
- DeoptimizeIf(ne, instr->environment());
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
- __ b(ne, &non_instance);
-
- // Get the prototype or initial map from the function.
- __ ldr(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
- __ b(ne, &done);
-
- // Get the prototype from the initial map.
- __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- __ jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- __ bind(&non_instance);
- __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- Register scratch = scratch0();
-
- __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, fail;
- __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- // |scratch| still contains |input|'s map.
- __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
- __ ubfx(scratch, scratch, Map::kElementsKindShift,
- Map::kElementsKindBitCount);
- __ cmp(scratch, Operand(GetInitialFastElementsKind()));
- __ b(lt, &fail);
- __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
- __ b(le, &done);
- __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ b(lt, &fail);
- __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ b(le, &done);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register to_reg = ToRegister(instr->result());
- Register from_reg = ToRegister(instr->object());
- __ ldr(to_reg, FieldMemOperand(from_reg,
- ExternalArray::kExternalPointerOffset));
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- __ sub(length, length, index);
- __ add(length, length, Operand(1));
- __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- DwVfpRegister result = ToDoubleRegister(instr->result());
- Operand operand = key_is_constant
- ? Operand(constant_key << element_size_shift)
- : Operand(key, LSL, shift_size);
- __ add(scratch0(), external_pointer, operand);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
- __ vcvt_f64_f32(result, kScratchDoubleReg.low());
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vldr(result, scratch0(), additional_offset);
- }
- } else {
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Register value = external_pointer;
- __ ldr(value, MemOperand(scratch0(), additional_offset));
- __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
-
- __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
- __ and_(scratch0(), scratch0(),
- Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ teq(scratch0(), Operand(0x00));
- __ b(eq, &exponent_rebiased);
-
- __ teq(scratch0(), Operand(0xff));
- __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
- __ b(eq, &exponent_rebiased);
-
- // Rebias exponent.
- __ add(scratch0(),
- scratch0(),
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ and_(sfpd_hi, value, Operand(kBinary32SignMask));
- __ orr(sfpd_hi, sfpd_hi,
- Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ orr(sfpd_hi, sfpd_hi,
- Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
- __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
-
- } else {
- __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
- __ ldr(sfpd_hi, MemOperand(scratch0(),
- additional_offset + kPointerSize));
- }
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ ldrsb(result, mem_operand);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ ldrb(result, mem_operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ ldrsh(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ ldrh(result, mem_operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ ldr(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ ldr(result, mem_operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr->environment());
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DwVfpRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
-
- int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- ((constant_key + instr->additional_index()) << element_size_shift);
- if (!key_is_constant) {
- __ add(elements, elements, Operand(key, LSL, shift_size));
- }
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ add(elements, elements, Operand(base_offset));
- __ vldr(result, elements, 0);
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
- }
- } else {
- __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
- __ ldr(sfpd_lo, MemOperand(elements, base_offset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- ASSERT(kPointerSize == sizeof(kHoleNanLower32));
- __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- if (instr->key()->IsConstantOperand()) {
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- Register key = EmitLoadRegister(instr->key(), scratch0());
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ add(scratch, elements,
- Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- } else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ ldr(result, FieldMemOperand(store_base, offset));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ tst(result, Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment());
- } else {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ cmp(result, scratch);
- DeoptimizeIf(eq, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
// Builds the MemOperand addressing one element of a keyed store/load.
// |element_size| is log2 of the element width; |shift_size| is the scale to
// apply to the (possibly smi-tagged) key, with -1 meaning the key must be
// scaled down by one bit (LSR #1). May clobber scratch0() when a non-zero
// additional_index has to be folded into a non-constant key.
MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                         Register base,
                                         bool key_is_constant,
                                         int constant_key,
                                         int element_size,
                                         int shift_size,
                                         int additional_index,
                                         int additional_offset) {
  if (additional_index != 0 && !key_is_constant) {
    // Pre-add the constant extra index (rescaled to key units) into
    // scratch0(); the scaled sum is used as the index register below.
    additional_index *= 1 << (element_size - shift_size);
    __ add(scratch0(), key, Operand(additional_index));
  }

  if (key_is_constant) {
    // Fully constant address: fold everything into the immediate offset.
    return MemOperand(base,
                      (constant_key << element_size) + additional_offset);
  }

  if (additional_index == 0) {
    if (shift_size >= 0) {
      return MemOperand(base, key, LSL, shift_size);
    } else {
      ASSERT_EQ(-1, shift_size);
      return MemOperand(base, key, LSR, 1);
    }
  }

  if (shift_size >= 0) {
    return MemOperand(base, scratch0(), LSL, shift_size);
  } else {
    ASSERT_EQ(-1, shift_size);
    return MemOperand(base, scratch0(), LSR, 1);
  }
}
-
-
// Generic (megamorphic) keyed load: calls the KeyedLoadIC stub with the
// object in r1 and the key in r0, per that IC's calling convention.
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(r1));
  ASSERT(ToRegister(instr->key()).is(r0));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
-
-
// Computes the base address from which the actual arguments can be read:
// either just below the current sp (for inlined frames) or the appropriate
// frame pointer depending on whether an arguments adaptor frame is present.
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // For inlined frames the arguments sit right below the two words
    // that were pushed on top of them.
    __ sub(result, sp, Operand(2 * kPointerSize));
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    Label done, adapted;
    __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
    __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted and for the real
    // frame below the adaptor frame if adapted.
    __ mov(result, fp, LeaveCC, ne);
    __ mov(result, scratch, LeaveCC, eq);
  }
}
-
-
// Produces the actual argument count as an untagged integer. |elements|
// holds the frame base computed by DoArgumentsElements; if it equals fp
// there is no adaptor frame and the count is the static parameter count.
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  __ cmp(fp, elem);
  __ mov(result, Operand(scope()->num_parameters()));
  __ b(eq, &done);

  // Arguments adaptor frame present. Get argument length from there.
  __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(result,
         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);  // Length is stored as a smi in the adaptor frame.

  // Argument length is in result register.
  __ bind(&done);
}
-
-
// Implements the receiver-wrapping step of Function.prototype.apply/call:
// for normal (non-strict, non-native) functions, null/undefined receivers
// are replaced by the global receiver, and non-object receivers deoptimize.
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ ldr(scratch,
         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(scratch,
         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
  // The hint bits are tested in their smi-shifted positions (+ kSmiTagSize).
  __ tst(scratch,
         Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
  __ b(ne, &receiver_ok);

  // Do not transform the receiver to object for builtins.
  __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
  __ b(ne, &receiver_ok);

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);

  // Deoptimize if the receiver is not a JS object.
  __ tst(receiver, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
  DeoptimizeIf(lt, instr->environment());
  __ jmp(&receiver_ok);

  __ bind(&global_object);
  __ ldr(receiver, GlobalObjectOperand());
  __ ldr(receiver,
         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}
-
-
// Implements Function.prototype.apply with an arguments object: copies up
// to kArgumentsLimit arguments from the (possibly adapted) caller frame
// onto the stack and invokes the function. Deopts if there are too many.
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  ASSERT(receiver.is(r0));  // Used for parameter count.
  ASSERT(function.is(r1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(r0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, Operand(kArgumentsLimit));
  DeoptimizeIf(hi, instr->environment());

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ mov(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ add(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ cmp(length, Operand::Zero());
  __ b(eq, &invoke);
  __ bind(&loop);
  // Arguments are copied last-to-first; |length| doubles as the loop index.
  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
  __ push(scratch);
  __ sub(length, length, Operand(1), SetCC);
  __ b(ne, &loop);

  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is r0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
  // Restore the context register after the call.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
-
-
// Pushes a single (tagged or integer) argument onto the stack; double
// arguments are not supported by this instruction.
void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort("DoPushArgument not implemented for double type.");
  } else {
    // ip is used as the scratch register if the operand is not already
    // in a register.
    Register argument_reg = EmitLoadRegister(argument, ip);
    __ push(argument_reg);
  }
}
-
-
// Removes instr->count() word-sized slots from the top of the stack.
void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}
-
-
// Loads the currently executing JSFunction from the standard frame slot.
void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- // If there is a non-return use, the context must be moved to a register.
- Register result = ToRegister(instr->result());
- for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- __ mov(result, cp);
- return;
- }
- }
-}
-
-
// Loads the enclosing (previous) context from the given context's
// PREVIOUS_INDEX slot.
void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ ldr(result,
         MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
-
-
// Calls Runtime::kDeclareGlobals with (context, declaration pairs, flags)
// pushed as its three arguments.
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  __ push(cp);  // The context is the first argument.
  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
  __ push(scratch0());
  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  __ push(scratch0());
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}
-
-
// Loads the global object from the current context; the qml_global flag
// (Qt-specific fork extension) selects the QML global object slot instead.
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register result = ToRegister(instr->result());
  __ ldr(result, ContextOperand(cp, instr->qml_global()
                                        ? Context::QML_GLOBAL_OBJECT_INDEX
                                        : Context::GLOBAL_OBJECT_INDEX));
}
-
-
// Loads the global receiver (the "this" used for global-scope calls) from
// the given global object.
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global_object());
  Register result = ToRegister(instr->result());
  __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}
-
-
// Calls a statically known JSFunction. When no arguments adaption is needed
// (or the arity matches) the function's code entry is called directly;
// otherwise the generic InvokeFunction path (with adaptor) is used.
// |r1_state| says whether the callee is already in r1 or must be loaded.
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind,
                                 R1State r1_state) {
  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
      function->shared()->formal_parameter_count() == arity;

  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  if (can_invoke_directly) {
    if (r1_state == R1_UNINITIALIZED) {
      __ LoadHeapObject(r1, function);
    }

    // Change context.
    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

    // Set r0 to arguments count if adaption is not needed. Assumes that r0
    // is available to write to at this point.
    if (!function->NeedsArgumentsAdaption()) {
      __ mov(r0, Operand(arity));
    }

    // Invoke function.
    __ SetCallKind(r5, call_kind);
    __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
    __ Call(ip);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    // Slow path: let the macro assembler handle arguments adaption.
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
  }

  // Restore context.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
-
-
// Calls a compile-time-constant function as a method; the target must be
// loaded into r1 by CallKnownFunction (R1_UNINITIALIZED).
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));
  CallKnownFunction(instr->function(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD,
                    R1_UNINITIALIZED);
}
-
-
// Deferred slow path of Math.abs for a tagged, non-smi input: deopts unless
// the input is a heap number; positive inputs are returned as-is, negative
// ones get a freshly allocated heap number with the sign bit cleared.
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, Operand(ip));
  DeoptimizeIf(ne, instr->environment());

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;  // scratch0() is repurposed as |exponent| from here on.
  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ tst(exponent, Operand(HeapNumber::kSignMask));
  // Move the input to the result if necessary.
  __ Move(result, input);
  __ b(eq, &done);

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(r1) ? r0 : r1;
    Register tmp2 = input.is(r2) ? r0 : r2;
    Register tmp3 = input.is(r3) ? r0 : r3;
    Register tmp4 = input.is(r4) ? r0 : r4;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ b(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    // Clear the IEEE-754 sign bit and copy both words into the new number.
    __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
    __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}
-
-
// Emits abs() for an integer value: keeps non-negative inputs, negates
// negative ones, and deopts on overflow (abs(INT_MIN)).
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ cmp(input, Operand::Zero());
  __ Move(result, input, pl);
  // We can make rsb conditional because the previous cmp instruction
  // will clear the V (overflow) flag and rsb won't set this flag
  // if input is positive.
  __ rsb(result, input, Operand::Zero(), SetCC, mi);
  // Deoptimize on overflow.
  DeoptimizeIf(vs, instr->environment());
}
-
-
// Math.abs: vabs for doubles, EmitIntegerMathAbs for int32, and for tagged
// values a smi fast path with a deferred heap-number slow path.
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
  CpuFeatures::Scope scope(VFP2);
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LUnaryMathOperation* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LUnaryMathOperation* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DwVfpRegister input = ToDoubleRegister(instr->value());
    DwVfpRegister result = ToDoubleRegister(instr->result());
    __ vabs(result, input);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}
-
-
// Math.floor via a round-to-minus-infinity VFP truncation to int32.
// Deopts if the conversion is inexact/out of range, and (when required)
// if the result is -0.
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  CpuFeatures::Scope scope(VFP2);
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  __ EmitVFPTruncate(kRoundToMinusInf,
                     result,
                     input,
                     scratch,
                     double_scratch0());
  DeoptimizeIf(ne, instr->environment());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    Label done;
    __ cmp(result, Operand::Zero());
    __ b(ne, &done);
    // A zero result with the input's sign bit set means the input was -0.
    __ vmov(scratch, input.high());
    __ tst(scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment());
    __ bind(&done);
  }
}
-
-
// Math.round: adds 0.5 and truncates towards minus infinity, with early
// exits for tiny inputs and deopts for out-of-range values and (when
// required) a -0 result.
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
  CpuFeatures::Scope scope(VFP2);
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract exponent bits.
  __ vmov(result, input.high());
  __ ubfx(scratch,
          result,
          HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
  __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, Operand::Zero(), LeaveCC, le);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ b(le, &check_sign_on_zero);
  } else {
    __ b(le, &done);
  }

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
  __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32));
  DeoptimizeIf(ge, instr->environment());

  __ Vmov(double_scratch0(), 0.5, scratch);
  __ vadd(double_scratch0(), input, double_scratch0());

  // Save the original sign for later comparison.
  __ and_(scratch, result, Operand(HeapNumber::kSignMask));

  // Check sign of the result: if the sign changed, the input
  // value was in ]-0.5, 0[ and the result should be -0.
  __ vmov(result, double_scratch0().high());
  __ eor(result, result, Operand(scratch), SetCC);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(mi, instr->environment());
  } else {
    // -0 is not distinguishable from +0 here; just return 0.
    __ mov(result, Operand::Zero(), LeaveCC, mi);
    __ b(mi, &done);
  }

  __ EmitVFPTruncate(kRoundToMinusInf,
                     result,
                     double_scratch0(),
                     scratch,
                     double_scratch1);
  DeoptimizeIf(ne, instr->environment());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ cmp(result, Operand::Zero());
    __ b(ne, &done);
    __ bind(&check_sign_on_zero);
    // A zero result coming from a negative input must deoptimize.
    __ vmov(scratch, input.high());
    __ tst(scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment());
  }
  __ bind(&done);
}
-
-
// Math.sqrt: a single VFP square-root instruction.
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
  CpuFeatures::Scope scope(VFP2);
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  __ vsqrt(result, input);
}
-
-
// Math.pow(x, 0.5). Unlike sqrt, pow is specified to return +Infinity for
// x == -Infinity, so that case is handled explicitly before using vsqrt.
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
  CpuFeatures::Scope scope(VFP2);
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  DwVfpRegister temp = ToDoubleRegister(instr->temp());

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ vmov(temp, -V8_INFINITY, scratch0());
  __ VFPCompareAndSetFlags(input, temp);
  __ vneg(result, temp, eq);  // result = +Infinity when input == -Infinity.
  __ b(&done, eq);

  // Add +0 to convert -0 to +0.
  __ vadd(result, input, kDoubleRegZero);
  __ vsqrt(result, result);
  __ bind(&done);
}
-
-
// Math.pow via MathPowStub. Register assignments (d1/d2/r2 in, d3 out) are
// the stub's fixed calling convention. A tagged exponent must be a smi or
// heap number; anything else deoptimizes before calling the stub.
void LCodeGen::DoPower(LPower* instr) {
  CpuFeatures::Scope scope(VFP2);
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d2));
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(r2));
  ASSERT(ToDoubleRegister(instr->left()).is(d1));
  ASSERT(ToDoubleRegister(instr->result()).is(d3));

  if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(r2, &no_deopt);
    __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(r7, Operand(ip));
    DeoptimizeIf(ne, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}
-
-
// Math.random: inline multiply-with-carry style PRNG over two 32-bit seed
// words stored in the native context, falling back to the C runtime
// (deferred) when the seeds are uninitialized. The 32 random bits are then
// converted to a double in [0, 1) via exponent-bias arithmetic.
void LCodeGen::DoRandom(LRandom* instr) {
  CpuFeatures::Scope scope(VFP2);
  class DeferredDoRandom: public LDeferredCode {
   public:
    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LRandom* instr_;
  };

  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);

  // Having marked this instruction as a call we can use any
  // registers.
  ASSERT(ToDoubleRegister(instr->result()).is(d7));
  ASSERT(ToRegister(instr->global_object()).is(r0));

  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == kSeedSize);

  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
  // r2: FixedArray of the native context's random seeds

  // Load state[0].
  __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
  // A zero seed means "not yet initialized": take the deferred C path.
  __ cmp(r1, Operand::Zero());
  __ b(eq, deferred->entry());
  // Load state[1].
  __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
  // r1: state[0].
  // r0: state[1].

  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  __ and_(r3, r1, Operand(0xFFFF));
  __ mov(r4, Operand(18273));
  __ mul(r3, r3, r4);
  __ add(r1, r3, Operand(r1, LSR, 16));
  // Save state[0].
  __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ and_(r3, r0, Operand(0xFFFF));
  __ mov(r4, Operand(36969));
  __ mul(r3, r3, r4);
  __ add(r0, r3, Operand(r0, LSR, 16));
  // Save state[1].
  __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  __ and_(r0, r0, Operand(0x3FFFF));
  __ add(r0, r0, Operand(r1, LSL, 14));

  __ bind(deferred->exit());
  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
  // Create this constant using mov/orr to avoid PC relative load.
  __ mov(r1, Operand(0x41000000));
  __ orr(r1, r1, Operand(0x300000));
  // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
  __ vmov(d7, r0, r1);
  // Move 0x4130000000000000 to VFP.
  __ mov(r0, Operand::Zero());
  __ vmov(d8, r0, r1);
  // Subtract and store the result in the heap number.
  __ vsub(d7, d7, d8);
}
-
-
// Deferred slow path for DoRandom: calls the C random_uint32 function;
// the 32 random bits come back in r0 for the fast path's conversion code.
void LCodeGen::DoDeferredRandom(LRandom* instr) {
  __ PrepareCallCFunction(1, scratch0());
  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
  // Return value is in r0.
}
-
-
// Math.exp via the shared MathExpGenerator inline expansion.
void LCodeGen::DoMathExp(LMathExp* instr) {
  CpuFeatures::Scope scope(VFP2);
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DwVfpRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(
      masm(), input, result, double_scratch1, double_scratch2,
      temp1, temp2, scratch0());
}
-
-
// Math.log via the transcendental cache stub (untagged double in/out; the
// stub's convention fixes the result in d2).
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(d2));
  TranscendentalCacheStub stub(TranscendentalCache::LOG,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-
-
// Math.tan via the transcendental cache stub (result fixed in d2).
void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(d2));
  TranscendentalCacheStub stub(TranscendentalCache::TAN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-
-
// Math.cos via the transcendental cache stub (result fixed in d2).
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(d2));
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-
-
// Math.sin via the transcendental cache stub (result fixed in d2).
void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(d2));
  TranscendentalCacheStub stub(TranscendentalCache::SIN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
- default:
- Abort("Unimplemented type of LUnaryMathOperation.");
- UNREACHABLE();
- }
-}
-
-
// Invokes a function already in r1: generic InvokeFunction when the target
// is unknown at compile time, otherwise the direct CallKnownFunction path.
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(r1));
  ASSERT(instr->HasPointerMap());

  if (instr->known_function().is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    RecordPosition(pointers->position());
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
    // Restore the context register after the call.
    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    CallKnownFunction(instr->known_function(),
                      instr->arity(),
                      instr,
                      CALL_AS_METHOD,
                      R1_CONTAINS_TARGET);  // r1 already holds the target.
  }
}
-
-
// Keyed call (obj[key]()): dispatches through the keyed-call IC stub.
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));

  int arity = instr->arity();
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
  // Restore the context register after the call.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
-
-
// Named call (obj.name()): dispatches through the call IC with the
// property name in r2, per the IC's calling convention.
void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ mov(r2, Operand(instr->name()));
  CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
  // Restore context register.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
-
-
// Calls a function value (in r1) through CallFunctionStub.
void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  // Restore the context register after the call.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
-
-
// Calls a global function by name through the call IC, using the
// CODE_TARGET_CONTEXT reloc mode for context-dependent (global) lookups.
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ mov(r2, Operand(instr->name()));
  CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
  // Restore the context register after the call.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
-
-
// Calls a statically known global function as a plain function call.
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));
  CallKnownFunction(instr->target(),
                    instr->arity(),
                    instr,
                    CALL_AS_FUNCTION,
                    R1_UNINITIALIZED);
}
-
-
// 'new' expression: calls CallConstructStub with the constructor in r1 and
// the arity in r0.
void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->constructor()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  if (FLAG_optimize_constructed_arrays) {
    // No cell in r2 for construct type feedback in optimized code
    Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
                                   isolate());
    __ mov(r2, Operand(undefined_value));
  }
  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
-
-
// 'new Array(...)': calls the specialized array construct code with the
// type-feedback property cell in r2.
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(ToRegister(instr->constructor()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));
  ASSERT(FLAG_optimize_constructed_arrays);

  __ mov(r0, Operand(instr->arity()));
  __ mov(r2, Operand(instr->hydrogen()->property_cell()));
  Handle<Code> array_construct_code =
      isolate()->builtins()->ArrayConstructCode();

  CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
}
-
-
// Calls a runtime function with the instruction's arity.
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}
-
-
// Stores a value into a named field, either in-object or in the properties
// backing store, performing an optional map transition first and emitting
// write barriers where the hydrogen instruction requires them.
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  int offset = instr->offset();

  ASSERT(!object.is(value));

  if (!instr->transition().is_null()) {
    // Install the new map before storing the field.
    __ mov(scratch, Operand(instr->transition()));
    __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      // Maps are never in new space, hence OMIT_REMEMBERED_SET/SMI_CHECK.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          scratch,
                          temp,
                          kLRHasBeenSaved,
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  HType type = instr->hydrogen()->value()->type();
  // The smi check in the barrier can be skipped when the value is known
  // to be a heap object.
  SmiCheck check_needed =
      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (instr->is_in_object()) {
    __ str(value, FieldMemOperand(object, offset));
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          kLRHasBeenSaved,
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ str(value, FieldMemOperand(scratch, offset));
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          kLRHasBeenSaved,
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  }
}
-
-
// Generic named store: calls the (strict or sloppy) StoreIC with the
// receiver in r1, the value in r0, and the property name in r2.
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));

  // Name is always in r2.
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
-
-
// Array bounds check: deoptimizes when index >= length (unsigned compare,
// which also rejects negative indices). May be statically elided.
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  if (instr->hydrogen()->skip_check()) return;

  if (instr->index()->IsConstantOperand()) {
    int constant_index =
        ToInteger32(LConstantOperand::cast(instr->index()));
    // Match the length's representation (smi vs int32) for the compare.
    if (instr->hydrogen()->length()->representation().IsTagged()) {
      __ mov(ip, Operand(Smi::FromInt(constant_index)));
    } else {
      __ mov(ip, Operand(constant_index));
    }
    __ cmp(ip, ToRegister(instr->length()));
  } else {
    __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
  }
  DeoptimizeIf(hs, instr->environment());
}
-
-
// Stores an element into an external (typed) array, choosing the store
// width (strb/strh/str/vstr) from the elements kind. Float stores narrow
// the double value to single precision first.
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  CpuFeatures::Scope scope(VFP2);
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    // A constant key this large cannot be encoded as an offset.
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  // A smi key already carries a kSmiTagSize left shift; adjust the scale.
  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = instr->additional_index() << element_size_shift;

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    CpuFeatures::Scope scope(VFP3);
    DwVfpRegister value(ToDoubleRegister(instr->value()));
    Operand operand(key_is_constant
                        ? Operand(constant_key << element_size_shift)
                        : Operand(key, LSL, shift_size));
    __ add(scratch0(), external_pointer, operand);
    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      // Narrow to single precision before the 32-bit store.
      __ vcvt_f32_f64(double_scratch0().low(), value);
      __ vstr(double_scratch0().low(), scratch0(), additional_offset);
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ vstr(value, scratch0(), additional_offset);
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ strb(value, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ strh(value, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ str(value, mem_operand);
        break;
      // All remaining kinds are handled elsewhere or are not external.
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- Operand operand = key_is_constant
- ? Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(scratch, elements, operand);
- if (!key_is_constant) {
- __ add(scratch, scratch,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- if (instr->NeedsCanonicalization()) {
- // Check for NaN. All NaNs must be canonicalized.
- __ VFPCompareAndSetFlags(value, value);
- Label after_canonicalization;
-
- // Only load canonical NaN if the comparison above set the overflow.
- __ b(vc, &after_canonicalization);
- __ Vmov(value,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
-
- __ bind(&after_canonicalization);
- }
-
- __ vstr(value, scratch, instr->additional_index() << element_size_shift);
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
- : no_reg;
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ add(scratch, elements,
- Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- } else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ str(value, FieldMemOperand(store_base, offset));
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ add(key, store_base, Operand(offset - kHeapObjectTag));
- __ RecordWrite(elements,
- key,
- value,
- kLRHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- // By cases: external, fast double
- if (instr->is_external()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r2));
- ASSERT(ToRegister(instr->key()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
-
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
- Register scratch = scratch0();
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(from_map));
- __ b(ne, &not_applicable);
-
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ mov(new_map_reg, Operand(to_map));
- __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- // Write barrier.
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- scratch, kLRHasBeenSaved, kDontSaveFPRegs);
- } else if (FLAG_compiled_transitions) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ Move(r0, object_reg);
- __ Move(r1, to_map);
- TransitionElementsKindStub stub(from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(r2));
- Register new_map_reg = ToRegister(instr->new_map_temp());
- ASSERT(new_map_reg.is(r3));
- __ mov(new_map_reg, Operand(to_map));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
- RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(r2));
- Register new_map_reg = ToRegister(instr->new_map_temp());
- ASSERT(new_map_reg.is(r3));
- __ mov(new_map_reg, Operand(to_map));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
- RelocInfo::CODE_TARGET, instr);
- } else {
- UNREACHABLE();
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationSiteInfo(object, temp);
- DeoptimizeIf(eq, instr->environment());
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
-
- StringCharLoadGenerator::Generate(masm(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ mov(scratch, Operand(Smi::FromInt(const_index)));
- __ push(scratch);
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
- __ AssertSmi(r0);
- __ SmiUntag(r0);
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- ASSERT(!char_code.is(result));
-
- __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
- __ b(hi, deferred->entry());
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
- __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(result, ip);
- __ b(eq, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ SmiTag(char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- CpuFeatures::Scope scope(VFP2);
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
- SwVfpRegister single_scratch = double_scratch0().low();
- if (input->IsStackSlot()) {
- Register scratch = scratch0();
- __ ldr(scratch, ToMemOperand(input));
- __ vmov(single_scratch, scratch);
- } else {
- __ vmov(single_scratch, ToRegister(input));
- }
- __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- CpuFeatures::Scope scope(VFP2);
- LOperand* input = instr->value();
- LOperand* output = instr->result();
-
- SwVfpRegister flt_scratch = double_scratch0().low();
- __ vmov(flt_scratch, ToRegister(input));
- __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- SIGNED_INT32);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagI* instr_;
- };
-
- Register src = ToRegister(instr->value());
- Register dst = ToRegister(instr->result());
-
- DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
- __ SmiTag(dst, src, SetCC);
- __ b(vs, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- UNSIGNED_INT32);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagU* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ cmp(reg, Operand(Smi::kMaxValue));
- __ b(hi, deferred->entry());
- __ SmiTag(reg, reg);
- __ bind(deferred->exit());
-}
-
-
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
- masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
- masm->orr(hiword, scratch,
- Operand(hiword, LSR, mantissa_shift_for_hi_word));
- } else {
- masm->mov(loword, Operand::Zero());
- masm->orr(hiword, scratch,
- Operand(hiword, LSL, -mantissa_shift_for_hi_word));
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
- }
-}
-
-
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
- Register src = ToRegister(value);
- Register dst = ToRegister(instr->result());
- DwVfpRegister dbl_scratch = double_scratch0();
- SwVfpRegister flt_scratch = dbl_scratch.low();
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- Label done;
- if (signedness == SIGNED_INT32) {
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- if (dst.is(src)) {
- __ SmiUntag(src, dst);
- __ eor(src, src, Operand(0x80000000));
- }
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(flt_scratch, src);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
- } else {
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
- sfpd_lo, sfpd_hi,
- scratch0(), s0);
- }
- } else {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(flt_scratch, src);
- __ vcvt_f64_u32(dbl_scratch, flt_scratch);
- } else {
- Label no_leading_zero, done;
- __ tst(src, Operand(0x80000000));
- __ b(ne, &no_leading_zero);
-
- // Integer has one leading zeros.
- GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1);
- __ b(&done);
-
- __ bind(&no_leading_zero);
- GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0);
- __ b(&done);
- }
- }
-
- if (FLAG_inline_new) {
- __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
- __ Move(dst, r5);
- __ b(&done);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ mov(ip, Operand::Zero());
- __ StoreToSafepointRegisterSlot(ip, dst);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- __ Move(dst, r0);
- __ sub(dst, dst, Operand(kHeapObjectTag));
-
- // Done. Put the value in dbl_scratch into the value of the allocated heap
- // number.
- __ bind(&done);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
- } else {
- __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
- __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
- }
- __ add(dst, dst, Operand(kHeapObjectTag));
- __ StoreToSafepointRegisterSlot(dst, dst);
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagD* instr_;
- };
-
- DwVfpRegister input_reg = ToDoubleRegister(instr->value());
- Register scratch = scratch0();
- Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- bool convert_hole = false;
- HValue* change_input = instr->hydrogen()->value();
- if (change_input->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(change_input);
- convert_hole = load->UsesMustHandleHole();
- }
-
- Label no_special_nan_handling;
- Label done;
- if (convert_hole) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister input_reg = ToDoubleRegister(instr->value());
- __ VFPCompareAndSetFlags(input_reg, input_reg);
- __ b(vc, &no_special_nan_handling);
- __ vmov(reg, scratch0(), input_reg);
- __ cmp(scratch0(), Operand(kHoleNanUpper32));
- Label canonicalize;
- __ b(ne, &canonicalize);
- __ Move(reg, factory()->the_hole_value());
- __ b(&done);
- __ bind(&canonicalize);
- __ Vmov(input_reg,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
- no_reg);
- } else {
- Label not_hole;
- __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
- __ b(ne, &not_hole);
- __ Move(reg, factory()->the_hole_value());
- __ b(&done);
- __ bind(&not_hole);
- __ and_(scratch, sfpd_hi, Operand(0x7ff00000));
- __ cmp(scratch, Operand(0x7ff00000));
- __ b(ne, &no_special_nan_handling);
- Label special_nan_handling;
- __ tst(sfpd_hi, Operand(0x000FFFFF));
- __ b(ne, &special_nan_handling);
- __ cmp(sfpd_lo, Operand(0));
- __ b(eq, &no_special_nan_handling);
- __ bind(&special_nan_handling);
- double canonical_nan =
- FixedDoubleArray::canonical_not_the_hole_nan_as_double();
- uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
- __ mov(sfpd_lo,
- Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
- __ mov(sfpd_hi,
- Operand(static_cast<uint32_t>(casted_nan >> 32)));
- }
- }
-
- __ bind(&no_special_nan_handling);
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- // We want the untagged address first for performance
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
- DONT_TAG_RESULT);
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vstr(input_reg, reg, HeapNumber::kValueOffset);
- } else {
- __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
- __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
- }
- // Now that we have finished with the object's real address tag it
- __ add(reg, reg, Operand(kHeapObjectTag));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ mov(reg, Operand::Zero());
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- __ sub(r0, r0, Operand(kHeapObjectTag));
- __ StoreToSafepointRegisterSlot(r0, reg);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- if (instr->needs_check()) {
- STATIC_ASSERT(kHeapObjectTag == 1);
- // If the input is a HeapObject, SmiUntag will set the carry flag.
- __ SmiUntag(result, input, SetCC);
- DeoptimizeIf(cs, instr->environment());
- } else {
- __ SmiUntag(result, input);
- }
-}
-
-
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- DwVfpRegister result_reg,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode) {
- Register scratch = scratch0();
- SwVfpRegister flt_scratch = double_scratch0().low();
- ASSERT(!result_reg.is(double_scratch0()));
- CpuFeatures::Scope scope(VFP2);
-
- Label load_smi, heap_number, done;
-
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
- // Heap number map check.
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
- if (deoptimize_on_undefined) {
- DeoptimizeIf(ne, env);
- } else {
- Label heap_number;
- __ b(eq, &heap_number);
-
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, env);
-
- // Convert undefined to NaN.
- __ LoadRoot(ip, Heap::kNanValueRootIndex);
- __ sub(ip, ip, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
- __ jmp(&done);
-
- __ bind(&heap_number);
- }
- // Heap number to double register conversion.
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
- if (deoptimize_on_minus_zero) {
- __ vmov(ip, result_reg.low());
- __ cmp(ip, Operand::Zero());
- __ b(ne, &done);
- __ vmov(ip, result_reg.high());
- __ cmp(ip, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, env);
- }
- __ jmp(&done);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
- __ SmiUntag(scratch, input_reg, SetCC);
- DeoptimizeIf(cs, env);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
- __ Vmov(result_reg,
- FixedDoubleArray::hole_nan_as_double(),
- no_reg);
- __ b(&done);
- } else {
- __ SmiUntag(scratch, input_reg);
- ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
- }
-
- // Smi to double register conversion
- __ bind(&load_smi);
- // scratch: untagged value of input_reg
- __ vmov(flt_scratch, scratch);
- __ vcvt_f64_s32(result_reg, flt_scratch);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Register input_reg = ToRegister(instr->value());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DwVfpRegister double_scratch = double_scratch0();
- DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3());
-
- ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
- ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
-
- Label done;
-
- // The input was optimistically untagged; revert it.
- // The carry flag is set when we reach this deferred code as we just executed
- // SmiUntag(heap_object, SetCC)
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ adc(input_reg, input_reg, Operand(input_reg));
-
- // Heap number map check.
- __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch1, Operand(ip));
-
- if (instr->truncating()) {
- CpuFeatures::Scope scope(VFP2);
- Register scratch3 = ToRegister(instr->temp2());
- ASSERT(!scratch3.is(input_reg) &&
- !scratch3.is(scratch1) &&
- !scratch3.is(scratch2));
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- Label heap_number;
- __ b(eq, &heap_number);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
- __ mov(input_reg, Operand::Zero());
- __ b(&done);
-
- __ bind(&heap_number);
- __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
-
- __ EmitECMATruncate(input_reg,
- double_scratch2,
- double_scratch,
- scratch1,
- scratch2,
- scratch3);
-
- } else {
- CpuFeatures::Scope scope(VFP3);
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(ne, instr->environment());
-
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
- __ EmitVFPTruncate(kRoundToZero,
- input_reg,
- double_scratch,
- scratch1,
- double_scratch2,
- kCheckForInexactConversion);
- DeoptimizeIf(ne, instr->environment());
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(input_reg, Operand::Zero());
- __ b(ne, &done);
- __ vmov(scratch1, double_scratch.high());
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
-
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
-
- // Optimistically untag the input.
- // If the input is a HeapObject, SmiUntag will set the carry flag.
- __ SmiUntag(input_reg, SetCC);
- // Branch to deferred code if the input was tagged.
- // The deferred code will take care of restoring the tag.
- __ b(cs, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- DwVfpRegister result_reg = ToDoubleRegister(result);
-
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
- HValue* value = instr->hydrogen()->value();
- if (value->type().IsSmi()) {
- if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
- }
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
- }
- }
- }
-
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DwVfpRegister double_input = ToDoubleRegister(instr->value());
- DwVfpRegister double_scratch = double_scratch0();
-
- Label done;
-
- if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- __ EmitECMATruncate(result_reg,
- double_input,
- double_scratch,
- scratch1,
- scratch2,
- scratch3);
- } else {
- __ EmitVFPTruncate(kRoundToMinusInf,
- result_reg,
- double_input,
- scratch1,
- double_scratch,
- kCheckForInexactConversion);
-
- // Deoptimize if we had a vfp invalid exception,
- // including inexact operation.
- DeoptimizeIf(ne, instr->environment());
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- __ tst(ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- __ tst(ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ cmp(scratch, Operand(first));
-
- // If there is only one type in the interval check for equality.
- if (first == last) {
- DeoptimizeIf(ne, instr->environment());
- } else {
- DeoptimizeIf(lo, instr->environment());
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr->environment());
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (IsPowerOf2(mask)) {
- ASSERT(tag == 0 || IsPowerOf2(tag));
- __ tst(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
- } else {
- __ and_(scratch, scratch, Operand(mask));
- __ cmp(scratch, Operand(tag));
- DeoptimizeIf(ne, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (isolate()->heap()->InNewSpace(*target)) {
- Register reg = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
- __ mov(ip, Operand(Handle<Object>(cell)));
- __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- __ cmp(reg, ip);
- } else {
- __ cmp(reg, Operand(target));
- }
- DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMapCommon(Register map_reg,
- Handle<Map> map,
- CompareMapMode mode,
- LEnvironment* env) {
- Label success;
- __ CompareMap(map_reg, map, &success, mode);
- DeoptimizeIf(ne, env);
- __ bind(&success);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- Register map_reg = scratch0();
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- Register reg = ToRegister(input);
-
- Label success;
- SmallMapList* map_set = instr->hydrogen()->map_set();
- __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMap(map_reg, map, &success, REQUIRE_EXACT_MAP);
- __ b(eq, &success);
- }
- Handle<Map> map = map_set->last();
- DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment());
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- CpuFeatures::Scope vfp_scope(VFP2);
- DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
- __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- CpuFeatures::Scope scope(VFP2);
- Register unclamped_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampUint8(result_reg, unclamped_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- CpuFeatures::Scope scope(VFP2);
- Register scratch = scratch0();
- Register input_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
- Label is_smi, done, heap_number;
-
- // Both smi and heap number cases are handled.
- __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
-
- // Check for heap number
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(factory()->heap_number_map()));
- __ b(eq, &heap_number);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ cmp(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr->environment());
- __ mov(result_reg, Operand::Zero());
- __ jmp(&done);
-
- // Heap number
- __ bind(&heap_number);
- __ vldr(double_scratch0(), FieldMemOperand(input_reg,
- HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
- __ jmp(&done);
-
- // smi
- __ bind(&is_smi);
- __ ClampUint8(result_reg, result_reg);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
- Register prototype_reg = ToRegister(instr->temp());
- Register map_reg = ToRegister(instr->temp2());
-
- ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
- ZoneList<Handle<Map> >* maps = instr->maps();
-
- ASSERT(prototypes->length() == maps->length());
-
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- __ LoadHeapObject(prototype_reg,
- prototypes->at(prototypes->length() - 1));
- } else {
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(prototype_reg, prototypes->at(i));
- __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
- DoCheckMapCommon(map_reg,
- maps->at(i),
- ALLOW_ELEMENT_TRANSITION_MAPS,
- instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Register scratch2 = ToRegister(instr->temp2());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- // Allocate memory for the object. The initial map might change when
- // the constructor's prototype changes, but instance size and property
- // counts remain unchanged (if slack tracking finished).
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(map, constructor);
- __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
- __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ str(scratch, FieldMemOperand(result, property_offset));
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ mov(r0, Operand(Smi::FromInt(instance_size)));
- __ push(r0);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- // Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ AllocateInNewSpace(size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- flags);
- } else {
- Register size = ToRegister(instr->size());
- __ AllocateInNewSpace(size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- flags);
- }
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand(Smi::FromInt(0)));
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ SmiTag(size, size);
- __ push(size);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
- AllocationSiteMode allocation_site_mode =
- instr->hydrogen()->allocation_site_mode();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
- // Load map into r2.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- __ cmp(r2, Operand(boilerplate_elements_kind));
- DeoptimizeIf(ne, instr->environment());
- }
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(r3, literals);
- __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ mov(r1, Operand(isolate()->factory()->empty_fixed_array()));
- __ Push(r3, r2, r1);
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode) {
- ASSERT(!source.is(r2));
- ASSERT(!result.is(r2));
-
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- object->map()->CanTrackAllocationSite();
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_size = has_elements ? elements->Size() : 0;
- int elements_offset = *offset + object_size;
- if (create_allocation_site_info) {
- elements_offset += AllocationSiteInfo::kSize;
- *offset += AllocationSiteInfo::kSize;
- }
-
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ add(r2, result, Operand(elements_offset));
- } else {
- __ ldr(r2, FieldMemOperand(source, i));
- }
- __ str(r2, FieldMemOperand(result, object_offset + i));
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
- isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ add(r2, result, Operand(*offset));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- } else {
- __ mov(r2, Operand(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- }
- }
-
- // Build Allocation Site Info if desired
- if (create_allocation_site_info) {
- __ mov(r2, Operand(Handle<Map>(isolate()->heap()->
- allocation_site_info_map())));
- __ str(r2, FieldMemOperand(result, object_size));
- __ str(source, FieldMemOperand(result, object_size + kPointerSize));
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ ldr(r2, FieldMemOperand(source, i));
- __ str(r2, FieldMemOperand(result, elements_offset + i));
- }
-
- // Copy elements backing store content.
- int elements_length = has_elements ? elements->length() : 0;
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- // We only support little endian mode...
- int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
- int32_t value_high = static_cast<int32_t>(value >> 32);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(r2, Operand(value_low));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ mov(r2, Operand(value_high));
- __ str(r2, FieldMemOperand(result, total_offset + 4));
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i), isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ add(r2, result, Operand(*offset));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- } else {
- __ mov(r2, Operand(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
- // Load map into r2.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- __ cmp(r2, Operand(boilerplate_elements_kind));
- DeoptimizeIf(ne, instr->environment());
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ push(r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset,
- instr->hydrogen()->allocation_site_mode());
- ASSERT_EQ(size, offset);
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(r3, literals);
- __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r1, Operand(constant_properties));
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- __ mov(r0, Operand(Smi::FromInt(flags)));
-
- // Pick the right runtime function or stub to call.
- int properties_count = constant_properties->length() / 2;
- if (instr->hydrogen()->depth() > 1) {
- __ Push(r3, r2, r1, r0);
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ Push(r3, r2, r1, r0);
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- } else {
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->value()).is(r0));
- __ push(r0);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- Label materialized;
- // Registers will be used as follows:
- // r7 = literals array.
- // r1 = regexp literal.
- // r0 = regexp literal clone.
- // r2 and r4-r6 are used as temporaries.
- int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(r7, instr->hydrogen()->literals());
- __ ldr(r1, FieldMemOperand(r7, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &materialized);
-
- // Create regexp literal using runtime function
- // Result will be in r0.
- __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r5, Operand(instr->hydrogen()->pattern()));
- __ mov(r4, Operand(instr->hydrogen()->flags()));
- __ Push(r7, r6, r5, r4);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(r1, r0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ Push(r1, r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(r1);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ ldr(r3, FieldMemOperand(r1, i));
- __ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
- __ str(r3, FieldMemOperand(r0, i));
- __ str(r2, FieldMemOperand(r0, i + kPointerSize));
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
- __ str(r3, FieldMemOperand(r0, size - kPointerSize));
- }
-}
-
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
- __ mov(r1, Operand(shared_info));
- __ push(r1);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- __ mov(r2, Operand(shared_info));
- __ mov(r1, Operand(pretenure
- ? factory()->true_value()
- : factory()->false_value()));
- __ Push(cp, r2, r1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- Register input = ToRegister(instr->value());
- __ push(input);
- CallRuntime(Runtime::kTypeof, 1, instr);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
- input,
- instr->type_literal());
- if (final_branch_condition != kNoCondition) {
- EmitBranch(true_block, false_block, final_branch_condition);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
- Condition final_branch_condition = kNoCondition;
- Register scratch = scratch0();
- if (type_name->Equals(heap()->number_string())) {
- __ JumpIfSmi(input, true_label);
- __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(input, Operand(ip));
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->string_string())) {
- __ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
- __ b(ge, false_label);
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->boolean_string())) {
- __ CompareRoot(input, Heap::kTrueValueRootIndex);
- __ b(eq, true_label);
- __ CompareRoot(input, Heap::kFalseValueRootIndex);
- final_branch_condition = eq;
-
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->undefined_string())) {
- __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
- __ b(eq, true_label);
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- final_branch_condition = ne;
-
- } else if (type_name->Equals(heap()->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
- __ b(eq, true_label);
- __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->object_string())) {
- __ JumpIfSmi(input, false_label);
- if (!FLAG_harmony_typeof) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ b(eq, true_label);
- }
- if (FLAG_harmony_symbols) {
- __ CompareObjectType(input, input, scratch, SYMBOL_TYPE);
- __ b(eq, true_label);
- __ CompareInstanceType(input, scratch,
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- } else {
- __ CompareObjectType(input, input, scratch,
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- }
- __ b(lt, false_label);
- __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(gt, false_label);
- // Check for undetectable objects => false.
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- final_branch_condition = eq;
-
- } else {
- __ b(false_label);
- }
-
- return final_branch_condition;
-}
-
-
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- EmitIsConstructCall(temp1, scratch0());
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- ASSERT(!temp1.is(temp2));
- // Get the frame pointer for the calling frame.
- __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &check_frame_marker);
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
- __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt() {
- if (info()->IsStub()) return;
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- // Block literal pool emission for duration of padding.
- Assembler::BlockConstPoolScope block_const_pool(masm());
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= Assembler::kInstrSize;
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(al, instr->environment());
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- Register object = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- Register strict = scratch0();
- __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
- __ Push(object, key, strict);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoIn(LIn* instr) {
- Register obj = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- __ Push(key, obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStackCheck* instr_;
- };
-
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &done);
- StackCheckStub stub;
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- EnsureSpaceForLazyDeopt();
- __ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- } else {
- ASSERT(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(lo, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- DeoptimizeIf(eq, instr->environment());
-
- Register null_value = r5;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmp(r0, null_value);
- DeoptimizeIf(eq, instr->environment());
-
- __ tst(r0, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
-
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr->environment());
-
- Label use_cache, call_runtime;
- __ CheckEnumCache(null_value, &call_runtime);
-
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ b(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(r0);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
-
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r1, ip);
- DeoptimizeIf(ne, instr->environment());
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ cmp(result, Operand(Smi::FromInt(0)));
- __ b(ne, &load_cache);
- __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
- __ jmp(&done);
-
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ ldr(result,
- FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
- __ ldr(result,
- FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- __ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- Register map = ToRegister(instr->map());
- __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- __ cmp(map, scratch0());
- DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- Label out_of_object, done;
- __ cmp(index, Operand::Zero());
- __ b(lt, &out_of_object);
-
- STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
- __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
-
- __ b(&done);
-
- __ bind(&out_of_object);
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- // Index is equal to negated out of object property index plus 1.
- __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(result, FieldMemOperand(scratch,
- FixedArray::kHeaderSize - kPointerSize));
- __ bind(&done);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/arm/lithium-codegen-arm.h b/src/3rdparty/v8/src/arm/lithium-codegen-arm.h
deleted file mode 100644
index f1e3332..0000000
--- a/src/3rdparty/v8/src/arm/lithium-codegen-arm.h
+++ /dev/null
@@ -1,513 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
-#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
-
-#include "arm/lithium-arm.h"
-#include "arm/lithium-gap-resolver-arm.h"
-#include "deoptimizer.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-class LCodeGen BASE_EMBEDDED {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
- deoptimizations_(4, info->zone()),
- deopt_jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
- inlined_function_count_(0),
- scope_(info->scope()),
- status_(UNUSED),
- translations_(info->zone()),
- deferred_(8, info->zone()),
- osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
-
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
- bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- // Support for converting LOperands to assembler types.
- // LOperand must be a register.
- Register ToRegister(LOperand* op) const;
-
- // LOperand is loaded into scratch, unless already a register.
- Register EmitLoadRegister(LOperand* op, Register scratch);
-
- // LOperand must be a double register.
- DwVfpRegister ToDoubleRegister(LOperand* op) const;
-
- // LOperand is loaded into dbl_scratch, unless already a double register.
- DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DwVfpRegister dbl_scratch);
- int ToInteger32(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
- // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
- MemOperand ToHighMemOperand(LOperand* op) const;
-
- bool IsInteger32(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredBinaryOpStub(LPointerMap* pointer_map,
- LOperand* left_argument,
- LOperand* right_argument,
- Token::Value op);
- void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
-
- void DoCheckMapCommon(Register map_reg, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- MemOperand PrepareKeyedOperand(Register key,
- Register base,
- bool key_is_constant,
- int constant_key,
- int element_size,
- int shift_size,
- int additional_index,
- int additional_offset);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
-
- LPlatformChunk* chunk() const { return chunk_; }
- Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
-
- Register scratch0() { return r9; }
- DwVfpRegister double_scratch0() { return kScratchDoubleReg; }
-
- int GetNextEmittedBlock(int block);
- LInstruction* GetNextInstruction();
-
- void EmitClassOfTest(Label* if_true,
- Label* if_false,
- Handle<String> class_name,
- Register input,
- Register temporary,
- Register temporary2);
-
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return info()->num_parameters(); }
-
- void Abort(const char* reason);
- void Comment(const char* format, ...);
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- // Code generation passes. Returns true if code generation should
- // continue.
- bool GeneratePrologue();
- bool GenerateBody();
- bool GenerateDeferredCode();
- bool GenerateDeoptJumpTable();
- bool GenerateSafepointTable();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- void CallCode(
- Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
-
- void CallCodeGeneric(
- Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode,
- TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
-
- void CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr);
-
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr);
-
- enum R1State {
- R1_UNINITIALIZED,
- R1_CONTAINS_TARGET
- };
-
- // Generate a direct call to a known function. Expects the function
- // to be in r1.
- void CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr,
- CallKind call_kind,
- R1State r1_state);
-
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
-
- void AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
- void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
- Register ToRegister(int index) const;
- DwVfpRegister ToDoubleRegister(int index) const;
-
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
-
- // Support for recording safepoint and position information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordPosition(int position);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
- void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitNumberUntagD(Register input,
- DwVfpRegister result,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name);
-
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string);
-
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp1, Register temp2);
-
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
-
- // Emit optimized code for integer division.
- // Inputs are signed.
- // All registers are clobbered.
- // If 'remainder' is no_reg, it is not computed.
- void EmitSignedIntegerDivisionByConstant(Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment);
-
- struct JumpTableEntry {
- inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
- : label(),
- address(entry),
- needs_frame(frame),
- is_lazy_deopt(is_lazy) { }
- Label label;
- Address address;
- bool needs_frame;
- bool is_lazy_deopt;
- };
-
- void EnsureSpaceForLazyDeopt();
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
- ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> deopt_jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
- int inlined_function_count_;
- Scope* const scope_;
- Status status_;
- TranslationBuffer translations_;
- ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
- int last_lazy_deopt_pc_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiler from a set of parallel moves to a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope BASE_EMBEDDED {
- public:
- PushSafepointRegistersScope(LCodeGen* codegen,
- Safepoint::Kind kind)
- : codegen_(codegen) {
- ASSERT(codegen_->info()->is_calling());
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = kind;
-
- switch (codegen_->expected_safepoint_kind_) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PushSafepointRegisters();
- break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PushSafepointRegistersAndDoubles();
- break;
- default:
- UNREACHABLE();
- }
- }
-
- ~PushSafepointRegistersScope() {
- Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
- ASSERT((kind & Safepoint::kWithRegisters) != 0);
- switch (kind) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PopSafepointRegisters();
- break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PopSafepointRegistersAndDoubles();
- break;
- default:
- UNREACHABLE();
- }
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() { }
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_LITHIUM_CODEGEN_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc b/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc
deleted file mode 100644
index 4df1338..0000000
--- a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "arm/lithium-gap-resolver-arm.h"
-#include "arm/lithium-codegen-arm.h"
-
-namespace v8 {
-namespace internal {
-
-static const Register kSavedValueRegister = { 9 };
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
- saved_destination_(NULL) { }
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(moves_.is_empty());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- root_index_ = i; // Any cycle is found when by reaching this move again.
- PerformMove(i);
- if (in_cycle_) {
- RestoreValue();
- }
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- ASSERT(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph.
-
- // We can only find a cycle, when doing a depth-first traversal of moves,
- // be encountering the starting move again. So by spilling the source of
- // the starting move, we break the cycle. All moves are then unblocked,
- // and the starting move is completed by writing the spilled value to
- // its destination. All other moves from the spilled source have been
- // completed prior to breaking the cycle.
- // An additional complication is that moves to MemOperands with large
- // offsets (more than 1K or 4K) require us to spill this spilled value to
- // the stack, to free up the register.
- ASSERT(!moves_[index].IsPending());
- ASSERT(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack allocated local. Multiple moves can
- // be pending because this function is recursive.
- ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- PerformMove(i);
- // If there is a blocking, pending move it must be moves_[root_index_]
- // and all other moves with the same source as moves_[root_index_] are
- // sucessfully executed (because they are cycle-free) by this loop.
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // The move may be blocked on a pending move, which must be the starting move.
- // In this case, we have a cycle, and we save the source of this move to
- // a scratch register to break it.
- LMoveOperands other_move = moves_[root_index_];
- if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
- BreakCycle(index);
- return;
- }
-
- // This move is no longer blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::BreakCycle(int index) {
- // We save in a register the value that should end up in the source of
- // moves_[root_index]. After performing all moves in the tree rooted
- // in that move, we save the value to that source.
- ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
- ASSERT(!in_cycle_);
- in_cycle_ = true;
- LOperand* source = moves_[index].source();
- saved_destination_ = moves_[index].destination();
- if (source->IsRegister()) {
- __ mov(kSavedValueRegister, cgen_->ToRegister(source));
- } else if (source->IsStackSlot()) {
- __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
- } else if (source->IsDoubleRegister()) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
- } else if (source->IsDoubleStackSlot()) {
- CpuFeatures::Scope scope(VFP2);
- __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
- } else {
- UNREACHABLE();
- }
- // This move will be done by restoring the saved value to the destination.
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::RestoreValue() {
- ASSERT(in_cycle_);
- ASSERT(saved_destination_ != NULL);
-
- // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
- if (saved_destination_->IsRegister()) {
- __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
- } else if (saved_destination_->IsStackSlot()) {
- __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
- } else if (saved_destination_->IsDoubleRegister()) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
- } else if (saved_destination_->IsDoubleStackSlot()) {
- CpuFeatures::Scope scope(VFP2);
- __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
- } else {
- UNREACHABLE();
- }
-
- in_cycle_ = false;
- saved_destination_ = NULL;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
-
- if (source->IsRegister()) {
- Register source_register = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- __ mov(cgen_->ToRegister(destination), source_register);
- } else {
- ASSERT(destination->IsStackSlot());
- __ str(source_register, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsRegister()) {
- __ ldr(cgen_->ToRegister(destination), source_operand);
- } else {
- ASSERT(destination->IsStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- if (!destination_operand.OffsetIsUint12Encodable()) {
- CpuFeatures::Scope scope(VFP2);
- // ip is overwritten while saving the value to the destination.
- // Therefore we can't use ip. It is OK if the read from the source
- // destroys ip, since that happens before the value is read.
- __ vldr(kScratchDoubleReg.low(), source_operand);
- __ vstr(kScratchDoubleReg.low(), destination_operand);
- } else {
- __ ldr(ip, source_operand);
- __ str(ip, destination_operand);
- }
- } else {
- __ ldr(kSavedValueRegister, source_operand);
- __ str(kSavedValueRegister, destination_operand);
- }
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32(constant_source)) {
- __ mov(dst, Operand(cgen_->ToInteger32(constant_source)));
- } else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
- }
- } else {
- ASSERT(destination->IsStackSlot());
- ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
- if (cgen_->IsInteger32(constant_source)) {
- __ mov(kSavedValueRegister,
- Operand(cgen_->ToInteger32(constant_source)));
- } else {
- __ LoadObject(kSavedValueRegister,
- cgen_->ToHandle(constant_source));
- }
- __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleRegister()) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ vmov(cgen_->ToDoubleRegister(destination), source_register);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- __ vstr(source_register, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleStackSlot()) {
- CpuFeatures::Scope scope(VFP2);
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- // kSavedDoubleValueRegister was used to break the cycle,
- // but kSavedValueRegister is free.
- MemOperand source_high_operand =
- cgen_->ToHighMemOperand(source);
- MemOperand destination_high_operand =
- cgen_->ToHighMemOperand(destination);
- __ ldr(kSavedValueRegister, source_operand);
- __ str(kSavedValueRegister, destination_operand);
- __ ldr(kSavedValueRegister, source_high_operand);
- __ str(kSavedValueRegister, destination_high_operand);
- } else {
- __ vldr(kScratchDoubleReg, source_operand);
- __ vstr(kScratchDoubleReg, destination_operand);
- }
- }
- } else {
- UNREACHABLE();
- }
-
- moves_[index].Eliminate();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.h b/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.h
deleted file mode 100644
index 9dd09c8..0000000
--- a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-
-#include "v8.h"
-
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // If a cycle is found in the series of moves, save the blocking value to
- // a scratch register. The cycle must be found by hitting the root of the
- // depth-first search.
- void BreakCycle(int index);
-
- // After a cycle has been resolved, restore the value from the scratch
- // register to its proper destination.
- void RestoreValue();
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- int root_index_;
- bool in_cycle_;
- LOperand* saved_destination_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/macro-assembler-arm.cc b/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
deleted file mode 100644
index 326f555..0000000
--- a/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
+++ /dev/null
@@ -1,4012 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <limits.h> // For LONG_MIN, LONG_MAX.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "debug.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
- : Assembler(arg_isolate, buffer, size),
- generating_stub_(false),
- allow_stub_calls_(true),
- has_frame_(false) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
- }
-}
-
-
-// We always generate arm code, never thumb code, even if V8 is compiled to
-// thumb, so we require inter-working support
-#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
-#error "flag -mthumb-interwork missing"
-#endif
-
-
-// We do not support thumb inter-working with an arm architecture not supporting
-// the blx instruction (below v5t). If you know what CPU you are compiling for
-// you can use -march=armv7 or similar.
-#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
-# error "For thumb inter-working we require an architecture which supports blx"
-#endif
-
-
-// Using bx does not yield better code, so use it only when required
-#if defined(USE_THUMB_INTERWORK)
-#define USE_BX 1
-#endif
-
-
-void MacroAssembler::Jump(Register target, Condition cond) {
-#if USE_BX
- bx(target, cond);
-#else
- mov(pc, Operand(target), LeaveCC, cond);
-#endif
-}
-
-
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
- Condition cond) {
-#if USE_BX
- mov(ip, Operand(target, rmode));
- bx(ip, cond);
-#else
- mov(pc, Operand(target, rmode), LeaveCC, cond);
-#endif
-}
-
-
-void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
- Condition cond) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
-}
-
-
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- // 'code' is always generated ARM code, never THUMB code
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-}
-
-
-int MacroAssembler::CallSize(Register target, Condition cond) {
-#ifdef USE_BLX
- return kInstrSize;
-#else
- return 2 * kInstrSize;
-#endif
-}
-
-
-void MacroAssembler::Call(Register target, Condition cond) {
- // Block constant pool for the call instruction sequence.
- BlockConstPoolScope block_const_pool(this);
- Label start;
- bind(&start);
-#ifdef USE_BLX
- blx(target, cond);
-#else
- // set lr for return at current pc + 8
- mov(lr, Operand(pc), LeaveCC, cond);
- mov(pc, Operand(target), LeaveCC, cond);
-#endif
- ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
-}
-
-
-int MacroAssembler::CallSize(
- Address target, RelocInfo::Mode rmode, Condition cond) {
- int size = 2 * kInstrSize;
- Instr mov_instr = cond | MOV | LeaveCC;
- intptr_t immediate = reinterpret_cast<intptr_t>(target);
- if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
- size += kInstrSize;
- }
- return size;
-}
-
-
-int MacroAssembler::CallSizeNotPredictableCodeSize(
- Address target, RelocInfo::Mode rmode, Condition cond) {
- int size = 2 * kInstrSize;
- Instr mov_instr = cond | MOV | LeaveCC;
- intptr_t immediate = reinterpret_cast<intptr_t>(target);
- if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
- size += kInstrSize;
- }
- return size;
-}
-
-
-void MacroAssembler::Call(Address target,
- RelocInfo::Mode rmode,
- Condition cond,
- TargetAddressStorageMode mode) {
- // Block constant pool for the call instruction sequence.
- BlockConstPoolScope block_const_pool(this);
- Label start;
- bind(&start);
-
- bool old_predictable_code_size = predictable_code_size();
- if (mode == NEVER_INLINE_TARGET_ADDRESS) {
- set_predictable_code_size(true);
- }
-
-#ifdef USE_BLX
- // Call sequence on V7 or later may be :
- // movw ip, #... @ call address low 16
- // movt ip, #... @ call address high 16
- // blx ip
- // @ return address
- // Or for pre-V7 or values that may be back-patched
- // to avoid ICache flushes:
- // ldr ip, [pc, #...] @ call address
- // blx ip
- // @ return address
-
- // Statement positions are expected to be recorded when the target
- // address is loaded. The mov method will automatically record
- // positions when pc is the target, since this is not the case here
- // we have to do it explicitly.
- positions_recorder()->WriteRecordedPositions();
-
- mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
- blx(ip, cond);
-
-#else
- // Set lr for return at current pc + 8.
- mov(lr, Operand(pc), LeaveCC, cond);
- // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
- mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
-#endif
- ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
- if (mode == NEVER_INLINE_TARGET_ADDRESS) {
- set_predictable_code_size(old_predictable_code_size);
- }
-}
-
-
-int MacroAssembler::CallSize(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id,
- Condition cond) {
- return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
-}
-
-
-void MacroAssembler::Call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id,
- Condition cond,
- TargetAddressStorageMode mode) {
- Label start;
- bind(&start);
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
- SetRecordedAstId(ast_id);
- rmode = RelocInfo::CODE_TARGET_WITH_ID;
- }
- // 'code' is always generated ARM code, never THUMB code
- Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
-}
-
-
-void MacroAssembler::Ret(Condition cond) {
-#if USE_BX
- bx(lr, cond);
-#else
- mov(pc, Operand(lr), LeaveCC, cond);
-#endif
-}
-
-
-void MacroAssembler::Drop(int count, Condition cond) {
- if (count > 0) {
- add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
- }
-}
-
-
-void MacroAssembler::Ret(int drop, Condition cond) {
- Drop(drop, cond);
- Ret(cond);
-}
-
-
-void MacroAssembler::Swap(Register reg1,
- Register reg2,
- Register scratch,
- Condition cond) {
- if (scratch.is(no_reg)) {
- eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
- eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
- eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
- } else {
- mov(scratch, reg1, LeaveCC, cond);
- mov(reg1, reg2, LeaveCC, cond);
- mov(reg2, scratch, LeaveCC, cond);
- }
-}
-
-
-void MacroAssembler::Call(Label* target) {
- bl(target);
-}
-
-
-void MacroAssembler::Push(Handle<Object> handle) {
- mov(ip, Operand(handle));
- push(ip);
-}
-
-
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, Operand(value));
-}
-
-
-void MacroAssembler::Move(Register dst, Register src, Condition cond) {
- if (!dst.is(src)) {
- mov(dst, src, LeaveCC, cond);
- }
-}
-
-
-void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatures::Scope scope(VFP2);
- if (!dst.is(src)) {
- vmov(dst, src);
- }
-}
-
-
-void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
- Condition cond) {
- if (!src2.is_reg() &&
- !src2.must_output_reloc_info(this) &&
- src2.immediate() == 0) {
- mov(dst, Operand::Zero(), LeaveCC, cond);
- } else if (!src2.is_single_instruction(this) &&
- !src2.must_output_reloc_info(this) &&
- CpuFeatures::IsSupported(ARMv7) &&
- IsPowerOf2(src2.immediate() + 1)) {
- ubfx(dst, src1, 0,
- WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
- } else {
- and_(dst, src1, src2, LeaveCC, cond);
- }
-}
-
-
-void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
- Condition cond) {
- ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- and_(dst, src1, Operand(mask), LeaveCC, cond);
- if (lsb != 0) {
- mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
- }
- } else {
- ubfx(dst, src1, lsb, width, cond);
- }
-}
-
-
-void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
- Condition cond) {
- ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- and_(dst, src1, Operand(mask), LeaveCC, cond);
- int shift_up = 32 - lsb - width;
- int shift_down = lsb + shift_up;
- if (shift_up != 0) {
- mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
- }
- if (shift_down != 0) {
- mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
- }
- } else {
- sbfx(dst, src1, lsb, width, cond);
- }
-}
-
-
-void MacroAssembler::Bfi(Register dst,
- Register src,
- Register scratch,
- int lsb,
- int width,
- Condition cond) {
- ASSERT(0 <= lsb && lsb < 32);
- ASSERT(0 <= width && width < 32);
- ASSERT(lsb + width < 32);
- ASSERT(!scratch.is(dst));
- if (width == 0) return;
- if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- bic(dst, dst, Operand(mask));
- and_(scratch, src, Operand((1 << width) - 1));
- mov(scratch, Operand(scratch, LSL, lsb));
- orr(dst, dst, scratch);
- } else {
- bfi(dst, src, lsb, width, cond);
- }
-}
-
-
-void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
- Condition cond) {
- ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- bic(dst, src, Operand(mask));
- } else {
- Move(dst, src, cond);
- bfc(dst, lsb, width, cond);
- }
-}
-
-
-void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
- Condition cond) {
- if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- ASSERT(!dst.is(pc) && !src.rm().is(pc));
- ASSERT((satpos >= 0) && (satpos <= 31));
-
- // These asserts are required to ensure compatibility with the ARMv7
- // implementation.
- ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
- ASSERT(src.rs().is(no_reg));
-
- Label done;
- int satval = (1 << satpos) - 1;
-
- if (cond != al) {
- b(NegateCondition(cond), &done); // Skip saturate if !condition.
- }
- if (!(src.is_reg() && dst.is(src.rm()))) {
- mov(dst, src);
- }
- tst(dst, Operand(~satval));
- b(eq, &done);
- mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
- mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
- bind(&done);
- } else {
- usat(dst, satpos, src, cond);
- }
-}
-
-
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond) {
- if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
- !Heap::RootCanBeWrittenAfterInitialization(index) &&
- !predictable_code_size()) {
- Handle<Object> root(isolate()->heap()->roots_array_start()[index],
- isolate());
- if (!isolate()->heap()->InNewSpace(*root)) {
- // The CPU supports fast immediate values, and this root will never
- // change. We will load it as a relocatable immediate value.
- mov(destination, Operand(root), LeaveCC, cond);
- return;
- }
- }
- ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
-}
-
-
-void MacroAssembler::StoreRoot(Register source,
- Heap::RootListIndex index,
- Condition cond) {
- str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
-}
-
-
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- mov(result, Operand(cell));
- ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
- } else {
- mov(result, Operand(object));
- }
-}
-
-
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cond,
- Label* branch) {
- ASSERT(cond == eq || cond == ne);
- and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
- cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
- b(cond, branch);
-}
-
-
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done);
- }
-
- // Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
-
- add(dst, object, Operand(offset - kHeapObjectTag));
- if (emit_debug_code()) {
- Label ok;
- tst(dst, Operand((1 << kPointerSizeLog2) - 1));
- b(eq, &ok);
- stop("Unaligned cell in write barrier");
- bind(&ok);
- }
-
- RecordWrite(object,
- dst,
- value,
- lr_status,
- save_fp,
- remembered_set_action,
- OMIT_SMI_CHECK);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
- mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
- }
-}
-
-
-// Will clobber 4 registers: object, address, scratch, ip. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!address.is(cp) && !value.is(cp));
-
- if (emit_debug_code()) {
- ldr(ip, MemOperand(address));
- cmp(ip, value);
- Check(eq, "Wrong address or value passed to RecordWrite");
- }
-
- Label done;
-
- if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
- tst(value, Operand(kSmiTagMask));
- b(eq, &done);
- }
-
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- eq,
- &done);
-
- // Record the actual write.
- if (lr_status == kLRHasNotBeenSaved) {
- push(lr);
- }
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
- CallStub(&stub);
- if (lr_status == kLRHasNotBeenSaved) {
- pop(lr);
- }
-
- bind(&done);
-
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
- mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
- }
-}
-
-
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address,
- Register scratch,
- SaveFPRegsMode fp_mode,
- RememberedSetFinalAction and_then) {
- Label done;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok);
- stop("Remembered set pointer is in new space");
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- mov(ip, Operand(store_buffer));
- ldr(scratch, MemOperand(ip));
- // Store pointer to buffer and increment buffer top.
- str(address, MemOperand(scratch, kPointerSize, PostIndex));
- // Write back new top of buffer.
- str(scratch, MemOperand(ip));
- // Call stub on end of buffer.
- // Check for end of buffer.
- tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
- if (and_then == kFallThroughAtEnd) {
- b(eq, &done);
- } else {
- ASSERT(and_then == kReturnAtEnd);
- Ret(eq);
- }
- push(lr);
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(fp_mode);
- CallStub(&store_buffer_overflow);
- pop(lr);
- bind(&done);
- if (and_then == kReturnAtEnd) {
- Ret();
- }
-}
-
-
-// Push and pop all registers that can hold pointers.
-void MacroAssembler::PushSafepointRegisters() {
- // Safepoints expect a block of contiguous register values starting with r0:
- ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
- // Safepoints expect a block of kNumSafepointRegisters values on the
- // stack, so adjust the stack for unsaved registers.
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ASSERT(num_unsaved >= 0);
- sub(sp, sp, Operand(num_unsaved * kPointerSize));
- stm(db_w, sp, kSafepointSavedRegisters);
-}
-
-
-void MacroAssembler::PopSafepointRegisters() {
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ldm(ia_w, sp, kSafepointSavedRegisters);
- add(sp, sp, Operand(num_unsaved * kPointerSize));
-}
-
-
-void MacroAssembler::PushSafepointRegistersAndDoubles() {
- // Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
- PushSafepointRegisters();
- sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
- kDoubleSize));
- for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
- vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
- }
-}
-
-
-void MacroAssembler::PopSafepointRegistersAndDoubles() {
- // Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
- for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
- vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
- }
- add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
- kDoubleSize));
- PopSafepointRegisters();
-}
-
-void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
- Register dst) {
- str(src, SafepointRegistersAndDoublesSlot(dst));
-}
-
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
- str(src, SafepointRegisterSlot(dst));
-}
-
-
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- ldr(dst, SafepointRegisterSlot(src));
-}
-
-
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the highest encoding,
- // which means that lowest encodings are closest to the stack pointer.
- ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
- return reg_code;
-}
-
-
-MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
- return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
-}
-
-
-MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
- // Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
- // General purpose registers are pushed last on the stack.
- int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
- int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
- return MemOperand(sp, doubles_size + register_offset);
-}
-
-
-void MacroAssembler::Ldrd(Register dst1, Register dst2,
- const MemOperand& src, Condition cond) {
- ASSERT(src.rm().is(no_reg));
- ASSERT(!dst1.is(lr)); // r14.
- ASSERT_EQ(0, dst1.code() % 2);
- ASSERT_EQ(dst1.code() + 1, dst2.code());
-
- // V8 does not use this addressing mode, so the fallback code
- // below doesn't support it yet.
- ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
-
- // Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
- CpuFeatures::Scope scope(ARMv7);
- ldrd(dst1, dst2, src, cond);
- } else {
- if ((src.am() == Offset) || (src.am() == NegOffset)) {
- MemOperand src2(src);
- src2.set_offset(src2.offset() + 4);
- if (dst1.is(src.rn())) {
- ldr(dst2, src2, cond);
- ldr(dst1, src, cond);
- } else {
- ldr(dst1, src, cond);
- ldr(dst2, src2, cond);
- }
- } else { // PostIndex or NegPostIndex.
- ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
- if (dst1.is(src.rn())) {
- ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
- ldr(dst1, src, cond);
- } else {
- MemOperand src2(src);
- src2.set_offset(src2.offset() - 4);
- ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
- ldr(dst2, src2, cond);
- }
- }
- }
-}
-
-
-void MacroAssembler::Strd(Register src1, Register src2,
- const MemOperand& dst, Condition cond) {
- ASSERT(dst.rm().is(no_reg));
- ASSERT(!src1.is(lr)); // r14.
- ASSERT_EQ(0, src1.code() % 2);
- ASSERT_EQ(src1.code() + 1, src2.code());
-
- // V8 does not use this addressing mode, so the fallback code
- // below doesn't support it yet.
- ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
-
- // Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
- CpuFeatures::Scope scope(ARMv7);
- strd(src1, src2, dst, cond);
- } else {
- MemOperand dst2(dst);
- if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
- dst2.set_offset(dst2.offset() + 4);
- str(src1, dst, cond);
- str(src2, dst2, cond);
- } else { // PostIndex or NegPostIndex.
- ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
- dst2.set_offset(dst2.offset() - 4);
- str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
- str(src2, dst2, cond);
- }
- }
-}
-
-
-void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Compare and move FPSCR flags to the normal condition flags.
- VFPCompareAndLoadFlags(src1, src2, pc, cond);
-}
-
-void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
- const double src2,
- const Condition cond) {
- // Compare and move FPSCR flags to the normal condition flags.
- VFPCompareAndLoadFlags(src1, src2, pc, cond);
-}
-
-
-void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Register fpscr_flags,
- const Condition cond) {
- // Compare and load FPSCR.
- vcmp(src1, src2, cond);
- vmrs(fpscr_flags, cond);
-}
-
-void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const double src2,
- const Register fpscr_flags,
- const Condition cond) {
- // Compare and load FPSCR.
- vcmp(src1, src2, cond);
- vmrs(fpscr_flags, cond);
-}
-
-void MacroAssembler::Vmov(const DwVfpRegister dst,
- const double imm,
- const Register scratch) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- static const DoubleRepresentation minus_zero(-0.0);
- static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value(imm);
- // Handle special values first.
- if (value.bits == zero.bits) {
- vmov(dst, kDoubleRegZero);
- } else if (value.bits == minus_zero.bits) {
- vneg(dst, kDoubleRegZero);
- } else {
- vmov(dst, imm, scratch);
- }
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
- // r0-r3: preserved
- stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- mov(ip, Operand(Smi::FromInt(type)));
- push(ip);
- mov(ip, Operand(CodeObject()));
- push(ip);
- add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP.
-}
-
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- // r0: preserved
- // r1: preserved
- // r2: preserved
-
- // Drop the execution stack down to the frame pointer and restore
- // the caller frame pointer and return address.
- mov(sp, fp);
- ldm(ia_w, sp, fp.bit() | lr.bit());
-}
-
-
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
- // Set up the frame structure on the stack.
- ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
- ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
- ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
- Push(lr, fp);
- mov(fp, Operand(sp)); // Set up new frame pointer.
- // Reserve room for saved entry sp and code object.
- sub(sp, sp, Operand(2 * kPointerSize));
- if (emit_debug_code()) {
- mov(ip, Operand::Zero());
- str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
- }
- mov(ip, Operand(CodeObject()));
- str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
-
- // Save the frame pointer and the context in top.
- mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- str(fp, MemOperand(ip));
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- str(cp, MemOperand(ip));
-
- // Optionally save all double registers.
- if (save_doubles) {
- CpuFeatures::Scope scope(VFP2);
- // Check CPU flags for number of registers, setting the Z condition flag.
- CheckFor32DRegs(ip);
-
- // Push registers d0-d15, and possibly d16-d31, on the stack.
- // If d16-d31 are not pushed, decrease the stack pointer instead.
- vstm(db_w, sp, d16, d31, ne);
- sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
- vstm(db_w, sp, d0, d15);
- // Note that d0 will be accessible at
- // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
- // since the sp slot and code slot were pushed after the fp.
- }
-
- // Reserve place for the return address and stack space and align the frame
- // preparing for calling the runtime function.
- const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
- if (frame_alignment > 0) {
- ASSERT(IsPowerOf2(frame_alignment));
- and_(sp, sp, Operand(-frame_alignment));
- }
-
- // Set the exit frame sp value to point just before the return address
- // location.
- add(ip, sp, Operand(kPointerSize));
- str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
-}
-
-
-void MacroAssembler::InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2) {
- mov(scratch1, Operand(length, LSL, kSmiTagSize));
- LoadRoot(scratch2, map_index);
- str(scratch1, FieldMemOperand(string, String::kLengthOffset));
- mov(scratch1, Operand(String::kEmptyHashField));
- str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
- str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
-int MacroAssembler::ActivationFrameAlignment() {
-#if defined(V8_HOST_ARCH_ARM)
- // Running on the real platform. Use the alignment as mandated by the local
- // environment.
- // Note: This will break if we ever start generating snapshots on one ARM
- // platform for another ARM platform with a different alignment.
- return OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_ARM)
- // If we are using the simulator then we should always align to the expected
- // alignment. As the simulator is used to generate snapshots we do not know
- // if the target platform will need alignment, so this is controlled from a
- // flag.
- return FLAG_sim_stack_alignment;
-#endif // defined(V8_HOST_ARCH_ARM)
-}
-
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count) {
- // Optionally restore all double registers.
- if (save_doubles) {
- CpuFeatures::Scope scope(VFP2);
- // Calculate the stack location of the saved doubles and restore them.
- const int offset = 2 * kPointerSize;
- sub(r3, fp,
- Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
-
- // Check CPU flags for number of registers, setting the Z condition flag.
- CheckFor32DRegs(ip);
-
- // Pop registers d0-d15, and possibly d16-d31, from r3.
- // If d16-d31 are not popped, increase r3 instead.
- vldm(ia_w, r3, d0, d15);
- vldm(ia_w, r3, d16, d31, ne);
- add(r3, r3, Operand(16 * kDoubleSize), LeaveCC, eq);
- }
-
- // Clear top frame.
- mov(r3, Operand::Zero());
- mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- str(r3, MemOperand(ip));
-
- // Restore current context from top and clear it in debug mode.
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- ldr(cp, MemOperand(ip));
-#ifdef DEBUG
- str(r3, MemOperand(ip));
-#endif
-
- // Tear down the exit frame, pop the arguments, and return.
- mov(sp, Operand(fp));
- ldm(ia_w, sp, fp.bit() | lr.bit());
- if (argument_count.is_valid()) {
- add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
- }
-}
-
-void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- if (use_eabi_hardfloat()) {
- Move(dst, d0);
- } else {
- vmov(dst, r0, r1);
- }
-}
-
-
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be r5 to
- // follow the calling convention which requires the call type to be
- // in r5.
- ASSERT(dst.is(r5));
- if (call_kind == CALL_AS_FUNCTION) {
- mov(dst, Operand(Smi::FromInt(1)));
- } else {
- mov(dst, Operand(Smi::FromInt(0)));
- }
-}
-
-
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- bool definitely_matches = false;
- *definitely_mismatches = false;
- Label regular_invoke;
-
- // Check whether the expected and actual arguments count match. If not,
- // setup registers according to contract with ArgumentsAdaptorTrampoline:
- // r0: actual arguments count
- // r1: function (passed through to callee)
- // r2: expected arguments count
- // r3: callee code entry
-
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract if values are
- // passed in registers.
- ASSERT(actual.is_immediate() || actual.reg().is(r0));
- ASSERT(expected.is_immediate() || expected.reg().is(r2));
- ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
-
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- mov(r0, Operand(actual.immediate()));
- const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- if (expected.immediate() == sentinel) {
- // Don't worry about adapting arguments for builtins that
- // don't want that done. Skip adaption code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- *definitely_mismatches = true;
- mov(r2, Operand(expected.immediate()));
- }
- }
- } else {
- if (actual.is_immediate()) {
- cmp(expected.reg(), Operand(actual.immediate()));
- b(eq, &regular_invoke);
- mov(r0, Operand(actual.immediate()));
- } else {
- cmp(expected.reg(), Operand(actual.reg()));
- b(eq, &regular_invoke);
- }
- }
-
- if (!definitely_matches) {
- if (!code_constant.is_null()) {
- mov(r3, Operand(code_constant));
- add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
-
- Handle<Code> adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(r5, call_kind);
- Call(adaptor);
- call_wrapper.AfterCall();
- if (!*definitely_mismatches) {
- b(done);
- }
- } else {
- SetCallKind(r5, call_kind);
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&regular_invoke);
- }
-}
-
-
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag,
- call_wrapper, call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(r5, call_kind);
- Call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(r5, call_kind);
- Jump(code);
- }
-
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, no_reg,
- &done, &definitely_mismatches, flag,
- NullCallWrapper(), call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- SetCallKind(r5, call_kind);
- Call(code, rmode);
- } else {
- SetCallKind(r5, call_kind);
- Jump(code, rmode);
- }
-
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeFunction(Register fun,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Contract with called JS functions requires that function is passed in r1.
- ASSERT(fun.is(r1));
-
- Register expected_reg = r2;
- Register code_reg = r3;
-
- ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- ldr(expected_reg,
- FieldMemOperand(code_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
- mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
- ldr(code_reg,
- FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-
- ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Get the function and setup the context.
- LoadHeapObject(r1, function);
- ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- ParameterCount expected(function->shared()->formal_parameter_count());
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- b(lt, fail);
- cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- b(gt, fail);
-}
-
-
-void MacroAssembler::IsObjectJSStringType(Register object,
- Register scratch,
- Label* fail) {
- ASSERT(kNotStringTag != 0);
-
- ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- tst(scratch, Operand(kIsNotStringMask));
- b(ne, fail);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::DebugBreak() {
- mov(r0, Operand::Zero());
- mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
- CEntryStub ces(1);
- ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
-}
-#endif
-
-
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // For the JSEntry handler, we must preserve r0-r4, r5-r7 are available.
- // We will build up the handler from the bottom by pushing on the stack.
- // Set up the code object (r5) and the state (r6) for pushing.
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
- mov(r5, Operand(CodeObject()));
- mov(r6, Operand(state));
-
- // Push the frame pointer, context, state, and code object.
- if (kind == StackHandler::JS_ENTRY) {
- mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
- mov(ip, Operand::Zero()); // NULL frame pointer.
- stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
- } else {
- stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
- }
-
- // Link the current handler as the next handler.
- mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(r5, MemOperand(r6));
- push(r5);
- // Set this new handler as the current one.
- str(sp, MemOperand(r6));
-}
-
-
-void MacroAssembler::PopTryHandler() {
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- pop(r1);
- mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
- str(r1, MemOperand(ip));
-}
-
-
-void MacroAssembler::JumpToHandlerEntry() {
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- // r0 = exception, r1 = code object, r2 = state.
- ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
- add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
- ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
- add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
- add(pc, r1, Operand(r2, ASR, kSmiTagSize)); // Jump.
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in r0.
- if (!value.is(r0)) {
- mov(r0, value);
- }
- // Drop the stack pointer to the top of the top handler.
- mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(sp, MemOperand(r3));
- // Restore the next handler.
- pop(r2);
- str(r2, MemOperand(r3));
-
- // Get the code object (r1) and state (r2). Restore the context and frame
- // pointer.
- ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
- // or cp.
- tst(cp, cp);
- str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in r0.
- if (!value.is(r0)) {
- mov(r0, value);
- }
- // Drop the stack pointer to the top of the top stack handler.
- mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(sp, MemOperand(r3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label fetch_next, check_kind;
- jmp(&check_kind);
- bind(&fetch_next);
- ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
-
- bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
- tst(r2, Operand(StackHandler::KindField::kMask));
- b(ne, &fetch_next);
-
- // Set the top handler address to next handler past the top ENTRY handler.
- pop(r2);
- str(r2, MemOperand(r3));
- // Get the code object (r1) and state (r2). Clear the context and frame
- // pointer (0 was saved in the handler).
- ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
-
- ASSERT(!holder_reg.is(scratch));
- ASSERT(!holder_reg.is(ip));
- ASSERT(!scratch.is(ip));
-
- // Load current lexical context from the stack frame.
- ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
- cmp(scratch, Operand::Zero());
- Check(ne, "we should not have an empty lexical context");
-#endif
-
- // Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- ldr(scratch, FieldMemOperand(scratch, offset));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
- // Cannot use ip as a temporary in this verification code. Due to the fact
- // that ip is clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the native_context_map.
- ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::native_context should be a native context.");
- pop(holder_reg); // Restore holder.
- }
-
- // Check if both contexts are the same.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- cmp(scratch, Operand(ip));
- b(eq, &same_contexts);
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
- // Cannot use ip as a temporary in this verification code. Due to the fact
- // that ip is clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- mov(holder_reg, ip); // Move ip to its holding place.
- LoadRoot(ip, Heap::kNullValueRootIndex);
- cmp(holder_reg, ip);
- Check(ne, "JSGlobalProxy::context() should not be null.");
-
- ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::native_context should be a native context.");
- // Restore ip is not needed. ip is reloaded below.
- pop(holder_reg); // Restore holder.
- // Restore ip to holder's context.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- }
-
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
- ldr(scratch, FieldMemOperand(scratch, token_offset));
- ldr(ip, FieldMemOperand(ip, token_offset));
- cmp(scratch, Operand(ip));
- b(ne, miss);
-
- bind(&same_contexts);
-}
-
-
-void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
- // First of all we assign the hash seed to scratch.
- LoadRoot(scratch, Heap::kHashSeedRootIndex);
- SmiUntag(scratch);
-
- // Xor original key with a seed.
- eor(t0, t0, Operand(scratch));
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- mvn(scratch, Operand(t0));
- add(t0, scratch, Operand(t0, LSL, 15));
- // hash = hash ^ (hash >> 12);
- eor(t0, t0, Operand(t0, LSR, 12));
- // hash = hash + (hash << 2);
- add(t0, t0, Operand(t0, LSL, 2));
- // hash = hash ^ (hash >> 4);
- eor(t0, t0, Operand(t0, LSR, 4));
- // hash = hash * 2057;
- mov(scratch, Operand(t0, LSL, 11));
- add(t0, t0, Operand(t0, LSL, 3));
- add(t0, t0, scratch);
- // hash = hash ^ (hash >> 16);
- eor(t0, t0, Operand(t0, LSR, 16));
-}
-
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register t0,
- Register t1,
- Register t2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // t0 - holds the untagged key on entry and holds the hash once computed.
- //
- // t1 - used to hold the capacity mask of the dictionary
- //
- // t2 - used for the index into the dictionary.
- Label done;
-
- GetNumberHash(t0, t1);
-
- // Compute the capacity mask.
- ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
- mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
- sub(t1, t1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use t2 for index calculations and keep the hash intact in t0.
- mov(t2, t0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(t2, t2, Operand(t1));
-
- // Scale the index by multiplying by the element size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
- add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
-
- // Check if the key is identical to the name.
- add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
- ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
- cmp(key, Operand(ip));
- if (i != kProbes - 1) {
- b(eq, &done);
- } else {
- b(ne, miss);
- }
- }
-
- bind(&done);
- // Check that the value is a normal property.
- // t2: elements + (index * kPointerSize)
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ldr(t1, FieldMemOperand(t2, kDetailsOffset));
- tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
- b(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- ldr(result, FieldMemOperand(t2, kValueOffset));
-}
-
-
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Operand(0x7091));
- mov(scratch1, Operand(0x7191));
- mov(scratch2, Operand(0x7291));
- }
- jmp(gc_required);
- return;
- }
-
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!scratch1.is(ip));
- ASSERT(!scratch2.is(ip));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- ASSERT_EQ(0, object_size & kObjectAlignmentMask);
-
- // Check relative positions of allocation top and limit addresses.
- // The values must be adjacent in memory to allow the use of LDM.
- // Also, assert that the registers are numbered such that the values
- // are loaded in the correct order.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
- ASSERT(result.code() < ip.code());
-
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- Register obj_size_reg = scratch2;
- mov(topaddr, Operand(new_space_allocation_top));
- Operand obj_size_operand = Operand(object_size);
- if (!obj_size_operand.is_single_instruction(this)) {
- // We are about to steal IP, so we need to load this value first
- mov(obj_size_reg, obj_size_operand);
- }
-
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- ldm(ia, topaddr, result.bit() | ip.bit());
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- ldr(ip, MemOperand(topaddr));
- cmp(result, ip);
- Check(eq, "Unexpected allocation top");
- }
- // Load allocation limit into ip. Result already contains allocation top.
- ldr(ip, MemOperand(topaddr, limit - top));
- }
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
- Label aligned;
- b(eq, &aligned);
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- if (obj_size_operand.is_single_instruction(this)) {
- // We can add the size as an immediate
- add(scratch2, result, obj_size_operand, SetCC);
- } else {
- // Doesn't fit in an immediate, we have to use the register
- add(scratch2, result, obj_size_reg, SetCC);
- }
- b(cs, gc_required);
- cmp(scratch2, Operand(ip));
- b(hi, gc_required);
- str(scratch2, MemOperand(topaddr));
-
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- add(result, result, Operand(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Operand(0x7091));
- mov(scratch1, Operand(0x7191));
- mov(scratch2, Operand(0x7291));
- }
- jmp(gc_required);
- return;
- }
-
- // Assert that the register arguments are different and that none of
- // them are ip. ip is used explicitly in the code generated below.
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!object_size.is(ip));
- ASSERT(!result.is(ip));
- ASSERT(!scratch1.is(ip));
- ASSERT(!scratch2.is(ip));
-
- // Check relative positions of allocation top and limit addresses.
- // The values must be adjacent in memory to allow the use of LDM.
- // Also, assert that the registers are numbered such that the values
- // are loaded in the correct order.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
- ASSERT(result.code() < ip.code());
-
- // Set up allocation top address.
- Register topaddr = scratch1;
- mov(topaddr, Operand(new_space_allocation_top));
-
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- ldm(ia, topaddr, result.bit() | ip.bit());
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- ldr(ip, MemOperand(topaddr));
- cmp(result, ip);
- Check(eq, "Unexpected allocation top");
- }
- // Load allocation limit into ip. Result already contains allocation top.
- ldr(ip, MemOperand(topaddr, limit - top));
- }
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
- Label aligned;
- b(eq, &aligned);
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top. Object size may be in words so a shift is
- // required to get the number of bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
- } else {
- add(scratch2, result, Operand(object_size), SetCC);
- }
- b(cs, gc_required);
- cmp(scratch2, Operand(ip));
- b(hi, gc_required);
-
- // Update allocation top. result temporarily holds the new top.
- if (emit_debug_code()) {
- tst(scratch2, Operand(kObjectAlignmentMask));
- Check(eq, "Unaligned allocation in new space");
- }
- str(scratch2, MemOperand(topaddr));
-
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- add(result, result, Operand(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::UndoAllocationInNewSpace(Register object,
- Register scratch) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- and_(object, object, Operand(~kHeapObjectTagMask));
-#ifdef DEBUG
- // Check that the object un-allocated is below the current top.
- mov(scratch, Operand(new_space_allocation_top));
- ldr(scratch, MemOperand(scratch));
- cmp(object, scratch);
- Check(lt, "Undo allocation of non allocated memory");
-#endif
- // Write the address of the object to un-allocate as the current top.
- mov(scratch, Operand(new_space_allocation_top));
- str(object, MemOperand(scratch));
-}
-
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
- add(scratch1, scratch1,
- Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
- and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate two-byte string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- ASSERT(kCharSize == 1);
- add(scratch1, length,
- Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
- and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate ASCII string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kConsStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::CompareObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type) {
- ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(map, type_reg, type);
-}
-
-
-void MacroAssembler::CompareInstanceType(Register map,
- Register type_reg,
- InstanceType type) {
- ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- cmp(type_reg, Operand(type));
-}
-
-
-void MacroAssembler::CompareRoot(Register obj,
- Heap::RootListIndex index) {
- ASSERT(!obj.is(ip));
- LoadRoot(ip, index);
- cmp(obj, ip);
-}
-
-
-void MacroAssembler::CheckFastElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
- b(hi, fail);
-}
-
-
-void MacroAssembler::CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- b(ls, fail);
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
- b(hi, fail);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- b(hi, fail);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail,
- int elements_offset) {
- Label smi_value, maybe_nan, have_double_value, is_nan, done;
- Register mantissa_reg = scratch2;
- Register exponent_reg = scratch3;
-
- // Handle smi values specially.
- JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- CheckMap(value_reg,
- scratch1,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
- ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- cmp(exponent_reg, scratch1);
- b(ge, &maybe_nan);
-
- ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- bind(&have_double_value);
- add(scratch1, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- str(mantissa_reg, FieldMemOperand(
- scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
- uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
- sizeof(kHoleNanLower32);
- str(exponent_reg, FieldMemOperand(scratch1, offset));
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- b(gt, &is_nan);
- ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- cmp(mantissa_reg, Operand::Zero());
- b(eq, &have_double_value);
- bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
- jmp(&have_double_value);
-
- bind(&smi_value);
- add(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
- elements_offset));
- add(scratch1, scratch1,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- // scratch1 is now effective address of the double element
-
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP2)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
- Register untagged_value = elements_reg;
- SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(this,
- untagged_value,
- destination,
- d0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- s2);
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP2);
- vstr(d0, scratch1, 0);
- } else {
- str(mantissa_reg, MemOperand(scratch1, 0));
- str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
- }
- bind(&done);
-}
-
-
-void MacroAssembler::CompareMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
- ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMap(scratch, map, early_success, mode);
-}
-
-
-void MacroAssembler::CompareMap(Register obj_map,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
- cmp(obj_map, Operand(map));
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- b(eq, early_success);
- cmp(obj_map, Operand(Handle<Map>(current_map)));
- }
- }
- }
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- Label success;
- CompareMap(obj, scratch, map, &success, mode);
- b(ne, fail);
- bind(&success);
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- LoadRoot(ip, index);
- cmp(scratch, ip);
- b(ne, fail);
-}
-
-
-void MacroAssembler::DispatchMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type) {
- Label fail;
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, &fail);
- }
- ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- mov(ip, Operand(map));
- cmp(scratch, ip);
- Jump(success, RelocInfo::CODE_TARGET, eq);
- bind(&fail);
-}
-
-
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
- b(ne, miss);
-
- if (miss_on_bound_function) {
- ldr(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- ldr(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- tst(scratch,
- Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
- b(ne, miss);
- }
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
- b(ne, &non_instance);
-
- // Get the prototype or initial map from the function.
- ldr(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- cmp(result, ip);
- b(eq, miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CompareObjectType(result, scratch, scratch, MAP_TYPE);
- b(ne, &done);
-
- // Get the prototype from the initial map.
- ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
-
- // All done.
- bind(&done);
-}
-
-
-void MacroAssembler::CallStub(CodeStub* stub,
- TypeFeedbackId ast_id,
- Condition cond) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
-}
-
-
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- int stack_space) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate()),
- next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate()),
- next_address);
-
- // Allocate HandleScope in callee-save registers.
- mov(r7, Operand(next_address));
- ldr(r4, MemOperand(r7, kNextOffset));
- ldr(r5, MemOperand(r7, kLimitOffset));
- ldr(r6, MemOperand(r7, kLevelOffset));
- add(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, r0);
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub;
- stub.GenerateCall(this, function);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, r0);
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
-
- // If result is non-zero, dereference to get the result value
- // otherwise set it to undefined.
- cmp(r0, Operand::Zero());
- LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- ldr(r0, MemOperand(r0), ne);
-
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- str(r4, MemOperand(r7, kNextOffset));
- if (emit_debug_code()) {
- ldr(r1, MemOperand(r7, kLevelOffset));
- cmp(r1, r6);
- Check(eq, "Unexpected level after return from api call");
- }
- sub(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
- ldr(ip, MemOperand(r7, kLimitOffset));
- cmp(r5, ip);
- b(ne, &delete_allocated_handles);
-
- // Check if the function scheduled an exception.
- bind(&leave_exit_frame);
- LoadRoot(r4, Heap::kTheHoleValueRootIndex);
- mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
- ldr(r5, MemOperand(ip));
- cmp(r4, r5);
- b(ne, &promote_scheduled_exception);
-
- // LeaveExitFrame expects unwind space to be in a register.
- mov(r4, Operand(stack_space));
- LeaveExitFrame(false, r4);
- mov(pc, lr);
-
- bind(&promote_scheduled_exception);
- TailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
-
- // HandleScope limit has changed. Delete allocated extensions.
- bind(&delete_allocated_handles);
- str(r5, MemOperand(r7, kLimitOffset));
- mov(r4, r0);
- PrepareCallCFunction(1, r5);
- mov(r0, Operand(ExternalReference::isolate_address()));
- CallCFunction(
- ExternalReference::delete_handle_scope_extensions(isolate()), 1);
- mov(r0, r4);
- jmp(&leave_exit_frame);
-}
-
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- add(sp, sp, Operand(num_arguments * kPointerSize));
- }
- LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
- Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- mov(index, Operand(hash, LSL, kSmiTagSize));
-}
-
-
-void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
- Register outHighReg,
- Register outLowReg) {
- // ARMv7 VFP3 instructions to implement integer to double conversion.
- mov(r7, Operand(inReg, ASR, kSmiTagSize));
- vmov(s15, r7);
- vcvt_f64_s32(d7, s15);
- vmov(outLowReg, outHighReg, d7);
-}
-
-
-void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
- DwVfpRegister result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- SwVfpRegister scratch3,
- Label* not_number,
- ObjectToDoubleFlags flags) {
- Label done;
- if ((flags & OBJECT_NOT_SMI) == 0) {
- Label not_smi;
- JumpIfNotSmi(object, &not_smi);
- // Remove smi tag and convert to double.
- mov(scratch1, Operand(object, ASR, kSmiTagSize));
- vmov(scratch3, scratch1);
- vcvt_f64_s32(result, scratch3);
- b(&done);
- bind(&not_smi);
- }
- // Check for heap number and load double value from it.
- ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
- sub(scratch2, object, Operand(kHeapObjectTag));
- cmp(scratch1, heap_number_map);
- b(ne, not_number);
- if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
- // If exponent is all ones the number is either a NaN or +/-Infinity.
- ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- Sbfx(scratch1,
- scratch1,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // All-one value sign extend to -1.
- cmp(scratch1, Operand(-1));
- b(eq, not_number);
- }
- vldr(result, scratch2, HeapNumber::kValueOffset);
- bind(&done);
-}
-
-
-void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
- DwVfpRegister value,
- Register scratch1,
- SwVfpRegister scratch2) {
- mov(scratch1, Operand(smi, ASR, kSmiTagSize));
- vmov(scratch2, scratch1);
- vcvt_f64_s32(value, scratch2);
-}
-
-
-// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
-// 32bits signed integer range.
-void MacroAssembler::ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- DwVfpRegister double_scratch,
- Label *not_int32) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- sub(scratch, source, Operand(kHeapObjectTag));
- vldr(double_scratch, scratch, HeapNumber::kValueOffset);
- vcvt_s32_f64(double_scratch.low(), double_scratch);
- vmov(dest, double_scratch.low());
- // Signed vcvt instruction will saturate to the minimum (0x80000000) or
- // maximun (0x7fffffff) signed 32bits integer when the double is out of
- // range. When substracting one, the minimum signed integer becomes the
- // maximun signed integer.
- sub(scratch, dest, Operand(1));
- cmp(scratch, Operand(LONG_MAX - 1));
- // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
- b(ge, not_int32);
- } else {
- // This code is faster for doubles that are in the ranges -0x7fffffff to
- // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
- // the range of signed int32 values that are not Smis. Jumps to the label
- // 'not_int32' if the double isn't in the range -0x80000000.0 to
- // 0x80000000.0 (excluding the endpoints).
- Label right_exponent, done;
- // Get exponent word.
- ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- Ubfx(scratch2,
- scratch,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // Load dest with zero. We use this either for the final shift or
- // for the answer.
- mov(dest, Operand::Zero());
- // Check whether the exponent matches a 32 bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
- // the exponent that we are fastest at and also the highest exponent we can
- // handle here.
- const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
- // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
- // split it up to avoid a constant pool entry. You can't do that in general
- // for cmp because of the overflow flag, but we know the exponent is in the
- // range 0-2047 so there is no overflow.
- int fudge_factor = 0x400;
- sub(scratch2, scratch2, Operand(fudge_factor));
- cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- b(eq, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- b(gt, not_int32);
-
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
- sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
- // Dest already has a Smi zero.
- b(lt, &done);
-
- // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
- // get how much to shift down.
- rsb(dest, scratch2, Operand(30));
-
- bind(&right_exponent);
- // Get the top bits of the mantissa.
- and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to leave the sign bit 0 so we subtract 2 bits from the shift
- // distance.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- mov(scratch2, Operand(scratch2, LSL, shift_distance));
- // Put sign in zero flag.
- tst(scratch, Operand(HeapNumber::kSignMask));
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the last 10 bits.
- orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
- // Move down according to the exponent.
- mov(dest, Operand(scratch, LSR, dest));
- // Fix sign if sign bit was set.
- rsb(dest, dest, Operand::Zero(), LeaveCC, ne);
- bind(&done);
- }
-}
-
-
-void MacroAssembler::TryFastDoubleToInt32(Register result,
- DwVfpRegister double_input,
- DwVfpRegister double_scratch,
- Label* done) {
- ASSERT(!double_input.is(double_scratch));
-
- vcvt_s32_f64(double_scratch.low(), double_input);
- vmov(result, double_scratch.low());
- vcvt_f64_s32(double_scratch, double_scratch.low());
- VFPCompareAndSetFlags(double_input, double_scratch);
- b(eq, done);
-}
-
-
-void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
- Register result,
- DwVfpRegister double_input,
- Register scratch,
- DwVfpRegister double_scratch,
- CheckForInexactConversion check_inexact) {
- ASSERT(!result.is(scratch));
- ASSERT(!double_input.is(double_scratch));
-
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatures::Scope scope(VFP2);
- Register prev_fpscr = result;
- Label done;
-
- // Test for values that can be exactly represented as a signed 32-bit integer.
- TryFastDoubleToInt32(result, double_input, double_scratch, &done);
-
- // Convert to integer, respecting rounding mode.
- int32_t check_inexact_conversion =
- (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
-
- // Set custom FPCSR:
- // - Set rounding mode.
- // - Clear vfp cumulative exception flags.
- // - Make sure Flush-to-zero mode control bit is unset.
- vmrs(prev_fpscr);
- bic(scratch,
- prev_fpscr,
- Operand(kVFPExceptionMask |
- check_inexact_conversion |
- kVFPRoundingModeMask |
- kVFPFlushToZeroMask));
- // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
- if (rounding_mode != kRoundToNearest) {
- orr(scratch, scratch, Operand(rounding_mode));
- }
- vmsr(scratch);
-
- // Convert the argument to an integer.
- vcvt_s32_f64(double_scratch.low(),
- double_input,
- (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
- : kFPSCRRounding);
-
- // Retrieve FPSCR.
- vmrs(scratch);
- // Restore FPSCR.
- vmsr(prev_fpscr);
- // Move the converted value into the result register.
- vmov(result, double_scratch.low());
- // Check for vfp exceptions.
- tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
-
- bind(&done);
-}
-
-
-void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch) {
- Label done, normal_exponent, restore_sign;
-
- // Extract the biased exponent in result.
- Ubfx(result,
- input_high,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Check for Infinity and NaNs, which should return 0.
- cmp(result, Operand(HeapNumber::kExponentMask));
- mov(result, Operand::Zero(), LeaveCC, eq);
- b(eq, &done);
-
- // Express exponent as delta to (number of mantissa bits + 31).
- sub(result,
- result,
- Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
- SetCC);
-
- // If the delta is strictly positive, all bits would be shifted away,
- // which means that we can return 0.
- b(le, &normal_exponent);
- mov(result, Operand::Zero());
- b(&done);
-
- bind(&normal_exponent);
- const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
- // Calculate shift.
- add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
-
- // Save the sign.
- Register sign = result;
- result = no_reg;
- and_(sign, input_high, Operand(HeapNumber::kSignMask));
-
- // Set the implicit 1 before the mantissa part in input_high.
- orr(input_high,
- input_high,
- Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- // Shift the mantissa bits to the correct position.
- // We don't need to clear non-mantissa bits as they will be shifted away.
- // If they weren't, it would mean that the answer is in the 32bit range.
- mov(input_high, Operand(input_high, LSL, scratch));
-
- // Replace the shifted bits with bits from the lower mantissa word.
- Label pos_shift, shift_done;
- rsb(scratch, scratch, Operand(32), SetCC);
- b(&pos_shift, ge);
-
- // Negate scratch.
- rsb(scratch, scratch, Operand::Zero());
- mov(input_low, Operand(input_low, LSL, scratch));
- b(&shift_done);
-
- bind(&pos_shift);
- mov(input_low, Operand(input_low, LSR, scratch));
-
- bind(&shift_done);
- orr(input_high, input_high, Operand(input_low));
- // Restore sign if necessary.
- cmp(sign, Operand::Zero());
- result = sign;
- sign = no_reg;
- rsb(result, input_high, Operand::Zero(), LeaveCC, ne);
- mov(result, input_high, LeaveCC, eq);
- bind(&done);
-}
-
-
-void MacroAssembler::EmitECMATruncate(Register result,
- DwVfpRegister double_input,
- DwVfpRegister double_scratch,
- Register scratch,
- Register input_high,
- Register input_low) {
- CpuFeatures::Scope scope(VFP2);
- ASSERT(!input_high.is(result));
- ASSERT(!input_low.is(result));
- ASSERT(!input_low.is(input_high));
- ASSERT(!scratch.is(result) &&
- !scratch.is(input_high) &&
- !scratch.is(input_low));
- ASSERT(!double_input.is(double_scratch));
-
- Label done;
-
- // Test if the value can be exactly represented as a signed integer.
- vcvt_s32_f64(double_scratch.low(), double_input);
- vmov(result, double_scratch.low());
- vcvt_f64_s32(double_scratch, double_scratch.low());
- // Note: this comparison is cheaper than reading the FPSCR exception bits.
- VFPCompareAndSetFlags(double_input, double_scratch);
- b(eq, &done);
-
- // Check the exception flags. If they are not set, we are done.
- // If they are set, it could be because of the conversion above, or because
- // they were set before this code.
- vmrs(scratch);
- tst(scratch, Operand(kVFPOverflowExceptionBit |
- kVFPUnderflowExceptionBit |
- kVFPInvalidOpExceptionBit));
- b(eq, &done);
-
- // Clear cumulative exception flags.
- bic(scratch, scratch, Operand(kVFPExceptionMask));
- vmsr(scratch);
- // Try a conversion to a signed integer.
- vcvt_s32_f64(double_scratch.low(), double_input);
- // Retrieve the FPSCR.
- vmrs(scratch);
- // Check for overflow and NaNs.
- tst(scratch, Operand(kVFPOverflowExceptionBit |
- kVFPUnderflowExceptionBit |
- kVFPInvalidOpExceptionBit));
- // If we had no exceptions we are done.
- b(eq, &done);
-
- // Load the double value and perform a manual truncation.
- vmov(input_low, input_high, double_input);
- EmitOutOfInt32RangeTruncate(result,
- input_high,
- input_low,
- scratch);
- bind(&done);
-}
-
-
-void MacroAssembler::GetLeastBitsFromSmi(Register dst,
- Register src,
- int num_least_bits) {
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
- ubfx(dst, src, kSmiTagSize, num_least_bits);
- } else {
- mov(dst, Operand(src, ASR, kSmiTagSize));
- and_(dst, dst, Operand((1 << num_least_bits) - 1));
- }
-}
-
-
-void MacroAssembler::GetLeastBitsFromInt32(Register dst,
- Register src,
- int num_least_bits) {
- and_(dst, src, Operand((1 << num_least_bits) - 1));
-}
-
-
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
- // All parameters are on the stack. r0 has the return value after call.
-
- // If the expected number of arguments of the runtime function is
- // constant, we check that the actual number of arguments match the
- // expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r0, Operand(num_arguments));
- mov(r1, Operand(ExternalReference(f, isolate())));
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- mov(r0, Operand(function->nargs));
- mov(r1, Operand(ExternalReference(function, isolate())));
- SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
- ? kSaveFPRegs
- : kDontSaveFPRegs;
- CEntryStub stub(1, mode);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallExternalReference(const ExternalReference& ext,
- int num_arguments) {
- mov(r0, Operand(num_arguments));
- mov(r1, Operand(ext));
-
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r0, Operand(num_arguments));
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
-#if defined(__thumb__)
- // Thumb mode builtin.
- ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
-#endif
- mov(r1, Operand(builtin));
- CEntryStub stub(1);
- Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- GetBuiltinEntry(r2, id);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(r2));
- SetCallKind(r5, CALL_AS_METHOD);
- Call(r2);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(r5, CALL_AS_METHOD);
- Jump(r2);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
- // Load the builtins object into target register.
- ldr(target,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
- // Load the JavaScript builtin function from the builtins object.
- ldr(target, FieldMemOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(r1));
- GetBuiltinFunction(r1, id);
- // Load the code entry point from the builtins object.
- ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-}
-
-
-void MacroAssembler::SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(scratch1, Operand(value));
- mov(scratch2, Operand(ExternalReference(counter)));
- str(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(scratch2, Operand(ExternalReference(counter)));
- ldr(scratch1, MemOperand(scratch2));
- add(scratch1, scratch1, Operand(value));
- str(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(scratch2, Operand(ExternalReference(counter)));
- ldr(scratch1, MemOperand(scratch2));
- sub(scratch1, scratch1, Operand(value));
- str(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::Assert(Condition cond, const char* msg) {
- if (emit_debug_code())
- Check(cond, msg);
-}
-
-
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index) {
- if (emit_debug_code()) {
- LoadRoot(ip, index);
- cmp(reg, ip);
- Check(eq, "Register did not match expected root");
- }
-}
-
-
-void MacroAssembler::AssertFastElements(Register elements) {
- if (emit_debug_code()) {
- ASSERT(!elements.is(ip));
- Label ok;
- push(elements);
- ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- cmp(elements, ip);
- b(eq, &ok);
- LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
- cmp(elements, ip);
- b(eq, &ok);
- LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
- cmp(elements, ip);
- b(eq, &ok);
- Abort("JSObject with fast elements map has slow elements");
- bind(&ok);
- pop(elements);
- }
-}
-
-
-void MacroAssembler::Check(Condition cond, const char* msg) {
- Label L;
- b(cond, &L);
- Abort(msg);
- // will not return here
- bind(&L);
-}
-
-
-void MacroAssembler::Abort(const char* msg) {
- Label abort_start;
- bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
-#ifdef DEBUG
- if (msg != NULL) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
-#endif
-
- mov(r0, Operand(p0));
- push(r0);
- mov(r0, Operand(Smi::FromInt(p1 - p0)));
- push(r0);
- // Disable stub call restrictions to always allow calls to abort.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
- } else {
- CallRuntime(Runtime::kAbort, 2);
- }
- // will not return here
- if (is_const_pool_blocked()) {
- // If the calling code cares about the exact number of
- // instructions generated, we insert padding here to keep the size
- // of the Abort macro constant.
- static const int kExpectedAbortInstructions = 10;
- int abort_instructions = InstructionsGeneratedSince(&abort_start);
- ASSERT(abort_instructions <= kExpectedAbortInstructions);
- while (abort_instructions++ < kExpectedAbortInstructions) {
- nop();
- }
- }
-}
-
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- for (int i = 1; i < context_chain_length; i++) {
- ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- }
- } else {
- // Slot is in the current function context. Move it into the
- // destination register in case we store into it (the write barrier
- // cannot be allowed to destroy the context in esi).
- mov(dst, cp);
- }
-}
-
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- // Load the global or builtins object from the current context.
- ldr(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check that the function's map is the same as the expected cached map.
- ldr(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- ldr(ip, FieldMemOperand(scratch, offset));
- cmp(map_in_out, ip);
- b(ne, no_map_match);
-
- // Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- ldr(map_in_out, FieldMemOperand(scratch, offset));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- ldr(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- ldr(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- ldr(function, FieldMemOperand(function,
- GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- ldr(function, MemOperand(function, Context::SlotOffset(index)));
-}
-
-
-void MacroAssembler::LoadArrayFunction(Register function) {
- // Load the global or builtins object from the current context.
- ldr(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the global context from the global or builtins object.
- ldr(function,
- FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the array function from the native context.
- ldr(function,
- MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch) {
- // Load the initial map. The global functions all have initial maps.
- ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
- b(&ok);
- bind(&fail);
- Abort("Global functions must have initial map");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
- Register reg,
- Register scratch,
- Label* not_power_of_two_or_zero) {
- sub(scratch, reg, Operand(1), SetCC);
- b(mi, not_power_of_two_or_zero);
- tst(scratch, reg);
- b(ne, not_power_of_two_or_zero);
-}
-
-
-void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
- Register reg,
- Register scratch,
- Label* zero_and_neg,
- Label* not_power_of_two) {
- sub(scratch, reg, Operand(1), SetCC);
- b(mi, zero_and_neg);
- tst(scratch, reg);
- b(ne, not_power_of_two);
-}
-
-
-void MacroAssembler::JumpIfNotBothSmi(Register reg1,
- Register reg2,
- Label* on_not_both_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(reg1, Operand(kSmiTagMask));
- tst(reg2, Operand(kSmiTagMask), eq);
- b(ne, on_not_both_smi);
-}
-
-
-void MacroAssembler::UntagAndJumpIfSmi(
- Register dst, Register src, Label* smi_case) {
- STATIC_ASSERT(kSmiTag == 0);
- mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
- b(cc, smi_case); // Shifter carry is not set for a smi.
-}
-
-
-void MacroAssembler::UntagAndJumpIfNotSmi(
- Register dst, Register src, Label* non_smi_case) {
- STATIC_ASSERT(kSmiTag == 0);
- mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
- b(cs, non_smi_case); // Shifter carry is set for a non-smi.
-}
-
-
-void MacroAssembler::JumpIfEitherSmi(Register reg1,
- Register reg2,
- Label* on_either_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(reg1, Operand(kSmiTagMask));
- tst(reg2, Operand(kSmiTagMask), ne);
- b(eq, on_either_smi);
-}
-
-
-void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Check(ne, "Operand is a smi");
- }
-}
-
-
-void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Check(eq, "Operand is not smi");
- }
-}
-
-
-void MacroAssembler::AssertString(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Check(ne, "Operand is a smi and not a string");
- push(object);
- ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
- pop(object);
- Check(lo, "Operand is not a string");
- }
-}
-
-
-
-void MacroAssembler::AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- if (emit_debug_code()) {
- CompareRoot(src, root_value_index);
- Check(eq, message);
- }
-}
-
-
-void MacroAssembler::JumpIfNotHeapNumber(Register object,
- Register heap_number_map,
- Register scratch,
- Label* on_not_heap_number) {
- ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- cmp(scratch, heap_number_map);
- b(ne, on_not_heap_number);
-}
-
-
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Test that both first and second are sequential ASCII strings.
- // Assume that they are non-smis.
- ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
- JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
-}
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Check that neither is a smi.
- STATIC_ASSERT(kSmiTag == 0);
- and_(scratch1, first, Operand(second));
- JumpIfSmi(scratch1, failure);
- JumpIfNonSmisNotBothSequentialAsciiStrings(first,
- second,
- scratch1,
- scratch2,
- failure);
-}
-
-
-// Allocates a heap number or jumps to the need_gc label if the young space
-// is full and a scavenge is needed.
-void MacroAssembler::AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required,
- TaggingMode tagging_mode) {
- // Allocate an object in the heap for the heap number and tag it as a heap
- // object.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- tagging_mode == TAG_RESULT ? TAG_OBJECT :
- NO_ALLOCATION_FLAGS);
-
- // Store heap number map in the allocated object.
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- if (tagging_mode == TAG_RESULT) {
- str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
- } else {
- str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
-}
-
-
-void MacroAssembler::AllocateHeapNumberWithValue(Register result,
- DwVfpRegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required) {
- AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
- sub(scratch1, result, Operand(kHeapObjectTag));
- vstr(value, scratch1, HeapNumber::kValueOffset);
-}
-
-
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst,
- Register src,
- RegList temps,
- int field_count) {
- // At least one bit set in the first 15 registers.
- ASSERT((temps & ((1 << 15) - 1)) != 0);
- ASSERT((temps & dst.bit()) == 0);
- ASSERT((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
-
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < 15; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.set_code(i);
- break;
- }
- }
- ASSERT(!tmp.is(no_reg));
-
- for (int i = 0; i < field_count; i++) {
- ldr(tmp, FieldMemOperand(src, i * kPointerSize));
- str(tmp, FieldMemOperand(dst, i * kPointerSize));
- }
-}
-
-
-void MacroAssembler::CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
-
- // Align src before copying in word size chunks.
- bind(&align_loop);
- cmp(length, Operand::Zero());
- b(eq, &done);
- bind(&align_loop_1);
- tst(src, Operand(kPointerSize - 1));
- b(eq, &word_loop);
- ldrb(scratch, MemOperand(src, 1, PostIndex));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- sub(length, length, Operand(1), SetCC);
- b(ne, &byte_loop_1);
-
- // Copy bytes in word size chunks.
- bind(&word_loop);
- if (emit_debug_code()) {
- tst(src, Operand(kPointerSize - 1));
- Assert(eq, "Expecting alignment for CopyBytes");
- }
- cmp(length, Operand(kPointerSize));
- b(lt, &byte_loop);
- ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
- if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
- str(scratch, MemOperand(dst, kPointerSize, PostIndex));
- } else {
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- }
- sub(length, length, Operand(kPointerSize));
- b(&word_loop);
-
- // Copy the last bytes if any left.
- bind(&byte_loop);
- cmp(length, Operand::Zero());
- b(eq, &done);
- bind(&byte_loop_1);
- ldrb(scratch, MemOperand(src, 1, PostIndex));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- sub(length, length, Operand(1), SetCC);
- b(ne, &byte_loop_1);
- bind(&done);
-}
-
-
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler) {
- Label loop, entry;
- b(&entry);
- bind(&loop);
- str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
- bind(&entry);
- cmp(start_offset, end_offset);
- b(lt, &loop);
-}
-
-
-void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
- Register source, // Input.
- Register scratch) {
- ASSERT(!zeros.is(source) || !source.is(scratch));
- ASSERT(!zeros.is(scratch));
- ASSERT(!scratch.is(ip));
- ASSERT(!source.is(ip));
- ASSERT(!zeros.is(ip));
-#ifdef CAN_USE_ARMV5_INSTRUCTIONS
- clz(zeros, source); // This instruction is only supported after ARM5.
-#else
- // Order of the next two lines is important: zeros register
- // can be the same as source register.
- Move(scratch, source);
- mov(zeros, Operand::Zero());
- // Top 16.
- tst(scratch, Operand(0xffff0000));
- add(zeros, zeros, Operand(16), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
- // Top 8.
- tst(scratch, Operand(0xff000000));
- add(zeros, zeros, Operand(8), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
- // Top 4.
- tst(scratch, Operand(0xf0000000));
- add(zeros, zeros, Operand(4), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
- // Top 2.
- tst(scratch, Operand(0xc0000000));
- add(zeros, zeros, Operand(2), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
- // Top bit.
- tst(scratch, Operand(0x80000000u));
- add(zeros, zeros, Operand(1), LeaveCC, eq);
-#endif
-}
-
-
-void MacroAssembler::CheckFor32DRegs(Register scratch) {
- mov(scratch, Operand(ExternalReference::cpu_features()));
- ldr(scratch, MemOperand(scratch));
- tst(scratch, Operand(1u << VFP32DREGS));
-}
-
-
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- and_(scratch1, first, Operand(kFlatAsciiStringMask));
- and_(scratch2, second, Operand(kFlatAsciiStringMask));
- cmp(scratch1, Operand(kFlatAsciiStringTag));
- // Ignore second test if first test failed.
- cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
- b(ne, failure);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- and_(scratch, type, Operand(kFlatAsciiStringMask));
- cmp(scratch, Operand(kFlatAsciiStringTag));
- b(ne, failure);
-}
-
-static const int kRegisterPassedArguments = 4;
-
-
-int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments) {
- int stack_passed_words = 0;
- if (use_eabi_hardfloat()) {
- // In the hard floating point calling convention, we can use
- // all double registers to pass doubles.
- if (num_double_arguments > DoubleRegister::NumRegisters()) {
- stack_passed_words +=
- 2 * (num_double_arguments - DoubleRegister::NumRegisters());
- }
- } else {
- // In the soft floating point calling convention, every double
- // argument is passed using two registers.
- num_reg_arguments += 2 * num_double_arguments;
- }
- // Up to four simple arguments are passed in registers r0..r3.
- if (num_reg_arguments > kRegisterPassedArguments) {
- stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
- }
- return stack_passed_words;
-}
-
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
- int num_double_arguments,
- Register scratch) {
- int frame_alignment = ActivationFrameAlignment();
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
- if (frame_alignment > kPointerSize) {
- // Make stack end at alignment and make room for num_arguments - 4 words
- // and the original value of sp.
- mov(scratch, sp);
- sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frame_alignment));
- and_(sp, sp, Operand(-frame_alignment));
- str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
- } else {
- sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
- }
-}
-
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
- Register scratch) {
- PrepareCallCFunction(num_reg_arguments, 0, scratch);
-}
-
-
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- if (use_eabi_hardfloat()) {
- Move(d0, dreg);
- } else {
- vmov(r0, r1, dreg);
- }
-}
-
-
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
- DwVfpRegister dreg2) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- if (use_eabi_hardfloat()) {
- if (dreg2.is(d0)) {
- ASSERT(!dreg1.is(d1));
- Move(d1, dreg2);
- Move(d0, dreg1);
- } else {
- Move(d0, dreg1);
- Move(d1, dreg2);
- }
- } else {
- vmov(r0, r1, dreg1);
- vmov(r2, r3, dreg2);
- }
-}
-
-
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
- Register reg) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- if (use_eabi_hardfloat()) {
- Move(d0, dreg);
- Move(r0, reg);
- } else {
- Move(r2, reg);
- vmov(r0, r1, dreg);
- }
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments) {
- mov(ip, Operand(function));
- CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
-}
-
-
-void MacroAssembler::CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
- ASSERT(has_frame());
- // Make sure that the stack is aligned before calling a C function unless
- // running in the simulator. The simulator has its own alignment check which
- // provides more information.
-#if defined(V8_HOST_ARCH_ARM)
- if (emit_debug_code()) {
- int frame_alignment = OS::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
- Label alignment_as_expected;
- tst(sp, Operand(frame_alignment_mask));
- b(eq, &alignment_as_expected);
- // Don't use Check here, as it will call Runtime_Abort possibly
- // re-entering here.
- stop("Unexpected alignment");
- bind(&alignment_as_expected);
- }
- }
-#endif
-
- // Just call directly. The function called cannot cause a GC, or
- // allow preemption, so the return address in the link register
- // stays correct.
- Call(function);
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
- if (ActivationFrameAlignment() > kPointerSize) {
- ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
- } else {
- add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
- }
-}
-
-
-void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
- Register result) {
- const uint32_t kLdrOffsetMask = (1 << 12) - 1;
- const int32_t kPCRegOffset = 2 * kPointerSize;
- ldr(result, MemOperand(ldr_location));
- if (emit_debug_code()) {
- // Check that the instruction is a ldr reg, [pc + offset] .
- and_(result, result, Operand(kLdrPCPattern));
- cmp(result, Operand(kLdrPCPattern));
- Check(eq, "The instruction to patch should be a load from pc.");
- // Result was clobbered. Restore it.
- ldr(result, MemOperand(ldr_location));
- }
- // Get the address of the constant.
- and_(result, result, Operand(kLdrOffsetMask));
- add(result, ldr_location, Operand(result));
- add(result, result, Operand(kPCRegOffset));
-}
-
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met) {
- Bfc(scratch, object, 0, kPageSizeBits);
- ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
- tst(scratch, Operand(mask));
- b(cc, condition_met);
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* has_color,
- int first_bit,
- int second_bit) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- tst(ip, Operand(mask_scratch));
- b(first_bit == 1 ? eq : ne, &other_color);
- // Shift left 1 by adding.
- add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
- b(eq, &word_boundary);
- tst(ip, Operand(mask_scratch));
- b(second_bit == 1 ? ne : eq, has_color);
- jmp(&other_color);
-
- bind(&word_boundary);
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
- tst(ip, Operand(1));
- b(second_bit == 1 ? ne : eq, has_color);
- bind(&other_color);
-}
-
-
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object) {
- Label is_data_object;
- ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- b(eq, &is_data_object);
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- b(ne, not_data_object);
- bind(&is_data_object);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
- and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
- Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
- const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
- add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
- mov(ip, Operand(1));
- mov(mask_reg, Operand(ip, LSL, mask_reg));
-}
-
-
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- Label done;
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- tst(mask_scratch, load_scratch);
- b(ne, &done);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // LSL may overflow, making the check conservative.
- tst(load_scratch, Operand(mask_scratch, LSL, 1));
- b(eq, &ok);
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object;
-
- // Check for heap-number
- ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
- b(eq, &is_data_object);
-
- // Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- b(ne, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- tst(instance_type, Operand(kExternalStringTag));
- mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
- b(ne, &is_data_object);
-
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- ldr(ip, FieldMemOperand(value, String::kLengthOffset));
- tst(instance_type, Operand(kStringEncodingMask));
- mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
- add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, length, Operand(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- orr(ip, ip, Operand(mask_scratch));
- str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- add(ip, ip, Operand(length));
- str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
-}
-
-
-void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
- Usat(output_reg, 8, Operand(input_reg));
-}
-
-
-void MacroAssembler::ClampDoubleToUint8(Register result_reg,
- DwVfpRegister input_reg,
- DwVfpRegister temp_double_reg) {
- Label above_zero;
- Label done;
- Label in_bounds;
-
- Vmov(temp_double_reg, 0.0);
- VFPCompareAndSetFlags(input_reg, temp_double_reg);
- b(gt, &above_zero);
-
- // Double value is less than zero, NaN or Inf, return 0.
- mov(result_reg, Operand::Zero());
- b(al, &done);
-
- // Double value is >= 255, return 255.
- bind(&above_zero);
- Vmov(temp_double_reg, 255.0, result_reg);
- VFPCompareAndSetFlags(input_reg, temp_double_reg);
- b(le, &in_bounds);
- mov(result_reg, Operand(255));
- b(al, &done);
-
- // In 0-255 range, round and truncate.
- bind(&in_bounds);
- // Save FPSCR.
- vmrs(ip);
- // Set rounding mode to round to the nearest integer by clearing bits[23:22].
- bic(result_reg, ip, Operand(kVFPRoundingModeMask));
- vmsr(result_reg);
- vcvt_s32_f64(input_reg.low(), input_reg, kFPSCRRounding);
- vmov(result_reg, input_reg.low());
- // Restore FPSCR.
- vmsr(ip);
- bind(&done);
-}
-
-
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
-}
-
-
-void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
- DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
-}
-
-
-void MacroAssembler::EnumLength(Register dst, Register map) {
- STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
- and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
-}
-
-
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Register empty_fixed_array_value = r6;
- LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Label next, start;
- mov(r2, r0);
-
- // Check if the enum length field is properly initialized, indicating that
- // there is an enum cache.
- ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
-
- EnumLength(r3, r1);
- cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
- b(eq, call_runtime);
-
- jmp(&start);
-
- bind(&next);
- ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
-
- // For all objects but the receiver, check that the cache is empty.
- EnumLength(r3, r1);
- cmp(r3, Operand(Smi::FromInt(0)));
- b(ne, call_runtime);
-
- bind(&start);
-
- // Check that there are no elements. Register r2 contains the current JS
- // object we've reached through the prototype chain.
- ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
- cmp(r2, empty_fixed_array_value);
- b(ne, call_runtime);
-
- ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
- cmp(r2, null_value);
- b(ne, &next);
-}
-
-
-void MacroAssembler::TestJSArrayForAllocationSiteInfo(
- Register receiver_reg,
- Register scratch_reg) {
- Label no_info_available;
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- add(scratch_reg, receiver_reg,
- Operand(JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
- cmp(scratch_reg, Operand(new_space_start));
- b(lt, &no_info_available);
- mov(ip, Operand(new_space_allocation_top));
- ldr(ip, MemOperand(ip));
- cmp(scratch_reg, ip);
- b(gt, &no_info_available);
- ldr(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
- cmp(scratch_reg,
- Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
- bind(&no_info_available);
-}
-
-
-#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
- Register reg6) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
- reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-#endif
-
-
-CodePatcher::CodePatcher(byte* address, int instructions)
- : address_(address),
- size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- CPU::FlushICache(address_, size_);
-
- // Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-void CodePatcher::Emit(Instr instr) {
- masm()->emit(instr);
-}
-
-
-void CodePatcher::Emit(Address addr) {
- masm()->emit(reinterpret_cast<Instr>(addr));
-}
-
-
-void CodePatcher::EmitCondition(Condition cond) {
- Instr instr = Assembler::instr_at(masm_.pc_);
- instr = (instr & ~kCondMask) | cond;
- masm_.emit(instr);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/macro-assembler-arm.h b/src/3rdparty/v8/src/arm/macro-assembler-arm.h
deleted file mode 100644
index 7b05a67..0000000
--- a/src/3rdparty/v8/src/arm/macro-assembler-arm.h
+++ /dev/null
@@ -1,1439 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
-#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
-
-#include "assembler.h"
-#include "frames.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Static helper functions
-
-// Generate a MemOperand for loading a field from an object.
-inline MemOperand FieldMemOperand(Register object, int offset) {
- return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
-inline Operand SmiUntagOperand(Register object) {
- return Operand(object, ASR, kSmiTagSize);
-}
-
-
-
-// Give alias names to registers
-const Register cp = { 8 }; // JavaScript context pointer
-const Register kRootRegister = { 10 }; // Roots array pointer.
-
-// Flags used for AllocateHeapNumber
-enum TaggingMode {
- // Tag the result.
- TAG_RESULT,
- // Don't tag
- DONT_TAG_RESULT
-};
-
-// Flags used for the ObjectToDoubleVFPRegister function.
-enum ObjectToDoubleFlags {
- // No special flags.
- NO_OBJECT_TO_DOUBLE_FLAGS = 0,
- // Object is known to be a non smi.
- OBJECT_NOT_SMI = 1 << 0,
- // Don't load NaNs or infinities, branch to the non number case instead.
- AVOID_NANS_AND_INFINITIES = 1 << 1
-};
-
-
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
-
-
-#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3 = no_reg,
- Register reg4 = no_reg,
- Register reg5 = no_reg,
- Register reg6 = no_reg);
-#endif
-
-
-enum TargetAddressStorageMode {
- CAN_INLINE_TARGET_ADDRESS,
- NEVER_INLINE_TARGET_ADDRESS
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
- public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
-
- // Jump, Call, and Ret pseudo instructions implementing inter-working.
- void Jump(Register target, Condition cond = al);
- void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
- void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSize(Register target, Condition cond = al);
- void Call(Register target, Condition cond = al);
- int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSizeNotPredictableCodeSize(Address target,
- RelocInfo::Mode rmode,
- Condition cond = al);
- void Call(Address target, RelocInfo::Mode rmode,
- Condition cond = al,
- TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
- int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = al);
- void Call(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = al,
- TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
- void Ret(Condition cond = al);
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the sp register.
- void Drop(int count, Condition cond = al);
-
- void Ret(int drop, Condition cond = al);
-
- // Swap two registers. If the scratch register is omitted then a slightly
- // less efficient form using xor instead of mov is emitted.
- void Swap(Register reg1,
- Register reg2,
- Register scratch = no_reg,
- Condition cond = al);
-
-
- void And(Register dst, Register src1, const Operand& src2,
- Condition cond = al);
- void Ubfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
- void Sbfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
- // The scratch register is not used for ARMv7.
- // scratch can be the same register as src (in which case it is trashed), but
- // not the same as dst.
- void Bfi(Register dst,
- Register src,
- Register scratch,
- int lsb,
- int width,
- Condition cond = al);
- void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
- void Usat(Register dst, int satpos, const Operand& src,
- Condition cond = al);
-
- void Call(Label* target);
-
- // Register move. May do nothing if the registers are identical.
- void Move(Register dst, Handle<Object> value);
- void Move(Register dst, Register src, Condition cond = al);
- void Move(DwVfpRegister dst, DwVfpRegister src);
-
- // Load an object from the root table.
- void LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond = al);
- // Store an object to the root table.
- void StoreRoot(Register source,
- Heap::RootListIndex index,
- Condition cond = al);
-
- void LoadHeapObject(Register dst, Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Move(result, object);
- }
- }
-
- // ---------------------------------------------------------------------------
- // GC Support
-
- void IncrementalMarkingRecordWriteHelper(Register object,
- Register value,
- Register address);
-
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, ne, branch);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, eq, branch);
- }
-
- // Check if an object has a given incremental marking color.
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
- void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- lr_status,
- save_fp,
- remembered_set_action,
- smi_check);
- }
-
- // For a given |object| notify the garbage collector that the slot |address|
- // has been written. |value| is the object being stored. The value and
- // address registers are clobbered by the operation.
- void RecordWrite(
- Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // Push a handle.
- void Push(Handle<Object> handle);
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
-
- // Push two registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Condition cond = al) {
- ASSERT(!src1.is(src2));
- if (src1.code() > src2.code()) {
- stm(db_w, sp, src1.bit() | src2.bit(), cond);
- } else {
- str(src1, MemOperand(sp, 4, NegPreIndex), cond);
- str(src2, MemOperand(sp, 4, NegPreIndex), cond);
- }
- }
-
- // Push three registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
- if (src1.code() > src2.code()) {
- if (src2.code() > src3.code()) {
- stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
- } else {
- stm(db_w, sp, src1.bit() | src2.bit(), cond);
- str(src3, MemOperand(sp, 4, NegPreIndex), cond);
- }
- } else {
- str(src1, MemOperand(sp, 4, NegPreIndex), cond);
- Push(src2, src3, cond);
- }
- }
-
- // Push four registers. Pushes leftmost register first (to highest address).
- void Push(Register src1,
- Register src2,
- Register src3,
- Register src4,
- Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
- ASSERT(!src1.is(src4));
- ASSERT(!src2.is(src4));
- ASSERT(!src3.is(src4));
- if (src1.code() > src2.code()) {
- if (src2.code() > src3.code()) {
- if (src3.code() > src4.code()) {
- stm(db_w,
- sp,
- src1.bit() | src2.bit() | src3.bit() | src4.bit(),
- cond);
- } else {
- stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
- str(src4, MemOperand(sp, 4, NegPreIndex), cond);
- }
- } else {
- stm(db_w, sp, src1.bit() | src2.bit(), cond);
- Push(src3, src4, cond);
- }
- } else {
- str(src1, MemOperand(sp, 4, NegPreIndex), cond);
- Push(src2, src3, src4, cond);
- }
- }
-
- // Pop two registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2, Condition cond = al) {
- ASSERT(!src1.is(src2));
- if (src1.code() > src2.code()) {
- ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
- } else {
- ldr(src2, MemOperand(sp, 4, PostIndex), cond);
- ldr(src1, MemOperand(sp, 4, PostIndex), cond);
- }
- }
-
- // Pop three registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
- if (src1.code() > src2.code()) {
- if (src2.code() > src3.code()) {
- ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
- } else {
- ldr(src3, MemOperand(sp, 4, PostIndex), cond);
- ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
- }
- } else {
- Pop(src2, src3, cond);
- str(src1, MemOperand(sp, 4, PostIndex), cond);
- }
- }
-
- // Pop four registers. Pops rightmost register first (from lower address).
- void Pop(Register src1,
- Register src2,
- Register src3,
- Register src4,
- Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
- ASSERT(!src1.is(src4));
- ASSERT(!src2.is(src4));
- ASSERT(!src3.is(src4));
- if (src1.code() > src2.code()) {
- if (src2.code() > src3.code()) {
- if (src3.code() > src4.code()) {
- ldm(ia_w,
- sp,
- src1.bit() | src2.bit() | src3.bit() | src4.bit(),
- cond);
- } else {
- ldr(src4, MemOperand(sp, 4, PostIndex), cond);
- ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
- }
- } else {
- Pop(src3, src4, cond);
- ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
- }
- } else {
- Pop(src2, src3, src4, cond);
- ldr(src1, MemOperand(sp, 4, PostIndex), cond);
- }
- }
-
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters();
- void PopSafepointRegisters();
- void PushSafepointRegistersAndDoubles();
- void PopSafepointRegistersAndDoubles();
- // Store value in register src in the safepoint stack slot for
- // register dst.
- void StoreToSafepointRegisterSlot(Register src, Register dst);
- void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
- // Load the value of the src register from its safepoint stack slot
- // into register dst.
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
-
- // Load two consecutive registers with two consecutive memory locations.
- void Ldrd(Register dst1,
- Register dst2,
- const MemOperand& src,
- Condition cond = al);
-
- // Store two consecutive registers to two consecutive memory locations.
- void Strd(Register src1,
- Register src2,
- const MemOperand& dst,
- Condition cond = al);
-
- // Compare double values and move the result to the normal condition flags.
- void VFPCompareAndSetFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void VFPCompareAndSetFlags(const DwVfpRegister src1,
- const double src2,
- const Condition cond = al);
-
- // Compare double values and then load the fpscr flags to a register.
- void VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Register fpscr_flags,
- const Condition cond = al);
- void VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const double src2,
- const Register fpscr_flags,
- const Condition cond = al);
-
- void Vmov(const DwVfpRegister dst,
- const double imm,
- const Register scratch = no_reg);
-
- // Enter exit frame.
- // stack_space - extra stack space, used for alignment before call to C.
- void EnterExitFrame(bool save_doubles, int stack_space = 0);
-
- // Leave the current exit frame. Expects the return value in r0.
- // Expect the number of values, pushed prior to the exit frame, to
- // remove in a register (or no_reg, if there is nothing to remove).
- void LeaveExitFrame(bool save_doubles, Register argument_count);
-
- // Get the actual activation frame alignment for target environment.
- static int ActivationFrameAlignment();
-
- void LoadContext(Register dst, int context_chain_length);
-
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
- void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
-
- // Load the initial map from the global function. The registers
- // function and map can be the same, function is then overwritten.
- void LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch);
-
- void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(kRootRegister, Operand(roots_array_start));
- }
-
- // ---------------------------------------------------------------------------
- // JavaScript invokes
-
- // Set up call kind marking in ecx. The method takes ecx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind);
-
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- void IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail);
-
- void IsObjectJSStringType(Register object,
- Register scratch,
- Label* fail);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
-#endif
-
- // ---------------------------------------------------------------------------
- // Exception handling
-
- // Push a new try handler and link into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
-
- // Unlink the stack handler on top of the stack from the try handler chain.
- // Must preserve the result register.
- void PopTryHandler();
-
- // Passes thrown value to the handler of top of the try handler chain.
- void Throw(Register value);
-
- // Propagates an uncatchable exception to the top of the current JS stack's
- // handler chain.
- void ThrowUncatchable(Register value);
-
- // ---------------------------------------------------------------------------
- // Inline caching support
-
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, whereas both scratch registers are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
- void GetNumberHash(Register t0, Register scratch);
-
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register t0,
- Register t1,
- Register t2);
-
-
- inline void MarkCode(NopMarkerTypes type) {
- nop(type);
- }
-
- // Check if the given instruction is a 'type' marker.
- // i.e. check if is is a mov r<type>, r<type> (referenced as nop(type))
- // These instructions are generated to mark special location in the code,
- // like some special IC code.
- static inline bool IsMarkedCode(Instr instr, int type) {
- ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
- return IsNop(instr, type);
- }
-
-
- static inline int GetCodeMarker(Instr instr) {
- int dst_reg_offset = 12;
- int dst_mask = 0xf << dst_reg_offset;
- int src_mask = 0xf;
- int dst_reg = (instr & dst_mask) >> dst_reg_offset;
- int src_reg = instr & src_mask;
- uint32_t non_register_mask = ~(dst_mask | src_mask);
- uint32_t mov_mask = al | 13 << 21;
-
- // Return <n> if we have a mov rn rn, else return -1.
- int type = ((instr & non_register_mask) == mov_mask) &&
- (dst_reg == src_reg) &&
- (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
- ? src_reg
- : -1;
- ASSERT((type == -1) ||
- ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
- return type;
- }
-
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space. The object_size is specified
- // either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the new space is exhausted control continues at the
- // gc_required label. The allocated object is returned in result. If
- // the flag tag_allocated_object is true the result is tagged as as
- // a heap object. All registers are clobbered also when control
- // continues at the gc_required label.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
-
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. The caller must make sure that no pointers
- // are left to the object(s) no longer allocated as they would be invalid when
- // allocation is undone.
- void UndoAllocationInNewSpace(Register object, Register scratch);
-
-
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocates a heap number or jumps to the gc_required label if the young
- // space is full and a scavenge is needed. All registers are clobbered also
- // when control continues at the gc_required label.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT);
- void AllocateHeapNumberWithValue(Register result,
- DwVfpRegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required);
-
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
-
- // Copies a number of bytes from src to dst. All registers are clobbered. On
- // exit src and dst will point to the place just after where the last byte was
- // read or written and length will be zero.
- void CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch);
-
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
-
- // ---------------------------------------------------------------------------
- // Support functions.
-
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function = false);
-
- // Compare object type for heap object. heap_object contains a non-Smi
- // whose object type should be compared with the given type. This both
- // sets the flags and leaves the object type in the type_reg register.
- // It leaves the map in the map register (unless the type_reg and map register
- // are the same register). It leaves the heap object in the heap_object
- // register unless the heap_object register is the same register as one of the
- // other registers.
- void CompareObjectType(Register heap_object,
- Register map,
- Register type_reg,
- InstanceType type);
-
- // Compare instance type in a map. map contains a valid map object whose
- // object type should be compared with the given type. This both
- // sets the flags and leaves the object type in the type_reg register.
- void CompareInstanceType(Register map,
- Register type_reg,
- InstanceType type);
-
-
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail, in which
- // case scratch2, scratch3 and scratch4 are unmodified.
- void StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- // All regs below here overwritten.
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail,
- int elements_offset = 0);
-
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
- // set with result of map compare. If multiple map compares are required, the
- // compare sequences branches to early_success.
- void CompareMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // As above, but the map of the object is already loaded into the register
- // which is preserved by the code generated.
- void CompareMap(Register obj_map,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
-
- void CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type);
-
-
- // Check if the map of an object is equal to a specified map and branch to a
- // specified target if equal. Skip the smi check if not required (object is
- // known to be a heap object)
- void DispatchMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type);
-
-
- // Compare the object in a register to a value from the root list.
- // Uses the ip register as scratch.
- void CompareRoot(Register obj, Heap::RootListIndex index);
-
-
- // Load and check the instance type of an object for being a string.
- // Loads the type into the second argument register.
- // Returns a condition that will be enabled if the object was a string
- // and the passed-in condition passed. If the passed-in condition failed
- // then flags remain unchanged.
- Condition IsObjectStringType(Register obj,
- Register type,
- Condition cond = al) {
- ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
- ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
- tst(type, Operand(kIsNotStringMask), cond);
- ASSERT_EQ(0, kStringTag);
- return eq;
- }
-
-
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
- // Get the number of least significant bits from a register
- void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
- void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
-
- // Uses VFP instructions to Convert a Smi to a double.
- void IntegerToDoubleConversionWithVFP3(Register inReg,
- Register outHighReg,
- Register outLowReg);
-
- // Load the value of a number object into a VFP double register. If the object
- // is not a number a jump to the label not_number is performed and the VFP
- // double register is unchanged.
- void ObjectToDoubleVFPRegister(
- Register object,
- DwVfpRegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- SwVfpRegister scratch3,
- Label* not_number,
- ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
-
- // Load the value of a smi object into a VFP double register. The register
- // scratch1 can be the same register as smi in which case smi will hold the
- // untagged value afterwards.
- void SmiToDoubleVFPRegister(Register smi,
- DwVfpRegister value,
- Register scratch1,
- SwVfpRegister scratch2);
-
- // Convert the HeapNumber pointed to by source to a 32bits signed integer
- // dest. If the HeapNumber does not fit into a 32bits signed integer branch
- // to not_int32 label. If VFP3 is available double_scratch is used but not
- // scratch2.
- void ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- DwVfpRegister double_scratch,
- Label *not_int32);
-
- // Try to convert a double to a signed 32-bit integer. If the double value
- // can be exactly represented as an integer, the code jumps to 'done' and
- // 'result' contains the integer value. Otherwise, the code falls through.
- void TryFastDoubleToInt32(Register result,
- DwVfpRegister double_input,
- DwVfpRegister double_scratch,
- Label* done);
-
- // Truncates a double using a specific rounding mode, and writes the value
- // to the result register.
- // Clears the z flag (ne condition) if an overflow occurs.
- // If kCheckForInexactConversion is passed, the z flag is also cleared if the
- // conversion was inexact, i.e. if the double value could not be converted
- // exactly to a 32-bit integer.
- void EmitVFPTruncate(VFPRoundingMode rounding_mode,
- Register result,
- DwVfpRegister double_input,
- Register scratch,
- DwVfpRegister double_scratch,
- CheckForInexactConversion check
- = kDontCheckForInexactConversion);
-
- // Helper for EmitECMATruncate.
- // This will truncate a floating-point value outside of the signed 32bit
- // integer range to a 32bit signed integer.
- // Expects the double value loaded in input_high and input_low.
- // Exits with the answer in 'result'.
- // Note that this code does not work for values in the 32bit range!
- void EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer and all other registers clobbered.
- void EmitECMATruncate(Register result,
- DwVfpRegister double_input,
- DwVfpRegister double_scratch,
- Register scratch,
- Register scratch2,
- Register scratch3);
-
- // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
- // instruction. On pre-ARM5 hardware this routine gives the wrong answer
- // for 0 (31 instead of 32). Source and scratch can be the same in which case
- // the source is clobbered. Source and zeros can also be the same in which
- // case scratch should be a different register.
- void CountLeadingZeros(Register zeros,
- Register source,
- Register scratch);
-
- // Check whether d16-d31 are available on the CPU. The result is given by the
- // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
- void CheckFor32DRegs(Register scratch);
-
-
- // ---------------------------------------------------------------------------
- // Runtime calls
-
- // Call a code stub.
- void CallStub(CodeStub* stub,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = al);
-
- // Call a code stub.
- void TailCallStub(CodeStub* stub, Condition cond = al);
-
- // Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments);
-
- // Convenience function: call an external reference.
- void CallExternalReference(const ExternalReference& ext,
- int num_arguments);
-
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- int CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments);
-
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, non-register arguments must be stored in
- // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
- // are word sized. If double arguments are used, this function assumes that
- // all double arguments are stored before core registers; otherwise the
- // correct alignment of the double values is not guaranteed.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_reg_arguments,
- int num_double_registers,
- Register scratch);
- void PrepareCallCFunction(int num_reg_arguments,
- Register scratch);
-
- // There are two ways of passing double arguments on ARM, depending on
- // whether soft or hard floating point ABI is used. These functions
- // abstract parameter passing for the three different ways we call
- // C functions from generated code.
- void SetCallCDoubleArguments(DwVfpRegister dreg);
- void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
- void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments);
-
- void GetCFunctionDoubleResult(const DwVfpRegister dst);
-
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context. stack_space
- // - space to be unwound on exit (includes the call JS arguments space and
- // the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
-
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& builtin);
-
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
- // Store the code object for the given builtin in the target register and
- // setup the function in r1.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
- Handle<Object> CodeObject() {
- ASSERT(!code_object_.is_null());
- return code_object_;
- }
-
-
- // ---------------------------------------------------------------------------
- // StatsCounter support
-
- void SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
- void IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
- void DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
-
-
- // ---------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cond is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cond, const char* msg);
- void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
- void AssertFastElements(Register elements);
-
- // Like Assert(), but always enabled.
- void Check(Condition cond, const char* msg);
-
- // Print a message to stdout and abort execution.
- void Abort(const char* msg);
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
- // EABI variant for double arguments in use.
- bool use_eabi_hardfloat() {
-#if USE_EABI_HARDFLOAT
- return true;
-#else
- return false;
-#endif
- }
-
- // ---------------------------------------------------------------------------
- // Number utilities
-
- // Check whether the value of reg is a power of two and not zero. If not
- // control continues at the label not_power_of_two. If reg is a power of two
- // the register scratch contains the value of (reg - 1) when control falls
- // through.
- void JumpIfNotPowerOfTwoOrZero(Register reg,
- Register scratch,
- Label* not_power_of_two_or_zero);
- // Check whether the value of reg is a power of two and not zero.
- // Control falls through if it is, with scratch containing the mask
- // value (reg - 1).
- // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
- // zero or negative, or jumps to the 'not_power_of_two' label if the value is
- // strictly positive but not a power of two.
- void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
- Register scratch,
- Label* zero_and_neg,
- Label* not_power_of_two);
-
- // ---------------------------------------------------------------------------
- // Smi utilities
-
- void SmiTag(Register reg, SBit s = LeaveCC) {
- add(reg, reg, Operand(reg), s);
- }
- void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
- add(dst, src, Operand(src), s);
- }
-
- // Try to convert int32 to smi. If the value is to large, preserve
- // the original value and jump to not_a_smi. Destroys scratch and
- // sets flags.
- void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
- mov(scratch, reg);
- SmiTag(scratch, SetCC);
- b(vs, not_a_smi);
- mov(reg, scratch);
- }
-
- void SmiUntag(Register reg, SBit s = LeaveCC) {
- mov(reg, Operand(reg, ASR, kSmiTagSize), s);
- }
- void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
- mov(dst, Operand(src, ASR, kSmiTagSize), s);
- }
-
- // Untag the source value into destination and jump if source is a smi.
- // Souce and destination can be the same register.
- void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
-
- // Untag the source value into destination and jump if source is not a smi.
- // Souce and destination can be the same register.
- void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
- // Jump if the register contains a smi.
- inline void JumpIfSmi(Register value, Label* smi_label) {
- tst(value, Operand(kSmiTagMask));
- b(eq, smi_label);
- }
- // Jump if either of the registers contain a non-smi.
- inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
- tst(value, Operand(kSmiTagMask));
- b(ne, not_smi_label);
- }
- // Jump if either of the registers contain a non-smi.
- void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
- // Jump if either of the registers contain a smi.
- void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
-
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
-
- // Abort execution if argument is a string, enabled via --debug-code.
- void AssertString(Register object);
-
- // Abort execution if argument is not the root value with the given index,
- // enabled via --debug-code.
- void AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
-
- // ---------------------------------------------------------------------------
- // HeapNumber utilities
-
- void JumpIfNotHeapNumber(Register object,
- Register heap_number_map,
- Register scratch,
- Label* on_not_heap_number);
-
- // ---------------------------------------------------------------------------
- // String utilities
-
- // Checks if both objects are sequential ASCII strings and jumps to label
- // if either is not. Assumes that neither object is a smi.
- void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Checks if both objects are sequential ASCII strings and jumps to label
- // if either is not.
- void JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* not_flat_ascii_strings);
-
- // Checks if both instance types are sequential ASCII strings and jumps to
- // label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Check if instance type is sequential ASCII string and jump to label if
- // it is not.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure);
-
-
- // ---------------------------------------------------------------------------
- // Patching helpers.
-
- // Get the location of a relocated constant (its address in the constant pool)
- // from its load site.
- void GetRelocatedValueLocation(Register ldr_location,
- Register result);
-
-
- void ClampUint8(Register output_reg, Register input_reg);
-
- void ClampDoubleToUint8(Register result_reg,
- DwVfpRegister input_reg,
- DwVfpRegister temp_double_reg);
-
-
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void EnumLength(Register dst, Register map);
- void NumberOfOwnDescriptors(Register dst, Register map);
-
- template<typename Field>
- void DecodeField(Register reg) {
- static const int shift = Field::kShift;
- static const int mask = (Field::kMask >> shift) << kSmiTagSize;
- mov(reg, Operand(reg, LSR, shift));
- and_(reg, reg, Operand(mask));
- }
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
- // Expects object in r0 and returns map with validated enum cache
- // in r0. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Register null_value, Label* call_runtime);
-
- // AllocationSiteInfo support. Arrays may have an associated
- // AllocationSiteInfo object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver_reg should point to the array object.
- // scratch_reg gets clobbered.
- // If allocation info is present, condition flags are set to eq
- void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
- Register scratch_reg);
-
- private:
- void CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments);
-
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-
- // Helper functions for generating invokes.
- void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2);
-
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise.
- Label* branch);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Leaves addr_reg unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
-
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
- // Compute memory operands for safepoint stack slots.
- static int SafepointRegisterStackIndex(int reg_code);
- MemOperand SafepointRegisterSlot(Register reg);
- MemOperand SafepointRegistersAndDoublesSlot(Register reg);
-
- bool generating_stub_;
- bool allow_stub_calls_;
- bool has_frame_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class StandardFrame;
-};
-
-
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. It is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion to fail.
-class CodePatcher {
- public:
- CodePatcher(byte* address, int instructions);
- virtual ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- // Emit an instruction directly.
- void Emit(Instr instr);
-
- // Emit an address directly.
- void Emit(Address addr);
-
- // Emit the condition part of an instruction leaving the rest of the current
- // instruction unchanged.
- void EmitCondition(Condition cond);
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
-
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-inline MemOperand ContextOperand(Register context, int index) {
- return MemOperand(context, Context::SlotOffset(index));
-}
-
-
-inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
-}
-
-
-static inline MemOperand QmlGlobalObjectOperand() {
- return ContextOperand(cp, Context::QML_GLOBAL_OBJECT_INDEX);
-}
-
-
-#ifdef GENERATED_CODE_COVERAGE
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
-#else
-#define ACCESS_MASM(masm) masm->
-#endif
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc b/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc
deleted file mode 100644
index acb24ef..0000000
--- a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc
+++ /dev/null
@@ -1,1429 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "unicode.h"
-#include "log.h"
-#include "code-stubs.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "arm/regexp-macro-assembler-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-/*
- * This assembler uses the following register assignment convention
- * - r4 : Temporarily stores the index of capture start after a matching pass
- * for a global regexp.
- * - r5 : Pointer to current code object (Code*) including heap object tag.
- * - r6 : Current position in input, as negative offset from end of string.
- * Please notice that this is the byte offset, not the character offset!
- * - r7 : Currently loaded character. Must be loaded using
- * LoadCurrentCharacter before using any of the dispatch methods.
- * - r8 : Points to tip of backtrack stack
- * - r9 : Unused, might be used by C code and expected unchanged.
- * - r10 : End of input (points to byte after last character in input).
- * - r11 : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - r12 : IP register, used by assembler. Very volatile.
- * - r13/sp : Points to tip of C stack.
- *
- * The remaining registers are free for computations.
- * Each call to a public method should retain this convention.
- *
- * The stack will have the following structure:
- * - fp[56] Isolate* isolate (address of the current isolate)
- * - fp[52] direct_call (if 1, direct call from JavaScript code,
- * if 0, call through the runtime system).
- * - fp[48] stack_area_base (high end of the memory area to use as
- * backtracking stack).
- * - fp[44] capture array size (may fit multiple sets of matches)
- * - fp[40] int* capture_array (int[num_saved_registers_], for output).
- * - fp[36] secondary link/return address used by native call.
- * --- sp when called ---
- * - fp[32] return address (lr).
- * - fp[28] old frame pointer (r11).
- * - fp[0..24] backup of registers r4..r10.
- * --- frame pointer ----
- * - fp[-4] end of input (address of end of string).
- * - fp[-8] start of input (address of first character in string).
- * - fp[-12] start index (character index of start).
- * - fp[-16] void* input_string (location of a handle containing the string).
- * - fp[-20] success counter (only for global regexps to count matches).
- * - fp[-24] Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a
- * non-position.
- * - fp[-28] At start (if 1, we are starting at the start of the
- * string, otherwise 0)
- * - fp[-32] register 0 (Only positions must be stored in the first
- * - register 1 num_saved_registers_ registers)
- * - ...
- * - register num_registers-1
- * --- sp ---
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers start out as garbage.
- *
- * The data up to the return address must be placed there by the calling
- * code and the remaining arguments are passed in registers, e.g. by calling the
- * code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
- * int start_index,
- * Address start,
- * Address end,
- * Address secondary_return_address, // Only used by native call.
- * int* capture_output_array,
- * byte* stack_area_base,
- * bool direct_call = false)
- * The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in arm/simulator-arm.h.
- * When calling as a non-direct call (i.e., from C++ code), the return address
- * area is overwritten with the LR register by the RegExp code. When doing a
- * direct call from generated code, the return address is placed there by
- * the calling code, as in a normal exit frame.
- */
-
-#define __ ACCESS_MASM(masm_)
-
-RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- ASSERT_EQ(0, registers_to_save % 2);
- __ jmp(&entry_label_); // We'll write the entry code later.
- EmitBacktrackConstantPool();
- __ bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {
- delete masm_;
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerARM::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerARM::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ add(current_input_offset(),
- current_input_offset(), Operand(by * char_size()));
- }
-}
-
-
-void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) {
- ASSERT(reg >= 0);
- ASSERT(reg < num_registers_);
- if (by != 0) {
- __ ldr(r0, register_location(reg));
- __ add(r0, r0, Operand(by));
- __ str(r0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerARM::Backtrack() {
- CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
- Pop(r0);
- __ add(pc, r0, Operand(code_pointer()));
-}
-
-
-void RegExpMacroAssemblerARM::Bind(Label* label) {
- __ bind(label);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacter(uint32_t c, Label* on_equal) {
- __ cmp(current_character(), Operand(c));
- BranchOrBacktrack(eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
- __ cmp(current_character(), Operand(limit));
- BranchOrBacktrack(gt, on_greater);
-}
-
-
-void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, &not_at_start);
-
- // If we did, are we still at the start of the input?
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- __ cmp(r0, r1);
- BranchOrBacktrack(eq, on_at_start);
- __ bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- __ cmp(r0, r1);
- BranchOrBacktrack(ne, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterLT(uc16 limit, Label* on_less) {
- __ cmp(current_character(), Operand(limit));
- BranchOrBacktrack(lt, on_less);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- if (on_failure == NULL) {
- // Instead of inlining a backtrack for each test, (re)use the global
- // backtrack target.
- on_failure = &backtrack_label_;
- }
-
- if (check_end_of_string) {
- // Is last character of required match inside string.
- CheckPosition(cp_offset + str.length() - 1, on_failure);
- }
-
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- if (cp_offset != 0) {
- int byte_offset = cp_offset * char_size();
- __ add(r0, r0, Operand(byte_offset));
- }
-
- // r0 : Address of characters to match against str.
- int stored_high_byte = 0;
- for (int i = 0; i < str.length(); i++) {
- if (mode_ == ASCII) {
- __ ldrb(r1, MemOperand(r0, char_size(), PostIndex));
- ASSERT(str[i] <= String::kMaxOneByteCharCode);
- __ cmp(r1, Operand(str[i]));
- } else {
- __ ldrh(r1, MemOperand(r0, char_size(), PostIndex));
- uc16 match_char = str[i];
- int match_high_byte = (match_char >> 8);
- if (match_high_byte == 0) {
- __ cmp(r1, Operand(str[i]));
- } else {
- if (match_high_byte != stored_high_byte) {
- __ mov(r2, Operand(match_high_byte));
- stored_high_byte = match_high_byte;
- }
- __ add(r3, r2, Operand(match_char & 0xff));
- __ cmp(r1, r3);
- }
- }
- BranchOrBacktrack(ne, on_failure);
- }
-}
-
-
-void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
- __ ldr(r0, MemOperand(backtrack_stackpointer(), 0));
- __ cmp(current_input_offset(), r0);
- __ add(backtrack_stackpointer(),
- backtrack_stackpointer(), Operand(kPointerSize), LeaveCC, eq);
- BranchOrBacktrack(eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- __ ldr(r0, register_location(start_reg)); // Index of start of capture
- __ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
- __ sub(r1, r1, r0, SetCC); // Length of capture.
-
- // If length is zero, either the capture is empty or it is not participating.
- // In either case succeed immediately.
- __ b(eq, &fallthrough);
-
- // Check that there are enough characters left in the input.
- __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match);
-
- if (mode_ == ASCII) {
- Label success;
- Label fail;
- Label loop_check;
-
- // r0 - offset of start of capture
- // r1 - length of capture
- __ add(r0, r0, Operand(end_of_input_address()));
- __ add(r2, end_of_input_address(), Operand(current_input_offset()));
- __ add(r1, r0, Operand(r1));
-
- // r0 - Address of start of capture.
- // r1 - Address of end of capture
- // r2 - Address of current input position.
-
- Label loop;
- __ bind(&loop);
- __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
- __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
- __ cmp(r4, r3);
- __ b(eq, &loop_check);
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- __ orr(r3, r3, Operand(0x20)); // Convert capture character to lower-case.
- __ orr(r4, r4, Operand(0x20)); // Also convert input character.
- __ cmp(r4, r3);
- __ b(ne, &fail);
- __ sub(r3, r3, Operand('a'));
- __ cmp(r3, Operand('z' - 'a')); // Is r3 a lowercase letter?
-#ifndef ENABLE_LATIN_1
- __ b(hi, &fail);
-#else
- __ b(ls, &loop_check); // In range 'a'-'z'.
- // Latin-1: Check for values in range [224,254] but not 247.
- __ sub(r3, r3, Operand(224 - 'a'));
- __ cmp(r3, Operand(254 - 224));
- __ b(hi, &fail); // Weren't Latin-1 letters.
- __ cmp(r3, Operand(247 - 224)); // Check for 247.
- __ b(eq, &fail);
-#endif
-
- __ bind(&loop_check);
- __ cmp(r0, r1);
- __ b(lt, &loop);
- __ jmp(&success);
-
- __ bind(&fail);
- BranchOrBacktrack(al, on_no_match);
-
- __ bind(&success);
- // Compute new value of character position after the matched part.
- __ sub(current_input_offset(), r2, end_of_input_address());
- } else {
- ASSERT(mode_ == UC16);
- int argument_count = 4;
- __ PrepareCallCFunction(argument_count, r2);
-
- // r0 - offset of start of capture
- // r1 - length of capture
-
- // Put arguments into arguments registers.
- // Parameters are
- // r0: Address byte_offset1 - Address captured substring's start.
- // r1: Address byte_offset2 - Address of current character position.
- // r2: size_t byte_length - length of capture in bytes(!)
- // r3: Isolate* isolate
-
- // Address of start of capture.
- __ add(r0, r0, Operand(end_of_input_address()));
- // Length of capture.
- __ mov(r2, Operand(r1));
- // Save length in callee-save register for use on return.
- __ mov(r4, Operand(r1));
- // Address of current input position.
- __ add(r1, current_input_offset(), Operand(end_of_input_address()));
- // Isolate.
- __ mov(r3, Operand(ExternalReference::isolate_address()));
-
- {
- AllowExternalCallThatCantCauseGC scope(masm_);
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(function, argument_count);
- }
-
- // Check if function returned non-zero for success or zero for failure.
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(eq, on_no_match);
- // On success, increment position by length of capture.
- __ add(current_input_offset(), current_input_offset(), Operand(r4));
- }
-
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- Label success;
-
- // Find length of back-referenced capture.
- __ ldr(r0, register_location(start_reg));
- __ ldr(r1, register_location(start_reg + 1));
- __ sub(r1, r1, r0, SetCC); // Length to check.
- // Succeed on empty capture (including no capture).
- __ b(eq, &fallthrough);
-
- // Check that there are enough characters left in the input.
- __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match);
-
- // Compute pointers to match string and capture string
- __ add(r0, r0, Operand(end_of_input_address()));
- __ add(r2, end_of_input_address(), Operand(current_input_offset()));
- __ add(r1, r1, Operand(r0));
-
- Label loop;
- __ bind(&loop);
- if (mode_ == ASCII) {
- __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
- __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
- } else {
- ASSERT(mode_ == UC16);
- __ ldrh(r3, MemOperand(r0, char_size(), PostIndex));
- __ ldrh(r4, MemOperand(r2, char_size(), PostIndex));
- }
- __ cmp(r3, r4);
- BranchOrBacktrack(ne, on_no_match);
- __ cmp(r0, r1);
- __ b(lt, &loop);
-
- // Move current character position to position after match.
- __ sub(current_input_offset(), r2, end_of_input_address());
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
- Label* on_not_equal) {
- __ cmp(current_character(), Operand(c));
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- if (c == 0) {
- __ tst(current_character(), Operand(mask));
- } else {
- __ and_(r0, current_character(), Operand(mask));
- __ cmp(r0, Operand(c));
- }
- BranchOrBacktrack(eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal) {
- if (c == 0) {
- __ tst(current_character(), Operand(mask));
- } else {
- __ and_(r0, current_character(), Operand(mask));
- __ cmp(r0, Operand(c));
- }
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- ASSERT(minus < String::kMaxUtf16CodeUnit);
- __ sub(r0, current_character(), Operand(minus));
- __ and_(r0, r0, Operand(mask));
- __ cmp(r0, Operand(c));
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- __ sub(r0, current_character(), Operand(from));
- __ cmp(r0, Operand(to - from));
- BranchOrBacktrack(ls, on_in_range); // Unsigned lower-or-same condition.
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- __ sub(r0, current_character(), Operand(from));
- __ cmp(r0, Operand(to - from));
- BranchOrBacktrack(hi, on_not_in_range); // Unsigned higher condition.
-}
-
-
-void RegExpMacroAssemblerARM::CheckBitInTable(
- Handle<ByteArray> table,
- Label* on_bit_set) {
- __ mov(r0, Operand(table));
- if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
- __ and_(r1, current_character(), Operand(kTableSize - 1));
- __ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ add(r1,
- current_character(),
- Operand(ByteArray::kHeaderSize - kHeapObjectTag));
- }
- __ ldrb(r0, MemOperand(r0, r1));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, on_bit_set);
-}
-
-
-bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check
- switch (type) {
- case 's':
- // Match space-characters
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- Label success;
- __ cmp(current_character(), Operand(' '));
- __ b(eq, &success);
- // Check range 0x09..0x0d
- __ sub(r0, current_character(), Operand('\t'));
- __ cmp(r0, Operand('\r' - '\t'));
- BranchOrBacktrack(hi, on_no_match);
- __ bind(&success);
- return true;
- }
- return false;
- case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- __ cmp(current_character(), Operand(' '));
- BranchOrBacktrack(eq, on_no_match);
- __ sub(r0, current_character(), Operand('\t'));
- __ cmp(r0, Operand('\r' - '\t'));
- BranchOrBacktrack(ls, on_no_match);
- return true;
- }
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9')
- __ sub(r0, current_character(), Operand('0'));
- __ cmp(current_character(), Operand('9' - '0'));
- BranchOrBacktrack(hi, on_no_match);
- return true;
- case 'D':
- // Match non ASCII-digits
- __ sub(r0, current_character(), Operand('0'));
- __ cmp(r0, Operand('9' - '0'));
- BranchOrBacktrack(ls, on_no_match);
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
- BranchOrBacktrack(ls, on_no_match);
- if (mode_ == UC16) {
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
- __ cmp(r0, Operand(1));
- BranchOrBacktrack(ls, on_no_match);
- }
- return true;
- }
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
- if (mode_ == ASCII) {
- BranchOrBacktrack(hi, on_no_match);
- } else {
- Label done;
- __ b(ls, &done);
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
- __ cmp(r0, Operand(1));
- BranchOrBacktrack(hi, on_no_match);
- __ bind(&done);
- }
- return true;
- }
- case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(current_character(), Operand('z'));
- BranchOrBacktrack(hi, on_no_match);
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ mov(r0, Operand(map));
- __ ldrb(r0, MemOperand(r0, current_character()));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(eq, on_no_match);
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(current_character(), Operand('z'));
- __ b(hi, &done);
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ mov(r0, Operand(map));
- __ ldrb(r0, MemOperand(r0, current_character()));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, on_no_match);
- if (mode_ != ASCII) {
- __ bind(&done);
- }
- return true;
- }
- case '*':
- // Match any character.
- return true;
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
-
-
-void RegExpMacroAssemblerARM::Fail() {
- __ mov(r0, Operand(FAILURE));
- __ jmp(&exit_label_);
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
- Label return_r0;
- // Finalize code - write the entry point code now we know how many
- // registers we need.
-
- // Entry code:
- __ bind(&entry_label_);
-
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
-
- // Actually emit code to start a new stack frame.
- // Push arguments
- // Save callee-save registers.
- // Start new stack frame.
- // Store link register in existing stack-cell.
- // Order here should correspond to order of offset constants in header file.
- RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() |
- r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit();
- RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit();
- __ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit());
- // Set frame pointer in space for it if this is not a direct call
- // from generated code.
- __ add(frame_pointer(), sp, Operand(4 * kPointerSize));
- __ mov(r0, Operand::Zero());
- __ push(r0); // Make room for success counter and initialize it to 0.
- __ push(r0); // Make room for "position - 1" constant (value is irrelevant).
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ sub(r0, sp, r0, SetCC);
- // Handle it if the stack pointer is already below the stack limit.
- __ b(ls, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmp(r0, Operand(num_registers_ * kPointerSize));
- __ b(hs, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(r0, Operand(EXCEPTION));
- __ jmp(&return_r0);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(r0);
- __ cmp(r0, Operand::Zero());
- // If returned value is non-zero, we exit with the returned value as result.
- __ b(ne, &return_r0);
-
- __ bind(&stack_ok);
-
- // Allocate space on stack for registers.
- __ sub(sp, sp, Operand(num_registers_ * kPointerSize));
- // Load string end.
- __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- // Load input start.
- __ ldr(r0, MemOperand(frame_pointer(), kInputStart));
- // Find negative length (offset of start relative to end).
- __ sub(current_input_offset(), r0, end_of_input_address());
- // Set r0 to address of char before start of the input string
- // (effectively string position -1).
- __ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
- __ sub(r0, current_input_offset(), Operand(char_size()));
- __ sub(r0, r0, Operand(r1, LSL, (mode_ == UC16) ? 1 : 0));
- // Store this value in a local variable, for use when clearing
- // position registers.
- __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
-
- // Initialize code pointer register
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmp(r1, Operand::Zero());
- __ b(ne, &load_char_start_regexp);
- __ mov(current_character(), Operand('\n'), LeaveCC, eq);
- __ jmp(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
-
- // Initialize on-stack registers.
- if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
- // Fill saved registers with initial value = start offset - 1
- if (num_saved_registers_ > 8) {
- // Address of register 0.
- __ add(r1, frame_pointer(), Operand(kRegisterZero));
- __ mov(r2, Operand(num_saved_registers_));
- Label init_loop;
- __ bind(&init_loop);
- __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
- __ sub(r2, r2, Operand(1), SetCC);
- __ b(ne, &init_loop);
- } else {
- for (int i = 0; i < num_saved_registers_; i++) {
- __ str(r0, register_location(i));
- }
- }
- }
-
- // Initialize backtrack stack pointer.
- __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
-
- __ jmp(&start_label_);
-
- // Exit code:
- if (success_label_.is_linked()) {
- // Save captures when successful.
- __ bind(&success_label_);
- if (num_saved_registers_ > 0) {
- // copy captures to output
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ ldr(r0, MemOperand(frame_pointer(), kRegisterOutput));
- __ ldr(r2, MemOperand(frame_pointer(), kStartIndex));
- __ sub(r1, end_of_input_address(), r1);
- // r1 is length of input in bytes.
- if (mode_ == UC16) {
- __ mov(r1, Operand(r1, LSR, 1));
- }
- // r1 is length of input in characters.
- __ add(r1, r1, Operand(r2));
- // r1 is length of string in characters.
-
- ASSERT_EQ(0, num_saved_registers_ % 2);
- // Always an even number of capture registers. This allows us to
- // unroll the loop once to add an operation between a load of a register
- // and the following use of that register.
- for (int i = 0; i < num_saved_registers_; i += 2) {
- __ ldr(r2, register_location(i));
- __ ldr(r3, register_location(i + 1));
- if (i == 0 && global_with_zero_length_check()) {
- // Keep capture start in r4 for the zero-length check later.
- __ mov(r4, r2);
- }
- if (mode_ == UC16) {
- __ add(r2, r1, Operand(r2, ASR, 1));
- __ add(r3, r1, Operand(r3, ASR, 1));
- } else {
- __ add(r2, r1, Operand(r2));
- __ add(r3, r1, Operand(r3));
- }
- __ str(r2, MemOperand(r0, kPointerSize, PostIndex));
- __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
- }
- }
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput));
- // Increment success counter.
- __ add(r0, r0, Operand(1));
- __ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ sub(r1, r1, Operand(num_saved_registers_));
- // Check whether we have enough room for another set of capture results.
- __ cmp(r1, Operand(num_saved_registers_));
- __ b(lt, &return_r0);
-
- __ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
- // Advance the location for output.
- __ add(r2, r2, Operand(num_saved_registers_ * kPointerSize));
- __ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
-
- // Prepare r0 to initialize registers with its value in the next run.
- __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
-
- if (global_with_zero_length_check()) {
- // Special case for zero-length matches.
- // r4: capture start index
- __ cmp(current_input_offset(), r4);
- // Not a zero-length match, restart.
- __ b(ne, &load_char_start_regexp);
- // Offset from the end is zero if we already reached the end.
- __ cmp(current_input_offset(), Operand::Zero());
- __ b(eq, &exit_label_);
- // Advance current position after a zero-length match.
- __ add(current_input_offset(),
- current_input_offset(),
- Operand((mode_ == UC16) ? 2 : 1));
- }
-
- __ b(&load_char_start_regexp);
- } else {
- __ mov(r0, Operand(SUCCESS));
- }
- }
-
- // Exit and return r0
- __ bind(&exit_label_);
- if (global()) {
- __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- }
-
- __ bind(&return_r0);
- // Skip sp past regexp registers and local variables..
- __ mov(sp, frame_pointer());
- // Restore registers r4..r11 and return (restoring lr to pc).
- __ ldm(ia_w, sp, registers_to_retain | pc.bit());
-
- // Backtrack code (branch target for conditional backtracks).
- if (backtrack_label_.is_linked()) {
- __ bind(&backtrack_label_);
- Backtrack();
- }
-
- Label exit_with_exception;
-
- // Preempt-code
- if (check_preempt_label_.is_linked()) {
- SafeCallTarget(&check_preempt_label_);
-
- CallCheckStackGuardState(r0);
- __ cmp(r0, Operand::Zero());
- // If returning non-zero, we should end execution with the given
- // result as return value.
- __ b(ne, &return_r0);
-
- // String might have moved: Reload end of string from frame.
- __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- SafeReturn();
- }
-
- // Backtrack stack overflow code.
- if (stack_overflow_label_.is_linked()) {
- SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
- Label grow_failed;
-
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, r0);
- __ mov(r0, backtrack_stackpointer());
- __ add(r1, frame_pointer(), Operand(kStackHighEnd));
- __ mov(r2, Operand(ExternalReference::isolate_address()));
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return NULL, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
- __ cmp(r0, Operand::Zero());
- __ b(eq, &exit_with_exception);
- // Otherwise use return value as new stack pointer.
- __ mov(backtrack_stackpointer(), r0);
- // Restore saved registers and continue.
- SafeReturn();
- }
-
- if (exit_with_exception.is_linked()) {
- // If any of the code above needed to exit with an exception.
- __ bind(&exit_with_exception);
- // Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ mov(r0, Operand(EXCEPTION));
- __ jmp(&return_r0);
- }
-
- CodeDesc code_desc;
- masm_->GetCode(&code_desc);
- Handle<Code> code = FACTORY->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
- return Handle<HeapObject>::cast(code);
-}
-
-
-void RegExpMacroAssemblerARM::GoTo(Label* to) {
- BranchOrBacktrack(al, to);
-}
-
-
-void RegExpMacroAssemblerARM::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- __ ldr(r0, register_location(reg));
- __ cmp(r0, Operand(comparand));
- BranchOrBacktrack(ge, if_ge);
-}
-
-
-void RegExpMacroAssemblerARM::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- __ ldr(r0, register_location(reg));
- __ cmp(r0, Operand(comparand));
- BranchOrBacktrack(lt, if_lt);
-}
-
-
-void RegExpMacroAssemblerARM::IfRegisterEqPos(int reg,
- Label* if_eq) {
- __ ldr(r0, register_location(reg));
- __ cmp(r0, Operand(current_input_offset()));
- BranchOrBacktrack(eq, if_eq);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerARM::Implementation() {
- return kARMImplementation;
-}
-
-
-void RegExpMacroAssemblerARM::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
- ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerARM::PopCurrentPosition() {
- Pop(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerARM::PopRegister(int register_index) {
- Pop(r0);
- __ str(r0, register_location(register_index));
-}
-
-
-static bool is_valid_memory_offset(int value) {
- if (value < 0) value = -value;
- return value < (1<<12);
-}
-
-
-void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
- if (label->is_bound()) {
- int target = label->pos();
- __ mov(r0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
- } else {
- int constant_offset = GetBacktrackConstantPoolEntry();
- masm_->label_at_put(label, constant_offset);
- // Reading pc-relative is based on the address 8 bytes ahead of
- // the current opcode.
- unsigned int offset_of_pc_register_read =
- masm_->pc_offset() + Assembler::kPcLoadDelta;
- int pc_offset_of_constant =
- constant_offset - offset_of_pc_register_read;
- ASSERT(pc_offset_of_constant < 0);
- if (is_valid_memory_offset(pc_offset_of_constant)) {
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ ldr(r0, MemOperand(pc, pc_offset_of_constant));
- } else {
- // Not a 12-bit offset, so it needs to be loaded from the constant
- // pool.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
- __ ldr(r0, MemOperand(pc, r0));
- }
- }
- Push(r0);
- CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerARM::PushCurrentPosition() {
- Push(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerARM::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- __ ldr(r0, register_location(register_index));
- Push(r0);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerARM::ReadCurrentPositionFromRegister(int reg) {
- __ ldr(current_input_offset(), register_location(reg));
-}
-
-
-void RegExpMacroAssemblerARM::ReadStackPointerFromRegister(int reg) {
- __ ldr(backtrack_stackpointer(), register_location(reg));
- __ ldr(r0, MemOperand(frame_pointer(), kStackHighEnd));
- __ add(backtrack_stackpointer(), backtrack_stackpointer(), Operand(r0));
-}
-
-
-void RegExpMacroAssemblerARM::SetCurrentPositionFromEnd(int by) {
- Label after_position;
- __ cmp(current_input_offset(), Operand(-by * char_size()));
- __ b(ge, &after_position);
- __ mov(current_input_offset(), Operand(-by * char_size()));
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&after_position);
-}
-
-
-void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
- ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ mov(r0, Operand(to));
- __ str(r0, register_location(register_index));
-}
-
-
-bool RegExpMacroAssemblerARM::Succeed() {
- __ jmp(&success_label_);
- return global();
-}
-
-
-void RegExpMacroAssemblerARM::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- if (cp_offset == 0) {
- __ str(current_input_offset(), register_location(reg));
- } else {
- __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
- __ str(r0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
- __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
- for (int reg = reg_from; reg <= reg_to; reg++) {
- __ str(r0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
- __ ldr(r1, MemOperand(frame_pointer(), kStackHighEnd));
- __ sub(r0, backtrack_stackpointer(), r1);
- __ str(r0, register_location(reg));
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
- // RegExp code frame pointer.
- __ mov(r2, frame_pointer());
- // Code* of self.
- __ mov(r1, Operand(masm_->CodeObject()));
- // r0 becomes return address pointer.
- ExternalReference stack_guard_check =
- ExternalReference::re_check_stack_guard_state(masm_->isolate());
- CallCFunctionUsingStub(stack_guard_check, num_arguments);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
-
-
-int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame) {
- Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
- if (isolate->stack_guard()->IsStackOverflow()) {
- isolate->StackOverflow();
- return EXCEPTION;
- }
-
- // If not real stack overflow the stack guard was used to interrupt
- // execution for another purpose.
-
- // If this is a direct call from JavaScript retry the RegExp forcing the call
- // through the runtime system. Currently the direct call cannot handle a GC.
- if (frame_entry<int>(re_frame, kDirectCall) == 1) {
- return RETRY;
- }
-
- // Prepare for possible GC.
- HandleScope handles(isolate);
- Handle<Code> code_handle(re_code);
-
- Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
-
- // Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
-
- ASSERT(re_code->instruction_start() <= *return_address);
- ASSERT(*return_address <=
- re_code->instruction_start() + re_code->instruction_size());
-
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
-
- if (*code_handle != re_code) { // Return address no longer valid
- int delta = code_handle->address() - re_code->address();
- // Overwrite the return address on the stack.
- *return_address += delta;
- }
-
- if (result->IsException()) {
- return EXCEPTION;
- }
-
- Handle<String> subject_tmp = subject;
- int slice_offset = 0;
-
- // Extract the underlying string and the slice offset.
- if (StringShape(*subject_tmp).IsCons()) {
- subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
- } else if (StringShape(*subject_tmp).IsSliced()) {
- SlicedString* slice = SlicedString::cast(*subject_tmp);
- subject_tmp = Handle<String>(slice->parent());
- slice_offset = slice->offset();
- }
-
- // String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
- // code cannot be used, and we need to restart regexp matching from
- // scratch (including, potentially, compiling a new version of the code).
- return RETRY;
- }
-
- // Otherwise, the content of the string might have moved. It must still
- // be a sequential or external string with the same content.
- // Update the start and end pointers in the stack frame to the current
- // location (whether it has actually moved or not).
- ASSERT(StringShape(*subject_tmp).IsSequential() ||
- StringShape(*subject_tmp).IsExternal());
-
- // The original start address of the characters to match.
- const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
-
- // Find the current start address of the same character at the current string
- // position.
- int start_index = frame_entry<int>(re_frame, kStartIndex);
- const byte* new_address = StringCharacterPosition(*subject_tmp,
- start_index + slice_offset);
-
- if (start_address != new_address) {
- // If there is a difference, update the object pointer and start and end
- // addresses in the RegExp stack frame to match the new value.
- const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
- int byte_length = static_cast<int>(end_address - start_address);
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- frame_entry<const byte*>(re_frame, kInputStart) = new_address;
- frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
- } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
- // Subject string might have been a ConsString that underwent
- // short-circuiting during GC. That will not change start_address but
- // will change pointer inside the subject handle.
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- }
-
- return 0;
-}
-
-
-MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
- ASSERT(register_index < (1<<30));
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- return MemOperand(frame_pointer(),
- kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerARM::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- __ cmp(current_input_offset(), Operand(-cp_offset * char_size()));
- BranchOrBacktrack(ge, on_outside_input);
-}
-
-
-void RegExpMacroAssemblerARM::BranchOrBacktrack(Condition condition,
- Label* to) {
- if (condition == al) { // Unconditional.
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ jmp(to);
- return;
- }
- if (to == NULL) {
- __ b(condition, &backtrack_label_);
- return;
- }
- __ b(condition, to);
-}
-
-
-void RegExpMacroAssemblerARM::SafeCall(Label* to, Condition cond) {
- __ bl(to, cond);
-}
-
-
-void RegExpMacroAssemblerARM::SafeReturn() {
- __ pop(lr);
- __ add(pc, lr, Operand(masm_->CodeObject()));
-}
-
-
-void RegExpMacroAssemblerARM::SafeCallTarget(Label* name) {
- __ bind(name);
- __ sub(lr, lr, Operand(masm_->CodeObject()));
- __ push(lr);
-}
-
-
-void RegExpMacroAssemblerARM::Push(Register source) {
- ASSERT(!source.is(backtrack_stackpointer()));
- __ str(source,
- MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex));
-}
-
-
-void RegExpMacroAssemblerARM::Pop(Register target) {
- ASSERT(!target.is(backtrack_stackpointer()));
- __ ldr(target,
- MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex));
-}
-
-
-void RegExpMacroAssemblerARM::CheckPreemption() {
- // Check for preemption.
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ cmp(sp, r0);
- SafeCall(&check_preempt_label_, ls);
-}
-
-
-void RegExpMacroAssemblerARM::CheckStackLimit() {
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ cmp(backtrack_stackpointer(), Operand(r0));
- SafeCall(&stack_overflow_label_, ls);
-}
-
-
-void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
- __ CheckConstPool(false, false);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- backtrack_constant_pool_offset_ = masm_->pc_offset();
- for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
- __ emit(0);
- }
-
- backtrack_constant_pool_capacity_ = kBacktrackConstantPoolSize;
-}
-
-
-int RegExpMacroAssemblerARM::GetBacktrackConstantPoolEntry() {
- while (backtrack_constant_pool_capacity_ > 0) {
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- if (masm_->pc_offset() - offset < 2 * KB) {
- return offset;
- }
- }
- Label new_pool_skip;
- __ jmp(&new_pool_skip);
- EmitBacktrackConstantPool();
- __ bind(&new_pool_skip);
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- return offset;
-}
-
-
-void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
- ExternalReference function,
- int num_arguments) {
- // Must pass all arguments in registers. The stub pushes on the stack.
- ASSERT(num_arguments <= 4);
- __ mov(code_pointer(), Operand(function));
- RegExpCEntryStub stub;
- __ CallStub(&stub);
- if (OS::ActivationFrameAlignment() != 0) {
- __ ldr(sp, MemOperand(sp, 0));
- }
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
-}
-
-
-bool RegExpMacroAssemblerARM::CanReadUnaligned() {
- return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
-}
-
-
-void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- Register offset = current_input_offset();
- if (cp_offset != 0) {
- // r4 is not being used to store the capture start index at this point.
- __ add(r4, current_input_offset(), Operand(cp_offset * char_size()));
- offset = r4;
- }
- // The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
- // and the operating system running on the target allow it.
- // If unaligned load/stores are not supported then this function must only
- // be used to load a single character at a time.
- if (!CanReadUnaligned()) {
- ASSERT(characters == 1);
- }
-
- if (mode_ == ASCII) {
- if (characters == 4) {
- __ ldr(current_character(), MemOperand(end_of_input_address(), offset));
- } else if (characters == 2) {
- __ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
- } else {
- ASSERT(characters == 1);
- __ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
- }
- } else {
- ASSERT(mode_ == UC16);
- if (characters == 2) {
- __ ldr(current_character(), MemOperand(end_of_input_address(), offset));
- } else {
- ASSERT(characters == 1);
- __ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
- }
- }
-}
-
-
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- int stack_alignment = OS::ActivationFrameAlignment();
- if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
- // Stack is already aligned for call, so decrement by alignment
- // to make room for storing the link register.
- __ str(lr, MemOperand(sp, stack_alignment, NegPreIndex));
- __ mov(r0, sp);
- __ Call(r5);
- __ ldr(pc, MemOperand(sp, stack_alignment, PostIndex));
-}
-
-#undef __
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h b/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h
deleted file mode 100644
index c45669a..0000000
--- a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-
-#include "arm/assembler-arm.h"
-#include "arm/assembler-arm-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-#ifndef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone* zone);
- virtual ~RegExpMacroAssemblerARM();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
- virtual bool CanReadUnaligned();
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
-
- private:
- // Offsets from frame_pointer() of function parameters and stored registers.
- static const int kFramePointer = 0;
-
- // Above the frame pointer - Stored registers and stack passed parameters.
- // Register 4..11.
- static const int kStoredRegisters = kFramePointer;
- // Return address (stored from link register, read into pc on return).
- static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
- static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
- // Stack parameters placed by caller.
- static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
-
- // Below the frame pointer.
- // Register parameters stored by setup code.
- static const int kInputEnd = kFramePointer - kPointerSize;
- static const int kInputStart = kInputEnd - kPointerSize;
- static const int kStartIndex = kInputStart - kPointerSize;
- static const int kInputString = kStartIndex - kPointerSize;
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- static const int kBacktrackConstantPoolSize = 4;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
- void EmitBacktrackConstantPool();
- int GetBacktrackConstantPoolEntry();
-
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState(Register scratch);
-
- // The ebp-relative location of a regexp register.
- MemOperand register_location(int register_index);
-
- // Register holding the current input position as negative offset from
- // the end of the string.
- inline Register current_input_offset() { return r6; }
-
- // The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return r7; }
-
- // Register holding address of the end of the input string.
- inline Register end_of_input_address() { return r10; }
-
- // Register holding the frame address. Local variables, parameters and
- // regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- inline Register backtrack_stackpointer() { return r8; }
-
- // Register holding pointer to the current code object.
- inline Register code_pointer() { return r5; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to);
-
- // Call and return internally in the generated code in a way that
- // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
- inline void SafeCall(Label* to, Condition cond = al);
- inline void SafeReturn();
- inline void SafeCallTarget(Label* name);
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // and increments it by a word size.
- inline void Pop(Register target);
-
- // Calls a C function and cleans up the frame alignment done by
- // by FrameAlign. The called function *is* allowed to trigger a garbage
- // collection, but may not take more than four arguments (no arguments
- // passed on the stack), and the first argument will be a pointer to the
- // return address.
- inline void CallCFunctionUsingStub(ExternalReference function,
- int num_arguments);
-
-
- MacroAssembler* masm_;
-
- // Which mode to generate code for (ASCII or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Manage a small pre-allocated pool for writing label targets
- // to for pushing backtrack addresses.
- int backtrack_constant_pool_offset_;
- int backtrack_constant_pool_capacity_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-
-}} // namespace v8::internal
-
-#endif // V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/simulator-arm.cc b/src/3rdparty/v8/src/arm/simulator-arm.cc
deleted file mode 100644
index b7bc839..0000000
--- a/src/3rdparty/v8/src/arm/simulator-arm.cc
+++ /dev/null
@@ -1,3475 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-#include <math.h>
-#include <cstdarg>
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "disasm.h"
-#include "assembler.h"
-#include "codegen.h"
-#include "arm/constants-arm.h"
-#include "arm/simulator-arm.h"
-
-#if defined(USE_SIMULATOR)
-
-// Only build the simulator if not compiling for real ARM hardware.
-namespace v8 {
-namespace internal {
-
-// This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent way through
-// ::v8::internal::OS in the same way as SNPrintF is that the
-// Windows C Run-Time Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
-
-// The ArmDebugger class is used by the simulator while debugging simulated ARM
-// code.
-class ArmDebugger {
- public:
- explicit ArmDebugger(Simulator* sim) : sim_(sim) { }
- ~ArmDebugger();
-
- void Stop(Instruction* instr);
- void Debug();
-
- private:
- static const Instr kBreakpointInstr =
- (al | (7*B25) | (1*B24) | kBreakpoint);
- static const Instr kNopInstr = (al | (13*B21));
-
- Simulator* sim_;
-
- int32_t GetRegisterValue(int regnum);
- double GetRegisterPairDoubleValue(int regnum);
- double GetVFPDoubleRegisterValue(int regnum);
- bool GetValue(const char* desc, int32_t* value);
- bool GetVFPSingleValue(const char* desc, float* value);
- bool GetVFPDoubleValue(const char* desc, double* value);
-
- // Set or delete a breakpoint. Returns true if successful.
- bool SetBreakpoint(Instruction* breakpc);
- bool DeleteBreakpoint(Instruction* breakpc);
-
- // Undo and redo all breakpoints. This is needed to bracket disassembly and
- // execution to skip past breakpoints when run from the debugger.
- void UndoBreakpoints();
- void RedoBreakpoints();
-};
-
-
-ArmDebugger::~ArmDebugger() {
-}
-
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitializeCoverage() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "aw+");
- }
-}
-
-
-void ArmDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->SvcValue() & kStopCodeMask;
- // Retrieve the encoded address, which comes just after this stop.
- char** msg_address =
- reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
- char* msg = *msg_address;
- ASSERT(msg != NULL);
-
- // Update this stop description.
- if (isWatchedStop(code) && !watched_stops[code].desc) {
- watched_stops[code].desc = msg;
- }
-
- if (strlen(msg) > 0) {
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", msg);
- fflush(coverage_log);
- }
- // Overwrite the instruction and address with nops.
- instr->SetInstructionBits(kNopInstr);
- reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
- }
- sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
-}
-
-#else // ndef GENERATED_CODE_COVERAGE
-
-static void InitializeCoverage() {
-}
-
-
-void ArmDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->SvcValue() & kStopCodeMask;
- // Retrieve the encoded address, which comes just after this stop.
- char* msg = *reinterpret_cast<char**>(sim_->get_pc()
- + Instruction::kInstrSize);
- // Update this stop description.
- if (sim_->isWatchedStop(code) && !sim_->watched_stops[code].desc) {
- sim_->watched_stops[code].desc = msg;
- }
- // Print the stop message and code if it is not the default code.
- if (code != kMaxStopCode) {
- PrintF("Simulator hit stop %u: %s\n", code, msg);
- } else {
- PrintF("Simulator hit %s\n", msg);
- }
- sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
- Debug();
-}
-#endif
-
-
-int32_t ArmDebugger::GetRegisterValue(int regnum) {
- if (regnum == kPCRegister) {
- return sim_->get_pc();
- } else {
- return sim_->get_register(regnum);
- }
-}
-
-
-double ArmDebugger::GetRegisterPairDoubleValue(int regnum) {
- return sim_->get_double_from_register_pair(regnum);
-}
-
-
-double ArmDebugger::GetVFPDoubleRegisterValue(int regnum) {
- return sim_->get_double_from_d_register(regnum);
-}
-
-
-bool ArmDebugger::GetValue(const char* desc, int32_t* value) {
- int regnum = Registers::Number(desc);
- if (regnum != kNoRegister) {
- *value = GetRegisterValue(regnum);
- return true;
- } else {
- if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc + 2, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
- } else {
- return SScanF(desc, "%u", reinterpret_cast<uint32_t*>(value)) == 1;
- }
- }
- return false;
-}
-
-
-bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
- bool is_double;
- int regnum = VFPRegisters::Number(desc, &is_double);
- if (regnum != kNoRegister && !is_double) {
- *value = sim_->get_float_from_s_register(regnum);
- return true;
- }
- return false;
-}
-
-
-bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
- bool is_double;
- int regnum = VFPRegisters::Number(desc, &is_double);
- if (regnum != kNoRegister && is_double) {
- *value = sim_->get_double_from_d_register(regnum);
- return true;
- }
- return false;
-}
-
-
-bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
- // Check if a breakpoint can be set. If not return without any side-effects.
- if (sim_->break_pc_ != NULL) {
- return false;
- }
-
- // Set the breakpoint.
- sim_->break_pc_ = breakpc;
- sim_->break_instr_ = breakpc->InstructionBits();
- // Not setting the breakpoint instruction in the code itself. It will be set
- // when the debugger shell continues.
- return true;
-}
-
-
-bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
-
- sim_->break_pc_ = NULL;
- sim_->break_instr_ = 0;
- return true;
-}
-
-
-void ArmDebugger::UndoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
-}
-
-
-void ArmDebugger::RedoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
- }
-}
-
-
-void ArmDebugger::Debug() {
- intptr_t last_pc = -1;
- bool done = false;
-
-#define COMMAND_SIZE 63
-#define ARG_SIZE 255
-
-#define STR(a) #a
-#define XSTR(a) STR(a)
-
- char cmd[COMMAND_SIZE + 1];
- char arg1[ARG_SIZE + 1];
- char arg2[ARG_SIZE + 1];
- char* argv[3] = { cmd, arg1, arg2 };
-
- // make sure to have a proper terminating character if reaching the limit
- cmd[COMMAND_SIZE] = 0;
- arg1[ARG_SIZE] = 0;
- arg2[ARG_SIZE] = 0;
-
- // Undo all set breakpoints while running in the debugger shell. This will
- // make them invisible to all commands.
- UndoBreakpoints();
-
- while (!done && !sim_->has_bad_pc()) {
- if (last_pc != sim_->get_pc()) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(sim_->get_pc()));
- PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
- last_pc = sim_->get_pc();
- }
- char* line = ReadLine("sim> ");
- if (line == NULL) {
- break;
- } else {
- char* last_input = sim_->last_debugger_input();
- if (strcmp(line, "\n") == 0 && last_input != NULL) {
- line = last_input;
- } else {
- // Ownership is transferred to sim_;
- sim_->set_last_debugger_input(line);
- }
- // Use sscanf to parse the individual parts of the command line. At the
- // moment no command expects more than two parameters.
- int argc = SScanF(line,
- "%" XSTR(COMMAND_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s",
- cmd, arg1, arg2);
- if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
- } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
- // Execute the one instruction we broke at with breakpoints disabled.
- sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
- // Leave the debugger shell.
- done = true;
- } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
- if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
- int32_t value;
- float svalue;
- double dvalue;
- if (strcmp(arg1, "all") == 0) {
- for (int i = 0; i < kNumRegisters; i++) {
- value = GetRegisterValue(i);
- PrintF("%3s: 0x%08x %10d", Registers::Name(i), value, value);
- if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
- i < 8 &&
- (i % 2) == 0) {
- dvalue = GetRegisterPairDoubleValue(i);
- PrintF(" (%f)\n", dvalue);
- } else {
- PrintF("\n");
- }
- }
- for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
- dvalue = GetVFPDoubleRegisterValue(i);
- uint64_t as_words = BitCast<uint64_t>(dvalue);
- PrintF("%3s: %f 0x%08x %08x\n",
- VFPRegisters::Name(i, true),
- dvalue,
- static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
- }
- } else {
- if (GetValue(arg1, &value)) {
- PrintF("%s: 0x%08x %d \n", arg1, value, value);
- } else if (GetVFPSingleValue(arg1, &svalue)) {
- uint32_t as_word = BitCast<uint32_t>(svalue);
- PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
- } else if (GetVFPDoubleValue(arg1, &dvalue)) {
- uint64_t as_words = BitCast<uint64_t>(dvalue);
- PrintF("%s: %f 0x%08x %08x\n",
- arg1,
- dvalue,
- static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- }
- } else {
- PrintF("print <register>\n");
- }
- } else if ((strcmp(cmd, "po") == 0)
- || (strcmp(cmd, "printobject") == 0)) {
- if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- Object* obj = reinterpret_cast<Object*>(value);
- PrintF("%s: \n", arg1);
-#ifdef DEBUG
- obj->PrintLn();
-#else
- obj->ShortPrint();
- PrintF("\n");
-#endif
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- PrintF("printobject <value>\n");
- }
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
- int32_t* cur = NULL;
- int32_t* end = NULL;
- int next_arg = 1;
-
- if (strcmp(cmd, "stack") == 0) {
- cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
- } else { // "mem"
- int32_t value;
- if (!GetValue(arg1, &value)) {
- PrintF("%s unrecognized\n", arg1);
- continue;
- }
- cur = reinterpret_cast<int32_t*>(value);
- next_arg++;
- }
-
- int32_t words;
- if (argc == next_arg) {
- words = 10;
- } else {
- if (!GetValue(argv[next_arg], &words)) {
- words = 10;
- }
- }
- end = cur + words;
-
- while (cur < end) {
- PrintF(" 0x%08x: 0x%08x %10d",
- reinterpret_cast<intptr_t>(cur), *cur, *cur);
- HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
- int value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
- if (current_heap->Contains(obj) || ((value & 1) == 0)) {
- PrintF(" (");
- if ((value & 1) == 0) {
- PrintF("smi %d", value / 2);
- } else {
- obj->ShortPrint();
- }
- PrintF(")");
- }
- PrintF("\n");
- cur++;
- }
- } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
-
- byte* prev = NULL;
- byte* cur = NULL;
- byte* end = NULL;
-
- if (argc == 1) {
- cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstrSize);
- } else if (argc == 2) {
- int regnum = Registers::Number(arg1);
- if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
- // The argument is an address or a register name.
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte*>(value);
- // Disassemble 10 instructions at <arg1>.
- end = cur + (10 * Instruction::kInstrSize);
- }
- } else {
- // The argument is the number of instructions.
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte*>(sim_->get_pc());
- // Disassemble <arg1> instructions.
- end = cur + (value * Instruction::kInstrSize);
- }
- }
- } else {
- int32_t value1;
- int32_t value2;
- if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * Instruction::kInstrSize);
- }
- }
-
- while (cur < end) {
- prev = cur;
- cur += dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n",
- reinterpret_cast<intptr_t>(prev), buffer.start());
- }
- } else if (strcmp(cmd, "gdb") == 0) {
- PrintF("relinquishing control to gdb\n");
- v8::internal::OS::DebugBreak();
- PrintF("regaining control from gdb\n");
- } else if (strcmp(cmd, "break") == 0) {
- if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
- PrintF("setting breakpoint failed\n");
- }
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- PrintF("break <address>\n");
- }
- } else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(NULL)) {
- PrintF("deleting breakpoint failed\n");
- }
- } else if (strcmp(cmd, "flags") == 0) {
- PrintF("N flag: %d; ", sim_->n_flag_);
- PrintF("Z flag: %d; ", sim_->z_flag_);
- PrintF("C flag: %d; ", sim_->c_flag_);
- PrintF("V flag: %d\n", sim_->v_flag_);
- PrintF("INVALID OP flag: %d; ", sim_->inv_op_vfp_flag_);
- PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
- PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
- PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
- PrintF("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
- } else if (strcmp(cmd, "stop") == 0) {
- int32_t value;
- intptr_t stop_pc = sim_->get_pc() - 2 * Instruction::kInstrSize;
- Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
- Instruction* msg_address =
- reinterpret_cast<Instruction*>(stop_pc + Instruction::kInstrSize);
- if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
- // Remove the current stop.
- if (sim_->isStopInstruction(stop_instr)) {
- stop_instr->SetInstructionBits(kNopInstr);
- msg_address->SetInstructionBits(kNopInstr);
- } else {
- PrintF("Not at debugger stop.\n");
- }
- } else if (argc == 3) {
- // Print information about all/the specified breakpoint(s).
- if (strcmp(arg1, "info") == 0) {
- if (strcmp(arg2, "all") == 0) {
- PrintF("Stop information:\n");
- for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
- sim_->PrintStopInfo(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->PrintStopInfo(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- } else if (strcmp(arg1, "enable") == 0) {
- // Enable all/the specified breakpoint(s).
- if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
- sim_->EnableStop(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->EnableStop(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- } else if (strcmp(arg1, "disable") == 0) {
- // Disable all/the specified breakpoint(s).
- if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
- sim_->DisableStop(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->DisableStop(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- }
- } else {
- PrintF("Wrong usage. Use help command for more information.\n");
- }
- } else if ((strcmp(cmd, "t") == 0) || strcmp(cmd, "trace") == 0) {
- ::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
- PrintF("Trace of executed instructions is %s\n",
- ::v8::internal::FLAG_trace_sim ? "on" : "off");
- } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
- PrintF("cont\n");
- PrintF(" continue execution (alias 'c')\n");
- PrintF("stepi\n");
- PrintF(" step one instruction (alias 'si')\n");
- PrintF("print <register>\n");
- PrintF(" print register content (alias 'p')\n");
- PrintF(" use register name 'all' to print all registers\n");
- PrintF(" add argument 'fp' to print register pair double values\n");
- PrintF("printobject <register>\n");
- PrintF(" print an object from a register (alias 'po')\n");
- PrintF("flags\n");
- PrintF(" print flags\n");
- PrintF("stack [<words>]\n");
- PrintF(" dump stack content, default dump 10 words)\n");
- PrintF("mem <address> [<words>]\n");
- PrintF(" dump memory content, default dump 10 words)\n");
- PrintF("disasm [<instructions>]\n");
- PrintF("disasm [<address/register>]\n");
- PrintF("disasm [[<address/register>] <instructions>]\n");
- PrintF(" disassemble code, default is 10 instructions\n");
- PrintF(" from pc (alias 'di')\n");
- PrintF("gdb\n");
- PrintF(" enter gdb\n");
- PrintF("break <address>\n");
- PrintF(" set a break point on the address\n");
- PrintF("del\n");
- PrintF(" delete the breakpoint\n");
- PrintF("trace (alias 't')\n");
- PrintF(" toogle the tracing of all executed statements\n");
- PrintF("stop feature:\n");
- PrintF(" Description:\n");
- PrintF(" Stops are debug instructions inserted by\n");
- PrintF(" the Assembler::stop() function.\n");
- PrintF(" When hitting a stop, the Simulator will\n");
- PrintF(" stop and and give control to the ArmDebugger.\n");
- PrintF(" The first %d stop codes are watched:\n",
- Simulator::kNumOfWatchedStops);
- PrintF(" - They can be enabled / disabled: the Simulator\n");
- PrintF(" will / won't stop when hitting them.\n");
- PrintF(" - The Simulator keeps track of how many times they \n");
- PrintF(" are met. (See the info command.) Going over a\n");
- PrintF(" disabled stop still increases its counter. \n");
- PrintF(" Commands:\n");
- PrintF(" stop info all/<code> : print infos about number <code>\n");
- PrintF(" or all stop(s).\n");
- PrintF(" stop enable/disable all/<code> : enables / disables\n");
- PrintF(" all or number <code> stop(s)\n");
- PrintF(" stop unstop\n");
- PrintF(" ignore the stop instruction at the current location\n");
- PrintF(" from now on\n");
- } else {
- PrintF("Unknown command: %s\n", cmd);
- }
- }
- }
-
- // Add all the breakpoints back to stop execution and enter the debugger
- // shell when hit.
- RedoBreakpoints();
-
-#undef COMMAND_SIZE
-#undef ARG_SIZE
-
-#undef STR
-#undef XSTR
-}
-
-
-static bool ICacheMatch(void* one, void* two) {
- ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
- ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
- return one == two;
-}
-
-
-static uint32_t ICacheHash(void* key) {
- return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
-}
-
-
-static bool AllOnOnePage(uintptr_t start, int size) {
- intptr_t start_page = (start & ~CachePage::kPageMask);
- intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
- return start_page == end_page;
-}
-
-
-void Simulator::set_last_debugger_input(char* input) {
- DeleteArray(last_debugger_input_);
- last_debugger_input_ = input;
-}
-
-
-void Simulator::FlushICache(v8::internal::HashMap* i_cache,
- void* start_addr,
- size_t size) {
- intptr_t start = reinterpret_cast<intptr_t>(start_addr);
- int intra_line = (start & CachePage::kLineMask);
- start -= intra_line;
- size += intra_line;
- size = ((size - 1) | CachePage::kLineMask) + 1;
- int offset = (start & CachePage::kPageMask);
- while (!AllOnOnePage(start, size - 1)) {
- int bytes_to_flush = CachePage::kPageSize - offset;
- FlushOnePage(i_cache, start, bytes_to_flush);
- start += bytes_to_flush;
- size -= bytes_to_flush;
- ASSERT_EQ(0, start & CachePage::kPageMask);
- offset = 0;
- }
- if (size != 0) {
- FlushOnePage(i_cache, start, size);
- }
-}
-
-
// Looks up the CachePage for a page-aligned key, lazily creating it on the
// first access (Lookup with insert=true returns an entry with NULL value).
CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
  v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
                                                        ICacheHash(page),
                                                        true);
  if (entry->value == NULL) {
    CachePage* new_page = new CachePage();
    entry->value = new_page;
  }
  return reinterpret_cast<CachePage*>(entry->value);
}
-
-
// Flush from start up to and not including start + size.
// Preconditions: the range is line-aligned and confined to a single page.
void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
                             intptr_t start,
                             int size) {
  ASSERT(size <= CachePage::kPageSize);
  ASSERT(AllOnOnePage(start, size - 1));
  ASSERT((start & CachePage::kLineMask) == 0);
  ASSERT((size & CachePage::kLineMask) == 0);
  void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
  int offset = (start & CachePage::kPageMask);
  CachePage* cache_page = GetCachePage(i_cache, page);
  char* valid_bytemap = cache_page->ValidityByte(offset);
  // One validity byte per cache line, hence the kLineShift scaling.
  memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
-
-
// Validates the simulated i-cache for one instruction fetch. On a hit the
// cached copy must equal memory (catches missing FlushICache calls); on a
// miss the containing cache line is loaded and marked valid.
void Simulator::CheckICache(v8::internal::HashMap* i_cache,
                            Instruction* instr) {
  intptr_t address = reinterpret_cast<intptr_t>(instr);
  void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
  void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
  int offset = (address & CachePage::kPageMask);
  CachePage* cache_page = GetCachePage(i_cache, page);
  char* cache_valid_byte = cache_page->ValidityByte(offset);
  bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
  char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
  if (cache_hit) {
    // Check that the data in memory matches the contents of the I-cache.
    CHECK(memcmp(reinterpret_cast<void*>(instr),
                 cache_page->CachedData(offset),
                 Instruction::kInstrSize) == 0);
  } else {
    // Cache miss. Load memory into the cache.
    memcpy(cached_line, line, CachePage::kLineLength);
    *cache_valid_byte = CachePage::LINE_VALID;
  }
}
-
-
// One-time per-isolate setup: installs the external-reference redirector so
// generated calls to host functions are trapped by the simulator.
void Simulator::Initialize(Isolate* isolate) {
  if (isolate->simulator_initialized()) return;
  isolate->set_simulator_initialized(true);
  ::v8::internal::ExternalReference::set_redirector(isolate,
                                                    &RedirectExternalReference);
}
-
-
// Constructs a simulator bound to |isolate|: shares the per-isolate i-cache,
// allocates the simulated stack, and zeroes all architectural state
// (core registers, VFP registers, APSR and FPSCR flags).
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
  // The i-cache is shared by all simulator instances of this isolate;
  // create it on first use.
  i_cache_ = isolate_->simulator_i_cache();
  if (i_cache_ == NULL) {
    i_cache_ = new v8::internal::HashMap(&ICacheMatch);
    isolate_->set_simulator_i_cache(i_cache_);
  }
  Initialize(isolate);
  // Set up simulator support first. Some of this information is needed to
  // setup the architecture state.
  // NOTE(review): the malloc result is not NULL-checked; a failed
  // allocation would fault when the stack pointer is set up below.
  size_t stack_size = 1 * 1024*1024;  // allocate 1MB for stack
  stack_ = reinterpret_cast<char*>(malloc(stack_size));
  pc_modified_ = false;
  icount_ = 0;
  break_pc_ = NULL;
  break_instr_ = 0;

  // Set up architecture state.
  // All registers are initialized to zero to start with.
  for (int i = 0; i < num_registers; i++) {
    registers_[i] = 0;
  }
  n_flag_ = false;
  z_flag_ = false;
  c_flag_ = false;
  v_flag_ = false;

  // Initializing VFP registers.
  // All registers are initialized to zero to start with
  // even though s_registers_ & d_registers_ share the same
  // physical registers in the target.
  for (int i = 0; i < num_d_registers * 2; i++) {
    vfp_registers_[i] = 0;
  }
  n_flag_FPSCR_ = false;
  z_flag_FPSCR_ = false;
  c_flag_FPSCR_ = false;
  v_flag_FPSCR_ = false;
  FPSCR_rounding_mode_ = RZ;

  inv_op_vfp_flag_ = false;
  div_zero_vfp_flag_ = false;
  overflow_vfp_flag_ = false;
  underflow_vfp_flag_ = false;
  inexact_vfp_flag_ = false;

  // The sp is initialized to point to the bottom (high address) of the
  // allocated stack area. To be safe in potential stack underflows we leave
  // some buffer below.
  registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
  // The lr and pc are initialized to a known bad value that will cause an
  // access violation if the simulator ever tries to execute it.
  registers_[pc] = bad_lr;
  registers_[lr] = bad_lr;
  InitializeCoverage();

  last_debugger_input_ = NULL;
}
-
-
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a svc (Supervisor Call) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the svc instruction so the simulator knows what to call.
class Redirection {
 public:
  // Registers a new redirection for |external_function|: embeds a svc
  // instruction whose address is handed to generated code, flushes the
  // simulated i-cache over it, and links the entry into the isolate's
  // singly-linked redirection list.
  Redirection(void* external_function, ExternalReference::Type type)
      : external_function_(external_function),
        swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
        type_(type),
        next_(NULL) {
    Isolate* isolate = Isolate::Current();
    next_ = isolate->simulator_redirection();
    Simulator::current(isolate)->
        FlushICache(isolate->simulator_i_cache(),
                    reinterpret_cast<void*>(&swi_instruction_),
                    Instruction::kInstrSize);
    isolate->set_simulator_redirection(this);
  }

  // Address generated code jumps to instead of the real external function.
  void* address_of_swi_instruction() {
    return reinterpret_cast<void*>(&swi_instruction_);
  }

  void* external_function() { return external_function_; }
  ExternalReference::Type type() { return type_; }

  // Returns the existing redirection for |external_function| if one was
  // already created for this isolate, otherwise allocates a new one.
  static Redirection* Get(void* external_function,
                          ExternalReference::Type type) {
    Isolate* isolate = Isolate::Current();
    Redirection* current = isolate->simulator_redirection();
    for (; current != NULL; current = current->next_) {
      if (current->external_function_ == external_function) return current;
    }
    return new Redirection(external_function, type);
  }

  // Recovers the Redirection object from the address of its embedded svc
  // instruction (the inverse of address_of_swi_instruction).
  static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
    char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
    char* addr_of_redirection =
        addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
    return reinterpret_cast<Redirection*>(addr_of_redirection);
  }

 private:
  void* external_function_;
  uint32_t swi_instruction_;
  ExternalReference::Type type_;
  Redirection* next_;
};
-
-
-void* Simulator::RedirectExternalReference(void* external_function,
- ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
- return redirection->address_of_swi_instruction();
-}
-
-
-// Get the active Simulator for the current thread.
// Returns the Simulator for the current thread within |isolate|, creating
// one lazily on first use and caching it in the per-isolate thread data.
Simulator* Simulator::current(Isolate* isolate) {
  v8::internal::Isolate::PerIsolateThreadData* isolate_data =
      isolate->FindOrAllocatePerThreadDataForThisThread();
  ASSERT(isolate_data != NULL);

  Simulator* sim = isolate_data->simulator();
  if (sim == NULL) {
    // TODO(146): delete the simulator object when a thread/isolate goes away.
    sim = new Simulator(isolate);
    isolate_data->set_simulator(sim);
  }
  return sim;
}
-
-
-// Sets the register in the architecture state. It will also deal with updating
-// Simulator internal state for special registers such as PC.
// Sets the register in the architecture state. It will also deal with
// updating Simulator internal state for special registers such as PC:
// writing pc records the modification so the dispatch loop does not
// auto-increment past an explicit branch.
void Simulator::set_register(int reg, int32_t value) {
  ASSERT((reg >= 0) && (reg < num_registers));
  if (reg == pc) {
    pc_modified_ = true;
  }
  registers_[reg] = value;
}
-
-
-// Get the register from the architecture state. This function does handle
-// the special case of accessing the PC register.
// Get the register from the architecture state. This function does handle
// the special case of accessing the PC register: a read of pc yields the
// architectural pc + 8 (kPCReadOffset), as on real ARM hardware.
int32_t Simulator::get_register(int reg) const {
  ASSERT((reg >= 0) && (reg < num_registers));
  // Stupid code added to avoid bug in GCC.
  // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
  // (redundant bounds check that suppresses a bogus array-bounds warning;
  // do not remove without re-checking affected GCC versions).
  if (reg >= num_registers) return 0;
  // End stupid code.
  return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
}
-
-
-double Simulator::get_double_from_register_pair(int reg) {
- ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
-
- double dm_val = 0.0;
- // Read the bits from the unsigned integer register_[] array
- // into the double precision floating point value and return it.
- char buffer[2 * sizeof(vfp_registers_[0])];
- memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
- memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
- return(dm_val);
-}
-
-
// Writes two consecutive core registers from a two-word source.
// NOTE(review): the bound is checked against num_d_registers although the
// write targets the core registers_ array — looks suspicious; confirm the
// intended bound against the callers before changing it.
void Simulator::set_dw_register(int dreg, const int* dbl) {
  ASSERT((dreg >= 0) && (dreg < num_d_registers));
  registers_[dreg] = dbl[0];
  registers_[dreg + 1] = dbl[1];
}
-
-
-// Raw access to the PC register.
// Raw access to the PC register. Marks the pc as explicitly modified so the
// execution loop skips its automatic increment.
void Simulator::set_pc(int32_t value) {
  pc_modified_ = true;
  registers_[pc] = value;
}
-
-
-bool Simulator::has_bad_pc() const {
- return ((registers_[pc] == bad_lr) || (registers_[pc] == end_sim_pc));
-}
-
-
-// Raw access to the PC register without the special adjustment when reading.
// Raw access to the PC register without the special adjustment when reading
// (unlike get_register(pc), which adds kPCReadOffset).
int32_t Simulator::get_pc() const {
  return registers_[pc];
}
-
-
-// Getting from and setting into VFP registers.
// Stores the raw 32-bit pattern of a VFP single-precision register.
void Simulator::set_s_register(int sreg, unsigned int value) {
  ASSERT((sreg >= 0) && (sreg < num_s_registers));
  vfp_registers_[sreg] = value;
}


// Reads the raw 32-bit pattern of a VFP single-precision register.
unsigned int Simulator::get_s_register(int sreg) const {
  ASSERT((sreg >= 0) && (sreg < num_s_registers));
  return vfp_registers_[sreg];
}
-
-
// Stores |value|'s raw bits into VFP register |reg_index|; register_size is
// 1 for an s-register (32 bits) and 2 for a d-register (64 bits). The copy
// goes through a char buffer to avoid strict-aliasing violations.
template<class InputType, int register_size>
void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
  ASSERT(reg_index >= 0);
  if (register_size == 1) ASSERT(reg_index < num_s_registers);
  if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());

  char buffer[register_size * sizeof(vfp_registers_[0])];
  memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
  memcpy(&vfp_registers_[reg_index * register_size], buffer,
         register_size * sizeof(vfp_registers_[0]));
}
-
-
// Reads VFP register |reg_index| and reinterprets its raw bits as
// ReturnType; register_size is 1 for an s-register, 2 for a d-register.
// Mirrors SetVFPRegister, using a char buffer to respect strict aliasing.
template<class ReturnType, int register_size>
ReturnType Simulator::GetFromVFPRegister(int reg_index) {
  ASSERT(reg_index >= 0);
  if (register_size == 1) ASSERT(reg_index < num_s_registers);
  if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());

  ReturnType value = 0;
  char buffer[register_size * sizeof(vfp_registers_[0])];
  memcpy(buffer, &vfp_registers_[register_size * reg_index],
         register_size * sizeof(vfp_registers_[0]));
  memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
  return value;
}
-
-
-// For use in calls that take two double values, constructed either
-// from r0-r3 or d0 and d1.
-void Simulator::GetFpArgs(double* x, double* y) {
- if (use_eabi_hardfloat()) {
- *x = vfp_registers_[0];
- *y = vfp_registers_[1];
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- // Registers 2 and 3 -> y.
- memcpy(buffer, registers_ + 2, sizeof(*y));
- memcpy(y, buffer, sizeof(*y));
- }
-}
-
-// For use in calls that take one double value, constructed either
-// from r0 and r1 or d0.
-void Simulator::GetFpArgs(double* x) {
- if (use_eabi_hardfloat()) {
- *x = vfp_registers_[0];
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- }
-}
-
-
-// For use in calls that take one double value constructed either
-// from r0 and r1 or d0 and one integer value.
-void Simulator::GetFpArgs(double* x, int32_t* y) {
- if (use_eabi_hardfloat()) {
- *x = vfp_registers_[0];
- *y = registers_[1];
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- // Register 2 -> y.
- memcpy(buffer, registers_ + 2, sizeof(*y));
- memcpy(y, buffer, sizeof(*y));
- }
-}
-
-
-// The return value is either in r0/r1 or d0.
// The return value is either in r0/r1 or d0: stores the double's raw bits
// into d0 under the hard-float ABI, otherwise into the r0:r1 pair. The char
// buffer sidesteps strict-aliasing rules.
void Simulator::SetFpResult(const double& result) {
  if (use_eabi_hardfloat()) {
    char buffer[2 * sizeof(vfp_registers_[0])];
    memcpy(buffer, &result, sizeof(buffer));
    // Copy result to d0.
    memcpy(vfp_registers_, buffer, sizeof(buffer));
  } else {
    char buffer[2 * sizeof(registers_[0])];
    memcpy(buffer, &result, sizeof(buffer));
    // Copy result to r0 and r1.
    memcpy(registers_, buffer, sizeof(buffer));
  }
}
-
-
-void Simulator::TrashCallerSaveRegisters() {
- // We don't trash the registers with the return value.
- registers_[2] = 0x50Bad4U;
- registers_[3] = 0x50Bad4U;
- registers_[12] = 0x50Bad4U;
-}
-
-// Some Operating Systems allow unaligned access on ARMv7 targets. We
-// assume that unaligned accesses are not allowed unless the v8 build system
-// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
-// The following statements below describes the behavior of the ARM CPUs
-// that don't support unaligned access.
-// Some ARM platforms raise an interrupt on detecting unaligned access.
-// On others it does a funky rotation thing. For now we
-// simply disallow unaligned reads. Note that simulator runs have the runtime
-// system running directly on the host system and only generated code is
-// executed in the simulator. Since the host is typically IA32 we will not
-// get the correct ARM-like behaviour on unaligned accesses for those ARM
-// targets that don't support unaligned loads and stores.
-
-
// Reads a word from simulated memory, refusing unaligned addresses unless
// FLAG_enable_unaligned_accesses permits them.
// NOTE(review): the load goes through intptr_t*, which reads 4 bytes only on
// a 32-bit host — presumably this simulator is built 32-bit; confirm before
// porting.
int Simulator::ReadW(int32_t addr, Instruction* instr) {
  if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
    return *ptr;
  } else {
    PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
           addr,
           reinterpret_cast<intptr_t>(instr));
    UNIMPLEMENTED();
    return 0;
  }
}
-
-
// Writes a word to simulated memory, refusing unaligned addresses unless
// FLAG_enable_unaligned_accesses permits them.
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
  if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
    *ptr = value;
  } else {
    PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
           addr,
           reinterpret_cast<intptr_t>(instr));
    UNIMPLEMENTED();
  }
}
-
-
// Reads an unsigned halfword, enforcing 2-byte alignment unless
// FLAG_enable_unaligned_accesses permits unaligned access.
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
  if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
    return *ptr;
  } else {
    PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08"
           V8PRIxPTR "\n",
           addr,
           reinterpret_cast<intptr_t>(instr));
    UNIMPLEMENTED();
    return 0;
  }
}
-
-
-int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
- if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
- } else {
- PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
- }
-}
-
-
// Writes an unsigned halfword, enforcing 2-byte alignment unless
// FLAG_enable_unaligned_accesses permits unaligned access.
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
  if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
    *ptr = value;
  } else {
    PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08"
           V8PRIxPTR "\n",
           addr,
           reinterpret_cast<intptr_t>(instr));
    UNIMPLEMENTED();
  }
}


// Signed variant of the halfword store; identical alignment policy.
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
  if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
    *ptr = value;
  } else {
    PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
           addr,
           reinterpret_cast<intptr_t>(instr));
    UNIMPLEMENTED();
  }
}
-
-
// Byte accesses need no alignment check: every address is byte-aligned.
uint8_t Simulator::ReadBU(int32_t addr) {
  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
  return *ptr;
}


// Signed byte load.
int8_t Simulator::ReadB(int32_t addr) {
  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
  return *ptr;
}


// Unsigned byte store.
void Simulator::WriteB(int32_t addr, uint8_t value) {
  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
  *ptr = value;
}


// Signed byte store.
void Simulator::WriteB(int32_t addr, int8_t value) {
  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
  *ptr = value;
}
-
-
-int32_t* Simulator::ReadDW(int32_t addr) {
- if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return ptr;
- } else {
- PrintF("Unaligned read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
- }
-}
-
-
-void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
- if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- *ptr++ = value1;
- *ptr = value2;
- } else {
- PrintF("Unaligned write at 0x%08x\n", addr);
- UNIMPLEMENTED();
- }
-}
-
-
-// Returns the limit of the stack area to enable checking for stack overflows.
// Returns the limit of the stack area to enable checking for stack
// overflows: the low end of the allocation plus a safety margin.
uintptr_t Simulator::StackLimit() const {
  // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
  // pushing values.
  return reinterpret_cast<uintptr_t>(stack_) + 1024;
}
-
-
-// Unsupported instructions use Format to print an error and stop execution.
-void Simulator::Format(Instruction* instr, const char* format) {
- PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
- reinterpret_cast<intptr_t>(instr), format);
- UNIMPLEMENTED();
-}
-
-
-// Checks if the current instruction should be executed based on its
-// condition bits.
// Checks if the current instruction should be executed based on its
// condition bits, evaluating the simulated APSR flags per the ARM
// condition-code table (eq..al).
bool Simulator::ConditionallyExecute(Instruction* instr) {
  switch (instr->ConditionField()) {
    case eq: return z_flag_;
    case ne: return !z_flag_;
    case cs: return c_flag_;
    case cc: return !c_flag_;
    case mi: return n_flag_;
    case pl: return !n_flag_;
    case vs: return v_flag_;
    case vc: return !v_flag_;
    case hi: return c_flag_ && !z_flag_;
    case ls: return !c_flag_ || z_flag_;
    case ge: return n_flag_ == v_flag_;
    case lt: return n_flag_ != v_flag_;
    case gt: return !z_flag_ && (n_flag_ == v_flag_);
    case le: return z_flag_ || (n_flag_ != v_flag_);
    case al: return true;
    default: UNREACHABLE();
  }
  return false;
}
-
-
// Calculate and set the Negative and Zero flags from a result value.
void Simulator::SetNZFlags(int32_t val) {
  n_flag_ = (val < 0);
  z_flag_ = (val == 0);
}


// Set the Carry flag.
void Simulator::SetCFlag(bool val) {
  c_flag_ = val;
}


// Set the oVerflow flag.
void Simulator::SetVFlag(bool val) {
  v_flag_ = val;
}
-
-
-// Calculate C flag value for additions.
-bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
- uint32_t uleft = static_cast<uint32_t>(left);
- uint32_t uright = static_cast<uint32_t>(right);
- uint32_t urest = 0xffffffffU - uleft;
-
- return (uright > urest) ||
- (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
-}
-
-
-// Calculate C flag value for subtractions.
-bool Simulator::BorrowFrom(int32_t left, int32_t right) {
- uint32_t uleft = static_cast<uint32_t>(left);
- uint32_t uright = static_cast<uint32_t>(right);
-
- return (uright > uleft);
-}
-
-
-// Calculate V flag value for additions and subtractions.
-bool Simulator::OverflowFrom(int32_t alu_out,
- int32_t left, int32_t right, bool addition) {
- bool overflow;
- if (addition) {
- // operands have the same sign
- overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
- // and operands and result have different sign
- && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
- } else {
- // operands have different signs
- overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
- // and first operand and result have different signs
- && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
- }
- return overflow;
-}
-
-
-// Support for VFP comparisons.
// Support for VFP comparisons: sets the FPSCR N/Z/C/V flags exactly as the
// ARM VCMP instruction does (unordered -> C and V set; equal -> Z and C;
// less -> N only; greater -> C only).
void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
  if (isnan(val1) || isnan(val2)) {
    n_flag_FPSCR_ = false;
    z_flag_FPSCR_ = false;
    c_flag_FPSCR_ = true;
    v_flag_FPSCR_ = true;
    // All non-NaN cases.
  } else if (val1 == val2) {
    n_flag_FPSCR_ = false;
    z_flag_FPSCR_ = true;
    c_flag_FPSCR_ = true;
    v_flag_FPSCR_ = false;
  } else if (val1 < val2) {
    n_flag_FPSCR_ = true;
    z_flag_FPSCR_ = false;
    c_flag_FPSCR_ = false;
    v_flag_FPSCR_ = false;
  } else {
    // Case when (val1 > val2).
    n_flag_FPSCR_ = false;
    z_flag_FPSCR_ = false;
    c_flag_FPSCR_ = true;
    v_flag_FPSCR_ = false;
  }
}
-
-
// Copies the FPSCR condition flags into the APSR, as the VMRS APSR_nzcv
// transfer does.
void Simulator::Copy_FPSCR_to_APSR() {
  n_flag_ = n_flag_FPSCR_;
  z_flag_ = z_flag_FPSCR_;
  c_flag_ = c_flag_FPSCR_;
  v_flag_ = v_flag_FPSCR_;
}
-
-
-// Addressing Mode 1 - Data-processing operands:
-// Get the value based on the shifter_operand with register.
// Computes the shifted-register shifter operand and its carry-out for both
// the shift-by-immediate (bit 4 clear) and shift-by-register (bit 4 set)
// encodings. NOTE(review): signed right shifts of negative values below rely
// on the compiler's arithmetic-shift behaviour (implementation-defined in
// C++03) — true for the toolchains this simulator targets.
int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
  ShiftOp shift = instr->ShiftField();
  int shift_amount = instr->ShiftAmountValue();
  int32_t result = get_register(instr->RmValue());
  if (instr->Bit(4) == 0) {
    // by immediate
    if ((shift == ROR) && (shift_amount == 0)) {
      UNIMPLEMENTED();
      return result;
    } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
      // Encoded amount 0 means shift by 32 for LSR and ASR.
      shift_amount = 32;
    }
    switch (shift) {
      case ASR: {
        // NOTE(review): this shift_amount == 0 branch looks unreachable —
        // the remapping above turns 0 into 32 before we get here.
        if (shift_amount == 0) {
          if (result < 0) {
            result = 0xffffffff;
            *carry_out = true;
          } else {
            result = 0;
            *carry_out = false;
          }
        } else {
          // Shift by (n - 1), capture the last bit shifted out as carry,
          // then shift the final position.
          result >>= (shift_amount - 1);
          *carry_out = (result & 1) == 1;
          result >>= 1;
        }
        break;
      }

      case LSL: {
        if (shift_amount == 0) {
          // LSL #0 leaves the value and carry unchanged.
          *carry_out = c_flag_;
        } else {
          result <<= (shift_amount - 1);
          *carry_out = (result < 0);
          result <<= 1;
        }
        break;
      }

      case LSR: {
        if (shift_amount == 0) {
          result = 0;
          *carry_out = c_flag_;
        } else {
          uint32_t uresult = static_cast<uint32_t>(result);
          uresult >>= (shift_amount - 1);
          *carry_out = (uresult & 1) == 1;
          uresult >>= 1;
          result = static_cast<int32_t>(uresult);
        }
        break;
      }

      case ROR: {
        if (shift_amount == 0) {
          *carry_out = c_flag_;
        } else {
          uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
          uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
          result = right | left;
          // Carry-out of a rotate is the new top bit.
          *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
        }
        break;
      }

      default: {
        UNREACHABLE();
        break;
      }
    }
  } else {
    // by register
    // Only the low byte of Rs participates in the shift amount.
    int rs = instr->RsValue();
    shift_amount = get_register(rs) &0xff;
    switch (shift) {
      case ASR: {
        if (shift_amount == 0) {
          *carry_out = c_flag_;
        } else if (shift_amount < 32) {
          result >>= (shift_amount - 1);
          *carry_out = (result & 1) == 1;
          result >>= 1;
        } else {
          ASSERT(shift_amount >= 32);
          // Shifting by >= 32 replicates the sign bit everywhere.
          if (result < 0) {
            *carry_out = true;
            result = 0xffffffff;
          } else {
            *carry_out = false;
            result = 0;
          }
        }
        break;
      }

      case LSL: {
        if (shift_amount == 0) {
          *carry_out = c_flag_;
        } else if (shift_amount < 32) {
          result <<= (shift_amount - 1);
          *carry_out = (result < 0);
          result <<= 1;
        } else if (shift_amount == 32) {
          // Exactly 32: the old bit 0 becomes the carry, value is zero.
          *carry_out = (result & 1) == 1;
          result = 0;
        } else {
          ASSERT(shift_amount > 32);
          *carry_out = false;
          result = 0;
        }
        break;
      }

      case LSR: {
        if (shift_amount == 0) {
          *carry_out = c_flag_;
        } else if (shift_amount < 32) {
          uint32_t uresult = static_cast<uint32_t>(result);
          uresult >>= (shift_amount - 1);
          *carry_out = (uresult & 1) == 1;
          uresult >>= 1;
          result = static_cast<int32_t>(uresult);
        } else if (shift_amount == 32) {
          // Exactly 32: the old bit 31 becomes the carry, value is zero.
          *carry_out = (result < 0);
          result = 0;
        } else {
          *carry_out = false;
          result = 0;
        }
        break;
      }

      case ROR: {
        if (shift_amount == 0) {
          *carry_out = c_flag_;
        } else {
          uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
          uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
          result = right | left;
          *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
        }
        break;
      }

      default: {
        UNREACHABLE();
        break;
      }
    }
  }
  return result;
}
-
-
-// Addressing Mode 1 - Data-processing operands:
-// Get the value based on the shifter_operand with immediate.
// Computes the rotated-immediate shifter operand: an 8-bit immediate rotated
// right by twice the 4-bit rotate field. With rotate == 0 the carry is
// unchanged; otherwise it is bit 31 of the rotated value.
int32_t Simulator::GetImm(Instruction* instr, bool* carry_out) {
  int rotate = instr->RotateValue() * 2;
  int immed8 = instr->Immed8Value();
  int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
  *carry_out = (rotate == 0) ? c_flag_ : (imm < 0);
  return imm;
}
-
-
// Counts the set bits of a bit vector. The shift is done on an unsigned
// copy: an arithmetic right shift of a value with the sign bit set would
// replicate the sign bit and never terminate.
static int count_bits(int bit_vector) {
  unsigned int bits = static_cast<unsigned int>(bit_vector);
  int count = 0;
  while (bits != 0) {
    count += bits & 1;
    bits >>= 1;
  }
  return count;
}
-
-
// Computes the first and last transfer addresses for a load/store-multiple
// according to the P and U addressing-mode bits (increment/decrement,
// before/after), and writes the updated base back to Rn when the W bit is
// set. Addresses are inclusive: [*start_address, *end_address].
void Simulator::ProcessPUW(Instruction* instr,
                           int num_regs,
                           int reg_size,
                           intptr_t* start_address,
                           intptr_t* end_address) {
  int rn = instr->RnValue();
  int32_t rn_val = get_register(rn);
  switch (instr->PUField()) {
    case da_x: {
      // Decrement-after is not generated by the assembler paths simulated
      // here.
      UNIMPLEMENTED();
      break;
    }
    case ia_x: {
      // Increment after: transfers start at Rn, base advances past them.
      *start_address = rn_val;
      *end_address = rn_val + (num_regs * reg_size) - reg_size;
      rn_val = rn_val + (num_regs * reg_size);
      break;
    }
    case db_x: {
      // Decrement before: transfers end just below Rn.
      *start_address = rn_val - (num_regs * reg_size);
      *end_address = rn_val - reg_size;
      rn_val = *start_address;
      break;
    }
    case ib_x: {
      // Increment before: transfers start one slot above Rn.
      *start_address = rn_val + reg_size;
      *end_address = rn_val + (num_regs * reg_size);
      rn_val = *end_address;
      break;
    }
    default: {
      UNREACHABLE();
      break;
    }
  }
  if (instr->HasW()) {
    set_register(rn, rn_val);
  }
}
-
-// Addressing Mode 4 - Load and Store Multiple
// Addressing Mode 4 - Load and Store Multiple: transfers the registers
// selected by the rlist bitmask (lowest-numbered register at the lowest
// address) to or from memory.
void Simulator::HandleRList(Instruction* instr, bool load) {
  int rlist = instr->RlistValue();
  int num_regs = count_bits(rlist);

  intptr_t start_address = 0;
  intptr_t end_address = 0;
  ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);

  intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
  // Catch null pointers a little earlier.
  ASSERT(start_address > 8191 || start_address < 0);
  int reg = 0;
  // Walk the bitmask from bit 0 (r0) upward, one memory slot per set bit.
  while (rlist != 0) {
    if ((rlist & 1) != 0) {
      if (load) {
        set_register(reg, *address);
      } else {
        *address = get_register(reg);
      }
      address += 1;
    }
    reg++;
    rlist >>= 1;
  }
  // The final cursor must sit one word past the computed end address.
  ASSERT(end_address == ((intptr_t)address) - 4);
}
-
-
-// Addressing Mode 6 - Load and Store Multiple Coprocessor registers.
// Addressing Mode 6 - Load and Store Multiple Coprocessor registers:
// transfers a run of consecutive VFP registers (single or double precision,
// per the sz bit) to or from memory, going through ReadW/WriteW so the
// alignment policy is honoured.
void Simulator::HandleVList(Instruction* instr) {
  VFPRegPrecision precision =
      (instr->SzValue() == 0) ? kSinglePrecision : kDoublePrecision;
  int operand_size = (precision == kSinglePrecision) ? 4 : 8;

  bool load = (instr->VLValue() == 0x1);

  int vd;
  int num_regs;
  vd = instr->VFPDRegValue(precision);
  // The immediate encodes the transfer size in words; halve it for doubles.
  if (precision == kSinglePrecision) {
    num_regs = instr->Immed8Value();
  } else {
    num_regs = instr->Immed8Value() / 2;
  }

  intptr_t start_address = 0;
  intptr_t end_address = 0;
  ProcessPUW(instr, num_regs, operand_size, &start_address, &end_address);

  intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
  for (int reg = vd; reg < vd + num_regs; reg++) {
    if (precision == kSinglePrecision) {
      if (load) {
        set_s_register_from_sinteger(
            reg, ReadW(reinterpret_cast<int32_t>(address), instr));
      } else {
        WriteW(reinterpret_cast<int32_t>(address),
               get_sinteger_from_s_register(reg), instr);
      }
      address += 1;
    } else {
      if (load) {
        // Assemble the double from its two memory words via memcpy to
        // avoid aliasing issues.
        int32_t data[] = {
          ReadW(reinterpret_cast<int32_t>(address), instr),
          ReadW(reinterpret_cast<int32_t>(address + 1), instr)
        };
        double d;
        memcpy(&d, data, 8);
        set_d_register_from_double(reg, d);
      } else {
        int32_t data[2];
        double d = get_double_from_d_register(reg);
        memcpy(data, &d, 8);
        WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
        WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
      }
      address += 2;
    }
  }
  // The cursor must land exactly one operand past the computed end address.
  ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
}
-
-
-// Calls into the V8 runtime are based on this very simple interface.
-// Note: To be able to return two values from some calls the code in runtime.cc
-// uses the ObjectPair which is essentially two 32-bit values stuffed into a
-// 64-bit value. With the code below we assume that all runtime calls return
-// 64 bits of result. If they don't, the r1 result register contains a bogus
-// value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3,
- int32_t arg4,
- int32_t arg5);
-typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3);
-
-// This signature supports direct call in to API function native callback
-// (refer to InvocationCallback in v8.h).
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-
-// This signature supports direct call to accessor getter callback.
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
- int32_t arg1);
-
-// Software interrupt instructions are used by the simulator to call into the
-// C-based V8 runtime.
-void Simulator::SoftwareInterrupt(Instruction* instr) {
- int svc = instr->SvcValue();
- switch (svc) {
- case kCallRtRedirected: {
- // Check if stack is aligned. Error if not aligned is reported below to
- // include information on the function called.
- bool stack_aligned =
- (get_register(sp)
- & (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
- Redirection* redirection = Redirection::FromSwiInstruction(instr);
- int32_t arg0 = get_register(r0);
- int32_t arg1 = get_register(r1);
- int32_t arg2 = get_register(r2);
- int32_t arg3 = get_register(r3);
- int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
- int32_t arg4 = stack_pointer[0];
- int32_t arg5 = stack_pointer[1];
- bool fp_call =
- (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
- if (use_eabi_hardfloat()) {
- // With the hard floating point calling convention, double
- // arguments are passed in VFP registers. Fetch the arguments
- // from there and call the builtin using soft floating point
- // convention.
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- arg2 = vfp_registers_[2];
- arg3 = vfp_registers_[3];
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- arg2 = get_register(0);
- break;
- default:
- break;
- }
- }
- // This is dodgy but it works because the C entry stubs are never moved.
- // See comment in codegen-arm.cc and bug 1242173.
- int32_t saved_lr = get_register(lr);
- intptr_t external =
- reinterpret_cast<intptr_t>(redirection->external_function());
- if (fp_call) {
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double dval0, dval1;
- int32_t ival;
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- GetFpArgs(&dval0, &dval1);
- PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(target), dval0, dval1);
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- GetFpArgs(&dval0);
- PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(target), dval0);
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- GetFpArgs(&dval0, &ival);
- PrintF("Call to host function at %p with args %f, %d",
- FUNCTION_ADDR(target), dval0, ival);
- break;
- default:
- UNREACHABLE();
- break;
- }
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- CHECK(stack_aligned);
- if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double result = target(arg0, arg1, arg2, arg3);
- SetFpResult(result);
- } else {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
- }
- set_register(r0, lo_res);
- set_register(r1, hi_res);
- }
- } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF("Call to host function at %p args %08x",
- FUNCTION_ADDR(target), arg0);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- CHECK(stack_aligned);
- v8::Handle<v8::Value> result = target(arg0);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, reinterpret_cast<int32_t>(*result));
- } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF("Call to host function at %p args %08x %08x",
- FUNCTION_ADDR(target), arg0, arg1);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- CHECK(stack_aligned);
- v8::Handle<v8::Value> result = target(arg0, arg1);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, reinterpret_cast<int32_t>(*result));
- } else {
- // builtin call.
- ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF(
- "Call to host function at %p"
- "args %08x, %08x, %08x, %08x, %08x, %08x",
- FUNCTION_ADDR(target),
- arg0,
- arg1,
- arg2,
- arg3,
- arg4,
- arg5);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- CHECK(stack_aligned);
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
- }
- set_register(r0, lo_res);
- set_register(r1, hi_res);
- }
- set_register(lr, saved_lr);
- set_pc(get_register(lr));
- break;
- }
- case kBreakpoint: {
- ArmDebugger dbg(this);
- dbg.Debug();
- break;
- }
- // stop uses all codes greater than 1 << 23.
- default: {
- if (svc >= (1 << 23)) {
- uint32_t code = svc & kStopCodeMask;
- if (isWatchedStop(code)) {
- IncreaseStopCounter(code);
- }
- // Stop if it is enabled, otherwise go on jumping over the stop
- // and the message address.
- if (isEnabledStop(code)) {
- ArmDebugger dbg(this);
- dbg.Stop(instr);
- } else {
- set_pc(get_pc() + 2 * Instruction::kInstrSize);
- }
- } else {
- // This is not a valid svc code.
- UNREACHABLE();
- break;
- }
- }
- }
-}
-
-
-// Stop helper functions.
-bool Simulator::isStopInstruction(Instruction* instr) {
- return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
-}
-
-
-bool Simulator::isWatchedStop(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- return code < kNumOfWatchedStops;
-}
-
-
-bool Simulator::isEnabledStop(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- // Unwatched stops are always enabled.
- return !isWatchedStop(code) ||
- !(watched_stops[code].count & kStopDisabledBit);
-}
-
-
-void Simulator::EnableStop(uint32_t code) {
- ASSERT(isWatchedStop(code));
- if (!isEnabledStop(code)) {
- watched_stops[code].count &= ~kStopDisabledBit;
- }
-}
-
-
-void Simulator::DisableStop(uint32_t code) {
- ASSERT(isWatchedStop(code));
- if (isEnabledStop(code)) {
- watched_stops[code].count |= kStopDisabledBit;
- }
-}
-
-
-void Simulator::IncreaseStopCounter(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- ASSERT(isWatchedStop(code));
- if ((watched_stops[code].count & ~(1 << 31)) == 0x7fffffff) {
- PrintF("Stop counter for code %i has overflowed.\n"
- "Enabling this code and reseting the counter to 0.\n", code);
- watched_stops[code].count = 0;
- EnableStop(code);
- } else {
- watched_stops[code].count++;
- }
-}
-
-
-// Print a stop status.
-void Simulator::PrintStopInfo(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- if (!isWatchedStop(code)) {
- PrintF("Stop not watched.");
- } else {
- const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
- int32_t count = watched_stops[code].count & ~kStopDisabledBit;
- // Don't print the state of unused breakpoints.
- if (count != 0) {
- if (watched_stops[code].desc) {
- PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
- code, code, state, count, watched_stops[code].desc);
- } else {
- PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
- code, code, state, count);
- }
- }
- }
-}
-
-
-// Handle execution based on instruction types.
-
-// Instruction types 0 and 1 are both rolled into one function because they
-// only differ in the handling of the shifter_operand.
-void Simulator::DecodeType01(Instruction* instr) {
- int type = instr->TypeValue();
- if ((type == 0) && instr->IsSpecialType0()) {
- // multiply instruction or extra loads and stores
- if (instr->Bits(7, 4) == 9) {
- if (instr->Bit(24) == 0) {
- // Raw field decoding here. Multiply instructions have their Rd in
- // funny places.
- int rn = instr->RnValue();
- int rm = instr->RmValue();
- int rs = instr->RsValue();
- int32_t rs_val = get_register(rs);
- int32_t rm_val = get_register(rm);
- if (instr->Bit(23) == 0) {
- if (instr->Bit(21) == 0) {
- // The MUL instruction description (A 4.1.33) refers to Rd as being
- // the destination for the operation, but it confusingly uses the
- // Rn field to encode it.
- // Format(instr, "mul'cond's 'rn, 'rm, 'rs");
- int rd = rn; // Remap the rn field to the Rd register.
- int32_t alu_out = rm_val * rs_val;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- }
- } else {
- int rd = instr->RdValue();
- int32_t acc_value = get_register(rd);
- if (instr->Bit(22) == 0) {
- // The MLA instruction description (A 4.1.28) refers to the order
- // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
- // Rn field to encode the Rd register and the Rd field to encode
- // the Rn register.
- // Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
- int32_t mul_out = rm_val * rs_val;
- int32_t result = acc_value + mul_out;
- set_register(rn, result);
- } else {
- // Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
- int32_t mul_out = rm_val * rs_val;
- int32_t result = acc_value - mul_out;
- set_register(rn, result);
- }
- }
- } else {
- // The signed/long multiply instructions use the terms RdHi and RdLo
- // when referring to the target registers. They are mapped to the Rn
- // and Rd fields as follows:
- // RdLo == Rd
- // RdHi == Rn (This is confusingly stored in variable rd here
- // because the mul instruction from above uses the
- // Rn field to encode the Rd register. Good luck figuring
- // this out without reading the ARM instruction manual
- // at a very detailed level.)
- // Format(instr, "'um'al'cond's 'rd, 'rn, 'rs, 'rm");
- int rd_hi = rn; // Remap the rn field to the RdHi register.
- int rd_lo = instr->RdValue();
- int32_t hi_res = 0;
- int32_t lo_res = 0;
- if (instr->Bit(22) == 1) {
- int64_t left_op = static_cast<int32_t>(rm_val);
- int64_t right_op = static_cast<int32_t>(rs_val);
- uint64_t result = left_op * right_op;
- hi_res = static_cast<int32_t>(result >> 32);
- lo_res = static_cast<int32_t>(result & 0xffffffff);
- } else {
- // unsigned multiply
- uint64_t left_op = static_cast<uint32_t>(rm_val);
- uint64_t right_op = static_cast<uint32_t>(rs_val);
- uint64_t result = left_op * right_op;
- hi_res = static_cast<int32_t>(result >> 32);
- lo_res = static_cast<int32_t>(result & 0xffffffff);
- }
- set_register(rd_lo, lo_res);
- set_register(rd_hi, hi_res);
- if (instr->HasS()) {
- UNIMPLEMENTED();
- }
- }
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
- } else {
- // extra load/store instructions
- int rd = instr->RdValue();
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- int32_t addr = 0;
- if (instr->Bit(22) == 0) {
- int rm = instr->RmValue();
- int32_t rm_val = get_register(rm);
- switch (instr->PUField()) {
- case da_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val -= rm_val;
- set_register(rn, rn_val);
- break;
- }
- case ia_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val += rm_val;
- set_register(rn, rn_val);
- break;
- }
- case db_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
- rn_val -= rm_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- case ib_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
- rn_val += rm_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
- } else {
- int32_t imm_val = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
- switch (instr->PUField()) {
- case da_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val -= imm_val;
- set_register(rn, rn_val);
- break;
- }
- case ia_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val += imm_val;
- set_register(rn, rn_val);
- break;
- }
- case db_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
- rn_val -= imm_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- case ib_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
- rn_val += imm_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
- }
- if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
- ASSERT((rd % 2) == 0);
- if (instr->HasH()) {
- // The strd instruction.
- int32_t value1 = get_register(rd);
- int32_t value2 = get_register(rd+1);
- WriteDW(addr, value1, value2);
- } else {
- // The ldrd instruction.
- int* rn_data = ReadDW(addr);
- set_dw_register(rd, rn_data);
- }
- } else if (instr->HasH()) {
- if (instr->HasSign()) {
- if (instr->HasL()) {
- int16_t val = ReadH(addr, instr);
- set_register(rd, val);
- } else {
- int16_t val = get_register(rd);
- WriteH(addr, val, instr);
- }
- } else {
- if (instr->HasL()) {
- uint16_t val = ReadHU(addr, instr);
- set_register(rd, val);
- } else {
- uint16_t val = get_register(rd);
- WriteH(addr, val, instr);
- }
- }
- } else {
- // signed byte loads
- ASSERT(instr->HasSign());
- ASSERT(instr->HasL());
- int8_t val = ReadB(addr);
- set_register(rd, val);
- }
- return;
- }
- } else if ((type == 0) && instr->IsMiscType0()) {
- if (instr->Bits(22, 21) == 1) {
- int rm = instr->RmValue();
- switch (instr->BitField(7, 4)) {
- case BX:
- set_pc(get_register(rm));
- break;
- case BLX: {
- uint32_t old_pc = get_pc();
- set_pc(get_register(rm));
- set_register(lr, old_pc + Instruction::kInstrSize);
- break;
- }
- case BKPT: {
- ArmDebugger dbg(this);
- PrintF("Simulator hit BKPT.\n");
- dbg.Debug();
- break;
- }
- default:
- UNIMPLEMENTED();
- }
- } else if (instr->Bits(22, 21) == 3) {
- int rm = instr->RmValue();
- int rd = instr->RdValue();
- switch (instr->BitField(7, 4)) {
- case CLZ: {
- uint32_t bits = get_register(rm);
- int leading_zeros = 0;
- if (bits == 0) {
- leading_zeros = 32;
- } else {
- while ((bits & 0x80000000u) == 0) {
- bits <<= 1;
- leading_zeros++;
- }
- }
- set_register(rd, leading_zeros);
- break;
- }
- default:
- UNIMPLEMENTED();
- }
- } else {
- PrintF("%08x\n", instr->InstructionBits());
- UNIMPLEMENTED();
- }
- } else if ((type == 1) && instr->IsNopType1()) {
- // NOP.
- } else {
- int rd = instr->RdValue();
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- int32_t shifter_operand = 0;
- bool shifter_carry_out = 0;
- if (type == 0) {
- shifter_operand = GetShiftRm(instr, &shifter_carry_out);
- } else {
- ASSERT(instr->TypeValue() == 1);
- shifter_operand = GetImm(instr, &shifter_carry_out);
- }
- int32_t alu_out;
-
- switch (instr->OpcodeField()) {
- case AND: {
- // Format(instr, "and'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "and'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val & shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case EOR: {
- // Format(instr, "eor'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "eor'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val ^ shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case SUB: {
- // Format(instr, "sub'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "sub'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val - shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(!BorrowFrom(rn_val, shifter_operand));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
- }
- break;
- }
-
- case RSB: {
- // Format(instr, "rsb'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "rsb'cond's 'rd, 'rn, 'imm");
- alu_out = shifter_operand - rn_val;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(!BorrowFrom(shifter_operand, rn_val));
- SetVFlag(OverflowFrom(alu_out, shifter_operand, rn_val, false));
- }
- break;
- }
-
- case ADD: {
- // Format(instr, "add'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "add'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val + shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(CarryFrom(rn_val, shifter_operand));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
- }
- break;
- }
-
- case ADC: {
- // Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "adc'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val + shifter_operand + GetCarry();
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(CarryFrom(rn_val, shifter_operand, GetCarry()));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
- }
- break;
- }
-
- case SBC: {
- Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
- Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
- break;
- }
-
- case RSC: {
- Format(instr, "rsc'cond's 'rd, 'rn, 'shift_rm");
- Format(instr, "rsc'cond's 'rd, 'rn, 'imm");
- break;
- }
-
- case TST: {
- if (instr->HasS()) {
- // Format(instr, "tst'cond 'rn, 'shift_rm");
- // Format(instr, "tst'cond 'rn, 'imm");
- alu_out = rn_val & shifter_operand;
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- } else {
- // Format(instr, "movw'cond 'rd, 'imm").
- alu_out = instr->ImmedMovwMovtValue();
- set_register(rd, alu_out);
- }
- break;
- }
-
- case TEQ: {
- if (instr->HasS()) {
- // Format(instr, "teq'cond 'rn, 'shift_rm");
- // Format(instr, "teq'cond 'rn, 'imm");
- alu_out = rn_val ^ shifter_operand;
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- } else {
- // Other instructions matching this pattern are handled in the
- // miscellaneous instructions part above.
- UNREACHABLE();
- }
- break;
- }
-
- case CMP: {
- if (instr->HasS()) {
- // Format(instr, "cmp'cond 'rn, 'shift_rm");
- // Format(instr, "cmp'cond 'rn, 'imm");
- alu_out = rn_val - shifter_operand;
- SetNZFlags(alu_out);
- SetCFlag(!BorrowFrom(rn_val, shifter_operand));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
- } else {
- // Format(instr, "movt'cond 'rd, 'imm").
- alu_out = (get_register(rd) & 0xffff) |
- (instr->ImmedMovwMovtValue() << 16);
- set_register(rd, alu_out);
- }
- break;
- }
-
- case CMN: {
- if (instr->HasS()) {
- // Format(instr, "cmn'cond 'rn, 'shift_rm");
- // Format(instr, "cmn'cond 'rn, 'imm");
- alu_out = rn_val + shifter_operand;
- SetNZFlags(alu_out);
- SetCFlag(CarryFrom(rn_val, shifter_operand));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
- } else {
- // Other instructions matching this pattern are handled in the
- // miscellaneous instructions part above.
- UNREACHABLE();
- }
- break;
- }
-
- case ORR: {
- // Format(instr, "orr'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "orr'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val | shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case MOV: {
- // Format(instr, "mov'cond's 'rd, 'shift_rm");
- // Format(instr, "mov'cond's 'rd, 'imm");
- alu_out = shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case BIC: {
- // Format(instr, "bic'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "bic'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val & ~shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case MVN: {
- // Format(instr, "mvn'cond's 'rd, 'shift_rm");
- // Format(instr, "mvn'cond's 'rd, 'imm");
- alu_out = ~shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- default: {
- UNREACHABLE();
- break;
- }
- }
- }
-}
-
-
-void Simulator::DecodeType2(Instruction* instr) {
- int rd = instr->RdValue();
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- int32_t im_val = instr->Offset12Value();
- int32_t addr = 0;
- switch (instr->PUField()) {
- case da_x: {
- // Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val -= im_val;
- set_register(rn, rn_val);
- break;
- }
- case ia_x: {
- // Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val += im_val;
- set_register(rn, rn_val);
- break;
- }
- case db_x: {
- // Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
- rn_val -= im_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- case ib_x: {
- // Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
- rn_val += im_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
- if (instr->HasB()) {
- if (instr->HasL()) {
- byte val = ReadBU(addr);
- set_register(rd, val);
- } else {
- byte val = get_register(rd);
- WriteB(addr, val);
- }
- } else {
- if (instr->HasL()) {
- set_register(rd, ReadW(addr, instr));
- } else {
- WriteW(addr, get_register(rd), instr);
- }
- }
-}
-
-
-void Simulator::DecodeType3(Instruction* instr) {
- int rd = instr->RdValue();
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- bool shifter_carry_out = 0;
- int32_t shifter_operand = GetShiftRm(instr, &shifter_carry_out);
- int32_t addr = 0;
- switch (instr->PUField()) {
- case da_x: {
- ASSERT(!instr->HasW());
- Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
- UNIMPLEMENTED();
- break;
- }
- case ia_x: {
- if (instr->HasW()) {
- ASSERT(instr->Bits(5, 4) == 0x1);
-
- if (instr->Bit(22) == 0x1) { // USAT.
- int32_t sat_pos = instr->Bits(20, 16);
- int32_t sat_val = (1 << sat_pos) - 1;
- int32_t shift = instr->Bits(11, 7);
- int32_t shift_type = instr->Bit(6);
- int32_t rm_val = get_register(instr->RmValue());
- if (shift_type == 0) { // LSL
- rm_val <<= shift;
- } else { // ASR
- rm_val >>= shift;
- }
- // If saturation occurs, the Q flag should be set in the CPSR.
- // There is no Q flag yet, and no instruction (MRS) to read the
- // CPSR directly.
- if (rm_val > sat_val) {
- rm_val = sat_val;
- } else if (rm_val < 0) {
- rm_val = 0;
- }
- set_register(rd, rm_val);
- } else { // SSAT.
- UNIMPLEMENTED();
- }
- return;
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
- UNIMPLEMENTED();
- }
- break;
- }
- case db_x: {
- if (FLAG_enable_sudiv) {
- if (!instr->HasW()) {
- if (instr->Bits(5, 4) == 0x1) {
- if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
- // sdiv (in V8 notation matching ARM ISA format) rn = rm/rs
- // Format(instr, "'sdiv'cond'b 'rn, 'rm, 'rs);
- int rm = instr->RmValue();
- int32_t rm_val = get_register(rm);
- int rs = instr->RsValue();
- int32_t rs_val = get_register(rs);
- int32_t ret_val = 0;
- ASSERT(rs_val != 0);
- ret_val = rm_val/rs_val;
- set_register(rn, ret_val);
- return;
- }
- }
- }
- }
- // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
- addr = rn_val - shifter_operand;
- if (instr->HasW()) {
- set_register(rn, addr);
- }
- break;
- }
- case ib_x: {
- if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
- uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
- uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
- uint32_t msbit = widthminus1 + lsbit;
- if (msbit <= 31) {
- if (instr->Bit(22)) {
- // ubfx - unsigned bitfield extract.
- uint32_t rm_val =
- static_cast<uint32_t>(get_register(instr->RmValue()));
- uint32_t extr_val = rm_val << (31 - msbit);
- extr_val = extr_val >> (31 - widthminus1);
- set_register(instr->RdValue(), extr_val);
- } else {
- // sbfx - signed bitfield extract.
- int32_t rm_val = get_register(instr->RmValue());
- int32_t extr_val = rm_val << (31 - msbit);
- extr_val = extr_val >> (31 - widthminus1);
- set_register(instr->RdValue(), extr_val);
- }
- } else {
- UNREACHABLE();
- }
- return;
- } else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
- uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
- uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
- if (msbit >= lsbit) {
- // bfc or bfi - bitfield clear/insert.
- uint32_t rd_val =
- static_cast<uint32_t>(get_register(instr->RdValue()));
- uint32_t bitcount = msbit - lsbit + 1;
- uint32_t mask = (1 << bitcount) - 1;
- rd_val &= ~(mask << lsbit);
- if (instr->RmValue() != 15) {
- // bfi - bitfield insert.
- uint32_t rm_val =
- static_cast<uint32_t>(get_register(instr->RmValue()));
- rm_val &= mask;
- rd_val |= rm_val << lsbit;
- }
- set_register(instr->RdValue(), rd_val);
- } else {
- UNREACHABLE();
- }
- return;
- } else {
- // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
- addr = rn_val + shifter_operand;
- if (instr->HasW()) {
- set_register(rn, addr);
- }
- }
- break;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
- if (instr->HasB()) {
- if (instr->HasL()) {
- uint8_t byte = ReadB(addr);
- set_register(rd, byte);
- } else {
- uint8_t byte = get_register(rd);
- WriteB(addr, byte);
- }
- } else {
- if (instr->HasL()) {
- set_register(rd, ReadW(addr, instr));
- } else {
- WriteW(addr, get_register(rd), instr);
- }
- }
-}
-
-
-void Simulator::DecodeType4(Instruction* instr) {
- ASSERT(instr->Bit(22) == 0); // only allowed to be set in privileged mode
- if (instr->HasL()) {
- // Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
- HandleRList(instr, true);
- } else {
- // Format(instr, "stm'cond'pu 'rn'w, 'rlist");
- HandleRList(instr, false);
- }
-}
-
-
-void Simulator::DecodeType5(Instruction* instr) {
- // Format(instr, "b'l'cond 'target");
- int off = (instr->SImmed24Value() << 2);
- intptr_t pc_address = get_pc();
- if (instr->HasLink()) {
- set_register(lr, pc_address + Instruction::kInstrSize);
- }
- int pc_reg = get_register(pc);
- set_pc(pc_reg + off);
-}
-
-
-void Simulator::DecodeType6(Instruction* instr) {
- DecodeType6CoprocessorIns(instr);
-}
-
-
-void Simulator::DecodeType7(Instruction* instr) {
- if (instr->Bit(24) == 1) {
- SoftwareInterrupt(instr);
- } else {
- DecodeTypeVFP(instr);
- }
-}
-
-
-// void Simulator::DecodeTypeVFP(Instruction* instr)
-// The Following ARMv7 VFPv instructions are currently supported.
-// vmov :Sn = Rt
-// vmov :Rt = Sn
-// vcvt: Dd = Sm
-// vcvt: Sd = Dm
-// Dd = vabs(Dm)
-// Dd = vneg(Dm)
-// Dd = vadd(Dn, Dm)
-// Dd = vsub(Dn, Dm)
-// Dd = vmul(Dn, Dm)
-// Dd = vdiv(Dn, Dm)
-// vcmp(Dd, Dm)
-// vmrs
-// Dd = vsqrt(Dm)
-void Simulator::DecodeTypeVFP(Instruction* instr) {
- ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
- ASSERT(instr->Bits(11, 9) == 0x5);
-
- // Obtain double precision register codes.
- int vm = instr->VFPMRegValue(kDoublePrecision);
- int vd = instr->VFPDRegValue(kDoublePrecision);
- int vn = instr->VFPNRegValue(kDoublePrecision);
-
- if (instr->Bit(4) == 0) {
- if (instr->Opc1Value() == 0x7) {
- // Other data processing instructions
- if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
- // vmov register to register.
- if (instr->SzValue() == 0x1) {
- int m = instr->VFPMRegValue(kDoublePrecision);
- int d = instr->VFPDRegValue(kDoublePrecision);
- set_d_register_from_double(d, get_double_from_d_register(m));
- } else {
- int m = instr->VFPMRegValue(kSinglePrecision);
- int d = instr->VFPDRegValue(kSinglePrecision);
- set_s_register_from_float(d, get_float_from_s_register(m));
- }
- } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
- // vabs
- double dm_value = get_double_from_d_register(vm);
- double dd_value = fabs(dm_value);
- set_d_register_from_double(vd, dd_value);
- } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
- // vneg
- double dm_value = get_double_from_d_register(vm);
- double dd_value = -dm_value;
- set_d_register_from_double(vd, dd_value);
- } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
- DecodeVCVTBetweenDoubleAndSingle(instr);
- } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
- DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Value() >> 1) == 0x6) &&
- (instr->Opc3Value() & 0x1)) {
- DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
- (instr->Opc3Value() & 0x1)) {
- DecodeVCMP(instr);
- } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
- // vsqrt
- double dm_value = get_double_from_d_register(vm);
- double dd_value = sqrt(dm_value);
- set_d_register_from_double(vd, dd_value);
- } else if (instr->Opc3Value() == 0x0) {
- // vmov immediate.
- if (instr->SzValue() == 0x1) {
- set_d_register_from_double(vd, instr->DoubleImmedVmov());
- } else {
- UNREACHABLE(); // Not used by v8.
- }
- } else {
- UNREACHABLE(); // Not used by V8.
- }
- } else if (instr->Opc1Value() == 0x3) {
- if (instr->SzValue() != 0x1) {
- UNREACHABLE(); // Not used by V8.
- }
-
- if (instr->Opc3Value() & 0x1) {
- // vsub
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = dn_value - dm_value;
- set_d_register_from_double(vd, dd_value);
- } else {
- // vadd
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = dn_value + dm_value;
- set_d_register_from_double(vd, dd_value);
- }
- } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
- // vmul
- if (instr->SzValue() != 0x1) {
- UNREACHABLE(); // Not used by V8.
- }
-
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = dn_value * dm_value;
- set_d_register_from_double(vd, dd_value);
- } else if ((instr->Opc1Value() == 0x0)) {
- // vmla, vmls
- const bool is_vmls = (instr->Opc3Value() & 0x1);
-
- if (instr->SzValue() != 0x1) {
- UNREACHABLE(); // Not used by V8.
- }
-
- const double dd_val = get_double_from_d_register(vd);
- const double dn_val = get_double_from_d_register(vn);
- const double dm_val = get_double_from_d_register(vm);
-
- // Note: we do the mul and add/sub in separate steps to avoid getting a
- // result with too high precision.
- set_d_register_from_double(vd, dn_val * dm_val);
- if (is_vmls) {
- set_d_register_from_double(vd, dd_val - get_double_from_d_register(vd));
- } else {
- set_d_register_from_double(vd, dd_val + get_double_from_d_register(vd));
- }
- } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
- // vdiv
- if (instr->SzValue() != 0x1) {
- UNREACHABLE(); // Not used by V8.
- }
-
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = dn_value / dm_value;
- div_zero_vfp_flag_ = (dm_value == 0);
- set_d_register_from_double(vd, dd_value);
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
- } else {
- if ((instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0)) {
- DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
- } else if ((instr->VLValue() == 0x0) &&
- (instr->VCValue() == 0x1) &&
- (instr->Bit(23) == 0x0)) {
- // vmov (ARM core register to scalar)
- int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
- double dd_value = get_double_from_d_register(vd);
- int32_t data[2];
- memcpy(data, &dd_value, 8);
- data[instr->Bit(21)] = get_register(instr->RtValue());
- memcpy(&dd_value, data, 8);
- set_d_register_from_double(vd, dd_value);
- } else if ((instr->VLValue() == 0x1) &&
- (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x7) &&
- (instr->Bits(19, 16) == 0x1)) {
- // vmrs
- uint32_t rt = instr->RtValue();
- if (rt == 0xF) {
- Copy_FPSCR_to_APSR();
- } else {
- // Emulate FPSCR from the Simulator flags.
- uint32_t fpscr = (n_flag_FPSCR_ << 31) |
- (z_flag_FPSCR_ << 30) |
- (c_flag_FPSCR_ << 29) |
- (v_flag_FPSCR_ << 28) |
- (inexact_vfp_flag_ << 4) |
- (underflow_vfp_flag_ << 3) |
- (overflow_vfp_flag_ << 2) |
- (div_zero_vfp_flag_ << 1) |
- (inv_op_vfp_flag_ << 0) |
- (FPSCR_rounding_mode_);
- set_register(rt, fpscr);
- }
- } else if ((instr->VLValue() == 0x0) &&
- (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x7) &&
- (instr->Bits(19, 16) == 0x1)) {
- // vmsr
- uint32_t rt = instr->RtValue();
- if (rt == pc) {
- UNREACHABLE();
- } else {
- uint32_t rt_value = get_register(rt);
- n_flag_FPSCR_ = (rt_value >> 31) & 1;
- z_flag_FPSCR_ = (rt_value >> 30) & 1;
- c_flag_FPSCR_ = (rt_value >> 29) & 1;
- v_flag_FPSCR_ = (rt_value >> 28) & 1;
- inexact_vfp_flag_ = (rt_value >> 4) & 1;
- underflow_vfp_flag_ = (rt_value >> 3) & 1;
- overflow_vfp_flag_ = (rt_value >> 2) & 1;
- div_zero_vfp_flag_ = (rt_value >> 1) & 1;
- inv_op_vfp_flag_ = (rt_value >> 0) & 1;
- FPSCR_rounding_mode_ =
- static_cast<VFPRoundingMode>((rt_value) & kVFPRoundingModeMask);
- }
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
- }
-}
-
-
-void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
- Instruction* instr) {
- ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0));
-
- int t = instr->RtValue();
- int n = instr->VFPNRegValue(kSinglePrecision);
- bool to_arm_register = (instr->VLValue() == 0x1);
-
- if (to_arm_register) {
- int32_t int_value = get_sinteger_from_s_register(n);
- set_register(t, int_value);
- } else {
- int32_t rs_val = get_register(t);
- set_s_register_from_sinteger(n, rs_val);
- }
-}
-
-
-void Simulator::DecodeVCMP(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
- (instr->Opc3Value() & 0x1));
- // Comparison.
-
- VFPRegPrecision precision = kSinglePrecision;
- if (instr->SzValue() == 1) {
- precision = kDoublePrecision;
- }
-
- int d = instr->VFPDRegValue(precision);
- int m = 0;
- if (instr->Opc2Value() == 0x4) {
- m = instr->VFPMRegValue(precision);
- }
-
- if (precision == kDoublePrecision) {
- double dd_value = get_double_from_d_register(d);
- double dm_value = 0.0;
- if (instr->Opc2Value() == 0x4) {
- dm_value = get_double_from_d_register(m);
- }
-
- // Raise exceptions for quiet NaNs if necessary.
- if (instr->Bit(7) == 1) {
- if (isnan(dd_value)) {
- inv_op_vfp_flag_ = true;
- }
- }
-
- Compute_FPSCR_Flags(dd_value, dm_value);
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
-}
-
-
-void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
-
- VFPRegPrecision dst_precision = kDoublePrecision;
- VFPRegPrecision src_precision = kSinglePrecision;
- if (instr->SzValue() == 1) {
- dst_precision = kSinglePrecision;
- src_precision = kDoublePrecision;
- }
-
- int dst = instr->VFPDRegValue(dst_precision);
- int src = instr->VFPMRegValue(src_precision);
-
- if (dst_precision == kSinglePrecision) {
- double val = get_double_from_d_register(src);
- set_s_register_from_float(dst, static_cast<float>(val));
- } else {
- float val = get_float_from_s_register(src);
- set_d_register_from_double(dst, static_cast<double>(val));
- }
-}
-
-bool get_inv_op_vfp_flag(VFPRoundingMode mode,
- double val,
- bool unsigned_) {
- ASSERT((mode == RN) || (mode == RM) || (mode == RZ));
- double max_uint = static_cast<double>(0xffffffffu);
- double max_int = static_cast<double>(kMaxInt);
- double min_int = static_cast<double>(kMinInt);
-
- // Check for NaN.
- if (val != val) {
- return true;
- }
-
- // Check for overflow. This code works because 32bit integers can be
- // exactly represented by ieee-754 64bit floating-point values.
- switch (mode) {
- case RN:
- return unsigned_ ? (val >= (max_uint + 0.5)) ||
- (val < -0.5)
- : (val >= (max_int + 0.5)) ||
- (val < (min_int - 0.5));
-
- case RM:
- return unsigned_ ? (val >= (max_uint + 1.0)) ||
- (val < 0)
- : (val >= (max_int + 1.0)) ||
- (val < min_int);
-
- case RZ:
- return unsigned_ ? (val >= (max_uint + 1.0)) ||
- (val <= -1)
- : (val >= (max_int + 1.0)) ||
- (val <= (min_int - 1.0));
- default:
- UNREACHABLE();
- return true;
- }
-}
-
-
-// We call this function only if we had a vfp invalid exception.
-// It returns the correct saturated value.
-int VFPConversionSaturate(double val, bool unsigned_res) {
- if (val != val) {
- return 0;
- } else {
- if (unsigned_res) {
- return (val < 0) ? 0 : 0xffffffffu;
- } else {
- return (val < 0) ? kMinInt : kMaxInt;
- }
- }
-}
-
-
-void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
- (instr->Bits(27, 23) == 0x1D));
- ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
- (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
-
- // Conversion between floating-point and integer.
- bool to_integer = (instr->Bit(18) == 1);
-
- VFPRegPrecision src_precision = (instr->SzValue() == 1) ? kDoublePrecision
- : kSinglePrecision;
-
- if (to_integer) {
- // We are playing with code close to the C++ standard's limits below,
- // hence the very simple code and heavy checks.
- //
- // Note:
- // C++ defines default type casting from floating point to integer as
- // (close to) rounding toward zero ("fractional part discarded").
-
- int dst = instr->VFPDRegValue(kSinglePrecision);
- int src = instr->VFPMRegValue(src_precision);
-
- // Bit 7 in vcvt instructions indicates if we should use the FPSCR rounding
- // mode or the default Round to Zero mode.
- VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_
- : RZ;
- ASSERT((mode == RM) || (mode == RZ) || (mode == RN));
-
- bool unsigned_integer = (instr->Bit(16) == 0);
- bool double_precision = (src_precision == kDoublePrecision);
-
- double val = double_precision ? get_double_from_d_register(src)
- : get_float_from_s_register(src);
-
- int temp = unsigned_integer ? static_cast<uint32_t>(val)
- : static_cast<int32_t>(val);
-
- inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
-
- double abs_diff =
- unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
- : fabs(val - temp);
-
- inexact_vfp_flag_ = (abs_diff != 0);
-
- if (inv_op_vfp_flag_) {
- temp = VFPConversionSaturate(val, unsigned_integer);
- } else {
- switch (mode) {
- case RN: {
- int val_sign = (val > 0) ? 1 : -1;
- if (abs_diff > 0.5) {
- temp += val_sign;
- } else if (abs_diff == 0.5) {
- // Round to even if exactly halfway.
- temp = ((temp % 2) == 0) ? temp : temp + val_sign;
- }
- break;
- }
-
- case RM:
- temp = temp > val ? temp - 1 : temp;
- break;
-
- case RZ:
- // Nothing to do.
- break;
-
- default:
- UNREACHABLE();
- }
- }
-
- // Update the destination register.
- set_s_register_from_sinteger(dst, temp);
-
- } else {
- bool unsigned_integer = (instr->Bit(7) == 0);
-
- int dst = instr->VFPDRegValue(src_precision);
- int src = instr->VFPMRegValue(kSinglePrecision);
-
- int val = get_sinteger_from_s_register(src);
-
- if (src_precision == kDoublePrecision) {
- if (unsigned_integer) {
- set_d_register_from_double(
- dst, static_cast<double>(static_cast<uint32_t>(val)));
- } else {
- set_d_register_from_double(dst, static_cast<double>(val));
- }
- } else {
- if (unsigned_integer) {
- set_s_register_from_float(
- dst, static_cast<float>(static_cast<uint32_t>(val)));
- } else {
- set_s_register_from_float(dst, static_cast<float>(val));
- }
- }
- }
-}
-
-
-// void Simulator::DecodeType6CoprocessorIns(Instruction* instr)
-// Decode Type 6 coprocessor instructions.
-// Dm = vmov(Rt, Rt2)
-// <Rt, Rt2> = vmov(Dm)
-// Ddst = MEM(Rbase + 4*offset).
-// MEM(Rbase + 4*offset) = Dsrc.
-void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
- ASSERT((instr->TypeValue() == 6));
-
- if (instr->CoprocessorValue() == 0xA) {
- switch (instr->OpcodeValue()) {
- case 0x8:
- case 0xA:
- case 0xC:
- case 0xE: { // Load and store single precision float to memory.
- int rn = instr->RnValue();
- int vd = instr->VFPDRegValue(kSinglePrecision);
- int offset = instr->Immed8Value();
- if (!instr->HasU()) {
- offset = -offset;
- }
-
- int32_t address = get_register(rn) + 4 * offset;
- if (instr->HasL()) {
- // Load double from memory: vldr.
- set_s_register_from_sinteger(vd, ReadW(address, instr));
- } else {
- // Store double to memory: vstr.
- WriteW(address, get_sinteger_from_s_register(vd), instr);
- }
- break;
- }
- case 0x4:
- case 0x5:
- case 0x6:
- case 0x7:
- case 0x9:
- case 0xB:
- // Load/store multiple single from memory: vldm/vstm.
- HandleVList(instr);
- break;
- default:
- UNIMPLEMENTED(); // Not used by V8.
- }
- } else if (instr->CoprocessorValue() == 0xB) {
- switch (instr->OpcodeValue()) {
- case 0x2:
- // Load and store double to two GP registers
- if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
- UNIMPLEMENTED(); // Not used by V8.
- } else {
- int rt = instr->RtValue();
- int rn = instr->RnValue();
- int vm = instr->VFPMRegValue(kDoublePrecision);
- if (instr->HasL()) {
- int32_t data[2];
- double d = get_double_from_d_register(vm);
- memcpy(data, &d, 8);
- set_register(rt, data[0]);
- set_register(rn, data[1]);
- } else {
- int32_t data[] = { get_register(rt), get_register(rn) };
- double d;
- memcpy(&d, data, 8);
- set_d_register_from_double(vm, d);
- }
- }
- break;
- case 0x8:
- case 0xA:
- case 0xC:
- case 0xE: { // Load and store double to memory.
- int rn = instr->RnValue();
- int vd = instr->VFPDRegValue(kDoublePrecision);
- int offset = instr->Immed8Value();
- if (!instr->HasU()) {
- offset = -offset;
- }
- int32_t address = get_register(rn) + 4 * offset;
- if (instr->HasL()) {
- // Load double from memory: vldr.
- int32_t data[] = {
- ReadW(address, instr),
- ReadW(address + 4, instr)
- };
- double val;
- memcpy(&val, data, 8);
- set_d_register_from_double(vd, val);
- } else {
- // Store double to memory: vstr.
- int32_t data[2];
- double val = get_double_from_d_register(vd);
- memcpy(data, &val, 8);
- WriteW(address, data[0], instr);
- WriteW(address + 4, data[1], instr);
- }
- break;
- }
- case 0x4:
- case 0x5:
- case 0x6:
- case 0x7:
- case 0x9:
- case 0xB:
- // Load/store multiple double from memory: vldm/vstm.
- HandleVList(instr);
- break;
- default:
- UNIMPLEMENTED(); // Not used by V8.
- }
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
-}
-
-
-// Executes the current instruction.
-void Simulator::InstructionDecode(Instruction* instr) {
- if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
- }
- pc_modified_ = false;
- if (::v8::internal::FLAG_trace_sim) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(instr));
- PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
- }
- if (instr->ConditionField() == kSpecialCondition) {
- UNIMPLEMENTED();
- } else if (ConditionallyExecute(instr)) {
- switch (instr->TypeValue()) {
- case 0:
- case 1: {
- DecodeType01(instr);
- break;
- }
- case 2: {
- DecodeType2(instr);
- break;
- }
- case 3: {
- DecodeType3(instr);
- break;
- }
- case 4: {
- DecodeType4(instr);
- break;
- }
- case 5: {
- DecodeType5(instr);
- break;
- }
- case 6: {
- DecodeType6(instr);
- break;
- }
- case 7: {
- DecodeType7(instr);
- break;
- }
- default: {
- UNIMPLEMENTED();
- break;
- }
- }
- // If the instruction is a non taken conditional stop, we need to skip the
- // inlined message address.
- } else if (instr->IsStop()) {
- set_pc(get_pc() + 2 * Instruction::kInstrSize);
- }
- if (!pc_modified_) {
- set_register(pc, reinterpret_cast<int32_t>(instr)
- + Instruction::kInstrSize);
- }
-}
-
-
-void Simulator::Execute() {
- // Get the PC to simulate. Cannot use the accessor here as we need the
- // raw PC value and not the one used as input to arithmetic instructions.
- int program_counter = get_pc();
-
- if (::v8::internal::FLAG_stop_sim_at == 0) {
- // Fast version of the dispatch loop without checking whether the simulator
- // should be stopping at a particular executed instruction.
- while (program_counter != end_sim_pc) {
- Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
- InstructionDecode(instr);
- program_counter = get_pc();
- }
- } else {
- // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
- // we reach the particular instuction count.
- while (program_counter != end_sim_pc) {
- Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
- if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
- ArmDebugger dbg(this);
- dbg.Debug();
- } else {
- InstructionDecode(instr);
- }
- program_counter = get_pc();
- }
- }
-}
-
-
-void Simulator::CallInternal(byte* entry) {
- // Prepare to execute the code at entry
- set_register(pc, reinterpret_cast<int32_t>(entry));
- // Put down marker for end of simulation. The simulator will stop simulation
- // when the PC reaches this value. By saving the "end simulation" value into
- // the LR the simulation stops when returning to this call point.
- set_register(lr, end_sim_pc);
-
- // Remember the values of callee-saved registers.
- // The code below assumes that r9 is not used as sb (static base) in
- // simulator code and therefore is regarded as a callee-saved register.
- int32_t r4_val = get_register(r4);
- int32_t r5_val = get_register(r5);
- int32_t r6_val = get_register(r6);
- int32_t r7_val = get_register(r7);
- int32_t r8_val = get_register(r8);
- int32_t r9_val = get_register(r9);
- int32_t r10_val = get_register(r10);
- int32_t r11_val = get_register(r11);
-
- // Set up the callee-saved registers with a known value. To be able to check
- // that they are preserved properly across JS execution.
- int32_t callee_saved_value = icount_;
- set_register(r4, callee_saved_value);
- set_register(r5, callee_saved_value);
- set_register(r6, callee_saved_value);
- set_register(r7, callee_saved_value);
- set_register(r8, callee_saved_value);
- set_register(r9, callee_saved_value);
- set_register(r10, callee_saved_value);
- set_register(r11, callee_saved_value);
-
- // Start the simulation
- Execute();
-
- // Check that the callee-saved registers have been preserved.
- CHECK_EQ(callee_saved_value, get_register(r4));
- CHECK_EQ(callee_saved_value, get_register(r5));
- CHECK_EQ(callee_saved_value, get_register(r6));
- CHECK_EQ(callee_saved_value, get_register(r7));
- CHECK_EQ(callee_saved_value, get_register(r8));
- CHECK_EQ(callee_saved_value, get_register(r9));
- CHECK_EQ(callee_saved_value, get_register(r10));
- CHECK_EQ(callee_saved_value, get_register(r11));
-
- // Restore callee-saved registers with the original value.
- set_register(r4, r4_val);
- set_register(r5, r5_val);
- set_register(r6, r6_val);
- set_register(r7, r7_val);
- set_register(r8, r8_val);
- set_register(r9, r9_val);
- set_register(r10, r10_val);
- set_register(r11, r11_val);
-}
-
-
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Set up arguments
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(r0, va_arg(parameters, int32_t));
- set_register(r1, va_arg(parameters, int32_t));
- set_register(r2, va_arg(parameters, int32_t));
- set_register(r3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
- CallInternal(entry);
-
- // Pop stack passed arguments.
- CHECK_EQ(entry_stack, get_register(sp));
- set_register(sp, original_stack);
-
- int32_t result = get_register(r0);
- return result;
-}
-
-
-double Simulator::CallFP(byte* entry, double d0, double d1) {
- if (use_eabi_hardfloat()) {
- set_d_register_from_double(0, d0);
- set_d_register_from_double(1, d1);
- } else {
- int buffer[2];
- ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- memcpy(buffer, &d0, sizeof(d0));
- set_dw_register(0, buffer);
- memcpy(buffer, &d1, sizeof(d1));
- set_dw_register(2, buffer);
- }
- CallInternal(entry);
- if (use_eabi_hardfloat()) {
- return get_double_from_d_register(0);
- } else {
- return get_double_from_register_pair(0);
- }
-}
-
-
-uintptr_t Simulator::PushAddress(uintptr_t address) {
- int new_sp = get_register(sp) - sizeof(uintptr_t);
- uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
- *stack_slot = address;
- set_register(sp, new_sp);
- return new_sp;
-}
-
-
-uintptr_t Simulator::PopAddress() {
- int current_sp = get_register(sp);
- uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
- uintptr_t address = *stack_slot;
- set_register(sp, current_sp + sizeof(uintptr_t));
- return address;
-}
-
-} } // namespace v8::internal
-
-#endif // USE_SIMULATOR
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/simulator-arm.h b/src/3rdparty/v8/src/arm/simulator-arm.h
deleted file mode 100644
index 907a590..0000000
--- a/src/3rdparty/v8/src/arm/simulator-arm.h
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Declares a Simulator for ARM instructions if we are not generating a native
-// ARM binary. This Simulator allows us to run and debug ARM code generation on
-// regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
-// which will start execution in the Simulator or forwards to the real entry
-// on a ARM HW platform.
-
-#ifndef V8_ARM_SIMULATOR_ARM_H_
-#define V8_ARM_SIMULATOR_ARM_H_
-
-#include "allocation.h"
-
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native arm platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, int, Address, int, Isolate*);
-
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm_regexp_matcher.
-// The fifth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<arm_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on arm uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch() { }
-};
-
-} } // namespace v8::internal
-
-#else // !defined(USE_SIMULATOR)
-// Running with a simulator.
-
-#include "constants-arm.h"
-#include "hashmap.h"
-#include "assembler.h"
-
-namespace v8 {
-namespace internal {
-
-class CachePage {
- public:
- static const int LINE_VALID = 0;
- static const int LINE_INVALID = 1;
-
- static const int kPageShift = 12;
- static const int kPageSize = 1 << kPageShift;
- static const int kPageMask = kPageSize - 1;
- static const int kLineShift = 2; // The cache line is only 4 bytes right now.
- static const int kLineLength = 1 << kLineShift;
- static const int kLineMask = kLineLength - 1;
-
- CachePage() {
- memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
- }
-
- char* ValidityByte(int offset) {
- return &validity_map_[offset >> kLineShift];
- }
-
- char* CachedData(int offset) {
- return &data_[offset];
- }
-
- private:
- char data_[kPageSize]; // The cached data.
- static const int kValidityMapSize = kPageSize >> kLineShift;
- char validity_map_[kValidityMapSize]; // One byte per line.
-};
-
-
-class Simulator {
- public:
- friend class ArmDebugger;
- enum Register {
- no_reg = -1,
- r0 = 0, r1, r2, r3, r4, r5, r6, r7,
- r8, r9, r10, r11, r12, r13, r14, r15,
- num_registers,
- sp = 13,
- lr = 14,
- pc = 15,
- s0 = 0, s1, s2, s3, s4, s5, s6, s7,
- s8, s9, s10, s11, s12, s13, s14, s15,
- s16, s17, s18, s19, s20, s21, s22, s23,
- s24, s25, s26, s27, s28, s29, s30, s31,
- num_s_registers = 32,
- d0 = 0, d1, d2, d3, d4, d5, d6, d7,
- d8, d9, d10, d11, d12, d13, d14, d15,
- d16, d17, d18, d19, d20, d21, d22, d23,
- d24, d25, d26, d27, d28, d29, d30, d31,
- num_d_registers = 32
- };
-
- explicit Simulator(Isolate* isolate);
- ~Simulator();
-
- // The currently executing Simulator instance. Potentially there can be one
- // for each native thread.
- static Simulator* current(v8::internal::Isolate* isolate);
-
- // Accessors for register state. Reading the pc value adheres to the ARM
- // architecture specification and is off by a 8 from the currently executing
- // instruction.
- void set_register(int reg, int32_t value);
- int32_t get_register(int reg) const;
- double get_double_from_register_pair(int reg);
- void set_dw_register(int dreg, const int* dbl);
-
- // Support for VFP.
- void set_s_register(int reg, unsigned int value);
- unsigned int get_s_register(int reg) const;
-
- void set_d_register_from_double(int dreg, const double& dbl) {
- SetVFPRegister<double, 2>(dreg, dbl);
- }
-
- double get_double_from_d_register(int dreg) {
- return GetFromVFPRegister<double, 2>(dreg);
- }
-
- void set_s_register_from_float(int sreg, const float flt) {
- SetVFPRegister<float, 1>(sreg, flt);
- }
-
- float get_float_from_s_register(int sreg) {
- return GetFromVFPRegister<float, 1>(sreg);
- }
-
- void set_s_register_from_sinteger(int sreg, const int sint) {
- SetVFPRegister<int, 1>(sreg, sint);
- }
-
- int get_sinteger_from_s_register(int sreg) {
- return GetFromVFPRegister<int, 1>(sreg);
- }
-
- // Special case of set_register and get_register to access the raw PC value.
- void set_pc(int32_t value);
- int32_t get_pc() const;
-
- // Accessor to the internal simulator stack area.
- uintptr_t StackLimit() const;
-
- // Executes ARM instructions until the PC reaches end_sim_pc.
- void Execute();
-
- // Call on program start.
- static void Initialize(Isolate* isolate);
-
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- int32_t Call(byte* entry, int argument_count, ...);
- // Alternative: call a 2-argument double function.
- double CallFP(byte* entry, double d0, double d1);
-
- // Push an address onto the JS stack.
- uintptr_t PushAddress(uintptr_t address);
-
- // Pop an address from the JS stack.
- uintptr_t PopAddress();
-
- // Debugger input.
- void set_last_debugger_input(char* input);
- char* last_debugger_input() { return last_debugger_input_; }
-
- // ICache checking.
- static void FlushICache(v8::internal::HashMap* i_cache, void* start,
- size_t size);
-
- // Returns true if pc register contains one of the 'special_values' defined
- // below (bad_lr, end_sim_pc).
- bool has_bad_pc() const;
-
- // EABI variant for double arguments in use.
- bool use_eabi_hardfloat() {
-#if USE_EABI_HARDFLOAT
- return true;
-#else
- return false;
-#endif
- }
-
- private:
- enum special_values {
- // Known bad pc value to ensure that the simulator does not execute
- // without being properly setup.
- bad_lr = -1,
- // A pc value used to signal the simulator to stop execution. Generally
- // the lr is set to this value on transition from native C code to
- // simulated execution, so that the simulator can "return" to the native
- // C code.
- end_sim_pc = -2
- };
-
- // Unsupported instructions use Format to print an error and stop execution.
- void Format(Instruction* instr, const char* format);
-
- // Checks if the current instruction should be executed based on its
- // condition bits.
- bool ConditionallyExecute(Instruction* instr);
-
- // Helper functions to set the conditional flags in the architecture state.
- void SetNZFlags(int32_t val);
- void SetCFlag(bool val);
- void SetVFlag(bool val);
- bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
- bool BorrowFrom(int32_t left, int32_t right);
- bool OverflowFrom(int32_t alu_out,
- int32_t left,
- int32_t right,
- bool addition);
-
- inline int GetCarry() {
- return c_flag_ ? 1 : 0;
- };
-
- // Support for VFP.
- void Compute_FPSCR_Flags(double val1, double val2);
- void Copy_FPSCR_to_APSR();
-
- // Helper functions to decode common "addressing" modes
- int32_t GetShiftRm(Instruction* instr, bool* carry_out);
- int32_t GetImm(Instruction* instr, bool* carry_out);
- void ProcessPUW(Instruction* instr,
- int num_regs,
- int operand_size,
- intptr_t* start_address,
- intptr_t* end_address);
- void HandleRList(Instruction* instr, bool load);
- void HandleVList(Instruction* inst);
- void SoftwareInterrupt(Instruction* instr);
-
- // Stop helper functions.
- inline bool isStopInstruction(Instruction* instr);
- inline bool isWatchedStop(uint32_t bkpt_code);
- inline bool isEnabledStop(uint32_t bkpt_code);
- inline void EnableStop(uint32_t bkpt_code);
- inline void DisableStop(uint32_t bkpt_code);
- inline void IncreaseStopCounter(uint32_t bkpt_code);
- void PrintStopInfo(uint32_t code);
-
- // Read and write memory.
- inline uint8_t ReadBU(int32_t addr);
- inline int8_t ReadB(int32_t addr);
- inline void WriteB(int32_t addr, uint8_t value);
- inline void WriteB(int32_t addr, int8_t value);
-
- inline uint16_t ReadHU(int32_t addr, Instruction* instr);
- inline int16_t ReadH(int32_t addr, Instruction* instr);
- // Note: Overloaded on the sign of the value.
- inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
- inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
-
- inline int ReadW(int32_t addr, Instruction* instr);
- inline void WriteW(int32_t addr, int value, Instruction* instr);
-
- int32_t* ReadDW(int32_t addr);
- void WriteDW(int32_t addr, int32_t value1, int32_t value2);
-
- // Executing is handled based on the instruction type.
- // Both type 0 and type 1 rolled into one.
- void DecodeType01(Instruction* instr);
- void DecodeType2(Instruction* instr);
- void DecodeType3(Instruction* instr);
- void DecodeType4(Instruction* instr);
- void DecodeType5(Instruction* instr);
- void DecodeType6(Instruction* instr);
- void DecodeType7(Instruction* instr);
-
- // Support for VFP.
- void DecodeTypeVFP(Instruction* instr);
- void DecodeType6CoprocessorIns(Instruction* instr);
-
- void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
- void DecodeVCMP(Instruction* instr);
- void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
- void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
-
- // Executes one instruction.
- void InstructionDecode(Instruction* instr);
-
- // ICache.
- static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
- int size);
- static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
-
- // Runtime call support.
- static void* RedirectExternalReference(
- void* external_function,
- v8::internal::ExternalReference::Type type);
-
- // For use in calls that take double value arguments.
- void GetFpArgs(double* x, double* y);
- void GetFpArgs(double* x);
- void GetFpArgs(double* x, int32_t* y);
- void SetFpResult(const double& result);
- void TrashCallerSaveRegisters();
-
- template<class ReturnType, int register_size>
- ReturnType GetFromVFPRegister(int reg_index);
-
- template<class InputType, int register_size>
- void SetVFPRegister(int reg_index, const InputType& value);
-
- void CallInternal(byte* entry);
-
- // Architecture state.
- // Saturating instructions require a Q flag to indicate saturation.
- // There is currently no way to read the CPSR directly, and thus read the Q
- // flag, so this is left unimplemented.
- int32_t registers_[16];
- bool n_flag_;
- bool z_flag_;
- bool c_flag_;
- bool v_flag_;
-
- // VFP architecture state.
- unsigned int vfp_registers_[num_d_registers * 2];
- bool n_flag_FPSCR_;
- bool z_flag_FPSCR_;
- bool c_flag_FPSCR_;
- bool v_flag_FPSCR_;
-
- // VFP rounding mode. See ARM DDI 0406B Page A2-29.
- VFPRoundingMode FPSCR_rounding_mode_;
-
- // VFP FP exception flags architecture state.
- bool inv_op_vfp_flag_;
- bool div_zero_vfp_flag_;
- bool overflow_vfp_flag_;
- bool underflow_vfp_flag_;
- bool inexact_vfp_flag_;
-
- // Simulator support.
- char* stack_;
- bool pc_modified_;
- int icount_;
-
- // Debugger input.
- char* last_debugger_input_;
-
- // Icache simulation
- v8::internal::HashMap* i_cache_;
-
- // Registered breakpoints.
- Instruction* break_pc_;
- Instr break_instr_;
-
- v8::internal::Isolate* isolate_;
-
- // A stop is watched if its code is less than kNumOfWatchedStops.
- // Only watched stops support enabling/disabling and the counter feature.
- static const uint32_t kNumOfWatchedStops = 256;
-
- // Breakpoint is disabled if bit 31 is set.
- static const uint32_t kStopDisabledBit = 1 << 31;
-
- // A stop is enabled, meaning the simulator will stop when meeting the
- // instruction, if bit 31 of watched_stops[code].count is unset.
- // The value watched_stops[code].count & ~(1 << 31) indicates how many times
- // the breakpoint was hit or gone through.
- struct StopCountAndDesc {
- uint32_t count;
- char* desc;
- };
- StopCountAndDesc watched_stops[kNumOfWatchedStops];
-};
-
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
- FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current())->Call( \
- entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
-
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. Setting the c_limit to indicate a very small
-// stack cause stack overflow errors, since the simulator ignores the input.
-// This is unlikely to be an issue in practice, though it might cause testing
-// trouble down the line.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit();
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
- }
-};
-
-} } // namespace v8::internal
-
-#endif // !defined(USE_SIMULATOR)
-#endif // V8_ARM_SIMULATOR_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/stub-cache-arm.cc b/src/3rdparty/v8/src/arm/stub-cache-arm.cc
deleted file mode 100644
index 03aa359..0000000
--- a/src/3rdparty/v8/src/arm/stub-cache-arm.cc
+++ /dev/null
@@ -1,4091 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- // Number of the cache entry, not scaled.
- Register offset,
- Register scratch,
- Register scratch2,
- Register offset_scratch) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
- uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
- uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
- // Check the relative positions of the address fields.
- ASSERT(value_off_addr > key_off_addr);
- ASSERT((value_off_addr - key_off_addr) % 4 == 0);
- ASSERT((value_off_addr - key_off_addr) < (256 * 4));
- ASSERT(map_off_addr > key_off_addr);
- ASSERT((map_off_addr - key_off_addr) % 4 == 0);
- ASSERT((map_off_addr - key_off_addr) < (256 * 4));
-
- Label miss;
- Register base_addr = scratch;
- scratch = no_reg;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ add(offset_scratch, offset, Operand(offset, LSL, 1));
-
- // Calculate the base address of the entry.
- __ mov(base_addr, Operand(key_offset));
- __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
-
- // Check that the key in the entry matches the name.
- __ ldr(ip, MemOperand(base_addr, 0));
- __ cmp(name, ip);
- __ b(ne, &miss);
-
- // Check the map matches.
- __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
- __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ cmp(ip, scratch2);
- __ b(ne, &miss);
-
- // Get the code entry from the cache.
- Register code = scratch2;
- scratch2 = no_reg;
- __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
- // Check that the flags match what we're looking for.
- Register flags_reg = base_addr;
- base_addr = no_reg;
- __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
- // It's a nice optimization if this constant is encodable in the bic insn.
-
- uint32_t mask = Code::kFlagsNotUsedInLookup;
- ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
- __ bic(flags_reg, flags_reg, Operand(mask));
- // Using cmn and the negative instead of cmp means we can use movw.
- if (flags < 0) {
- __ cmn(flags_reg, Operand(-flags));
- } else {
- __ cmp(flags_reg, Operand(flags));
- }
- __ b(ne, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Miss: fall through.
- __ bind(&miss);
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be internalized and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<String> name,
- Register scratch0,
- Register scratch1) {
- ASSERT(name->IsInternalizedString());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ b(ne, miss_label);
-
- // Check that receiver is a JSObject.
- __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(lt, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- // Check that the properties array is a dictionary.
- __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- Register tmp = properties;
- __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
- __ cmp(map, tmp);
- __ b(ne, miss_label);
-
- // Restore the temporarily used register.
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
-
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 12.
- ASSERT(sizeof(Entry) == 12);
-
- // Make sure the flags does not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- ASSERT(!scratch.is(receiver));
- ASSERT(!scratch.is(name));
- ASSERT(!extra.is(receiver));
- ASSERT(!extra.is(name));
- ASSERT(!extra.is(scratch));
- ASSERT(!extra2.is(receiver));
- ASSERT(!extra2.is(name));
- ASSERT(!extra2.is(scratch));
- ASSERT(!extra2.is(extra));
-
- // Check scratch, extra and extra2 registers are valid.
- ASSERT(!scratch.is(no_reg));
- ASSERT(!extra.is(no_reg));
- ASSERT(!extra2.is(no_reg));
- ASSERT(!extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
- extra2, extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
- __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ add(scratch, scratch, Operand(ip));
- uint32_t mask = kPrimaryTableSize - 1;
- // We shift out the last two bits because they are not part of the hash and
- // they are always 01 for maps.
- __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
- // Mask down the eor argument to the minimum to keep the immediate
- // ARM-encodable.
- __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
- // Prefer and_ to ubfx here because ubfx takes 2 cycles.
- __ and_(scratch, scratch, Operand(mask));
-
- // Probe the primary table.
- ProbeTable(isolate,
- masm,
- flags,
- kPrimary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary probe.
- __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
- uint32_t mask2 = kSecondaryTableSize - 1;
- __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
- __ and_(scratch, scratch, Operand(mask2));
-
- // Probe the secondary table.
- ProbeTable(isolate,
- masm,
- flags,
- kSecondary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
- extra2, extra3);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- // Load the global or builtins object from the current context.
- __ ldr(prototype,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- __ ldr(prototype,
- FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
- // Load the initial map. The global functions all have initial maps.
- __ ldr(prototype,
- FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss) {
- Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ ldr(prototype,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ Move(ip, isolate->global_object());
- __ cmp(prototype, ip);
- __ b(ne, miss);
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
- // Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Handle<Map>(function->initial_map()));
- // Load the prototype from the initial map.
- __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- Handle<JSObject> holder,
- PropertyIndex index) {
- DoGenerateFastPropertyLoad(
- masm, dst, src, index.is_inobject(holder), index.translate(holder));
-}
-
-
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
- int offset = index * kPointerSize;
- if (!inobject) {
- // Calculate the offset into the properties array.
- offset = offset + FixedArray::kHeaderSize;
- __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- src = dst;
- }
- __ ldr(dst, FieldMemOperand(src, offset));
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss_label);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, miss_label);
-
- // Load length directly from the JS array.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Ret();
-}
-
-
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-// If this is not needed, scratch1 and scratch2 may be the same register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ and_(scratch2, scratch1, Operand(kIsNotStringMask));
- // The cast is to resolve the overload for the argument of 0x0.
- __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
- __ b(ne, non_string_object);
-}
-
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss,
- bool support_wrappers) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
- support_wrappers ? &check_wrapper : miss);
-
- // Load length directly from the string.
- __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
- __ Ret();
-
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
-
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
- }
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(r0, scratch1);
- __ Ret();
-}
-
-
-// Generate StoreField code, value is passed in r0 register.
-// When leaving generated code after success, the receiver_reg and name_reg
-// may be clobbered. Upon branch to miss_label, the receiver and name
-// registers have their original values.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name,
- Register receiver_reg,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- // r0 : value
- Label exit;
-
- LookupResult lookup(masm->isolate());
- object->Lookup(*name, &lookup);
- if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
- // In sloppy mode, we could just return the value and be done. However, we
- // might be in strict mode, where we have to throw. Since we cannot tell,
- // go into slow case unconditionally.
- __ jmp(miss_label);
- return;
- }
-
- // Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, mode);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
- // Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- if (lookup.IsFound()) {
- holder = lookup.holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- // We need an extra register, push
- __ push(name_reg);
- Label miss_pop, done_check;
- CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, &miss_pop);
- __ jmp(&done_check);
- __ bind(&miss_pop);
- __ pop(name_reg);
- __ jmp(miss_label);
- __ bind(&done_check);
- __ pop(name_reg);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ push(receiver_reg);
- __ mov(r2, Operand(transition));
- __ Push(r2, r0);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- masm->isolate()),
- 3,
- 1);
- return;
- }
-
- if (!transition.is_null()) {
- // Update the map of the object.
- __ mov(scratch1, Operand(transition));
- __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- name_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
-
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ str(r0, FieldMemOperand(receiver_reg, offset));
-
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(r0, &exit);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, r0);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array
- __ ldr(scratch1,
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ str(r0, FieldMemOperand(scratch1, offset));
-
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(r0, &exit);
-
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, r0);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- }
-
- // Return the value (register r0).
- __ bind(&exit);
- __ Ret();
-}
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Handle<Code> code = (kind == Code::LOAD_IC)
- ? masm->isolate()->builtins()->LoadIC_Miss()
- : masm->isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateStoreMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
- Handle<Code> code = (kind == Code::STORE_IC)
- ? masm->isolate()->builtins()->StoreIC_Miss()
- : masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-static void GenerateCallFunction(MacroAssembler* masm,
- Handle<Object> object,
- const ParameterCount& arguments,
- Label* miss,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- r0: receiver
- // -- r1: function to call
- // -----------------------------------
-
- // Check that the function really is a function.
- __ JumpIfSmi(r1, miss);
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(r1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Operand(interceptor));
- __ push(scratch);
- __ push(receiver);
- __ push(holder);
- __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(scratch);
- __ mov(scratch, Operand(ExternalReference::isolate_address()));
- __ push(scratch);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ mov(r0, Operand(6));
- __ mov(r1, Operand(ref));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-}
-
-
-static const int kFastApiCallArguments = 4;
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
- Register scratch) {
- __ mov(scratch, Operand(Smi::FromInt(0)));
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(scratch);
- }
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
- __ Drop(kFastApiCallArguments);
-}
-
-
-static void GenerateFastApiDirectCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- // ----------- S t a t e -------------
- // -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee JS function
- // -- sp[8] : call data
- // -- sp[12] : isolate
- // -- sp[16] : last JS argument
- // -- ...
- // -- sp[(argc + 3) * 4] : first JS argument
- // -- sp[(argc + 4) * 4] : receiver
- // -----------------------------------
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(r5, function);
- __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
-
- // Pass the additional arguments.
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ Move(r0, api_call_info);
- __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
- } else {
- __ Move(r6, call_data);
- }
- __ mov(r7, Operand(ExternalReference::isolate_address()));
- // Store JS function, call data and isolate.
- __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
-
- // Prepare arguments.
- __ add(r2, sp, Operand(3 * kPointerSize));
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // r0 = v8::Arguments&
- // Arguments is after the return address.
- __ add(r0, sp, Operand(1 * kPointerSize));
- // v8::Arguments::implicit_args_
- __ str(r2, MemOperand(r0, 0 * kPointerSize));
- // v8::Arguments::values_
- __ add(ip, r2, Operand(argc * kPointerSize));
- __ str(ip, MemOperand(r0, 1 * kPointerSize));
- // v8::Arguments::length_ = argc
- __ mov(ip, Operand(argc));
- __ str(ip, MemOperand(r0, 2 * kPointerSize));
- // v8::Arguments::is_construct_call = 0
- __ mov(ip, Operand::Zero());
- __ str(ip, MemOperand(r0, 3 * kPointerSize));
-
- const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference ref = ExternalReference(&fun,
- ExternalReference::DIRECT_API_CALL,
- masm->isolate());
- AllowExternalCallThatCantCauseGC scope(masm);
-
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
-}
-
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- Code::ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
- Counters* counters = masm->isolate()->counters();
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- __ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
- scratch1, scratch2);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
- } else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm);
- __ b(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
-
- // Call a runtime function to load the interceptor property.
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- 6);
- // Restore the name_ register.
- __ pop(name_);
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Register scratch,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(holder, name_);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- }
- // If interceptor returns no-result sentinel, call the constant function.
- __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch);
- __ b(ne, interceptor_succeeded);
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- Code::ExtraICState extra_ic_state_;
-};
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- __ mov(scratch, Operand(cell));
- __ ldr(scratch,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- __ b(ne, miss);
-}
-
-
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-
-// Convert and store int passed in register ival to IEEE 754 single precision
-// floating point value at memory location (dst + 4 * wordoffset)
-// If VFP3 is available use it for conversion.
-static void StoreIntAsFloat(MacroAssembler* masm,
- Register dst,
- Register wordoffset,
- Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(s0, ival);
- __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
- __ vcvt_f32_s32(s0, s0);
- __ vstr(s0, scratch1, 0);
- } else {
- Label not_special, done;
- // Move sign bit from source to destination. This works because the sign
- // bit in the exponent word of the double has the same position and polarity
- // as the 2's complement sign bit in a Smi.
- ASSERT(kBinary32SignMask == 0x80000000u);
-
- __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
- // Negate value if it is negative.
- __ rsb(ival, ival, Operand::Zero(), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register ival contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(ival, Operand(1));
- __ b(gt, &not_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased).
- static const uint32_t exponent_word_for_1 =
- kBinary32ExponentBias << kBinary32ExponentShift;
-
- __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
- __ b(&done);
-
- __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- Register zeros = scratch2;
- __ CountLeadingZeros(zeros, ival, scratch1);
-
- // Compute exponent and or it into the exponent register.
- __ rsb(scratch1,
- zeros,
- Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
-
- __ orr(fval,
- fval,
- Operand(scratch1, LSL, kBinary32ExponentShift));
-
- // Shift up the source chopping the top bit off.
- __ add(zeros, zeros, Operand(1));
- // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
- __ mov(ival, Operand(ival, LSL, zeros));
- // And the top (top 20 bits).
- __ orr(fval,
- fval,
- Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
-
- __ bind(&done);
- __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
- }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void StubCompiler::GenerateTailCall(Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<String> name,
- int save_at_depth,
- Label* miss,
- PrototypeCheckType check) {
- Handle<JSObject> first = object;
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsInternalizedString()) {
- name = factory()->InternalizeString(name);
- }
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
- StringDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- Register map_reg = scratch1;
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- Handle<Map> current_map(current->map());
- // CheckMap implicitly loads the map of |reg| into |map_reg|.
- __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
- } else {
- __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
-
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (heap()->InNewSpace(*prototype)) {
- // The prototype is in new space; we cannot store a reference to it
- // in the code. Load it from the map.
- __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- } else {
- // The prototype is in old space; load it directly.
- __ mov(reg, Operand(prototype));
- }
- }
-
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
-
- // Log the check depth.
- LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- // Check the holder map.
- __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
- DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
- }
-
- // Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
- Label* miss) {
- if (!miss->is_unused()) {
- __ b(success);
- __ bind(miss);
- GenerateLoadMiss(masm(), kind());
- }
-}
-
-
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* success,
- Handle<ExecutableAccessorInfo> callback) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
-
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- ASSERT(!reg.is(scratch2()));
- ASSERT(!reg.is(scratch3()));
- ASSERT(!reg.is(scratch4()));
-
- // Load the properties dictionary.
- Register dictionary = scratch4();
- __ ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- &miss,
- &probe_done,
- dictionary,
- this->name(),
- scratch2(),
- scratch3());
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // pointer into the dictionary. Check that the value is the callback.
- Register pointer = scratch3();
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ ldr(scratch2(), FieldMemOperand(pointer, kValueOffset));
- __ cmp(scratch2(), Operand(callback));
- __ b(ne, &miss);
- }
-
- HandlerFrontendFooter(success, &miss);
- return reg;
-}
-
-
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Label* success,
- Handle<GlobalObject> global) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (!global.is_null()) {
- GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
- }
-
- if (!last->HasFastProperties()) {
- __ ldr(scratch2(), FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldr(scratch2(), FieldMemOperand(scratch2(), Map::kPrototypeOffset));
- __ cmp(scratch2(), Operand(isolate()->factory()->null_value()));
- __ b(ne, &miss);
- }
-
- HandlerFrontendFooter(success, &miss);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex index) {
- GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
- __ Ret();
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
- // Return the constant value.
- __ LoadHeapObject(r0, value);
- __ Ret();
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadCallback(
- Register reg,
- Handle<ExecutableAccessorInfo> callback) {
- // Build AccessorInfo::args_ list on the stack and push property name below
- // the exit frame to make GC aware of them and store pointers to them.
- __ push(receiver());
- __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
- if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch3(), callback);
- __ ldr(scratch3(), FieldMemOperand(scratch3(),
- ExecutableAccessorInfo::kDataOffset));
- } else {
- __ Move(scratch3(), Handle<Object>(callback->data(),
- callback->GetIsolate()));
- }
- __ Push(reg, scratch3());
- __ mov(scratch3(), Operand(ExternalReference::isolate_address()));
- __ Push(scratch3(), name());
- __ mov(r0, sp); // r0 = Handle<String>
-
- const int kApiStackSpace = 1;
- FrameScope frame_scope(masm(), StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // Create AccessorInfo instance on the stack above the exit frame with
- // scratch2 (internal::Object** args_) as the data.
- __ str(scratch2(), MemOperand(sp, 1 * kPointerSize));
- __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
-
- const int kStackUnwindSpace = 5;
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference ref =
- ExternalReference(&fun,
- ExternalReference::DIRECT_GETTER_CALL,
- masm()->isolate());
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
- Register holder_reg,
- Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name) {
- ASSERT(interceptor_holder->HasNamedInterceptor());
- ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- ExecutableAccessorInfo* callback =
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
- compile_followup_inline = callback->getter() != NULL &&
- callback->IsCompatibleReceiver(*object);
- }
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code,
- // the FIELD case might cause a miss during the prototype check.
- bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perfrom_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (must_preserve_receiver_reg) {
- __ Push(receiver(), holder_reg, this->name());
- } else {
- __ Push(holder_reg, this->name());
- }
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1());
- __ b(eq, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ Ret();
-
- __ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
- // Leave the internal frame.
- }
-
- GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- PushInterceptorArguments(masm(), receiver(), holder_reg,
- this->name(), interceptor_holder);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
- masm()->isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
-}
-
-
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(r2, Operand(name));
- __ b(ne, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(r0, miss);
- CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ mov(r3, Operand(cell));
- __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(r1, miss);
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ cmp(r4, r3);
- } else {
- __ cmp(r1, Operand(function));
- }
- __ b(ne, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- const int argc = arguments().immediate();
-
- // Get the receiver of the function from the stack into r0.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r0, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
- GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
-
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- Register receiver = r1;
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
- name, &miss);
-
- if (argc == 0) {
- // Nothing to do, just return the length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Drop(argc + 1);
- __ Ret();
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- Register elements = r6;
- Register end_elements = r5;
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedArrayMapRootIndex,
- &check_double,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r0 and calculate new length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r0, r0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(r0, r4);
- __ b(gt, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ JumpIfNotSmi(r4, &with_write_barrier);
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- const int kEndElementsOffset =
- FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- // Check for a smi.
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&check_double);
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedDoubleArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r0 and calculate new length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r0, r0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(r0, r4);
- __ b(gt, &call_builtin);
-
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(
- r4, r0, elements, r3, r5, r2, r9,
- &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Check for a smi.
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&with_write_barrier);
-
- __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(r3, r7, &not_fast_object);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(r3, r7, &call_builtin);
-
- __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r7, ip);
- __ b(eq, &call_builtin);
- // edx: receiver
- // r3: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- r3,
- r7,
- &try_holey_map);
- __ mov(r2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- r3,
- r7,
- &call_builtin);
- __ mov(r2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(r3, r3, &call_builtin);
- }
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- __ RecordWrite(elements,
- end_elements,
- r4,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&attempt_to_grow_elements);
- // r0: array's length + 1.
- // r4: elements' length.
-
- if (!FLAG_inline_new) {
- __ b(&call_builtin);
- }
-
- __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(r2, &no_fast_elements_check);
- __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(r7, r7, &call_builtin);
- __ bind(&no_fast_elements_check);
-
- Isolate* isolate = masm()->isolate();
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate);
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate);
-
- const int kAllocationDelta = 4;
- // Load top and check if it is the end of elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(end_elements, end_elements, Operand(kEndElementsOffset));
- __ mov(r7, Operand(new_space_allocation_top));
- __ ldr(r3, MemOperand(r7));
- __ cmp(end_elements, r3);
- __ b(ne, &call_builtin);
-
- __ mov(r9, Operand(new_space_allocation_limit));
- __ ldr(r9, MemOperand(r9));
- __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
- __ cmp(r3, r9);
- __ b(hi, &call_builtin);
-
- // We fit and could grow elements.
- // Update new_space_allocation_top.
- __ str(r3, MemOperand(r7));
- // Push the argument.
- __ str(r2, MemOperand(end_elements));
- // Fill the rest with holes.
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ str(r3, MemOperand(end_elements, i * kPointerSize));
- }
-
- // Update elements' and array's sizes.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
- __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Elements are in new space, so write barrier is not required.
- __ Drop(argc + 1);
- __ Ret();
- }
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- masm()->isolate()),
- argc + 1,
- 1);
- }
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss, return_undefined, call_builtin;
- Register receiver = r1;
- Register elements = r3;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
- r4, r0, name, &miss);
-
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r4 and calculate new length.
- __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ sub(r4, r4, Operand(Smi::FromInt(1)), SetCC);
- __ b(lt, &return_undefined);
-
- // Get the last element.
- __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- // We can't address the last element in one operation. Compute the more
- // expensive shift first, and use an offset later on.
- __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ cmp(r0, r6);
- __ b(eq, &call_builtin);
-
- // Set the array's length.
- __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Fill with the hole.
- __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&return_undefined);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
- masm()->isolate()),
- argc + 1,
- 1);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r1, r3, r4, name, &miss);
-
- Register receiver = r1;
- Register index = r4;
- Register result = r0;
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in r2.
- __ Move(r2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r1, r3, r4, name, &miss);
-
- Register receiver = r0;
- Register index = r4;
- Register scratch = r3;
- Register result = r0;
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kempty_stringRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in r2.
- __ Move(r2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r1, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = r1;
- __ ldr(code, MemOperand(sp, 0 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ and_(code, code, Operand(Smi::FromInt(0xffff)));
-
- StringCharFromCodeGenerator generator(code, r0);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- if (!CpuFeatures::IsSupported(VFP2)) {
- return Handle<Code>::null();
- }
-
- CpuFeatures::Scope scope_vfp2(VFP2);
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss, slow;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into r0.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
-
- // If the argument is a smi, just return.
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
- __ Drop(argc + 1, eq);
- __ Ret(eq);
-
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
-
- Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return;
-
- // If vfp3 is enabled, we use the fpu rounding with the RM (round towards
- // minus infinity) mode.
-
- // Load the HeapNumber value.
- // We will need access to the value in the core registers, so we load it
- // with ldrd and move it to the fpu. It also spares a sub instruction for
- // updating the HeapNumber value address, as vldr expects a multiple
- // of 4 offset.
- __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(d1, r4, r5);
-
- // Backup FPSCR.
- __ vmrs(r3);
- // Set custom FPCSR:
- // - Set rounding mode to "Round towards Minus Infinity"
- // (i.e. bits [23:22] = 0b10).
- // - Clear vfp cumulative exception flags (bits [3:0]).
- // - Make sure Flush-to-zero mode control bit is unset (bit 22).
- __ bic(r9, r3,
- Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
- __ orr(r9, r9, Operand(kRoundToMinusInf));
- __ vmsr(r9);
-
- // Convert the argument to an integer.
- __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
-
- // Use vcvt latency to start checking for special cases.
- // Get the argument exponent and clear the sign bit.
- __ bic(r6, r5, Operand(HeapNumber::kSignMask));
- __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord));
-
- // Retrieve FPSCR and check for vfp exceptions.
- __ vmrs(r9);
- __ tst(r9, Operand(kVFPExceptionMask));
- __ b(&no_vfp_exception, eq);
-
- // Check for NaN, Infinity, and -Infinity.
- // They are invariant through a Math.Floor call, so just
- // return the original argument.
- __ sub(r7, r6, Operand(HeapNumber::kExponentMask
- >> HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ b(&restore_fpscr_and_return, eq);
- // We had an overflow or underflow in the conversion. Check if we
- // have a big exponent.
- __ cmp(r7, Operand(HeapNumber::kMantissaBits));
- // If greater or equal, the argument is already round and in r0.
- __ b(&restore_fpscr_and_return, ge);
- __ b(&wont_fit_smi);
-
- __ bind(&no_vfp_exception);
- // Move the result back to general purpose register r0.
- __ vmov(r0, s0);
- // Check if the result fits into a smi.
- __ add(r1, r0, Operand(0x40000000), SetCC);
- __ b(&wont_fit_smi, mi);
- // Tag the result.
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-
- // Check for -0.
- __ cmp(r0, Operand::Zero());
- __ b(&restore_fpscr_and_return, ne);
- // r5 already holds the HeapNumber exponent.
- __ tst(r5, Operand(HeapNumber::kSignMask));
- // If our HeapNumber is negative it was -0, so load its address and return.
- // Else r0 is loaded with 0, so we can also just return.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne);
-
- __ bind(&restore_fpscr_and_return);
- // Restore FPSCR and return.
- __ vmsr(r3);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&wont_fit_smi);
- // Restore FPCSR and fall to slow case.
- __ vmsr(r3);
-
- __ bind(&slow);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into r0.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(r0, &not_smi);
-
- // Do bitwise not or do nothing depending on the sign of the
- // argument.
- __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
-
- // Add 1 or do nothing depending on the sign of the argument.
- __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ b(mi, &slow);
-
- // Smi case done.
- __ Drop(argc + 1);
- __ Ret();
-
- // Check if the argument is a heap number and load its exponent and
- // sign.
- __ bind(&not_smi);
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ tst(r1, Operand(HeapNumber::kSignMask));
- __ b(ne, &negative_sign);
- __ Drop(argc + 1);
- __ Ret();
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ eor(r1, r1, Operand(HeapNumber::kSignMask));
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r0, r4, r5, r6, &slow);
- __ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ Drop(argc + 1);
- __ Ret();
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- Counters* counters = isolate()->counters();
-
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r1, &miss_before_stack_reserved);
-
- __ IncrementCounter(counters->call_const(), 1, r0, r3);
- __ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3);
-
- ReserveSpaceForFastApiCall(masm(), r0);
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
- depth, &miss);
-
- GenerateFastApiDirectCall(masm(), optimization, argc);
-
- __ bind(&miss);
- FreeSpaceForFastApiCall(masm());
-
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(r1, &miss);
- }
-
- // Make sure that it's okay not to patch the on stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(masm()->isolate()->counters()->call_const(),
- 1, r0, r3);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
- break;
-
- case STRING_CHECK:
- // Check that the object is a string.
- __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
- break;
-
- case SYMBOL_CHECK:
- // Check that the object is a symbol.
- __ CompareObjectType(r1, r1, r3, SYMBOL_TYPE);
- __ b(ne, &miss);
- break;
-
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(r1, &fast);
- __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
- break;
- }
- case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &fast);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
- break;
- }
- }
-
- __ b(success);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-}
-
-
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Handle<JSFunction> function) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label success;
-
- CompileHandlerFrontend(object, holder, name, check, &success);
- __ bind(&success);
- CompileHandlerBackend(function);
-
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
- compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
- &miss);
-
- // Move returned value, the function to call, to r1.
- __ mov(r1, r0);
- // Restore receiver.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
- // Set up the context (function already in r1).
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- r1, r2, r3, r4,
- &miss);
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- // Check that the maps haven't changed.
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
-
- // Stub never generated for non-global objects that require access checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-
- __ push(r1); // receiver
- __ mov(ip, Operand(callback)); // callback info
- __ Push(ip, r2, r0);
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(r0);
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- __ Push(r1, r0);
- ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(r0);
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
-
- GenerateStoreViaSetter(masm(), setter);
-
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> receiver,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(r1, r3, &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-
- __ Push(r1, r2, r0); // Receiver, name, value.
-
- __ mov(r0, Operand(Smi::FromInt(strict_mode_)));
- __ push(r0); // strict mode
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
- masm()->isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Handle<Map>(object->map())));
- __ b(ne, &miss);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ mov(r4, Operand(cell));
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
- __ cmp(r5, r6);
- __ b(eq, &miss);
-
- // Store the value in the cell.
- __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
- __ Ret();
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1, r4, r3);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Handle<GlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
-
- __ bind(&success);
- // Return undefined if maps of the full prototype chain are still the
- // same and no global property with this name contains a value.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ Ret();
-
- // Return the generated code.
- return GetCode(Code::HANDLER_FRAGMENT, Code::NONEXISTENT, name);
-}
-
-
-Register* LoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { r0, r2, r3, r1, r4, r5 };
- return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { r1, r0, r2, r3, r4, r5 };
- return registers;
-}
-
-
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<String> name,
- Register name_reg,
- Label* miss) {
- __ cmp(name_reg, Operand(name));
- __ b(ne, miss);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- __ push(r0);
- ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> global,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
- bool is_dont_delete) {
- Label success, miss;
-
- __ CheckMap(
- receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
-
- // Get the value from the cell.
- __ mov(r3, Operand(cell));
- __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
-
- // Check for deleted property if property can actually be deleted.
- if (!is_dont_delete) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &miss);
- }
-
- HandlerFrontendFooter(&success, &miss);
- __ bind(&success);
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
- __ mov(r0, r4);
- __ Ret();
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements()) {
- Handle<Code> stub = KeyedLoadFastElementStub(
- receiver_map->instance_type() == JS_ARRAY_TYPE,
- elements_kind).GetCode(isolate());
- __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
- } else {
- Handle<Code> stub =
- KeyedLoadDictionaryElementStub().GetCode(isolate());
- __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
- }
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- Handle<String> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
- }
-
- __ JumpIfSmi(receiver(), &miss);
- Register map_reg = scratch1();
-
- int receiver_count = receiver_maps->length();
- __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- __ mov(ip, Operand(receiver_maps->at(current)));
- __ cmp(map_reg, ip);
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
- }
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), kind());
-
- // Return the generated code.
- InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(Code::IC_FRAGMENT, type, name, state);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : name
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1, r3, r4);
-
- // Check that the name has not changed.
- __ cmp(r1, Operand(name));
- __ b(ne, &miss);
-
- // r3 is used as scratch register. r1 and r2 keep their values if a jump to
- // the miss label is generated.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- r2, r1, r3, r4,
- &miss);
- __ bind(&miss);
-
- __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
- Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub =
- KeyedStoreElementStub(is_js_array,
- elements_kind,
- grow_mode_).GetCode(isolate());
-
- __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -----------------------------------
- Label miss;
- __ JumpIfSmi(r2, &miss);
-
- int receiver_count = receiver_maps->length();
- __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- __ mov(ip, Operand(receiver_maps->at(i)));
- __ cmp(r3, ip);
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
- } else {
- Label next_map;
- __ b(ne, &next_map);
- __ mov(r3, Operand(transitioned_maps->at(i)));
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // ----------- S t a t e -------------
- // -- r0 : argc
- // -- r1 : constructor
- // -- lr : return address
- // -- [sp] : last argument
- // -----------------------------------
- Label generic_stub_call;
-
- // Use r7 for holding undefined which is used in several places below.
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are jump to the generic constructor stub which calls the actual
- // code for the function thereby hitting the break points.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(r2, r7);
- __ b(ne, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- // r7: undefined
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &generic_stub_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ b(ne, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r7: undefined
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
- __ Check(ne, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject in new space.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r7: undefined
- ASSERT(function->has_initial_map());
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-#ifdef DEBUG
- int instance_size = function->initial_map()->instance_size();
- __ cmp(r3, Operand(instance_size >> kPointerSizeLog2));
- __ Check(eq, "Instance size of initial map changed.");
-#endif
- __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r7: undefined
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Calculate the location of the first argument. The stack contains only the
- // argc arguments.
- __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
-
- // Fill all the in-object properties with undefined.
- // r0: argc
- // r1: first argument
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- // r7: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- Label not_passed, next;
- // Check if the argument assigned to the property is actually passed.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ cmp(r0, Operand(arg_number));
- __ b(le, &not_passed);
- // Argument passed - find it on the stack.
- __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize));
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- __ b(&next);
- __ bind(&not_passed);
- // Set the property to undefined.
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- __ bind(&next);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
- isolate());
- __ mov(r2, Operand(constant));
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- }
-
- // r0: argc
- // r4: JSObject (not tagged)
- // Move argc to r1 and the JSObject to return to r0 and tag it.
- __ mov(r1, r0);
- __ mov(r0, r4);
- __ orr(r0, r0, Operand(kHeapObjectTag));
-
- // r0: JSObject
- // r1: argc
- // Remove caller arguments and receiver from the stack and return.
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
- __ add(sp, sp, Operand(kPointerSize));
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1, r1, r2);
- __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2);
- __ Jump(lr);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(code, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow, miss_force_generic;
-
- Register key = r0;
- Register receiver = r1;
-
- __ JumpIfNotSmi(key, &miss_force_generic);
- __ mov(r2, Operand(key, ASR, kSmiTagSize));
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
- __ Ret();
-
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-static bool IsElementTypeSigned(ElementsKind elements_kind) {
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- return true;
-
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- return false;
-
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- return false;
- }
- return false;
-}
-
-
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch0,
- Register scratch1,
- DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
- Label* fail) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- Label key_ok;
- // Check for smi or a smi inside a heap number. We convert the heap
- // number and check if the conversion is exact and fits into the smi
- // range.
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- scratch0,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
- __ sub(ip, key, Operand(kHeapObjectTag));
- __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
- __ EmitVFPTruncate(kRoundToZero,
- scratch0,
- double_scratch0,
- scratch1,
- double_scratch1,
- kCheckForInexactConversion);
- __ b(ne, fail);
- __ TrySmiTag(scratch0, fail, scratch1);
- __ mov(key, scratch0);
- __ bind(&key_ok);
- } else {
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, fail);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, check_heap_number, miss_force_generic;
-
- // Register usage.
- Register value = r0;
- Register key = r1;
- Register receiver = r2;
- // r3 mostly holds the elements array or the destination external array.
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check that the index is in range
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(key, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &miss_force_generic);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // r3: external array.
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Double to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(value, &slow);
- } else {
- __ JumpIfNotSmi(value, &check_heap_number);
- }
- __ SmiUntag(r5, value);
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
- // r5: value (integer).
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- // Clamp the value to [0..255].
- __ Usat(r5, 8, Operand(r5));
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Perform int-to-float conversion and store to memory.
- __ SmiUntag(r4, key);
- StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ add(r3, r3, Operand(key, LSL, 2));
- // r3: effective address of the double element
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP2)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
- FloatingPointHelper::ConvertIntToDouble(
- masm, r5, destination,
- d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent.
- r4, s2); // These are: scratch2, single_scratch.
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP2);
- __ vstr(d0, r3, 0);
- } else {
- __ str(r6, MemOperand(r3, 0));
- __ str(r7, MemOperand(r3, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // Entry registers are intact, r0 holds the value which is the return value.
- __ Ret();
-
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- // r3: external array.
- __ bind(&check_heap_number);
- __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 1));
- __ vcvt_f32_f64(s0, d0);
- __ vstr(s0, r5, 0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 2));
- __ vstr(d0, r5, 0);
- } else {
- // Hoisted load. vldr requires offset to be a multiple of 4 so we can
- // not include -kHeapObjectTag into it.
- __ sub(r5, value, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ EmitECMATruncate(r5, d0, d1, r6, r7, r9);
-
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
- } else {
- // VFP3 is not available do manual conversions.
- __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ b(eq, &nan_or_infinity_or_zero);
-
- __ teq(r9, Operand(r7));
- __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
- __ b(eq, &nan_or_infinity_or_zero);
-
- // Rebias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ add(r9,
- r9,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ cmp(r9, Operand(kBinary32MaxExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
- __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
- __ b(gt, &done);
-
- __ cmp(r9, Operand(kBinary32MinExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
- __ b(lt, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
- __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
-
- __ bind(&done);
- __ str(r5, MemOperand(r3, key, LSL, 1));
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
-
- __ bind(&nan_or_infinity_or_zero);
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r9, r9, r7);
- __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
- __ b(&done);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ add(r7, r3, Operand(key, LSL, 2));
- // r7: effective address of destination element.
- __ str(r6, MemOperand(r7, 0));
- __ str(r5, MemOperand(r7, Register::kSizeInBytes));
- __ Ret();
- } else {
- bool is_signed_type = IsElementTypeSigned(elements_kind);
- int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ mov(r5, Operand::Zero(), LeaveCC, eq);
- __ b(eq, &done);
-
- __ teq(r9, Operand(r7));
- __ mov(r5, Operand::Zero(), LeaveCC, eq);
- __ b(eq, &done);
-
- // Unbias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
- // If exponent is negative then result is 0.
- __ mov(r5, Operand::Zero(), LeaveCC, mi);
- __ b(mi, &done);
-
- // If exponent is too big then result is minimal value.
- __ cmp(r9, Operand(meaningfull_bits - 1));
- __ mov(r5, Operand(min_value), LeaveCC, ge);
- __ b(ge, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
- __ b(pl, &sign);
-
- __ rsb(r9, r9, Operand::Zero());
- __ mov(r5, Operand(r5, LSL, r9));
- __ rsb(r9, r9, Operand(meaningfull_bits));
- __ orr(r5, r5, Operand(r6, LSR, r9));
-
- __ bind(&sign);
- __ teq(r7, Operand::Zero());
- __ rsb(r5, r5, Operand::Zero(), LeaveCC, ne);
-
- __ bind(&done);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
- }
- }
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -- r4 : scratch (elements)
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register scratch = r4;
- Register elements_reg = r3;
- Register length_reg = r5;
- Register scratch2 = r6;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
- }
-
- // Check that the key is within bounds.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- if (is_js_array) {
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis.
- __ cmp(key_reg, scratch);
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ b(hs, &grow);
- } else {
- __ b(hs, &miss_force_generic);
- }
-
- // Make sure elements is a fast element array, not 'cow'.
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(scratch,
- scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value_reg, MemOperand(scratch));
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(scratch,
- scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value_reg, MemOperand(scratch));
- __ mov(receiver_reg, value_reg);
- __ RecordWrite(elements_reg, // Object.
- scratch, // Address.
- receiver_reg, // Value.
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- }
- // value_reg (r0) is preserved.
- // Done.
- __ Ret();
-
- __ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags already set by previous compare.
- __ b(ne, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ ldr(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
- TAG_OBJECT);
-
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
- }
-
- // Store the element at index zero.
- __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
-
- // Install the new backing store in the JSArray.
- __ str(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ Ret();
-
- __ bind(&check_capacity);
- // Check for cow elements, in general they are not handled by this stub
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedCOWArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ cmp(length_reg, scratch);
- __ b(hs, &slow);
-
- // Grow the array and finish the store.
- __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch (elements backing store)
- // -- r4 : scratch
- // -- r5 : scratch
- // -- r6 : scratch
- // -- r7 : scratch
- // -- r9 : scratch
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register elements_reg = r3;
- Register scratch1 = r4;
- Register scratch2 = r5;
- Register scratch3 = r6;
- Register scratch4 = r7;
- Register scratch5 = r9;
- Register length_reg = r7;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
-
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch1,
- FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis, unsigned compare catches both negative and out-of-bound
- // indexes.
- __ cmp(key_reg, scratch1);
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ b(hs, &grow);
- } else {
- __ b(hs, &miss_force_generic);
- }
-
- __ bind(&finish_store);
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- // All registers after this are overwritten.
- elements_reg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- &transition_elements_kind);
- __ Ret();
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags already set by previous compare.
- __ b(ne, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(value_reg, &value_is_smi);
- __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ ldr(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
- TAG_OBJECT);
-
- // Initialize the new FixedDoubleArray.
- __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
- __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ mov(scratch1,
- Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ str(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-
- __ mov(scratch1, elements_reg);
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- // All registers after this are overwritten.
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- scratch5,
- &transition_elements_kind);
-
- __ mov(scratch1, Operand(kHoleNanLower32));
- __ mov(scratch2, Operand(kHoleNanUpper32));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ str(scratch1, FieldMemOperand(elements_reg, offset));
- __ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
- }
-
- // Install the new backing store in the JSArray.
- __ str(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ Ret();
-
- __ bind(&check_capacity);
- // Make sure that the backing store can hold additional elements.
- __ ldr(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
- __ cmp(length_reg, scratch1);
- __ b(hs, &slow);
-
- // Grow the array and finish the store.
- __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/array.js b/src/3rdparty/v8/src/array.js
deleted file mode 100644
index 9b0bfe1..0000000
--- a/src/3rdparty/v8/src/array.js
+++ /dev/null
@@ -1,1570 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file relies on the fact that the following declarations have been made
-// in runtime.js:
-// var $Array = global.Array;
-
-// -------------------------------------------------------------------
-
-// Global list of arrays visited during toString, toLocaleString and
-// join invocations.
-var visited_arrays = new InternalArray();
-
-
-// Gets a sorted array of array keys. Useful for operations on sparse
-// arrays. Dupes have not been removed.
-function GetSortedArrayKeys(array, intervals) {
- var length = intervals.length;
- var keys = [];
- for (var k = 0; k < length; k++) {
- var key = intervals[k];
- if (key < 0) {
- var j = -1 - key;
- var limit = j + intervals[++k];
- for (; j < limit; j++) {
- var e = array[j];
- if (!IS_UNDEFINED(e) || j in array) {
- keys.push(j);
- }
- }
- } else {
- // The case where key is undefined also ends here.
- if (!IS_UNDEFINED(key)) {
- var e = array[key];
- if (!IS_UNDEFINED(e) || key in array) {
- keys.push(key);
- }
- }
- }
- }
- %_CallFunction(keys, function(a, b) { return a - b; }, ArraySort);
- return keys;
-}
-
-
-function SparseJoinWithSeparator(array, len, convert, separator) {
- var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
- var totalLength = 0;
- var elements = new InternalArray(keys.length * 2);
- var previousKey = -1;
- for (var i = 0; i < keys.length; i++) {
- var key = keys[i];
- if (key != previousKey) { // keys may contain duplicates.
- var e = array[key];
- if (!IS_STRING(e)) e = convert(e);
- elements[i * 2] = key;
- elements[i * 2 + 1] = e;
- previousKey = key;
- }
- }
- return %SparseJoinWithSeparator(elements, len, separator);
-}
-
-
-// Optimized for sparse arrays if separator is ''.
-function SparseJoin(array, len, convert) {
- var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
- var last_key = -1;
- var keys_length = keys.length;
-
- var elements = new InternalArray(keys_length);
- var elements_length = 0;
-
- for (var i = 0; i < keys_length; i++) {
- var key = keys[i];
- if (key != last_key) {
- var e = array[key];
- if (!IS_STRING(e)) e = convert(e);
- elements[elements_length++] = e;
- last_key = key;
- }
- }
- return %StringBuilderConcat(elements, elements_length, '');
-}
-
-
-function UseSparseVariant(object, length, is_array) {
- return is_array &&
- length > 1000 &&
- (!%_IsSmi(length) ||
- %EstimateNumberOfElements(object) < (length >> 2));
-}
-
-
-function Join(array, length, separator, convert) {
- if (length == 0) return '';
-
- var is_array = IS_ARRAY(array);
-
- if (is_array) {
- // If the array is cyclic, return the empty string for already
- // visited arrays.
- if (!%PushIfAbsent(visited_arrays, array)) return '';
- }
-
- // Attempt to convert the elements.
- try {
- if (UseSparseVariant(array, length, is_array)) {
- if (separator.length == 0) {
- return SparseJoin(array, length, convert);
- } else {
- return SparseJoinWithSeparator(array, length, convert, separator);
- }
- }
-
- // Fast case for one-element arrays.
- if (length == 1) {
- var e = array[0];
- if (IS_STRING(e)) return e;
- return convert(e);
- }
-
- // Construct an array for the elements.
- var elements = new InternalArray(length);
-
- // We pull the empty separator check outside the loop for speed!
- if (separator.length == 0) {
- var elements_length = 0;
- for (var i = 0; i < length; i++) {
- var e = array[i];
- if (!IS_STRING(e)) e = convert(e);
- elements[elements_length++] = e;
- }
- elements.length = elements_length;
- var result = %_FastAsciiArrayJoin(elements, '');
- if (!IS_UNDEFINED(result)) return result;
- return %StringBuilderConcat(elements, elements_length, '');
- }
- // Non-empty separator case.
- // If the first element is a number then use the heuristic that the
- // remaining elements are also likely to be numbers.
- if (!IS_NUMBER(array[0])) {
- for (var i = 0; i < length; i++) {
- var e = array[i];
- if (!IS_STRING(e)) e = convert(e);
- elements[i] = e;
- }
- } else {
- for (var i = 0; i < length; i++) {
- var e = array[i];
- if (IS_NUMBER(e)) {
- e = %_NumberToString(e);
- } else if (!IS_STRING(e)) {
- e = convert(e);
- }
- elements[i] = e;
- }
- }
- var result = %_FastAsciiArrayJoin(elements, separator);
- if (!IS_UNDEFINED(result)) return result;
-
- return %StringBuilderJoin(elements, length, separator);
- } finally {
- // Make sure to remove the last element of the visited array no
- // matter what happens.
- if (is_array) visited_arrays.length = visited_arrays.length - 1;
- }
-}
-
-
-function ConvertToString(x) {
- // Assumes x is a non-string.
- if (IS_NUMBER(x)) return %_NumberToString(x);
- if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
- return (IS_NULL_OR_UNDEFINED(x)) ? '' : %ToString(%DefaultString(x));
-}
-
-
-function ConvertToLocaleString(e) {
- if (IS_NULL_OR_UNDEFINED(e)) {
- return '';
- } else {
- // According to ES5, section 15.4.4.3, the toLocaleString conversion
- // must throw a TypeError if ToObject(e).toLocaleString isn't
- // callable.
- var e_obj = ToObject(e);
- return %ToString(e_obj.toLocaleString());
- }
-}
-
-
-// This function implements the optimized splice implementation that can use
-// special array operations to handle sparse arrays in a sensible fashion.
-function SmartSlice(array, start_i, del_count, len, deleted_elements) {
- // Move deleted elements to a new array (the return value from splice).
- // Intervals array can contain keys and intervals. See comment in Concat.
- var intervals = %GetArrayKeys(array, start_i + del_count);
- var length = intervals.length;
- for (var k = 0; k < length; k++) {
- var key = intervals[k];
- if (key < 0) {
- var j = -1 - key;
- var interval_limit = j + intervals[++k];
- if (j < start_i) {
- j = start_i;
- }
- for (; j < interval_limit; j++) {
- // ECMA-262 15.4.4.12 line 10. The spec could also be
- // interpreted such that %HasLocalProperty would be the
- // appropriate test. We follow KJS in consulting the
- // prototype.
- var current = array[j];
- if (!IS_UNDEFINED(current) || j in array) {
- deleted_elements[j - start_i] = current;
- }
- }
- } else {
- if (!IS_UNDEFINED(key)) {
- if (key >= start_i) {
- // ECMA-262 15.4.4.12 line 10. The spec could also be
- // interpreted such that %HasLocalProperty would be the
- // appropriate test. We follow KJS in consulting the
- // prototype.
- var current = array[key];
- if (!IS_UNDEFINED(current) || key in array) {
- deleted_elements[key - start_i] = current;
- }
- }
- }
- }
- }
-}
-
-
-// This function implements the optimized splice implementation that can use
-// special array operations to handle sparse arrays in a sensible fashion.
-function SmartMove(array, start_i, del_count, len, num_additional_args) {
- // Move data to new array.
- var new_array = new InternalArray(len - del_count + num_additional_args);
- var intervals = %GetArrayKeys(array, len);
- var length = intervals.length;
- for (var k = 0; k < length; k++) {
- var key = intervals[k];
- if (key < 0) {
- var j = -1 - key;
- var interval_limit = j + intervals[++k];
- while (j < start_i && j < interval_limit) {
- // The spec could also be interpreted such that
- // %HasLocalProperty would be the appropriate test. We follow
- // KJS in consulting the prototype.
- var current = array[j];
- if (!IS_UNDEFINED(current) || j in array) {
- new_array[j] = current;
- }
- j++;
- }
- j = start_i + del_count;
- while (j < interval_limit) {
- // ECMA-262 15.4.4.12 lines 24 and 41. The spec could also be
- // interpreted such that %HasLocalProperty would be the
- // appropriate test. We follow KJS in consulting the
- // prototype.
- var current = array[j];
- if (!IS_UNDEFINED(current) || j in array) {
- new_array[j - del_count + num_additional_args] = current;
- }
- j++;
- }
- } else {
- if (!IS_UNDEFINED(key)) {
- if (key < start_i) {
- // The spec could also be interpreted such that
- // %HasLocalProperty would be the appropriate test. We follow
- // KJS in consulting the prototype.
- var current = array[key];
- if (!IS_UNDEFINED(current) || key in array) {
- new_array[key] = current;
- }
- } else if (key >= start_i + del_count) {
- // ECMA-262 15.4.4.12 lines 24 and 41. The spec could also
- // be interpreted such that %HasLocalProperty would be the
- // appropriate test. We follow KJS in consulting the
- // prototype.
- var current = array[key];
- if (!IS_UNDEFINED(current) || key in array) {
- new_array[key - del_count + num_additional_args] = current;
- }
- }
- }
- }
- }
- // Move contents of new_array into this array
- %MoveArrayContents(new_array, array);
-}
-
-
-// This is part of the old simple-minded splice. We are using it either
-// because the receiver is not an array (so we have no choice) or because we
-// know we are not deleting or moving a lot of elements.
-function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
- for (var i = 0; i < del_count; i++) {
- var index = start_i + i;
- // The spec could also be interpreted such that %HasLocalProperty
- // would be the appropriate test. We follow KJS in consulting the
- // prototype.
- var current = array[index];
- if (!IS_UNDEFINED(current) || index in array) {
- deleted_elements[i] = current;
- }
- }
-}
-
-
-function SimpleMove(array, start_i, del_count, len, num_additional_args) {
- if (num_additional_args !== del_count) {
- // Move the existing elements after the elements to be deleted
- // to the right position in the resulting array.
- if (num_additional_args > del_count) {
- for (var i = len - del_count; i > start_i; i--) {
- var from_index = i + del_count - 1;
- var to_index = i + num_additional_args - 1;
- // The spec could also be interpreted such that
- // %HasLocalProperty would be the appropriate test. We follow
- // KJS in consulting the prototype.
- var current = array[from_index];
- if (!IS_UNDEFINED(current) || from_index in array) {
- array[to_index] = current;
- } else {
- delete array[to_index];
- }
- }
- } else {
- for (var i = start_i; i < len - del_count; i++) {
- var from_index = i + del_count;
- var to_index = i + num_additional_args;
- // The spec could also be interpreted such that
- // %HasLocalProperty would be the appropriate test. We follow
- // KJS in consulting the prototype.
- var current = array[from_index];
- if (!IS_UNDEFINED(current) || from_index in array) {
- array[to_index] = current;
- } else {
- delete array[to_index];
- }
- }
- for (var i = len; i > len - del_count + num_additional_args; i--) {
- delete array[i - 1];
- }
- }
- }
-}
-
-
-// -------------------------------------------------------------------
-
-
-function ArrayToString() {
- var array;
- var func;
- if (IS_ARRAY(this)) {
- func = this.join;
- if (func === ArrayJoin) {
- return Join(this, this.length, ',', ConvertToString);
- }
- array = this;
- } else {
- array = ToObject(this);
- func = array.join;
- }
- if (!IS_SPEC_FUNCTION(func)) {
- return %_CallFunction(array, ObjectToString);
- }
- return %_CallFunction(array, func);
-}
-
-
-function ArrayToLocaleString() {
- var array = ToObject(this);
- var arrayLen = array.length;
- var len = TO_UINT32(arrayLen);
- if (len === 0) return "";
- return Join(array, len, ',', ConvertToLocaleString);
-}
-
-
-function ArrayJoin(separator) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.join"]);
- }
-
- var length = TO_UINT32(this.length);
- if (IS_UNDEFINED(separator)) {
- separator = ',';
- } else if (!IS_STRING(separator)) {
- separator = NonStringToString(separator);
- }
-
- var result = %_FastAsciiArrayJoin(this, separator);
- if (!IS_UNDEFINED(result)) return result;
-
- return Join(this, length, separator, ConvertToString);
-}
-
-
-// Removes the last element from the array and returns it. See
-// ECMA-262, section 15.4.4.6.
-function ArrayPop() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.pop"]);
- }
-
- var n = TO_UINT32(this.length);
- if (n == 0) {
- this.length = n;
- return;
- }
- n--;
- var value = this[n];
- delete this[n];
- this.length = n;
- return value;
-}
-
-
-// Appends the arguments to the end of the array and returns the new
-// length of the array. See ECMA-262, section 15.4.4.7.
-function ArrayPush() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.push"]);
- }
-
- var n = TO_UINT32(this.length);
- var m = %_ArgumentsLength();
- for (var i = 0; i < m; i++) {
- this[i+n] = %_Arguments(i);
- }
- this.length = n + m;
- return this.length;
-}
-
-
-// Returns an array containing the array elements of the object followed
-// by the array elements of each argument in order. See ECMA-262,
-// section 15.4.4.7.
-function ArrayConcat(arg1) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.concat"]);
- }
-
- var array = ToObject(this);
- var arg_count = %_ArgumentsLength();
- var arrays = new InternalArray(1 + arg_count);
- arrays[0] = array;
- for (var i = 0; i < arg_count; i++) {
- arrays[i + 1] = %_Arguments(i);
- }
-
- return %ArrayConcat(arrays);
-}
-
-
-// For implementing reverse() on large, sparse arrays.
-function SparseReverse(array, len) {
- var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
- var high_counter = keys.length - 1;
- var low_counter = 0;
- while (low_counter <= high_counter) {
- var i = keys[low_counter];
- var j = keys[high_counter];
-
- var j_complement = len - j - 1;
- var low, high;
-
- if (j_complement <= i) {
- high = j;
- while (keys[--high_counter] == j) { }
- low = j_complement;
- }
- if (j_complement >= i) {
- low = i;
- while (keys[++low_counter] == i) { }
- high = len - i - 1;
- }
-
- var current_i = array[low];
- if (!IS_UNDEFINED(current_i) || low in array) {
- var current_j = array[high];
- if (!IS_UNDEFINED(current_j) || high in array) {
- array[low] = current_j;
- array[high] = current_i;
- } else {
- array[high] = current_i;
- delete array[low];
- }
- } else {
- var current_j = array[high];
- if (!IS_UNDEFINED(current_j) || high in array) {
- array[low] = current_j;
- delete array[high];
- }
- }
- }
-}
-
-
-function ArrayReverse() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.reverse"]);
- }
-
- var j = TO_UINT32(this.length) - 1;
-
- if (UseSparseVariant(this, j, IS_ARRAY(this))) {
- SparseReverse(this, j+1);
- return this;
- }
-
- for (var i = 0; i < j; i++, j--) {
- var current_i = this[i];
- if (!IS_UNDEFINED(current_i) || i in this) {
- var current_j = this[j];
- if (!IS_UNDEFINED(current_j) || j in this) {
- this[i] = current_j;
- this[j] = current_i;
- } else {
- this[j] = current_i;
- delete this[i];
- }
- } else {
- var current_j = this[j];
- if (!IS_UNDEFINED(current_j) || j in this) {
- this[i] = current_j;
- delete this[j];
- }
- }
- }
- return this;
-}
-
-
-function ArrayShift() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.shift"]);
- }
-
- var len = TO_UINT32(this.length);
-
- if (len === 0) {
- this.length = 0;
- return;
- }
-
- var first = this[0];
-
- if (IS_ARRAY(this) && !%IsObserved(this)) {
- SmartMove(this, 0, 1, len, 0);
- } else {
- SimpleMove(this, 0, 1, len, 0);
- }
-
- this.length = len - 1;
-
- return first;
-}
-
-
-function ArrayUnshift(arg1) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.unshift"]);
- }
-
- var len = TO_UINT32(this.length);
- var num_arguments = %_ArgumentsLength();
-
- if (IS_ARRAY(this) && !%IsObserved(this)) {
- SmartMove(this, 0, 0, len, num_arguments);
- } else {
- SimpleMove(this, 0, 0, len, num_arguments);
- }
-
- for (var i = 0; i < num_arguments; i++) {
- this[i] = %_Arguments(i);
- }
-
- this.length = len + num_arguments;
-
- return len + num_arguments;
-}
-
-
-function ArraySlice(start, end) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.slice"]);
- }
-
- var len = TO_UINT32(this.length);
- var start_i = TO_INTEGER(start);
- var end_i = len;
-
- if (end !== void 0) end_i = TO_INTEGER(end);
-
- if (start_i < 0) {
- start_i += len;
- if (start_i < 0) start_i = 0;
- } else {
- if (start_i > len) start_i = len;
- }
-
- if (end_i < 0) {
- end_i += len;
- if (end_i < 0) end_i = 0;
- } else {
- if (end_i > len) end_i = len;
- }
-
- var result = [];
-
- if (end_i < start_i) return result;
-
- if (IS_ARRAY(this) &&
- !%IsObserved(this) &&
- (end_i > 1000) &&
- (%EstimateNumberOfElements(this) < end_i)) {
- SmartSlice(this, start_i, end_i - start_i, len, result);
- } else {
- SimpleSlice(this, start_i, end_i - start_i, len, result);
- }
-
- result.length = end_i - start_i;
-
- return result;
-}
-
-
-function ArraySplice(start, delete_count) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.splice"]);
- }
-
- var num_arguments = %_ArgumentsLength();
-
- var len = TO_UINT32(this.length);
- var start_i = TO_INTEGER(start);
-
- if (start_i < 0) {
- start_i += len;
- if (start_i < 0) start_i = 0;
- } else {
- if (start_i > len) start_i = len;
- }
-
- // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
- // given as a request to delete all the elements from the start.
- // And it differs from the case of undefined delete count.
- // This does not follow ECMA-262, but we do the same for
- // compatibility.
- var del_count = 0;
- if (num_arguments == 1) {
- del_count = len - start_i;
- } else {
- del_count = TO_INTEGER(delete_count);
- if (del_count < 0) del_count = 0;
- if (del_count > len - start_i) del_count = len - start_i;
- }
-
- var deleted_elements = [];
- deleted_elements.length = del_count;
-
- // Number of elements to add.
- var num_additional_args = 0;
- if (num_arguments > 2) {
- num_additional_args = num_arguments - 2;
- }
-
- var use_simple_splice = true;
-
- if (IS_ARRAY(this) &&
- !%IsObserved(this) &&
- num_additional_args !== del_count) {
- // If we are only deleting/moving a few things near the end of the
- // array then the simple version is going to be faster, because it
- // doesn't touch most of the array.
- var estimated_non_hole_elements = %EstimateNumberOfElements(this);
- if (len > 20 && (estimated_non_hole_elements >> 2) < (len - start_i)) {
- use_simple_splice = false;
- }
- }
-
- if (use_simple_splice) {
- SimpleSlice(this, start_i, del_count, len, deleted_elements);
- SimpleMove(this, start_i, del_count, len, num_additional_args);
- } else {
- SmartSlice(this, start_i, del_count, len, deleted_elements);
- SmartMove(this, start_i, del_count, len, num_additional_args);
- }
-
- // Insert the arguments into the resulting array in
- // place of the deleted elements.
- var i = start_i;
- var arguments_index = 2;
- var arguments_length = %_ArgumentsLength();
- while (arguments_index < arguments_length) {
- this[i++] = %_Arguments(arguments_index++);
- }
- this.length = len - del_count + num_additional_args;
-
- // Return the deleted elements.
- return deleted_elements;
-}
-
-
-function ArraySort(comparefn) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.sort"]);
- }
-
- // In-place QuickSort algorithm.
- // For short (length <= 22) arrays, insertion sort is used for efficiency.
-
- if (!IS_SPEC_FUNCTION(comparefn)) {
- comparefn = function (x, y) {
- if (x === y) return 0;
- if (%_IsSmi(x) && %_IsSmi(y)) {
- return %SmiLexicographicCompare(x, y);
- }
- x = ToString(x);
- y = ToString(y);
- if (x == y) return 0;
- else return x < y ? -1 : 1;
- };
- }
- var receiver = %GetDefaultReceiver(comparefn);
-
- var InsertionSort = function InsertionSort(a, from, to) {
- for (var i = from + 1; i < to; i++) {
- var element = a[i];
- for (var j = i - 1; j >= from; j--) {
- var tmp = a[j];
- var order = %_CallFunction(receiver, tmp, element, comparefn);
- if (order > 0) {
- a[j + 1] = tmp;
- } else {
- break;
- }
- }
- a[j + 1] = element;
- }
- };
-
- var GetThirdIndex = function(a, from, to) {
- var t_array = [];
- // Use both 'from' and 'to' to determine the pivot candidates.
- var increment = 200 + ((to - from) & 15);
- for (var i = from + 1; i < to - 1; i += increment) {
- t_array.push([i, a[i]]);
- }
- t_array.sort(function(a, b) {
- return %_CallFunction(receiver, a[1], b[1], comparefn) } );
- var third_index = t_array[t_array.length >> 1][0];
- return third_index;
- }
-
- var QuickSort = function QuickSort(a, from, to) {
- var third_index = 0;
- while (true) {
- // Insertion sort is faster for short arrays.
- if (to - from <= 10) {
- InsertionSort(a, from, to);
- return;
- }
- if (to - from > 1000) {
- third_index = GetThirdIndex(a, from, to);
- } else {
- third_index = from + ((to - from) >> 1);
- }
- // Find a pivot as the median of first, last and middle element.
- var v0 = a[from];
- var v1 = a[to - 1];
- var v2 = a[third_index];
- var c01 = %_CallFunction(receiver, v0, v1, comparefn);
- if (c01 > 0) {
- // v1 < v0, so swap them.
- var tmp = v0;
- v0 = v1;
- v1 = tmp;
- } // v0 <= v1.
- var c02 = %_CallFunction(receiver, v0, v2, comparefn);
- if (c02 >= 0) {
- // v2 <= v0 <= v1.
- var tmp = v0;
- v0 = v2;
- v2 = v1;
- v1 = tmp;
- } else {
- // v0 <= v1 && v0 < v2
- var c12 = %_CallFunction(receiver, v1, v2, comparefn);
- if (c12 > 0) {
- // v0 <= v2 < v1
- var tmp = v1;
- v1 = v2;
- v2 = tmp;
- }
- }
- // v0 <= v1 <= v2
- a[from] = v0;
- a[to - 1] = v2;
- var pivot = v1;
- var low_end = from + 1; // Upper bound of elements lower than pivot.
- var high_start = to - 1; // Lower bound of elements greater than pivot.
- a[third_index] = a[low_end];
- a[low_end] = pivot;
-
- // From low_end to i are elements equal to pivot.
- // From i to high_start are elements that haven't been compared yet.
- partition: for (var i = low_end + 1; i < high_start; i++) {
- var element = a[i];
- var order = %_CallFunction(receiver, element, pivot, comparefn);
- if (order < 0) {
- a[i] = a[low_end];
- a[low_end] = element;
- low_end++;
- } else if (order > 0) {
- do {
- high_start--;
- if (high_start == i) break partition;
- var top_elem = a[high_start];
- order = %_CallFunction(receiver, top_elem, pivot, comparefn);
- } while (order > 0);
- a[i] = a[high_start];
- a[high_start] = element;
- if (order < 0) {
- element = a[i];
- a[i] = a[low_end];
- a[low_end] = element;
- low_end++;
- }
- }
- }
- if (to - high_start < low_end - from) {
- QuickSort(a, high_start, to);
- to = low_end;
- } else {
- QuickSort(a, from, low_end);
- from = high_start;
- }
- }
- };
-
- // Copy elements in the range 0..length from obj's prototype chain
- // to obj itself, if obj has holes. Return one more than the maximal index
- // of a prototype property.
- var CopyFromPrototype = function CopyFromPrototype(obj, length) {
- var max = 0;
- for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
- var indices = %GetArrayKeys(proto, length);
- if (indices.length > 0) {
- if (indices[0] == -1) {
- // It's an interval.
- var proto_length = indices[1];
- for (var i = 0; i < proto_length; i++) {
- if (!obj.hasOwnProperty(i) && proto.hasOwnProperty(i)) {
- obj[i] = proto[i];
- if (i >= max) { max = i + 1; }
- }
- }
- } else {
- for (var i = 0; i < indices.length; i++) {
- var index = indices[i];
- if (!IS_UNDEFINED(index) &&
- !obj.hasOwnProperty(index) && proto.hasOwnProperty(index)) {
- obj[index] = proto[index];
- if (index >= max) { max = index + 1; }
- }
- }
- }
- }
- }
- return max;
- };
-
- // Set a value of "undefined" on all indices in the range from..to
- // where a prototype of obj has an element. I.e., shadow all prototype
- // elements in that range.
- var ShadowPrototypeElements = function(obj, from, to) {
- for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
- var indices = %GetArrayKeys(proto, to);
- if (indices.length > 0) {
- if (indices[0] == -1) {
- // It's an interval.
- var proto_length = indices[1];
- for (var i = from; i < proto_length; i++) {
- if (proto.hasOwnProperty(i)) {
- obj[i] = void 0;
- }
- }
- } else {
- for (var i = 0; i < indices.length; i++) {
- var index = indices[i];
- if (!IS_UNDEFINED(index) && from <= index &&
- proto.hasOwnProperty(index)) {
- obj[index] = void 0;
- }
- }
- }
- }
- }
- };
-
- var SafeRemoveArrayHoles = function SafeRemoveArrayHoles(obj) {
- // Copy defined elements from the end to fill in all holes and undefineds
- // in the beginning of the array. Write undefineds and holes at the end
- // after loop is finished.
- var first_undefined = 0;
- var last_defined = length - 1;
- var num_holes = 0;
- while (first_undefined < last_defined) {
- // Find first undefined element.
- while (first_undefined < last_defined &&
- !IS_UNDEFINED(obj[first_undefined])) {
- first_undefined++;
- }
- // Maintain the invariant num_holes = the number of holes in the original
- // array with indices <= first_undefined or > last_defined.
- if (!obj.hasOwnProperty(first_undefined)) {
- num_holes++;
- }
-
- // Find last defined element.
- while (first_undefined < last_defined &&
- IS_UNDEFINED(obj[last_defined])) {
- if (!obj.hasOwnProperty(last_defined)) {
- num_holes++;
- }
- last_defined--;
- }
- if (first_undefined < last_defined) {
- // Fill in hole or undefined.
- obj[first_undefined] = obj[last_defined];
- obj[last_defined] = void 0;
- }
- }
- // If there were any undefineds in the entire array, first_undefined
- // points to one past the last defined element. Make this true if
- // there were no undefineds, as well, so that first_undefined == number
- // of defined elements.
- if (!IS_UNDEFINED(obj[first_undefined])) first_undefined++;
- // Fill in the undefineds and the holes. There may be a hole where
- // an undefined should be and vice versa.
- var i;
- for (i = first_undefined; i < length - num_holes; i++) {
- obj[i] = void 0;
- }
- for (i = length - num_holes; i < length; i++) {
- // For compatability with Webkit, do not expose elements in the prototype.
- if (i in obj.__proto__) {
- obj[i] = void 0;
- } else {
- delete obj[i];
- }
- }
-
- // Return the number of defined elements.
- return first_undefined;
- };
-
- var length = TO_UINT32(this.length);
- if (length < 2) return this;
-
- var is_array = IS_ARRAY(this);
- var max_prototype_element;
- if (!is_array) {
- // For compatibility with JSC, we also sort elements inherited from
- // the prototype chain on non-Array objects.
- // We do this by copying them to this object and sorting only
- // local elements. This is not very efficient, but sorting with
- // inherited elements happens very, very rarely, if at all.
- // The specification allows "implementation dependent" behavior
- // if an element on the prototype chain has an element that
- // might interact with sorting.
- max_prototype_element = CopyFromPrototype(this, length);
- }
-
- var num_non_undefined = %RemoveArrayHoles(this, length);
- if (num_non_undefined == -1) {
- // There were indexed accessors in the array. Move array holes and
- // undefineds to the end using a Javascript function that is safe
- // in the presence of accessors.
- num_non_undefined = SafeRemoveArrayHoles(this);
- }
-
- QuickSort(this, 0, num_non_undefined);
-
- if (!is_array && (num_non_undefined + 1 < max_prototype_element)) {
- // For compatibility with JSC, we shadow any elements in the prototype
- // chain that has become exposed by sort moving a hole to its position.
- ShadowPrototypeElements(this, num_non_undefined, max_prototype_element);
- }
-
- return this;
-}
-
-
-// The following functions cannot be made efficient on sparse arrays while
-// preserving the semantics, since the calls to the receiver function can add
-// or delete elements from the array.
-function ArrayFilter(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.filter"]);
- }
-
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = ToObject(this);
- var length = ToUint32(array.length);
-
- if (!IS_SPEC_FUNCTION(f)) {
- throw MakeTypeError('called_non_callable', [ f ]);
- }
- if (IS_NULL_OR_UNDEFINED(receiver)) {
- receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
- receiver = ToObject(receiver);
- }
-
- var result = new $Array();
- var accumulator = new InternalArray();
- var accumulator_length = 0;
- if (%DebugCallbackSupportsStepping(f)) {
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(f);
- if (%_CallFunction(receiver, element, i, array, f)) {
- accumulator[accumulator_length++] = element;
- }
- }
- }
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- if (%_CallFunction(receiver, element, i, array, f)) {
- accumulator[accumulator_length++] = element;
- }
- }
- }
- // End of duplicate.
- }
- %MoveArrayContents(accumulator, result);
- return result;
-}
-
-
-function ArrayForEach(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.forEach"]);
- }
-
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = ToObject(this);
- var length = TO_UINT32(array.length);
-
- if (!IS_SPEC_FUNCTION(f)) {
- throw MakeTypeError('called_non_callable', [ f ]);
- }
- if (IS_NULL_OR_UNDEFINED(receiver)) {
- receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
- receiver = ToObject(receiver);
- }
- if (%DebugCallbackSupportsStepping(f)) {
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(f);
- %_CallFunction(receiver, element, i, array, f);
- }
- }
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- %_CallFunction(receiver, element, i, array, f);
- }
- }
- // End of duplicate.
- }
-}
-
-
-// Executes the function once for each element present in the
-// array until it finds one where callback returns true.
-function ArraySome(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.some"]);
- }
-
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = ToObject(this);
- var length = TO_UINT32(array.length);
-
- if (!IS_SPEC_FUNCTION(f)) {
- throw MakeTypeError('called_non_callable', [ f ]);
- }
- if (IS_NULL_OR_UNDEFINED(receiver)) {
- receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
- receiver = ToObject(receiver);
- }
-
- if (%DebugCallbackSupportsStepping(f)) {
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(f);
- if (%_CallFunction(receiver, element, i, array, f)) return true;
- }
- }
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- if (%_CallFunction(receiver, element, i, array, f)) return true;
- }
- }
- // End of duplicate.
- }
- return false;
-}
-
-
-function ArrayEvery(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.every"]);
- }
-
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = ToObject(this);
- var length = TO_UINT32(array.length);
-
- if (!IS_SPEC_FUNCTION(f)) {
- throw MakeTypeError('called_non_callable', [ f ]);
- }
- if (IS_NULL_OR_UNDEFINED(receiver)) {
- receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
- receiver = ToObject(receiver);
- }
-
- if (%DebugCallbackSupportsStepping(f)) {
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(f);
- if (!%_CallFunction(receiver, element, i, array, f)) return false;
- }
- }
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- if (!%_CallFunction(receiver, element, i, array, f)) return false;
- }
- }
- // End of duplicate.
- }
- return true;
-}
-
-function ArrayMap(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.map"]);
- }
-
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = ToObject(this);
- var length = TO_UINT32(array.length);
-
- if (!IS_SPEC_FUNCTION(f)) {
- throw MakeTypeError('called_non_callable', [ f ]);
- }
- if (IS_NULL_OR_UNDEFINED(receiver)) {
- receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
- receiver = ToObject(receiver);
- }
-
- var result = new $Array();
- var accumulator = new InternalArray(length);
- if (%DebugCallbackSupportsStepping(f)) {
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(f);
- accumulator[i] = %_CallFunction(receiver, element, i, array, f);
- }
- }
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- accumulator[i] = %_CallFunction(receiver, element, i, array, f);
- }
- }
- // End of duplicate.
- }
- %MoveArrayContents(accumulator, result);
- return result;
-}
-
-
-function ArrayIndexOf(element, index) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.indexOf"]);
- }
-
- var length = TO_UINT32(this.length);
- if (length == 0) return -1;
- if (IS_UNDEFINED(index)) {
- index = 0;
- } else {
- index = TO_INTEGER(index);
- // If index is negative, index from the end of the array.
- if (index < 0) {
- index = length + index;
- // If index is still negative, search the entire array.
- if (index < 0) index = 0;
- }
- }
- var min = index;
- var max = length;
- if (UseSparseVariant(this, length, IS_ARRAY(this))) {
- var intervals = %GetArrayKeys(this, length);
- if (intervals.length == 2 && intervals[0] < 0) {
- // A single interval.
- var intervalMin = -(intervals[0] + 1);
- var intervalMax = intervalMin + intervals[1];
- if (min < intervalMin) min = intervalMin;
- max = intervalMax; // Capped by length already.
- // Fall through to loop below.
- } else {
- if (intervals.length == 0) return -1;
- // Get all the keys in sorted order.
- var sortedKeys = GetSortedArrayKeys(this, intervals);
- var n = sortedKeys.length;
- var i = 0;
- while (i < n && sortedKeys[i] < index) i++;
- while (i < n) {
- var key = sortedKeys[i];
- if (!IS_UNDEFINED(key) && this[key] === element) return key;
- i++;
- }
- return -1;
- }
- }
- // Lookup through the array.
- if (!IS_UNDEFINED(element)) {
- for (var i = min; i < max; i++) {
- if (this[i] === element) return i;
- }
- return -1;
- }
- // Lookup through the array.
- for (var i = min; i < max; i++) {
- if (IS_UNDEFINED(this[i]) && i in this) {
- return i;
- }
- }
- return -1;
-}
-
-
-function ArrayLastIndexOf(element, index) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.lastIndexOf"]);
- }
-
- var length = TO_UINT32(this.length);
- if (length == 0) return -1;
- if (%_ArgumentsLength() < 2) {
- index = length - 1;
- } else {
- index = TO_INTEGER(index);
- // If index is negative, index from end of the array.
- if (index < 0) index += length;
- // If index is still negative, do not search the array.
- if (index < 0) return -1;
- else if (index >= length) index = length - 1;
- }
- var min = 0;
- var max = index;
- if (UseSparseVariant(this, length, IS_ARRAY(this))) {
- var intervals = %GetArrayKeys(this, index + 1);
- if (intervals.length == 2 && intervals[0] < 0) {
- // A single interval.
- var intervalMin = -(intervals[0] + 1);
- var intervalMax = intervalMin + intervals[1];
- if (min < intervalMin) min = intervalMin;
- max = intervalMax; // Capped by index already.
- // Fall through to loop below.
- } else {
- if (intervals.length == 0) return -1;
- // Get all the keys in sorted order.
- var sortedKeys = GetSortedArrayKeys(this, intervals);
- var i = sortedKeys.length - 1;
- while (i >= 0) {
- var key = sortedKeys[i];
- if (!IS_UNDEFINED(key) && this[key] === element) return key;
- i--;
- }
- return -1;
- }
- }
- // Lookup through the array.
- if (!IS_UNDEFINED(element)) {
- for (var i = max; i >= min; i--) {
- if (this[i] === element) return i;
- }
- return -1;
- }
- for (var i = max; i >= min; i--) {
- if (IS_UNDEFINED(this[i]) && i in this) {
- return i;
- }
- }
- return -1;
-}
-
-
-function ArrayReduce(callback, current) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.reduce"]);
- }
-
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = ToObject(this);
- var length = ToUint32(array.length);
-
- if (!IS_SPEC_FUNCTION(callback)) {
- throw MakeTypeError('called_non_callable', [callback]);
- }
-
- var i = 0;
- find_initial: if (%_ArgumentsLength() < 2) {
- for (; i < length; i++) {
- current = array[i];
- if (!IS_UNDEFINED(current) || i in array) {
- i++;
- break find_initial;
- }
- }
- throw MakeTypeError('reduce_no_initial', []);
- }
-
- var receiver = %GetDefaultReceiver(callback);
-
- if (%DebugCallbackSupportsStepping(callback)) {
- for (; i < length; i++) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(callback);
- current =
- %_CallFunction(receiver, current, element, i, array, callback);
- }
- }
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (; i < length; i++) {
- if (i in array) {
- var element = array[i];
- current =
- %_CallFunction(receiver, current, element, i, array, callback);
- }
- }
- // End of duplicate.
- }
- return current;
-}
-
-function ArrayReduceRight(callback, current) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.reduceRight"]);
- }
-
- // Pull out the length so that side effects are visible before the
- // callback function is checked.
- var array = ToObject(this);
- var length = ToUint32(array.length);
-
- if (!IS_SPEC_FUNCTION(callback)) {
- throw MakeTypeError('called_non_callable', [callback]);
- }
-
- var i = length - 1;
- find_initial: if (%_ArgumentsLength() < 2) {
- for (; i >= 0; i--) {
- current = array[i];
- if (!IS_UNDEFINED(current) || i in array) {
- i--;
- break find_initial;
- }
- }
- throw MakeTypeError('reduce_no_initial', []);
- }
-
- var receiver = %GetDefaultReceiver(callback);
-
- if (%DebugCallbackSupportsStepping(callback)) {
- for (; i >= 0; i--) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(callback);
- current =
- %_CallFunction(receiver, current, element, i, array, callback);
- }
- }
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (; i >= 0; i--) {
- if (i in array) {
- var element = array[i];
- current =
- %_CallFunction(receiver, current, element, i, array, callback);
- }
- }
- // End of duplicate.
- }
- return current;
-}
-
-// ES5, 15.4.3.2
-function ArrayIsArray(obj) {
- return IS_ARRAY(obj);
-}
-
-
-// -------------------------------------------------------------------
-function SetUpArray() {
- %CheckIsBootstrapping();
- // Set up non-enumerable constructor property on the Array.prototype
- // object.
- %SetProperty($Array.prototype, "constructor", $Array, DONT_ENUM);
-
- // Set up non-enumerable functions on the Array object.
- InstallFunctions($Array, DONT_ENUM, $Array(
- "isArray", ArrayIsArray
- ));
-
- var specialFunctions = %SpecialArrayFunctions({});
-
- var getFunction = function(name, jsBuiltin, len) {
- var f = jsBuiltin;
- if (specialFunctions.hasOwnProperty(name)) {
- f = specialFunctions[name];
- }
- if (!IS_UNDEFINED(len)) {
- %FunctionSetLength(f, len);
- }
- return f;
- };
-
- // Set up non-enumerable functions of the Array.prototype object and
- // set their names.
- // Manipulate the length of some of the functions to meet
- // expectations set by ECMA-262 or Mozilla.
- InstallFunctions($Array.prototype, DONT_ENUM, $Array(
- "toString", getFunction("toString", ArrayToString),
- "toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
- "join", getFunction("join", ArrayJoin),
- "pop", getFunction("pop", ArrayPop),
- "push", getFunction("push", ArrayPush, 1),
- "concat", getFunction("concat", ArrayConcat, 1),
- "reverse", getFunction("reverse", ArrayReverse),
- "shift", getFunction("shift", ArrayShift),
- "unshift", getFunction("unshift", ArrayUnshift, 1),
- "slice", getFunction("slice", ArraySlice, 2),
- "splice", getFunction("splice", ArraySplice, 2),
- "sort", getFunction("sort", ArraySort),
- "filter", getFunction("filter", ArrayFilter, 1),
- "forEach", getFunction("forEach", ArrayForEach, 1),
- "some", getFunction("some", ArraySome, 1),
- "every", getFunction("every", ArrayEvery, 1),
- "map", getFunction("map", ArrayMap, 1),
- "indexOf", getFunction("indexOf", ArrayIndexOf, 1),
- "lastIndexOf", getFunction("lastIndexOf", ArrayLastIndexOf, 1),
- "reduce", getFunction("reduce", ArrayReduce, 1),
- "reduceRight", getFunction("reduceRight", ArrayReduceRight, 1)
- ));
-
- %FinishArrayPrototypeSetup($Array.prototype);
-
- // The internal Array prototype doesn't need to be fancy, since it's never
- // exposed to user code.
- // Adding only the functions that are actually used.
- SetUpLockedPrototype(InternalArray, $Array(), $Array(
- "indexOf", getFunction("indexOf", ArrayIndexOf),
- "join", getFunction("join", ArrayJoin),
- "pop", getFunction("pop", ArrayPop),
- "push", getFunction("push", ArrayPush),
- "splice", getFunction("splice", ArraySplice)
- ));
-
- SetUpLockedPrototype(InternalPackedArray, $Array(), $Array(
- "join", getFunction("join", ArrayJoin),
- "pop", getFunction("pop", ArrayPop),
- "push", getFunction("push", ArrayPush)
- ));
-}
-
-SetUpArray();
diff --git a/src/3rdparty/v8/src/assembler.cc b/src/3rdparty/v8/src/assembler.cc
deleted file mode 100644
index 2cd9114..0000000
--- a/src/3rdparty/v8/src/assembler.cc
+++ /dev/null
@@ -1,1617 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-#include "assembler.h"
-
-#include <math.h> // For cos, log, pow, sin, tan, etc.
-#include "api.h"
-#include "builtins.h"
-#include "counters.h"
-#include "cpu.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "ic.h"
-#include "isolate.h"
-#include "jsregexp.h"
-#include "lazy-instance.h"
-#include "platform.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "runtime.h"
-#include "serialize.h"
-#include "store-buffer-inl.h"
-#include "stub-cache.h"
-#include "token.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/assembler-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/assembler-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/assembler-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/assembler-mips-inl.h"
-#else
-#error "Unknown architecture."
-#endif
-
-// Include native regexp-macro-assembler.
-#ifndef V8_INTERPRETED_REGEXP
-#if V8_TARGET_ARCH_IA32
-#include "ia32/regexp-macro-assembler-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/regexp-macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/regexp-macro-assembler-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/regexp-macro-assembler-mips.h"
-#else // Unknown architecture.
-#error "Unknown architecture."
-#endif // Target architecture.
-#endif // V8_INTERPRETED_REGEXP
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Common double constants.
-
-struct DoubleConstant BASE_EMBEDDED {
- double min_int;
- double one_half;
- double minus_one_half;
- double minus_zero;
- double zero;
- double uint8_max_value;
- double negative_infinity;
- double canonical_non_hole_nan;
- double the_hole_nan;
-};
-
-static DoubleConstant double_constants;
-
-const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
-
-static bool math_exp_data_initialized = false;
-static Mutex* math_exp_data_mutex = NULL;
-static double* math_exp_constants_array = NULL;
-static double* math_exp_log_table_array = NULL;
-
-// -----------------------------------------------------------------------------
-// Implementation of AssemblerBase
-
-AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
- : isolate_(isolate),
- jit_cookie_(0),
- emit_debug_code_(FLAG_debug_code),
- predictable_code_size_(false) {
- if (FLAG_mask_constants_with_cookie && isolate != NULL) {
- jit_cookie_ = V8::RandomPrivate(isolate);
- }
-
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
- if (isolate->assembler_spare_buffer() != NULL) {
- buffer = isolate->assembler_spare_buffer();
- isolate->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) buffer = NewArray<byte>(buffer_size);
- own_buffer_ = true;
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- own_buffer_ = false;
- }
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
-
- pc_ = buffer_;
-}
-
-
-AssemblerBase::~AssemblerBase() {
- if (own_buffer_) {
- if (isolate() != NULL &&
- isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of PredictableCodeSizeScope
-
-PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
- int expected_size)
- : assembler_(assembler),
- expected_size_(expected_size),
- start_offset_(assembler->pc_offset()),
- old_value_(assembler->predictable_code_size()) {
- assembler_->set_predictable_code_size(true);
-}
-
-
-PredictableCodeSizeScope::~PredictableCodeSizeScope() {
- // TODO(svenpanne) Remove the 'if' when everything works.
- if (expected_size_ >= 0) {
- CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
- }
- assembler_->set_predictable_code_size(old_value_);
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Label
-
-int Label::pos() const {
- if (pos_ < 0) return -pos_ - 1;
- if (pos_ > 0) return pos_ - 1;
- UNREACHABLE();
- return 0;
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfoWriter and RelocIterator
-//
-// Relocation information is written backwards in memory, from high addresses
-// towards low addresses, byte by byte. Therefore, in the encodings listed
-// below, the first byte listed it at the highest address, and successive
-// bytes in the record are at progressively lower addresses.
-//
-// Encoding
-//
-// The most common modes are given single-byte encodings. Also, it is
-// easy to identify the type of reloc info and skip unwanted modes in
-// an iteration.
-//
-// The encoding relies on the fact that there are fewer than 14
-// different relocation modes using standard non-compact encoding.
-//
-// The first byte of a relocation record has a tag in its low 2 bits:
-// Here are the record schemes, depending on the low tag and optional higher
-// tags.
-//
-// Low tag:
-// 00: embedded_object: [6-bit pc delta] 00
-//
-// 01: code_target: [6-bit pc delta] 01
-//
-// 10: short_data_record: [6-bit pc delta] 10 followed by
-// [6-bit data delta] [2-bit data type tag]
-//
-// 11: long_record [2-bit high tag][4 bit middle_tag] 11
-// followed by variable data depending on type.
-//
-// 2-bit data type tags, used in short_data_record and data_jump long_record:
-// code_target_with_id: 00
-// position: 01
-// statement_position: 10
-// comment: 11 (not used in short_data_record)
-//
-// Long record format:
-// 4-bit middle_tag:
-// 0000 - 1100 : Short record for RelocInfo::Mode middle_tag + 2
-// (The middle_tag encodes rmode - RelocInfo::LAST_COMPACT_ENUM,
-// and is between 0000 and 1100)
-// The format is:
-// 00 [4 bit middle_tag] 11 followed by
-// 00 [6 bit pc delta]
-//
-// 1101: constant pool. Used on ARM only for now.
-// The format is: 11 1101 11
-// signed int (size of the constant pool).
-// 1110: long_data_record
-// The format is: [2-bit data_type_tag] 1110 11
-// signed intptr_t, lowest byte written first
-// (except data_type code_target_with_id, which
-// is followed by a signed int, not intptr_t.)
-//
-// 1111: long_pc_jump
-// The format is:
-// pc-jump: 00 1111 11,
-// 00 [6 bits pc delta]
-// or
-// pc-jump (variable length):
-// 01 1111 11,
-// [7 bits data] 0
-// ...
-// [7 bits data] 1
-// (Bits 6..31 of pc delta, with leading zeroes
-// dropped, and last non-zero chunk tagged with 1.)
-
-
-const int kMaxStandardNonCompactModes = 14;
-
-const int kTagBits = 2;
-const int kTagMask = (1 << kTagBits) - 1;
-const int kExtraTagBits = 4;
-const int kLocatableTypeTagBits = 2;
-const int kSmallDataBits = kBitsPerByte - kLocatableTypeTagBits;
-
-const int kEmbeddedObjectTag = 0;
-const int kCodeTargetTag = 1;
-const int kLocatableTag = 2;
-const int kDefaultTag = 3;
-
-const int kPCJumpExtraTag = (1 << kExtraTagBits) - 1;
-
-const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
-const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
-const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask;
-
-const int kVariableLengthPCJumpTopTag = 1;
-const int kChunkBits = 7;
-const int kChunkMask = (1 << kChunkBits) - 1;
-const int kLastChunkTagBits = 1;
-const int kLastChunkTagMask = 1;
-const int kLastChunkTag = 1;
-
-
-const int kDataJumpExtraTag = kPCJumpExtraTag - 1;
-
-const int kCodeWithIdTag = 0;
-const int kNonstatementPositionTag = 1;
-const int kStatementPositionTag = 2;
-const int kCommentTag = 3;
-
-const int kConstPoolExtraTag = kPCJumpExtraTag - 2;
-const int kConstPoolTag = 3;
-
-
-uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
- // Return if the pc_delta can fit in kSmallPCDeltaBits bits.
- // Otherwise write a variable length PC jump for the bits that do
- // not fit in the kSmallPCDeltaBits bits.
- if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
- WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag);
- uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
- ASSERT(pc_jump > 0);
- // Write kChunkBits size chunks of the pc_jump.
- for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
- byte b = pc_jump & kChunkMask;
- *--pos_ = b << kLastChunkTagBits;
- }
- // Tag the last chunk so it can be identified.
- *pos_ = *pos_ | kLastChunkTag;
- // Return the remaining kSmallPCDeltaBits of the pc_delta.
- return pc_delta & kSmallPCDeltaMask;
-}
-
-
-void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
- // Write a byte of tagged pc-delta, possibly preceded by var. length pc-jump.
- pc_delta = WriteVariableLengthPCJump(pc_delta);
- *--pos_ = pc_delta << kTagBits | tag;
-}
-
-
-void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
- *--pos_ = static_cast<byte>(data_delta << kLocatableTypeTagBits | tag);
-}
-
-
-void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) {
- *--pos_ = static_cast<int>(top_tag << (kTagBits + kExtraTagBits) |
- extra_tag << kTagBits |
- kDefaultTag);
-}
-
-
-void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
- // Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
- pc_delta = WriteVariableLengthPCJump(pc_delta);
- WriteExtraTag(extra_tag, 0);
- *--pos_ = pc_delta;
-}
-
-
-void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
- WriteExtraTag(kDataJumpExtraTag, top_tag);
- for (int i = 0; i < kIntSize; i++) {
- *--pos_ = static_cast<byte>(data_delta);
- // Signed right shift is arithmetic shift. Tested in test-utils.cc.
- data_delta = data_delta >> kBitsPerByte;
- }
-}
-
-void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
- WriteExtraTag(kConstPoolExtraTag, kConstPoolTag);
- for (int i = 0; i < kIntSize; i++) {
- *--pos_ = static_cast<byte>(data);
- // Signed right shift is arithmetic shift. Tested in test-utils.cc.
- data = data >> kBitsPerByte;
- }
-}
-
-void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
- WriteExtraTag(kDataJumpExtraTag, top_tag);
- for (int i = 0; i < kIntptrSize; i++) {
- *--pos_ = static_cast<byte>(data_delta);
- // Signed right shift is arithmetic shift. Tested in test-utils.cc.
- data_delta = data_delta >> kBitsPerByte;
- }
-}
-
-
-void RelocInfoWriter::Write(const RelocInfo* rinfo) {
-#ifdef DEBUG
- byte* begin_pos = pos_;
-#endif
- ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
- ASSERT(rinfo->pc() - last_pc_ >= 0);
- ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
- <= kMaxStandardNonCompactModes);
- // Use unsigned delta-encoding for pc.
- uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
- RelocInfo::Mode rmode = rinfo->rmode();
-
- // The two most common modes are given small tags, and usually fit in a byte.
- if (rmode == RelocInfo::EMBEDDED_OBJECT) {
- WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
- } else if (rmode == RelocInfo::CODE_TARGET) {
- WriteTaggedPC(pc_delta, kCodeTargetTag);
- ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
- } else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- // Use signed delta-encoding for id.
- ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
- int id_delta = static_cast<int>(rinfo->data()) - last_id_;
- // Check if delta is small enough to fit in a tagged byte.
- if (is_intn(id_delta, kSmallDataBits)) {
- WriteTaggedPC(pc_delta, kLocatableTag);
- WriteTaggedData(id_delta, kCodeWithIdTag);
- } else {
- // Otherwise, use costly encoding.
- WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
- WriteExtraTaggedIntData(id_delta, kCodeWithIdTag);
- }
- last_id_ = static_cast<int>(rinfo->data());
- } else if (RelocInfo::IsPosition(rmode)) {
- // Use signed delta-encoding for position.
- ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
- int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
- int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
- : kStatementPositionTag;
- // Check if delta is small enough to fit in a tagged byte.
- if (is_intn(pos_delta, kSmallDataBits)) {
- WriteTaggedPC(pc_delta, kLocatableTag);
- WriteTaggedData(pos_delta, pos_type_tag);
- } else {
- // Otherwise, use costly encoding.
- WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
- WriteExtraTaggedIntData(pos_delta, pos_type_tag);
- }
- last_position_ = static_cast<int>(rinfo->data());
- } else if (RelocInfo::IsComment(rmode)) {
- // Comments are normally not generated, so we use the costly encoding.
- WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
- WriteExtraTaggedData(rinfo->data(), kCommentTag);
- ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
- } else if (RelocInfo::IsConstPool(rmode)) {
- WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
- WriteExtraTaggedConstPoolData(static_cast<int>(rinfo->data()));
- } else {
- ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
- int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
- // For all other modes we simply use the mode as the extra tag.
- // None of these modes need a data component.
- ASSERT(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag);
- WriteExtraTaggedPC(pc_delta, saved_mode);
- }
- last_pc_ = rinfo->pc();
-#ifdef DEBUG
- ASSERT(begin_pos - pos_ <= kMaxSize);
-#endif
-}
-
-
-inline int RelocIterator::AdvanceGetTag() {
- return *--pos_ & kTagMask;
-}
-
-
-inline int RelocIterator::GetExtraTag() {
- return (*pos_ >> kTagBits) & ((1 << kExtraTagBits) - 1);
-}
-
-
-inline int RelocIterator::GetTopTag() {
- return *pos_ >> (kTagBits + kExtraTagBits);
-}
-
-
-inline void RelocIterator::ReadTaggedPC() {
- rinfo_.pc_ += *pos_ >> kTagBits;
-}
-
-
-inline void RelocIterator::AdvanceReadPC() {
- rinfo_.pc_ += *--pos_;
-}
-
-
-void RelocIterator::AdvanceReadId() {
- int x = 0;
- for (int i = 0; i < kIntSize; i++) {
- x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
- }
- last_id_ += x;
- rinfo_.data_ = last_id_;
-}
-
-
-void RelocIterator::AdvanceReadConstPoolData() {
- int x = 0;
- for (int i = 0; i < kIntSize; i++) {
- x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
- }
- rinfo_.data_ = x;
-}
-
-
-void RelocIterator::AdvanceReadPosition() {
- int x = 0;
- for (int i = 0; i < kIntSize; i++) {
- x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
- }
- last_position_ += x;
- rinfo_.data_ = last_position_;
-}
-
-
-void RelocIterator::AdvanceReadData() {
- intptr_t x = 0;
- for (int i = 0; i < kIntptrSize; i++) {
- x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
- }
- rinfo_.data_ = x;
-}
-
-
-void RelocIterator::AdvanceReadVariableLengthPCJump() {
- // Read the 32-kSmallPCDeltaBits most significant bits of the
- // pc jump in kChunkBits bit chunks and shift them into place.
- // Stop when the last chunk is encountered.
- uint32_t pc_jump = 0;
- for (int i = 0; i < kIntSize; i++) {
- byte pc_jump_part = *--pos_;
- pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits;
- if ((pc_jump_part & kLastChunkTagMask) == 1) break;
- }
- // The least significant kSmallPCDeltaBits bits will be added
- // later.
- rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
-}
-
-
-inline int RelocIterator::GetLocatableTypeTag() {
- return *pos_ & ((1 << kLocatableTypeTagBits) - 1);
-}
-
-
-inline void RelocIterator::ReadTaggedId() {
- int8_t signed_b = *pos_;
- // Signed right shift is arithmetic shift. Tested in test-utils.cc.
- last_id_ += signed_b >> kLocatableTypeTagBits;
- rinfo_.data_ = last_id_;
-}
-
-
-inline void RelocIterator::ReadTaggedPosition() {
- int8_t signed_b = *pos_;
- // Signed right shift is arithmetic shift. Tested in test-utils.cc.
- last_position_ += signed_b >> kLocatableTypeTagBits;
- rinfo_.data_ = last_position_;
-}
-
-
-static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
- ASSERT(tag == kNonstatementPositionTag ||
- tag == kStatementPositionTag);
- return (tag == kNonstatementPositionTag) ?
- RelocInfo::POSITION :
- RelocInfo::STATEMENT_POSITION;
-}
-
-
-void RelocIterator::next() {
- ASSERT(!done());
- // Basically, do the opposite of RelocInfoWriter::Write.
- // Reading of data is as far as possible avoided for unwanted modes,
- // but we must always update the pc.
- //
- // We exit this loop by returning when we find a mode we want.
- while (pos_ > end_) {
- int tag = AdvanceGetTag();
- if (tag == kEmbeddedObjectTag) {
- ReadTaggedPC();
- if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
- } else if (tag == kCodeTargetTag) {
- ReadTaggedPC();
- if (SetMode(RelocInfo::CODE_TARGET)) return;
- } else if (tag == kLocatableTag) {
- ReadTaggedPC();
- Advance();
- int locatable_tag = GetLocatableTypeTag();
- if (locatable_tag == kCodeWithIdTag) {
- if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
- ReadTaggedId();
- return;
- }
- } else {
- // Compact encoding is never used for comments,
- // so it must be a position.
- ASSERT(locatable_tag == kNonstatementPositionTag ||
- locatable_tag == kStatementPositionTag);
- if (mode_mask_ & RelocInfo::kPositionMask) {
- ReadTaggedPosition();
- if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
- }
- }
- } else {
- ASSERT(tag == kDefaultTag);
- int extra_tag = GetExtraTag();
- if (extra_tag == kPCJumpExtraTag) {
- if (GetTopTag() == kVariableLengthPCJumpTopTag) {
- AdvanceReadVariableLengthPCJump();
- } else {
- AdvanceReadPC();
- }
- } else if (extra_tag == kDataJumpExtraTag) {
- int locatable_tag = GetTopTag();
- if (locatable_tag == kCodeWithIdTag) {
- if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
- AdvanceReadId();
- return;
- }
- Advance(kIntSize);
- } else if (locatable_tag != kCommentTag) {
- ASSERT(locatable_tag == kNonstatementPositionTag ||
- locatable_tag == kStatementPositionTag);
- if (mode_mask_ & RelocInfo::kPositionMask) {
- AdvanceReadPosition();
- if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
- } else {
- Advance(kIntSize);
- }
- } else {
- ASSERT(locatable_tag == kCommentTag);
- if (SetMode(RelocInfo::COMMENT)) {
- AdvanceReadData();
- return;
- }
- Advance(kIntptrSize);
- }
- } else if ((extra_tag == kConstPoolExtraTag) &&
- (GetTopTag() == kConstPoolTag)) {
- if (SetMode(RelocInfo::CONST_POOL)) {
- AdvanceReadConstPoolData();
- return;
- }
- Advance(kIntSize);
- } else {
- AdvanceReadPC();
- int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
- if (SetMode(static_cast<RelocInfo::Mode>(rmode))) return;
- }
- }
- }
- if (code_age_sequence_ != NULL) {
- byte* old_code_age_sequence = code_age_sequence_;
- code_age_sequence_ = NULL;
- if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
- rinfo_.data_ = 0;
- rinfo_.pc_ = old_code_age_sequence;
- return;
- }
- }
- done_ = true;
-}
-
-
-RelocIterator::RelocIterator(Code* code, int mode_mask) {
- rinfo_.host_ = code;
- rinfo_.pc_ = code->instruction_start();
- rinfo_.data_ = 0;
- // Relocation info is read backwards.
- pos_ = code->relocation_start() + code->relocation_size();
- end_ = code->relocation_start();
- done_ = false;
- mode_mask_ = mode_mask;
- last_id_ = 0;
- last_position_ = 0;
- byte* sequence = code->FindCodeAgeSequence();
- if (sequence != NULL && !Code::IsYoungSequence(sequence)) {
- code_age_sequence_ = sequence;
- } else {
- code_age_sequence_ = NULL;
- }
- if (mode_mask_ == 0) pos_ = end_;
- next();
-}
-
-
-RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
- rinfo_.pc_ = desc.buffer;
- rinfo_.data_ = 0;
- // Relocation info is read backwards.
- pos_ = desc.buffer + desc.buffer_size;
- end_ = pos_ - desc.reloc_size;
- done_ = false;
- mode_mask_ = mode_mask;
- last_id_ = 0;
- last_position_ = 0;
- code_age_sequence_ = NULL;
- if (mode_mask_ == 0) pos_ = end_;
- next();
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-
-#ifdef DEBUG
-bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
- // Ensure there are no code targets or embedded objects present in the
- // deoptimization entries, they would require relocation after code
- // generation.
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
- RelocInfo::kApplyMask;
- RelocIterator it(desc, mode_mask);
- return !it.done();
-}
-#endif
-
-
-#ifdef ENABLE_DISASSEMBLER
-const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
- switch (rmode) {
- case RelocInfo::NONE32:
- return "no reloc 32";
- case RelocInfo::NONE64:
- return "no reloc 64";
- case RelocInfo::EMBEDDED_OBJECT:
- return "embedded object";
- case RelocInfo::CONSTRUCT_CALL:
- return "code target (js construct call)";
- case RelocInfo::CODE_TARGET_CONTEXT:
- return "code target (context)";
- case RelocInfo::DEBUG_BREAK:
-#ifndef ENABLE_DEBUGGER_SUPPORT
- UNREACHABLE();
-#endif
- return "debug break";
- case RelocInfo::CODE_TARGET:
- return "code target";
- case RelocInfo::CODE_TARGET_WITH_ID:
- return "code target with id";
- case RelocInfo::GLOBAL_PROPERTY_CELL:
- return "global property cell";
- case RelocInfo::RUNTIME_ENTRY:
- return "runtime entry";
- case RelocInfo::JS_RETURN:
- return "js return";
- case RelocInfo::COMMENT:
- return "comment";
- case RelocInfo::POSITION:
- return "position";
- case RelocInfo::STATEMENT_POSITION:
- return "statement position";
- case RelocInfo::EXTERNAL_REFERENCE:
- return "external reference";
- case RelocInfo::INTERNAL_REFERENCE:
- return "internal reference";
- case RelocInfo::CONST_POOL:
- return "constant pool";
- case RelocInfo::DEBUG_BREAK_SLOT:
-#ifndef ENABLE_DEBUGGER_SUPPORT
- UNREACHABLE();
-#endif
- return "debug break slot";
- case RelocInfo::CODE_AGE_SEQUENCE:
- return "code_age_sequence";
- case RelocInfo::NUMBER_OF_MODES:
- UNREACHABLE();
- return "number_of_modes";
- }
- return "unknown relocation type";
-}
-
-
-void RelocInfo::Print(FILE* out) {
- FPrintF(out, "%p %s", pc_, RelocModeName(rmode_));
- if (IsComment(rmode_)) {
- FPrintF(out, " (%s)", reinterpret_cast<char*>(data_));
- } else if (rmode_ == EMBEDDED_OBJECT) {
- FPrintF(out, " (");
- target_object()->ShortPrint(out);
- FPrintF(out, ")");
- } else if (rmode_ == EXTERNAL_REFERENCE) {
- ExternalReferenceEncoder ref_encoder;
- FPrintF(out, " (%s) (%p)",
- ref_encoder.NameOfAddress(*target_reference_address()),
- *target_reference_address());
- } else if (IsCodeTarget(rmode_)) {
- Code* code = Code::GetCodeFromTargetAddress(target_address());
- FPrintF(out, " (%s) (%p)", Code::Kind2String(code->kind()),
- target_address());
- if (rmode_ == CODE_TARGET_WITH_ID) {
- PrintF(" (id=%d)", static_cast<int>(data_));
- }
- } else if (IsPosition(rmode_)) {
- FPrintF(out, " (%" V8_PTR_PREFIX "d)", data());
- } else if (rmode_ == RelocInfo::RUNTIME_ENTRY &&
- Isolate::Current()->deoptimizer_data() != NULL) {
- // Depotimization bailouts are stored as runtime entries.
- int id = Deoptimizer::GetDeoptimizationId(
- target_address(), Deoptimizer::EAGER);
- if (id != Deoptimizer::kNotDeoptimizationEntry) {
- FPrintF(out, " (deoptimization bailout %d)", id);
- }
- }
-
- FPrintF(out, "\n");
-}
-#endif // ENABLE_DISASSEMBLER
-
-
-#ifdef VERIFY_HEAP
-void RelocInfo::Verify() {
- switch (rmode_) {
- case EMBEDDED_OBJECT:
- Object::VerifyPointer(target_object());
- break;
- case GLOBAL_PROPERTY_CELL:
- Object::VerifyPointer(target_cell());
- break;
- case DEBUG_BREAK:
-#ifndef ENABLE_DEBUGGER_SUPPORT
- UNREACHABLE();
- break;
-#endif
- case CONSTRUCT_CALL:
- case CODE_TARGET_CONTEXT:
- case CODE_TARGET_WITH_ID:
- case CODE_TARGET: {
- // convert inline target address to code object
- Address addr = target_address();
- CHECK(addr != NULL);
- // Check that we can find the right code object.
- Code* code = Code::GetCodeFromTargetAddress(addr);
- Object* found = HEAP->FindCodeObject(addr);
- CHECK(found->IsCode());
- CHECK(code->address() == HeapObject::cast(found)->address());
- break;
- }
- case RUNTIME_ENTRY:
- case JS_RETURN:
- case COMMENT:
- case POSITION:
- case STATEMENT_POSITION:
- case EXTERNAL_REFERENCE:
- case INTERNAL_REFERENCE:
- case CONST_POOL:
- case DEBUG_BREAK_SLOT:
- case NONE32:
- case NONE64:
- break;
- case NUMBER_OF_MODES:
- UNREACHABLE();
- break;
- case CODE_AGE_SEQUENCE:
- ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode());
- break;
- }
-}
-#endif // VERIFY_HEAP
-
-
-// -----------------------------------------------------------------------------
-// Implementation of ExternalReference
-
-void ExternalReference::SetUp() {
- double_constants.min_int = kMinInt;
- double_constants.one_half = 0.5;
- double_constants.minus_one_half = -0.5;
- double_constants.minus_zero = -0.0;
- double_constants.uint8_max_value = 255;
- double_constants.zero = 0.0;
- double_constants.canonical_non_hole_nan = OS::nan_value();
- double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
- double_constants.negative_infinity = -V8_INFINITY;
-
- math_exp_data_mutex = OS::CreateMutex();
-}
-
-
-void ExternalReference::InitializeMathExpData() {
- // Early return?
- if (math_exp_data_initialized) return;
-
- math_exp_data_mutex->Lock();
- if (!math_exp_data_initialized) {
- // If this is changed, generated code must be adapted too.
- const int kTableSizeBits = 11;
- const int kTableSize = 1 << kTableSizeBits;
- const double kTableSizeDouble = static_cast<double>(kTableSize);
-
- math_exp_constants_array = new double[9];
- // Input values smaller than this always return 0.
- math_exp_constants_array[0] = -708.39641853226408;
- // Input values larger than this always return +Infinity.
- math_exp_constants_array[1] = 709.78271289338397;
- math_exp_constants_array[2] = V8_INFINITY;
- // The rest is black magic. Do not attempt to understand it. It is
- // loosely based on the "expd" function published at:
- // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
- const double constant3 = (1 << kTableSizeBits) / log(2.0);
- math_exp_constants_array[3] = constant3;
- math_exp_constants_array[4] =
- static_cast<double>(static_cast<int64_t>(3) << 51);
- math_exp_constants_array[5] = 1 / constant3;
- math_exp_constants_array[6] = 3.0000000027955394;
- math_exp_constants_array[7] = 0.16666666685227835;
- math_exp_constants_array[8] = 1;
-
- math_exp_log_table_array = new double[kTableSize];
- for (int i = 0; i < kTableSize; i++) {
- double value = pow(2, i / kTableSizeDouble);
-
- uint64_t bits = BitCast<uint64_t, double>(value);
- bits &= (static_cast<uint64_t>(1) << 52) - 1;
- double mantissa = BitCast<double, uint64_t>(bits);
-
- // <just testing>
- uint64_t doublebits;
- memcpy(&doublebits, &value, sizeof doublebits);
- doublebits &= (static_cast<uint64_t>(1) << 52) - 1;
- double mantissa2;
- memcpy(&mantissa2, &doublebits, sizeof mantissa2);
- CHECK_EQ(mantissa, mantissa2);
- // </just testing>
-
- math_exp_log_table_array[i] = mantissa;
- }
-
- math_exp_data_initialized = true;
- }
- math_exp_data_mutex->Unlock();
-}
-
-
-void ExternalReference::TearDownMathExpData() {
- delete[] math_exp_constants_array;
- delete[] math_exp_log_table_array;
- delete math_exp_data_mutex;
-}
-
-
-ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
- : address_(Redirect(isolate, Builtins::c_function_address(id))) {}
-
-
-ExternalReference::ExternalReference(
- ApiFunction* fun,
- Type type = ExternalReference::BUILTIN_CALL,
- Isolate* isolate = NULL)
- : address_(Redirect(isolate, fun->address(), type)) {}
-
-
-ExternalReference::ExternalReference(Builtins::Name name, Isolate* isolate)
- : address_(isolate->builtins()->builtin_address(name)) {}
-
-
-ExternalReference::ExternalReference(Runtime::FunctionId id,
- Isolate* isolate)
- : address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}
-
-
-ExternalReference::ExternalReference(const Runtime::Function* f,
- Isolate* isolate)
- : address_(Redirect(isolate, f->entry)) {}
-
-
-ExternalReference ExternalReference::isolate_address() {
- return ExternalReference(Isolate::Current());
-}
-
-
-ExternalReference::ExternalReference(const IC_Utility& ic_utility,
- Isolate* isolate)
- : address_(Redirect(isolate, ic_utility.address())) {}
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-ExternalReference::ExternalReference(const Debug_Address& debug_address,
- Isolate* isolate)
- : address_(debug_address.address(isolate)) {}
-#endif
-
-ExternalReference::ExternalReference(StatsCounter* counter)
- : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
-
-
-ExternalReference::ExternalReference(Isolate::AddressId id, Isolate* isolate)
- : address_(isolate->get_address_from_id(id)) {}
-
-
-ExternalReference::ExternalReference(const SCTableReference& table_ref)
- : address_(table_ref.address()) {}
-
-
-ExternalReference ExternalReference::
- incremental_marking_record_write_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
-}
-
-
-ExternalReference ExternalReference::
- incremental_evacuation_record_write_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
-}
-
-
-ExternalReference ExternalReference::
- store_buffer_overflow_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
-}
-
-
-ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
-}
-
-
-ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
- return
- ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC)));
-}
-
-
-ExternalReference ExternalReference::fill_heap_number_with_random_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(V8::FillHeapNumberWithRandom)));
-}
-
-
-ExternalReference ExternalReference::delete_handle_scope_extensions(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(HandleScope::DeleteExtensions)));
-}
-
-
-ExternalReference ExternalReference::random_uint32_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(V8::Random)));
-}
-
-
-ExternalReference ExternalReference::get_date_field_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
-}
-
-
-ExternalReference ExternalReference::get_make_code_young_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
-}
-
-
-ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
- return ExternalReference(isolate->date_cache()->stamp_address());
-}
-
-
-ExternalReference ExternalReference::transcendental_cache_array_address(
- Isolate* isolate) {
- return ExternalReference(
- isolate->transcendental_cache()->cache_array_address());
-}
-
-
-ExternalReference ExternalReference::new_deoptimizer_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(Deoptimizer::New)));
-}
-
-
-ExternalReference ExternalReference::compute_output_frames_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
-}
-
-
-ExternalReference ExternalReference::log_enter_external_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
-}
-
-
-ExternalReference ExternalReference::log_leave_external_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
-}
-
-
-ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
- return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
-}
-
-
-ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
- Isolate* isolate) {
- return ExternalReference(
- isolate->keyed_lookup_cache()->field_offsets_address());
-}
-
-
-ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
- return ExternalReference(isolate->heap()->roots_array_start());
-}
-
-
-ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
- return ExternalReference(isolate->stack_guard()->address_of_jslimit());
-}
-
-
-ExternalReference ExternalReference::address_of_real_stack_limit(
- Isolate* isolate) {
- return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
-}
-
-
-ExternalReference ExternalReference::address_of_regexp_stack_limit(
- Isolate* isolate) {
- return ExternalReference(isolate->regexp_stack()->limit_address());
-}
-
-
-ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
- return ExternalReference(isolate->heap()->NewSpaceStart());
-}
-
-
-ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
- return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
-}
-
-
-ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
- return ExternalReference(reinterpret_cast<Address>(
- isolate->heap()->NewSpaceMask()));
-}
-
-
-ExternalReference ExternalReference::new_space_allocation_top_address(
- Isolate* isolate) {
- return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
-}
-
-
-ExternalReference ExternalReference::heap_always_allocate_scope_depth(
- Isolate* isolate) {
- Heap* heap = isolate->heap();
- return ExternalReference(heap->always_allocate_scope_depth_address());
-}
-
-
-ExternalReference ExternalReference::new_space_allocation_limit_address(
- Isolate* isolate) {
- return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
-}
-
-
-ExternalReference ExternalReference::handle_scope_level_address(
- Isolate* isolate) {
- return ExternalReference(HandleScope::current_level_address(isolate));
-}
-
-
-ExternalReference ExternalReference::handle_scope_next_address(
- Isolate* isolate) {
- return ExternalReference(HandleScope::current_next_address(isolate));
-}
-
-
-ExternalReference ExternalReference::handle_scope_limit_address(
- Isolate* isolate) {
- return ExternalReference(HandleScope::current_limit_address(isolate));
-}
-
-
-ExternalReference ExternalReference::scheduled_exception_address(
- Isolate* isolate) {
- return ExternalReference(isolate->scheduled_exception_address());
-}
-
-
-ExternalReference ExternalReference::address_of_pending_message_obj(
- Isolate* isolate) {
- return ExternalReference(isolate->pending_message_obj_address());
-}
-
-
-ExternalReference ExternalReference::address_of_has_pending_message(
- Isolate* isolate) {
- return ExternalReference(isolate->has_pending_message_address());
-}
-
-
-ExternalReference ExternalReference::address_of_pending_message_script(
- Isolate* isolate) {
- return ExternalReference(isolate->pending_message_script_address());
-}
-
-
-ExternalReference ExternalReference::address_of_min_int() {
- return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
-}
-
-
-ExternalReference ExternalReference::address_of_one_half() {
- return ExternalReference(reinterpret_cast<void*>(&double_constants.one_half));
-}
-
-
-ExternalReference ExternalReference::address_of_minus_one_half() {
- return ExternalReference(
- reinterpret_cast<void*>(&double_constants.minus_one_half));
-}
-
-
-ExternalReference ExternalReference::address_of_minus_zero() {
- return ExternalReference(
- reinterpret_cast<void*>(&double_constants.minus_zero));
-}
-
-
-ExternalReference ExternalReference::address_of_zero() {
- return ExternalReference(reinterpret_cast<void*>(&double_constants.zero));
-}
-
-
-ExternalReference ExternalReference::address_of_uint8_max_value() {
- return ExternalReference(
- reinterpret_cast<void*>(&double_constants.uint8_max_value));
-}
-
-
-ExternalReference ExternalReference::address_of_negative_infinity() {
- return ExternalReference(
- reinterpret_cast<void*>(&double_constants.negative_infinity));
-}
-
-
-ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
- return ExternalReference(
- reinterpret_cast<void*>(&double_constants.canonical_non_hole_nan));
-}
-
-
-ExternalReference ExternalReference::address_of_the_hole_nan() {
- return ExternalReference(
- reinterpret_cast<void*>(&double_constants.the_hole_nan));
-}
-
-
-#ifndef V8_INTERPRETED_REGEXP
-
-ExternalReference ExternalReference::re_check_stack_guard_state(
- Isolate* isolate) {
- Address function;
-#ifdef V8_TARGET_ARCH_X64
- function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
-#elif V8_TARGET_ARCH_IA32
- function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
-#elif V8_TARGET_ARCH_ARM
- function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
-#elif V8_TARGET_ARCH_MIPS
- function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
-#else
- UNREACHABLE();
-#endif
- return ExternalReference(Redirect(isolate, function));
-}
-
-ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
-}
-
-ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
-}
-
-ExternalReference ExternalReference::re_word_character_map() {
- return ExternalReference(
- NativeRegExpMacroAssembler::word_character_map_address());
-}
-
-ExternalReference ExternalReference::address_of_static_offsets_vector(
- Isolate* isolate) {
- return ExternalReference(
- reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector()));
-}
-
-ExternalReference ExternalReference::address_of_regexp_stack_memory_address(
- Isolate* isolate) {
- return ExternalReference(
- isolate->regexp_stack()->memory_address());
-}
-
-ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
- Isolate* isolate) {
- return ExternalReference(isolate->regexp_stack()->memory_size_address());
-}
-
-#endif // V8_INTERPRETED_REGEXP
-
-
-static double add_two_doubles(double x, double y) {
- return x + y;
-}
-
-
-static double sub_two_doubles(double x, double y) {
- return x - y;
-}
-
-
-static double mul_two_doubles(double x, double y) {
- return x * y;
-}
-
-
-static double div_two_doubles(double x, double y) {
- return x / y;
-}
-
-
-static double mod_two_doubles(double x, double y) {
- return modulo(x, y);
-}
-
-
-static double math_sin_double(double x) {
- return sin(x);
-}
-
-
-static double math_cos_double(double x) {
- return cos(x);
-}
-
-
-static double math_tan_double(double x) {
- return tan(x);
-}
-
-
-static double math_log_double(double x) {
- return log(x);
-}
-
-
-ExternalReference ExternalReference::math_sin_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_sin_double),
- BUILTIN_FP_CALL));
-}
-
-
-ExternalReference ExternalReference::math_cos_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_cos_double),
- BUILTIN_FP_CALL));
-}
-
-
-ExternalReference ExternalReference::math_tan_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_tan_double),
- BUILTIN_FP_CALL));
-}
-
-
-ExternalReference ExternalReference::math_log_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_log_double),
- BUILTIN_FP_CALL));
-}
-
-
-ExternalReference ExternalReference::math_exp_constants(int constant_index) {
- ASSERT(math_exp_data_initialized);
- return ExternalReference(
- reinterpret_cast<void*>(math_exp_constants_array + constant_index));
-}
-
-
-ExternalReference ExternalReference::math_exp_log_table() {
- ASSERT(math_exp_data_initialized);
- return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
-}
-
-
-ExternalReference ExternalReference::page_flags(Page* page) {
- return ExternalReference(reinterpret_cast<Address>(page) +
- MemoryChunk::kFlagsOffset);
-}
-
-
-ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
- return ExternalReference(entry);
-}
-
-
-double power_helper(double x, double y) {
- int y_int = static_cast<int>(y);
- if (y == y_int) {
- return power_double_int(x, y_int); // Returns 1 if exponent is 0.
- }
- if (y == 0.5) {
- return (isinf(x)) ? V8_INFINITY : fast_sqrt(x + 0.0); // Convert -0 to +0.
- }
- if (y == -0.5) {
- return (isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
- }
- return power_double_double(x, y);
-}
-
-
-// Helper function to compute x^y, where y is known to be an
-// integer. Uses binary decomposition to limit the number of
-// multiplications; see the discussion in "Hacker's Delight" by Henry
-// S. Warren, Jr., figure 11-6, page 213.
-double power_double_int(double x, int y) {
- double m = (y < 0) ? 1 / x : x;
- unsigned n = (y < 0) ? -y : y;
- double p = 1;
- while (n != 0) {
- if ((n & 1) != 0) p *= m;
- m *= m;
- if ((n & 2) != 0) p *= m;
- m *= m;
- n >>= 2;
- }
- return p;
-}
-
-
-double power_double_double(double x, double y) {
-#if defined(__MINGW64_VERSION_MAJOR) && \
- (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)
- // MinGW64 has a custom implementation for pow. This handles certain
- // special cases that are different.
- if ((x == 0.0 || isinf(x)) && isfinite(y)) {
- double f;
- if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
- }
-
- if (x == 2.0) {
- int y_int = static_cast<int>(y);
- if (y == y_int) return ldexp(1.0, y_int);
- }
-#endif
-
- // The checks for special cases can be dropped in ia32 because it has already
- // been done in generated code before bailing out here.
- if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
- return pow(x, y);
-}
-
-
-ExternalReference ExternalReference::power_double_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(power_double_double),
- BUILTIN_FP_FP_CALL));
-}
-
-
-ExternalReference ExternalReference::power_double_int_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(power_double_int),
- BUILTIN_FP_INT_CALL));
-}
-
-
-static int native_compare_doubles(double y, double x) {
- if (x == y) return EQUAL;
- return x < y ? LESS : GREATER;
-}
-
-
-bool EvalComparison(Token::Value op, double op1, double op2) {
- ASSERT(Token::IsCompareOp(op));
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT: return (op1 == op2);
- case Token::NE: return (op1 != op2);
- case Token::LT: return (op1 < op2);
- case Token::GT: return (op1 > op2);
- case Token::LTE: return (op1 <= op2);
- case Token::GTE: return (op1 >= op2);
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
-ExternalReference ExternalReference::double_fp_operation(
- Token::Value operation, Isolate* isolate) {
- typedef double BinaryFPOperation(double x, double y);
- BinaryFPOperation* function = NULL;
- switch (operation) {
- case Token::ADD:
- function = &add_two_doubles;
- break;
- case Token::SUB:
- function = &sub_two_doubles;
- break;
- case Token::MUL:
- function = &mul_two_doubles;
- break;
- case Token::DIV:
- function = &div_two_doubles;
- break;
- case Token::MOD:
- function = &mod_two_doubles;
- break;
- default:
- UNREACHABLE();
- }
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(function),
- BUILTIN_FP_FP_CALL));
-}
-
-
-ExternalReference ExternalReference::compare_doubles(Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(native_compare_doubles),
- BUILTIN_COMPARE_CALL));
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-ExternalReference ExternalReference::debug_break(Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));
-}
-
-
-ExternalReference ExternalReference::debug_step_in_fp_address(
- Isolate* isolate) {
- return ExternalReference(isolate->debug()->step_in_fp_addr());
-}
-#endif
-
-
-void PositionsRecorder::RecordPosition(int pos) {
- ASSERT(pos != RelocInfo::kNoPosition);
- ASSERT(pos >= 0);
- state_.current_position = pos;
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (gdbjit_lineinfo_ != NULL) {
- gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false);
- }
-#endif
- LOG_CODE_EVENT(assembler_->isolate(),
- CodeLinePosInfoAddPositionEvent(jit_handler_data_,
- assembler_->pc_offset(),
- pos));
-}
-
-
-void PositionsRecorder::RecordStatementPosition(int pos) {
- ASSERT(pos != RelocInfo::kNoPosition);
- ASSERT(pos >= 0);
- state_.current_statement_position = pos;
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (gdbjit_lineinfo_ != NULL) {
- gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true);
- }
-#endif
- LOG_CODE_EVENT(assembler_->isolate(),
- CodeLinePosInfoAddStatementPositionEvent(
- jit_handler_data_,
- assembler_->pc_offset(),
- pos));
-}
-
-
-bool PositionsRecorder::WriteRecordedPositions() {
- bool written = false;
-
- // Write the statement position if it is different from what was written last
- // time.
- if (state_.current_statement_position != state_.written_statement_position) {
- EnsureSpace ensure_space(assembler_);
- assembler_->RecordRelocInfo(RelocInfo::STATEMENT_POSITION,
- state_.current_statement_position);
- state_.written_statement_position = state_.current_statement_position;
- written = true;
- }
-
- // Write the position if it is different from what was written last time and
- // also different from the written statement position.
- if (state_.current_position != state_.written_position &&
- state_.current_position != state_.written_statement_position) {
- EnsureSpace ensure_space(assembler_);
- assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position);
- state_.written_position = state_.current_position;
- written = true;
- }
-
- // Return whether something was written.
- return written;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/assembler.h b/src/3rdparty/v8/src/assembler.h
deleted file mode 100644
index 06c3b76..0000000
--- a/src/3rdparty/v8/src/assembler.h
+++ /dev/null
@@ -1,1000 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-#ifndef V8_ASSEMBLER_H_
-#define V8_ASSEMBLER_H_
-
-#include "v8.h"
-
-#include "allocation.h"
-#include "builtins.h"
-#include "gdb-jit.h"
-#include "isolate.h"
-#include "runtime.h"
-#include "token.h"
-
-namespace v8 {
-
-class ApiFunction;
-
-namespace internal {
-
-struct StatsCounter;
-// -----------------------------------------------------------------------------
-// Platform independent assembler base class.
-
-class AssemblerBase: public Malloced {
- public:
- AssemblerBase(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~AssemblerBase();
-
- Isolate* isolate() const { return isolate_; }
- int jit_cookie() const { return jit_cookie_; }
-
- bool emit_debug_code() const { return emit_debug_code_; }
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
- bool predictable_code_size() const { return predictable_code_size_; }
- void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
-
- // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
- // cross-snapshotting.
- static void QuietNaN(HeapObject* nan) { }
-
- int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
-
- static const int kMinimalBufferSize = 4*KB;
-
- protected:
- // The buffer into which code and relocation info are generated. It could
- // either be owned by the assembler or be provided externally.
- byte* buffer_;
- int buffer_size_;
- bool own_buffer_;
-
- // The program counter, which points into the buffer above and moves forward.
- byte* pc_;
-
- private:
- Isolate* isolate_;
- int jit_cookie_;
- bool emit_debug_code_;
- bool predictable_code_size_;
-};
-
-
-// Avoids using instructions that vary in size in unpredictable ways between the
-// snapshot and the running VM.
-class PredictableCodeSizeScope {
- public:
- PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
- ~PredictableCodeSizeScope();
-
- private:
- AssemblerBase* assembler_;
- int expected_size_;
- int start_offset_;
- bool old_value_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Labels represent pc locations; they are typically jump or call targets.
-// After declaration, a label can be freely used to denote known or (yet)
-// unknown pc location. Assembler::bind() is used to bind a label to the
-// current pc. A label can be bound only once.
-
-class Label BASE_EMBEDDED {
- public:
- enum Distance {
- kNear, kFar
- };
-
- INLINE(Label()) {
- Unuse();
- UnuseNear();
- }
-
- INLINE(~Label()) {
- ASSERT(!is_linked());
- ASSERT(!is_near_linked());
- }
-
- INLINE(void Unuse()) { pos_ = 0; }
- INLINE(void UnuseNear()) { near_link_pos_ = 0; }
-
- INLINE(bool is_bound() const) { return pos_ < 0; }
- INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
- INLINE(bool is_linked() const) { return pos_ > 0; }
- INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }
-
- // Returns the position of bound or linked labels. Cannot be used
- // for unused labels.
- int pos() const;
- int near_link_pos() const { return near_link_pos_ - 1; }
-
- private:
- // pos_ encodes both the binding state (via its sign)
- // and the binding position (via its value) of a label.
- //
- // pos_ < 0 bound label, pos() returns the jump target position
- // pos_ == 0 unused label
- // pos_ > 0 linked label, pos() returns the last reference position
- int pos_;
-
- // Behaves like |pos_| in the "> 0" case, but for near jumps to this label.
- int near_link_pos_;
-
- void bind_to(int pos) {
- pos_ = -pos - 1;
- ASSERT(is_bound());
- }
- void link_to(int pos, Distance distance = kFar) {
- if (distance == kNear) {
- near_link_pos_ = pos + 1;
- ASSERT(is_near_linked());
- } else {
- pos_ = pos + 1;
- ASSERT(is_linked());
- }
- }
-
- friend class Assembler;
- friend class RegexpAssembler;
- friend class Displacement;
- friend class RegExpMacroAssemblerIrregexp;
-};
-
-
-enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
-
-
-// -----------------------------------------------------------------------------
-// Relocation information
-
-
-// Relocation information consists of the address (pc) of the datum
-// to which the relocation information applies, the relocation mode
-// (rmode), and an optional data field. The relocation mode may be
-// "descriptive" and not indicate a need for relocation, but simply
-// describe a property of the datum. Such rmodes are useful for GC
-// and nice disassembly output.
-
-class RelocInfo BASE_EMBEDDED {
- public:
- // The constant kNoPosition is used with the collecting of source positions
- // in the relocation information. Two types of source positions are collected
- // "position" (RelocMode position) and "statement position" (RelocMode
- // statement_position). The "position" is collected at places in the source
- // code which are of interest when making stack traces to pin-point the source
- // location of a stack frame as close as possible. The "statement position" is
- // collected at the beginning at each statement, and is used to indicate
- // possible break locations. kNoPosition is used to indicate an
- // invalid/uninitialized position value.
- static const int kNoPosition = -1;
-
- // This string is used to add padding comments to the reloc info in cases
- // where we are not sure to have enough space for patching in during
- // lazy deoptimization. This is the case if we have indirect calls for which
- // we do not normally record relocation info.
- static const char* const kFillerCommentString;
-
- // The minimum size of a comment is equal to three bytes for the extra tagged
- // pc + the tag for the data, and kPointerSize for the actual pointer to the
- // comment.
- static const int kMinRelocCommentSize = 3 + kPointerSize;
-
- // The maximum size for a call instruction including pc-jump.
- static const int kMaxCallSize = 6;
-
- // The maximum pc delta that will use the short encoding.
- static const int kMaxSmallPCDelta;
-
- enum Mode {
- // Please note the order is important (see IsCodeTarget, IsGCRelocMode).
- CODE_TARGET, // Code target which is not any of the above.
- CODE_TARGET_WITH_ID,
- CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
- CODE_TARGET_CONTEXT, // Code target used for contextual loads and stores.
- DEBUG_BREAK, // Code target for the debugger statement.
- EMBEDDED_OBJECT,
- GLOBAL_PROPERTY_CELL,
-
- // Everything after runtime_entry (inclusive) is not GC'ed.
- RUNTIME_ENTRY,
- JS_RETURN, // Marks start of the ExitJSFrame code.
- COMMENT,
- POSITION, // See comment for kNoPosition above.
- STATEMENT_POSITION, // See comment for kNoPosition above.
- DEBUG_BREAK_SLOT, // Additional code inserted for debug break slot.
- EXTERNAL_REFERENCE, // The address of an external C++ function.
- INTERNAL_REFERENCE, // An address inside the same function.
-
- // Marks a constant pool. Only used on ARM.
- // It uses a custom noncompact encoding.
- CONST_POOL,
-
- // add more as needed
- // Pseudo-types
- NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding.
- NONE32, // never recorded 32-bit value
- NONE64, // never recorded 64-bit value
- CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explictly by
- // code aging.
- FIRST_REAL_RELOC_MODE = CODE_TARGET,
- LAST_REAL_RELOC_MODE = CONST_POOL,
- FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
- LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
- LAST_CODE_ENUM = DEBUG_BREAK,
- LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
- // Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
- LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID,
- LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE
- };
-
-
- RelocInfo() {}
-
- RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
- : pc_(pc), rmode_(rmode), data_(data), host_(host) {
- }
- RelocInfo(byte* pc, double data64)
- : pc_(pc), rmode_(NONE64), data64_(data64), host_(NULL) {
- }
-
- static inline bool IsRealRelocMode(Mode mode) {
- return mode >= FIRST_REAL_RELOC_MODE &&
- mode <= LAST_REAL_RELOC_MODE;
- }
- static inline bool IsPseudoRelocMode(Mode mode) {
- ASSERT(!IsRealRelocMode(mode));
- return mode >= FIRST_PSEUDO_RELOC_MODE &&
- mode <= LAST_PSEUDO_RELOC_MODE;
- }
- static inline bool IsConstructCall(Mode mode) {
- return mode == CONSTRUCT_CALL;
- }
- static inline bool IsCodeTarget(Mode mode) {
- return mode <= LAST_CODE_ENUM;
- }
- static inline bool IsEmbeddedObject(Mode mode) {
- return mode == EMBEDDED_OBJECT;
- }
- // Is the relocation mode affected by GC?
- static inline bool IsGCRelocMode(Mode mode) {
- return mode <= LAST_GCED_ENUM;
- }
- static inline bool IsJSReturn(Mode mode) {
- return mode == JS_RETURN;
- }
- static inline bool IsComment(Mode mode) {
- return mode == COMMENT;
- }
- static inline bool IsConstPool(Mode mode) {
- return mode == CONST_POOL;
- }
- static inline bool IsPosition(Mode mode) {
- return mode == POSITION || mode == STATEMENT_POSITION;
- }
- static inline bool IsStatementPosition(Mode mode) {
- return mode == STATEMENT_POSITION;
- }
- static inline bool IsExternalReference(Mode mode) {
- return mode == EXTERNAL_REFERENCE;
- }
- static inline bool IsInternalReference(Mode mode) {
- return mode == INTERNAL_REFERENCE;
- }
- static inline bool IsDebugBreakSlot(Mode mode) {
- return mode == DEBUG_BREAK_SLOT;
- }
- static inline bool IsNone(Mode mode) {
- return mode == NONE32 || mode == NONE64;
- }
- static inline bool IsCodeAgeSequence(Mode mode) {
- return mode == CODE_AGE_SEQUENCE;
- }
- static inline int ModeMask(Mode mode) { return 1 << mode; }
-
- // Accessors
- byte* pc() const { return pc_; }
- void set_pc(byte* pc) { pc_ = pc; }
- Mode rmode() const { return rmode_; }
- intptr_t data() const { return data_; }
- double data64() const { return data64_; }
- Code* host() const { return host_; }
-
- // Apply a relocation by delta bytes
- INLINE(void apply(intptr_t delta));
-
- // Is the pointer this relocation info refers to coded like a plain pointer
- // or is it strange in some way (e.g. relative or patched into a series of
- // instructions).
- bool IsCodedSpecially();
-
- // Read/modify the code target in the branch/call instruction
- // this relocation applies to;
- // can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
- INLINE(Address target_address());
- INLINE(void set_target_address(Address target,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
- INLINE(Object* target_object());
- INLINE(Handle<Object> target_object_handle(Assembler* origin));
- INLINE(Object** target_object_address());
- INLINE(void set_target_object(Object* target,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
- INLINE(JSGlobalPropertyCell* target_cell());
- INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
- INLINE(void set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
- INLINE(Code* code_age_stub());
- INLINE(void set_code_age_stub(Code* stub));
-
- // Read the address of the word containing the target_address in an
- // instruction stream. What this means exactly is architecture-independent.
- // The only architecture-independent user of this function is the serializer.
- // The serializer uses it to find out how many raw bytes of instruction to
- // output before the next target. Architecture-independent code shouldn't
- // dereference the pointer it gets back from this.
- INLINE(Address target_address_address());
- // This indicates how much space a target takes up when deserializing a code
- // stream. For most architectures this is just the size of a pointer. For
- // an instruction like movw/movt where the target bits are mixed into the
- // instruction bits the size of the target will be zero, indicating that the
- // serializer should not step forwards in memory after a target is resolved
- // and written. In this case the target_address_address function above
- // should return the end of the instructions to be patched, allowing the
- // deserializer to deserialize the instructions as raw bytes and put them in
- // place, ready to be patched with the target.
- INLINE(int target_address_size());
-
- // Read/modify the reference in the instruction this relocation
- // applies to; can only be called if rmode_ is external_reference
- INLINE(Address* target_reference_address());
-
- // Read/modify the address of a call instruction. This is used to relocate
- // the break points where straight-line code is patched with a call
- // instruction.
- INLINE(Address call_address());
- INLINE(void set_call_address(Address target));
- INLINE(Object* call_object());
- INLINE(void set_call_object(Object* target));
- INLINE(Object** call_object_address());
-
- template<typename StaticVisitor> inline void Visit(Heap* heap);
- inline void Visit(ObjectVisitor* v);
-
- // Patch the code with some other code.
- void PatchCode(byte* instructions, int instruction_count);
-
- // Patch the code with a call.
- void PatchCodeWithCall(Address target, int guard_bytes);
-
- // Check whether this return sequence has been patched
- // with a call to the debugger.
- INLINE(bool IsPatchedReturnSequence());
-
- // Check whether this debug break slot has been patched with a call to the
- // debugger.
- INLINE(bool IsPatchedDebugBreakSlotSequence());
-
-#ifdef DEBUG
- // Check whether the given code contains relocation information that
- // either is position-relative or movable by the garbage collector.
- static bool RequiresRelocation(const CodeDesc& desc);
-#endif
-
-#ifdef ENABLE_DISASSEMBLER
- // Printing
- static const char* RelocModeName(Mode rmode);
- void Print(FILE* out);
-#endif // ENABLE_DISASSEMBLER
-#ifdef VERIFY_HEAP
- void Verify();
-#endif
-
- static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
- static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
- static const int kDataMask =
- (1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT);
- static const int kApplyMask; // Modes affected by apply. Depends on arch.
-
- private:
- // On ARM, note that pc_ is the address of the constant pool entry
- // to be relocated and not the address of the instruction
- // referencing the constant pool entry (except when rmode_ ==
- // comment).
- byte* pc_;
- Mode rmode_;
- union {
- intptr_t data_;
- double data64_;
- };
- Code* host_;
- // Code and Embedded Object pointers on some platforms are stored split
- // across two consecutive 32-bit instructions. Heap management
- // routines expect to access these pointers indirectly. The following
- // location provides a place for these pointers to exist naturally
- // when accessed via the Iterator.
- Object* reconstructed_obj_ptr_;
- // External-reference pointers are also split across instruction-pairs
- // on some platforms, but are accessed via indirect pointers. This location
- // provides a place for that pointer to exist naturally. Its address
- // is returned by RelocInfo::target_reference_address().
- Address reconstructed_adr_ptr_;
- friend class RelocIterator;
-};
-
-
-// RelocInfoWriter serializes a stream of relocation info. It writes towards
-// lower addresses.
-class RelocInfoWriter BASE_EMBEDDED {
- public:
- RelocInfoWriter() : pos_(NULL),
- last_pc_(NULL),
- last_id_(0),
- last_position_(0) {}
- RelocInfoWriter(byte* pos, byte* pc) : pos_(pos),
- last_pc_(pc),
- last_id_(0),
- last_position_(0) {}
-
- byte* pos() const { return pos_; }
- byte* last_pc() const { return last_pc_; }
-
- void Write(const RelocInfo* rinfo);
-
- // Update the state of the stream after reloc info buffer
- // and/or code is moved while the stream is active.
- void Reposition(byte* pos, byte* pc) {
- pos_ = pos;
- last_pc_ = pc;
- }
-
- // Max size (bytes) of a written RelocInfo. Longest encoding is
- // ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, ExtraTag, data_delta.
- // On ia32 and arm this is 1 + 4 + 1 + 1 + 1 + 4 = 12.
- // On x64 this is 1 + 4 + 1 + 1 + 1 + 8 == 16;
- // Here we use the maximum of the two.
- static const int kMaxSize = 16;
-
- private:
- inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
- inline void WriteTaggedPC(uint32_t pc_delta, int tag);
- inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
- inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
- inline void WriteExtraTaggedConstPoolData(int data);
- inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
- inline void WriteTaggedData(intptr_t data_delta, int tag);
- inline void WriteExtraTag(int extra_tag, int top_tag);
-
- byte* pos_;
- byte* last_pc_;
- int last_id_;
- int last_position_;
- DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
-};
-
-
-// A RelocIterator iterates over relocation information.
-// Typical use:
-//
-// for (RelocIterator it(code); !it.done(); it.next()) {
-// // do something with it.rinfo() here
-// }
-//
-// A mask can be specified to skip unwanted modes.
-class RelocIterator: public Malloced {
- public:
- // Create a new iterator positioned at
- // the beginning of the reloc info.
- // Relocation information with mode k is included in the
- // iteration iff bit k of mode_mask is set.
- explicit RelocIterator(Code* code, int mode_mask = -1);
- explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
-
- // Iteration
- bool done() const { return done_; }
- void next();
-
- // Return pointer valid until next next().
- RelocInfo* rinfo() {
- ASSERT(!done());
- return &rinfo_;
- }
-
- private:
- // Advance* moves the position before/after reading.
- // *Read* reads from current byte(s) into rinfo_.
- // *Get* just reads and returns info on current byte.
- void Advance(int bytes = 1) { pos_ -= bytes; }
- int AdvanceGetTag();
- int GetExtraTag();
- int GetTopTag();
- void ReadTaggedPC();
- void AdvanceReadPC();
- void AdvanceReadId();
- void AdvanceReadConstPoolData();
- void AdvanceReadPosition();
- void AdvanceReadData();
- void AdvanceReadVariableLengthPCJump();
- int GetLocatableTypeTag();
- void ReadTaggedId();
- void ReadTaggedPosition();
-
- // If the given mode is wanted, set it in rinfo_ and return true.
- // Else return false. Used for efficiently skipping unwanted modes.
- bool SetMode(RelocInfo::Mode mode) {
- return (mode_mask_ & (1 << mode)) ? (rinfo_.rmode_ = mode, true) : false;
- }
-
- byte* pos_;
- byte* end_;
- byte* code_age_sequence_;
- RelocInfo rinfo_;
- bool done_;
- int mode_mask_;
- int last_id_;
- int last_position_;
- DISALLOW_COPY_AND_ASSIGN(RelocIterator);
-};
-
-
-//------------------------------------------------------------------------------
-// External function
-
-//----------------------------------------------------------------------------
-class IC_Utility;
-class SCTableReference;
-#ifdef ENABLE_DEBUGGER_SUPPORT
-class Debug_Address;
-#endif
-
-
-// An ExternalReference represents a C++ address used in the generated
-// code. All references to C++ functions and variables must be encapsulated in
-// an ExternalReference instance. This is done in order to track the origin of
-// all external references in the code so that they can be bound to the correct
-// addresses when deserializing a heap.
-class ExternalReference BASE_EMBEDDED {
- public:
- // Used in the simulator to support different native api calls.
- enum Type {
- // Builtin call.
- // MaybeObject* f(v8::internal::Arguments).
- BUILTIN_CALL, // default
-
- // Builtin that takes float arguments and returns an int.
- // int f(double, double).
- BUILTIN_COMPARE_CALL,
-
- // Builtin call that returns floating point.
- // double f(double, double).
- BUILTIN_FP_FP_CALL,
-
- // Builtin call that returns floating point.
- // double f(double).
- BUILTIN_FP_CALL,
-
- // Builtin call that returns floating point.
- // double f(double, int).
- BUILTIN_FP_INT_CALL,
-
- // Direct call to API function callback.
- // Handle<Value> f(v8::Arguments&)
- DIRECT_API_CALL,
-
- // Direct call to accessor getter callback.
- // Handle<value> f(Local<String> property, AccessorInfo& info)
- DIRECT_GETTER_CALL
- };
-
- static void SetUp();
- static void InitializeMathExpData();
- static void TearDownMathExpData();
-
- typedef void* ExternalReferenceRedirector(void* original, Type type);
-
- ExternalReference(Builtins::CFunctionId id, Isolate* isolate);
-
- ExternalReference(ApiFunction* ptr, Type type, Isolate* isolate);
-
- ExternalReference(Builtins::Name name, Isolate* isolate);
-
- ExternalReference(Runtime::FunctionId id, Isolate* isolate);
-
- ExternalReference(const Runtime::Function* f, Isolate* isolate);
-
- ExternalReference(const IC_Utility& ic_utility, Isolate* isolate);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference(const Debug_Address& debug_address, Isolate* isolate);
-#endif
-
- explicit ExternalReference(StatsCounter* counter);
-
- ExternalReference(Isolate::AddressId id, Isolate* isolate);
-
- explicit ExternalReference(const SCTableReference& table_ref);
-
- // Isolate::Current() as an external reference.
- static ExternalReference isolate_address();
-
- // One-of-a-kind references. These references are not part of a general
- // pattern. This means that they have to be added to the
- // ExternalReferenceTable in serialize.cc manually.
-
- static ExternalReference incremental_marking_record_write_function(
- Isolate* isolate);
- static ExternalReference incremental_evacuation_record_write_function(
- Isolate* isolate);
- static ExternalReference store_buffer_overflow_function(
- Isolate* isolate);
- static ExternalReference flush_icache_function(Isolate* isolate);
- static ExternalReference perform_gc_function(Isolate* isolate);
- static ExternalReference fill_heap_number_with_random_function(
- Isolate* isolate);
- static ExternalReference random_uint32_function(Isolate* isolate);
- static ExternalReference transcendental_cache_array_address(Isolate* isolate);
- static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
-
- static ExternalReference get_date_field_function(Isolate* isolate);
- static ExternalReference date_cache_stamp(Isolate* isolate);
-
- static ExternalReference get_make_code_young_function(Isolate* isolate);
-
- // Deoptimization support.
- static ExternalReference new_deoptimizer_function(Isolate* isolate);
- static ExternalReference compute_output_frames_function(Isolate* isolate);
-
- // Log support.
- static ExternalReference log_enter_external_function(Isolate* isolate);
- static ExternalReference log_leave_external_function(Isolate* isolate);
-
- // Static data in the keyed lookup cache.
- static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
- static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
-
- // Static variable Heap::roots_array_start()
- static ExternalReference roots_array_start(Isolate* isolate);
-
- // Static variable StackGuard::address_of_jslimit()
- static ExternalReference address_of_stack_limit(Isolate* isolate);
-
- // Static variable StackGuard::address_of_real_jslimit()
- static ExternalReference address_of_real_stack_limit(Isolate* isolate);
-
- // Static variable RegExpStack::limit_address()
- static ExternalReference address_of_regexp_stack_limit(Isolate* isolate);
-
- // Static variables for RegExp.
- static ExternalReference address_of_static_offsets_vector(Isolate* isolate);
- static ExternalReference address_of_regexp_stack_memory_address(
- Isolate* isolate);
- static ExternalReference address_of_regexp_stack_memory_size(
- Isolate* isolate);
-
- // Static variable Heap::NewSpaceStart()
- static ExternalReference new_space_start(Isolate* isolate);
- static ExternalReference new_space_mask(Isolate* isolate);
- static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
- static ExternalReference new_space_mark_bits(Isolate* isolate);
-
- // Write barrier.
- static ExternalReference store_buffer_top(Isolate* isolate);
-
- // Used for fast allocation in generated code.
- static ExternalReference new_space_allocation_top_address(Isolate* isolate);
- static ExternalReference new_space_allocation_limit_address(Isolate* isolate);
-
- static ExternalReference double_fp_operation(Token::Value operation,
- Isolate* isolate);
- static ExternalReference compare_doubles(Isolate* isolate);
- static ExternalReference power_double_double_function(Isolate* isolate);
- static ExternalReference power_double_int_function(Isolate* isolate);
-
- static ExternalReference handle_scope_next_address(Isolate* isolate);
- static ExternalReference handle_scope_limit_address(Isolate* isolate);
- static ExternalReference handle_scope_level_address(Isolate* isolate);
-
- static ExternalReference scheduled_exception_address(Isolate* isolate);
- static ExternalReference address_of_pending_message_obj(Isolate* isolate);
- static ExternalReference address_of_has_pending_message(Isolate* isolate);
- static ExternalReference address_of_pending_message_script(Isolate* isolate);
-
- // Static variables containing common double constants.
- static ExternalReference address_of_min_int();
- static ExternalReference address_of_one_half();
- static ExternalReference address_of_minus_one_half();
- static ExternalReference address_of_minus_zero();
- static ExternalReference address_of_zero();
- static ExternalReference address_of_uint8_max_value();
- static ExternalReference address_of_negative_infinity();
- static ExternalReference address_of_canonical_non_hole_nan();
- static ExternalReference address_of_the_hole_nan();
-
- static ExternalReference math_sin_double_function(Isolate* isolate);
- static ExternalReference math_cos_double_function(Isolate* isolate);
- static ExternalReference math_tan_double_function(Isolate* isolate);
- static ExternalReference math_log_double_function(Isolate* isolate);
-
- static ExternalReference math_exp_constants(int constant_index);
- static ExternalReference math_exp_log_table();
-
- static ExternalReference page_flags(Page* page);
-
- static ExternalReference ForDeoptEntry(Address entry);
-
- static ExternalReference cpu_features();
-
- Address address() const {return reinterpret_cast<Address>(address_);}
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Function Debug::Break()
- static ExternalReference debug_break(Isolate* isolate);
-
- // Used to check if single stepping is enabled in generated code.
- static ExternalReference debug_step_in_fp_address(Isolate* isolate);
-#endif
-
-#ifndef V8_INTERPRETED_REGEXP
- // C functions called from RegExp generated code.
-
- // Function NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()
- static ExternalReference re_case_insensitive_compare_uc16(Isolate* isolate);
-
- // Function RegExpMacroAssembler*::CheckStackGuardState()
- static ExternalReference re_check_stack_guard_state(Isolate* isolate);
-
- // Function NativeRegExpMacroAssembler::GrowStack()
- static ExternalReference re_grow_stack(Isolate* isolate);
-
- // byte NativeRegExpMacroAssembler::word_character_bitmap
- static ExternalReference re_word_character_map();
-
-#endif
-
- // This lets you register a function that rewrites all external references.
- // Used by the ARM simulator to catch calls to external references.
- static void set_redirector(Isolate* isolate,
- ExternalReferenceRedirector* redirector) {
- // We can't stack them.
- ASSERT(isolate->external_reference_redirector() == NULL);
- isolate->set_external_reference_redirector(
- reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
- }
-
- private:
- explicit ExternalReference(void* address)
- : address_(address) {}
-
- static void* Redirect(Isolate* isolate,
- void* address,
- Type type = ExternalReference::BUILTIN_CALL) {
- ExternalReferenceRedirector* redirector =
- reinterpret_cast<ExternalReferenceRedirector*>(
- isolate->external_reference_redirector());
- if (redirector == NULL) return address;
- void* answer = (*redirector)(address, type);
- return answer;
- }
-
- static void* Redirect(Isolate* isolate,
- Address address_arg,
- Type type = ExternalReference::BUILTIN_CALL) {
- ExternalReferenceRedirector* redirector =
- reinterpret_cast<ExternalReferenceRedirector*>(
- isolate->external_reference_redirector());
- void* address = reinterpret_cast<void*>(address_arg);
- void* answer = (redirector == NULL) ?
- address :
- (*redirector)(address, type);
- return answer;
- }
-
- void* address_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Position recording support
-
-struct PositionState {
- PositionState() : current_position(RelocInfo::kNoPosition),
- written_position(RelocInfo::kNoPosition),
- current_statement_position(RelocInfo::kNoPosition),
- written_statement_position(RelocInfo::kNoPosition) {}
-
- int current_position;
- int written_position;
-
- int current_statement_position;
- int written_statement_position;
-};
-
-
-class PositionsRecorder BASE_EMBEDDED {
- public:
- explicit PositionsRecorder(Assembler* assembler)
- : assembler_(assembler) {
-#ifdef ENABLE_GDB_JIT_INTERFACE
- gdbjit_lineinfo_ = NULL;
-#endif
- jit_handler_data_ = NULL;
- }
-
-#ifdef ENABLE_GDB_JIT_INTERFACE
- ~PositionsRecorder() {
- delete gdbjit_lineinfo_;
- }
-
- void StartGDBJITLineInfoRecording() {
- if (FLAG_gdbjit) {
- gdbjit_lineinfo_ = new GDBJITLineInfo();
- }
- }
-
- GDBJITLineInfo* DetachGDBJITLineInfo() {
- GDBJITLineInfo* lineinfo = gdbjit_lineinfo_;
- gdbjit_lineinfo_ = NULL; // To prevent deallocation in destructor.
- return lineinfo;
- }
-#endif
- void AttachJITHandlerData(void* user_data) {
- jit_handler_data_ = user_data;
- }
-
- void* DetachJITHandlerData() {
- void* old_data = jit_handler_data_;
- jit_handler_data_ = NULL;
- return old_data;
- }
- // Set current position to pos.
- void RecordPosition(int pos);
-
- // Set current statement position to pos.
- void RecordStatementPosition(int pos);
-
- // Write recorded positions to relocation information.
- bool WriteRecordedPositions();
-
- int current_position() const { return state_.current_position; }
-
- int current_statement_position() const {
- return state_.current_statement_position;
- }
-
- private:
- Assembler* assembler_;
- PositionState state_;
-#ifdef ENABLE_GDB_JIT_INTERFACE
- GDBJITLineInfo* gdbjit_lineinfo_;
-#endif
-
- // Currently jit_handler_data_ is used to store JITHandler-specific data
- // over the lifetime of a PositionsRecorder
- void* jit_handler_data_;
- friend class PreservePositionScope;
-
- DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
-};
-
-
-class PreservePositionScope BASE_EMBEDDED {
- public:
- explicit PreservePositionScope(PositionsRecorder* positions_recorder)
- : positions_recorder_(positions_recorder),
- saved_state_(positions_recorder->state_) {}
-
- ~PreservePositionScope() {
- positions_recorder_->state_ = saved_state_;
- }
-
- private:
- PositionsRecorder* positions_recorder_;
- const PositionState saved_state_;
-
- DISALLOW_COPY_AND_ASSIGN(PreservePositionScope);
-};
-
-
-// -----------------------------------------------------------------------------
-// Utility functions
-
-inline bool is_intn(int x, int n) {
- return -(1 << (n-1)) <= x && x < (1 << (n-1));
-}
-
-inline bool is_int8(int x) { return is_intn(x, 8); }
-inline bool is_int16(int x) { return is_intn(x, 16); }
-inline bool is_int18(int x) { return is_intn(x, 18); }
-inline bool is_int24(int x) { return is_intn(x, 24); }
-
-inline bool is_uintn(int x, int n) {
- return (x & -(1 << n)) == 0;
-}
-
-inline bool is_uint2(int x) { return is_uintn(x, 2); }
-inline bool is_uint3(int x) { return is_uintn(x, 3); }
-inline bool is_uint4(int x) { return is_uintn(x, 4); }
-inline bool is_uint5(int x) { return is_uintn(x, 5); }
-inline bool is_uint6(int x) { return is_uintn(x, 6); }
-inline bool is_uint8(int x) { return is_uintn(x, 8); }
-inline bool is_uint10(int x) { return is_uintn(x, 10); }
-inline bool is_uint12(int x) { return is_uintn(x, 12); }
-inline bool is_uint16(int x) { return is_uintn(x, 16); }
-inline bool is_uint24(int x) { return is_uintn(x, 24); }
-inline bool is_uint26(int x) { return is_uintn(x, 26); }
-inline bool is_uint28(int x) { return is_uintn(x, 28); }
-
-inline int NumberOfBitsSet(uint32_t x) {
- unsigned int num_bits_set;
- for (num_bits_set = 0; x; x >>= 1) {
- num_bits_set += x & 1;
- }
- return num_bits_set;
-}
-
-bool EvalComparison(Token::Value op, double op1, double op2);
-
-// Computes pow(x, y) with the special cases in the spec for Math.pow.
-double power_helper(double x, double y);
-double power_double_int(double x, int y);
-double power_double_double(double x, double y);
-
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class CallWrapper {
- public:
- CallWrapper() { }
- virtual ~CallWrapper() { }
- // Called just before emitting a call. Argument is the size of the generated
- // call code.
- virtual void BeforeCall(int call_size) const = 0;
- // Called just after emitting a call, i.e., at the return site for the call.
- virtual void AfterCall() const = 0;
-};
-
-class NullCallWrapper : public CallWrapper {
- public:
- NullCallWrapper() { }
- virtual ~NullCallWrapper() { }
- virtual void BeforeCall(int call_size) const { }
- virtual void AfterCall() const { }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ASSEMBLER_H_
diff --git a/src/3rdparty/v8/src/ast.cc b/src/3rdparty/v8/src/ast.cc
deleted file mode 100644
index dc5865e..0000000
--- a/src/3rdparty/v8/src/ast.cc
+++ /dev/null
@@ -1,1134 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "ast.h"
-
-#include <math.h> // For isfinite.
-#include "builtins.h"
-#include "code-stubs.h"
-#include "conversions.h"
-#include "hashmap.h"
-#include "parser.h"
-#include "property-details.h"
-#include "property.h"
-#include "scopes.h"
-#include "string-stream.h"
-#include "type-info.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// All the Accept member functions for each syntax tree node type.
-
-#define DECL_ACCEPT(type) \
- void type::Accept(AstVisitor* v) { v->Visit##type(this); }
-AST_NODE_LIST(DECL_ACCEPT)
-#undef DECL_ACCEPT
-
-
-// ----------------------------------------------------------------------------
-// Implementation of other node functionality.
-
-
-bool Expression::IsSmiLiteral() {
- return AsLiteral() != NULL && AsLiteral()->handle()->IsSmi();
-}
-
-
-bool Expression::IsStringLiteral() {
- return AsLiteral() != NULL && AsLiteral()->handle()->IsString();
-}
-
-
-bool Expression::IsNullLiteral() {
- return AsLiteral() != NULL && AsLiteral()->handle()->IsNull();
-}
-
-
-VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
- : Expression(isolate),
- name_(var->name()),
- var_(NULL), // Will be set by the call to BindTo.
- is_this_(var->is_this()),
- is_trivial_(false),
- is_lvalue_(false),
- position_(RelocInfo::kNoPosition),
- interface_(var->interface()) {
- BindTo(var);
-}
-
-
-VariableProxy::VariableProxy(Isolate* isolate,
- Handle<String> name,
- bool is_this,
- Interface* interface,
- int position)
- : Expression(isolate),
- name_(name),
- var_(NULL),
- is_this_(is_this),
- is_trivial_(false),
- is_lvalue_(false),
- position_(position),
- interface_(interface) {
- // Names must be canonicalized for fast equality checks.
- ASSERT(name->IsInternalizedString());
-}
-
-
-void VariableProxy::BindTo(Variable* var) {
- ASSERT(var_ == NULL); // must be bound only once
- ASSERT(var != NULL); // must bind
- ASSERT(!FLAG_harmony_modules || interface_->IsUnified(var->interface()));
- ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name()));
- // Ideally CONST-ness should match. However, this is very hard to achieve
- // because we don't know the exact semantics of conflicting (const and
- // non-const) multiple variable declarations, const vars introduced via
- // eval() etc. Const-ness and variable declarations are a complete mess
- // in JS. Sigh...
- var_ = var;
- var->set_is_used(true);
-}
-
-
-Assignment::Assignment(Isolate* isolate,
- Token::Value op,
- Expression* target,
- Expression* value,
- int pos)
- : Expression(isolate),
- op_(op),
- target_(target),
- value_(value),
- pos_(pos),
- binary_operation_(NULL),
- assignment_id_(GetNextId(isolate)),
- is_monomorphic_(false) { }
-
-
-Token::Value Assignment::binary_op() const {
- switch (op_) {
- case Token::ASSIGN_BIT_OR: return Token::BIT_OR;
- case Token::ASSIGN_BIT_XOR: return Token::BIT_XOR;
- case Token::ASSIGN_BIT_AND: return Token::BIT_AND;
- case Token::ASSIGN_SHL: return Token::SHL;
- case Token::ASSIGN_SAR: return Token::SAR;
- case Token::ASSIGN_SHR: return Token::SHR;
- case Token::ASSIGN_ADD: return Token::ADD;
- case Token::ASSIGN_SUB: return Token::SUB;
- case Token::ASSIGN_MUL: return Token::MUL;
- case Token::ASSIGN_DIV: return Token::DIV;
- case Token::ASSIGN_MOD: return Token::MOD;
- default: UNREACHABLE();
- }
- return Token::ILLEGAL;
-}
-
-
-bool FunctionLiteral::AllowsLazyCompilation() {
- return scope()->AllowsLazyCompilation();
-}
-
-
-bool FunctionLiteral::AllowsLazyCompilationWithoutContext() {
- return scope()->AllowsLazyCompilationWithoutContext();
-}
-
-
-int FunctionLiteral::start_position() const {
- return scope()->start_position();
-}
-
-
-int FunctionLiteral::end_position() const {
- return scope()->end_position();
-}
-
-
-LanguageMode FunctionLiteral::language_mode() const {
- return scope()->language_mode();
-}
-
-
-QmlModeFlag FunctionLiteral::qml_mode_flag() const {
- return scope()->qml_mode_flag();
-}
-
-
-ObjectLiteral::Property::Property(Literal* key,
- Expression* value,
- Isolate* isolate) {
- emit_store_ = true;
- key_ = key;
- value_ = value;
- Object* k = *key->handle();
- if (k->IsInternalizedString() &&
- isolate->heap()->proto_string()->Equals(String::cast(k))) {
- kind_ = PROTOTYPE;
- } else if (value_->AsMaterializedLiteral() != NULL) {
- kind_ = MATERIALIZED_LITERAL;
- } else if (value_->AsLiteral() != NULL) {
- kind_ = CONSTANT;
- } else {
- kind_ = COMPUTED;
- }
-}
-
-
-ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
- emit_store_ = true;
- value_ = value;
- kind_ = is_getter ? GETTER : SETTER;
-}
-
-
-bool ObjectLiteral::Property::IsCompileTimeValue() {
- return kind_ == CONSTANT ||
- (kind_ == MATERIALIZED_LITERAL &&
- CompileTimeValue::IsCompileTimeValue(value_));
-}
-
-
-void ObjectLiteral::Property::set_emit_store(bool emit_store) {
- emit_store_ = emit_store;
-}
-
-
-bool ObjectLiteral::Property::emit_store() {
- return emit_store_;
-}
-
-
-bool IsEqualString(void* first, void* second) {
- ASSERT((*reinterpret_cast<String**>(first))->IsString());
- ASSERT((*reinterpret_cast<String**>(second))->IsString());
- Handle<String> h1(reinterpret_cast<String**>(first));
- Handle<String> h2(reinterpret_cast<String**>(second));
- return (*h1)->Equals(*h2);
-}
-
-
-bool IsEqualNumber(void* first, void* second) {
- ASSERT((*reinterpret_cast<Object**>(first))->IsNumber());
- ASSERT((*reinterpret_cast<Object**>(second))->IsNumber());
-
- Handle<Object> h1(reinterpret_cast<Object**>(first));
- Handle<Object> h2(reinterpret_cast<Object**>(second));
- if (h1->IsSmi()) {
- return h2->IsSmi() && *h1 == *h2;
- }
- if (h2->IsSmi()) return false;
- Handle<HeapNumber> n1 = Handle<HeapNumber>::cast(h1);
- Handle<HeapNumber> n2 = Handle<HeapNumber>::cast(h2);
- ASSERT(isfinite(n1->value()));
- ASSERT(isfinite(n2->value()));
- return n1->value() == n2->value();
-}
-
-
-void ObjectLiteral::CalculateEmitStore(Zone* zone) {
- ZoneAllocationPolicy allocator(zone);
-
- ZoneHashMap table(Literal::Match, ZoneHashMap::kDefaultHashMapCapacity,
- allocator);
- for (int i = properties()->length() - 1; i >= 0; i--) {
- ObjectLiteral::Property* property = properties()->at(i);
- Literal* literal = property->key();
- if (literal->handle()->IsNull()) continue;
- uint32_t hash = literal->Hash();
- // If the key of a computed property is in the table, do not emit
- // a store for the property later.
- if (property->kind() == ObjectLiteral::Property::COMPUTED &&
- table.Lookup(literal, hash, false, allocator) != NULL) {
- property->set_emit_store(false);
- } else {
- // Add key to the table.
- table.Lookup(literal, hash, true, allocator);
- }
- }
-}
-
-
-void TargetCollector::AddTarget(Label* target, Zone* zone) {
- // Add the label to the collector, but discard duplicates.
- int length = targets_.length();
- for (int i = 0; i < length; i++) {
- if (targets_[i] == target) return;
- }
- targets_.Add(target, zone);
-}
-
-
-bool UnaryOperation::ResultOverwriteAllowed() {
- switch (op_) {
- case Token::BIT_NOT:
- case Token::SUB:
- return true;
- default:
- return false;
- }
-}
-
-
-bool BinaryOperation::ResultOverwriteAllowed() {
- switch (op_) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- return false;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- return true;
- default:
- UNREACHABLE();
- }
- return false;
-}
-
-
-static bool IsTypeof(Expression* expr) {
- UnaryOperation* maybe_unary = expr->AsUnaryOperation();
- return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
-}
-
-
-// Check for the pattern: typeof <expression> equals <string literal>.
-static bool MatchLiteralCompareTypeof(Expression* left,
- Token::Value op,
- Expression* right,
- Expression** expr,
- Handle<String>* check) {
- if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
- *expr = left->AsUnaryOperation()->expression();
- *check = Handle<String>::cast(right->AsLiteral()->handle());
- return true;
- }
- return false;
-}
-
-
-bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
- Handle<String>* check) {
- return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) ||
- MatchLiteralCompareTypeof(right_, op_, left_, expr, check);
-}
-
-
-static bool IsVoidOfLiteral(Expression* expr) {
- UnaryOperation* maybe_unary = expr->AsUnaryOperation();
- return maybe_unary != NULL &&
- maybe_unary->op() == Token::VOID &&
- maybe_unary->expression()->AsLiteral() != NULL;
-}
-
-
-// Check for the pattern: void <literal> equals <expression>
-static bool MatchLiteralCompareUndefined(Expression* left,
- Token::Value op,
- Expression* right,
- Expression** expr) {
- if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
- *expr = right;
- return true;
- }
- return false;
-}
-
-
-bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
- return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
- MatchLiteralCompareUndefined(right_, op_, left_, expr);
-}
-
-
-// Check for the pattern: null equals <expression>
-static bool MatchLiteralCompareNull(Expression* left,
- Token::Value op,
- Expression* right,
- Expression** expr) {
- if (left->IsNullLiteral() && Token::IsEqualityOp(op)) {
- *expr = right;
- return true;
- }
- return false;
-}
-
-
-bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
- return MatchLiteralCompareNull(left_, op_, right_, expr) ||
- MatchLiteralCompareNull(right_, op_, left_, expr);
-}
-
-
-// ----------------------------------------------------------------------------
-// Inlining support
-
-bool Declaration::IsInlineable() const {
- return proxy()->var()->IsStackAllocated();
-}
-
-bool FunctionDeclaration::IsInlineable() const {
- return false;
-}
-
-
-// ----------------------------------------------------------------------------
-// Recording of type feedback
-
-void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
- Zone* zone) {
- // Record type feedback from the oracle in the AST.
- is_uninitialized_ = oracle->LoadIsUninitialized(this);
- if (is_uninitialized_) return;
-
- is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
- receiver_types_.Clear();
- if (key()->IsPropertyName()) {
- ArrayLengthStub array_stub(Code::LOAD_IC);
- FunctionPrototypeStub proto_stub(Code::LOAD_IC);
- StringLengthStub string_stub(Code::LOAD_IC, false);
- if (oracle->LoadIsStub(this, &array_stub)) {
- is_array_length_ = true;
- } else if (oracle->LoadIsStub(this, &string_stub)) {
- is_string_length_ = true;
- } else if (oracle->LoadIsStub(this, &proto_stub)) {
- is_function_prototype_ = true;
- } else {
- Literal* lit_key = key()->AsLiteral();
- ASSERT(lit_key != NULL && lit_key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->handle());
- oracle->LoadReceiverTypes(this, name, &receiver_types_);
- }
- } else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
- is_string_access_ = true;
- } else if (is_monomorphic_) {
- receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this),
- zone);
- } else if (oracle->LoadIsPolymorphic(this)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
- oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_);
- }
-}
-
-
-void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
- Zone* zone) {
- Property* prop = target()->AsProperty();
- ASSERT(prop != NULL);
- TypeFeedbackId id = AssignmentFeedbackId();
- is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
- receiver_types_.Clear();
- if (prop->key()->IsPropertyName()) {
- Literal* lit_key = prop->key()->AsLiteral();
- ASSERT(lit_key != NULL && lit_key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->handle());
- oracle->StoreReceiverTypes(this, name, &receiver_types_);
- } else if (is_monomorphic_) {
- // Record receiver type for monomorphic keyed stores.
- receiver_types_.Add(oracle->StoreMonomorphicReceiverType(id), zone);
- } else if (oracle->StoreIsPolymorphic(id)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
- oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
- }
-}
-
-
-void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
- Zone* zone) {
- TypeFeedbackId id = CountStoreFeedbackId();
- is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
- receiver_types_.Clear();
- if (is_monomorphic_) {
- // Record receiver type for monomorphic keyed stores.
- receiver_types_.Add(
- oracle->StoreMonomorphicReceiverType(id), zone);
- } else if (oracle->StoreIsPolymorphic(id)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
- oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
- }
-}
-
-
-void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- TypeInfo info = oracle->SwitchType(this);
- if (info.IsUninitialized()) info = TypeInfo::Unknown();
- if (info.IsSmi()) {
- compare_type_ = SMI_ONLY;
- } else if (info.IsInternalizedString()) {
- compare_type_ = NAME_ONLY;
- } else if (info.IsNonInternalizedString()) {
- compare_type_ = STRING_ONLY;
- } else if (info.IsNonPrimitive()) {
- compare_type_ = OBJECT_ONLY;
- } else {
- ASSERT(compare_type_ == NONE);
- }
-}
-
-
-bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
- // If there is an interceptor, we can't compute the target for a direct call.
- if (type->has_named_interceptor()) return false;
-
- if (check_type_ == RECEIVER_MAP_CHECK) {
- // For primitive checks the holder is set up to point to the corresponding
- // prototype object, i.e. one step of the algorithm below has been already
- // performed. For non-primitive checks we clear it to allow computing
- // targets for polymorphic calls.
- holder_ = Handle<JSObject>::null();
- }
- LookupResult lookup(type->GetIsolate());
- while (true) {
- type->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsFound()) {
- switch (lookup.type()) {
- case CONSTANT_FUNCTION:
- // We surely know the target for a constant function.
- target_ =
- Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
- return true;
- case NORMAL:
- case FIELD:
- case CALLBACKS:
- case HANDLER:
- case INTERCEPTOR:
- // We don't know the target.
- return false;
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- }
- // If we reach the end of the prototype chain, we don't know the target.
- if (!type->prototype()->IsJSObject()) return false;
- // Go up the prototype chain, recording where we are currently.
- holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
- if (!holder_->HasFastProperties()) return false;
- type = Handle<Map>(holder()->map());
- }
-}
-
-
-bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
- LookupResult* lookup) {
- target_ = Handle<JSFunction>::null();
- cell_ = Handle<JSGlobalPropertyCell>::null();
- ASSERT(lookup->IsFound() &&
- lookup->type() == NORMAL &&
- lookup->holder() == *global);
- cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(lookup));
- if (cell_->value()->IsJSFunction()) {
- Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
- // If the function is in new space we assume it's more likely to
- // change and thus prefer the general IC code.
- if (!HEAP->InNewSpace(*candidate)) {
- target_ = candidate;
- return true;
- }
- }
- return false;
-}
-
-
-void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
- CallKind call_kind) {
- is_monomorphic_ = oracle->CallIsMonomorphic(this);
- Property* property = expression()->AsProperty();
- if (property == NULL) {
- if (VariableProxy *proxy = expression()->AsVariableProxy()) {
- if (proxy->var()->IsUnallocated() && proxy->var()->is_qml_global())
- return;
- }
-
- // Function call. Specialize for monomorphic calls.
- if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
- } else {
- // Method call. Specialize for the receiver types seen at runtime.
- Literal* key = property->key()->AsLiteral();
- ASSERT(key != NULL && key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(key->handle());
- receiver_types_.Clear();
- oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- int length = receiver_types_.length();
- for (int i = 0; i < length; i++) {
- Handle<Map> map = receiver_types_.at(i);
- ASSERT(!map.is_null() && *map != NULL);
- }
- }
-#endif
- check_type_ = oracle->GetCallCheckType(this);
- if (is_monomorphic_) {
- Handle<Map> map;
- if (receiver_types_.length() > 0) {
- ASSERT(check_type_ == RECEIVER_MAP_CHECK);
- map = receiver_types_.at(0);
- } else {
- ASSERT(check_type_ != RECEIVER_MAP_CHECK);
- holder_ = Handle<JSObject>(
- oracle->GetPrototypeForPrimitiveCheck(check_type_));
- map = Handle<Map>(holder_->map());
- }
- is_monomorphic_ = ComputeTarget(map, name);
- }
- }
-}
-
-
-void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
- if (is_monomorphic_) {
- target_ = oracle->GetCallNewTarget(this);
- elements_kind_ = oracle->GetCallNewElementsKind(this);
- }
-}
-
-
-void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this)
- ? oracle->GetObjectLiteralStoreMap(this)
- : Handle<Map>::null();
-}
-
-
-// ----------------------------------------------------------------------------
-// Implementation of AstVisitor
-
-void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
- for (int i = 0; i < declarations->length(); i++) {
- Visit(declarations->at(i));
- }
-}
-
-
-void AstVisitor::VisitStatements(ZoneList<Statement*>* statements) {
- for (int i = 0; i < statements->length(); i++) {
- Visit(statements->at(i));
- }
-}
-
-
-void AstVisitor::VisitExpressions(ZoneList<Expression*>* expressions) {
- for (int i = 0; i < expressions->length(); i++) {
- // The variable statement visiting code may pass NULL expressions
- // to this code. Maybe this should be handled by introducing an
- // undefined expression or literal? Revisit this code if this
- // changes
- Expression* expression = expressions->at(i);
- if (expression != NULL) Visit(expression);
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// Regular expressions
-
-#define MAKE_ACCEPT(Name) \
- void* RegExp##Name::Accept(RegExpVisitor* visitor, void* data) { \
- return visitor->Visit##Name(this, data); \
- }
-FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ACCEPT)
-#undef MAKE_ACCEPT
-
-#define MAKE_TYPE_CASE(Name) \
- RegExp##Name* RegExpTree::As##Name() { \
- return NULL; \
- } \
- bool RegExpTree::Is##Name() { return false; }
-FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
-#undef MAKE_TYPE_CASE
-
-#define MAKE_TYPE_CASE(Name) \
- RegExp##Name* RegExp##Name::As##Name() { \
- return this; \
- } \
- bool RegExp##Name::Is##Name() { return true; }
-FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
-#undef MAKE_TYPE_CASE
-
-
-static Interval ListCaptureRegisters(ZoneList<RegExpTree*>* children) {
- Interval result = Interval::Empty();
- for (int i = 0; i < children->length(); i++)
- result = result.Union(children->at(i)->CaptureRegisters());
- return result;
-}
-
-
-Interval RegExpAlternative::CaptureRegisters() {
- return ListCaptureRegisters(nodes());
-}
-
-
-Interval RegExpDisjunction::CaptureRegisters() {
- return ListCaptureRegisters(alternatives());
-}
-
-
-Interval RegExpLookahead::CaptureRegisters() {
- return body()->CaptureRegisters();
-}
-
-
-Interval RegExpCapture::CaptureRegisters() {
- Interval self(StartRegister(index()), EndRegister(index()));
- return self.Union(body()->CaptureRegisters());
-}
-
-
-Interval RegExpQuantifier::CaptureRegisters() {
- return body()->CaptureRegisters();
-}
-
-
-bool RegExpAssertion::IsAnchoredAtStart() {
- return type() == RegExpAssertion::START_OF_INPUT;
-}
-
-
-bool RegExpAssertion::IsAnchoredAtEnd() {
- return type() == RegExpAssertion::END_OF_INPUT;
-}
-
-
-bool RegExpAlternative::IsAnchoredAtStart() {
- ZoneList<RegExpTree*>* nodes = this->nodes();
- for (int i = 0; i < nodes->length(); i++) {
- RegExpTree* node = nodes->at(i);
- if (node->IsAnchoredAtStart()) { return true; }
- if (node->max_match() > 0) { return false; }
- }
- return false;
-}
-
-
-bool RegExpAlternative::IsAnchoredAtEnd() {
- ZoneList<RegExpTree*>* nodes = this->nodes();
- for (int i = nodes->length() - 1; i >= 0; i--) {
- RegExpTree* node = nodes->at(i);
- if (node->IsAnchoredAtEnd()) { return true; }
- if (node->max_match() > 0) { return false; }
- }
- return false;
-}
-
-
-bool RegExpDisjunction::IsAnchoredAtStart() {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- for (int i = 0; i < alternatives->length(); i++) {
- if (!alternatives->at(i)->IsAnchoredAtStart())
- return false;
- }
- return true;
-}
-
-
-bool RegExpDisjunction::IsAnchoredAtEnd() {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- for (int i = 0; i < alternatives->length(); i++) {
- if (!alternatives->at(i)->IsAnchoredAtEnd())
- return false;
- }
- return true;
-}
-
-
-bool RegExpLookahead::IsAnchoredAtStart() {
- return is_positive() && body()->IsAnchoredAtStart();
-}
-
-
-bool RegExpCapture::IsAnchoredAtStart() {
- return body()->IsAnchoredAtStart();
-}
-
-
-bool RegExpCapture::IsAnchoredAtEnd() {
- return body()->IsAnchoredAtEnd();
-}
-
-
-// Convert regular expression trees to a simple sexp representation.
-// This representation should be different from the input grammar
-// in as many cases as possible, to make it more difficult for incorrect
-// parses to look as correct ones which is likely if the input and
-// output formats are alike.
-class RegExpUnparser: public RegExpVisitor {
- public:
- explicit RegExpUnparser(Zone* zone);
- void VisitCharacterRange(CharacterRange that);
- SmartArrayPointer<const char> ToString() { return stream_.ToCString(); }
-#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, void* data);
- FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
-#undef MAKE_CASE
- private:
- StringStream* stream() { return &stream_; }
- HeapStringAllocator alloc_;
- StringStream stream_;
- Zone* zone_;
-};
-
-
-RegExpUnparser::RegExpUnparser(Zone* zone) : stream_(&alloc_), zone_(zone) {
-}
-
-
-void* RegExpUnparser::VisitDisjunction(RegExpDisjunction* that, void* data) {
- stream()->Add("(|");
- for (int i = 0; i < that->alternatives()->length(); i++) {
- stream()->Add(" ");
- that->alternatives()->at(i)->Accept(this, data);
- }
- stream()->Add(")");
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitAlternative(RegExpAlternative* that, void* data) {
- stream()->Add("(:");
- for (int i = 0; i < that->nodes()->length(); i++) {
- stream()->Add(" ");
- that->nodes()->at(i)->Accept(this, data);
- }
- stream()->Add(")");
- return NULL;
-}
-
-
-void RegExpUnparser::VisitCharacterRange(CharacterRange that) {
- stream()->Add("%k", that.from());
- if (!that.IsSingleton()) {
- stream()->Add("-%k", that.to());
- }
-}
-
-
-
-void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
- void* data) {
- if (that->is_negated())
- stream()->Add("^");
- stream()->Add("[");
- for (int i = 0; i < that->ranges(zone_)->length(); i++) {
- if (i > 0) stream()->Add(" ");
- VisitCharacterRange(that->ranges(zone_)->at(i));
- }
- stream()->Add("]");
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) {
- switch (that->type()) {
- case RegExpAssertion::START_OF_INPUT:
- stream()->Add("@^i");
- break;
- case RegExpAssertion::END_OF_INPUT:
- stream()->Add("@$i");
- break;
- case RegExpAssertion::START_OF_LINE:
- stream()->Add("@^l");
- break;
- case RegExpAssertion::END_OF_LINE:
- stream()->Add("@$l");
- break;
- case RegExpAssertion::BOUNDARY:
- stream()->Add("@b");
- break;
- case RegExpAssertion::NON_BOUNDARY:
- stream()->Add("@B");
- break;
- }
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) {
- stream()->Add("'");
- Vector<const uc16> chardata = that->data();
- for (int i = 0; i < chardata.length(); i++) {
- stream()->Add("%k", chardata[i]);
- }
- stream()->Add("'");
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitText(RegExpText* that, void* data) {
- if (that->elements()->length() == 1) {
- that->elements()->at(0).data.u_atom->Accept(this, data);
- } else {
- stream()->Add("(!");
- for (int i = 0; i < that->elements()->length(); i++) {
- stream()->Add(" ");
- that->elements()->at(i).data.u_atom->Accept(this, data);
- }
- stream()->Add(")");
- }
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitQuantifier(RegExpQuantifier* that, void* data) {
- stream()->Add("(# %i ", that->min());
- if (that->max() == RegExpTree::kInfinity) {
- stream()->Add("- ");
- } else {
- stream()->Add("%i ", that->max());
- }
- stream()->Add(that->is_greedy() ? "g " : that->is_possessive() ? "p " : "n ");
- that->body()->Accept(this, data);
- stream()->Add(")");
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitCapture(RegExpCapture* that, void* data) {
- stream()->Add("(^ ");
- that->body()->Accept(this, data);
- stream()->Add(")");
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitLookahead(RegExpLookahead* that, void* data) {
- stream()->Add("(-> ");
- stream()->Add(that->is_positive() ? "+ " : "- ");
- that->body()->Accept(this, data);
- stream()->Add(")");
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitBackReference(RegExpBackReference* that,
- void* data) {
- stream()->Add("(<- %i)", that->index());
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
- stream()->Put('%');
- return NULL;
-}
-
-
-SmartArrayPointer<const char> RegExpTree::ToString(Zone* zone) {
- RegExpUnparser unparser(zone);
- Accept(&unparser, NULL);
- return unparser.ToString();
-}
-
-
-RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
- : alternatives_(alternatives) {
- ASSERT(alternatives->length() > 1);
- RegExpTree* first_alternative = alternatives->at(0);
- min_match_ = first_alternative->min_match();
- max_match_ = first_alternative->max_match();
- for (int i = 1; i < alternatives->length(); i++) {
- RegExpTree* alternative = alternatives->at(i);
- min_match_ = Min(min_match_, alternative->min_match());
- max_match_ = Max(max_match_, alternative->max_match());
- }
-}
-
-
-static int IncreaseBy(int previous, int increase) {
- if (RegExpTree::kInfinity - previous < increase) {
- return RegExpTree::kInfinity;
- } else {
- return previous + increase;
- }
-}
-
-RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
- : nodes_(nodes) {
- ASSERT(nodes->length() > 1);
- min_match_ = 0;
- max_match_ = 0;
- for (int i = 0; i < nodes->length(); i++) {
- RegExpTree* node = nodes->at(i);
- int node_min_match = node->min_match();
- min_match_ = IncreaseBy(min_match_, node_min_match);
- int node_max_match = node->max_match();
- max_match_ = IncreaseBy(max_match_, node_max_match);
- }
-}
-
-
-CaseClause::CaseClause(Isolate* isolate,
- Expression* label,
- ZoneList<Statement*>* statements,
- int pos)
- : label_(label),
- statements_(statements),
- position_(pos),
- compare_type_(NONE),
- compare_id_(AstNode::GetNextId(isolate)),
- entry_id_(AstNode::GetNextId(isolate)) {
-}
-
-
-#define REGULAR_NODE(NodeType) \
- void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- }
-#define DONT_OPTIMIZE_NODE(NodeType) \
- void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_flag(kDontOptimize); \
- add_flag(kDontInline); \
- add_flag(kDontSelfOptimize); \
- }
-#define DONT_INLINE_NODE(NodeType) \
- void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_flag(kDontInline); \
- }
-#define DONT_SELFOPTIMIZE_NODE(NodeType) \
- void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_flag(kDontSelfOptimize); \
- }
-#define DONT_CACHE_NODE(NodeType) \
- void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_flag(kDontOptimize); \
- add_flag(kDontInline); \
- add_flag(kDontSelfOptimize); \
- add_flag(kDontCache); \
- }
-
-REGULAR_NODE(VariableDeclaration)
-REGULAR_NODE(FunctionDeclaration)
-REGULAR_NODE(Block)
-REGULAR_NODE(ExpressionStatement)
-REGULAR_NODE(EmptyStatement)
-REGULAR_NODE(IfStatement)
-REGULAR_NODE(ContinueStatement)
-REGULAR_NODE(BreakStatement)
-REGULAR_NODE(ReturnStatement)
-REGULAR_NODE(SwitchStatement)
-REGULAR_NODE(Conditional)
-REGULAR_NODE(Literal)
-REGULAR_NODE(ObjectLiteral)
-REGULAR_NODE(RegExpLiteral)
-REGULAR_NODE(Assignment)
-REGULAR_NODE(Throw)
-REGULAR_NODE(Property)
-REGULAR_NODE(UnaryOperation)
-REGULAR_NODE(CountOperation)
-REGULAR_NODE(BinaryOperation)
-REGULAR_NODE(CompareOperation)
-REGULAR_NODE(ThisFunction)
-REGULAR_NODE(Call)
-REGULAR_NODE(CallNew)
-// In theory, for VariableProxy we'd have to add:
-// if (node->var()->IsLookupSlot()) add_flag(kDontInline);
-// But node->var() is usually not bound yet at VariableProxy creation time, and
-// LOOKUP variables only result from constructs that cannot be inlined anyway.
-REGULAR_NODE(VariableProxy)
-
-// We currently do not optimize any modules.
-DONT_OPTIMIZE_NODE(ModuleDeclaration)
-DONT_OPTIMIZE_NODE(ImportDeclaration)
-DONT_OPTIMIZE_NODE(ExportDeclaration)
-DONT_OPTIMIZE_NODE(ModuleVariable)
-DONT_OPTIMIZE_NODE(ModulePath)
-DONT_OPTIMIZE_NODE(ModuleUrl)
-DONT_OPTIMIZE_NODE(ModuleStatement)
-DONT_OPTIMIZE_NODE(WithStatement)
-DONT_OPTIMIZE_NODE(TryCatchStatement)
-DONT_OPTIMIZE_NODE(TryFinallyStatement)
-DONT_OPTIMIZE_NODE(DebuggerStatement)
-DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
-
-DONT_INLINE_NODE(ArrayLiteral) // TODO(1322): Allow materialized literals.
-DONT_INLINE_NODE(FunctionLiteral)
-
-DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
-DONT_SELFOPTIMIZE_NODE(WhileStatement)
-DONT_SELFOPTIMIZE_NODE(ForStatement)
-DONT_SELFOPTIMIZE_NODE(ForInStatement)
-
-DONT_CACHE_NODE(ModuleLiteral)
-
-void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
- increase_node_count();
- if (node->is_jsruntime()) {
- // Don't try to inline JS runtime calls because we don't (currently) even
- // optimize them.
- add_flag(kDontInline);
- } else if (node->function()->intrinsic_type == Runtime::INLINE &&
- (node->name()->IsOneByteEqualTo(
- STATIC_ASCII_VECTOR("_ArgumentsLength")) ||
- node->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_Arguments")))) {
- // Don't inline the %_ArgumentsLength or %_Arguments because their
- // implementation will not work. There is no stack frame to get them
- // from.
- add_flag(kDontInline);
- }
-}
-
-#undef REGULAR_NODE
-#undef DONT_OPTIMIZE_NODE
-#undef DONT_INLINE_NODE
-#undef DONT_SELFOPTIMIZE_NODE
-#undef DONT_CACHE_NODE
-
-
-Handle<String> Literal::ToString() {
- if (handle_->IsString()) return Handle<String>::cast(handle_);
- ASSERT(handle_->IsNumber());
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- const char* str;
- if (handle_->IsSmi()) {
- // Optimization only, the heap number case would subsume this.
- OS::SNPrintF(buffer, "%d", Smi::cast(*handle_)->value());
- str = arr;
- } else {
- str = DoubleToCString(handle_->Number(), buffer);
- }
- return FACTORY->NewStringFromAscii(CStrVector(str));
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/ast.h b/src/3rdparty/v8/src/ast.h
deleted file mode 100644
index f7e23e8..0000000
--- a/src/3rdparty/v8/src/ast.h
+++ /dev/null
@@ -1,2946 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_AST_H_
-#define V8_AST_H_
-
-#include "v8.h"
-
-#include "assembler.h"
-#include "factory.h"
-#include "isolate.h"
-#include "jsregexp.h"
-#include "list-inl.h"
-#include "runtime.h"
-#include "small-pointer-list.h"
-#include "smart-pointers.h"
-#include "token.h"
-#include "utils.h"
-#include "variables.h"
-#include "interface.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// The abstract syntax tree is an intermediate, light-weight
-// representation of the parsed JavaScript code suitable for
-// compilation to native code.
-
-// Nodes are allocated in a separate zone, which allows faster
-// allocation and constant-time deallocation of the entire syntax
-// tree.
-
-
-// ----------------------------------------------------------------------------
-// Nodes of the abstract syntax tree. Only concrete classes are
-// enumerated here.
-
-#define DECLARATION_NODE_LIST(V) \
- V(VariableDeclaration) \
- V(FunctionDeclaration) \
- V(ModuleDeclaration) \
- V(ImportDeclaration) \
- V(ExportDeclaration) \
-
-#define MODULE_NODE_LIST(V) \
- V(ModuleLiteral) \
- V(ModuleVariable) \
- V(ModulePath) \
- V(ModuleUrl)
-
-#define STATEMENT_NODE_LIST(V) \
- V(Block) \
- V(ModuleStatement) \
- V(ExpressionStatement) \
- V(EmptyStatement) \
- V(IfStatement) \
- V(ContinueStatement) \
- V(BreakStatement) \
- V(ReturnStatement) \
- V(WithStatement) \
- V(SwitchStatement) \
- V(DoWhileStatement) \
- V(WhileStatement) \
- V(ForStatement) \
- V(ForInStatement) \
- V(TryCatchStatement) \
- V(TryFinallyStatement) \
- V(DebuggerStatement)
-
-#define EXPRESSION_NODE_LIST(V) \
- V(FunctionLiteral) \
- V(SharedFunctionInfoLiteral) \
- V(Conditional) \
- V(VariableProxy) \
- V(Literal) \
- V(RegExpLiteral) \
- V(ObjectLiteral) \
- V(ArrayLiteral) \
- V(Assignment) \
- V(Throw) \
- V(Property) \
- V(Call) \
- V(CallNew) \
- V(CallRuntime) \
- V(UnaryOperation) \
- V(CountOperation) \
- V(BinaryOperation) \
- V(CompareOperation) \
- V(ThisFunction)
-
-#define AST_NODE_LIST(V) \
- DECLARATION_NODE_LIST(V) \
- MODULE_NODE_LIST(V) \
- STATEMENT_NODE_LIST(V) \
- EXPRESSION_NODE_LIST(V)
-
-// Forward declarations
-class AstConstructionVisitor;
-template<class> class AstNodeFactory;
-class AstVisitor;
-class Declaration;
-class Module;
-class BreakableStatement;
-class Expression;
-class IterationStatement;
-class MaterializedLiteral;
-class Statement;
-class TargetCollector;
-class TypeFeedbackOracle;
-
-class RegExpAlternative;
-class RegExpAssertion;
-class RegExpAtom;
-class RegExpBackReference;
-class RegExpCapture;
-class RegExpCharacterClass;
-class RegExpCompiler;
-class RegExpDisjunction;
-class RegExpEmpty;
-class RegExpLookahead;
-class RegExpQuantifier;
-class RegExpText;
-
-#define DEF_FORWARD_DECLARATION(type) class type;
-AST_NODE_LIST(DEF_FORWARD_DECLARATION)
-#undef DEF_FORWARD_DECLARATION
-
-
-// Typedef only introduced to avoid unreadable code.
-// Please do appreciate the required space in "> >".
-typedef ZoneList<Handle<String> > ZoneStringList;
-typedef ZoneList<Handle<Object> > ZoneObjectList;
-
-
-#define DECLARE_NODE_TYPE(type) \
- virtual void Accept(AstVisitor* v); \
- virtual AstNode::Type node_type() const { return AstNode::k##type; } \
- template<class> friend class AstNodeFactory;
-
-
-enum AstPropertiesFlag {
- kDontInline,
- kDontOptimize,
- kDontSelfOptimize,
- kDontSoftInline,
- kDontCache
-};
-
-
-class AstProperties BASE_EMBEDDED {
- public:
- class Flags : public EnumSet<AstPropertiesFlag, int> {};
-
- AstProperties() : node_count_(0) { }
-
- Flags* flags() { return &flags_; }
- int node_count() { return node_count_; }
- void add_node_count(int count) { node_count_ += count; }
-
- private:
- Flags flags_;
- int node_count_;
-};
-
-
-class AstNode: public ZoneObject {
- public:
-#define DECLARE_TYPE_ENUM(type) k##type,
- enum Type {
- AST_NODE_LIST(DECLARE_TYPE_ENUM)
- kInvalid = -1
- };
-#undef DECLARE_TYPE_ENUM
-
- void* operator new(size_t size, Zone* zone) {
- return zone->New(static_cast<int>(size));
- }
-
- AstNode() { }
-
- virtual ~AstNode() { }
-
- virtual void Accept(AstVisitor* v) = 0;
- virtual Type node_type() const = 0;
-
- // Type testing & conversion functions overridden by concrete subclasses.
-#define DECLARE_NODE_FUNCTIONS(type) \
- bool Is##type() { return node_type() == AstNode::k##type; } \
- type* As##type() { return Is##type() ? reinterpret_cast<type*>(this) : NULL; }
- AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
-#undef DECLARE_NODE_FUNCTIONS
-
- virtual TargetCollector* AsTargetCollector() { return NULL; }
- virtual BreakableStatement* AsBreakableStatement() { return NULL; }
- virtual IterationStatement* AsIterationStatement() { return NULL; }
- virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
-
- protected:
- static int GetNextId(Isolate* isolate) {
- return ReserveIdRange(isolate, 1);
- }
-
- static int ReserveIdRange(Isolate* isolate, int n) {
- int tmp = isolate->ast_node_id();
- isolate->set_ast_node_id(tmp + n);
- return tmp;
- }
-
- // Some nodes re-use bailout IDs for type feedback.
- static TypeFeedbackId reuse(BailoutId id) {
- return TypeFeedbackId(id.ToInt());
- }
-
-
- private:
- // Hidden to prevent accidental usage. It would have to load the
- // current zone from the TLS.
- void* operator new(size_t size);
-
- friend class CaseClause; // Generates AST IDs.
-};
-
-
-class Statement: public AstNode {
- public:
- Statement() : statement_pos_(RelocInfo::kNoPosition) {}
-
- bool IsEmpty() { return AsEmptyStatement() != NULL; }
-
- void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
- int statement_pos() const { return statement_pos_; }
-
- private:
- int statement_pos_;
-};
-
-
-class SmallMapList {
- public:
- SmallMapList() {}
- SmallMapList(int capacity, Zone* zone) : list_(capacity, zone) {}
-
- void Reserve(int capacity, Zone* zone) { list_.Reserve(capacity, zone); }
- void Clear() { list_.Clear(); }
- void Sort() { list_.Sort(); }
-
- bool is_empty() const { return list_.is_empty(); }
- int length() const { return list_.length(); }
-
- void Add(Handle<Map> handle, Zone* zone) {
- list_.Add(handle.location(), zone);
- }
-
- Handle<Map> at(int i) const {
- return Handle<Map>(list_.at(i));
- }
-
- Handle<Map> first() const { return at(0); }
- Handle<Map> last() const { return at(length() - 1); }
-
- private:
- // The list stores pointers to Map*, that is Map**, so it's GC safe.
- SmallPointerList<Map*> list_;
-
- DISALLOW_COPY_AND_ASSIGN(SmallMapList);
-};
-
-
-class Expression: public AstNode {
- public:
- enum Context {
- // Not assigned a context yet, or else will not be visited during
- // code generation.
- kUninitialized,
- // Evaluated for its side effects.
- kEffect,
- // Evaluated for its value (and side effects).
- kValue,
- // Evaluated for control flow (and side effects).
- kTest
- };
-
- virtual int position() const {
- UNREACHABLE();
- return 0;
- }
-
- virtual bool IsValidLeftHandSide() { return false; }
-
- // Helpers for ToBoolean conversion.
- virtual bool ToBooleanIsTrue() { return false; }
- virtual bool ToBooleanIsFalse() { return false; }
-
- // Symbols that cannot be parsed as array indices are considered property
- // names. We do not treat symbols that can be array indexes as property
- // names because [] for string objects is handled only by keyed ICs.
- virtual bool IsPropertyName() { return false; }
-
- // True iff the result can be safely overwritten (to avoid allocation).
- // False for operations that can return one of their operands.
- virtual bool ResultOverwriteAllowed() { return false; }
-
- // True iff the expression is a literal represented as a smi.
- bool IsSmiLiteral();
-
- // True iff the expression is a string literal.
- bool IsStringLiteral();
-
- // True iff the expression is the null literal.
- bool IsNullLiteral();
-
- // Type feedback information for assignments and properties.
- virtual bool IsMonomorphic() {
- UNREACHABLE();
- return false;
- }
- virtual SmallMapList* GetReceiverTypes() {
- UNREACHABLE();
- return NULL;
- }
- Handle<Map> GetMonomorphicReceiverType() {
- ASSERT(IsMonomorphic());
- SmallMapList* types = GetReceiverTypes();
- ASSERT(types != NULL && types->length() == 1);
- return types->at(0);
- }
-
- BailoutId id() const { return id_; }
- TypeFeedbackId test_id() const { return test_id_; }
-
- protected:
- explicit Expression(Isolate* isolate)
- : id_(GetNextId(isolate)),
- test_id_(GetNextId(isolate)) {}
-
- private:
- const BailoutId id_;
- const TypeFeedbackId test_id_;
-};
-
-
-class BreakableStatement: public Statement {
- public:
- enum Type {
- TARGET_FOR_ANONYMOUS,
- TARGET_FOR_NAMED_ONLY
- };
-
- // The labels associated with this statement. May be NULL;
- // if it is != NULL, guaranteed to contain at least one entry.
- ZoneStringList* labels() const { return labels_; }
-
- // Type testing & conversion.
- virtual BreakableStatement* AsBreakableStatement() { return this; }
-
- // Code generation
- Label* break_target() { return &break_target_; }
-
- // Testers.
- bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
-
- BailoutId EntryId() const { return entry_id_; }
- BailoutId ExitId() const { return exit_id_; }
-
- protected:
- BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type)
- : labels_(labels),
- type_(type),
- entry_id_(GetNextId(isolate)),
- exit_id_(GetNextId(isolate)) {
- ASSERT(labels == NULL || labels->length() > 0);
- }
-
-
- private:
- ZoneStringList* labels_;
- Type type_;
- Label break_target_;
- const BailoutId entry_id_;
- const BailoutId exit_id_;
-};
-
-
-class Block: public BreakableStatement {
- public:
- DECLARE_NODE_TYPE(Block)
-
- void AddStatement(Statement* statement, Zone* zone) {
- statements_.Add(statement, zone);
- }
-
- ZoneList<Statement*>* statements() { return &statements_; }
- bool is_initializer_block() const { return is_initializer_block_; }
-
- Scope* scope() const { return scope_; }
- void set_scope(Scope* scope) { scope_ = scope; }
-
- protected:
- Block(Isolate* isolate,
- ZoneStringList* labels,
- int capacity,
- bool is_initializer_block,
- Zone* zone)
- : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
- statements_(capacity, zone),
- is_initializer_block_(is_initializer_block),
- scope_(NULL) {
- }
-
- private:
- ZoneList<Statement*> statements_;
- bool is_initializer_block_;
- Scope* scope_;
-};
-
-
-class Declaration: public AstNode {
- public:
- VariableProxy* proxy() const { return proxy_; }
- VariableMode mode() const { return mode_; }
- Scope* scope() const { return scope_; }
- virtual InitializationFlag initialization() const = 0;
- virtual bool IsInlineable() const;
-
- protected:
- Declaration(VariableProxy* proxy,
- VariableMode mode,
- Scope* scope)
- : proxy_(proxy),
- mode_(mode),
- scope_(scope) {
- ASSERT(IsDeclaredVariableMode(mode));
- }
-
- private:
- VariableProxy* proxy_;
- VariableMode mode_;
-
- // Nested scope from which the declaration originated.
- Scope* scope_;
-};
-
-
-class VariableDeclaration: public Declaration {
- public:
- DECLARE_NODE_TYPE(VariableDeclaration)
-
- virtual InitializationFlag initialization() const {
- return mode() == VAR ? kCreatedInitialized : kNeedsInitialization;
- }
-
- protected:
- VariableDeclaration(VariableProxy* proxy,
- VariableMode mode,
- Scope* scope)
- : Declaration(proxy, mode, scope) {
- }
-};
-
-
-class FunctionDeclaration: public Declaration {
- public:
- DECLARE_NODE_TYPE(FunctionDeclaration)
-
- FunctionLiteral* fun() const { return fun_; }
- virtual InitializationFlag initialization() const {
- return kCreatedInitialized;
- }
- virtual bool IsInlineable() const;
-
- protected:
- FunctionDeclaration(VariableProxy* proxy,
- VariableMode mode,
- FunctionLiteral* fun,
- Scope* scope)
- : Declaration(proxy, mode, scope),
- fun_(fun) {
- // At the moment there are no "const functions" in JavaScript...
- ASSERT(mode == VAR || mode == LET);
- ASSERT(fun != NULL);
- }
-
- private:
- FunctionLiteral* fun_;
-};
-
-
-class ModuleDeclaration: public Declaration {
- public:
- DECLARE_NODE_TYPE(ModuleDeclaration)
-
- Module* module() const { return module_; }
- virtual InitializationFlag initialization() const {
- return kCreatedInitialized;
- }
-
- protected:
- ModuleDeclaration(VariableProxy* proxy,
- Module* module,
- Scope* scope)
- : Declaration(proxy, MODULE, scope),
- module_(module) {
- }
-
- private:
- Module* module_;
-};
-
-
-class ImportDeclaration: public Declaration {
- public:
- DECLARE_NODE_TYPE(ImportDeclaration)
-
- Module* module() const { return module_; }
- virtual InitializationFlag initialization() const {
- return kCreatedInitialized;
- }
-
- protected:
- ImportDeclaration(VariableProxy* proxy,
- Module* module,
- Scope* scope)
- : Declaration(proxy, LET, scope),
- module_(module) {
- }
-
- private:
- Module* module_;
-};
-
-
-class ExportDeclaration: public Declaration {
- public:
- DECLARE_NODE_TYPE(ExportDeclaration)
-
- virtual InitializationFlag initialization() const {
- return kCreatedInitialized;
- }
-
- protected:
- ExportDeclaration(VariableProxy* proxy, Scope* scope)
- : Declaration(proxy, LET, scope) {}
-};
-
-
-class Module: public AstNode {
- public:
- Interface* interface() const { return interface_; }
- Block* body() const { return body_; }
-
- protected:
- explicit Module(Zone* zone)
- : interface_(Interface::NewModule(zone)),
- body_(NULL) {}
- explicit Module(Interface* interface, Block* body = NULL)
- : interface_(interface),
- body_(body) {}
-
- private:
- Interface* interface_;
- Block* body_;
-};
-
-
-class ModuleLiteral: public Module {
- public:
- DECLARE_NODE_TYPE(ModuleLiteral)
-
- protected:
- ModuleLiteral(Block* body, Interface* interface) : Module(interface, body) {}
-};
-
-
-class ModuleVariable: public Module {
- public:
- DECLARE_NODE_TYPE(ModuleVariable)
-
- VariableProxy* proxy() const { return proxy_; }
-
- protected:
- inline explicit ModuleVariable(VariableProxy* proxy);
-
- private:
- VariableProxy* proxy_;
-};
-
-
-class ModulePath: public Module {
- public:
- DECLARE_NODE_TYPE(ModulePath)
-
- Module* module() const { return module_; }
- Handle<String> name() const { return name_; }
-
- protected:
- ModulePath(Module* module, Handle<String> name, Zone* zone)
- : Module(zone),
- module_(module),
- name_(name) {
- }
-
- private:
- Module* module_;
- Handle<String> name_;
-};
-
-
-class ModuleUrl: public Module {
- public:
- DECLARE_NODE_TYPE(ModuleUrl)
-
- Handle<String> url() const { return url_; }
-
- protected:
- ModuleUrl(Handle<String> url, Zone* zone)
- : Module(zone), url_(url) {
- }
-
- private:
- Handle<String> url_;
-};
-
-
-class ModuleStatement: public Statement {
- public:
- DECLARE_NODE_TYPE(ModuleStatement)
-
- VariableProxy* proxy() const { return proxy_; }
- Block* body() const { return body_; }
-
- protected:
- ModuleStatement(VariableProxy* proxy, Block* body)
- : proxy_(proxy),
- body_(body) {
- }
-
- private:
- VariableProxy* proxy_;
- Block* body_;
-};
-
-
-class IterationStatement: public BreakableStatement {
- public:
- // Type testing & conversion.
- virtual IterationStatement* AsIterationStatement() { return this; }
-
- Statement* body() const { return body_; }
-
- BailoutId OsrEntryId() const { return osr_entry_id_; }
- virtual BailoutId ContinueId() const = 0;
- virtual BailoutId StackCheckId() const = 0;
-
- // Code generation
- Label* continue_target() { return &continue_target_; }
-
- protected:
- IterationStatement(Isolate* isolate, ZoneStringList* labels)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
- body_(NULL),
- osr_entry_id_(GetNextId(isolate)) {
- }
-
- void Initialize(Statement* body) {
- body_ = body;
- }
-
- private:
- Statement* body_;
- Label continue_target_;
- const BailoutId osr_entry_id_;
-};
-
-
-class DoWhileStatement: public IterationStatement {
- public:
- DECLARE_NODE_TYPE(DoWhileStatement)
-
- void Initialize(Expression* cond, Statement* body) {
- IterationStatement::Initialize(body);
- cond_ = cond;
- }
-
- Expression* cond() const { return cond_; }
-
- // Position where condition expression starts. We need it to make
- // the loop's condition a breakable location.
- int condition_position() { return condition_position_; }
- void set_condition_position(int pos) { condition_position_ = pos; }
-
- virtual BailoutId ContinueId() const { return continue_id_; }
- virtual BailoutId StackCheckId() const { return back_edge_id_; }
- BailoutId BackEdgeId() const { return back_edge_id_; }
-
- protected:
- DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- cond_(NULL),
- condition_position_(-1),
- continue_id_(GetNextId(isolate)),
- back_edge_id_(GetNextId(isolate)) {
- }
-
- private:
- Expression* cond_;
- int condition_position_;
- const BailoutId continue_id_;
- const BailoutId back_edge_id_;
-};
-
-
-class WhileStatement: public IterationStatement {
- public:
- DECLARE_NODE_TYPE(WhileStatement)
-
- void Initialize(Expression* cond, Statement* body) {
- IterationStatement::Initialize(body);
- cond_ = cond;
- }
-
- Expression* cond() const { return cond_; }
- bool may_have_function_literal() const {
- return may_have_function_literal_;
- }
- void set_may_have_function_literal(bool value) {
- may_have_function_literal_ = value;
- }
-
- virtual BailoutId ContinueId() const { return EntryId(); }
- virtual BailoutId StackCheckId() const { return body_id_; }
- BailoutId BodyId() const { return body_id_; }
-
- protected:
- WhileStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- cond_(NULL),
- may_have_function_literal_(true),
- body_id_(GetNextId(isolate)) {
- }
-
- private:
- Expression* cond_;
- // True if there is a function literal subexpression in the condition.
- bool may_have_function_literal_;
- const BailoutId body_id_;
-};
-
-
-class ForStatement: public IterationStatement {
- public:
- DECLARE_NODE_TYPE(ForStatement)
-
- void Initialize(Statement* init,
- Expression* cond,
- Statement* next,
- Statement* body) {
- IterationStatement::Initialize(body);
- init_ = init;
- cond_ = cond;
- next_ = next;
- }
-
- Statement* init() const { return init_; }
- Expression* cond() const { return cond_; }
- Statement* next() const { return next_; }
-
- bool may_have_function_literal() const {
- return may_have_function_literal_;
- }
- void set_may_have_function_literal(bool value) {
- may_have_function_literal_ = value;
- }
-
- virtual BailoutId ContinueId() const { return continue_id_; }
- virtual BailoutId StackCheckId() const { return body_id_; }
- BailoutId BodyId() const { return body_id_; }
-
- bool is_fast_smi_loop() { return loop_variable_ != NULL; }
- Variable* loop_variable() { return loop_variable_; }
- void set_loop_variable(Variable* var) { loop_variable_ = var; }
-
- protected:
- ForStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- init_(NULL),
- cond_(NULL),
- next_(NULL),
- may_have_function_literal_(true),
- loop_variable_(NULL),
- continue_id_(GetNextId(isolate)),
- body_id_(GetNextId(isolate)) {
- }
-
- private:
- Statement* init_;
- Expression* cond_;
- Statement* next_;
- // True if there is a function literal subexpression in the condition.
- bool may_have_function_literal_;
- Variable* loop_variable_;
- const BailoutId continue_id_;
- const BailoutId body_id_;
-};
-
-
-class ForInStatement: public IterationStatement {
- public:
- DECLARE_NODE_TYPE(ForInStatement)
-
- void Initialize(Expression* each, Expression* enumerable, Statement* body) {
- IterationStatement::Initialize(body);
- each_ = each;
- enumerable_ = enumerable;
- }
-
- Expression* each() const { return each_; }
- Expression* enumerable() const { return enumerable_; }
-
- virtual BailoutId ContinueId() const { return EntryId(); }
- virtual BailoutId StackCheckId() const { return body_id_; }
- BailoutId BodyId() const { return body_id_; }
- BailoutId PrepareId() const { return prepare_id_; }
-
- TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
-
- protected:
- ForInStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- each_(NULL),
- enumerable_(NULL),
- body_id_(GetNextId(isolate)),
- prepare_id_(GetNextId(isolate)) {
- }
-
- private:
- Expression* each_;
- Expression* enumerable_;
- const BailoutId body_id_;
- const BailoutId prepare_id_;
-};
-
-
-class ExpressionStatement: public Statement {
- public:
- DECLARE_NODE_TYPE(ExpressionStatement)
-
- void set_expression(Expression* e) { expression_ = e; }
- Expression* expression() const { return expression_; }
-
- protected:
- explicit ExpressionStatement(Expression* expression)
- : expression_(expression) { }
-
- private:
- Expression* expression_;
-};
-
-
-class ContinueStatement: public Statement {
- public:
- DECLARE_NODE_TYPE(ContinueStatement)
-
- IterationStatement* target() const { return target_; }
-
- protected:
- explicit ContinueStatement(IterationStatement* target)
- : target_(target) { }
-
- private:
- IterationStatement* target_;
-};
-
-
-class BreakStatement: public Statement {
- public:
- DECLARE_NODE_TYPE(BreakStatement)
-
- BreakableStatement* target() const { return target_; }
-
- protected:
- explicit BreakStatement(BreakableStatement* target)
- : target_(target) { }
-
- private:
- BreakableStatement* target_;
-};
-
-
-class ReturnStatement: public Statement {
- public:
- DECLARE_NODE_TYPE(ReturnStatement)
-
- Expression* expression() const { return expression_; }
-
- protected:
- explicit ReturnStatement(Expression* expression)
- : expression_(expression) { }
-
- private:
- Expression* expression_;
-};
-
-
-class WithStatement: public Statement {
- public:
- DECLARE_NODE_TYPE(WithStatement)
-
- Expression* expression() const { return expression_; }
- Statement* statement() const { return statement_; }
-
- protected:
- WithStatement(Expression* expression, Statement* statement)
- : expression_(expression),
- statement_(statement) { }
-
- private:
- Expression* expression_;
- Statement* statement_;
-};
-
-
-class CaseClause: public ZoneObject {
- public:
- CaseClause(Isolate* isolate,
- Expression* label,
- ZoneList<Statement*>* statements,
- int pos);
-
- bool is_default() const { return label_ == NULL; }
- Expression* label() const {
- CHECK(!is_default());
- return label_;
- }
- Label* body_target() { return &body_target_; }
- ZoneList<Statement*>* statements() const { return statements_; }
-
- int position() const { return position_; }
- void set_position(int pos) { position_ = pos; }
-
- BailoutId EntryId() const { return entry_id_; }
-
- // Type feedback information.
- TypeFeedbackId CompareId() { return compare_id_; }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
- bool IsNameCompare() { return compare_type_ == NAME_ONLY; }
- bool IsStringCompare() { return compare_type_ == STRING_ONLY; }
- bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
-
- private:
- Expression* label_;
- Label body_target_;
- ZoneList<Statement*>* statements_;
- int position_;
- enum CompareTypeFeedback {
- NONE,
- SMI_ONLY,
- NAME_ONLY,
- STRING_ONLY,
- OBJECT_ONLY
- };
- CompareTypeFeedback compare_type_;
- const TypeFeedbackId compare_id_;
- const BailoutId entry_id_;
-};
-
-
-class SwitchStatement: public BreakableStatement {
- public:
- DECLARE_NODE_TYPE(SwitchStatement)
-
- void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
- tag_ = tag;
- cases_ = cases;
- }
-
- Expression* tag() const { return tag_; }
- ZoneList<CaseClause*>* cases() const { return cases_; }
-
- protected:
- SwitchStatement(Isolate* isolate, ZoneStringList* labels)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
- tag_(NULL),
- cases_(NULL) { }
-
- private:
- Expression* tag_;
- ZoneList<CaseClause*>* cases_;
-};
-
-
-// If-statements always have non-null references to their then- and
-// else-parts. When parsing if-statements with no explicit else-part,
-// the parser implicitly creates an empty statement. Use the
-// HasThenStatement() and HasElseStatement() functions to check if a
-// given if-statement has a then- or an else-part containing code.
-class IfStatement: public Statement {
- public:
- DECLARE_NODE_TYPE(IfStatement)
-
- bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
- bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
-
- Expression* condition() const { return condition_; }
- Statement* then_statement() const { return then_statement_; }
- Statement* else_statement() const { return else_statement_; }
-
- BailoutId IfId() const { return if_id_; }
- BailoutId ThenId() const { return then_id_; }
- BailoutId ElseId() const { return else_id_; }
-
- protected:
- IfStatement(Isolate* isolate,
- Expression* condition,
- Statement* then_statement,
- Statement* else_statement)
- : condition_(condition),
- then_statement_(then_statement),
- else_statement_(else_statement),
- if_id_(GetNextId(isolate)),
- then_id_(GetNextId(isolate)),
- else_id_(GetNextId(isolate)) {
- }
-
- private:
- Expression* condition_;
- Statement* then_statement_;
- Statement* else_statement_;
- const BailoutId if_id_;
- const BailoutId then_id_;
- const BailoutId else_id_;
-};
-
-
-// NOTE: TargetCollectors are represented as nodes to fit in the target
-// stack in the compiler; this should probably be reworked.
-class TargetCollector: public AstNode {
- public:
- explicit TargetCollector(Zone* zone) : targets_(0, zone) { }
-
- // Adds a jump target to the collector. The collector stores a pointer not
- // a copy of the target to make binding work, so make sure not to pass in
- // references to something on the stack.
- void AddTarget(Label* target, Zone* zone);
-
- // Virtual behaviour. TargetCollectors are never part of the AST.
- virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
- virtual Type node_type() const { return kInvalid; }
- virtual TargetCollector* AsTargetCollector() { return this; }
-
- ZoneList<Label*>* targets() { return &targets_; }
-
- private:
- ZoneList<Label*> targets_;
-};
-
-
-class TryStatement: public Statement {
- public:
- void set_escaping_targets(ZoneList<Label*>* targets) {
- escaping_targets_ = targets;
- }
-
- int index() const { return index_; }
- Block* try_block() const { return try_block_; }
- ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
-
- protected:
- TryStatement(int index, Block* try_block)
- : index_(index),
- try_block_(try_block),
- escaping_targets_(NULL) { }
-
- private:
- // Unique (per-function) index of this handler. This is not an AST ID.
- int index_;
-
- Block* try_block_;
- ZoneList<Label*>* escaping_targets_;
-};
-
-
-class TryCatchStatement: public TryStatement {
- public:
- DECLARE_NODE_TYPE(TryCatchStatement)
-
- Scope* scope() { return scope_; }
- Variable* variable() { return variable_; }
- Block* catch_block() const { return catch_block_; }
-
- protected:
- TryCatchStatement(int index,
- Block* try_block,
- Scope* scope,
- Variable* variable,
- Block* catch_block)
- : TryStatement(index, try_block),
- scope_(scope),
- variable_(variable),
- catch_block_(catch_block) {
- }
-
- private:
- Scope* scope_;
- Variable* variable_;
- Block* catch_block_;
-};
-
-
-class TryFinallyStatement: public TryStatement {
- public:
- DECLARE_NODE_TYPE(TryFinallyStatement)
-
- Block* finally_block() const { return finally_block_; }
-
- protected:
- TryFinallyStatement(int index, Block* try_block, Block* finally_block)
- : TryStatement(index, try_block),
- finally_block_(finally_block) { }
-
- private:
- Block* finally_block_;
-};
-
-
-class DebuggerStatement: public Statement {
- public:
- DECLARE_NODE_TYPE(DebuggerStatement)
-
- protected:
- DebuggerStatement() {}
-};
-
-
-class EmptyStatement: public Statement {
- public:
- DECLARE_NODE_TYPE(EmptyStatement)
-
- protected:
- EmptyStatement() {}
-};
-
-
-class Literal: public Expression {
- public:
- DECLARE_NODE_TYPE(Literal)
-
- virtual bool IsPropertyName() {
- if (handle_->IsInternalizedString()) {
- uint32_t ignored;
- return !String::cast(*handle_)->AsArrayIndex(&ignored);
- }
- return false;
- }
-
- Handle<String> AsPropertyName() {
- ASSERT(IsPropertyName());
- return Handle<String>::cast(handle_);
- }
-
- virtual bool ToBooleanIsTrue() { return handle_->ToBoolean()->IsTrue(); }
- virtual bool ToBooleanIsFalse() { return handle_->ToBoolean()->IsFalse(); }
-
- // Identity testers.
- bool IsNull() const {
- ASSERT(!handle_.is_null());
- return handle_->IsNull();
- }
- bool IsTrue() const {
- ASSERT(!handle_.is_null());
- return handle_->IsTrue();
- }
- bool IsFalse() const {
- ASSERT(!handle_.is_null());
- return handle_->IsFalse();
- }
-
- Handle<Object> handle() const { return handle_; }
-
- // Support for using Literal as a HashMap key. NOTE: Currently, this works
- // only for string and number literals!
- uint32_t Hash() { return ToString()->Hash(); }
-
- static bool Match(void* literal1, void* literal2) {
- Handle<String> s1 = static_cast<Literal*>(literal1)->ToString();
- Handle<String> s2 = static_cast<Literal*>(literal2)->ToString();
- return s1->Equals(*s2);
- }
-
- TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); }
-
- protected:
- Literal(Isolate* isolate, Handle<Object> handle)
- : Expression(isolate),
- handle_(handle) { }
-
- private:
- Handle<String> ToString();
-
- Handle<Object> handle_;
-};
-
-
-// Base class for literals that needs space in the corresponding JSFunction.
-class MaterializedLiteral: public Expression {
- public:
- virtual MaterializedLiteral* AsMaterializedLiteral() { return this; }
-
- int literal_index() { return literal_index_; }
-
- // A materialized literal is simple if the values consist of only
- // constants and simple object and array literals.
- bool is_simple() const { return is_simple_; }
-
- int depth() const { return depth_; }
-
- protected:
- MaterializedLiteral(Isolate* isolate,
- int literal_index,
- bool is_simple,
- int depth)
- : Expression(isolate),
- literal_index_(literal_index),
- is_simple_(is_simple),
- depth_(depth) {}
-
- private:
- int literal_index_;
- bool is_simple_;
- int depth_;
-};
-
-
-// An object literal has a boilerplate object that is used
-// for minimizing the work when constructing it at runtime.
-class ObjectLiteral: public MaterializedLiteral {
- public:
- // Property is used for passing information
- // about an object literal's properties from the parser
- // to the code generator.
- class Property: public ZoneObject {
- public:
- enum Kind {
- CONSTANT, // Property with constant value (compile time).
- COMPUTED, // Property with computed value (execution time).
- MATERIALIZED_LITERAL, // Property value is a materialized literal.
- GETTER, SETTER, // Property is an accessor function.
- PROTOTYPE // Property is __proto__.
- };
-
- Property(Literal* key, Expression* value, Isolate* isolate);
-
- Literal* key() { return key_; }
- Expression* value() { return value_; }
- Kind kind() { return kind_; }
-
- // Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- bool IsMonomorphic() { return !receiver_type_.is_null(); }
- Handle<Map> GetReceiverType() { return receiver_type_; }
-
- bool IsCompileTimeValue();
-
- void set_emit_store(bool emit_store);
- bool emit_store();
-
- protected:
- template<class> friend class AstNodeFactory;
-
- Property(bool is_getter, FunctionLiteral* value);
- void set_key(Literal* key) { key_ = key; }
-
- private:
- Literal* key_;
- Expression* value_;
- Kind kind_;
- bool emit_store_;
- Handle<Map> receiver_type_;
- };
-
- DECLARE_NODE_TYPE(ObjectLiteral)
-
- Handle<FixedArray> constant_properties() const {
- return constant_properties_;
- }
- ZoneList<Property*>* properties() const { return properties_; }
-
- bool fast_elements() const { return fast_elements_; }
-
- bool has_function() { return has_function_; }
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- void CalculateEmitStore(Zone* zone);
-
- enum Flags {
- kNoFlags = 0,
- kFastElements = 1,
- kHasFunction = 1 << 1
- };
-
- struct Accessors: public ZoneObject {
- Accessors() : getter(NULL), setter(NULL) { }
- Expression* getter;
- Expression* setter;
- };
-
- protected:
- ObjectLiteral(Isolate* isolate,
- Handle<FixedArray> constant_properties,
- ZoneList<Property*>* properties,
- int literal_index,
- bool is_simple,
- bool fast_elements,
- int depth,
- bool has_function)
- : MaterializedLiteral(isolate, literal_index, is_simple, depth),
- constant_properties_(constant_properties),
- properties_(properties),
- fast_elements_(fast_elements),
- has_function_(has_function) {}
-
- private:
- Handle<FixedArray> constant_properties_;
- ZoneList<Property*>* properties_;
- bool fast_elements_;
- bool has_function_;
-};
-
-
-// Node for capturing a regexp literal.
-class RegExpLiteral: public MaterializedLiteral {
- public:
- DECLARE_NODE_TYPE(RegExpLiteral)
-
- Handle<String> pattern() const { return pattern_; }
- Handle<String> flags() const { return flags_; }
-
- protected:
- RegExpLiteral(Isolate* isolate,
- Handle<String> pattern,
- Handle<String> flags,
- int literal_index)
- : MaterializedLiteral(isolate, literal_index, false, 1),
- pattern_(pattern),
- flags_(flags) {}
-
- private:
- Handle<String> pattern_;
- Handle<String> flags_;
-};
-
-// An array literal has a literals object that is used
-// for minimizing the work when constructing it at runtime.
-class ArrayLiteral: public MaterializedLiteral {
- public:
- DECLARE_NODE_TYPE(ArrayLiteral)
-
- Handle<FixedArray> constant_elements() const { return constant_elements_; }
- ZoneList<Expression*>* values() const { return values_; }
-
- // Return an AST id for an element that is used in simulate instructions.
- BailoutId GetIdForElement(int i) {
- return BailoutId(first_element_id_.ToInt() + i);
- }
-
- protected:
- ArrayLiteral(Isolate* isolate,
- Handle<FixedArray> constant_elements,
- ZoneList<Expression*>* values,
- int literal_index,
- bool is_simple,
- int depth)
- : MaterializedLiteral(isolate, literal_index, is_simple, depth),
- constant_elements_(constant_elements),
- values_(values),
- first_element_id_(ReserveIdRange(isolate, values->length())) {}
-
- private:
- Handle<FixedArray> constant_elements_;
- ZoneList<Expression*>* values_;
- const BailoutId first_element_id_;
-};
-
-
-class VariableProxy: public Expression {
- public:
- DECLARE_NODE_TYPE(VariableProxy)
-
- virtual bool IsValidLeftHandSide() {
- return var_ == NULL ? true : var_->IsValidLeftHandSide();
- }
-
- bool IsVariable(Handle<String> n) {
- return !is_this() && name().is_identical_to(n);
- }
-
- bool IsArguments() { return var_ != NULL && var_->is_arguments(); }
-
- bool IsLValue() {
- return is_lvalue_;
- }
-
- Handle<String> name() const { return name_; }
- Variable* var() const { return var_; }
- bool is_this() const { return is_this_; }
- int position() const { return position_; }
- Interface* interface() const { return interface_; }
-
-
- void MarkAsTrivial() { is_trivial_ = true; }
- void MarkAsLValue() { is_lvalue_ = true; }
-
- // Bind this proxy to the variable var. Interfaces must match.
- void BindTo(Variable* var);
-
- protected:
- VariableProxy(Isolate* isolate, Variable* var);
-
- VariableProxy(Isolate* isolate,
- Handle<String> name,
- bool is_this,
- Interface* interface,
- int position);
-
- Handle<String> name_;
- Variable* var_; // resolved variable, or NULL
- bool is_this_;
- bool is_trivial_;
- // True if this variable proxy is being used in an assignment
- // or with a increment/decrement operator.
- bool is_lvalue_;
- int position_;
- Interface* interface_;
-};
-
-
-class Property: public Expression {
- public:
- DECLARE_NODE_TYPE(Property)
-
- virtual bool IsValidLeftHandSide() { return true; }
-
- Expression* obj() const { return obj_; }
- Expression* key() const { return key_; }
- virtual int position() const { return pos_; }
-
- BailoutId LoadId() const { return load_id_; }
-
- bool IsStringLength() const { return is_string_length_; }
- bool IsStringAccess() const { return is_string_access_; }
- bool IsFunctionPrototype() const { return is_function_prototype_; }
-
- // Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- bool IsArrayLength() { return is_array_length_; }
- bool IsUninitialized() { return is_uninitialized_; }
- TypeFeedbackId PropertyFeedbackId() { return reuse(id()); }
-
- protected:
- Property(Isolate* isolate,
- Expression* obj,
- Expression* key,
- int pos)
- : Expression(isolate),
- obj_(obj),
- key_(key),
- pos_(pos),
- load_id_(GetNextId(isolate)),
- is_monomorphic_(false),
- is_uninitialized_(false),
- is_array_length_(false),
- is_string_length_(false),
- is_string_access_(false),
- is_function_prototype_(false) { }
-
- private:
- Expression* obj_;
- Expression* key_;
- int pos_;
- const BailoutId load_id_;
-
- SmallMapList receiver_types_;
- bool is_monomorphic_ : 1;
- bool is_uninitialized_ : 1;
- bool is_array_length_ : 1;
- bool is_string_length_ : 1;
- bool is_string_access_ : 1;
- bool is_function_prototype_ : 1;
-};
-
-
-class Call: public Expression {
- public:
- DECLARE_NODE_TYPE(Call)
-
- Expression* expression() const { return expression_; }
- ZoneList<Expression*>* arguments() const { return arguments_; }
- virtual int position() const { return pos_; }
-
- // Type feedback information.
- TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, CallKind call_kind);
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- CheckType check_type() const { return check_type_; }
- Handle<JSFunction> target() { return target_; }
-
- // A cache for the holder, set as a side effect of computing the target of the
- // call. Note that it contains the null handle when the receiver is the same
- // as the holder!
- Handle<JSObject> holder() { return holder_; }
-
- Handle<JSGlobalPropertyCell> cell() { return cell_; }
-
- bool ComputeTarget(Handle<Map> type, Handle<String> name);
- bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
-
- BailoutId ReturnId() const { return return_id_; }
-
-#ifdef DEBUG
- // Used to assert that the FullCodeGenerator records the return site.
- bool return_is_recorded_;
-#endif
-
- protected:
- Call(Isolate* isolate,
- Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos)
- : Expression(isolate),
- expression_(expression),
- arguments_(arguments),
- pos_(pos),
- is_monomorphic_(false),
- check_type_(RECEIVER_MAP_CHECK),
- return_id_(GetNextId(isolate)) { }
-
- private:
- Expression* expression_;
- ZoneList<Expression*>* arguments_;
- int pos_;
-
- bool is_monomorphic_;
- CheckType check_type_;
- SmallMapList receiver_types_;
- Handle<JSFunction> target_;
- Handle<JSObject> holder_;
- Handle<JSGlobalPropertyCell> cell_;
-
- const BailoutId return_id_;
-};
-
-
-class CallNew: public Expression {
- public:
- DECLARE_NODE_TYPE(CallNew)
-
- Expression* expression() const { return expression_; }
- ZoneList<Expression*>* arguments() const { return arguments_; }
- virtual int position() const { return pos_; }
-
- // Type feedback information.
- TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- Handle<JSFunction> target() { return target_; }
-
- BailoutId ReturnId() const { return return_id_; }
- ElementsKind elements_kind() const { return elements_kind_; }
-
- protected:
- CallNew(Isolate* isolate,
- Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos)
- : Expression(isolate),
- expression_(expression),
- arguments_(arguments),
- pos_(pos),
- is_monomorphic_(false),
- return_id_(GetNextId(isolate)),
- elements_kind_(GetInitialFastElementsKind()) { }
-
- private:
- Expression* expression_;
- ZoneList<Expression*>* arguments_;
- int pos_;
-
- bool is_monomorphic_;
- Handle<JSFunction> target_;
-
- const BailoutId return_id_;
- ElementsKind elements_kind_;
-};
-
-
-// The CallRuntime class does not represent any official JavaScript
-// language construct. Instead it is used to call a C or JS function
-// with a set of arguments. This is used from the builtins that are
-// implemented in JavaScript (see "v8natives.js").
-class CallRuntime: public Expression {
- public:
- DECLARE_NODE_TYPE(CallRuntime)
-
- Handle<String> name() const { return name_; }
- const Runtime::Function* function() const { return function_; }
- ZoneList<Expression*>* arguments() const { return arguments_; }
- bool is_jsruntime() const { return function_ == NULL; }
-
- TypeFeedbackId CallRuntimeFeedbackId() const { return reuse(id()); }
-
- protected:
- CallRuntime(Isolate* isolate,
- Handle<String> name,
- const Runtime::Function* function,
- ZoneList<Expression*>* arguments)
- : Expression(isolate),
- name_(name),
- function_(function),
- arguments_(arguments) { }
-
- private:
- Handle<String> name_;
- const Runtime::Function* function_;
- ZoneList<Expression*>* arguments_;
-};
-
-
-class UnaryOperation: public Expression {
- public:
- DECLARE_NODE_TYPE(UnaryOperation)
-
- virtual bool ResultOverwriteAllowed();
-
- Token::Value op() const { return op_; }
- Expression* expression() const { return expression_; }
- virtual int position() const { return pos_; }
-
- BailoutId MaterializeTrueId() { return materialize_true_id_; }
- BailoutId MaterializeFalseId() { return materialize_false_id_; }
-
- TypeFeedbackId UnaryOperationFeedbackId() const { return reuse(id()); }
-
- protected:
- UnaryOperation(Isolate* isolate,
- Token::Value op,
- Expression* expression,
- int pos)
- : Expression(isolate),
- op_(op),
- expression_(expression),
- pos_(pos),
- materialize_true_id_(GetNextId(isolate)),
- materialize_false_id_(GetNextId(isolate)) {
- ASSERT(Token::IsUnaryOp(op));
- }
-
- private:
- Token::Value op_;
- Expression* expression_;
- int pos_;
-
- // For unary not (Token::NOT), the AST ids where true and false will
- // actually be materialized, respectively.
- const BailoutId materialize_true_id_;
- const BailoutId materialize_false_id_;
-};
-
-
-class BinaryOperation: public Expression {
- public:
- DECLARE_NODE_TYPE(BinaryOperation)
-
- virtual bool ResultOverwriteAllowed();
-
- Token::Value op() const { return op_; }
- Expression* left() const { return left_; }
- Expression* right() const { return right_; }
- virtual int position() const { return pos_; }
-
- BailoutId RightId() const { return right_id_; }
-
- TypeFeedbackId BinaryOperationFeedbackId() const { return reuse(id()); }
-
- protected:
- BinaryOperation(Isolate* isolate,
- Token::Value op,
- Expression* left,
- Expression* right,
- int pos)
- : Expression(isolate),
- op_(op),
- left_(left),
- right_(right),
- pos_(pos),
- right_id_(GetNextId(isolate)) {
- ASSERT(Token::IsBinaryOp(op));
- }
-
- private:
- Token::Value op_;
- Expression* left_;
- Expression* right_;
- int pos_;
- // The short-circuit logical operations need an AST ID for their
- // right-hand subexpression.
- const BailoutId right_id_;
-};
-
-
-class CountOperation: public Expression {
- public:
- DECLARE_NODE_TYPE(CountOperation)
-
- bool is_prefix() const { return is_prefix_; }
- bool is_postfix() const { return !is_prefix_; }
-
- Token::Value op() const { return op_; }
- Token::Value binary_op() {
- return (op() == Token::INC) ? Token::ADD : Token::SUB;
- }
-
- Expression* expression() const { return expression_; }
- virtual int position() const { return pos_; }
-
- virtual void MarkAsStatement() { is_prefix_ = true; }
-
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* znoe);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
-
- BailoutId AssignmentId() const { return assignment_id_; }
-
- TypeFeedbackId CountBinOpFeedbackId() const { return count_id_; }
- TypeFeedbackId CountStoreFeedbackId() const { return reuse(id()); }
-
- protected:
- CountOperation(Isolate* isolate,
- Token::Value op,
- bool is_prefix,
- Expression* expr,
- int pos)
- : Expression(isolate),
- op_(op),
- is_prefix_(is_prefix),
- expression_(expr),
- pos_(pos),
- assignment_id_(GetNextId(isolate)),
- count_id_(GetNextId(isolate)) {}
-
- private:
- Token::Value op_;
- bool is_prefix_;
- bool is_monomorphic_;
- Expression* expression_;
- int pos_;
- const BailoutId assignment_id_;
- const TypeFeedbackId count_id_;
- SmallMapList receiver_types_;
-};
-
-
-class CompareOperation: public Expression {
- public:
- DECLARE_NODE_TYPE(CompareOperation)
-
- Token::Value op() const { return op_; }
- Expression* left() const { return left_; }
- Expression* right() const { return right_; }
- virtual int position() const { return pos_; }
-
- // Type feedback information.
- TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
-
- // Match special cases.
- bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
- bool IsLiteralCompareUndefined(Expression** expr);
- bool IsLiteralCompareNull(Expression** expr);
-
- protected:
- CompareOperation(Isolate* isolate,
- Token::Value op,
- Expression* left,
- Expression* right,
- int pos)
- : Expression(isolate),
- op_(op),
- left_(left),
- right_(right),
- pos_(pos) {
- ASSERT(Token::IsCompareOp(op));
- }
-
- private:
- Token::Value op_;
- Expression* left_;
- Expression* right_;
- int pos_;
-};
-
-
-class Conditional: public Expression {
- public:
- DECLARE_NODE_TYPE(Conditional)
-
- Expression* condition() const { return condition_; }
- Expression* then_expression() const { return then_expression_; }
- Expression* else_expression() const { return else_expression_; }
-
- int then_expression_position() const { return then_expression_position_; }
- int else_expression_position() const { return else_expression_position_; }
-
- BailoutId ThenId() const { return then_id_; }
- BailoutId ElseId() const { return else_id_; }
-
- protected:
- Conditional(Isolate* isolate,
- Expression* condition,
- Expression* then_expression,
- Expression* else_expression,
- int then_expression_position,
- int else_expression_position)
- : Expression(isolate),
- condition_(condition),
- then_expression_(then_expression),
- else_expression_(else_expression),
- then_expression_position_(then_expression_position),
- else_expression_position_(else_expression_position),
- then_id_(GetNextId(isolate)),
- else_id_(GetNextId(isolate)) { }
-
- private:
- Expression* condition_;
- Expression* then_expression_;
- Expression* else_expression_;
- int then_expression_position_;
- int else_expression_position_;
- const BailoutId then_id_;
- const BailoutId else_id_;
-};
-
-
-class Assignment: public Expression {
- public:
- DECLARE_NODE_TYPE(Assignment)
-
- Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
-
- Token::Value binary_op() const;
-
- Token::Value op() const { return op_; }
- Expression* target() const { return target_; }
- Expression* value() const { return value_; }
- virtual int position() const { return pos_; }
- BinaryOperation* binary_operation() const { return binary_operation_; }
-
- // This check relies on the definition order of token in token.h.
- bool is_compound() const { return op() > Token::ASSIGN; }
-
- BailoutId AssignmentId() const { return assignment_id_; }
-
- // Type feedback information.
- TypeFeedbackId AssignmentFeedbackId() { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
-
- protected:
- Assignment(Isolate* isolate,
- Token::Value op,
- Expression* target,
- Expression* value,
- int pos);
-
- template<class Visitor>
- void Init(Isolate* isolate, AstNodeFactory<Visitor>* factory) {
- ASSERT(Token::IsAssignmentOp(op_));
- if (is_compound()) {
- binary_operation_ =
- factory->NewBinaryOperation(binary_op(), target_, value_, pos_ + 1);
- }
- }
-
- private:
- Token::Value op_;
- Expression* target_;
- Expression* value_;
- int pos_;
- BinaryOperation* binary_operation_;
- const BailoutId assignment_id_;
-
- bool is_monomorphic_;
- SmallMapList receiver_types_;
-};
-
-
-class Throw: public Expression {
- public:
- DECLARE_NODE_TYPE(Throw)
-
- Expression* exception() const { return exception_; }
- virtual int position() const { return pos_; }
-
- protected:
- Throw(Isolate* isolate, Expression* exception, int pos)
- : Expression(isolate), exception_(exception), pos_(pos) {}
-
- private:
- Expression* exception_;
- int pos_;
-};
-
-
-class FunctionLiteral: public Expression {
- public:
- enum Type {
- ANONYMOUS_EXPRESSION,
- NAMED_EXPRESSION,
- DECLARATION
- };
-
- enum ParameterFlag {
- kNoDuplicateParameters = 0,
- kHasDuplicateParameters = 1
- };
-
- enum IsFunctionFlag {
- kGlobalOrEval,
- kIsFunction
- };
-
- enum IsParenthesizedFlag {
- kIsParenthesized,
- kNotParenthesized
- };
-
- DECLARE_NODE_TYPE(FunctionLiteral)
-
- Handle<String> name() const { return name_; }
- Scope* scope() const { return scope_; }
- ZoneList<Statement*>* body() const { return body_; }
- void set_function_token_position(int pos) { function_token_position_ = pos; }
- int function_token_position() const { return function_token_position_; }
- int start_position() const;
- int end_position() const;
- int SourceSize() const { return end_position() - start_position(); }
- bool is_expression() const { return IsExpression::decode(bitfield_); }
- bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
- bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
- LanguageMode language_mode() const;
- bool qml_mode() const { return qml_mode_flag() == kQmlMode; }
- QmlModeFlag qml_mode_flag() const;
-
- int materialized_literal_count() { return materialized_literal_count_; }
- int expected_property_count() { return expected_property_count_; }
- int handler_count() { return handler_count_; }
- bool has_only_simple_this_property_assignments() {
- return HasOnlySimpleThisPropertyAssignments::decode(bitfield_);
- }
- Handle<FixedArray> this_property_assignments() {
- return this_property_assignments_;
- }
- int parameter_count() { return parameter_count_; }
-
- bool AllowsLazyCompilation();
- bool AllowsLazyCompilationWithoutContext();
-
- Handle<String> debug_name() const {
- if (name_->length() > 0) return name_;
- return inferred_name();
- }
-
- Handle<String> inferred_name() const { return inferred_name_; }
- void set_inferred_name(Handle<String> inferred_name) {
- inferred_name_ = inferred_name;
- }
-
- bool pretenure() { return Pretenure::decode(bitfield_); }
- void set_pretenure() { bitfield_ |= Pretenure::encode(true); }
-
- bool has_duplicate_parameters() {
- return HasDuplicateParameters::decode(bitfield_);
- }
-
- bool is_function() { return IsFunction::decode(bitfield_) == kIsFunction; }
-
- // This is used as a heuristic on when to eagerly compile a function
- // literal. We consider the following constructs as hints that the
- // function will be called immediately:
- // - (function() { ... })();
- // - var x = function() { ... }();
- bool is_parenthesized() {
- return IsParenthesized::decode(bitfield_) == kIsParenthesized;
- }
- void set_parenthesized() {
- bitfield_ = IsParenthesized::update(bitfield_, kIsParenthesized);
- }
-
- int ast_node_count() { return ast_properties_.node_count(); }
- AstProperties::Flags* flags() { return ast_properties_.flags(); }
- void set_ast_properties(AstProperties* ast_properties) {
- ast_properties_ = *ast_properties;
- }
-
- protected:
- FunctionLiteral(Isolate* isolate,
- Handle<String> name,
- Scope* scope,
- ZoneList<Statement*>* body,
- int materialized_literal_count,
- int expected_property_count,
- int handler_count,
- bool has_only_simple_this_property_assignments,
- Handle<FixedArray> this_property_assignments,
- int parameter_count,
- Type type,
- ParameterFlag has_duplicate_parameters,
- IsFunctionFlag is_function,
- IsParenthesizedFlag is_parenthesized)
- : Expression(isolate),
- name_(name),
- scope_(scope),
- body_(body),
- this_property_assignments_(this_property_assignments),
- inferred_name_(isolate->factory()->empty_string()),
- materialized_literal_count_(materialized_literal_count),
- expected_property_count_(expected_property_count),
- handler_count_(handler_count),
- parameter_count_(parameter_count),
- function_token_position_(RelocInfo::kNoPosition) {
- bitfield_ =
- HasOnlySimpleThisPropertyAssignments::encode(
- has_only_simple_this_property_assignments) |
- IsExpression::encode(type != DECLARATION) |
- IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) |
- Pretenure::encode(false) |
- HasDuplicateParameters::encode(has_duplicate_parameters) |
- IsFunction::encode(is_function) |
- IsParenthesized::encode(is_parenthesized);
- }
-
- private:
- Handle<String> name_;
- Scope* scope_;
- ZoneList<Statement*>* body_;
- Handle<FixedArray> this_property_assignments_;
- Handle<String> inferred_name_;
- AstProperties ast_properties_;
-
- int materialized_literal_count_;
- int expected_property_count_;
- int handler_count_;
- int parameter_count_;
- int function_token_position_;
-
- unsigned bitfield_;
- class HasOnlySimpleThisPropertyAssignments: public BitField<bool, 0, 1> {};
- class IsExpression: public BitField<bool, 1, 1> {};
- class IsAnonymous: public BitField<bool, 2, 1> {};
- class Pretenure: public BitField<bool, 3, 1> {};
- class HasDuplicateParameters: public BitField<ParameterFlag, 4, 1> {};
- class IsFunction: public BitField<IsFunctionFlag, 5, 1> {};
- class IsParenthesized: public BitField<IsParenthesizedFlag, 6, 1> {};
-};
-
-
-class SharedFunctionInfoLiteral: public Expression {
- public:
- DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)
-
- Handle<SharedFunctionInfo> shared_function_info() const {
- return shared_function_info_;
- }
-
- protected:
- SharedFunctionInfoLiteral(
- Isolate* isolate,
- Handle<SharedFunctionInfo> shared_function_info)
- : Expression(isolate),
- shared_function_info_(shared_function_info) { }
-
- private:
- Handle<SharedFunctionInfo> shared_function_info_;
-};
-
-
-class ThisFunction: public Expression {
- public:
- DECLARE_NODE_TYPE(ThisFunction)
-
- protected:
- explicit ThisFunction(Isolate* isolate): Expression(isolate) {}
-};
-
-#undef DECLARE_NODE_TYPE
-
-
-// ----------------------------------------------------------------------------
-// Regular expressions
-
-
-class RegExpVisitor BASE_EMBEDDED {
- public:
- virtual ~RegExpVisitor() { }
-#define MAKE_CASE(Name) \
- virtual void* Visit##Name(RegExp##Name*, void* data) = 0;
- FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
-#undef MAKE_CASE
-};
-
-
-class RegExpTree: public ZoneObject {
- public:
- static const int kInfinity = kMaxInt;
- virtual ~RegExpTree() { }
- virtual void* Accept(RegExpVisitor* visitor, void* data) = 0;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) = 0;
- virtual bool IsTextElement() { return false; }
- virtual bool IsAnchoredAtStart() { return false; }
- virtual bool IsAnchoredAtEnd() { return false; }
- virtual int min_match() = 0;
- virtual int max_match() = 0;
- // Returns the interval of registers used for captures within this
- // expression.
- virtual Interval CaptureRegisters() { return Interval::Empty(); }
- virtual void AppendToText(RegExpText* text, Zone* zone);
- SmartArrayPointer<const char> ToString(Zone* zone);
-#define MAKE_ASTYPE(Name) \
- virtual RegExp##Name* As##Name(); \
- virtual bool Is##Name();
- FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ASTYPE)
-#undef MAKE_ASTYPE
-};
-
-
-class RegExpDisjunction: public RegExpTree {
- public:
- explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpDisjunction* AsDisjunction();
- virtual Interval CaptureRegisters();
- virtual bool IsDisjunction();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
- ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
- private:
- ZoneList<RegExpTree*>* alternatives_;
- int min_match_;
- int max_match_;
-};
-
-
-class RegExpAlternative: public RegExpTree {
- public:
- explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAlternative* AsAlternative();
- virtual Interval CaptureRegisters();
- virtual bool IsAlternative();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
- ZoneList<RegExpTree*>* nodes() { return nodes_; }
- private:
- ZoneList<RegExpTree*>* nodes_;
- int min_match_;
- int max_match_;
-};
-
-
-class RegExpAssertion: public RegExpTree {
- public:
- enum Type {
- START_OF_LINE,
- START_OF_INPUT,
- END_OF_LINE,
- END_OF_INPUT,
- BOUNDARY,
- NON_BOUNDARY
- };
- explicit RegExpAssertion(Type type) : type_(type) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAssertion* AsAssertion();
- virtual bool IsAssertion();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
- Type type() { return type_; }
- private:
- Type type_;
-};
-
-
-class CharacterSet BASE_EMBEDDED {
- public:
- explicit CharacterSet(uc16 standard_set_type)
- : ranges_(NULL),
- standard_set_type_(standard_set_type) {}
- explicit CharacterSet(ZoneList<CharacterRange>* ranges)
- : ranges_(ranges),
- standard_set_type_(0) {}
- ZoneList<CharacterRange>* ranges(Zone* zone);
- uc16 standard_set_type() { return standard_set_type_; }
- void set_standard_set_type(uc16 special_set_type) {
- standard_set_type_ = special_set_type;
- }
- bool is_standard() { return standard_set_type_ != 0; }
- void Canonicalize();
- private:
- ZoneList<CharacterRange>* ranges_;
- // If non-zero, the value represents a standard set (e.g., all whitespace
- // characters) without having to expand the ranges.
- uc16 standard_set_type_;
-};
-
-
-class RegExpCharacterClass: public RegExpTree {
- public:
- RegExpCharacterClass(ZoneList<CharacterRange>* ranges, bool is_negated)
- : set_(ranges),
- is_negated_(is_negated) { }
- explicit RegExpCharacterClass(uc16 type)
- : set_(type),
- is_negated_(false) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpCharacterClass* AsCharacterClass();
- virtual bool IsCharacterClass();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return 1; }
- virtual int max_match() { return 1; }
- virtual void AppendToText(RegExpText* text, Zone* zone);
- CharacterSet character_set() { return set_; }
- // TODO(lrn): Remove need for complex version if is_standard that
- // recognizes a mangled standard set and just do { return set_.is_special(); }
- bool is_standard(Zone* zone);
- // Returns a value representing the standard character set if is_standard()
- // returns true.
- // Currently used values are:
- // s : unicode whitespace
- // S : unicode non-whitespace
- // w : ASCII word character (digit, letter, underscore)
- // W : non-ASCII word character
- // d : ASCII digit
- // D : non-ASCII digit
- // . : non-unicode non-newline
- // * : All characters
- uc16 standard_type() { return set_.standard_set_type(); }
- ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
- bool is_negated() { return is_negated_; }
-
- private:
- CharacterSet set_;
- bool is_negated_;
-};
-
-
-class RegExpAtom: public RegExpTree {
- public:
- explicit RegExpAtom(Vector<const uc16> data) : data_(data) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAtom* AsAtom();
- virtual bool IsAtom();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return data_.length(); }
- virtual int max_match() { return data_.length(); }
- virtual void AppendToText(RegExpText* text, Zone* zone);
- Vector<const uc16> data() { return data_; }
- int length() { return data_.length(); }
- private:
- Vector<const uc16> data_;
-};
-
-
-class RegExpText: public RegExpTree {
- public:
- explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpText* AsText();
- virtual bool IsText();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return length_; }
- virtual int max_match() { return length_; }
- virtual void AppendToText(RegExpText* text, Zone* zone);
- void AddElement(TextElement elm, Zone* zone) {
- elements_.Add(elm, zone);
- length_ += elm.length();
- }
- ZoneList<TextElement>* elements() { return &elements_; }
- private:
- ZoneList<TextElement> elements_;
- int length_;
-};
-
-
-class RegExpQuantifier: public RegExpTree {
- public:
- enum Type { GREEDY, NON_GREEDY, POSSESSIVE };
- RegExpQuantifier(int min, int max, Type type, RegExpTree* body)
- : body_(body),
- min_(min),
- max_(max),
- min_match_(min * body->min_match()),
- type_(type) {
- if (max > 0 && body->max_match() > kInfinity / max) {
- max_match_ = kInfinity;
- } else {
- max_match_ = max * body->max_match();
- }
- }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- static RegExpNode* ToNode(int min,
- int max,
- bool is_greedy,
- RegExpTree* body,
- RegExpCompiler* compiler,
- RegExpNode* on_success,
- bool not_at_start = false);
- virtual RegExpQuantifier* AsQuantifier();
- virtual Interval CaptureRegisters();
- virtual bool IsQuantifier();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
- int min() { return min_; }
- int max() { return max_; }
- bool is_possessive() { return type_ == POSSESSIVE; }
- bool is_non_greedy() { return type_ == NON_GREEDY; }
- bool is_greedy() { return type_ == GREEDY; }
- RegExpTree* body() { return body_; }
-
- private:
- RegExpTree* body_;
- int min_;
- int max_;
- int min_match_;
- int max_match_;
- Type type_;
-};
-
-
-class RegExpCapture: public RegExpTree {
- public:
- explicit RegExpCapture(RegExpTree* body, int index)
- : body_(body), index_(index) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- static RegExpNode* ToNode(RegExpTree* body,
- int index,
- RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpCapture* AsCapture();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual Interval CaptureRegisters();
- virtual bool IsCapture();
- virtual int min_match() { return body_->min_match(); }
- virtual int max_match() { return body_->max_match(); }
- RegExpTree* body() { return body_; }
- int index() { return index_; }
- static int StartRegister(int index) { return index * 2; }
- static int EndRegister(int index) { return index * 2 + 1; }
-
- private:
- RegExpTree* body_;
- int index_;
-};
-
-
-class RegExpLookahead: public RegExpTree {
- public:
- RegExpLookahead(RegExpTree* body,
- bool is_positive,
- int capture_count,
- int capture_from)
- : body_(body),
- is_positive_(is_positive),
- capture_count_(capture_count),
- capture_from_(capture_from) { }
-
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpLookahead* AsLookahead();
- virtual Interval CaptureRegisters();
- virtual bool IsLookahead();
- virtual bool IsAnchoredAtStart();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
- RegExpTree* body() { return body_; }
- bool is_positive() { return is_positive_; }
- int capture_count() { return capture_count_; }
- int capture_from() { return capture_from_; }
-
- private:
- RegExpTree* body_;
- bool is_positive_;
- int capture_count_;
- int capture_from_;
-};
-
-
-class RegExpBackReference: public RegExpTree {
- public:
- explicit RegExpBackReference(RegExpCapture* capture)
- : capture_(capture) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpBackReference* AsBackReference();
- virtual bool IsBackReference();
- virtual int min_match() { return 0; }
- virtual int max_match() { return capture_->max_match(); }
- int index() { return capture_->index(); }
- RegExpCapture* capture() { return capture_; }
- private:
- RegExpCapture* capture_;
-};
-
-
-class RegExpEmpty: public RegExpTree {
- public:
- RegExpEmpty() { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpEmpty* AsEmpty();
- virtual bool IsEmpty();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
- static RegExpEmpty* GetInstance() {
- static RegExpEmpty* instance = ::new RegExpEmpty();
- return instance;
- }
-};
-
-
-// ----------------------------------------------------------------------------
-// Out-of-line inline constructors (to side-step cyclic dependencies).
-
-inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
- : Module(proxy->interface()),
- proxy_(proxy) {
-}
-
-
-// ----------------------------------------------------------------------------
-// Basic visitor
-// - leaf node visitors are abstract.
-
-class AstVisitor BASE_EMBEDDED {
- public:
- AstVisitor() {}
- virtual ~AstVisitor() { }
-
- // Stack overflow check and dynamic dispatch.
- virtual void Visit(AstNode* node) = 0;
-
- // Iteration left-to-right.
- virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
- virtual void VisitStatements(ZoneList<Statement*>* statements);
- virtual void VisitExpressions(ZoneList<Expression*>* expressions);
-
- // Individual AST nodes.
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node) = 0;
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-};
-
-
-#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
-public: \
- virtual void Visit(AstNode* node) { \
- if (!CheckStackOverflow()) node->Accept(this); \
- } \
- \
- void SetStackOverflow() { stack_overflow_ = true; } \
- void ClearStackOverflow() { stack_overflow_ = false; } \
- bool HasStackOverflow() const { return stack_overflow_; } \
- \
- bool CheckStackOverflow() { \
- if (stack_overflow_) return true; \
- StackLimitCheck check(isolate_); \
- if (!check.HasOverflowed()) return false; \
- return (stack_overflow_ = true); \
- } \
- \
-private: \
- void InitializeAstVisitor() { \
- isolate_ = Isolate::Current(); \
- stack_overflow_ = false; \
- } \
- Isolate* isolate() { return isolate_; } \
- \
- Isolate* isolate_; \
- bool stack_overflow_
-
-
-// ----------------------------------------------------------------------------
-// Construction time visitor.
-
-class AstConstructionVisitor BASE_EMBEDDED {
- public:
- AstConstructionVisitor() { }
-
- AstProperties* ast_properties() { return &properties_; }
-
- private:
- template<class> friend class AstNodeFactory;
-
- // Node visitors.
-#define DEF_VISIT(type) \
- void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- void increase_node_count() { properties_.add_node_count(1); }
- void add_flag(AstPropertiesFlag flag) { properties_.flags()->Add(flag); }
-
- AstProperties properties_;
-};
-
-
-class AstNullVisitor BASE_EMBEDDED {
- public:
- // Node visitors.
-#define DEF_VISIT(type) \
- void Visit##type(type* node) {}
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-};
-
-
-
-// ----------------------------------------------------------------------------
-// AstNode factory
-
-template<class Visitor>
-class AstNodeFactory BASE_EMBEDDED {
- public:
- AstNodeFactory(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- zone_(zone) { }
-
- Visitor* visitor() { return &visitor_; }
-
-#define VISIT_AND_RETURN(NodeType, node) \
- visitor_.Visit##NodeType((node)); \
- return node;
-
- VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy,
- VariableMode mode,
- Scope* scope) {
- VariableDeclaration* decl =
- new(zone_) VariableDeclaration(proxy, mode, scope);
- VISIT_AND_RETURN(VariableDeclaration, decl)
- }
-
- FunctionDeclaration* NewFunctionDeclaration(VariableProxy* proxy,
- VariableMode mode,
- FunctionLiteral* fun,
- Scope* scope) {
- FunctionDeclaration* decl =
- new(zone_) FunctionDeclaration(proxy, mode, fun, scope);
- VISIT_AND_RETURN(FunctionDeclaration, decl)
- }
-
- ModuleDeclaration* NewModuleDeclaration(VariableProxy* proxy,
- Module* module,
- Scope* scope) {
- ModuleDeclaration* decl =
- new(zone_) ModuleDeclaration(proxy, module, scope);
- VISIT_AND_RETURN(ModuleDeclaration, decl)
- }
-
- ImportDeclaration* NewImportDeclaration(VariableProxy* proxy,
- Module* module,
- Scope* scope) {
- ImportDeclaration* decl =
- new(zone_) ImportDeclaration(proxy, module, scope);
- VISIT_AND_RETURN(ImportDeclaration, decl)
- }
-
- ExportDeclaration* NewExportDeclaration(VariableProxy* proxy,
- Scope* scope) {
- ExportDeclaration* decl =
- new(zone_) ExportDeclaration(proxy, scope);
- VISIT_AND_RETURN(ExportDeclaration, decl)
- }
-
- ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface) {
- ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface);
- VISIT_AND_RETURN(ModuleLiteral, module)
- }
-
- ModuleVariable* NewModuleVariable(VariableProxy* proxy) {
- ModuleVariable* module = new(zone_) ModuleVariable(proxy);
- VISIT_AND_RETURN(ModuleVariable, module)
- }
-
- ModulePath* NewModulePath(Module* origin, Handle<String> name) {
- ModulePath* module = new(zone_) ModulePath(origin, name, zone_);
- VISIT_AND_RETURN(ModulePath, module)
- }
-
- ModuleUrl* NewModuleUrl(Handle<String> url) {
- ModuleUrl* module = new(zone_) ModuleUrl(url, zone_);
- VISIT_AND_RETURN(ModuleUrl, module)
- }
-
- Block* NewBlock(ZoneStringList* labels,
- int capacity,
- bool is_initializer_block) {
- Block* block = new(zone_) Block(
- isolate_, labels, capacity, is_initializer_block, zone_);
- VISIT_AND_RETURN(Block, block)
- }
-
-#define STATEMENT_WITH_LABELS(NodeType) \
- NodeType* New##NodeType(ZoneStringList* labels) { \
- NodeType* stmt = new(zone_) NodeType(isolate_, labels); \
- VISIT_AND_RETURN(NodeType, stmt); \
- }
- STATEMENT_WITH_LABELS(DoWhileStatement)
- STATEMENT_WITH_LABELS(WhileStatement)
- STATEMENT_WITH_LABELS(ForStatement)
- STATEMENT_WITH_LABELS(ForInStatement)
- STATEMENT_WITH_LABELS(SwitchStatement)
-#undef STATEMENT_WITH_LABELS
-
- ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) {
- ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body);
- VISIT_AND_RETURN(ModuleStatement, stmt)
- }
-
- ExpressionStatement* NewExpressionStatement(Expression* expression) {
- ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression);
- VISIT_AND_RETURN(ExpressionStatement, stmt)
- }
-
- ContinueStatement* NewContinueStatement(IterationStatement* target) {
- ContinueStatement* stmt = new(zone_) ContinueStatement(target);
- VISIT_AND_RETURN(ContinueStatement, stmt)
- }
-
- BreakStatement* NewBreakStatement(BreakableStatement* target) {
- BreakStatement* stmt = new(zone_) BreakStatement(target);
- VISIT_AND_RETURN(BreakStatement, stmt)
- }
-
- ReturnStatement* NewReturnStatement(Expression* expression) {
- ReturnStatement* stmt = new(zone_) ReturnStatement(expression);
- VISIT_AND_RETURN(ReturnStatement, stmt)
- }
-
- WithStatement* NewWithStatement(Expression* expression,
- Statement* statement) {
- WithStatement* stmt = new(zone_) WithStatement(expression, statement);
- VISIT_AND_RETURN(WithStatement, stmt)
- }
-
- IfStatement* NewIfStatement(Expression* condition,
- Statement* then_statement,
- Statement* else_statement) {
- IfStatement* stmt = new(zone_) IfStatement(
- isolate_, condition, then_statement, else_statement);
- VISIT_AND_RETURN(IfStatement, stmt)
- }
-
- TryCatchStatement* NewTryCatchStatement(int index,
- Block* try_block,
- Scope* scope,
- Variable* variable,
- Block* catch_block) {
- TryCatchStatement* stmt = new(zone_) TryCatchStatement(
- index, try_block, scope, variable, catch_block);
- VISIT_AND_RETURN(TryCatchStatement, stmt)
- }
-
- TryFinallyStatement* NewTryFinallyStatement(int index,
- Block* try_block,
- Block* finally_block) {
- TryFinallyStatement* stmt =
- new(zone_) TryFinallyStatement(index, try_block, finally_block);
- VISIT_AND_RETURN(TryFinallyStatement, stmt)
- }
-
- DebuggerStatement* NewDebuggerStatement() {
- DebuggerStatement* stmt = new(zone_) DebuggerStatement();
- VISIT_AND_RETURN(DebuggerStatement, stmt)
- }
-
- EmptyStatement* NewEmptyStatement() {
- return new(zone_) EmptyStatement();
- }
-
- Literal* NewLiteral(Handle<Object> handle) {
- Literal* lit = new(zone_) Literal(isolate_, handle);
- VISIT_AND_RETURN(Literal, lit)
- }
-
- Literal* NewNumberLiteral(double number) {
- return NewLiteral(isolate_->factory()->NewNumber(number, TENURED));
- }
-
- ObjectLiteral* NewObjectLiteral(
- Handle<FixedArray> constant_properties,
- ZoneList<ObjectLiteral::Property*>* properties,
- int literal_index,
- bool is_simple,
- bool fast_elements,
- int depth,
- bool has_function) {
- ObjectLiteral* lit = new(zone_) ObjectLiteral(
- isolate_, constant_properties, properties, literal_index,
- is_simple, fast_elements, depth, has_function);
- VISIT_AND_RETURN(ObjectLiteral, lit)
- }
-
- ObjectLiteral::Property* NewObjectLiteralProperty(bool is_getter,
- FunctionLiteral* value) {
- ObjectLiteral::Property* prop =
- new(zone_) ObjectLiteral::Property(is_getter, value);
- prop->set_key(NewLiteral(value->name()));
- return prop; // Not an AST node, will not be visited.
- }
-
- RegExpLiteral* NewRegExpLiteral(Handle<String> pattern,
- Handle<String> flags,
- int literal_index) {
- RegExpLiteral* lit =
- new(zone_) RegExpLiteral(isolate_, pattern, flags, literal_index);
- VISIT_AND_RETURN(RegExpLiteral, lit);
- }
-
- ArrayLiteral* NewArrayLiteral(Handle<FixedArray> constant_elements,
- ZoneList<Expression*>* values,
- int literal_index,
- bool is_simple,
- int depth) {
- ArrayLiteral* lit = new(zone_) ArrayLiteral(
- isolate_, constant_elements, values, literal_index, is_simple, depth);
- VISIT_AND_RETURN(ArrayLiteral, lit)
- }
-
- VariableProxy* NewVariableProxy(Variable* var) {
- VariableProxy* proxy = new(zone_) VariableProxy(isolate_, var);
- VISIT_AND_RETURN(VariableProxy, proxy)
- }
-
- VariableProxy* NewVariableProxy(Handle<String> name,
- bool is_this,
- Interface* interface = Interface::NewValue(),
- int position = RelocInfo::kNoPosition) {
- VariableProxy* proxy =
- new(zone_) VariableProxy(isolate_, name, is_this, interface, position);
- VISIT_AND_RETURN(VariableProxy, proxy)
- }
-
- Property* NewProperty(Expression* obj, Expression* key, int pos) {
- Property* prop = new(zone_) Property(isolate_, obj, key, pos);
- VISIT_AND_RETURN(Property, prop)
- }
-
- Call* NewCall(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos) {
- Call* call = new(zone_) Call(isolate_, expression, arguments, pos);
- VISIT_AND_RETURN(Call, call)
- }
-
- CallNew* NewCallNew(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos) {
- CallNew* call = new(zone_) CallNew(isolate_, expression, arguments, pos);
- VISIT_AND_RETURN(CallNew, call)
- }
-
- CallRuntime* NewCallRuntime(Handle<String> name,
- const Runtime::Function* function,
- ZoneList<Expression*>* arguments) {
- CallRuntime* call =
- new(zone_) CallRuntime(isolate_, name, function, arguments);
- VISIT_AND_RETURN(CallRuntime, call)
- }
-
- UnaryOperation* NewUnaryOperation(Token::Value op,
- Expression* expression,
- int pos) {
- UnaryOperation* node =
- new(zone_) UnaryOperation(isolate_, op, expression, pos);
- VISIT_AND_RETURN(UnaryOperation, node)
- }
-
- BinaryOperation* NewBinaryOperation(Token::Value op,
- Expression* left,
- Expression* right,
- int pos) {
- BinaryOperation* node =
- new(zone_) BinaryOperation(isolate_, op, left, right, pos);
- VISIT_AND_RETURN(BinaryOperation, node)
- }
-
- CountOperation* NewCountOperation(Token::Value op,
- bool is_prefix,
- Expression* expr,
- int pos) {
- CountOperation* node =
- new(zone_) CountOperation(isolate_, op, is_prefix, expr, pos);
- VISIT_AND_RETURN(CountOperation, node)
- }
-
- CompareOperation* NewCompareOperation(Token::Value op,
- Expression* left,
- Expression* right,
- int pos) {
- CompareOperation* node =
- new(zone_) CompareOperation(isolate_, op, left, right, pos);
- VISIT_AND_RETURN(CompareOperation, node)
- }
-
- Conditional* NewConditional(Expression* condition,
- Expression* then_expression,
- Expression* else_expression,
- int then_expression_position,
- int else_expression_position) {
- Conditional* cond = new(zone_) Conditional(
- isolate_, condition, then_expression, else_expression,
- then_expression_position, else_expression_position);
- VISIT_AND_RETURN(Conditional, cond)
- }
-
- Assignment* NewAssignment(Token::Value op,
- Expression* target,
- Expression* value,
- int pos) {
- Assignment* assign =
- new(zone_) Assignment(isolate_, op, target, value, pos);
- assign->Init(isolate_, this);
- VISIT_AND_RETURN(Assignment, assign)
- }
-
- Throw* NewThrow(Expression* exception, int pos) {
- Throw* t = new(zone_) Throw(isolate_, exception, pos);
- VISIT_AND_RETURN(Throw, t)
- }
-
- FunctionLiteral* NewFunctionLiteral(
- Handle<String> name,
- Scope* scope,
- ZoneList<Statement*>* body,
- int materialized_literal_count,
- int expected_property_count,
- int handler_count,
- bool has_only_simple_this_property_assignments,
- Handle<FixedArray> this_property_assignments,
- int parameter_count,
- FunctionLiteral::ParameterFlag has_duplicate_parameters,
- FunctionLiteral::Type type,
- FunctionLiteral::IsFunctionFlag is_function,
- FunctionLiteral::IsParenthesizedFlag is_parenthesized) {
- FunctionLiteral* lit = new(zone_) FunctionLiteral(
- isolate_, name, scope, body,
- materialized_literal_count, expected_property_count, handler_count,
- has_only_simple_this_property_assignments, this_property_assignments,
- parameter_count, type, has_duplicate_parameters, is_function,
- is_parenthesized);
- // Top-level literal doesn't count for the AST's properties.
- if (is_function == FunctionLiteral::kIsFunction) {
- visitor_.VisitFunctionLiteral(lit);
- }
- return lit;
- }
-
- SharedFunctionInfoLiteral* NewSharedFunctionInfoLiteral(
- Handle<SharedFunctionInfo> shared_function_info) {
- SharedFunctionInfoLiteral* lit =
- new(zone_) SharedFunctionInfoLiteral(isolate_, shared_function_info);
- VISIT_AND_RETURN(SharedFunctionInfoLiteral, lit)
- }
-
- ThisFunction* NewThisFunction() {
- ThisFunction* fun = new(zone_) ThisFunction(isolate_);
- VISIT_AND_RETURN(ThisFunction, fun)
- }
-
-#undef VISIT_AND_RETURN
-
- private:
- Isolate* isolate_;
- Zone* zone_;
- Visitor visitor_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_AST_H_
diff --git a/src/3rdparty/v8/src/atomicops.h b/src/3rdparty/v8/src/atomicops.h
deleted file mode 100644
index d4fe042..0000000
--- a/src/3rdparty/v8/src/atomicops.h
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The routines exported by this module are subtle. If you use them, even if
-// you get the code right, it will depend on careful reasoning about atomicity
-// and memory ordering; it will be less readable, and harder to maintain. If
-// you plan to use these routines, you should have a good reason, such as solid
-// evidence that performance would otherwise suffer, or there being no
-// alternative. You should assume only properties explicitly guaranteed by the
-// specifications in this file. You are almost certainly _not_ writing code
-// just for the x86; if you assume x86 semantics, x86 hardware bugs and
-// implementations on other archtectures will cause your code to break. If you
-// do not know what you are doing, avoid these routines, and use a Mutex.
-//
-// It is incorrect to make direct assignments to/from an atomic variable.
-// You should use one of the Load or Store routines. The NoBarrier
-// versions are provided when no barriers are needed:
-// NoBarrier_Store()
-// NoBarrier_Load()
-// Although there are currently no compiler enforcement, you are encouraged
-// to use these.
-//
-
-#ifndef V8_ATOMICOPS_H_
-#define V8_ATOMICOPS_H_
-
-#include "../include/v8.h"
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-typedef int32_t Atomic32;
-#ifdef V8_HOST_ARCH_64_BIT
-// We need to be able to go between Atomic64 and AtomicWord implicitly. This
-// means Atomic64 and AtomicWord should be the same type on 64-bit.
-#if defined(__APPLE__)
-// MacOS is an exception to the implicit conversion rule above,
-// because it uses long for intptr_t.
-typedef int64_t Atomic64;
-#else
-typedef intptr_t Atomic64;
-#endif
-#endif
-
-// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
-// Atomic64 routines below, depending on your architecture.
-#if defined(__OpenBSD__) && defined(__i386__)
-typedef Atomic32 AtomicWord;
-#else
-typedef intptr_t AtomicWord;
-#endif
-
-// Atomically execute:
-// result = *ptr;
-// if (*ptr == old_value)
-// *ptr = new_value;
-// return result;
-//
-// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
-// Always return the old value of "*ptr"
-//
-// This routine implies no memory barriers.
-Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value);
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr. This routine implies no memory barriers.
-Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
-
-// Atomically increment *ptr by "increment". Returns the new value of
-// *ptr with the increment applied. This routine implies no memory barriers.
-Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
-
-Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment);
-
-// These following lower-level operations are typically useful only to people
-// implementing higher-level synchronization operations like spinlocks,
-// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
-// a store with appropriate memory-ordering instructions. "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value);
-Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value);
-
-void MemoryBarrier();
-void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
-void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
-void Release_Store(volatile Atomic32* ptr, Atomic32 value);
-
-Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
-Atomic32 Acquire_Load(volatile const Atomic32* ptr);
-Atomic32 Release_Load(volatile const Atomic32* ptr);
-
-// 64-bit atomic operations (only available on 64-bit processors).
-#ifdef V8_HOST_ARCH_64_BIT
-Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value);
-Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
-Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
-Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
-
-Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value);
-Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value);
-void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
-void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
-void Release_Store(volatile Atomic64* ptr, Atomic64 value);
-Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
-Atomic64 Acquire_Load(volatile const Atomic64* ptr);
-Atomic64 Release_Load(volatile const Atomic64* ptr);
-#endif // V8_HOST_ARCH_64_BIT
-
-} } // namespace v8::internal
-
-// Include our platform specific implementation.
-#if defined(THREAD_SANITIZER)
-#include "atomicops_internals_tsan.h"
-#elif defined(_MSC_VER) && \
- (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64) || defined(_WIN32_WCE))
-#include "atomicops_internals_x86_msvc.h"
-#elif defined(__APPLE__) && \
- (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
-#include "atomicops_internals_x86_macosx.h"
-#elif defined(__GNUC__) && \
- (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
-#include "atomicops_internals_x86_gcc.h"
-#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
- // We need special handling for QNX as the existing code in
- // atomicops_internals_arm_gcc.h is actually Linux-specific. This is due to
- // it using a magic hard-wired function address for LinuxKernelCmpxchgFunc.
- // The QNX implementation uses the equivalent system call for that platform
- // but is not source compatible.
- #if defined(__QNXNTO__)
- #include "atomicops_internals_arm_qnx.h"
- #else
- #include "atomicops_internals_arm_gcc.h"
- #endif
-#elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
-#include "atomicops_internals_mips_gcc.h"
-#else
-#error "Atomic operations are not supported on your platform"
-#endif
-
-#endif // V8_ATOMICOPS_H_
diff --git a/src/3rdparty/v8/src/atomicops_internals_arm_gcc.h b/src/3rdparty/v8/src/atomicops_internals_arm_gcc.h
deleted file mode 100644
index 6c30256..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_arm_gcc.h
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-//
-// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
-
-#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
-#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
-
-namespace v8 {
-namespace internal {
-
-// 0xffff0fc0 is the hard coded address of a function provided by
-// the kernel which implements an atomic compare-exchange. On older
-// ARM architecture revisions (pre-v6) this may be implemented using
-// a syscall. This address is stable, and in active use (hard coded)
-// by at least glibc-2.7 and the Android C library.
-typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
- Atomic32 new_value,
- volatile Atomic32* ptr);
-LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
- (LinuxKernelCmpxchgFunc) 0xffff0fc0;
-
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
- (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
-
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value = *ptr;
- do {
- if (!pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr)));
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- for (;;) {
- // Atomic exchange the old value with an incremented one.
- Atomic32 old_value = *ptr;
- Atomic32 new_value = old_value + increment;
- if (pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr)) == 0) {
- // The exchange took place as expected.
- return new_value;
- }
- // Otherwise, *ptr changed mid-loop and we need to retry.
- }
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() {
- pLinuxKernelMemoryBarrier();
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/src/3rdparty/v8/src/atomicops_internals_arm_qnx.h b/src/3rdparty/v8/src/atomicops_internals_arm_qnx.h
deleted file mode 100644
index 4a8e562..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_arm_qnx.h
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2012 Research in Motion. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_ATOMICOPS_INTERNALS_ARM_QNX_H_
-#define V8_ATOMICOPS_INTERNALS_ARM_QNX_H_
-
-#include <arm/cpuinline.h>
-#include <arm/smpxchg.h>
-
-namespace v8 {
-namespace internal {
-
-inline void MemoryBarrier() {
- __cpu_membarrier();
-}
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return _smp_cmpxchg(reinterpret_cast<volatile unsigned*>(ptr),
- old_value,
- new_value);
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return _smp_xchg(reinterpret_cast<volatile unsigned*>(ptr), new_value);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- for (;;) {
- // Atomic exchange the old value with an incremented one.
- Atomic32 old_value = *ptr;
- Atomic32 new_value = old_value + increment;
- if (_smp_cmpxchg(reinterpret_cast<volatile unsigned*>(ptr),
- old_value,
- new_value)) {
- // The exchange took place as expected.
- return new_value;
- }
- // Otherwise, *ptr changed mid-loop and we need to retry.
- }
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- MemoryBarrier();
- return NoBarrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_ATOMICOPS_INTERNALS_ARM_QNX_H_
-
diff --git a/src/3rdparty/v8/src/atomicops_internals_mips_gcc.h b/src/3rdparty/v8/src/atomicops_internals_mips_gcc.h
deleted file mode 100644
index 9498fd7..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_mips_gcc.h
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-namespace v8 {
-namespace internal {
-
-// Atomically execute:
-// result = *ptr;
-// if (*ptr == old_value)
-// *ptr = new_value;
-// return result;
-//
-// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
-// Always return the old value of "*ptr"
-//
-// This routine implies no memory barriers.
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev, tmp;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %0, %5\n" // prev = *ptr
- "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
- "move %2, %4\n" // tmp = new_value
- "sc %2, %1\n" // *ptr = tmp (with atomic check)
- "beqz %2, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- "2:\n"
- ".set pop\n"
- : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
- : "Ir" (old_value), "r" (new_value), "m" (*ptr)
- : "memory");
- return prev;
-}
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr. This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 temp, old;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %1, %2\n" // old = *ptr
- "move %0, %3\n" // temp = new_value
- "sc %0, %2\n" // *ptr = temp (with atomic check)
- "beqz %0, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- ".set pop\n"
- : "=&r" (temp), "=&r" (old), "=m" (*ptr)
- : "r" (new_value), "m" (*ptr)
- : "memory");
-
- return old;
-}
-
-// Atomically increment *ptr by "increment". Returns the new value of
-// *ptr with the increment applied. This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp, temp2;
-
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %0, %2\n" // temp = *ptr
- "addu %1, %0, %3\n" // temp2 = temp + increment
- "sc %1, %2\n" // *ptr = temp2 (with atomic check)
- "beqz %1, 1b\n" // start again on atomic error
- "addu %1, %0, %3\n" // temp2 = temp + increment
- ".set pop\n"
- : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
- : "Ir" (increment), "m" (*ptr)
- : "memory");
- // temp2 now holds the final value.
- return temp2;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- ATOMICOPS_COMPILER_BARRIER();
- Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
- ATOMICOPS_COMPILER_BARRIER();
- return res;
-}
-
-// "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- ATOMICOPS_COMPILER_BARRIER();
- Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- ATOMICOPS_COMPILER_BARRIER();
- return res;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- ATOMICOPS_COMPILER_BARRIER();
- Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- ATOMICOPS_COMPILER_BARRIER();
- return res;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() {
- __asm__ __volatile__("sync" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-} } // namespace v8::internal
-
-#undef ATOMICOPS_COMPILER_BARRIER
-
-#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/src/3rdparty/v8/src/atomicops_internals_tsan.h b/src/3rdparty/v8/src/atomicops_internals_tsan.h
deleted file mode 100644
index 6559336..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_tsan.h
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// This file is an internal atomic implementation for compiler-based
-// ThreadSanitizer. Use base/atomicops.h instead.
-
-#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
-#define V8_ATOMICOPS_INTERNALS_TSAN_H_
-
-// This struct is not part of the public API of this module; clients may not
-// use it. (However, it's exported via BASE_EXPORT because clients implicitly
-// do use it at link time by inlining these functions.)
-// Features of this x86. Values may not be correct before main() is run,
-// but are set conservatively.
-struct AtomicOps_x86CPUFeatureStruct {
- bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
- // after acquire compare-and-swap.
- bool has_sse2; // Processor has SSE2.
-};
-extern struct AtomicOps_x86CPUFeatureStruct
- AtomicOps_Internalx86CPUFeatures;
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-namespace v8 {
-namespace internal {
-
-#ifndef TSAN_INTERFACE_ATOMIC_H
-#define TSAN_INTERFACE_ATOMIC_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef char __tsan_atomic8;
-typedef short __tsan_atomic16; // NOLINT
-typedef int __tsan_atomic32;
-typedef long __tsan_atomic64; // NOLINT
-
-typedef enum {
- __tsan_memory_order_relaxed = (1 << 0) + 100500,
- __tsan_memory_order_consume = (1 << 1) + 100500,
- __tsan_memory_order_acquire = (1 << 2) + 100500,
- __tsan_memory_order_release = (1 << 3) + 100500,
- __tsan_memory_order_acq_rel = (1 << 4) + 100500,
- __tsan_memory_order_seq_cst = (1 << 5) + 100500,
-} __tsan_memory_order;
-
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
- __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
- __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
- __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
- __tsan_memory_order mo);
-
-void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
- __tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
- __tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
- __tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
- __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
- __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
- __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
- __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
- __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
-
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
- __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
- __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
- __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
- __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
-
-void __tsan_atomic_thread_fence(__tsan_memory_order mo);
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_release);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + __tsan_atomic32_fetch_add(ptr, increment,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + __tsan_atomic32_fetch_add(ptr, increment,
- __tsan_memory_order_acq_rel);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_acquire);
- return cmp;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_release);
- return cmp;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
- return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + __tsan_atomic64_fetch_add(ptr, increment,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + __tsan_atomic64_fetch_add(ptr, increment,
- __tsan_memory_order_acq_rel);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
- return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_acquire);
- return cmp;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_release);
- return cmp;
-}
-
-inline void MemoryBarrier() {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-} // namespace internal
-} // namespace v8
-
-#undef ATOMICOPS_COMPILER_BARRIER
-
-#endif // V8_ATOMICOPS_INTERNALS_TSAN_H_
diff --git a/src/3rdparty/v8/src/atomicops_internals_x86_gcc.cc b/src/3rdparty/v8/src/atomicops_internals_x86_gcc.cc
deleted file mode 100644
index 181c202..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_x86_gcc.cc
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This module gets enough CPU information to optimize the
-// atomicops module on x86.
-
-#include <string.h>
-
-#include "atomicops.h"
-
-// This file only makes sense with atomicops_internals_x86_gcc.h -- it
-// depends on structs that are defined in that file. If atomicops.h
-// doesn't sub-include that file, then we aren't needed, and shouldn't
-// try to do anything.
-#ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
-
-// Inline cpuid instruction. In PIC compilations, %ebx contains the address
-// of the global offset table. To avoid breaking such executables, this code
-// must preserve that register's value across cpuid instructions.
-#if defined(__i386__)
-#define cpuid(a, b, c, d, inp) \
- asm("mov %%ebx, %%edi\n" \
- "cpuid\n" \
- "xchg %%edi, %%ebx\n" \
- : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
-#elif defined(__x86_64__)
-#define cpuid(a, b, c, d, inp) \
- asm("mov %%rbx, %%rdi\n" \
- "cpuid\n" \
- "xchg %%rdi, %%rbx\n" \
- : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
-#endif
-
-#if defined(cpuid) // initialize the struct only on x86
-
-namespace v8 {
-namespace internal {
-
-// Set the flags so that code will run correctly and conservatively, so even
-// if we haven't been initialized yet, we're probably single threaded, and our
-// default values should hopefully be pretty safe.
-struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
- false, // bug can't exist before process spawns multiple threads
- false, // no SSE2
-};
-
-} } // namespace v8::internal
-
-namespace {
-
-// Initialize the AtomicOps_Internalx86CPUFeatures struct.
-void AtomicOps_Internalx86CPUFeaturesInit() {
- using v8::internal::AtomicOps_Internalx86CPUFeatures;
-
- uint32_t eax;
- uint32_t ebx;
- uint32_t ecx;
- uint32_t edx;
-
- // Get vendor string (issue CPUID with eax = 0)
- cpuid(eax, ebx, ecx, edx, 0);
- char vendor[13];
- memcpy(vendor, &ebx, 4);
- memcpy(vendor + 4, &edx, 4);
- memcpy(vendor + 8, &ecx, 4);
- vendor[12] = 0;
-
- // get feature flags in ecx/edx, and family/model in eax
- cpuid(eax, ebx, ecx, edx, 1);
-
- int family = (eax >> 8) & 0xf; // family and model fields
- int model = (eax >> 4) & 0xf;
- if (family == 0xf) { // use extended family and model fields
- family += (eax >> 20) & 0xff;
- model += ((eax >> 16) & 0xf) << 4;
- }
-
- // Opteron Rev E has a bug in which on very rare occasions a locked
- // instruction doesn't act as a read-acquire barrier if followed by a
- // non-locked read-modify-write instruction. Rev F has this bug in
- // pre-release versions, but not in versions released to customers,
- // so we test only for Rev E, which is family 15, model 32..63 inclusive.
- if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD
- family == 15 &&
- 32 <= model && model <= 63) {
- AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
- } else {
- AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
- }
-
- // edx bit 26 is SSE2 which we use to tell use whether we can use mfence
- AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
-}
-
-class AtomicOpsx86Initializer {
- public:
- AtomicOpsx86Initializer() {
- AtomicOps_Internalx86CPUFeaturesInit();
- }
-};
-
-// A global to get use initialized on startup via static initialization :/
-AtomicOpsx86Initializer g_initer;
-
-} // namespace
-
-#endif // if x86
-
-#endif // ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/src/3rdparty/v8/src/atomicops_internals_x86_gcc.h b/src/3rdparty/v8/src/atomicops_internals_x86_gcc.h
deleted file mode 100644
index 6e55b50..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_x86_gcc.h
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
-#define V8_ATOMICOPS_INTERNALS_X86_GCC_H_
-
-namespace v8 {
-namespace internal {
-
-// This struct is not part of the public API of this module; clients may not
-// use it.
-// Features of this x86. Values may not be correct before main() is run,
-// but are set conservatively.
-struct AtomicOps_x86CPUFeatureStruct {
- bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
- // after acquire compare-and-swap.
- bool has_sse2; // Processor has SSE2.
-};
-extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-// 32-bit low-level operations on any platform.
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
- __asm__ __volatile__("lock; cmpxchgl %1,%2"
- : "=a" (prev)
- : "q" (new_value), "m" (*ptr), "0" (old_value)
- : "memory");
- return prev;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- __asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
- : "=r" (new_value)
- : "m" (*ptr), "0" (new_value)
- : "memory");
- return new_value; // Now it's the previous value.
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp = increment;
- __asm__ __volatile__("lock; xaddl %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now holds the old value of *ptr
- return temp + increment;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp = increment;
- __asm__ __volatile__("lock; xaddl %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now holds the old value of *ptr
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return temp + increment;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return x;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-#if defined(__x86_64__)
-
-// 64-bit implementations of memory barrier can be simpler, because it
-// "mfence" is guaranteed to exist.
-inline void MemoryBarrier() {
- __asm__ __volatile__("mfence" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-#else
-
-inline void MemoryBarrier() {
- if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
- __asm__ __volatile__("mfence" : : : "memory");
- } else { // mfence is faster but not present on PIII
- Atomic32 x = 0;
- NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII
- }
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
- *ptr = value;
- __asm__ __volatile__("mfence" : : : "memory");
- } else {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier on PIII
- }
-}
-#endif
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- ATOMICOPS_COMPILER_BARRIER();
- *ptr = value; // An x86 store acts as a release barrier.
- // See comments in Atomic64 version of Release_Store(), below.
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr; // An x86 load acts as a acquire barrier.
- // See comments in Atomic64 version of Release_Store(), below.
- ATOMICOPS_COMPILER_BARRIER();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#if defined(__x86_64__)
-
-// 64-bit low-level operations on 64-bit platform.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev;
- __asm__ __volatile__("lock; cmpxchgq %1,%2"
- : "=a" (prev)
- : "q" (new_value), "m" (*ptr), "0" (old_value)
- : "memory");
- return prev;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- __asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
- : "=r" (new_value)
- : "m" (*ptr), "0" (new_value)
- : "memory");
- return new_value; // Now it's the previous value.
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 temp = increment;
- __asm__ __volatile__("lock; xaddq %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now contains the previous value of *ptr
- return temp + increment;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 temp = increment;
- __asm__ __volatile__("lock; xaddq %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now contains the previous value of *ptr
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return temp + increment;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- ATOMICOPS_COMPILER_BARRIER();
-
- *ptr = value; // An x86 store acts as a release barrier
- // for current AMD/Intel chips as of Jan 2008.
- // See also Acquire_Load(), below.
-
- // When new chips come out, check:
- // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
- // System Programming Guide, Chatper 7: Multiple-processor management,
- // Section 7.2, Memory Ordering.
- // Last seen at:
- // http://developer.intel.com/design/pentium4/manuals/index_new.htm
- //
- // x86 stores/loads fail to act as barriers for a few instructions (clflush
- // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
- // not generated by the compiler, and are rare. Users of these instructions
- // need to know about cache behaviour in any case since all of these involve
- // either flushing cache lines or non-temporal cache hints.
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr; // An x86 load acts as a acquire barrier,
- // for current AMD/Intel chips as of Jan 2008.
- // See also Release_Store(), above.
- ATOMICOPS_COMPILER_BARRIER();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return x;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-#endif // defined(__x86_64__)
-
-} } // namespace v8::internal
-
-#undef ATOMICOPS_COMPILER_BARRIER
-
-#endif // V8_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/src/3rdparty/v8/src/atomicops_internals_x86_macosx.h b/src/3rdparty/v8/src/atomicops_internals_x86_macosx.h
deleted file mode 100644
index bfb02b3..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_x86_macosx.h
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
-#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
-
-#include <libkern/OSAtomic.h>
-
-namespace v8 {
-namespace internal {
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- do {
- if (OSAtomicCompareAndSwap32(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (!OSAtomicCompareAndSwap32(old_value, new_value,
- const_cast<Atomic32*>(ptr)));
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline void MemoryBarrier() {
- OSMemoryBarrier();
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- do {
- if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#ifdef __LP64__
-
-// 64-bit implementation on 64-bit platform
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev_value;
- do {
- if (OSAtomicCompareAndSwap64(old_value, new_value,
- const_cast<Atomic64*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- Atomic64 old_value;
- do {
- old_value = *ptr;
- } while (!OSAtomicCompareAndSwap64(old_value, new_value,
- const_cast<Atomic64*>(ptr)));
- return old_value;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev_value;
- do {
- if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
- const_cast<Atomic64*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- // The lib kern interface does not distinguish between
- // Acquire and Release memory barriers; they are equivalent.
- return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#endif // defined(__LP64__)
-
-// MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different
-// on the Mac, even when they are the same size. We need to explicitly cast
-// from AtomicWord to Atomic32/64 to implement the AtomicWord interface.
-#ifdef __LP64__
-#define AtomicWordCastType Atomic64
-#else
-#define AtomicWordCastType Atomic32
-#endif
-
-inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return NoBarrier_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
- AtomicWord new_value) {
- return NoBarrier_AtomicExchange(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
-}
-
-inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return NoBarrier_AtomicIncrement(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return Barrier_AtomicIncrement(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return v8::internal::Acquire_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return v8::internal::Release_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
- NoBarrier_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
- return v8::internal::Acquire_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
- return v8::internal::Release_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
- return NoBarrier_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
- return v8::internal::Acquire_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
- return v8::internal::Release_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-#undef AtomicWordCastType
-
-} } // namespace v8::internal
-
-#endif // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
diff --git a/src/3rdparty/v8/src/atomicops_internals_x86_msvc.h b/src/3rdparty/v8/src/atomicops_internals_x86_msvc.h
deleted file mode 100644
index 6677e64..0000000
--- a/src/3rdparty/v8/src/atomicops_internals_x86_msvc.h
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
-#define V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
-
-#include "checks.h"
-#include "win32-headers.h"
-
-namespace v8 {
-namespace internal {
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- LONG result = InterlockedCompareExchange(
- reinterpret_cast<volatile LONG*>(ptr),
- static_cast<LONG>(new_value),
- static_cast<LONG>(old_value));
- return static_cast<Atomic32>(result);
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- LONG result = InterlockedExchange(
- reinterpret_cast<volatile LONG*>(ptr),
- static_cast<LONG>(new_value));
- return static_cast<Atomic32>(result);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return InterlockedExchangeAdd(
- reinterpret_cast<volatile LONG*>(ptr),
- static_cast<LONG>(increment)) + increment;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
-#error "We require at least vs2005 for MemoryBarrier"
-#endif
-// For Windows CE there is no MemoryBarrier needed
-#ifdef _WIN32_WCE
-inline void MemoryBarrier() {
-}
-#else
-inline void MemoryBarrier() {
- // We use MemoryBarrier from WinNT.h
- ::MemoryBarrier();
-}
-#endif
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier in this implementation
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value; // works w/o barrier for current Intel chips as of June 2005
- // See comments in Atomic64 version of Release_Store() below.
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#if defined(_WIN64)
-
-// 64-bit low-level operations on 64-bit platform.
-
-STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- PVOID result = InterlockedCompareExchangePointer(
- reinterpret_cast<volatile PVOID*>(ptr),
- reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
- return reinterpret_cast<Atomic64>(result);
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- PVOID result = InterlockedExchangePointer(
- reinterpret_cast<volatile PVOID*>(ptr),
- reinterpret_cast<PVOID>(new_value));
- return reinterpret_cast<Atomic64>(result);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return InterlockedExchangeAdd64(
- reinterpret_cast<volatile LONGLONG*>(ptr),
- static_cast<LONGLONG>(increment)) + increment;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier in this implementation
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value; // works w/o barrier for current Intel chips as of June 2005
-
- // When new chips come out, check:
- // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
- // System Programming Guide, Chatper 7: Multiple-processor management,
- // Section 7.2, Memory Ordering.
- // Last seen at:
- // http://developer.intel.com/design/pentium4/manuals/index_new.htm
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-
-#endif // defined(_WIN64)
-
-} } // namespace v8::internal
-
-#endif // V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
diff --git a/src/3rdparty/v8/src/bignum-dtoa.cc b/src/3rdparty/v8/src/bignum-dtoa.cc
deleted file mode 100644
index a961690..0000000
--- a/src/3rdparty/v8/src/bignum-dtoa.cc
+++ /dev/null
@@ -1,658 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <math.h>
-
-#include "../include/v8stdint.h"
-#include "checks.h"
-#include "utils.h"
-
-#include "bignum-dtoa.h"
-
-#include "bignum.h"
-#include "double.h"
-
-namespace v8 {
-namespace internal {
-
-static int NormalizedExponent(uint64_t significand, int exponent) {
- ASSERT(significand != 0);
- while ((significand & Double::kHiddenBit) == 0) {
- significand = significand << 1;
- exponent = exponent - 1;
- }
- return exponent;
-}
-
-
-// Forward declarations:
-// Returns an estimation of k such that 10^(k-1) <= v < 10^k.
-static int EstimatePower(int exponent);
-// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
-// and denominator.
-static void InitialScaledStartValues(double v,
- int estimated_power,
- bool need_boundary_deltas,
- Bignum* numerator,
- Bignum* denominator,
- Bignum* delta_minus,
- Bignum* delta_plus);
-// Multiplies numerator/denominator so that its values lies in the range 1-10.
-// Returns decimal_point s.t.
-// v = numerator'/denominator' * 10^(decimal_point-1)
-// where numerator' and denominator' are the values of numerator and
-// denominator after the call to this function.
-static void FixupMultiply10(int estimated_power, bool is_even,
- int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus);
-// Generates digits from the left to the right and stops when the generated
-// digits yield the shortest decimal representation of v.
-static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus,
- bool is_even,
- Vector<char> buffer, int* length);
-// Generates 'requested_digits' after the decimal point.
-static void BignumToFixed(int requested_digits, int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Vector<char>(buffer), int* length);
-// Generates 'count' digits of numerator/denominator.
-// Once 'count' digits have been produced rounds the result depending on the
-// remainder (remainders of exactly .5 round upwards). Might update the
-// decimal_point when rounding up (for example for 0.9999).
-static void GenerateCountedDigits(int count, int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Vector<char>(buffer), int* length);
-
-
-void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
- Vector<char> buffer, int* length, int* decimal_point) {
- ASSERT(v > 0);
- ASSERT(!Double(v).IsSpecial());
- uint64_t significand = Double(v).Significand();
- bool is_even = (significand & 1) == 0;
- int exponent = Double(v).Exponent();
- int normalized_exponent = NormalizedExponent(significand, exponent);
- // estimated_power might be too low by 1.
- int estimated_power = EstimatePower(normalized_exponent);
-
- // Shortcut for Fixed.
- // The requested digits correspond to the digits after the point. If the
- // number is much too small, then there is no need in trying to get any
- // digits.
- if (mode == BIGNUM_DTOA_FIXED && -estimated_power - 1 > requested_digits) {
- buffer[0] = '\0';
- *length = 0;
- // Set decimal-point to -requested_digits. This is what Gay does.
- // Note that it should not have any effect anyways since the string is
- // empty.
- *decimal_point = -requested_digits;
- return;
- }
-
- Bignum numerator;
- Bignum denominator;
- Bignum delta_minus;
- Bignum delta_plus;
- // Make sure the bignum can grow large enough. The smallest double equals
- // 4e-324. In this case the denominator needs fewer than 324*4 binary digits.
- // The maximum double is 1.7976931348623157e308 which needs fewer than
- // 308*4 binary digits.
- ASSERT(Bignum::kMaxSignificantBits >= 324*4);
- bool need_boundary_deltas = (mode == BIGNUM_DTOA_SHORTEST);
- InitialScaledStartValues(v, estimated_power, need_boundary_deltas,
- &numerator, &denominator,
- &delta_minus, &delta_plus);
- // We now have v = (numerator / denominator) * 10^estimated_power.
- FixupMultiply10(estimated_power, is_even, decimal_point,
- &numerator, &denominator,
- &delta_minus, &delta_plus);
- // We now have v = (numerator / denominator) * 10^(decimal_point-1), and
- // 1 <= (numerator + delta_plus) / denominator < 10
- switch (mode) {
- case BIGNUM_DTOA_SHORTEST:
- GenerateShortestDigits(&numerator, &denominator,
- &delta_minus, &delta_plus,
- is_even, buffer, length);
- break;
- case BIGNUM_DTOA_FIXED:
- BignumToFixed(requested_digits, decimal_point,
- &numerator, &denominator,
- buffer, length);
- break;
- case BIGNUM_DTOA_PRECISION:
- GenerateCountedDigits(requested_digits, decimal_point,
- &numerator, &denominator,
- buffer, length);
- break;
- default:
- UNREACHABLE();
- }
- buffer[*length] = '\0';
-}
-
-
-// The procedure starts generating digits from the left to the right and stops
-// when the generated digits yield the shortest decimal representation of v. A
-// decimal representation of v is a number lying closer to v than to any other
-// double, so it converts to v when read.
-//
-// This is true if d, the decimal representation, is between m- and m+, the
-// upper and lower boundaries. d must be strictly between them if !is_even.
-// m- := (numerator - delta_minus) / denominator
-// m+ := (numerator + delta_plus) / denominator
-//
-// Precondition: 0 <= (numerator+delta_plus) / denominator < 10.
-// If 1 <= (numerator+delta_plus) / denominator < 10 then no leading 0 digit
-// will be produced. This should be the standard precondition.
-static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus,
- bool is_even,
- Vector<char> buffer, int* length) {
- // Small optimization: if delta_minus and delta_plus are the same just reuse
- // one of the two bignums.
- if (Bignum::Equal(*delta_minus, *delta_plus)) {
- delta_plus = delta_minus;
- }
- *length = 0;
- while (true) {
- uint16_t digit;
- digit = numerator->DivideModuloIntBignum(*denominator);
- ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
- // digit = numerator / denominator (integer division).
- // numerator = numerator % denominator.
- buffer[(*length)++] = digit + '0';
-
- // Can we stop already?
- // If the remainder of the division is less than the distance to the lower
- // boundary we can stop. In this case we simply round down (discarding the
- // remainder).
- // Similarly we test if we can round up (using the upper boundary).
- bool in_delta_room_minus;
- bool in_delta_room_plus;
- if (is_even) {
- in_delta_room_minus = Bignum::LessEqual(*numerator, *delta_minus);
- } else {
- in_delta_room_minus = Bignum::Less(*numerator, *delta_minus);
- }
- if (is_even) {
- in_delta_room_plus =
- Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
- } else {
- in_delta_room_plus =
- Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
- }
- if (!in_delta_room_minus && !in_delta_room_plus) {
- // Prepare for next iteration.
- numerator->Times10();
- delta_minus->Times10();
- // We optimized delta_plus to be equal to delta_minus (if they share the
- // same value). So don't multiply delta_plus if they point to the same
- // object.
- if (delta_minus != delta_plus) {
- delta_plus->Times10();
- }
- } else if (in_delta_room_minus && in_delta_room_plus) {
- // Let's see if 2*numerator < denominator.
- // If yes, then the next digit would be < 5 and we can round down.
- int compare = Bignum::PlusCompare(*numerator, *numerator, *denominator);
- if (compare < 0) {
- // Remaining digits are less than .5. -> Round down (== do nothing).
- } else if (compare > 0) {
- // Remaining digits are more than .5 of denominator. -> Round up.
- // Note that the last digit could not be a '9' as otherwise the whole
- // loop would have stopped earlier.
- // We still have an assert here in case the preconditions were not
- // satisfied.
- ASSERT(buffer[(*length) - 1] != '9');
- buffer[(*length) - 1]++;
- } else {
- // Halfway case.
- // TODO(floitsch): need a way to solve half-way cases.
- // For now let's round towards even (since this is what Gay seems to
- // do).
-
- if ((buffer[(*length) - 1] - '0') % 2 == 0) {
- // Round down => Do nothing.
- } else {
- ASSERT(buffer[(*length) - 1] != '9');
- buffer[(*length) - 1]++;
- }
- }
- return;
- } else if (in_delta_room_minus) {
- // Round down (== do nothing).
- return;
- } else { // in_delta_room_plus
- // Round up.
- // Note again that the last digit could not be '9' since this would have
- // stopped the loop earlier.
- // We still have an ASSERT here, in case the preconditions were not
- // satisfied.
- ASSERT(buffer[(*length) -1] != '9');
- buffer[(*length) - 1]++;
- return;
- }
- }
-}
-
-
-// Let v = numerator / denominator < 10.
-// Then we generate 'count' digits of d = x.xxxxx... (without the decimal point)
-// from left to right. Once 'count' digits have been produced we decide wether
-// to round up or down. Remainders of exactly .5 round upwards. Numbers such
-// as 9.999999 propagate a carry all the way, and change the
-// exponent (decimal_point), when rounding upwards.
-static void GenerateCountedDigits(int count, int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Vector<char>(buffer), int* length) {
- ASSERT(count >= 0);
- for (int i = 0; i < count - 1; ++i) {
- uint16_t digit;
- digit = numerator->DivideModuloIntBignum(*denominator);
- ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
- // digit = numerator / denominator (integer division).
- // numerator = numerator % denominator.
- buffer[i] = digit + '0';
- // Prepare for next iteration.
- numerator->Times10();
- }
- // Generate the last digit.
- uint16_t digit;
- digit = numerator->DivideModuloIntBignum(*denominator);
- if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
- digit++;
- }
- buffer[count - 1] = digit + '0';
- // Correct bad digits (in case we had a sequence of '9's). Propagate the
- // carry until we hat a non-'9' or til we reach the first digit.
- for (int i = count - 1; i > 0; --i) {
- if (buffer[i] != '0' + 10) break;
- buffer[i] = '0';
- buffer[i - 1]++;
- }
- if (buffer[0] == '0' + 10) {
- // Propagate a carry past the top place.
- buffer[0] = '1';
- (*decimal_point)++;
- }
- *length = count;
-}
-
-
-// Generates 'requested_digits' after the decimal point. It might omit
-// trailing '0's. If the input number is too small then no digits at all are
-// generated (ex.: 2 fixed digits for 0.00001).
-//
-// Input verifies: 1 <= (numerator + delta) / denominator < 10.
-static void BignumToFixed(int requested_digits, int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Vector<char>(buffer), int* length) {
- // Note that we have to look at more than just the requested_digits, since
- // a number could be rounded up. Example: v=0.5 with requested_digits=0.
- // Even though the power of v equals 0 we can't just stop here.
- if (-(*decimal_point) > requested_digits) {
- // The number is definitively too small.
- // Ex: 0.001 with requested_digits == 1.
- // Set decimal-point to -requested_digits. This is what Gay does.
- // Note that it should not have any effect anyways since the string is
- // empty.
- *decimal_point = -requested_digits;
- *length = 0;
- return;
- } else if (-(*decimal_point) == requested_digits) {
- // We only need to verify if the number rounds down or up.
- // Ex: 0.04 and 0.06 with requested_digits == 1.
- ASSERT(*decimal_point == -requested_digits);
- // Initially the fraction lies in range (1, 10]. Multiply the denominator
- // by 10 so that we can compare more easily.
- denominator->Times10();
- if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
- // If the fraction is >= 0.5 then we have to include the rounded
- // digit.
- buffer[0] = '1';
- *length = 1;
- (*decimal_point)++;
- } else {
- // Note that we caught most of similar cases earlier.
- *length = 0;
- }
- return;
- } else {
- // The requested digits correspond to the digits after the point.
- // The variable 'needed_digits' includes the digits before the point.
- int needed_digits = (*decimal_point) + requested_digits;
- GenerateCountedDigits(needed_digits, decimal_point,
- numerator, denominator,
- buffer, length);
- }
-}
-
-
-// Returns an estimation of k such that 10^(k-1) <= v < 10^k where
-// v = f * 2^exponent and 2^52 <= f < 2^53.
-// v is hence a normalized double with the given exponent. The output is an
-// approximation for the exponent of the decimal approimation .digits * 10^k.
-//
-// The result might undershoot by 1 in which case 10^k <= v < 10^k+1.
-// Note: this property holds for v's upper boundary m+ too.
-// 10^k <= m+ < 10^k+1.
-// (see explanation below).
-//
-// Examples:
-// EstimatePower(0) => 16
-// EstimatePower(-52) => 0
-//
-// Note: e >= 0 => EstimatedPower(e) > 0. No similar claim can be made for e<0.
-static int EstimatePower(int exponent) {
- // This function estimates log10 of v where v = f*2^e (with e == exponent).
- // Note that 10^floor(log10(v)) <= v, but v <= 10^ceil(log10(v)).
- // Note that f is bounded by its container size. Let p = 53 (the double's
- // significand size). Then 2^(p-1) <= f < 2^p.
- //
- // Given that log10(v) == log2(v)/log2(10) and e+(len(f)-1) is quite close
- // to log2(v) the function is simplified to (e+(len(f)-1)/log2(10)).
- // The computed number undershoots by less than 0.631 (when we compute log3
- // and not log10).
- //
- // Optimization: since we only need an approximated result this computation
- // can be performed on 64 bit integers. On x86/x64 architecture the speedup is
- // not really measurable, though.
- //
- // Since we want to avoid overshooting we decrement by 1e10 so that
- // floating-point imprecisions don't affect us.
- //
- // Explanation for v's boundary m+: the computation takes advantage of
- // the fact that 2^(p-1) <= f < 2^p. Boundaries still satisfy this requirement
- // (even for denormals where the delta can be much more important).
-
- const double k1Log10 = 0.30102999566398114; // 1/lg(10)
-
- // For doubles len(f) == 53 (don't forget the hidden bit).
- const int kSignificandSize = 53;
- double estimate = ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10);
- return static_cast<int>(estimate);
-}
-
-
-// See comments for InitialScaledStartValues.
-static void InitialScaledStartValuesPositiveExponent(
- double v, int estimated_power, bool need_boundary_deltas,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus) {
- // A positive exponent implies a positive power.
- ASSERT(estimated_power >= 0);
- // Since the estimated_power is positive we simply multiply the denominator
- // by 10^estimated_power.
-
- // numerator = v.
- numerator->AssignUInt64(Double(v).Significand());
- numerator->ShiftLeft(Double(v).Exponent());
- // denominator = 10^estimated_power.
- denominator->AssignPowerUInt16(10, estimated_power);
-
- if (need_boundary_deltas) {
- // Introduce a common denominator so that the deltas to the boundaries are
- // integers.
- denominator->ShiftLeft(1);
- numerator->ShiftLeft(1);
- // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
- // denominator (of 2) delta_plus equals 2^e.
- delta_plus->AssignUInt16(1);
- delta_plus->ShiftLeft(Double(v).Exponent());
- // Same for delta_minus (with adjustments below if f == 2^p-1).
- delta_minus->AssignUInt16(1);
- delta_minus->ShiftLeft(Double(v).Exponent());
-
- // If the significand (without the hidden bit) is 0, then the lower
- // boundary is closer than just half a ulp (unit in the last place).
- // There is only one exception: if the next lower number is a denormal then
- // the distance is 1 ulp. This cannot be the case for exponent >= 0 (but we
- // have to test it in the other function where exponent < 0).
- uint64_t v_bits = Double(v).AsUint64();
- if ((v_bits & Double::kSignificandMask) == 0) {
- // The lower boundary is closer at half the distance of "normal" numbers.
- // Increase the common denominator and adapt all but the delta_minus.
- denominator->ShiftLeft(1); // *2
- numerator->ShiftLeft(1); // *2
- delta_plus->ShiftLeft(1); // *2
- }
- }
-}
-
-
-// See comments for InitialScaledStartValues
-static void InitialScaledStartValuesNegativeExponentPositivePower(
- double v, int estimated_power, bool need_boundary_deltas,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus) {
- uint64_t significand = Double(v).Significand();
- int exponent = Double(v).Exponent();
- // v = f * 2^e with e < 0, and with estimated_power >= 0.
- // This means that e is close to 0 (have a look at how estimated_power is
- // computed).
-
- // numerator = significand
- // since v = significand * 2^exponent this is equivalent to
- // numerator = v * / 2^-exponent
- numerator->AssignUInt64(significand);
- // denominator = 10^estimated_power * 2^-exponent (with exponent < 0)
- denominator->AssignPowerUInt16(10, estimated_power);
- denominator->ShiftLeft(-exponent);
-
- if (need_boundary_deltas) {
- // Introduce a common denominator so that the deltas to the boundaries are
- // integers.
- denominator->ShiftLeft(1);
- numerator->ShiftLeft(1);
- // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
- // denominator (of 2) delta_plus equals 2^e.
- // Given that the denominator already includes v's exponent the distance
- // to the boundaries is simply 1.
- delta_plus->AssignUInt16(1);
- // Same for delta_minus (with adjustments below if f == 2^p-1).
- delta_minus->AssignUInt16(1);
-
- // If the significand (without the hidden bit) is 0, then the lower
- // boundary is closer than just one ulp (unit in the last place).
- // There is only one exception: if the next lower number is a denormal
- // then the distance is 1 ulp. Since the exponent is close to zero
- // (otherwise estimated_power would have been negative) this cannot happen
- // here either.
- uint64_t v_bits = Double(v).AsUint64();
- if ((v_bits & Double::kSignificandMask) == 0) {
- // The lower boundary is closer at half the distance of "normal" numbers.
- // Increase the denominator and adapt all but the delta_minus.
- denominator->ShiftLeft(1); // *2
- numerator->ShiftLeft(1); // *2
- delta_plus->ShiftLeft(1); // *2
- }
- }
-}
-
-
-// See comments for InitialScaledStartValues
-static void InitialScaledStartValuesNegativeExponentNegativePower(
- double v, int estimated_power, bool need_boundary_deltas,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus) {
- const uint64_t kMinimalNormalizedExponent =
- V8_2PART_UINT64_C(0x00100000, 00000000);
- uint64_t significand = Double(v).Significand();
- int exponent = Double(v).Exponent();
- // Instead of multiplying the denominator with 10^estimated_power we
- // multiply all values (numerator and deltas) by 10^-estimated_power.
-
- // Use numerator as temporary container for power_ten.
- Bignum* power_ten = numerator;
- power_ten->AssignPowerUInt16(10, -estimated_power);
-
- if (need_boundary_deltas) {
- // Since power_ten == numerator we must make a copy of 10^estimated_power
- // before we complete the computation of the numerator.
- // delta_plus = delta_minus = 10^estimated_power
- delta_plus->AssignBignum(*power_ten);
- delta_minus->AssignBignum(*power_ten);
- }
-
- // numerator = significand * 2 * 10^-estimated_power
- // since v = significand * 2^exponent this is equivalent to
- // numerator = v * 10^-estimated_power * 2 * 2^-exponent.
- // Remember: numerator has been abused as power_ten. So no need to assign it
- // to itself.
- ASSERT(numerator == power_ten);
- numerator->MultiplyByUInt64(significand);
-
- // denominator = 2 * 2^-exponent with exponent < 0.
- denominator->AssignUInt16(1);
- denominator->ShiftLeft(-exponent);
-
- if (need_boundary_deltas) {
- // Introduce a common denominator so that the deltas to the boundaries are
- // integers.
- numerator->ShiftLeft(1);
- denominator->ShiftLeft(1);
- // With this shift the boundaries have their correct value, since
- // delta_plus = 10^-estimated_power, and
- // delta_minus = 10^-estimated_power.
- // These assignments have been done earlier.
-
- // The special case where the lower boundary is twice as close.
- // This time we have to look out for the exception too.
- uint64_t v_bits = Double(v).AsUint64();
- if ((v_bits & Double::kSignificandMask) == 0 &&
- // The only exception where a significand == 0 has its boundaries at
- // "normal" distances:
- (v_bits & Double::kExponentMask) != kMinimalNormalizedExponent) {
- numerator->ShiftLeft(1); // *2
- denominator->ShiftLeft(1); // *2
- delta_plus->ShiftLeft(1); // *2
- }
- }
-}
-
-
-// Let v = significand * 2^exponent.
-// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
-// and denominator. The functions GenerateShortestDigits and
-// GenerateCountedDigits will then convert this ratio to its decimal
-// representation d, with the required accuracy.
-// Then d * 10^estimated_power is the representation of v.
-// (Note: the fraction and the estimated_power might get adjusted before
-// generating the decimal representation.)
-//
-// The initial start values consist of:
-// - a scaled numerator: s.t. numerator/denominator == v / 10^estimated_power.
-// - a scaled (common) denominator.
-// optionally (used by GenerateShortestDigits to decide if it has the shortest
-// decimal converting back to v):
-// - v - m-: the distance to the lower boundary.
-// - m+ - v: the distance to the upper boundary.
-//
-// v, m+, m-, and therefore v - m- and m+ - v all share the same denominator.
-//
-// Let ep == estimated_power, then the returned values will satisfy:
-// v / 10^ep = numerator / denominator.
-// v's boundarys m- and m+:
-// m- / 10^ep == v / 10^ep - delta_minus / denominator
-// m+ / 10^ep == v / 10^ep + delta_plus / denominator
-// Or in other words:
-// m- == v - delta_minus * 10^ep / denominator;
-// m+ == v + delta_plus * 10^ep / denominator;
-//
-// Since 10^(k-1) <= v < 10^k (with k == estimated_power)
-// or 10^k <= v < 10^(k+1)
-// we then have 0.1 <= numerator/denominator < 1
-// or 1 <= numerator/denominator < 10
-//
-// It is then easy to kickstart the digit-generation routine.
-//
-// The boundary-deltas are only filled if need_boundary_deltas is set.
-static void InitialScaledStartValues(double v,
- int estimated_power,
- bool need_boundary_deltas,
- Bignum* numerator,
- Bignum* denominator,
- Bignum* delta_minus,
- Bignum* delta_plus) {
- if (Double(v).Exponent() >= 0) {
- InitialScaledStartValuesPositiveExponent(
- v, estimated_power, need_boundary_deltas,
- numerator, denominator, delta_minus, delta_plus);
- } else if (estimated_power >= 0) {
- InitialScaledStartValuesNegativeExponentPositivePower(
- v, estimated_power, need_boundary_deltas,
- numerator, denominator, delta_minus, delta_plus);
- } else {
- InitialScaledStartValuesNegativeExponentNegativePower(
- v, estimated_power, need_boundary_deltas,
- numerator, denominator, delta_minus, delta_plus);
- }
-}
-
-
-// This routine multiplies numerator/denominator so that its values lies in the
-// range 1-10. That is after a call to this function we have:
-// 1 <= (numerator + delta_plus) /denominator < 10.
-// Let numerator the input before modification and numerator' the argument
-// after modification, then the output-parameter decimal_point is such that
-// numerator / denominator * 10^estimated_power ==
-// numerator' / denominator' * 10^(decimal_point - 1)
-// In some cases estimated_power was too low, and this is already the case. We
-// then simply adjust the power so that 10^(k-1) <= v < 10^k (with k ==
-// estimated_power) but do not touch the numerator or denominator.
-// Otherwise the routine multiplies the numerator and the deltas by 10.
-static void FixupMultiply10(int estimated_power, bool is_even,
- int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus) {
- bool in_range;
- if (is_even) {
- // For IEEE doubles half-way cases (in decimal system numbers ending with 5)
- // are rounded to the closest floating-point number with even significand.
- in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
- } else {
- in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
- }
- if (in_range) {
- // Since numerator + delta_plus >= denominator we already have
- // 1 <= numerator/denominator < 10. Simply update the estimated_power.
- *decimal_point = estimated_power + 1;
- } else {
- *decimal_point = estimated_power;
- numerator->Times10();
- if (Bignum::Equal(*delta_minus, *delta_plus)) {
- delta_minus->Times10();
- delta_plus->AssignBignum(*delta_minus);
- } else {
- delta_minus->Times10();
- delta_plus->Times10();
- }
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/bignum-dtoa.h b/src/3rdparty/v8/src/bignum-dtoa.h
deleted file mode 100644
index 93ec1f7..0000000
--- a/src/3rdparty/v8/src/bignum-dtoa.h
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_BIGNUM_DTOA_H_
-#define V8_BIGNUM_DTOA_H_
-
-namespace v8 {
-namespace internal {
-
-enum BignumDtoaMode {
- // Return the shortest correct representation.
- // For example the output of 0.299999999999999988897 is (the less accurate but
- // correct) 0.3.
- BIGNUM_DTOA_SHORTEST,
- // Return a fixed number of digits after the decimal point.
- // For instance fixed(0.1, 4) becomes 0.1000
- // If the input number is big, the output will be big.
- BIGNUM_DTOA_FIXED,
- // Return a fixed number of digits, no matter what the exponent is.
- BIGNUM_DTOA_PRECISION
-};
-
-// Converts the given double 'v' to ASCII.
-// The result should be interpreted as buffer * 10^(point-length).
-// The buffer will be null-terminated.
-//
-// The input v must be > 0 and different from NaN, and Infinity.
-//
-// The output depends on the given mode:
-// - SHORTEST: produce the least amount of digits for which the internal
-// identity requirement is still satisfied. If the digits are printed
-// (together with the correct exponent) then reading this number will give
-// 'v' again. The buffer will choose the representation that is closest to
-// 'v'. If there are two at the same distance, than the number is round up.
-// In this mode the 'requested_digits' parameter is ignored.
-// - FIXED: produces digits necessary to print a given number with
-// 'requested_digits' digits after the decimal point. The produced digits
-// might be too short in which case the caller has to fill the gaps with '0's.
-// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
-// Halfway cases are rounded up. The call toFixed(0.15, 2) thus returns
-// buffer="2", point=0.
-// Note: the length of the returned buffer has no meaning wrt the significance
-// of its digits. That is, just because it contains '0's does not mean that
-// any other digit would not satisfy the internal identity requirement.
-// - PRECISION: produces 'requested_digits' where the first digit is not '0'.
-// Even though the length of produced digits usually equals
-// 'requested_digits', the function is allowed to return fewer digits, in
-// which case the caller has to fill the missing digits with '0's.
-// Halfway cases are again rounded up.
-// 'BignumDtoa' expects the given buffer to be big enough to hold all digits
-// and a terminating null-character.
-void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
- Vector<char> buffer, int* length, int* point);
-
-} } // namespace v8::internal
-
-#endif // V8_BIGNUM_DTOA_H_
diff --git a/src/3rdparty/v8/src/bignum.cc b/src/3rdparty/v8/src/bignum.cc
deleted file mode 100644
index 9436322..0000000
--- a/src/3rdparty/v8/src/bignum.cc
+++ /dev/null
@@ -1,767 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "utils.h"
-#include "bignum.h"
-
-namespace v8 {
-namespace internal {
-
-Bignum::Bignum()
- : bigits_(bigits_buffer_, kBigitCapacity), used_digits_(0), exponent_(0) {
- for (int i = 0; i < kBigitCapacity; ++i) {
- bigits_[i] = 0;
- }
-}
-
-
-template<typename S>
-static int BitSize(S value) {
- return 8 * sizeof(value);
-}
-
-// Guaranteed to lie in one Bigit.
-void Bignum::AssignUInt16(uint16_t value) {
- ASSERT(kBigitSize >= BitSize(value));
- Zero();
- if (value == 0) return;
-
- EnsureCapacity(1);
- bigits_[0] = value;
- used_digits_ = 1;
-}
-
-
-void Bignum::AssignUInt64(uint64_t value) {
- const int kUInt64Size = 64;
-
- Zero();
- if (value == 0) return;
-
- int needed_bigits = kUInt64Size / kBigitSize + 1;
- EnsureCapacity(needed_bigits);
- for (int i = 0; i < needed_bigits; ++i) {
- bigits_[i] = static_cast<Chunk>(value & kBigitMask);
- value = value >> kBigitSize;
- }
- used_digits_ = needed_bigits;
- Clamp();
-}
-
-
-void Bignum::AssignBignum(const Bignum& other) {
- exponent_ = other.exponent_;
- for (int i = 0; i < other.used_digits_; ++i) {
- bigits_[i] = other.bigits_[i];
- }
- // Clear the excess digits (if there were any).
- for (int i = other.used_digits_; i < used_digits_; ++i) {
- bigits_[i] = 0;
- }
- used_digits_ = other.used_digits_;
-}
-
-
-static uint64_t ReadUInt64(Vector<const char> buffer,
- int from,
- int digits_to_read) {
- uint64_t result = 0;
- for (int i = from; i < from + digits_to_read; ++i) {
- int digit = buffer[i] - '0';
- ASSERT(0 <= digit && digit <= 9);
- result = result * 10 + digit;
- }
- return result;
-}
-
-
-void Bignum::AssignDecimalString(Vector<const char> value) {
- // 2^64 = 18446744073709551616 > 10^19
- const int kMaxUint64DecimalDigits = 19;
- Zero();
- int length = value.length();
- int pos = 0;
- // Let's just say that each digit needs 4 bits.
- while (length >= kMaxUint64DecimalDigits) {
- uint64_t digits = ReadUInt64(value, pos, kMaxUint64DecimalDigits);
- pos += kMaxUint64DecimalDigits;
- length -= kMaxUint64DecimalDigits;
- MultiplyByPowerOfTen(kMaxUint64DecimalDigits);
- AddUInt64(digits);
- }
- uint64_t digits = ReadUInt64(value, pos, length);
- MultiplyByPowerOfTen(length);
- AddUInt64(digits);
- Clamp();
-}
-
-
-static int HexCharValue(char c) {
- if ('0' <= c && c <= '9') return c - '0';
- if ('a' <= c && c <= 'f') return 10 + c - 'a';
- if ('A' <= c && c <= 'F') return 10 + c - 'A';
- UNREACHABLE();
- return 0; // To make compiler happy.
-}
-
-
-void Bignum::AssignHexString(Vector<const char> value) {
- Zero();
- int length = value.length();
-
- int needed_bigits = length * 4 / kBigitSize + 1;
- EnsureCapacity(needed_bigits);
- int string_index = length - 1;
- for (int i = 0; i < needed_bigits - 1; ++i) {
- // These bigits are guaranteed to be "full".
- Chunk current_bigit = 0;
- for (int j = 0; j < kBigitSize / 4; j++) {
- current_bigit += HexCharValue(value[string_index--]) << (j * 4);
- }
- bigits_[i] = current_bigit;
- }
- used_digits_ = needed_bigits - 1;
-
- Chunk most_significant_bigit = 0; // Could be = 0;
- for (int j = 0; j <= string_index; ++j) {
- most_significant_bigit <<= 4;
- most_significant_bigit += HexCharValue(value[j]);
- }
- if (most_significant_bigit != 0) {
- bigits_[used_digits_] = most_significant_bigit;
- used_digits_++;
- }
- Clamp();
-}
-
-
-void Bignum::AddUInt64(uint64_t operand) {
- if (operand == 0) return;
- Bignum other;
- other.AssignUInt64(operand);
- AddBignum(other);
-}
-
-
-void Bignum::AddBignum(const Bignum& other) {
- ASSERT(IsClamped());
- ASSERT(other.IsClamped());
-
- // If this has a greater exponent than other append zero-bigits to this.
- // After this call exponent_ <= other.exponent_.
- Align(other);
-
- // There are two possibilities:
- // aaaaaaaaaaa 0000 (where the 0s represent a's exponent)
- // bbbbb 00000000
- // ----------------
- // ccccccccccc 0000
- // or
- // aaaaaaaaaa 0000
- // bbbbbbbbb 0000000
- // -----------------
- // cccccccccccc 0000
- // In both cases we might need a carry bigit.
-
- EnsureCapacity(1 + Max(BigitLength(), other.BigitLength()) - exponent_);
- Chunk carry = 0;
- int bigit_pos = other.exponent_ - exponent_;
- ASSERT(bigit_pos >= 0);
- for (int i = 0; i < other.used_digits_; ++i) {
- Chunk sum = bigits_[bigit_pos] + other.bigits_[i] + carry;
- bigits_[bigit_pos] = sum & kBigitMask;
- carry = sum >> kBigitSize;
- bigit_pos++;
- }
-
- while (carry != 0) {
- Chunk sum = bigits_[bigit_pos] + carry;
- bigits_[bigit_pos] = sum & kBigitMask;
- carry = sum >> kBigitSize;
- bigit_pos++;
- }
- used_digits_ = Max(bigit_pos, used_digits_);
- ASSERT(IsClamped());
-}
-
-
-void Bignum::SubtractBignum(const Bignum& other) {
- ASSERT(IsClamped());
- ASSERT(other.IsClamped());
- // We require this to be bigger than other.
- ASSERT(LessEqual(other, *this));
-
- Align(other);
-
- int offset = other.exponent_ - exponent_;
- Chunk borrow = 0;
- int i;
- for (i = 0; i < other.used_digits_; ++i) {
- ASSERT((borrow == 0) || (borrow == 1));
- Chunk difference = bigits_[i + offset] - other.bigits_[i] - borrow;
- bigits_[i + offset] = difference & kBigitMask;
- borrow = difference >> (kChunkSize - 1);
- }
- while (borrow != 0) {
- Chunk difference = bigits_[i + offset] - borrow;
- bigits_[i + offset] = difference & kBigitMask;
- borrow = difference >> (kChunkSize - 1);
- ++i;
- }
- Clamp();
-}
-
-
-void Bignum::ShiftLeft(int shift_amount) {
- if (used_digits_ == 0) return;
- exponent_ += shift_amount / kBigitSize;
- int local_shift = shift_amount % kBigitSize;
- EnsureCapacity(used_digits_ + 1);
- BigitsShiftLeft(local_shift);
-}
-
-
-void Bignum::MultiplyByUInt32(uint32_t factor) {
- if (factor == 1) return;
- if (factor == 0) {
- Zero();
- return;
- }
- if (used_digits_ == 0) return;
-
- // The product of a bigit with the factor is of size kBigitSize + 32.
- // Assert that this number + 1 (for the carry) fits into double chunk.
- ASSERT(kDoubleChunkSize >= kBigitSize + 32 + 1);
- DoubleChunk carry = 0;
- for (int i = 0; i < used_digits_; ++i) {
- DoubleChunk product = static_cast<DoubleChunk>(factor) * bigits_[i] + carry;
- bigits_[i] = static_cast<Chunk>(product & kBigitMask);
- carry = (product >> kBigitSize);
- }
- while (carry != 0) {
- EnsureCapacity(used_digits_ + 1);
- bigits_[used_digits_] = static_cast<Chunk>(carry & kBigitMask);
- used_digits_++;
- carry >>= kBigitSize;
- }
-}
-
-
-void Bignum::MultiplyByUInt64(uint64_t factor) {
- if (factor == 1) return;
- if (factor == 0) {
- Zero();
- return;
- }
- ASSERT(kBigitSize < 32);
- uint64_t carry = 0;
- uint64_t low = factor & 0xFFFFFFFF;
- uint64_t high = factor >> 32;
- for (int i = 0; i < used_digits_; ++i) {
- uint64_t product_low = low * bigits_[i];
- uint64_t product_high = high * bigits_[i];
- uint64_t tmp = (carry & kBigitMask) + product_low;
- bigits_[i] = static_cast<Chunk>(tmp & kBigitMask);
- carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
- (product_high << (32 - kBigitSize));
- }
- while (carry != 0) {
- EnsureCapacity(used_digits_ + 1);
- bigits_[used_digits_] = static_cast<Chunk>(carry & kBigitMask);
- used_digits_++;
- carry >>= kBigitSize;
- }
-}
-
-
-void Bignum::MultiplyByPowerOfTen(int exponent) {
- const uint64_t kFive27 = V8_2PART_UINT64_C(0x6765c793, fa10079d);
- const uint16_t kFive1 = 5;
- const uint16_t kFive2 = kFive1 * 5;
- const uint16_t kFive3 = kFive2 * 5;
- const uint16_t kFive4 = kFive3 * 5;
- const uint16_t kFive5 = kFive4 * 5;
- const uint16_t kFive6 = kFive5 * 5;
- const uint32_t kFive7 = kFive6 * 5;
- const uint32_t kFive8 = kFive7 * 5;
- const uint32_t kFive9 = kFive8 * 5;
- const uint32_t kFive10 = kFive9 * 5;
- const uint32_t kFive11 = kFive10 * 5;
- const uint32_t kFive12 = kFive11 * 5;
- const uint32_t kFive13 = kFive12 * 5;
- const uint32_t kFive1_to_12[] =
- { kFive1, kFive2, kFive3, kFive4, kFive5, kFive6,
- kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 };
-
- ASSERT(exponent >= 0);
- if (exponent == 0) return;
- if (used_digits_ == 0) return;
-
- // We shift by exponent at the end just before returning.
- int remaining_exponent = exponent;
- while (remaining_exponent >= 27) {
- MultiplyByUInt64(kFive27);
- remaining_exponent -= 27;
- }
- while (remaining_exponent >= 13) {
- MultiplyByUInt32(kFive13);
- remaining_exponent -= 13;
- }
- if (remaining_exponent > 0) {
- MultiplyByUInt32(kFive1_to_12[remaining_exponent - 1]);
- }
- ShiftLeft(exponent);
-}
-
-
-void Bignum::Square() {
- ASSERT(IsClamped());
- int product_length = 2 * used_digits_;
- EnsureCapacity(product_length);
-
- // Comba multiplication: compute each column separately.
- // Example: r = a2a1a0 * b2b1b0.
- // r = 1 * a0b0 +
- // 10 * (a1b0 + a0b1) +
- // 100 * (a2b0 + a1b1 + a0b2) +
- // 1000 * (a2b1 + a1b2) +
- // 10000 * a2b2
- //
- // In the worst case we have to accumulate nb-digits products of digit*digit.
- //
- // Assert that the additional number of bits in a DoubleChunk are enough to
- // sum up used_digits of Bigit*Bigit.
- if ((1 << (2 * (kChunkSize - kBigitSize))) <= used_digits_) {
- UNIMPLEMENTED();
- }
- DoubleChunk accumulator = 0;
- // First shift the digits so we don't overwrite them.
- int copy_offset = used_digits_;
- for (int i = 0; i < used_digits_; ++i) {
- bigits_[copy_offset + i] = bigits_[i];
- }
- // We have two loops to avoid some 'if's in the loop.
- for (int i = 0; i < used_digits_; ++i) {
- // Process temporary digit i with power i.
- // The sum of the two indices must be equal to i.
- int bigit_index1 = i;
- int bigit_index2 = 0;
- // Sum all of the sub-products.
- while (bigit_index1 >= 0) {
- Chunk chunk1 = bigits_[copy_offset + bigit_index1];
- Chunk chunk2 = bigits_[copy_offset + bigit_index2];
- accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
- bigit_index1--;
- bigit_index2++;
- }
- bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
- accumulator >>= kBigitSize;
- }
- for (int i = used_digits_; i < product_length; ++i) {
- int bigit_index1 = used_digits_ - 1;
- int bigit_index2 = i - bigit_index1;
- // Invariant: sum of both indices is again equal to i.
- // Inner loop runs 0 times on last iteration, emptying accumulator.
- while (bigit_index2 < used_digits_) {
- Chunk chunk1 = bigits_[copy_offset + bigit_index1];
- Chunk chunk2 = bigits_[copy_offset + bigit_index2];
- accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
- bigit_index1--;
- bigit_index2++;
- }
- // The overwritten bigits_[i] will never be read in further loop iterations,
- // because bigit_index1 and bigit_index2 are always greater
- // than i - used_digits_.
- bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
- accumulator >>= kBigitSize;
- }
- // Since the result was guaranteed to lie inside the number the
- // accumulator must be 0 now.
- ASSERT(accumulator == 0);
-
- // Don't forget to update the used_digits and the exponent.
- used_digits_ = product_length;
- exponent_ *= 2;
- Clamp();
-}
-
-
-void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) {
- ASSERT(base != 0);
- ASSERT(power_exponent >= 0);
- if (power_exponent == 0) {
- AssignUInt16(1);
- return;
- }
- Zero();
- int shifts = 0;
- // We expect base to be in range 2-32, and most often to be 10.
- // It does not make much sense to implement different algorithms for counting
- // the bits.
- while ((base & 1) == 0) {
- base >>= 1;
- shifts++;
- }
- int bit_size = 0;
- int tmp_base = base;
- while (tmp_base != 0) {
- tmp_base >>= 1;
- bit_size++;
- }
- int final_size = bit_size * power_exponent;
- // 1 extra bigit for the shifting, and one for rounded final_size.
- EnsureCapacity(final_size / kBigitSize + 2);
-
- // Left to Right exponentiation.
- int mask = 1;
- while (power_exponent >= mask) mask <<= 1;
-
- // The mask is now pointing to the bit above the most significant 1-bit of
- // power_exponent.
- // Get rid of first 1-bit;
- mask >>= 2;
- uint64_t this_value = base;
-
- bool delayed_multipliciation = false;
- const uint64_t max_32bits = 0xFFFFFFFF;
- while (mask != 0 && this_value <= max_32bits) {
- this_value = this_value * this_value;
- // Verify that there is enough space in this_value to perform the
- // multiplication. The first bit_size bits must be 0.
- if ((power_exponent & mask) != 0) {
- uint64_t base_bits_mask =
- ~((static_cast<uint64_t>(1) << (64 - bit_size)) - 1);
- bool high_bits_zero = (this_value & base_bits_mask) == 0;
- if (high_bits_zero) {
- this_value *= base;
- } else {
- delayed_multipliciation = true;
- }
- }
- mask >>= 1;
- }
- AssignUInt64(this_value);
- if (delayed_multipliciation) {
- MultiplyByUInt32(base);
- }
-
- // Now do the same thing as a bignum.
- while (mask != 0) {
- Square();
- if ((power_exponent & mask) != 0) {
- MultiplyByUInt32(base);
- }
- mask >>= 1;
- }
-
- // And finally add the saved shifts.
- ShiftLeft(shifts * power_exponent);
-}
-
-
-// Precondition: this/other < 16bit.
-uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) {
- ASSERT(IsClamped());
- ASSERT(other.IsClamped());
- ASSERT(other.used_digits_ > 0);
-
- // Easy case: if we have less digits than the divisor than the result is 0.
- // Note: this handles the case where this == 0, too.
- if (BigitLength() < other.BigitLength()) {
- return 0;
- }
-
- Align(other);
-
- uint16_t result = 0;
-
- // Start by removing multiples of 'other' until both numbers have the same
- // number of digits.
- while (BigitLength() > other.BigitLength()) {
- // This naive approach is extremely inefficient if the this divided other
- // might be big. This function is implemented for doubleToString where
- // the result should be small (less than 10).
- ASSERT(other.bigits_[other.used_digits_ - 1] >= ((1 << kBigitSize) / 16));
- // Remove the multiples of the first digit.
- // Example this = 23 and other equals 9. -> Remove 2 multiples.
- result += bigits_[used_digits_ - 1];
- SubtractTimes(other, bigits_[used_digits_ - 1]);
- }
-
- ASSERT(BigitLength() == other.BigitLength());
-
- // Both bignums are at the same length now.
- // Since other has more than 0 digits we know that the access to
- // bigits_[used_digits_ - 1] is safe.
- Chunk this_bigit = bigits_[used_digits_ - 1];
- Chunk other_bigit = other.bigits_[other.used_digits_ - 1];
-
- if (other.used_digits_ == 1) {
- // Shortcut for easy (and common) case.
- int quotient = this_bigit / other_bigit;
- bigits_[used_digits_ - 1] = this_bigit - other_bigit * quotient;
- result += quotient;
- Clamp();
- return result;
- }
-
- int division_estimate = this_bigit / (other_bigit + 1);
- result += division_estimate;
- SubtractTimes(other, division_estimate);
-
- if (other_bigit * (division_estimate + 1) > this_bigit) {
- // No need to even try to subtract. Even if other's remaining digits were 0
- // another subtraction would be too much.
- return result;
- }
-
- while (LessEqual(other, *this)) {
- SubtractBignum(other);
- result++;
- }
- return result;
-}
-
-
-template<typename S>
-static int SizeInHexChars(S number) {
- ASSERT(number > 0);
- int result = 0;
- while (number != 0) {
- number >>= 4;
- result++;
- }
- return result;
-}
-
-
-static char HexCharOfValue(int value) {
- ASSERT(0 <= value && value <= 16);
- if (value < 10) return value + '0';
- return value - 10 + 'A';
-}
-
-
-bool Bignum::ToHexString(char* buffer, int buffer_size) const {
- ASSERT(IsClamped());
- // Each bigit must be printable as separate hex-character.
- ASSERT(kBigitSize % 4 == 0);
- const int kHexCharsPerBigit = kBigitSize / 4;
-
- if (used_digits_ == 0) {
- if (buffer_size < 2) return false;
- buffer[0] = '0';
- buffer[1] = '\0';
- return true;
- }
- // We add 1 for the terminating '\0' character.
- int needed_chars = (BigitLength() - 1) * kHexCharsPerBigit +
- SizeInHexChars(bigits_[used_digits_ - 1]) + 1;
- if (needed_chars > buffer_size) return false;
- int string_index = needed_chars - 1;
- buffer[string_index--] = '\0';
- for (int i = 0; i < exponent_; ++i) {
- for (int j = 0; j < kHexCharsPerBigit; ++j) {
- buffer[string_index--] = '0';
- }
- }
- for (int i = 0; i < used_digits_ - 1; ++i) {
- Chunk current_bigit = bigits_[i];
- for (int j = 0; j < kHexCharsPerBigit; ++j) {
- buffer[string_index--] = HexCharOfValue(current_bigit & 0xF);
- current_bigit >>= 4;
- }
- }
- // And finally the last bigit.
- Chunk most_significant_bigit = bigits_[used_digits_ - 1];
- while (most_significant_bigit != 0) {
- buffer[string_index--] = HexCharOfValue(most_significant_bigit & 0xF);
- most_significant_bigit >>= 4;
- }
- return true;
-}
-
-
-Bignum::Chunk Bignum::BigitAt(int index) const {
- if (index >= BigitLength()) return 0;
- if (index < exponent_) return 0;
- return bigits_[index - exponent_];
-}
-
-
-int Bignum::Compare(const Bignum& a, const Bignum& b) {
- ASSERT(a.IsClamped());
- ASSERT(b.IsClamped());
- int bigit_length_a = a.BigitLength();
- int bigit_length_b = b.BigitLength();
- if (bigit_length_a < bigit_length_b) return -1;
- if (bigit_length_a > bigit_length_b) return +1;
- for (int i = bigit_length_a - 1; i >= Min(a.exponent_, b.exponent_); --i) {
- Chunk bigit_a = a.BigitAt(i);
- Chunk bigit_b = b.BigitAt(i);
- if (bigit_a < bigit_b) return -1;
- if (bigit_a > bigit_b) return +1;
- // Otherwise they are equal up to this digit. Try the next digit.
- }
- return 0;
-}
-
-
-int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) {
- ASSERT(a.IsClamped());
- ASSERT(b.IsClamped());
- ASSERT(c.IsClamped());
- if (a.BigitLength() < b.BigitLength()) {
- return PlusCompare(b, a, c);
- }
- if (a.BigitLength() + 1 < c.BigitLength()) return -1;
- if (a.BigitLength() > c.BigitLength()) return +1;
- // The exponent encodes 0-bigits. So if there are more 0-digits in 'a' than
- // 'b' has digits, then the bigit-length of 'a'+'b' must be equal to the one
- // of 'a'.
- if (a.exponent_ >= b.BigitLength() && a.BigitLength() < c.BigitLength()) {
- return -1;
- }
-
- Chunk borrow = 0;
- // Starting at min_exponent all digits are == 0. So no need to compare them.
- int min_exponent = Min(Min(a.exponent_, b.exponent_), c.exponent_);
- for (int i = c.BigitLength() - 1; i >= min_exponent; --i) {
- Chunk chunk_a = a.BigitAt(i);
- Chunk chunk_b = b.BigitAt(i);
- Chunk chunk_c = c.BigitAt(i);
- Chunk sum = chunk_a + chunk_b;
- if (sum > chunk_c + borrow) {
- return +1;
- } else {
- borrow = chunk_c + borrow - sum;
- if (borrow > 1) return -1;
- borrow <<= kBigitSize;
- }
- }
- if (borrow == 0) return 0;
- return -1;
-}
-
-
-void Bignum::Clamp() {
- while (used_digits_ > 0 && bigits_[used_digits_ - 1] == 0) {
- used_digits_--;
- }
- if (used_digits_ == 0) {
- // Zero.
- exponent_ = 0;
- }
-}
-
-
-bool Bignum::IsClamped() const {
- return used_digits_ == 0 || bigits_[used_digits_ - 1] != 0;
-}
-
-
-void Bignum::Zero() {
- for (int i = 0; i < used_digits_; ++i) {
- bigits_[i] = 0;
- }
- used_digits_ = 0;
- exponent_ = 0;
-}
-
-
-void Bignum::Align(const Bignum& other) {
- if (exponent_ > other.exponent_) {
- // If "X" represents a "hidden" digit (by the exponent) then we are in the
- // following case (a == this, b == other):
- // a: aaaaaaXXXX or a: aaaaaXXX
- // b: bbbbbbX b: bbbbbbbbXX
- // We replace some of the hidden digits (X) of a with 0 digits.
- // a: aaaaaa000X or a: aaaaa0XX
- int zero_digits = exponent_ - other.exponent_;
- EnsureCapacity(used_digits_ + zero_digits);
- for (int i = used_digits_ - 1; i >= 0; --i) {
- bigits_[i + zero_digits] = bigits_[i];
- }
- for (int i = 0; i < zero_digits; ++i) {
- bigits_[i] = 0;
- }
- used_digits_ += zero_digits;
- exponent_ -= zero_digits;
- ASSERT(used_digits_ >= 0);
- ASSERT(exponent_ >= 0);
- }
-}
-
-
-void Bignum::BigitsShiftLeft(int shift_amount) {
- ASSERT(shift_amount < kBigitSize);
- ASSERT(shift_amount >= 0);
- Chunk carry = 0;
- for (int i = 0; i < used_digits_; ++i) {
- Chunk new_carry = bigits_[i] >> (kBigitSize - shift_amount);
- bigits_[i] = ((bigits_[i] << shift_amount) + carry) & kBigitMask;
- carry = new_carry;
- }
- if (carry != 0) {
- bigits_[used_digits_] = carry;
- used_digits_++;
- }
-}
-
-
-void Bignum::SubtractTimes(const Bignum& other, int factor) {
- ASSERT(exponent_ <= other.exponent_);
- if (factor < 3) {
- for (int i = 0; i < factor; ++i) {
- SubtractBignum(other);
- }
- return;
- }
- Chunk borrow = 0;
- int exponent_diff = other.exponent_ - exponent_;
- for (int i = 0; i < other.used_digits_; ++i) {
- DoubleChunk product = static_cast<DoubleChunk>(factor) * other.bigits_[i];
- DoubleChunk remove = borrow + product;
- Chunk difference =
- bigits_[i + exponent_diff] - static_cast<Chunk>(remove & kBigitMask);
- bigits_[i + exponent_diff] = difference & kBigitMask;
- borrow = static_cast<Chunk>((difference >> (kChunkSize - 1)) +
- (remove >> kBigitSize));
- }
- for (int i = other.used_digits_ + exponent_diff; i < used_digits_; ++i) {
- if (borrow == 0) return;
- Chunk difference = bigits_[i] - borrow;
- bigits_[i] = difference & kBigitMask;
- borrow = difference >> (kChunkSize - 1);
- ++i;
- }
- Clamp();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/bignum.h b/src/3rdparty/v8/src/bignum.h
deleted file mode 100644
index dcc4fa7..0000000
--- a/src/3rdparty/v8/src/bignum.h
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_BIGNUM_H_
-#define V8_BIGNUM_H_
-
-namespace v8 {
-namespace internal {
-
-class Bignum {
- public:
- // 3584 = 128 * 28. We can represent 2^3584 > 10^1000 accurately.
- // This bignum can encode much bigger numbers, since it contains an
- // exponent.
- static const int kMaxSignificantBits = 3584;
-
- Bignum();
- void AssignUInt16(uint16_t value);
- void AssignUInt64(uint64_t value);
- void AssignBignum(const Bignum& other);
-
- void AssignDecimalString(Vector<const char> value);
- void AssignHexString(Vector<const char> value);
-
- void AssignPowerUInt16(uint16_t base, int exponent);
-
- void AddUInt16(uint16_t operand);
- void AddUInt64(uint64_t operand);
- void AddBignum(const Bignum& other);
- // Precondition: this >= other.
- void SubtractBignum(const Bignum& other);
-
- void Square();
- void ShiftLeft(int shift_amount);
- void MultiplyByUInt32(uint32_t factor);
- void MultiplyByUInt64(uint64_t factor);
- void MultiplyByPowerOfTen(int exponent);
- void Times10() { return MultiplyByUInt32(10); }
- // Pseudocode:
- // int result = this / other;
- // this = this % other;
- // In the worst case this function is in O(this/other).
- uint16_t DivideModuloIntBignum(const Bignum& other);
-
- bool ToHexString(char* buffer, int buffer_size) const;
-
- static int Compare(const Bignum& a, const Bignum& b);
- static bool Equal(const Bignum& a, const Bignum& b) {
- return Compare(a, b) == 0;
- }
- static bool LessEqual(const Bignum& a, const Bignum& b) {
- return Compare(a, b) <= 0;
- }
- static bool Less(const Bignum& a, const Bignum& b) {
- return Compare(a, b) < 0;
- }
- // Returns Compare(a + b, c);
- static int PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c);
- // Returns a + b == c
- static bool PlusEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
- return PlusCompare(a, b, c) == 0;
- }
- // Returns a + b <= c
- static bool PlusLessEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
- return PlusCompare(a, b, c) <= 0;
- }
- // Returns a + b < c
- static bool PlusLess(const Bignum& a, const Bignum& b, const Bignum& c) {
- return PlusCompare(a, b, c) < 0;
- }
-
- private:
- typedef uint32_t Chunk;
- typedef uint64_t DoubleChunk;
-
- static const int kChunkSize = sizeof(Chunk) * 8;
- static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8;
- // With bigit size of 28 we loose some bits, but a double still fits easily
- // into two chunks, and more importantly we can use the Comba multiplication.
- static const int kBigitSize = 28;
- static const Chunk kBigitMask = (1 << kBigitSize) - 1;
- // Every instance allocates kBigitLength chunks on the stack. Bignums cannot
- // grow. There are no checks if the stack-allocated space is sufficient.
- static const int kBigitCapacity = kMaxSignificantBits / kBigitSize;
-
- void EnsureCapacity(int size) {
- if (size > kBigitCapacity) {
- UNREACHABLE();
- }
- }
- void Align(const Bignum& other);
- void Clamp();
- bool IsClamped() const;
- void Zero();
- // Requires this to have enough capacity (no tests done).
- // Updates used_digits_ if necessary.
- // by must be < kBigitSize.
- void BigitsShiftLeft(int shift_amount);
- // BigitLength includes the "hidden" digits encoded in the exponent.
- int BigitLength() const { return used_digits_ + exponent_; }
- Chunk BigitAt(int index) const;
- void SubtractTimes(const Bignum& other, int factor);
-
- Chunk bigits_buffer_[kBigitCapacity];
- // A vector backed by bigits_buffer_. This way accesses to the array are
- // checked for out-of-bounds errors.
- Vector<Chunk> bigits_;
- int used_digits_;
- // The Bignum's value equals value(bigits_) * 2^(exponent_ * kBigitSize).
- int exponent_;
-
- DISALLOW_COPY_AND_ASSIGN(Bignum);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_BIGNUM_H_
diff --git a/src/3rdparty/v8/src/bootstrapper.cc b/src/3rdparty/v8/src/bootstrapper.cc
deleted file mode 100644
index 368047c..0000000
--- a/src/3rdparty/v8/src/bootstrapper.cc
+++ /dev/null
@@ -1,2512 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "debug.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "isolate-inl.h"
-#include "macro-assembler.h"
-#include "natives.h"
-#include "objects-visiting.h"
-#include "platform.h"
-#include "snapshot.h"
-#include "extensions/externalize-string-extension.h"
-#include "extensions/gc-extension.h"
-#include "extensions/statistics-extension.h"
-
-namespace v8 {
-namespace internal {
-
-
-NativesExternalStringResource::NativesExternalStringResource(
- Bootstrapper* bootstrapper,
- const char* source,
- size_t length)
- : data_(source), length_(length) {
- if (bootstrapper->delete_these_non_arrays_on_tear_down_ == NULL) {
- bootstrapper->delete_these_non_arrays_on_tear_down_ = new List<char*>(2);
- }
- // The resources are small objects and we only make a fixed number of
- // them, but let's clean them up on exit for neatness.
- bootstrapper->delete_these_non_arrays_on_tear_down_->
- Add(reinterpret_cast<char*>(this));
-}
-
-
-Bootstrapper::Bootstrapper(Isolate* isolate)
- : isolate_(isolate),
- nesting_(0),
- extensions_cache_(Script::TYPE_EXTENSION),
- delete_these_non_arrays_on_tear_down_(NULL),
- delete_these_arrays_on_tear_down_(NULL) {
-}
-
-
-Handle<String> Bootstrapper::NativesSourceLookup(int index) {
- ASSERT(0 <= index && index < Natives::GetBuiltinsCount());
- Heap* heap = isolate_->heap();
- if (heap->natives_source_cache()->get(index)->IsUndefined()) {
- // We can use external strings for the natives.
- Vector<const char> source = Natives::GetRawScriptSource(index);
- NativesExternalStringResource* resource =
- new NativesExternalStringResource(this,
- source.start(),
- source.length());
- Handle<String> source_code =
- isolate_->factory()->NewExternalStringFromAscii(resource);
- heap->natives_source_cache()->set(index, *source_code);
- }
- Handle<Object> cached_source(heap->natives_source_cache()->get(index),
- isolate_);
- return Handle<String>::cast(cached_source);
-}
-
-
-void Bootstrapper::Initialize(bool create_heap_objects) {
- extensions_cache_.Initialize(create_heap_objects);
- GCExtension::Register();
- ExternalizeStringExtension::Register();
- StatisticsExtension::Register();
-}
-
-
-char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
- char* memory = new char[bytes];
- if (memory != NULL) {
- if (delete_these_arrays_on_tear_down_ == NULL) {
- delete_these_arrays_on_tear_down_ = new List<char*>(2);
- }
- delete_these_arrays_on_tear_down_->Add(memory);
- }
- return memory;
-}
-
-
-void Bootstrapper::TearDown() {
- if (delete_these_non_arrays_on_tear_down_ != NULL) {
- int len = delete_these_non_arrays_on_tear_down_->length();
- ASSERT(len < 20); // Don't use this mechanism for unbounded allocations.
- for (int i = 0; i < len; i++) {
- delete delete_these_non_arrays_on_tear_down_->at(i);
- delete_these_non_arrays_on_tear_down_->at(i) = NULL;
- }
- delete delete_these_non_arrays_on_tear_down_;
- delete_these_non_arrays_on_tear_down_ = NULL;
- }
-
- if (delete_these_arrays_on_tear_down_ != NULL) {
- int len = delete_these_arrays_on_tear_down_->length();
- ASSERT(len < 1000); // Don't use this mechanism for unbounded allocations.
- for (int i = 0; i < len; i++) {
- delete[] delete_these_arrays_on_tear_down_->at(i);
- delete_these_arrays_on_tear_down_->at(i) = NULL;
- }
- delete delete_these_arrays_on_tear_down_;
- delete_these_arrays_on_tear_down_ = NULL;
- }
-
- extensions_cache_.Initialize(false); // Yes, symmetrical
-}
-
-
-class Genesis BASE_EMBEDDED {
- public:
- Genesis(Isolate* isolate,
- Handle<Object> global_object,
- v8::Handle<v8::ObjectTemplate> global_template,
- v8::ExtensionConfiguration* extensions);
- ~Genesis() { }
-
- Handle<Context> result() { return result_; }
-
- Genesis* previous() { return previous_; }
-
- Isolate* isolate() const { return isolate_; }
- Factory* factory() const { return isolate_->factory(); }
- Heap* heap() const { return isolate_->heap(); }
-
- private:
- Handle<Context> native_context_;
- Isolate* isolate_;
-
- // There may be more than one active genesis object: When GC is
- // triggered during environment creation there may be weak handle
- // processing callbacks which may create new environments.
- Genesis* previous_;
-
- Handle<Context> native_context() { return native_context_; }
-
- // Creates some basic objects. Used for creating a context from scratch.
- void CreateRoots();
- // Creates the empty function. Used for creating a context from scratch.
- Handle<JSFunction> CreateEmptyFunction(Isolate* isolate);
- // Creates the ThrowTypeError function. ECMA 5th Ed. 13.2.3
- Handle<JSFunction> GetThrowTypeErrorFunction();
-
- void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
-
- // Make the "arguments" and "caller" properties throw a TypeError on access.
- void PoisonArgumentsAndCaller(Handle<Map> map);
-
- // Creates the global objects using the global and the template passed in
- // through the API. We call this regardless of whether we are building a
- // context from scratch or using a deserialized one from the partial snapshot
- // but in the latter case we don't use the objects it produces directly, as
- // we have to used the deserialized ones that are linked together with the
- // rest of the context snapshot.
- Handle<JSGlobalProxy> CreateNewGlobals(
- v8::Handle<v8::ObjectTemplate> global_template,
- Handle<Object> global_object,
- Handle<GlobalObject>* global_proxy_out);
- // Hooks the given global proxy into the context. If the context was created
- // by deserialization then this will unhook the global proxy that was
- // deserialized, leaving the GC to pick it up.
- void HookUpGlobalProxy(Handle<GlobalObject> inner_global,
- Handle<JSGlobalProxy> global_proxy);
- // Similarly, we want to use the inner global that has been created by the
- // templates passed through the API. The inner global from the snapshot is
- // detached from the other objects in the snapshot.
- void HookUpInnerGlobal(Handle<GlobalObject> inner_global);
- // New context initialization. Used for creating a context from scratch.
- bool InitializeGlobal(Handle<GlobalObject> inner_global,
- Handle<JSFunction> empty_function);
- void InitializeExperimentalGlobal();
- // Installs the contents of the native .js files on the global objects.
- // Used for creating a context from scratch.
- void InstallNativeFunctions();
- void InstallExperimentalNativeFunctions();
- Handle<JSFunction> InstallInternalArray(Handle<JSBuiltinsObject> builtins,
- const char* name,
- ElementsKind elements_kind);
- bool InstallNatives();
- bool InstallExperimentalNatives();
- void InstallBuiltinFunctionIds();
- void InstallJSFunctionResultCaches();
- void InitializeNormalizedMapCaches();
-
- enum ExtensionTraversalState {
- UNVISITED, VISITED, INSTALLED
- };
-
- class ExtensionStates {
- public:
- ExtensionStates();
- ExtensionTraversalState get_state(RegisteredExtension* extension);
- void set_state(RegisteredExtension* extension,
- ExtensionTraversalState state);
- private:
- HashMap map_;
- DISALLOW_COPY_AND_ASSIGN(ExtensionStates);
- };
-
- // Used both for deserialized and from-scratch contexts to add the extensions
- // provided.
- static bool InstallExtensions(Handle<Context> native_context,
- v8::ExtensionConfiguration* extensions);
- static bool InstallExtension(Isolate* isolate,
- const char* name,
- ExtensionStates* extension_states);
- static bool InstallExtension(Isolate* isolate,
- v8::RegisteredExtension* current,
- ExtensionStates* extension_states);
- static void InstallSpecialObjects(Handle<Context> native_context);
- bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
- bool ConfigureApiObject(Handle<JSObject> object,
- Handle<ObjectTemplateInfo> object_template);
- bool ConfigureGlobalObjects(v8::Handle<v8::ObjectTemplate> global_template);
-
- // Migrates all properties from the 'from' object to the 'to'
- // object and overrides the prototype in 'to' with the one from
- // 'from'.
- void TransferObject(Handle<JSObject> from, Handle<JSObject> to);
- void TransferNamedProperties(Handle<JSObject> from, Handle<JSObject> to);
- void TransferIndexedProperties(Handle<JSObject> from, Handle<JSObject> to);
-
- enum PrototypePropertyMode {
- DONT_ADD_PROTOTYPE,
- ADD_READONLY_PROTOTYPE,
- ADD_WRITEABLE_PROTOTYPE
- };
-
- Handle<Map> CreateFunctionMap(PrototypePropertyMode prototype_mode);
-
- void SetFunctionInstanceDescriptor(Handle<Map> map,
- PrototypePropertyMode prototypeMode);
- void MakeFunctionInstancePrototypeWritable();
-
- Handle<Map> CreateStrictModeFunctionMap(
- PrototypePropertyMode prototype_mode,
- Handle<JSFunction> empty_function);
-
- void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
- PrototypePropertyMode propertyMode);
-
- static bool CompileBuiltin(Isolate* isolate, int index);
- static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
- static bool CompileNative(Isolate* isolate,
- Vector<const char> name,
- Handle<String> source);
- static bool CompileScriptCached(Isolate* isolate,
- Vector<const char> name,
- Handle<String> source,
- SourceCodeCache* cache,
- v8::Extension* extension,
- Handle<Context> top_context,
- bool use_runtime_context);
-
- Handle<Context> result_;
-
- // Function instance maps. Function literal maps are created initially with
- // a read only prototype for the processing of JS builtins. Later the function
- // instance maps are replaced in order to make prototype writable.
- // These are the final, writable prototype, maps.
- Handle<Map> function_instance_map_writable_prototype_;
- Handle<Map> strict_mode_function_instance_map_writable_prototype_;
- Handle<JSFunction> throw_type_error_function;
-
- BootstrapperActive active_;
- friend class Bootstrapper;
-};
-
-
-void Bootstrapper::Iterate(ObjectVisitor* v) {
- extensions_cache_.Iterate(v);
- v->Synchronize(VisitorSynchronization::kExtensions);
-}
-
-
-Handle<Context> Bootstrapper::CreateEnvironment(
- Handle<Object> global_object,
- v8::Handle<v8::ObjectTemplate> global_template,
- v8::ExtensionConfiguration* extensions) {
- HandleScope scope(isolate_);
- Handle<Context> env;
- Genesis genesis(isolate_, global_object, global_template, extensions);
- env = genesis.result();
- if (!env.is_null()) {
- if (InstallExtensions(env, extensions)) {
- return env;
- }
- }
- return Handle<Context>();
-}
-
-
-static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
- // object.__proto__ = proto;
- Factory* factory = object->GetIsolate()->factory();
- Handle<Map> old_to_map = Handle<Map>(object->map());
- Handle<Map> new_to_map = factory->CopyMap(old_to_map);
- new_to_map->set_prototype(*proto);
- object->set_map(*new_to_map);
-}
-
-
-void Bootstrapper::DetachGlobal(Handle<Context> env) {
- Factory* factory = env->GetIsolate()->factory();
- Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
- global_proxy->set_native_context(*factory->null_value());
- SetObjectPrototype(global_proxy, factory->null_value());
- env->set_global_proxy(env->global_object());
- env->global_object()->set_global_receiver(env->global_object());
-}
-
-
-void Bootstrapper::ReattachGlobal(Handle<Context> env,
- Handle<JSGlobalProxy> global_proxy) {
- env->global_object()->set_global_receiver(*global_proxy);
- env->set_global_proxy(*global_proxy);
- SetObjectPrototype(global_proxy, Handle<JSObject>(env->global_object()));
- global_proxy->set_native_context(*env);
-}
-
-
-static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
- const char* name,
- InstanceType type,
- int instance_size,
- Handle<JSObject> prototype,
- Builtins::Name call,
- bool is_ecma_native) {
- Isolate* isolate = target->GetIsolate();
- Factory* factory = isolate->factory();
- Handle<String> internalized_name = factory->InternalizeUtf8String(name);
- Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
- Handle<JSFunction> function = prototype.is_null() ?
- factory->NewFunctionWithoutPrototype(internalized_name, call_code) :
- factory->NewFunctionWithPrototype(internalized_name,
- type,
- instance_size,
- prototype,
- call_code,
- is_ecma_native);
- PropertyAttributes attributes;
- if (target->IsJSBuiltinsObject()) {
- attributes =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- } else {
- attributes = DONT_ENUM;
- }
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- target, internalized_name, function, attributes));
- if (is_ecma_native) {
- function->shared()->set_instance_class_name(*internalized_name);
- }
- function->shared()->set_native(true);
- return function;
-}
-
-
-void Genesis::SetFunctionInstanceDescriptor(
- Handle<Map> map, PrototypePropertyMode prototypeMode) {
- int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
- Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(0, size));
- DescriptorArray::WhitenessWitness witness(*descriptors);
-
- Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength));
- Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName));
- Handle<Foreign> args(factory()->NewForeign(&Accessors::FunctionArguments));
- Handle<Foreign> caller(factory()->NewForeign(&Accessors::FunctionCaller));
- Handle<Foreign> prototype;
- if (prototypeMode != DONT_ADD_PROTOTYPE) {
- prototype = factory()->NewForeign(&Accessors::FunctionPrototype);
- }
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- map->set_instance_descriptors(*descriptors);
-
- { // Add length.
- CallbacksDescriptor d(*factory()->length_string(), *length, attribs);
- map->AppendDescriptor(&d, witness);
- }
- { // Add name.
- CallbacksDescriptor d(*factory()->name_string(), *name, attribs);
- map->AppendDescriptor(&d, witness);
- }
- { // Add arguments.
- CallbacksDescriptor d(*factory()->arguments_string(), *args, attribs);
- map->AppendDescriptor(&d, witness);
- }
- { // Add caller.
- CallbacksDescriptor d(*factory()->caller_string(), *caller, attribs);
- map->AppendDescriptor(&d, witness);
- }
- if (prototypeMode != DONT_ADD_PROTOTYPE) {
- // Add prototype.
- if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
- attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY);
- }
- CallbacksDescriptor d(*factory()->prototype_string(), *prototype, attribs);
- map->AppendDescriptor(&d, witness);
- }
-}
-
-
-Handle<Map> Genesis::CreateFunctionMap(PrototypePropertyMode prototype_mode) {
- Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- SetFunctionInstanceDescriptor(map, prototype_mode);
- map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
- return map;
-}
-
-
-Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
- // Allocate the map for function instances. Maps are allocated first and their
- // prototypes patched later, once empty function is created.
-
- // Please note that the prototype property for function instances must be
- // writable.
- Handle<Map> function_instance_map =
- CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
- native_context()->set_function_instance_map(*function_instance_map);
-
- // Functions with this map will not have a 'prototype' property, and
- // can not be used as constructors.
- Handle<Map> function_without_prototype_map =
- CreateFunctionMap(DONT_ADD_PROTOTYPE);
- native_context()->set_function_without_prototype_map(
- *function_without_prototype_map);
-
- // Allocate the function map. This map is temporary, used only for processing
- // of builtins.
- // Later the map is replaced with writable prototype map, allocated below.
- Handle<Map> function_map = CreateFunctionMap(ADD_READONLY_PROTOTYPE);
- native_context()->set_function_map(*function_map);
-
- // The final map for functions. Writeable prototype.
- // This map is installed in MakeFunctionInstancePrototypeWritable.
- function_instance_map_writable_prototype_ =
- CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
-
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
-
- Handle<String> object_name = Handle<String>(heap->Object_string());
-
- { // --- O b j e c t ---
- Handle<JSFunction> object_fun =
- factory->NewFunction(object_name, factory->null_value());
- Handle<Map> object_function_map =
- factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- object_fun->set_initial_map(*object_function_map);
- object_function_map->set_constructor(*object_fun);
-
- native_context()->set_object_function(*object_fun);
-
- // Allocate a new prototype for the object function.
- Handle<Map> object_prototype_map =
- factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- Handle<DescriptorArray> prototype_descriptors(
- factory->NewDescriptorArray(0, 1));
- DescriptorArray::WhitenessWitness witness(*prototype_descriptors);
-
- Handle<Foreign> object_prototype(
- factory->NewForeign(&Accessors::ObjectPrototype));
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE);
- object_prototype_map->set_instance_descriptors(*prototype_descriptors);
-
- { // Add __proto__.
- CallbacksDescriptor d(heap->proto_string(), *object_prototype, attribs);
- object_prototype_map->AppendDescriptor(&d, witness);
- }
-
- Handle<JSObject> prototype = factory->NewJSObjectFromMap(
- object_prototype_map,
- TENURED);
- native_context()->set_initial_object_prototype(*prototype);
- SetPrototype(object_fun, prototype);
- }
-
- // Allocate the empty function as the prototype for function ECMAScript
- // 262 15.3.4.
- Handle<String> empty_string =
- factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("Empty"));
- Handle<JSFunction> empty_function =
- factory->NewFunctionWithoutPrototype(empty_string, CLASSIC_MODE);
-
- // --- E m p t y ---
- Handle<Code> code =
- Handle<Code>(isolate->builtins()->builtin(
- Builtins::kEmptyFunction));
- empty_function->set_code(*code);
- empty_function->shared()->set_code(*code);
- Handle<String> source =
- factory->NewStringFromOneByte(STATIC_ASCII_VECTOR("() {}"));
- Handle<Script> script = factory->NewScript(source);
- script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- empty_function->shared()->set_script(*script);
- empty_function->shared()->set_start_position(0);
- empty_function->shared()->set_end_position(source->length());
- empty_function->shared()->DontAdaptArguments();
-
- // Set prototypes for the function maps.
- native_context()->function_map()->set_prototype(*empty_function);
- native_context()->function_instance_map()->set_prototype(*empty_function);
- native_context()->function_without_prototype_map()->
- set_prototype(*empty_function);
- function_instance_map_writable_prototype_->set_prototype(*empty_function);
-
- // Allocate the function map first and then patch the prototype later
- Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
- empty_function_map->set_prototype(
- native_context()->object_function()->prototype());
- empty_function->set_map(*empty_function_map);
- return empty_function;
-}
-
-
-void Genesis::SetStrictFunctionInstanceDescriptor(
- Handle<Map> map, PrototypePropertyMode prototypeMode) {
- int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
- Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(0, size));
- DescriptorArray::WhitenessWitness witness(*descriptors);
-
- Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength));
- Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName));
- Handle<AccessorPair> arguments(factory()->NewAccessorPair());
- Handle<AccessorPair> caller(factory()->NewAccessorPair());
- Handle<Foreign> prototype;
- if (prototypeMode != DONT_ADD_PROTOTYPE) {
- prototype = factory()->NewForeign(&Accessors::FunctionPrototype);
- }
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE);
- map->set_instance_descriptors(*descriptors);
-
- { // Add length.
- CallbacksDescriptor d(*factory()->length_string(), *length, attribs);
- map->AppendDescriptor(&d, witness);
- }
- { // Add name.
- CallbacksDescriptor d(*factory()->name_string(), *name, attribs);
- map->AppendDescriptor(&d, witness);
- }
- { // Add arguments.
- CallbacksDescriptor d(*factory()->arguments_string(), *arguments, attribs);
- map->AppendDescriptor(&d, witness);
- }
- { // Add caller.
- CallbacksDescriptor d(*factory()->caller_string(), *caller, attribs);
- map->AppendDescriptor(&d, witness);
- }
- if (prototypeMode != DONT_ADD_PROTOTYPE) {
- // Add prototype.
- if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) {
- attribs = static_cast<PropertyAttributes>(attribs | READ_ONLY);
- }
- CallbacksDescriptor d(*factory()->prototype_string(), *prototype, attribs);
- map->AppendDescriptor(&d, witness);
- }
-}
-
-
-// ECMAScript 5th Edition, 13.2.3
-Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
- if (throw_type_error_function.is_null()) {
- Handle<String> name = factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("ThrowTypeError"));
- throw_type_error_function =
- factory()->NewFunctionWithoutPrototype(name, CLASSIC_MODE);
- Handle<Code> code(isolate()->builtins()->builtin(
- Builtins::kStrictModePoisonPill));
- throw_type_error_function->set_map(
- native_context()->function_map());
- throw_type_error_function->set_code(*code);
- throw_type_error_function->shared()->set_code(*code);
- throw_type_error_function->shared()->DontAdaptArguments();
-
- JSObject::PreventExtensions(throw_type_error_function);
- }
- return throw_type_error_function;
-}
-
-
-Handle<Map> Genesis::CreateStrictModeFunctionMap(
- PrototypePropertyMode prototype_mode,
- Handle<JSFunction> empty_function) {
- Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- SetStrictFunctionInstanceDescriptor(map, prototype_mode);
- map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
- map->set_prototype(*empty_function);
- return map;
-}
-
-
-void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
- // Allocate map for the strict mode function instances.
- Handle<Map> strict_mode_function_instance_map =
- CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
- native_context()->set_strict_mode_function_instance_map(
- *strict_mode_function_instance_map);
-
- // Allocate map for the prototype-less strict mode instances.
- Handle<Map> strict_mode_function_without_prototype_map =
- CreateStrictModeFunctionMap(DONT_ADD_PROTOTYPE, empty);
- native_context()->set_strict_mode_function_without_prototype_map(
- *strict_mode_function_without_prototype_map);
-
- // Allocate map for the strict mode functions. This map is temporary, used
- // only for processing of builtins.
- // Later the map is replaced with writable prototype map, allocated below.
- Handle<Map> strict_mode_function_map =
- CreateStrictModeFunctionMap(ADD_READONLY_PROTOTYPE, empty);
- native_context()->set_strict_mode_function_map(
- *strict_mode_function_map);
-
- // The final map for the strict mode functions. Writeable prototype.
- // This map is installed in MakeFunctionInstancePrototypeWritable.
- strict_mode_function_instance_map_writable_prototype_ =
- CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
-
- // Complete the callbacks.
- PoisonArgumentsAndCaller(strict_mode_function_instance_map);
- PoisonArgumentsAndCaller(strict_mode_function_without_prototype_map);
- PoisonArgumentsAndCaller(strict_mode_function_map);
- PoisonArgumentsAndCaller(
- strict_mode_function_instance_map_writable_prototype_);
-}
-
-
-static void SetAccessors(Handle<Map> map,
- Handle<String> name,
- Handle<JSFunction> func) {
- DescriptorArray* descs = map->instance_descriptors();
- int number = descs->SearchWithCache(*name, *map);
- AccessorPair* accessors = AccessorPair::cast(descs->GetValue(number));
- accessors->set_getter(*func);
- accessors->set_setter(*func);
-}
-
-
-void Genesis::PoisonArgumentsAndCaller(Handle<Map> map) {
- SetAccessors(map, factory()->arguments_string(), GetThrowTypeErrorFunction());
- SetAccessors(map, factory()->caller_string(), GetThrowTypeErrorFunction());
-}
-
-
-static void AddToWeakNativeContextList(Context* context) {
- ASSERT(context->IsNativeContext());
- Heap* heap = context->GetIsolate()->heap();
-#ifdef DEBUG
- { // NOLINT
- ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
- // Check that context is not in the list yet.
- for (Object* current = heap->native_contexts_list();
- !current->IsUndefined();
- current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) {
- ASSERT(current != context);
- }
- }
-#endif
- context->set(Context::NEXT_CONTEXT_LINK, heap->native_contexts_list());
- heap->set_native_contexts_list(context);
-}
-
-
-void Genesis::CreateRoots() {
- // Allocate the native context FixedArray first and then patch the
- // closure and extension object later (we need the empty function
- // and the global object, but in order to create those, we need the
- // native context).
- native_context_ = Handle<Context>::cast(isolate()->global_handles()->Create(
- *factory()->NewNativeContext()));
- AddToWeakNativeContextList(*native_context_);
- isolate()->set_context(*native_context());
-
- // Allocate the message listeners object.
- {
- v8::NeanderArray listeners;
- native_context()->set_message_listeners(*listeners.value());
- }
-}
-
-
-Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
- v8::Handle<v8::ObjectTemplate> global_template,
- Handle<Object> global_object,
- Handle<GlobalObject>* inner_global_out) {
- // The argument global_template aka data is an ObjectTemplateInfo.
- // It has a constructor pointer that points at global_constructor which is a
- // FunctionTemplateInfo.
- // The global_constructor is used to create or reinitialize the global_proxy.
- // The global_constructor also has a prototype_template pointer that points at
- // js_global_template which is an ObjectTemplateInfo.
- // That in turn has a constructor pointer that points at
- // js_global_constructor which is a FunctionTemplateInfo.
- // js_global_constructor is used to make js_global_function
- // js_global_function is used to make the new inner_global.
- //
- // --- G l o b a l ---
- // Step 1: Create a fresh inner JSGlobalObject.
- Handle<JSFunction> js_global_function;
- Handle<ObjectTemplateInfo> js_global_template;
- if (!global_template.IsEmpty()) {
- // Get prototype template of the global_template.
- Handle<ObjectTemplateInfo> data =
- v8::Utils::OpenHandle(*global_template);
- Handle<FunctionTemplateInfo> global_constructor =
- Handle<FunctionTemplateInfo>(
- FunctionTemplateInfo::cast(data->constructor()));
- Handle<Object> proto_template(global_constructor->prototype_template(),
- isolate());
- if (!proto_template->IsUndefined()) {
- js_global_template =
- Handle<ObjectTemplateInfo>::cast(proto_template);
- }
- }
-
- if (js_global_template.is_null()) {
- Handle<String> name = Handle<String>(heap()->empty_string());
- Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
- Builtins::kIllegal));
- js_global_function =
- factory()->NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
- JSGlobalObject::kSize, code, true);
- // Change the constructor property of the prototype of the
- // hidden global function to refer to the Object function.
- Handle<JSObject> prototype =
- Handle<JSObject>(
- JSObject::cast(js_global_function->instance_prototype()));
- CHECK_NOT_EMPTY_HANDLE(isolate(),
- JSObject::SetLocalPropertyIgnoreAttributes(
- prototype, factory()->constructor_string(),
- isolate()->object_function(), NONE));
- } else {
- Handle<FunctionTemplateInfo> js_global_constructor(
- FunctionTemplateInfo::cast(js_global_template->constructor()));
- js_global_function =
- factory()->CreateApiFunction(js_global_constructor,
- factory()->InnerGlobalObject);
- }
-
- js_global_function->initial_map()->set_is_hidden_prototype();
- js_global_function->initial_map()->set_dictionary_map(true);
- Handle<GlobalObject> inner_global =
- factory()->NewGlobalObject(js_global_function);
- if (inner_global_out != NULL) {
- *inner_global_out = inner_global;
- }
-
- // Step 2: create or re-initialize the global proxy object.
- Handle<JSFunction> global_proxy_function;
- if (global_template.IsEmpty()) {
- Handle<String> name = Handle<String>(heap()->empty_string());
- Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
- Builtins::kIllegal));
- global_proxy_function =
- factory()->NewFunction(name, JS_GLOBAL_PROXY_TYPE,
- JSGlobalProxy::kSize, code, true);
- } else {
- Handle<ObjectTemplateInfo> data =
- v8::Utils::OpenHandle(*global_template);
- Handle<FunctionTemplateInfo> global_constructor(
- FunctionTemplateInfo::cast(data->constructor()));
- global_proxy_function =
- factory()->CreateApiFunction(global_constructor,
- factory()->OuterGlobalObject);
- }
-
- Handle<String> global_name = factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("global"));
- global_proxy_function->shared()->set_instance_class_name(*global_name);
- global_proxy_function->initial_map()->set_is_access_check_needed(true);
-
- // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
- // Return the global proxy.
-
- if (global_object.location() != NULL) {
- ASSERT(global_object->IsJSGlobalProxy());
- return ReinitializeJSGlobalProxy(
- global_proxy_function,
- Handle<JSGlobalProxy>::cast(global_object));
- } else {
- return Handle<JSGlobalProxy>::cast(
- factory()->NewJSObject(global_proxy_function, TENURED));
- }
-}
-
-
-void Genesis::HookUpGlobalProxy(Handle<GlobalObject> inner_global,
- Handle<JSGlobalProxy> global_proxy) {
- // Set the native context for the global object.
- inner_global->set_native_context(*native_context());
- inner_global->set_global_context(*native_context());
- inner_global->set_global_receiver(*global_proxy);
- global_proxy->set_native_context(*native_context());
- native_context()->set_global_proxy(*global_proxy);
-}
-
-
-void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
- Handle<GlobalObject> inner_global_from_snapshot(
- GlobalObject::cast(native_context_->extension()));
- Handle<JSBuiltinsObject> builtins_global(native_context_->builtins());
- native_context_->set_extension(*inner_global);
- native_context_->set_global_object(*inner_global);
- native_context_->set_qml_global_object(*inner_global);
- native_context_->set_security_token(*inner_global);
- static const PropertyAttributes attributes =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- ForceSetProperty(builtins_global,
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("global")),
- inner_global,
- attributes);
- // Set up the reference from the global object to the builtins object.
- JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global);
- TransferNamedProperties(inner_global_from_snapshot, inner_global);
- TransferIndexedProperties(inner_global_from_snapshot, inner_global);
-}
-
-
-// This is only called if we are not using snapshots. The equivalent
-// work in the snapshot case is done in HookUpInnerGlobal.
-bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
- Handle<JSFunction> empty_function) {
- // --- G l o b a l C o n t e x t ---
- // Use the empty function as closure (no scope info).
- native_context()->set_closure(*empty_function);
- native_context()->set_previous(NULL);
- // Set extension and global object.
- native_context()->set_extension(*inner_global);
- native_context()->set_global_object(*inner_global);
- native_context()->set_qml_global_object(*inner_global);
- // Security setup: Set the security token of the global object to
- // its the inner global. This makes the security check between two
- // different contexts fail by default even in case of global
- // object reinitialization.
- native_context()->set_security_token(*inner_global);
-
- Isolate* isolate = inner_global->GetIsolate();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
-
- Handle<String> object_name = Handle<String>(heap->Object_string());
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- inner_global, object_name,
- isolate->object_function(), DONT_ENUM));
-
- Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
-
- // Install global Function object
- InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
- empty_function, Builtins::kIllegal, true); // ECMA native.
-
- { // --- A r r a y ---
- Handle<JSFunction> array_function =
- InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
- isolate->initial_object_prototype(),
- Builtins::kArrayCode, true);
- array_function->shared()->set_construct_stub(
- isolate->builtins()->builtin(Builtins::kArrayConstructCode));
- array_function->shared()->DontAdaptArguments();
-
- // This seems a bit hackish, but we need to make sure Array.length
- // is 1.
- array_function->shared()->set_length(1);
-
- Handle<Map> initial_map(array_function->initial_map());
- Handle<DescriptorArray> array_descriptors(
- factory->NewDescriptorArray(0, 1));
- DescriptorArray::WhitenessWitness witness(*array_descriptors);
-
- Handle<Foreign> array_length(factory->NewForeign(&Accessors::ArrayLength));
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE);
- initial_map->set_instance_descriptors(*array_descriptors);
-
- { // Add length.
- CallbacksDescriptor d(*factory->length_string(), *array_length, attribs);
- array_function->initial_map()->AppendDescriptor(&d, witness);
- }
-
- // array_function is used internally. JS code creating array object should
- // search for the 'Array' property on the global object and use that one
- // as the constructor. 'Array' property on a global object can be
- // overwritten by JS code.
- native_context()->set_array_function(*array_function);
- }
-
- { // --- N u m b e r ---
- Handle<JSFunction> number_fun =
- InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
- native_context()->set_number_function(*number_fun);
- }
-
- { // --- B o o l e a n ---
- Handle<JSFunction> boolean_fun =
- InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
- native_context()->set_boolean_function(*boolean_fun);
- }
-
- { // --- S t r i n g ---
- Handle<JSFunction> string_fun =
- InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
- string_fun->shared()->set_construct_stub(
- isolate->builtins()->builtin(Builtins::kStringConstructCode));
- native_context()->set_string_function(*string_fun);
-
- Handle<Map> string_map =
- Handle<Map>(native_context()->string_function()->initial_map());
- Handle<DescriptorArray> string_descriptors(
- factory->NewDescriptorArray(0, 1));
- DescriptorArray::WhitenessWitness witness(*string_descriptors);
-
- Handle<Foreign> string_length(
- factory->NewForeign(&Accessors::StringLength));
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- string_map->set_instance_descriptors(*string_descriptors);
-
- { // Add length.
- CallbacksDescriptor d(*factory->length_string(), *string_length, attribs);
- string_map->AppendDescriptor(&d, witness);
- }
- }
-
- { // --- D a t e ---
- // Builtin functions for Date.prototype.
- Handle<JSFunction> date_fun =
- InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
-
- native_context()->set_date_function(*date_fun);
- }
-
-
- { // -- R e g E x p
- // Builtin functions for RegExp.prototype.
- Handle<JSFunction> regexp_fun =
- InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
- native_context()->set_regexp_function(*regexp_fun);
-
- ASSERT(regexp_fun->has_initial_map());
- Handle<Map> initial_map(regexp_fun->initial_map());
-
- ASSERT_EQ(0, initial_map->inobject_properties());
-
- PropertyAttributes final =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 5);
- DescriptorArray::WhitenessWitness witness(*descriptors);
- initial_map->set_instance_descriptors(*descriptors);
-
- {
- // ECMA-262, section 15.10.7.1.
- FieldDescriptor field(heap->source_string(),
- JSRegExp::kSourceFieldIndex,
- final);
- initial_map->AppendDescriptor(&field, witness);
- }
- {
- // ECMA-262, section 15.10.7.2.
- FieldDescriptor field(heap->global_string(),
- JSRegExp::kGlobalFieldIndex,
- final);
- initial_map->AppendDescriptor(&field, witness);
- }
- {
- // ECMA-262, section 15.10.7.3.
- FieldDescriptor field(heap->ignore_case_string(),
- JSRegExp::kIgnoreCaseFieldIndex,
- final);
- initial_map->AppendDescriptor(&field, witness);
- }
- {
- // ECMA-262, section 15.10.7.4.
- FieldDescriptor field(heap->multiline_string(),
- JSRegExp::kMultilineFieldIndex,
- final);
- initial_map->AppendDescriptor(&field, witness);
- }
- {
- // ECMA-262, section 15.10.7.5.
- PropertyAttributes writable =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- FieldDescriptor field(heap->last_index_string(),
- JSRegExp::kLastIndexFieldIndex,
- writable);
- initial_map->AppendDescriptor(&field, witness);
- }
-
- initial_map->set_inobject_properties(5);
- initial_map->set_pre_allocated_property_fields(5);
- initial_map->set_unused_property_fields(0);
- initial_map->set_instance_size(
- initial_map->instance_size() + 5 * kPointerSize);
- initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
-
- // RegExp prototype object is itself a RegExp.
- Handle<Map> proto_map = factory->CopyMap(initial_map);
- proto_map->set_prototype(native_context()->initial_object_prototype());
- Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
- proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
- heap->query_colon_string());
- proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
- heap->false_value());
- proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
- heap->false_value());
- proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex,
- heap->false_value());
- proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
- Smi::FromInt(0),
- SKIP_WRITE_BARRIER); // It's a Smi.
- initial_map->set_prototype(*proto);
- factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
- JSRegExp::IRREGEXP, factory->empty_string(),
- JSRegExp::Flags(0), 0);
- }
-
- { // -- J S O N
- Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
- Handle<JSFunction> cons = factory->NewFunction(name,
- factory->the_hole_value());
- { MaybeObject* result = cons->SetInstancePrototype(
- native_context()->initial_object_prototype());
- if (result->IsFailure()) return false;
- }
- cons->SetInstanceClassName(*name);
- Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
- ASSERT(json_object->IsJSObject());
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- global, name, json_object, DONT_ENUM));
- native_context()->set_json_object(*json_object);
- }
-
- { // --- arguments_boilerplate_
- // Make sure we can recognize argument objects at runtime.
- // This is done by introducing an anonymous function with
- // class_name equals 'Arguments'.
- Handle<String> arguments_string = factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("Arguments"));
- Handle<Code> code = Handle<Code>(
- isolate->builtins()->builtin(Builtins::kIllegal));
- Handle<JSObject> prototype =
- Handle<JSObject>(
- JSObject::cast(native_context()->object_function()->prototype()));
-
- Handle<JSFunction> function =
- factory->NewFunctionWithPrototype(arguments_string,
- JS_OBJECT_TYPE,
- JSObject::kHeaderSize,
- prototype,
- code,
- false);
- ASSERT(!function->has_initial_map());
- function->shared()->set_instance_class_name(*arguments_string);
- function->shared()->set_expected_nof_properties(2);
- Handle<JSObject> result = factory->NewJSObject(function);
-
- native_context()->set_arguments_boilerplate(*result);
- // Note: length must be added as the first property and
- // callee must be added as the second property.
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- result, factory->length_string(),
- factory->undefined_value(), DONT_ENUM));
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- result, factory->callee_string(),
- factory->undefined_value(), DONT_ENUM));
-
-#ifdef DEBUG
- LookupResult lookup(isolate);
- result->LocalLookup(heap->callee_string(), &lookup);
- ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsCalleeIndex);
-
- result->LocalLookup(heap->length_string(), &lookup);
- ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
-
- ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
- ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
-
- // Check the state of the object.
- ASSERT(result->HasFastProperties());
- ASSERT(result->HasFastObjectElements());
-#endif
- }
-
- { // --- aliased_arguments_boilerplate_
- // Set up a well-formed parameter map to make assertions happy.
- Handle<FixedArray> elements = factory->NewFixedArray(2);
- elements->set_map(heap->non_strict_arguments_elements_map());
- Handle<FixedArray> array;
- array = factory->NewFixedArray(0);
- elements->set(0, *array);
- array = factory->NewFixedArray(0);
- elements->set(1, *array);
-
- Handle<Map> old_map(native_context()->arguments_boilerplate()->map());
- Handle<Map> new_map = factory->CopyMap(old_map);
- new_map->set_pre_allocated_property_fields(2);
- Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
- // Set elements kind after allocating the object because
- // NewJSObjectFromMap assumes a fast elements map.
- new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
- result->set_elements(*elements);
- ASSERT(result->HasNonStrictArgumentsElements());
- native_context()->set_aliased_arguments_boilerplate(*result);
- }
-
- { // --- strict mode arguments boilerplate
- const PropertyAttributes attributes =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // Create the ThrowTypeError functions.
- Handle<AccessorPair> callee = factory->NewAccessorPair();
- Handle<AccessorPair> caller = factory->NewAccessorPair();
-
- Handle<JSFunction> throw_function =
- GetThrowTypeErrorFunction();
-
- // Install the ThrowTypeError functions.
- callee->set_getter(*throw_function);
- callee->set_setter(*throw_function);
- caller->set_getter(*throw_function);
- caller->set_setter(*throw_function);
-
- // Create the map. Allocate one in-object field for length.
- Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
- Heap::kArgumentsObjectSizeStrict);
- // Create the descriptor array for the arguments object.
- Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 3);
- DescriptorArray::WhitenessWitness witness(*descriptors);
- map->set_instance_descriptors(*descriptors);
-
- { // length
- FieldDescriptor d(*factory->length_string(), 0, DONT_ENUM);
- map->AppendDescriptor(&d, witness);
- }
- { // callee
- CallbacksDescriptor d(*factory->callee_string(),
- *callee,
- attributes);
- map->AppendDescriptor(&d, witness);
- }
- { // caller
- CallbacksDescriptor d(*factory->caller_string(),
- *caller,
- attributes);
- map->AppendDescriptor(&d, witness);
- }
-
- map->set_function_with_prototype(true);
- map->set_prototype(native_context()->object_function()->prototype());
- map->set_pre_allocated_property_fields(1);
- map->set_inobject_properties(1);
-
- // Copy constructor from the non-strict arguments boilerplate.
- map->set_constructor(
- native_context()->arguments_boilerplate()->map()->constructor());
-
- // Allocate the arguments boilerplate object.
- Handle<JSObject> result = factory->NewJSObjectFromMap(map);
- native_context()->set_strict_mode_arguments_boilerplate(*result);
-
- // Add length property only for strict mode boilerplate.
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- result, factory->length_string(),
- factory->undefined_value(), DONT_ENUM));
-
-#ifdef DEBUG
- LookupResult lookup(isolate);
- result->LocalLookup(heap->length_string(), &lookup);
- ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
-
- ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
-
- // Check the state of the object.
- ASSERT(result->HasFastProperties());
- ASSERT(result->HasFastObjectElements());
-#endif
- }
-
- { // --- context extension
- // Create a function for the context extension objects.
- Handle<Code> code = Handle<Code>(
- isolate->builtins()->builtin(Builtins::kIllegal));
- Handle<JSFunction> context_extension_fun =
- factory->NewFunction(factory->empty_string(),
- JS_CONTEXT_EXTENSION_OBJECT_TYPE,
- JSObject::kHeaderSize,
- code,
- true);
-
- Handle<String> name = factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("context_extension"));
- context_extension_fun->shared()->set_instance_class_name(*name);
- native_context()->set_context_extension_function(*context_extension_fun);
- }
-
-
- {
- // Set up the call-as-function delegate.
- Handle<Code> code =
- Handle<Code>(isolate->builtins()->builtin(
- Builtins::kHandleApiCallAsFunction));
- Handle<JSFunction> delegate =
- factory->NewFunction(factory->empty_string(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, code, true);
- native_context()->set_call_as_function_delegate(*delegate);
- delegate->shared()->DontAdaptArguments();
- }
-
- {
- // Set up the call-as-constructor delegate.
- Handle<Code> code =
- Handle<Code>(isolate->builtins()->builtin(
- Builtins::kHandleApiCallAsConstructor));
- Handle<JSFunction> delegate =
- factory->NewFunction(factory->empty_string(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, code, true);
- native_context()->set_call_as_constructor_delegate(*delegate);
- delegate->shared()->DontAdaptArguments();
- }
-
- // Initialize the out of memory slot.
- native_context()->set_out_of_memory(heap->false_value());
-
- // Initialize the embedder data slot.
- Handle<FixedArray> embedder_data = factory->NewFixedArray(2);
- native_context()->set_embedder_data(*embedder_data);
-
- {
- // Initialize the random seed slot.
- Handle<ByteArray> zeroed_byte_array(
- factory->NewByteArray(kRandomStateSize));
- native_context()->set_random_seed(*zeroed_byte_array);
- memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
- }
- return true;
-}
-
-
-void Genesis::InitializeExperimentalGlobal() {
- Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
-
- // TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no
- // longer need to live behind a flag, so functions get added to the snapshot.
- if (FLAG_harmony_collections) {
- { // -- S e t
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
- prototype, Builtins::kIllegal, true);
- }
- { // -- M a p
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
- prototype, Builtins::kIllegal, true);
- }
- { // -- W e a k M a p
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
- prototype, Builtins::kIllegal, true);
- }
- }
-}
-
-
-bool Genesis::CompileBuiltin(Isolate* isolate, int index) {
- Vector<const char> name = Natives::GetScriptName(index);
- Handle<String> source_code =
- isolate->bootstrapper()->NativesSourceLookup(index);
- return CompileNative(isolate, name, source_code);
-}
-
-
-bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
- Vector<const char> name = ExperimentalNatives::GetScriptName(index);
- Factory* factory = isolate->factory();
- Handle<String> source_code =
- factory->NewStringFromAscii(
- ExperimentalNatives::GetRawScriptSource(index));
- return CompileNative(isolate, name, source_code);
-}
-
-
-bool Genesis::CompileNative(Isolate* isolate,
- Vector<const char> name,
- Handle<String> source) {
- HandleScope scope(isolate);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- isolate->debugger()->set_compiling_natives(true);
-#endif
- // During genesis, the boilerplate for stack overflow won't work until the
- // environment has been at least partially initialized. Add a stack check
- // before entering JS code to catch overflow early.
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) return false;
-
- bool result = CompileScriptCached(isolate,
- name,
- source,
- NULL,
- NULL,
- Handle<Context>(isolate->context()),
- true);
- ASSERT(isolate->has_pending_exception() != result);
- if (!result) isolate->clear_pending_exception();
-#ifdef ENABLE_DEBUGGER_SUPPORT
- isolate->debugger()->set_compiling_natives(false);
-#endif
- return result;
-}
-
-
-bool Genesis::CompileScriptCached(Isolate* isolate,
- Vector<const char> name,
- Handle<String> source,
- SourceCodeCache* cache,
- v8::Extension* extension,
- Handle<Context> top_context,
- bool use_runtime_context) {
- Factory* factory = isolate->factory();
- HandleScope scope(isolate);
- Handle<SharedFunctionInfo> function_info;
-
- // If we can't find the function in the cache, we compile a new
- // function and insert it into the cache.
- if (cache == NULL || !cache->Lookup(name, &function_info)) {
- ASSERT(source->IsOneByteRepresentation());
- Handle<String> script_name = factory->NewStringFromUtf8(name);
- function_info = Compiler::Compile(
- source,
- script_name,
- 0,
- 0,
- top_context,
- extension,
- NULL,
- Handle<String>::null(),
- use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
- if (function_info.is_null()) return false;
- if (cache != NULL) cache->Add(name, function_info);
- }
-
- // Set up the function context. Conceptually, we should clone the
- // function before overwriting the context but since we're in a
- // single-threaded environment it is not strictly necessary.
- ASSERT(top_context->IsNativeContext());
- Handle<Context> context =
- Handle<Context>(use_runtime_context
- ? Handle<Context>(top_context->runtime_context())
- : top_context);
- Handle<JSFunction> fun =
- factory->NewFunctionFromSharedFunctionInfo(function_info, context);
-
- // Call function using either the runtime object or the global
- // object as the receiver. Provide no parameters.
- Handle<Object> receiver =
- Handle<Object>(use_runtime_context
- ? top_context->builtins()
- : top_context->global_object(),
- isolate);
- bool has_pending_exception;
- Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
- if (has_pending_exception) return false;
- return true;
-}
-
-
-#define INSTALL_NATIVE(Type, name, var) \
- Handle<String> var##_name = \
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR(name)); \
- Object* var##_native = \
- native_context()->builtins()->GetPropertyNoExceptionThrown( \
- *var##_name); \
- native_context()->set_##var(Type::cast(var##_native));
-
-
-void Genesis::InstallNativeFunctions() {
- HandleScope scope(isolate());
- INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
- INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
- INSTALL_NATIVE(JSFunction, "ToString", to_string_fun);
- INSTALL_NATIVE(JSFunction, "ToDetailString", to_detail_string_fun);
- INSTALL_NATIVE(JSFunction, "ToObject", to_object_fun);
- INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
- INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
- INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
- INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
- INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
- INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
- configure_instance_fun);
- INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
- INSTALL_NATIVE(JSObject, "functionCache", function_cache);
- INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
- to_complete_property_descriptor);
-}
-
-void Genesis::InstallExperimentalNativeFunctions() {
- if (FLAG_harmony_symbols) {
- INSTALL_NATIVE(JSObject, "SymbolDelegate", symbol_delegate);
- }
- if (FLAG_harmony_proxies) {
- INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
- INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
- INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
- INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
- }
- if (FLAG_harmony_observation) {
- INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
- INSTALL_NATIVE(JSFunction, "DeliverChangeRecords",
- observers_deliver_changes);
- }
-}
-
-#undef INSTALL_NATIVE
-
-
-Handle<JSFunction> Genesis::InstallInternalArray(
- Handle<JSBuiltinsObject> builtins,
- const char* name,
- ElementsKind elements_kind) {
- // --- I n t e r n a l A r r a y ---
- // An array constructor on the builtins object that works like
- // the public Array constructor, except that its prototype
- // doesn't inherit from Object.prototype.
- // To be used only for internal work by builtins. Instances
- // must not be leaked to user code.
- Handle<JSFunction> array_function =
- InstallFunction(builtins,
- name,
- JS_ARRAY_TYPE,
- JSArray::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kInternalArrayCode,
- true);
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetPrototype(array_function, prototype);
-
- // TODO(mvstanton): For performance reasons, this code would have to
- // be changed to successfully run with FLAG_optimize_constructed_arrays.
- // The next checkin to enable FLAG_optimize_constructed_arrays by
- // default will address this.
- CHECK(!FLAG_optimize_constructed_arrays);
- array_function->shared()->set_construct_stub(
- isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
-
- array_function->shared()->DontAdaptArguments();
-
- MaybeObject* maybe_map = array_function->initial_map()->Copy();
- Map* new_map;
- if (!maybe_map->To(&new_map)) return Handle<JSFunction>::null();
- new_map->set_elements_kind(elements_kind);
- array_function->set_initial_map(new_map);
-
- // Make "length" magic on instances.
- Handle<Map> initial_map(array_function->initial_map());
- Handle<DescriptorArray> array_descriptors(
- factory()->NewDescriptorArray(0, 1));
- DescriptorArray::WhitenessWitness witness(*array_descriptors);
-
- Handle<Foreign> array_length(factory()->NewForeign(
- &Accessors::ArrayLength));
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE);
- initial_map->set_instance_descriptors(*array_descriptors);
-
- { // Add length.
- CallbacksDescriptor d(
- *factory()->length_string(), *array_length, attribs);
- array_function->initial_map()->AppendDescriptor(&d, witness);
- }
-
- return array_function;
-}
-
-
-bool Genesis::InstallNatives() {
- HandleScope scope(isolate());
-
- // Create a function for the builtins object. Allocate space for the
- // JavaScript builtins, a reference to the builtins object
- // (itself) and a reference to the native_context directly in the object.
- Handle<Code> code = Handle<Code>(
- isolate()->builtins()->builtin(Builtins::kIllegal));
- Handle<JSFunction> builtins_fun =
- factory()->NewFunction(factory()->empty_string(),
- JS_BUILTINS_OBJECT_TYPE,
- JSBuiltinsObject::kSize, code, true);
-
- Handle<String> name =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("builtins"));
- builtins_fun->shared()->set_instance_class_name(*name);
- builtins_fun->initial_map()->set_dictionary_map(true);
- builtins_fun->initial_map()->set_prototype(heap()->null_value());
-
- // Allocate the builtins object.
- Handle<JSBuiltinsObject> builtins =
- Handle<JSBuiltinsObject>::cast(factory()->NewGlobalObject(builtins_fun));
- builtins->set_builtins(*builtins);
- builtins->set_native_context(*native_context());
- builtins->set_global_context(*native_context());
- builtins->set_global_receiver(*builtins);
-
- // Set up the 'global' properties of the builtins object. The
- // 'global' property that refers to the global object is the only
- // way to get from code running in the builtins context to the
- // global object.
- static const PropertyAttributes attributes =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- Handle<String> global_string =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("global"));
- Handle<Object> global_obj(native_context()->global_object(), isolate());
- CHECK_NOT_EMPTY_HANDLE(isolate(),
- JSObject::SetLocalPropertyIgnoreAttributes(
- builtins, global_string, global_obj, attributes));
-
- // Set up the reference from the global object to the builtins object.
- JSGlobalObject::cast(native_context()->global_object())->
- set_builtins(*builtins);
-
- // Create a bridge function that has context in the native context.
- Handle<JSFunction> bridge =
- factory()->NewFunction(factory()->empty_string(),
- factory()->undefined_value());
- ASSERT(bridge->context() == *isolate()->native_context());
-
- // Allocate the builtins context.
- Handle<Context> context =
- factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
- context->set_global_object(*builtins); // override builtins global object
-
- native_context()->set_runtime_context(*context);
-
- { // -- S c r i p t
- // Builtin functions for Script.
- Handle<JSFunction> script_fun =
- InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, false);
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetPrototype(script_fun, prototype);
- native_context()->set_script_function(*script_fun);
-
- Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
-
- Handle<DescriptorArray> script_descriptors(
- factory()->NewDescriptorArray(0, 13));
- DescriptorArray::WhitenessWitness witness(*script_descriptors);
-
- Handle<Foreign> script_source(
- factory()->NewForeign(&Accessors::ScriptSource));
- Handle<Foreign> script_name(factory()->NewForeign(&Accessors::ScriptName));
- Handle<String> id_string(factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("id")));
- Handle<Foreign> script_id(factory()->NewForeign(&Accessors::ScriptId));
- Handle<String> line_offset_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("line_offset")));
- Handle<Foreign> script_line_offset(
- factory()->NewForeign(&Accessors::ScriptLineOffset));
- Handle<String> column_offset_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("column_offset")));
- Handle<Foreign> script_column_offset(
- factory()->NewForeign(&Accessors::ScriptColumnOffset));
- Handle<String> data_string(factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("data")));
- Handle<Foreign> script_data(factory()->NewForeign(&Accessors::ScriptData));
- Handle<String> type_string(factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("type")));
- Handle<Foreign> script_type(factory()->NewForeign(&Accessors::ScriptType));
- Handle<String> compilation_type_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("compilation_type")));
- Handle<Foreign> script_compilation_type(
- factory()->NewForeign(&Accessors::ScriptCompilationType));
- Handle<String> line_ends_string(factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("line_ends")));
- Handle<Foreign> script_line_ends(
- factory()->NewForeign(&Accessors::ScriptLineEnds));
- Handle<String> context_data_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("context_data")));
- Handle<Foreign> script_context_data(
- factory()->NewForeign(&Accessors::ScriptContextData));
- Handle<String> eval_from_script_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("eval_from_script")));
- Handle<Foreign> script_eval_from_script(
- factory()->NewForeign(&Accessors::ScriptEvalFromScript));
- Handle<String> eval_from_script_position_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("eval_from_script_position")));
- Handle<Foreign> script_eval_from_script_position(
- factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition));
- Handle<String> eval_from_function_name_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("eval_from_function_name")));
- Handle<Foreign> script_eval_from_function_name(
- factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName));
- PropertyAttributes attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- script_map->set_instance_descriptors(*script_descriptors);
-
- {
- CallbacksDescriptor d(
- *factory()->source_string(), *script_source, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
- CallbacksDescriptor d(*factory()->name_string(), *script_name, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
- CallbacksDescriptor d(*id_string, *script_id, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
- CallbacksDescriptor d(*line_offset_string, *script_line_offset, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
- CallbacksDescriptor d(
- *column_offset_string, *script_column_offset, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
- CallbacksDescriptor d(*data_string, *script_data, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
- CallbacksDescriptor d(*type_string, *script_type, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
- CallbacksDescriptor d(
- *compilation_type_string, *script_compilation_type, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
- CallbacksDescriptor d(*line_ends_string, *script_line_ends, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
- CallbacksDescriptor d(
- *context_data_string, *script_context_data, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
- CallbacksDescriptor d(
- *eval_from_script_string, *script_eval_from_script, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
- CallbacksDescriptor d(
- *eval_from_script_position_string,
- *script_eval_from_script_position,
- attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
- CallbacksDescriptor d(
- *eval_from_function_name_string,
- *script_eval_from_function_name,
- attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- // Allocate the empty script.
- Handle<Script> script = factory()->NewScript(factory()->empty_string());
- script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- heap()->public_set_empty_script(*script);
- }
- {
- // Builtin function for OpaqueReference -- a JSValue-based object,
- // that keeps its field isolated from JavaScript code. It may store
- // objects, that JavaScript code may not access.
- Handle<JSFunction> opaque_reference_fun =
- InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE,
- JSValue::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, false);
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetPrototype(opaque_reference_fun, prototype);
- native_context()->set_opaque_reference_function(*opaque_reference_fun);
- }
-
- // InternalArrays should not use Smi-Only array optimizations. There are too
- // many places in the C++ runtime code (e.g. RegEx) that assume that
- // elements in InternalArrays can be set to non-Smi values without going
- // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
- // transition easy to trap. Moreover, they rarely are smi-only.
- {
- Handle<JSFunction> array_function =
- InstallInternalArray(builtins, "InternalArray", FAST_HOLEY_ELEMENTS);
- if (array_function.is_null()) return false;
- native_context()->set_internal_array_function(*array_function);
- }
-
- {
- Handle<JSFunction> array_function =
- InstallInternalArray(builtins, "InternalPackedArray", FAST_ELEMENTS);
- if (array_function.is_null()) return false;
- }
-
- if (FLAG_disable_native_files) {
- PrintF("Warning: Running without installed natives!\n");
- return true;
- }
-
- // Install natives.
- for (int i = Natives::GetDebuggerCount();
- i < Natives::GetBuiltinsCount();
- i++) {
- if (!CompileBuiltin(isolate(), i)) return false;
- // TODO(ager): We really only need to install the JS builtin
- // functions on the builtins object after compiling and running
- // runtime.js.
- if (!InstallJSBuiltins(builtins)) return false;
- }
-
- InstallNativeFunctions();
-
- // Store the map for the string prototype after the natives has been compiled
- // and the String function has been set up.
- Handle<JSFunction> string_function(native_context()->string_function());
- ASSERT(JSObject::cast(
- string_function->initial_map()->prototype())->HasFastProperties());
- native_context()->set_string_function_prototype_map(
- HeapObject::cast(string_function->initial_map()->prototype())->map());
-
- // Install Function.prototype.call and apply.
- { Handle<String> key = factory()->function_class_string();
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(
- GetProperty(isolate(), isolate()->global_object(), key));
- Handle<JSObject> proto =
- Handle<JSObject>(JSObject::cast(function->instance_prototype()));
-
- // Install the call and the apply functions.
- Handle<JSFunction> call =
- InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- Handle<JSObject>::null(),
- Builtins::kFunctionCall,
- false);
- Handle<JSFunction> apply =
- InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- Handle<JSObject>::null(),
- Builtins::kFunctionApply,
- false);
-
- // Make sure that Function.prototype.call appears to be compiled.
- // The code will never be called, but inline caching for call will
- // only work if it appears to be compiled.
- call->shared()->DontAdaptArguments();
- ASSERT(call->is_compiled());
-
- // Set the expected parameters for apply to 2; required by builtin.
- apply->shared()->set_formal_parameter_count(2);
-
- // Set the lengths for the functions to satisfy ECMA-262.
- call->shared()->set_length(1);
- apply->shared()->set_length(2);
- }
-
- InstallBuiltinFunctionIds();
-
- // Create a constructor for RegExp results (a variant of Array that
- // predefines the two properties index and match).
- {
- // RegExpResult initial map.
-
- // Find global.Array.prototype to inherit from.
- Handle<JSFunction> array_constructor(native_context()->array_function());
- Handle<JSObject> array_prototype(
- JSObject::cast(array_constructor->instance_prototype()));
-
- // Add initial map.
- Handle<Map> initial_map =
- factory()->NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
- initial_map->set_constructor(*array_constructor);
-
- // Set prototype on map.
- initial_map->set_non_instance_prototype(false);
- initial_map->set_prototype(*array_prototype);
-
- // Update map with length accessor from Array and add "index" and "input".
- Handle<DescriptorArray> reresult_descriptors =
- factory()->NewDescriptorArray(0, 3);
- DescriptorArray::WhitenessWitness witness(*reresult_descriptors);
- initial_map->set_instance_descriptors(*reresult_descriptors);
-
- {
- JSFunction* array_function = native_context()->array_function();
- Handle<DescriptorArray> array_descriptors(
- array_function->initial_map()->instance_descriptors());
- String* length = heap()->length_string();
- int old = array_descriptors->SearchWithCache(
- length, array_function->initial_map());
- ASSERT(old != DescriptorArray::kNotFound);
- CallbacksDescriptor desc(length,
- array_descriptors->GetValue(old),
- array_descriptors->GetDetails(old).attributes());
- initial_map->AppendDescriptor(&desc, witness);
- }
- {
- FieldDescriptor index_field(heap()->index_string(),
- JSRegExpResult::kIndexIndex,
- NONE);
- initial_map->AppendDescriptor(&index_field, witness);
- }
-
- {
- FieldDescriptor input_field(heap()->input_string(),
- JSRegExpResult::kInputIndex,
- NONE);
- initial_map->AppendDescriptor(&input_field, witness);
- }
-
- initial_map->set_inobject_properties(2);
- initial_map->set_pre_allocated_property_fields(2);
- initial_map->set_unused_property_fields(0);
-
- native_context()->set_regexp_result_map(*initial_map);
- }
-
-#ifdef VERIFY_HEAP
- builtins->Verify();
-#endif
-
- return true;
-}
-
-
-bool Genesis::InstallExperimentalNatives() {
- for (int i = ExperimentalNatives::GetDebuggerCount();
- i < ExperimentalNatives::GetBuiltinsCount();
- i++) {
- if (FLAG_harmony_symbols &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native symbol.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_proxies &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native proxy.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_collections &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native collection.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_observation &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native object-observe.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- }
-
- InstallExperimentalNativeFunctions();
-
- return true;
-}
-
-
-static Handle<JSObject> ResolveBuiltinIdHolder(
- Handle<Context> native_context,
- const char* holder_expr) {
- Isolate* isolate = native_context->GetIsolate();
- Factory* factory = isolate->factory();
- Handle<GlobalObject> global(native_context->global_object());
- const char* period_pos = strchr(holder_expr, '.');
- if (period_pos == NULL) {
- return Handle<JSObject>::cast(GetProperty(
- isolate, global, factory->InternalizeUtf8String(holder_expr)));
- }
- ASSERT_EQ(".prototype", period_pos);
- Vector<const char> property(holder_expr,
- static_cast<int>(period_pos - holder_expr));
- Handle<JSFunction> function = Handle<JSFunction>::cast(
- GetProperty(isolate, global, factory->InternalizeUtf8String(property)));
- return Handle<JSObject>(JSObject::cast(function->prototype()));
-}
-
-
-static void InstallBuiltinFunctionId(Handle<JSObject> holder,
- const char* function_name,
- BuiltinFunctionId id) {
- Factory* factory = holder->GetIsolate()->factory();
- Handle<String> name = factory->InternalizeUtf8String(function_name);
- Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
- Handle<JSFunction> function(JSFunction::cast(function_object));
- function->shared()->set_function_data(Smi::FromInt(id));
-}
-
-
-void Genesis::InstallBuiltinFunctionIds() {
- HandleScope scope(isolate());
-#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
- { \
- Handle<JSObject> holder = ResolveBuiltinIdHolder( \
- native_context(), #holder_expr); \
- BuiltinFunctionId id = k##name; \
- InstallBuiltinFunctionId(holder, #fun_name, id); \
- }
- FUNCTIONS_WITH_ID_LIST(INSTALL_BUILTIN_ID)
-#undef INSTALL_BUILTIN_ID
-}
-
-
-// Do not forget to update macros.py with named constant
-// of cache id.
-#define JSFUNCTION_RESULT_CACHE_LIST(F) \
- F(16, native_context()->regexp_function())
-
-
-static FixedArray* CreateCache(int size, Handle<JSFunction> factory_function) {
- Factory* factory = factory_function->GetIsolate()->factory();
- // Caches are supposed to live for a long time, allocate in old space.
- int array_size = JSFunctionResultCache::kEntriesIndex + 2 * size;
- // Cannot use cast as object is not fully initialized yet.
- JSFunctionResultCache* cache = reinterpret_cast<JSFunctionResultCache*>(
- *factory->NewFixedArrayWithHoles(array_size, TENURED));
- cache->set(JSFunctionResultCache::kFactoryIndex, *factory_function);
- cache->MakeZeroSize();
- return cache;
-}
-
-
-void Genesis::InstallJSFunctionResultCaches() {
- const int kNumberOfCaches = 0 +
-#define F(size, func) + 1
- JSFUNCTION_RESULT_CACHE_LIST(F)
-#undef F
- ;
-
- Handle<FixedArray> caches = FACTORY->NewFixedArray(kNumberOfCaches, TENURED);
-
- int index = 0;
-
-#define F(size, func) do { \
- FixedArray* cache = CreateCache((size), Handle<JSFunction>(func)); \
- caches->set(index++, cache); \
- } while (false)
-
- JSFUNCTION_RESULT_CACHE_LIST(F);
-
-#undef F
-
- native_context()->set_jsfunction_result_caches(*caches);
-}
-
-
-void Genesis::InitializeNormalizedMapCaches() {
- Handle<FixedArray> array(
- FACTORY->NewFixedArray(NormalizedMapCache::kEntries, TENURED));
- native_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
-}
-
-
-bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
- v8::ExtensionConfiguration* extensions) {
- BootstrapperActive active(this);
- SaveContext saved_context(isolate_);
- isolate_->set_context(*native_context);
- if (!Genesis::InstallExtensions(native_context, extensions)) return false;
- Genesis::InstallSpecialObjects(native_context);
- return true;
-}
-
-
-void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
- Isolate* isolate = native_context->GetIsolate();
- Factory* factory = isolate->factory();
- HandleScope scope(isolate);
- Handle<JSGlobalObject> global(JSGlobalObject::cast(
- native_context->global_object()));
- // Expose the natives in global if a name for it is specified.
- if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
- Handle<String> natives =
- factory->InternalizeUtf8String(FLAG_expose_natives_as);
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- global, natives,
- Handle<JSObject>(global->builtins()),
- DONT_ENUM));
- }
-
- Handle<Object> Error = GetProperty(global, "Error");
- if (Error->IsJSObject()) {
- Handle<String> name = factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("stackTraceLimit"));
- Handle<Smi> stack_trace_limit(
- Smi::FromInt(FLAG_stack_trace_limit), isolate);
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- Handle<JSObject>::cast(Error), name,
- stack_trace_limit, NONE));
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Expose the debug global object in global if a name for it is specified.
- if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
- Debug* debug = isolate->debug();
- // If loading fails we just bail out without installing the
- // debugger but without tanking the whole context.
- if (!debug->Load()) return;
- // Set the security token for the debugger context to the same as
- // the shell native context to allow calling between these (otherwise
- // exposing debug global object doesn't make much sense).
- debug->debug_context()->set_security_token(
- native_context->security_token());
-
- Handle<String> debug_string =
- factory->InternalizeUtf8String(FLAG_expose_debug_as);
- Handle<Object> global_proxy(
- debug->debug_context()->global_proxy(), isolate);
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- global, debug_string, global_proxy, DONT_ENUM));
- }
-#endif
-}
-
-static uint32_t Hash(RegisteredExtension* extension) {
- return v8::internal::ComputePointerHash(extension);
-}
-
-static bool MatchRegisteredExtensions(void* key1, void* key2) {
- return key1 == key2;
-}
-
-Genesis::ExtensionStates::ExtensionStates()
- : map_(MatchRegisteredExtensions, 8) { }
-
-Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state(
- RegisteredExtension* extension) {
- i::HashMap::Entry* entry = map_.Lookup(extension, Hash(extension), false);
- if (entry == NULL) {
- return UNVISITED;
- }
- return static_cast<ExtensionTraversalState>(
- reinterpret_cast<intptr_t>(entry->value));
-}
-
-void Genesis::ExtensionStates::set_state(RegisteredExtension* extension,
- ExtensionTraversalState state) {
- map_.Lookup(extension, Hash(extension), true)->value =
- reinterpret_cast<void*>(static_cast<intptr_t>(state));
-}
-
-bool Genesis::InstallExtensions(Handle<Context> native_context,
- v8::ExtensionConfiguration* extensions) {
- Isolate* isolate = native_context->GetIsolate();
- ExtensionStates extension_states; // All extensions have state UNVISITED.
- // Install auto extensions.
- v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
- while (current != NULL) {
- if (current->extension()->auto_enable())
- InstallExtension(isolate, current, &extension_states);
- current = current->next();
- }
-
- if (FLAG_expose_gc) InstallExtension(isolate, "v8/gc", &extension_states);
- if (FLAG_expose_externalize_string) {
- InstallExtension(isolate, "v8/externalize", &extension_states);
- }
- if (FLAG_track_gc_object_stats) {
- InstallExtension(isolate, "v8/statistics", &extension_states);
- }
-
- if (extensions == NULL) return true;
- // Install required extensions
- int count = v8::ImplementationUtilities::GetNameCount(extensions);
- const char** names = v8::ImplementationUtilities::GetNames(extensions);
- for (int i = 0; i < count; i++) {
- if (!InstallExtension(isolate, names[i], &extension_states))
- return false;
- }
-
- return true;
-}
-
-
-// Installs a named extension. This methods is unoptimized and does
-// not scale well if we want to support a large number of extensions.
-bool Genesis::InstallExtension(Isolate* isolate,
- const char* name,
- ExtensionStates* extension_states) {
- v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
- // Loop until we find the relevant extension
- while (current != NULL) {
- if (strcmp(name, current->extension()->name()) == 0) break;
- current = current->next();
- }
- // Didn't find the extension; fail.
- if (current == NULL) {
- v8::Utils::ReportApiFailure(
- "v8::Context::New()", "Cannot find required extension");
- return false;
- }
- return InstallExtension(isolate, current, extension_states);
-}
-
-
-bool Genesis::InstallExtension(Isolate* isolate,
- v8::RegisteredExtension* current,
- ExtensionStates* extension_states) {
- HandleScope scope(isolate);
-
- if (extension_states->get_state(current) == INSTALLED) return true;
- // The current node has already been visited so there must be a
- // cycle in the dependency graph; fail.
- if (extension_states->get_state(current) == VISITED) {
- v8::Utils::ReportApiFailure(
- "v8::Context::New()", "Circular extension dependency");
- return false;
- }
- ASSERT(extension_states->get_state(current) == UNVISITED);
- extension_states->set_state(current, VISITED);
- v8::Extension* extension = current->extension();
- // Install the extension's dependencies
- for (int i = 0; i < extension->dependency_count(); i++) {
- if (!InstallExtension(isolate,
- extension->dependencies()[i],
- extension_states)) {
- return false;
- }
- }
- Handle<String> source_code =
- isolate->factory()->NewExternalStringFromAscii(extension->source());
- bool result = CompileScriptCached(isolate,
- CStrVector(extension->name()),
- source_code,
- isolate->bootstrapper()->extensions_cache(),
- extension,
- Handle<Context>(isolate->context()),
- false);
- ASSERT(isolate->has_pending_exception() != result);
- if (!result) {
- // We print out the name of the extension that fail to install.
- // When an error is thrown during bootstrapping we automatically print
- // the line number at which this happened to the console in the isolate
- // error throwing functionality.
- OS::PrintError("Error installing extension '%s'.\n",
- current->extension()->name());
- isolate->clear_pending_exception();
- }
- extension_states->set_state(current, INSTALLED);
- isolate->NotifyExtensionInstalled();
- return result;
-}
-
-
-bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
- HandleScope scope(isolate());
- for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
- Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
- Handle<String> name =
- factory()->InternalizeUtf8String(Builtins::GetName(id));
- Object* function_object = builtins->GetPropertyNoExceptionThrown(*name);
- Handle<JSFunction> function
- = Handle<JSFunction>(JSFunction::cast(function_object));
- builtins->set_javascript_builtin(id, *function);
- if (!JSFunction::CompileLazy(function, CLEAR_EXCEPTION)) {
- return false;
- }
- builtins->set_javascript_builtin_code(id, function->shared()->code());
- }
- return true;
-}
-
-
-bool Genesis::ConfigureGlobalObjects(
- v8::Handle<v8::ObjectTemplate> global_proxy_template) {
- Handle<JSObject> global_proxy(
- JSObject::cast(native_context()->global_proxy()));
- Handle<JSObject> inner_global(
- JSObject::cast(native_context()->global_object()));
-
- if (!global_proxy_template.IsEmpty()) {
- // Configure the global proxy object.
- Handle<ObjectTemplateInfo> proxy_data =
- v8::Utils::OpenHandle(*global_proxy_template);
- if (!ConfigureApiObject(global_proxy, proxy_data)) return false;
-
- // Configure the inner global object.
- Handle<FunctionTemplateInfo> proxy_constructor(
- FunctionTemplateInfo::cast(proxy_data->constructor()));
- if (!proxy_constructor->prototype_template()->IsUndefined()) {
- Handle<ObjectTemplateInfo> inner_data(
- ObjectTemplateInfo::cast(proxy_constructor->prototype_template()));
- if (!ConfigureApiObject(inner_global, inner_data)) return false;
- }
- }
-
- SetObjectPrototype(global_proxy, inner_global);
- return true;
-}
-
-
-bool Genesis::ConfigureApiObject(Handle<JSObject> object,
- Handle<ObjectTemplateInfo> object_template) {
- ASSERT(!object_template.is_null());
- ASSERT(object->IsInstanceOf(
- FunctionTemplateInfo::cast(object_template->constructor())));
-
- bool pending_exception = false;
- Handle<JSObject> obj =
- Execution::InstantiateObject(object_template, &pending_exception);
- if (pending_exception) {
- ASSERT(isolate()->has_pending_exception());
- isolate()->clear_pending_exception();
- return false;
- }
- TransferObject(obj, object);
- return true;
-}
-
-
-void Genesis::TransferNamedProperties(Handle<JSObject> from,
- Handle<JSObject> to) {
- if (from->HasFastProperties()) {
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(from->map()->instance_descriptors());
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
- switch (details.type()) {
- case FIELD: {
- HandleScope inner(isolate());
- Handle<String> key = Handle<String>(descs->GetKey(i));
- int index = descs->GetFieldIndex(i);
- Handle<Object> value = Handle<Object>(from->FastPropertyAt(index),
- isolate());
- CHECK_NOT_EMPTY_HANDLE(isolate(),
- JSObject::SetLocalPropertyIgnoreAttributes(
- to, key, value, details.attributes()));
- break;
- }
- case CONSTANT_FUNCTION: {
- HandleScope inner(isolate());
- Handle<String> key = Handle<String>(descs->GetKey(i));
- Handle<JSFunction> fun =
- Handle<JSFunction>(descs->GetConstantFunction(i));
- CHECK_NOT_EMPTY_HANDLE(isolate(),
- JSObject::SetLocalPropertyIgnoreAttributes(
- to, key, fun, details.attributes()));
- break;
- }
- case CALLBACKS: {
- LookupResult result(isolate());
- to->LocalLookup(descs->GetKey(i), &result);
- // If the property is already there we skip it
- if (result.IsFound()) continue;
- HandleScope inner(isolate());
- ASSERT(!to->HasFastProperties());
- // Add to dictionary.
- Handle<String> key = Handle<String>(descs->GetKey(i));
- Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
- PropertyDetails d = PropertyDetails(details.attributes(),
- CALLBACKS,
- details.descriptor_index());
- JSObject::SetNormalizedProperty(to, key, callbacks, d);
- break;
- }
- case NORMAL:
- // Do not occur since the from object has fast properties.
- case HANDLER:
- case INTERCEPTOR:
- case TRANSITION:
- case NONEXISTENT:
- // No element in instance descriptors have proxy or interceptor type.
- UNREACHABLE();
- break;
- }
- }
- } else {
- Handle<StringDictionary> properties =
- Handle<StringDictionary>(from->property_dictionary());
- int capacity = properties->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* raw_key(properties->KeyAt(i));
- if (properties->IsKey(raw_key)) {
- ASSERT(raw_key->IsString());
- // If the property is already there we skip it.
- LookupResult result(isolate());
- to->LocalLookup(String::cast(raw_key), &result);
- if (result.IsFound()) continue;
- // Set the property.
- Handle<String> key = Handle<String>(String::cast(raw_key));
- Handle<Object> value = Handle<Object>(properties->ValueAt(i),
- isolate());
- if (value->IsJSGlobalPropertyCell()) {
- value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value(),
- isolate());
- }
- PropertyDetails details = properties->DetailsAt(i);
- CHECK_NOT_EMPTY_HANDLE(isolate(),
- JSObject::SetLocalPropertyIgnoreAttributes(
- to, key, value, details.attributes()));
- }
- }
- }
-}
-
-
-void Genesis::TransferIndexedProperties(Handle<JSObject> from,
- Handle<JSObject> to) {
- // Cloning the elements array is sufficient.
- Handle<FixedArray> from_elements =
- Handle<FixedArray>(FixedArray::cast(from->elements()));
- Handle<FixedArray> to_elements = FACTORY->CopyFixedArray(from_elements);
- to->set_elements(*to_elements);
-}
-
-
-void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
- HandleScope outer(isolate());
- Factory* factory = isolate()->factory();
-
- ASSERT(!from->IsJSArray());
- ASSERT(!to->IsJSArray());
-
- TransferNamedProperties(from, to);
- TransferIndexedProperties(from, to);
-
- // Transfer the prototype (new map is needed).
- Handle<Map> old_to_map = Handle<Map>(to->map());
- Handle<Map> new_to_map = factory->CopyMap(old_to_map);
- new_to_map->set_prototype(from->map()->prototype());
- to->set_map(*new_to_map);
-}
-
-
-void Genesis::MakeFunctionInstancePrototypeWritable() {
- // The maps with writable prototype are created in CreateEmptyFunction
- // and CreateStrictModeFunctionMaps respectively. Initially the maps are
- // created with read-only prototype for JS builtins processing.
- ASSERT(!function_instance_map_writable_prototype_.is_null());
- ASSERT(!strict_mode_function_instance_map_writable_prototype_.is_null());
-
- // Replace function instance maps to make prototype writable.
- native_context()->set_function_map(
- *function_instance_map_writable_prototype_);
- native_context()->set_strict_mode_function_map(
- *strict_mode_function_instance_map_writable_prototype_);
-}
-
-
-Genesis::Genesis(Isolate* isolate,
- Handle<Object> global_object,
- v8::Handle<v8::ObjectTemplate> global_template,
- v8::ExtensionConfiguration* extensions)
- : isolate_(isolate),
- active_(isolate->bootstrapper()) {
- result_ = Handle<Context>::null();
- // If V8 isn't running and cannot be initialized, just return.
- if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
-
- // Before creating the roots we must save the context and restore it
- // on all function exits.
- HandleScope scope(isolate);
- SaveContext saved_context(isolate);
-
- // During genesis, the boilerplate for stack overflow won't work until the
- // environment has been at least partially initialized. Add a stack check
- // before entering JS code to catch overflow early.
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) return;
-
- Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
- if (!new_context.is_null()) {
- native_context_ =
- Handle<Context>::cast(isolate->global_handles()->Create(*new_context));
- AddToWeakNativeContextList(*native_context_);
- isolate->set_context(*native_context_);
- isolate->counters()->contexts_created_by_snapshot()->Increment();
- Handle<GlobalObject> inner_global;
- Handle<JSGlobalProxy> global_proxy =
- CreateNewGlobals(global_template,
- global_object,
- &inner_global);
-
- HookUpGlobalProxy(inner_global, global_proxy);
- HookUpInnerGlobal(inner_global);
-
- if (!ConfigureGlobalObjects(global_template)) return;
- } else {
- // We get here if there was no context snapshot.
- CreateRoots();
- Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
- CreateStrictModeFunctionMaps(empty_function);
- Handle<GlobalObject> inner_global;
- Handle<JSGlobalProxy> global_proxy =
- CreateNewGlobals(global_template, global_object, &inner_global);
- HookUpGlobalProxy(inner_global, global_proxy);
- if (!InitializeGlobal(inner_global, empty_function)) return;
- InstallJSFunctionResultCaches();
- InitializeNormalizedMapCaches();
- if (!InstallNatives()) return;
-
- MakeFunctionInstancePrototypeWritable();
-
- if (!ConfigureGlobalObjects(global_template)) return;
- isolate->counters()->contexts_created_from_scratch()->Increment();
- }
-
- // Initialize experimental globals and install experimental natives.
- InitializeExperimentalGlobal();
- if (!InstallExperimentalNatives()) return;
-
- result_ = native_context_;
-}
-
-
-// Support for thread preemption.
-
-// Reserve space for statics needing saving and restoring.
-int Bootstrapper::ArchiveSpacePerThread() {
- return sizeof(NestingCounterType);
-}
-
-
-// Archive statics that are thread local.
-char* Bootstrapper::ArchiveState(char* to) {
- *reinterpret_cast<NestingCounterType*>(to) = nesting_;
- nesting_ = 0;
- return to + sizeof(NestingCounterType);
-}
-
-
-// Restore statics that are thread local.
-char* Bootstrapper::RestoreState(char* from) {
- nesting_ = *reinterpret_cast<NestingCounterType*>(from);
- return from + sizeof(NestingCounterType);
-}
-
-
-// Called when the top-level V8 mutex is destroyed.
-void Bootstrapper::FreeThreadResources() {
- ASSERT(!IsActive());
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/bootstrapper.h b/src/3rdparty/v8/src/bootstrapper.h
deleted file mode 100644
index e33415e..0000000
--- a/src/3rdparty/v8/src/bootstrapper.h
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_BOOTSTRAPPER_H_
-#define V8_BOOTSTRAPPER_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-
-// A SourceCodeCache uses a FixedArray to store pairs of
-// (AsciiString*, JSFunction*), mapping names of native code files
-// (runtime.js, etc.) to precompiled functions. Instead of mapping
-// names to functions it might make sense to let the JS2C tool
-// generate an index for each native JS file.
-class SourceCodeCache BASE_EMBEDDED {
- public:
- explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
-
- void Initialize(bool create_heap_objects) {
- cache_ = create_heap_objects ? HEAP->empty_fixed_array() : NULL;
- }
-
- void Iterate(ObjectVisitor* v) {
- v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
- }
-
- bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
- for (int i = 0; i < cache_->length(); i+=2) {
- SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i));
- if (str->IsUtf8EqualTo(name)) {
- *handle = Handle<SharedFunctionInfo>(
- SharedFunctionInfo::cast(cache_->get(i + 1)));
- return true;
- }
- }
- return false;
- }
-
- void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
- HandleScope scope(shared->GetIsolate());
- int length = cache_->length();
- Handle<FixedArray> new_array =
- FACTORY->NewFixedArray(length + 2, TENURED);
- cache_->CopyTo(0, *new_array, 0, cache_->length());
- cache_ = *new_array;
- Handle<String> str = FACTORY->NewStringFromAscii(name, TENURED);
- cache_->set(length, *str);
- cache_->set(length + 1, *shared);
- Script::cast(shared->script())->set_type(Smi::FromInt(type_));
- }
-
- private:
- Script::Type type_;
- FixedArray* cache_;
- DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
-};
-
-
-// The Boostrapper is the public interface for creating a JavaScript global
-// context.
-class Bootstrapper {
- public:
- // Requires: Heap::SetUp has been called.
- void Initialize(bool create_heap_objects);
- void TearDown();
-
- // Creates a JavaScript Global Context with initial object graph.
- // The returned value is a global handle casted to V8Environment*.
- Handle<Context> CreateEnvironment(
- Handle<Object> global_object,
- v8::Handle<v8::ObjectTemplate> global_template,
- v8::ExtensionConfiguration* extensions);
-
- // Detach the environment from its outer global object.
- void DetachGlobal(Handle<Context> env);
-
- // Reattach an outer global object to an environment.
- void ReattachGlobal(Handle<Context> env, Handle<JSGlobalProxy> global_proxy);
-
- // Traverses the pointers for memory management.
- void Iterate(ObjectVisitor* v);
-
- // Accessor for the native scripts source code.
- Handle<String> NativesSourceLookup(int index);
-
- // Tells whether bootstrapping is active.
- bool IsActive() const { return nesting_ != 0; }
-
- // Support for thread preemption.
- static int ArchiveSpacePerThread();
- char* ArchiveState(char* to);
- char* RestoreState(char* from);
- void FreeThreadResources();
-
- // This will allocate a char array that is deleted when V8 is shut down.
- // It should only be used for strictly finite allocations.
- char* AllocateAutoDeletedArray(int bytes);
-
- // Used for new context creation.
- bool InstallExtensions(Handle<Context> native_context,
- v8::ExtensionConfiguration* extensions);
-
- SourceCodeCache* extensions_cache() { return &extensions_cache_; }
-
- private:
- Isolate* isolate_;
- typedef int NestingCounterType;
- NestingCounterType nesting_;
- SourceCodeCache extensions_cache_;
- // This is for delete, not delete[].
- List<char*>* delete_these_non_arrays_on_tear_down_;
- // This is for delete[]
- List<char*>* delete_these_arrays_on_tear_down_;
-
- friend class BootstrapperActive;
- friend class Isolate;
- friend class NativesExternalStringResource;
-
- explicit Bootstrapper(Isolate* isolate);
-
- DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
-};
-
-
-class BootstrapperActive BASE_EMBEDDED {
- public:
- explicit BootstrapperActive(Bootstrapper* bootstrapper)
- : bootstrapper_(bootstrapper) {
- ++bootstrapper_->nesting_;
- }
-
- ~BootstrapperActive() {
- --bootstrapper_->nesting_;
- }
-
- private:
- Bootstrapper* bootstrapper_;
-
- DISALLOW_COPY_AND_ASSIGN(BootstrapperActive);
-};
-
-
-class NativesExternalStringResource
- : public v8::String::ExternalAsciiStringResource {
- public:
- NativesExternalStringResource(Bootstrapper* bootstrapper,
- const char* source,
- size_t length);
-
- const char* data() const {
- return data_;
- }
-
- size_t length() const {
- return length_;
- }
- private:
- const char* data_;
- size_t length_;
-};
-
-}} // namespace v8::internal
-
-#endif // V8_BOOTSTRAPPER_H_
diff --git a/src/3rdparty/v8/src/builtins.cc b/src/3rdparty/v8/src/builtins.cc
deleted file mode 100644
index aa69203..0000000
--- a/src/3rdparty/v8/src/builtins.cc
+++ /dev/null
@@ -1,1876 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "builtins.h"
-#include "gdb-jit.h"
-#include "ic-inl.h"
-#include "heap-profiler.h"
-#include "mark-compact.h"
-#include "stub-cache.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-// Arguments object passed to C++ builtins.
-template <BuiltinExtraArguments extra_args>
-class BuiltinArguments : public Arguments {
- public:
- BuiltinArguments(int length, Object** arguments)
- : Arguments(length, arguments) { }
-
- Object*& operator[] (int index) {
- ASSERT(index < length());
- return Arguments::operator[](index);
- }
-
- template <class S> Handle<S> at(int index) {
- ASSERT(index < length());
- return Arguments::at<S>(index);
- }
-
- Handle<Object> receiver() {
- return Arguments::at<Object>(0);
- }
-
- Handle<JSFunction> called_function() {
- STATIC_ASSERT(extra_args == NEEDS_CALLED_FUNCTION);
- return Arguments::at<JSFunction>(Arguments::length() - 1);
- }
-
- // Gets the total number of arguments including the receiver (but
- // excluding extra arguments).
- int length() const {
- STATIC_ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- return Arguments::length();
- }
-
-#ifdef DEBUG
- void Verify() {
- // Check we have at least the receiver.
- ASSERT(Arguments::length() >= 1);
- }
-#endif
-};
-
-
-// Specialize BuiltinArguments for the called function extra argument.
-
-template <>
-int BuiltinArguments<NEEDS_CALLED_FUNCTION>::length() const {
- return Arguments::length() - 1;
-}
-
-#ifdef DEBUG
-template <>
-void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() {
- // Check we have at least the receiver and the called function.
- ASSERT(Arguments::length() >= 2);
- // Make sure cast to JSFunction succeeds.
- called_function();
-}
-#endif
-
-
-#define DEF_ARG_TYPE(name, spec) \
- typedef BuiltinArguments<spec> name##ArgumentsType;
-BUILTIN_LIST_C(DEF_ARG_TYPE)
-#undef DEF_ARG_TYPE
-
-} // namespace
-
-// ----------------------------------------------------------------------------
-// Support macro for defining builtins in C++.
-// ----------------------------------------------------------------------------
-//
-// A builtin function is defined by writing:
-//
-// BUILTIN(name) {
-// ...
-// }
-//
-// In the body of the builtin function the arguments can be accessed
-// through the BuiltinArguments object args.
-
-#ifdef DEBUG
-
-#define BUILTIN(name) \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
- name##ArgumentsType args, Isolate* isolate); \
- MUST_USE_RESULT static MaybeObject* Builtin_##name( \
- name##ArgumentsType args, Isolate* isolate) { \
- ASSERT(isolate == Isolate::Current()); \
- args.Verify(); \
- return Builtin_Impl_##name(args, isolate); \
- } \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
- name##ArgumentsType args, Isolate* isolate)
-
-#else // For release mode.
-
-#define BUILTIN(name) \
- static MaybeObject* Builtin_##name(name##ArgumentsType args, Isolate* isolate)
-
-#endif
-
-
-static inline bool CalledAsConstructor(Isolate* isolate) {
-#ifdef DEBUG
- // Calculate the result using a full stack frame iterator and check
- // that the state of the stack is as we assume it to be in the
- // code below.
- StackFrameIterator it(isolate);
- ASSERT(it.frame()->is_exit());
- it.Advance();
- StackFrame* frame = it.frame();
- bool reference_result = frame->is_construct();
-#endif
- Address fp = Isolate::c_entry_fp(isolate->thread_local_top());
- // Because we know fp points to an exit frame we can use the relevant
- // part of ExitFrame::ComputeCallerState directly.
- const int kCallerOffset = ExitFrameConstants::kCallerFPOffset;
- Address caller_fp = Memory::Address_at(fp + kCallerOffset);
- // This inlines the part of StackFrame::ComputeType that grabs the
- // type of the current frame. Note that StackFrame::ComputeType
- // has been specialized for each architecture so if any one of them
- // changes this code has to be changed as well.
- const int kMarkerOffset = StandardFrameConstants::kMarkerOffset;
- const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT);
- Object* marker = Memory::Object_at(caller_fp + kMarkerOffset);
- bool result = (marker == kConstructMarker);
- ASSERT_EQ(result, reference_result);
- return result;
-}
-
-// ----------------------------------------------------------------------------
-
-BUILTIN(Illegal) {
- UNREACHABLE();
- return isolate->heap()->undefined_value(); // Make compiler happy.
-}
-
-
-BUILTIN(EmptyFunction) {
- return isolate->heap()->undefined_value();
-}
-
-
-#define CONVERT_ARG_STUB_CALLER_ARGS(name) \
- Arguments* name = reinterpret_cast<Arguments*>(args[0]);
-
-
-RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) {
- CONVERT_ARG_STUB_CALLER_ARGS(caller_args);
- // ASSERT(args.length() == 3);
- Handle<JSFunction> function = args.at<JSFunction>(1);
- Handle<Object> type_info = args.at<Object>(2);
-
- JSArray* array = NULL;
- bool holey = false;
- if (caller_args->length() == 1 && (*caller_args)[0]->IsSmi()) {
- int value = Smi::cast((*caller_args)[0])->value();
- holey = (value > 0 && value < JSObject::kInitialMaxFastElementArray);
- }
-
- ASSERT(function->has_initial_map());
- ElementsKind kind = function->initial_map()->elements_kind();
- if (holey) {
- kind = GetHoleyElementsKind(kind);
- }
-
- MaybeObject* maybe_array;
- if (*type_info != isolate->heap()->undefined_value()) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(*type_info);
- if (cell->value()->IsSmi()) {
- Smi* smi = Smi::cast(cell->value());
- ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
- if (holey && !IsFastHoleyElementsKind(to_kind)) {
- to_kind = GetHoleyElementsKind(to_kind);
- // Update the allocation site info to reflect the advice alteration.
- cell->set_value(Smi::FromInt(to_kind));
- }
-
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(to_kind);
- if (mode == TRACK_ALLOCATION_SITE) {
- maybe_array = isolate->heap()->AllocateEmptyJSArrayWithAllocationSite(
- kind, type_info);
- } else {
- maybe_array = isolate->heap()->AllocateEmptyJSArray(kind);
- }
- if (!maybe_array->To(&array)) return maybe_array;
- }
- }
-
- if (array == NULL) {
- maybe_array = isolate->heap()->AllocateEmptyJSArray(kind);
- if (!maybe_array->To(&array)) return maybe_array;
- }
-
- maybe_array = ArrayConstructInitializeElements(array, caller_args);
- if (maybe_array->IsFailure()) return maybe_array;
- return array;
-}
-
-
-static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
- Isolate* isolate,
- JSFunction* constructor) {
- ASSERT(args->length() >= 1);
- Heap* heap = isolate->heap();
- isolate->counters()->array_function_runtime()->Increment();
-
- JSArray* array;
- if (CalledAsConstructor(isolate)) {
- array = JSArray::cast((*args)[0]);
- // Initialize elements and length in case later allocations fail so that the
- // array object is initialized in a valid state.
- MaybeObject* maybe_array = array->Initialize(0);
- if (maybe_array->IsFailure()) return maybe_array;
-
- if (FLAG_optimize_constructed_arrays) {
- AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(array);
- ElementsKind to_kind = array->GetElementsKind();
- if (info != NULL && info->GetElementsKindPayload(&to_kind)) {
- if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
- to_kind)) {
- // We have advice that we should change the elements kind
- if (FLAG_trace_track_allocation_sites) {
- PrintF("AllocationSiteInfo: pre-transitioning array %p(%s->%s)\n",
- reinterpret_cast<void*>(array),
- ElementsKindToString(array->GetElementsKind()),
- ElementsKindToString(to_kind));
- }
-
- maybe_array = array->TransitionElementsKind(to_kind);
- if (maybe_array->IsFailure()) return maybe_array;
- }
- }
- }
-
- if (!FLAG_smi_only_arrays) {
- Context* native_context = isolate->context()->native_context();
- if (array->GetElementsKind() == GetInitialFastElementsKind() &&
- !native_context->js_array_maps()->IsUndefined()) {
- FixedArray* map_array =
- FixedArray::cast(native_context->js_array_maps());
- array->set_map(Map::cast(map_array->
- get(TERMINAL_FAST_ELEMENTS_KIND)));
- }
- }
- } else {
- // Allocate the JS Array
- MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
- if (!maybe_obj->To(&array)) return maybe_obj;
- }
-
- Arguments adjusted_arguments(args->length() - 1, args->arguments() - 1);
- ASSERT(adjusted_arguments.length() < 1 ||
- adjusted_arguments[0] == (*args)[1]);
- return ArrayConstructInitializeElements(array, &adjusted_arguments);
-}
-
-
-BUILTIN(InternalArrayCodeGeneric) {
- return ArrayCodeGenericCommon(
- &args,
- isolate,
- isolate->context()->native_context()->internal_array_function());
-}
-
-
-BUILTIN(ArrayCodeGeneric) {
- return ArrayCodeGenericCommon(
- &args,
- isolate,
- isolate->context()->native_context()->array_function());
-}
-
-
-static void MoveDoubleElements(FixedDoubleArray* dst,
- int dst_index,
- FixedDoubleArray* src,
- int src_index,
- int len) {
- if (len == 0) return;
- memmove(dst->data_start() + dst_index,
- src->data_start() + src_index,
- len * kDoubleSize);
-}
-
-
-static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) {
- ASSERT(dst->map() != heap->fixed_cow_array_map());
- MemsetPointer(dst->data_start() + from, heap->the_hole_value(), to - from);
-}
-
-
-static void FillWithHoles(FixedDoubleArray* dst, int from, int to) {
- for (int i = from; i < to; i++) {
- dst->set_the_hole(i);
- }
-}
-
-
-static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
- FixedArrayBase* elms,
- int to_trim) {
- Map* map = elms->map();
- int entry_size;
- if (elms->IsFixedArray()) {
- entry_size = kPointerSize;
- } else {
- entry_size = kDoubleSize;
- }
- ASSERT(elms->map() != HEAP->fixed_cow_array_map());
- // For now this trick is only applied to fixed arrays in new and paged space.
- // In large object space the object's start must coincide with chunk
- // and thus the trick is just not applicable.
- ASSERT(!HEAP->lo_space()->Contains(elms));
-
- STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
- STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
- STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
-
- Object** former_start = HeapObject::RawField(elms, 0);
-
- const int len = elms->length();
-
- if (to_trim * entry_size > FixedArrayBase::kHeaderSize &&
- elms->IsFixedArray() &&
- !heap->new_space()->Contains(elms)) {
- // If we are doing a big trim in old space then we zap the space that was
- // formerly part of the array so that the GC (aided by the card-based
- // remembered set) won't find pointers to new-space there.
- Object** zap = reinterpret_cast<Object**>(elms->address());
- zap++; // Header of filler must be at least one word so skip that.
- for (int i = 1; i < to_trim; i++) {
- *zap++ = Smi::FromInt(0);
- }
- }
- // Technically in new space this write might be omitted (except for
- // debug mode which iterates through the heap), but to play safer
- // we still do it.
- heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);
-
- int new_start_index = to_trim * (entry_size / kPointerSize);
- former_start[new_start_index] = map;
- former_start[new_start_index + 1] = Smi::FromInt(len - to_trim);
-
- // Maintain marking consistency for HeapObjectIterator and
- // IncrementalMarking.
- int size_delta = to_trim * entry_size;
- if (heap->marking()->TransferMark(elms->address(),
- elms->address() + size_delta)) {
- MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
- }
-
- HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
- elms->address() + size_delta));
- return FixedArrayBase::cast(HeapObject::FromAddress(
- elms->address() + to_trim * entry_size));
-}
-
-
-static bool ArrayPrototypeHasNoElements(Heap* heap,
- Context* native_context,
- JSObject* array_proto) {
- // This method depends on non writability of Object and Array prototype
- // fields.
- if (array_proto->elements() != heap->empty_fixed_array()) return false;
- // Object.prototype
- Object* proto = array_proto->GetPrototype();
- if (proto == heap->null_value()) return false;
- array_proto = JSObject::cast(proto);
- if (array_proto != native_context->initial_object_prototype()) return false;
- if (array_proto->elements() != heap->empty_fixed_array()) return false;
- return array_proto->GetPrototype()->IsNull();
-}
-
-
-MUST_USE_RESULT
-static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
- Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
- if (!receiver->IsJSArray()) return NULL;
- JSArray* array = JSArray::cast(receiver);
- HeapObject* elms = array->elements();
- Map* map = elms->map();
- if (map == heap->fixed_array_map()) {
- if (args == NULL || array->HasFastObjectElements()) return elms;
- } else if (map == heap->fixed_cow_array_map()) {
- MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
- if (args == NULL || array->HasFastObjectElements() ||
- !maybe_writable_result->To(&elms)) {
- return maybe_writable_result;
- }
- } else if (map == heap->fixed_double_array_map()) {
- if (args == NULL) return elms;
- } else {
- return NULL;
- }
-
- // Need to ensure that the arguments passed in args can be contained in
- // the array.
- int args_length = args->length();
- if (first_added_arg >= args_length) return array->elements();
-
- ElementsKind origin_kind = array->map()->elements_kind();
- ASSERT(!IsFastObjectElementsKind(origin_kind));
- ElementsKind target_kind = origin_kind;
- int arg_count = args->length() - first_added_arg;
- Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
- for (int i = 0; i < arg_count; i++) {
- Object* arg = arguments[i];
- if (arg->IsHeapObject()) {
- if (arg->IsHeapNumber()) {
- target_kind = FAST_DOUBLE_ELEMENTS;
- } else {
- target_kind = FAST_ELEMENTS;
- break;
- }
- }
- }
- if (target_kind != origin_kind) {
- MaybeObject* maybe_failure = array->TransitionElementsKind(target_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
- return array->elements();
- }
- return elms;
-}
-
-
-static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
- JSArray* receiver) {
- if (!FLAG_clever_optimizations) return false;
- Context* native_context = heap->isolate()->context()->native_context();
- JSObject* array_proto =
- JSObject::cast(native_context->array_function()->prototype());
- return receiver->GetPrototype() == array_proto &&
- ArrayPrototypeHasNoElements(heap, native_context, array_proto);
-}
-
-
-MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
- Isolate* isolate,
- const char* name,
- BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
- HandleScope handleScope(isolate);
-
- Handle<Object> js_builtin =
- GetProperty(Handle<JSObject>(isolate->native_context()->builtins()),
- name);
- Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
- int argc = args.length() - 1;
- ScopedVector<Handle<Object> > argv(argc);
- for (int i = 0; i < argc; ++i) {
- argv[i] = args.at<Object>(i + 1);
- }
- bool pending_exception;
- Handle<Object> result = Execution::Call(function,
- args.receiver(),
- argc,
- argv.start(),
- &pending_exception);
- if (pending_exception) return Failure::Exception();
- return *result;
-}
-
-
-BUILTIN(ArrayPush) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
- if (maybe_elms_obj == NULL) {
- return CallJsBuiltin(isolate, "ArrayPush", args);
- }
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
-
- JSArray* array = JSArray::cast(receiver);
- ASSERT(!array->map()->is_observed());
-
- ElementsKind kind = array->GetElementsKind();
-
- if (IsFastSmiOrObjectElementsKind(kind)) {
- FixedArray* elms = FixedArray::cast(elms_obj);
-
- int len = Smi::cast(array->length())->value();
- int to_add = args.length() - 1;
- if (to_add == 0) {
- return Smi::FromInt(len);
- }
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- ASSERT(to_add <= (Smi::kMaxValue - len));
-
- int new_length = len + to_add;
-
- if (new_length > elms->length()) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
-
- ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
-
- elms = new_elms;
- }
-
- // Add the provided values.
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int index = 0; index < to_add; index++) {
- elms->set(index + len, args[index + 1], mode);
- }
-
- if (elms != array->elements()) {
- array->set_elements(elms);
- }
-
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
- return Smi::FromInt(new_length);
- } else {
- int len = Smi::cast(array->length())->value();
- int elms_len = elms_obj->length();
-
- int to_add = args.length() - 1;
- if (to_add == 0) {
- return Smi::FromInt(len);
- }
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- ASSERT(to_add <= (Smi::kMaxValue - len));
-
- int new_length = len + to_add;
-
- FixedDoubleArray* new_elms;
-
- if (new_length > elms_len) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedDoubleArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
-
- ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
- } else {
- // to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
- // empty_fixed_array.
- new_elms = FixedDoubleArray::cast(elms_obj);
- }
-
- // Add the provided values.
- AssertNoAllocation no_gc;
- int index;
- for (index = 0; index < to_add; index++) {
- Object* arg = args[index + 1];
- new_elms->set(index + len, arg->Number());
- }
-
- if (new_elms != array->elements()) {
- array->set_elements(new_elms);
- }
-
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
- return Smi::FromInt(new_length);
- }
-}
-
-
-BUILTIN(ArrayPop) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
- if (!maybe_elms->To(&elms_obj)) return maybe_elms;
-
- JSArray* array = JSArray::cast(receiver);
- ASSERT(!array->map()->is_observed());
-
- int len = Smi::cast(array->length())->value();
- if (len == 0) return heap->undefined_value();
-
- ElementsAccessor* accessor = array->GetElementsAccessor();
- int new_length = len - 1;
- MaybeObject* maybe_result;
- if (accessor->HasElement(array, array, new_length, elms_obj)) {
- maybe_result = accessor->Get(array, array, new_length, elms_obj);
- } else {
- maybe_result = array->GetPrototype()->GetElement(len - 1);
- }
- if (maybe_result->IsFailure()) return maybe_result;
- MaybeObject* maybe_failure =
- accessor->SetLength(array, Smi::FromInt(new_length));
- if (maybe_failure->IsFailure()) return maybe_failure;
- return maybe_result;
-}
-
-
-BUILTIN(ArrayShift) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayShift", args);
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
-
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
- return CallJsBuiltin(isolate, "ArrayShift", args);
- }
- JSArray* array = JSArray::cast(receiver);
- ASSERT(!array->map()->is_observed());
-
- int len = Smi::cast(array->length())->value();
- if (len == 0) return heap->undefined_value();
-
- // Get first element
- ElementsAccessor* accessor = array->GetElementsAccessor();
- Object* first;
- MaybeObject* maybe_first = accessor->Get(receiver, array, 0, elms_obj);
- if (!maybe_first->To(&first)) return maybe_first;
- if (first->IsTheHole()) {
- first = heap->undefined_value();
- }
-
- if (!heap->lo_space()->Contains(elms_obj)) {
- array->set_elements(LeftTrimFixedArray(heap, elms_obj, 1));
- } else {
- // Shift the elements.
- if (elms_obj->IsFixedArray()) {
- FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
- heap->MoveElements(elms, 0, 1, len - 1);
- elms->set(len - 1, heap->the_hole_value());
- } else {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, 0, elms, 1, len - 1);
- elms->set_the_hole(len - 1);
- }
- }
-
- // Set the length.
- array->set_length(Smi::FromInt(len - 1));
-
- return first;
-}
-
-
-BUILTIN(ArrayUnshift) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
-
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
- }
- JSArray* array = JSArray::cast(receiver);
- ASSERT(!array->map()->is_observed());
- if (!array->HasFastSmiOrObjectElements()) {
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
- }
- FixedArray* elms = FixedArray::cast(elms_obj);
-
- int len = Smi::cast(array->length())->value();
- int to_add = args.length() - 1;
- int new_length = len + to_add;
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- ASSERT(to_add <= (Smi::kMaxValue - len));
-
- MaybeObject* maybe_object =
- array->EnsureCanContainElements(&args, 1, to_add,
- DONT_ALLOW_DOUBLE_ELEMENTS);
- if (maybe_object->IsFailure()) return maybe_object;
-
- if (new_length > elms->length()) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_elms = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_elms->To(&new_elms)) return maybe_elms;
-
- ElementsKind kind = array->GetElementsKind();
- ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, to_add,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
-
- elms = new_elms;
- array->set_elements(elms);
- } else {
- AssertNoAllocation no_gc;
- heap->MoveElements(elms, to_add, 0, len);
- }
-
- // Add the provided values.
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < to_add; i++) {
- elms->set(i, args[i + 1], mode);
- }
-
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
- return Smi::FromInt(new_length);
-}
-
-
-BUILTIN(ArraySlice) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms;
- int len = -1;
- if (receiver->IsJSArray()) {
- JSArray* array = JSArray::cast(receiver);
- if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
-
- if (array->HasFastElements()) {
- elms = array->elements();
- } else {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
-
- len = Smi::cast(array->length())->value();
- } else {
- // Array.slice(arguments, ...) is quite a common idiom (notably more
- // than 50% of invocations in Web apps). Treat it in C++ as well.
- Map* arguments_map =
- isolate->context()->native_context()->arguments_boilerplate()->map();
-
- bool is_arguments_object_with_fast_elements =
- receiver->IsJSObject() &&
- JSObject::cast(receiver)->map() == arguments_map;
- if (!is_arguments_object_with_fast_elements) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- JSObject* object = JSObject::cast(receiver);
-
- if (object->HasFastElements()) {
- elms = object->elements();
- } else {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
- if (!len_obj->IsSmi()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- len = Smi::cast(len_obj)->value();
- if (len > elms->length()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- }
-
- JSObject* object = JSObject::cast(receiver);
-
- ASSERT(len >= 0);
- int n_arguments = args.length() - 1;
-
- // Note carefully choosen defaults---if argument is missing,
- // it's undefined which gets converted to 0 for relative_start
- // and to len for relative_end.
- int relative_start = 0;
- int relative_end = len;
- if (n_arguments > 0) {
- Object* arg1 = args[1];
- if (arg1->IsSmi()) {
- relative_start = Smi::cast(arg1)->value();
- } else if (arg1->IsHeapNumber()) {
- double start = HeapNumber::cast(arg1)->value();
- if (start < kMinInt || start > kMaxInt) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- relative_start = static_cast<int>(start);
- } else if (!arg1->IsUndefined()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- if (n_arguments > 1) {
- Object* arg2 = args[2];
- if (arg2->IsSmi()) {
- relative_end = Smi::cast(arg2)->value();
- } else if (arg2->IsHeapNumber()) {
- double end = HeapNumber::cast(arg2)->value();
- if (end < kMinInt || end > kMaxInt) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- relative_end = static_cast<int>(end);
- } else if (!arg2->IsUndefined()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- }
- }
-
- // ECMAScript 232, 3rd Edition, Section 15.4.4.10, step 6.
- int k = (relative_start < 0) ? Max(len + relative_start, 0)
- : Min(relative_start, len);
-
- // ECMAScript 232, 3rd Edition, Section 15.4.4.10, step 8.
- int final = (relative_end < 0) ? Max(len + relative_end, 0)
- : Min(relative_end, len);
-
- // Calculate the length of result array.
- int result_len = Max(final - k, 0);
-
- ElementsKind kind = object->GetElementsKind();
- if (IsHoleyElementsKind(kind)) {
- bool packed = true;
- ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
- for (int i = k; i < final; i++) {
- if (!accessor->HasElement(object, object, i, elms)) {
- packed = false;
- break;
- }
- }
- if (packed) {
- kind = GetPackedElementsKind(kind);
- } else if (!receiver->IsJSArray()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- }
-
- JSArray* result_array;
- MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind,
- result_len,
- result_len);
-
- AssertNoAllocation no_gc;
- if (result_len == 0) return maybe_array;
- if (!maybe_array->To(&result_array)) return maybe_array;
-
- ElementsAccessor* accessor = object->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, k, kind, result_array->elements(), 0, result_len, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
-
- return result_array;
-}
-
-
-BUILTIN(ArraySplice) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
- if (maybe_elms == NULL) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
- if (!maybe_elms->To(&elms_obj)) return maybe_elms;
-
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
- JSArray* array = JSArray::cast(receiver);
- ASSERT(!array->map()->is_observed());
-
- int len = Smi::cast(array->length())->value();
-
- int n_arguments = args.length() - 1;
-
- int relative_start = 0;
- if (n_arguments > 0) {
- Object* arg1 = args[1];
- if (arg1->IsSmi()) {
- relative_start = Smi::cast(arg1)->value();
- } else if (arg1->IsHeapNumber()) {
- double start = HeapNumber::cast(arg1)->value();
- if (start < kMinInt || start > kMaxInt) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
- relative_start = static_cast<int>(start);
- } else if (!arg1->IsUndefined()) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
- }
- int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
- : Min(relative_start, len);
-
- // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
- // given as a request to delete all the elements from the start.
- // And it differs from the case of undefined delete count.
- // This does not follow ECMA-262, but we do the same for
- // compatibility.
- int actual_delete_count;
- if (n_arguments == 1) {
- ASSERT(len - actual_start >= 0);
- actual_delete_count = len - actual_start;
- } else {
- int value = 0; // ToInteger(undefined) == 0
- if (n_arguments > 1) {
- Object* arg2 = args[2];
- if (arg2->IsSmi()) {
- value = Smi::cast(arg2)->value();
- } else {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
- }
- actual_delete_count = Min(Max(value, 0), len - actual_start);
- }
-
- ElementsKind elements_kind = array->GetElementsKind();
-
- int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
- int new_length = len - actual_delete_count + item_count;
-
- // For double mode we do not support changing the length.
- if (new_length > len && IsFastDoubleElementsKind(elements_kind)) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
-
- if (new_length == 0) {
- MaybeObject* maybe_array = heap->AllocateJSArrayWithElements(
- elms_obj, elements_kind, actual_delete_count);
- if (maybe_array->IsFailure()) return maybe_array;
- array->set_elements(heap->empty_fixed_array());
- array->set_length(Smi::FromInt(0));
- return maybe_array;
- }
-
- JSArray* result_array = NULL;
- MaybeObject* maybe_array =
- heap->AllocateJSArrayAndStorage(elements_kind,
- actual_delete_count,
- actual_delete_count);
- if (!maybe_array->To(&result_array)) return maybe_array;
-
- if (actual_delete_count > 0) {
- AssertNoAllocation no_gc;
- ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, actual_start, elements_kind, result_array->elements(),
- 0, actual_delete_count, elms_obj);
- // Cannot fail since the origin and target array are of the same elements
- // kind.
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
- }
-
- bool elms_changed = false;
- if (item_count < actual_delete_count) {
- // Shrink the array.
- const bool trim_array = !heap->lo_space()->Contains(elms_obj) &&
- ((actual_start + item_count) <
- (len - actual_delete_count - actual_start));
- if (trim_array) {
- const int delta = actual_delete_count - item_count;
-
- if (elms_obj->IsFixedDoubleArray()) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, delta, elms, 0, actual_start);
- } else {
- FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
- heap->MoveElements(elms, delta, 0, actual_start);
- }
-
- elms_obj = LeftTrimFixedArray(heap, elms_obj, delta);
-
- elms_changed = true;
- } else {
- if (elms_obj->IsFixedDoubleArray()) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
- FillWithHoles(elms, new_length, len);
- } else {
- FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
- heap->MoveElements(elms, actual_start + item_count,
- actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
- FillWithHoles(heap, elms, new_length, len);
- }
- }
- } else if (item_count > actual_delete_count) {
- FixedArray* elms = FixedArray::cast(elms_obj);
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
-
- // Check if array need to grow.
- if (new_length > elms->length()) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
-
- AssertNoAllocation no_gc;
-
- ElementsKind kind = array->GetElementsKind();
- ElementsAccessor* accessor = array->GetElementsAccessor();
- if (actual_start > 0) {
- // Copy the part before actual_start as is.
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, 0, actual_start, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
- }
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, actual_start + actual_delete_count, kind, new_elms,
- actual_start + item_count,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
-
- elms_obj = new_elms;
- elms_changed = true;
- } else {
- AssertNoAllocation no_gc;
- heap->MoveElements(elms, actual_start + item_count,
- actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
- }
- }
-
- if (IsFastDoubleElementsKind(elements_kind)) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- for (int k = actual_start; k < actual_start + item_count; k++) {
- Object* arg = args[3 + k - actual_start];
- if (arg->IsSmi()) {
- elms->set(k, Smi::cast(arg)->value());
- } else {
- elms->set(k, HeapNumber::cast(arg)->value());
- }
- }
- } else {
- FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int k = actual_start; k < actual_start + item_count; k++) {
- elms->set(k, args[3 + k - actual_start], mode);
- }
- }
-
- if (elms_changed) {
- array->set_elements(elms_obj);
- }
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
-
- return result_array;
-}
-
-
-BUILTIN(ArrayConcat) {
- Heap* heap = isolate->heap();
- Context* native_context = isolate->context()->native_context();
- JSObject* array_proto =
- JSObject::cast(native_context->array_function()->prototype());
- if (!ArrayPrototypeHasNoElements(heap, native_context, array_proto)) {
- return CallJsBuiltin(isolate, "ArrayConcat", args);
- }
-
- // Iterate through all the arguments performing checks
- // and calculating total length.
- int n_arguments = args.length();
- int result_len = 0;
- ElementsKind elements_kind = GetInitialFastElementsKind();
- bool has_double = false;
- bool is_holey = false;
- for (int i = 0; i < n_arguments; i++) {
- Object* arg = args[i];
- if (!arg->IsJSArray() ||
- !JSArray::cast(arg)->HasFastElements() ||
- JSArray::cast(arg)->GetPrototype() != array_proto) {
- return CallJsBuiltin(isolate, "ArrayConcat", args);
- }
- int len = Smi::cast(JSArray::cast(arg)->length())->value();
-
- // We shouldn't overflow when adding another len.
- const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
- STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
- USE(kHalfOfMaxInt);
- result_len += len;
- ASSERT(result_len >= 0);
-
- if (result_len > FixedDoubleArray::kMaxLength) {
- return CallJsBuiltin(isolate, "ArrayConcat", args);
- }
-
- ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
- has_double = has_double || IsFastDoubleElementsKind(arg_kind);
- is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
- if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
- elements_kind = arg_kind;
- }
- }
-
- if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind);
-
- // If a double array is concatted into a fast elements array, the fast
- // elements array needs to be initialized to contain proper holes, since
- // boxing doubles may cause incremental marking.
- ArrayStorageAllocationMode mode =
- has_double && IsFastObjectElementsKind(elements_kind)
- ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS;
- JSArray* result_array;
- // Allocate result.
- MaybeObject* maybe_array =
- heap->AllocateJSArrayAndStorage(elements_kind,
- result_len,
- result_len,
- mode);
- if (!maybe_array->To(&result_array)) return maybe_array;
- if (result_len == 0) return result_array;
-
- int j = 0;
- FixedArrayBase* storage = result_array->elements();
- ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
- for (int i = 0; i < n_arguments; i++) {
- JSArray* array = JSArray::cast(args[i]);
- int len = Smi::cast(array->length())->value();
- ElementsKind from_kind = array->GetElementsKind();
- if (len > 0) {
- MaybeObject* maybe_failure =
- accessor->CopyElements(array, 0, from_kind, storage, j, len);
- if (maybe_failure->IsFailure()) return maybe_failure;
- j += len;
- }
- }
-
- ASSERT(j == result_len);
-
- return result_array;
-}
-
-
-// -----------------------------------------------------------------------------
-// Strict mode poison pills
-
-
-BUILTIN(StrictModePoisonPill) {
- HandleScope scope(isolate);
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_poison_pill", HandleVector<Object>(NULL, 0)));
-}
-
-// -----------------------------------------------------------------------------
-//
-
-
-// Searches the hidden prototype chain of the given object for the first
-// object that is an instance of the given type. If no such object can
-// be found then Heap::null_value() is returned.
-static inline Object* FindHidden(Heap* heap,
- Object* object,
- FunctionTemplateInfo* type) {
- if (object->IsInstanceOf(type)) return object;
- Object* proto = object->GetPrototype(heap->isolate());
- if (proto->IsJSObject() &&
- JSObject::cast(proto)->map()->is_hidden_prototype()) {
- return FindHidden(heap, proto, type);
- }
- return heap->null_value();
-}
-
-
-// Returns the holder JSObject if the function can legally be called
-// with this receiver. Returns Heap::null_value() if the call is
-// illegal. Any arguments that don't fit the expected type is
-// overwritten with undefined. Note that holder and the arguments are
-// implicitly rewritten with the first object in the hidden prototype
-// chain that actually has the expected type.
-static inline Object* TypeCheck(Heap* heap,
- int argc,
- Object** argv,
- FunctionTemplateInfo* info) {
- Object* recv = argv[0];
- // API calls are only supported with JSObject receivers.
- if (!recv->IsJSObject()) return heap->null_value();
- Object* sig_obj = info->signature();
- if (sig_obj->IsUndefined()) return recv;
- SignatureInfo* sig = SignatureInfo::cast(sig_obj);
- // If necessary, check the receiver
- Object* recv_type = sig->receiver();
- Object* holder = recv;
- if (!recv_type->IsUndefined()) {
- holder = FindHidden(heap, holder, FunctionTemplateInfo::cast(recv_type));
- if (holder == heap->null_value()) return heap->null_value();
- }
- Object* args_obj = sig->args();
- // If there is no argument signature we're done
- if (args_obj->IsUndefined()) return holder;
- FixedArray* args = FixedArray::cast(args_obj);
- int length = args->length();
- if (argc <= length) length = argc - 1;
- for (int i = 0; i < length; i++) {
- Object* argtype = args->get(i);
- if (argtype->IsUndefined()) continue;
- Object** arg = &argv[-1 - i];
- Object* current = *arg;
- current = FindHidden(heap, current, FunctionTemplateInfo::cast(argtype));
- if (current == heap->null_value()) current = heap->undefined_value();
- *arg = current;
- }
- return holder;
-}
-
-
-template <bool is_construct>
-MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
- BuiltinArguments<NEEDS_CALLED_FUNCTION> args, Isolate* isolate) {
- ASSERT(is_construct == CalledAsConstructor(isolate));
- Heap* heap = isolate->heap();
-
- HandleScope scope(isolate);
- Handle<JSFunction> function = args.called_function();
- ASSERT(function->shared()->IsApiFunction());
-
- FunctionTemplateInfo* fun_data = function->shared()->get_api_func_data();
- if (is_construct) {
- Handle<FunctionTemplateInfo> desc(fun_data, isolate);
- bool pending_exception = false;
- isolate->factory()->ConfigureInstance(
- desc, Handle<JSObject>::cast(args.receiver()), &pending_exception);
- ASSERT(isolate->has_pending_exception() == pending_exception);
- if (pending_exception) return Failure::Exception();
- fun_data = *desc;
- }
-
- Object* raw_holder = TypeCheck(heap, args.length(), &args[0], fun_data);
-
- if (raw_holder->IsNull()) {
- // This function cannot be called with the given receiver. Abort!
- Handle<Object> obj =
- isolate->factory()->NewTypeError(
- "illegal_invocation", HandleVector(&function, 1));
- return isolate->Throw(*obj);
- }
-
- Object* raw_call_data = fun_data->call_code();
- if (!raw_call_data->IsUndefined()) {
- CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
- Object* callback_obj = call_data->callback();
- v8::InvocationCallback callback =
- v8::ToCData<v8::InvocationCallback>(callback_obj);
- Object* data_obj = call_data->data();
- Object* result;
-
- LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
- ASSERT(raw_holder->IsJSObject());
-
- CustomArguments custom(isolate);
- v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
- isolate, data_obj, *function, raw_holder);
-
- v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
- custom.end(),
- &args[0] - 1,
- args.length() - 1,
- is_construct);
-
- v8::Handle<v8::Value> value;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- ExternalCallbackScope call_scope(isolate,
- v8::ToCData<Address>(callback_obj));
- value = callback(new_args);
- }
- if (value.IsEmpty()) {
- result = heap->undefined_value();
- } else {
- result = *reinterpret_cast<Object**>(*value);
- result->VerifyApiCallResultType();
- }
-
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!is_construct || result->IsJSObject()) return result;
- }
-
- return *args.receiver();
-}
-
-
-BUILTIN(HandleApiCall) {
- return HandleApiCallHelper<false>(args, isolate);
-}
-
-
-BUILTIN(HandleApiCallConstruct) {
- return HandleApiCallHelper<true>(args, isolate);
-}
-
-
-// Helper function to handle calls to non-function objects created through the
-// API. The object can be called as either a constructor (using new) or just as
-// a function (without new).
-MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
- Isolate* isolate,
- bool is_construct_call,
- BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
- // Non-functions are never called as constructors. Even if this is an object
- // called as a constructor the delegate call is not a construct call.
- ASSERT(!CalledAsConstructor(isolate));
- Heap* heap = isolate->heap();
-
- Handle<Object> receiver = args.receiver();
-
- // Get the object called.
- JSObject* obj = JSObject::cast(*receiver);
-
- // Get the invocation callback from the function descriptor that was
- // used to create the called object.
- ASSERT(obj->map()->has_instance_call_handler());
- JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
- ASSERT(constructor->shared()->IsApiFunction());
- Object* handler =
- constructor->shared()->get_api_func_data()->instance_call_handler();
- ASSERT(!handler->IsUndefined());
- CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
- Object* callback_obj = call_data->callback();
- v8::InvocationCallback callback =
- v8::ToCData<v8::InvocationCallback>(callback_obj);
-
- // Get the data for the call and perform the callback.
- Object* result;
- {
- HandleScope scope(isolate);
- LOG(isolate, ApiObjectAccess("call non-function", obj));
-
- CustomArguments custom(isolate);
- v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
- isolate, call_data->data(), constructor, obj);
- v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
- custom.end(),
- &args[0] - 1,
- args.length() - 1,
- is_construct_call);
- v8::Handle<v8::Value> value;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- ExternalCallbackScope call_scope(isolate,
- v8::ToCData<Address>(callback_obj));
- value = callback(new_args);
- }
- if (value.IsEmpty()) {
- result = heap->undefined_value();
- } else {
- result = *reinterpret_cast<Object**>(*value);
- result->VerifyApiCallResultType();
- }
- }
- // Check for exceptions and return result.
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return result;
-}
-
-
-// Handle calls to non-function objects created through the API. This delegate
-// function is used when the call is a normal function call.
-BUILTIN(HandleApiCallAsFunction) {
- return HandleApiCallAsFunctionOrConstructor(isolate, false, args);
-}
-
-
-// Handle calls to non-function objects created through the API. This delegate
-// function is used when the call is a construct call.
-BUILTIN(HandleApiCallAsConstructor) {
- return HandleApiCallAsFunctionOrConstructor(isolate, true, args);
-}
-
-
-static void Generate_LoadIC_Initialize(MacroAssembler* masm) {
- LoadIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_LoadIC_PreMonomorphic(MacroAssembler* masm) {
- LoadIC::GeneratePreMonomorphic(masm);
-}
-
-
-static void Generate_LoadIC_Miss(MacroAssembler* masm) {
- LoadIC::GenerateMiss(masm);
-}
-
-
-static void Generate_LoadIC_Megamorphic(MacroAssembler* masm) {
- LoadIC::GenerateMegamorphic(masm);
-}
-
-
-static void Generate_LoadIC_Normal(MacroAssembler* masm) {
- LoadIC::GenerateNormal(masm);
-}
-
-
-static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
- LoadStubCompiler::GenerateLoadViaGetter(masm, Handle<JSFunction>());
-}
-
-
-static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
- KeyedLoadIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
- KeyedLoadIC::GenerateRuntimeGetProperty(masm);
-}
-
-
-static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm, MISS);
-}
-
-
-static void Generate_KeyedLoadIC_MissForceGeneric(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm, MISS_FORCE_GENERIC);
-}
-
-
-static void Generate_KeyedLoadIC_Generic(MacroAssembler* masm) {
- KeyedLoadIC::GenerateGeneric(masm);
-}
-
-
-static void Generate_KeyedLoadIC_String(MacroAssembler* masm) {
- KeyedLoadIC::GenerateString(masm);
-}
-
-
-static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
- KeyedLoadIC::GeneratePreMonomorphic(masm);
-}
-
-static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
- KeyedLoadIC::GenerateIndexedInterceptor(masm);
-}
-
-static void Generate_KeyedLoadIC_NonStrictArguments(MacroAssembler* masm) {
- KeyedLoadIC::GenerateNonStrictArguments(masm);
-}
-
-static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
- StoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_StoreIC_Initialize_Strict(MacroAssembler* masm) {
- StoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_StoreIC_Miss(MacroAssembler* masm) {
- StoreIC::GenerateMiss(masm);
-}
-
-
-static void Generate_StoreIC_Normal(MacroAssembler* masm) {
- StoreIC::GenerateNormal(masm);
-}
-
-
-static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) {
- StoreIC::GenerateNormal(masm);
-}
-
-
-static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
- StoreIC::GenerateMegamorphic(masm, kNonStrictMode);
-}
-
-
-static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
- StoreIC::GenerateMegamorphic(masm, kStrictMode);
-}
-
-
-static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
- StoreIC::GenerateGlobalProxy(masm, kNonStrictMode);
-}
-
-
-static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
- StoreIC::GenerateGlobalProxy(masm, kStrictMode);
-}
-
-
-static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
- StoreStubCompiler::GenerateStoreViaSetter(masm, Handle<JSFunction>());
-}
-
-
-static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode);
-}
-
-
-static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, kStrictMode);
-}
-
-
-static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm, MISS);
-}
-
-
-static void Generate_KeyedStoreIC_MissForceGeneric(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm, MISS_FORCE_GENERIC);
-}
-
-
-static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
- KeyedStoreIC::GenerateSlow(masm);
-}
-
-
-static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
- KeyedStoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateInitialize(masm);
-}
-
-static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
- KeyedStoreIC::GenerateNonStrictArguments(masm);
-}
-
-static void Generate_TransitionElementsSmiToDouble(MacroAssembler* masm) {
- KeyedStoreIC::GenerateTransitionElementsSmiToDouble(masm);
-}
-
-static void Generate_TransitionElementsDoubleToObject(MacroAssembler* masm) {
- KeyedStoreIC::GenerateTransitionElementsDoubleToObject(masm);
-}
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateLoadICDebugBreak(masm);
-}
-
-
-static void Generate_StoreIC_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateStoreICDebugBreak(masm);
-}
-
-
-static void Generate_KeyedLoadIC_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateKeyedLoadICDebugBreak(masm);
-}
-
-
-static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateKeyedStoreICDebugBreak(masm);
-}
-
-
-static void Generate_Return_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateReturnDebugBreak(masm);
-}
-
-
-static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateCallFunctionStubDebugBreak(masm);
-}
-
-
-static void Generate_CallFunctionStub_Recording_DebugBreak(
- MacroAssembler* masm) {
- Debug::GenerateCallFunctionStubRecordDebugBreak(masm);
-}
-
-
-static void Generate_CallConstructStub_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateCallConstructStubDebugBreak(masm);
-}
-
-
-static void Generate_CallConstructStub_Recording_DebugBreak(
- MacroAssembler* masm) {
- Debug::GenerateCallConstructStubRecordDebugBreak(masm);
-}
-
-
-static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateSlotDebugBreak(masm);
-}
-
-
-static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) {
- Debug::GeneratePlainReturnLiveEdit(masm);
-}
-
-
-static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
- Debug::GenerateFrameDropperLiveEdit(masm);
-}
-#endif
-
-
-Builtins::Builtins() : initialized_(false) {
- memset(builtins_, 0, sizeof(builtins_[0]) * builtin_count);
- memset(names_, 0, sizeof(names_[0]) * builtin_count);
-}
-
-
-Builtins::~Builtins() {
-}
-
-
-#define DEF_ENUM_C(name, ignore) FUNCTION_ADDR(Builtin_##name),
-Address const Builtins::c_functions_[cfunction_count] = {
- BUILTIN_LIST_C(DEF_ENUM_C)
-};
-#undef DEF_ENUM_C
-
-#define DEF_JS_NAME(name, ignore) #name,
-#define DEF_JS_ARGC(ignore, argc) argc,
-const char* const Builtins::javascript_names_[id_count] = {
- BUILTINS_LIST_JS(DEF_JS_NAME)
-};
-
-int const Builtins::javascript_argc_[id_count] = {
- BUILTINS_LIST_JS(DEF_JS_ARGC)
-};
-#undef DEF_JS_NAME
-#undef DEF_JS_ARGC
-
-struct BuiltinDesc {
- byte* generator;
- byte* c_code;
- const char* s_name; // name is only used for generating log information.
- int name;
- Code::Flags flags;
- BuiltinExtraArguments extra_args;
-};
-
-#define BUILTIN_FUNCTION_TABLE_INIT { V8_ONCE_INIT, {} }
-
-class BuiltinFunctionTable {
- public:
- BuiltinDesc* functions() {
- CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
- return functions_;
- }
-
- OnceType once_;
- BuiltinDesc functions_[Builtins::builtin_count + 1];
-
- friend class Builtins;
-};
-
-static BuiltinFunctionTable builtin_function_table =
- BUILTIN_FUNCTION_TABLE_INIT;
-
-// Define array of pointers to generators and C builtin functions.
-// We do this in a sort of roundabout way so that we can do the initialization
-// within the lexical scope of Builtins:: and within a context where
-// Code::Flags names a non-abstract type.
-void Builtins::InitBuiltinFunctionTable() {
- BuiltinDesc* functions = builtin_function_table.functions_;
- functions[builtin_count].generator = NULL;
- functions[builtin_count].c_code = NULL;
- functions[builtin_count].s_name = NULL;
- functions[builtin_count].name = builtin_count;
- functions[builtin_count].flags = static_cast<Code::Flags>(0);
- functions[builtin_count].extra_args = NO_EXTRA_ARGUMENTS;
-
-#define DEF_FUNCTION_PTR_C(aname, aextra_args) \
- functions->generator = FUNCTION_ADDR(Generate_Adaptor); \
- functions->c_code = FUNCTION_ADDR(Builtin_##aname); \
- functions->s_name = #aname; \
- functions->name = c_##aname; \
- functions->flags = Code::ComputeFlags(Code::BUILTIN); \
- functions->extra_args = aextra_args; \
- ++functions;
-
-#define DEF_FUNCTION_PTR_A(aname, kind, state, extra) \
- functions->generator = FUNCTION_ADDR(Generate_##aname); \
- functions->c_code = NULL; \
- functions->s_name = #aname; \
- functions->name = k##aname; \
- functions->flags = Code::ComputeFlags(Code::kind, \
- state, \
- extra); \
- functions->extra_args = NO_EXTRA_ARGUMENTS; \
- ++functions;
-
- BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
- BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
- BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
-
-#undef DEF_FUNCTION_PTR_C
-#undef DEF_FUNCTION_PTR_A
-}
-
-void Builtins::SetUp(bool create_heap_objects) {
- ASSERT(!initialized_);
- Isolate* isolate = Isolate::Current();
- Heap* heap = isolate->heap();
-
- // Create a scope for the handles in the builtins.
- HandleScope scope(isolate);
-
- const BuiltinDesc* functions = builtin_function_table.functions();
-
- // For now we generate builtin adaptor code into a stack-allocated
- // buffer, before copying it into individual code objects. Be careful
- // with alignment, some platforms don't like unaligned code.
- union { int force_alignment; byte buffer[8*KB]; } u;
-
- // Traverse the list of builtins and generate an adaptor in a
- // separate code object for each one.
- for (int i = 0; i < builtin_count; i++) {
- if (create_heap_objects) {
- MacroAssembler masm(isolate, u.buffer, sizeof u.buffer);
- // Generate the code/adaptor.
- typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
- Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
- // We pass all arguments to the generator, but it may not use all of
- // them. This works because the first arguments are on top of the
- // stack.
- ASSERT(!masm.has_frame());
- g(&masm, functions[i].name, functions[i].extra_args);
- // Move the code into the object heap.
- CodeDesc desc;
- masm.GetCode(&desc);
- Code::Flags flags = functions[i].flags;
- Object* code = NULL;
- {
- // During startup it's OK to always allocate and defer GC to later.
- // This simplifies things because we don't need to retry.
- AlwaysAllocateScope __scope__;
- { MaybeObject* maybe_code =
- heap->CreateCode(desc, flags, masm.CodeObject());
- if (!maybe_code->ToObject(&code)) {
- v8::internal::V8::FatalProcessOutOfMemory("CreateCode");
- }
- }
- }
- // Log the event and add the code to the builtins array.
- PROFILE(isolate,
- CodeCreateEvent(Logger::BUILTIN_TAG,
- Code::cast(code),
- functions[i].s_name));
- GDBJIT(AddCode(GDBJITInterface::BUILTIN,
- functions[i].s_name,
- Code::cast(code)));
- builtins_[i] = code;
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_builtin_code) {
- PrintF("Builtin: %s\n", functions[i].s_name);
- Code::cast(code)->Disassemble(functions[i].s_name);
- PrintF("\n");
- }
-#endif
- } else {
- // Deserializing. The values will be filled in during IterateBuiltins.
- builtins_[i] = NULL;
- }
- names_[i] = functions[i].s_name;
- }
-
- // Mark as initialized.
- initialized_ = true;
-}
-
-
-void Builtins::TearDown() {
- initialized_ = false;
-}
-
-
-void Builtins::IterateBuiltins(ObjectVisitor* v) {
- v->VisitPointers(&builtins_[0], &builtins_[0] + builtin_count);
-}
-
-
-const char* Builtins::Lookup(byte* pc) {
- // may be called during initialization (disassembler!)
- if (initialized_) {
- for (int i = 0; i < builtin_count; i++) {
- Code* entry = Code::cast(builtins_[i]);
- if (entry->contains(pc)) {
- return names_[i];
- }
- }
- }
- return NULL;
-}
-
-
-#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore) \
-Handle<Code> Builtins::name() { \
- Code** code_address = \
- reinterpret_cast<Code**>(builtin_address(k##name)); \
- return Handle<Code>(code_address); \
-}
-#define DEFINE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
-Handle<Code> Builtins::name() { \
- Code** code_address = \
- reinterpret_cast<Code**>(builtin_address(k##name)); \
- return Handle<Code>(code_address); \
-}
-BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
-BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
-BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
-#undef DEFINE_BUILTIN_ACCESSOR_C
-#undef DEFINE_BUILTIN_ACCESSOR_A
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/builtins.h b/src/3rdparty/v8/src/builtins.h
deleted file mode 100644
index 15abeb1..0000000
--- a/src/3rdparty/v8/src/builtins.h
+++ /dev/null
@@ -1,413 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_BUILTINS_H_
-#define V8_BUILTINS_H_
-
-namespace v8 {
-namespace internal {
-
-// Specifies extra arguments required by a C++ builtin.
-enum BuiltinExtraArguments {
- NO_EXTRA_ARGUMENTS = 0,
- NEEDS_CALLED_FUNCTION = 1
-};
-
-
-#define CODE_AGE_LIST_WITH_ARG(V, A) \
- V(Quadragenarian, A) \
- V(Quinquagenarian, A) \
- V(Sexagenarian, A) \
- V(Septuagenarian, A) \
- V(Octogenarian, A)
-
-#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X)
-
-#define CODE_AGE_LIST(V) \
- CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
-
-#define DECLARE_CODE_AGE_BUILTIN(C, V) \
- V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \
- UNINITIALIZED, Code::kNoExtraICState) \
- V(Make##C##CodeYoungAgainEvenMarking, BUILTIN, \
- UNINITIALIZED, Code::kNoExtraICState)
-
-
-// Define list of builtins implemented in C++.
-#define BUILTIN_LIST_C(V) \
- V(Illegal, NO_EXTRA_ARGUMENTS) \
- \
- V(EmptyFunction, NO_EXTRA_ARGUMENTS) \
- \
- V(InternalArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
- V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
- \
- V(ArrayPush, NO_EXTRA_ARGUMENTS) \
- V(ArrayPop, NO_EXTRA_ARGUMENTS) \
- V(ArrayShift, NO_EXTRA_ARGUMENTS) \
- V(ArrayUnshift, NO_EXTRA_ARGUMENTS) \
- V(ArraySlice, NO_EXTRA_ARGUMENTS) \
- V(ArraySplice, NO_EXTRA_ARGUMENTS) \
- V(ArrayConcat, NO_EXTRA_ARGUMENTS) \
- \
- V(HandleApiCall, NEEDS_CALLED_FUNCTION) \
- V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
- V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
- V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS) \
- \
- V(StrictModePoisonPill, NO_EXTRA_ARGUMENTS)
-
-// Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V) \
- V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LazyCompile, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LazyRecompile, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(ParallelRecompile, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(NotifyOSR, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_Slow, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_Slow, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
- Code::IC_FRAGMENT) \
- V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- \
- V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- \
- V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_GlobalProxy, STORE_IC, GENERIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
- kStrictMode) \
- V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
- kStrictMode) \
- V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
- kStrictMode) \
- V(StoreIC_GlobalProxy_Strict, STORE_IC, GENERIC, \
- kStrictMode) \
- V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
- kStrictMode) \
- \
- V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, \
- Code::kNoExtraICState) \
- \
- V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
- kStrictMode) \
- V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
- kStrictMode) \
- V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(TransitionElementsSmiToDouble, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(TransitionElementsDoubleToObject, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- /* Uses KeyedLoadIC_Initialize; must be after in list. */ \
- V(FunctionCall, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(FunctionApply, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(InternalArrayCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(ArrayCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(StringConstructCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-// Define list of builtins used by the debugger implemented in assembly.
-#define BUILTIN_LIST_DEBUG_A(V) \
- V(Return_DebugBreak, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(CallFunctionStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(CallConstructStub_DebugBreak, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(CallConstructStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(LoadIC_DebugBreak, LOAD_IC, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(StoreIC_DebugBreak, STORE_IC, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK)
-#else
-#define BUILTIN_LIST_DEBUG_A(V)
-#endif
-
-// Define list of builtins implemented in JavaScript.
-#define BUILTINS_LIST_JS(V) \
- V(EQUALS, 1) \
- V(STRICT_EQUALS, 1) \
- V(COMPARE, 2) \
- V(ADD, 1) \
- V(SUB, 1) \
- V(MUL, 1) \
- V(DIV, 1) \
- V(MOD, 1) \
- V(BIT_OR, 1) \
- V(BIT_AND, 1) \
- V(BIT_XOR, 1) \
- V(UNARY_MINUS, 0) \
- V(BIT_NOT, 0) \
- V(SHL, 1) \
- V(SAR, 1) \
- V(SHR, 1) \
- V(DELETE, 2) \
- V(IN, 1) \
- V(INSTANCE_OF, 1) \
- V(FILTER_KEY, 1) \
- V(CALL_NON_FUNCTION, 0) \
- V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
- V(CALL_FUNCTION_PROXY, 1) \
- V(CALL_FUNCTION_PROXY_AS_CONSTRUCTOR, 1) \
- V(TO_OBJECT, 0) \
- V(TO_NUMBER, 0) \
- V(TO_STRING, 0) \
- V(STRING_ADD_LEFT, 1) \
- V(STRING_ADD_RIGHT, 1) \
- V(APPLY_PREPARE, 1) \
- V(APPLY_OVERFLOW, 1)
-
-MaybeObject* ArrayConstructor_StubFailure(Arguments args, Isolate* isolate);
-
-class BuiltinFunctionTable;
-class ObjectVisitor;
-
-
-class Builtins {
- public:
- ~Builtins();
-
- // Generate all builtin code objects. Should be called once during
- // isolate initialization.
- void SetUp(bool create_heap_objects);
- void TearDown();
-
- // Garbage collection support.
- void IterateBuiltins(ObjectVisitor* v);
-
- // Disassembler support.
- const char* Lookup(byte* pc);
-
- enum Name {
-#define DEF_ENUM_C(name, ignore) k##name,
-#define DEF_ENUM_A(name, kind, state, extra) k##name,
- BUILTIN_LIST_C(DEF_ENUM_C)
- BUILTIN_LIST_A(DEF_ENUM_A)
- BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
-#undef DEF_ENUM_C
-#undef DEF_ENUM_A
- builtin_count
- };
-
- enum CFunctionId {
-#define DEF_ENUM_C(name, ignore) c_##name,
- BUILTIN_LIST_C(DEF_ENUM_C)
-#undef DEF_ENUM_C
- cfunction_count
- };
-
- enum JavaScript {
-#define DEF_ENUM(name, ignore) name,
- BUILTINS_LIST_JS(DEF_ENUM)
-#undef DEF_ENUM
- id_count
- };
-
-#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
-#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
- Handle<Code> name();
- BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
- BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
- BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
-#undef DECLARE_BUILTIN_ACCESSOR_C
-#undef DECLARE_BUILTIN_ACCESSOR_A
-
- Code* builtin(Name name) {
- // Code::cast cannot be used here since we access builtins
- // during the marking phase of mark sweep. See IC::Clear.
- return reinterpret_cast<Code*>(builtins_[name]);
- }
-
- Address builtin_address(Name name) {
- return reinterpret_cast<Address>(&builtins_[name]);
- }
-
- static Address c_function_address(CFunctionId id) {
- return c_functions_[id];
- }
-
- static const char* GetName(JavaScript id) { return javascript_names_[id]; }
- static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; }
- Handle<Code> GetCode(JavaScript id, bool* resolved);
- static int NumberOfJavaScriptBuiltins() { return id_count; }
-
- bool is_initialized() const { return initialized_; }
-
- private:
- Builtins();
-
- // The external C++ functions called from the code.
- static Address const c_functions_[cfunction_count];
-
- // Note: These are always Code objects, but to conform with
- // IterateBuiltins() above which assumes Object**'s for the callback
- // function f, we use an Object* array here.
- Object* builtins_[builtin_count];
- const char* names_[builtin_count];
- static const char* const javascript_names_[id_count];
- static int const javascript_argc_[id_count];
-
- static void Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args);
- static void Generate_InRecompileQueue(MacroAssembler* masm);
- static void Generate_ParallelRecompile(MacroAssembler* masm);
- static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
- static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
- static void Generate_JSConstructStubApi(MacroAssembler* masm);
- static void Generate_JSEntryTrampoline(MacroAssembler* masm);
- static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
- static void Generate_LazyCompile(MacroAssembler* masm);
- static void Generate_LazyRecompile(MacroAssembler* masm);
- static void Generate_NotifyDeoptimized(MacroAssembler* masm);
- static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
- static void Generate_NotifyOSR(MacroAssembler* masm);
- static void Generate_NotifyStubFailure(MacroAssembler* masm);
- static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
-
- static void Generate_FunctionCall(MacroAssembler* masm);
- static void Generate_FunctionApply(MacroAssembler* masm);
-
- static void Generate_InternalArrayCode(MacroAssembler* masm);
- static void Generate_ArrayCode(MacroAssembler* masm);
- static void Generate_ArrayConstructCode(MacroAssembler* masm);
-
- static void Generate_StringConstructCode(MacroAssembler* masm);
- static void Generate_OnStackReplacement(MacroAssembler* masm);
-
-#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
- static void Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm); \
- static void Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm);
- CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR
-
- static void InitBuiltinFunctionTable();
-
- bool initialized_;
-
- friend class BuiltinFunctionTable;
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(Builtins);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_BUILTINS_H_
diff --git a/src/3rdparty/v8/src/bytecodes-irregexp.h b/src/3rdparty/v8/src/bytecodes-irregexp.h
deleted file mode 100644
index c7cc66e..0000000
--- a/src/3rdparty/v8/src/bytecodes-irregexp.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_BYTECODES_IRREGEXP_H_
-#define V8_BYTECODES_IRREGEXP_H_
-
-namespace v8 {
-namespace internal {
-
-
-const int BYTECODE_MASK = 0xff;
-// The first argument is packed in with the byte code in one word, but so it
-// has 24 bits, but it can be positive and negative so only use 23 bits for
-// positive values.
-const unsigned int MAX_FIRST_ARG = 0x7fffffu;
-const int BYTECODE_SHIFT = 8;
-
-#define BYTECODE_ITERATOR(V) \
-V(BREAK, 0, 4) /* bc8 */ \
-V(PUSH_CP, 1, 4) /* bc8 pad24 */ \
-V(PUSH_BT, 2, 8) /* bc8 pad24 offset32 */ \
-V(PUSH_REGISTER, 3, 4) /* bc8 reg_idx24 */ \
-V(SET_REGISTER_TO_CP, 4, 8) /* bc8 reg_idx24 offset32 */ \
-V(SET_CP_TO_REGISTER, 5, 4) /* bc8 reg_idx24 */ \
-V(SET_REGISTER_TO_SP, 6, 4) /* bc8 reg_idx24 */ \
-V(SET_SP_TO_REGISTER, 7, 4) /* bc8 reg_idx24 */ \
-V(SET_REGISTER, 8, 8) /* bc8 reg_idx24 value32 */ \
-V(ADVANCE_REGISTER, 9, 8) /* bc8 reg_idx24 value32 */ \
-V(POP_CP, 10, 4) /* bc8 pad24 */ \
-V(POP_BT, 11, 4) /* bc8 pad24 */ \
-V(POP_REGISTER, 12, 4) /* bc8 reg_idx24 */ \
-V(FAIL, 13, 4) /* bc8 pad24 */ \
-V(SUCCEED, 14, 4) /* bc8 pad24 */ \
-V(ADVANCE_CP, 15, 4) /* bc8 offset24 */ \
-V(GOTO, 16, 8) /* bc8 pad24 addr32 */ \
-V(LOAD_CURRENT_CHAR, 17, 8) /* bc8 offset24 addr32 */ \
-V(LOAD_CURRENT_CHAR_UNCHECKED, 18, 4) /* bc8 offset24 */ \
-V(LOAD_2_CURRENT_CHARS, 19, 8) /* bc8 offset24 addr32 */ \
-V(LOAD_2_CURRENT_CHARS_UNCHECKED, 20, 4) /* bc8 offset24 */ \
-V(LOAD_4_CURRENT_CHARS, 21, 8) /* bc8 offset24 addr32 */ \
-V(LOAD_4_CURRENT_CHARS_UNCHECKED, 22, 4) /* bc8 offset24 */ \
-V(CHECK_4_CHARS, 23, 12) /* bc8 pad24 uint32 addr32 */ \
-V(CHECK_CHAR, 24, 8) /* bc8 pad8 uint16 addr32 */ \
-V(CHECK_NOT_4_CHARS, 25, 12) /* bc8 pad24 uint32 addr32 */ \
-V(CHECK_NOT_CHAR, 26, 8) /* bc8 pad8 uint16 addr32 */ \
-V(AND_CHECK_4_CHARS, 27, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
-V(AND_CHECK_CHAR, 28, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
-V(AND_CHECK_NOT_4_CHARS, 29, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
-V(AND_CHECK_NOT_CHAR, 30, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
-V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 uc16 addr32 */ \
-V(CHECK_CHAR_IN_RANGE, 32, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
-V(CHECK_CHAR_NOT_IN_RANGE, 33, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
-V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128 */ \
-V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32 */ \
-V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32 */ \
-V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_NOT_REGS_EQUAL, 39, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
-V(CHECK_REGISTER_LT, 40, 12) /* bc8 reg_idx24 value32 addr32 */ \
-V(CHECK_REGISTER_GE, 41, 12) /* bc8 reg_idx24 value32 addr32 */ \
-V(CHECK_REGISTER_EQ_POS, 42, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_AT_START, 43, 8) /* bc8 pad24 addr32 */ \
-V(CHECK_NOT_AT_START, 44, 8) /* bc8 pad24 addr32 */ \
-V(CHECK_GREEDY, 45, 8) /* bc8 pad24 addr32 */ \
-V(ADVANCE_CP_AND_GOTO, 46, 8) /* bc8 offset24 addr32 */ \
-V(SET_CURRENT_POSITION_FROM_END, 47, 4) /* bc8 idx24 */
-
-#define DECLARE_BYTECODES(name, code, length) \
- static const int BC_##name = code;
-BYTECODE_ITERATOR(DECLARE_BYTECODES)
-#undef DECLARE_BYTECODES
-
-#define DECLARE_BYTECODE_LENGTH(name, code, length) \
- static const int BC_##name##_LENGTH = length;
-BYTECODE_ITERATOR(DECLARE_BYTECODE_LENGTH)
-#undef DECLARE_BYTECODE_LENGTH
-} }
-
-#endif // V8_BYTECODES_IRREGEXP_H_
diff --git a/src/3rdparty/v8/src/cached-powers.cc b/src/3rdparty/v8/src/cached-powers.cc
deleted file mode 100644
index 9241d26..0000000
--- a/src/3rdparty/v8/src/cached-powers.cc
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-#include <math.h>
-#include <limits.h>
-
-#include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
-#include "cached-powers.h"
-
-namespace v8 {
-namespace internal {
-
-struct CachedPower {
- uint64_t significand;
- int16_t binary_exponent;
- int16_t decimal_exponent;
-};
-
-static const CachedPower kCachedPowers[] = {
- {V8_2PART_UINT64_C(0xfa8fd5a0, 081c0288), -1220, -348},
- {V8_2PART_UINT64_C(0xbaaee17f, a23ebf76), -1193, -340},
- {V8_2PART_UINT64_C(0x8b16fb20, 3055ac76), -1166, -332},
- {V8_2PART_UINT64_C(0xcf42894a, 5dce35ea), -1140, -324},
- {V8_2PART_UINT64_C(0x9a6bb0aa, 55653b2d), -1113, -316},
- {V8_2PART_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
- {V8_2PART_UINT64_C(0xab70fe17, c79ac6ca), -1060, -300},
- {V8_2PART_UINT64_C(0xff77b1fc, bebcdc4f), -1034, -292},
- {V8_2PART_UINT64_C(0xbe5691ef, 416bd60c), -1007, -284},
- {V8_2PART_UINT64_C(0x8dd01fad, 907ffc3c), -980, -276},
- {V8_2PART_UINT64_C(0xd3515c28, 31559a83), -954, -268},
- {V8_2PART_UINT64_C(0x9d71ac8f, ada6c9b5), -927, -260},
- {V8_2PART_UINT64_C(0xea9c2277, 23ee8bcb), -901, -252},
- {V8_2PART_UINT64_C(0xaecc4991, 4078536d), -874, -244},
- {V8_2PART_UINT64_C(0x823c1279, 5db6ce57), -847, -236},
- {V8_2PART_UINT64_C(0xc2109436, 4dfb5637), -821, -228},
- {V8_2PART_UINT64_C(0x9096ea6f, 3848984f), -794, -220},
- {V8_2PART_UINT64_C(0xd77485cb, 25823ac7), -768, -212},
- {V8_2PART_UINT64_C(0xa086cfcd, 97bf97f4), -741, -204},
- {V8_2PART_UINT64_C(0xef340a98, 172aace5), -715, -196},
- {V8_2PART_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
- {V8_2PART_UINT64_C(0x84c8d4df, d2c63f3b), -661, -180},
- {V8_2PART_UINT64_C(0xc5dd4427, 1ad3cdba), -635, -172},
- {V8_2PART_UINT64_C(0x936b9fce, bb25c996), -608, -164},
- {V8_2PART_UINT64_C(0xdbac6c24, 7d62a584), -582, -156},
- {V8_2PART_UINT64_C(0xa3ab6658, 0d5fdaf6), -555, -148},
- {V8_2PART_UINT64_C(0xf3e2f893, dec3f126), -529, -140},
- {V8_2PART_UINT64_C(0xb5b5ada8, aaff80b8), -502, -132},
- {V8_2PART_UINT64_C(0x87625f05, 6c7c4a8b), -475, -124},
- {V8_2PART_UINT64_C(0xc9bcff60, 34c13053), -449, -116},
- {V8_2PART_UINT64_C(0x964e858c, 91ba2655), -422, -108},
- {V8_2PART_UINT64_C(0xdff97724, 70297ebd), -396, -100},
- {V8_2PART_UINT64_C(0xa6dfbd9f, b8e5b88f), -369, -92},
- {V8_2PART_UINT64_C(0xf8a95fcf, 88747d94), -343, -84},
- {V8_2PART_UINT64_C(0xb9447093, 8fa89bcf), -316, -76},
- {V8_2PART_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
- {V8_2PART_UINT64_C(0xcdb02555, 653131b6), -263, -60},
- {V8_2PART_UINT64_C(0x993fe2c6, d07b7fac), -236, -52},
- {V8_2PART_UINT64_C(0xe45c10c4, 2a2b3b06), -210, -44},
- {V8_2PART_UINT64_C(0xaa242499, 697392d3), -183, -36},
- {V8_2PART_UINT64_C(0xfd87b5f2, 8300ca0e), -157, -28},
- {V8_2PART_UINT64_C(0xbce50864, 92111aeb), -130, -20},
- {V8_2PART_UINT64_C(0x8cbccc09, 6f5088cc), -103, -12},
- {V8_2PART_UINT64_C(0xd1b71758, e219652c), -77, -4},
- {V8_2PART_UINT64_C(0x9c400000, 00000000), -50, 4},
- {V8_2PART_UINT64_C(0xe8d4a510, 00000000), -24, 12},
- {V8_2PART_UINT64_C(0xad78ebc5, ac620000), 3, 20},
- {V8_2PART_UINT64_C(0x813f3978, f8940984), 30, 28},
- {V8_2PART_UINT64_C(0xc097ce7b, c90715b3), 56, 36},
- {V8_2PART_UINT64_C(0x8f7e32ce, 7bea5c70), 83, 44},
- {V8_2PART_UINT64_C(0xd5d238a4, abe98068), 109, 52},
- {V8_2PART_UINT64_C(0x9f4f2726, 179a2245), 136, 60},
- {V8_2PART_UINT64_C(0xed63a231, d4c4fb27), 162, 68},
- {V8_2PART_UINT64_C(0xb0de6538, 8cc8ada8), 189, 76},
- {V8_2PART_UINT64_C(0x83c7088e, 1aab65db), 216, 84},
- {V8_2PART_UINT64_C(0xc45d1df9, 42711d9a), 242, 92},
- {V8_2PART_UINT64_C(0x924d692c, a61be758), 269, 100},
- {V8_2PART_UINT64_C(0xda01ee64, 1a708dea), 295, 108},
- {V8_2PART_UINT64_C(0xa26da399, 9aef774a), 322, 116},
- {V8_2PART_UINT64_C(0xf209787b, b47d6b85), 348, 124},
- {V8_2PART_UINT64_C(0xb454e4a1, 79dd1877), 375, 132},
- {V8_2PART_UINT64_C(0x865b8692, 5b9bc5c2), 402, 140},
- {V8_2PART_UINT64_C(0xc83553c5, c8965d3d), 428, 148},
- {V8_2PART_UINT64_C(0x952ab45c, fa97a0b3), 455, 156},
- {V8_2PART_UINT64_C(0xde469fbd, 99a05fe3), 481, 164},
- {V8_2PART_UINT64_C(0xa59bc234, db398c25), 508, 172},
- {V8_2PART_UINT64_C(0xf6c69a72, a3989f5c), 534, 180},
- {V8_2PART_UINT64_C(0xb7dcbf53, 54e9bece), 561, 188},
- {V8_2PART_UINT64_C(0x88fcf317, f22241e2), 588, 196},
- {V8_2PART_UINT64_C(0xcc20ce9b, d35c78a5), 614, 204},
- {V8_2PART_UINT64_C(0x98165af3, 7b2153df), 641, 212},
- {V8_2PART_UINT64_C(0xe2a0b5dc, 971f303a), 667, 220},
- {V8_2PART_UINT64_C(0xa8d9d153, 5ce3b396), 694, 228},
- {V8_2PART_UINT64_C(0xfb9b7cd9, a4a7443c), 720, 236},
- {V8_2PART_UINT64_C(0xbb764c4c, a7a44410), 747, 244},
- {V8_2PART_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
- {V8_2PART_UINT64_C(0xd01fef10, a657842c), 800, 260},
- {V8_2PART_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
- {V8_2PART_UINT64_C(0xe7109bfb, a19c0c9d), 853, 276},
- {V8_2PART_UINT64_C(0xac2820d9, 623bf429), 880, 284},
- {V8_2PART_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
- {V8_2PART_UINT64_C(0xbf21e440, 03acdd2d), 933, 300},
- {V8_2PART_UINT64_C(0x8e679c2f, 5e44ff8f), 960, 308},
- {V8_2PART_UINT64_C(0xd433179d, 9c8cb841), 986, 316},
- {V8_2PART_UINT64_C(0x9e19db92, b4e31ba9), 1013, 324},
- {V8_2PART_UINT64_C(0xeb96bf6e, badf77d9), 1039, 332},
- {V8_2PART_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
-};
-
-static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
-static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent.
-static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
-// Difference between the decimal exponents in the table above.
-const int PowersOfTenCache::kDecimalExponentDistance = 8;
-const int PowersOfTenCache::kMinDecimalExponent = -348;
-const int PowersOfTenCache::kMaxDecimalExponent = 340;
-
-void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
- int min_exponent,
- int max_exponent,
- DiyFp* power,
- int* decimal_exponent) {
- int kQ = DiyFp::kSignificandSize;
- // Some platforms return incorrect sign on 0 result. We can ignore that here,
- // which means we can avoid depending on platform.h.
- double k = ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
- int foo = kCachedPowersOffset;
- int index =
- (foo + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
- ASSERT(0 <= index && index < kCachedPowersLength);
- CachedPower cached_power = kCachedPowers[index];
- ASSERT(min_exponent <= cached_power.binary_exponent);
- ASSERT(cached_power.binary_exponent <= max_exponent);
- *decimal_exponent = cached_power.decimal_exponent;
- *power = DiyFp(cached_power.significand, cached_power.binary_exponent);
-}
-
-
-void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent,
- DiyFp* power,
- int* found_exponent) {
- ASSERT(kMinDecimalExponent <= requested_exponent);
- ASSERT(requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance);
- int index =
- (requested_exponent + kCachedPowersOffset) / kDecimalExponentDistance;
- CachedPower cached_power = kCachedPowers[index];
- *power = DiyFp(cached_power.significand, cached_power.binary_exponent);
- *found_exponent = cached_power.decimal_exponent;
- ASSERT(*found_exponent <= requested_exponent);
- ASSERT(requested_exponent < *found_exponent + kDecimalExponentDistance);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/cached-powers.h b/src/3rdparty/v8/src/cached-powers.h
deleted file mode 100644
index 88df222..0000000
--- a/src/3rdparty/v8/src/cached-powers.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CACHED_POWERS_H_
-#define V8_CACHED_POWERS_H_
-
-#include "diy-fp.h"
-
-namespace v8 {
-namespace internal {
-
-class PowersOfTenCache {
- public:
- // Not all powers of ten are cached. The decimal exponent of two neighboring
- // cached numbers will differ by kDecimalExponentDistance.
- static const int kDecimalExponentDistance;
-
- static const int kMinDecimalExponent;
- static const int kMaxDecimalExponent;
-
- // Returns a cached power-of-ten with a binary exponent in the range
- // [min_exponent; max_exponent] (boundaries included).
- static void GetCachedPowerForBinaryExponentRange(int min_exponent,
- int max_exponent,
- DiyFp* power,
- int* decimal_exponent);
-
- // Returns a cached power of ten x ~= 10^k such that
- // k <= decimal_exponent < k + kCachedPowersDecimalDistance.
- // The given decimal_exponent must satisfy
- // kMinDecimalExponent <= requested_exponent, and
- // requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance.
- static void GetCachedPowerForDecimalExponent(int requested_exponent,
- DiyFp* power,
- int* found_exponent);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_CACHED_POWERS_H_
diff --git a/src/3rdparty/v8/src/char-predicates-inl.h b/src/3rdparty/v8/src/char-predicates-inl.h
deleted file mode 100644
index 1a89ef3..0000000
--- a/src/3rdparty/v8/src/char-predicates-inl.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CHAR_PREDICATES_INL_H_
-#define V8_CHAR_PREDICATES_INL_H_
-
-#include "char-predicates.h"
-
-namespace v8 {
-namespace internal {
-
-
-// If c is in 'A'-'Z' or 'a'-'z', return its lower-case.
-// Else, return something outside of 'A'-'Z' and 'a'-'z'.
-// Note: it ignores LOCALE.
-inline int AsciiAlphaToLower(uc32 c) {
- return c | 0x20;
-}
-
-
-inline bool IsCarriageReturn(uc32 c) {
- return c == 0x000D;
-}
-
-
-inline bool IsLineFeed(uc32 c) {
- return c == 0x000A;
-}
-
-
-inline bool IsInRange(int value, int lower_limit, int higher_limit) {
- ASSERT(lower_limit <= higher_limit);
- return static_cast<unsigned int>(value - lower_limit) <=
- static_cast<unsigned int>(higher_limit - lower_limit);
-}
-
-
-inline bool IsDecimalDigit(uc32 c) {
- // ECMA-262, 3rd, 7.8.3 (p 16)
- return IsInRange(c, '0', '9');
-}
-
-
-inline bool IsHexDigit(uc32 c) {
- // ECMA-262, 3rd, 7.6 (p 15)
- return IsDecimalDigit(c) || IsInRange(AsciiAlphaToLower(c), 'a', 'f');
-}
-
-
-inline bool IsRegExpWord(uc16 c) {
- return IsInRange(AsciiAlphaToLower(c), 'a', 'z')
- || IsDecimalDigit(c)
- || (c == '_');
-}
-
-
-inline bool IsRegExpNewline(uc16 c) {
- switch (c) {
- // CR LF LS PS
- case 0x000A: case 0x000D: case 0x2028: case 0x2029:
- return false;
- default:
- return true;
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_CHAR_PREDICATES_INL_H_
diff --git a/src/3rdparty/v8/src/char-predicates.h b/src/3rdparty/v8/src/char-predicates.h
deleted file mode 100644
index b97191f..0000000
--- a/src/3rdparty/v8/src/char-predicates.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CHAR_PREDICATES_H_
-#define V8_CHAR_PREDICATES_H_
-
-#include "unicode.h"
-
-namespace v8 {
-namespace internal {
-
-// Unicode character predicates as defined by ECMA-262, 3rd,
-// used for lexical analysis.
-
-inline bool IsCarriageReturn(uc32 c);
-inline bool IsLineFeed(uc32 c);
-inline bool IsDecimalDigit(uc32 c);
-inline bool IsHexDigit(uc32 c);
-inline bool IsRegExpWord(uc32 c);
-inline bool IsRegExpNewline(uc32 c);
-
-struct IdentifierStart {
- static inline bool Is(uc32 c) {
- switch (c) {
- case '$': case '_': case '\\': return true;
- default: return unibrow::Letter::Is(c);
- }
- }
-};
-
-
-struct IdentifierPart {
- static inline bool Is(uc32 c) {
- return IdentifierStart::Is(c)
- || unibrow::Number::Is(c)
- || c == 0x200C // U+200C is Zero-Width Non-Joiner.
- || c == 0x200D // U+200D is Zero-Width Joiner.
- || unibrow::CombiningMark::Is(c)
- || unibrow::ConnectorPunctuation::Is(c);
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_CHAR_PREDICATES_H_
diff --git a/src/3rdparty/v8/src/checks.cc b/src/3rdparty/v8/src/checks.cc
deleted file mode 100644
index a6405ec..0000000
--- a/src/3rdparty/v8/src/checks.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-
-#include "v8.h"
-
-#include "platform.h"
-
-// TODO(isolates): is it necessary to lift this?
-static int fatal_error_handler_nesting_depth = 0;
-
-// Contains protection against recursive calls (faults while handling faults).
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
- fflush(stdout);
- fflush(stderr);
- fatal_error_handler_nesting_depth++;
- // First time we try to print an error message
- if (fatal_error_handler_nesting_depth < 2) {
- i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
- va_list arguments;
- va_start(arguments, format);
- i::OS::VPrintError(format, arguments);
- va_end(arguments);
- i::OS::PrintError("\n#\n");
- i::OS::DumpBacktrace();
- }
- // First two times we may try to print a stack dump.
- if (fatal_error_handler_nesting_depth < 3) {
- if (i::FLAG_stack_trace_on_abort) {
- // Call this one twice on double fault
- i::Isolate::Current()->PrintStack();
- }
- }
- i::OS::Abort();
-}
-
-
-void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- v8::Handle<v8::Value> expected,
- const char* value_source,
- v8::Handle<v8::Value> value) {
- if (!expected->Equals(value)) {
- v8::String::Utf8Value value_str(value);
- v8::String::Utf8Value expected_str(expected);
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s",
- expected_source, value_source, *expected_str, *value_str);
- }
-}
-
-
-void CheckNonEqualsHelper(const char* file,
- int line,
- const char* unexpected_source,
- v8::Handle<v8::Value> unexpected,
- const char* value_source,
- v8::Handle<v8::Value> value) {
- if (unexpected->Equals(value)) {
- v8::String::Utf8Value value_str(value);
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s",
- unexpected_source, value_source, *value_str);
- }
-}
-
-
-void API_Fatal(const char* location, const char* format, ...) {
- i::OS::PrintError("\n#\n# Fatal error in %s\n# ", location);
- va_list arguments;
- va_start(arguments, format);
- i::OS::VPrintError(format, arguments);
- va_end(arguments);
- i::OS::PrintError("\n#\n\n");
- i::OS::Abort();
-}
-
-
-namespace v8 { namespace internal {
-
- bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; }
-
- intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
-
-} } // namespace v8::internal
-
diff --git a/src/3rdparty/v8/src/checks.h b/src/3rdparty/v8/src/checks.h
deleted file mode 100644
index d0a0c2b..0000000
--- a/src/3rdparty/v8/src/checks.h
+++ /dev/null
@@ -1,295 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CHECKS_H_
-#define V8_CHECKS_H_
-
-#include <string.h>
-
-#include "../include/v8stdint.h"
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
-
-// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
-// development, but they should not be relied on in the final product.
-#ifdef DEBUG
-#define FATAL(msg) \
- V8_Fatal(__FILE__, __LINE__, "%s", (msg))
-#define UNIMPLEMENTED() \
- V8_Fatal(__FILE__, __LINE__, "unimplemented code")
-#define UNREACHABLE() \
- V8_Fatal(__FILE__, __LINE__, "unreachable code")
-#else
-#define FATAL(msg) \
- V8_Fatal("", 0, "%s", (msg))
-#define UNIMPLEMENTED() \
- V8_Fatal("", 0, "unimplemented code")
-#define UNREACHABLE() ((void) 0)
-#endif
-
-
-// The CHECK macro checks that the given condition is true; if not, it
-// prints a message to stderr and aborts.
-#define CHECK(condition) do { \
- if (!(condition)) { \
- V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \
- } \
- } while (0)
-
-
-// Helper function used by the CHECK_EQ function when given int
-// arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file, int line,
- const char* expected_source, int expected,
- const char* value_source, int value) {
- if (expected != value) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %i\n# Found: %i",
- expected_source, value_source, expected, value);
- }
-}
-
-
-// Helper function used by the CHECK_EQ function when given int64_t
-// arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file, int line,
- const char* expected_source,
- int64_t expected,
- const char* value_source,
- int64_t value) {
- if (expected != value) {
- // Print int64_t values in hex, as two int32s,
- // to avoid platform-dependencies.
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n#"
- " Expected: 0x%08x%08x\n# Found: 0x%08x%08x",
- expected_source, value_source,
- static_cast<uint32_t>(expected >> 32),
- static_cast<uint32_t>(expected),
- static_cast<uint32_t>(value >> 32),
- static_cast<uint32_t>(value));
- }
-}
-
-
-// Helper function used by the CHECK_NE function when given int
-// arguments. Should not be called directly.
-inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* unexpected_source,
- int unexpected,
- const char* value_source,
- int value) {
- if (unexpected == value) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %i",
- unexpected_source, value_source, value);
- }
-}
-
-
-// Helper function used by the CHECK function when given string
-// arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const char* expected,
- const char* value_source,
- const char* value) {
- if ((expected == NULL && value != NULL) ||
- (expected != NULL && value == NULL) ||
- (expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s",
- expected_source, value_source, expected, value);
- }
-}
-
-
-inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const char* expected,
- const char* value_source,
- const char* value) {
- if (expected == value ||
- (expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s",
- expected_source, value_source, value);
- }
-}
-
-
-// Helper function used by the CHECK function when given pointer
-// arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const void* expected,
- const char* value_source,
- const void* value) {
- if (expected != value) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %p\n# Found: %p",
- expected_source, value_source,
- expected, value);
- }
-}
-
-
-inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const void* expected,
- const char* value_source,
- const void* value) {
- if (expected == value) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p",
- expected_source, value_source, value);
- }
-}
-
-
-// Helper function used by the CHECK function when given floating
-// point arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- double expected,
- const char* value_source,
- double value) {
- // Force values to 64 bit memory to truncate 80 bit precision on IA32.
- volatile double* exp = new double[1];
- *exp = expected;
- volatile double* val = new double[1];
- *val = value;
- if (*exp != *val) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f",
- expected_source, value_source, *exp, *val);
- }
- delete[] exp;
- delete[] val;
-}
-
-
-inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- double expected,
- const char* value_source,
- double value) {
- // Force values to 64 bit memory to truncate 80 bit precision on IA32.
- volatile double* exp = new double[1];
- *exp = expected;
- volatile double* val = new double[1];
- *val = value;
- if (*exp == *val) {
- V8_Fatal(file, line,
- "CHECK_NE(%s, %s) failed\n# Value: %f",
- expected_source, value_source, *val);
- }
- delete[] exp;
- delete[] val;
-}
-
-
-#define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \
- #expected, expected, #value, value)
-
-
-#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \
- #unexpected, unexpected, #value, value)
-
-
-#define CHECK_GT(a, b) CHECK((a) > (b))
-#define CHECK_GE(a, b) CHECK((a) >= (b))
-#define CHECK_LT(a, b) CHECK((a) < (b))
-#define CHECK_LE(a, b) CHECK((a) <= (b))
-
-
-// This is inspired by the static assertion facility in boost. This
-// is pretty magical. If it causes you trouble on a platform you may
-// find a fix in the boost code.
-template <bool> class StaticAssertion;
-template <> class StaticAssertion<true> { };
-// This macro joins two tokens. If one of the tokens is a macro the
-// helper call causes it to be resolved before joining.
-#define SEMI_STATIC_JOIN(a, b) SEMI_STATIC_JOIN_HELPER(a, b)
-#define SEMI_STATIC_JOIN_HELPER(a, b) a##b
-// Causes an error during compilation of the condition is not
-// statically known to be true. It is formulated as a typedef so that
-// it can be used wherever a typedef can be used. Beware that this
-// actually causes each use to introduce a new defined type with a
-// name depending on the source line.
-template <int> class StaticAssertionHelper { };
-#define STATIC_CHECK(test) \
- typedef \
- StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
- SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
-
-
-extern bool FLAG_enable_slow_asserts;
-
-
-// The ASSERT macro is equivalent to CHECK except that it only
-// generates code in debug builds.
-#ifdef DEBUG
-#define ASSERT_RESULT(expr) CHECK(expr)
-#define ASSERT(condition) CHECK(condition)
-#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2)
-#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
-#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
-#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
-#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
-#else
-#define ASSERT_RESULT(expr) (expr)
-#define ASSERT(condition) ((void) 0)
-#define ASSERT_EQ(v1, v2) ((void) 0)
-#define ASSERT_NE(v1, v2) ((void) 0)
-#define ASSERT_GE(v1, v2) ((void) 0)
-#define ASSERT_LT(v1, v2) ((void) 0)
-#define ASSERT_LE(v1, v2) ((void) 0)
-#define SLOW_ASSERT(condition) ((void) 0)
-#endif
-// Static asserts has no impact on runtime performance, so they can be
-// safely enabled in release mode. Moreover, the ((void) 0) expression
-// obeys different syntax rules than typedef's, e.g. it can't appear
-// inside class declaration, this leads to inconsistency between debug
-// and release compilation modes behavior.
-#define STATIC_ASSERT(test) STATIC_CHECK(test)
-
-#define ASSERT_NOT_NULL(p) ASSERT_NE(NULL, p)
-
-// "Extra checks" are lightweight checks that are enabled in some release
-// builds.
-#ifdef ENABLE_EXTRA_CHECKS
-#define EXTRA_CHECK(condition) CHECK(condition)
-#else
-#define EXTRA_CHECK(condition) ((void) 0)
-#endif
-
-#endif // V8_CHECKS_H_
diff --git a/src/3rdparty/v8/src/circular-queue-inl.h b/src/3rdparty/v8/src/circular-queue-inl.h
deleted file mode 100644
index 373bf60..0000000
--- a/src/3rdparty/v8/src/circular-queue-inl.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CIRCULAR_QUEUE_INL_H_
-#define V8_CIRCULAR_QUEUE_INL_H_
-
-#include "circular-queue.h"
-
-namespace v8 {
-namespace internal {
-
-
-void* SamplingCircularQueue::Enqueue() {
- WrapPositionIfNeeded(&producer_pos_->enqueue_pos);
- void* result = producer_pos_->enqueue_pos;
- producer_pos_->enqueue_pos += record_size_;
- return result;
-}
-
-
-void SamplingCircularQueue::WrapPositionIfNeeded(
- SamplingCircularQueue::Cell** pos) {
- if (**pos == kEnd) *pos = buffer_;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_CIRCULAR_QUEUE_INL_H_
diff --git a/src/3rdparty/v8/src/circular-queue.cc b/src/3rdparty/v8/src/circular-queue.cc
deleted file mode 100644
index 2818ce9..0000000
--- a/src/3rdparty/v8/src/circular-queue.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "circular-queue-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-SamplingCircularQueue::SamplingCircularQueue(
- int record_size_in_bytes,
- int desired_chunk_size_in_bytes,
- int buffer_size_in_chunks,
- bool keep_producer_consumer_distance)
- : record_size_(record_size_in_bytes / sizeof(Cell)),
- chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes *
- record_size_in_bytes),
- chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)),
- buffer_size_(chunk_size_ * buffer_size_in_chunks),
- buffer_(NewArray<Cell>(buffer_size_ + 1)) {
- ASSERT(buffer_size_in_chunks > 2);
- // Clean up the whole buffer to avoid encountering a random kEnd
- // while enqueuing.
- for (int i = 0; i < buffer_size_; ++i) {
- buffer_[i] = kClear;
- }
- buffer_[buffer_size_] = kEnd;
-
- // Layout producer and consumer position pointers each on their own
- // cache lines to avoid cache lines thrashing due to simultaneous
- // updates of positions by different processor cores.
- const int positions_size =
- RoundUp(1, kProcessorCacheLineSize) +
- RoundUp(static_cast<int>(sizeof(ProducerPosition)),
- kProcessorCacheLineSize) +
- RoundUp(static_cast<int>(sizeof(ConsumerPosition)),
- kProcessorCacheLineSize);
- positions_ = NewArray<byte>(positions_size);
-
- producer_pos_ = reinterpret_cast<ProducerPosition*>(
- RoundUp(positions_, kProcessorCacheLineSize));
- producer_pos_->enqueue_pos = buffer_;
-
- consumer_pos_ = reinterpret_cast<ConsumerPosition*>(
- reinterpret_cast<byte*>(producer_pos_) + kProcessorCacheLineSize);
- ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
- positions_ + positions_size);
- consumer_pos_->dequeue_chunk_pos = buffer_;
- consumer_pos_->dequeue_chunk_poll_pos = buffer_;
- // The distance ensures that producer and consumer never step on
- // each other's chunks and helps eviction of produced data from
- // the CPU cache (having that chunk size is bigger than the cache.)
- if (keep_producer_consumer_distance) {
- consumer_pos_->dequeue_chunk_poll_pos += 2 * chunk_size_;
- }
- consumer_pos_->dequeue_pos = NULL;
-}
-
-
-SamplingCircularQueue::~SamplingCircularQueue() {
- DeleteArray(positions_);
- DeleteArray(buffer_);
-}
-
-
-void* SamplingCircularQueue::StartDequeue() {
- if (consumer_pos_->dequeue_pos != NULL) {
- return consumer_pos_->dequeue_pos;
- } else {
- if (*consumer_pos_->dequeue_chunk_poll_pos != kClear) {
- consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos;
- consumer_pos_->dequeue_end_pos = consumer_pos_->dequeue_pos + chunk_size_;
- return consumer_pos_->dequeue_pos;
- } else {
- return NULL;
- }
- }
-}
-
-
-void SamplingCircularQueue::FinishDequeue() {
- consumer_pos_->dequeue_pos += record_size_;
- if (consumer_pos_->dequeue_pos < consumer_pos_->dequeue_end_pos) return;
- // Move to next chunk.
- consumer_pos_->dequeue_pos = NULL;
- *consumer_pos_->dequeue_chunk_pos = kClear;
- consumer_pos_->dequeue_chunk_pos += chunk_size_;
- WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_pos);
- consumer_pos_->dequeue_chunk_poll_pos += chunk_size_;
- WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_poll_pos);
-}
-
-
-void SamplingCircularQueue::FlushResidualRecords() {
- // Eliminate producer / consumer distance.
- consumer_pos_->dequeue_chunk_poll_pos = consumer_pos_->dequeue_chunk_pos;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/circular-queue.h b/src/3rdparty/v8/src/circular-queue.h
deleted file mode 100644
index a8eb524..0000000
--- a/src/3rdparty/v8/src/circular-queue.h
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CIRCULAR_QUEUE_H_
-#define V8_CIRCULAR_QUEUE_H_
-
-namespace v8 {
-namespace internal {
-
-
-// Lock-free cache-friendly sampling circular queue for large
-// records. Intended for fast transfer of large records between a
-// single producer and a single consumer. If the queue is full,
-// previous unread records are overwritten. The queue is designed with
-// a goal in mind to evade cache lines thrashing by preventing
-// simultaneous reads and writes to adjanced memory locations.
-//
-// IMPORTANT: as a producer never checks for chunks cleanness, it is
-// possible that it can catch up and overwrite a chunk that a consumer
-// is currently reading, resulting in a corrupt record being read.
-class SamplingCircularQueue {
- public:
- // Executed on the application thread.
- SamplingCircularQueue(int record_size_in_bytes,
- int desired_chunk_size_in_bytes,
- int buffer_size_in_chunks,
- bool keep_producer_consumer_distance = true);
- ~SamplingCircularQueue();
-
- // Enqueue returns a pointer to a memory location for storing the next
- // record.
- INLINE(void* Enqueue());
-
- // Executed on the consumer (analyzer) thread.
- // StartDequeue returns a pointer to a memory location for retrieving
- // the next record. After the record had been read by a consumer,
- // FinishDequeue must be called. Until that moment, subsequent calls
- // to StartDequeue will return the same pointer.
- void* StartDequeue();
- void FinishDequeue();
- // Due to a presence of slipping between the producer and the consumer,
- // the queue must be notified whether producing has been finished in order
- // to process remaining records from the buffer.
- void FlushResidualRecords();
-
- typedef AtomicWord Cell;
- // Reserved values for the first cell of a record.
- static const Cell kClear = 0; // Marks clean (processed) chunks.
- static const Cell kEnd = -1; // Marks the end of the buffer.
-
- private:
- struct ProducerPosition {
- Cell* enqueue_pos;
- };
- struct ConsumerPosition {
- Cell* dequeue_chunk_pos;
- Cell* dequeue_chunk_poll_pos;
- Cell* dequeue_pos;
- Cell* dequeue_end_pos;
- };
-
- INLINE(void WrapPositionIfNeeded(Cell** pos));
-
- const int record_size_;
- const int chunk_size_in_bytes_;
- const int chunk_size_;
- const int buffer_size_;
- Cell* buffer_;
- byte* positions_;
- ProducerPosition* producer_pos_;
- ConsumerPosition* consumer_pos_;
-
- DISALLOW_COPY_AND_ASSIGN(SamplingCircularQueue);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_CIRCULAR_QUEUE_H_
diff --git a/src/3rdparty/v8/src/code-stubs-hydrogen.cc b/src/3rdparty/v8/src/code-stubs-hydrogen.cc
deleted file mode 100644
index 491e255..0000000
--- a/src/3rdparty/v8/src/code-stubs-hydrogen.cc
+++ /dev/null
@@ -1,366 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "code-stubs.h"
-#include "hydrogen.h"
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-
-static LChunk* OptimizeGraph(HGraph* graph) {
- Isolate* isolate = graph->isolate();
- AssertNoAllocation no_gc;
- NoHandleAllocation no_handles(isolate);
- NoHandleDereference no_deref(isolate);
-
- ASSERT(graph != NULL);
- SmartArrayPointer<char> bailout_reason;
- if (!graph->Optimize(&bailout_reason)) {
- FATAL(bailout_reason.is_empty() ? "unknown" : *bailout_reason);
- }
- LChunk* chunk = LChunk::NewChunk(graph);
- if (chunk == NULL) {
- FATAL(graph->info()->bailout_reason());
- }
- return chunk;
-}
-
-
-class CodeStubGraphBuilderBase : public HGraphBuilder {
- public:
- CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub)
- : HGraphBuilder(&info_), info_(stub, isolate), context_(NULL) {
- int major_key = stub->MajorKey();
- descriptor_ = info_.isolate()->code_stub_interface_descriptor(major_key);
- if (descriptor_->register_param_count_ < 0) {
- stub->InitializeInterfaceDescriptor(info_.isolate(), descriptor_);
- }
- parameters_.Reset(new HParameter*[descriptor_->register_param_count_]);
- }
- virtual bool BuildGraph();
-
- protected:
- virtual void BuildCodeStub() = 0;
- HParameter* GetParameter(int parameter) { return parameters_[parameter]; }
- CompilationInfo* info() { return &info_; }
- HydrogenCodeStub* stub() { return info_.code_stub(); }
- HContext* context() { return context_; }
- Isolate* isolate() { return info_.isolate(); }
-
- private:
- SmartArrayPointer<HParameter*> parameters_;
- CompilationInfoWithZone info_;
- CodeStubInterfaceDescriptor* descriptor_;
- HContext* context_;
-};
-
-
-bool CodeStubGraphBuilderBase::BuildGraph() {
- if (FLAG_trace_hydrogen) {
- const char* name = CodeStub::MajorName(stub()->MajorKey(), false);
- PrintF("-----------------------------------------------------------\n");
- PrintF("Compiling stub %s using hydrogen\n", name);
- HTracer::Instance()->TraceCompilation(&info_);
- }
-
- Zone* zone = this->zone();
- HEnvironment* start_environment =
- new(zone) HEnvironment(zone, descriptor_->register_param_count_);
- HBasicBlock* next_block = CreateBasicBlock(start_environment);
-
- current_block()->Goto(next_block);
- next_block->SetJoinId(BailoutId::StubEntry());
- set_current_block(next_block);
-
- HConstant* undefined_constant = new(zone) HConstant(
- isolate()->factory()->undefined_value(), Representation::Tagged());
- AddInstruction(undefined_constant);
- graph()->set_undefined_constant(undefined_constant);
-
- int param_count = descriptor_->register_param_count_;
- for (int i = 0; i < param_count; ++i) {
- HParameter* param =
- new(zone) HParameter(i, HParameter::REGISTER_PARAMETER);
- AddInstruction(param);
- start_environment->Bind(i, param);
- parameters_[i] = param;
- }
-
- context_ = new(zone) HContext();
- AddInstruction(context_);
- start_environment->Bind(param_count, context_);
-
- AddSimulate(BailoutId::StubEntry());
-
- BuildCodeStub();
-
- return true;
-}
-
-template <class Stub>
-class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
- public:
- explicit CodeStubGraphBuilder(Stub* stub)
- : CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
-
- protected:
- virtual void BuildCodeStub();
- Stub* casted_stub() { return static_cast<Stub*>(stub()); }
-};
-
-
-template <>
-void CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
- Zone* zone = this->zone();
- Factory* factory = isolate()->factory();
-
- HInstruction* boilerplate =
- AddInstruction(new(zone) HLoadKeyed(GetParameter(0),
- GetParameter(1),
- NULL,
- FAST_ELEMENTS));
-
- CheckBuilder builder(this, BailoutId::StubEntry());
- builder.CheckNotUndefined(boilerplate);
-
- int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize;
- HValue* boilerplate_size =
- AddInstruction(new(zone) HInstanceSize(boilerplate));
- HValue* size_in_words =
- AddInstruction(new(zone) HConstant(size >> kPointerSizeLog2,
- Representation::Integer32()));
- builder.CheckIntegerEq(boilerplate_size, size_in_words);
-
- HValue* size_in_bytes =
- AddInstruction(new(zone) HConstant(size, Representation::Integer32()));
- HInstruction* object =
- AddInstruction(new(zone) HAllocate(context(),
- size_in_bytes,
- HType::JSObject(),
- HAllocate::CAN_ALLOCATE_IN_NEW_SPACE));
-
- for (int i = 0; i < size; i += kPointerSize) {
- HInstruction* value =
- AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
- AddInstruction(new(zone) HStoreNamedField(object,
- factory->empty_string(),
- value,
- true, i));
- AddSimulate(BailoutId::StubEntry());
- }
-
- builder.End();
-
- HReturn* ret = new(zone) HReturn(object, context());
- current_block()->Finish(ret);
-}
-
-
-Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
- CodeStubGraphBuilder<FastCloneShallowObjectStub> builder(this);
- LChunk* chunk = OptimizeGraph(builder.CreateGraph());
- return chunk->Codegen(Code::COMPILED_STUB);
-}
-
-
-template <>
-void CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
- Zone* zone = this->zone();
-
- HInstruction* load = BuildUncheckedMonomorphicElementAccess(
- GetParameter(0), GetParameter(1), NULL, NULL,
- casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- false, Representation::Tagged());
- AddInstruction(load);
-
- HReturn* ret = new(zone) HReturn(load, context());
- current_block()->Finish(ret);
-}
-
-
-Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
- CodeStubGraphBuilder<KeyedLoadFastElementStub> builder(this);
- LChunk* chunk = OptimizeGraph(builder.CreateGraph());
- return chunk->Codegen(Code::COMPILED_STUB);
-}
-
-
-template <>
-void CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
- Zone* zone = this->zone();
-
- HValue* js_array = GetParameter(0);
- HValue* map = GetParameter(1);
-
- info()->MarkAsSavesCallerDoubles();
-
- AddInstruction(new(zone) HTrapAllocationMemento(js_array));
-
- HInstruction* array_length =
- AddInstruction(new(zone) HJSArrayLength(js_array,
- js_array,
- HType::Smi()));
-
- Heap* heap = isolate()->heap();
- const int kMinFreeNewSpaceAfterGC =
- ((heap->InitialSemiSpaceSize() - sizeof(FixedArrayBase)) / 2) /
- kDoubleSize;
-
- HConstant* max_alloc_size =
- new(zone) HConstant(kMinFreeNewSpaceAfterGC, Representation::Integer32());
- AddInstruction(max_alloc_size);
- // Since we're forcing Integer32 representation for this HBoundsCheck,
- // there's no need to Smi-check the index.
- AddInstruction(
- new(zone) HBoundsCheck(array_length, max_alloc_size,
- DONT_ALLOW_SMI_KEY, Representation::Integer32()));
-
- IfBuilder if_builder(this, BailoutId::StubEntry());
-
- if_builder.BeginTrue(array_length, graph()->GetConstant0(), Token::EQ);
-
- // Nothing to do, just change the map.
-
- if_builder.BeginFalse();
-
- HInstruction* elements =
- AddInstruction(new(zone) HLoadElements(js_array, js_array));
-
- HInstruction* elements_length =
- AddInstruction(new(zone) HFixedArrayBaseLength(elements));
-
- ElementsKind to_kind = casted_stub()->to_kind();
- HValue* new_elements =
- BuildAllocateElements(context(), to_kind, elements_length);
-
- // Fast elements kinds need to be initialized in case statements below cause a
- // garbage collection.
- Factory* factory = isolate()->factory();
-
- ASSERT(!IsFastSmiElementsKind(to_kind));
- double nan_double = FixedDoubleArray::hole_nan_as_double();
- HValue* hole = IsFastObjectElementsKind(to_kind)
- ? AddInstruction(new(zone) HConstant(factory->the_hole_value(),
- Representation::Tagged()))
- : AddInstruction(new(zone) HConstant(nan_double,
- Representation::Double()));
-
- LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement,
- BailoutId::StubEntry());
-
- HValue* zero = graph()->GetConstant0();
- HValue* start = IsFastElementsKind(to_kind) ? zero : array_length;
- HValue* key = builder.BeginBody(start, elements_length, Token::LT);
-
- AddInstruction(new(zone) HStoreKeyed(new_elements, key, hole, to_kind));
- AddSimulate(BailoutId::StubEntry(), REMOVABLE_SIMULATE);
-
- builder.EndBody();
-
- BuildCopyElements(context(), elements,
- casted_stub()->from_kind(), new_elements,
- to_kind, array_length);
-
- AddInstruction(new(zone) HStoreNamedField(js_array,
- factory->elements_field_string(),
- new_elements, true,
- JSArray::kElementsOffset));
- AddSimulate(BailoutId::StubEntry());
-
- if_builder.End();
-
- AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_string(),
- map, true, JSArray::kMapOffset));
- AddSimulate(BailoutId::StubEntry());
-
- HReturn* ret = new(zone) HReturn(js_array, context());
- current_block()->Finish(ret);
-}
-
-
-template <>
-void CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
- HInstruction* deopt = new(zone()) HSoftDeoptimize();
- AddInstruction(deopt);
- current_block()->MarkAsDeoptimizing();
- HReturn* ret = new(zone()) HReturn(GetParameter(0), context());
- current_block()->Finish(ret);
-}
-
-
-Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
- CodeStubGraphBuilder<ArrayNoArgumentConstructorStub> builder(this);
- LChunk* chunk = OptimizeGraph(builder.CreateGraph());
- return chunk->Codegen(Code::COMPILED_STUB);
-}
-
-
-template <>
-void CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::BuildCodeStub() {
- HInstruction* deopt = new(zone()) HSoftDeoptimize();
- AddInstruction(deopt);
- current_block()->MarkAsDeoptimizing();
- HReturn* ret = new(zone()) HReturn(GetParameter(0), context());
- current_block()->Finish(ret);
-}
-
-
-Handle<Code> TransitionElementsKindStub::GenerateCode() {
- CodeStubGraphBuilder<TransitionElementsKindStub> builder(this);
- LChunk* chunk = OptimizeGraph(builder.CreateGraph());
- return chunk->Codegen(Code::COMPILED_STUB);
-}
-
-
-Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
- CodeStubGraphBuilder<ArraySingleArgumentConstructorStub> builder(this);
- LChunk* chunk = OptimizeGraph(builder.CreateGraph());
- return chunk->Codegen(Code::COMPILED_STUB);
-}
-
-
-template <>
-void CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
- HInstruction* deopt = new(zone()) HSoftDeoptimize();
- AddInstruction(deopt);
- current_block()->MarkAsDeoptimizing();
- HReturn* ret = new(zone()) HReturn(GetParameter(0), context());
- current_block()->Finish(ret);
-}
-
-
-Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
- CodeStubGraphBuilder<ArrayNArgumentsConstructorStub> builder(this);
- LChunk* chunk = OptimizeGraph(builder.CreateGraph());
- return chunk->Codegen(Code::COMPILED_STUB);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/code-stubs.cc b/src/3rdparty/v8/src/code-stubs.cc
deleted file mode 100644
index 4a401cd..0000000
--- a/src/3rdparty/v8/src/code-stubs.cc
+++ /dev/null
@@ -1,644 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-#include "factory.h"
-#include "gdb-jit.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
- UnseededNumberDictionary* stubs = isolate->heap()->code_stubs();
- int index = stubs->FindEntry(GetKey());
- if (index != UnseededNumberDictionary::kNotFound) {
- *code_out = Code::cast(stubs->ValueAt(index));
- return true;
- }
- return false;
-}
-
-
-SmartArrayPointer<const char> CodeStub::GetName() {
- char buffer[100];
- NoAllocationStringAllocator allocator(buffer,
- static_cast<unsigned>(sizeof(buffer)));
- StringStream stream(&allocator);
- PrintName(&stream);
- return stream.ToCString();
-}
-
-
-void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) {
- SmartArrayPointer<const char> name = GetName();
- PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
- GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
- Counters* counters = isolate->counters();
- counters->total_stubs_code_size()->Increment(code->instruction_size());
-}
-
-
-int CodeStub::GetCodeKind() {
- return Code::STUB;
-}
-
-
-Handle<Code> PlatformCodeStub::GenerateCode() {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- // Generate the new code.
- MacroAssembler masm(isolate, NULL, 256);
-
- {
- // Update the static counter each time a new code stub is generated.
- isolate->counters()->code_stubs()->Increment();
-
- // Nested stubs are not allowed for leaves.
- AllowStubCallsScope allow_scope(&masm, false);
-
- // Generate the code for the stub.
- masm.set_generating_stub(true);
- NoCurrentFrameScope scope(&masm);
- Generate(&masm);
- }
-
- // Create the code object.
- CodeDesc desc;
- masm.GetCode(&desc);
-
- // Copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(
- static_cast<Code::Kind>(GetCodeKind()),
- GetICState(),
- GetExtraICState(),
- GetStubType());
- Handle<Code> new_object = factory->NewCode(
- desc, flags, masm.CodeObject(), NeedsImmovableCode());
- return new_object;
-}
-
-
-Handle<Code> CodeStub::GetCode(Isolate* isolate) {
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
- Code* code;
- if (UseSpecialCache()
- ? FindCodeInSpecialCache(&code, isolate)
- : FindCodeInCache(&code, isolate)) {
- ASSERT(IsPregenerated() == code->is_pregenerated());
- return Handle<Code>(code);
- }
-
- {
- HandleScope scope(isolate);
-
- Handle<Code> new_object = GenerateCode();
- new_object->set_major_key(MajorKey());
- FinishCode(new_object);
- RecordCodeGeneration(*new_object, isolate);
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code_stubs) {
- new_object->Disassemble(*GetName());
- PrintF("\n");
- }
-#endif
-
- if (UseSpecialCache()) {
- AddToSpecialCache(new_object);
- } else {
- // Update the dictionary and the root in Heap.
- Handle<UnseededNumberDictionary> dict =
- factory->DictionaryAtNumberPut(
- Handle<UnseededNumberDictionary>(heap->code_stubs()),
- GetKey(),
- new_object);
- heap->public_set_code_stubs(*dict);
- }
- code = *new_object;
- }
-
- Activate(code);
- ASSERT(!NeedsImmovableCode() ||
- heap->lo_space()->Contains(code) ||
- heap->code_space()->FirstPage()->Contains(code->address()));
- return Handle<Code>(code, isolate);
-}
-
-
-const char* CodeStub::MajorName(CodeStub::Major major_key,
- bool allow_unknown_keys) {
- switch (major_key) {
-#define DEF_CASE(name) case name: return #name "Stub";
- CODE_STUB_LIST(DEF_CASE)
-#undef DEF_CASE
- default:
- if (!allow_unknown_keys) {
- UNREACHABLE();
- }
- return NULL;
- }
-}
-
-
-void CodeStub::PrintName(StringStream* stream) {
- stream->Add("%s", MajorName(MajorKey(), false));
-}
-
-
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
- if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
- // The OddballStub handles a number and an oddball, not two oddballs.
- operands_type = BinaryOpIC::GENERIC;
- }
- switch (operands_type) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-#undef __
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s+%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(left_type_),
- BinaryOpIC::GetName(right_type_));
-}
-
-
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
- GenerateBothStringStub(masm);
- return;
- }
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
-void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
- ASSERT(*known_map_ != NULL);
- Isolate* isolate = new_object->GetIsolate();
- Factory* factory = isolate->factory();
- return Map::UpdateCodeCache(known_map_,
- strict() ?
- factory->strict_compare_ic_string() :
- factory->compare_ic_string(),
- new_object);
-}
-
-
-bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
- Factory* factory = isolate->factory();
- Code::Flags flags = Code::ComputeFlags(
- static_cast<Code::Kind>(GetCodeKind()),
- UNINITIALIZED);
- ASSERT(op_ == Token::EQ || op_ == Token::EQ_STRICT);
- Handle<Object> probe(
- known_map_->FindInCodeCache(
- strict() ?
- *factory->strict_compare_ic_string() :
- *factory->compare_ic_string(),
- flags),
- isolate);
- if (probe->IsCode()) {
- *code_out = Code::cast(*probe);
-#ifdef DEBUG
- Token::Value cached_op;
- ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL,
- &cached_op);
- ASSERT(op_ == cached_op);
-#endif
- return true;
- }
- return false;
-}
-
-
-int ICCompareStub::MinorKey() {
- return OpField::encode(op_ - Token::EQ) |
- LeftStateField::encode(left_) |
- RightStateField::encode(right_) |
- HandlerStateField::encode(state_);
-}
-
-
-void ICCompareStub::DecodeMinorKey(int minor_key,
- CompareIC::State* left_state,
- CompareIC::State* right_state,
- CompareIC::State* handler_state,
- Token::Value* op) {
- if (left_state) {
- *left_state =
- static_cast<CompareIC::State>(LeftStateField::decode(minor_key));
- }
- if (right_state) {
- *right_state =
- static_cast<CompareIC::State>(RightStateField::decode(minor_key));
- }
- if (handler_state) {
- *handler_state =
- static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
- }
- if (op) {
- *op = static_cast<Token::Value>(OpField::decode(minor_key) + Token::EQ);
- }
-}
-
-
-void ICCompareStub::Generate(MacroAssembler* masm) {
- switch (state_) {
- case CompareIC::UNINITIALIZED:
- GenerateMiss(masm);
- break;
- case CompareIC::SMI:
- GenerateSmis(masm);
- break;
- case CompareIC::NUMBER:
- GenerateNumbers(masm);
- break;
- case CompareIC::STRING:
- GenerateStrings(masm);
- break;
- case CompareIC::INTERNALIZED_STRING:
- GenerateInternalizedStrings(masm);
- break;
- case CompareIC::UNIQUE_NAME:
- GenerateUniqueNames(masm);
- break;
- case CompareIC::OBJECT:
- GenerateObjects(masm);
- break;
- case CompareIC::KNOWN_OBJECT:
- ASSERT(*known_map_ != NULL);
- GenerateKnownObjects(masm);
- break;
- case CompareIC::GENERIC:
- GenerateGeneric(masm);
- break;
- }
-}
-
-
-void InstanceofStub::PrintName(StringStream* stream) {
- const char* args = "";
- if (HasArgsInRegisters()) {
- args = "_REGS";
- }
-
- const char* inline_check = "";
- if (HasCallSiteInlineCheck()) {
- inline_check = "_INLINE";
- }
-
- const char* return_true_false_object = "";
- if (ReturnTrueFalseObject()) {
- return_true_false_object = "_TRUEFALSE";
- }
-
- stream->Add("InstanceofStub%s%s%s",
- args,
- inline_check,
- return_true_false_object);
-}
-
-
-void JSEntryStub::FinishCode(Handle<Code> code) {
- Handle<FixedArray> handler_table =
- code->GetIsolate()->factory()->NewFixedArray(1, TENURED);
- handler_table->set(0, Smi::FromInt(handler_offset_));
- code->set_handler_table(*handler_table);
-}
-
-
-void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
- KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
-}
-
-
-void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
- switch (elements_kind_) {
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS: {
- KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
- is_js_array_,
- elements_kind_,
- grow_mode_);
- }
- break;
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
- is_js_array_,
- grow_mode_);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
- break;
- case DICTIONARY_ELEMENTS:
- KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
- break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-}
-
-
-void ArgumentsAccessStub::PrintName(StringStream* stream) {
- stream->Add("ArgumentsAccessStub_");
- switch (type_) {
- case READ_ELEMENT: stream->Add("ReadElement"); break;
- case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break;
- case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break;
- case NEW_STRICT: stream->Add("NewStrict"); break;
- }
-}
-
-
-void CallFunctionStub::PrintName(StringStream* stream) {
- stream->Add("CallFunctionStub_Args%d", argc_);
- if (ReceiverMightBeImplicit()) stream->Add("_Implicit");
- if (RecordCallTarget()) stream->Add("_Recording");
-}
-
-
-void CallConstructStub::PrintName(StringStream* stream) {
- stream->Add("CallConstructStub");
- if (RecordCallTarget()) stream->Add("_Recording");
-}
-
-
-void ToBooleanStub::PrintName(StringStream* stream) {
- stream->Add("ToBooleanStub_");
- types_.Print(stream);
-}
-
-
-void ToBooleanStub::Types::Print(StringStream* stream) const {
- if (IsEmpty()) stream->Add("None");
- if (Contains(UNDEFINED)) stream->Add("Undefined");
- if (Contains(BOOLEAN)) stream->Add("Bool");
- if (Contains(NULL_TYPE)) stream->Add("Null");
- if (Contains(SMI)) stream->Add("Smi");
- if (Contains(SPEC_OBJECT)) stream->Add("SpecObject");
- if (Contains(STRING)) stream->Add("String");
- if (Contains(HEAP_NUMBER)) stream->Add("HeapNumber");
-}
-
-
-void ToBooleanStub::Types::TraceTransition(Types to) const {
- if (!FLAG_trace_ic) return;
- char buffer[100];
- NoAllocationStringAllocator allocator(buffer,
- static_cast<unsigned>(sizeof(buffer)));
- StringStream stream(&allocator);
- stream.Add("[ToBooleanIC (");
- Print(&stream);
- stream.Add("->");
- to.Print(&stream);
- stream.Add(")]\n");
- stream.OutputToStdOut();
-}
-
-
-bool ToBooleanStub::Types::Record(Handle<Object> object) {
- if (object->IsUndefined()) {
- Add(UNDEFINED);
- return false;
- } else if (object->IsBoolean()) {
- Add(BOOLEAN);
- return object->IsTrue();
- } else if (object->IsNull()) {
- Add(NULL_TYPE);
- return false;
- } else if (object->IsSmi()) {
- Add(SMI);
- return Smi::cast(*object)->value() != 0;
- } else if (object->IsSpecObject()) {
- Add(SPEC_OBJECT);
- return !object->IsUndetectableObject();
- } else if (object->IsString()) {
- Add(STRING);
- return !object->IsUndetectableObject() &&
- String::cast(*object)->length() != 0;
- } else if (object->IsHeapNumber()) {
- ASSERT(!object->IsUndetectableObject());
- Add(HEAP_NUMBER);
- double value = HeapNumber::cast(*object)->value();
- return value != 0 && !isnan(value);
- } else {
- // We should never see an internal object at runtime here!
- UNREACHABLE();
- return true;
- }
-}
-
-
-bool ToBooleanStub::Types::NeedsMap() const {
- return Contains(ToBooleanStub::SPEC_OBJECT)
- || Contains(ToBooleanStub::STRING)
- || Contains(ToBooleanStub::HEAP_NUMBER);
-}
-
-
-bool ToBooleanStub::Types::CanBeUndetectable() const {
- return Contains(ToBooleanStub::SPEC_OBJECT)
- || Contains(ToBooleanStub::STRING);
-}
-
-
-void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(from_, to_);
- ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
- if (!FLAG_trace_elements_transitions) {
- if (IsFastSmiOrObjectElementsKind(to_)) {
- if (IsFastSmiOrObjectElementsKind(from_)) {
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm, mode, &fail);
- } else if (IsFastDoubleElementsKind(from_)) {
- ASSERT(!IsFastSmiElementsKind(to_));
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
- } else {
- UNREACHABLE();
- }
- KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
- is_jsarray_,
- to_,
- grow_mode_);
- } else if (IsFastSmiElementsKind(from_) &&
- IsFastDoubleElementsKind(to_)) {
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
- KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
- is_jsarray_,
- grow_mode_);
- } else if (IsFastDoubleElementsKind(from_)) {
- ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm, mode, &fail);
- } else {
- UNREACHABLE();
- }
- }
- masm->bind(&fail);
- KeyedStoreIC::GenerateRuntimeSetProperty(masm, strict_mode_);
-}
-
-
-void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
- int i = 0;
- for (; i <= StubFailureTrampolineStub::kMaxExtraExpressionStackCount; ++i) {
- StubFailureTrampolineStub(i).GetCode(isolate);
- }
-}
-
-
-FunctionEntryHook ProfileEntryHookStub::entry_hook_ = NULL;
-
-
-void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
- intptr_t stack_pointer) {
- if (entry_hook_ != NULL)
- entry_hook_(function, stack_pointer);
-}
-
-
-bool ProfileEntryHookStub::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
- // We don't allow setting a new entry hook over one that's
- // already active, as the hooks won't stack.
- if (entry_hook != 0 && entry_hook_ != 0)
- return false;
-
- entry_hook_ = entry_hook;
- return true;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/code-stubs.h b/src/3rdparty/v8/src/code-stubs.h
deleted file mode 100644
index 1f187c0..0000000
--- a/src/3rdparty/v8/src/code-stubs.h
+++ /dev/null
@@ -1,1623 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CODE_STUBS_H_
-#define V8_CODE_STUBS_H_
-
-#include "allocation.h"
-#include "globals.h"
-#include "codegen.h"
-
-namespace v8 {
-namespace internal {
-
-// List of code stubs used on all platforms.
-#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
- V(CallFunction) \
- V(CallConstruct) \
- V(UnaryOp) \
- V(BinaryOp) \
- V(StringAdd) \
- V(SubString) \
- V(StringCompare) \
- V(Compare) \
- V(CompareIC) \
- V(MathPow) \
- V(ArrayLength) \
- V(StringLength) \
- V(FunctionPrototype) \
- V(StoreArrayLength) \
- V(RecordWrite) \
- V(StoreBufferOverflow) \
- V(RegExpExec) \
- V(TranscendentalCache) \
- V(Instanceof) \
- V(ConvertToDouble) \
- V(WriteInt32ToHeapNumber) \
- V(StackCheck) \
- V(Interrupt) \
- V(FastNewClosure) \
- V(FastNewContext) \
- V(FastNewBlockContext) \
- V(FastCloneShallowArray) \
- V(FastCloneShallowObject) \
- V(ToBoolean) \
- V(ToNumber) \
- V(ArgumentsAccess) \
- V(RegExpConstructResult) \
- V(NumberToString) \
- V(CEntry) \
- V(JSEntry) \
- V(KeyedLoadElement) \
- V(ArrayNoArgumentConstructor) \
- V(ArraySingleArgumentConstructor) \
- V(ArrayNArgumentsConstructor) \
- V(KeyedStoreElement) \
- V(DebuggerStatement) \
- V(StringDictionaryLookup) \
- V(ElementsTransitionAndStore) \
- V(TransitionElementsKind) \
- V(StoreArrayLiteralElement) \
- V(StubFailureTrampoline) \
- V(ProfileEntryHook) \
- /* IC Handler stubs */ \
- V(LoadField)
-
-// List of code stubs only used on ARM platforms.
-#ifdef V8_TARGET_ARCH_ARM
-#define CODE_STUB_LIST_ARM(V) \
- V(GetProperty) \
- V(SetProperty) \
- V(InvokeBuiltin) \
- V(RegExpCEntry) \
- V(DirectCEntry)
-#else
-#define CODE_STUB_LIST_ARM(V)
-#endif
-
-// List of code stubs only used on MIPS platforms.
-#ifdef V8_TARGET_ARCH_MIPS
-#define CODE_STUB_LIST_MIPS(V) \
- V(RegExpCEntry) \
- V(DirectCEntry)
-#else
-#define CODE_STUB_LIST_MIPS(V)
-#endif
-
-// Combined list of code stubs.
-#define CODE_STUB_LIST(V) \
- CODE_STUB_LIST_ALL_PLATFORMS(V) \
- CODE_STUB_LIST_ARM(V) \
- CODE_STUB_LIST_MIPS(V)
-
-// Mode to overwrite BinaryExpression values.
-enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
-
-
-// Stub is base classes of all stubs.
-class CodeStub BASE_EMBEDDED {
- public:
- enum Major {
-#define DEF_ENUM(name) name,
- CODE_STUB_LIST(DEF_ENUM)
-#undef DEF_ENUM
- NoCache, // marker for stubs that do custom caching
- NUMBER_OF_IDS
- };
-
- // Retrieve the code for the stub. Generate the code if needed.
- Handle<Code> GetCode(Isolate* isolate);
-
- static Major MajorKeyFromKey(uint32_t key) {
- return static_cast<Major>(MajorKeyBits::decode(key));
- }
- static int MinorKeyFromKey(uint32_t key) {
- return MinorKeyBits::decode(key);
- }
-
- // Gets the major key from a code object that is a code stub or binary op IC.
- static Major GetMajorKey(Code* code_stub) {
- return static_cast<Major>(code_stub->major_key());
- }
-
- static const char* MajorName(Major major_key, bool allow_unknown_keys);
-
- virtual ~CodeStub() {}
-
- bool CompilingCallsToThisStubIsGCSafe() {
- bool is_pregenerated = IsPregenerated();
- Code* code = NULL;
- CHECK(!is_pregenerated || FindCodeInCache(&code, Isolate::Current()));
- return is_pregenerated;
- }
-
- // See comment above, where Instanceof is defined.
- virtual bool IsPregenerated() { return false; }
-
- static void GenerateStubsAheadOfTime(Isolate* isolate);
- static void GenerateFPStubs(Isolate* isolate);
-
- // Some stubs put untagged junk on the stack that cannot be scanned by the
- // GC. This means that we must be statically sure that no GC can occur while
- // they are running. If that is the case they should override this to return
- // true, which will cause an assertion if we try to call something that can
- // GC or if we try to put a stack frame on top of the junk, which would not
- // result in a traversable stack.
- virtual bool SometimesSetsUpAFrame() { return true; }
-
- // Lookup the code in the (possibly custom) cache.
- bool FindCodeInCache(Code** code_out, Isolate* isolate);
-
- // Returns information for computing the number key.
- virtual Major MajorKey() = 0;
- virtual int MinorKey() = 0;
-
- protected:
- static bool CanUseFPRegisters();
-
- // Generates the assembler code for the stub.
- virtual Handle<Code> GenerateCode() = 0;
-
- // BinaryOpStub needs to override this.
- virtual InlineCacheState GetICState() {
- return UNINITIALIZED;
- }
- virtual Code::ExtraICState GetExtraICState() {
- return Code::kNoExtraICState;
- }
- virtual Code::StubType GetStubType() {
- return Code::NORMAL;
- }
-
- // Returns whether the code generated for this stub needs to be allocated as
- // a fixed (non-moveable) code object.
- virtual bool NeedsImmovableCode() { return false; }
-
- private:
- // Perform bookkeeping required after code generation when stub code is
- // initially generated.
- void RecordCodeGeneration(Code* code, Isolate* isolate);
-
- // Finish the code object after it has been generated.
- virtual void FinishCode(Handle<Code> code) { }
-
- // Activate newly generated stub. Is called after
- // registering stub in the stub cache.
- virtual void Activate(Code* code) { }
-
- // BinaryOpStub needs to override this.
- virtual int GetCodeKind();
-
- // Add the code to a specialized cache, specific to an individual
- // stub type. Please note, this method must add the code object to a
- // roots object, otherwise we will remove the code during GC.
- virtual void AddToSpecialCache(Handle<Code> new_object) { }
-
- // Find code in a specialized cache, work is delegated to the specific stub.
- virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
- return false;
- }
-
- // If a stub uses a special cache override this.
- virtual bool UseSpecialCache() { return false; }
-
- // Returns a name for logging/debugging purposes.
- SmartArrayPointer<const char> GetName();
- virtual void PrintName(StringStream* stream);
-
- // Computes the key based on major and minor.
- uint32_t GetKey() {
- ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
- return MinorKeyBits::encode(MinorKey()) |
- MajorKeyBits::encode(MajorKey());
- }
-
- class MajorKeyBits: public BitField<uint32_t, 0, kStubMajorKeyBits> {};
- class MinorKeyBits: public BitField<uint32_t,
- kStubMajorKeyBits, kStubMinorKeyBits> {}; // NOLINT
-
- friend class BreakPointIterator;
-};
-
-
-class PlatformCodeStub : public CodeStub {
- public:
- // Retrieve the code for the stub. Generate the code if needed.
- virtual Handle<Code> GenerateCode();
-
- virtual int GetCodeKind() { return Code::STUB; }
-
- protected:
- // Generates the assembler code for the stub.
- virtual void Generate(MacroAssembler* masm) = 0;
-};
-
-
-struct CodeStubInterfaceDescriptor {
- CodeStubInterfaceDescriptor()
- : register_param_count_(-1),
- stack_parameter_count_(NULL),
- extra_expression_stack_count_(0),
- register_params_(NULL) { }
- int register_param_count_;
- const Register* stack_parameter_count_;
- int extra_expression_stack_count_;
- Register* register_params_;
- Address deoptimization_handler_;
-};
-
-
-class HydrogenCodeStub : public CodeStub {
- public:
- // Retrieve the code for the stub. Generate the code if needed.
- virtual Handle<Code> GenerateCode() = 0;
-
- virtual int GetCodeKind() { return Code::COMPILED_STUB; }
-
- CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate) {
- return isolate->code_stub_interface_descriptor(MajorKey());
- }
-
- virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) = 0;
-};
-
-
-// Helper interface to prepare to/restore after making runtime calls.
-class RuntimeCallHelper {
- public:
- virtual ~RuntimeCallHelper() {}
-
- virtual void BeforeCall(MacroAssembler* masm) const = 0;
-
- virtual void AfterCall(MacroAssembler* masm) const = 0;
-
- protected:
- RuntimeCallHelper() {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
-};
-
-} } // namespace v8::internal
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/code-stubs-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/code-stubs-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/code-stubs-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/code-stubs-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-// RuntimeCallHelper implementation used in stubs: enters/leaves a
-// newly created internal frame before/after the runtime call.
-class StubRuntimeCallHelper : public RuntimeCallHelper {
- public:
- StubRuntimeCallHelper() {}
-
- virtual void BeforeCall(MacroAssembler* masm) const;
-
- virtual void AfterCall(MacroAssembler* masm) const;
-};
-
-
-// Trivial RuntimeCallHelper implementation.
-class NopRuntimeCallHelper : public RuntimeCallHelper {
- public:
- NopRuntimeCallHelper() {}
-
- virtual void BeforeCall(MacroAssembler* masm) const {}
-
- virtual void AfterCall(MacroAssembler* masm) const {}
-};
-
-
-class StackCheckStub : public PlatformCodeStub {
- public:
- StackCheckStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return StackCheck; }
- int MinorKey() { return 0; }
-};
-
-
-class InterruptStub : public PlatformCodeStub {
- public:
- InterruptStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return Interrupt; }
- int MinorKey() { return 0; }
-};
-
-
-class ToNumberStub: public PlatformCodeStub {
- public:
- ToNumberStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToNumber; }
- int MinorKey() { return 0; }
-};
-
-
-class FastNewClosureStub : public PlatformCodeStub {
- public:
- explicit FastNewClosureStub(LanguageMode language_mode)
- : language_mode_(language_mode) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return FastNewClosure; }
- int MinorKey() { return language_mode_ == CLASSIC_MODE
- ? kNonStrictMode : kStrictMode; }
-
- LanguageMode language_mode_;
-};
-
-
-class FastNewContextStub : public PlatformCodeStub {
- public:
- static const int kMaximumSlots = 64;
-
- explicit FastNewContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ >= 0 && slots_ <= kMaximumSlots);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int slots_;
-
- Major MajorKey() { return FastNewContext; }
- int MinorKey() { return slots_; }
-};
-
-
-class FastNewBlockContextStub : public PlatformCodeStub {
- public:
- static const int kMaximumSlots = 64;
-
- explicit FastNewBlockContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ >= 0 && slots_ <= kMaximumSlots);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int slots_;
-
- Major MajorKey() { return FastNewBlockContext; }
- int MinorKey() { return slots_; }
-};
-
-
-class FastCloneShallowArrayStub : public PlatformCodeStub {
- public:
- // Maximum length of copied elements array.
- static const int kMaximumClonedLength = 8;
- enum Mode {
- CLONE_ELEMENTS,
- CLONE_DOUBLE_ELEMENTS,
- COPY_ON_WRITE_ELEMENTS,
- CLONE_ANY_ELEMENTS,
- LAST_CLONE_MODE = CLONE_ANY_ELEMENTS
- };
-
- static const int kFastCloneModeCount = LAST_CLONE_MODE + 1;
-
- FastCloneShallowArrayStub(Mode mode,
- AllocationSiteMode allocation_site_mode,
- int length)
- : mode_(mode),
- allocation_site_mode_(allocation_site_mode),
- length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
- ASSERT_GE(length_, 0);
- ASSERT_LE(length_, kMaximumClonedLength);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Mode mode_;
- AllocationSiteMode allocation_site_mode_;
- int length_;
-
- class AllocationSiteModeBits: public BitField<AllocationSiteMode, 0, 1> {};
- class ModeBits: public BitField<Mode, 1, 4> {};
- class LengthBits: public BitField<int, 5, 4> {};
- // Ensure data fits within available bits.
- STATIC_ASSERT(LAST_ALLOCATION_SITE_MODE == 1);
- STATIC_ASSERT(kFastCloneModeCount < 16);
- STATIC_ASSERT(kMaximumClonedLength < 16);
- Major MajorKey() { return FastCloneShallowArray; }
- int MinorKey() {
- return AllocationSiteModeBits::encode(allocation_site_mode_)
- | ModeBits::encode(mode_)
- | LengthBits::encode(length_);
- }
-};
-
-
-class FastCloneShallowObjectStub : public HydrogenCodeStub {
- public:
- // Maximum number of properties in copied object.
- static const int kMaximumClonedProperties = 6;
-
- explicit FastCloneShallowObjectStub(int length) : length_(length) {
- ASSERT_GE(length_, 0);
- ASSERT_LE(length_, kMaximumClonedProperties);
- }
-
- int length() const { return length_; }
-
- virtual Handle<Code> GenerateCode();
-
- virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
-
- private:
- int length_;
-
- Major MajorKey() { return FastCloneShallowObject; }
- int MinorKey() { return length_; }
-
- DISALLOW_COPY_AND_ASSIGN(FastCloneShallowObjectStub);
-};
-
-
-class InstanceofStub: public PlatformCodeStub {
- public:
- enum Flags {
- kNoFlags = 0,
- kArgsInRegisters = 1 << 0,
- kCallSiteInlineCheck = 1 << 1,
- kReturnTrueFalseObject = 1 << 2
- };
-
- explicit InstanceofStub(Flags flags) : flags_(flags) { }
-
- static Register left();
- static Register right();
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return Instanceof; }
- int MinorKey() { return static_cast<int>(flags_); }
-
- bool HasArgsInRegisters() const {
- return (flags_ & kArgsInRegisters) != 0;
- }
-
- bool HasCallSiteInlineCheck() const {
- return (flags_ & kCallSiteInlineCheck) != 0;
- }
-
- bool ReturnTrueFalseObject() const {
- return (flags_ & kReturnTrueFalseObject) != 0;
- }
-
- virtual void PrintName(StringStream* stream);
-
- Flags flags_;
-};
-
-
-class MathPowStub: public PlatformCodeStub {
- public:
- enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
-
- explicit MathPowStub(ExponentType exponent_type)
- : exponent_type_(exponent_type) { }
- virtual void Generate(MacroAssembler* masm);
-
- private:
- virtual CodeStub::Major MajorKey() { return MathPow; }
- virtual int MinorKey() { return exponent_type_; }
-
- ExponentType exponent_type_;
-};
-
-
-class ICStub: public PlatformCodeStub {
- public:
- explicit ICStub(Code::Kind kind) : kind_(kind) { }
- virtual int GetCodeKind() { return kind_; }
- virtual InlineCacheState GetICState() { return MONOMORPHIC; }
-
- bool Describes(Code* code) {
- return GetMajorKey(code) == MajorKey() && code->stub_info() == MinorKey();
- }
-
- protected:
- class KindBits: public BitField<Code::Kind, 0, 4> {};
- virtual void FinishCode(Handle<Code> code) {
- code->set_stub_info(MinorKey());
- }
- Code::Kind kind() { return kind_; }
-
- virtual int MinorKey() {
- return KindBits::encode(kind_);
- }
-
- private:
- Code::Kind kind_;
-};
-
-
-class ArrayLengthStub: public ICStub {
- public:
- explicit ArrayLengthStub(Code::Kind kind) : ICStub(kind) { }
- virtual void Generate(MacroAssembler* masm);
-
- private:
- virtual CodeStub::Major MajorKey() { return ArrayLength; }
-};
-
-
-class FunctionPrototypeStub: public ICStub {
- public:
- explicit FunctionPrototypeStub(Code::Kind kind) : ICStub(kind) { }
- virtual void Generate(MacroAssembler* masm);
-
- private:
- virtual CodeStub::Major MajorKey() { return FunctionPrototype; }
-};
-
-
-class StringLengthStub: public ICStub {
- public:
- StringLengthStub(Code::Kind kind, bool support_wrapper)
- : ICStub(kind), support_wrapper_(support_wrapper) { }
- virtual void Generate(MacroAssembler* masm);
-
- private:
- STATIC_ASSERT(KindBits::kSize == 4);
- class WrapperModeBits: public BitField<bool, 4, 1> {};
- virtual CodeStub::Major MajorKey() { return StringLength; }
- virtual int MinorKey() {
- return KindBits::encode(kind()) | WrapperModeBits::encode(support_wrapper_);
- }
-
- bool support_wrapper_;
-};
-
-
-class StoreICStub: public ICStub {
- public:
- StoreICStub(Code::Kind kind, StrictModeFlag strict_mode)
- : ICStub(kind), strict_mode_(strict_mode) { }
-
- protected:
- virtual Code::ExtraICState GetExtraICState() {
- return strict_mode_;
- }
-
- private:
- STATIC_ASSERT(KindBits::kSize == 4);
- class StrictModeBits: public BitField<bool, 4, 1> {};
- virtual int MinorKey() {
- return KindBits::encode(kind()) | StrictModeBits::encode(strict_mode_);
- }
-
- StrictModeFlag strict_mode_;
-};
-
-
-class StoreArrayLengthStub: public StoreICStub {
- public:
- explicit StoreArrayLengthStub(Code::Kind kind, StrictModeFlag strict_mode)
- : StoreICStub(kind, strict_mode) { }
- virtual void Generate(MacroAssembler* masm);
-
- private:
- virtual CodeStub::Major MajorKey() { return StoreArrayLength; }
-};
-
-
-class HandlerStub: public ICStub {
- public:
- explicit HandlerStub(Code::Kind kind) : ICStub(kind) { }
-
- protected:
- virtual Code::ExtraICState GetExtraICState() {
- return Code::HANDLER_FRAGMENT;
- }
-};
-
-
-class LoadFieldStub: public HandlerStub {
- public:
- LoadFieldStub(Register reg, bool inobject, int index)
- : HandlerStub(Code::LOAD_IC),
- reg_(reg),
- inobject_(inobject),
- index_(index) { }
- virtual void Generate(MacroAssembler* masm);
-
- protected:
- virtual Code::StubType GetStubType() { return Code::FIELD; }
-
- private:
- STATIC_ASSERT(KindBits::kSize == 4);
- class RegisterBits: public BitField<int, 4, 6> {};
- class InobjectBits: public BitField<bool, 10, 1> {};
- class IndexBits: public BitField<int, 11, 11> {};
- virtual CodeStub::Major MajorKey() { return LoadField; }
- virtual int MinorKey() {
- return KindBits::encode(kind())
- | RegisterBits::encode(reg_.code())
- | InobjectBits::encode(inobject_)
- | IndexBits::encode(index_);
- }
-
- Register reg_;
- bool inobject_;
- int index_;
-};
-
-
-class BinaryOpStub: public PlatformCodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- platform_specific_bit_(false),
- left_type_(BinaryOpIC::UNINITIALIZED),
- right_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- Initialize();
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- platform_specific_bit_(PlatformSpecificBits::decode(key)),
- left_type_(left_type),
- right_type_(right_type),
- result_type_(result_type) { }
-
- static void decode_types_from_minor_key(int minor_key,
- BinaryOpIC::TypeInfo* left_type,
- BinaryOpIC::TypeInfo* right_type,
- BinaryOpIC::TypeInfo* result_type) {
- *left_type =
- static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
- *right_type =
- static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
- *result_type =
- static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
- }
-
- static Token::Value decode_op_from_minor_key(int minor_key) {
- return static_cast<Token::Value>(OpBits::decode(minor_key));
- }
-
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- bool platform_specific_bit_; // Indicates SSE3 on IA32, VFP2 on ARM.
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo left_type_;
- BinaryOpIC::TypeInfo right_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 19 bits TTTRRRLLLSOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class PlatformSpecificBits: public BitField<bool, 9, 1> {};
- class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
- class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | PlatformSpecificBits::encode(platform_specific_bit_)
- | LeftTypeBits::encode(left_type_)
- | RightTypeBits::encode(right_type_)
- | ResultTypeBits::encode(result_type_);
- }
-
-
- // Platform-independent implementation.
- void Generate(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
-
- // Platform-independent signature, platform-specific implementation.
- void Initialize();
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
-
- // Entirely platform-specific methods are defined as static helper
- // functions in the <arch>/code-stubs-<arch>.cc files.
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(Max(left_type_, right_type_));
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_stub_info(MinorKey());
- }
-
- friend class CodeGenerator;
-};
-
-
-class ICCompareStub: public PlatformCodeStub {
- public:
- ICCompareStub(Token::Value op,
- CompareIC::State left,
- CompareIC::State right,
- CompareIC::State handler)
- : op_(op),
- left_(left),
- right_(right),
- state_(handler) {
- ASSERT(Token::IsCompareOp(op));
- }
-
- virtual void Generate(MacroAssembler* masm);
-
- void set_known_map(Handle<Map> map) { known_map_ = map; }
-
- static void DecodeMinorKey(int minor_key,
- CompareIC::State* left_state,
- CompareIC::State* right_state,
- CompareIC::State* handler_state,
- Token::Value* op);
-
- static CompareIC::State CompareState(int minor_key) {
- return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
- }
-
- private:
- class OpField: public BitField<int, 0, 3> { };
- class LeftStateField: public BitField<int, 3, 4> { };
- class RightStateField: public BitField<int, 7, 4> { };
- class HandlerStateField: public BitField<int, 11, 4> { };
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_stub_info(MinorKey());
- }
-
- virtual CodeStub::Major MajorKey() { return CompareIC; }
- virtual int MinorKey();
-
- virtual int GetCodeKind() { return Code::COMPARE_IC; }
-
- void GenerateSmis(MacroAssembler* masm);
- void GenerateNumbers(MacroAssembler* masm);
- void GenerateInternalizedStrings(MacroAssembler* masm);
- void GenerateStrings(MacroAssembler* masm);
- void GenerateUniqueNames(MacroAssembler* masm);
- void GenerateObjects(MacroAssembler* masm);
- void GenerateMiss(MacroAssembler* masm);
- void GenerateKnownObjects(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
-
- bool strict() const { return op_ == Token::EQ_STRICT; }
- Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
-
- virtual void AddToSpecialCache(Handle<Code> new_object);
- virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate);
- virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECT; }
-
- Token::Value op_;
- CompareIC::State left_;
- CompareIC::State right_;
- CompareIC::State state_;
- Handle<Map> known_map_;
-};
-
-
-class CEntryStub : public PlatformCodeStub {
- public:
- explicit CEntryStub(int result_size,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs)
- : result_size_(result_size), save_doubles_(save_doubles) { }
-
- void Generate(MacroAssembler* masm);
-
- // The version of this stub that doesn't save doubles is generated ahead of
- // time, so it's OK to call it from other stubs that can't cope with GC during
- // their code generation. On machines that always have gp registers (x64) we
- // can generate both variants ahead of time.
- virtual bool IsPregenerated();
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- void GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope);
-
- // Number of pointers/values returned.
- const int result_size_;
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return CEntry; }
- int MinorKey();
-
- bool NeedsImmovableCode();
-};
-
-
-class JSEntryStub : public PlatformCodeStub {
- public:
- JSEntryStub() { }
-
- void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
-
- protected:
- void GenerateBody(MacroAssembler* masm, bool is_construct);
-
- private:
- Major MajorKey() { return JSEntry; }
- int MinorKey() { return 0; }
-
- virtual void FinishCode(Handle<Code> code);
-
- int handler_offset_;
-};
-
-
-class JSConstructEntryStub : public JSEntryStub {
- public:
- JSConstructEntryStub() { }
-
- void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
-
- private:
- int MinorKey() { return 1; }
-
- virtual void PrintName(StringStream* stream) {
- stream->Add("JSConstructEntryStub");
- }
-};
-
-
-class ArgumentsAccessStub: public PlatformCodeStub {
- public:
- enum Type {
- READ_ELEMENT,
- NEW_NON_STRICT_FAST,
- NEW_NON_STRICT_SLOW,
- NEW_STRICT
- };
-
- explicit ArgumentsAccessStub(Type type) : type_(type) { }
-
- private:
- Type type_;
-
- Major MajorKey() { return ArgumentsAccess; }
- int MinorKey() { return type_; }
-
- void Generate(MacroAssembler* masm);
- void GenerateReadElement(MacroAssembler* masm);
- void GenerateNewStrict(MacroAssembler* masm);
- void GenerateNewNonStrictFast(MacroAssembler* masm);
- void GenerateNewNonStrictSlow(MacroAssembler* masm);
-
- virtual void PrintName(StringStream* stream);
-};
-
-
-class RegExpExecStub: public PlatformCodeStub {
- public:
- RegExpExecStub() { }
-
- private:
- Major MajorKey() { return RegExpExec; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class RegExpConstructResultStub: public PlatformCodeStub {
- public:
- RegExpConstructResultStub() { }
-
- private:
- Major MajorKey() { return RegExpConstructResult; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class CallFunctionStub: public PlatformCodeStub {
- public:
- CallFunctionStub(int argc, CallFunctionFlags flags)
- : argc_(argc), flags_(flags) { }
-
- void Generate(MacroAssembler* masm);
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_has_function_cache(RecordCallTarget());
- }
-
- static int ExtractArgcFromMinorKey(int minor_key) {
- return ArgcBits::decode(minor_key);
- }
-
- private:
- int argc_;
- CallFunctionFlags flags_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
- class FlagBits: public BitField<CallFunctionFlags, 0, 2> {};
- class ArgcBits: public BitField<unsigned, 2, 32 - 2> {};
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() {
- // Encode the parameters in a unique 32 bit value.
- return FlagBits::encode(flags_) | ArgcBits::encode(argc_);
- }
-
- bool ReceiverMightBeImplicit() {
- return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
- }
-
- bool RecordCallTarget() {
- return (flags_ & RECORD_CALL_TARGET) != 0;
- }
-};
-
-
-class CallConstructStub: public PlatformCodeStub {
- public:
- explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {}
-
- void Generate(MacroAssembler* masm);
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_has_function_cache(RecordCallTarget());
- }
-
- private:
- CallFunctionFlags flags_;
-
- virtual void PrintName(StringStream* stream);
-
- Major MajorKey() { return CallConstruct; }
- int MinorKey() { return flags_; }
-
- bool RecordCallTarget() {
- return (flags_ & RECORD_CALL_TARGET) != 0;
- }
-};
-
-
-enum StringIndexFlags {
- // Accepts smis or heap numbers.
- STRING_INDEX_IS_NUMBER,
-
- // Accepts smis or heap numbers that are valid array indices
- // (ECMA-262 15.4). Invalid indices are reported as being out of
- // range.
- STRING_INDEX_IS_ARRAY_INDEX
-};
-
-
-// Generates code implementing String.prototype.charCodeAt.
-//
-// Only supports the case when the receiver is a string and the index
-// is a number (smi or heap number) that is a valid index into the
-// string. Additional index constraints are specified by the
-// flags. Otherwise, bails out to the provided labels.
-//
-// Register usage: |object| may be changed to another string in a way
-// that doesn't affect charCodeAt/charAt semantics, |index| is
-// preserved, |scratch| and |result| are clobbered.
-class StringCharCodeAtGenerator {
- public:
- StringCharCodeAtGenerator(Register object,
- Register index,
- Register result,
- Label* receiver_not_string,
- Label* index_not_number,
- Label* index_out_of_range,
- StringIndexFlags index_flags)
- : object_(object),
- index_(index),
- result_(result),
- receiver_not_string_(receiver_not_string),
- index_not_number_(index_not_number),
- index_out_of_range_(index_out_of_range),
- index_flags_(index_flags) {
- ASSERT(!result_.is(object_));
- ASSERT(!result_.is(index_));
- }
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm);
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
-
- // Skip handling slow case and directly jump to bailout.
- void SkipSlow(MacroAssembler* masm, Label* bailout) {
- masm->bind(&index_not_smi_);
- masm->bind(&call_runtime_);
- masm->jmp(bailout);
- }
-
- private:
- Register object_;
- Register index_;
- Register result_;
-
- Label* receiver_not_string_;
- Label* index_not_number_;
- Label* index_out_of_range_;
-
- StringIndexFlags index_flags_;
-
- Label call_runtime_;
- Label index_not_smi_;
- Label got_smi_index_;
- Label exit_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
-};
-
-
-// Generates code for creating a one-char string from a char code.
-class StringCharFromCodeGenerator {
- public:
- StringCharFromCodeGenerator(Register code,
- Register result)
- : code_(code),
- result_(result) {
- ASSERT(!code_.is(result_));
- }
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm);
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
-
- // Skip handling slow case and directly jump to bailout.
- void SkipSlow(MacroAssembler* masm, Label* bailout) {
- masm->bind(&slow_case_);
- masm->jmp(bailout);
- }
-
- private:
- Register code_;
- Register result_;
-
- Label slow_case_;
- Label exit_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
-};
-
-
-// Generates code implementing String.prototype.charAt.
-//
-// Only supports the case when the receiver is a string and the index
-// is a number (smi or heap number) that is a valid index into the
-// string. Additional index constraints are specified by the
-// flags. Otherwise, bails out to the provided labels.
-//
-// Register usage: |object| may be changed to another string in a way
-// that doesn't affect charCodeAt/charAt semantics, |index| is
-// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
-class StringCharAtGenerator {
- public:
- StringCharAtGenerator(Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_number,
- Label* index_out_of_range,
- StringIndexFlags index_flags)
- : char_code_at_generator_(object,
- index,
- scratch,
- receiver_not_string,
- index_not_number,
- index_out_of_range,
- index_flags),
- char_from_code_generator_(scratch, result) {}
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
- }
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
- }
-
- // Skip handling slow case and directly jump to bailout.
- void SkipSlow(MacroAssembler* masm, Label* bailout) {
- char_code_at_generator_.SkipSlow(masm, bailout);
- char_from_code_generator_.SkipSlow(masm, bailout);
- }
-
- private:
- StringCharCodeAtGenerator char_code_at_generator_;
- StringCharFromCodeGenerator char_from_code_generator_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
-};
-
-
-class AllowStubCallsScope {
- public:
- AllowStubCallsScope(MacroAssembler* masm, bool allow)
- : masm_(masm), previous_allow_(masm->allow_stub_calls()) {
- masm_->set_allow_stub_calls(allow);
- }
- ~AllowStubCallsScope() {
- masm_->set_allow_stub_calls(previous_allow_);
- }
-
- private:
- MacroAssembler* masm_;
- bool previous_allow_;
-
- DISALLOW_COPY_AND_ASSIGN(AllowStubCallsScope);
-};
-
-
-class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
- public:
- KeyedLoadDictionaryElementStub() {}
-
- Major MajorKey() { return KeyedLoadElement; }
- int MinorKey() { return DICTIONARY_ELEMENTS; }
-
- void Generate(MacroAssembler* masm);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
-};
-
-
-class KeyedLoadFastElementStub : public HydrogenCodeStub {
- public:
- KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
- bit_field_ = ElementsKindBits::encode(elements_kind) |
- IsJSArrayBits::encode(is_js_array);
- }
-
- bool is_js_array() const {
- return IsJSArrayBits::decode(bit_field_);
- }
-
- ElementsKind elements_kind() const {
- return ElementsKindBits::decode(bit_field_);
- }
-
- virtual Handle<Code> GenerateCode();
-
- virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
-
- private:
- class IsJSArrayBits: public BitField<bool, 8, 1> {};
- class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
- uint32_t bit_field_;
-
- Major MajorKey() { return KeyedLoadElement; }
- int MinorKey() { return bit_field_; }
-
- DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub);
-};
-
-
-class TransitionElementsKindStub : public HydrogenCodeStub {
- public:
- TransitionElementsKindStub(ElementsKind from_kind,
- ElementsKind to_kind) {
- bit_field_ = FromKindBits::encode(from_kind) |
- ToKindBits::encode(to_kind);
- }
-
- ElementsKind from_kind() const {
- return FromKindBits::decode(bit_field_);
- }
-
- ElementsKind to_kind() const {
- return ToKindBits::decode(bit_field_);
- }
-
- virtual Handle<Code> GenerateCode();
-
- virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
-
- private:
- class FromKindBits: public BitField<ElementsKind, 8, 8> {};
- class ToKindBits: public BitField<ElementsKind, 0, 8> {};
- uint32_t bit_field_;
-
- Major MajorKey() { return TransitionElementsKind; }
- int MinorKey() { return bit_field_; }
-
- DISALLOW_COPY_AND_ASSIGN(TransitionElementsKindStub);
-};
-
-
-class ArrayNoArgumentConstructorStub : public HydrogenCodeStub {
- public:
- ArrayNoArgumentConstructorStub() {
- }
-
- Major MajorKey() { return ArrayNoArgumentConstructor; }
- int MinorKey() { return 0; }
-
- virtual Handle<Code> GenerateCode();
-
- virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub);
-};
-
-
-class ArraySingleArgumentConstructorStub : public HydrogenCodeStub {
- public:
- ArraySingleArgumentConstructorStub() {
- }
-
- Major MajorKey() { return ArraySingleArgumentConstructor; }
- int MinorKey() { return 0; }
-
- virtual Handle<Code> GenerateCode();
-
- virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub);
-};
-
-
-class ArrayNArgumentsConstructorStub : public HydrogenCodeStub {
- public:
- ArrayNArgumentsConstructorStub() {
- }
-
- Major MajorKey() { return ArrayNArgumentsConstructor; }
- int MinorKey() { return 0; }
-
- virtual Handle<Code> GenerateCode();
-
- virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub);
-};
-
-
-class KeyedStoreElementStub : public PlatformCodeStub {
- public:
- KeyedStoreElementStub(bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode)
- : is_js_array_(is_js_array),
- elements_kind_(elements_kind),
- grow_mode_(grow_mode),
- fp_registers_(CanUseFPRegisters()) { }
-
- Major MajorKey() { return KeyedStoreElement; }
- int MinorKey() {
- return ElementsKindBits::encode(elements_kind_) |
- IsJSArrayBits::encode(is_js_array_) |
- GrowModeBits::encode(grow_mode_) |
- FPRegisters::encode(fp_registers_);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
- class GrowModeBits: public BitField<KeyedAccessGrowMode, 8, 1> {};
- class IsJSArrayBits: public BitField<bool, 9, 1> {};
- class FPRegisters: public BitField<bool, 10, 1> {};
-
- bool is_js_array_;
- ElementsKind elements_kind_;
- KeyedAccessGrowMode grow_mode_;
- bool fp_registers_;
-
- DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
-};
-
-
-class ToBooleanStub: public PlatformCodeStub {
- public:
- enum Type {
- UNDEFINED,
- BOOLEAN,
- NULL_TYPE,
- SMI,
- SPEC_OBJECT,
- STRING,
- HEAP_NUMBER,
- NUMBER_OF_TYPES
- };
-
- // At most 8 different types can be distinguished, because the Code object
- // only has room for a single byte to hold a set of these types. :-P
- STATIC_ASSERT(NUMBER_OF_TYPES <= 8);
-
- class Types {
- public:
- Types() {}
- explicit Types(byte bits) : set_(bits) {}
-
- bool IsEmpty() const { return set_.IsEmpty(); }
- bool Contains(Type type) const { return set_.Contains(type); }
- bool ContainsAnyOf(Types types) const {
- return set_.ContainsAnyOf(types.set_);
- }
- void Add(Type type) { set_.Add(type); }
- byte ToByte() const { return set_.ToIntegral(); }
- void Print(StringStream* stream) const;
- void TraceTransition(Types to) const;
- bool Record(Handle<Object> object);
- bool NeedsMap() const;
- bool CanBeUndetectable() const;
-
- private:
- EnumSet<Type, byte> set_;
- };
-
- static Types no_types() { return Types(); }
- static Types all_types() { return Types((1 << NUMBER_OF_TYPES) - 1); }
-
- explicit ToBooleanStub(Register tos, Types types = Types())
- : tos_(tos), types_(types) { }
-
- void Generate(MacroAssembler* masm);
- virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; }
- virtual void PrintName(StringStream* stream);
-
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_to_boolean_state(types_.ToByte());
- }
-
- void CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- Register tos_;
- Types types_;
-};
-
-
-class ElementsTransitionAndStoreStub : public PlatformCodeStub {
- public:
- ElementsTransitionAndStoreStub(ElementsKind from,
- ElementsKind to,
- bool is_jsarray,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode)
- : from_(from),
- to_(to),
- is_jsarray_(is_jsarray),
- strict_mode_(strict_mode),
- grow_mode_(grow_mode) {}
-
- private:
- class FromBits: public BitField<ElementsKind, 0, 8> {};
- class ToBits: public BitField<ElementsKind, 8, 8> {};
- class IsJSArrayBits: public BitField<bool, 16, 1> {};
- class StrictModeBits: public BitField<StrictModeFlag, 17, 1> {};
- class GrowModeBits: public BitField<KeyedAccessGrowMode, 18, 1> {};
-
- Major MajorKey() { return ElementsTransitionAndStore; }
- int MinorKey() {
- return FromBits::encode(from_) |
- ToBits::encode(to_) |
- IsJSArrayBits::encode(is_jsarray_) |
- StrictModeBits::encode(strict_mode_) |
- GrowModeBits::encode(grow_mode_);
- }
-
- void Generate(MacroAssembler* masm);
-
- ElementsKind from_;
- ElementsKind to_;
- bool is_jsarray_;
- StrictModeFlag strict_mode_;
- KeyedAccessGrowMode grow_mode_;
-
- DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
-};
-
-
-class StoreArrayLiteralElementStub : public PlatformCodeStub {
- public:
- StoreArrayLiteralElementStub()
- : fp_registers_(CanUseFPRegisters()) { }
-
- private:
- class FPRegisters: public BitField<bool, 0, 1> {};
-
- Major MajorKey() { return StoreArrayLiteralElement; }
- int MinorKey() { return FPRegisters::encode(fp_registers_); }
-
- void Generate(MacroAssembler* masm);
-
- bool fp_registers_;
-
- DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
-};
-
-
-class StubFailureTrampolineStub : public PlatformCodeStub {
- public:
- static const int kMaxExtraExpressionStackCount = 1;
-
- explicit StubFailureTrampolineStub(int extra_expression_stack_count)
- : extra_expression_stack_count_(extra_expression_stack_count) {}
-
- virtual bool IsPregenerated() { return true; }
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- Major MajorKey() { return StubFailureTrampoline; }
- int MinorKey() { return extra_expression_stack_count_; }
-
- void Generate(MacroAssembler* masm);
-
- int extra_expression_stack_count_;
-
- DISALLOW_COPY_AND_ASSIGN(StubFailureTrampolineStub);
-};
-
-
-class ProfileEntryHookStub : public PlatformCodeStub {
- public:
- explicit ProfileEntryHookStub() {}
-
- // The profile entry hook function is not allowed to cause a GC.
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- // Generates a call to the entry hook if it's enabled.
- static void MaybeCallEntryHook(MacroAssembler* masm);
-
- // Sets or unsets the entry hook function. Returns true on success,
- // false on an attempt to replace a non-NULL entry hook with another
- // non-NULL hook.
- static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);
-
- static bool HasEntryHook() { return entry_hook_ != NULL; }
-
- private:
- static void EntryHookTrampoline(intptr_t function,
- intptr_t stack_pointer);
-
- Major MajorKey() { return ProfileEntryHook; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- // The current function entry hook.
- static FunctionEntryHook entry_hook_;
-
- DISALLOW_COPY_AND_ASSIGN(ProfileEntryHookStub);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_CODE_STUBS_H_
diff --git a/src/3rdparty/v8/src/code.h b/src/3rdparty/v8/src/code.h
deleted file mode 100644
index 766c932..0000000
--- a/src/3rdparty/v8/src/code.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CODE_H_
-#define V8_CODE_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Wrapper class for passing expected and actual parameter counts as
-// either registers or immediate values. Used to make sure that the
-// caller provides exactly the expected number of parameters to the
-// callee.
-class ParameterCount BASE_EMBEDDED {
- public:
- explicit ParameterCount(Register reg)
- : reg_(reg), immediate_(0) { }
- explicit ParameterCount(int immediate)
- : reg_(no_reg), immediate_(immediate) { }
-
- bool is_reg() const { return !reg_.is(no_reg); }
- bool is_immediate() const { return !is_reg(); }
-
- Register reg() const {
- ASSERT(is_reg());
- return reg_;
- }
- int immediate() const {
- ASSERT(is_immediate());
- return immediate_;
- }
-
- private:
- const Register reg_;
- const int immediate_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(ParameterCount);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_CODE_H_
diff --git a/src/3rdparty/v8/src/codegen.cc b/src/3rdparty/v8/src/codegen.cc
deleted file mode 100644
index 508e221..0000000
--- a/src/3rdparty/v8/src/codegen.cc
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "prettyprinter.h"
-#include "rewriter.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-#ifdef DEBUG
-
-Comment::Comment(MacroAssembler* masm, const char* msg)
- : masm_(masm), msg_(msg) {
- __ RecordComment(msg);
-}
-
-
-Comment::~Comment() {
- if (msg_[0] == '[') __ RecordComment("]");
-}
-
-#endif // DEBUG
-
-#undef __
-
-
-void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
-#ifdef DEBUG
- bool print_source = false;
- bool print_ast = false;
- const char* ftype;
-
- if (Isolate::Current()->bootstrapper()->IsActive()) {
- print_source = FLAG_print_builtin_source;
- print_ast = FLAG_print_builtin_ast;
- ftype = "builtin";
- } else {
- print_source = FLAG_print_source;
- print_ast = FLAG_print_ast;
- ftype = "user-defined";
- }
-
- if (FLAG_trace_codegen || print_source || print_ast) {
- PrintF("*** Generate code for %s function: ", ftype);
- if (info->IsStub()) {
- const char* name =
- CodeStub::MajorName(info->code_stub()->MajorKey(), true);
- PrintF("%s", name == NULL ? "<unknown>" : name);
- } else {
- info->function()->name()->ShortPrint();
- }
- PrintF(" ***\n");
- }
-
- if (!info->IsStub() && print_source) {
- PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter().PrintProgram(info->function()));
- }
-
- if (!info->IsStub() && print_ast) {
- PrintF("--- AST ---\n%s\n",
- AstPrinter().PrintProgram(info->function()));
- }
-#endif // DEBUG
-}
-
-
-Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info) {
- Isolate* isolate = info->isolate();
-
- // Allocate and install the code.
- CodeDesc desc;
- masm->GetCode(&desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, flags, masm->CodeObject());
-
- if (!code.is_null()) {
- isolate->counters()->total_compiled_code_size()->Increment(
- code->instruction_size());
- code->set_prologue_offset(info->prologue_offset());
- }
- return code;
-}
-
-
-void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
-#ifdef ENABLE_DISASSEMBLER
- bool print_code = Isolate::Current()->bootstrapper()->IsActive()
- ? FLAG_print_builtin_code
- : (FLAG_print_code ||
- (info->IsStub() && FLAG_print_code_stubs) ||
- (info->IsOptimizing() && FLAG_print_opt_code));
- if (print_code) {
- // Print the source code if available.
- FunctionLiteral* function = info->function();
- if (code->kind() != Code::COMPILED_STUB) {
- Handle<Script> script = info->script();
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- PrintF("--- Raw source ---\n");
- ConsStringIteratorOp op;
- StringCharacterStream stream(String::cast(script->source()),
- &op,
- function->start_position());
- // fun->end_position() points to the last character in the stream. We
- // need to compensate by adding one to calculate the length.
- int source_len =
- function->end_position() - function->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.HasMore()) PrintF("%c", stream.GetNext());
- }
- PrintF("\n\n");
- }
- }
- if (info->IsOptimizing()) {
- if (FLAG_print_unopt_code) {
- PrintF("--- Unoptimized code ---\n");
- info->closure()->shared()->code()->Disassemble(
- *function->debug_name()->ToCString());
- }
- PrintF("--- Optimized code ---\n");
- } else {
- PrintF("--- Code ---\n");
- }
- if (info->IsStub()) {
- CodeStub::Major major_key = info->code_stub()->MajorKey();
- code->Disassemble(CodeStub::MajorName(major_key, false));
- } else {
- code->Disassemble(*function->debug_name()->ToCString());
- }
- }
-#endif // ENABLE_DISASSEMBLER
-}
-
-
-bool CodeGenerator::ShouldGenerateLog(Expression* type) {
- ASSERT(type != NULL);
- Isolate* isolate = Isolate::Current();
- if (!isolate->logger()->is_logging() && !CpuProfiler::is_profiling(isolate)) {
- return false;
- }
- Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
- if (FLAG_log_regexp) {
- if (name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("regexp")))
- return true;
- }
- return false;
-}
-
-
-bool CodeGenerator::RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here) {
- if (pos != RelocInfo::kNoPosition) {
- masm->positions_recorder()->RecordStatementPosition(pos);
- masm->positions_recorder()->RecordPosition(pos);
- if (right_here) {
- return masm->positions_recorder()->WriteRecordedPositions();
- }
- }
- return false;
-}
-
-
-void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
- switch (type_) {
- case READ_ELEMENT:
- GenerateReadElement(masm);
- break;
- case NEW_NON_STRICT_FAST:
- GenerateNewNonStrictFast(masm);
- break;
- case NEW_NON_STRICT_SLOW:
- GenerateNewNonStrictSlow(masm);
- break;
- case NEW_STRICT:
- GenerateNewStrict(masm);
- break;
- }
-}
-
-
-int CEntryStub::MinorKey() {
- int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0;
- ASSERT(result_size_ == 1 || result_size_ == 2);
-#ifdef _WIN64
- return result | ((result_size_ == 1) ? 0 : 2);
-#else
- return result;
-#endif
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/codegen.h b/src/3rdparty/v8/src/codegen.h
deleted file mode 100644
index 09907c4..0000000
--- a/src/3rdparty/v8/src/codegen.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CODEGEN_H_
-#define V8_CODEGEN_H_
-
-#include "code-stubs.h"
-#include "runtime.h"
-#include "type-info.h"
-
-// Include the declaration of the architecture defined class CodeGenerator.
-// The contract to the shared code is that the the CodeGenerator is a subclass
-// of Visitor and that the following methods are available publicly:
-// MakeCode
-// MakeCodePrologue
-// MakeCodeEpilogue
-// masm
-// frame
-// script
-// has_valid_frame
-// SetFrame
-// DeleteFrame
-// allocator
-// AddDeferred
-// in_spilled_code
-// set_in_spilled_code
-// RecordPositions
-//
-// These methods are either used privately by the shared code or implemented as
-// shared code:
-// CodeGenerator
-// ~CodeGenerator
-// Generate
-// ComputeLazyCompile
-// BuildFunctionInfo
-// ProcessDeclarations
-// DeclareGlobals
-// CheckForInlineRuntimeCall
-// AnalyzeCondition
-// CodeForFunctionPosition
-// CodeForReturnPosition
-// CodeForStatementPosition
-// CodeForDoWhileConditionPosition
-// CodeForSourcePosition
-
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/codegen-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/codegen-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/codegen-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/codegen-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-// Results of the library implementation of transcendental functions may differ
-// from the one we use in our generated code. Therefore we use the same
-// generated code both in runtime and compiled code.
-typedef double (*UnaryMathFunction)(double x);
-
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
-UnaryMathFunction CreateExpFunction();
-UnaryMathFunction CreateSqrtFunction();
-
-
-class ElementsTransitionGenerator : public AllStatic {
- public:
- // If |mode| is set to DONT_TRACK_ALLOCATION_SITE,
- // |allocation_site_info_found| may be NULL.
- static void GenerateMapChangeElementsTransition(MacroAssembler* masm,
- AllocationSiteMode mode,
- Label* allocation_site_info_found);
- static void GenerateSmiToDouble(MacroAssembler* masm,
- AllocationSiteMode mode,
- Label* fail);
- static void GenerateDoubleToObject(MacroAssembler* masm,
- AllocationSiteMode mode,
- Label* fail);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
-};
-
-
-class SeqStringSetCharGenerator : public AllStatic {
- public:
- static void Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value);
- private:
- DISALLOW_COPY_AND_ASSIGN(SeqStringSetCharGenerator);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_CODEGEN_H_
diff --git a/src/3rdparty/v8/src/collection.js b/src/3rdparty/v8/src/collection.js
deleted file mode 100644
index b3c2db7..0000000
--- a/src/3rdparty/v8/src/collection.js
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"use strict";
-
-var $Set = global.Set;
-var $Map = global.Map;
-var $WeakMap = global.WeakMap;
-
-//-------------------------------------------------------------------
-
-// Global sentinel to be used instead of undefined keys, which are not
-// supported internally but required for Harmony sets and maps.
-var undefined_sentinel = {};
-
-
-function SetConstructor() {
- if (%_IsConstructCall()) {
- %SetInitialize(this);
- } else {
- return new $Set();
- }
-}
-
-
-function SetAdd(key) {
- if (!IS_SET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Set.prototype.add', this]);
- }
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- return %SetAdd(this, key);
-}
-
-
-function SetHas(key) {
- if (!IS_SET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Set.prototype.has', this]);
- }
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- return %SetHas(this, key);
-}
-
-
-function SetDelete(key) {
- if (!IS_SET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Set.prototype.delete', this]);
- }
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- if (%SetHas(this, key)) {
- %SetDelete(this, key);
- return true;
- } else {
- return false;
- }
-}
-
-
-function SetGetSize() {
- if (!IS_SET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Set.prototype.size', this]);
- }
- return %SetGetSize(this);
-}
-
-
-function SetClear() {
- if (!IS_SET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Set.prototype.clear', this]);
- }
- // Replace the internal table with a new empty table.
- %SetInitialize(this);
-}
-
-
-function MapConstructor() {
- if (%_IsConstructCall()) {
- %MapInitialize(this);
- } else {
- return new $Map();
- }
-}
-
-
-function MapGet(key) {
- if (!IS_MAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Map.prototype.get', this]);
- }
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- return %MapGet(this, key);
-}
-
-
-function MapSet(key, value) {
- if (!IS_MAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Map.prototype.set', this]);
- }
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- return %MapSet(this, key, value);
-}
-
-
-function MapHas(key) {
- if (!IS_MAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Map.prototype.has', this]);
- }
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- return %MapHas(this, key);
-}
-
-
-function MapDelete(key) {
- if (!IS_MAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Map.prototype.delete', this]);
- }
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- return %MapDelete(this, key);
-}
-
-
-function MapGetSize() {
- if (!IS_MAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Map.prototype.size', this]);
- }
- return %MapGetSize(this);
-}
-
-
-function MapClear() {
- if (!IS_MAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Map.prototype.clear', this]);
- }
- // Replace the internal table with a new empty table.
- %MapInitialize(this);
-}
-
-
-function WeakMapConstructor() {
- if (%_IsConstructCall()) {
- %WeakMapInitialize(this);
- } else {
- return new $WeakMap();
- }
-}
-
-
-function WeakMapGet(key) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.get', this]);
- }
- if (!IS_SPEC_OBJECT(key)) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakMapGet(this, key);
-}
-
-
-function WeakMapSet(key, value) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.set', this]);
- }
- if (!IS_SPEC_OBJECT(key)) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakMapSet(this, key, value);
-}
-
-
-function WeakMapHas(key) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.has', this]);
- }
- if (!IS_SPEC_OBJECT(key)) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakMapHas(this, key);
-}
-
-
-function WeakMapDelete(key) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.delete', this]);
- }
- if (!IS_SPEC_OBJECT(key)) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakMapDelete(this, key);
-}
-
-// -------------------------------------------------------------------
-
-(function () {
- %CheckIsBootstrapping();
-
- // Set up the Set and Map constructor function.
- %SetCode($Set, SetConstructor);
- %SetCode($Map, MapConstructor);
-
- // Set up the constructor property on the Set and Map prototype object.
- %SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
- %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
-
- // Set up the non-enumerable functions on the Set prototype object.
- InstallGetter($Set.prototype, "size", SetGetSize);
- InstallFunctions($Set.prototype, DONT_ENUM, $Array(
- "add", SetAdd,
- "has", SetHas,
- "delete", SetDelete,
- "clear", SetClear
- ));
-
- // Set up the non-enumerable functions on the Map prototype object.
- InstallGetter($Map.prototype, "size", MapGetSize);
- InstallFunctions($Map.prototype, DONT_ENUM, $Array(
- "get", MapGet,
- "set", MapSet,
- "has", MapHas,
- "delete", MapDelete,
- "clear", MapClear
- ));
-
- // Set up the WeakMap constructor function.
- %SetCode($WeakMap, WeakMapConstructor);
-
- // Set up the constructor property on the WeakMap prototype object.
- %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
-
- // Set up the non-enumerable functions on the WeakMap prototype object.
- InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array(
- "get", WeakMapGet,
- "set", WeakMapSet,
- "has", WeakMapHas,
- "delete", WeakMapDelete
- ));
-})();
diff --git a/src/3rdparty/v8/src/compilation-cache.cc b/src/3rdparty/v8/src/compilation-cache.cc
deleted file mode 100644
index 904e84f..0000000
--- a/src/3rdparty/v8/src/compilation-cache.cc
+++ /dev/null
@@ -1,516 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "assembler.h"
-#include "compilation-cache.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-
-// The number of generations for each sub cache.
-// The number of ScriptGenerations is carefully chosen based on histograms.
-// See issue 458: http://code.google.com/p/v8/issues/detail?id=458
-static const int kScriptGenerations = 5;
-static const int kEvalGlobalGenerations = 2;
-static const int kEvalContextualGenerations = 2;
-static const int kRegExpGenerations = 2;
-
-// Initial size of each compilation cache table allocated.
-static const int kInitialCacheSize = 64;
-
-
-CompilationCache::CompilationCache(Isolate* isolate)
- : isolate_(isolate),
- script_(isolate, kScriptGenerations),
- eval_global_(isolate, kEvalGlobalGenerations),
- eval_contextual_(isolate, kEvalContextualGenerations),
- reg_exp_(isolate, kRegExpGenerations),
- enabled_(true) {
- CompilationSubCache* subcaches[kSubCacheCount] =
- {&script_, &eval_global_, &eval_contextual_, &reg_exp_};
- for (int i = 0; i < kSubCacheCount; ++i) {
- subcaches_[i] = subcaches[i];
- }
-}
-
-
-CompilationCache::~CompilationCache() {}
-
-
-static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) {
- CALL_HEAP_FUNCTION(isolate,
- CompilationCacheTable::Allocate(size),
- CompilationCacheTable);
-}
-
-
-Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
- ASSERT(generation < generations_);
- Handle<CompilationCacheTable> result;
- if (tables_[generation]->IsUndefined()) {
- result = AllocateTable(isolate(), kInitialCacheSize);
- tables_[generation] = *result;
- } else {
- CompilationCacheTable* table =
- CompilationCacheTable::cast(tables_[generation]);
- result = Handle<CompilationCacheTable>(table, isolate());
- }
- return result;
-}
-
-void CompilationSubCache::Age() {
- // Age the generations implicitly killing off the oldest.
- for (int i = generations_ - 1; i > 0; i--) {
- tables_[i] = tables_[i - 1];
- }
-
- // Set the first generation as unborn.
- tables_[0] = isolate()->heap()->undefined_value();
-}
-
-
-void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
- Object* undefined = isolate()->heap()->undefined_value();
- for (int i = 0; i < generations_; i++) {
- if (tables_[i] != undefined) {
- reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
- }
- }
-}
-
-
-void CompilationSubCache::Iterate(ObjectVisitor* v) {
- v->VisitPointers(&tables_[0], &tables_[generations_]);
-}
-
-
-void CompilationSubCache::Clear() {
- MemsetPointer(tables_, isolate()->heap()->undefined_value(), generations_);
-}
-
-
-void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
- // Probe the script generation tables. Make sure not to leak handles
- // into the caller's handle scope.
- { HandleScope scope(isolate());
- for (int generation = 0; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- table->Remove(*function_info);
- }
- }
-}
-
-
-CompilationCacheScript::CompilationCacheScript(Isolate* isolate,
- int generations)
- : CompilationSubCache(isolate, generations),
- script_histogram_(NULL),
- script_histogram_initialized_(false) { }
-
-
-// We only re-use a cached function for some script source code if the
-// script originates from the same place. This is to avoid issues
-// when reporting errors, etc.
-bool CompilationCacheScript::HasOrigin(
- Handle<SharedFunctionInfo> function_info,
- Handle<Object> name,
- int line_offset,
- int column_offset) {
- Handle<Script> script =
- Handle<Script>(Script::cast(function_info->script()), isolate());
- // If the script name isn't set, the boilerplate script should have
- // an undefined name to have the same origin.
- if (name.is_null()) {
- return script->name()->IsUndefined();
- }
- // Do the fast bailout checks first.
- if (line_offset != script->line_offset()->value()) return false;
- if (column_offset != script->column_offset()->value()) return false;
- // Check that both names are strings. If not, no match.
- if (!name->IsString() || !script->name()->IsString()) return false;
- // Compare the two name strings for equality.
- return String::cast(*name)->Equals(String::cast(script->name()));
-}
-
-
-// TODO(245): Need to allow identical code from different contexts to
-// be cached in the same script generation. Currently the first use
-// will be cached, but subsequent code from different source / line
-// won't.
-Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
- Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset,
- Handle<Context> context) {
- Object* result = NULL;
- int generation;
-
- // Probe the script generation tables. Make sure not to leak handles
- // into the caller's handle scope.
- { HandleScope scope(isolate());
- for (generation = 0; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- Handle<Object> probe(table->Lookup(*source, *context), isolate());
- if (probe->IsSharedFunctionInfo()) {
- Handle<SharedFunctionInfo> function_info =
- Handle<SharedFunctionInfo>::cast(probe);
- // Break when we've found a suitable shared function info that
- // matches the origin.
- if (HasOrigin(function_info, name, line_offset, column_offset)) {
- result = *function_info;
- break;
- }
- }
- }
- }
-
- if (!script_histogram_initialized_) {
- script_histogram_ = isolate()->stats_table()->CreateHistogram(
- "V8.ScriptCache",
- 0,
- kScriptGenerations,
- kScriptGenerations + 1);
- script_histogram_initialized_ = true;
- }
-
- if (script_histogram_ != NULL) {
- // The level NUMBER_OF_SCRIPT_GENERATIONS is equivalent to a cache miss.
- isolate()->stats_table()->AddHistogramSample(script_histogram_, generation);
- }
-
- // Once outside the manacles of the handle scope, we need to recheck
- // to see if we actually found a cached script. If so, we return a
- // handle created in the caller's handle scope.
- if (result != NULL) {
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result),
- isolate());
- ASSERT(HasOrigin(shared, name, line_offset, column_offset));
- // If the script was found in a later generation, we promote it to
- // the first generation to let it survive longer in the cache.
- if (generation != 0) Put(source, context, shared);
- isolate()->counters()->compilation_cache_hits()->Increment();
- return shared;
- } else {
- isolate()->counters()->compilation_cache_misses()->Increment();
- return Handle<SharedFunctionInfo>::null();
- }
-}
-
-
-MaybeObject* CompilationCacheScript::TryTablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info) {
- Handle<CompilationCacheTable> table = GetFirstTable();
- return table->Put(*source, *context, *function_info);
-}
-
-
-Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info) {
- CALL_HEAP_FUNCTION(isolate(),
- TryTablePut(source, context, function_info),
- CompilationCacheTable);
-}
-
-
-void CompilationCacheScript::Put(Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info) {
- HandleScope scope(isolate());
- SetFirstTable(TablePut(source, context, function_info));
-}
-
-
-Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
- Handle<String> source,
- Handle<Context> context,
- LanguageMode language_mode,
- int scope_position) {
- // Make sure not to leak the table into the surrounding handle
- // scope. Otherwise, we risk keeping old tables around even after
- // having cleared the cache.
- Object* result = NULL;
- int generation;
- { HandleScope scope(isolate());
- for (generation = 0; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- result = table->LookupEval(
- *source, *context, language_mode, scope_position);
- if (result->IsSharedFunctionInfo()) {
- break;
- }
- }
- }
- if (result->IsSharedFunctionInfo()) {
- Handle<SharedFunctionInfo>
- function_info(SharedFunctionInfo::cast(result), isolate());
- if (generation != 0) {
- Put(source, context, function_info, scope_position);
- }
- isolate()->counters()->compilation_cache_hits()->Increment();
- return function_info;
- } else {
- isolate()->counters()->compilation_cache_misses()->Increment();
- return Handle<SharedFunctionInfo>::null();
- }
-}
-
-
-MaybeObject* CompilationCacheEval::TryTablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info,
- int scope_position) {
- Handle<CompilationCacheTable> table = GetFirstTable();
- return table->PutEval(*source, *context, *function_info, scope_position);
-}
-
-
-Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info,
- int scope_position) {
- CALL_HEAP_FUNCTION(isolate(),
- TryTablePut(
- source, context, function_info, scope_position),
- CompilationCacheTable);
-}
-
-
-void CompilationCacheEval::Put(Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info,
- int scope_position) {
- HandleScope scope(isolate());
- SetFirstTable(TablePut(source, context, function_info, scope_position));
-}
-
-
-Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
- JSRegExp::Flags flags) {
- // Make sure not to leak the table into the surrounding handle
- // scope. Otherwise, we risk keeping old tables around even after
- // having cleared the cache.
- Object* result = NULL;
- int generation;
- { HandleScope scope(isolate());
- for (generation = 0; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- result = table->LookupRegExp(*source, flags);
- if (result->IsFixedArray()) {
- break;
- }
- }
- }
- if (result->IsFixedArray()) {
- Handle<FixedArray> data(FixedArray::cast(result), isolate());
- if (generation != 0) {
- Put(source, flags, data);
- }
- isolate()->counters()->compilation_cache_hits()->Increment();
- return data;
- } else {
- isolate()->counters()->compilation_cache_misses()->Increment();
- return Handle<FixedArray>::null();
- }
-}
-
-
-MaybeObject* CompilationCacheRegExp::TryTablePut(
- Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data) {
- Handle<CompilationCacheTable> table = GetFirstTable();
- return table->PutRegExp(*source, flags, *data);
-}
-
-
-Handle<CompilationCacheTable> CompilationCacheRegExp::TablePut(
- Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data) {
- CALL_HEAP_FUNCTION(isolate(),
- TryTablePut(source, flags, data),
- CompilationCacheTable);
-}
-
-
-void CompilationCacheRegExp::Put(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data) {
- HandleScope scope(isolate());
- SetFirstTable(TablePut(source, flags, data));
-}
-
-
-void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
- if (!IsEnabled()) return;
-
- eval_global_.Remove(function_info);
- eval_contextual_.Remove(function_info);
- script_.Remove(function_info);
-}
-
-
-Handle<SharedFunctionInfo> CompilationCache::LookupScript(
- Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset,
- Handle<Context> context) {
- if (!IsEnabled()) {
- return Handle<SharedFunctionInfo>::null();
- }
-
- return script_.Lookup(source, name, line_offset, column_offset, context);
-}
-
-
-Handle<SharedFunctionInfo> CompilationCache::LookupEval(
- Handle<String> source,
- Handle<Context> context,
- bool is_global,
- LanguageMode language_mode,
- int scope_position) {
- if (!IsEnabled()) {
- return Handle<SharedFunctionInfo>::null();
- }
-
- Handle<SharedFunctionInfo> result;
- if (is_global) {
- result = eval_global_.Lookup(
- source, context, language_mode, scope_position);
- } else {
- ASSERT(scope_position != RelocInfo::kNoPosition);
- result = eval_contextual_.Lookup(
- source, context, language_mode, scope_position);
- }
- return result;
-}
-
-
-Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
- JSRegExp::Flags flags) {
- if (!IsEnabled()) {
- return Handle<FixedArray>::null();
- }
-
- return reg_exp_.Lookup(source, flags);
-}
-
-
-void CompilationCache::PutScript(Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info) {
- if (!IsEnabled()) {
- return;
- }
-
- script_.Put(source, context, function_info);
-}
-
-
-void CompilationCache::PutEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- Handle<SharedFunctionInfo> function_info,
- int scope_position) {
- if (!IsEnabled()) {
- return;
- }
-
- HandleScope scope(isolate());
- if (is_global) {
- eval_global_.Put(source, context, function_info, scope_position);
- } else {
- ASSERT(scope_position != RelocInfo::kNoPosition);
- eval_contextual_.Put(source, context, function_info, scope_position);
- }
-}
-
-
-
-void CompilationCache::PutRegExp(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data) {
- if (!IsEnabled()) {
- return;
- }
-
- reg_exp_.Put(source, flags, data);
-}
-
-
-void CompilationCache::Clear() {
- for (int i = 0; i < kSubCacheCount; i++) {
- subcaches_[i]->Clear();
- }
-}
-
-
-void CompilationCache::Iterate(ObjectVisitor* v) {
- for (int i = 0; i < kSubCacheCount; i++) {
- subcaches_[i]->Iterate(v);
- }
-}
-
-
-void CompilationCache::IterateFunctions(ObjectVisitor* v) {
- for (int i = 0; i < kSubCacheCount; i++) {
- subcaches_[i]->IterateFunctions(v);
- }
-}
-
-
-void CompilationCache::MarkCompactPrologue() {
- for (int i = 0; i < kSubCacheCount; i++) {
- subcaches_[i]->Age();
- }
-}
-
-
-void CompilationCache::Enable() {
- enabled_ = true;
-}
-
-
-void CompilationCache::Disable() {
- enabled_ = false;
- Clear();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/compilation-cache.h b/src/3rdparty/v8/src/compilation-cache.h
deleted file mode 100644
index 7a236e8..0000000
--- a/src/3rdparty/v8/src/compilation-cache.h
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_COMPILATION_CACHE_H_
-#define V8_COMPILATION_CACHE_H_
-
-namespace v8 {
-namespace internal {
-
-// The compilation cache consists of several generational sub-caches which uses
-// this class as a base class. A sub-cache contains a compilation cache tables
-// for each generation of the sub-cache. Since the same source code string has
-// different compiled code for scripts and evals, we use separate sub-caches
-// for different compilation modes, to avoid retrieving the wrong result.
-class CompilationSubCache {
- public:
- CompilationSubCache(Isolate* isolate, int generations)
- : isolate_(isolate),
- generations_(generations) {
- tables_ = NewArray<Object*>(generations);
- }
-
- ~CompilationSubCache() { DeleteArray(tables_); }
-
- // Index for the first generation in the cache.
- static const int kFirstGeneration = 0;
-
- // Get the compilation cache tables for a specific generation.
- Handle<CompilationCacheTable> GetTable(int generation);
-
- // Accessors for first generation.
- Handle<CompilationCacheTable> GetFirstTable() {
- return GetTable(kFirstGeneration);
- }
- void SetFirstTable(Handle<CompilationCacheTable> value) {
- ASSERT(kFirstGeneration < generations_);
- tables_[kFirstGeneration] = *value;
- }
-
- // Age the sub-cache by evicting the oldest generation and creating a new
- // young generation.
- void Age();
-
- // GC support.
- void Iterate(ObjectVisitor* v);
- void IterateFunctions(ObjectVisitor* v);
-
- // Clear this sub-cache evicting all its content.
- void Clear();
-
- // Remove given shared function info from sub-cache.
- void Remove(Handle<SharedFunctionInfo> function_info);
-
- // Number of generations in this sub-cache.
- inline int generations() { return generations_; }
-
- protected:
- Isolate* isolate() { return isolate_; }
-
- private:
- Isolate* isolate_;
- int generations_; // Number of generations.
- Object** tables_; // Compilation cache tables - one for each generation.
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
-};
-
-
-// Sub-cache for scripts.
-class CompilationCacheScript : public CompilationSubCache {
- public:
- CompilationCacheScript(Isolate* isolate, int generations);
-
- Handle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset,
- Handle<Context> context);
- void Put(Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
- private:
- MUST_USE_RESULT MaybeObject* TryTablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
- bool HasOrigin(Handle<SharedFunctionInfo> function_info,
- Handle<Object> name,
- int line_offset,
- int column_offset);
-
- void* script_histogram_;
- bool script_histogram_initialized_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
-};
-
-
-// Sub-cache for eval scripts. Two caches for eval are used. One for eval calls
-// in native contexts and one for eval calls in other contexts. The cache
-// considers the following pieces of information when checking for matching
-// entries:
-// 1. The source string.
-// 2. The shared function info of the calling function.
-// 3. Whether the source should be compiled as strict code or as non-strict
-// code.
-// Note: Currently there are clients of CompileEval that always compile
-// non-strict code even if the calling function is a strict mode function.
-// More specifically these are the CompileString, DebugEvaluate and
-// DebugEvaluateGlobal runtime functions.
-// 4. The start position of the calling scope.
-class CompilationCacheEval: public CompilationSubCache {
- public:
- CompilationCacheEval(Isolate* isolate, int generations)
- : CompilationSubCache(isolate, generations) { }
-
- Handle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<Context> context,
- LanguageMode language_mode,
- int scope_position);
-
- void Put(Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info,
- int scope_position);
-
- private:
- MUST_USE_RESULT MaybeObject* TryTablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info,
- int scope_position);
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info,
- int scope_position);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
-};
-
-
-// Sub-cache for regular expressions.
-class CompilationCacheRegExp: public CompilationSubCache {
- public:
- CompilationCacheRegExp(Isolate* isolate, int generations)
- : CompilationSubCache(isolate, generations) { }
-
- Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
-
- void Put(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
- private:
- MUST_USE_RESULT MaybeObject* TryTablePut(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
-};
-
-
-// The compilation cache keeps shared function infos for compiled
-// scripts and evals. The shared function infos are looked up using
-// the source string as the key. For regular expressions the
-// compilation data is cached.
-class CompilationCache {
- public:
- // Finds the script shared function info for a source
- // string. Returns an empty handle if the cache doesn't contain a
- // script for the given source string with the right origin.
- Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset,
- Handle<Context> context);
-
- // Finds the shared function info for a source string for eval in a
- // given context. Returns an empty handle if the cache doesn't
- // contain a script for the given source string.
- Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- LanguageMode language_mode,
- int scope_position);
-
- // Returns the regexp data associated with the given regexp if it
- // is in cache, otherwise an empty handle.
- Handle<FixedArray> LookupRegExp(Handle<String> source,
- JSRegExp::Flags flags);
-
- // Associate the (source, kind) pair to the shared function
- // info. This may overwrite an existing mapping.
- void PutScript(Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
- // Associate the (source, context->closure()->shared(), kind) triple
- // with the shared function info. This may overwrite an existing mapping.
- void PutEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- Handle<SharedFunctionInfo> function_info,
- int scope_position);
-
- // Associate the (source, flags) pair to the given regexp data.
- // This may overwrite an existing mapping.
- void PutRegExp(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
-
- // Clear the cache - also used to initialize the cache at startup.
- void Clear();
-
- // Remove given shared function info from all caches.
- void Remove(Handle<SharedFunctionInfo> function_info);
-
- // GC support.
- void Iterate(ObjectVisitor* v);
- void IterateFunctions(ObjectVisitor* v);
-
- // Notify the cache that a mark-sweep garbage collection is about to
- // take place. This is used to retire entries from the cache to
- // avoid keeping them alive too long without using them.
- void MarkCompactPrologue();
-
- // Enable/disable compilation cache. Used by debugger to disable compilation
- // cache during debugging to make sure new scripts are always compiled.
- void Enable();
- void Disable();
-
- private:
- explicit CompilationCache(Isolate* isolate);
- ~CompilationCache();
-
- HashMap* EagerOptimizingSet();
-
- // The number of sub caches covering the different types to cache.
- static const int kSubCacheCount = 4;
-
- bool IsEnabled() { return FLAG_compilation_cache && enabled_; }
-
- Isolate* isolate() { return isolate_; }
-
- Isolate* isolate_;
-
- CompilationCacheScript script_;
- CompilationCacheEval eval_global_;
- CompilationCacheEval eval_contextual_;
- CompilationCacheRegExp reg_exp_;
- CompilationSubCache* subcaches_[kSubCacheCount];
-
- // Current enable state of the compilation cache.
- bool enabled_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(CompilationCache);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_COMPILATION_CACHE_H_
diff --git a/src/3rdparty/v8/src/compiler-intrinsics.h b/src/3rdparty/v8/src/compiler-intrinsics.h
deleted file mode 100644
index c1693b0..0000000
--- a/src/3rdparty/v8/src/compiler-intrinsics.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_COMPILER_INTRINSICS_H_
-#define V8_COMPILER_INTRINSICS_H_
-
-#if defined(_WIN32_WCE)
-#include <cmnintrin.h>
-#endif
-
-namespace v8 {
-namespace internal {
-
-class CompilerIntrinsics {
- public:
- // Returns number of zero bits preceding least significant 1 bit.
- // Undefined for zero value.
- INLINE(static int CountTrailingZeros(uint32_t value));
-
- // Returns number of zero bits following most significant 1 bit.
- // Undefined for zero value.
- INLINE(static int CountLeadingZeros(uint32_t value));
-
- // Returns the number of bits set.
- INLINE(static int CountSetBits(uint32_t value));
-};
-
-#ifdef __GNUC__
-int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
- return __builtin_ctz(value);
-}
-
-int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
- return __builtin_clz(value);
-}
-
-int CompilerIntrinsics::CountSetBits(uint32_t value) {
- return __builtin_popcount(value);
-}
-
-#elif defined(_MSC_VER) && !defined(_WIN32_WCE)
-
-#pragma intrinsic(_BitScanForward)
-#pragma intrinsic(_BitScanReverse)
-
-int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
- unsigned long result; //NOLINT
- _BitScanForward(&result, static_cast<long>(value)); //NOLINT
- return static_cast<int>(result);
-}
-
-int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
- unsigned long result; //NOLINT
- _BitScanReverse(&result, static_cast<long>(value)); //NOLINT
- return 31 - static_cast<int>(result);
-}
-
-#elif defined(_WIN32_WCE)
-int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
- // taken from http://graphics.stanford.edu/~seander/bithacks.html#ZerosOnRightFloatCast
- float f = (float)(value & -value); // cast the least significant bit in v to a float
- return (*(uint32_t *)&f >> 23) - 0x7f;
-}
-
-int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
- return _CountLeadingZeros(value);
-}
-#else
-#error Unsupported compiler
-#endif
-
-#if defined(_MSC_VER)
-int CompilerIntrinsics::CountSetBits(uint32_t value) {
- // Manually count set bits.
- value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
- value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
- value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
- value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
- value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
- return value;
-}
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_COMPILER_INTRINSICS_H_
diff --git a/src/3rdparty/v8/src/compiler.cc b/src/3rdparty/v8/src/compiler.cc
deleted file mode 100644
index 1e53cfe..0000000
--- a/src/3rdparty/v8/src/compiler.cc
+++ /dev/null
@@ -1,1144 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "compiler.h"
-
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "compilation-cache.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "gdb-jit.h"
-#include "hydrogen.h"
-#include "isolate-inl.h"
-#include "lithium.h"
-#include "liveedit.h"
-#include "parser.h"
-#include "rewriter.h"
-#include "runtime-profiler.h"
-#include "scanner-character-streams.h"
-#include "scopeinfo.h"
-#include "scopes.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE)),
- script_(script),
- osr_ast_id_(BailoutId::None()) {
- Initialize(script->GetIsolate(), BASE, zone);
-}
-
-
-CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
- Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
- shared_info_(shared_info),
- script_(Handle<Script>(Script::cast(shared_info->script()))),
- osr_ast_id_(BailoutId::None()) {
- Initialize(script_->GetIsolate(), BASE, zone);
-}
-
-
-CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
- closure_(closure),
- shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
- script_(Handle<Script>(Script::cast(shared_info_->script()))),
- context_(closure->context()),
- osr_ast_id_(BailoutId::None()) {
- Initialize(script_->GetIsolate(), BASE, zone);
-}
-
-
-CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
- Isolate* isolate, Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) |
- IsLazy::encode(true)),
- osr_ast_id_(BailoutId::None()) {
- Initialize(isolate, STUB, zone);
- code_stub_ = stub;
-}
-
-
-void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
- isolate_ = isolate;
- function_ = NULL;
- scope_ = NULL;
- global_scope_ = NULL;
- extension_ = NULL;
- pre_parse_data_ = NULL;
- zone_ = zone;
- deferred_handles_ = NULL;
- code_stub_ = NULL;
- prologue_offset_ = kPrologueOffsetNotSet;
- opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
- if (mode == STUB) {
- mode_ = STUB;
- return;
- }
- mode_ = V8::UseCrankshaft() ? mode : NONOPT;
- if (script_->type()->value() == Script::TYPE_NATIVE) {
- MarkAsNative();
- }
- if (!shared_info_.is_null()) {
- ASSERT(language_mode() == CLASSIC_MODE);
- SetLanguageMode(shared_info_->language_mode());
- }
- if (!shared_info_.is_null() && shared_info_->qml_mode()) {
- MarkAsQmlMode();
- }
- set_bailout_reason("unknown");
-}
-
-
-CompilationInfo::~CompilationInfo() {
- delete deferred_handles_;
-}
-
-
-int CompilationInfo::num_parameters() const {
- if (IsStub()) {
- return 0;
- } else {
- return scope()->num_parameters();
- }
-}
-
-
-int CompilationInfo::num_heap_slots() const {
- if (IsStub()) {
- return 0;
- } else {
- return scope()->num_heap_slots();
- }
-}
-
-
-Code::Flags CompilationInfo::flags() const {
- if (IsStub()) {
- return Code::ComputeFlags(Code::COMPILED_STUB);
- } else {
- return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
- }
-}
-
-
-// Disable optimization for the rest of the compilation pipeline.
-void CompilationInfo::DisableOptimization() {
- bool is_optimizable_closure =
- FLAG_optimize_closures &&
- closure_.is_null() &&
- !scope_->HasTrivialOuterContext() &&
- !scope_->outer_scope_calls_non_strict_eval() &&
- !scope_->inside_with();
- SetMode(is_optimizable_closure ? BASE : NONOPT);
-}
-
-
-// Primitive functions are unlikely to be picked up by the stack-walking
-// profiler, so they trigger their own optimization when they're called
-// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
-bool CompilationInfo::ShouldSelfOptimize() {
- return FLAG_self_optimization &&
- FLAG_crankshaft &&
- !function()->flags()->Contains(kDontSelfOptimize) &&
- !function()->flags()->Contains(kDontOptimize) &&
- function()->scope()->AllowsLazyCompilation() &&
- (shared_info().is_null() || !shared_info()->optimization_disabled());
-}
-
-
-void CompilationInfo::AbortOptimization() {
- Handle<Code> code(shared_info()->code());
- SetCode(code);
-}
-
-
-// Determine whether to use the full compiler for all code. If the flag
-// --always-full-compiler is specified this is the case. For the virtual frame
-// based compiler the full compiler is also used if a debugger is connected, as
-// the code from the full compiler supports mode precise break points. For the
-// crankshaft adaptive compiler debugging the optimized code is not possible at
-// all. However crankshaft support recompilation of functions, so in this case
-// the full compiler need not be be used if a debugger is attached, but only if
-// break points has actually been set.
-static bool IsDebuggerActive(Isolate* isolate) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- return V8::UseCrankshaft() ?
- isolate->debug()->has_break_points() :
- isolate->debugger()->IsDebuggerActive();
-#else
- return false;
-#endif
-}
-
-
-static bool AlwaysFullCompiler(Isolate* isolate) {
- return FLAG_always_full_compiler || IsDebuggerActive(isolate);
-}
-
-
-void OptimizingCompiler::RecordOptimizationStats() {
- Handle<JSFunction> function = info()->closure();
- int opt_count = function->shared()->opt_count();
- function->shared()->set_opt_count(opt_count + 1);
- double ms_creategraph =
- static_cast<double>(time_taken_to_create_graph_) / 1000;
- double ms_optimize = static_cast<double>(time_taken_to_optimize_) / 1000;
- double ms_codegen = static_cast<double>(time_taken_to_codegen_) / 1000;
- if (FLAG_trace_opt) {
- PrintF("[optimizing: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
- PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
- ms_codegen);
- }
- if (FLAG_trace_opt_stats) {
- static double compilation_time = 0.0;
- static int compiled_functions = 0;
- static int code_size = 0;
-
- compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
- compiled_functions++;
- code_size += function->shared()->SourceSize();
- PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
- compiled_functions,
- code_size,
- compilation_time);
- }
- if (FLAG_hydrogen_stats) {
- HStatistics::Instance()->IncrementSubtotals(time_taken_to_create_graph_,
- time_taken_to_optimize_,
- time_taken_to_codegen_);
- }
-}
-
-
-// A return value of true indicates the compilation pipeline is still
-// going, not necessarily that we optimized the code.
-static bool MakeCrankshaftCode(CompilationInfo* info) {
- OptimizingCompiler compiler(info);
- OptimizingCompiler::Status status = compiler.CreateGraph();
-
- if (status != OptimizingCompiler::SUCCEEDED) {
- return status != OptimizingCompiler::FAILED;
- }
- status = compiler.OptimizeGraph();
- if (status != OptimizingCompiler::SUCCEEDED) {
- status = compiler.AbortOptimization();
- return status != OptimizingCompiler::FAILED;
- }
- status = compiler.GenerateAndInstallCode();
- return status != OptimizingCompiler::FAILED;
-}
-
-
-OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
- ASSERT(V8::UseCrankshaft());
- ASSERT(info()->IsOptimizing());
- ASSERT(!info()->IsCompilingForDebugging());
-
- // We should never arrive here if there is no code object on the
- // shared function object.
- Handle<Code> code(info()->shared_info()->code());
- ASSERT(code->kind() == Code::FUNCTION);
-
- // We should never arrive here if optimization has been disabled on the
- // shared function info.
- ASSERT(!info()->shared_info()->optimization_disabled());
-
- // Fall back to using the full code generator if it's not possible
- // to use the Hydrogen-based optimizing compiler. We already have
- // generated code for this from the shared function object.
- if (AlwaysFullCompiler(info()->isolate())) {
- info()->SetCode(code);
- return SetLastStatus(BAILED_OUT);
- }
-
- // Limit the number of times we re-compile a functions with
- // the optimizing compiler.
- const int kMaxOptCount =
- FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
- if (info()->opt_count() > kMaxOptCount) {
- info()->set_bailout_reason("optimized too many times");
- return AbortOptimization();
- }
-
- // Due to an encoding limit on LUnallocated operands in the Lithium
- // language, we cannot optimize functions with too many formal parameters
- // or perform on-stack replacement for function with too many
- // stack-allocated local variables.
- //
- // The encoding is as a signed value, with parameters and receiver using
- // the negative indices and locals the non-negative ones.
- const int parameter_limit = -LUnallocated::kMinFixedIndex;
- Scope* scope = info()->scope();
- if ((scope->num_parameters() + 1) > parameter_limit) {
- info()->set_bailout_reason("too many parameters");
- return AbortOptimization();
- }
-
- const int locals_limit = LUnallocated::kMaxFixedIndex;
- if (!info()->osr_ast_id().IsNone() &&
- scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
- info()->set_bailout_reason("too many parameters/locals");
- return AbortOptimization();
- }
-
- // Take --hydrogen-filter into account.
- Handle<String> name = info()->function()->debug_name();
- if (*FLAG_hydrogen_filter != '\0') {
- Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
- if ((filter[0] == '-'
- && name->IsUtf8EqualTo(filter.SubVector(1, filter.length())))
- || (filter[0] != '-' && !name->IsUtf8EqualTo(filter))) {
- info()->SetCode(code);
- return SetLastStatus(BAILED_OUT);
- }
- }
-
- // Recompile the unoptimized version of the code if the current version
- // doesn't have deoptimization support. Alternatively, we may decide to
- // run the full code generator to get a baseline for the compile-time
- // performance of the hydrogen-based compiler.
- bool should_recompile = !info()->shared_info()->has_deoptimization_support();
- if (should_recompile || FLAG_hydrogen_stats) {
- HPhase phase(HPhase::kFullCodeGen);
- CompilationInfoWithZone unoptimized(info()->shared_info());
- // Note that we use the same AST that we will use for generating the
- // optimized code.
- unoptimized.SetFunction(info()->function());
- unoptimized.SetScope(info()->scope());
- unoptimized.SetContext(info()->context());
- if (should_recompile) unoptimized.EnableDeoptimizationSupport();
- bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
- if (should_recompile) {
- if (!succeeded) return SetLastStatus(FAILED);
- Handle<SharedFunctionInfo> shared = info()->shared_info();
- shared->EnableDeoptimizationSupport(*unoptimized.code());
- // The existing unoptimized code was replaced with the new one.
- Compiler::RecordFunctionCompilation(
- Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
- }
- }
-
- // Check that the unoptimized, shared code is ready for
- // optimizations. When using the always_opt flag we disregard the
- // optimizable marker in the code object and optimize anyway. This
- // is safe as long as the unoptimized code has deoptimization
- // support.
- ASSERT(FLAG_always_opt || code->optimizable());
- ASSERT(info()->shared_info()->has_deoptimization_support());
-
- if (FLAG_trace_hydrogen) {
- PrintF("-----------------------------------------------------------\n");
- PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
- HTracer::Instance()->TraceCompilation(info());
- }
- Handle<Context> native_context(
- info()->closure()->context()->native_context());
- oracle_ = new(info()->zone()) TypeFeedbackOracle(
- code, native_context, info()->isolate(), info()->zone());
- graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info(), oracle_);
-
- Timer t(this, &time_taken_to_create_graph_);
- graph_ = graph_builder_->CreateGraph();
-
- if (info()->isolate()->has_pending_exception()) {
- info()->SetCode(Handle<Code>::null());
- return SetLastStatus(FAILED);
- }
-
- // The function being compiled may have bailed out due to an inline
- // candidate bailing out. In such a case, we don't disable
- // optimization on the shared_info.
- ASSERT(!graph_builder_->inline_bailout() || graph_ == NULL);
- if (graph_ == NULL) {
- if (graph_builder_->inline_bailout()) {
- info_->AbortOptimization();
- return SetLastStatus(BAILED_OUT);
- } else {
- return AbortOptimization();
- }
- }
-
- return SetLastStatus(SUCCEEDED);
-}
-
-OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
- AssertNoAllocation no_gc;
- NoHandleAllocation no_handles(isolate());
- NoHandleDereference no_deref(isolate());
-
- ASSERT(last_status() == SUCCEEDED);
- Timer t(this, &time_taken_to_optimize_);
- ASSERT(graph_ != NULL);
- SmartArrayPointer<char> bailout_reason;
- if (!graph_->Optimize(&bailout_reason)) {
- if (!bailout_reason.is_empty()) graph_builder_->Bailout(*bailout_reason);
- return SetLastStatus(BAILED_OUT);
- } else {
- chunk_ = LChunk::NewChunk(graph_);
- if (chunk_ == NULL) {
- return SetLastStatus(BAILED_OUT);
- }
- }
- return SetLastStatus(SUCCEEDED);
-}
-
-
-OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
- ASSERT(last_status() == SUCCEEDED);
- { // Scope for timer.
- Timer timer(this, &time_taken_to_codegen_);
- ASSERT(chunk_ != NULL);
- ASSERT(graph_ != NULL);
- Handle<Code> optimized_code = chunk_->Codegen(Code::OPTIMIZED_FUNCTION);
- if (optimized_code.is_null()) {
- info()->set_bailout_reason("code generation failed");
- return AbortOptimization();
- }
- info()->SetCode(optimized_code);
- }
- RecordOptimizationStats();
- return SetLastStatus(SUCCEEDED);
-}
-
-
-static bool GenerateCode(CompilationInfo* info) {
- bool is_optimizing = V8::UseCrankshaft() &&
- !info->IsCompilingForDebugging() &&
- info->IsOptimizing();
- if (is_optimizing) {
- Logger::TimerEventScope timer(
- info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
- return MakeCrankshaftCode(info);
- } else {
- if (info->IsOptimizing()) {
- // Have the CompilationInfo decide if the compilation should be
- // BASE or NONOPT.
- info->DisableOptimization();
- }
- Logger::TimerEventScope timer(
- info->isolate(), Logger::TimerEventScope::v8_compile_full_code);
- return FullCodeGenerator::MakeCode(info);
- }
-}
-
-
-static bool MakeCode(CompilationInfo* info) {
- // Precondition: code has been parsed. Postcondition: the code field in
- // the compilation info is set if compilation succeeded.
- ASSERT(info->function() != NULL);
- return Rewriter::Rewrite(info) && Scope::Analyze(info) && GenerateCode(info);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
- // Precondition: code has been parsed. Postcondition: the code field in
- // the compilation info is set if compilation succeeded.
- bool succeeded = MakeCode(info);
- if (!info->shared_info().is_null()) {
- Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(),
- info->zone());
- info->shared_info()->set_scope_info(*scope_info);
- }
- return succeeded;
-}
-#endif
-
-
-static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
- bool allow_lazy_without_ctx = false) {
- return LiveEditFunctionTracker::IsActive(info->isolate()) ||
- (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
-}
-
-
-static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
- Isolate* isolate = info->isolate();
- ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
- PostponeInterruptsScope postpone(isolate);
-
- ASSERT(!isolate->native_context().is_null());
- Handle<Script> script = info->script();
- // TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
- FixedArray* array = isolate->native_context()->embedder_data();
- script->set_context_data(array->get(0));
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (info->is_eval()) {
- Script::CompilationType compilation_type = Script::COMPILATION_TYPE_EVAL;
- script->set_compilation_type(Smi::FromInt(compilation_type));
- // For eval scripts add information on the function from which eval was
- // called.
- if (info->is_eval()) {
- StackTraceFrameIterator it(isolate);
- if (!it.done()) {
- script->set_eval_from_shared(
- JSFunction::cast(it.frame()->function())->shared());
- Code* code = it.frame()->LookupCode();
- int offset = static_cast<int>(
- it.frame()->pc() - code->instruction_start());
- script->set_eval_from_instructions_offset(Smi::FromInt(offset));
- }
- }
- }
-
- // Notify debugger
- isolate->debugger()->OnBeforeCompile(script);
-#endif
-
- // Only allow non-global compiles for eval.
- ASSERT(info->is_eval() || info->is_global());
- ParsingFlags flags = kNoParsingFlags;
- if ((info->pre_parse_data() != NULL ||
- String::cast(script->source())->length() > FLAG_min_preparse_length) &&
- !DebuggerWantsEagerCompilation(info)) {
- flags = kAllowLazy;
- }
- if (!ParserApi::Parse(info, flags)) {
- return Handle<SharedFunctionInfo>::null();
- }
-
- // Measure how long it takes to do the compilation; only take the
- // rest of the function into account to avoid overlap with the
- // parsing statistics.
- HistogramTimer* rate = info->is_eval()
- ? info->isolate()->counters()->compile_eval()
- : info->isolate()->counters()->compile();
- HistogramTimerScope timer(rate);
-
- // Compile the code.
- FunctionLiteral* lit = info->function();
- LiveEditFunctionTracker live_edit_tracker(isolate, lit);
- if (!MakeCode(info)) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
- return Handle<SharedFunctionInfo>::null();
- }
-
- // Allocate function.
- ASSERT(!info->code().is_null());
- Handle<SharedFunctionInfo> result =
- isolate->factory()->NewSharedFunctionInfo(
- lit->name(),
- lit->materialized_literal_count(),
- info->code(),
- ScopeInfo::Create(info->scope(), info->zone()));
-
- ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
- Compiler::SetFunctionInfo(result, lit, true, script);
-
- if (script->name()->IsString()) {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- String::cast(script->name())));
- GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
- script,
- info->code(),
- info));
- } else {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- isolate->heap()->empty_string()));
- GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
- }
-
- // Hint to the runtime system used when allocating space for initial
- // property space by setting the expected number of properties for
- // the instances of the function.
- SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
-
- script->set_compilation_state(
- Smi::FromInt(Script::COMPILATION_STATE_COMPILED));
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Notify debugger
- isolate->debugger()->OnAfterCompile(
- script, Debugger::NO_AFTER_COMPILE_FLAGS);
-#endif
-
- live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
-
- return result;
-}
-
-
-Handle<SharedFunctionInfo> Compiler::Compile(
- Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- Handle<Context> context,
- v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Handle<Object> script_data,
- NativesFlag natives,
- v8::Script::CompileFlags compile_flags) {
- Isolate* isolate = source->GetIsolate();
- int source_length = source->length();
- isolate->counters()->total_load_size()->Increment(source_length);
- isolate->counters()->total_compile_size()->Increment(source_length);
-
- // The VM is in the COMPILER state until exiting this function.
- VMState state(isolate, COMPILER);
-
- CompilationCache* compilation_cache = isolate->compilation_cache();
-
- // Do a lookup in the compilation cache but not for extensions.
- Handle<SharedFunctionInfo> result;
- if (extension == NULL) {
- result = compilation_cache->LookupScript(source,
- script_name,
- line_offset,
- column_offset,
- context);
- }
-
- if (result.is_null()) {
- // No cache entry found. Do pre-parsing, if it makes sense, and compile
- // the script.
- // Building preparse data that is only used immediately after is only a
- // saving if we might skip building the AST for lazily compiled functions.
- // I.e., preparse data isn't relevant when the lazy flag is off, and
- // for small sources, odds are that there aren't many functions
- // that would be compiled lazily anyway, so we skip the preparse step
- // in that case too.
-
- // Create a script object describing the script to be compiled.
- Handle<Script> script = FACTORY->NewScript(source);
- if (natives == NATIVES_CODE || compile_flags & v8::Script::NativeMode) {
- script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- }
- if (!script_name.is_null()) {
- script->set_name(*script_name);
- script->set_line_offset(Smi::FromInt(line_offset));
- script->set_column_offset(Smi::FromInt(column_offset));
- }
-
- script->set_data(script_data.is_null() ? HEAP->undefined_value()
- : *script_data);
-
- // Compile the function and add it to the cache.
- CompilationInfoWithZone info(script);
- info.MarkAsGlobal();
- info.SetExtension(extension);
- info.SetPreParseData(pre_data);
- info.SetContext(context);
- if (FLAG_use_strict) {
- info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
- }
- if (compile_flags & v8::Script::QmlMode) info.MarkAsQmlMode();
- result = MakeFunctionInfo(&info);
- if (extension == NULL && !result.is_null() && !result->dont_cache()) {
- compilation_cache->PutScript(source, context, result);
- }
- } else {
- if (result->ic_age() != HEAP->global_ic_age()) {
- result->ResetForNewContext(HEAP->global_ic_age());
- }
- }
-
- if (result.is_null()) isolate->ReportPendingMessages();
- return result;
-}
-
-
-Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- LanguageMode language_mode,
- int scope_position,
- bool qml_mode) {
- Isolate* isolate = source->GetIsolate();
- int source_length = source->length();
- isolate->counters()->total_eval_size()->Increment(source_length);
- isolate->counters()->total_compile_size()->Increment(source_length);
-
- // The VM is in the COMPILER state until exiting this function.
- VMState state(isolate, COMPILER);
-
- // Do a lookup in the compilation cache; if the entry is not there, invoke
- // the compiler and add the result to the cache.
- Handle<SharedFunctionInfo> result;
- CompilationCache* compilation_cache = isolate->compilation_cache();
- result = compilation_cache->LookupEval(source,
- context,
- is_global,
- language_mode,
- scope_position);
-
- if (result.is_null()) {
- // Create a script object describing the script to be compiled.
- Handle<Script> script = isolate->factory()->NewScript(source);
- CompilationInfoWithZone info(script);
- info.MarkAsEval();
- if (is_global) info.MarkAsGlobal();
- info.SetLanguageMode(language_mode);
- if (qml_mode) info.MarkAsQmlMode();
- info.SetContext(context);
- result = MakeFunctionInfo(&info);
- if (!result.is_null()) {
- // Explicitly disable optimization for eval code. We're not yet prepared
- // to handle eval-code in the optimizing compiler.
- result->DisableOptimization("eval");
-
- // If caller is strict mode, the result must be in strict mode or
- // extended mode as well, but not the other way around. Consider:
- // eval("'use strict'; ...");
- ASSERT(language_mode != STRICT_MODE || !result->is_classic_mode());
- // If caller is in extended mode, the result must also be in
- // extended mode.
- ASSERT(language_mode != EXTENDED_MODE ||
- result->is_extended_mode());
- if (!result->dont_cache()) {
- compilation_cache->PutEval(
- source, context, is_global, result, scope_position);
- }
- }
- } else {
- if (result->ic_age() != HEAP->global_ic_age()) {
- result->ResetForNewContext(HEAP->global_ic_age());
- }
- }
-
- return result;
-}
-
-
-static bool InstallFullCode(CompilationInfo* info) {
- // Update the shared function info with the compiled code and the
- // scope info. Please note, that the order of the shared function
- // info initialization is important since set_scope_info might
- // trigger a GC, causing the ASSERT below to be invalid if the code
- // was flushed. By setting the code object last we avoid this.
- Handle<SharedFunctionInfo> shared = info->shared_info();
- Handle<Code> code = info->code();
- Handle<JSFunction> function = info->closure();
- Handle<ScopeInfo> scope_info =
- ScopeInfo::Create(info->scope(), info->zone());
- shared->set_scope_info(*scope_info);
- shared->ReplaceCode(*code);
- if (!function.is_null()) {
- function->ReplaceCode(*code);
- ASSERT(!function->IsOptimized());
- }
-
- // Set the expected number of properties for instances.
- FunctionLiteral* lit = info->function();
- int expected = lit->expected_property_count();
- SetExpectedNofPropertiesFromEstimate(shared, expected);
-
- // Set the optimization hints after performing lazy compilation, as
- // these are not set when the function is set up as a lazily
- // compiled function.
- shared->SetThisPropertyAssignmentsInfo(
- lit->has_only_simple_this_property_assignments(),
- *lit->this_property_assignments());
-
- // Check the function has compiled code.
- ASSERT(shared->is_compiled());
- shared->set_code_age(0);
- shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
- shared->set_dont_inline(lit->flags()->Contains(kDontInline));
- shared->set_ast_node_count(lit->ast_node_count());
-
- if (V8::UseCrankshaft() &&
- !function.is_null() &&
- !shared->optimization_disabled()) {
- // If we're asked to always optimize, we compile the optimized
- // version of the function right away - unless the debugger is
- // active as it makes no sense to compile optimized code then.
- if (FLAG_always_opt &&
- !Isolate::Current()->DebuggerHasBreakPoints()) {
- CompilationInfoWithZone optimized(function);
- optimized.SetOptimizing(BailoutId::None());
- return Compiler::CompileLazy(&optimized);
- }
- }
- return true;
-}
-
-
-static void InstallCodeCommon(CompilationInfo* info) {
- Handle<SharedFunctionInfo> shared = info->shared_info();
- Handle<Code> code = info->code();
- ASSERT(!code.is_null());
-
- // Set optimizable to false if this is disallowed by the shared
- // function info, e.g., we might have flushed the code and must
- // reset this bit when lazy compiling the code again.
- if (shared->optimization_disabled()) code->set_optimizable(false);
-
- Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
-}
-
-
-static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
- Handle<Code> code = info->code();
- if (FLAG_cache_optimized_code &&
- info->osr_ast_id().IsNone() &&
- code->kind() == Code::OPTIMIZED_FUNCTION) {
- Handle<JSFunction> function = info->closure();
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<FixedArray> literals(function->literals());
- Handle<Context> native_context(function->context()->native_context());
- SharedFunctionInfo::AddToOptimizedCodeMap(
- shared, native_context, code, literals);
- }
-}
-
-
-static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
- if (FLAG_cache_optimized_code &&
- info->osr_ast_id().IsNone() &&
- info->IsOptimizing()) {
- Handle<SharedFunctionInfo> shared = info->shared_info();
- Handle<JSFunction> function = info->closure();
- ASSERT(!function.is_null());
- Handle<Context> native_context(function->context()->native_context());
- int index = shared->SearchOptimizedCodeMap(*native_context);
- if (index > 0) {
- if (FLAG_trace_opt) {
- PrintF("[found optimized code for: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(*function));
- }
- // Caching of optimized code enabled and optimized code found.
- shared->InstallFromOptimizedCodeMap(*function, index);
- return true;
- }
- }
- return false;
-}
-
-
-bool Compiler::CompileLazy(CompilationInfo* info) {
- Isolate* isolate = info->isolate();
-
- ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
-
- // The VM is in the COMPILER state until exiting this function.
- VMState state(isolate, COMPILER);
-
- PostponeInterruptsScope postpone(isolate);
-
- Handle<SharedFunctionInfo> shared = info->shared_info();
- int compiled_size = shared->end_position() - shared->start_position();
- isolate->counters()->total_compile_size()->Increment(compiled_size);
-
- if (InstallCodeFromOptimizedCodeMap(info)) return true;
-
- // Generate the AST for the lazily compiled function.
- if (ParserApi::Parse(info, kNoParsingFlags)) {
- // Measure how long it takes to do the lazy compilation; only take the
- // rest of the function into account to avoid overlap with the lazy
- // parsing statistics.
- HistogramTimerScope timer(isolate->counters()->compile_lazy());
-
- // After parsing we know the function's language mode. Remember it.
- LanguageMode language_mode = info->function()->language_mode();
- info->SetLanguageMode(language_mode);
- shared->set_language_mode(language_mode);
-
- // After parsing we know function's qml mode. Remember it.
- if (info->function()->qml_mode()) {
- shared->set_qml_mode(true);
- info->MarkAsQmlMode();
- }
-
- // Compile the code.
- if (!MakeCode(info)) {
- if (!isolate->has_pending_exception()) {
- isolate->StackOverflow();
- }
- } else {
- InstallCodeCommon(info);
-
- if (info->IsOptimizing()) {
- Handle<Code> code = info->code();
- ASSERT(shared->scope_info() != ScopeInfo::Empty(isolate));
- info->closure()->ReplaceCode(*code);
- InsertCodeIntoOptimizedCodeMap(info);
- return true;
- } else {
- return InstallFullCode(info);
- }
- }
- }
-
- ASSERT(info->code().is_null());
- return false;
-}
-
-
-void Compiler::RecompileParallel(Handle<JSFunction> closure) {
- if (closure->IsInRecompileQueue()) return;
- ASSERT(closure->IsMarkedForParallelRecompilation());
-
- Isolate* isolate = closure->GetIsolate();
- // Here we prepare compile data for the parallel recompilation thread, but
- // this still happens synchronously and interrupts execution.
- Logger::TimerEventScope timer(
- isolate, Logger::TimerEventScope::v8_recompile_synchronous);
-
- if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
- if (FLAG_trace_parallel_recompilation) {
- PrintF(" ** Compilation queue, will retry opting on next run.\n");
- }
- return;
- }
-
- SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
- VMState state(isolate, PARALLEL_COMPILER);
- PostponeInterruptsScope postpone(isolate);
-
- Handle<SharedFunctionInfo> shared = info->shared_info();
- int compiled_size = shared->end_position() - shared->start_position();
- isolate->counters()->total_compile_size()->Increment(compiled_size);
- info->SetOptimizing(BailoutId::None());
-
- {
- CompilationHandleScope handle_scope(*info);
-
- if (!FLAG_manual_parallel_recompilation &&
- InstallCodeFromOptimizedCodeMap(*info)) {
- return;
- }
-
- if (ParserApi::Parse(*info, kNoParsingFlags)) {
- LanguageMode language_mode = info->function()->language_mode();
- info->SetLanguageMode(language_mode);
- shared->set_language_mode(language_mode);
- info->SaveHandles();
-
- if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
- OptimizingCompiler* compiler =
- new(info->zone()) OptimizingCompiler(*info);
- OptimizingCompiler::Status status = compiler->CreateGraph();
- if (status == OptimizingCompiler::SUCCEEDED) {
- isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
- shared->code()->set_profiler_ticks(0);
- closure->ReplaceCode(isolate->builtins()->builtin(
- Builtins::kInRecompileQueue));
- info.Detach();
- } else if (status == OptimizingCompiler::BAILED_OUT) {
- isolate->clear_pending_exception();
- InstallFullCode(*info);
- }
- }
- }
- }
-
- if (isolate->has_pending_exception()) {
- isolate->clear_pending_exception();
- }
-}
-
-
-void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
- SmartPointer<CompilationInfo> info(optimizing_compiler->info());
- Isolate* isolate = info->isolate();
- VMState state(isolate, PARALLEL_COMPILER);
- Logger::TimerEventScope timer(
- isolate, Logger::TimerEventScope::v8_recompile_synchronous);
- // If crankshaft succeeded, install the optimized code else install
- // the unoptimized code.
- OptimizingCompiler::Status status = optimizing_compiler->last_status();
- if (status != OptimizingCompiler::SUCCEEDED) {
- optimizing_compiler->info()->set_bailout_reason(
- "failed/bailed out last time");
- status = optimizing_compiler->AbortOptimization();
- } else {
- status = optimizing_compiler->GenerateAndInstallCode();
- ASSERT(status == OptimizingCompiler::SUCCEEDED ||
- status == OptimizingCompiler::BAILED_OUT);
- }
-
- InstallCodeCommon(*info);
- if (status == OptimizingCompiler::SUCCEEDED) {
- Handle<Code> code = info->code();
- ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
- info->closure()->ReplaceCode(*code);
- if (info->shared_info()->SearchOptimizedCodeMap(
- info->closure()->context()->native_context()) == -1) {
- InsertCodeIntoOptimizedCodeMap(*info);
- }
- } else {
- info->SetCode(Handle<Code>(info->shared_info()->code()));
- InstallFullCode(*info);
- }
-}
-
-
-Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
- Handle<Script> script) {
- // Precondition: code has been parsed and scopes have been analyzed.
- CompilationInfoWithZone info(script);
- info.SetFunction(literal);
- info.SetScope(literal->scope());
- info.SetLanguageMode(literal->scope()->language_mode());
-
- Isolate* isolate = info.isolate();
- LiveEditFunctionTracker live_edit_tracker(isolate, literal);
- // Determine if the function can be lazily compiled. This is necessary to
- // allow some of our builtin JS files to be lazily compiled. These
- // builtins cannot be handled lazily by the parser, since we have to know
- // if a function uses the special natives syntax, which is something the
- // parser records.
- // If the debugger requests compilation for break points, we cannot be
- // aggressive about lazy compilation, because it might trigger compilation
- // of functions without an outer context when setting a breakpoint through
- // Debug::FindSharedFunctionInfoInScript.
- bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
- bool allow_lazy = literal->AllowsLazyCompilation() &&
- !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
-
- Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate));
-
- // Generate code
- if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
- Handle<Code> code = isolate->builtins()->LazyCompile();
- info.SetCode(code);
- } else if (GenerateCode(&info)) {
- ASSERT(!info.code().is_null());
- scope_info = ScopeInfo::Create(info.scope(), info.zone());
- } else {
- return Handle<SharedFunctionInfo>::null();
- }
-
- // Create a shared function info object.
- Handle<SharedFunctionInfo> result =
- FACTORY->NewSharedFunctionInfo(literal->name(),
- literal->materialized_literal_count(),
- info.code(),
- scope_info);
- SetFunctionInfo(result, literal, false, script);
- RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
- result->set_allows_lazy_compilation(allow_lazy);
- result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
-
- // Set the expected number of properties for instances and return
- // the resulting function.
- SetExpectedNofPropertiesFromEstimate(result,
- literal->expected_property_count());
- live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
- return result;
-}
-
-
-// Sets the function info on a function.
-// The start_position points to the first '(' character after the function name
-// in the full script source. When counting characters in the script source the
-// the first character is number 0 (not 1).
-void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script) {
- function_info->set_length(lit->parameter_count());
- function_info->set_formal_parameter_count(lit->parameter_count());
- function_info->set_script(*script);
- function_info->set_function_token_position(lit->function_token_position());
- function_info->set_start_position(lit->start_position());
- function_info->set_end_position(lit->end_position());
- function_info->set_is_expression(lit->is_expression());
- function_info->set_is_anonymous(lit->is_anonymous());
- function_info->set_is_toplevel(is_toplevel);
- function_info->set_inferred_name(*lit->inferred_name());
- function_info->SetThisPropertyAssignmentsInfo(
- lit->has_only_simple_this_property_assignments(),
- *lit->this_property_assignments());
- function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
- function_info->set_allows_lazy_compilation_without_context(
- lit->AllowsLazyCompilationWithoutContext());
- function_info->set_language_mode(lit->language_mode());
- function_info->set_qml_mode(lit->qml_mode());
- function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
- function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
- function_info->set_ast_node_count(lit->ast_node_count());
- function_info->set_is_function(lit->is_function());
- function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
- function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
- function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
-}
-
-
-void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
- CompilationInfo* info,
- Handle<SharedFunctionInfo> shared) {
- // SharedFunctionInfo is passed separately, because if CompilationInfo
- // was created using Script object, it will not have it.
-
- // Log the code generation. If source information is available include
- // script name and line number. Check explicitly whether logging is
- // enabled as finding the line number is not free.
- if (info->isolate()->logger()->is_logging_code_events() ||
- CpuProfiler::is_profiling(info->isolate())) {
- Handle<Script> script = info->script();
- Handle<Code> code = info->code();
- if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
- return;
- if (script->name()->IsString()) {
- int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
- USE(line_num);
- PROFILE(info->isolate(),
- CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
- *code,
- *shared,
- String::cast(script->name()),
- line_num));
- } else {
- PROFILE(info->isolate(),
- CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
- *code,
- *shared,
- shared->DebugName()));
- }
- }
-
- GDBJIT(AddCode(Handle<String>(shared->DebugName()),
- Handle<Script>(info->script()),
- Handle<Code>(info->code()),
- info));
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/compiler.h b/src/3rdparty/v8/src/compiler.h
deleted file mode 100644
index 6abaafa..0000000
--- a/src/3rdparty/v8/src/compiler.h
+++ /dev/null
@@ -1,548 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_COMPILER_H_
-#define V8_COMPILER_H_
-
-#include "allocation.h"
-#include "ast.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-static const int kPrologueOffsetNotSet = -1;
-
-class ScriptDataImpl;
-class HydrogenCodeStub;
-
-// CompilationInfo encapsulates some information known at compile time. It
-// is constructed based on the resources available at compile-time.
-class CompilationInfo {
- public:
- CompilationInfo(Handle<Script> script, Zone* zone);
- CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
- CompilationInfo(Handle<JSFunction> closure, Zone* zone);
- CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
-
- virtual ~CompilationInfo();
-
- Isolate* isolate() {
- ASSERT(Isolate::Current() == isolate_);
- return isolate_;
- }
- Zone* zone() {
- return zone_;
- }
- bool is_lazy() const { return IsLazy::decode(flags_); }
- bool is_eval() const { return IsEval::decode(flags_); }
- bool is_global() const { return IsGlobal::decode(flags_); }
- bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
- bool is_extended_mode() const { return language_mode() == EXTENDED_MODE; }
- LanguageMode language_mode() const {
- return LanguageModeField::decode(flags_);
- }
- bool is_in_loop() const { return IsInLoop::decode(flags_); }
- bool is_qml_mode() const { return IsQmlMode::decode(flags_); }
- FunctionLiteral* function() const { return function_; }
- Scope* scope() const { return scope_; }
- Scope* global_scope() const { return global_scope_; }
- Handle<Code> code() const { return code_; }
- Handle<JSFunction> closure() const { return closure_; }
- Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
- Handle<Script> script() const { return script_; }
- HydrogenCodeStub* code_stub() {return code_stub_; }
- v8::Extension* extension() const { return extension_; }
- ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
- Handle<Context> context() const { return context_; }
- BailoutId osr_ast_id() const { return osr_ast_id_; }
- int opt_count() const { return opt_count_; }
- int num_parameters() const;
- int num_heap_slots() const;
- Code::Flags flags() const;
-
- void MarkAsEval() {
- ASSERT(!is_lazy());
- flags_ |= IsEval::encode(true);
- }
- void MarkAsGlobal() {
- ASSERT(!is_lazy());
- flags_ |= IsGlobal::encode(true);
- }
- void SetLanguageMode(LanguageMode language_mode) {
- ASSERT(this->language_mode() == CLASSIC_MODE ||
- this->language_mode() == language_mode ||
- language_mode == EXTENDED_MODE);
- flags_ = LanguageModeField::update(flags_, language_mode);
- }
- void MarkAsInLoop() {
- ASSERT(is_lazy());
- flags_ |= IsInLoop::encode(true);
- }
- void MarkAsQmlMode() {
- flags_ |= IsQmlMode::encode(true);
- }
- void MarkAsNative() {
- flags_ |= IsNative::encode(true);
- }
-
- bool is_native() const {
- return IsNative::decode(flags_);
- }
-
- bool is_calling() const {
- return is_deferred_calling() || is_non_deferred_calling();
- }
-
- void MarkAsDeferredCalling() {
- flags_ |= IsDeferredCalling::encode(true);
- }
-
- bool is_deferred_calling() const {
- return IsDeferredCalling::decode(flags_);
- }
-
- void MarkAsNonDeferredCalling() {
- flags_ |= IsNonDeferredCalling::encode(true);
- }
-
- bool is_non_deferred_calling() const {
- return IsNonDeferredCalling::decode(flags_);
- }
-
- void MarkAsSavesCallerDoubles() {
- flags_ |= SavesCallerDoubles::encode(true);
- }
-
- bool saves_caller_doubles() const {
- return SavesCallerDoubles::decode(flags_);
- }
-
- void SetFunction(FunctionLiteral* literal) {
- ASSERT(function_ == NULL);
- function_ = literal;
- }
- void SetScope(Scope* scope) {
- ASSERT(scope_ == NULL);
- scope_ = scope;
- }
- void SetGlobalScope(Scope* global_scope) {
- ASSERT(global_scope_ == NULL);
- global_scope_ = global_scope;
- }
- void SetCode(Handle<Code> code) { code_ = code; }
- void SetExtension(v8::Extension* extension) {
- ASSERT(!is_lazy());
- extension_ = extension;
- }
- void SetPreParseData(ScriptDataImpl* pre_parse_data) {
- ASSERT(!is_lazy());
- pre_parse_data_ = pre_parse_data;
- }
- void SetContext(Handle<Context> context) {
- context_ = context;
- }
- void MarkCompilingForDebugging(Handle<Code> current_code) {
- ASSERT(mode_ != OPTIMIZE);
- ASSERT(current_code->kind() == Code::FUNCTION);
- flags_ |= IsCompilingForDebugging::encode(true);
- if (current_code->is_compiled_optimizable()) {
- EnableDeoptimizationSupport();
- } else {
- mode_ = CompilationInfo::NONOPT;
- }
- }
- bool IsCompilingForDebugging() {
- return IsCompilingForDebugging::decode(flags_);
- }
-
- bool has_global_object() const {
- return !closure().is_null() &&
- (closure()->context()->global_object() != NULL);
- }
-
- GlobalObject* global_object() const {
- return has_global_object() ? closure()->context()->global_object() : NULL;
- }
-
- // Accessors for the different compilation modes.
- bool IsOptimizing() const { return mode_ == OPTIMIZE; }
- bool IsOptimizable() const { return mode_ == BASE; }
- bool IsStub() const { return mode_ == STUB; }
- void SetOptimizing(BailoutId osr_ast_id) {
- SetMode(OPTIMIZE);
- osr_ast_id_ = osr_ast_id;
- }
- void DisableOptimization();
-
- // Deoptimization support.
- bool HasDeoptimizationSupport() const {
- return SupportsDeoptimization::decode(flags_);
- }
- void EnableDeoptimizationSupport() {
- ASSERT(IsOptimizable());
- flags_ |= SupportsDeoptimization::encode(true);
- }
-
- // Determines whether or not to insert a self-optimization header.
- bool ShouldSelfOptimize();
-
- // Disable all optimization attempts of this info for the rest of the
- // current compilation pipeline.
- void AbortOptimization();
-
- void set_deferred_handles(DeferredHandles* deferred_handles) {
- ASSERT(deferred_handles_ == NULL);
- deferred_handles_ = deferred_handles;
- }
-
- void SaveHandles() {
- SaveHandle(&closure_);
- SaveHandle(&shared_info_);
- SaveHandle(&context_);
- SaveHandle(&script_);
- }
-
- const char* bailout_reason() const { return bailout_reason_; }
- void set_bailout_reason(const char* reason) { bailout_reason_ = reason; }
-
- int prologue_offset() const {
- ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_);
- return prologue_offset_;
- }
-
- void set_prologue_offset(int prologue_offset) {
- ASSERT_EQ(kPrologueOffsetNotSet, prologue_offset_);
- prologue_offset_ = prologue_offset;
- }
-
- private:
- Isolate* isolate_;
-
- // Compilation mode.
- // BASE is generated by the full codegen, optionally prepared for bailouts.
- // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
- // NONOPT is generated by the full codegen and is not prepared for
- // recompilation/bailouts. These functions are never recompiled.
- enum Mode {
- BASE,
- OPTIMIZE,
- NONOPT,
- STUB
- };
-
- void Initialize(Isolate* isolate, Mode mode, Zone* zone);
-
- void SetMode(Mode mode) {
- ASSERT(V8::UseCrankshaft());
- mode_ = mode;
- }
-
- // Flags using template class BitField<type, start, length>. All are
- // false by default.
- //
- // Compilation is either eager or lazy.
- class IsLazy: public BitField<bool, 0, 1> {};
- // Flags that can be set for eager compilation.
- class IsEval: public BitField<bool, 1, 1> {};
- class IsGlobal: public BitField<bool, 2, 1> {};
- // Flags that can be set for lazy compilation.
- class IsInLoop: public BitField<bool, 3, 1> {};
- // Strict mode - used in eager compilation.
- class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
- // Is this a function from our natives.
- class IsNative: public BitField<bool, 6, 1> {};
- // Is this code being compiled with support for deoptimization..
- class SupportsDeoptimization: public BitField<bool, 7, 1> {};
- // If compiling for debugging produce just full code matching the
- // initial mode setting.
- class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
- // If the compiled code contains calls that require building a frame
- class IsCalling: public BitField<bool, 9, 1> {};
- // If the compiled code contains calls that require building a frame
- class IsDeferredCalling: public BitField<bool, 10, 1> {};
- // If the compiled code contains calls that require building a frame
- class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
- // If the compiled code saves double caller registers that it clobbers.
- class SavesCallerDoubles: public BitField<bool, 12, 1> {};
- // Qml mode
- class IsQmlMode: public BitField<bool, 13, 1> {};
-
- unsigned flags_;
-
- // Fields filled in by the compilation pipeline.
- // AST filled in by the parser.
- FunctionLiteral* function_;
- // The scope of the function literal as a convenience. Set to indicate
- // that scopes have been analyzed.
- Scope* scope_;
- // The global scope provided as a convenience.
- Scope* global_scope_;
- // For compiled stubs, the stub object
- HydrogenCodeStub* code_stub_;
- // The compiled code.
- Handle<Code> code_;
-
- // Possible initial inputs to the compilation process.
- Handle<JSFunction> closure_;
- Handle<SharedFunctionInfo> shared_info_;
- Handle<Script> script_;
-
- // Fields possibly needed for eager compilation, NULL by default.
- v8::Extension* extension_;
- ScriptDataImpl* pre_parse_data_;
-
- // The context of the caller for eval code, and the global context for a
- // global script. Will be a null handle otherwise.
- Handle<Context> context_;
-
- // Compilation mode flag and whether deoptimization is allowed.
- Mode mode_;
- BailoutId osr_ast_id_;
-
- // The zone from which the compilation pipeline working on this
- // CompilationInfo allocates.
- Zone* zone_;
-
- DeferredHandles* deferred_handles_;
-
- template<typename T>
- void SaveHandle(Handle<T> *object) {
- if (!object->is_null()) {
- Handle<T> handle(*(*object));
- *object = handle;
- }
- }
-
- const char* bailout_reason_;
-
- int prologue_offset_;
-
- // A copy of shared_info()->opt_count() to avoid handle deref
- // during graph optimization.
- int opt_count_;
-
- DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
-};
-
-
-// Exactly like a CompilationInfo, except also creates and enters a
-// Zone on construction and deallocates it on exit.
-class CompilationInfoWithZone: public CompilationInfo {
- public:
- INLINE(void* operator new(size_t size)) { return Malloced::New(size); }
-
- explicit CompilationInfoWithZone(Handle<Script> script)
- : CompilationInfo(script, &zone_),
- zone_(script->GetIsolate()),
- zone_scope_(&zone_, DELETE_ON_EXIT) {}
- explicit CompilationInfoWithZone(Handle<SharedFunctionInfo> shared_info)
- : CompilationInfo(shared_info, &zone_),
- zone_(shared_info->GetIsolate()),
- zone_scope_(&zone_, DELETE_ON_EXIT) {}
- explicit CompilationInfoWithZone(Handle<JSFunction> closure)
- : CompilationInfo(closure, &zone_),
- zone_(closure->GetIsolate()),
- zone_scope_(&zone_, DELETE_ON_EXIT) {}
- explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
- : CompilationInfo(stub, isolate, &zone_),
- zone_(isolate),
- zone_scope_(&zone_, DELETE_ON_EXIT) {}
-
- private:
- Zone zone_;
- ZoneScope zone_scope_;
-};
-
-
-// A wrapper around a CompilationInfo that detaches the Handles from
-// the underlying DeferredHandleScope and stores them in info_ on
-// destruction.
-class CompilationHandleScope BASE_EMBEDDED {
- public:
- explicit CompilationHandleScope(CompilationInfo* info)
- : deferred_(info->isolate()), info_(info) {}
- ~CompilationHandleScope() {
- info_->set_deferred_handles(deferred_.Detach());
- }
-
- private:
- DeferredHandleScope deferred_;
- CompilationInfo* info_;
-};
-
-
-class HGraph;
-class HOptimizedGraphBuilder;
-class LChunk;
-
-// A helper class that calls the three compilation phases in
-// Crankshaft and keeps track of its state. The three phases
-// CreateGraph, OptimizeGraph and GenerateAndInstallCode can either
-// fail, bail-out to the full code generator or succeed. Apart from
-// their return value, the status of the phase last run can be checked
-// using last_status().
-class OptimizingCompiler: public ZoneObject {
- public:
- explicit OptimizingCompiler(CompilationInfo* info)
- : info_(info),
- oracle_(NULL),
- graph_builder_(NULL),
- graph_(NULL),
- chunk_(NULL),
- time_taken_to_create_graph_(0),
- time_taken_to_optimize_(0),
- time_taken_to_codegen_(0),
- last_status_(FAILED) { }
-
- enum Status {
- FAILED, BAILED_OUT, SUCCEEDED
- };
-
- MUST_USE_RESULT Status CreateGraph();
- MUST_USE_RESULT Status OptimizeGraph();
- MUST_USE_RESULT Status GenerateAndInstallCode();
-
- Status last_status() const { return last_status_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info()->isolate(); }
-
- MUST_USE_RESULT Status AbortOptimization() {
- info_->AbortOptimization();
- info_->shared_info()->DisableOptimization(info_->bailout_reason());
- return SetLastStatus(BAILED_OUT);
- }
-
- private:
- CompilationInfo* info_;
- TypeFeedbackOracle* oracle_;
- HOptimizedGraphBuilder* graph_builder_;
- HGraph* graph_;
- LChunk* chunk_;
- int64_t time_taken_to_create_graph_;
- int64_t time_taken_to_optimize_;
- int64_t time_taken_to_codegen_;
- Status last_status_;
-
- MUST_USE_RESULT Status SetLastStatus(Status status) {
- last_status_ = status;
- return last_status_;
- }
- void RecordOptimizationStats();
-
- struct Timer {
- Timer(OptimizingCompiler* compiler, int64_t* location)
- : compiler_(compiler),
- start_(OS::Ticks()),
- location_(location) { }
-
- ~Timer() {
- *location_ += (OS::Ticks() - start_);
- }
-
- OptimizingCompiler* compiler_;
- int64_t start_;
- int64_t* location_;
- };
-};
-
-
-// The V8 compiler
-//
-// General strategy: Source code is translated into an anonymous function w/o
-// parameters which then can be executed. If the source code contains other
-// functions, they will be compiled and allocated as part of the compilation
-// of the source code.
-
-// Please note this interface returns shared function infos. This means you
-// need to call Factory::NewFunctionFromSharedFunctionInfo before you have a
-// real function with a context.
-
-class Compiler : public AllStatic {
- public:
- static const int kMaxInliningLevels = 3;
-
- // Call count before primitive functions trigger their own optimization.
- static const int kCallsUntilPrimitiveOpt = 200;
-
- // All routines return a SharedFunctionInfo.
- // If an error occurs an exception is raised and the return handle
- // contains NULL.
-
- // Compile a String source within a context.
- static Handle<SharedFunctionInfo> Compile(
- Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- Handle<Context> context,
- v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Handle<Object> script_data,
- NativesFlag is_natives_code,
- v8::Script::CompileFlags = v8::Script::Default);
-
- // Compile a String source within a context for Eval.
- static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- LanguageMode language_mode,
- int scope_position,
- bool qml_mode);
-
- // Compile from function info (used for lazy compilation). Returns true on
- // success and false if the compilation resulted in a stack overflow.
- static bool CompileLazy(CompilationInfo* info);
-
- static void RecompileParallel(Handle<JSFunction> function);
-
- // Compile a shared function info object (the function is possibly lazily
- // compiled).
- static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
- Handle<Script> script);
-
- // Set the function info for a newly compiled function.
- static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
- static void InstallOptimizedCode(OptimizingCompiler* info);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- static bool MakeCodeForLiveEdit(CompilationInfo* info);
-#endif
-
- static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
- CompilationInfo* info,
- Handle<SharedFunctionInfo> shared);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_COMPILER_H_
diff --git a/src/3rdparty/v8/src/contexts.cc b/src/3rdparty/v8/src/contexts.cc
deleted file mode 100644
index 26d8c1a..0000000
--- a/src/3rdparty/v8/src/contexts.cc
+++ /dev/null
@@ -1,396 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "debug.h"
-#include "scopeinfo.h"
-
-namespace v8 {
-namespace internal {
-
-Context* Context::declaration_context() {
- Context* current = this;
- while (!current->IsFunctionContext() && !current->IsNativeContext()) {
- current = current->previous();
- ASSERT(current->closure() == closure());
- }
- return current;
-}
-
-
-JSBuiltinsObject* Context::builtins() {
- GlobalObject* object = global_object();
- if (object->IsJSGlobalObject()) {
- return JSGlobalObject::cast(object)->builtins();
- } else {
- ASSERT(object->IsJSBuiltinsObject());
- return JSBuiltinsObject::cast(object);
- }
-}
-
-
-Context* Context::global_context() {
- Context* current = this;
- while (!current->IsGlobalContext()) {
- current = current->previous();
- }
- return current;
-}
-
-
-Context* Context::native_context() {
- // Fast case: the global object for this context has been set. In
- // that case, the global object has a direct pointer to the global
- // context.
- if (global_object()->IsGlobalObject()) {
- return global_object()->native_context();
- }
-
- // During bootstrapping, the global object might not be set and we
- // have to search the context chain to find the native context.
- ASSERT(Isolate::Current()->bootstrapper()->IsActive());
- Context* current = this;
- while (!current->IsNativeContext()) {
- JSFunction* closure = JSFunction::cast(current->closure());
- current = Context::cast(closure->context());
- }
- return current;
-}
-
-
-JSObject* Context::global_proxy() {
- return native_context()->global_proxy_object();
-}
-
-void Context::set_global_proxy(JSObject* object) {
- native_context()->set_global_proxy_object(object);
-}
-
-
-Handle<Object> Context::Lookup(Handle<String> name,
- ContextLookupFlags flags,
- int* index,
- PropertyAttributes* attributes,
- BindingFlags* binding_flags) {
- Isolate* isolate = GetIsolate();
- Handle<Context> context(this, isolate);
-
- bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
- *index = -1;
- *attributes = ABSENT;
- *binding_flags = MISSING_BINDING;
-
- if (FLAG_trace_contexts) {
- PrintF("Context::Lookup(");
- name->ShortPrint();
- PrintF(")\n");
- }
-
- Handle<JSObject> qml_global;
- Handle<JSObject> qml_global_global;
-
- do {
- if (FLAG_trace_contexts) {
- PrintF(" - looking in context %p", reinterpret_cast<void*>(*context));
- if (context->IsNativeContext()) PrintF(" (native context)");
- PrintF("\n");
- }
-
- if (qml_global.is_null() && !context->qml_global_object()->IsUndefined()) {
- qml_global = Handle<JSObject>(context->qml_global_object(), isolate);
- qml_global_global = Handle<JSObject>(context->global_object(), isolate);
- }
-
- // 1. Check global objects, subjects of with, and extension objects.
- if (context->IsNativeContext() ||
- context->IsWithContext() ||
- (context->IsFunctionContext() && context->has_extension())) {
- Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
- // Context extension objects needs to behave as if they have no
- // prototype. So even if we want to follow prototype chains, we need
- // to only do a local lookup for context extension objects.
- if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
- object->IsJSContextExtensionObject()) {
- *attributes = object->GetLocalPropertyAttribute(*name);
- } else {
- *attributes = object->GetPropertyAttribute(*name);
- }
- if (*attributes != ABSENT) {
- if (FLAG_trace_contexts) {
- PrintF("=> found property in context object %p\n",
- reinterpret_cast<void*>(*object));
- }
- return object;
- }
- }
-
- // 2. Check the context proper if it has slots.
- if (context->IsFunctionContext() || context->IsBlockContext()) {
- // Use serialized scope information of functions and blocks to search
- // for the context index.
- Handle<ScopeInfo> scope_info;
- if (context->IsFunctionContext()) {
- scope_info = Handle<ScopeInfo>(
- context->closure()->shared()->scope_info(), isolate);
- } else {
- scope_info = Handle<ScopeInfo>(
- ScopeInfo::cast(context->extension()), isolate);
- }
- VariableMode mode;
- InitializationFlag init_flag;
- int slot_index = scope_info->ContextSlotIndex(*name, &mode, &init_flag);
- ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
- if (slot_index >= 0) {
- if (FLAG_trace_contexts) {
- PrintF("=> found local in context slot %d (mode = %d)\n",
- slot_index, mode);
- }
- *index = slot_index;
- // Note: Fixed context slots are statically allocated by the compiler.
- // Statically allocated variables always have a statically known mode,
- // which is the mode with which they were declared when added to the
- // scope. Thus, the DYNAMIC mode (which corresponds to dynamically
- // declared variables that were introduced through declaration nodes)
- // must not appear here.
- switch (mode) {
- case INTERNAL: // Fall through.
- case VAR:
- *attributes = NONE;
- *binding_flags = MUTABLE_IS_INITIALIZED;
- break;
- case LET:
- *attributes = NONE;
- *binding_flags = (init_flag == kNeedsInitialization)
- ? MUTABLE_CHECK_INITIALIZED : MUTABLE_IS_INITIALIZED;
- break;
- case CONST:
- *attributes = READ_ONLY;
- *binding_flags = (init_flag == kNeedsInitialization)
- ? IMMUTABLE_CHECK_INITIALIZED : IMMUTABLE_IS_INITIALIZED;
- break;
- case CONST_HARMONY:
- *attributes = READ_ONLY;
- *binding_flags = (init_flag == kNeedsInitialization)
- ? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
- IMMUTABLE_IS_INITIALIZED_HARMONY;
- break;
- case MODULE:
- *attributes = READ_ONLY;
- *binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
- break;
- case DYNAMIC:
- case DYNAMIC_GLOBAL:
- case DYNAMIC_LOCAL:
- case TEMPORARY:
- UNREACHABLE();
- break;
- }
- return context;
- }
-
- // Check the slot corresponding to the intermediate context holding
- // only the function name variable.
- if (follow_context_chain && context->IsFunctionContext()) {
- VariableMode mode;
- int function_index = scope_info->FunctionContextSlotIndex(*name, &mode);
- if (function_index >= 0) {
- if (FLAG_trace_contexts) {
- PrintF("=> found intermediate function in context slot %d\n",
- function_index);
- }
- *index = function_index;
- *attributes = READ_ONLY;
- ASSERT(mode == CONST || mode == CONST_HARMONY);
- *binding_flags = (mode == CONST)
- ? IMMUTABLE_IS_INITIALIZED : IMMUTABLE_IS_INITIALIZED_HARMONY;
- return context;
- }
- }
-
- } else if (context->IsCatchContext()) {
- // Catch contexts have the variable name in the extension slot.
- if (name->Equals(String::cast(context->extension()))) {
- if (FLAG_trace_contexts) {
- PrintF("=> found in catch context\n");
- }
- *index = Context::THROWN_OBJECT_INDEX;
- *attributes = NONE;
- *binding_flags = MUTABLE_IS_INITIALIZED;
- return context;
- }
- }
-
- // 3. Prepare to continue with the previous (next outermost) context.
- if (context->IsNativeContext()) {
- follow_context_chain = false;
- } else {
- context = Handle<Context>(context->previous(), isolate);
- }
- } while (follow_context_chain);
-
- if (!qml_global.is_null()) {
- if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0) {
- *attributes = qml_global_global->GetLocalPropertyAttribute(*name);
- } else {
- *attributes = qml_global_global->GetPropertyAttribute(*name);
- }
-
- if (*attributes != ABSENT) {
- *attributes = ABSENT;
- } else {
- if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0) {
- *attributes = qml_global->GetLocalPropertyAttribute(*name);
- } else {
- *attributes = qml_global->GetPropertyAttribute(*name);
- }
-
- if (*attributes != ABSENT) {
- // property found
- if (FLAG_trace_contexts) {
- PrintF("=> found property in qml global object %p\n",
- reinterpret_cast<void*>(*qml_global));
- }
- return qml_global;
- }
- }
- }
-
- if (FLAG_trace_contexts) {
- PrintF("=> no property/slot found\n");
- }
- return Handle<Object>::null();
-}
-
-
-void Context::AddOptimizedFunction(JSFunction* function) {
- ASSERT(IsNativeContext());
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
- while (!element->IsUndefined()) {
- CHECK(element != function);
- element = JSFunction::cast(element)->next_function_link();
- }
- }
-
- // Check that the context belongs to the weak native contexts list.
- bool found = false;
- Object* context = GetHeap()->native_contexts_list();
- while (!context->IsUndefined()) {
- if (context == this) {
- found = true;
- break;
- }
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
- }
- CHECK(found);
-#endif
-
- // If the function link field is already used then the function was
- // enqueued as a code flushing candidate and we remove it now.
- if (!function->next_function_link()->IsUndefined()) {
- CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
- flusher->EvictCandidate(function);
- }
-
- ASSERT(function->next_function_link()->IsUndefined());
-
- function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST));
- set(OPTIMIZED_FUNCTIONS_LIST, function);
-}
-
-
-void Context::RemoveOptimizedFunction(JSFunction* function) {
- ASSERT(IsNativeContext());
- Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
- JSFunction* prev = NULL;
- while (!element->IsUndefined()) {
- JSFunction* element_function = JSFunction::cast(element);
- ASSERT(element_function->next_function_link()->IsUndefined() ||
- element_function->next_function_link()->IsJSFunction());
- if (element_function == function) {
- if (prev == NULL) {
- set(OPTIMIZED_FUNCTIONS_LIST, element_function->next_function_link());
- } else {
- prev->set_next_function_link(element_function->next_function_link());
- }
- element_function->set_next_function_link(GetHeap()->undefined_value());
- return;
- }
- prev = element_function;
- element = element_function->next_function_link();
- }
- UNREACHABLE();
-}
-
-
-Object* Context::OptimizedFunctionsListHead() {
- ASSERT(IsNativeContext());
- return get(OPTIMIZED_FUNCTIONS_LIST);
-}
-
-
-void Context::ClearOptimizedFunctions() {
- set(OPTIMIZED_FUNCTIONS_LIST, GetHeap()->undefined_value());
-}
-
-
-Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
- Handle<Object> result(error_message_for_code_gen_from_strings(),
- GetIsolate());
- if (!result->IsUndefined()) return result;
- return GetIsolate()->factory()->NewStringFromAscii(i::CStrVector(
- "Code generation from strings disallowed for this context"));
-}
-
-
-#ifdef DEBUG
-bool Context::IsBootstrappingOrValidParentContext(
- Object* object, Context* child) {
- // During bootstrapping we allow all objects to pass as
- // contexts. This is necessary to fix circular dependencies.
- if (child->GetIsolate()->bootstrapper()->IsActive()) return true;
- if (!object->IsContext()) return false;
- Context* context = Context::cast(object);
- return context->IsNativeContext() || context->IsGlobalContext() ||
- context->IsModuleContext() || !child->IsModuleContext();
-}
-
-
-bool Context::IsBootstrappingOrGlobalObject(Object* object) {
- // During bootstrapping we allow all objects to pass as global
- // objects. This is necessary to fix circular dependencies.
- Isolate* isolate = Isolate::Current();
- return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
- isolate->bootstrapper()->IsActive() ||
- object->IsGlobalObject();
-}
-#endif
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/contexts.h b/src/3rdparty/v8/src/contexts.h
deleted file mode 100644
index 96473df..0000000
--- a/src/3rdparty/v8/src/contexts.h
+++ /dev/null
@@ -1,477 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CONTEXTS_H_
-#define V8_CONTEXTS_H_
-
-#include "heap.h"
-#include "objects.h"
-
-namespace v8 {
-namespace internal {
-
-
-enum ContextLookupFlags {
- FOLLOW_CONTEXT_CHAIN = 1,
- FOLLOW_PROTOTYPE_CHAIN = 2,
-
- DONT_FOLLOW_CHAINS = 0,
- FOLLOW_CHAINS = FOLLOW_CONTEXT_CHAIN | FOLLOW_PROTOTYPE_CHAIN
-};
-
-
-// ES5 10.2 defines lexical environments with mutable and immutable bindings.
-// Immutable bindings have two states, initialized and uninitialized, and
-// their state is changed by the InitializeImmutableBinding method. The
-// BindingFlags enum represents information if a binding has definitely been
-// initialized. A mutable binding does not need to be checked and thus has
-// the BindingFlag MUTABLE_IS_INITIALIZED.
-//
-// There are two possibilities for immutable bindings
-// * 'const' declared variables. They are initialized when evaluating the
-// corresponding declaration statement. They need to be checked for being
-// initialized and thus get the flag IMMUTABLE_CHECK_INITIALIZED.
-// * The function name of a named function literal. The binding is immediately
-// initialized when entering the function and thus does not need to be
-// checked. it gets the BindingFlag IMMUTABLE_IS_INITIALIZED.
-// Accessing an uninitialized binding produces the undefined value.
-//
-// The harmony proposal for block scoped bindings also introduces the
-// uninitialized state for mutable bindings.
-// * A 'let' declared variable. They are initialized when evaluating the
-// corresponding declaration statement. They need to be checked for being
-// initialized and thus get the flag MUTABLE_CHECK_INITIALIZED.
-// * A 'var' declared variable. It is initialized immediately upon creation
-// and thus doesn't need to be checked. It gets the flag
-// MUTABLE_IS_INITIALIZED.
-// * Catch bound variables, function parameters and variables introduced by
-// function declarations are initialized immediately and do not need to be
-// checked. Thus they get the flag MUTABLE_IS_INITIALIZED.
-// Immutable bindings in harmony mode get the _HARMONY flag variants. Accessing
-// an uninitialized binding produces a reference error.
-//
-// In V8 uninitialized bindings are set to the hole value upon creation and set
-// to a different value upon initialization.
-enum BindingFlags {
- MUTABLE_IS_INITIALIZED,
- MUTABLE_CHECK_INITIALIZED,
- IMMUTABLE_IS_INITIALIZED,
- IMMUTABLE_CHECK_INITIALIZED,
- IMMUTABLE_IS_INITIALIZED_HARMONY,
- IMMUTABLE_CHECK_INITIALIZED_HARMONY,
- MISSING_BINDING
-};
-
-
-// Heap-allocated activation contexts.
-//
-// Contexts are implemented as FixedArray objects; the Context
-// class is a convenience interface casted on a FixedArray object.
-//
-// Note: Context must have no virtual functions and Context objects
-// must always be allocated via Heap::AllocateContext() or
-// Factory::NewContext.
-
-#define NATIVE_CONTEXT_FIELDS(V) \
- V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
- V(SECURITY_TOKEN_INDEX, Object, security_token) \
- V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
- V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
- V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
- V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
- V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
- V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
- V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
- V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
- V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
- V(JSON_OBJECT_INDEX, JSObject, json_object) \
- V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
- V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
- V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
- V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun) \
- V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun) \
- V(TO_DETAIL_STRING_FUN_INDEX, JSFunction, to_detail_string_fun) \
- V(TO_OBJECT_FUN_INDEX, JSFunction, to_object_fun) \
- V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun) \
- V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \
- V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \
- V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
- V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
- V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
- V(FUNCTION_MAP_INDEX, Map, function_map) \
- V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
- V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
- V(STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
- strict_mode_function_without_prototype_map) \
- V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
- V(STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX, Map, \
- strict_mode_function_instance_map) \
- V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
- V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
- V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
- aliased_arguments_boilerplate) \
- V(STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
- strict_mode_arguments_boilerplate) \
- V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
- V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
- V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
- V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \
- V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \
- V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \
- V(NORMALIZED_MAP_CACHE_INDEX, NormalizedMapCache, normalized_map_cache) \
- V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
- V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
- V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
- call_as_constructor_delegate) \
- V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
- V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
- V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
- V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
- V(MAP_CACHE_INDEX, Object, map_cache) \
- V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
- V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
- V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
- error_message_for_code_gen_from_strings) \
- V(SYMBOL_DELEGATE_INDEX, JSObject, symbol_delegate) \
- V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
- to_complete_property_descriptor) \
- V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
- V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
- V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
- V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
- V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
- V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \
- V(RANDOM_SEED_INDEX, ByteArray, random_seed)
-
-// JSFunctions are pairs (context, function code), sometimes also called
-// closures. A Context object is used to represent function contexts and
-// dynamically pushed 'with' contexts (or 'scopes' in ECMA-262 speak).
-//
-// At runtime, the contexts build a stack in parallel to the execution
-// stack, with the top-most context being the current context. All contexts
-// have the following slots:
-//
-// [ closure ] This is the current function. It is the same for all
-// contexts inside a function. It provides access to the
-// incoming context (i.e., the outer context, which may
-// or may not become the current function's context), and
-// it provides access to the functions code and thus it's
-// scope information, which in turn contains the names of
-// statically allocated context slots. The names are needed
-// for dynamic lookups in the presence of 'with' or 'eval'.
-//
-// [ previous ] A pointer to the previous context. It is NULL for
-// function contexts, and non-NULL for 'with' contexts.
-// Used to implement the 'with' statement.
-//
-// [ extension ] A pointer to an extension JSObject, or NULL. Used to
-// implement 'with' statements and dynamic declarations
-// (through 'eval'). The object in a 'with' statement is
-// stored in the extension slot of a 'with' context.
-// Dynamically declared variables/functions are also added
-// to lazily allocated extension object. Context::Lookup
-// searches the extension object for properties.
-// For global and block contexts, contains the respective
-// ScopeInfo.
-// For module contexts, points back to the respective JSModule.
-//
-// [ global_object ] A pointer to the global object. Provided for quick
-// access to the global object from inside the code (since
-// we always have a context pointer).
-//
-// In addition, function contexts may have statically allocated context slots
-// to store local variables/functions that are accessed from inner functions
-// (via static context addresses) or through 'eval' (dynamic context lookups).
-// Finally, the native context contains additional slots for fast access to
-// native properties.
-
-class Context: public FixedArray {
- public:
- // Conversions.
- static Context* cast(Object* context) {
- ASSERT(context->IsContext());
- return reinterpret_cast<Context*>(context);
- }
-
- // The default context slot layout; indices are FixedArray slot indices.
- enum {
- // These slots are in all contexts.
- CLOSURE_INDEX,
- PREVIOUS_INDEX,
- // The extension slot is used for either the global object (in global
- // contexts), eval extension object (function contexts), subject of with
- // (with contexts), or the variable name (catch contexts), the serialized
- // scope info (block contexts), or the module instance (module contexts).
- EXTENSION_INDEX,
- QML_GLOBAL_OBJECT_INDEX,
- GLOBAL_OBJECT_INDEX,
- MIN_CONTEXT_SLOTS,
-
- // This slot holds the thrown value in catch contexts.
- THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
-
- // These slots are only in native contexts.
- GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
- SECURITY_TOKEN_INDEX,
- ARGUMENTS_BOILERPLATE_INDEX,
- ALIASED_ARGUMENTS_BOILERPLATE_INDEX,
- STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX,
- REGEXP_RESULT_MAP_INDEX,
- FUNCTION_MAP_INDEX,
- STRICT_MODE_FUNCTION_MAP_INDEX,
- FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
- STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
- FUNCTION_INSTANCE_MAP_INDEX,
- STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX,
- INITIAL_OBJECT_PROTOTYPE_INDEX,
- BOOLEAN_FUNCTION_INDEX,
- NUMBER_FUNCTION_INDEX,
- STRING_FUNCTION_INDEX,
- STRING_FUNCTION_PROTOTYPE_MAP_INDEX,
- OBJECT_FUNCTION_INDEX,
- INTERNAL_ARRAY_FUNCTION_INDEX,
- ARRAY_FUNCTION_INDEX,
- JS_ARRAY_MAPS_INDEX,
- DATE_FUNCTION_INDEX,
- JSON_OBJECT_INDEX,
- REGEXP_FUNCTION_INDEX,
- CREATE_DATE_FUN_INDEX,
- TO_NUMBER_FUN_INDEX,
- TO_STRING_FUN_INDEX,
- TO_DETAIL_STRING_FUN_INDEX,
- TO_OBJECT_FUN_INDEX,
- TO_INTEGER_FUN_INDEX,
- TO_UINT32_FUN_INDEX,
- TO_INT32_FUN_INDEX,
- TO_BOOLEAN_FUN_INDEX,
- GLOBAL_EVAL_FUN_INDEX,
- INSTANTIATE_FUN_INDEX,
- CONFIGURE_INSTANCE_FUN_INDEX,
- MESSAGE_LISTENERS_INDEX,
- MAKE_MESSAGE_FUN_INDEX,
- GET_STACK_TRACE_LINE_INDEX,
- CONFIGURE_GLOBAL_INDEX,
- FUNCTION_CACHE_INDEX,
- JSFUNCTION_RESULT_CACHES_INDEX,
- NORMALIZED_MAP_CACHE_INDEX,
- RUNTIME_CONTEXT_INDEX,
- CALL_AS_FUNCTION_DELEGATE_INDEX,
- CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
- SCRIPT_FUNCTION_INDEX,
- OPAQUE_REFERENCE_FUNCTION_INDEX,
- CONTEXT_EXTENSION_FUNCTION_INDEX,
- OUT_OF_MEMORY_INDEX,
- EMBEDDER_DATA_INDEX,
- ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
- ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
- SYMBOL_DELEGATE_INDEX,
- TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
- DERIVED_HAS_TRAP_INDEX,
- DERIVED_GET_TRAP_INDEX,
- DERIVED_SET_TRAP_INDEX,
- PROXY_ENUMERATE_INDEX,
- OBSERVERS_NOTIFY_CHANGE_INDEX,
- OBSERVERS_DELIVER_CHANGES_INDEX,
- RANDOM_SEED_INDEX,
-
- // Properties from here are treated as weak references by the full GC.
- // Scavenge treats them as strong references.
- OPTIMIZED_FUNCTIONS_LIST, // Weak.
- MAP_CACHE_INDEX, // Weak.
- NEXT_CONTEXT_LINK, // Weak.
-
- // Total number of slots.
- NATIVE_CONTEXT_SLOTS,
-
- FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST
- };
-
- // Direct slot access.
- JSFunction* closure() { return JSFunction::cast(get(CLOSURE_INDEX)); }
- void set_closure(JSFunction* closure) { set(CLOSURE_INDEX, closure); }
-
- Context* previous() {
- Object* result = unchecked_previous();
- ASSERT(IsBootstrappingOrValidParentContext(result, this));
- return reinterpret_cast<Context*>(result);
- }
- void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
-
- bool has_extension() { return extension() != NULL; }
- Object* extension() { return get(EXTENSION_INDEX); }
- void set_extension(Object* object) { set(EXTENSION_INDEX, object); }
-
- JSModule* module() { return JSModule::cast(get(EXTENSION_INDEX)); }
- void set_module(JSModule* module) { set(EXTENSION_INDEX, module); }
-
- // Get the context where var declarations will be hoisted to, which
- // may be the context itself.
- Context* declaration_context();
-
- GlobalObject* global_object() {
- Object* result = get(GLOBAL_OBJECT_INDEX);
- ASSERT(IsBootstrappingOrGlobalObject(result));
- return reinterpret_cast<GlobalObject*>(result);
- }
- void set_global_object(GlobalObject* object) {
- set(GLOBAL_OBJECT_INDEX, object);
- }
-
- JSObject* qml_global_object() {
- return reinterpret_cast<JSObject *>(get(QML_GLOBAL_OBJECT_INDEX));
- }
- void set_qml_global_object(JSObject *qml_global) {
- set(QML_GLOBAL_OBJECT_INDEX, qml_global);
- }
-
- // Returns a JSGlobalProxy object or null.
- JSObject* global_proxy();
- void set_global_proxy(JSObject* global);
-
- // The builtins object.
- JSBuiltinsObject* builtins();
-
- // Get the innermost global context by traversing the context chain.
- Context* global_context();
-
- // Compute the native context by traversing the context chain.
- Context* native_context();
-
- // Predicates for context types. IsNativeContext is also defined on Object
- // because we frequently have to know if arbitrary objects are natives
- // contexts.
- bool IsNativeContext() {
- Map* map = this->map();
- return map == map->GetHeap()->native_context_map();
- }
- bool IsFunctionContext() {
- Map* map = this->map();
- return map == map->GetHeap()->function_context_map();
- }
- bool IsCatchContext() {
- Map* map = this->map();
- return map == map->GetHeap()->catch_context_map();
- }
- bool IsWithContext() {
- Map* map = this->map();
- return map == map->GetHeap()->with_context_map();
- }
- bool IsBlockContext() {
- Map* map = this->map();
- return map == map->GetHeap()->block_context_map();
- }
- bool IsModuleContext() {
- Map* map = this->map();
- return map == map->GetHeap()->module_context_map();
- }
- bool IsGlobalContext() {
- Map* map = this->map();
- return map == map->GetHeap()->global_context_map();
- }
-
- // Tells whether the native context is marked with out of memory.
- inline bool has_out_of_memory();
-
- // Mark the native context with out of memory.
- inline void mark_out_of_memory();
-
- // A native context hold a list of all functions which have been optimized.
- void AddOptimizedFunction(JSFunction* function);
- void RemoveOptimizedFunction(JSFunction* function);
- Object* OptimizedFunctionsListHead();
- void ClearOptimizedFunctions();
-
- Handle<Object> ErrorMessageForCodeGenerationFromStrings();
-
-#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
- void set_##name(type* value) { \
- ASSERT(IsNativeContext()); \
- set(index, value); \
- } \
- type* name() { \
- ASSERT(IsNativeContext()); \
- return type::cast(get(index)); \
- }
- NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
-#undef NATIVE_CONTEXT_FIELD_ACCESSORS
-
- // Lookup the slot called name, starting with the current context.
- // There are three possibilities:
- //
- // 1) result->IsContext():
- // The binding was found in a context. *index is always the
- // non-negative slot index. *attributes is NONE for var and let
- // declarations, READ_ONLY for const declarations (never ABSENT).
- //
- // 2) result->IsJSObject():
- // The binding was found as a named property in a context extension
- // object (i.e., was introduced via eval), as a property on the subject
- // of with, or as a property of the global object. *index is -1 and
- // *attributes is not ABSENT.
- //
- // 3) result.is_null():
- // There was no binding found, *index is always -1 and *attributes is
- // always ABSENT.
- Handle<Object> Lookup(Handle<String> name,
- ContextLookupFlags flags,
- int* index,
- PropertyAttributes* attributes,
- BindingFlags* binding_flags);
-
- // Code generation support.
- static int SlotOffset(int index) {
- return kHeaderSize + index * kPointerSize - kHeapObjectTag;
- }
-
- static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
-
- // GC support.
- typedef FixedBodyDescriptor<
- kHeaderSize, kSize, kSize> ScavengeBodyDescriptor;
-
- typedef FixedBodyDescriptor<
- kHeaderSize,
- kHeaderSize + FIRST_WEAK_SLOT * kPointerSize,
- kSize> MarkCompactBodyDescriptor;
-
- private:
- // Unchecked access to the slots.
- Object* unchecked_previous() { return get(PREVIOUS_INDEX); }
-
-#ifdef DEBUG
- // Bootstrapping-aware type checks.
- static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
- static bool IsBootstrappingOrGlobalObject(Object* object);
-#endif
-
- STATIC_CHECK(kHeaderSize == Internals::kContextHeaderSize);
- STATIC_CHECK(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_CONTEXTS_H_
diff --git a/src/3rdparty/v8/src/conversions-inl.h b/src/3rdparty/v8/src/conversions-inl.h
deleted file mode 100644
index 7edaf22..0000000
--- a/src/3rdparty/v8/src/conversions-inl.h
+++ /dev/null
@@ -1,678 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CONVERSIONS_INL_H_
-#define V8_CONVERSIONS_INL_H_
-
-#include <limits.h> // Required for INT_MAX etc.
-#include <math.h>
-#include <float.h> // Required for DBL_MAX and on Win32 for finite()
-#include <stdarg.h>
-#include "globals.h" // Required for V8_INFINITY
-
-// ----------------------------------------------------------------------------
-// Extra POSIX/ANSI functions for Win32/MSVC.
-
-#include "conversions.h"
-#include "double.h"
-#include "platform.h"
-#include "scanner.h"
-#include "strtod.h"
-
-namespace v8 {
-namespace internal {
-
-inline double JunkStringValue() {
- return BitCast<double, uint64_t>(kQuietNaNMask);
-}
-
-
-inline double SignedZero(bool negative) {
- return negative ? uint64_to_double(Double::kSignMask) : 0.0;
-}
-
-
-// The fast double-to-unsigned-int conversion routine does not guarantee
-// rounding towards zero, or any reasonable value if the argument is larger
-// than what fits in an unsigned 32-bit integer.
-inline unsigned int FastD2UI(double x) {
- // There is no unsigned version of lrint, so there is no fast path
- // in this function as there is in FastD2I. Using lrint doesn't work
- // for values of 2^31 and above.
-
- // Convert "small enough" doubles to uint32_t by fixing the 32
- // least significant non-fractional bits in the low 32 bits of the
- // double, and reading them from there.
- const double k2Pow52 = 4503599627370496.0;
- bool negative = x < 0;
- if (negative) {
- x = -x;
- }
- if (x < k2Pow52) {
- x += k2Pow52;
- uint32_t result;
- Address mantissa_ptr = reinterpret_cast<Address>(&x);
- // Copy least significant 32 bits of mantissa.
- memcpy(&result, mantissa_ptr, sizeof(result));
- return negative ? ~result + 1 : result;
- }
- // Large number (outside uint32 range), Infinity or NaN.
- return 0x80000000u; // Return integer indefinite.
-}
-
-
-inline double DoubleToInteger(double x) {
- if (isnan(x)) return 0;
- if (!isfinite(x) || x == 0) return x;
- return (x >= 0) ? floor(x) : ceil(x);
-}
-
-
-int32_t DoubleToInt32(double x) {
- int32_t i = FastD2I(x);
- if (FastI2D(i) == x) return i;
- Double d(x);
- int exponent = d.Exponent();
- if (exponent < 0) {
- if (exponent <= -Double::kSignificandSize) return 0;
- return d.Sign() * static_cast<int32_t>(d.Significand() >> -exponent);
- } else {
- if (exponent > 31) return 0;
- return d.Sign() * static_cast<int32_t>(d.Significand() << exponent);
- }
-}
-
-
-template <class Iterator, class EndMark>
-bool SubStringEquals(Iterator* current,
- EndMark end,
- const char* substring) {
- ASSERT(**current == *substring);
- for (substring++; *substring != '\0'; substring++) {
- ++*current;
- if (*current == end || **current != *substring) return false;
- }
- ++*current;
- return true;
-}
-
-
-// Returns true if a nonspace character has been found and false if the
-// end was been reached before finding a nonspace character.
-template <class Iterator, class EndMark>
-inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
- Iterator* current,
- EndMark end) {
- while (*current != end) {
- if (!unicode_cache->IsWhiteSpace(**current)) return true;
- ++*current;
- }
- return false;
-}
-
-
-// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
-template <int radix_log_2, class Iterator, class EndMark>
-double InternalStringToIntDouble(UnicodeCache* unicode_cache,
- Iterator current,
- EndMark end,
- bool negative,
- bool allow_trailing_junk) {
- ASSERT(current != end);
-
- // Skip leading 0s.
- while (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
- }
-
- int64_t number = 0;
- int exponent = 0;
- const int radix = (1 << radix_log_2);
-
- do {
- int digit;
- if (*current >= '0' && *current <= '9' && *current < '0' + radix) {
- digit = static_cast<char>(*current) - '0';
- } else if (radix > 10 && *current >= 'a' && *current < 'a' + radix - 10) {
- digit = static_cast<char>(*current) - 'a' + 10;
- } else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
- digit = static_cast<char>(*current) - 'A' + 10;
- } else {
- if (allow_trailing_junk ||
- !AdvanceToNonspace(unicode_cache, &current, end)) {
- break;
- } else {
- return JunkStringValue();
- }
- }
-
- number = number * radix + digit;
- int overflow = static_cast<int>(number >> 53);
- if (overflow != 0) {
- // Overflow occurred. Need to determine which direction to round the
- // result.
- int overflow_bits_count = 1;
- while (overflow > 1) {
- overflow_bits_count++;
- overflow >>= 1;
- }
-
- int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
- int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
- number >>= overflow_bits_count;
- exponent = overflow_bits_count;
-
- bool zero_tail = true;
- while (true) {
- ++current;
- if (current == end || !isDigit(*current, radix)) break;
- zero_tail = zero_tail && *current == '0';
- exponent += radix_log_2;
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JunkStringValue();
- }
-
- int middle_value = (1 << (overflow_bits_count - 1));
- if (dropped_bits > middle_value) {
- number++; // Rounding up.
- } else if (dropped_bits == middle_value) {
- // Rounding to even to consistency with decimals: half-way case rounds
- // up if significant part is odd and down otherwise.
- if ((number & 1) != 0 || !zero_tail) {
- number++; // Rounding up.
- }
- }
-
- // Rounding up may cause overflow.
- if ((number & (static_cast<int64_t>(1) << 53)) != 0) {
- exponent++;
- number >>= 1;
- }
- break;
- }
- ++current;
- } while (current != end);
-
- ASSERT(number < ((int64_t)1 << 53));
- ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
-
- if (exponent == 0) {
- if (negative) {
- if (number == 0) return -0.0;
- number = -number;
- }
- return static_cast<double>(number);
- }
-
- ASSERT(number != 0);
- return ldexp(static_cast<double>(negative ? -number : number), exponent);
-}
-
-
-template <class Iterator, class EndMark>
-double InternalStringToInt(UnicodeCache* unicode_cache,
- Iterator current,
- EndMark end,
- int radix) {
- const bool allow_trailing_junk = true;
- const double empty_string_val = JunkStringValue();
-
- if (!AdvanceToNonspace(unicode_cache, &current, end)) {
- return empty_string_val;
- }
-
- bool negative = false;
- bool leading_zero = false;
-
- if (*current == '+') {
- // Ignore leading sign; skip following spaces.
- ++current;
- if (current == end) {
- return JunkStringValue();
- }
- } else if (*current == '-') {
- ++current;
- if (current == end) {
- return JunkStringValue();
- }
- negative = true;
- }
-
- if (radix == 0) {
- // Radix detection.
- radix = 10;
- if (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
- if (*current == 'x' || *current == 'X') {
- radix = 16;
- ++current;
- if (current == end) return JunkStringValue();
- } else {
- leading_zero = true;
- }
- }
- } else if (radix == 16) {
- if (*current == '0') {
- // Allow "0x" prefix.
- ++current;
- if (current == end) return SignedZero(negative);
- if (*current == 'x' || *current == 'X') {
- ++current;
- if (current == end) return JunkStringValue();
- } else {
- leading_zero = true;
- }
- }
- }
-
- if (radix < 2 || radix > 36) return JunkStringValue();
-
- // Skip leading zeros.
- while (*current == '0') {
- leading_zero = true;
- ++current;
- if (current == end) return SignedZero(negative);
- }
-
- if (!leading_zero && !isDigit(*current, radix)) {
- return JunkStringValue();
- }
-
- if (IsPowerOf2(radix)) {
- switch (radix) {
- case 2:
- return InternalStringToIntDouble<1>(
- unicode_cache, current, end, negative, allow_trailing_junk);
- case 4:
- return InternalStringToIntDouble<2>(
- unicode_cache, current, end, negative, allow_trailing_junk);
- case 8:
- return InternalStringToIntDouble<3>(
- unicode_cache, current, end, negative, allow_trailing_junk);
-
- case 16:
- return InternalStringToIntDouble<4>(
- unicode_cache, current, end, negative, allow_trailing_junk);
-
- case 32:
- return InternalStringToIntDouble<5>(
- unicode_cache, current, end, negative, allow_trailing_junk);
- default:
- UNREACHABLE();
- }
- }
-
- if (radix == 10) {
- // Parsing with strtod.
- const int kMaxSignificantDigits = 309; // Doubles are less than 1.8e308.
- // The buffer may contain up to kMaxSignificantDigits + 1 digits and a zero
- // end.
- const int kBufferSize = kMaxSignificantDigits + 2;
- char buffer[kBufferSize];
- int buffer_pos = 0;
- while (*current >= '0' && *current <= '9') {
- if (buffer_pos <= kMaxSignificantDigits) {
- // If the number has more than kMaxSignificantDigits it will be parsed
- // as infinity.
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos++] = static_cast<char>(*current);
- }
- ++current;
- if (current == end) break;
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JunkStringValue();
- }
-
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos] = '\0';
- Vector<const char> buffer_vector(buffer, buffer_pos);
- return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
- }
-
- // The following code causes accumulating rounding error for numbers greater
- // than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
- // 16, or 32, then mathInt may be an implementation-dependent approximation to
- // the mathematical integer value" (15.1.2.2).
-
- int lim_0 = '0' + (radix < 10 ? radix : 10);
- int lim_a = 'a' + (radix - 10);
- int lim_A = 'A' + (radix - 10);
-
- // NOTE: The code for computing the value may seem a bit complex at
- // first glance. It is structured to use 32-bit multiply-and-add
- // loops as long as possible to avoid loosing precision.
-
- double v = 0.0;
- bool done = false;
- do {
- // Parse the longest part of the string starting at index j
- // possible while keeping the multiplier, and thus the part
- // itself, within 32 bits.
- unsigned int part = 0, multiplier = 1;
- while (true) {
- int d;
- if (*current >= '0' && *current < lim_0) {
- d = *current - '0';
- } else if (*current >= 'a' && *current < lim_a) {
- d = *current - 'a' + 10;
- } else if (*current >= 'A' && *current < lim_A) {
- d = *current - 'A' + 10;
- } else {
- done = true;
- break;
- }
-
- // Update the value of the part as long as the multiplier fits
- // in 32 bits. When we can't guarantee that the next iteration
- // will not overflow the multiplier, we stop parsing the part
- // by leaving the loop.
- const unsigned int kMaximumMultiplier = 0xffffffffU / 36;
- uint32_t m = multiplier * radix;
- if (m > kMaximumMultiplier) break;
- part = part * radix + d;
- multiplier = m;
- ASSERT(multiplier > part);
-
- ++current;
- if (current == end) {
- done = true;
- break;
- }
- }
-
- // Update the value and skip the part in the string.
- v = v * multiplier + part;
- } while (!done);
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JunkStringValue();
- }
-
- return negative ? -v : v;
-}
-
-
-// Converts a string to a double value. Assumes the Iterator supports
-// the following operations:
-// 1. current == end (other ops are not allowed), current != end.
-// 2. *current - gets the current character in the sequence.
-// 3. ++current (advances the position).
-template <class Iterator, class EndMark>
-double InternalStringToDouble(UnicodeCache* unicode_cache,
- Iterator current,
- EndMark end,
- int flags,
- double empty_string_val) {
- // To make sure that iterator dereferencing is valid the following
- // convention is used:
- // 1. Each '++current' statement is followed by check for equality to 'end'.
- // 2. If AdvanceToNonspace returned false then current == end.
- // 3. If 'current' becomes be equal to 'end' the function returns or goes to
- // 'parsing_done'.
- // 4. 'current' is not dereferenced after the 'parsing_done' label.
- // 5. Code before 'parsing_done' may rely on 'current != end'.
- if (!AdvanceToNonspace(unicode_cache, &current, end)) {
- return empty_string_val;
- }
-
- const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
-
- // The longest form of simplified number is: "-<significant digits>'.1eXXX\0".
- const int kBufferSize = kMaxSignificantDigits + 10;
- char buffer[kBufferSize]; // NOLINT: size is known at compile time.
- int buffer_pos = 0;
-
- // Exponent will be adjusted if insignificant digits of the integer part
- // or insignificant leading zeros of the fractional part are dropped.
- int exponent = 0;
- int significant_digits = 0;
- int insignificant_digits = 0;
- bool nonzero_digit_dropped = false;
-
- enum Sign {
- NONE,
- NEGATIVE,
- POSITIVE
- };
-
- Sign sign = NONE;
-
- if (*current == '+') {
- // Ignore leading sign.
- ++current;
- if (current == end) return JunkStringValue();
- sign = POSITIVE;
- } else if (*current == '-') {
- ++current;
- if (current == end) return JunkStringValue();
- sign = NEGATIVE;
- }
-
- static const char kInfinityString[] = "Infinity";
- if (*current == kInfinityString[0]) {
- if (!SubStringEquals(&current, end, kInfinityString)) {
- return JunkStringValue();
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JunkStringValue();
- }
-
- ASSERT(buffer_pos == 0);
- return (sign == NEGATIVE) ? -V8_INFINITY : V8_INFINITY;
- }
-
- bool leading_zero = false;
- if (*current == '0') {
- ++current;
- if (current == end) return SignedZero(sign == NEGATIVE);
-
- leading_zero = true;
-
- // It could be hexadecimal value.
- if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
- ++current;
- if (current == end || !isDigit(*current, 16) || sign != NONE) {
- return JunkStringValue(); // "0x".
- }
-
- return InternalStringToIntDouble<4>(unicode_cache,
- current,
- end,
- false,
- allow_trailing_junk);
- }
-
- // Ignore leading zeros in the integer part.
- while (*current == '0') {
- ++current;
- if (current == end) return SignedZero(sign == NEGATIVE);
- }
- }
-
- bool octal = leading_zero && (flags & ALLOW_OCTALS) != 0;
-
- // Copy significant digits of the integer part (if any) to the buffer.
- while (*current >= '0' && *current <= '9') {
- if (significant_digits < kMaxSignificantDigits) {
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos++] = static_cast<char>(*current);
- significant_digits++;
- // Will later check if it's an octal in the buffer.
- } else {
- insignificant_digits++; // Move the digit into the exponential part.
- nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
- }
- octal = octal && *current < '8';
- ++current;
- if (current == end) goto parsing_done;
- }
-
- if (significant_digits == 0) {
- octal = false;
- }
-
- if (*current == '.') {
- if (octal && !allow_trailing_junk) return JunkStringValue();
- if (octal) goto parsing_done;
-
- ++current;
- if (current == end) {
- if (significant_digits == 0 && !leading_zero) {
- return JunkStringValue();
- } else {
- goto parsing_done;
- }
- }
-
- if (significant_digits == 0) {
- // octal = false;
- // Integer part consists of 0 or is absent. Significant digits start after
- // leading zeros (if any).
- while (*current == '0') {
- ++current;
- if (current == end) return SignedZero(sign == NEGATIVE);
- exponent--; // Move this 0 into the exponent.
- }
- }
-
- // There is a fractional part. We don't emit a '.', but adjust the exponent
- // instead.
- while (*current >= '0' && *current <= '9') {
- if (significant_digits < kMaxSignificantDigits) {
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos++] = static_cast<char>(*current);
- significant_digits++;
- exponent--;
- } else {
- // Ignore insignificant digits in the fractional part.
- nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
- }
- ++current;
- if (current == end) goto parsing_done;
- }
- }
-
- if (!leading_zero && exponent == 0 && significant_digits == 0) {
- // If leading_zeros is true then the string contains zeros.
- // If exponent < 0 then string was [+-]\.0*...
- // If significant_digits != 0 the string is not equal to 0.
- // Otherwise there are no digits in the string.
- return JunkStringValue();
- }
-
- // Parse exponential part.
- if (*current == 'e' || *current == 'E') {
- if (octal) return JunkStringValue();
- ++current;
- if (current == end) {
- if (allow_trailing_junk) {
- goto parsing_done;
- } else {
- return JunkStringValue();
- }
- }
- char sign = '+';
- if (*current == '+' || *current == '-') {
- sign = static_cast<char>(*current);
- ++current;
- if (current == end) {
- if (allow_trailing_junk) {
- goto parsing_done;
- } else {
- return JunkStringValue();
- }
- }
- }
-
- if (current == end || *current < '0' || *current > '9') {
- if (allow_trailing_junk) {
- goto parsing_done;
- } else {
- return JunkStringValue();
- }
- }
-
- const int max_exponent = INT_MAX / 2;
- ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
- int num = 0;
- do {
- // Check overflow.
- int digit = *current - '0';
- if (num >= max_exponent / 10
- && !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
- num = max_exponent;
- } else {
- num = num * 10 + digit;
- }
- ++current;
- } while (current != end && *current >= '0' && *current <= '9');
-
- exponent += (sign == '-' ? -num : num);
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JunkStringValue();
- }
-
- parsing_done:
- exponent += insignificant_digits;
-
- if (octal) {
- return InternalStringToIntDouble<3>(unicode_cache,
- buffer,
- buffer + buffer_pos,
- sign == NEGATIVE,
- allow_trailing_junk);
- }
-
- if (nonzero_digit_dropped) {
- buffer[buffer_pos++] = '1';
- exponent--;
- }
-
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos] = '\0';
-
- double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
- return (sign == NEGATIVE) ? -converted : converted;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_CONVERSIONS_INL_H_
diff --git a/src/3rdparty/v8/src/conversions.cc b/src/3rdparty/v8/src/conversions.cc
deleted file mode 100644
index 5bfddd0..0000000
--- a/src/3rdparty/v8/src/conversions.cc
+++ /dev/null
@@ -1,432 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-#include <math.h>
-#include <limits.h>
-
-#include "conversions-inl.h"
-#include "dtoa.h"
-#include "strtod.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-
-double StringToDouble(UnicodeCache* unicode_cache,
- const char* str, int flags, double empty_string_val) {
- const char* end = str + StrLength(str);
- return InternalStringToDouble(unicode_cache, str, end, flags,
- empty_string_val);
-}
-
-
-double StringToDouble(UnicodeCache* unicode_cache,
- Vector<const char> str,
- int flags,
- double empty_string_val) {
- const char* end = str.start() + str.length();
- return InternalStringToDouble(unicode_cache, str.start(), end, flags,
- empty_string_val);
-}
-
-double StringToDouble(UnicodeCache* unicode_cache,
- Vector<const uc16> str,
- int flags,
- double empty_string_val) {
- const uc16* end = str.start() + str.length();
- return InternalStringToDouble(unicode_cache, str.start(), end, flags,
- empty_string_val);
-}
-
-
-const char* DoubleToCString(double v, Vector<char> buffer) {
- switch (fpclassify(v)) {
- case FP_NAN: return "NaN";
- case FP_INFINITE: return (v < 0.0 ? "-Infinity" : "Infinity");
- case FP_ZERO: return "0";
- default: {
- SimpleStringBuilder builder(buffer.start(), buffer.length());
- int decimal_point;
- int sign;
- const int kV8DtoaBufferCapacity = kBase10MaximalLength + 1;
- char decimal_rep[kV8DtoaBufferCapacity];
- int length;
-
- DoubleToAscii(v, DTOA_SHORTEST, 0,
- Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
- &sign, &length, &decimal_point);
-
- if (sign) builder.AddCharacter('-');
-
- if (length <= decimal_point && decimal_point <= 21) {
- // ECMA-262 section 9.8.1 step 6.
- builder.AddString(decimal_rep);
- builder.AddPadding('0', decimal_point - length);
-
- } else if (0 < decimal_point && decimal_point <= 21) {
- // ECMA-262 section 9.8.1 step 7.
- builder.AddSubstring(decimal_rep, decimal_point);
- builder.AddCharacter('.');
- builder.AddString(decimal_rep + decimal_point);
-
- } else if (decimal_point <= 0 && decimal_point > -6) {
- // ECMA-262 section 9.8.1 step 8.
- builder.AddString("0.");
- builder.AddPadding('0', -decimal_point);
- builder.AddString(decimal_rep);
-
- } else {
- // ECMA-262 section 9.8.1 step 9 and 10 combined.
- builder.AddCharacter(decimal_rep[0]);
- if (length != 1) {
- builder.AddCharacter('.');
- builder.AddString(decimal_rep + 1);
- }
- builder.AddCharacter('e');
- builder.AddCharacter((decimal_point >= 0) ? '+' : '-');
- int exponent = decimal_point - 1;
- if (exponent < 0) exponent = -exponent;
- builder.AddDecimalInteger(exponent);
- }
- return builder.Finalize();
- }
- }
-}
-
-
-const char* IntToCString(int n, Vector<char> buffer) {
- bool negative = false;
- if (n < 0) {
- // We must not negate the most negative int.
- if (n == kMinInt) return DoubleToCString(n, buffer);
- negative = true;
- n = -n;
- }
- // Build the string backwards from the least significant digit.
- int i = buffer.length();
- buffer[--i] = '\0';
- do {
- buffer[--i] = '0' + (n % 10);
- n /= 10;
- } while (n);
- if (negative) buffer[--i] = '-';
- return buffer.start() + i;
-}
-
-
-char* DoubleToFixedCString(double value, int f) {
- const int kMaxDigitsBeforePoint = 21;
- const double kFirstNonFixed = 1e21;
- const int kMaxDigitsAfterPoint = 20;
- ASSERT(f >= 0);
- ASSERT(f <= kMaxDigitsAfterPoint);
-
- bool negative = false;
- double abs_value = value;
- if (value < 0) {
- abs_value = -value;
- negative = true;
- }
-
- // If abs_value has more than kMaxDigitsBeforePoint digits before the point
- // use the non-fixed conversion routine.
- if (abs_value >= kFirstNonFixed) {
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- return StrDup(DoubleToCString(value, buffer));
- }
-
- // Find a sufficiently precise decimal representation of n.
- int decimal_point;
- int sign;
- // Add space for the '\0' byte.
- const int kDecimalRepCapacity =
- kMaxDigitsBeforePoint + kMaxDigitsAfterPoint + 1;
- char decimal_rep[kDecimalRepCapacity];
- int decimal_rep_length;
- DoubleToAscii(value, DTOA_FIXED, f,
- Vector<char>(decimal_rep, kDecimalRepCapacity),
- &sign, &decimal_rep_length, &decimal_point);
-
- // Create a representation that is padded with zeros if needed.
- int zero_prefix_length = 0;
- int zero_postfix_length = 0;
-
- if (decimal_point <= 0) {
- zero_prefix_length = -decimal_point + 1;
- decimal_point = 1;
- }
-
- if (zero_prefix_length + decimal_rep_length < decimal_point + f) {
- zero_postfix_length = decimal_point + f - decimal_rep_length -
- zero_prefix_length;
- }
-
- unsigned rep_length =
- zero_prefix_length + decimal_rep_length + zero_postfix_length;
- SimpleStringBuilder rep_builder(rep_length + 1);
- rep_builder.AddPadding('0', zero_prefix_length);
- rep_builder.AddString(decimal_rep);
- rep_builder.AddPadding('0', zero_postfix_length);
- char* rep = rep_builder.Finalize();
-
- // Create the result string by appending a minus and putting in a
- // decimal point if needed.
- unsigned result_size = decimal_point + f + 2;
- SimpleStringBuilder builder(result_size + 1);
- if (negative) builder.AddCharacter('-');
- builder.AddSubstring(rep, decimal_point);
- if (f > 0) {
- builder.AddCharacter('.');
- builder.AddSubstring(rep + decimal_point, f);
- }
- DeleteArray(rep);
- return builder.Finalize();
-}
-
-
-static char* CreateExponentialRepresentation(char* decimal_rep,
- int exponent,
- bool negative,
- int significant_digits) {
- bool negative_exponent = false;
- if (exponent < 0) {
- negative_exponent = true;
- exponent = -exponent;
- }
-
- // Leave room in the result for appending a minus, for a period, the
- // letter 'e', a minus or a plus depending on the exponent, and a
- // three digit exponent.
- unsigned result_size = significant_digits + 7;
- SimpleStringBuilder builder(result_size + 1);
-
- if (negative) builder.AddCharacter('-');
- builder.AddCharacter(decimal_rep[0]);
- if (significant_digits != 1) {
- builder.AddCharacter('.');
- builder.AddString(decimal_rep + 1);
- int rep_length = StrLength(decimal_rep);
- builder.AddPadding('0', significant_digits - rep_length);
- }
-
- builder.AddCharacter('e');
- builder.AddCharacter(negative_exponent ? '-' : '+');
- builder.AddDecimalInteger(exponent);
- return builder.Finalize();
-}
-
-
-
-char* DoubleToExponentialCString(double value, int f) {
- const int kMaxDigitsAfterPoint = 20;
- // f might be -1 to signal that f was undefined in JavaScript.
- ASSERT(f >= -1 && f <= kMaxDigitsAfterPoint);
-
- bool negative = false;
- if (value < 0) {
- value = -value;
- negative = true;
- }
-
- // Find a sufficiently precise decimal representation of n.
- int decimal_point;
- int sign;
- // f corresponds to the digits after the point. There is always one digit
- // before the point. The number of requested_digits equals hence f + 1.
- // And we have to add one character for the null-terminator.
- const int kV8DtoaBufferCapacity = kMaxDigitsAfterPoint + 1 + 1;
- // Make sure that the buffer is big enough, even if we fall back to the
- // shortest representation (which happens when f equals -1).
- ASSERT(kBase10MaximalLength <= kMaxDigitsAfterPoint + 1);
- char decimal_rep[kV8DtoaBufferCapacity];
- int decimal_rep_length;
-
- if (f == -1) {
- DoubleToAscii(value, DTOA_SHORTEST, 0,
- Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
- &sign, &decimal_rep_length, &decimal_point);
- f = decimal_rep_length - 1;
- } else {
- DoubleToAscii(value, DTOA_PRECISION, f + 1,
- Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
- &sign, &decimal_rep_length, &decimal_point);
- }
- ASSERT(decimal_rep_length > 0);
- ASSERT(decimal_rep_length <= f + 1);
-
- int exponent = decimal_point - 1;
- char* result =
- CreateExponentialRepresentation(decimal_rep, exponent, negative, f+1);
-
- return result;
-}
-
-
-char* DoubleToPrecisionCString(double value, int p) {
- const int kMinimalDigits = 1;
- const int kMaximalDigits = 21;
- ASSERT(p >= kMinimalDigits && p <= kMaximalDigits);
- USE(kMinimalDigits);
-
- bool negative = false;
- if (value < 0) {
- value = -value;
- negative = true;
- }
-
- // Find a sufficiently precise decimal representation of n.
- int decimal_point;
- int sign;
- // Add one for the terminating null character.
- const int kV8DtoaBufferCapacity = kMaximalDigits + 1;
- char decimal_rep[kV8DtoaBufferCapacity];
- int decimal_rep_length;
-
- DoubleToAscii(value, DTOA_PRECISION, p,
- Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
- &sign, &decimal_rep_length, &decimal_point);
- ASSERT(decimal_rep_length <= p);
-
- int exponent = decimal_point - 1;
-
- char* result = NULL;
-
- if (exponent < -6 || exponent >= p) {
- result =
- CreateExponentialRepresentation(decimal_rep, exponent, negative, p);
- } else {
- // Use fixed notation.
- //
- // Leave room in the result for appending a minus, a period and in
- // the case where decimal_point is not positive for a zero in
- // front of the period.
- unsigned result_size = (decimal_point <= 0)
- ? -decimal_point + p + 3
- : p + 2;
- SimpleStringBuilder builder(result_size + 1);
- if (negative) builder.AddCharacter('-');
- if (decimal_point <= 0) {
- builder.AddString("0.");
- builder.AddPadding('0', -decimal_point);
- builder.AddString(decimal_rep);
- builder.AddPadding('0', p - decimal_rep_length);
- } else {
- const int m = Min(decimal_rep_length, decimal_point);
- builder.AddSubstring(decimal_rep, m);
- builder.AddPadding('0', decimal_point - decimal_rep_length);
- if (decimal_point < p) {
- builder.AddCharacter('.');
- const int extra = negative ? 2 : 1;
- if (decimal_rep_length > decimal_point) {
- const int len = StrLength(decimal_rep + decimal_point);
- const int n = Min(len, p - (builder.position() - extra));
- builder.AddSubstring(decimal_rep + decimal_point, n);
- }
- builder.AddPadding('0', extra + (p - builder.position()));
- }
- }
- result = builder.Finalize();
- }
-
- return result;
-}
-
-
-char* DoubleToRadixCString(double value, int radix) {
- ASSERT(radix >= 2 && radix <= 36);
-
- // Character array used for conversion.
- static const char chars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
-
- // Buffer for the integer part of the result. 1024 chars is enough
- // for max integer value in radix 2. We need room for a sign too.
- static const int kBufferSize = 1100;
- char integer_buffer[kBufferSize];
- integer_buffer[kBufferSize - 1] = '\0';
-
- // Buffer for the decimal part of the result. We only generate up
- // to kBufferSize - 1 chars for the decimal part.
- char decimal_buffer[kBufferSize];
- decimal_buffer[kBufferSize - 1] = '\0';
-
- // Make sure the value is positive.
- bool is_negative = value < 0.0;
- if (is_negative) value = -value;
-
- // Get the integer part and the decimal part.
- double integer_part = floor(value);
- double decimal_part = value - integer_part;
-
- // Convert the integer part starting from the back. Always generate
- // at least one digit.
- int integer_pos = kBufferSize - 2;
- do {
- integer_buffer[integer_pos--] =
- chars[static_cast<int>(fmod(integer_part, radix))];
- integer_part /= radix;
- } while (integer_part >= 1.0);
- // Sanity check.
- ASSERT(integer_pos > 0);
- // Add sign if needed.
- if (is_negative) integer_buffer[integer_pos--] = '-';
-
- // Convert the decimal part. Repeatedly multiply by the radix to
- // generate the next char. Never generate more than kBufferSize - 1
- // chars.
- //
- // TODO(1093998): We will often generate a full decimal_buffer of
- // chars because hitting zero will often not happen. The right
- // solution would be to continue until the string representation can
- // be read back and yield the original value. To implement this
- // efficiently, we probably have to modify dtoa.
- int decimal_pos = 0;
- while ((decimal_part > 0.0) && (decimal_pos < kBufferSize - 1)) {
- decimal_part *= radix;
- decimal_buffer[decimal_pos++] =
- chars[static_cast<int>(floor(decimal_part))];
- decimal_part -= floor(decimal_part);
- }
- decimal_buffer[decimal_pos] = '\0';
-
- // Compute the result size.
- int integer_part_size = kBufferSize - 2 - integer_pos;
- // Make room for zero termination.
- unsigned result_size = integer_part_size + decimal_pos;
- // If the number has a decimal part, leave room for the period.
- if (decimal_pos > 0) result_size++;
- // Allocate result and fill in the parts.
- SimpleStringBuilder builder(result_size + 1);
- builder.AddSubstring(integer_buffer + integer_pos + 1, integer_part_size);
- if (decimal_pos > 0) builder.AddCharacter('.');
- builder.AddSubstring(decimal_buffer, decimal_pos);
- return builder.Finalize();
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/conversions.h b/src/3rdparty/v8/src/conversions.h
deleted file mode 100644
index 1fbb5f1..0000000
--- a/src/3rdparty/v8/src/conversions.h
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CONVERSIONS_H_
-#define V8_CONVERSIONS_H_
-
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-class UnicodeCache;
-
-// Maximum number of significant digits in decimal representation.
-// The longest possible double in decimal representation is
-// (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074
-// (768 digits). If we parse a number whose first digits are equal to a
-// mean of 2 adjacent doubles (that could have up to 769 digits) the result
-// must be rounded to the bigger one unless the tail consists of zeros, so
-// we don't need to preserve all the digits.
-const int kMaxSignificantDigits = 772;
-
-
-inline bool isDigit(int x, int radix) {
- return (x >= '0' && x <= '9' && x < '0' + radix)
- || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
- || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
-}
-
-
-// The fast double-to-(unsigned-)int conversion routine does not guarantee
-// rounding towards zero.
-// For NaN and values outside the int range, return INT_MIN or INT_MAX.
-inline int FastD2IChecked(double x) {
- if (!(x >= INT_MIN)) return INT_MIN; // Negation to catch NaNs.
- if (x > INT_MAX) return INT_MAX;
- return static_cast<int>(x);
-}
-
-
-// The fast double-to-(unsigned-)int conversion routine does not guarantee
-// rounding towards zero.
-// The result is unspecified if x is infinite or NaN, or if the rounded
-// integer value is outside the range of type int.
-inline int FastD2I(double x) {
- return static_cast<int>(x);
-}
-
-inline unsigned int FastD2UI(double x);
-
-
-inline double FastI2D(int x) {
- // There is no rounding involved in converting an integer to a
- // double, so this code should compile to a few instructions without
- // any FPU pipeline stalls.
- return static_cast<double>(x);
-}
-
-
-inline double FastUI2D(unsigned x) {
- // There is no rounding involved in converting an unsigned integer to a
- // double, so this code should compile to a few instructions without
- // any FPU pipeline stalls.
- return static_cast<double>(x);
-}
-
-
-// This function should match the exact semantics of ECMA-262 9.4.
-inline double DoubleToInteger(double x);
-
-
-// This function should match the exact semantics of ECMA-262 9.5.
-inline int32_t DoubleToInt32(double x);
-
-
-// This function should match the exact semantics of ECMA-262 9.6.
-inline uint32_t DoubleToUint32(double x) {
- return static_cast<uint32_t>(DoubleToInt32(x));
-}
-
-
-// Enumeration for allowing octals and ignoring junk when converting
-// strings to numbers.
-enum ConversionFlags {
- NO_FLAGS = 0,
- ALLOW_HEX = 1,
- ALLOW_OCTALS = 2,
- ALLOW_TRAILING_JUNK = 4
-};
-
-
-// Converts a string into a double value according to ECMA-262 9.3.1
-double StringToDouble(UnicodeCache* unicode_cache,
- Vector<const char> str,
- int flags,
- double empty_string_val = 0);
-double StringToDouble(UnicodeCache* unicode_cache,
- Vector<const uc16> str,
- int flags,
- double empty_string_val = 0);
-// This version expects a zero-terminated character array.
-double StringToDouble(UnicodeCache* unicode_cache,
- const char* str,
- int flags,
- double empty_string_val = 0);
-
-const int kDoubleToCStringMinBufferSize = 100;
-
-// Converts a double to a string value according to ECMA-262 9.8.1.
-// The buffer should be large enough for any floating point number.
-// 100 characters is enough.
-const char* DoubleToCString(double value, Vector<char> buffer);
-
-// Convert an int to a null-terminated string. The returned string is
-// located inside the buffer, but not necessarily at the start.
-const char* IntToCString(int n, Vector<char> buffer);
-
-// Additional number to string conversions for the number type.
-// The caller is responsible for calling free on the returned pointer.
-char* DoubleToFixedCString(double value, int f);
-char* DoubleToExponentialCString(double value, int f);
-char* DoubleToPrecisionCString(double value, int f);
-char* DoubleToRadixCString(double value, int radix);
-
-} } // namespace v8::internal
-
-#endif // V8_CONVERSIONS_H_
diff --git a/src/3rdparty/v8/src/counters.cc b/src/3rdparty/v8/src/counters.cc
deleted file mode 100644
index 7c8265e..0000000
--- a/src/3rdparty/v8/src/counters.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "counters.h"
-#include "isolate.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-StatsTable::StatsTable()
- : lookup_function_(NULL),
- create_histogram_function_(NULL),
- add_histogram_sample_function_(NULL) {}
-
-
-int* StatsCounter::FindLocationInStatsTable() const {
- return Isolate::Current()->stats_table()->FindLocation(name_);
-}
-
-
-// Start the timer.
-void StatsCounterTimer::Start() {
- if (!counter_.Enabled())
- return;
- stop_time_ = 0;
- start_time_ = OS::Ticks();
-}
-
-// Stop the timer and record the results.
-void StatsCounterTimer::Stop() {
- if (!counter_.Enabled())
- return;
- stop_time_ = OS::Ticks();
-
- // Compute the delta between start and stop, in milliseconds.
- int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
- counter_.Increment(milliseconds);
-}
-
-void Histogram::AddSample(int sample) {
- if (Enabled()) {
- Isolate::Current()->stats_table()->AddHistogramSample(histogram_, sample);
- }
-}
-
-void* Histogram::CreateHistogram() const {
- return Isolate::Current()->stats_table()->
- CreateHistogram(name_, min_, max_, num_buckets_);
-}
-
-// Start the timer.
-void HistogramTimer::Start() {
- if (histogram_.Enabled()) {
- stop_time_ = 0;
- start_time_ = OS::Ticks();
- }
- if (FLAG_log_internal_timer_events) {
- LOG(Isolate::Current(), TimerEvent(Logger::START, histogram_.name_));
- }
-}
-
-// Stop the timer and record the results.
-void HistogramTimer::Stop() {
- if (histogram_.Enabled()) {
- stop_time_ = OS::Ticks();
- // Compute the delta between start and stop, in milliseconds.
- int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
- histogram_.AddSample(milliseconds);
- }
- if (FLAG_log_internal_timer_events) {
- LOG(Isolate::Current(), TimerEvent(Logger::END, histogram_.name_));
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/counters.h b/src/3rdparty/v8/src/counters.h
deleted file mode 100644
index 577280f..0000000
--- a/src/3rdparty/v8/src/counters.h
+++ /dev/null
@@ -1,282 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_COUNTERS_H_
-#define V8_COUNTERS_H_
-
-#include "../include/v8.h"
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// StatsCounters is an interface for plugging into external
-// counters for monitoring. Counters can be looked up and
-// manipulated by name.
-
-class StatsTable {
- public:
- // Register an application-defined function where
- // counters can be looked up.
- void SetCounterFunction(CounterLookupCallback f) {
- lookup_function_ = f;
- }
-
- // Register an application-defined function to create
- // a histogram for passing to the AddHistogramSample function
- void SetCreateHistogramFunction(CreateHistogramCallback f) {
- create_histogram_function_ = f;
- }
-
- // Register an application-defined function to add a sample
- // to a histogram created with CreateHistogram function
- void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
- add_histogram_sample_function_ = f;
- }
-
- bool HasCounterFunction() const {
- return lookup_function_ != NULL;
- }
-
- // Lookup the location of a counter by name. If the lookup
- // is successful, returns a non-NULL pointer for writing the
- // value of the counter. Each thread calling this function
- // may receive a different location to store it's counter.
- // The return value must not be cached and re-used across
- // threads, although a single thread is free to cache it.
- int* FindLocation(const char* name) {
- if (!lookup_function_) return NULL;
- return lookup_function_(name);
- }
-
- // Create a histogram by name. If the create is successful,
- // returns a non-NULL pointer for use with AddHistogramSample
- // function. min and max define the expected minimum and maximum
- // sample values. buckets is the maximum number of buckets
- // that the samples will be grouped into.
- void* CreateHistogram(const char* name,
- int min,
- int max,
- size_t buckets) {
- if (!create_histogram_function_) return NULL;
- return create_histogram_function_(name, min, max, buckets);
- }
-
- // Add a sample to a histogram created with the CreateHistogram
- // function.
- void AddHistogramSample(void* histogram, int sample) {
- if (!add_histogram_sample_function_) return;
- return add_histogram_sample_function_(histogram, sample);
- }
-
- private:
- StatsTable();
-
- CounterLookupCallback lookup_function_;
- CreateHistogramCallback create_histogram_function_;
- AddHistogramSampleCallback add_histogram_sample_function_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(StatsTable);
-};
-
-// StatsCounters are dynamically created values which can be tracked in
-// the StatsTable. They are designed to be lightweight to create and
-// easy to use.
-//
-// Internally, a counter represents a value in a row of a StatsTable.
-// The row has a 32bit value for each process/thread in the table and also
-// a name (stored in the table metadata). Since the storage location can be
-// thread-specific, this class cannot be shared across threads.
-//
-// This class is designed to be POD initialized. It will be registered with
-// the counter system on first use. For example:
-// StatsCounter c = { "c:myctr", NULL, false };
-struct StatsCounter {
- const char* name_;
- int* ptr_;
- bool lookup_done_;
-
- // Sets the counter to a specific value.
- void Set(int value) {
- int* loc = GetPtr();
- if (loc) *loc = value;
- }
-
- // Increments the counter.
- void Increment() {
- int* loc = GetPtr();
- if (loc) (*loc)++;
- }
-
- void Increment(int value) {
- int* loc = GetPtr();
- if (loc)
- (*loc) += value;
- }
-
- // Decrements the counter.
- void Decrement() {
- int* loc = GetPtr();
- if (loc) (*loc)--;
- }
-
- void Decrement(int value) {
- int* loc = GetPtr();
- if (loc) (*loc) -= value;
- }
-
- // Is this counter enabled?
- // Returns false if table is full.
- bool Enabled() {
- return GetPtr() != NULL;
- }
-
- // Get the internal pointer to the counter. This is used
- // by the code generator to emit code that manipulates a
- // given counter without calling the runtime system.
- int* GetInternalPointer() {
- int* loc = GetPtr();
- ASSERT(loc != NULL);
- return loc;
- }
-
- protected:
- // Returns the cached address of this counter location.
- int* GetPtr() {
- if (lookup_done_) return ptr_;
- lookup_done_ = true;
- ptr_ = FindLocationInStatsTable();
- return ptr_;
- }
-
- private:
- int* FindLocationInStatsTable() const;
-};
-
-// StatsCounterTimer t = { { L"t:foo", NULL, false }, 0, 0 };
-struct StatsCounterTimer {
- StatsCounter counter_;
-
- int64_t start_time_;
- int64_t stop_time_;
-
- // Start the timer.
- void Start();
-
- // Stop the timer and record the results.
- void Stop();
-
- // Returns true if the timer is running.
- bool Running() {
- return counter_.Enabled() && start_time_ != 0 && stop_time_ == 0;
- }
-};
-
-// A Histogram represents a dynamically created histogram in the StatsTable.
-//
-// This class is designed to be POD initialized. It will be registered with
-// the histogram system on first use. For example:
-// Histogram h = { "myhist", 0, 10000, 50, NULL, false };
-struct Histogram {
- const char* name_;
- int min_;
- int max_;
- int num_buckets_;
- void* histogram_;
- bool lookup_done_;
-
- // Add a single sample to this histogram.
- void AddSample(int sample);
-
- // Returns true if this histogram is enabled.
- bool Enabled() {
- return GetHistogram() != NULL;
- }
-
- // Reset the cached internal pointer.
- void Reset() {
- lookup_done_ = false;
- }
-
- protected:
- // Returns the handle to the histogram.
- void* GetHistogram() {
- if (!lookup_done_) {
- lookup_done_ = true;
- histogram_ = CreateHistogram();
- }
- return histogram_;
- }
-
- private:
- void* CreateHistogram() const;
-};
-
-// A HistogramTimer allows distributions of results to be created
-// HistogramTimer t = { {L"foo", 0, 10000, 50, NULL, false}, 0, 0 };
-struct HistogramTimer {
- Histogram histogram_;
-
- int64_t start_time_;
- int64_t stop_time_;
-
- // Start the timer.
- void Start();
-
- // Stop the timer and record the results.
- void Stop();
-
- // Returns true if the timer is running.
- bool Running() {
- return histogram_.Enabled() && (start_time_ != 0) && (stop_time_ == 0);
- }
-
- void Reset() {
- histogram_.Reset();
- }
-};
-
-// Helper class for scoping a HistogramTimer.
-class HistogramTimerScope BASE_EMBEDDED {
- public:
- explicit HistogramTimerScope(HistogramTimer* timer) :
- timer_(timer) {
- timer_->Start();
- }
- ~HistogramTimerScope() {
- timer_->Stop();
- }
- private:
- HistogramTimer* timer_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_COUNTERS_H_
diff --git a/src/3rdparty/v8/src/cpu-profiler-inl.h b/src/3rdparty/v8/src/cpu-profiler-inl.h
deleted file mode 100644
index 4982197..0000000
--- a/src/3rdparty/v8/src/cpu-profiler-inl.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CPU_PROFILER_INL_H_
-#define V8_CPU_PROFILER_INL_H_
-
-#include "cpu-profiler.h"
-
-#include <new>
-#include "circular-queue-inl.h"
-#include "profile-generator-inl.h"
-#include "unbound-queue-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->AddCode(start, entry, size);
- if (shared != NULL) {
- entry->set_shared_id(code_map->GetSharedId(shared));
- }
-}
-
-
-void CodeMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->MoveCode(from, to);
-}
-
-
-void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->MoveCode(from, to);
-}
-
-
-TickSample* ProfilerEventsProcessor::TickSampleEvent() {
- generator_->Tick();
- TickSampleEventRecord* evt =
- new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
- return &evt->sample;
-}
-
-
-bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
- Logger::LogEventsAndTags tag) {
- return FLAG_prof_browser_mode
- && (tag != Logger::CALLBACK_TAG
- && tag != Logger::FUNCTION_TAG
- && tag != Logger::LAZY_COMPILE_TAG
- && tag != Logger::REG_EXP_TAG
- && tag != Logger::SCRIPT_TAG);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_CPU_PROFILER_INL_H_
diff --git a/src/3rdparty/v8/src/cpu-profiler.cc b/src/3rdparty/v8/src/cpu-profiler.cc
deleted file mode 100644
index 3d5e697..0000000
--- a/src/3rdparty/v8/src/cpu-profiler.cc
+++ /dev/null
@@ -1,616 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cpu-profiler-inl.h"
-
-#include "frames-inl.h"
-#include "hashmap.h"
-#include "log-inl.h"
-#include "vm-state-inl.h"
-
-#include "../include/v8-profiler.h"
-
-namespace v8 {
-namespace internal {
-
-static const int kEventsBufferSize = 256 * KB;
-static const int kTickSamplesBufferChunkSize = 64 * KB;
-static const int kTickSamplesBufferChunksCount = 16;
-static const int kProfilerStackSize = 64 * KB;
-
-
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
- Sampler* sampler,
- int period_in_useconds)
- : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
- generator_(generator),
- sampler_(sampler),
- running_(true),
- period_in_useconds_(period_in_useconds),
- ticks_buffer_(sizeof(TickSampleEventRecord),
- kTickSamplesBufferChunkSize,
- kTickSamplesBufferChunksCount,
- !Sampler::CanSampleOnProfilerEventsProcessorThread()),
- enqueue_order_(0) {
-}
-
-
-void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
- const char* prefix,
- String* name,
- Address start) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, prefix, name);
- rec->size = 1;
- rec->shared = NULL;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
- String* name,
- String* resource_name,
- int line_number,
- Address start,
- unsigned size,
- Address shared) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
- rec->size = size;
- rec->shared = shared;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
- const char* name,
- Address start,
- unsigned size) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, name);
- rec->size = size;
- rec->shared = NULL;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
- int args_count,
- Address start,
- unsigned size) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, args_count);
- rec->size = size;
- rec->shared = NULL;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeMoveEvent(Address from, Address to) {
- CodeEventsContainer evt_rec;
- CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
- rec->type = CodeEventRecord::CODE_MOVE;
- rec->order = ++enqueue_order_;
- rec->from = from;
- rec->to = to;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::SharedFunctionInfoMoveEvent(Address from,
- Address to) {
- CodeEventsContainer evt_rec;
- SharedFunctionInfoMoveEventRecord* rec =
- &evt_rec.SharedFunctionInfoMoveEventRecord_;
- rec->type = CodeEventRecord::SHARED_FUNC_MOVE;
- rec->order = ++enqueue_order_;
- rec->from = from;
- rec->to = to;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::RegExpCodeCreateEvent(
- Logger::LogEventsAndTags tag,
- const char* prefix,
- String* name,
- Address start,
- unsigned size) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, prefix, name);
- rec->size = size;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::AddCurrentStack() {
- TickSampleEventRecord record(enqueue_order_);
- TickSample* sample = &record.sample;
- Isolate* isolate = Isolate::Current();
- sample->state = isolate->current_vm_state();
- sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
- for (StackTraceFrameIterator it(isolate);
- !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
- it.Advance()) {
- sample->stack[sample->frames_count++] = it.frame()->pc();
- }
- ticks_from_vm_buffer_.Enqueue(record);
-}
-
-
-bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
- if (!events_buffer_.IsEmpty()) {
- CodeEventsContainer record;
- events_buffer_.Dequeue(&record);
- switch (record.generic.type) {
-#define PROFILER_TYPE_CASE(type, clss) \
- case CodeEventRecord::type: \
- record.clss##_.UpdateCodeMap(generator_->code_map()); \
- break;
-
- CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)
-
-#undef PROFILER_TYPE_CASE
- default: return true; // Skip record.
- }
- *dequeue_order = record.generic.order;
- return true;
- }
- return false;
-}
-
-
-bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
- while (true) {
- if (!ticks_from_vm_buffer_.IsEmpty()
- && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
- TickSampleEventRecord record;
- ticks_from_vm_buffer_.Dequeue(&record);
- generator_->RecordTickSample(record.sample);
- }
-
- const TickSampleEventRecord* rec =
- TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
- if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
- // Make a local copy of tick sample record to ensure that it won't
- // be modified as we are processing it. This is possible as the
- // sampler writes w/o any sync to the queue, so if the processor
- // will get far behind, a record may be modified right under its
- // feet.
- TickSampleEventRecord record = *rec;
- if (record.order == dequeue_order) {
- // A paranoid check to make sure that we don't get a memory overrun
- // in case of frames_count having a wild value.
- if (record.sample.frames_count < 0
- || record.sample.frames_count > TickSample::kMaxFramesCount)
- record.sample.frames_count = 0;
- generator_->RecordTickSample(record.sample);
- ticks_buffer_.FinishDequeue();
- } else {
- return true;
- }
- }
-}
-
-
-void ProfilerEventsProcessor::ProcessEventsAndDoSample(
- unsigned* dequeue_order) {
- int64_t stop_time = OS::Ticks() + period_in_useconds_;
- // Keep processing existing events until we need to do next sample.
- while (OS::Ticks() < stop_time) {
- if (ProcessTicks(*dequeue_order)) {
- // All ticks of the current dequeue_order are processed,
- // proceed to the next code event.
- ProcessCodeEvent(dequeue_order);
- }
- }
- // Schedule next sample. sampler_ is NULL in tests.
- if (sampler_)
- sampler_->DoSample();
-}
-
-
-void ProfilerEventsProcessor::ProcessEventsAndYield(unsigned* dequeue_order) {
- if (ProcessTicks(*dequeue_order)) {
- // All ticks of the current dequeue_order are processed,
- // proceed to the next code event.
- ProcessCodeEvent(dequeue_order);
- }
- YieldCPU();
-}
-
-
-void ProfilerEventsProcessor::Run() {
- unsigned dequeue_order = 0;
-
- while (running_) {
- if (Sampler::CanSampleOnProfilerEventsProcessorThread()) {
- ProcessEventsAndDoSample(&dequeue_order);
- } else {
- ProcessEventsAndYield(&dequeue_order);
- }
- }
-
- // Process remaining tick events.
- ticks_buffer_.FlushResidualRecords();
- // Perform processing until we have tick events, skip remaining code events.
- while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
-}
-
-
-void CpuProfiler::StartProfiling(const char* title) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
-}
-
-
-void CpuProfiler::StartProfiling(String* title) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
-}
-
-
-CpuProfile* CpuProfiler::StopProfiling(const char* title) {
- Isolate* isolate = Isolate::Current();
- return is_profiling(isolate) ?
- isolate->cpu_profiler()->StopCollectingProfile(title) : NULL;
-}
-
-
-CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
- Isolate* isolate = Isolate::Current();
- return is_profiling(isolate) ?
- isolate->cpu_profiler()->StopCollectingProfile(
- security_token, title) : NULL;
-}
-
-
-int CpuProfiler::GetProfilesCount() {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- // The count of profiles doesn't depend on a security token.
- return Isolate::Current()->cpu_profiler()->profiles_->Profiles(
- TokenEnumerator::kNoSecurityToken)->length();
-}
-
-
-CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
- const int token = profiler->token_enumerator_->GetTokenId(security_token);
- return profiler->profiles_->Profiles(token)->at(index);
-}
-
-
-CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
- const int token = profiler->token_enumerator_->GetTokenId(security_token);
- return profiler->profiles_->GetProfile(token, uid);
-}
-
-
-TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
- if (CpuProfiler::is_profiling(isolate)) {
- return isolate->cpu_profiler()->processor_->TickSampleEvent();
- } else {
- return NULL;
- }
-}
-
-
-void CpuProfiler::DeleteAllProfiles() {
- Isolate* isolate = Isolate::Current();
- ASSERT(isolate->cpu_profiler() != NULL);
- if (is_profiling(isolate)) {
- isolate->cpu_profiler()->StopProcessor();
- }
- isolate->cpu_profiler()->ResetProfiles();
-}
-
-
-void CpuProfiler::DeleteProfile(CpuProfile* profile) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- Isolate::Current()->cpu_profiler()->profiles_->RemoveProfile(profile);
- delete profile;
-}
-
-
-bool CpuProfiler::HasDetachedProfiles() {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- return Isolate::Current()->cpu_profiler()->profiles_->HasDetachedProfiles();
-}
-
-
-void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
- Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
- Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
-}
-
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, const char* comment) {
- Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
- tag, comment, code->address(), code->ExecutableSize());
-}
-
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, String* name) {
- Isolate* isolate = Isolate::Current();
- isolate->cpu_profiler()->processor_->CodeCreateEvent(
- tag,
- name,
- isolate->heap()->empty_string(),
- v8::CpuProfileNode::kNoLineNumberInfo,
- code->address(),
- code->ExecutableSize(),
- NULL);
-}
-
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* name) {
- Isolate* isolate = Isolate::Current();
- isolate->cpu_profiler()->processor_->CodeCreateEvent(
- tag,
- name,
- isolate->heap()->empty_string(),
- v8::CpuProfileNode::kNoLineNumberInfo,
- code->address(),
- code->ExecutableSize(),
- shared->address());
-}
-
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* source, int line) {
- Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
- tag,
- shared->DebugName(),
- source,
- line,
- code->address(),
- code->ExecutableSize(),
- shared->address());
-}
-
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, int args_count) {
- Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
- tag,
- args_count,
- code->address(),
- code->ExecutableSize());
-}
-
-
-void CpuProfiler::CodeMoveEvent(Address from, Address to) {
- Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to);
-}
-
-
-void CpuProfiler::CodeDeleteEvent(Address from) {
-}
-
-
-void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
- CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
- profiler->processor_->SharedFunctionInfoMoveEvent(from, to);
-}
-
-
-void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
- Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
- Logger::CALLBACK_TAG, "get ", name, entry_point);
-}
-
-
-void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
- Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent(
- Logger::REG_EXP_TAG,
- "RegExp: ",
- source,
- code->address(),
- code->ExecutableSize());
-}
-
-
-void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
- Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
- Logger::CALLBACK_TAG, "set ", name, entry_point);
-}
-
-
-CpuProfiler::CpuProfiler()
- : profiles_(new CpuProfilesCollection()),
- next_profile_uid_(1),
- token_enumerator_(new TokenEnumerator()),
- generator_(NULL),
- processor_(NULL),
- need_to_stop_sampler_(false),
- is_profiling_(false) {
-}
-
-
-CpuProfiler::~CpuProfiler() {
- delete token_enumerator_;
- delete profiles_;
-}
-
-
-void CpuProfiler::ResetProfiles() {
- delete profiles_;
- profiles_ = new CpuProfilesCollection();
-}
-
-void CpuProfiler::StartCollectingProfile(const char* title) {
- if (profiles_->StartProfiling(title, next_profile_uid_++)) {
- StartProcessorIfNotStarted();
- }
- processor_->AddCurrentStack();
-}
-
-
-void CpuProfiler::StartCollectingProfile(String* title) {
- StartCollectingProfile(profiles_->GetName(title));
-}
-
-
-void CpuProfiler::StartProcessorIfNotStarted() {
- if (processor_ == NULL) {
- Isolate* isolate = Isolate::Current();
-
- Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
- // Disable logging when using the new implementation.
- saved_logging_nesting_ = isolate->logger()->logging_nesting_;
- isolate->logger()->logging_nesting_ = 0;
- generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(generator_,
- sampler,
- FLAG_cpu_profiler_sampling_period);
- is_profiling_ = true;
- // Enumerate stuff we already have in the heap.
- if (isolate->heap()->HasBeenSetUp()) {
- if (!FLAG_prof_browser_mode) {
- bool saved_log_code_flag = FLAG_log_code;
- FLAG_log_code = true;
- isolate->logger()->LogCodeObjects();
- FLAG_log_code = saved_log_code_flag;
- }
- isolate->logger()->LogCompiledFunctions();
- isolate->logger()->LogAccessorCallbacks();
- }
- // Enable stack sampling.
- if (!sampler->IsActive()) {
- sampler->Start();
- need_to_stop_sampler_ = true;
- }
- sampler->SetHasProcessingThread(true);
- sampler->IncreaseProfilingDepth();
- processor_->Start();
- }
-}
-
-
-CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
- const double actual_sampling_rate = generator_->actual_sampling_rate();
- StopProcessorIfLastProfile(title);
- CpuProfile* result =
- profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
- title,
- actual_sampling_rate);
- if (result != NULL) {
- result->Print();
- }
- return result;
-}
-
-
-CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
- String* title) {
- const double actual_sampling_rate = generator_->actual_sampling_rate();
- const char* profile_title = profiles_->GetName(title);
- StopProcessorIfLastProfile(profile_title);
- int token = token_enumerator_->GetTokenId(security_token);
- return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
-}
-
-
-void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
- if (profiles_->IsLastProfile(title)) StopProcessor();
-}
-
-
-void CpuProfiler::StopProcessor() {
- Logger* logger = Isolate::Current()->logger();
- Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
- sampler->DecreaseProfilingDepth();
- sampler->SetHasProcessingThread(false);
- if (need_to_stop_sampler_) {
- sampler->Stop();
- need_to_stop_sampler_ = false;
- }
- is_profiling_ = false;
- processor_->Stop();
- processor_->Join();
- delete processor_;
- delete generator_;
- processor_ = NULL;
- generator_ = NULL;
- logger->logging_nesting_ = saved_logging_nesting_;
-}
-
-
-void CpuProfiler::SetUp() {
- Isolate* isolate = Isolate::Current();
- if (isolate->cpu_profiler() == NULL) {
- isolate->set_cpu_profiler(new CpuProfiler());
- }
-}
-
-
-void CpuProfiler::TearDown() {
- Isolate* isolate = Isolate::Current();
- if (isolate->cpu_profiler() != NULL) {
- delete isolate->cpu_profiler();
- }
- isolate->set_cpu_profiler(NULL);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/cpu-profiler.h b/src/3rdparty/v8/src/cpu-profiler.h
deleted file mode 100644
index 187cbbf..0000000
--- a/src/3rdparty/v8/src/cpu-profiler.h
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CPU_PROFILER_H_
-#define V8_CPU_PROFILER_H_
-
-#include "allocation.h"
-#include "atomicops.h"
-#include "circular-queue.h"
-#include "unbound-queue.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class CodeEntry;
-class CodeMap;
-class CpuProfile;
-class CpuProfilesCollection;
-class ProfileGenerator;
-class TokenEnumerator;
-
-#define CODE_EVENTS_TYPE_LIST(V) \
- V(CODE_CREATION, CodeCreateEventRecord) \
- V(CODE_MOVE, CodeMoveEventRecord) \
- V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)
-
-
-class CodeEventRecord {
- public:
-#define DECLARE_TYPE(type, ignore) type,
- enum Type {
- NONE = 0,
- CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
- NUMBER_OF_TYPES
- };
-#undef DECLARE_TYPE
-
- Type type;
- unsigned order;
-};
-
-
-class CodeCreateEventRecord : public CodeEventRecord {
- public:
- Address start;
- CodeEntry* entry;
- unsigned size;
- Address shared;
-
- INLINE(void UpdateCodeMap(CodeMap* code_map));
-};
-
-
-class CodeMoveEventRecord : public CodeEventRecord {
- public:
- Address from;
- Address to;
-
- INLINE(void UpdateCodeMap(CodeMap* code_map));
-};
-
-
-class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
- public:
- Address from;
- Address to;
-
- INLINE(void UpdateCodeMap(CodeMap* code_map));
-};
-
-
-class TickSampleEventRecord {
- public:
- // The parameterless constructor is used when we dequeue data from
- // the ticks buffer.
- TickSampleEventRecord() { }
- explicit TickSampleEventRecord(unsigned order)
- : filler(1),
- order(order) {
- ASSERT(filler != SamplingCircularQueue::kClear);
- }
-
- // The first machine word of a TickSampleEventRecord must not ever
- // become equal to SamplingCircularQueue::kClear. As both order and
- // TickSample's first field are not reliable in this sense (order
- // can overflow, TickSample can have all fields reset), we are
- // forced to use an artificial filler field.
- int filler;
- unsigned order;
- TickSample sample;
-
- static TickSampleEventRecord* cast(void* value) {
- return reinterpret_cast<TickSampleEventRecord*>(value);
- }
-};
-
-
-// This class implements both the profile events processor thread and
-// methods called by event producers: VM and stack sampler threads.
-class ProfilerEventsProcessor : public Thread {
- public:
- ProfilerEventsProcessor(ProfileGenerator* generator,
- Sampler* sampler,
- int period_in_useconds);
- virtual ~ProfilerEventsProcessor() {}
-
- // Thread control.
- virtual void Run();
- inline void Stop() { running_ = false; }
- INLINE(bool running()) { return running_; }
-
- // Events adding methods. Called by VM threads.
- void CallbackCreateEvent(Logger::LogEventsAndTags tag,
- const char* prefix, String* name,
- Address start);
- void CodeCreateEvent(Logger::LogEventsAndTags tag,
- String* name,
- String* resource_name, int line_number,
- Address start, unsigned size,
- Address shared);
- void CodeCreateEvent(Logger::LogEventsAndTags tag,
- const char* name,
- Address start, unsigned size);
- void CodeCreateEvent(Logger::LogEventsAndTags tag,
- int args_count,
- Address start, unsigned size);
- void CodeMoveEvent(Address from, Address to);
- void CodeDeleteEvent(Address from);
- void SharedFunctionInfoMoveEvent(Address from, Address to);
- void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
- const char* prefix, String* name,
- Address start, unsigned size);
- // Puts current stack into tick sample events buffer.
- void AddCurrentStack();
-
- // Tick sample events are filled directly in the buffer of the circular
- // queue (because the structure is of fixed width, but usually not all
- // stack frame entries are filled.) This method returns a pointer to the
- // next record of the buffer.
- INLINE(TickSample* TickSampleEvent());
-
- private:
- union CodeEventsContainer {
- CodeEventRecord generic;
-#define DECLARE_CLASS(ignore, type) type type##_;
- CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
-#undef DECLARE_TYPE
- };
-
- // Called from events processing thread (Run() method.)
- bool ProcessCodeEvent(unsigned* dequeue_order);
- bool ProcessTicks(unsigned dequeue_order);
- void ProcessEventsAndDoSample(unsigned* dequeue_order);
- void ProcessEventsAndYield(unsigned* dequeue_order);
-
- INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
-
- ProfileGenerator* generator_;
- Sampler* sampler_;
- bool running_;
- // Sampling period in microseconds.
- const int period_in_useconds_;
- UnboundQueue<CodeEventsContainer> events_buffer_;
- SamplingCircularQueue ticks_buffer_;
- UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
- unsigned enqueue_order_;
-};
-
-} } // namespace v8::internal
-
-
-#define PROFILE(isolate, Call) \
- LOG_CODE_EVENT(isolate, Call); \
- do { \
- if (v8::internal::CpuProfiler::is_profiling(isolate)) { \
- v8::internal::CpuProfiler::Call; \
- } \
- } while (false)
-
-
-namespace v8 {
-namespace internal {
-
-
-// TODO(isolates): isolatify this class.
-class CpuProfiler {
- public:
- static void SetUp();
- static void TearDown();
-
- static void StartProfiling(const char* title);
- static void StartProfiling(String* title);
- static CpuProfile* StopProfiling(const char* title);
- static CpuProfile* StopProfiling(Object* security_token, String* title);
- static int GetProfilesCount();
- static CpuProfile* GetProfile(Object* security_token, int index);
- static CpuProfile* FindProfile(Object* security_token, unsigned uid);
- static void DeleteAllProfiles();
- static void DeleteProfile(CpuProfile* profile);
- static bool HasDetachedProfiles();
-
- // Invoked from stack sampler (thread or signal handler.)
- static TickSample* TickSampleEvent(Isolate* isolate);
-
- // Must be called via PROFILE macro, otherwise will crash when
- // profiling is not enabled.
- static void CallbackEvent(String* name, Address entry_point);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, const char* comment);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, String* name);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* name);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* source, int line);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, int args_count);
- static void CodeMovingGCEvent() {}
- static void CodeMoveEvent(Address from, Address to);
- static void CodeDeleteEvent(Address from);
- static void GetterCallbackEvent(String* name, Address entry_point);
- static void RegExpCodeCreateEvent(Code* code, String* source);
- static void SetterCallbackEvent(String* name, Address entry_point);
- static void SharedFunctionInfoMoveEvent(Address from, Address to);
-
- static INLINE(bool is_profiling(Isolate* isolate)) {
- CpuProfiler* profiler = isolate->cpu_profiler();
- return profiler != NULL && profiler->is_profiling_;
- }
-
- private:
- CpuProfiler();
- ~CpuProfiler();
- void StartCollectingProfile(const char* title);
- void StartCollectingProfile(String* title);
- void StartProcessorIfNotStarted();
- CpuProfile* StopCollectingProfile(const char* title);
- CpuProfile* StopCollectingProfile(Object* security_token, String* title);
- void StopProcessorIfLastProfile(const char* title);
- void StopProcessor();
- void ResetProfiles();
-
- CpuProfilesCollection* profiles_;
- unsigned next_profile_uid_;
- TokenEnumerator* token_enumerator_;
- ProfileGenerator* generator_;
- ProfilerEventsProcessor* processor_;
- int saved_logging_nesting_;
- bool need_to_stop_sampler_;
- bool is_profiling_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
-};
-
-} } // namespace v8::internal
-
-
-#endif // V8_CPU_PROFILER_H_
diff --git a/src/3rdparty/v8/src/cpu.h b/src/3rdparty/v8/src/cpu.h
deleted file mode 100644
index 247af71..0000000
--- a/src/3rdparty/v8/src/cpu.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This module contains the architecture-specific code. This make the rest of
-// the code less dependent on differences between different processor
-// architecture.
-// The classes have the same definition for all architectures. The
-// implementation for a particular architecture is put in cpu_<arch>.cc.
-// The build system then uses the implementation for the target architecture.
-//
-
-#ifndef V8_CPU_H_
-#define V8_CPU_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// CPU
-//
-// This class has static methods for the architecture specific functions. Add
-// methods here to cope with differences between the supported architectures.
-//
-// For each architecture the file cpu_<arch>.cc contains the implementation of
-// these functions.
-
-class CPU : public AllStatic {
- public:
- // Initializes the cpu architecture support. Called once at VM startup.
- static void SetUp();
-
- static bool SupportsCrankshaft();
-
- // Flush instruction cache.
- static void FlushICache(void* start, size_t size);
-
- // Try to activate a system level debugger.
- static void DebugBreak();
-};
-
-} } // namespace v8::internal
-
-#endif // V8_CPU_H_
diff --git a/src/3rdparty/v8/src/d8-debug.cc b/src/3rdparty/v8/src/d8-debug.cc
deleted file mode 100644
index f044328..0000000
--- a/src/3rdparty/v8/src/d8-debug.cc
+++ /dev/null
@@ -1,370 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-#include "d8.h"
-#include "d8-debug.h"
-#include "platform.h"
-#include "debug-agent.h"
-
-
-namespace v8 {
-
-static bool was_running = true;
-
-void PrintPrompt(bool is_running) {
- const char* prompt = is_running? "> " : "dbg> ";
- was_running = is_running;
- printf("%s", prompt);
- fflush(stdout);
-}
-
-
-void PrintPrompt() {
- PrintPrompt(was_running);
-}
-
-
-void HandleDebugEvent(DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- Handle<Value> data) {
- HandleScope scope;
-
- // Check for handled event.
- if (event != Break && event != Exception && event != AfterCompile) {
- return;
- }
-
- TryCatch try_catch;
-
- // Get the toJSONProtocol function on the event and get the JSON format.
- Local<String> to_json_fun_name = String::New("toJSONProtocol");
- Local<Function> to_json_fun =
- Function::Cast(*event_data->Get(to_json_fun_name));
- Local<Value> event_json = to_json_fun->Call(event_data, 0, NULL);
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- return;
- }
-
- // Print the event details.
- Handle<Object> details =
- Shell::DebugMessageDetails(Handle<String>::Cast(event_json));
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- return;
- }
- String::Utf8Value str(details->Get(String::New("text")));
- if (str.length() == 0) {
- // Empty string is used to signal not to process this event.
- return;
- }
- printf("%s\n", *str);
-
- // Get the debug command processor.
- Local<String> fun_name = String::New("debugCommandProcessor");
- Local<Function> fun = Function::Cast(*exec_state->Get(fun_name));
- Local<Object> cmd_processor =
- Object::Cast(*fun->Call(exec_state, 0, NULL));
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- return;
- }
-
- static const int kBufferSize = 256;
- bool running = false;
- while (!running) {
- char command[kBufferSize];
- PrintPrompt(running);
- char* str = fgets(command, kBufferSize, stdin);
- if (str == NULL) break;
-
- // Ignore empty commands.
- if (strlen(command) == 0) continue;
-
- TryCatch try_catch;
-
- // Convert the debugger command to a JSON debugger request.
- Handle<Value> request =
- Shell::DebugCommandToJSONRequest(String::New(command));
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- continue;
- }
-
- // If undefined is returned the command was handled internally and there is
- // no JSON to send.
- if (request->IsUndefined()) {
- continue;
- }
-
- Handle<String> fun_name;
- Handle<Function> fun;
- // All the functions used below take one argument.
- static const int kArgc = 1;
- Handle<Value> args[kArgc];
-
- // Invoke the JavaScript to convert the debug command line to a JSON
- // request, invoke the JSON request and convert the JSON respose to a text
- // representation.
- fun_name = String::New("processDebugRequest");
- fun = Handle<Function>::Cast(cmd_processor->Get(fun_name));
- args[0] = request;
- Handle<Value> response_val = fun->Call(cmd_processor, kArgc, args);
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- continue;
- }
- Handle<String> response = Handle<String>::Cast(response_val);
-
- // Convert the debugger response into text details and the running state.
- Handle<Object> response_details = Shell::DebugMessageDetails(response);
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- continue;
- }
- String::Utf8Value text_str(response_details->Get(String::New("text")));
- if (text_str.length() > 0) {
- printf("%s\n", *text_str);
- }
- running =
- response_details->Get(String::New("running"))->ToBoolean()->Value();
- }
-}
-
-
-void RunRemoteDebugger(int port) {
- RemoteDebugger debugger(port);
- debugger.Run();
-}
-
-
-void RemoteDebugger::Run() {
- bool ok;
-
- // Make sure that socket support is initialized.
- ok = i::Socket::SetUp();
- if (!ok) {
- printf("Unable to initialize socket support %d\n", i::Socket::LastError());
- return;
- }
-
- // Connect to the debugger agent.
- conn_ = i::OS::CreateSocket();
- static const int kPortStrSize = 6;
- char port_str[kPortStrSize];
- i::OS::SNPrintF(i::Vector<char>(port_str, kPortStrSize), "%d", port_);
- ok = conn_->Connect("localhost", port_str);
- if (!ok) {
- printf("Unable to connect to debug agent %d\n", i::Socket::LastError());
- return;
- }
-
- // Start the receiver thread.
- ReceiverThread receiver(this);
- receiver.Start();
-
- // Start the keyboard thread.
- KeyboardThread keyboard(this);
- keyboard.Start();
- PrintPrompt();
-
- // Process events received from debugged VM and from the keyboard.
- bool terminate = false;
- while (!terminate) {
- event_available_->Wait();
- RemoteDebuggerEvent* event = GetEvent();
- switch (event->type()) {
- case RemoteDebuggerEvent::kMessage:
- HandleMessageReceived(event->data());
- break;
- case RemoteDebuggerEvent::kKeyboard:
- HandleKeyboardCommand(event->data());
- break;
- case RemoteDebuggerEvent::kDisconnect:
- terminate = true;
- break;
-
- default:
- UNREACHABLE();
- }
- delete event;
- }
-
- // Wait for the receiver thread to end.
- receiver.Join();
-}
-
-
-void RemoteDebugger::MessageReceived(i::SmartArrayPointer<char> message) {
- RemoteDebuggerEvent* event =
- new RemoteDebuggerEvent(RemoteDebuggerEvent::kMessage, message);
- AddEvent(event);
-}
-
-
-void RemoteDebugger::KeyboardCommand(i::SmartArrayPointer<char> command) {
- RemoteDebuggerEvent* event =
- new RemoteDebuggerEvent(RemoteDebuggerEvent::kKeyboard, command);
- AddEvent(event);
-}
-
-
-void RemoteDebugger::ConnectionClosed() {
- RemoteDebuggerEvent* event =
- new RemoteDebuggerEvent(RemoteDebuggerEvent::kDisconnect,
- i::SmartArrayPointer<char>());
- AddEvent(event);
-}
-
-
-void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
- i::ScopedLock lock(event_access_);
- if (head_ == NULL) {
- ASSERT(tail_ == NULL);
- head_ = event;
- tail_ = event;
- } else {
- ASSERT(tail_ != NULL);
- tail_->set_next(event);
- tail_ = event;
- }
- event_available_->Signal();
-}
-
-
-RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
- i::ScopedLock lock(event_access_);
- ASSERT(head_ != NULL);
- RemoteDebuggerEvent* result = head_;
- head_ = head_->next();
- if (head_ == NULL) {
- ASSERT(tail_ == result);
- tail_ = NULL;
- }
- return result;
-}
-
-
-void RemoteDebugger::HandleMessageReceived(char* message) {
- Locker lock(v8::Isolate::GetCurrent());
- HandleScope scope;
-
- // Print the event details.
- TryCatch try_catch;
- Handle<Object> details =
- Shell::DebugMessageDetails(Handle<String>::Cast(String::New(message)));
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- PrintPrompt();
- return;
- }
- String::Utf8Value str(details->Get(String::New("text")));
- if (str.length() == 0) {
- // Empty string is used to signal not to process this event.
- return;
- }
- if (*str != NULL) {
- printf("%s\n", *str);
- } else {
- printf("???\n");
- }
-
- bool is_running = details->Get(String::New("running"))->ToBoolean()->Value();
- PrintPrompt(is_running);
-}
-
-
-void RemoteDebugger::HandleKeyboardCommand(char* command) {
- Locker lock(v8::Isolate::GetCurrent());
- HandleScope scope;
-
- // Convert the debugger command to a JSON debugger request.
- TryCatch try_catch;
- Handle<Value> request =
- Shell::DebugCommandToJSONRequest(String::New(command));
- if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
- PrintPrompt();
- return;
- }
-
- // If undefined is returned the command was handled internally and there is
- // no JSON to send.
- if (request->IsUndefined()) {
- PrintPrompt();
- return;
- }
-
- // Send the JSON debugger request.
- i::DebuggerAgentUtil::SendMessage(conn_, Handle<String>::Cast(request));
-}
-
-
-void ReceiverThread::Run() {
- // Receive the connect message (with empty body).
- i::SmartArrayPointer<char> message =
- i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
- ASSERT(*message == NULL);
-
- while (true) {
- // Receive a message.
- i::SmartArrayPointer<char> message =
- i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
- if (*message == NULL) {
- remote_debugger_->ConnectionClosed();
- return;
- }
-
- // Pass the message to the main thread.
- remote_debugger_->MessageReceived(message);
- }
-}
-
-
-void KeyboardThread::Run() {
- static const int kBufferSize = 256;
- while (true) {
- // read keyboard input.
- char command[kBufferSize];
- char* str = fgets(command, kBufferSize, stdin);
- if (str == NULL) {
- break;
- }
-
- // Pass the keyboard command to the main thread.
- remote_debugger_->KeyboardCommand(
- i::SmartArrayPointer<char>(i::StrDup(command)));
- }
-}
-
-
-} // namespace v8
-
-#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/src/3rdparty/v8/src/d8-debug.h b/src/3rdparty/v8/src/d8-debug.h
deleted file mode 100644
index aeff3c1..0000000
--- a/src/3rdparty/v8/src/d8-debug.h
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_D8_DEBUG_H_
-#define V8_D8_DEBUG_H_
-
-
-#include "d8.h"
-#include "debug.h"
-
-
-namespace v8 {
-
-
-void HandleDebugEvent(DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- Handle<Value> data);
-
-// Start the remove debugger connecting to a V8 debugger agent on the specified
-// port.
-void RunRemoteDebugger(int port);
-
-// Forward declerations.
-class RemoteDebuggerEvent;
-class ReceiverThread;
-
-
-// Remote debugging class.
-class RemoteDebugger {
- public:
- explicit RemoteDebugger(int port)
- : port_(port),
- event_access_(i::OS::CreateMutex()),
- event_available_(i::OS::CreateSemaphore(0)),
- head_(NULL), tail_(NULL) {}
- void Run();
-
- // Handle events from the subordinate threads.
- void MessageReceived(i::SmartArrayPointer<char> message);
- void KeyboardCommand(i::SmartArrayPointer<char> command);
- void ConnectionClosed();
-
- private:
- // Add new debugger event to the list.
- void AddEvent(RemoteDebuggerEvent* event);
- // Read next debugger event from the list.
- RemoteDebuggerEvent* GetEvent();
-
- // Handle a message from the debugged V8.
- void HandleMessageReceived(char* message);
- // Handle a keyboard command.
- void HandleKeyboardCommand(char* command);
-
- // Get connection to agent in debugged V8.
- i::Socket* conn() { return conn_; }
-
- int port_; // Port used to connect to debugger V8.
- i::Socket* conn_; // Connection to debugger agent in debugged V8.
-
- // Linked list of events from debugged V8 and from keyboard input. Access to
- // the list is guarded by a mutex and a semaphore signals new items in the
- // list.
- i::Mutex* event_access_;
- i::Semaphore* event_available_;
- RemoteDebuggerEvent* head_;
- RemoteDebuggerEvent* tail_;
-
- friend class ReceiverThread;
-};
-
-
-// Thread reading from debugged V8 instance.
-class ReceiverThread: public i::Thread {
- public:
- explicit ReceiverThread(RemoteDebugger* remote_debugger)
- : Thread("d8:ReceiverThrd"),
- remote_debugger_(remote_debugger) {}
- ~ReceiverThread() {}
-
- void Run();
-
- private:
- RemoteDebugger* remote_debugger_;
-};
-
-
-// Thread reading keyboard input.
-class KeyboardThread: public i::Thread {
- public:
- explicit KeyboardThread(RemoteDebugger* remote_debugger)
- : Thread("d8:KeyboardThrd"),
- remote_debugger_(remote_debugger) {}
- ~KeyboardThread() {}
-
- void Run();
-
- private:
- RemoteDebugger* remote_debugger_;
-};
-
-
-// Events processed by the main deubgger thread.
-class RemoteDebuggerEvent {
- public:
- RemoteDebuggerEvent(int type, i::SmartArrayPointer<char> data)
- : type_(type), data_(data), next_(NULL) {
- ASSERT(type == kMessage || type == kKeyboard || type == kDisconnect);
- }
-
- static const int kMessage = 1;
- static const int kKeyboard = 2;
- static const int kDisconnect = 3;
-
- int type() { return type_; }
- char* data() { return *data_; }
-
- private:
- void set_next(RemoteDebuggerEvent* event) { next_ = event; }
- RemoteDebuggerEvent* next() { return next_; }
-
- int type_;
- i::SmartArrayPointer<char> data_;
- RemoteDebuggerEvent* next_;
-
- friend class RemoteDebugger;
-};
-
-
-} // namespace v8
-
-
-#endif // V8_D8_DEBUG_H_
diff --git a/src/3rdparty/v8/src/d8-posix.cc b/src/3rdparty/v8/src/d8-posix.cc
deleted file mode 100644
index 8a278e4..0000000
--- a/src/3rdparty/v8/src/d8-posix.cc
+++ /dev/null
@@ -1,688 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include <stdlib.h>
-#include <errno.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <time.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/wait.h>
-#include <signal.h>
-
-
-#include "d8.h"
-#include "d8-debug.h"
-#include "debug.h"
-
-
-namespace v8 {
-
-
-// If the buffer ends in the middle of a UTF-8 sequence then we return
-// the length of the string up to but not including the incomplete UTF-8
-// sequence. If the buffer ends with a valid UTF-8 sequence then we
-// return the whole buffer.
-static int LengthWithoutIncompleteUtf8(char* buffer, int len) {
- int answer = len;
- // 1-byte encoding.
- static const int kUtf8SingleByteMask = 0x80;
- static const int kUtf8SingleByteValue = 0x00;
- // 2-byte encoding.
- static const int kUtf8TwoByteMask = 0xe0;
- static const int kUtf8TwoByteValue = 0xc0;
- // 3-byte encoding.
- static const int kUtf8ThreeByteMask = 0xf0;
- static const int kUtf8ThreeByteValue = 0xe0;
- // 4-byte encoding.
- static const int kUtf8FourByteMask = 0xf8;
- static const int kUtf8FourByteValue = 0xf0;
- // Subsequent bytes of a multi-byte encoding.
- static const int kMultiByteMask = 0xc0;
- static const int kMultiByteValue = 0x80;
- int multi_byte_bytes_seen = 0;
- while (answer > 0) {
- int c = buffer[answer - 1];
- // Ends in valid single-byte sequence?
- if ((c & kUtf8SingleByteMask) == kUtf8SingleByteValue) return answer;
- // Ends in one or more subsequent bytes of a multi-byte value?
- if ((c & kMultiByteMask) == kMultiByteValue) {
- multi_byte_bytes_seen++;
- answer--;
- } else {
- if ((c & kUtf8TwoByteMask) == kUtf8TwoByteValue) {
- if (multi_byte_bytes_seen >= 1) {
- return answer + 2;
- }
- return answer - 1;
- } else if ((c & kUtf8ThreeByteMask) == kUtf8ThreeByteValue) {
- if (multi_byte_bytes_seen >= 2) {
- return answer + 3;
- }
- return answer - 1;
- } else if ((c & kUtf8FourByteMask) == kUtf8FourByteValue) {
- if (multi_byte_bytes_seen >= 3) {
- return answer + 4;
- }
- return answer - 1;
- } else {
- return answer; // Malformed UTF-8.
- }
- }
- }
- return 0;
-}
-
-
-// Suspends the thread until there is data available from the child process.
-// Returns false on timeout, true on data ready.
-static bool WaitOnFD(int fd,
- int read_timeout,
- int total_timeout,
- struct timeval& start_time) {
- fd_set readfds, writefds, exceptfds;
- struct timeval timeout;
- int gone = 0;
- if (total_timeout != -1) {
- struct timeval time_now;
- gettimeofday(&time_now, NULL);
- int seconds = time_now.tv_sec - start_time.tv_sec;
- gone = seconds * 1000 + (time_now.tv_usec - start_time.tv_usec) / 1000;
- if (gone >= total_timeout) return false;
- }
- FD_ZERO(&readfds);
- FD_ZERO(&writefds);
- FD_ZERO(&exceptfds);
- FD_SET(fd, &readfds);
- FD_SET(fd, &exceptfds);
- if (read_timeout == -1 ||
- (total_timeout != -1 && total_timeout - gone < read_timeout)) {
- read_timeout = total_timeout - gone;
- }
- timeout.tv_usec = (read_timeout % 1000) * 1000;
- timeout.tv_sec = read_timeout / 1000;
- int number_of_fds_ready = select(fd + 1,
- &readfds,
- &writefds,
- &exceptfds,
- read_timeout != -1 ? &timeout : NULL);
- return number_of_fds_ready == 1;
-}
-
-
-// Checks whether we ran out of time on the timeout. Returns true if we ran out
-// of time, false if we still have time.
-static bool TimeIsOut(const struct timeval& start_time, const int& total_time) {
- if (total_time == -1) return false;
- struct timeval time_now;
- gettimeofday(&time_now, NULL);
- // Careful about overflow.
- int seconds = time_now.tv_sec - start_time.tv_sec;
- if (seconds > 100) {
- if (seconds * 1000 > total_time) return true;
- return false;
- }
- int useconds = time_now.tv_usec - start_time.tv_usec;
- if (seconds * 1000000 + useconds > total_time * 1000) {
- return true;
- }
- return false;
-}
-
-
-// A utility class that does a non-hanging waitpid on the child process if we
-// bail out of the System() function early. If you don't ever do a waitpid on
-// a subprocess then it turns into one of those annoying 'zombie processes'.
-class ZombieProtector {
- public:
- explicit ZombieProtector(int pid): pid_(pid) { }
- ~ZombieProtector() { if (pid_ != 0) waitpid(pid_, NULL, 0); }
- void ChildIsDeadNow() { pid_ = 0; }
- private:
- int pid_;
-};
-
-
-// A utility class that closes a file descriptor when it goes out of scope.
-class OpenFDCloser {
- public:
- explicit OpenFDCloser(int fd): fd_(fd) { }
- ~OpenFDCloser() { close(fd_); }
- private:
- int fd_;
-};
-
-
-// A utility class that takes the array of command arguments and puts then in an
-// array of new[]ed UTF-8 C strings. Deallocates them again when it goes out of
-// scope.
-class ExecArgs {
- public:
- ExecArgs() {
- exec_args_[0] = NULL;
- }
- bool Init(Handle<Value> arg0, Handle<Array> command_args) {
- String::Utf8Value prog(arg0);
- if (*prog == NULL) {
- const char* message =
- "os.system(): String conversion of program name failed";
- ThrowException(String::New(message));
- return false;
- }
- int len = prog.length() + 3;
- char* c_arg = new char[len];
- snprintf(c_arg, len, "%s", *prog);
- exec_args_[0] = c_arg;
- int i = 1;
- for (unsigned j = 0; j < command_args->Length(); i++, j++) {
- Handle<Value> arg(command_args->Get(Integer::New(j)));
- String::Utf8Value utf8_arg(arg);
- if (*utf8_arg == NULL) {
- exec_args_[i] = NULL; // Consistent state for destructor.
- const char* message =
- "os.system(): String conversion of argument failed.";
- ThrowException(String::New(message));
- return false;
- }
- int len = utf8_arg.length() + 1;
- char* c_arg = new char[len];
- snprintf(c_arg, len, "%s", *utf8_arg);
- exec_args_[i] = c_arg;
- }
- exec_args_[i] = NULL;
- return true;
- }
- ~ExecArgs() {
- for (unsigned i = 0; i < kMaxArgs; i++) {
- if (exec_args_[i] == NULL) {
- return;
- }
- delete [] exec_args_[i];
- exec_args_[i] = 0;
- }
- }
- static const unsigned kMaxArgs = 1000;
- char** arg_array() { return exec_args_; }
- char* arg0() { return exec_args_[0]; }
-
- private:
- char* exec_args_[kMaxArgs + 1];
-};
-
-
-// Gets the optional timeouts from the arguments to the system() call.
-static bool GetTimeouts(const Arguments& args,
- int* read_timeout,
- int* total_timeout) {
- if (args.Length() > 3) {
- if (args[3]->IsNumber()) {
- *total_timeout = args[3]->Int32Value();
- } else {
- ThrowException(String::New("system: Argument 4 must be a number"));
- return false;
- }
- }
- if (args.Length() > 2) {
- if (args[2]->IsNumber()) {
- *read_timeout = args[2]->Int32Value();
- } else {
- ThrowException(String::New("system: Argument 3 must be a number"));
- return false;
- }
- }
- return true;
-}
-
-
-static const int kReadFD = 0;
-static const int kWriteFD = 1;
-
-
-// This is run in the child process after fork() but before exec(). It normally
-// ends with the child process being replaced with the desired child program.
-// It only returns if an error occurred.
-static void ExecSubprocess(int* exec_error_fds,
- int* stdout_fds,
- ExecArgs& exec_args) {
- close(exec_error_fds[kReadFD]); // Don't need this in the child.
- close(stdout_fds[kReadFD]); // Don't need this in the child.
- close(1); // Close stdout.
- dup2(stdout_fds[kWriteFD], 1); // Dup pipe fd to stdout.
- close(stdout_fds[kWriteFD]); // Don't need the original fd now.
- fcntl(exec_error_fds[kWriteFD], F_SETFD, FD_CLOEXEC);
- execvp(exec_args.arg0(), exec_args.arg_array());
- // Only get here if the exec failed. Write errno to the parent to tell
- // them it went wrong. If it went well the pipe is closed.
- int err = errno;
- int bytes_written;
- do {
- bytes_written = write(exec_error_fds[kWriteFD], &err, sizeof(err));
- } while (bytes_written == -1 && errno == EINTR);
- // Return (and exit child process).
-}
-
-
-// Runs in the parent process. Checks that the child was able to exec (closing
-// the file desriptor), or reports an error if it failed.
-static bool ChildLaunchedOK(int* exec_error_fds) {
- int bytes_read;
- int err;
- do {
- bytes_read = read(exec_error_fds[kReadFD], &err, sizeof(err));
- } while (bytes_read == -1 && errno == EINTR);
- if (bytes_read != 0) {
- ThrowException(String::New(strerror(err)));
- return false;
- }
- return true;
-}
-
-
-// Accumulates the output from the child in a string handle. Returns true if it
-// succeeded or false if an exception was thrown.
-static Handle<Value> GetStdout(int child_fd,
- struct timeval& start_time,
- int read_timeout,
- int total_timeout) {
- Handle<String> accumulator = String::Empty();
-
- int fullness = 0;
- static const int kStdoutReadBufferSize = 4096;
- char buffer[kStdoutReadBufferSize];
-
- if (fcntl(child_fd, F_SETFL, O_NONBLOCK) != 0) {
- return ThrowException(String::New(strerror(errno)));
- }
-
- int bytes_read;
- do {
- bytes_read = read(child_fd,
- buffer + fullness,
- kStdoutReadBufferSize - fullness);
- if (bytes_read == -1) {
- if (errno == EAGAIN) {
- if (!WaitOnFD(child_fd,
- read_timeout,
- total_timeout,
- start_time) ||
- (TimeIsOut(start_time, total_timeout))) {
- return ThrowException(String::New("Timed out waiting for output"));
- }
- continue;
- } else if (errno == EINTR) {
- continue;
- } else {
- break;
- }
- }
- if (bytes_read + fullness > 0) {
- int length = bytes_read == 0 ?
- bytes_read + fullness :
- LengthWithoutIncompleteUtf8(buffer, bytes_read + fullness);
- Handle<String> addition = String::New(buffer, length);
- accumulator = String::Concat(accumulator, addition);
- fullness = bytes_read + fullness - length;
- memcpy(buffer, buffer + length, fullness);
- }
- } while (bytes_read != 0);
- return accumulator;
-}
-
-
-// Modern Linux has the waitid call, which is like waitpid, but more useful
-// if you want a timeout. If we don't have waitid we can't limit the time
-// waiting for the process to exit without losing the information about
-// whether it exited normally. In the common case this doesn't matter because
-// we don't get here before the child has closed stdout and most programs don't
-// do that before they exit.
-//
-// We're disabling usage of waitid in Mac OS X because it doens't work for us:
-// a parent process hangs on waiting while a child process is already a zombie.
-// See http://code.google.com/p/v8/issues/detail?id=401.
-#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__) \
- && !defined(__NetBSD__)
-#if !defined(__FreeBSD__)
-#define HAS_WAITID 1
-#endif
-#endif
-
-
-// Get exit status of child.
-static bool WaitForChild(int pid,
- ZombieProtector& child_waiter,
- struct timeval& start_time,
- int read_timeout,
- int total_timeout) {
-#ifdef HAS_WAITID
-
- siginfo_t child_info;
- child_info.si_pid = 0;
- int useconds = 1;
- // Wait for child to exit.
- while (child_info.si_pid == 0) {
- waitid(P_PID, pid, &child_info, WEXITED | WNOHANG | WNOWAIT);
- usleep(useconds);
- if (useconds < 1000000) useconds <<= 1;
- if ((read_timeout != -1 && useconds / 1000 > read_timeout) ||
- (TimeIsOut(start_time, total_timeout))) {
- ThrowException(String::New("Timed out waiting for process to terminate"));
- kill(pid, SIGINT);
- return false;
- }
- }
- if (child_info.si_code == CLD_KILLED) {
- char message[999];
- snprintf(message,
- sizeof(message),
- "Child killed by signal %d",
- child_info.si_status);
- ThrowException(String::New(message));
- return false;
- }
- if (child_info.si_code == CLD_EXITED && child_info.si_status != 0) {
- char message[999];
- snprintf(message,
- sizeof(message),
- "Child exited with status %d",
- child_info.si_status);
- ThrowException(String::New(message));
- return false;
- }
-
-#else // No waitid call.
-
- int child_status;
- waitpid(pid, &child_status, 0); // We hang here if the child doesn't exit.
- child_waiter.ChildIsDeadNow();
- if (WIFSIGNALED(child_status)) {
- char message[999];
- snprintf(message,
- sizeof(message),
- "Child killed by signal %d",
- WTERMSIG(child_status));
- ThrowException(String::New(message));
- return false;
- }
- if (WEXITSTATUS(child_status) != 0) {
- char message[999];
- int exit_status = WEXITSTATUS(child_status);
- snprintf(message,
- sizeof(message),
- "Child exited with status %d",
- exit_status);
- ThrowException(String::New(message));
- return false;
- }
-
-#endif // No waitid call.
-
- return true;
-}
-
-
-// Implementation of the system() function (see d8.h for details).
-Handle<Value> Shell::System(const Arguments& args) {
- HandleScope scope;
- int read_timeout = -1;
- int total_timeout = -1;
- if (!GetTimeouts(args, &read_timeout, &total_timeout)) return v8::Undefined();
- Handle<Array> command_args;
- if (args.Length() > 1) {
- if (!args[1]->IsArray()) {
- return ThrowException(String::New("system: Argument 2 must be an array"));
- }
- command_args = Handle<Array>::Cast(args[1]);
- } else {
- command_args = Array::New(0);
- }
- if (command_args->Length() > ExecArgs::kMaxArgs) {
- return ThrowException(String::New("Too many arguments to system()"));
- }
- if (args.Length() < 1) {
- return ThrowException(String::New("Too few arguments to system()"));
- }
-
- struct timeval start_time;
- gettimeofday(&start_time, NULL);
-
- ExecArgs exec_args;
- if (!exec_args.Init(args[0], command_args)) {
- return v8::Undefined();
- }
- int exec_error_fds[2];
- int stdout_fds[2];
-
- if (pipe(exec_error_fds) != 0) {
- return ThrowException(String::New("pipe syscall failed."));
- }
- if (pipe(stdout_fds) != 0) {
- return ThrowException(String::New("pipe syscall failed."));
- }
-
- pid_t pid = fork();
- if (pid == 0) { // Child process.
- ExecSubprocess(exec_error_fds, stdout_fds, exec_args);
- exit(1);
- }
-
- // Parent process. Ensure that we clean up if we exit this function early.
- ZombieProtector child_waiter(pid);
- close(exec_error_fds[kWriteFD]);
- close(stdout_fds[kWriteFD]);
- OpenFDCloser error_read_closer(exec_error_fds[kReadFD]);
- OpenFDCloser stdout_read_closer(stdout_fds[kReadFD]);
-
- if (!ChildLaunchedOK(exec_error_fds)) return v8::Undefined();
-
- Handle<Value> accumulator = GetStdout(stdout_fds[kReadFD],
- start_time,
- read_timeout,
- total_timeout);
- if (accumulator->IsUndefined()) {
- kill(pid, SIGINT); // On timeout, kill the subprocess.
- return accumulator;
- }
-
- if (!WaitForChild(pid,
- child_waiter,
- start_time,
- read_timeout,
- total_timeout)) {
- return v8::Undefined();
- }
-
- return scope.Close(accumulator);
-}
-
-
-Handle<Value> Shell::ChangeDirectory(const Arguments& args) {
- if (args.Length() != 1) {
- const char* message = "chdir() takes one argument";
- return ThrowException(String::New(message));
- }
- String::Utf8Value directory(args[0]);
- if (*directory == NULL) {
- const char* message = "os.chdir(): String conversion of argument failed.";
- return ThrowException(String::New(message));
- }
- if (chdir(*directory) != 0) {
- return ThrowException(String::New(strerror(errno)));
- }
- return v8::Undefined();
-}
-
-
-Handle<Value> Shell::SetUMask(const Arguments& args) {
- if (args.Length() != 1) {
- const char* message = "umask() takes one argument";
- return ThrowException(String::New(message));
- }
- if (args[0]->IsNumber()) {
- mode_t mask = args[0]->Int32Value();
- int previous = umask(mask);
- return Number::New(previous);
- } else {
- const char* message = "umask() argument must be numeric";
- return ThrowException(String::New(message));
- }
-}
-
-
-static bool CheckItsADirectory(char* directory) {
- struct stat stat_buf;
- int stat_result = stat(directory, &stat_buf);
- if (stat_result != 0) {
- ThrowException(String::New(strerror(errno)));
- return false;
- }
- if ((stat_buf.st_mode & S_IFDIR) != 0) return true;
- ThrowException(String::New(strerror(EEXIST)));
- return false;
-}
-
-
-// Returns true for success. Creates intermediate directories as needed. No
-// error if the directory exists already.
-static bool mkdirp(char* directory, mode_t mask) {
- int result = mkdir(directory, mask);
- if (result == 0) return true;
- if (errno == EEXIST) {
- return CheckItsADirectory(directory);
- } else if (errno == ENOENT) { // Intermediate path element is missing.
- char* last_slash = strrchr(directory, '/');
- if (last_slash == NULL) {
- ThrowException(String::New(strerror(errno)));
- return false;
- }
- *last_slash = 0;
- if (!mkdirp(directory, mask)) return false;
- *last_slash = '/';
- result = mkdir(directory, mask);
- if (result == 0) return true;
- if (errno == EEXIST) {
- return CheckItsADirectory(directory);
- }
- ThrowException(String::New(strerror(errno)));
- return false;
- } else {
- ThrowException(String::New(strerror(errno)));
- return false;
- }
-}
-
-
-Handle<Value> Shell::MakeDirectory(const Arguments& args) {
- mode_t mask = 0777;
- if (args.Length() == 2) {
- if (args[1]->IsNumber()) {
- mask = args[1]->Int32Value();
- } else {
- const char* message = "mkdirp() second argument must be numeric";
- return ThrowException(String::New(message));
- }
- } else if (args.Length() != 1) {
- const char* message = "mkdirp() takes one or two arguments";
- return ThrowException(String::New(message));
- }
- String::Utf8Value directory(args[0]);
- if (*directory == NULL) {
- const char* message = "os.mkdirp(): String conversion of argument failed.";
- return ThrowException(String::New(message));
- }
- mkdirp(*directory, mask);
- return v8::Undefined();
-}
-
-
-Handle<Value> Shell::RemoveDirectory(const Arguments& args) {
- if (args.Length() != 1) {
- const char* message = "rmdir() takes one or two arguments";
- return ThrowException(String::New(message));
- }
- String::Utf8Value directory(args[0]);
- if (*directory == NULL) {
- const char* message = "os.rmdir(): String conversion of argument failed.";
- return ThrowException(String::New(message));
- }
- rmdir(*directory);
- return v8::Undefined();
-}
-
-
-Handle<Value> Shell::SetEnvironment(const Arguments& args) {
- if (args.Length() != 2) {
- const char* message = "setenv() takes two arguments";
- return ThrowException(String::New(message));
- }
- String::Utf8Value var(args[0]);
- String::Utf8Value value(args[1]);
- if (*var == NULL) {
- const char* message =
- "os.setenv(): String conversion of variable name failed.";
- return ThrowException(String::New(message));
- }
- if (*value == NULL) {
- const char* message =
- "os.setenv(): String conversion of variable contents failed.";
- return ThrowException(String::New(message));
- }
- setenv(*var, *value, 1);
- return v8::Undefined();
-}
-
-
-Handle<Value> Shell::UnsetEnvironment(const Arguments& args) {
- if (args.Length() != 1) {
- const char* message = "unsetenv() takes one argument";
- return ThrowException(String::New(message));
- }
- String::Utf8Value var(args[0]);
- if (*var == NULL) {
- const char* message =
- "os.setenv(): String conversion of variable name failed.";
- return ThrowException(String::New(message));
- }
- unsetenv(*var);
- return v8::Undefined();
-}
-
-
-void Shell::AddOSMethods(Handle<ObjectTemplate> os_templ) {
- os_templ->Set(String::New("system"), FunctionTemplate::New(System));
- os_templ->Set(String::New("chdir"), FunctionTemplate::New(ChangeDirectory));
- os_templ->Set(String::New("setenv"), FunctionTemplate::New(SetEnvironment));
- os_templ->Set(String::New("unsetenv"),
- FunctionTemplate::New(UnsetEnvironment));
- os_templ->Set(String::New("umask"), FunctionTemplate::New(SetUMask));
- os_templ->Set(String::New("mkdirp"), FunctionTemplate::New(MakeDirectory));
- os_templ->Set(String::New("rmdir"), FunctionTemplate::New(RemoveDirectory));
-}
-
-} // namespace v8
diff --git a/src/3rdparty/v8/src/d8-readline.cc b/src/3rdparty/v8/src/d8-readline.cc
deleted file mode 100644
index 8989263..0000000
--- a/src/3rdparty/v8/src/d8-readline.cc
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <cstdio> // NOLINT
-#include <string.h> // NOLINT
-#include <readline/readline.h> // NOLINT
-#include <readline/history.h> // NOLINT
-
-// The readline includes leaves RETURN defined which breaks V8 compilation.
-#undef RETURN
-
-#include "d8.h"
-
-// There are incompatibilities between different versions and different
-// implementations of readline. This smooths out one known incompatibility.
-#if RL_READLINE_VERSION >= 0x0500
-#define completion_matches rl_completion_matches
-#endif
-
-
-namespace v8 {
-
-
-class ReadLineEditor: public LineEditor {
- public:
- ReadLineEditor() : LineEditor(LineEditor::READLINE, "readline") { }
- virtual Handle<String> Prompt(const char* prompt);
- virtual bool Open();
- virtual bool Close();
- virtual void AddHistory(const char* str);
-
- static const char* kHistoryFileName;
- static const int kMaxHistoryEntries;
-
- private:
-#ifndef V8_SHARED
- static char** AttemptedCompletion(const char* text, int start, int end);
- static char* CompletionGenerator(const char* text, int state);
-#endif // V8_SHARED
- static char kWordBreakCharacters[];
-};
-
-
-static ReadLineEditor read_line_editor;
-char ReadLineEditor::kWordBreakCharacters[] = {' ', '\t', '\n', '"',
- '\\', '\'', '`', '@', '.', '>', '<', '=', ';', '|', '&', '{', '(',
- '\0'};
-
-
-const char* ReadLineEditor::kHistoryFileName = ".d8_history";
-const int ReadLineEditor::kMaxHistoryEntries = 1000;
-
-
-bool ReadLineEditor::Open() {
- rl_initialize();
-
-#ifdef V8_SHARED
- // Don't do completion on shared library mode
- // http://cnswww.cns.cwru.edu/php/chet/readline/readline.html#SEC24
- rl_bind_key('\t', rl_insert);
-#else
- rl_attempted_completion_function = AttemptedCompletion;
-#endif // V8_SHARED
-
- rl_completer_word_break_characters = kWordBreakCharacters;
- rl_bind_key('\t', rl_complete);
- using_history();
- stifle_history(kMaxHistoryEntries);
- return read_history(kHistoryFileName) == 0;
-}
-
-
-bool ReadLineEditor::Close() {
- return write_history(kHistoryFileName) == 0;
-}
-
-
-Handle<String> ReadLineEditor::Prompt(const char* prompt) {
- char* result = NULL;
- { // Release lock for blocking input.
- Unlocker unlock(Isolate::GetCurrent());
- result = readline(prompt);
- }
- if (result != NULL) {
- AddHistory(result);
- } else {
- return Handle<String>();
- }
- return String::New(result);
-}
-
-
-void ReadLineEditor::AddHistory(const char* str) {
- // Do not record empty input.
- if (strlen(str) == 0) return;
- // Remove duplicate history entry.
- history_set_pos(history_length-1);
- if (current_history()) {
- do {
- if (strcmp(current_history()->line, str) == 0) {
- remove_history(where_history());
- break;
- }
- } while (previous_history());
- }
- add_history(str);
-}
-
-
-#ifndef V8_SHARED
-char** ReadLineEditor::AttemptedCompletion(const char* text,
- int start,
- int end) {
- char** result = completion_matches(text, CompletionGenerator);
- rl_attempted_completion_over = true;
- return result;
-}
-
-
-char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
- static unsigned current_index;
- static Persistent<Array> current_completions;
- if (state == 0) {
- HandleScope scope;
- Local<String> full_text = String::New(rl_line_buffer, rl_point);
- Handle<Array> completions =
- Shell::GetCompletions(String::New(text), full_text);
- current_completions = Persistent<Array>::New(completions);
- current_index = 0;
- }
- if (current_index < current_completions->Length()) {
- HandleScope scope;
- Handle<Integer> index = Integer::New(current_index);
- Handle<Value> str_obj = current_completions->Get(index);
- current_index++;
- String::Utf8Value str(str_obj);
- return strdup(*str);
- } else {
- current_completions.Dispose();
- current_completions.Clear();
- return NULL;
- }
-}
-#endif // V8_SHARED
-
-
-} // namespace v8
diff --git a/src/3rdparty/v8/src/d8-windows.cc b/src/3rdparty/v8/src/d8-windows.cc
deleted file mode 100644
index eeb4735..0000000
--- a/src/3rdparty/v8/src/d8-windows.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "d8.h"
-#include "d8-debug.h"
-#include "debug.h"
-#include "api.h"
-
-
-namespace v8 {
-
-
-void Shell::AddOSMethods(Handle<ObjectTemplate> os_templ) {
-}
-
-
-} // namespace v8
diff --git a/src/3rdparty/v8/src/d8.cc b/src/3rdparty/v8/src/d8.cc
deleted file mode 100644
index 2d30a1c..0000000
--- a/src/3rdparty/v8/src/d8.cc
+++ /dev/null
@@ -1,1971 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Defined when linking against shared lib on Windows.
-#if defined(USING_V8_SHARED) && !defined(V8_SHARED)
-#define V8_SHARED
-#endif
-
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-#include <bzlib.h>
-#endif
-
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-
-#ifdef V8_SHARED
-#include <assert.h>
-#include "../include/v8-testing.h"
-#endif // V8_SHARED
-
-#include "d8.h"
-
-#ifndef V8_SHARED
-#include "api.h"
-#include "checks.h"
-#include "d8-debug.h"
-#include "debug.h"
-#include "natives.h"
-#include "platform.h"
-#include "v8.h"
-#endif // V8_SHARED
-
-#if !defined(_WIN32) && !defined(_WIN64)
-#include <unistd.h> // NOLINT
-#endif
-
-#ifndef ASSERT
-#define ASSERT(condition) assert(condition)
-#endif
-
-namespace v8 {
-
-
-static Handle<Value> Throw(const char* message) {
- return ThrowException(String::New(message));
-}
-
-
-// TODO(rossberg): should replace these by proper uses of HasInstance,
-// once we figure out a good way to make the templates global.
-const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
-const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
-
-
-#define FOR_EACH_SYMBOL(V) \
- V(ArrayBuffer, "ArrayBuffer") \
- V(ArrayBufferMarkerPropName, kArrayBufferMarkerPropName) \
- V(ArrayMarkerPropName, kArrayMarkerPropName) \
- V(buffer, "buffer") \
- V(byteLength, "byteLength") \
- V(byteOffset, "byteOffset") \
- V(BYTES_PER_ELEMENT, "BYTES_PER_ELEMENT") \
- V(length, "length")
-
-
-class Symbols {
- public:
- explicit Symbols(Isolate* isolate) : isolate_(isolate) {
- HandleScope scope;
-#define INIT_SYMBOL(name, value) \
- name##_ = Persistent<String>::New(isolate, String::NewSymbol(value));
- FOR_EACH_SYMBOL(INIT_SYMBOL)
-#undef INIT_SYMBOL
- isolate->SetData(this);
- }
-
- ~Symbols() {
-#define DISPOSE_SYMBOL(name, value) name##_.Dispose(isolate_);
- FOR_EACH_SYMBOL(DISPOSE_SYMBOL)
-#undef DISPOSE_SYMBOL
- isolate_->SetData(NULL); // Not really needed, just to be sure...
- }
-
-#define DEFINE_SYMBOL_GETTER(name, value) \
- static Persistent<String> name(Isolate* isolate) { \
- return reinterpret_cast<Symbols*>(isolate->GetData())->name##_; \
- }
- FOR_EACH_SYMBOL(DEFINE_SYMBOL_GETTER)
-#undef DEFINE_SYMBOL_GETTER
-
- private:
- Isolate* isolate_;
-#define DEFINE_MEMBER(name, value) Persistent<String> name##_;
- FOR_EACH_SYMBOL(DEFINE_MEMBER)
-#undef DEFINE_MEMBER
-};
-
-
-LineEditor *LineEditor::current_ = NULL;
-
-
-LineEditor::LineEditor(Type type, const char* name)
- : type_(type), name_(name) {
- if (current_ == NULL || current_->type_ < type) current_ = this;
-}
-
-
-class DumbLineEditor: public LineEditor {
- public:
- explicit DumbLineEditor(Isolate* isolate)
- : LineEditor(LineEditor::DUMB, "dumb"), isolate_(isolate) { }
- virtual Handle<String> Prompt(const char* prompt);
- private:
- Isolate* isolate_;
-};
-
-
-Handle<String> DumbLineEditor::Prompt(const char* prompt) {
- printf("%s", prompt);
- return Shell::ReadFromStdin(isolate_);
-}
-
-
-#ifndef V8_SHARED
-CounterMap* Shell::counter_map_;
-i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
-CounterCollection Shell::local_counters_;
-CounterCollection* Shell::counters_ = &local_counters_;
-i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
-Persistent<Context> Shell::utility_context_;
-#endif // V8_SHARED
-
-Persistent<Context> Shell::evaluation_context_;
-ShellOptions Shell::options;
-const char* Shell::kPrompt = "d8> ";
-
-
-const int MB = 1024 * 1024;
-
-
-#ifndef V8_SHARED
-bool CounterMap::Match(void* key1, void* key2) {
- const char* name1 = reinterpret_cast<const char*>(key1);
- const char* name2 = reinterpret_cast<const char*>(key2);
- return strcmp(name1, name2) == 0;
-}
-#endif // V8_SHARED
-
-
-// Converts a V8 value to a C string.
-const char* Shell::ToCString(const v8::String::Utf8Value& value) {
- return *value ? *value : "<string conversion failed>";
-}
-
-
-// Executes a string within the current v8 context.
-bool Shell::ExecuteString(Handle<String> source,
- Handle<Value> name,
- bool print_result,
- bool report_exceptions) {
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- bool FLAG_debugger = i::FLAG_debugger;
-#else
- bool FLAG_debugger = false;
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- HandleScope handle_scope;
- TryCatch try_catch;
- options.script_executed = true;
- if (FLAG_debugger) {
- // When debugging make exceptions appear to be uncaught.
- try_catch.SetVerbose(true);
- }
- Handle<Script> script = Script::Compile(source, name);
- if (script.IsEmpty()) {
- // Print errors that happened during compilation.
- if (report_exceptions && !FLAG_debugger)
- ReportException(&try_catch);
- return false;
- } else {
- Handle<Value> result = script->Run();
- if (result.IsEmpty()) {
- ASSERT(try_catch.HasCaught());
- // Print errors that happened during execution.
- if (report_exceptions && !FLAG_debugger)
- ReportException(&try_catch);
- return false;
- } else {
- ASSERT(!try_catch.HasCaught());
- if (print_result && !result->IsUndefined()) {
- // If all went well and the result wasn't undefined then print
- // the returned value.
- v8::String::Utf8Value str(result);
- size_t count = fwrite(*str, sizeof(**str), str.length(), stdout);
- (void) count; // Silence GCC-4.5.x "unused result" warning.
- printf("\n");
- }
- return true;
- }
- }
-}
-
-
-Handle<Value> Shell::Print(const Arguments& args) {
- Handle<Value> val = Write(args);
- printf("\n");
- fflush(stdout);
- return val;
-}
-
-
-Handle<Value> Shell::Write(const Arguments& args) {
- for (int i = 0; i < args.Length(); i++) {
- HandleScope handle_scope;
- if (i != 0) {
- printf(" ");
- }
-
- // Explicitly catch potential exceptions in toString().
- v8::TryCatch try_catch;
- Handle<String> str_obj = args[i]->ToString();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- v8::String::Utf8Value str(str_obj);
- int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), stdout));
- if (n != str.length()) {
- printf("Error in fwrite\n");
- Exit(1);
- }
- }
- return Undefined(args.GetIsolate());
-}
-
-
-Handle<Value> Shell::EnableProfiler(const Arguments& args) {
- V8::ResumeProfiler();
- return Undefined(args.GetIsolate());
-}
-
-
-Handle<Value> Shell::DisableProfiler(const Arguments& args) {
- V8::PauseProfiler();
- return Undefined(args.GetIsolate());
-}
-
-
-Handle<Value> Shell::Read(const Arguments& args) {
- String::Utf8Value file(args[0]);
- if (*file == NULL) {
- return Throw("Error loading file");
- }
- Handle<String> source = ReadFile(args.GetIsolate(), *file);
- if (source.IsEmpty()) {
- return Throw("Error loading file");
- }
- return source;
-}
-
-
-Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
- static const int kBufferSize = 256;
- char buffer[kBufferSize];
- Handle<String> accumulator = String::New("");
- int length;
- while (true) {
- // Continue reading if the line ends with an escape '\\' or the line has
- // not been fully read into the buffer yet (does not end with '\n').
- // If fgets gets an error, just give up.
- char* input = NULL;
- { // Release lock for blocking input.
- Unlocker unlock(isolate);
- input = fgets(buffer, kBufferSize, stdin);
- }
- if (input == NULL) return Handle<String>();
- length = static_cast<int>(strlen(buffer));
- if (length == 0) {
- return accumulator;
- } else if (buffer[length-1] != '\n') {
- accumulator = String::Concat(accumulator, String::New(buffer, length));
- } else if (length > 1 && buffer[length-2] == '\\') {
- buffer[length-2] = '\n';
- accumulator = String::Concat(accumulator, String::New(buffer, length-1));
- } else {
- return String::Concat(accumulator, String::New(buffer, length-1));
- }
- }
-}
-
-
-Handle<Value> Shell::Load(const Arguments& args) {
- for (int i = 0; i < args.Length(); i++) {
- HandleScope handle_scope;
- String::Utf8Value file(args[i]);
- if (*file == NULL) {
- return Throw("Error loading file");
- }
- Handle<String> source = ReadFile(args.GetIsolate(), *file);
- if (source.IsEmpty()) {
- return Throw("Error loading file");
- }
- if (!ExecuteString(source, String::New(*file), false, true)) {
- return Throw("Error executing file");
- }
- }
- return Undefined(args.GetIsolate());
-}
-
-static int32_t convertToInt(Local<Value> value_in, TryCatch* try_catch) {
- if (value_in->IsInt32()) {
- return value_in->Int32Value();
- }
-
- Local<Value> number = value_in->ToNumber();
- if (try_catch->HasCaught()) return 0;
-
- ASSERT(number->IsNumber());
- Local<Int32> int32 = number->ToInt32();
- if (try_catch->HasCaught() || int32.IsEmpty()) return 0;
-
- int32_t value = int32->Int32Value();
- if (try_catch->HasCaught()) return 0;
-
- return value;
-}
-
-
-static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
- int32_t raw_value = convertToInt(value_in, try_catch);
- if (try_catch->HasCaught()) return 0;
-
- if (raw_value < 0) {
- Throw("Array length must not be negative.");
- return 0;
- }
-
- static const int kMaxLength = 0x3fffffff;
-#ifndef V8_SHARED
- ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
-#endif // V8_SHARED
- if (raw_value > static_cast<int32_t>(kMaxLength)) {
- Throw("Array length exceeds maximum length.");
- }
- return raw_value;
-}
-
-
-Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate,
- Handle<Object> buffer,
- int32_t length) {
- static const int32_t kMaxSize = 0x7fffffff;
- // Make sure the total size fits into a (signed) int.
- if (length < 0 || length > kMaxSize) {
- return Throw("ArrayBuffer exceeds maximum size (2G)");
- }
- uint8_t* data = new uint8_t[length];
- if (data == NULL) {
- return Throw("Memory allocation failed");
- }
- memset(data, 0, length);
-
- buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True());
- Persistent<Object> persistent_array =
- Persistent<Object>::New(isolate, buffer);
- persistent_array.MakeWeak(isolate, data, ExternalArrayWeakCallback);
- persistent_array.MarkIndependent(isolate);
- V8::AdjustAmountOfExternalAllocatedMemory(length);
-
- buffer->SetIndexedPropertiesToExternalArrayData(
- data, v8::kExternalByteArray, length);
- buffer->Set(Symbols::byteLength(isolate),
- Int32::New(length, isolate),
- ReadOnly);
-
- return buffer;
-}
-
-
-Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
- if (!args.IsConstructCall()) {
- Handle<Value>* rec_args = new Handle<Value>[args.Length()];
- for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
- Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
- delete[] rec_args;
- return result;
- }
-
- if (args.Length() == 0) {
- return Throw("ArrayBuffer constructor must have one argument");
- }
- TryCatch try_catch;
- int32_t length = convertToUint(args[0], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- return CreateExternalArrayBuffer(args.GetIsolate(), args.This(), length);
-}
-
-
-Handle<Object> Shell::CreateExternalArray(Isolate* isolate,
- Handle<Object> array,
- Handle<Object> buffer,
- ExternalArrayType type,
- int32_t length,
- int32_t byteLength,
- int32_t byteOffset,
- int32_t element_size) {
- ASSERT(element_size == 1 || element_size == 2 ||
- element_size == 4 || element_size == 8);
- ASSERT(byteLength == length * element_size);
-
- void* data = buffer->GetIndexedPropertiesExternalArrayData();
- ASSERT(data != NULL);
-
- array->SetIndexedPropertiesToExternalArrayData(
- static_cast<uint8_t*>(data) + byteOffset, type, length);
- array->SetHiddenValue(Symbols::ArrayMarkerPropName(isolate),
- Int32::New(type, isolate));
- array->Set(Symbols::byteLength(isolate),
- Int32::New(byteLength, isolate),
- ReadOnly);
- array->Set(Symbols::byteOffset(isolate),
- Int32::New(byteOffset, isolate),
- ReadOnly);
- array->Set(Symbols::length(isolate),
- Int32::New(length, isolate),
- ReadOnly);
- array->Set(Symbols::BYTES_PER_ELEMENT(isolate),
- Int32::New(element_size, isolate));
- array->Set(Symbols::buffer(isolate),
- buffer,
- ReadOnly);
-
- return array;
-}
-
-
-Handle<Value> Shell::CreateExternalArray(const Arguments& args,
- ExternalArrayType type,
- int32_t element_size) {
- Isolate* isolate = args.GetIsolate();
- if (!args.IsConstructCall()) {
- Handle<Value>* rec_args = new Handle<Value>[args.Length()];
- for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
- Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
- delete[] rec_args;
- return result;
- }
-
- TryCatch try_catch;
- ASSERT(element_size == 1 || element_size == 2 ||
- element_size == 4 || element_size == 8);
-
- // All of the following constructors are supported:
- // TypedArray(unsigned long length)
- // TypedArray(type[] array)
- // TypedArray(TypedArray array)
- // TypedArray(ArrayBuffer buffer,
- // optional unsigned long byteOffset,
- // optional unsigned long length)
- Handle<Object> buffer;
- int32_t length;
- int32_t byteLength;
- int32_t byteOffset;
- bool init_from_array = false;
- if (args.Length() == 0) {
- return Throw("Array constructor must have at least one argument");
- }
- if (args[0]->IsObject() &&
- !args[0]->ToObject()->GetHiddenValue(
- Symbols::ArrayBufferMarkerPropName(isolate)).IsEmpty()) {
- // Construct from ArrayBuffer.
- buffer = args[0]->ToObject();
- int32_t bufferLength =
- convertToUint(buffer->Get(Symbols::byteLength(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (args.Length() < 2 || args[1]->IsUndefined()) {
- byteOffset = 0;
- } else {
- byteOffset = convertToUint(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (byteOffset > bufferLength) {
- return Throw("byteOffset out of bounds");
- }
- if (byteOffset % element_size != 0) {
- return Throw("byteOffset must be multiple of element size");
- }
- }
-
- if (args.Length() < 3 || args[2]->IsUndefined()) {
- byteLength = bufferLength - byteOffset;
- length = byteLength / element_size;
- if (byteLength % element_size != 0) {
- return Throw("buffer size must be multiple of element size");
- }
- } else {
- length = convertToUint(args[2], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- byteLength = length * element_size;
- if (byteOffset + byteLength > bufferLength) {
- return Throw("length out of bounds");
- }
- }
- } else {
- if (args[0]->IsObject() &&
- args[0]->ToObject()->Has(Symbols::length(isolate))) {
- // Construct from array.
- Local<Value> value = args[0]->ToObject()->Get(Symbols::length(isolate));
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- length = convertToUint(value, &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- init_from_array = true;
- } else {
- // Construct from size.
- length = convertToUint(args[0], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- }
- byteLength = length * element_size;
- byteOffset = 0;
-
- Handle<Object> global = Context::GetCurrent()->Global();
- Handle<Value> array_buffer = global->Get(Symbols::ArrayBuffer(isolate));
- ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction());
- Handle<Value> buffer_args[] = { Uint32::New(byteLength, isolate) };
- Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance(
- 1, buffer_args);
- if (try_catch.HasCaught()) return result;
- buffer = result->ToObject();
- }
-
- Handle<Object> array =
- CreateExternalArray(isolate, args.This(), buffer, type, length,
- byteLength, byteOffset, element_size);
-
- if (init_from_array) {
- Handle<Object> init = args[0]->ToObject();
- for (int i = 0; i < length; ++i) {
- Local<Value> value = init->Get(i);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- array->Set(i, value);
- }
- }
-
- return array;
-}
-
-
-Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) {
- TryCatch try_catch;
-
- if (!args.This()->IsObject()) {
- return Throw("'slice' invoked on non-object receiver");
- }
-
- Isolate* isolate = args.GetIsolate();
- Local<Object> self = args.This();
- Local<Value> marker =
- self->GetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate));
- if (marker.IsEmpty()) {
- return Throw("'slice' invoked on wrong receiver type");
- }
-
- int32_t length =
- convertToUint(self->Get(Symbols::byteLength(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (args.Length() == 0) {
- return Throw("'slice' must have at least one argument");
- }
- int32_t begin = convertToInt(args[0], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (begin < 0) begin += length;
- if (begin < 0) begin = 0;
- if (begin > length) begin = length;
-
- int32_t end;
- if (args.Length() < 2 || args[1]->IsUndefined()) {
- end = length;
- } else {
- end = convertToInt(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (end < 0) end += length;
- if (end < 0) end = 0;
- if (end > length) end = length;
- if (end < begin) end = begin;
- }
-
- Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
- Handle<Value> new_args[] = { Uint32::New(end - begin, isolate) };
- Handle<Value> result = constructor->NewInstance(1, new_args);
- if (try_catch.HasCaught()) return result;
- Handle<Object> buffer = result->ToObject();
- uint8_t* dest =
- static_cast<uint8_t*>(buffer->GetIndexedPropertiesExternalArrayData());
- uint8_t* src = begin + static_cast<uint8_t*>(
- self->GetIndexedPropertiesExternalArrayData());
- memcpy(dest, src, end - begin);
-
- return buffer;
-}
-
-
-Handle<Value> Shell::ArraySubArray(const Arguments& args) {
- TryCatch try_catch;
-
- if (!args.This()->IsObject()) {
- return Throw("'subarray' invoked on non-object receiver");
- }
-
- Isolate* isolate = args.GetIsolate();
- Local<Object> self = args.This();
- Local<Value> marker =
- self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate));
- if (marker.IsEmpty()) {
- return Throw("'subarray' invoked on wrong receiver type");
- }
-
- Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t length =
- convertToUint(self->Get(Symbols::length(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t byteOffset =
- convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t element_size =
- convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (args.Length() == 0) {
- return Throw("'subarray' must have at least one argument");
- }
- int32_t begin = convertToInt(args[0], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (begin < 0) begin += length;
- if (begin < 0) begin = 0;
- if (begin > length) begin = length;
-
- int32_t end;
- if (args.Length() < 2 || args[1]->IsUndefined()) {
- end = length;
- } else {
- end = convertToInt(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (end < 0) end += length;
- if (end < 0) end = 0;
- if (end > length) end = length;
- if (end < begin) end = begin;
- }
-
- length = end - begin;
- byteOffset += begin * element_size;
-
- Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
- Handle<Value> construct_args[] = {
- buffer, Uint32::New(byteOffset, isolate), Uint32::New(length, isolate)
- };
- return constructor->NewInstance(3, construct_args);
-}
-
-
-Handle<Value> Shell::ArraySet(const Arguments& args) {
- TryCatch try_catch;
-
- if (!args.This()->IsObject()) {
- return Throw("'set' invoked on non-object receiver");
- }
-
- Isolate* isolate = args.GetIsolate();
- Local<Object> self = args.This();
- Local<Value> marker =
- self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate));
- if (marker.IsEmpty()) {
- return Throw("'set' invoked on wrong receiver type");
- }
- int32_t length =
- convertToUint(self->Get(Symbols::length(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t element_size =
- convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (args.Length() == 0) {
- return Throw("'set' must have at least one argument");
- }
- if (!args[0]->IsObject() ||
- !args[0]->ToObject()->Has(Symbols::length(isolate))) {
- return Throw("'set' invoked with non-array argument");
- }
- Handle<Object> source = args[0]->ToObject();
- int32_t source_length =
- convertToUint(source->Get(Symbols::length(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- int32_t offset;
- if (args.Length() < 2 || args[1]->IsUndefined()) {
- offset = 0;
- } else {
- offset = convertToUint(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- }
- if (offset + source_length > length) {
- return Throw("offset or source length out of bounds");
- }
-
- int32_t source_element_size;
- if (source->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate)).IsEmpty()) {
- source_element_size = 0;
- } else {
- source_element_size =
- convertToUint(source->Get(Symbols::BYTES_PER_ELEMENT(isolate)),
- &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- }
-
- if (element_size == source_element_size &&
- self->GetConstructor()->StrictEquals(source->GetConstructor())) {
- // Use memmove on the array buffers.
- Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- Handle<Object> source_buffer =
- source->Get(Symbols::buffer(isolate))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t byteOffset =
- convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t source_byteOffset =
- convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>(
- buffer->GetIndexedPropertiesExternalArrayData());
- uint8_t* src = source_byteOffset + static_cast<uint8_t*>(
- source_buffer->GetIndexedPropertiesExternalArrayData());
- memmove(dest, src, source_length * element_size);
- } else if (source_element_size == 0) {
- // Source is not a typed array, copy element-wise sequentially.
- for (int i = 0; i < source_length; ++i) {
- self->Set(offset + i, source->Get(i));
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- }
- } else {
- // Need to copy element-wise to make the right conversions.
- Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- Handle<Object> source_buffer =
- source->Get(Symbols::buffer(isolate))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (buffer->StrictEquals(source_buffer)) {
- // Same backing store, need to handle overlap correctly.
- // This gets a bit tricky in the case of different element sizes
- // (which, of course, is extremely unlikely to ever occur in practice).
- int32_t byteOffset =
- convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t source_byteOffset =
- convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- // Copy as much as we can from left to right.
- int i = 0;
- int32_t next_dest_offset = byteOffset + (offset + 1) * element_size;
- int32_t next_src_offset = source_byteOffset + source_element_size;
- while (i < length && next_dest_offset <= next_src_offset) {
- self->Set(offset + i, source->Get(i));
- ++i;
- next_dest_offset += element_size;
- next_src_offset += source_element_size;
- }
- // Of what's left, copy as much as we can from right to left.
- int j = length - 1;
- int32_t dest_offset = byteOffset + (offset + j) * element_size;
- int32_t src_offset = source_byteOffset + j * source_element_size;
- while (j >= i && dest_offset >= src_offset) {
- self->Set(offset + j, source->Get(j));
- --j;
- dest_offset -= element_size;
- src_offset -= source_element_size;
- }
- // There can be at most 8 entries left in the middle that need buffering
- // (because the largest element_size is 8 times the smallest).
- ASSERT(j+1 - i <= 8);
- Handle<Value> temp[8];
- for (int k = i; k <= j; ++k) {
- temp[k - i] = source->Get(k);
- }
- for (int k = i; k <= j; ++k) {
- self->Set(offset + k, temp[k - i]);
- }
- } else {
- // Different backing stores, safe to copy element-wise sequentially.
- for (int i = 0; i < source_length; ++i)
- self->Set(offset + i, source->Get(i));
- }
- }
-
- return Undefined(args.GetIsolate());
-}
-
-
-void Shell::ExternalArrayWeakCallback(v8::Isolate* isolate,
- Persistent<Value> object,
- void* data) {
- HandleScope scope;
- int32_t length =
- object->ToObject()->Get(Symbols::byteLength(isolate))->Uint32Value();
- V8::AdjustAmountOfExternalAllocatedMemory(-length);
- delete[] static_cast<uint8_t*>(data);
- object.Dispose(isolate);
-}
-
-
-Handle<Value> Shell::Int8Array(const Arguments& args) {
- return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
-}
-
-
-Handle<Value> Shell::Uint8Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalUnsignedByteArray, sizeof(uint8_t));
-}
-
-
-Handle<Value> Shell::Int16Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalShortArray, sizeof(int16_t));
-}
-
-
-Handle<Value> Shell::Uint16Array(const Arguments& args) {
- return CreateExternalArray(
- args, kExternalUnsignedShortArray, sizeof(uint16_t));
-}
-
-
-Handle<Value> Shell::Int32Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalIntArray, sizeof(int32_t));
-}
-
-
-Handle<Value> Shell::Uint32Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalUnsignedIntArray, sizeof(uint32_t));
-}
-
-
-Handle<Value> Shell::Float32Array(const Arguments& args) {
- return CreateExternalArray(
- args, kExternalFloatArray, sizeof(float)); // NOLINT
-}
-
-
-Handle<Value> Shell::Float64Array(const Arguments& args) {
- return CreateExternalArray(
- args, kExternalDoubleArray, sizeof(double)); // NOLINT
-}
-
-
-Handle<Value> Shell::Uint8ClampedArray(const Arguments& args) {
- return CreateExternalArray(args, kExternalPixelArray, sizeof(uint8_t));
-}
-
-
-Handle<Value> Shell::Yield(const Arguments& args) {
- v8::Unlocker unlocker(args.GetIsolate());
- return Undefined(args.GetIsolate());
-}
-
-
-Handle<Value> Shell::Quit(const Arguments& args) {
- int exit_code = args[0]->Int32Value();
- OnExit();
- exit(exit_code);
- return Undefined(args.GetIsolate());
-}
-
-
-Handle<Value> Shell::Version(const Arguments& args) {
- return String::New(V8::GetVersion());
-}
-
-
-void Shell::ReportException(v8::TryCatch* try_catch) {
- HandleScope handle_scope;
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- bool enter_context = !Context::InContext();
- if (enter_context) utility_context_->Enter();
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- v8::String::Utf8Value exception(try_catch->Exception());
- const char* exception_string = ToCString(exception);
- Handle<Message> message = try_catch->Message();
- if (message.IsEmpty()) {
- // V8 didn't provide any extra information about this error; just
- // print the exception.
- printf("%s\n", exception_string);
- } else {
- // Print (filename):(line number): (message).
- v8::String::Utf8Value filename(message->GetScriptResourceName());
- const char* filename_string = ToCString(filename);
- int linenum = message->GetLineNumber();
- printf("%s:%i: %s\n", filename_string, linenum, exception_string);
- // Print line of source code.
- v8::String::Utf8Value sourceline(message->GetSourceLine());
- const char* sourceline_string = ToCString(sourceline);
- printf("%s\n", sourceline_string);
- // Print wavy underline (GetUnderline is deprecated).
- int start = message->GetStartColumn();
- for (int i = 0; i < start; i++) {
- printf(" ");
- }
- int end = message->GetEndColumn();
- for (int i = start; i < end; i++) {
- printf("^");
- }
- printf("\n");
- v8::String::Utf8Value stack_trace(try_catch->StackTrace());
- if (stack_trace.length() > 0) {
- const char* stack_trace_string = ToCString(stack_trace);
- printf("%s\n", stack_trace_string);
- }
- }
- printf("\n");
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- if (enter_context) utility_context_->Exit();
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
-}
-
-
-#ifndef V8_SHARED
-Handle<Array> Shell::GetCompletions(Handle<String> text, Handle<String> full) {
- HandleScope handle_scope;
- Context::Scope context_scope(utility_context_);
- Handle<Object> global = utility_context_->Global();
- Handle<Value> fun = global->Get(String::New("GetCompletions"));
- static const int kArgc = 3;
- Handle<Value> argv[kArgc] = { evaluation_context_->Global(), text, full };
- Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return handle_scope.Close(Handle<Array>::Cast(val));
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Object> Shell::DebugMessageDetails(Handle<String> message) {
- Context::Scope context_scope(utility_context_);
- Handle<Object> global = utility_context_->Global();
- Handle<Value> fun = global->Get(String::New("DebugMessageDetails"));
- static const int kArgc = 1;
- Handle<Value> argv[kArgc] = { message };
- Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return Handle<Object>::Cast(val);
-}
-
-
-Handle<Value> Shell::DebugCommandToJSONRequest(Handle<String> command) {
- Context::Scope context_scope(utility_context_);
- Handle<Object> global = utility_context_->Global();
- Handle<Value> fun = global->Get(String::New("DebugCommandToJSONRequest"));
- static const int kArgc = 1;
- Handle<Value> argv[kArgc] = { command };
- Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return val;
-}
-
-
-void Shell::DispatchDebugMessages() {
- v8::Context::Scope scope(Shell::evaluation_context_);
- v8::Debug::ProcessDebugMessages();
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-#endif // V8_SHARED
-
-
-#ifndef V8_SHARED
-int32_t* Counter::Bind(const char* name, bool is_histogram) {
- int i;
- for (i = 0; i < kMaxNameSize - 1 && name[i]; i++)
- name_[i] = static_cast<char>(name[i]);
- name_[i] = '\0';
- is_histogram_ = is_histogram;
- return ptr();
-}
-
-
-void Counter::AddSample(int32_t sample) {
- count_++;
- sample_total_ += sample;
-}
-
-
-CounterCollection::CounterCollection() {
- magic_number_ = 0xDEADFACE;
- max_counters_ = kMaxCounters;
- max_name_size_ = Counter::kMaxNameSize;
- counters_in_use_ = 0;
-}
-
-
-Counter* CounterCollection::GetNextCounter() {
- if (counters_in_use_ == kMaxCounters) return NULL;
- return &counters_[counters_in_use_++];
-}
-
-
-void Shell::MapCounters(const char* name) {
- counters_file_ = i::OS::MemoryMappedFile::create(
- name, sizeof(CounterCollection), &local_counters_);
- void* memory = (counters_file_ == NULL) ?
- NULL : counters_file_->memory();
- if (memory == NULL) {
- printf("Could not map counters file %s\n", name);
- Exit(1);
- }
- counters_ = static_cast<CounterCollection*>(memory);
- V8::SetCounterFunction(LookupCounter);
- V8::SetCreateHistogramFunction(CreateHistogram);
- V8::SetAddHistogramSampleFunction(AddHistogramSample);
-}
-
-
-int CounterMap::Hash(const char* name) {
- int h = 0;
- int c;
- while ((c = *name++) != 0) {
- h += h << 5;
- h += c;
- }
- return h;
-}
-
-
-Counter* Shell::GetCounter(const char* name, bool is_histogram) {
- Counter* counter = counter_map_->Lookup(name);
-
- if (counter == NULL) {
- counter = counters_->GetNextCounter();
- if (counter != NULL) {
- counter_map_->Set(name, counter);
- counter->Bind(name, is_histogram);
- }
- } else {
- ASSERT(counter->is_histogram() == is_histogram);
- }
- return counter;
-}
-
-
-int* Shell::LookupCounter(const char* name) {
- Counter* counter = GetCounter(name, false);
-
- if (counter != NULL) {
- return counter->ptr();
- } else {
- return NULL;
- }
-}
-
-
-void* Shell::CreateHistogram(const char* name,
- int min,
- int max,
- size_t buckets) {
- return GetCounter(name, true);
-}
-
-
-void Shell::AddHistogramSample(void* histogram, int sample) {
- Counter* counter = reinterpret_cast<Counter*>(histogram);
- counter->AddSample(sample);
-}
-
-
-void Shell::InstallUtilityScript(Isolate* isolate) {
- Locker lock(isolate);
- HandleScope scope;
- // If we use the utility context, we have to set the security tokens so that
- // utility, evaluation and debug context can all access each other.
- utility_context_->SetSecurityToken(Undefined(isolate));
- evaluation_context_->SetSecurityToken(Undefined(isolate));
- Context::Scope utility_scope(utility_context_);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
- // Install the debugger object in the utility scope
- i::Debug* debug = i::Isolate::Current()->debug();
- debug->Load();
- i::Handle<i::JSObject> js_debug
- = i::Handle<i::JSObject>(debug->debug_context()->global_object());
- utility_context_->Global()->Set(String::New("$debug"),
- Utils::ToLocal(js_debug));
- debug->debug_context()->set_security_token(HEAP->undefined_value());
-#endif // ENABLE_DEBUGGER_SUPPORT
-
- // Run the d8 shell utility script in the utility context
- int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
- i::Vector<const char> shell_source =
- i::NativesCollection<i::D8>::GetRawScriptSource(source_index);
- i::Vector<const char> shell_source_name =
- i::NativesCollection<i::D8>::GetScriptName(source_index);
- Handle<String> source = String::New(shell_source.start(),
- shell_source.length());
- Handle<String> name = String::New(shell_source_name.start(),
- shell_source_name.length());
- Handle<Script> script = Script::Compile(source, name);
- script->Run();
- // Mark the d8 shell script as native to avoid it showing up as normal source
- // in the debugger.
- i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
- i::Handle<i::Script> script_object = compiled_script->IsJSFunction()
- ? i::Handle<i::Script>(i::Script::cast(
- i::JSFunction::cast(*compiled_script)->shared()->script()))
- : i::Handle<i::Script>(i::Script::cast(
- i::SharedFunctionInfo::cast(*compiled_script)->script()));
- script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Start the in-process debugger if requested.
- if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
- v8::Debug::SetDebugEventListener(HandleDebugEvent);
- }
-#endif // ENABLE_DEBUGGER_SUPPORT
-}
-#endif // V8_SHARED
-
-
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-class BZip2Decompressor : public v8::StartupDataDecompressor {
- public:
- virtual ~BZip2Decompressor() { }
-
- protected:
- virtual int DecompressData(char* raw_data,
- int* raw_data_size,
- const char* compressed_data,
- int compressed_data_size) {
- ASSERT_EQ(v8::StartupData::kBZip2,
- v8::V8::GetCompressedStartupDataAlgorithm());
- unsigned int decompressed_size = *raw_data_size;
- int result =
- BZ2_bzBuffToBuffDecompress(raw_data,
- &decompressed_size,
- const_cast<char*>(compressed_data),
- compressed_data_size,
- 0, 1);
- if (result == BZ_OK) {
- *raw_data_size = decompressed_size;
- }
- return result;
- }
-};
-#endif
-
-
-Handle<FunctionTemplate> Shell::CreateArrayBufferTemplate(
- InvocationCallback fun) {
- Handle<FunctionTemplate> buffer_template = FunctionTemplate::New(fun);
- Local<Template> proto_template = buffer_template->PrototypeTemplate();
- proto_template->Set(String::New("slice"),
- FunctionTemplate::New(ArrayBufferSlice));
- return buffer_template;
-}
-
-
-Handle<FunctionTemplate> Shell::CreateArrayTemplate(InvocationCallback fun) {
- Handle<FunctionTemplate> array_template = FunctionTemplate::New(fun);
- Local<Template> proto_template = array_template->PrototypeTemplate();
- proto_template->Set(String::New("set"), FunctionTemplate::New(ArraySet));
- proto_template->Set(String::New("subarray"),
- FunctionTemplate::New(ArraySubArray));
- return array_template;
-}
-
-
-Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
- Handle<ObjectTemplate> global_template = ObjectTemplate::New();
- global_template->Set(String::New("print"), FunctionTemplate::New(Print));
- global_template->Set(String::New("write"), FunctionTemplate::New(Write));
- global_template->Set(String::New("read"), FunctionTemplate::New(Read));
- global_template->Set(String::New("readbuffer"),
- FunctionTemplate::New(ReadBuffer));
- global_template->Set(String::New("readline"),
- FunctionTemplate::New(ReadLine));
- global_template->Set(String::New("load"), FunctionTemplate::New(Load));
- global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
- global_template->Set(String::New("version"), FunctionTemplate::New(Version));
- global_template->Set(String::New("enableProfiler"),
- FunctionTemplate::New(EnableProfiler));
- global_template->Set(String::New("disableProfiler"),
- FunctionTemplate::New(DisableProfiler));
-
- // Bind the handlers for external arrays.
- PropertyAttribute attr =
- static_cast<PropertyAttribute>(ReadOnly | DontDelete);
- global_template->Set(Symbols::ArrayBuffer(isolate),
- CreateArrayBufferTemplate(ArrayBuffer), attr);
- global_template->Set(String::New("Int8Array"),
- CreateArrayTemplate(Int8Array), attr);
- global_template->Set(String::New("Uint8Array"),
- CreateArrayTemplate(Uint8Array), attr);
- global_template->Set(String::New("Int16Array"),
- CreateArrayTemplate(Int16Array), attr);
- global_template->Set(String::New("Uint16Array"),
- CreateArrayTemplate(Uint16Array), attr);
- global_template->Set(String::New("Int32Array"),
- CreateArrayTemplate(Int32Array), attr);
- global_template->Set(String::New("Uint32Array"),
- CreateArrayTemplate(Uint32Array), attr);
- global_template->Set(String::New("Float32Array"),
- CreateArrayTemplate(Float32Array), attr);
- global_template->Set(String::New("Float64Array"),
- CreateArrayTemplate(Float64Array), attr);
- global_template->Set(String::New("Uint8ClampedArray"),
- CreateArrayTemplate(Uint8ClampedArray), attr);
-
-#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
- Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
- AddOSMethods(os_templ);
- global_template->Set(String::New("os"), os_templ);
-#endif // V8_SHARED
-
- return global_template;
-}
-
-
-void Shell::Initialize(Isolate* isolate) {
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- BZip2Decompressor startup_data_decompressor;
- int bz2_result = startup_data_decompressor.Decompress();
- if (bz2_result != BZ_OK) {
- fprintf(stderr, "bzip error code: %d\n", bz2_result);
- Exit(1);
- }
-#endif
-
-#ifndef V8_SHARED
- Shell::counter_map_ = new CounterMap();
- // Set up counters
- if (i::StrLength(i::FLAG_map_counters) != 0)
- MapCounters(i::FLAG_map_counters);
- if (i::FLAG_dump_counters || i::FLAG_track_gc_object_stats) {
- V8::SetCounterFunction(LookupCounter);
- V8::SetCreateHistogramFunction(CreateHistogram);
- V8::SetAddHistogramSampleFunction(AddHistogramSample);
- }
-#endif // V8_SHARED
-}
-
-
-void Shell::InitializeDebugger(Isolate* isolate) {
- if (options.test_shell) return;
-#ifndef V8_SHARED
- Locker lock(isolate);
- HandleScope scope;
- Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
- utility_context_ = Context::New(NULL, global_template);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Start the debugger agent if requested.
- if (i::FLAG_debugger_agent) {
- v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
- v8::Debug::SetDebugMessageDispatchHandler(DispatchDebugMessages, true);
- }
-#endif // ENABLE_DEBUGGER_SUPPORT
-#endif // V8_SHARED
-}
-
-
-Persistent<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
-#ifndef V8_SHARED
- // This needs to be a critical section since this is not thread-safe
- i::ScopedLock lock(context_mutex_);
-#endif // V8_SHARED
- // Initialize the global objects
- Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
- Persistent<Context> context = Context::New(NULL, global_template);
- ASSERT(!context.IsEmpty());
- Context::Scope scope(context);
-
-#ifndef V8_SHARED
- i::JSArguments js_args = i::FLAG_js_arguments;
- i::Handle<i::FixedArray> arguments_array =
- FACTORY->NewFixedArray(js_args.argc());
- for (int j = 0; j < js_args.argc(); j++) {
- i::Handle<i::String> arg =
- FACTORY->NewStringFromUtf8(i::CStrVector(js_args[j]));
- arguments_array->set(j, *arg);
- }
- i::Handle<i::JSArray> arguments_jsarray =
- FACTORY->NewJSArrayWithElements(arguments_array);
- context->Global()->Set(String::New("arguments"),
- Utils::ToLocal(arguments_jsarray));
-#endif // V8_SHARED
- return context;
-}
-
-
-void Shell::Exit(int exit_code) {
- // Use _exit instead of exit to avoid races between isolate
- // threads and static destructors.
- fflush(stdout);
- fflush(stderr);
- _exit(exit_code);
-}
-
-
-#ifndef V8_SHARED
-struct CounterAndKey {
- Counter* counter;
- const char* key;
-};
-
-
-int CompareKeys(const void* a, const void* b) {
- return strcmp(static_cast<const CounterAndKey*>(a)->key,
- static_cast<const CounterAndKey*>(b)->key);
-}
-#endif // V8_SHARED
-
-
-void Shell::OnExit() {
- LineEditor* line_editor = LineEditor::Get();
- if (line_editor) line_editor->Close();
-#ifndef V8_SHARED
- if (i::FLAG_dump_counters) {
- int number_of_counters = 0;
- for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
- number_of_counters++;
- }
- CounterAndKey* counters = new CounterAndKey[number_of_counters];
- int j = 0;
- for (CounterMap::Iterator i(counter_map_); i.More(); i.Next(), j++) {
- counters[j].counter = i.CurrentValue();
- counters[j].key = i.CurrentKey();
- }
- qsort(counters, number_of_counters, sizeof(counters[0]), CompareKeys);
- printf("+----------------------------------------------------------------+"
- "-------------+\n");
- printf("| Name |"
- " Value |\n");
- printf("+----------------------------------------------------------------+"
- "-------------+\n");
- for (j = 0; j < number_of_counters; j++) {
- Counter* counter = counters[j].counter;
- const char* key = counters[j].key;
- if (counter->is_histogram()) {
- printf("| c:%-60s | %11i |\n", key, counter->count());
- printf("| t:%-60s | %11i |\n", key, counter->sample_total());
- } else {
- printf("| %-62s | %11i |\n", key, counter->count());
- }
- }
- printf("+----------------------------------------------------------------+"
- "-------------+\n");
- delete [] counters;
- }
- delete context_mutex_;
- delete counters_file_;
- delete counter_map_;
-#endif // V8_SHARED
-}
-
-
-
-static FILE* FOpen(const char* path, const char* mode) {
-#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
- FILE* result;
- if (fopen_s(&result, path, mode) == 0) {
- return result;
- } else {
- return NULL;
- }
-#else
- FILE* file = fopen(path, mode);
- if (file == NULL) return NULL;
- struct stat file_stat;
- if (fstat(fileno(file), &file_stat) != 0) return NULL;
- bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
- if (is_regular_file) return file;
- fclose(file);
- return NULL;
-#endif
-}
-
-
-static char* ReadChars(Isolate* isolate, const char* name, int* size_out) {
- // Release the V8 lock while reading files.
- v8::Unlocker unlocker(isolate);
- FILE* file = FOpen(name, "rb");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
- rewind(file);
-
- char* chars = new char[size + 1];
- chars[size] = '\0';
- for (int i = 0; i < size;) {
- int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
- i += read;
- }
- fclose(file);
- *size_out = size;
- return chars;
-}
-
-
-Handle<Value> Shell::ReadBuffer(const Arguments& args) {
- ASSERT(sizeof(char) == sizeof(uint8_t)); // NOLINT
- String::Utf8Value filename(args[0]);
- int length;
- if (*filename == NULL) {
- return Throw("Error loading file");
- }
-
- uint8_t* data = reinterpret_cast<uint8_t*>(
- ReadChars(args.GetIsolate(), *filename, &length));
- if (data == NULL) {
- return Throw("Error reading file");
- }
- Isolate* isolate = args.GetIsolate();
- Handle<Object> buffer = Object::New();
- buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True());
- Persistent<Object> persistent_buffer =
- Persistent<Object>::New(isolate, buffer);
- persistent_buffer.MakeWeak(isolate, data, ExternalArrayWeakCallback);
- persistent_buffer.MarkIndependent(isolate);
- V8::AdjustAmountOfExternalAllocatedMemory(length);
-
- buffer->SetIndexedPropertiesToExternalArrayData(
- data, kExternalUnsignedByteArray, length);
- buffer->Set(Symbols::byteLength(isolate),
- Int32::New(static_cast<int32_t>(length), isolate), ReadOnly);
- return buffer;
-}
-
-
-#ifndef V8_SHARED
-static char* ReadToken(char* data, char token) {
- char* next = i::OS::StrChr(data, token);
- if (next != NULL) {
- *next = '\0';
- return (next + 1);
- }
-
- return NULL;
-}
-
-
-static char* ReadLine(char* data) {
- return ReadToken(data, '\n');
-}
-
-
-static char* ReadWord(char* data) {
- return ReadToken(data, ' ');
-}
-#endif // V8_SHARED
-
-
-// Reads a file into a v8 string.
-Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
- int size = 0;
- char* chars = ReadChars(isolate, name, &size);
- if (chars == NULL) return Handle<String>();
- Handle<String> result = String::New(chars, size);
- delete[] chars;
- return result;
-}
-
-
-void Shell::RunShell(Isolate* isolate) {
- Locker locker(isolate);
- Context::Scope context_scope(evaluation_context_);
- HandleScope outer_scope;
- Handle<String> name = String::New("(d8)");
- LineEditor* console = LineEditor::Get();
- printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
- console->Open();
- while (true) {
- HandleScope inner_scope;
- Handle<String> input = console->Prompt(Shell::kPrompt);
- if (input.IsEmpty()) break;
- ExecuteString(input, name, true, true);
- }
- printf("\n");
-}
-
-
-#ifndef V8_SHARED
-class ShellThread : public i::Thread {
- public:
- // Takes ownership of the underlying char array of |files|.
- ShellThread(Isolate* isolate, char* files)
- : Thread("d8:ShellThread"),
- isolate_(isolate), files_(files) { }
-
- ~ShellThread() {
- delete[] files_;
- }
-
- virtual void Run();
- private:
- Isolate* isolate_;
- char* files_;
-};
-
-
-void ShellThread::Run() {
- char* ptr = files_;
- while ((ptr != NULL) && (*ptr != '\0')) {
- // For each newline-separated line.
- char* next_line = ReadLine(ptr);
-
- if (*ptr == '#') {
- // Skip comment lines.
- ptr = next_line;
- continue;
- }
-
- // Prepare the context for this thread.
- Locker locker(isolate_);
- HandleScope outer_scope;
- Persistent<Context> thread_context =
- Shell::CreateEvaluationContext(isolate_);
- Context::Scope context_scope(thread_context);
-
- while ((ptr != NULL) && (*ptr != '\0')) {
- HandleScope inner_scope;
- char* filename = ptr;
- ptr = ReadWord(ptr);
-
- // Skip empty strings.
- if (strlen(filename) == 0) {
- continue;
- }
-
- Handle<String> str = Shell::ReadFile(isolate_, filename);
- if (str.IsEmpty()) {
- printf("File '%s' not found\n", filename);
- Shell::Exit(1);
- }
-
- Shell::ExecuteString(str, String::New(filename), false, false);
- }
-
- thread_context.Dispose(thread_context->GetIsolate());
- ptr = next_line;
- }
-}
-#endif // V8_SHARED
-
-
-SourceGroup::~SourceGroup() {
-#ifndef V8_SHARED
- delete next_semaphore_;
- next_semaphore_ = NULL;
- delete done_semaphore_;
- done_semaphore_ = NULL;
- delete thread_;
- thread_ = NULL;
-#endif // V8_SHARED
-}
-
-
-void SourceGroup::Execute(Isolate* isolate) {
- for (int i = begin_offset_; i < end_offset_; ++i) {
- const char* arg = argv_[i];
- if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
- // Execute argument given to -e option directly.
- HandleScope handle_scope;
- Handle<String> file_name = String::New("unnamed");
- Handle<String> source = String::New(argv_[i + 1]);
- if (!Shell::ExecuteString(source, file_name, false, true)) {
- Shell::Exit(1);
- }
- ++i;
- } else if (arg[0] == '-') {
- // Ignore other options. They have been parsed already.
- } else {
- // Use all other arguments as names of files to load and run.
- HandleScope handle_scope;
- Handle<String> file_name = String::New(arg);
- Handle<String> source = ReadFile(isolate, arg);
- if (source.IsEmpty()) {
- printf("Error reading '%s'\n", arg);
- Shell::Exit(1);
- }
- if (!Shell::ExecuteString(source, file_name, false, true)) {
- Shell::Exit(1);
- }
- }
- }
-}
-
-
-Handle<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
- int size;
- char* chars = ReadChars(isolate, name, &size);
- if (chars == NULL) return Handle<String>();
- Handle<String> result = String::New(chars, size);
- delete[] chars;
- return result;
-}
-
-
-#ifndef V8_SHARED
-i::Thread::Options SourceGroup::GetThreadOptions() {
- // On some systems (OSX 10.6) the stack size default is 0.5Mb or less
- // which is not enough to parse the big literal expressions used in tests.
- // The stack size should be at least StackGuard::kLimitSize + some
- // OS-specific padding for thread startup code. 2Mbytes seems to be enough.
- return i::Thread::Options("IsolateThread", 2 * MB);
-}
-
-
-void SourceGroup::ExecuteInThread() {
- Isolate* isolate = Isolate::New();
- do {
- if (next_semaphore_ != NULL) next_semaphore_->Wait();
- {
- Isolate::Scope iscope(isolate);
- Locker lock(isolate);
- HandleScope scope;
- Symbols symbols(isolate);
- Persistent<Context> context = Shell::CreateEvaluationContext(isolate);
- {
- Context::Scope cscope(context);
- Execute(isolate);
- }
- context.Dispose(isolate);
- if (Shell::options.send_idle_notification) {
- const int kLongIdlePauseInMs = 1000;
- V8::ContextDisposedNotification();
- V8::IdleNotification(kLongIdlePauseInMs);
- }
- }
- if (done_semaphore_ != NULL) done_semaphore_->Signal();
- } while (!Shell::options.last_run);
- isolate->Dispose();
-}
-
-
-void SourceGroup::StartExecuteInThread() {
- if (thread_ == NULL) {
- thread_ = new IsolateThread(this);
- thread_->Start();
- }
- next_semaphore_->Signal();
-}
-
-
-void SourceGroup::WaitForThread() {
- if (thread_ == NULL) return;
- if (Shell::options.last_run) {
- thread_->Join();
- } else {
- done_semaphore_->Wait();
- }
-}
-#endif // V8_SHARED
-
-
-bool Shell::SetOptions(int argc, char* argv[]) {
- for (int i = 0; i < argc; i++) {
- if (strcmp(argv[i], "--stress-opt") == 0) {
- options.stress_opt = true;
- argv[i] = NULL;
- } else if (strcmp(argv[i], "--stress-deopt") == 0) {
- options.stress_deopt = true;
- argv[i] = NULL;
- } else if (strcmp(argv[i], "--noalways-opt") == 0) {
- // No support for stressing if we can't use --always-opt.
- options.stress_opt = false;
- options.stress_deopt = false;
- } else if (strcmp(argv[i], "--shell") == 0) {
- options.interactive_shell = true;
- argv[i] = NULL;
- } else if (strcmp(argv[i], "--test") == 0) {
- options.test_shell = true;
- argv[i] = NULL;
- } else if (strcmp(argv[i], "--send-idle-notification") == 0) {
- options.send_idle_notification = true;
- argv[i] = NULL;
- } else if (strcmp(argv[i], "--preemption") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#else
- options.use_preemption = true;
- argv[i] = NULL;
-#endif // V8_SHARED
- } else if (strcmp(argv[i], "--nopreemption") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#else
- options.use_preemption = false;
- argv[i] = NULL;
-#endif // V8_SHARED
- } else if (strcmp(argv[i], "--preemption-interval") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#else
- if (++i < argc) {
- argv[i-1] = NULL;
- char* end = NULL;
- options.preemption_interval = strtol(argv[i], &end, 10); // NOLINT
- if (options.preemption_interval <= 0
- || *end != '\0'
- || errno == ERANGE) {
- printf("Invalid value for --preemption-interval '%s'\n", argv[i]);
- return false;
- }
- argv[i] = NULL;
- } else {
- printf("Missing value for --preemption-interval\n");
- return false;
- }
-#endif // V8_SHARED
- } else if (strcmp(argv[i], "-f") == 0) {
- // Ignore any -f flags for compatibility with other stand-alone
- // JavaScript engines.
- continue;
- } else if (strcmp(argv[i], "--isolate") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#endif // V8_SHARED
- options.num_isolates++;
- } else if (strcmp(argv[i], "-p") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#else
- options.num_parallel_files++;
-#endif // V8_SHARED
- }
-#ifdef V8_SHARED
- else if (strcmp(argv[i], "--dump-counters") == 0) {
- printf("D8 with shared library does not include counters\n");
- return false;
- } else if (strcmp(argv[i], "--debugger") == 0) {
- printf("Javascript debugger not included\n");
- return false;
- }
-#endif // V8_SHARED
- }
-
-#ifndef V8_SHARED
- // Run parallel threads if we are not using --isolate
- options.parallel_files = new char*[options.num_parallel_files];
- int parallel_files_set = 0;
- for (int i = 1; i < argc; i++) {
- if (argv[i] == NULL) continue;
- if (strcmp(argv[i], "-p") == 0 && i + 1 < argc) {
- if (options.num_isolates > 1) {
- printf("-p is not compatible with --isolate\n");
- return false;
- }
- argv[i] = NULL;
- i++;
- options.parallel_files[parallel_files_set] = argv[i];
- parallel_files_set++;
- argv[i] = NULL;
- }
- }
- if (parallel_files_set != options.num_parallel_files) {
- printf("-p requires a file containing a list of files as parameter\n");
- return false;
- }
-#endif // V8_SHARED
-
- v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
-
- // Set up isolated source groups.
- options.isolate_sources = new SourceGroup[options.num_isolates];
- SourceGroup* current = options.isolate_sources;
- current->Begin(argv, 1);
- for (int i = 1; i < argc; i++) {
- const char* str = argv[i];
- if (strcmp(str, "--isolate") == 0) {
- current->End(i);
- current++;
- current->Begin(argv, i + 1);
- } else if (strncmp(argv[i], "--", 2) == 0) {
- printf("Warning: unknown flag %s.\nTry --help for options\n", argv[i]);
- }
- }
- current->End(argc);
-
- return true;
-}
-
-
-int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
-#ifndef V8_SHARED
- i::List<i::Thread*> threads(1);
- if (options.parallel_files != NULL) {
- for (int i = 0; i < options.num_parallel_files; i++) {
- char* files = NULL;
- { Locker lock(isolate);
- int size = 0;
- files = ReadChars(isolate, options.parallel_files[i], &size);
- }
- if (files == NULL) {
- printf("File list '%s' not found\n", options.parallel_files[i]);
- Exit(1);
- }
- ShellThread* thread = new ShellThread(isolate, files);
- thread->Start();
- threads.Add(thread);
- }
- }
- for (int i = 1; i < options.num_isolates; ++i) {
- options.isolate_sources[i].StartExecuteInThread();
- }
-#endif // V8_SHARED
- { // NOLINT
- Locker lock(isolate);
- HandleScope scope;
- Persistent<Context> context = CreateEvaluationContext(isolate);
- if (options.last_run) {
- // Keep using the same context in the interactive shell.
- evaluation_context_ = context;
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- // If the interactive debugger is enabled make sure to activate
- // it before running the files passed on the command line.
- if (i::FLAG_debugger) {
- InstallUtilityScript(isolate);
- }
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- }
- {
- Context::Scope cscope(context);
- options.isolate_sources[0].Execute(isolate);
- }
- if (!options.last_run) {
- context.Dispose(isolate);
- if (options.send_idle_notification) {
- const int kLongIdlePauseInMs = 1000;
- V8::ContextDisposedNotification();
- V8::IdleNotification(kLongIdlePauseInMs);
- }
- }
-
-#ifndef V8_SHARED
- // Start preemption if threads have been created and preemption is enabled.
- if (threads.length() > 0
- && options.use_preemption) {
- Locker::StartPreemption(options.preemption_interval);
- }
-#endif // V8_SHARED
- }
-
-#ifndef V8_SHARED
- for (int i = 1; i < options.num_isolates; ++i) {
- options.isolate_sources[i].WaitForThread();
- }
-
- for (int i = 0; i < threads.length(); i++) {
- i::Thread* thread = threads[i];
- thread->Join();
- delete thread;
- }
-
- if (threads.length() > 0 && options.use_preemption) {
- Locker lock(isolate);
- Locker::StopPreemption();
- }
-#endif // V8_SHARED
- return 0;
-}
-
-
-int Shell::Main(int argc, char* argv[]) {
- if (!SetOptions(argc, argv)) return 1;
- int result = 0;
- Isolate* isolate = Isolate::GetCurrent();
- DumbLineEditor dumb_line_editor(isolate);
- {
- Initialize(isolate);
- Symbols symbols(isolate);
- InitializeDebugger(isolate);
-
- if (options.stress_opt || options.stress_deopt) {
- Testing::SetStressRunType(options.stress_opt
- ? Testing::kStressTypeOpt
- : Testing::kStressTypeDeopt);
- int stress_runs = Testing::GetStressRuns();
- for (int i = 0; i < stress_runs && result == 0; i++) {
- printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
- Testing::PrepareStressRun(i);
- options.last_run = (i == stress_runs - 1);
- result = RunMain(isolate, argc, argv);
- }
- printf("======== Full Deoptimization =======\n");
- Testing::DeoptimizeAll();
-#if !defined(V8_SHARED)
- } else if (i::FLAG_stress_runs > 0) {
- int stress_runs = i::FLAG_stress_runs;
- for (int i = 0; i < stress_runs && result == 0; i++) {
- printf("============ Run %d/%d ============\n", i + 1, stress_runs);
- options.last_run = (i == stress_runs - 1);
- result = RunMain(isolate, argc, argv);
- }
-#endif
- } else {
- result = RunMain(isolate, argc, argv);
- }
-
-
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- // Run remote debugger if requested, but never on --test
- if (i::FLAG_remote_debugger && !options.test_shell) {
- InstallUtilityScript(isolate);
- RunRemoteDebugger(i::FLAG_debugger_port);
- return 0;
- }
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
-
- // Run interactive shell if explicitly requested or if no script has been
- // executed, but never on --test
-
- if (( options.interactive_shell || !options.script_executed )
- && !options.test_shell ) {
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- if (!i::FLAG_debugger) {
- InstallUtilityScript(isolate);
- }
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- RunShell(isolate);
- }
- }
- V8::Dispose();
-
- OnExit();
-
- return result;
-}
-
-} // namespace v8
-
-
-#ifndef GOOGLE3
-int main(int argc, char* argv[]) {
- return v8::Shell::Main(argc, argv);
-}
-#endif
diff --git a/src/3rdparty/v8/src/d8.gyp b/src/3rdparty/v8/src/d8.gyp
deleted file mode 100644
index cce8f2a..0000000
--- a/src/3rdparty/v8/src/d8.gyp
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
- 'includes': ['../build/common.gypi'],
- 'variables': {
- 'console%': '',
- },
- 'targets': [
- {
- 'target_name': 'd8',
- 'type': 'executable',
- 'dependencies': [
- '../tools/gyp/v8.gyp:v8',
- ],
- # Generated source files need this explicitly:
- 'include_dirs+': [
- '../src',
- ],
- 'sources': [
- 'd8.cc',
- ],
- 'conditions': [
- [ 'console=="readline"', {
- 'libraries': [ '-lreadline', ],
- 'sources': [ 'd8-readline.cc' ],
- }],
- [ 'component!="shared_library"', {
- 'sources': [ 'd8-debug.cc', '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc', ],
- 'conditions': [
- [ 'want_separate_host_toolset==1', {
- 'dependencies': [
- 'd8_js2c#host',
- ],
- }, {
- 'dependencies': [
- 'd8_js2c',
- ],
- }],
- ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
- or OS=="openbsd" or OS=="solaris" or OS=="android")', {
- 'sources': [ 'd8-posix.cc', ]
- }],
- [ 'OS=="win"', {
- 'sources': [ 'd8-windows.cc', ]
- }],
- ],
- }],
- ],
- },
- {
- 'target_name': 'd8_js2c',
- 'type': 'none',
- 'variables': {
- 'js_files': [
- 'd8.js',
- 'macros.py',
- ],
- },
- 'conditions': [
- [ 'want_separate_host_toolset==1', {
- 'toolsets': ['host'],
- }, {
- 'toolsets': ['target'],
- }]
- ],
- 'actions': [
- {
- 'action_name': 'd8_js2c',
- 'inputs': [
- '../tools/js2c.py',
- '<@(js_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
- ],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<@(_outputs)',
- 'D8',
- 'off', # compress startup data
- '<@(js_files)'
- ],
- },
- ],
- }
- ],
-}
diff --git a/src/3rdparty/v8/src/d8.h b/src/3rdparty/v8/src/d8.h
deleted file mode 100644
index f3b3fa1..0000000
--- a/src/3rdparty/v8/src/d8.h
+++ /dev/null
@@ -1,415 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_D8_H_
-#define V8_D8_H_
-
-#ifndef V8_SHARED
-#include "allocation.h"
-#include "hashmap.h"
-#include "smart-pointers.h"
-#include "v8.h"
-#else
-#include "../include/v8.h"
-#endif // V8_SHARED
-
-namespace v8 {
-
-
-#ifndef V8_SHARED
-// A single counter in a counter collection.
-class Counter {
- public:
- static const int kMaxNameSize = 64;
- int32_t* Bind(const char* name, bool histogram);
- int32_t* ptr() { return &count_; }
- int32_t count() { return count_; }
- int32_t sample_total() { return sample_total_; }
- bool is_histogram() { return is_histogram_; }
- void AddSample(int32_t sample);
- private:
- int32_t count_;
- int32_t sample_total_;
- bool is_histogram_;
- uint8_t name_[kMaxNameSize];
-};
-
-
-// A set of counters and associated information. An instance of this
-// class is stored directly in the memory-mapped counters file if
-// the --map-counters options is used
-class CounterCollection {
- public:
- CounterCollection();
- Counter* GetNextCounter();
- private:
- static const unsigned kMaxCounters = 512;
- uint32_t magic_number_;
- uint32_t max_counters_;
- uint32_t max_name_size_;
- uint32_t counters_in_use_;
- Counter counters_[kMaxCounters];
-};
-
-
-class CounterMap {
- public:
- CounterMap(): hash_map_(Match) { }
- Counter* Lookup(const char* name) {
- i::HashMap::Entry* answer = hash_map_.Lookup(
- const_cast<char*>(name),
- Hash(name),
- false);
- if (!answer) return NULL;
- return reinterpret_cast<Counter*>(answer->value);
- }
- void Set(const char* name, Counter* value) {
- i::HashMap::Entry* answer = hash_map_.Lookup(
- const_cast<char*>(name),
- Hash(name),
- true);
- ASSERT(answer != NULL);
- answer->value = value;
- }
- class Iterator {
- public:
- explicit Iterator(CounterMap* map)
- : map_(&map->hash_map_), entry_(map_->Start()) { }
- void Next() { entry_ = map_->Next(entry_); }
- bool More() { return entry_ != NULL; }
- const char* CurrentKey() { return static_cast<const char*>(entry_->key); }
- Counter* CurrentValue() { return static_cast<Counter*>(entry_->value); }
- private:
- i::HashMap* map_;
- i::HashMap::Entry* entry_;
- };
-
- private:
- static int Hash(const char* name);
- static bool Match(void* key1, void* key2);
- i::HashMap hash_map_;
-};
-#endif // V8_SHARED
-
-
-class LineEditor {
- public:
- enum Type { DUMB = 0, READLINE = 1 };
- LineEditor(Type type, const char* name);
- virtual ~LineEditor() { }
-
- virtual Handle<String> Prompt(const char* prompt) = 0;
- virtual bool Open() { return true; }
- virtual bool Close() { return true; }
- virtual void AddHistory(const char* str) { }
-
- const char* name() { return name_; }
- static LineEditor* Get() { return current_; }
- private:
- Type type_;
- const char* name_;
- static LineEditor* current_;
-};
-
-
-class SourceGroup {
- public:
- SourceGroup() :
-#ifndef V8_SHARED
- next_semaphore_(v8::internal::OS::CreateSemaphore(0)),
- done_semaphore_(v8::internal::OS::CreateSemaphore(0)),
- thread_(NULL),
-#endif // V8_SHARED
- argv_(NULL),
- begin_offset_(0),
- end_offset_(0) {}
-
- ~SourceGroup();
-
- void Begin(char** argv, int offset) {
- argv_ = const_cast<const char**>(argv);
- begin_offset_ = offset;
- }
-
- void End(int offset) { end_offset_ = offset; }
-
- void Execute(Isolate* isolate);
-
-#ifndef V8_SHARED
- void StartExecuteInThread();
- void WaitForThread();
-
- private:
- class IsolateThread : public i::Thread {
- public:
- explicit IsolateThread(SourceGroup* group)
- : i::Thread(GetThreadOptions()), group_(group) {}
-
- virtual void Run() {
- group_->ExecuteInThread();
- }
-
- private:
- SourceGroup* group_;
- };
-
- static i::Thread::Options GetThreadOptions();
- void ExecuteInThread();
-
- i::Semaphore* next_semaphore_;
- i::Semaphore* done_semaphore_;
- i::Thread* thread_;
-#endif // V8_SHARED
-
- void ExitShell(int exit_code);
- Handle<String> ReadFile(Isolate* isolate, const char* name);
-
- const char** argv_;
- int begin_offset_;
- int end_offset_;
-};
-
-
-class BinaryResource : public v8::String::ExternalAsciiStringResource {
- public:
- BinaryResource(const char* string, int length)
- : data_(string),
- length_(length) { }
-
- ~BinaryResource() {
- delete[] data_;
- data_ = NULL;
- length_ = 0;
- }
-
- virtual const char* data() const { return data_; }
- virtual size_t length() const { return length_; }
-
- private:
- const char* data_;
- size_t length_;
-};
-
-
-class ShellOptions {
- public:
- ShellOptions() :
-#ifndef V8_SHARED
- use_preemption(true),
- preemption_interval(10),
- num_parallel_files(0),
- parallel_files(NULL),
-#endif // V8_SHARED
- script_executed(false),
- last_run(true),
- send_idle_notification(false),
- stress_opt(false),
- stress_deopt(false),
- interactive_shell(false),
- test_shell(false),
- num_isolates(1),
- isolate_sources(NULL) { }
-
- ~ShellOptions() {
-#ifndef V8_SHARED
- delete[] parallel_files;
-#endif // V8_SHARED
- delete[] isolate_sources;
- }
-
-#ifndef V8_SHARED
- bool use_preemption;
- int preemption_interval;
- int num_parallel_files;
- char** parallel_files;
-#endif // V8_SHARED
- bool script_executed;
- bool last_run;
- bool send_idle_notification;
- bool stress_opt;
- bool stress_deopt;
- bool interactive_shell;
- bool test_shell;
- int num_isolates;
- SourceGroup* isolate_sources;
-};
-
-#ifdef V8_SHARED
-class Shell {
-#else
-class Shell : public i::AllStatic {
-#endif // V8_SHARED
-
- public:
- static bool ExecuteString(Handle<String> source,
- Handle<Value> name,
- bool print_result,
- bool report_exceptions);
- static const char* ToCString(const v8::String::Utf8Value& value);
- static void ReportException(TryCatch* try_catch);
- static Handle<String> ReadFile(Isolate* isolate, const char* name);
- static Persistent<Context> CreateEvaluationContext(Isolate* isolate);
- static int RunMain(Isolate* isolate, int argc, char* argv[]);
- static int Main(int argc, char* argv[]);
- static void Exit(int exit_code);
- static void OnExit();
-
-#ifndef V8_SHARED
- static Handle<Array> GetCompletions(Handle<String> text,
- Handle<String> full);
- static int* LookupCounter(const char* name);
- static void* CreateHistogram(const char* name,
- int min,
- int max,
- size_t buckets);
- static void AddHistogramSample(void* histogram, int sample);
- static void MapCounters(const char* name);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- static Handle<Object> DebugMessageDetails(Handle<String> message);
- static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
- static void DispatchDebugMessages();
-#endif // ENABLE_DEBUGGER_SUPPORT
-#endif // V8_SHARED
-
-#ifdef WIN32
-#undef Yield
-#endif
-
- static Handle<Value> Print(const Arguments& args);
- static Handle<Value> Write(const Arguments& args);
- static Handle<Value> Yield(const Arguments& args);
- static Handle<Value> Quit(const Arguments& args);
- static Handle<Value> Version(const Arguments& args);
- static Handle<Value> EnableProfiler(const Arguments& args);
- static Handle<Value> DisableProfiler(const Arguments& args);
- static Handle<Value> Read(const Arguments& args);
- static Handle<Value> ReadBuffer(const Arguments& args);
- static Handle<String> ReadFromStdin(Isolate* isolate);
- static Handle<Value> ReadLine(const Arguments& args) {
- return ReadFromStdin(args.GetIsolate());
- }
- static Handle<Value> Load(const Arguments& args);
- static Handle<Value> ArrayBuffer(const Arguments& args);
- static Handle<Value> Int8Array(const Arguments& args);
- static Handle<Value> Uint8Array(const Arguments& args);
- static Handle<Value> Int16Array(const Arguments& args);
- static Handle<Value> Uint16Array(const Arguments& args);
- static Handle<Value> Int32Array(const Arguments& args);
- static Handle<Value> Uint32Array(const Arguments& args);
- static Handle<Value> Float32Array(const Arguments& args);
- static Handle<Value> Float64Array(const Arguments& args);
- static Handle<Value> Uint8ClampedArray(const Arguments& args);
- static Handle<Value> ArrayBufferSlice(const Arguments& args);
- static Handle<Value> ArraySubArray(const Arguments& args);
- static Handle<Value> ArraySet(const Arguments& args);
- // The OS object on the global object contains methods for performing
- // operating system calls:
- //
- // os.system("program_name", ["arg1", "arg2", ...], timeout1, timeout2) will
- // run the command, passing the arguments to the program. The standard output
- // of the program will be picked up and returned as a multiline string. If
- // timeout1 is present then it should be a number. -1 indicates no timeout
- // and a positive number is used as a timeout in milliseconds that limits the
- // time spent waiting between receiving output characters from the program.
- // timeout2, if present, should be a number indicating the limit in
- // milliseconds on the total running time of the program. Exceptions are
- // thrown on timeouts or other errors or if the exit status of the program
- // indicates an error.
- //
- // os.chdir(dir) changes directory to the given directory. Throws an
- // exception/ on error.
- //
- // os.setenv(variable, value) sets an environment variable. Repeated calls to
- // this method leak memory due to the API of setenv in the standard C library.
- //
- // os.umask(alue) calls the umask system call and returns the old umask.
- //
- // os.mkdirp(name, mask) creates a directory. The mask (if present) is anded
- // with the current umask. Intermediate directories are created if necessary.
- // An exception is not thrown if the directory already exists. Analogous to
- // the "mkdir -p" command.
- static Handle<Value> OSObject(const Arguments& args);
- static Handle<Value> System(const Arguments& args);
- static Handle<Value> ChangeDirectory(const Arguments& args);
- static Handle<Value> SetEnvironment(const Arguments& args);
- static Handle<Value> UnsetEnvironment(const Arguments& args);
- static Handle<Value> SetUMask(const Arguments& args);
- static Handle<Value> MakeDirectory(const Arguments& args);
- static Handle<Value> RemoveDirectory(const Arguments& args);
-
- static void AddOSMethods(Handle<ObjectTemplate> os_template);
-
- static const char* kPrompt;
- static ShellOptions options;
-
- private:
- static Persistent<Context> evaluation_context_;
-#ifndef V8_SHARED
- static Persistent<Context> utility_context_;
- static CounterMap* counter_map_;
- // We statically allocate a set of local counters to be used if we
- // don't want to store the stats in a memory-mapped file
- static CounterCollection local_counters_;
- static CounterCollection* counters_;
- static i::OS::MemoryMappedFile* counters_file_;
- static i::Mutex* context_mutex_;
-
- static Counter* GetCounter(const char* name, bool is_histogram);
- static void InstallUtilityScript(Isolate* isolate);
-#endif // V8_SHARED
- static void Initialize(Isolate* isolate);
- static void InitializeDebugger(Isolate* isolate);
- static void RunShell(Isolate* isolate);
- static bool SetOptions(int argc, char* argv[]);
- static Handle<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
- static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback);
- static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback);
- static Handle<Value> CreateExternalArrayBuffer(Isolate* isolate,
- Handle<Object> buffer,
- int32_t size);
- static Handle<Object> CreateExternalArray(Isolate* isolate,
- Handle<Object> array,
- Handle<Object> buffer,
- ExternalArrayType type,
- int32_t length,
- int32_t byteLength,
- int32_t byteOffset,
- int32_t element_size);
- static Handle<Value> CreateExternalArray(const Arguments& args,
- ExternalArrayType type,
- int32_t element_size);
- static void ExternalArrayWeakCallback(Isolate* isolate,
- Persistent<Value> object,
- void* data);
-};
-
-
-} // namespace v8
-
-
-#endif // V8_D8_H_
diff --git a/src/3rdparty/v8/src/d8.js b/src/3rdparty/v8/src/d8.js
deleted file mode 100644
index 3cb1819..0000000
--- a/src/3rdparty/v8/src/d8.js
+++ /dev/null
@@ -1,2196 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"use strict";
-
-String.prototype.startsWith = function (str) {
- if (str.length > this.length) {
- return false;
- }
- return this.substr(0, str.length) == str;
-};
-
-function log10(num) {
- return Math.log(num)/Math.log(10);
-}
-
-function ToInspectableObject(obj) {
- if (!obj && typeof obj === 'object') {
- return void 0;
- } else {
- return Object(obj);
- }
-}
-
-function GetCompletions(global, last, full) {
- var full_tokens = full.split();
- full = full_tokens.pop();
- var parts = full.split('.');
- parts.pop();
- var current = global;
- for (var i = 0; i < parts.length; i++) {
- var part = parts[i];
- var next = current[part];
- if (!next) {
- return [];
- }
- current = next;
- }
- var result = [];
- current = ToInspectableObject(current);
- while (typeof current !== 'undefined') {
- var mirror = new $debug.ObjectMirror(current);
- var properties = mirror.properties();
- for (var i = 0; i < properties.length; i++) {
- var name = properties[i].name();
- if (typeof name === 'string' && name.startsWith(last)) {
- result.push(name);
- }
- }
- current = ToInspectableObject(current.__proto__);
- }
- return result;
-}
-
-
-// Global object holding debugger related constants and state.
-var Debug = {};
-
-
-// Debug events which can occour in the V8 JavaScript engine. These originate
-// from the API include file v8-debug.h.
-Debug.DebugEvent = { Break: 1,
- Exception: 2,
- NewFunction: 3,
- BeforeCompile: 4,
- AfterCompile: 5 };
-
-
-// The different types of scripts matching enum ScriptType in objects.h.
-Debug.ScriptType = { Native: 0,
- Extension: 1,
- Normal: 2 };
-
-
-// The different types of script compilations matching enum
-// Script::CompilationType in objects.h.
-Debug.ScriptCompilationType = { Host: 0,
- Eval: 1,
- JSON: 2 };
-
-
-// The different types of scopes matching constants runtime.cc.
-Debug.ScopeType = { Global: 0,
- Local: 1,
- With: 2,
- Closure: 3,
- Catch: 4,
- Block: 5 };
-
-
-// Current debug state.
-var kNoFrame = -1;
-Debug.State = {
- currentFrame: kNoFrame,
- displaySourceStartLine: -1,
- displaySourceEndLine: -1,
- currentSourceLine: -1
-};
-var trace_compile = false; // Tracing all compile events?
-var trace_debug_json = false; // Tracing all debug json packets?
-var last_cmd = '';
-var repeat_cmd_line = '';
-var is_running = true;
-// Global variable used to store whether a handle was requested.
-var lookup_handle = null;
-
-// Copied from debug-delay.js. This is needed below:
-function ScriptTypeFlag(type) {
- return (1 << type);
-}
-
-
-// Process a debugger JSON message into a display text and a running status.
-// This function returns an object with properties "text" and "running" holding
-// this information.
-function DebugMessageDetails(message) {
- if (trace_debug_json) {
- print("received: '" + message + "'");
- }
- // Convert the JSON string to an object.
- var response = new ProtocolPackage(message);
- is_running = response.running();
-
- if (response.type() == 'event') {
- return DebugEventDetails(response);
- } else {
- return DebugResponseDetails(response);
- }
-}
-
-function DebugEventDetails(response) {
- var details = {text:'', running:false};
-
- // Get the running state.
- details.running = response.running();
-
- var body = response.body();
- var result = '';
- switch (response.event()) {
- case 'break':
- if (body.breakpoints) {
- result += 'breakpoint';
- if (body.breakpoints.length > 1) {
- result += 's';
- }
- result += ' #';
- for (var i = 0; i < body.breakpoints.length; i++) {
- if (i > 0) {
- result += ', #';
- }
- result += body.breakpoints[i];
- }
- } else {
- result += 'break';
- }
- result += ' in ';
- result += body.invocationText;
- result += ', ';
- result += SourceInfo(body);
- result += '\n';
- result += SourceUnderline(body.sourceLineText, body.sourceColumn);
- Debug.State.currentSourceLine = body.sourceLine;
- Debug.State.displaySourceStartLine = -1;
- Debug.State.displaySourceEndLine = -1;
- Debug.State.currentFrame = 0;
- details.text = result;
- break;
-
- case 'exception':
- if (body.uncaught) {
- result += 'Uncaught: ';
- } else {
- result += 'Exception: ';
- }
- result += '"';
- result += body.exception.text;
- result += '"';
- if (body.sourceLine >= 0) {
- result += ', ';
- result += SourceInfo(body);
- result += '\n';
- result += SourceUnderline(body.sourceLineText, body.sourceColumn);
- Debug.State.currentSourceLine = body.sourceLine;
- Debug.State.displaySourceStartLine = -1;
- Debug.State.displaySourceEndLine = -1;
- Debug.State.currentFrame = 0;
- } else {
- result += ' (empty stack)';
- Debug.State.currentSourceLine = -1;
- Debug.State.displaySourceStartLine = -1;
- Debug.State.displaySourceEndLine = -1;
- Debug.State.currentFrame = kNoFrame;
- }
- details.text = result;
- break;
-
- case 'afterCompile':
- if (trace_compile) {
- result = 'Source ' + body.script.name + ' compiled:\n';
- var source = body.script.source;
- if (!(source[source.length - 1] == '\n')) {
- result += source;
- } else {
- result += source.substring(0, source.length - 1);
- }
- }
- details.text = result;
- break;
-
- case 'scriptCollected':
- details.text = result;
- break;
-
- default:
- details.text = 'Unknown debug event ' + response.event();
- }
-
- return details;
-}
-
-
-function SourceInfo(body) {
- var result = '';
-
- if (body.script) {
- if (body.script.name) {
- result += body.script.name;
- } else {
- result += '[unnamed]';
- }
- }
- result += ' line ';
- result += body.sourceLine + 1;
- result += ' column ';
- result += body.sourceColumn + 1;
-
- return result;
-}
-
-
-function SourceUnderline(source_text, position) {
- if (!source_text) {
- return;
- }
-
- // Create an underline with a caret pointing to the source position. If the
- // source contains a tab character the underline will have a tab character in
- // the same place otherwise the underline will have a space character.
- var underline = '';
- for (var i = 0; i < position; i++) {
- if (source_text[i] == '\t') {
- underline += '\t';
- } else {
- underline += ' ';
- }
- }
- underline += '^';
-
- // Return the source line text with the underline beneath.
- return source_text + '\n' + underline;
-}
-
-
-// Converts a text command to a JSON request.
-function DebugCommandToJSONRequest(cmd_line) {
- var result = new DebugRequest(cmd_line).JSONRequest();
- if (trace_debug_json && result) {
- print("sending: '" + result + "'");
- }
- return result;
-}
-
-
-function DebugRequest(cmd_line) {
- // If the very first character is a { assume that a JSON request have been
- // entered as a command. Converting that to a JSON request is trivial.
- if (cmd_line && cmd_line.length > 0 && cmd_line.charAt(0) == '{') {
- this.request_ = cmd_line;
- return;
- }
-
- // Check for a simple carriage return to repeat the last command:
- var is_repeating = false;
- if (cmd_line == '\n') {
- if (is_running) {
- cmd_line = 'break'; // Not in debugger mode, break with a frame request.
- } else {
- cmd_line = repeat_cmd_line; // use command to repeat.
- is_repeating = true;
- }
- }
- if (!is_running) { // Only save the command if in debugger mode.
- repeat_cmd_line = cmd_line; // save last command.
- }
-
- // Trim string for leading and trailing whitespace.
- cmd_line = cmd_line.replace(/^\s+|\s+$/g, '');
-
- // Find the command.
- var pos = cmd_line.indexOf(' ');
- var cmd;
- var args;
- if (pos == -1) {
- cmd = cmd_line;
- args = '';
- } else {
- cmd = cmd_line.slice(0, pos);
- args = cmd_line.slice(pos).replace(/^\s+|\s+$/g, '');
- }
-
- if ((cmd === undefined) || !cmd) {
- this.request_ = void 0;
- return;
- }
-
- last_cmd = cmd;
-
- // Switch on command.
- switch (cmd) {
- case 'continue':
- case 'c':
- this.request_ = this.continueCommandToJSONRequest_(args);
- break;
-
- case 'step':
- case 's':
- this.request_ = this.stepCommandToJSONRequest_(args, 'in');
- break;
-
- case 'stepi':
- case 'si':
- this.request_ = this.stepCommandToJSONRequest_(args, 'min');
- break;
-
- case 'next':
- case 'n':
- this.request_ = this.stepCommandToJSONRequest_(args, 'next');
- break;
-
- case 'finish':
- case 'fin':
- this.request_ = this.stepCommandToJSONRequest_(args, 'out');
- break;
-
- case 'backtrace':
- case 'bt':
- this.request_ = this.backtraceCommandToJSONRequest_(args);
- break;
-
- case 'frame':
- case 'f':
- this.request_ = this.frameCommandToJSONRequest_(args);
- break;
-
- case 'scopes':
- this.request_ = this.scopesCommandToJSONRequest_(args);
- break;
-
- case 'scope':
- this.request_ = this.scopeCommandToJSONRequest_(args);
- break;
-
- case 'disconnect':
- case 'exit':
- case 'quit':
- this.request_ = this.disconnectCommandToJSONRequest_(args);
- break;
-
- case 'up':
- this.request_ =
- this.frameCommandToJSONRequest_('' +
- (Debug.State.currentFrame + 1));
- break;
-
- case 'down':
- case 'do':
- this.request_ =
- this.frameCommandToJSONRequest_('' +
- (Debug.State.currentFrame - 1));
- break;
-
- case 'set':
- case 'print':
- case 'p':
- this.request_ = this.printCommandToJSONRequest_(args);
- break;
-
- case 'dir':
- this.request_ = this.dirCommandToJSONRequest_(args);
- break;
-
- case 'references':
- this.request_ = this.referencesCommandToJSONRequest_(args);
- break;
-
- case 'instances':
- this.request_ = this.instancesCommandToJSONRequest_(args);
- break;
-
- case 'list':
- case 'l':
- this.request_ = this.listCommandToJSONRequest_(args);
- break;
- case 'source':
- this.request_ = this.sourceCommandToJSONRequest_(args);
- break;
-
- case 'scripts':
- case 'script':
- case 'scr':
- this.request_ = this.scriptsCommandToJSONRequest_(args);
- break;
-
- case 'break':
- case 'b':
- this.request_ = this.breakCommandToJSONRequest_(args);
- break;
-
- case 'breakpoints':
- case 'bb':
- this.request_ = this.breakpointsCommandToJSONRequest_(args);
- break;
-
- case 'clear':
- case 'delete':
- case 'd':
- this.request_ = this.clearCommandToJSONRequest_(args);
- break;
-
- case 'threads':
- this.request_ = this.threadsCommandToJSONRequest_(args);
- break;
-
- case 'cond':
- this.request_ = this.changeBreakpointCommandToJSONRequest_(args, 'cond');
- break;
-
- case 'enable':
- case 'en':
- this.request_ =
- this.changeBreakpointCommandToJSONRequest_(args, 'enable');
- break;
-
- case 'disable':
- case 'dis':
- this.request_ =
- this.changeBreakpointCommandToJSONRequest_(args, 'disable');
- break;
-
- case 'ignore':
- this.request_ =
- this.changeBreakpointCommandToJSONRequest_(args, 'ignore');
- break;
-
- case 'info':
- case 'inf':
- this.request_ = this.infoCommandToJSONRequest_(args);
- break;
-
- case 'flags':
- this.request_ = this.v8FlagsToJSONRequest_(args);
- break;
-
- case 'gc':
- this.request_ = this.gcToJSONRequest_(args);
- break;
-
- case 'trace':
- case 'tr':
- // Return undefined to indicate command handled internally (no JSON).
- this.request_ = void 0;
- this.traceCommand_(args);
- break;
-
- case 'help':
- case '?':
- this.helpCommand_(args);
- // Return undefined to indicate command handled internally (no JSON).
- this.request_ = void 0;
- break;
-
- default:
- throw new Error('Unknown command "' + cmd + '"');
- }
-}
-
-DebugRequest.prototype.JSONRequest = function() {
- return this.request_;
-};
-
-
-function RequestPacket(command) {
- this.seq = 0;
- this.type = 'request';
- this.command = command;
-}
-
-
-RequestPacket.prototype.toJSONProtocol = function() {
- // Encode the protocol header.
- var json = '{';
- json += '"seq":' + this.seq;
- json += ',"type":"' + this.type + '"';
- if (this.command) {
- json += ',"command":' + StringToJSON_(this.command);
- }
- if (this.arguments) {
- json += ',"arguments":';
- // Encode the arguments part.
- if (this.arguments.toJSONProtocol) {
- json += this.arguments.toJSONProtocol();
- } else {
- json += SimpleObjectToJSON_(this.arguments);
- }
- }
- json += '}';
- return json;
-};
-
-
-DebugRequest.prototype.createRequest = function(command) {
- return new RequestPacket(command);
-};
-
-
-// Create a JSON request for the evaluation command.
-DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
- lookup_handle = null;
-
- // Check if the expression is a handle id in the form #<handle>#.
- var handle_match = expression.match(/^#([0-9]*)#$/);
- if (handle_match) {
- // Remember the handle requested in a global variable.
- lookup_handle = parseInt(handle_match[1]);
- // Build a lookup request.
- var request = this.createRequest('lookup');
- request.arguments = {};
- request.arguments.handles = [ lookup_handle ];
- return request.toJSONProtocol();
- } else {
- // Build an evaluate request.
- var request = this.createRequest('evaluate');
- request.arguments = {};
- request.arguments.expression = expression;
- // Request a global evaluation if there is no current frame.
- if (Debug.State.currentFrame == kNoFrame) {
- request.arguments.global = true;
- }
- return request.toJSONProtocol();
- }
-};
-
-
-// Create a JSON request for the references/instances command.
-DebugRequest.prototype.makeReferencesJSONRequest_ = function(handle, type) {
- // Build a references request.
- var handle_match = handle.match(/^#([0-9]*)#$/);
- if (handle_match) {
- var request = this.createRequest('references');
- request.arguments = {};
- request.arguments.type = type;
- request.arguments.handle = parseInt(handle_match[1]);
- return request.toJSONProtocol();
- } else {
- throw new Error('Invalid object id.');
- }
-};
-
-
-// Create a JSON request for the continue command.
-DebugRequest.prototype.continueCommandToJSONRequest_ = function(args) {
- var request = this.createRequest('continue');
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the step command.
-DebugRequest.prototype.stepCommandToJSONRequest_ = function(args, type) {
- // Requesting a step is through the continue command with additional
- // arguments.
- var request = this.createRequest('continue');
- request.arguments = {};
-
- // Process arguments if any.
-
- // Only process args if the command is 'step' which is indicated by type being
- // set to 'in'. For all other commands, ignore the args.
- if (args && args.length > 0) {
- args = args.split(/\s+/g);
-
- if (args.length > 2) {
- throw new Error('Invalid step arguments.');
- }
-
- if (args.length > 0) {
- // Check if we have a gdb stype step command. If so, the 1st arg would
- // be the step count. If it's not a number, then assume that we're
- // parsing for the legacy v8 step command.
- var stepcount = Number(args[0]);
- if (stepcount == Number.NaN) {
- // No step count at arg 1. Process as legacy d8 step command:
- if (args.length == 2) {
- var stepcount = parseInt(args[1]);
- if (isNaN(stepcount) || stepcount <= 0) {
- throw new Error('Invalid step count argument "' + args[0] + '".');
- }
- request.arguments.stepcount = stepcount;
- }
-
- // Get the step action.
- switch (args[0]) {
- case 'in':
- case 'i':
- request.arguments.stepaction = 'in';
- break;
-
- case 'min':
- case 'm':
- request.arguments.stepaction = 'min';
- break;
-
- case 'next':
- case 'n':
- request.arguments.stepaction = 'next';
- break;
-
- case 'out':
- case 'o':
- request.arguments.stepaction = 'out';
- break;
-
- default:
- throw new Error('Invalid step argument "' + args[0] + '".');
- }
-
- } else {
- // gdb style step commands:
- request.arguments.stepaction = type;
- request.arguments.stepcount = stepcount;
- }
- }
- } else {
- // Default is step of the specified type.
- request.arguments.stepaction = type;
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the backtrace command.
-DebugRequest.prototype.backtraceCommandToJSONRequest_ = function(args) {
- // Build a backtrace request from the text command.
- var request = this.createRequest('backtrace');
-
- // Default is to show top 10 frames.
- request.arguments = {};
- request.arguments.fromFrame = 0;
- request.arguments.toFrame = 10;
-
- args = args.split(/\s*[ ]+\s*/g);
- if (args.length == 1 && args[0].length > 0) {
- var frameCount = parseInt(args[0]);
- if (frameCount > 0) {
- // Show top frames.
- request.arguments.fromFrame = 0;
- request.arguments.toFrame = frameCount;
- } else {
- // Show bottom frames.
- request.arguments.fromFrame = 0;
- request.arguments.toFrame = -frameCount;
- request.arguments.bottom = true;
- }
- } else if (args.length == 2) {
- var fromFrame = parseInt(args[0]);
- var toFrame = parseInt(args[1]);
- if (isNaN(fromFrame) || fromFrame < 0) {
- throw new Error('Invalid start frame argument "' + args[0] + '".');
- }
- if (isNaN(toFrame) || toFrame < 0) {
- throw new Error('Invalid end frame argument "' + args[1] + '".');
- }
- if (fromFrame > toFrame) {
- throw new Error('Invalid arguments start frame cannot be larger ' +
- 'than end frame.');
- }
- // Show frame range.
- request.arguments.fromFrame = fromFrame;
- request.arguments.toFrame = toFrame + 1;
- } else if (args.length > 2) {
- throw new Error('Invalid backtrace arguments.');
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the frame command.
-DebugRequest.prototype.frameCommandToJSONRequest_ = function(args) {
- // Build a frame request from the text command.
- var request = this.createRequest('frame');
- args = args.split(/\s*[ ]+\s*/g);
- if (args.length > 0 && args[0].length > 0) {
- request.arguments = {};
- request.arguments.number = args[0];
- }
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the scopes command.
-DebugRequest.prototype.scopesCommandToJSONRequest_ = function(args) {
- // Build a scopes request from the text command.
- var request = this.createRequest('scopes');
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the scope command.
-DebugRequest.prototype.scopeCommandToJSONRequest_ = function(args) {
- // Build a scope request from the text command.
- var request = this.createRequest('scope');
- args = args.split(/\s*[ ]+\s*/g);
- if (args.length > 0 && args[0].length > 0) {
- request.arguments = {};
- request.arguments.number = args[0];
- }
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the print command.
-DebugRequest.prototype.printCommandToJSONRequest_ = function(args) {
- // Build an evaluate request from the text command.
- if (args.length == 0) {
- throw new Error('Missing expression.');
- }
- return this.makeEvaluateJSONRequest_(args);
-};
-
-
-// Create a JSON request for the dir command.
-DebugRequest.prototype.dirCommandToJSONRequest_ = function(args) {
- // Build an evaluate request from the text command.
- if (args.length == 0) {
- throw new Error('Missing expression.');
- }
- return this.makeEvaluateJSONRequest_(args);
-};
-
-
-// Create a JSON request for the references command.
-DebugRequest.prototype.referencesCommandToJSONRequest_ = function(args) {
- // Build an evaluate request from the text command.
- if (args.length == 0) {
- throw new Error('Missing object id.');
- }
-
- return this.makeReferencesJSONRequest_(args, 'referencedBy');
-};
-
-
-// Create a JSON request for the instances command.
-DebugRequest.prototype.instancesCommandToJSONRequest_ = function(args) {
- // Build an evaluate request from the text command.
- if (args.length == 0) {
- throw new Error('Missing object id.');
- }
-
- // Build a references request.
- return this.makeReferencesJSONRequest_(args, 'constructedBy');
-};
-
-
-// Create a JSON request for the list command.
-DebugRequest.prototype.listCommandToJSONRequest_ = function(args) {
-
- // Default is ten lines starting five lines before the current location.
- if (Debug.State.displaySourceEndLine == -1) {
- // If we list forwards, we will start listing after the last source end
- // line. Set it to start from 5 lines before the current location.
- Debug.State.displaySourceEndLine = Debug.State.currentSourceLine - 5;
- // If we list backwards, we will start listing backwards from the last
- // source start line. Set it to start from 1 lines before the current
- // location.
- Debug.State.displaySourceStartLine = Debug.State.currentSourceLine + 1;
- }
-
- var from = Debug.State.displaySourceEndLine + 1;
- var lines = 10;
-
- // Parse the arguments.
- args = args.split(/\s*,\s*/g);
- if (args == '') {
- } else if ((args.length == 1) && (args[0] == '-')) {
- from = Debug.State.displaySourceStartLine - lines;
- } else if (args.length == 2) {
- from = parseInt(args[0]);
- lines = parseInt(args[1]) - from + 1; // inclusive of the ending line.
- } else {
- throw new Error('Invalid list arguments.');
- }
- Debug.State.displaySourceStartLine = from;
- Debug.State.displaySourceEndLine = from + lines - 1;
- var sourceArgs = '' + from + ' ' + lines;
- return this.sourceCommandToJSONRequest_(sourceArgs);
-};
-
-
-// Create a JSON request for the source command.
-DebugRequest.prototype.sourceCommandToJSONRequest_ = function(args) {
- // Build a evaluate request from the text command.
- var request = this.createRequest('source');
-
- // Default is ten lines starting five lines before the current location.
- var from = Debug.State.currentSourceLine - 5;
- var lines = 10;
-
- // Parse the arguments.
- args = args.split(/\s*[ ]+\s*/g);
- if (args.length > 1 && args[0].length > 0 && args[1].length > 0) {
- from = parseInt(args[0]) - 1;
- lines = parseInt(args[1]);
- } else if (args.length > 0 && args[0].length > 0) {
- from = parseInt(args[0]) - 1;
- }
-
- if (from < 0) from = 0;
- if (lines < 0) lines = 10;
-
- // Request source arround current source location.
- request.arguments = {};
- request.arguments.fromLine = from;
- request.arguments.toLine = from + lines;
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the scripts command.
-DebugRequest.prototype.scriptsCommandToJSONRequest_ = function(args) {
- // Build a evaluate request from the text command.
- var request = this.createRequest('scripts');
-
- // Process arguments if any.
- if (args && args.length > 0) {
- args = args.split(/\s*[ ]+\s*/g);
-
- if (args.length > 1) {
- throw new Error('Invalid scripts arguments.');
- }
-
- request.arguments = {};
- switch (args[0]) {
- case 'natives':
- request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Native);
- break;
-
- case 'extensions':
- request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Extension);
- break;
-
- case 'all':
- request.arguments.types =
- ScriptTypeFlag(Debug.ScriptType.Normal) |
- ScriptTypeFlag(Debug.ScriptType.Native) |
- ScriptTypeFlag(Debug.ScriptType.Extension);
- break;
-
- default:
- // If the arg is not one of the know one aboves, then it must be a
- // filter used for filtering the results:
- request.arguments.filter = args[0];
- break;
- }
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the break command.
-DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
- // Build a evaluate request from the text command.
- // Process arguments if any.
- if (args && args.length > 0) {
- var target = args;
- var type = 'function';
- var line;
- var column;
- var condition;
- var pos;
-
- var request = this.createRequest('setbreakpoint');
-
- // Break the args into target spec and condition if appropriate.
-
- // Check for breakpoint condition.
- pos = args.indexOf(' ');
- if (pos > 0) {
- target = args.substring(0, pos);
- condition = args.substring(pos + 1, args.length);
- }
-
- // Check for script breakpoint (name:line[:column]). If no ':' in break
- // specification it is considered a function break point.
- pos = target.indexOf(':');
- if (pos > 0) {
- var tmp = target.substring(pos + 1, target.length);
- target = target.substring(0, pos);
- if (target[0] == '/' && target[target.length - 1] == '/') {
- type = 'scriptRegExp';
- target = target.substring(1, target.length - 1);
- } else {
- type = 'script';
- }
-
- // Check for both line and column.
- pos = tmp.indexOf(':');
- if (pos > 0) {
- column = parseInt(tmp.substring(pos + 1, tmp.length)) - 1;
- line = parseInt(tmp.substring(0, pos)) - 1;
- } else {
- line = parseInt(tmp) - 1;
- }
- } else if (target[0] == '#' && target[target.length - 1] == '#') {
- type = 'handle';
- target = target.substring(1, target.length - 1);
- } else {
- type = 'function';
- }
-
- request.arguments = {};
- request.arguments.type = type;
- request.arguments.target = target;
- request.arguments.line = line;
- request.arguments.column = column;
- request.arguments.condition = condition;
- } else {
- var request = this.createRequest('suspend');
- }
-
- return request.toJSONProtocol();
-};
-
-
-DebugRequest.prototype.breakpointsCommandToJSONRequest_ = function(args) {
- if (args && args.length > 0) {
- throw new Error('Unexpected arguments.');
- }
- var request = this.createRequest('listbreakpoints');
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the clear command.
-DebugRequest.prototype.clearCommandToJSONRequest_ = function(args) {
- // Build a evaluate request from the text command.
- var request = this.createRequest('clearbreakpoint');
-
- // Process arguments if any.
- if (args && args.length > 0) {
- request.arguments = {};
- request.arguments.breakpoint = parseInt(args);
- } else {
- throw new Error('Invalid break arguments.');
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the change breakpoint command.
-DebugRequest.prototype.changeBreakpointCommandToJSONRequest_ =
- function(args, command) {
-
- var request;
-
- // Check for exception breaks first:
- // en[able] exc[eptions] [all|unc[aught]]
- // en[able] [all|unc[aught]] exc[eptions]
- // dis[able] exc[eptions] [all|unc[aught]]
- // dis[able] [all|unc[aught]] exc[eptions]
- if ((command == 'enable' || command == 'disable') &&
- args && args.length > 1) {
- var nextPos = args.indexOf(' ');
- var arg1 = (nextPos > 0) ? args.substring(0, nextPos) : args;
- var excType = null;
-
- // Check for:
- // en[able] exc[eptions] [all|unc[aught]]
- // dis[able] exc[eptions] [all|unc[aught]]
- if (arg1 == 'exc' || arg1 == 'exception' || arg1 == 'exceptions') {
-
- var arg2 = (nextPos > 0) ?
- args.substring(nextPos + 1, args.length) : 'all';
- if (!arg2) {
- arg2 = 'all'; // if unspecified, set for all.
- } if (arg2 == 'unc') { // check for short cut.
- arg2 = 'uncaught';
- }
- excType = arg2;
-
- // Check for:
- // en[able] [all|unc[aught]] exc[eptions]
- // dis[able] [all|unc[aught]] exc[eptions]
- } else if (arg1 == 'all' || arg1 == 'unc' || arg1 == 'uncaught') {
-
- var arg2 = (nextPos > 0) ?
- args.substring(nextPos + 1, args.length) : null;
- if (arg2 == 'exc' || arg1 == 'exception' || arg1 == 'exceptions') {
- excType = arg1;
- if (excType == 'unc') {
- excType = 'uncaught';
- }
- }
- }
-
- // If we matched one of the command formats, then excType will be non-null:
- if (excType) {
- // Build a evaluate request from the text command.
- request = this.createRequest('setexceptionbreak');
-
- request.arguments = {};
- request.arguments.type = excType;
- request.arguments.enabled = (command == 'enable');
-
- return request.toJSONProtocol();
- }
- }
-
- // Build a evaluate request from the text command.
- request = this.createRequest('changebreakpoint');
-
- // Process arguments if any.
- if (args && args.length > 0) {
- request.arguments = {};
- var pos = args.indexOf(' ');
- var breakpointArg = args;
- var otherArgs;
- if (pos > 0) {
- breakpointArg = args.substring(0, pos);
- otherArgs = args.substring(pos + 1, args.length);
- }
-
- request.arguments.breakpoint = parseInt(breakpointArg);
-
- switch(command) {
- case 'cond':
- request.arguments.condition = otherArgs ? otherArgs : null;
- break;
- case 'enable':
- request.arguments.enabled = true;
- break;
- case 'disable':
- request.arguments.enabled = false;
- break;
- case 'ignore':
- request.arguments.ignoreCount = parseInt(otherArgs);
- break;
- default:
- throw new Error('Invalid arguments.');
- }
- } else {
- throw new Error('Invalid arguments.');
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the disconnect command.
-DebugRequest.prototype.disconnectCommandToJSONRequest_ = function(args) {
- var request;
- request = this.createRequest('disconnect');
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the info command.
-DebugRequest.prototype.infoCommandToJSONRequest_ = function(args) {
- var request;
- if (args && (args == 'break' || args == 'br')) {
- // Build a evaluate request from the text command.
- request = this.createRequest('listbreakpoints');
- last_cmd = 'info break';
- } else if (args && (args == 'locals' || args == 'lo')) {
- // Build a evaluate request from the text command.
- request = this.createRequest('frame');
- last_cmd = 'info locals';
- } else if (args && (args == 'args' || args == 'ar')) {
- // Build a evaluate request from the text command.
- request = this.createRequest('frame');
- last_cmd = 'info args';
- } else {
- throw new Error('Invalid info arguments.');
- }
-
- return request.toJSONProtocol();
-};
-
-
-DebugRequest.prototype.v8FlagsToJSONRequest_ = function(args) {
- var request;
- request = this.createRequest('v8flags');
- request.arguments = {};
- request.arguments.flags = args;
- return request.toJSONProtocol();
-};
-
-
-DebugRequest.prototype.gcToJSONRequest_ = function(args) {
- var request;
- if (!args) {
- args = 'all';
- }
- var args = args.split(/\s+/g);
- var cmd = args[0];
-
- switch(cmd) {
- case 'all':
- case 'quick':
- case 'full':
- case 'young':
- case 'old':
- case 'compact':
- case 'sweep':
- case 'scavenge': {
- if (cmd == 'young') { cmd = 'quick'; }
- else if (cmd == 'old') { cmd = 'full'; }
-
- request = this.createRequest('gc');
- request.arguments = {};
- request.arguments.type = cmd;
- break;
- }
- // Else fall thru to the default case below to report the error.
- default:
- throw new Error('Missing arguments after ' + cmd + '.');
- }
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the threads command.
-DebugRequest.prototype.threadsCommandToJSONRequest_ = function(args) {
- // Build a threads request from the text command.
- var request = this.createRequest('threads');
- return request.toJSONProtocol();
-};
-
-
-// Handle the trace command.
-DebugRequest.prototype.traceCommand_ = function(args) {
- // Process arguments.
- if (args && args.length > 0) {
- if (args == 'compile') {
- trace_compile = !trace_compile;
- print('Tracing of compiled scripts ' + (trace_compile ? 'on' : 'off'));
- } else if (args === 'debug json' || args === 'json' || args === 'packets') {
- trace_debug_json = !trace_debug_json;
- print('Tracing of debug json packets ' +
- (trace_debug_json ? 'on' : 'off'));
- } else {
- throw new Error('Invalid trace arguments.');
- }
- } else {
- throw new Error('Invalid trace arguments.');
- }
-};
-
-// Handle the help command.
-DebugRequest.prototype.helpCommand_ = function(args) {
- // Help os quite simple.
- if (args && args.length > 0) {
- print('warning: arguments to \'help\' are ignored');
- }
-
- print('Note: <> denotes symbollic values to be replaced with real values.');
- print('Note: [] denotes optional parts of commands, or optional options / arguments.');
- print(' e.g. d[elete] - you get the same command if you type d or delete.');
- print('');
- print('[break] - break as soon as possible');
- print('b[reak] location [condition]');
- print(' - break on named function: location is a function name');
- print(' - break on function: location is #<id>#');
- print(' - break on script position: location is name:line[:column]');
- print('');
- print('clear <breakpoint #> - deletes the specified user defined breakpoint');
- print('d[elete] <breakpoint #> - deletes the specified user defined breakpoint');
- print('dis[able] <breakpoint #> - disables the specified user defined breakpoint');
- print('dis[able] exc[eptions] [[all] | unc[aught]]');
- print(' - disables breaking on exceptions');
- print('en[able] <breakpoint #> - enables the specified user defined breakpoint');
- print('en[able] exc[eptions] [[all] | unc[aught]]');
- print(' - enables breaking on exceptions');
- print('');
- print('b[ack]t[race] [n] | [-n] | [from to]');
- print(' - prints the stack back trace');
- print('f[rame] - prints info about the current frame context');
- print('f[rame] <frame #> - set context to specified frame #');
- print('scopes');
- print('scope <scope #>');
- print('');
- print('up - set context to caller of current frame');
- print('do[wn] - set context to callee of current frame');
- print('inf[o] br[eak] - prints info about breakpoints in use');
- print('inf[o] ar[gs] - prints info about arguments of the current function');
- print('inf[o] lo[cals] - prints info about locals in the current function');
- print('');
- print('step [in | next | out| min [step count]]');
- print('c[ontinue] - continue executing after a breakpoint');
- print('s[tep] [<N>] - step into the next N callees (default N is 1)');
- print('s[tep]i [<N>] - step into the next N callees (default N is 1)');
- print('n[ext] [<N>] - step over the next N callees (default N is 1)');
- print('fin[ish] [<N>] - step out of N frames (default N is 1)');
- print('');
- print('p[rint] <expression> - prints the result of the specified expression');
- print('dir <expression> - prints the object structure of the result');
- print('set <var> = <expression> - executes the specified statement');
- print('');
- print('l[ist] - list the source code around for the current pc');
- print('l[ist] [- | <start>,<end>] - list the specified range of source code');
- print('source [from line [num lines]]');
- print('scr[ipts] [native|extensions|all]');
- print('scr[ipts] [<filter text>] - list scripts with the specified text in its description');
- print('');
- print('gc - runs the garbage collector');
- print('');
- print('trace compile');
- // hidden command: trace debug json - toggles tracing of debug json packets
- print('');
- print('disconnect|exit|quit - disconnects and quits the debugger');
- print('help - prints this help information');
-};
-
-
-function formatHandleReference_(value) {
- if (value.handle() >= 0) {
- return '#' + value.handle() + '#';
- } else {
- return '#Transient#';
- }
-}
-
-
-function formatObject_(value, include_properties) {
- var result = '';
- result += formatHandleReference_(value);
- result += ', type: object';
- result += ', constructor ';
- var ctor = value.constructorFunctionValue();
- result += formatHandleReference_(ctor);
- result += ', __proto__ ';
- var proto = value.protoObjectValue();
- result += formatHandleReference_(proto);
- result += ', ';
- result += value.propertyCount();
- result += ' properties.';
- if (include_properties) {
- result += '\n';
- for (var i = 0; i < value.propertyCount(); i++) {
- result += ' ';
- result += value.propertyName(i);
- result += ': ';
- var property_value = value.propertyValue(i);
- if (property_value instanceof ProtocolReference) {
- result += '<no type>';
- } else {
- if (property_value && property_value.type()) {
- result += property_value.type();
- } else {
- result += '<no type>';
- }
- }
- result += ' ';
- result += formatHandleReference_(property_value);
- result += '\n';
- }
- }
- return result;
-}
-
-
-function formatScope_(scope) {
- var result = '';
- var index = scope.index;
- result += '#' + (index <= 9 ? '0' : '') + index;
- result += ' ';
- switch (scope.type) {
- case Debug.ScopeType.Global:
- result += 'Global, ';
- result += '#' + scope.object.ref + '#';
- break;
- case Debug.ScopeType.Local:
- result += 'Local';
- break;
- case Debug.ScopeType.With:
- result += 'With, ';
- result += '#' + scope.object.ref + '#';
- break;
- case Debug.ScopeType.Catch:
- result += 'Catch, ';
- result += '#' + scope.object.ref + '#';
- break;
- case Debug.ScopeType.Closure:
- result += 'Closure';
- break;
- default:
- result += 'UNKNOWN';
- }
- return result;
-}
-
-
-function refObjectToString_(protocolPackage, handle) {
- var value = protocolPackage.lookup(handle);
- var result = '';
- if (value.isString()) {
- result = '"' + value.value() + '"';
- } else if (value.isPrimitive()) {
- result = value.valueString();
- } else if (value.isObject()) {
- result += formatObject_(value, true);
- }
- return result;
-}
-
-
-// Rounds number 'num' to 'length' decimal places.
-function roundNumber(num, length) {
- var factor = Math.pow(10, length);
- return Math.round(num * factor) / factor;
-}
-
-
-// Convert a JSON response to text for display in a text based debugger.
-function DebugResponseDetails(response) {
- var details = { text: '', running: false };
-
- try {
- if (!response.success()) {
- details.text = response.message();
- return details;
- }
-
- // Get the running state.
- details.running = response.running();
-
- var body = response.body();
- var result = '';
- switch (response.command()) {
- case 'suspend':
- details.text = 'stopped';
- break;
-
- case 'setbreakpoint':
- result = 'set breakpoint #';
- result += body.breakpoint;
- details.text = result;
- break;
-
- case 'clearbreakpoint':
- result = 'cleared breakpoint #';
- result += body.breakpoint;
- details.text = result;
- break;
-
- case 'changebreakpoint':
- result = 'successfully changed breakpoint';
- details.text = result;
- break;
-
- case 'listbreakpoints':
- result = 'breakpoints: (' + body.breakpoints.length + ')';
- for (var i = 0; i < body.breakpoints.length; i++) {
- var breakpoint = body.breakpoints[i];
- result += '\n id=' + breakpoint.number;
- result += ' type=' + breakpoint.type;
- if (breakpoint.script_id) {
- result += ' script_id=' + breakpoint.script_id;
- }
- if (breakpoint.script_name) {
- result += ' script_name=' + breakpoint.script_name;
- }
- if (breakpoint.script_regexp) {
- result += ' script_regexp=' + breakpoint.script_regexp;
- }
- result += ' line=' + (breakpoint.line + 1);
- if (breakpoint.column != null) {
- result += ' column=' + (breakpoint.column + 1);
- }
- if (breakpoint.groupId) {
- result += ' groupId=' + breakpoint.groupId;
- }
- if (breakpoint.ignoreCount) {
- result += ' ignoreCount=' + breakpoint.ignoreCount;
- }
- if (breakpoint.active === false) {
- result += ' inactive';
- }
- if (breakpoint.condition) {
- result += ' condition=' + breakpoint.condition;
- }
- result += ' hit_count=' + breakpoint.hit_count;
- }
- if (body.breakpoints.length === 0) {
- result = "No user defined breakpoints\n";
- } else {
- result += '\n';
- }
- if (body.breakOnExceptions) {
- result += '* breaking on ALL exceptions is enabled\n';
- } else if (body.breakOnUncaughtExceptions) {
- result += '* breaking on UNCAUGHT exceptions is enabled\n';
- } else {
- result += '* all exception breakpoints are disabled\n';
- }
- details.text = result;
- break;
-
- case 'setexceptionbreak':
- result = 'Break on ' + body.type + ' exceptions: ';
- result += body.enabled ? 'enabled' : 'disabled';
- details.text = result;
- break;
-
- case 'backtrace':
- if (body.totalFrames == 0) {
- result = '(empty stack)';
- } else {
- var result = 'Frames #' + body.fromFrame + ' to #' +
- (body.toFrame - 1) + ' of ' + body.totalFrames + '\n';
- for (i = 0; i < body.frames.length; i++) {
- if (i != 0) result += '\n';
- result += body.frames[i].text;
- }
- }
- details.text = result;
- break;
-
- case 'frame':
- if (last_cmd === 'info locals') {
- var locals = body.locals;
- if (locals.length === 0) {
- result = 'No locals';
- } else {
- for (var i = 0; i < locals.length; i++) {
- var local = locals[i];
- result += local.name + ' = ';
- result += refObjectToString_(response, local.value.ref);
- result += '\n';
- }
- }
- } else if (last_cmd === 'info args') {
- var args = body.arguments;
- if (args.length === 0) {
- result = 'No arguments';
- } else {
- for (var i = 0; i < args.length; i++) {
- var arg = args[i];
- result += arg.name + ' = ';
- result += refObjectToString_(response, arg.value.ref);
- result += '\n';
- }
- }
- } else {
- result = SourceUnderline(body.sourceLineText,
- body.column);
- Debug.State.currentSourceLine = body.line;
- Debug.State.currentFrame = body.index;
- Debug.State.displaySourceStartLine = -1;
- Debug.State.displaySourceEndLine = -1;
- }
- details.text = result;
- break;
-
- case 'scopes':
- if (body.totalScopes == 0) {
- result = '(no scopes)';
- } else {
- result = 'Scopes #' + body.fromScope + ' to #' +
- (body.toScope - 1) + ' of ' + body.totalScopes + '\n';
- for (i = 0; i < body.scopes.length; i++) {
- if (i != 0) {
- result += '\n';
- }
- result += formatScope_(body.scopes[i]);
- }
- }
- details.text = result;
- break;
-
- case 'scope':
- result += formatScope_(body);
- result += '\n';
- var scope_object_value = response.lookup(body.object.ref);
- result += formatObject_(scope_object_value, true);
- details.text = result;
- break;
-
- case 'evaluate':
- case 'lookup':
- case 'getobj':
- if (last_cmd == 'p' || last_cmd == 'print') {
- result = body.text;
- } else {
- var value;
- if (lookup_handle) {
- value = response.bodyValue(lookup_handle);
- } else {
- value = response.bodyValue();
- }
- if (value.isObject()) {
- result += formatObject_(value, true);
- } else {
- result += 'type: ';
- result += value.type();
- if (!value.isUndefined() && !value.isNull()) {
- result += ', ';
- if (value.isString()) {
- result += '"';
- }
- result += value.value();
- if (value.isString()) {
- result += '"';
- }
- }
- result += '\n';
- }
- }
- details.text = result;
- break;
-
- case 'references':
- var count = body.length;
- result += 'found ' + count + ' objects';
- result += '\n';
- for (var i = 0; i < count; i++) {
- var value = response.bodyValue(i);
- result += formatObject_(value, false);
- result += '\n';
- }
- details.text = result;
- break;
-
- case 'source':
- // Get the source from the response.
- var source = body.source;
- var from_line = body.fromLine + 1;
- var lines = source.split('\n');
- var maxdigits = 1 + Math.floor(log10(from_line + lines.length));
- if (maxdigits < 3) {
- maxdigits = 3;
- }
- var result = '';
- for (var num = 0; num < lines.length; num++) {
- // Check if there's an extra newline at the end.
- if (num == (lines.length - 1) && lines[num].length == 0) {
- break;
- }
-
- var current_line = from_line + num;
- var spacer = maxdigits - (1 + Math.floor(log10(current_line)));
- if (current_line == Debug.State.currentSourceLine + 1) {
- for (var i = 0; i < maxdigits; i++) {
- result += '>';
- }
- result += ' ';
- } else {
- for (var i = 0; i < spacer; i++) {
- result += ' ';
- }
- result += current_line + ': ';
- }
- result += lines[num];
- result += '\n';
- }
- details.text = result;
- break;
-
- case 'scripts':
- var result = '';
- for (i = 0; i < body.length; i++) {
- if (i != 0) result += '\n';
- if (body[i].id) {
- result += body[i].id;
- } else {
- result += '[no id]';
- }
- result += ', ';
- if (body[i].name) {
- result += body[i].name;
- } else {
- if (body[i].compilationType == Debug.ScriptCompilationType.Eval
- && body[i].evalFromScript
- ) {
- result += 'eval from ';
- var script_value = response.lookup(body[i].evalFromScript.ref);
- result += ' ' + script_value.field('name');
- result += ':' + (body[i].evalFromLocation.line + 1);
- result += ':' + body[i].evalFromLocation.column;
- } else if (body[i].compilationType ==
- Debug.ScriptCompilationType.JSON) {
- result += 'JSON ';
- } else { // body[i].compilation == Debug.ScriptCompilationType.Host
- result += '[unnamed] ';
- }
- }
- result += ' (lines: ';
- result += body[i].lineCount;
- result += ', length: ';
- result += body[i].sourceLength;
- if (body[i].type == Debug.ScriptType.Native) {
- result += ', native';
- } else if (body[i].type == Debug.ScriptType.Extension) {
- result += ', extension';
- }
- result += '), [';
- var sourceStart = body[i].sourceStart;
- if (sourceStart.length > 40) {
- sourceStart = sourceStart.substring(0, 37) + '...';
- }
- result += sourceStart;
- result += ']';
- }
- if (body.length == 0) {
- result = "no matching scripts found";
- }
- details.text = result;
- break;
-
- case 'threads':
- var result = 'Active V8 threads: ' + body.totalThreads + '\n';
- body.threads.sort(function(a, b) { return a.id - b.id; });
- for (i = 0; i < body.threads.length; i++) {
- result += body.threads[i].current ? '*' : ' ';
- result += ' ';
- result += body.threads[i].id;
- result += '\n';
- }
- details.text = result;
- break;
-
- case 'continue':
- details.text = "(running)";
- break;
-
- case 'v8flags':
- details.text = "flags set";
- break;
-
- case 'gc':
- details.text = "GC " + body.before + " => " + body.after;
- if (body.after > (1024*1024)) {
- details.text +=
- " (" + roundNumber(body.before/(1024*1024), 1) + "M => " +
- roundNumber(body.after/(1024*1024), 1) + "M)";
- } else if (body.after > 1024) {
- details.text +=
- " (" + roundNumber(body.before/1024, 1) + "K => " +
- roundNumber(body.after/1024, 1) + "K)";
- }
- break;
-
- default:
- details.text =
- 'Response for unknown command \'' + response.command() + '\'' +
- ' (' + response.raw_json() + ')';
- }
- } catch (e) {
- details.text = 'Error: "' + e + '" formatting response';
- }
-
- return details;
-}
-
-
-/**
- * Protocol packages send from the debugger.
- * @param {string} json - raw protocol packet as JSON string.
- * @constructor
- */
-function ProtocolPackage(json) {
- this.raw_json_ = json;
- this.packet_ = JSON.parse(json);
- this.refs_ = [];
- if (this.packet_.refs) {
- for (var i = 0; i < this.packet_.refs.length; i++) {
- this.refs_[this.packet_.refs[i].handle] = this.packet_.refs[i];
- }
- }
-}
-
-
-/**
- * Get the packet type.
- * @return {String} the packet type
- */
-ProtocolPackage.prototype.type = function() {
- return this.packet_.type;
-};
-
-
-/**
- * Get the packet event.
- * @return {Object} the packet event
- */
-ProtocolPackage.prototype.event = function() {
- return this.packet_.event;
-};
-
-
-/**
- * Get the packet request sequence.
- * @return {number} the packet request sequence
- */
-ProtocolPackage.prototype.requestSeq = function() {
- return this.packet_.request_seq;
-};
-
-
-/**
- * Get the packet request sequence.
- * @return {number} the packet request sequence
- */
-ProtocolPackage.prototype.running = function() {
- return this.packet_.running ? true : false;
-};
-
-
-ProtocolPackage.prototype.success = function() {
- return this.packet_.success ? true : false;
-};
-
-
-ProtocolPackage.prototype.message = function() {
- return this.packet_.message;
-};
-
-
-ProtocolPackage.prototype.command = function() {
- return this.packet_.command;
-};
-
-
-ProtocolPackage.prototype.body = function() {
- return this.packet_.body;
-};
-
-
-ProtocolPackage.prototype.bodyValue = function(index) {
- if (index != null) {
- return new ProtocolValue(this.packet_.body[index], this);
- } else {
- return new ProtocolValue(this.packet_.body, this);
- }
-};
-
-
-ProtocolPackage.prototype.body = function() {
- return this.packet_.body;
-};
-
-
-ProtocolPackage.prototype.lookup = function(handle) {
- var value = this.refs_[handle];
- if (value) {
- return new ProtocolValue(value, this);
- } else {
- return new ProtocolReference(handle);
- }
-};
-
-
-ProtocolPackage.prototype.raw_json = function() {
- return this.raw_json_;
-};
-
-
-function ProtocolValue(value, packet) {
- this.value_ = value;
- this.packet_ = packet;
-}
-
-
-/**
- * Get the value type.
- * @return {String} the value type
- */
-ProtocolValue.prototype.type = function() {
- return this.value_.type;
-};
-
-
-/**
- * Get a metadata field from a protocol value.
- * @return {Object} the metadata field value
- */
-ProtocolValue.prototype.field = function(name) {
- return this.value_[name];
-};
-
-
-/**
- * Check is the value is a primitive value.
- * @return {boolean} true if the value is primitive
- */
-ProtocolValue.prototype.isPrimitive = function() {
- return this.isUndefined() || this.isNull() || this.isBoolean() ||
- this.isNumber() || this.isString();
-};
-
-
-/**
- * Get the object handle.
- * @return {number} the value handle
- */
-ProtocolValue.prototype.handle = function() {
- return this.value_.handle;
-};
-
-
-/**
- * Check is the value is undefined.
- * @return {boolean} true if the value is undefined
- */
-ProtocolValue.prototype.isUndefined = function() {
- return this.value_.type == 'undefined';
-};
-
-
-/**
- * Check is the value is null.
- * @return {boolean} true if the value is null
- */
-ProtocolValue.prototype.isNull = function() {
- return this.value_.type == 'null';
-};
-
-
-/**
- * Check is the value is a boolean.
- * @return {boolean} true if the value is a boolean
- */
-ProtocolValue.prototype.isBoolean = function() {
- return this.value_.type == 'boolean';
-};
-
-
-/**
- * Check is the value is a number.
- * @return {boolean} true if the value is a number
- */
-ProtocolValue.prototype.isNumber = function() {
- return this.value_.type == 'number';
-};
-
-
-/**
- * Check is the value is a string.
- * @return {boolean} true if the value is a string
- */
-ProtocolValue.prototype.isString = function() {
- return this.value_.type == 'string';
-};
-
-
-/**
- * Check is the value is an object.
- * @return {boolean} true if the value is an object
- */
-ProtocolValue.prototype.isObject = function() {
- return this.value_.type == 'object' || this.value_.type == 'function' ||
- this.value_.type == 'error' || this.value_.type == 'regexp';
-};
-
-
-/**
- * Get the constructor function
- * @return {ProtocolValue} constructor function
- */
-ProtocolValue.prototype.constructorFunctionValue = function() {
- var ctor = this.value_.constructorFunction;
- return this.packet_.lookup(ctor.ref);
-};
-
-
-/**
- * Get the __proto__ value
- * @return {ProtocolValue} __proto__ value
- */
-ProtocolValue.prototype.protoObjectValue = function() {
- var proto = this.value_.protoObject;
- return this.packet_.lookup(proto.ref);
-};
-
-
-/**
- * Get the number og properties.
- * @return {number} the number of properties
- */
-ProtocolValue.prototype.propertyCount = function() {
- return this.value_.properties ? this.value_.properties.length : 0;
-};
-
-
-/**
- * Get the specified property name.
- * @return {string} property name
- */
-ProtocolValue.prototype.propertyName = function(index) {
- var property = this.value_.properties[index];
- return property.name;
-};
-
-
-/**
- * Return index for the property name.
- * @param name The property name to look for
- * @return {number} index for the property name
- */
-ProtocolValue.prototype.propertyIndex = function(name) {
- for (var i = 0; i < this.propertyCount(); i++) {
- if (this.value_.properties[i].name == name) {
- return i;
- }
- }
- return null;
-};
-
-
-/**
- * Get the specified property value.
- * @return {ProtocolValue} property value
- */
-ProtocolValue.prototype.propertyValue = function(index) {
- var property = this.value_.properties[index];
- return this.packet_.lookup(property.ref);
-};
-
-
-/**
- * Check is the value is a string.
- * @return {boolean} true if the value is a string
- */
-ProtocolValue.prototype.value = function() {
- return this.value_.value;
-};
-
-
-ProtocolValue.prototype.valueString = function() {
- return this.value_.text;
-};
-
-
-function ProtocolReference(handle) {
- this.handle_ = handle;
-}
-
-
-ProtocolReference.prototype.handle = function() {
- return this.handle_;
-};
-
-
-function MakeJSONPair_(name, value) {
- return '"' + name + '":' + value;
-}
-
-
-function ArrayToJSONObject_(content) {
- return '{' + content.join(',') + '}';
-}
-
-
-function ArrayToJSONArray_(content) {
- return '[' + content.join(',') + ']';
-}
-
-
-function BooleanToJSON_(value) {
- return String(value);
-}
-
-
-function NumberToJSON_(value) {
- return String(value);
-}
-
-
-// Mapping of some control characters to avoid the \uXXXX syntax for most
-// commonly used control cahracters.
-var ctrlCharMap_ = {
- '\b': '\\b',
- '\t': '\\t',
- '\n': '\\n',
- '\f': '\\f',
- '\r': '\\r',
- '"' : '\\"',
- '\\': '\\\\'
-};
-
-
-// Regular expression testing for ", \ and control characters (0x00 - 0x1F).
-var ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]');
-
-
-// Regular expression matching ", \ and control characters (0x00 - 0x1F)
-// globally.
-var ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g');
-
-
-/**
- * Convert a String to its JSON representation (see http://www.json.org/). To
- * avoid depending on the String object this method calls the functions in
- * string.js directly and not through the value.
- * @param {String} value The String value to format as JSON
- * @return {string} JSON formatted String value
- */
-function StringToJSON_(value) {
- // Check for" , \ and control characters (0x00 - 0x1F). No need to call
- // RegExpTest as ctrlchar is constructed using RegExp.
- if (ctrlCharTest_.test(value)) {
- // Replace ", \ and control characters (0x00 - 0x1F).
- return '"' +
- value.replace(ctrlCharMatch_, function (char) {
- // Use charmap if possible.
- var mapped = ctrlCharMap_[char];
- if (mapped) return mapped;
- mapped = char.charCodeAt();
- // Convert control character to unicode escape sequence.
- return '\\u00' +
- '0' + // TODO %NumberToRadixString(Math.floor(mapped / 16), 16) +
- '0'; // TODO %NumberToRadixString(mapped % 16, 16)
- })
- + '"';
- }
-
- // Simple string with no special characters.
- return '"' + value + '"';
-}
-
-
-/**
- * Convert a Date to ISO 8601 format. To avoid depending on the Date object
- * this method calls the functions in date.js directly and not through the
- * value.
- * @param {Date} value The Date value to format as JSON
- * @return {string} JSON formatted Date value
- */
-function DateToISO8601_(value) {
- var f = function(n) {
- return n < 10 ? '0' + n : n;
- };
- var g = function(n) {
- return n < 10 ? '00' + n : n < 100 ? '0' + n : n;
- };
- return builtins.GetUTCFullYearFrom(value) + '-' +
- f(builtins.GetUTCMonthFrom(value) + 1) + '-' +
- f(builtins.GetUTCDateFrom(value)) + 'T' +
- f(builtins.GetUTCHoursFrom(value)) + ':' +
- f(builtins.GetUTCMinutesFrom(value)) + ':' +
- f(builtins.GetUTCSecondsFrom(value)) + '.' +
- g(builtins.GetUTCMillisecondsFrom(value)) + 'Z';
-}
-
-
-/**
- * Convert a Date to ISO 8601 format. To avoid depending on the Date object
- * this method calls the functions in date.js directly and not through the
- * value.
- * @param {Date} value The Date value to format as JSON
- * @return {string} JSON formatted Date value
- */
-function DateToJSON_(value) {
- return '"' + DateToISO8601_(value) + '"';
-}
-
-
-/**
- * Convert an Object to its JSON representation (see http://www.json.org/).
- * This implementation simply runs through all string property names and adds
- * each property to the JSON representation for some predefined types. For type
- * "object" the function calls itself recursively unless the object has the
- * function property "toJSONProtocol" in which case that is used. This is not
- * a general implementation but sufficient for the debugger. Note that circular
- * structures will cause infinite recursion.
- * @param {Object} object The object to format as JSON
- * @return {string} JSON formatted object value
- */
-function SimpleObjectToJSON_(object) {
- var content = [];
- for (var key in object) {
- // Only consider string keys.
- if (typeof key == 'string') {
- var property_value = object[key];
-
- // Format the value based on its type.
- var property_value_json;
- switch (typeof property_value) {
- case 'object':
- if (property_value === null) {
- property_value_json = 'null';
- } else if (typeof property_value.toJSONProtocol == 'function') {
- property_value_json = property_value.toJSONProtocol(true);
- } else if (property_value.constructor.name == 'Array'){
- property_value_json = SimpleArrayToJSON_(property_value);
- } else {
- property_value_json = SimpleObjectToJSON_(property_value);
- }
- break;
-
- case 'boolean':
- property_value_json = BooleanToJSON_(property_value);
- break;
-
- case 'number':
- property_value_json = NumberToJSON_(property_value);
- break;
-
- case 'string':
- property_value_json = StringToJSON_(property_value);
- break;
-
- default:
- property_value_json = null;
- }
-
- // Add the property if relevant.
- if (property_value_json) {
- content.push(StringToJSON_(key) + ':' + property_value_json);
- }
- }
- }
-
- // Make JSON object representation.
- return '{' + content.join(',') + '}';
-}
-
-
-/**
- * Convert an array to its JSON representation. This is a VERY simple
- * implementation just to support what is needed for the debugger.
- * @param {Array} arrya The array to format as JSON
- * @return {string} JSON formatted array value
- */
-function SimpleArrayToJSON_(array) {
- // Make JSON array representation.
- var json = '[';
- for (var i = 0; i < array.length; i++) {
- if (i != 0) {
- json += ',';
- }
- var elem = array[i];
- if (elem.toJSONProtocol) {
- json += elem.toJSONProtocol(true);
- } else if (typeof(elem) === 'object') {
- json += SimpleObjectToJSON_(elem);
- } else if (typeof(elem) === 'boolean') {
- json += BooleanToJSON_(elem);
- } else if (typeof(elem) === 'number') {
- json += NumberToJSON_(elem);
- } else if (typeof(elem) === 'string') {
- json += StringToJSON_(elem);
- } else {
- json += elem;
- }
- }
- json += ']';
- return json;
-}
diff --git a/src/3rdparty/v8/src/data-flow.cc b/src/3rdparty/v8/src/data-flow.cc
deleted file mode 100644
index 6a3b05c..0000000
--- a/src/3rdparty/v8/src/data-flow.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "data-flow.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-void BitVector::Print() {
- bool first = true;
- PrintF("{");
- for (int i = 0; i < length(); i++) {
- if (Contains(i)) {
- if (!first) PrintF(",");
- first = false;
- PrintF("%d", i);
- }
- }
- PrintF("}");
-}
-#endif
-
-
-void BitVector::Iterator::Advance() {
- current_++;
- uint32_t val = current_value_;
- while (val == 0) {
- current_index_++;
- if (Done()) return;
- val = target_->data_[current_index_];
- current_ = current_index_ << 5;
- }
- val = SkipZeroBytes(val);
- val = SkipZeroBits(val);
- current_value_ = val >> 1;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/data-flow.h b/src/3rdparty/v8/src/data-flow.h
deleted file mode 100644
index 7eeb794..0000000
--- a/src/3rdparty/v8/src/data-flow.h
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DATAFLOW_H_
-#define V8_DATAFLOW_H_
-
-#include "v8.h"
-
-#include "allocation.h"
-#include "ast.h"
-#include "compiler.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-class BitVector: public ZoneObject {
- public:
- // Iterator for the elements of this BitVector.
- class Iterator BASE_EMBEDDED {
- public:
- explicit Iterator(BitVector* target)
- : target_(target),
- current_index_(0),
- current_value_(target->data_[0]),
- current_(-1) {
- ASSERT(target->data_length_ > 0);
- Advance();
- }
- ~Iterator() { }
-
- bool Done() const { return current_index_ >= target_->data_length_; }
- void Advance();
-
- int Current() const {
- ASSERT(!Done());
- return current_;
- }
-
- private:
- uint32_t SkipZeroBytes(uint32_t val) {
- while ((val & 0xFF) == 0) {
- val >>= 8;
- current_ += 8;
- }
- return val;
- }
- uint32_t SkipZeroBits(uint32_t val) {
- while ((val & 0x1) == 0) {
- val >>= 1;
- current_++;
- }
- return val;
- }
-
- BitVector* target_;
- int current_index_;
- uint32_t current_value_;
- int current_;
-
- friend class BitVector;
- };
-
- BitVector(int length, Zone* zone)
- : length_(length),
- data_length_(SizeFor(length)),
- data_(zone->NewArray<uint32_t>(data_length_)) {
- ASSERT(length > 0);
- Clear();
- }
-
- BitVector(const BitVector& other, Zone* zone)
- : length_(other.length()),
- data_length_(SizeFor(length_)),
- data_(zone->NewArray<uint32_t>(data_length_)) {
- CopyFrom(other);
- }
-
- static int SizeFor(int length) {
- return 1 + ((length - 1) / 32);
- }
-
- BitVector& operator=(const BitVector& rhs) {
- if (this != &rhs) CopyFrom(rhs);
- return *this;
- }
-
- void CopyFrom(const BitVector& other) {
- ASSERT(other.length() <= length());
- for (int i = 0; i < other.data_length_; i++) {
- data_[i] = other.data_[i];
- }
- for (int i = other.data_length_; i < data_length_; i++) {
- data_[i] = 0;
- }
- }
-
- bool Contains(int i) const {
- ASSERT(i >= 0 && i < length());
- uint32_t block = data_[i / 32];
- return (block & (1U << (i % 32))) != 0;
- }
-
- void Add(int i) {
- ASSERT(i >= 0 && i < length());
- data_[i / 32] |= (1U << (i % 32));
- }
-
- void Remove(int i) {
- ASSERT(i >= 0 && i < length());
- data_[i / 32] &= ~(1U << (i % 32));
- }
-
- void Union(const BitVector& other) {
- ASSERT(other.length() == length());
- for (int i = 0; i < data_length_; i++) {
- data_[i] |= other.data_[i];
- }
- }
-
- bool UnionIsChanged(const BitVector& other) {
- ASSERT(other.length() == length());
- bool changed = false;
- for (int i = 0; i < data_length_; i++) {
- uint32_t old_data = data_[i];
- data_[i] |= other.data_[i];
- if (data_[i] != old_data) changed = true;
- }
- return changed;
- }
-
- void Intersect(const BitVector& other) {
- ASSERT(other.length() == length());
- for (int i = 0; i < data_length_; i++) {
- data_[i] &= other.data_[i];
- }
- }
-
- void Subtract(const BitVector& other) {
- ASSERT(other.length() == length());
- for (int i = 0; i < data_length_; i++) {
- data_[i] &= ~other.data_[i];
- }
- }
-
- void Clear() {
- for (int i = 0; i < data_length_; i++) {
- data_[i] = 0;
- }
- }
-
- bool IsEmpty() const {
- for (int i = 0; i < data_length_; i++) {
- if (data_[i] != 0) return false;
- }
- return true;
- }
-
- bool Equals(const BitVector& other) {
- for (int i = 0; i < data_length_; i++) {
- if (data_[i] != other.data_[i]) return false;
- }
- return true;
- }
-
- int length() const { return length_; }
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- int length_;
- int data_length_;
- uint32_t* data_;
-};
-
-class GrowableBitVector BASE_EMBEDDED {
- public:
- class Iterator BASE_EMBEDDED {
- public:
- Iterator(const GrowableBitVector* target, Zone* zone)
- : it_(target->bits_ == NULL
- ? new(zone) BitVector(1, zone)
- : target->bits_) { }
- bool Done() const { return it_.Done(); }
- void Advance() { it_.Advance(); }
- int Current() const { return it_.Current(); }
- private:
- BitVector::Iterator it_;
- };
-
- GrowableBitVector() : bits_(NULL) { }
-
- bool Contains(int value) const {
- if (!InBitsRange(value)) return false;
- return bits_->Contains(value);
- }
-
- void Add(int value, Zone* zone) {
- EnsureCapacity(value, zone);
- bits_->Add(value);
- }
-
- void Union(const GrowableBitVector& other, Zone* zone) {
- for (Iterator it(&other, zone); !it.Done(); it.Advance()) {
- Add(it.Current(), zone);
- }
- }
-
- void Clear() { if (bits_ != NULL) bits_->Clear(); }
-
- private:
- static const int kInitialLength = 1024;
-
- bool InBitsRange(int value) const {
- return bits_ != NULL && bits_->length() > value;
- }
-
- void EnsureCapacity(int value, Zone* zone) {
- if (InBitsRange(value)) return;
- int new_length = bits_ == NULL ? kInitialLength : bits_->length();
- while (new_length <= value) new_length *= 2;
- BitVector* new_bits = new(zone) BitVector(new_length, zone);
- if (bits_ != NULL) new_bits->CopyFrom(*bits_);
- bits_ = new_bits;
- }
-
- BitVector* bits_;
-};
-
-
-} } // namespace v8::internal
-
-
-#endif // V8_DATAFLOW_H_
diff --git a/src/3rdparty/v8/src/date.cc b/src/3rdparty/v8/src/date.cc
deleted file mode 100644
index a377451..0000000
--- a/src/3rdparty/v8/src/date.cc
+++ /dev/null
@@ -1,384 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "date.h"
-
-#include "v8.h"
-
-#include "objects.h"
-#include "objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-static const int kDays4Years[] = {0, 365, 2 * 365, 3 * 365 + 1};
-static const int kDaysIn4Years = 4 * 365 + 1;
-static const int kDaysIn100Years = 25 * kDaysIn4Years - 1;
-static const int kDaysIn400Years = 4 * kDaysIn100Years + 1;
-static const int kDays1970to2000 = 30 * 365 + 7;
-static const int kDaysOffset = 1000 * kDaysIn400Years + 5 * kDaysIn400Years -
- kDays1970to2000;
-static const int kYearsOffset = 400000;
-static const char kDaysInMonths[] =
- {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-
-
-void DateCache::ResetDateCache() {
- static const int kMaxStamp = Smi::kMaxValue;
- stamp_ = Smi::FromInt(stamp_->value() + 1);
- if (stamp_->value() > kMaxStamp) {
- stamp_ = Smi::FromInt(0);
- }
- ASSERT(stamp_ != Smi::FromInt(kInvalidStamp));
- for (int i = 0; i < kDSTSize; ++i) {
- ClearSegment(&dst_[i]);
- }
- dst_usage_counter_ = 0;
- before_ = &dst_[0];
- after_ = &dst_[1];
- local_offset_ms_ = kInvalidLocalOffsetInMs;
- ymd_valid_ = false;
-}
-
-
-void DateCache::ClearSegment(DST* segment) {
- segment->start_sec = kMaxEpochTimeInSec;
- segment->end_sec = -kMaxEpochTimeInSec;
- segment->offset_ms = 0;
- segment->last_used = 0;
-}
-
-
-void DateCache::YearMonthDayFromDays(
- int days, int* year, int* month, int* day) {
- if (ymd_valid_) {
- // Check conservatively if the given 'days' has
- // the same year and month as the cached 'days'.
- int new_day = ymd_day_ + (days - ymd_days_);
- if (new_day >= 1 && new_day <= 28) {
- ymd_day_ = new_day;
- ymd_days_ = days;
- *year = ymd_year_;
- *month = ymd_month_;
- *day = new_day;
- return;
- }
- }
- int save_days = days;
-
- days += kDaysOffset;
- *year = 400 * (days / kDaysIn400Years) - kYearsOffset;
- days %= kDaysIn400Years;
-
- ASSERT(DaysFromYearMonth(*year, 0) + days == save_days);
-
- days--;
- int yd1 = days / kDaysIn100Years;
- days %= kDaysIn100Years;
- *year += 100 * yd1;
-
- days++;
- int yd2 = days / kDaysIn4Years;
- days %= kDaysIn4Years;
- *year += 4 * yd2;
-
- days--;
- int yd3 = days / 365;
- days %= 365;
- *year += yd3;
-
-
- bool is_leap = (!yd1 || yd2) && !yd3;
-
- ASSERT(days >= -1);
- ASSERT(is_leap || (days >= 0));
- ASSERT((days < 365) || (is_leap && (days < 366)));
- ASSERT(is_leap == ((*year % 4 == 0) && (*year % 100 || (*year % 400 == 0))));
- ASSERT(is_leap || ((DaysFromYearMonth(*year, 0) + days) == save_days));
- ASSERT(!is_leap || ((DaysFromYearMonth(*year, 0) + days + 1) == save_days));
-
- days += is_leap;
-
- // Check if the date is after February.
- if (days >= 31 + 28 + is_leap) {
- days -= 31 + 28 + is_leap;
- // Find the date starting from March.
- for (int i = 2; i < 12; i++) {
- if (days < kDaysInMonths[i]) {
- *month = i;
- *day = days + 1;
- break;
- }
- days -= kDaysInMonths[i];
- }
- } else {
- // Check January and February.
- if (days < 31) {
- *month = 0;
- *day = days + 1;
- } else {
- *month = 1;
- *day = days - 31 + 1;
- }
- }
- ASSERT(DaysFromYearMonth(*year, *month) + *day - 1 == save_days);
- ymd_valid_ = true;
- ymd_year_ = *year;
- ymd_month_ = *month;
- ymd_day_ = *day;
- ymd_days_ = save_days;
-}
-
-
-int DateCache::DaysFromYearMonth(int year, int month) {
- static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
- 181, 212, 243, 273, 304, 334};
- static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
- 182, 213, 244, 274, 305, 335};
-
- year += month / 12;
- month %= 12;
- if (month < 0) {
- year--;
- month += 12;
- }
-
- ASSERT(month >= 0);
- ASSERT(month < 12);
-
- // year_delta is an arbitrary number such that:
- // a) year_delta = -1 (mod 400)
- // b) year + year_delta > 0 for years in the range defined by
- // ECMA 262 - 15.9.1.1, i.e. upto 100,000,000 days on either side of
- // Jan 1 1970. This is required so that we don't run into integer
- // division of negative numbers.
- // c) there shouldn't be an overflow for 32-bit integers in the following
- // operations.
- static const int year_delta = 399999;
- static const int base_day = 365 * (1970 + year_delta) +
- (1970 + year_delta) / 4 -
- (1970 + year_delta) / 100 +
- (1970 + year_delta) / 400;
-
- int year1 = year + year_delta;
- int day_from_year = 365 * year1 +
- year1 / 4 -
- year1 / 100 +
- year1 / 400 -
- base_day;
-
- if ((year % 4 != 0) || (year % 100 == 0 && year % 400 != 0)) {
- return day_from_year + day_from_month[month];
- }
- return day_from_year + day_from_month_leap[month];
-}
-
-
-void DateCache::ExtendTheAfterSegment(int time_sec, int offset_ms) {
- if (after_->offset_ms == offset_ms &&
- after_->start_sec <= time_sec + kDefaultDSTDeltaInSec &&
- time_sec <= after_->end_sec) {
- // Extend the after_ segment.
- after_->start_sec = time_sec;
- } else {
- // The after_ segment is either invalid or starts too late.
- if (after_->start_sec <= after_->end_sec) {
- // If the after_ segment is valid, replace it with a new segment.
- after_ = LeastRecentlyUsedDST(before_);
- }
- after_->start_sec = time_sec;
- after_->end_sec = time_sec;
- after_->offset_ms = offset_ms;
- after_->last_used = ++dst_usage_counter_;
- }
-}
-
-
-int DateCache::DaylightSavingsOffsetInMs(int64_t time_ms) {
- int time_sec = (time_ms >= 0 && time_ms <= kMaxEpochTimeInMs)
- ? static_cast<int>(time_ms / 1000)
- : static_cast<int>(EquivalentTime(time_ms) / 1000);
-
- // Invalidate cache if the usage counter is close to overflow.
- // Note that dst_usage_counter is incremented less than ten times
- // in this function.
- if (dst_usage_counter_ >= kMaxInt - 10) {
- dst_usage_counter_ = 0;
- for (int i = 0; i < kDSTSize; ++i) {
- ClearSegment(&dst_[i]);
- }
- }
-
- // Optimistic fast check.
- if (before_->start_sec <= time_sec &&
- time_sec <= before_->end_sec) {
- // Cache hit.
- before_->last_used = ++dst_usage_counter_;
- return before_->offset_ms;
- }
-
- ProbeDST(time_sec);
-
- ASSERT(InvalidSegment(before_) || before_->start_sec <= time_sec);
- ASSERT(InvalidSegment(after_) || time_sec < after_->start_sec);
-
- if (InvalidSegment(before_)) {
- // Cache miss.
- before_->start_sec = time_sec;
- before_->end_sec = time_sec;
- before_->offset_ms = GetDaylightSavingsOffsetFromOS(time_sec);
- before_->last_used = ++dst_usage_counter_;
- return before_->offset_ms;
- }
-
- if (time_sec <= before_->end_sec) {
- // Cache hit.
- before_->last_used = ++dst_usage_counter_;
- return before_->offset_ms;
- }
-
- if (time_sec > before_->end_sec + kDefaultDSTDeltaInSec) {
- // If the before_ segment ends too early, then just
- // query for the offset of the time_sec
- int offset_ms = GetDaylightSavingsOffsetFromOS(time_sec);
- ExtendTheAfterSegment(time_sec, offset_ms);
- // This swap helps the optimistic fast check in subsequent invocations.
- DST* temp = before_;
- before_ = after_;
- after_ = temp;
- return offset_ms;
- }
-
- // Now the time_sec is between
- // before_->end_sec and before_->end_sec + default DST delta.
- // Update the usage counter of before_ since it is going to be used.
- before_->last_used = ++dst_usage_counter_;
-
- // Check if after_ segment is invalid or starts too late.
- // Note that start_sec of invalid segments is kMaxEpochTimeInSec.
- if (before_->end_sec + kDefaultDSTDeltaInSec <= after_->start_sec) {
- int new_after_start_sec = before_->end_sec + kDefaultDSTDeltaInSec;
- int new_offset_ms = GetDaylightSavingsOffsetFromOS(new_after_start_sec);
- ExtendTheAfterSegment(new_after_start_sec, new_offset_ms);
- } else {
- ASSERT(!InvalidSegment(after_));
- // Update the usage counter of after_ since it is going to be used.
- after_->last_used = ++dst_usage_counter_;
- }
-
- // Now the time_sec is between before_->end_sec and after_->start_sec.
- // Only one daylight savings offset change can occur in this interval.
-
- if (before_->offset_ms == after_->offset_ms) {
- // Merge two segments if they have the same offset.
- before_->end_sec = after_->end_sec;
- ClearSegment(after_);
- return before_->offset_ms;
- }
-
- // Binary search for daylight savings offset change point,
- // but give up if we don't find it in four iterations.
- for (int i = 4; i >= 0; --i) {
- int delta = after_->start_sec - before_->end_sec;
- int middle_sec = (i == 0) ? time_sec : before_->end_sec + delta / 2;
- int offset_ms = GetDaylightSavingsOffsetFromOS(middle_sec);
- if (before_->offset_ms == offset_ms) {
- before_->end_sec = middle_sec;
- if (time_sec <= before_->end_sec) {
- return offset_ms;
- }
- } else {
- ASSERT(after_->offset_ms == offset_ms);
- after_->start_sec = middle_sec;
- if (time_sec >= after_->start_sec) {
- // This swap helps the optimistic fast check in subsequent invocations.
- DST* temp = before_;
- before_ = after_;
- after_ = temp;
- return offset_ms;
- }
- }
- }
- UNREACHABLE();
- return 0;
-}
-
-
-void DateCache::ProbeDST(int time_sec) {
- DST* before = NULL;
- DST* after = NULL;
- ASSERT(before_ != after_);
-
- for (int i = 0; i < kDSTSize; ++i) {
- if (dst_[i].start_sec <= time_sec) {
- if (before == NULL || before->start_sec < dst_[i].start_sec) {
- before = &dst_[i];
- }
- } else if (time_sec < dst_[i].end_sec) {
- if (after == NULL || after->end_sec > dst_[i].end_sec) {
- after = &dst_[i];
- }
- }
- }
-
- // If before or after segments were not found,
- // then set them to any invalid segment.
- if (before == NULL) {
- before = InvalidSegment(before_) ? before_ : LeastRecentlyUsedDST(after);
- }
- if (after == NULL) {
- after = InvalidSegment(after_) && before != after_
- ? after_ : LeastRecentlyUsedDST(before);
- }
-
- ASSERT(before != NULL);
- ASSERT(after != NULL);
- ASSERT(before != after);
- ASSERT(InvalidSegment(before) || before->start_sec <= time_sec);
- ASSERT(InvalidSegment(after) || time_sec < after->start_sec);
- ASSERT(InvalidSegment(before) || InvalidSegment(after) ||
- before->end_sec < after->start_sec);
-
- before_ = before;
- after_ = after;
-}
-
-
-DateCache::DST* DateCache::LeastRecentlyUsedDST(DST* skip) {
- DST* result = NULL;
- for (int i = 0; i < kDSTSize; ++i) {
- if (&dst_[i] == skip) continue;
- if (result == NULL || result->last_used > dst_[i].last_used) {
- result = &dst_[i];
- }
- }
- ClearSegment(result);
- return result;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/date.h b/src/3rdparty/v8/src/date.h
deleted file mode 100644
index fcd61db..0000000
--- a/src/3rdparty/v8/src/date.h
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DATE_H_
-#define V8_DATE_H_
-
-#include "allocation.h"
-#include "globals.h"
-#include "platform.h"
-
-
-namespace v8 {
-namespace internal {
-
-class DateCache {
- public:
- static const int kMsPerMin = 60 * 1000;
- static const int kSecPerDay = 24 * 60 * 60;
- static const int64_t kMsPerDay = kSecPerDay * 1000;
-
- // The largest time that can be passed to OS date-time library functions.
- static const int kMaxEpochTimeInSec = kMaxInt;
- static const int64_t kMaxEpochTimeInMs =
- static_cast<int64_t>(kMaxInt) * 1000;
-
- // The largest time that can be stored in JSDate.
- static const int64_t kMaxTimeInMs =
- static_cast<int64_t>(864000000) * 10000000;
-
- // Conservative upper bound on time that can be stored in JSDate
- // before UTC conversion.
- static const int64_t kMaxTimeBeforeUTCInMs =
- kMaxTimeInMs + 10 * kMsPerDay;
-
- // Sentinel that denotes an invalid local offset.
- static const int kInvalidLocalOffsetInMs = kMaxInt;
- // Sentinel that denotes an invalid cache stamp.
- // It is an invariant of DateCache that cache stamp is non-negative.
- static const int kInvalidStamp = -1;
-
- DateCache() : stamp_(0) {
- ResetDateCache();
- }
-
- virtual ~DateCache() {}
-
-
- // Clears cached timezone information and increments the cache stamp.
- void ResetDateCache();
-
-
- // Computes floor(time_ms / kMsPerDay).
- static int DaysFromTime(int64_t time_ms) {
- if (time_ms < 0) time_ms -= (kMsPerDay - 1);
- return static_cast<int>(time_ms / kMsPerDay);
- }
-
-
- // Computes modulo(time_ms, kMsPerDay) given that
- // days = floor(time_ms / kMsPerDay).
- static int TimeInDay(int64_t time_ms, int days) {
- return static_cast<int>(time_ms - days * kMsPerDay);
- }
-
-
- // Given the number of days since the epoch, computes the weekday.
- // ECMA 262 - 15.9.1.6.
- int Weekday(int days) {
- int result = (days + 4) % 7;
- return result >= 0 ? result : result + 7;
- }
-
-
- bool IsLeap(int year) {
- return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
- }
-
-
- // ECMA 262 - 15.9.1.7.
- int LocalOffsetInMs() {
- if (local_offset_ms_ == kInvalidLocalOffsetInMs) {
- local_offset_ms_ = GetLocalOffsetFromOS();
- }
- return local_offset_ms_;
- }
-
-
- const char* LocalTimezone(int64_t time_ms) {
- if (time_ms < 0 || time_ms > kMaxEpochTimeInMs) {
- time_ms = EquivalentTime(time_ms);
- }
- return OS::LocalTimezone(static_cast<double>(time_ms));
- }
-
- // ECMA 262 - 15.9.5.26
- int TimezoneOffset(int64_t time_ms) {
- int64_t local_ms = ToLocal(time_ms);
- return static_cast<int>((time_ms - local_ms) / kMsPerMin);
- }
-
- // ECMA 262 - 15.9.1.9
- int64_t ToLocal(int64_t time_ms) {
- return time_ms + LocalOffsetInMs() + DaylightSavingsOffsetInMs(time_ms);
- }
-
- // ECMA 262 - 15.9.1.9
- int64_t ToUTC(int64_t time_ms) {
- time_ms -= LocalOffsetInMs();
- return time_ms - DaylightSavingsOffsetInMs(time_ms);
- }
-
-
- // Computes a time equivalent to the given time according
- // to ECMA 262 - 15.9.1.9.
- // The issue here is that some library calls don't work right for dates
- // that cannot be represented using a non-negative signed 32 bit integer
- // (measured in whole seconds based on the 1970 epoch).
- // We solve this by mapping the time to a year with same leap-year-ness
- // and same starting day for the year. The ECMAscript specification says
- // we must do this, but for compatibility with other browsers, we use
- // the actual year if it is in the range 1970..2037
- int64_t EquivalentTime(int64_t time_ms) {
- int days = DaysFromTime(time_ms);
- int time_within_day_ms = static_cast<int>(time_ms - days * kMsPerDay);
- int year, month, day;
- YearMonthDayFromDays(days, &year, &month, &day);
- int new_days = DaysFromYearMonth(EquivalentYear(year), month) + day - 1;
- return static_cast<int64_t>(new_days) * kMsPerDay + time_within_day_ms;
- }
-
- // Returns an equivalent year in the range [2008-2035] matching
- // - leap year,
- // - week day of first day.
- // ECMA 262 - 15.9.1.9.
- int EquivalentYear(int year) {
- int week_day = Weekday(DaysFromYearMonth(year, 0));
- int recent_year = (IsLeap(year) ? 1956 : 1967) + (week_day * 12) % 28;
- // Find the year in the range 2008..2037 that is equivalent mod 28.
- // Add 3*28 to give a positive argument to the modulus operator.
- return 2008 + (recent_year + 3 * 28 - 2008) % 28;
- }
-
- // Given the number of days since the epoch, computes
- // the corresponding year, month, and day.
- void YearMonthDayFromDays(int days, int* year, int* month, int* day);
-
- // Computes the number of days since the epoch for
- // the first day of the given month in the given year.
- int DaysFromYearMonth(int year, int month);
-
- // Cache stamp is used for invalidating caches in JSDate.
- // We increment the stamp each time when the timezone information changes.
- // JSDate objects perform stamp check and invalidate their caches if
- // their saved stamp is not equal to the current stamp.
- Smi* stamp() { return stamp_; }
- void* stamp_address() { return &stamp_; }
-
- // These functions are virtual so that we can override them when testing.
- virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) {
- double time_ms = static_cast<double>(time_sec * 1000);
- return static_cast<int>(OS::DaylightSavingsOffset(time_ms));
- }
-
- virtual int GetLocalOffsetFromOS() {
- double offset = OS::LocalTimeOffset();
- ASSERT(offset < kInvalidLocalOffsetInMs);
- return static_cast<int>(offset);
- }
-
- private:
- // The implementation relies on the fact that no time zones have
- // more than one daylight savings offset change per 19 days.
- // In Egypt in 2010 they decided to suspend DST during Ramadan. This
- // led to a short interval where DST is in effect from September 10 to
- // September 30.
- static const int kDefaultDSTDeltaInSec = 19 * kSecPerDay;
-
- // Size of the Daylight Savings Time cache.
- static const int kDSTSize = 32;
-
- // Daylight Savings Time segment stores a segment of time where
- // daylight savings offset does not change.
- struct DST {
- int start_sec;
- int end_sec;
- int offset_ms;
- int last_used;
- };
-
- // Computes the daylight savings offset for the given time.
- // ECMA 262 - 15.9.1.8
- int DaylightSavingsOffsetInMs(int64_t time_ms);
-
- // Sets the before_ and the after_ segments from the DST cache such that
- // the before_ segment starts earlier than the given time and
- // the after_ segment start later than the given time.
- // Both segments might be invalid.
- // The last_used counters of the before_ and after_ are updated.
- void ProbeDST(int time_sec);
-
- // Finds the least recently used segment from the DST cache that is not
- // equal to the given 'skip' segment.
- DST* LeastRecentlyUsedDST(DST* skip);
-
- // Extends the after_ segment with the given point or resets it
- // if it starts later than the given time + kDefaultDSTDeltaInSec.
- inline void ExtendTheAfterSegment(int time_sec, int offset_ms);
-
- // Makes the given segment invalid.
- inline void ClearSegment(DST* segment);
-
- bool InvalidSegment(DST* segment) {
- return segment->start_sec > segment->end_sec;
- }
-
- Smi* stamp_;
-
- // Daylight Saving Time cache.
- DST dst_[kDSTSize];
- int dst_usage_counter_;
- DST* before_;
- DST* after_;
-
- int local_offset_ms_;
-
- // Year/Month/Day cache.
- bool ymd_valid_;
- int ymd_days_;
- int ymd_year_;
- int ymd_month_;
- int ymd_day_;
-};
-
-} } // namespace v8::internal
-
-#endif
diff --git a/src/3rdparty/v8/src/date.js b/src/3rdparty/v8/src/date.js
deleted file mode 100644
index c75d12c..0000000
--- a/src/3rdparty/v8/src/date.js
+++ /dev/null
@@ -1,832 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// This file relies on the fact that the following declarations have been made
-// in v8natives.js:
-// var $isFinite = GlobalIsFinite;
-
-// -------------------------------------------------------------------
-
-// This file contains date support implemented in JavaScript.
-
-// Keep reference to original values of some global properties. This
-// has the added benefit that the code in this file is isolated from
-// changes to these properties.
-var $Date = global.Date;
-
-// Helper function to throw error.
-function ThrowDateTypeError() {
- throw new $TypeError('this is not a Date object.');
-}
-
-
-var timezone_cache_time = $NaN;
-var timezone_cache_timezone;
-
-function LocalTimezone(t) {
- if (NUMBER_IS_NAN(t)) return "";
- if (t == timezone_cache_time) {
- return timezone_cache_timezone;
- }
- var timezone = %DateLocalTimezone(t);
- timezone_cache_time = t;
- timezone_cache_timezone = timezone;
- return timezone;
-}
-
-
-function UTC(time) {
- if (NUMBER_IS_NAN(time)) return time;
- // local_time_offset is needed before the call to DaylightSavingsOffset,
- // so it may be uninitialized.
- return %DateToUTC(time);
-}
-
-
-// ECMA 262 - 15.9.1.11
-function MakeTime(hour, min, sec, ms) {
- if (!$isFinite(hour)) return $NaN;
- if (!$isFinite(min)) return $NaN;
- if (!$isFinite(sec)) return $NaN;
- if (!$isFinite(ms)) return $NaN;
- return TO_INTEGER(hour) * msPerHour
- + TO_INTEGER(min) * msPerMinute
- + TO_INTEGER(sec) * msPerSecond
- + TO_INTEGER(ms);
-}
-
-
-// ECMA 262 - 15.9.1.12
-function TimeInYear(year) {
- return DaysInYear(year) * msPerDay;
-}
-
-
-// Compute number of days given a year, month, date.
-// Note that month and date can lie outside the normal range.
-// For example:
-// MakeDay(2007, -4, 20) --> MakeDay(2006, 8, 20)
-// MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1)
-// MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11)
-function MakeDay(year, month, date) {
- if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
-
- // Convert to integer and map -0 to 0.
- year = TO_INTEGER_MAP_MINUS_ZERO(year);
- month = TO_INTEGER_MAP_MINUS_ZERO(month);
- date = TO_INTEGER_MAP_MINUS_ZERO(date);
-
- if (year < kMinYear || year > kMaxYear ||
- month < kMinMonth || month > kMaxMonth) {
- return $NaN;
- }
-
- // Now we rely on year and month being SMIs.
- return %DateMakeDay(year | 0, month | 0) + date - 1;
-}
-
-
-// ECMA 262 - 15.9.1.13
-function MakeDate(day, time) {
- var time = day * msPerDay + time;
- // Some of our runtime funtions for computing UTC(time) rely on
- // times not being significantly larger than MAX_TIME_MS. If there
- // is no way that the time can be within range even after UTC
- // conversion we return NaN immediately instead of relying on
- // TimeClip to do it.
- if ($abs(time) > MAX_TIME_BEFORE_UTC) return $NaN;
- return time;
-}
-
-
-// ECMA 262 - 15.9.1.14
-function TimeClip(time) {
- if (!$isFinite(time)) return $NaN;
- if ($abs(time) > MAX_TIME_MS) return $NaN;
- return TO_INTEGER(time);
-}
-
-
-// The Date cache is used to limit the cost of parsing the same Date
-// strings over and over again.
-var Date_cache = {
- // Cached time value.
- time: $NaN,
- // String input for which the cached time is valid.
- string: null
-};
-
-
-%SetCode($Date, function(year, month, date, hours, minutes, seconds, ms) {
- if (!%_IsConstructCall()) {
- // ECMA 262 - 15.9.2
- return (new $Date()).toString();
- }
-
- // ECMA 262 - 15.9.3
- var argc = %_ArgumentsLength();
- var value;
- if (argc == 0) {
- value = %DateCurrentTime();
- SET_UTC_DATE_VALUE(this, value);
- } else if (argc == 1) {
- if (IS_NUMBER(year)) {
- value = year;
- } else if (IS_STRING(year)) {
- // Probe the Date cache. If we already have a time value for the
- // given time, we re-use that instead of parsing the string again.
- var cache = Date_cache;
- if (cache.string === year) {
- value = cache.time;
- } else {
- value = DateParse(year);
- if (!NUMBER_IS_NAN(value)) {
- cache.time = value;
- cache.string = year;
- }
- }
-
- } else {
- // According to ECMA 262, no hint should be given for this
- // conversion. However, ToPrimitive defaults to STRING_HINT for
- // Date objects which will lose precision when the Date
- // constructor is called with another Date object as its
- // argument. We therefore use NUMBER_HINT for the conversion,
- // which is the default for everything else than Date objects.
- // This makes us behave like KJS and SpiderMonkey.
- var time = ToPrimitive(year, NUMBER_HINT);
- value = IS_STRING(time) ? DateParse(time) : ToNumber(time);
- }
- SET_UTC_DATE_VALUE(this, value);
- } else {
- year = ToNumber(year);
- month = ToNumber(month);
- date = argc > 2 ? ToNumber(date) : 1;
- hours = argc > 3 ? ToNumber(hours) : 0;
- minutes = argc > 4 ? ToNumber(minutes) : 0;
- seconds = argc > 5 ? ToNumber(seconds) : 0;
- ms = argc > 6 ? ToNumber(ms) : 0;
- year = (!NUMBER_IS_NAN(year) &&
- 0 <= TO_INTEGER(year) &&
- TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
- var day = MakeDay(year, month, date);
- var time = MakeTime(hours, minutes, seconds, ms);
- value = MakeDate(day, time);
- SET_LOCAL_DATE_VALUE(this, value);
- }
-});
-
-
-%FunctionSetPrototype($Date, new $Date($NaN));
-
-
-var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
-var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
- 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
-
-
-function TwoDigitString(value) {
- return value < 10 ? "0" + value : "" + value;
-}
-
-
-function DateString(date) {
- return WeekDays[LOCAL_WEEKDAY(date)] + ' '
- + Months[LOCAL_MONTH(date)] + ' '
- + TwoDigitString(LOCAL_DAY(date)) + ' '
- + LOCAL_YEAR(date);
-}
-
-
-var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
- 'Thursday', 'Friday', 'Saturday'];
-var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June',
- 'July', 'August', 'September', 'October', 'November', 'December'];
-
-
-function LongDateString(date) {
- return LongWeekDays[LOCAL_WEEKDAY(date)] + ', '
- + LongMonths[LOCAL_MONTH(date)] + ' '
- + TwoDigitString(LOCAL_DAY(date)) + ', '
- + LOCAL_YEAR(date);
-}
-
-
-function TimeString(date) {
- return TwoDigitString(LOCAL_HOUR(date)) + ':'
- + TwoDigitString(LOCAL_MIN(date)) + ':'
- + TwoDigitString(LOCAL_SEC(date));
-}
-
-
-function TimeStringUTC(date) {
- return TwoDigitString(UTC_HOUR(date)) + ':'
- + TwoDigitString(UTC_MIN(date)) + ':'
- + TwoDigitString(UTC_SEC(date));
-}
-
-
-function LocalTimezoneString(date) {
- var timezone = LocalTimezone(UTC_DATE_VALUE(date));
-
- var timezoneOffset = -TIMEZONE_OFFSET(date);
- var sign = (timezoneOffset >= 0) ? 1 : -1;
- var hours = FLOOR((sign * timezoneOffset)/60);
- var min = FLOOR((sign * timezoneOffset)%60);
- var gmt = ' GMT' + ((sign == 1) ? '+' : '-') +
- TwoDigitString(hours) + TwoDigitString(min);
- return gmt + ' (' + timezone + ')';
-}
-
-
-function DatePrintString(date) {
- return DateString(date) + ' ' + TimeString(date);
-}
-
-// -------------------------------------------------------------------
-
-// Reused output buffer. Used when parsing date strings.
-var parse_buffer = $Array(8);
-
-// ECMA 262 - 15.9.4.2
-function DateParse(string) {
- var arr = %DateParseString(ToString(string), parse_buffer);
- if (IS_NULL(arr)) return $NaN;
-
- var day = MakeDay(arr[0], arr[1], arr[2]);
- var time = MakeTime(arr[3], arr[4], arr[5], arr[6]);
- var date = MakeDate(day, time);
-
- if (IS_NULL(arr[7])) {
- return TimeClip(UTC(date));
- } else {
- return TimeClip(date - arr[7] * 1000);
- }
-}
-
-
-// ECMA 262 - 15.9.4.3
-function DateUTC(year, month, date, hours, minutes, seconds, ms) {
- year = ToNumber(year);
- month = ToNumber(month);
- var argc = %_ArgumentsLength();
- date = argc > 2 ? ToNumber(date) : 1;
- hours = argc > 3 ? ToNumber(hours) : 0;
- minutes = argc > 4 ? ToNumber(minutes) : 0;
- seconds = argc > 5 ? ToNumber(seconds) : 0;
- ms = argc > 6 ? ToNumber(ms) : 0;
- year = (!NUMBER_IS_NAN(year) &&
- 0 <= TO_INTEGER(year) &&
- TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
- var day = MakeDay(year, month, date);
- var time = MakeTime(hours, minutes, seconds, ms);
- return TimeClip(MakeDate(day, time));
-}
-
-
-// Mozilla-specific extension. Returns the number of milliseconds
-// elapsed since 1 January 1970 00:00:00 UTC.
-function DateNow() {
- return %DateCurrentTime();
-}
-
-
-// ECMA 262 - 15.9.5.2
-function DateToString() {
- var t = UTC_DATE_VALUE(this)
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- var time_zone_string = LocalTimezoneString(this)
- return DatePrintString(this) + time_zone_string;
-}
-
-
-// ECMA 262 - 15.9.5.3
-function DateToDateString() {
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return DateString(this);
-}
-
-
-// ECMA 262 - 15.9.5.4
-function DateToTimeString() {
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- var time_zone_string = LocalTimezoneString(this);
- return TimeString(this) + time_zone_string;
-}
-
-
-// ECMA 262 - 15.9.5.5
-function DateToLocaleString() {
- return %_CallFunction(this, DateToString);
-}
-
-
-// ECMA 262 - 15.9.5.6
-function DateToLocaleDateString() {
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return LongDateString(this);
-}
-
-
-// ECMA 262 - 15.9.5.7
-function DateToLocaleTimeString() {
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return TimeString(this);
-}
-
-
-// ECMA 262 - 15.9.5.8
-function DateValueOf() {
- return UTC_DATE_VALUE(this);
-}
-
-
-// ECMA 262 - 15.9.5.9
-function DateGetTime() {
- return UTC_DATE_VALUE(this);
-}
-
-
-// ECMA 262 - 15.9.5.10
-function DateGetFullYear() {
- return LOCAL_YEAR(this);
-}
-
-
-// ECMA 262 - 15.9.5.11
-function DateGetUTCFullYear() {
- return UTC_YEAR(this);
-}
-
-
-// ECMA 262 - 15.9.5.12
-function DateGetMonth() {
- return LOCAL_MONTH(this);
-}
-
-
-// ECMA 262 - 15.9.5.13
-function DateGetUTCMonth() {
- return UTC_MONTH(this);
-}
-
-
-// ECMA 262 - 15.9.5.14
-function DateGetDate() {
- return LOCAL_DAY(this);
-}
-
-
-// ECMA 262 - 15.9.5.15
-function DateGetUTCDate() {
- return UTC_DAY(this);
-}
-
-
-// ECMA 262 - 15.9.5.16
-function DateGetDay() {
- return LOCAL_WEEKDAY(this);
-}
-
-
-// ECMA 262 - 15.9.5.17
-function DateGetUTCDay() {
- return UTC_WEEKDAY(this);
-}
-
-
-// ECMA 262 - 15.9.5.18
-function DateGetHours() {
- return LOCAL_HOUR(this);
-}
-
-
-// ECMA 262 - 15.9.5.19
-function DateGetUTCHours() {
- return UTC_HOUR(this);
-}
-
-
-// ECMA 262 - 15.9.5.20
-function DateGetMinutes() {
- return LOCAL_MIN(this);
-}
-
-
-// ECMA 262 - 15.9.5.21
-function DateGetUTCMinutes() {
- return UTC_MIN(this);
-}
-
-
-// ECMA 262 - 15.9.5.22
-function DateGetSeconds() {
- return LOCAL_SEC(this);
-}
-
-
-// ECMA 262 - 15.9.5.23
-function DateGetUTCSeconds() {
- return UTC_SEC(this)
-}
-
-
-// ECMA 262 - 15.9.5.24
-function DateGetMilliseconds() {
- return LOCAL_MS(this);
-}
-
-
-// ECMA 262 - 15.9.5.25
-function DateGetUTCMilliseconds() {
- return UTC_MS(this);
-}
-
-
-// ECMA 262 - 15.9.5.26
-function DateGetTimezoneOffset() {
- return TIMEZONE_OFFSET(this);
-}
-
-
-// ECMA 262 - 15.9.5.27
-function DateSetTime(ms) {
- CHECK_DATE(this);
- SET_UTC_DATE_VALUE(this, ToNumber(ms));
- return UTC_DATE_VALUE(this);
-}
-
-
-// ECMA 262 - 15.9.5.28
-function DateSetMilliseconds(ms) {
- var t = LOCAL_DATE_VALUE(this);
- ms = ToNumber(ms);
- var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), LOCAL_SEC(this), ms);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.29
-function DateSetUTCMilliseconds(ms) {
- var t = UTC_DATE_VALUE(this);
- ms = ToNumber(ms);
- var time = MakeTime(UTC_HOUR(this),
- UTC_MIN(this),
- UTC_SEC(this),
- ms);
- return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.30
-function DateSetSeconds(sec, ms) {
- var t = LOCAL_DATE_VALUE(this);
- sec = ToNumber(sec);
- ms = %_ArgumentsLength() < 2 ? LOCAL_MS(this) : ToNumber(ms);
- var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), sec, ms);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.31
-function DateSetUTCSeconds(sec, ms) {
- var t = UTC_DATE_VALUE(this);
- sec = ToNumber(sec);
- ms = %_ArgumentsLength() < 2 ? UTC_MS(this) : ToNumber(ms);
- var time = MakeTime(UTC_HOUR(this), UTC_MIN(this), sec, ms);
- return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.33
-function DateSetMinutes(min, sec, ms) {
- var t = LOCAL_DATE_VALUE(this);
- min = ToNumber(min);
- var argc = %_ArgumentsLength();
- sec = argc < 2 ? LOCAL_SEC(this) : ToNumber(sec);
- ms = argc < 3 ? LOCAL_MS(this) : ToNumber(ms);
- var time = MakeTime(LOCAL_HOUR(this), min, sec, ms);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.34
-function DateSetUTCMinutes(min, sec, ms) {
- var t = UTC_DATE_VALUE(this);
- min = ToNumber(min);
- var argc = %_ArgumentsLength();
- sec = argc < 2 ? UTC_SEC(this) : ToNumber(sec);
- ms = argc < 3 ? UTC_MS(this) : ToNumber(ms);
- var time = MakeTime(UTC_HOUR(this), min, sec, ms);
- return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.35
-function DateSetHours(hour, min, sec, ms) {
- var t = LOCAL_DATE_VALUE(this);
- hour = ToNumber(hour);
- var argc = %_ArgumentsLength();
- min = argc < 2 ? LOCAL_MIN(this) : ToNumber(min);
- sec = argc < 3 ? LOCAL_SEC(this) : ToNumber(sec);
- ms = argc < 4 ? LOCAL_MS(this) : ToNumber(ms);
- var time = MakeTime(hour, min, sec, ms);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.34
-function DateSetUTCHours(hour, min, sec, ms) {
- var t = UTC_DATE_VALUE(this);
- hour = ToNumber(hour);
- var argc = %_ArgumentsLength();
- min = argc < 2 ? UTC_MIN(this) : ToNumber(min);
- sec = argc < 3 ? UTC_SEC(this) : ToNumber(sec);
- ms = argc < 4 ? UTC_MS(this) : ToNumber(ms);
- var time = MakeTime(hour, min, sec, ms);
- return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.36
-function DateSetDate(date) {
- var t = LOCAL_DATE_VALUE(this);
- date = ToNumber(date);
- var day = MakeDay(LOCAL_YEAR(this), LOCAL_MONTH(this), date);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
-}
-
-
-// ECMA 262 - 15.9.5.37
-function DateSetUTCDate(date) {
- var t = UTC_DATE_VALUE(this);
- date = ToNumber(date);
- var day = MakeDay(UTC_YEAR(this), UTC_MONTH(this), date);
- return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
-}
-
-
-// ECMA 262 - 15.9.5.38
-function DateSetMonth(month, date) {
- var t = LOCAL_DATE_VALUE(this);
- month = ToNumber(month);
- date = %_ArgumentsLength() < 2 ? LOCAL_DAY(this) : ToNumber(date);
- var day = MakeDay(LOCAL_YEAR(this), month, date);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
-}
-
-
-// ECMA 262 - 15.9.5.39
-function DateSetUTCMonth(month, date) {
- var t = UTC_DATE_VALUE(this);
- month = ToNumber(month);
- date = %_ArgumentsLength() < 2 ? UTC_DAY(this) : ToNumber(date);
- var day = MakeDay(UTC_YEAR(this), month, date);
- return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
-}
-
-
-// ECMA 262 - 15.9.5.40
-function DateSetFullYear(year, month, date) {
- var t = LOCAL_DATE_VALUE(this);
- year = ToNumber(year);
- var argc = %_ArgumentsLength();
- var time ;
- if (NUMBER_IS_NAN(t)) {
- month = argc < 2 ? 0 : ToNumber(month);
- date = argc < 3 ? 1 : ToNumber(date);
- time = 0;
- } else {
- month = argc < 2 ? LOCAL_MONTH(this) : ToNumber(month);
- date = argc < 3 ? LOCAL_DAY(this) : ToNumber(date);
- time = LOCAL_TIME_IN_DAY(this);
- }
- var day = MakeDay(year, month, date);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(day, time));
-}
-
-
-// ECMA 262 - 15.9.5.41
-function DateSetUTCFullYear(year, month, date) {
- var t = UTC_DATE_VALUE(this);
- year = ToNumber(year);
- var argc = %_ArgumentsLength();
- var time ;
- if (NUMBER_IS_NAN(t)) {
- month = argc < 2 ? 0 : ToNumber(month);
- date = argc < 3 ? 1 : ToNumber(date);
- time = 0;
- } else {
- month = argc < 2 ? UTC_MONTH(this) : ToNumber(month);
- date = argc < 3 ? UTC_DAY(this) : ToNumber(date);
- time = UTC_TIME_IN_DAY(this);
- }
- var day = MakeDay(year, month, date);
- return SET_UTC_DATE_VALUE(this, MakeDate(day, time));
-}
-
-
-// ECMA 262 - 15.9.5.42
-function DateToUTCString() {
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- // Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
- return WeekDays[UTC_WEEKDAY(this)] + ', '
- + TwoDigitString(UTC_DAY(this)) + ' '
- + Months[UTC_MONTH(this)] + ' '
- + UTC_YEAR(this) + ' '
- + TimeStringUTC(this) + ' GMT';
-}
-
-
-// ECMA 262 - B.2.4
-function DateGetYear() {
- return LOCAL_YEAR(this) - 1900;
-}
-
-
-// ECMA 262 - B.2.5
-function DateSetYear(year) {
- CHECK_DATE(this);
- year = ToNumber(year);
- if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, $NaN);
- year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
- ? 1900 + TO_INTEGER(year) : year;
- var t = LOCAL_DATE_VALUE(this);
- var month, date, time;
- if (NUMBER_IS_NAN(t)) {
- month = 0;
- date = 1;
- time = 0;
- } else {
- month = LOCAL_MONTH(this);
- date = LOCAL_DAY(this);
- time = LOCAL_TIME_IN_DAY(this);
- }
- var day = MakeDay(year, month, date);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(day, time));
-}
-
-
-// ECMA 262 - B.2.6
-//
-// Notice that this does not follow ECMA 262 completely. ECMA 262
-// says that toGMTString should be the same Function object as
-// toUTCString. JSC does not do this, so for compatibility we do not
-// do that either. Instead, we create a new function whose name
-// property will return toGMTString.
-function DateToGMTString() {
- return %_CallFunction(this, DateToUTCString);
-}
-
-
-function PadInt(n, digits) {
- if (digits == 1) return n;
- return n < MathPow(10, digits - 1) ? '0' + PadInt(n, digits - 1) : n;
-}
-
-
-// ECMA 262 - 15.9.5.43
-function DateToISOString() {
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) throw MakeRangeError("invalid_time_value", []);
- var year = this.getUTCFullYear();
- var year_string;
- if (year >= 0 && year <= 9999) {
- year_string = PadInt(year, 4);
- } else {
- if (year < 0) {
- year_string = "-" + PadInt(-year, 6);
- } else {
- year_string = "+" + PadInt(year, 6);
- }
- }
- return year_string +
- '-' + PadInt(this.getUTCMonth() + 1, 2) +
- '-' + PadInt(this.getUTCDate(), 2) +
- 'T' + PadInt(this.getUTCHours(), 2) +
- ':' + PadInt(this.getUTCMinutes(), 2) +
- ':' + PadInt(this.getUTCSeconds(), 2) +
- '.' + PadInt(this.getUTCMilliseconds(), 3) +
- 'Z';
-}
-
-
-function DateToJSON(key) {
- var o = ToObject(this);
- var tv = DefaultNumber(o);
- if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
- return null;
- }
- return o.toISOString();
-}
-
-
-function ResetDateCache() {
- // Reset the timezone cache:
- timezone_cache_time = $NaN;
- timezone_cache_timezone = undefined;
-
- // Reset the date cache:
- cache = Date_cache;
- cache.time = $NaN;
- cache.string = null;
-}
-
-
-// -------------------------------------------------------------------
-
-function SetUpDate() {
- %CheckIsBootstrapping();
- // Set up non-enumerable properties of the Date object itself.
- InstallFunctions($Date, DONT_ENUM, $Array(
- "UTC", DateUTC,
- "parse", DateParse,
- "now", DateNow
- ));
-
- // Set up non-enumerable constructor property of the Date prototype object.
- %SetProperty($Date.prototype, "constructor", $Date, DONT_ENUM);
-
- // Set up non-enumerable functions of the Date prototype object and
- // set their names.
- InstallFunctions($Date.prototype, DONT_ENUM, $Array(
- "toString", DateToString,
- "toDateString", DateToDateString,
- "toTimeString", DateToTimeString,
- "toLocaleString", DateToLocaleString,
- "toLocaleDateString", DateToLocaleDateString,
- "toLocaleTimeString", DateToLocaleTimeString,
- "valueOf", DateValueOf,
- "getTime", DateGetTime,
- "getFullYear", DateGetFullYear,
- "getUTCFullYear", DateGetUTCFullYear,
- "getMonth", DateGetMonth,
- "getUTCMonth", DateGetUTCMonth,
- "getDate", DateGetDate,
- "getUTCDate", DateGetUTCDate,
- "getDay", DateGetDay,
- "getUTCDay", DateGetUTCDay,
- "getHours", DateGetHours,
- "getUTCHours", DateGetUTCHours,
- "getMinutes", DateGetMinutes,
- "getUTCMinutes", DateGetUTCMinutes,
- "getSeconds", DateGetSeconds,
- "getUTCSeconds", DateGetUTCSeconds,
- "getMilliseconds", DateGetMilliseconds,
- "getUTCMilliseconds", DateGetUTCMilliseconds,
- "getTimezoneOffset", DateGetTimezoneOffset,
- "setTime", DateSetTime,
- "setMilliseconds", DateSetMilliseconds,
- "setUTCMilliseconds", DateSetUTCMilliseconds,
- "setSeconds", DateSetSeconds,
- "setUTCSeconds", DateSetUTCSeconds,
- "setMinutes", DateSetMinutes,
- "setUTCMinutes", DateSetUTCMinutes,
- "setHours", DateSetHours,
- "setUTCHours", DateSetUTCHours,
- "setDate", DateSetDate,
- "setUTCDate", DateSetUTCDate,
- "setMonth", DateSetMonth,
- "setUTCMonth", DateSetUTCMonth,
- "setFullYear", DateSetFullYear,
- "setUTCFullYear", DateSetUTCFullYear,
- "toGMTString", DateToGMTString,
- "toUTCString", DateToUTCString,
- "getYear", DateGetYear,
- "setYear", DateSetYear,
- "toISOString", DateToISOString,
- "toJSON", DateToJSON
- ));
-}
-
-SetUpDate();
diff --git a/src/3rdparty/v8/src/dateparser-inl.h b/src/3rdparty/v8/src/dateparser-inl.h
deleted file mode 100644
index 3cb36fa..0000000
--- a/src/3rdparty/v8/src/dateparser-inl.h
+++ /dev/null
@@ -1,334 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DATEPARSER_INL_H_
-#define V8_DATEPARSER_INL_H_
-
-#include "dateparser.h"
-
-namespace v8 {
-namespace internal {
-
-template <typename Char>
-bool DateParser::Parse(Vector<Char> str,
- FixedArray* out,
- UnicodeCache* unicode_cache) {
- ASSERT(out->length() >= OUTPUT_SIZE);
- InputReader<Char> in(unicode_cache, str);
- DateStringTokenizer<Char> scanner(&in);
- TimeZoneComposer tz;
- TimeComposer time;
- DayComposer day;
-
- // Specification:
- // Accept ES5 ISO 8601 date-time-strings or legacy dates compatible
- // with Safari.
- // ES5 ISO 8601 dates:
- // [('-'|'+')yy]yyyy[-MM[-DD]][THH:mm[:ss[.sss]][Z|(+|-)hh:mm]]
- // where yyyy is in the range 0000..9999 and
- // +/-yyyyyy is in the range -999999..+999999 -
- // but -000000 is invalid (year zero must be positive),
- // MM is in the range 01..12,
- // DD is in the range 01..31,
- // MM and DD defaults to 01 if missing,,
- // HH is generally in the range 00..23, but can be 24 if mm, ss
- // and sss are zero (or missing), representing midnight at the
- // end of a day,
- // mm and ss are in the range 00..59,
- // sss is in the range 000..999,
- // hh is in the range 00..23,
- // mm, ss, and sss default to 00 if missing, and
- // timezone defaults to Z if missing
- // (following Safari, ISO actually demands local time).
- // Extensions:
- // We also allow sss to have more or less than three digits (but at
- // least one).
- // We allow hh:mm to be specified as hhmm.
- // Legacy dates:
- // Any unrecognized word before the first number is ignored.
- // Parenthesized text is ignored.
- // An unsigned number followed by ':' is a time value, and is
- // added to the TimeComposer. A number followed by '::' adds a second
- // zero as well. A number followed by '.' is also a time and must be
- // followed by milliseconds.
- // Any other number is a date component and is added to DayComposer.
- // A month name (or really: any word having the same first three letters
- // as a month name) is recorded as a named month in the Day composer.
- // A word recognizable as a time-zone is recorded as such, as is
- // '(+|-)(hhmm|hh:)'.
- // Legacy dates don't allow extra signs ('+' or '-') or umatched ')'
- // after a number has been read (before the first number, any garbage
- // is allowed).
- // Intersection of the two:
- // A string that matches both formats (e.g. 1970-01-01) will be
- // parsed as an ES5 date-time string - which means it will default
- // to UTC time-zone. That's unavoidable if following the ES5
- // specification.
- // After a valid "T" has been read while scanning an ES5 datetime string,
- // the input can no longer be a valid legacy date, since the "T" is a
- // garbage string after a number has been read.
-
- // First try getting as far as possible with as ES5 Date Time String.
- DateToken next_unhandled_token = ParseES5DateTime(&scanner, &day, &time, &tz);
- if (next_unhandled_token.IsInvalid()) return false;
- bool has_read_number = !day.IsEmpty();
- // If there's anything left, continue with the legacy parser.
- for (DateToken token = next_unhandled_token;
- !token.IsEndOfInput();
- token = scanner.Next()) {
- if (token.IsNumber()) {
- has_read_number = true;
- int n = token.number();
- if (scanner.SkipSymbol(':')) {
- if (scanner.SkipSymbol(':')) {
- // n + "::"
- if (!time.IsEmpty()) return false;
- time.Add(n);
- time.Add(0);
- } else {
- // n + ":"
- if (!time.Add(n)) return false;
- if (scanner.Peek().IsSymbol('.')) scanner.Next();
- }
- } else if (scanner.SkipSymbol('.') && time.IsExpecting(n)) {
- time.Add(n);
- if (!scanner.Peek().IsNumber()) return false;
- int n = ReadMilliseconds(scanner.Next());
- if (n < 0) return false;
- time.AddFinal(n);
- } else if (tz.IsExpecting(n)) {
- tz.SetAbsoluteMinute(n);
- } else if (time.IsExpecting(n)) {
- time.AddFinal(n);
- // Require end, white space, "Z", "+" or "-" immediately after
- // finalizing time.
- DateToken peek = scanner.Peek();
- if (!peek.IsEndOfInput() &&
- !peek.IsWhiteSpace() &&
- !peek.IsKeywordZ() &&
- !peek.IsAsciiSign()) return false;
- } else {
- if (!day.Add(n)) return false;
- scanner.SkipSymbol('-');
- }
- } else if (token.IsKeyword()) {
- // Parse a "word" (sequence of chars. >= 'A').
- KeywordType type = token.keyword_type();
- int value = token.keyword_value();
- if (type == AM_PM && !time.IsEmpty()) {
- time.SetHourOffset(value);
- } else if (type == MONTH_NAME) {
- day.SetNamedMonth(value);
- scanner.SkipSymbol('-');
- } else if (type == TIME_ZONE_NAME && has_read_number) {
- tz.Set(value);
- } else {
- // Garbage words are illegal if a number has been read.
- if (has_read_number) return false;
- // The first number has to be separated from garbage words by
- // whitespace or other separators.
- if (scanner.Peek().IsNumber()) return false;
- }
- } else if (token.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
- // Parse UTC offset (only after UTC or time).
- tz.SetSign(token.ascii_sign());
- // The following number may be empty.
- int n = 0;
- if (scanner.Peek().IsNumber()) {
- n = scanner.Next().number();
- }
- has_read_number = true;
-
- if (scanner.Peek().IsSymbol(':')) {
- tz.SetAbsoluteHour(n);
- tz.SetAbsoluteMinute(kNone);
- } else {
- tz.SetAbsoluteHour(n / 100);
- tz.SetAbsoluteMinute(n % 100);
- }
- } else if ((token.IsAsciiSign() || token.IsSymbol(')')) &&
- has_read_number) {
- // Extra sign or ')' is illegal if a number has been read.
- return false;
- } else {
- // Ignore other characters and whitespace.
- }
- }
-
- return day.Write(out) && time.Write(out) && tz.Write(out);
-}
-
-
-template<typename CharType>
-DateParser::DateToken DateParser::DateStringTokenizer<CharType>::Scan() {
- int pre_pos = in_->position();
- if (in_->IsEnd()) return DateToken::EndOfInput();
- if (in_->IsAsciiDigit()) {
- int n = in_->ReadUnsignedNumeral();
- int length = in_->position() - pre_pos;
- return DateToken::Number(n, length);
- }
- if (in_->Skip(':')) return DateToken::Symbol(':');
- if (in_->Skip('-')) return DateToken::Symbol('-');
- if (in_->Skip('+')) return DateToken::Symbol('+');
- if (in_->Skip('.')) return DateToken::Symbol('.');
- if (in_->Skip(')')) return DateToken::Symbol(')');
- if (in_->IsAsciiAlphaOrAbove()) {
- ASSERT(KeywordTable::kPrefixLength == 3);
- uint32_t buffer[3] = {0, 0, 0};
- int length = in_->ReadWord(buffer, 3);
- int index = KeywordTable::Lookup(buffer, length);
- return DateToken::Keyword(KeywordTable::GetType(index),
- KeywordTable::GetValue(index),
- length);
- }
- if (in_->SkipWhiteSpace()) {
- return DateToken::WhiteSpace(in_->position() - pre_pos);
- }
- if (in_->SkipParentheses()) {
- return DateToken::Unknown();
- }
- in_->Next();
- return DateToken::Unknown();
-}
-
-
-template <typename Char>
-DateParser::DateToken DateParser::ParseES5DateTime(
- DateStringTokenizer<Char>* scanner,
- DayComposer* day,
- TimeComposer* time,
- TimeZoneComposer* tz) {
- ASSERT(day->IsEmpty());
- ASSERT(time->IsEmpty());
- ASSERT(tz->IsEmpty());
-
- // Parse mandatory date string: [('-'|'+')yy]yyyy[':'MM[':'DD]]
- if (scanner->Peek().IsAsciiSign()) {
- // Keep the sign token, so we can pass it back to the legacy
- // parser if we don't use it.
- DateToken sign_token = scanner->Next();
- if (!scanner->Peek().IsFixedLengthNumber(6)) return sign_token;
- int sign = sign_token.ascii_sign();
- int year = scanner->Next().number();
- if (sign < 0 && year == 0) return sign_token;
- day->Add(sign * year);
- } else if (scanner->Peek().IsFixedLengthNumber(4)) {
- day->Add(scanner->Next().number());
- } else {
- return scanner->Next();
- }
- if (scanner->SkipSymbol('-')) {
- if (!scanner->Peek().IsFixedLengthNumber(2) ||
- !DayComposer::IsMonth(scanner->Peek().number())) return scanner->Next();
- day->Add(scanner->Next().number());
- if (scanner->SkipSymbol('-')) {
- if (!scanner->Peek().IsFixedLengthNumber(2) ||
- !DayComposer::IsDay(scanner->Peek().number())) return scanner->Next();
- day->Add(scanner->Next().number());
- }
- }
- // Check for optional time string: 'T'HH':'mm[':'ss['.'sss]]Z
- if (!scanner->Peek().IsKeywordType(TIME_SEPARATOR)) {
- if (!scanner->Peek().IsEndOfInput()) return scanner->Next();
- } else {
- // ES5 Date Time String time part is present.
- scanner->Next();
- if (!scanner->Peek().IsFixedLengthNumber(2) ||
- !Between(scanner->Peek().number(), 0, 24)) {
- return DateToken::Invalid();
- }
- // Allow 24:00[:00[.000]], but no other time starting with 24.
- bool hour_is_24 = (scanner->Peek().number() == 24);
- time->Add(scanner->Next().number());
- if (!scanner->SkipSymbol(':')) return DateToken::Invalid();
- if (!scanner->Peek().IsFixedLengthNumber(2) ||
- !TimeComposer::IsMinute(scanner->Peek().number()) ||
- (hour_is_24 && scanner->Peek().number() > 0)) {
- return DateToken::Invalid();
- }
- time->Add(scanner->Next().number());
- if (scanner->SkipSymbol(':')) {
- if (!scanner->Peek().IsFixedLengthNumber(2) ||
- !TimeComposer::IsSecond(scanner->Peek().number()) ||
- (hour_is_24 && scanner->Peek().number() > 0)) {
- return DateToken::Invalid();
- }
- time->Add(scanner->Next().number());
- if (scanner->SkipSymbol('.')) {
- if (!scanner->Peek().IsNumber() ||
- (hour_is_24 && scanner->Peek().number() > 0)) {
- return DateToken::Invalid();
- }
- // Allow more or less than the mandated three digits.
- time->Add(ReadMilliseconds(scanner->Next()));
- }
- }
- // Check for optional timezone designation: 'Z' | ('+'|'-')hh':'mm
- if (scanner->Peek().IsKeywordZ()) {
- scanner->Next();
- tz->Set(0);
- } else if (scanner->Peek().IsSymbol('+') ||
- scanner->Peek().IsSymbol('-')) {
- tz->SetSign(scanner->Next().symbol() == '+' ? 1 : -1);
- if (scanner->Peek().IsFixedLengthNumber(4)) {
- // hhmm extension syntax.
- int hourmin = scanner->Next().number();
- int hour = hourmin / 100;
- int min = hourmin % 100;
- if (!TimeComposer::IsHour(hour) || !TimeComposer::IsMinute(min)) {
- return DateToken::Invalid();
- }
- tz->SetAbsoluteHour(hour);
- tz->SetAbsoluteMinute(min);
- } else {
- // hh:mm standard syntax.
- if (!scanner->Peek().IsFixedLengthNumber(2) ||
- !TimeComposer::IsHour(scanner->Peek().number())) {
- return DateToken::Invalid();
- }
- tz->SetAbsoluteHour(scanner->Next().number());
- if (!scanner->SkipSymbol(':')) return DateToken::Invalid();
- if (!scanner->Peek().IsFixedLengthNumber(2) ||
- !TimeComposer::IsMinute(scanner->Peek().number())) {
- return DateToken::Invalid();
- }
- tz->SetAbsoluteMinute(scanner->Next().number());
- }
- }
- if (!scanner->Peek().IsEndOfInput()) return DateToken::Invalid();
- }
- // Successfully parsed ES5 Date Time String. Default to UTC if no TZ given.
- if (tz->IsEmpty()) tz->Set(0);
- day->set_iso_date();
- return DateToken::EndOfInput();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_DATEPARSER_INL_H_
diff --git a/src/3rdparty/v8/src/dateparser.cc b/src/3rdparty/v8/src/dateparser.cc
deleted file mode 100644
index 4a0721f..0000000
--- a/src/3rdparty/v8/src/dateparser.cc
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "dateparser.h"
-
-namespace v8 {
-namespace internal {
-
-bool DateParser::DayComposer::Write(FixedArray* output) {
- if (index_ < 1) return false;
- // Day and month defaults to 1.
- while (index_ < kSize) {
- comp_[index_++] = 1;
- }
-
- int year = 0; // Default year is 0 (=> 2000) for KJS compatibility.
- int month = kNone;
- int day = kNone;
-
- if (named_month_ == kNone) {
- if (is_iso_date_ || (index_ == 3 && !IsDay(comp_[0]))) {
- // YMD
- year = comp_[0];
- month = comp_[1];
- day = comp_[2];
- } else {
- // MD(Y)
- month = comp_[0];
- day = comp_[1];
- if (index_ == 3) year = comp_[2];
- }
- } else {
- month = named_month_;
- if (index_ == 1) {
- // MD or DM
- day = comp_[0];
- } else if (!IsDay(comp_[0])) {
- // YMD, MYD, or YDM
- year = comp_[0];
- day = comp_[1];
- } else {
- // DMY, MDY, or DYM
- day = comp_[0];
- year = comp_[1];
- }
- }
-
- if (!is_iso_date_) {
- if (Between(year, 0, 49)) year += 2000;
- else if (Between(year, 50, 99)) year += 1900;
- }
-
- if (!Smi::IsValid(year) || !IsMonth(month) || !IsDay(day)) return false;
-
- output->set(YEAR, Smi::FromInt(year));
- output->set(MONTH, Smi::FromInt(month - 1)); // 0-based
- output->set(DAY, Smi::FromInt(day));
- return true;
-}
-
-
-bool DateParser::TimeComposer::Write(FixedArray* output) {
- // All time slots default to 0
- while (index_ < kSize) {
- comp_[index_++] = 0;
- }
-
- int& hour = comp_[0];
- int& minute = comp_[1];
- int& second = comp_[2];
- int& millisecond = comp_[3];
-
- if (hour_offset_ != kNone) {
- if (!IsHour12(hour)) return false;
- hour %= 12;
- hour += hour_offset_;
- }
-
- if (!IsHour(hour) || !IsMinute(minute) ||
- !IsSecond(second) || !IsMillisecond(millisecond)) return false;
-
- output->set(HOUR, Smi::FromInt(hour));
- output->set(MINUTE, Smi::FromInt(minute));
- output->set(SECOND, Smi::FromInt(second));
- output->set(MILLISECOND, Smi::FromInt(millisecond));
- return true;
-}
-
-bool DateParser::TimeZoneComposer::Write(FixedArray* output) {
- if (sign_ != kNone) {
- if (hour_ == kNone) hour_ = 0;
- if (minute_ == kNone) minute_ = 0;
- int total_seconds = sign_ * (hour_ * 3600 + minute_ * 60);
- if (!Smi::IsValid(total_seconds)) return false;
- output->set(UTC_OFFSET, Smi::FromInt(total_seconds));
- } else {
- output->set_null(UTC_OFFSET);
- }
- return true;
-}
-
-const int8_t DateParser::KeywordTable::
- array[][DateParser::KeywordTable::kEntrySize] = {
- {'j', 'a', 'n', DateParser::MONTH_NAME, 1},
- {'f', 'e', 'b', DateParser::MONTH_NAME, 2},
- {'m', 'a', 'r', DateParser::MONTH_NAME, 3},
- {'a', 'p', 'r', DateParser::MONTH_NAME, 4},
- {'m', 'a', 'y', DateParser::MONTH_NAME, 5},
- {'j', 'u', 'n', DateParser::MONTH_NAME, 6},
- {'j', 'u', 'l', DateParser::MONTH_NAME, 7},
- {'a', 'u', 'g', DateParser::MONTH_NAME, 8},
- {'s', 'e', 'p', DateParser::MONTH_NAME, 9},
- {'o', 'c', 't', DateParser::MONTH_NAME, 10},
- {'n', 'o', 'v', DateParser::MONTH_NAME, 11},
- {'d', 'e', 'c', DateParser::MONTH_NAME, 12},
- {'a', 'm', '\0', DateParser::AM_PM, 0},
- {'p', 'm', '\0', DateParser::AM_PM, 12},
- {'u', 't', '\0', DateParser::TIME_ZONE_NAME, 0},
- {'u', 't', 'c', DateParser::TIME_ZONE_NAME, 0},
- {'z', '\0', '\0', DateParser::TIME_ZONE_NAME, 0},
- {'g', 'm', 't', DateParser::TIME_ZONE_NAME, 0},
- {'c', 'd', 't', DateParser::TIME_ZONE_NAME, -5},
- {'c', 's', 't', DateParser::TIME_ZONE_NAME, -6},
- {'e', 'd', 't', DateParser::TIME_ZONE_NAME, -4},
- {'e', 's', 't', DateParser::TIME_ZONE_NAME, -5},
- {'m', 'd', 't', DateParser::TIME_ZONE_NAME, -6},
- {'m', 's', 't', DateParser::TIME_ZONE_NAME, -7},
- {'p', 'd', 't', DateParser::TIME_ZONE_NAME, -7},
- {'p', 's', 't', DateParser::TIME_ZONE_NAME, -8},
- {'t', '\0', '\0', DateParser::TIME_SEPARATOR, 0},
- {'\0', '\0', '\0', DateParser::INVALID, 0},
-};
-
-
-// We could use perfect hashing here, but this is not a bottleneck.
-int DateParser::KeywordTable::Lookup(const uint32_t* pre, int len) {
- int i;
- for (i = 0; array[i][kTypeOffset] != INVALID; i++) {
- int j = 0;
- while (j < kPrefixLength &&
- pre[j] == static_cast<uint32_t>(array[i][j])) {
- j++;
- }
- // Check if we have a match and the length is legal.
- // Word longer than keyword is only allowed for month names.
- if (j == kPrefixLength &&
- (len <= kPrefixLength || array[i][kTypeOffset] == MONTH_NAME)) {
- return i;
- }
- }
- return i;
-}
-
-
-int DateParser::ReadMilliseconds(DateToken token) {
- // Read first three significant digits of the original numeral,
- // as inferred from the value and the number of digits.
- // I.e., use the number of digits to see if there were
- // leading zeros.
- int number = token.number();
- int length = token.length();
- if (length < 3) {
- // Less than three digits. Multiply to put most significant digit
- // in hundreds position.
- if (length == 1) {
- number *= 100;
- } else if (length == 2) {
- number *= 10;
- }
- } else if (length > 3) {
- if (length > kMaxSignificantDigits) length = kMaxSignificantDigits;
- // More than three digits. Divide by 10^(length - 3) to get three
- // most significant digits.
- int factor = 1;
- do {
- ASSERT(factor <= 100000000); // factor won't overflow.
- factor *= 10;
- length--;
- } while (length > 3);
- number /= factor;
- }
- return number;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/dateparser.h b/src/3rdparty/v8/src/dateparser.h
deleted file mode 100644
index 27584ce..0000000
--- a/src/3rdparty/v8/src/dateparser.h
+++ /dev/null
@@ -1,409 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DATEPARSER_H_
-#define V8_DATEPARSER_H_
-
-#include "allocation.h"
-#include "char-predicates-inl.h"
-
-namespace v8 {
-namespace internal {
-
-class DateParser : public AllStatic {
- public:
- // Parse the string as a date. If parsing succeeds, return true after
- // filling out the output array as follows (all integers are Smis):
- // [0]: year
- // [1]: month (0 = Jan, 1 = Feb, ...)
- // [2]: day
- // [3]: hour
- // [4]: minute
- // [5]: second
- // [6]: millisecond
- // [7]: UTC offset in seconds, or null value if no timezone specified
- // If parsing fails, return false (content of output array is not defined).
- template <typename Char>
- static bool Parse(Vector<Char> str, FixedArray* output, UnicodeCache* cache);
-
- enum {
- YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MILLISECOND, UTC_OFFSET, OUTPUT_SIZE
- };
-
- private:
- // Range testing
- static inline bool Between(int x, int lo, int hi) {
- return static_cast<unsigned>(x - lo) <= static_cast<unsigned>(hi - lo);
- }
-
- // Indicates a missing value.
- static const int kNone = kMaxInt;
-
- // Maximal number of digits used to build the value of a numeral.
- // Remaining digits are ignored.
- static const int kMaxSignificantDigits = 9;
-
- // InputReader provides basic string parsing and character classification.
- template <typename Char>
- class InputReader BASE_EMBEDDED {
- public:
- InputReader(UnicodeCache* unicode_cache, Vector<Char> s)
- : index_(0),
- buffer_(s),
- unicode_cache_(unicode_cache) {
- Next();
- }
-
- int position() { return index_; }
-
- // Advance to the next character of the string.
- void Next() {
- ch_ = (index_ < buffer_.length()) ? buffer_[index_] : 0;
- index_++;
- }
-
- // Read a string of digits as an unsigned number. Cap value at
- // kMaxSignificantDigits, but skip remaining digits if the numeral
- // is longer.
- int ReadUnsignedNumeral() {
- int n = 0;
- int i = 0;
- while (IsAsciiDigit()) {
- if (i < kMaxSignificantDigits) n = n * 10 + ch_ - '0';
- i++;
- Next();
- }
- return n;
- }
-
- // Read a word (sequence of chars. >= 'A'), fill the given buffer with a
- // lower-case prefix, and pad any remainder of the buffer with zeroes.
- // Return word length.
- int ReadWord(uint32_t* prefix, int prefix_size) {
- int len;
- for (len = 0; IsAsciiAlphaOrAbove(); Next(), len++) {
- if (len < prefix_size) prefix[len] = AsciiAlphaToLower(ch_);
- }
- for (int i = len; i < prefix_size; i++) prefix[i] = 0;
- return len;
- }
-
- // The skip methods return whether they actually skipped something.
- bool Skip(uint32_t c) {
- if (ch_ == c) {
- Next();
- return true;
- }
- return false;
- }
-
- bool SkipWhiteSpace() {
- if (unicode_cache_->IsWhiteSpace(ch_)) {
- Next();
- return true;
- }
- return false;
- }
-
- bool SkipParentheses() {
- if (ch_ != '(') return false;
- int balance = 0;
- do {
- if (ch_ == ')') --balance;
- else if (ch_ == '(') ++balance;
- Next();
- } while (balance > 0 && ch_);
- return true;
- }
-
- // Character testing/classification. Non-ASCII digits are not supported.
- bool Is(uint32_t c) const { return ch_ == c; }
- bool IsEnd() const { return ch_ == 0; }
- bool IsAsciiDigit() const { return IsDecimalDigit(ch_); }
- bool IsAsciiAlphaOrAbove() const { return ch_ >= 'A'; }
- bool IsAsciiSign() const { return ch_ == '+' || ch_ == '-'; }
-
- // Return 1 for '+' and -1 for '-'.
- int GetAsciiSignValue() const { return 44 - static_cast<int>(ch_); }
-
- private:
- int index_;
- Vector<Char> buffer_;
- uint32_t ch_;
- UnicodeCache* unicode_cache_;
- };
-
- enum KeywordType {
- INVALID, MONTH_NAME, TIME_ZONE_NAME, TIME_SEPARATOR, AM_PM
- };
-
- struct DateToken {
- public:
- bool IsInvalid() { return tag_ == kInvalidTokenTag; }
- bool IsUnknown() { return tag_ == kUnknownTokenTag; }
- bool IsNumber() { return tag_ == kNumberTag; }
- bool IsSymbol() { return tag_ == kSymbolTag; }
- bool IsWhiteSpace() { return tag_ == kWhiteSpaceTag; }
- bool IsEndOfInput() { return tag_ == kEndOfInputTag; }
- bool IsKeyword() { return tag_ >= kKeywordTagStart; }
-
- int length() { return length_; }
-
- int number() {
- ASSERT(IsNumber());
- return value_;
- }
- KeywordType keyword_type() {
- ASSERT(IsKeyword());
- return static_cast<KeywordType>(tag_);
- }
- int keyword_value() {
- ASSERT(IsKeyword());
- return value_;
- }
- char symbol() {
- ASSERT(IsSymbol());
- return static_cast<char>(value_);
- }
- bool IsSymbol(char symbol) {
- return IsSymbol() && this->symbol() == symbol;
- }
- bool IsKeywordType(KeywordType tag) {
- return tag_ == tag;
- }
- bool IsFixedLengthNumber(int length) {
- return IsNumber() && length_ == length;
- }
- bool IsAsciiSign() {
- return tag_ == kSymbolTag && (value_ == '-' || value_ == '+');
- }
- int ascii_sign() {
- ASSERT(IsAsciiSign());
- return 44 - value_;
- }
- bool IsKeywordZ() {
- return IsKeywordType(TIME_ZONE_NAME) && length_ == 1 && value_ == 0;
- }
- bool IsUnknown(int character) {
- return IsUnknown() && value_ == character;
- }
- // Factory functions.
- static DateToken Keyword(KeywordType tag, int value, int length) {
- return DateToken(tag, length, value);
- }
- static DateToken Number(int value, int length) {
- return DateToken(kNumberTag, length, value);
- }
- static DateToken Symbol(char symbol) {
- return DateToken(kSymbolTag, 1, symbol);
- }
- static DateToken EndOfInput() {
- return DateToken(kEndOfInputTag, 0, -1);
- }
- static DateToken WhiteSpace(int length) {
- return DateToken(kWhiteSpaceTag, length, -1);
- }
- static DateToken Unknown() {
- return DateToken(kUnknownTokenTag, 1, -1);
- }
- static DateToken Invalid() {
- return DateToken(kInvalidTokenTag, 0, -1);
- }
-
- private:
- enum TagType {
- kInvalidTokenTag = -6,
- kUnknownTokenTag = -5,
- kWhiteSpaceTag = -4,
- kNumberTag = -3,
- kSymbolTag = -2,
- kEndOfInputTag = -1,
- kKeywordTagStart = 0
- };
- DateToken(int tag, int length, int value)
- : tag_(tag),
- length_(length),
- value_(value) { }
-
- int tag_;
- int length_; // Number of characters.
- int value_;
- };
-
- template <typename Char>
- class DateStringTokenizer {
- public:
- explicit DateStringTokenizer(InputReader<Char>* in)
- : in_(in), next_(Scan()) { }
- DateToken Next() {
- DateToken result = next_;
- next_ = Scan();
- return result;
- }
-
- DateToken Peek() {
- return next_;
- }
- bool SkipSymbol(char symbol) {
- if (next_.IsSymbol(symbol)) {
- next_ = Scan();
- return true;
- }
- return false;
- }
-
- private:
- DateToken Scan();
-
- InputReader<Char>* in_;
- DateToken next_;
- };
-
- static int ReadMilliseconds(DateToken number);
-
- // KeywordTable maps names of months, time zones, am/pm to numbers.
- class KeywordTable : public AllStatic {
- public:
- // Look up a word in the keyword table and return an index.
- // 'pre' contains a prefix of the word, zero-padded to size kPrefixLength
- // and 'len' is the word length.
- static int Lookup(const uint32_t* pre, int len);
- // Get the type of the keyword at index i.
- static KeywordType GetType(int i) {
- return static_cast<KeywordType>(array[i][kTypeOffset]);
- }
- // Get the value of the keyword at index i.
- static int GetValue(int i) { return array[i][kValueOffset]; }
-
- static const int kPrefixLength = 3;
- static const int kTypeOffset = kPrefixLength;
- static const int kValueOffset = kTypeOffset + 1;
- static const int kEntrySize = kValueOffset + 1;
- static const int8_t array[][kEntrySize];
- };
-
- class TimeZoneComposer BASE_EMBEDDED {
- public:
- TimeZoneComposer() : sign_(kNone), hour_(kNone), minute_(kNone) {}
- void Set(int offset_in_hours) {
- sign_ = offset_in_hours < 0 ? -1 : 1;
- hour_ = offset_in_hours * sign_;
- minute_ = 0;
- }
- void SetSign(int sign) { sign_ = sign < 0 ? -1 : 1; }
- void SetAbsoluteHour(int hour) { hour_ = hour; }
- void SetAbsoluteMinute(int minute) { minute_ = minute; }
- bool IsExpecting(int n) const {
- return hour_ != kNone && minute_ == kNone && TimeComposer::IsMinute(n);
- }
- bool IsUTC() const { return hour_ == 0 && minute_ == 0; }
- bool Write(FixedArray* output);
- bool IsEmpty() { return hour_ == kNone; }
- private:
- int sign_;
- int hour_;
- int minute_;
- };
-
- class TimeComposer BASE_EMBEDDED {
- public:
- TimeComposer() : index_(0), hour_offset_(kNone) {}
- bool IsEmpty() const { return index_ == 0; }
- bool IsExpecting(int n) const {
- return (index_ == 1 && IsMinute(n)) ||
- (index_ == 2 && IsSecond(n)) ||
- (index_ == 3 && IsMillisecond(n));
- }
- bool Add(int n) {
- return index_ < kSize ? (comp_[index_++] = n, true) : false;
- }
- bool AddFinal(int n) {
- if (!Add(n)) return false;
- while (index_ < kSize) comp_[index_++] = 0;
- return true;
- }
- void SetHourOffset(int n) { hour_offset_ = n; }
- bool Write(FixedArray* output);
-
- static bool IsMinute(int x) { return Between(x, 0, 59); }
- static bool IsHour(int x) { return Between(x, 0, 23); }
- static bool IsSecond(int x) { return Between(x, 0, 59); }
-
- private:
- static bool IsHour12(int x) { return Between(x, 0, 12); }
- static bool IsMillisecond(int x) { return Between(x, 0, 999); }
-
- static const int kSize = 4;
- int comp_[kSize];
- int index_;
- int hour_offset_;
- };
-
- class DayComposer BASE_EMBEDDED {
- public:
- DayComposer() : index_(0), named_month_(kNone), is_iso_date_(false) {}
- bool IsEmpty() const { return index_ == 0; }
- bool Add(int n) {
- if (index_ < kSize) {
- comp_[index_] = n;
- index_++;
- return true;
- }
- return false;
- }
- void SetNamedMonth(int n) { named_month_ = n; }
- bool Write(FixedArray* output);
- void set_iso_date() { is_iso_date_ = true; }
- static bool IsMonth(int x) { return Between(x, 1, 12); }
- static bool IsDay(int x) { return Between(x, 1, 31); }
-
- private:
- static const int kSize = 3;
- int comp_[kSize];
- int index_;
- int named_month_;
- // If set, ensures that data is always parsed in year-month-date order.
- bool is_iso_date_;
- };
-
- // Tries to parse an ES5 Date Time String. Returns the next token
- // to continue with in the legacy date string parser. If parsing is
- // complete, returns DateToken::EndOfInput(). If terminally unsuccessful,
- // returns DateToken::Invalid(). Otherwise parsing continues in the
- // legacy parser.
- template <typename Char>
- static DateParser::DateToken ParseES5DateTime(
- DateStringTokenizer<Char>* scanner,
- DayComposer* day,
- TimeComposer* time,
- TimeZoneComposer* tz);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_DATEPARSER_H_
diff --git a/src/3rdparty/v8/src/debug-agent.cc b/src/3rdparty/v8/src/debug-agent.cc
deleted file mode 100644
index 811c00e..0000000
--- a/src/3rdparty/v8/src/debug-agent.cc
+++ /dev/null
@@ -1,462 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-#include "debug.h"
-#include "debug-agent.h"
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-namespace v8 {
-namespace internal {
-
-// Public V8 debugger API message handler function. This function just delegates
-// to the debugger agent through it's data parameter.
-void DebuggerAgentMessageHandler(const v8::Debug::Message& message) {
- DebuggerAgent* agent = Isolate::Current()->debugger_agent_instance();
- ASSERT(agent != NULL);
- agent->DebuggerMessage(message);
-}
-
-
-// Debugger agent main thread.
-void DebuggerAgent::Run() {
- const int kOneSecondInMicros = 1000000;
-
- // Allow this socket to reuse port even if still in TIME_WAIT.
- server_->SetReuseAddress(true);
-
- // First bind the socket to the requested port.
- bool bound = false;
- while (!bound && !terminate_) {
- bound = server_->Bind(port_);
-
- // If an error occurred wait a bit before retrying. The most common error
- // would be that the port is already in use so this avoids a busy loop and
- // make the agent take over the port when it becomes free.
- if (!bound) {
- PrintF("Failed to open socket on port %d, "
- "waiting %d ms before retrying\n", port_, kOneSecondInMicros / 1000);
- terminate_now_->Wait(kOneSecondInMicros);
- }
- }
-
- // Accept connections on the bound port.
- while (!terminate_) {
- bool ok = server_->Listen(1);
- listening_->Signal();
- if (ok) {
- // Accept the new connection.
- Socket* client = server_->Accept();
- ok = client != NULL;
- if (ok) {
- // Create and start a new session.
- CreateSession(client);
- }
- }
- }
-}
-
-
-void DebuggerAgent::Shutdown() {
- // Set the termination flag.
- terminate_ = true;
-
- // Signal termination and make the server exit either its listen call or its
- // binding loop. This makes sure that no new sessions can be established.
- terminate_now_->Signal();
- server_->Shutdown();
- Join();
-
- // Close existing session if any.
- CloseSession();
-}
-
-
-void DebuggerAgent::WaitUntilListening() {
- listening_->Wait();
-}
-
-static const char* kCreateSessionMessage =
- "Remote debugging session already active\r\n";
-
-void DebuggerAgent::CreateSession(Socket* client) {
- ScopedLock with(session_access_);
-
- // If another session is already established terminate this one.
- if (session_ != NULL) {
- client->Send(kCreateSessionMessage, StrLength(kCreateSessionMessage));
- delete client;
- return;
- }
-
- // Create a new session and hook up the debug message handler.
- session_ = new DebuggerAgentSession(this, client);
- isolate_->debugger()->SetMessageHandler(DebuggerAgentMessageHandler);
- session_->Start();
-}
-
-
-void DebuggerAgent::CloseSession() {
- ScopedLock with(session_access_);
-
- // Terminate the session.
- if (session_ != NULL) {
- session_->Shutdown();
- session_->Join();
- delete session_;
- session_ = NULL;
- }
-}
-
-
-void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) {
- ScopedLock with(session_access_);
-
- // Forward the message handling to the session.
- if (session_ != NULL) {
- v8::String::Value val(message.GetJSON());
- session_->DebuggerMessage(Vector<uint16_t>(const_cast<uint16_t*>(*val),
- val.length()));
- }
-}
-
-
-void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) {
- // Don't do anything during termination.
- if (terminate_) {
- return;
- }
-
- // Terminate the session.
- ScopedLock with(session_access_);
- ASSERT(session == session_);
- if (session == session_) {
- session_->Shutdown();
- delete session_;
- session_ = NULL;
- }
-}
-
-
-void DebuggerAgentSession::Run() {
- // Send the hello message.
- bool ok = DebuggerAgentUtil::SendConnectMessage(client_, *agent_->name_);
- if (!ok) return;
-
- while (true) {
- // Read data from the debugger front end.
- SmartArrayPointer<char> message =
- DebuggerAgentUtil::ReceiveMessage(client_);
-
- const char* msg = *message;
- bool is_closing_session = (msg == NULL);
-
- if (msg == NULL) {
- // If we lost the connection, then simulate a disconnect msg:
- msg = "{\"seq\":1,\"type\":\"request\",\"command\":\"disconnect\"}";
-
- } else {
- // Check if we're getting a disconnect request:
- const char* disconnectRequestStr =
- "\"type\":\"request\",\"command\":\"disconnect\"}";
- const char* result = strstr(msg, disconnectRequestStr);
- if (result != NULL) {
- is_closing_session = true;
- }
- }
-
- // Convert UTF-8 to UTF-16.
- unibrow::Utf8Decoder<128> decoder(msg, StrLength(msg));
- int utf16_length = decoder.Utf16Length();
- ScopedVector<uint16_t> temp(utf16_length + 1);
- decoder.WriteUtf16(temp.start(), utf16_length);
-
- // Send the request received to the debugger.
- v8::Debug::SendCommand(temp.start(),
- utf16_length,
- NULL,
- reinterpret_cast<v8::Isolate*>(agent_->isolate()));
-
- if (is_closing_session) {
- // Session is closed.
- agent_->OnSessionClosed(this);
- return;
- }
- }
-}
-
-
-void DebuggerAgentSession::DebuggerMessage(Vector<uint16_t> message) {
- DebuggerAgentUtil::SendMessage(client_, message);
-}
-
-
-void DebuggerAgentSession::Shutdown() {
- // Shutdown the socket to end the blocking receive.
- client_->Shutdown();
-}
-
-
-const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
-
-
-SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
- int received;
-
- // Read header.
- int content_length = 0;
- while (true) {
- const int kHeaderBufferSize = 80;
- char header_buffer[kHeaderBufferSize];
- int header_buffer_position = 0;
- char c = '\0'; // One character receive buffer.
- char prev_c = '\0'; // Previous character.
-
- // Read until CRLF.
- while (!(c == '\n' && prev_c == '\r')) {
- prev_c = c;
- received = conn->Receive(&c, 1);
- if (received == 0) {
- PrintF("Error %d\n", Socket::LastError());
- return SmartArrayPointer<char>();
- }
-
- // Add character to header buffer.
- if (header_buffer_position < kHeaderBufferSize) {
- header_buffer[header_buffer_position++] = c;
- }
- }
-
- // Check for end of header (empty header line).
- if (header_buffer_position == 2) { // Receive buffer contains CRLF.
- break;
- }
-
- // Terminate header.
- ASSERT(header_buffer_position > 1); // At least CRLF is received.
- ASSERT(header_buffer_position <= kHeaderBufferSize);
- header_buffer[header_buffer_position - 2] = '\0';
-
- // Split header.
- char* key = header_buffer;
- char* value = NULL;
- for (int i = 0; header_buffer[i] != '\0'; i++) {
- if (header_buffer[i] == ':') {
- header_buffer[i] = '\0';
- value = header_buffer + i + 1;
- while (*value == ' ') {
- value++;
- }
- break;
- }
- }
-
- // Check that key is Content-Length.
- if (strcmp(key, kContentLength) == 0) {
- // Get the content length value if present and within a sensible range.
- if (value == NULL || strlen(value) > 7) {
- return SmartArrayPointer<char>();
- }
- for (int i = 0; value[i] != '\0'; i++) {
- // Bail out if illegal data.
- if (value[i] < '0' || value[i] > '9') {
- return SmartArrayPointer<char>();
- }
- content_length = 10 * content_length + (value[i] - '0');
- }
- } else {
- // For now just print all other headers than Content-Length.
- PrintF("%s: %s\n", key, value != NULL ? value : "(no value)");
- }
- }
-
- // Return now if no body.
- if (content_length == 0) {
- return SmartArrayPointer<char>();
- }
-
- // Read body.
- char* buffer = NewArray<char>(content_length + 1);
- received = ReceiveAll(conn, buffer, content_length);
- if (received < content_length) {
- PrintF("Error %d\n", Socket::LastError());
- return SmartArrayPointer<char>();
- }
- buffer[content_length] = '\0';
-
- return SmartArrayPointer<char>(buffer);
-}
-
-
-bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
- const char* embedding_host) {
- static const int kBufferSize = 80;
- char buffer[kBufferSize]; // Sending buffer.
- bool ok;
- int len;
-
- // Send the header.
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Type: connect\r\n");
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "V8-Version: %s\r\n", v8::V8::GetVersion());
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Protocol-Version: 1\r\n");
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- if (embedding_host != NULL) {
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Embedding-Host: %s\r\n", embedding_host);
- ok = conn->Send(buffer, len);
- if (!ok) return false;
- }
-
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "%s: 0\r\n", kContentLength);
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- // Terminate header with empty line.
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- // No body for connect message.
-
- return true;
-}
-
-
-bool DebuggerAgentUtil::SendMessage(const Socket* conn,
- const Vector<uint16_t> message) {
- static const int kBufferSize = 80;
- char buffer[kBufferSize]; // Sending buffer both for header and body.
-
- // Calculate the message size in UTF-8 encoding.
- int utf8_len = 0;
- int previous = unibrow::Utf16::kNoPreviousCharacter;
- for (int i = 0; i < message.length(); i++) {
- uint16_t character = message[i];
- utf8_len += unibrow::Utf8::Length(character, previous);
- previous = character;
- }
-
- // Send the header.
- int len;
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "%s: %d\r\n", kContentLength, utf8_len);
- conn->Send(buffer, len);
-
- // Terminate header with empty line.
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- conn->Send(buffer, len);
-
- // Send message body as UTF-8.
- int buffer_position = 0; // Current buffer position.
- previous = unibrow::Utf16::kNoPreviousCharacter;
- for (int i = 0; i < message.length(); i++) {
- // Write next UTF-8 encoded character to buffer.
- uint16_t character = message[i];
- buffer_position +=
- unibrow::Utf8::Encode(buffer + buffer_position, character, previous);
- ASSERT(buffer_position <= kBufferSize);
-
- // Send buffer if full or last character is encoded.
- if (kBufferSize - buffer_position <
- unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit ||
- i == message.length() - 1) {
- if (unibrow::Utf16::IsLeadSurrogate(character)) {
- const int kEncodedSurrogateLength =
- unibrow::Utf16::kUtf8BytesToCodeASurrogate;
- ASSERT(buffer_position >= kEncodedSurrogateLength);
- conn->Send(buffer, buffer_position - kEncodedSurrogateLength);
- for (int i = 0; i < kEncodedSurrogateLength; i++) {
- buffer[i] = buffer[buffer_position + i];
- }
- buffer_position = kEncodedSurrogateLength;
- } else {
- conn->Send(buffer, buffer_position);
- buffer_position = 0;
- }
- }
- previous = character;
- }
-
- return true;
-}
-
-
-bool DebuggerAgentUtil::SendMessage(const Socket* conn,
- const v8::Handle<v8::String> request) {
- static const int kBufferSize = 80;
- char buffer[kBufferSize]; // Sending buffer both for header and body.
-
- // Convert the request to UTF-8 encoding.
- v8::String::Utf8Value utf8_request(request);
-
- // Send the header.
- int len;
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Content-Length: %d\r\n", utf8_request.length());
- conn->Send(buffer, len);
-
- // Terminate header with empty line.
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- conn->Send(buffer, len);
-
- // Send message body as UTF-8.
- conn->Send(*utf8_request, utf8_request.length());
-
- return true;
-}
-
-
-// Receive the full buffer before returning unless an error occours.
-int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
- int total_received = 0;
- while (total_received < len) {
- int received = conn->Receive(data + total_received, len - total_received);
- if (received == 0) {
- return total_received;
- }
- total_received += received;
- }
- return total_received;
-}
-
-} } // namespace v8::internal
-
-#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/src/3rdparty/v8/src/debug-agent.h b/src/3rdparty/v8/src/debug-agent.h
deleted file mode 100644
index 6115190..0000000
--- a/src/3rdparty/v8/src/debug-agent.h
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DEBUG_AGENT_H_
-#define V8_DEBUG_AGENT_H_
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#include "../include/v8-debug.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward decelrations.
-class DebuggerAgentSession;
-
-
-// Debugger agent which starts a socket listener on the debugger port and
-// handles connection from a remote debugger.
-class DebuggerAgent: public Thread {
- public:
- DebuggerAgent(const char* name, int port)
- : Thread(name),
- isolate_(Isolate::Current()),
- name_(StrDup(name)), port_(port),
- server_(OS::CreateSocket()), terminate_(false),
- session_access_(OS::CreateMutex()), session_(NULL),
- terminate_now_(OS::CreateSemaphore(0)),
- listening_(OS::CreateSemaphore(0)) {
- ASSERT(isolate_->debugger_agent_instance() == NULL);
- isolate_->set_debugger_agent_instance(this);
- }
- ~DebuggerAgent() {
- isolate_->set_debugger_agent_instance(NULL);
- delete server_;
- }
-
- void Shutdown();
- void WaitUntilListening();
-
- Isolate* isolate() { return isolate_; }
-
- private:
- void Run();
- void CreateSession(Socket* socket);
- void DebuggerMessage(const v8::Debug::Message& message);
- void CloseSession();
- void OnSessionClosed(DebuggerAgentSession* session);
-
- Isolate* isolate_;
- SmartArrayPointer<const char> name_; // Name of the embedding application.
- int port_; // Port to use for the agent.
- Socket* server_; // Server socket for listen/accept.
- bool terminate_; // Termination flag.
- Mutex* session_access_; // Mutex guarging access to session_.
- DebuggerAgentSession* session_; // Current active session if any.
- Semaphore* terminate_now_; // Semaphore to signal termination.
- Semaphore* listening_;
-
- friend class DebuggerAgentSession;
- friend void DebuggerAgentMessageHandler(const v8::Debug::Message& message);
-
- DISALLOW_COPY_AND_ASSIGN(DebuggerAgent);
-};
-
-
-// Debugger agent session. The session receives requests from the remote
-// debugger and sends debugger events/responses to the remote debugger.
-class DebuggerAgentSession: public Thread {
- public:
- DebuggerAgentSession(DebuggerAgent* agent, Socket* client)
- : Thread("v8:DbgAgntSessn"),
- agent_(agent), client_(client) {}
-
- void DebuggerMessage(Vector<uint16_t> message);
- void Shutdown();
-
- private:
- void Run();
-
- void DebuggerMessage(Vector<char> message);
-
- DebuggerAgent* agent_;
- Socket* client_;
-
- DISALLOW_COPY_AND_ASSIGN(DebuggerAgentSession);
-};
-
-
-// Utility methods factored out to be used by the D8 shell as well.
-class DebuggerAgentUtil {
- public:
- static const char* const kContentLength;
-
- static SmartArrayPointer<char> ReceiveMessage(const Socket* conn);
- static bool SendConnectMessage(const Socket* conn,
- const char* embedding_host);
- static bool SendMessage(const Socket* conn, const Vector<uint16_t> message);
- static bool SendMessage(const Socket* conn,
- const v8::Handle<v8::String> message);
- static int ReceiveAll(const Socket* conn, char* data, int len);
-};
-
-} } // namespace v8::internal
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-#endif // V8_DEBUG_AGENT_H_
diff --git a/src/3rdparty/v8/src/debug-debugger.js b/src/3rdparty/v8/src/debug-debugger.js
deleted file mode 100644
index 6c94c15..0000000
--- a/src/3rdparty/v8/src/debug-debugger.js
+++ /dev/null
@@ -1,2638 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Default number of frames to include in the response to backtrace request.
-var kDefaultBacktraceLength = 10;
-
-var Debug = {};
-
-// Regular expression to skip "crud" at the beginning of a source line which is
-// not really code. Currently the regular expression matches whitespace and
-// comments.
-var sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
-
-// Debug events which can occour in the V8 JavaScript engine. These originate
-// from the API include file debug.h.
-Debug.DebugEvent = { Break: 1,
- Exception: 2,
- NewFunction: 3,
- BeforeCompile: 4,
- AfterCompile: 5,
- ScriptCollected: 6 };
-
-// Types of exceptions that can be broken upon.
-Debug.ExceptionBreak = { Caught : 0,
- Uncaught: 1 };
-
-// The different types of steps.
-Debug.StepAction = { StepOut: 0,
- StepNext: 1,
- StepIn: 2,
- StepMin: 3,
- StepInMin: 4 };
-
-// The different types of scripts matching enum ScriptType in objects.h.
-Debug.ScriptType = { Native: 0,
- Extension: 1,
- Normal: 2 };
-
-// The different types of script compilations matching enum
-// Script::CompilationType in objects.h.
-Debug.ScriptCompilationType = { Host: 0,
- Eval: 1,
- JSON: 2 };
-
-// The different script break point types.
-Debug.ScriptBreakPointType = { ScriptId: 0,
- ScriptName: 1,
- ScriptRegExp: 2 };
-
-function ScriptTypeFlag(type) {
- return (1 << type);
-}
-
-// Globals.
-var next_response_seq = 0;
-var next_break_point_number = 1;
-var break_points = [];
-var script_break_points = [];
-var debugger_flags = {
- breakPointsActive: {
- value: true,
- getValue: function() { return this.value; },
- setValue: function(value) {
- this.value = !!value;
- %SetDisableBreak(!this.value);
- }
- },
- breakOnCaughtException: {
- getValue: function() { return Debug.isBreakOnException(); },
- setValue: function(value) {
- if (value) {
- Debug.setBreakOnException();
- } else {
- Debug.clearBreakOnException();
- }
- }
- },
- breakOnUncaughtException: {
- getValue: function() { return Debug.isBreakOnUncaughtException(); },
- setValue: function(value) {
- if (value) {
- Debug.setBreakOnUncaughtException();
- } else {
- Debug.clearBreakOnUncaughtException();
- }
- }
- },
-};
-
-
-// Create a new break point object and add it to the list of break points.
-function MakeBreakPoint(source_position, opt_script_break_point) {
- var break_point = new BreakPoint(source_position, opt_script_break_point);
- break_points.push(break_point);
- return break_point;
-}
-
-
-// Object representing a break point.
-// NOTE: This object does not have a reference to the function having break
-// point as this would cause function not to be garbage collected when it is
-// not used any more. We do not want break points to keep functions alive.
-function BreakPoint(source_position, opt_script_break_point) {
- this.source_position_ = source_position;
- if (opt_script_break_point) {
- this.script_break_point_ = opt_script_break_point;
- } else {
- this.number_ = next_break_point_number++;
- }
- this.hit_count_ = 0;
- this.active_ = true;
- this.condition_ = null;
- this.ignoreCount_ = 0;
-}
-
-
-BreakPoint.prototype.number = function() {
- return this.number_;
-};
-
-
-BreakPoint.prototype.func = function() {
- return this.func_;
-};
-
-
-BreakPoint.prototype.source_position = function() {
- return this.source_position_;
-};
-
-
-BreakPoint.prototype.hit_count = function() {
- return this.hit_count_;
-};
-
-
-BreakPoint.prototype.active = function() {
- if (this.script_break_point()) {
- return this.script_break_point().active();
- }
- return this.active_;
-};
-
-
-BreakPoint.prototype.condition = function() {
- if (this.script_break_point() && this.script_break_point().condition()) {
- return this.script_break_point().condition();
- }
- return this.condition_;
-};
-
-
-BreakPoint.prototype.ignoreCount = function() {
- return this.ignoreCount_;
-};
-
-
-BreakPoint.prototype.script_break_point = function() {
- return this.script_break_point_;
-};
-
-
-BreakPoint.prototype.enable = function() {
- this.active_ = true;
-};
-
-
-BreakPoint.prototype.disable = function() {
- this.active_ = false;
-};
-
-
-BreakPoint.prototype.setCondition = function(condition) {
- this.condition_ = condition;
-};
-
-
-BreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
- this.ignoreCount_ = ignoreCount;
-};
-
-
-BreakPoint.prototype.isTriggered = function(exec_state) {
- // Break point not active - not triggered.
- if (!this.active()) return false;
-
- // Check for conditional break point.
- if (this.condition()) {
- // If break point has condition try to evaluate it in the top frame.
- try {
- var mirror = exec_state.frame(0).evaluate(this.condition());
- // If no sensible mirror or non true value break point not triggered.
- if (!(mirror instanceof ValueMirror) || !%ToBoolean(mirror.value_)) {
- return false;
- }
- } catch (e) {
- // Exception evaluating condition counts as not triggered.
- return false;
- }
- }
-
- // Update the hit count.
- this.hit_count_++;
- if (this.script_break_point_) {
- this.script_break_point_.hit_count_++;
- }
-
- // If the break point has an ignore count it is not triggered.
- if (this.ignoreCount_ > 0) {
- this.ignoreCount_--;
- return false;
- }
-
- // Break point triggered.
- return true;
-};
-
-
-// Function called from the runtime when a break point is hit. Returns true if
-// the break point is triggered and supposed to break execution.
-function IsBreakPointTriggered(break_id, break_point) {
- return break_point.isTriggered(MakeExecutionState(break_id));
-}
-
-
-// Object representing a script break point. The script is referenced by its
-// script name or script id and the break point is represented as line and
-// column.
-function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId) {
- this.type_ = type;
- if (type == Debug.ScriptBreakPointType.ScriptId) {
- this.script_id_ = script_id_or_name;
- } else if (type == Debug.ScriptBreakPointType.ScriptName) {
- this.script_name_ = script_id_or_name;
- } else if (type == Debug.ScriptBreakPointType.ScriptRegExp) {
- this.script_regexp_object_ = new RegExp(script_id_or_name);
- } else {
- throw new Error("Unexpected breakpoint type " + type);
- }
- this.line_ = opt_line || 0;
- this.column_ = opt_column;
- this.groupId_ = opt_groupId;
- this.hit_count_ = 0;
- this.active_ = true;
- this.condition_ = null;
- this.ignoreCount_ = 0;
- this.break_points_ = [];
-}
-
-
-//Creates a clone of script breakpoint that is linked to another script.
-ScriptBreakPoint.prototype.cloneForOtherScript = function (other_script) {
- var copy = new ScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
- other_script.id, this.line_, this.column_, this.groupId_);
- copy.number_ = next_break_point_number++;
- script_break_points.push(copy);
-
- copy.hit_count_ = this.hit_count_;
- copy.active_ = this.active_;
- copy.condition_ = this.condition_;
- copy.ignoreCount_ = this.ignoreCount_;
- return copy;
-};
-
-
-ScriptBreakPoint.prototype.number = function() {
- return this.number_;
-};
-
-
-ScriptBreakPoint.prototype.groupId = function() {
- return this.groupId_;
-};
-
-
-ScriptBreakPoint.prototype.type = function() {
- return this.type_;
-};
-
-
-ScriptBreakPoint.prototype.script_id = function() {
- return this.script_id_;
-};
-
-
-ScriptBreakPoint.prototype.script_name = function() {
- return this.script_name_;
-};
-
-
-ScriptBreakPoint.prototype.script_regexp_object = function() {
- return this.script_regexp_object_;
-};
-
-
-ScriptBreakPoint.prototype.line = function() {
- return this.line_;
-};
-
-
-ScriptBreakPoint.prototype.column = function() {
- return this.column_;
-};
-
-
-ScriptBreakPoint.prototype.actual_locations = function() {
- var locations = [];
- for (var i = 0; i < this.break_points_.length; i++) {
- locations.push(this.break_points_[i].actual_location);
- }
- return locations;
-};
-
-
-ScriptBreakPoint.prototype.update_positions = function(line, column) {
- this.line_ = line;
- this.column_ = column;
-};
-
-
-ScriptBreakPoint.prototype.hit_count = function() {
- return this.hit_count_;
-};
-
-
-ScriptBreakPoint.prototype.active = function() {
- return this.active_;
-};
-
-
-ScriptBreakPoint.prototype.condition = function() {
- return this.condition_;
-};
-
-
-ScriptBreakPoint.prototype.ignoreCount = function() {
- return this.ignoreCount_;
-};
-
-
-ScriptBreakPoint.prototype.enable = function() {
- this.active_ = true;
-};
-
-
-ScriptBreakPoint.prototype.disable = function() {
- this.active_ = false;
-};
-
-
-ScriptBreakPoint.prototype.setCondition = function(condition) {
- this.condition_ = condition;
-};
-
-
-ScriptBreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
- this.ignoreCount_ = ignoreCount;
-
- // Set ignore count on all break points created from this script break point.
- for (var i = 0; i < this.break_points_.length; i++) {
- this.break_points_[i].setIgnoreCount(ignoreCount);
- }
-};
-
-
-// Check whether a script matches this script break point. Currently this is
-// only based on script name.
-ScriptBreakPoint.prototype.matchesScript = function(script) {
- if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
- return this.script_id_ == script.id;
- } else {
- // We might want to account columns here as well.
- if (!(script.line_offset <= this.line_ &&
- this.line_ < script.line_offset + script.lineCount())) {
- return false;
- }
- if (this.type_ == Debug.ScriptBreakPointType.ScriptName) {
- return this.script_name_ == script.nameOrSourceURL();
- } else if (this.type_ == Debug.ScriptBreakPointType.ScriptRegExp) {
- return this.script_regexp_object_.test(script.nameOrSourceURL());
- } else {
- throw new Error("Unexpected breakpoint type " + this.type_);
- }
- }
-};
-
-
-// Set the script break point in a script.
-ScriptBreakPoint.prototype.set = function (script) {
- var column = this.column();
- var line = this.line();
- // If the column is undefined the break is on the line. To help locate the
- // first piece of breakable code on the line try to find the column on the
- // line which contains some source.
- if (IS_UNDEFINED(column)) {
- var source_line = script.sourceLine(this.line());
-
- // Allocate array for caching the columns where the actual source starts.
- if (!script.sourceColumnStart_) {
- script.sourceColumnStart_ = new Array(script.lineCount());
- }
-
- // Fill cache if needed and get column where the actual source starts.
- if (IS_UNDEFINED(script.sourceColumnStart_[line])) {
- script.sourceColumnStart_[line] =
- source_line.match(sourceLineBeginningSkip)[0].length;
- }
- column = script.sourceColumnStart_[line];
- }
-
- // Convert the line and column into an absolute position within the script.
- var position = Debug.findScriptSourcePosition(script, this.line(), column);
-
- // If the position is not found in the script (the script might be shorter
- // than it used to be) just ignore it.
- if (position === null) return;
-
- // Create a break point object and set the break point.
- break_point = MakeBreakPoint(position, this);
- break_point.setIgnoreCount(this.ignoreCount());
- var actual_position = %SetScriptBreakPoint(script, position, break_point);
- if (IS_UNDEFINED(actual_position)) {
- actual_position = position;
- }
- var actual_location = script.locationFromPosition(actual_position, true);
- // Check for any relocation and compare it with the breakpoint_relocation flag
- if (actual_location.line != line && !%AllowBreakPointRelocation()) {
- %ClearBreakPoint(break_point);
- return;
- }
- break_point.actual_location = { line: actual_location.line,
- column: actual_location.column,
- script_id: script.id };
- this.break_points_.push(break_point);
- return break_point;
-};
-
-
-// Clear all the break points created from this script break point
-ScriptBreakPoint.prototype.clear = function () {
- var remaining_break_points = [];
- for (var i = 0; i < break_points.length; i++) {
- if (break_points[i].script_break_point() &&
- break_points[i].script_break_point() === this) {
- %ClearBreakPoint(break_points[i]);
- } else {
- remaining_break_points.push(break_points[i]);
- }
- }
- break_points = remaining_break_points;
- this.break_points_ = [];
-};
-
-
-// Function called from runtime when a new script is compiled to set any script
-// break points set in this script.
-function UpdateScriptBreakPoints(script) {
- for (var i = 0; i < script_break_points.length; i++) {
- var break_point = script_break_points[i];
- if ((break_point.type() == Debug.ScriptBreakPointType.ScriptName ||
- break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) &&
- break_point.matchesScript(script)) {
- break_point.set(script);
- }
- }
-}
-
-
-function GetScriptBreakPoints(script) {
- var result = [];
- for (var i = 0; i < script_break_points.length; i++) {
- if (script_break_points[i].matchesScript(script)) {
- result.push(script_break_points[i]);
- }
- }
- return result;
-}
-
-
-Debug.setListener = function(listener, opt_data) {
- if (!IS_FUNCTION(listener) && !IS_UNDEFINED(listener) && !IS_NULL(listener)) {
- throw new Error('Parameters have wrong types.');
- }
- %SetDebugEventListener(listener, opt_data);
-};
-
-
-Debug.breakExecution = function(f) {
- %Break();
-};
-
-Debug.breakLocations = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %GetBreakLocations(f);
-};
-
-// Returns a Script object. If the parameter is a function the return value
-// is the script in which the function is defined. If the parameter is a string
-// the return value is the script for which the script name has that string
-// value. If it is a regexp and there is a unique script whose name matches
-// we return that, otherwise undefined.
-Debug.findScript = function(func_or_script_name) {
- if (IS_FUNCTION(func_or_script_name)) {
- return %FunctionGetScript(func_or_script_name);
- } else if (IS_REGEXP(func_or_script_name)) {
- var scripts = Debug.scripts();
- var last_result = null;
- var result_count = 0;
- for (var i in scripts) {
- var script = scripts[i];
- if (func_or_script_name.test(script.name)) {
- last_result = script;
- result_count++;
- }
- }
- // Return the unique script matching the regexp. If there are more
- // than one we don't return a value since there is no good way to
- // decide which one to return. Returning a "random" one, say the
- // first, would introduce nondeterminism (or something close to it)
- // because the order is the heap iteration order.
- if (result_count == 1) {
- return last_result;
- } else {
- return undefined;
- }
- } else {
- return %GetScript(func_or_script_name);
- }
-};
-
-// Returns the script source. If the parameter is a function the return value
-// is the script source for the script in which the function is defined. If the
-// parameter is a string the return value is the script for which the script
-// name has that string value.
-Debug.scriptSource = function(func_or_script_name) {
- return this.findScript(func_or_script_name).source;
-};
-
-Debug.source = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %FunctionGetSourceCode(f);
-};
-
-Debug.disassemble = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %DebugDisassembleFunction(f);
-};
-
-Debug.disassembleConstructor = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %DebugDisassembleConstructor(f);
-};
-
-Debug.ExecuteInDebugContext = function(f, without_debugger) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %ExecuteInDebugContext(f, !!without_debugger);
-};
-
-Debug.sourcePosition = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %FunctionGetScriptSourcePosition(f);
-};
-
-
-Debug.findFunctionSourceLocation = function(func, opt_line, opt_column) {
- var script = %FunctionGetScript(func);
- var script_offset = %FunctionGetScriptSourcePosition(func);
- return script.locationFromLine(opt_line, opt_column, script_offset);
-};
-
-
-// Returns the character position in a script based on a line number and an
-// optional position within that line.
-Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
- var location = script.locationFromLine(opt_line, opt_column);
- return location ? location.position : null;
-};
-
-
-Debug.findBreakPoint = function(break_point_number, remove) {
- var break_point;
- for (var i = 0; i < break_points.length; i++) {
- if (break_points[i].number() == break_point_number) {
- break_point = break_points[i];
- // Remove the break point from the list if requested.
- if (remove) {
- break_points.splice(i, 1);
- }
- break;
- }
- }
- if (break_point) {
- return break_point;
- } else {
- return this.findScriptBreakPoint(break_point_number, remove);
- }
-};
-
-Debug.findBreakPointActualLocations = function(break_point_number) {
- for (var i = 0; i < script_break_points.length; i++) {
- if (script_break_points[i].number() == break_point_number) {
- return script_break_points[i].actual_locations();
- }
- }
- for (var i = 0; i < break_points.length; i++) {
- if (break_points[i].number() == break_point_number) {
- return [break_points[i].actual_location];
- }
- }
- return [];
-};
-
-Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
- if (!IS_FUNCTION(func)) throw new Error('Parameters have wrong types.');
- // Break points in API functions are not supported.
- if (%FunctionIsAPIFunction(func)) {
- throw new Error('Cannot set break point in native code.');
- }
- // Find source position relative to start of the function
- var break_position =
- this.findFunctionSourceLocation(func, opt_line, opt_column).position;
- var source_position = break_position - this.sourcePosition(func);
- // Find the script for the function.
- var script = %FunctionGetScript(func);
- // Break in builtin JavaScript code is not supported.
- if (script.type == Debug.ScriptType.Native) {
- throw new Error('Cannot set break point in native code.');
- }
- // If the script for the function has a name convert this to a script break
- // point.
- if (script && script.id) {
- // Adjust the source position to be script relative.
- source_position += %FunctionGetScriptSourcePosition(func);
- // Find line and column for the position in the script and set a script
- // break point from that.
- var location = script.locationFromPosition(source_position, false);
- return this.setScriptBreakPointById(script.id,
- location.line, location.column,
- opt_condition);
- } else {
- // Set a break point directly on the function.
- var break_point = MakeBreakPoint(source_position);
- var actual_position =
- %SetFunctionBreakPoint(func, source_position, break_point);
- actual_position += this.sourcePosition(func);
- var actual_location = script.locationFromPosition(actual_position, true);
- break_point.actual_location = { line: actual_location.line,
- column: actual_location.column,
- script_id: script.id };
- break_point.setCondition(opt_condition);
- return break_point.number();
- }
-};
-
-
-Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
- condition, enabled)
-{
- break_point = MakeBreakPoint(position);
- break_point.setCondition(condition);
- if (!enabled) {
- break_point.disable();
- }
- var scripts = this.scripts();
- for (var i = 0; i < scripts.length; i++) {
- if (script_id == scripts[i].id) {
- break_point.actual_position = %SetScriptBreakPoint(scripts[i], position,
- break_point);
- break;
- }
- }
- return break_point;
-};
-
-
-Debug.enableBreakPoint = function(break_point_number) {
- var break_point = this.findBreakPoint(break_point_number, false);
- // Only enable if the breakpoint hasn't been deleted:
- if (break_point) {
- break_point.enable();
- }
-};
-
-
-Debug.disableBreakPoint = function(break_point_number) {
- var break_point = this.findBreakPoint(break_point_number, false);
- // Only enable if the breakpoint hasn't been deleted:
- if (break_point) {
- break_point.disable();
- }
-};
-
-
-Debug.changeBreakPointCondition = function(break_point_number, condition) {
- var break_point = this.findBreakPoint(break_point_number, false);
- break_point.setCondition(condition);
-};
-
-
-Debug.changeBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
- if (ignoreCount < 0) {
- throw new Error('Invalid argument');
- }
- var break_point = this.findBreakPoint(break_point_number, false);
- break_point.setIgnoreCount(ignoreCount);
-};
-
-
-Debug.clearBreakPoint = function(break_point_number) {
- var break_point = this.findBreakPoint(break_point_number, true);
- if (break_point) {
- return %ClearBreakPoint(break_point);
- } else {
- break_point = this.findScriptBreakPoint(break_point_number, true);
- if (!break_point) {
- throw new Error('Invalid breakpoint');
- }
- }
-};
-
-
-Debug.clearAllBreakPoints = function() {
- for (var i = 0; i < break_points.length; i++) {
- break_point = break_points[i];
- %ClearBreakPoint(break_point);
- }
- break_points = [];
-};
-
-
-Debug.disableAllBreakPoints = function() {
- // Disable all user defined breakpoints:
- for (var i = 1; i < next_break_point_number; i++) {
- Debug.disableBreakPoint(i);
- }
- // Disable all exception breakpoints:
- %ChangeBreakOnException(Debug.ExceptionBreak.Caught, false);
- %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
-};
-
-
-Debug.findScriptBreakPoint = function(break_point_number, remove) {
- var script_break_point;
- for (var i = 0; i < script_break_points.length; i++) {
- if (script_break_points[i].number() == break_point_number) {
- script_break_point = script_break_points[i];
- // Remove the break point from the list if requested.
- if (remove) {
- script_break_point.clear();
- script_break_points.splice(i,1);
- }
- break;
- }
- }
- return script_break_point;
-};
-
-
-// Sets a breakpoint in a script identified through id or name at the
-// specified source line and column within that line.
-Debug.setScriptBreakPoint = function(type, script_id_or_name,
- opt_line, opt_column, opt_condition,
- opt_groupId) {
- // Create script break point object.
- var script_break_point =
- new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId);
-
- // Assign number to the new script break point and add it.
- script_break_point.number_ = next_break_point_number++;
- script_break_point.setCondition(opt_condition);
- script_break_points.push(script_break_point);
-
- // Run through all scripts to see if this script break point matches any
- // loaded scripts.
- var scripts = this.scripts();
- for (var i = 0; i < scripts.length; i++) {
- if (script_break_point.matchesScript(scripts[i])) {
- script_break_point.set(scripts[i]);
- }
- }
-
- return script_break_point.number();
-};
-
-
-Debug.setScriptBreakPointById = function(script_id,
- opt_line, opt_column,
- opt_condition, opt_groupId) {
- return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
- script_id, opt_line, opt_column,
- opt_condition, opt_groupId);
-};
-
-
-Debug.setScriptBreakPointByName = function(script_name,
- opt_line, opt_column,
- opt_condition, opt_groupId) {
- return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName,
- script_name, opt_line, opt_column,
- opt_condition, opt_groupId);
-};
-
-
-Debug.setScriptBreakPointByRegExp = function(script_regexp,
- opt_line, opt_column,
- opt_condition, opt_groupId) {
- return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptRegExp,
- script_regexp, opt_line, opt_column,
- opt_condition, opt_groupId);
-};
-
-
-Debug.enableScriptBreakPoint = function(break_point_number) {
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.enable();
-};
-
-
-Debug.disableScriptBreakPoint = function(break_point_number) {
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.disable();
-};
-
-
-Debug.changeScriptBreakPointCondition = function(
- break_point_number, condition) {
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.setCondition(condition);
-};
-
-
-Debug.changeScriptBreakPointIgnoreCount = function(
- break_point_number, ignoreCount) {
- if (ignoreCount < 0) {
- throw new Error('Invalid argument');
- }
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.setIgnoreCount(ignoreCount);
-};
-
-
-Debug.scriptBreakPoints = function() {
- return script_break_points;
-};
-
-
-Debug.clearStepping = function() {
- %ClearStepping();
-};
-
-Debug.setBreakOnException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, true);
-};
-
-Debug.clearBreakOnException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, false);
-};
-
-Debug.isBreakOnException = function() {
- return !!%IsBreakOnException(Debug.ExceptionBreak.Caught);
-};
-
-Debug.setBreakOnUncaughtException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, true);
-};
-
-Debug.clearBreakOnUncaughtException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
-};
-
-Debug.isBreakOnUncaughtException = function() {
- return !!%IsBreakOnException(Debug.ExceptionBreak.Uncaught);
-};
-
-Debug.showBreakPoints = function(f, full) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- var source = full ? this.scriptSource(f) : this.source(f);
- var offset = full ? this.sourcePosition(f) : 0;
- var locations = this.breakLocations(f);
- if (!locations) return source;
- locations.sort(function(x, y) { return x - y; });
- var result = "";
- var prev_pos = 0;
- var pos;
- for (var i = 0; i < locations.length; i++) {
- pos = locations[i] - offset;
- result += source.slice(prev_pos, pos);
- result += "[B" + i + "]";
- prev_pos = pos;
- }
- pos = source.length;
- result += source.substring(prev_pos, pos);
- return result;
-};
-
-
-// Get all the scripts currently loaded. Locating all the scripts is based on
-// scanning the heap.
-Debug.scripts = function() {
- // Collect all scripts in the heap.
- return %DebugGetLoadedScripts();
-};
-
-
-Debug.debuggerFlags = function() {
- return debugger_flags;
-};
-
-Debug.MakeMirror = MakeMirror;
-
-function MakeExecutionState(break_id) {
- return new ExecutionState(break_id);
-}
-
-function ExecutionState(break_id) {
- this.break_id = break_id;
- this.selected_frame = 0;
-}
-
-ExecutionState.prototype.prepareStep = function(opt_action, opt_count) {
- var action = Debug.StepAction.StepIn;
- if (!IS_UNDEFINED(opt_action)) action = %ToNumber(opt_action);
- var count = opt_count ? %ToNumber(opt_count) : 1;
-
- return %PrepareStep(this.break_id, action, count);
-};
-
-ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
- opt_additional_context) {
- return MakeMirror(%DebugEvaluateGlobal(this.break_id, source,
- Boolean(disable_break),
- opt_additional_context));
-};
-
-ExecutionState.prototype.frameCount = function() {
- return %GetFrameCount(this.break_id);
-};
-
-ExecutionState.prototype.threadCount = function() {
- return %GetThreadCount(this.break_id);
-};
-
-ExecutionState.prototype.frame = function(opt_index) {
- // If no index supplied return the selected frame.
- if (opt_index == null) opt_index = this.selected_frame;
- if (opt_index < 0 || opt_index >= this.frameCount()) {
- throw new Error('Illegal frame index.');
- }
- return new FrameMirror(this.break_id, opt_index);
-};
-
-ExecutionState.prototype.setSelectedFrame = function(index) {
- var i = %ToNumber(index);
- if (i < 0 || i >= this.frameCount()) throw new Error('Illegal frame index.');
- this.selected_frame = i;
-};
-
-ExecutionState.prototype.selectedFrame = function() {
- return this.selected_frame;
-};
-
-ExecutionState.prototype.debugCommandProcessor = function(opt_is_running) {
- return new DebugCommandProcessor(this, opt_is_running);
-};
-
-
-function MakeBreakEvent(exec_state, break_points_hit) {
- return new BreakEvent(exec_state, break_points_hit);
-}
-
-
-function BreakEvent(exec_state, break_points_hit) {
- this.exec_state_ = exec_state;
- this.break_points_hit_ = break_points_hit;
-}
-
-
-BreakEvent.prototype.executionState = function() {
- return this.exec_state_;
-};
-
-
-BreakEvent.prototype.eventType = function() {
- return Debug.DebugEvent.Break;
-};
-
-
-BreakEvent.prototype.func = function() {
- return this.exec_state_.frame(0).func();
-};
-
-
-BreakEvent.prototype.sourceLine = function() {
- return this.exec_state_.frame(0).sourceLine();
-};
-
-
-BreakEvent.prototype.sourceColumn = function() {
- return this.exec_state_.frame(0).sourceColumn();
-};
-
-
-BreakEvent.prototype.sourceLineText = function() {
- return this.exec_state_.frame(0).sourceLineText();
-};
-
-
-BreakEvent.prototype.breakPointsHit = function() {
- return this.break_points_hit_;
-};
-
-
-BreakEvent.prototype.toJSONProtocol = function() {
- var o = { seq: next_response_seq++,
- type: "event",
- event: "break",
- body: { invocationText: this.exec_state_.frame(0).invocationText(),
- }
- };
-
- // Add script related information to the event if available.
- var script = this.func().script();
- if (script) {
- o.body.sourceLine = this.sourceLine(),
- o.body.sourceColumn = this.sourceColumn(),
- o.body.sourceLineText = this.sourceLineText(),
- o.body.script = MakeScriptObject_(script, false);
- }
-
- // Add an Array of break points hit if any.
- if (this.breakPointsHit()) {
- o.body.breakpoints = [];
- for (var i = 0; i < this.breakPointsHit().length; i++) {
- // Find the break point number. For break points originating from a
- // script break point supply the script break point number.
- var breakpoint = this.breakPointsHit()[i];
- var script_break_point = breakpoint.script_break_point();
- var number;
- if (script_break_point) {
- number = script_break_point.number();
- } else {
- number = breakpoint.number();
- }
- o.body.breakpoints.push(number);
- }
- }
- return JSON.stringify(ObjectToProtocolObject_(o));
-};
-
-
-function MakeExceptionEvent(exec_state, exception, uncaught) {
- return new ExceptionEvent(exec_state, exception, uncaught);
-}
-
-
-function ExceptionEvent(exec_state, exception, uncaught) {
- this.exec_state_ = exec_state;
- this.exception_ = exception;
- this.uncaught_ = uncaught;
-}
-
-
-ExceptionEvent.prototype.executionState = function() {
- return this.exec_state_;
-};
-
-
-ExceptionEvent.prototype.eventType = function() {
- return Debug.DebugEvent.Exception;
-};
-
-
-ExceptionEvent.prototype.exception = function() {
- return this.exception_;
-};
-
-
-ExceptionEvent.prototype.uncaught = function() {
- return this.uncaught_;
-};
-
-
-ExceptionEvent.prototype.func = function() {
- return this.exec_state_.frame(0).func();
-};
-
-
-ExceptionEvent.prototype.sourceLine = function() {
- return this.exec_state_.frame(0).sourceLine();
-};
-
-
-ExceptionEvent.prototype.sourceColumn = function() {
- return this.exec_state_.frame(0).sourceColumn();
-};
-
-
-ExceptionEvent.prototype.sourceLineText = function() {
- return this.exec_state_.frame(0).sourceLineText();
-};
-
-
-ExceptionEvent.prototype.toJSONProtocol = function() {
- var o = new ProtocolMessage();
- o.event = "exception";
- o.body = { uncaught: this.uncaught_,
- exception: MakeMirror(this.exception_)
- };
-
- // Exceptions might happen whithout any JavaScript frames.
- if (this.exec_state_.frameCount() > 0) {
- o.body.sourceLine = this.sourceLine();
- o.body.sourceColumn = this.sourceColumn();
- o.body.sourceLineText = this.sourceLineText();
-
- // Add script information to the event if available.
- var script = this.func().script();
- if (script) {
- o.body.script = MakeScriptObject_(script, false);
- }
- } else {
- o.body.sourceLine = -1;
- }
-
- return o.toJSONProtocol();
-};
-
-
-function MakeCompileEvent(exec_state, script, before) {
- return new CompileEvent(exec_state, script, before);
-}
-
-
-function CompileEvent(exec_state, script, before) {
- this.exec_state_ = exec_state;
- this.script_ = MakeMirror(script);
- this.before_ = before;
-}
-
-
-CompileEvent.prototype.executionState = function() {
- return this.exec_state_;
-};
-
-
-CompileEvent.prototype.eventType = function() {
- if (this.before_) {
- return Debug.DebugEvent.BeforeCompile;
- } else {
- return Debug.DebugEvent.AfterCompile;
- }
-};
-
-
-CompileEvent.prototype.script = function() {
- return this.script_;
-};
-
-
-CompileEvent.prototype.toJSONProtocol = function() {
- var o = new ProtocolMessage();
- o.running = true;
- if (this.before_) {
- o.event = "beforeCompile";
- } else {
- o.event = "afterCompile";
- }
- o.body = {};
- o.body.script = this.script_;
-
- return o.toJSONProtocol();
-};
-
-
-function MakeNewFunctionEvent(func) {
- return new NewFunctionEvent(func);
-}
-
-
-function NewFunctionEvent(func) {
- this.func = func;
-}
-
-
-NewFunctionEvent.prototype.eventType = function() {
- return Debug.DebugEvent.NewFunction;
-};
-
-
-NewFunctionEvent.prototype.name = function() {
- return this.func.name;
-};
-
-
-NewFunctionEvent.prototype.setBreakPoint = function(p) {
- Debug.setBreakPoint(this.func, p || 0);
-};
-
-
-function MakeScriptCollectedEvent(exec_state, id) {
- return new ScriptCollectedEvent(exec_state, id);
-}
-
-
-function ScriptCollectedEvent(exec_state, id) {
- this.exec_state_ = exec_state;
- this.id_ = id;
-}
-
-
-ScriptCollectedEvent.prototype.id = function() {
- return this.id_;
-};
-
-
-ScriptCollectedEvent.prototype.executionState = function() {
- return this.exec_state_;
-};
-
-
-ScriptCollectedEvent.prototype.toJSONProtocol = function() {
- var o = new ProtocolMessage();
- o.running = true;
- o.event = "scriptCollected";
- o.body = {};
- o.body.script = { id: this.id() };
- return o.toJSONProtocol();
-};
-
-
-function MakeScriptObject_(script, include_source) {
- var o = { id: script.id(),
- name: script.name(),
- lineOffset: script.lineOffset(),
- columnOffset: script.columnOffset(),
- lineCount: script.lineCount(),
- };
- if (!IS_UNDEFINED(script.data())) {
- o.data = script.data();
- }
- if (include_source) {
- o.source = script.source();
- }
- return o;
-}
-
-
-function DebugCommandProcessor(exec_state, opt_is_running) {
- this.exec_state_ = exec_state;
- this.running_ = opt_is_running || false;
-}
-
-
-DebugCommandProcessor.prototype.processDebugRequest = function (request) {
- return this.processDebugJSONRequest(request);
-};
-
-
-function ProtocolMessage(request) {
- // Update sequence number.
- this.seq = next_response_seq++;
-
- if (request) {
- // If message is based on a request this is a response. Fill the initial
- // response from the request.
- this.type = 'response';
- this.request_seq = request.seq;
- this.command = request.command;
- } else {
- // If message is not based on a request it is a dabugger generated event.
- this.type = 'event';
- }
- this.success = true;
- // Handler may set this field to control debugger state.
- this.running = undefined;
-}
-
-
-ProtocolMessage.prototype.setOption = function(name, value) {
- if (!this.options_) {
- this.options_ = {};
- }
- this.options_[name] = value;
-};
-
-
-ProtocolMessage.prototype.failed = function(message, opt_details) {
- this.success = false;
- this.message = message;
- if (IS_OBJECT(opt_details)) {
- this.error_details = opt_details;
- }
-};
-
-
-ProtocolMessage.prototype.toJSONProtocol = function() {
- // Encode the protocol header.
- var json = {};
- json.seq= this.seq;
- if (this.request_seq) {
- json.request_seq = this.request_seq;
- }
- json.type = this.type;
- if (this.event) {
- json.event = this.event;
- }
- if (this.command) {
- json.command = this.command;
- }
- if (this.success) {
- json.success = this.success;
- } else {
- json.success = false;
- }
- if (this.body) {
- // Encode the body part.
- var bodyJson;
- var serializer = MakeMirrorSerializer(true, this.options_);
- if (this.body instanceof Mirror) {
- bodyJson = serializer.serializeValue(this.body);
- } else if (this.body instanceof Array) {
- bodyJson = [];
- for (var i = 0; i < this.body.length; i++) {
- if (this.body[i] instanceof Mirror) {
- bodyJson.push(serializer.serializeValue(this.body[i]));
- } else {
- bodyJson.push(ObjectToProtocolObject_(this.body[i], serializer));
- }
- }
- } else {
- bodyJson = ObjectToProtocolObject_(this.body, serializer);
- }
- json.body = bodyJson;
- json.refs = serializer.serializeReferencedObjects();
- }
- if (this.message) {
- json.message = this.message;
- }
- if (this.error_details) {
- json.error_details = this.error_details;
- }
- json.running = this.running;
- return JSON.stringify(json);
-};
-
-
-DebugCommandProcessor.prototype.createResponse = function(request) {
- return new ProtocolMessage(request);
-};
-
-
-DebugCommandProcessor.prototype.processDebugJSONRequest = function(
- json_request) {
- var request; // Current request.
- var response; // Generated response.
- try {
- try {
- // Convert the JSON string to an object.
- request = JSON.parse(json_request);
-
- // Create an initial response.
- response = this.createResponse(request);
-
- if (!request.type) {
- throw new Error('Type not specified');
- }
-
- if (request.type != 'request') {
- throw new Error("Illegal type '" + request.type + "' in request");
- }
-
- if (!request.command) {
- throw new Error('Command not specified');
- }
-
- if (request.arguments) {
- var args = request.arguments;
- // TODO(yurys): remove request.arguments.compactFormat check once
- // ChromeDevTools are switched to 'inlineRefs'
- if (args.inlineRefs || args.compactFormat) {
- response.setOption('inlineRefs', true);
- }
- if (!IS_UNDEFINED(args.maxStringLength)) {
- response.setOption('maxStringLength', args.maxStringLength);
- }
- }
-
- if (request.command == 'continue') {
- this.continueRequest_(request, response);
- } else if (request.command == 'break') {
- this.breakRequest_(request, response);
- } else if (request.command == 'setbreakpoint') {
- this.setBreakPointRequest_(request, response);
- } else if (request.command == 'changebreakpoint') {
- this.changeBreakPointRequest_(request, response);
- } else if (request.command == 'clearbreakpoint') {
- this.clearBreakPointRequest_(request, response);
- } else if (request.command == 'clearbreakpointgroup') {
- this.clearBreakPointGroupRequest_(request, response);
- } else if (request.command == 'disconnect') {
- this.disconnectRequest_(request, response);
- } else if (request.command == 'setexceptionbreak') {
- this.setExceptionBreakRequest_(request, response);
- } else if (request.command == 'listbreakpoints') {
- this.listBreakpointsRequest_(request, response);
- } else if (request.command == 'backtrace') {
- this.backtraceRequest_(request, response);
- } else if (request.command == 'frame') {
- this.frameRequest_(request, response);
- } else if (request.command == 'scopes') {
- this.scopesRequest_(request, response);
- } else if (request.command == 'scope') {
- this.scopeRequest_(request, response);
- } else if (request.command == 'setVariableValue') {
- this.setVariableValueRequest_(request, response);
- } else if (request.command == 'evaluate') {
- this.evaluateRequest_(request, response);
- } else if (request.command == 'lookup') {
- this.lookupRequest_(request, response);
- } else if (request.command == 'references') {
- this.referencesRequest_(request, response);
- } else if (request.command == 'source') {
- this.sourceRequest_(request, response);
- } else if (request.command == 'scripts') {
- this.scriptsRequest_(request, response);
- } else if (request.command == 'threads') {
- this.threadsRequest_(request, response);
- } else if (request.command == 'suspend') {
- this.suspendRequest_(request, response);
- } else if (request.command == 'version') {
- this.versionRequest_(request, response);
- } else if (request.command == 'profile') {
- this.profileRequest_(request, response);
- } else if (request.command == 'changelive') {
- this.changeLiveRequest_(request, response);
- } else if (request.command == 'restartframe') {
- this.restartFrameRequest_(request, response);
- } else if (request.command == 'flags') {
- this.debuggerFlagsRequest_(request, response);
- } else if (request.command == 'v8flags') {
- this.v8FlagsRequest_(request, response);
-
- // GC tools:
- } else if (request.command == 'gc') {
- this.gcRequest_(request, response);
-
- } else {
- throw new Error('Unknown command "' + request.command + '" in request');
- }
- } catch (e) {
- // If there is no response object created one (without command).
- if (!response) {
- response = this.createResponse();
- }
- response.success = false;
- response.message = %ToString(e);
- }
-
- // Return the response as a JSON encoded string.
- try {
- if (!IS_UNDEFINED(response.running)) {
- // Response controls running state.
- this.running_ = response.running;
- }
- response.running = this.running_;
- return response.toJSONProtocol();
- } catch (e) {
- // Failed to generate response - return generic error.
- return '{"seq":' + response.seq + ',' +
- '"request_seq":' + request.seq + ',' +
- '"type":"response",' +
- '"success":false,' +
- '"message":"Internal error: ' + %ToString(e) + '"}';
- }
- } catch (e) {
- // Failed in one of the catch blocks above - most generic error.
- return '{"seq":0,"type":"response","success":false,"message":"Internal error"}';
- }
-};
-
-
-DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
- // Check for arguments for continue.
- if (request.arguments) {
- var count = 1;
- var action = Debug.StepAction.StepIn;
-
- // Pull out arguments.
- var stepaction = request.arguments.stepaction;
- var stepcount = request.arguments.stepcount;
-
- // Get the stepcount argument if any.
- if (stepcount) {
- count = %ToNumber(stepcount);
- if (count < 0) {
- throw new Error('Invalid stepcount argument "' + stepcount + '".');
- }
- }
-
- // Get the stepaction argument.
- if (stepaction) {
- if (stepaction == 'in') {
- action = Debug.StepAction.StepIn;
- } else if (stepaction == 'min') {
- action = Debug.StepAction.StepMin;
- } else if (stepaction == 'next') {
- action = Debug.StepAction.StepNext;
- } else if (stepaction == 'out') {
- action = Debug.StepAction.StepOut;
- } else {
- throw new Error('Invalid stepaction argument "' + stepaction + '".');
- }
- }
-
- // Set up the VM for stepping.
- this.exec_state_.prepareStep(action, count);
- }
-
- // VM should be running after executing this request.
- response.running = true;
-};
-
-
-DebugCommandProcessor.prototype.breakRequest_ = function(request, response) {
- // Ignore as break command does not do anything when broken.
-};
-
-
-DebugCommandProcessor.prototype.setBreakPointRequest_ =
- function(request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var type = request.arguments.type;
- var target = request.arguments.target;
- var line = request.arguments.line;
- var column = request.arguments.column;
- var enabled = IS_UNDEFINED(request.arguments.enabled) ?
- true : request.arguments.enabled;
- var condition = request.arguments.condition;
- var ignoreCount = request.arguments.ignoreCount;
- var groupId = request.arguments.groupId;
-
- // Check for legal arguments.
- if (!type || IS_UNDEFINED(target)) {
- response.failed('Missing argument "type" or "target"');
- return;
- }
-
- // Either function or script break point.
- var break_point_number;
- if (type == 'function') {
- // Handle function break point.
- if (!IS_STRING(target)) {
- response.failed('Argument "target" is not a string value');
- return;
- }
- var f;
- try {
- // Find the function through a global evaluate.
- f = this.exec_state_.evaluateGlobal(target).value();
- } catch (e) {
- response.failed('Error: "' + %ToString(e) +
- '" evaluating "' + target + '"');
- return;
- }
- if (!IS_FUNCTION(f)) {
- response.failed('"' + target + '" does not evaluate to a function');
- return;
- }
-
- // Set function break point.
- break_point_number = Debug.setBreakPoint(f, line, column, condition);
- } else if (type == 'handle') {
- // Find the object pointed by the specified handle.
- var handle = parseInt(target, 10);
- var mirror = LookupMirror(handle);
- if (!mirror) {
- return response.failed('Object #' + handle + '# not found');
- }
- if (!mirror.isFunction()) {
- return response.failed('Object #' + handle + '# is not a function');
- }
-
- // Set function break point.
- break_point_number = Debug.setBreakPoint(mirror.value(),
- line, column, condition);
- } else if (type == 'script') {
- // set script break point.
- break_point_number =
- Debug.setScriptBreakPointByName(target, line, column, condition,
- groupId);
- } else if (type == 'scriptId') {
- break_point_number =
- Debug.setScriptBreakPointById(target, line, column, condition, groupId);
- } else if (type == 'scriptRegExp') {
- break_point_number =
- Debug.setScriptBreakPointByRegExp(target, line, column, condition,
- groupId);
- } else {
- response.failed('Illegal type "' + type + '"');
- return;
- }
-
- // Set additional break point properties.
- var break_point = Debug.findBreakPoint(break_point_number);
- if (ignoreCount) {
- Debug.changeBreakPointIgnoreCount(break_point_number, ignoreCount);
- }
- if (!enabled) {
- Debug.disableBreakPoint(break_point_number);
- }
-
- // Add the break point number to the response.
- response.body = { type: type,
- breakpoint: break_point_number };
-
- // Add break point information to the response.
- if (break_point instanceof ScriptBreakPoint) {
- if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
- response.body.type = 'scriptId';
- response.body.script_id = break_point.script_id();
- } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
- response.body.type = 'scriptName';
- response.body.script_name = break_point.script_name();
- } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
- response.body.type = 'scriptRegExp';
- response.body.script_regexp = break_point.script_regexp_object().source;
- } else {
- throw new Error("Internal error: Unexpected breakpoint type: " +
- break_point.type());
- }
- response.body.line = break_point.line();
- response.body.column = break_point.column();
- response.body.actual_locations = break_point.actual_locations();
- } else {
- response.body.type = 'function';
- response.body.actual_locations = [break_point.actual_location];
- }
-};
-
-
-DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(
- request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var break_point = %ToNumber(request.arguments.breakpoint);
- var enabled = request.arguments.enabled;
- var condition = request.arguments.condition;
- var ignoreCount = request.arguments.ignoreCount;
-
- // Check for legal arguments.
- if (!break_point) {
- response.failed('Missing argument "breakpoint"');
- return;
- }
-
- // Change enabled state if supplied.
- if (!IS_UNDEFINED(enabled)) {
- if (enabled) {
- Debug.enableBreakPoint(break_point);
- } else {
- Debug.disableBreakPoint(break_point);
- }
- }
-
- // Change condition if supplied
- if (!IS_UNDEFINED(condition)) {
- Debug.changeBreakPointCondition(break_point, condition);
- }
-
- // Change ignore count if supplied
- if (!IS_UNDEFINED(ignoreCount)) {
- Debug.changeBreakPointIgnoreCount(break_point, ignoreCount);
- }
-};
-
-
-DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(
- request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var group_id = request.arguments.groupId;
-
- // Check for legal arguments.
- if (!group_id) {
- response.failed('Missing argument "groupId"');
- return;
- }
-
- var cleared_break_points = [];
- var new_script_break_points = [];
- for (var i = 0; i < script_break_points.length; i++) {
- var next_break_point = script_break_points[i];
- if (next_break_point.groupId() == group_id) {
- cleared_break_points.push(next_break_point.number());
- next_break_point.clear();
- } else {
- new_script_break_points.push(next_break_point);
- }
- }
- script_break_points = new_script_break_points;
-
- // Add the cleared break point numbers to the response.
- response.body = { breakpoints: cleared_break_points };
-};
-
-
-DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(
- request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var break_point = %ToNumber(request.arguments.breakpoint);
-
- // Check for legal arguments.
- if (!break_point) {
- response.failed('Missing argument "breakpoint"');
- return;
- }
-
- // Clear break point.
- Debug.clearBreakPoint(break_point);
-
- // Add the cleared break point number to the response.
- response.body = { breakpoint: break_point };
-};
-
-
-DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(
- request, response) {
- var array = [];
- for (var i = 0; i < script_break_points.length; i++) {
- var break_point = script_break_points[i];
-
- var description = {
- number: break_point.number(),
- line: break_point.line(),
- column: break_point.column(),
- groupId: break_point.groupId(),
- hit_count: break_point.hit_count(),
- active: break_point.active(),
- condition: break_point.condition(),
- ignoreCount: break_point.ignoreCount(),
- actual_locations: break_point.actual_locations()
- };
-
- if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
- description.type = 'scriptId';
- description.script_id = break_point.script_id();
- } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
- description.type = 'scriptName';
- description.script_name = break_point.script_name();
- } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
- description.type = 'scriptRegExp';
- description.script_regexp = break_point.script_regexp_object().source;
- } else {
- throw new Error("Internal error: Unexpected breakpoint type: " +
- break_point.type());
- }
- array.push(description);
- }
-
- response.body = {
- breakpoints: array,
- breakOnExceptions: Debug.isBreakOnException(),
- breakOnUncaughtExceptions: Debug.isBreakOnUncaughtException()
- };
-};
-
-
-DebugCommandProcessor.prototype.disconnectRequest_ =
- function(request, response) {
- Debug.disableAllBreakPoints();
- this.continueRequest_(request, response);
-};
-
-
-DebugCommandProcessor.prototype.setExceptionBreakRequest_ =
- function(request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out and check the 'type' argument:
- var type = request.arguments.type;
- if (!type) {
- response.failed('Missing argument "type"');
- return;
- }
-
- // Initialize the default value of enable:
- var enabled;
- if (type == 'all') {
- enabled = !Debug.isBreakOnException();
- } else if (type == 'uncaught') {
- enabled = !Debug.isBreakOnUncaughtException();
- }
-
- // Pull out and check the 'enabled' argument if present:
- if (!IS_UNDEFINED(request.arguments.enabled)) {
- enabled = request.arguments.enabled;
- if ((enabled != true) && (enabled != false)) {
- response.failed('Illegal value for "enabled":"' + enabled + '"');
- }
- }
-
- // Now set the exception break state:
- if (type == 'all') {
- %ChangeBreakOnException(Debug.ExceptionBreak.Caught, enabled);
- } else if (type == 'uncaught') {
- %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, enabled);
- } else {
- response.failed('Unknown "type":"' + type + '"');
- }
-
- // Add the cleared break point number to the response.
- response.body = { 'type': type, 'enabled': enabled };
-};
-
-
-DebugCommandProcessor.prototype.backtraceRequest_ = function(
- request, response) {
- // Get the number of frames.
- var total_frames = this.exec_state_.frameCount();
-
- // Create simple response if there are no frames.
- if (total_frames == 0) {
- response.body = {
- totalFrames: total_frames
- };
- return;
- }
-
- // Default frame range to include in backtrace.
- var from_index = 0;
- var to_index = kDefaultBacktraceLength;
-
- // Get the range from the arguments.
- if (request.arguments) {
- if (request.arguments.fromFrame) {
- from_index = request.arguments.fromFrame;
- }
- if (request.arguments.toFrame) {
- to_index = request.arguments.toFrame;
- }
- if (request.arguments.bottom) {
- var tmp_index = total_frames - from_index;
- from_index = total_frames - to_index;
- to_index = tmp_index;
- }
- if (from_index < 0 || to_index < 0) {
- return response.failed('Invalid frame number');
- }
- }
-
- // Adjust the index.
- to_index = Math.min(total_frames, to_index);
-
- if (to_index <= from_index) {
- var error = 'Invalid frame range';
- return response.failed(error);
- }
-
- // Create the response body.
- var frames = [];
- for (var i = from_index; i < to_index; i++) {
- frames.push(this.exec_state_.frame(i));
- }
- response.body = {
- fromFrame: from_index,
- toFrame: to_index,
- totalFrames: total_frames,
- frames: frames
- };
-};
-
-
-DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
- // No frames no source.
- if (this.exec_state_.frameCount() == 0) {
- return response.failed('No frames');
- }
-
- // With no arguments just keep the selected frame.
- if (request.arguments) {
- var index = request.arguments.number;
- if (index < 0 || this.exec_state_.frameCount() <= index) {
- return response.failed('Invalid frame number');
- }
-
- this.exec_state_.setSelectedFrame(request.arguments.number);
- }
- response.body = this.exec_state_.frame();
-};
-
-
-DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ =
- function(scope_description) {
- // Get the frame for which the scope or scopes are requested.
- // With no frameNumber argument use the currently selected frame.
- if (scope_description && !IS_UNDEFINED(scope_description.frameNumber)) {
- frame_index = scope_description.frameNumber;
- if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
- throw new Error('Invalid frame number');
- }
- return this.exec_state_.frame(frame_index);
- } else {
- return this.exec_state_.frame();
- }
-};
-
-
-// Gets scope host object from request. It is either a function
-// ('functionHandle' argument must be specified) or a stack frame
-// ('frameNumber' may be specified and the current frame is taken by default).
-DebugCommandProcessor.prototype.resolveScopeHolder_ =
- function(scope_description) {
- if (scope_description && "functionHandle" in scope_description) {
- if (!IS_NUMBER(scope_description.functionHandle)) {
- throw new Error('Function handle must be a number');
- }
- var function_mirror = LookupMirror(scope_description.functionHandle);
- if (!function_mirror) {
- throw new Error('Failed to find function object by handle');
- }
- if (!function_mirror.isFunction()) {
- throw new Error('Value of non-function type is found by handle');
- }
- return function_mirror;
- } else {
- // No frames no scopes.
- if (this.exec_state_.frameCount() == 0) {
- throw new Error('No scopes');
- }
-
- // Get the frame for which the scopes are requested.
- var frame = this.resolveFrameFromScopeDescription_(scope_description);
- return frame;
- }
-}
-
-
-DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
- var scope_holder = this.resolveScopeHolder_(request.arguments);
-
- // Fill all scopes for this frame or function.
- var total_scopes = scope_holder.scopeCount();
- var scopes = [];
- for (var i = 0; i < total_scopes; i++) {
- scopes.push(scope_holder.scope(i));
- }
- response.body = {
- fromScope: 0,
- toScope: total_scopes,
- totalScopes: total_scopes,
- scopes: scopes
- };
-};
-
-
-DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
- // Get the frame or function for which the scope is requested.
- var scope_holder = this.resolveScopeHolder_(request.arguments);
-
- // With no scope argument just return top scope.
- var scope_index = 0;
- if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
- scope_index = %ToNumber(request.arguments.number);
- if (scope_index < 0 || scope_holder.scopeCount() <= scope_index) {
- return response.failed('Invalid scope number');
- }
- }
-
- response.body = scope_holder.scope(scope_index);
-};
-
-
-// Reads value from protocol description. Description may be in form of type
-// (for singletons), raw value (primitive types supported in JSON),
-// string value description plus type (for primitive values) or handle id.
-// Returns raw value or throws exception.
-DebugCommandProcessor.resolveValue_ = function(value_description) {
- if ("handle" in value_description) {
- var value_mirror = LookupMirror(value_description.handle);
- if (!value_mirror) {
- throw new Error("Failed to resolve value by handle, ' #" +
- mapping.handle + "# not found");
- }
- return value_mirror.value();
- } else if ("stringDescription" in value_description) {
- if (value_description.type == BOOLEAN_TYPE) {
- return Boolean(value_description.stringDescription);
- } else if (value_description.type == NUMBER_TYPE) {
- return Number(value_description.stringDescription);
- } if (value_description.type == STRING_TYPE) {
- return String(value_description.stringDescription);
- } else {
- throw new Error("Unknown type");
- }
- } else if ("value" in value_description) {
- return value_description.value;
- } else if (value_description.type == UNDEFINED_TYPE) {
- return void 0;
- } else if (value_description.type == NULL_TYPE) {
- return null;
- } else {
- throw new Error("Failed to parse value description");
- }
-};
-
-
-DebugCommandProcessor.prototype.setVariableValueRequest_ =
- function(request, response) {
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- if (IS_UNDEFINED(request.arguments.name)) {
- response.failed('Missing variable name');
- }
- var variable_name = request.arguments.name;
-
- var scope_description = request.arguments.scope;
-
- // Get the frame or function for which the scope is requested.
- var scope_holder = this.resolveScopeHolder_(scope_description);
-
- if (IS_UNDEFINED(scope_description.number)) {
- response.failed('Missing scope number');
- }
- var scope_index = %ToNumber(scope_description.number);
-
- var scope = scope_holder.scope(scope_index);
-
- var new_value =
- DebugCommandProcessor.resolveValue_(request.arguments.newValue);
-
- scope.setVariableValue(variable_name, new_value);
-
- var new_value_mirror = MakeMirror(new_value);
-
- response.body = {
- newValue: new_value_mirror
- };
-};
-
-
-DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
-
- // Pull out arguments.
- var expression = request.arguments.expression;
- var frame = request.arguments.frame;
- var global = request.arguments.global;
- var disable_break = request.arguments.disable_break;
- var additional_context = request.arguments.additional_context;
-
- // The expression argument could be an integer so we convert it to a
- // string.
- try {
- expression = String(expression);
- } catch(e) {
- return response.failed('Failed to convert expression argument to string');
- }
-
- // Check for legal arguments.
- if (!IS_UNDEFINED(frame) && global) {
- return response.failed('Arguments "frame" and "global" are exclusive');
- }
-
- var additional_context_object;
- if (additional_context) {
- additional_context_object = {};
- for (var i = 0; i < additional_context.length; i++) {
- var mapping = additional_context[i];
-
- if (!IS_STRING(mapping.name)) {
- return response.failed("Context element #" + i +
- " doesn't contain name:string property");
- }
-
- var raw_value = DebugCommandProcessor.resolveValue_(mapping);
- additional_context_object[mapping.name] = raw_value;
- }
- }
-
- // Global evaluate.
- if (global) {
- // Evaluate in the native context.
- response.body = this.exec_state_.evaluateGlobal(
- expression, Boolean(disable_break), additional_context_object);
- return;
- }
-
- // Default value for disable_break is true.
- if (IS_UNDEFINED(disable_break)) {
- disable_break = true;
- }
-
- // No frames no evaluate in frame.
- if (this.exec_state_.frameCount() == 0) {
- return response.failed('No frames');
- }
-
- // Check whether a frame was specified.
- if (!IS_UNDEFINED(frame)) {
- var frame_number = %ToNumber(frame);
- if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
- return response.failed('Invalid frame "' + frame + '"');
- }
- // Evaluate in the specified frame.
- response.body = this.exec_state_.frame(frame_number).evaluate(
- expression, Boolean(disable_break), additional_context_object);
- return;
- } else {
- // Evaluate in the selected frame.
- response.body = this.exec_state_.frame().evaluate(
- expression, Boolean(disable_break), additional_context_object);
- return;
- }
-};
-
-
-DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
-
- // Pull out arguments.
- var handles = request.arguments.handles;
-
- // Check for legal arguments.
- if (IS_UNDEFINED(handles)) {
- return response.failed('Argument "handles" missing');
- }
-
- // Set 'includeSource' option for script lookup.
- if (!IS_UNDEFINED(request.arguments.includeSource)) {
- includeSource = %ToBoolean(request.arguments.includeSource);
- response.setOption('includeSource', includeSource);
- }
-
- // Lookup handles.
- var mirrors = {};
- for (var i = 0; i < handles.length; i++) {
- var handle = handles[i];
- var mirror = LookupMirror(handle);
- if (!mirror) {
- return response.failed('Object #' + handle + '# not found');
- }
- mirrors[handle] = mirror;
- }
- response.body = mirrors;
-};
-
-
-DebugCommandProcessor.prototype.referencesRequest_ =
- function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
-
- // Pull out arguments.
- var type = request.arguments.type;
- var handle = request.arguments.handle;
-
- // Check for legal arguments.
- if (IS_UNDEFINED(type)) {
- return response.failed('Argument "type" missing');
- }
- if (IS_UNDEFINED(handle)) {
- return response.failed('Argument "handle" missing');
- }
- if (type != 'referencedBy' && type != 'constructedBy') {
- return response.failed('Invalid type "' + type + '"');
- }
-
- // Lookup handle and return objects with references the object.
- var mirror = LookupMirror(handle);
- if (mirror) {
- if (type == 'referencedBy') {
- response.body = mirror.referencedBy();
- } else {
- response.body = mirror.constructedBy();
- }
- } else {
- return response.failed('Object #' + handle + '# not found');
- }
-};
-
-
-DebugCommandProcessor.prototype.sourceRequest_ = function(request, response) {
- // No frames no source.
- if (this.exec_state_.frameCount() == 0) {
- return response.failed('No source');
- }
-
- var from_line;
- var to_line;
- var frame = this.exec_state_.frame();
- if (request.arguments) {
- // Pull out arguments.
- from_line = request.arguments.fromLine;
- to_line = request.arguments.toLine;
-
- if (!IS_UNDEFINED(request.arguments.frame)) {
- var frame_number = %ToNumber(request.arguments.frame);
- if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
- return response.failed('Invalid frame "' + frame + '"');
- }
- frame = this.exec_state_.frame(frame_number);
- }
- }
-
- // Get the script selected.
- var script = frame.func().script();
- if (!script) {
- return response.failed('No source');
- }
-
- // Get the source slice and fill it into the response.
- var slice = script.sourceSlice(from_line, to_line);
- if (!slice) {
- return response.failed('Invalid line interval');
- }
- response.body = {};
- response.body.source = slice.sourceText();
- response.body.fromLine = slice.from_line;
- response.body.toLine = slice.to_line;
- response.body.fromPosition = slice.from_position;
- response.body.toPosition = slice.to_position;
- response.body.totalLines = script.lineCount();
-};
-
-
-DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
- var types = ScriptTypeFlag(Debug.ScriptType.Normal);
- var includeSource = false;
- var idsToInclude = null;
- if (request.arguments) {
- // Pull out arguments.
- if (!IS_UNDEFINED(request.arguments.types)) {
- types = %ToNumber(request.arguments.types);
- if (isNaN(types) || types < 0) {
- return response.failed('Invalid types "' +
- request.arguments.types + '"');
- }
- }
-
- if (!IS_UNDEFINED(request.arguments.includeSource)) {
- includeSource = %ToBoolean(request.arguments.includeSource);
- response.setOption('includeSource', includeSource);
- }
-
- if (IS_ARRAY(request.arguments.ids)) {
- idsToInclude = {};
- var ids = request.arguments.ids;
- for (var i = 0; i < ids.length; i++) {
- idsToInclude[ids[i]] = true;
- }
- }
-
- var filterStr = null;
- var filterNum = null;
- if (!IS_UNDEFINED(request.arguments.filter)) {
- var num = %ToNumber(request.arguments.filter);
- if (!isNaN(num)) {
- filterNum = num;
- }
- filterStr = request.arguments.filter;
- }
- }
-
- // Collect all scripts in the heap.
- var scripts = %DebugGetLoadedScripts();
-
- response.body = [];
-
- for (var i = 0; i < scripts.length; i++) {
- if (idsToInclude && !idsToInclude[scripts[i].id]) {
- continue;
- }
- if (filterStr || filterNum) {
- var script = scripts[i];
- var found = false;
- if (filterNum && !found) {
- if (script.id && script.id === filterNum) {
- found = true;
- }
- }
- if (filterStr && !found) {
- if (script.name && script.name.indexOf(filterStr) >= 0) {
- found = true;
- }
- }
- if (!found) continue;
- }
- if (types & ScriptTypeFlag(scripts[i].type)) {
- response.body.push(MakeMirror(scripts[i]));
- }
- }
-};
-
-
-DebugCommandProcessor.prototype.threadsRequest_ = function(request, response) {
- // Get the number of threads.
- var total_threads = this.exec_state_.threadCount();
-
- // Get information for all threads.
- var threads = [];
- for (var i = 0; i < total_threads; i++) {
- var details = %GetThreadDetails(this.exec_state_.break_id, i);
- var thread_info = { current: details[0],
- id: details[1]
- };
- threads.push(thread_info);
- }
-
- // Create the response body.
- response.body = {
- totalThreads: total_threads,
- threads: threads
- };
-};
-
-
-DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) {
- response.running = false;
-};
-
-
-DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
- response.body = {
- V8Version: %GetV8Version()
- };
-};
-
-
-DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
- if (request.arguments.command == 'resume') {
- %ProfilerResume();
- } else if (request.arguments.command == 'pause') {
- %ProfilerPause();
- } else {
- return response.failed('Unknown command');
- }
- response.body = {};
-};
-
-
-DebugCommandProcessor.prototype.changeLiveRequest_ = function(
- request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
- var script_id = request.arguments.script_id;
- var preview_only = !!request.arguments.preview_only;
-
- var scripts = %DebugGetLoadedScripts();
-
- var the_script = null;
- for (var i = 0; i < scripts.length; i++) {
- if (scripts[i].id == script_id) {
- the_script = scripts[i];
- }
- }
- if (!the_script) {
- response.failed('Script not found');
- return;
- }
-
- var change_log = new Array();
-
- if (!IS_STRING(request.arguments.new_source)) {
- throw "new_source argument expected";
- }
-
- var new_source = request.arguments.new_source;
-
- var result_description;
- try {
- result_description = Debug.LiveEdit.SetScriptSource(the_script,
- new_source, preview_only, change_log);
- } catch (e) {
- if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
- response.failed(e.message, e.details);
- return;
- }
- throw e;
- }
- response.body = {change_log: change_log, result: result_description};
-
- if (!preview_only && !this.running_ && result_description.stack_modified) {
- response.body.stepin_recommended = true;
- }
-};
-
-
-DebugCommandProcessor.prototype.restartFrameRequest_ = function(
- request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
- var frame = request.arguments.frame;
-
- // No frames to evaluate in frame.
- if (this.exec_state_.frameCount() == 0) {
- return response.failed('No frames');
- }
-
- var frame_mirror;
- // Check whether a frame was specified.
- if (!IS_UNDEFINED(frame)) {
- var frame_number = %ToNumber(frame);
- if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
- return response.failed('Invalid frame "' + frame + '"');
- }
- // Restart specified frame.
- frame_mirror = this.exec_state_.frame(frame_number);
- } else {
- // Restart selected frame.
- frame_mirror = this.exec_state_.frame();
- }
-
- var result_description = Debug.LiveEdit.RestartFrame(frame_mirror);
- response.body = {result: result_description};
-};
-
-
-DebugCommandProcessor.prototype.debuggerFlagsRequest_ = function(request,
- response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var flags = request.arguments.flags;
-
- response.body = { flags: [] };
- if (!IS_UNDEFINED(flags)) {
- for (var i = 0; i < flags.length; i++) {
- var name = flags[i].name;
- var debugger_flag = debugger_flags[name];
- if (!debugger_flag) {
- continue;
- }
- if ('value' in flags[i]) {
- debugger_flag.setValue(flags[i].value);
- }
- response.body.flags.push({ name: name, value: debugger_flag.getValue() });
- }
- } else {
- for (var name in debugger_flags) {
- var value = debugger_flags[name].getValue();
- response.body.flags.push({ name: name, value: value });
- }
- }
-};
-
-
-DebugCommandProcessor.prototype.v8FlagsRequest_ = function(request, response) {
- var flags = request.arguments.flags;
- if (!flags) flags = '';
- %SetFlags(flags);
-};
-
-
-DebugCommandProcessor.prototype.gcRequest_ = function(request, response) {
- var type = request.arguments.type;
- if (!type) type = 'all';
-
- var before = %GetHeapUsage();
- %CollectGarbage(type);
- var after = %GetHeapUsage();
-
- response.body = { "before": before, "after": after };
-};
-
-
-// Check whether the previously processed command caused the VM to become
-// running.
-DebugCommandProcessor.prototype.isRunning = function() {
- return this.running_;
-};
-
-
-DebugCommandProcessor.prototype.systemBreak = function(cmd, args) {
- return %SystemBreak();
-};
-
-
-function NumberToHex8Str(n) {
- var r = "";
- for (var i = 0; i < 8; ++i) {
- var c = hexCharArray[n & 0x0F]; // hexCharArray is defined in uri.js
- r = c + r;
- n = n >>> 4;
- }
- return r;
-}
-
-
-/**
- * Convert an Object to its debugger protocol representation. The representation
- * may be serilized to a JSON object using JSON.stringify().
- * This implementation simply runs through all string property names, converts
- * each property value to a protocol value and adds the property to the result
- * object. For type "object" the function will be called recursively. Note that
- * circular structures will cause infinite recursion.
- * @param {Object} object The object to format as protocol object.
- * @param {MirrorSerializer} mirror_serializer The serializer to use if any
- * mirror objects are encountered.
- * @return {Object} Protocol object value.
- */
-function ObjectToProtocolObject_(object, mirror_serializer) {
- var content = {};
- for (var key in object) {
- // Only consider string keys.
- if (typeof key == 'string') {
- // Format the value based on its type.
- var property_value_json = ValueToProtocolValue_(object[key],
- mirror_serializer);
- // Add the property if relevant.
- if (!IS_UNDEFINED(property_value_json)) {
- content[key] = property_value_json;
- }
- }
- }
-
- return content;
-}
-
-
-/**
- * Convert an array to its debugger protocol representation. It will convert
- * each array element to a protocol value.
- * @param {Array} array The array to format as protocol array.
- * @param {MirrorSerializer} mirror_serializer The serializer to use if any
- * mirror objects are encountered.
- * @return {Array} Protocol array value.
- */
-function ArrayToProtocolArray_(array, mirror_serializer) {
- var json = [];
- for (var i = 0; i < array.length; i++) {
- json.push(ValueToProtocolValue_(array[i], mirror_serializer));
- }
- return json;
-}
-
-
-/**
- * Convert a value to its debugger protocol representation.
- * @param {*} value The value to format as protocol value.
- * @param {MirrorSerializer} mirror_serializer The serializer to use if any
- * mirror objects are encountered.
- * @return {*} Protocol value.
- */
-function ValueToProtocolValue_(value, mirror_serializer) {
- // Format the value based on its type.
- var json;
- switch (typeof value) {
- case 'object':
- if (value instanceof Mirror) {
- json = mirror_serializer.serializeValue(value);
- } else if (IS_ARRAY(value)){
- json = ArrayToProtocolArray_(value, mirror_serializer);
- } else {
- json = ObjectToProtocolObject_(value, mirror_serializer);
- }
- break;
-
- case 'boolean':
- case 'string':
- case 'number':
- json = value;
- break;
-
- default:
- json = null;
- }
- return json;
-}
-
-Debug.TestApi = {
- CommandProcessorResolveValue: DebugCommandProcessor.resolveValue_
-};
diff --git a/src/3rdparty/v8/src/debug.cc b/src/3rdparty/v8/src/debug.cc
deleted file mode 100644
index 2821578..0000000
--- a/src/3rdparty/v8/src/debug.cc
+++ /dev/null
@@ -1,3803 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compilation-cache.h"
-#include "compiler.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "full-codegen.h"
-#include "global-handles.h"
-#include "ic.h"
-#include "ic-inl.h"
-#include "isolate-inl.h"
-#include "list.h"
-#include "messages.h"
-#include "natives.h"
-#include "stub-cache.h"
-#include "log.h"
-
-#include "../include/v8-debug.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-
-Debug::Debug(Isolate* isolate)
- : has_break_points_(false),
- script_cache_(NULL),
- debug_info_list_(NULL),
- disable_break_(false),
- break_on_exception_(false),
- break_on_uncaught_exception_(false),
- debug_break_return_(NULL),
- debug_break_slot_(NULL),
- isolate_(isolate) {
- memset(registers_, 0, sizeof(JSCallerSavedBuffer));
-}
-
-
-Debug::~Debug() {
-}
-
-
-static void PrintLn(v8::Local<v8::Value> value) {
- v8::Local<v8::String> s = value->ToString();
- ScopedVector<char> data(s->Length() + 1);
- if (data.start() == NULL) {
- V8::FatalProcessOutOfMemory("PrintLn");
- return;
- }
- s->WriteAscii(data.start());
- PrintF("%s\n", data.start());
-}
-
-
-static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
- Isolate* isolate = Isolate::Current();
- return isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind);
-}
-
-
-static v8::Handle<v8::Context> GetDebugEventContext(Isolate* isolate) {
- Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
- // Isolate::context() may have been NULL when "script collected" event
- // occured.
- if (context.is_null()) return v8::Local<v8::Context>();
- Handle<Context> native_context(context->native_context());
- return v8::Utils::ToLocal(native_context);
-}
-
-
-BreakLocationIterator::BreakLocationIterator(Handle<DebugInfo> debug_info,
- BreakLocatorType type) {
- debug_info_ = debug_info;
- type_ = type;
- reloc_iterator_ = NULL;
- reloc_iterator_original_ = NULL;
- Reset(); // Initialize the rest of the member variables.
-}
-
-
-BreakLocationIterator::~BreakLocationIterator() {
- ASSERT(reloc_iterator_ != NULL);
- ASSERT(reloc_iterator_original_ != NULL);
- delete reloc_iterator_;
- delete reloc_iterator_original_;
-}
-
-
-void BreakLocationIterator::Next() {
- AssertNoAllocation nogc;
- ASSERT(!RinfoDone());
-
- // Iterate through reloc info for code and original code stopping at each
- // breakable code target.
- bool first = break_point_ == -1;
- while (!RinfoDone()) {
- if (!first) RinfoNext();
- first = false;
- if (RinfoDone()) return;
-
- // Whenever a statement position or (plain) position is passed update the
- // current value of these.
- if (RelocInfo::IsPosition(rmode())) {
- if (RelocInfo::IsStatementPosition(rmode())) {
- statement_position_ = static_cast<int>(
- rinfo()->data() - debug_info_->shared()->start_position());
- }
- // Always update the position as we don't want that to be before the
- // statement position.
- position_ = static_cast<int>(
- rinfo()->data() - debug_info_->shared()->start_position());
- ASSERT(position_ >= 0);
- ASSERT(statement_position_ >= 0);
- }
-
- if (IsDebugBreakSlot()) {
- // There is always a possible break point at a debug break slot.
- break_point_++;
- return;
- } else if (RelocInfo::IsCodeTarget(rmode())) {
- // Check for breakable code target. Look in the original code as setting
- // break points can cause the code targets in the running (debugged) code
- // to be of a different kind than in the original code.
- Address target = original_rinfo()->target_address();
- Code* code = Code::GetCodeFromTargetAddress(target);
- if ((code->is_inline_cache_stub() &&
- !code->is_binary_op_stub() &&
- !code->is_unary_op_stub() &&
- !code->is_compare_ic_stub() &&
- !code->is_to_boolean_ic_stub()) ||
- RelocInfo::IsConstructCall(rmode())) {
- break_point_++;
- return;
- }
- if (code->kind() == Code::STUB) {
- if (IsDebuggerStatement()) {
- break_point_++;
- return;
- }
- if (type_ == ALL_BREAK_LOCATIONS) {
- if (Debug::IsBreakStub(code)) {
- break_point_++;
- return;
- }
- } else {
- ASSERT(type_ == SOURCE_BREAK_LOCATIONS);
- if (Debug::IsSourceBreakStub(code)) {
- break_point_++;
- return;
- }
- }
- }
- }
-
- // Check for break at return.
- if (RelocInfo::IsJSReturn(rmode())) {
- // Set the positions to the end of the function.
- if (debug_info_->shared()->HasSourceCode()) {
- position_ = debug_info_->shared()->end_position() -
- debug_info_->shared()->start_position() - 1;
- } else {
- position_ = 0;
- }
- statement_position_ = position_;
- break_point_++;
- return;
- }
- }
-}
-
-
-void BreakLocationIterator::Next(int count) {
- while (count > 0) {
- Next();
- count--;
- }
-}
-
-
-// Find the break point closest to the supplied address.
-void BreakLocationIterator::FindBreakLocationFromAddress(Address pc) {
- // Run through all break points to locate the one closest to the address.
- int closest_break_point = 0;
- int distance = kMaxInt;
- while (!Done()) {
- // Check if this break point is closer that what was previously found.
- if (this->pc() < pc && pc - this->pc() < distance) {
- closest_break_point = break_point();
- distance = static_cast<int>(pc - this->pc());
- // Check whether we can't get any closer.
- if (distance == 0) break;
- }
- Next();
- }
-
- // Move to the break point found.
- Reset();
- Next(closest_break_point);
-}
-
-
-// Find the break point closest to the supplied source position.
-void BreakLocationIterator::FindBreakLocationFromPosition(int position) {
- // Run through all break points to locate the one closest to the source
- // position.
- int closest_break_point = 0;
- int distance = kMaxInt;
- while (!Done()) {
- // Check if this break point is closer that what was previously found.
- if (position <= statement_position() &&
- statement_position() - position < distance) {
- closest_break_point = break_point();
- distance = statement_position() - position;
- // Check whether we can't get any closer.
- if (distance == 0) break;
- }
- Next();
- }
-
- // Move to the break point found.
- Reset();
- Next(closest_break_point);
-}
-
-
-void BreakLocationIterator::Reset() {
- // Create relocation iterators for the two code objects.
- if (reloc_iterator_ != NULL) delete reloc_iterator_;
- if (reloc_iterator_original_ != NULL) delete reloc_iterator_original_;
- reloc_iterator_ = new RelocIterator(
- debug_info_->code(),
- ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
- reloc_iterator_original_ = new RelocIterator(
- debug_info_->original_code(),
- ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
-
- // Position at the first break point.
- break_point_ = -1;
- position_ = 1;
- statement_position_ = 1;
- Next();
-}
-
-
-bool BreakLocationIterator::Done() const {
- return RinfoDone();
-}
-
-
-void BreakLocationIterator::SetBreakPoint(Handle<Object> break_point_object) {
- // If there is not already a real break point here patch code with debug
- // break.
- if (!HasBreakPoint()) {
- SetDebugBreak();
- }
- ASSERT(IsDebugBreak() || IsDebuggerStatement());
- // Set the break point information.
- DebugInfo::SetBreakPoint(debug_info_, code_position(),
- position(), statement_position(),
- break_point_object);
-}
-
-
-void BreakLocationIterator::ClearBreakPoint(Handle<Object> break_point_object) {
- // Clear the break point information.
- DebugInfo::ClearBreakPoint(debug_info_, code_position(), break_point_object);
- // If there are no more break points here remove the debug break.
- if (!HasBreakPoint()) {
- ClearDebugBreak();
- ASSERT(!IsDebugBreak());
- }
-}
-
-
-void BreakLocationIterator::SetOneShot() {
- // Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) {
- return;
- }
-
- // If there is a real break point here no more to do.
- if (HasBreakPoint()) {
- ASSERT(IsDebugBreak());
- return;
- }
-
- // Patch code with debug break.
- SetDebugBreak();
-}
-
-
-void BreakLocationIterator::ClearOneShot() {
- // Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) {
- return;
- }
-
- // If there is a real break point here no more to do.
- if (HasBreakPoint()) {
- ASSERT(IsDebugBreak());
- return;
- }
-
- // Patch code removing debug break.
- ClearDebugBreak();
- ASSERT(!IsDebugBreak());
-}
-
-
-void BreakLocationIterator::SetDebugBreak() {
- // Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) {
- return;
- }
-
- // If there is already a break point here just return. This might happen if
- // the same code is flooded with break points twice. Flooding the same
- // function twice might happen when stepping in a function with an exception
- // handler as the handler and the function is the same.
- if (IsDebugBreak()) {
- return;
- }
-
- if (RelocInfo::IsJSReturn(rmode())) {
- // Patch the frame exit code with a break point.
- SetDebugBreakAtReturn();
- } else if (IsDebugBreakSlot()) {
- // Patch the code in the break slot.
- SetDebugBreakAtSlot();
- } else {
- // Patch the IC call.
- SetDebugBreakAtIC();
- }
- ASSERT(IsDebugBreak());
-}
-
-
-void BreakLocationIterator::ClearDebugBreak() {
- // Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) {
- return;
- }
-
- if (RelocInfo::IsJSReturn(rmode())) {
- // Restore the frame exit code.
- ClearDebugBreakAtReturn();
- } else if (IsDebugBreakSlot()) {
- // Restore the code in the break slot.
- ClearDebugBreakAtSlot();
- } else {
- // Patch the IC call.
- ClearDebugBreakAtIC();
- }
- ASSERT(!IsDebugBreak());
-}
-
-
-void BreakLocationIterator::PrepareStepIn(Isolate* isolate) {
- HandleScope scope(isolate);
-
- // Step in can only be prepared if currently positioned on an IC call,
- // construct call or CallFunction stub call.
- Address target = rinfo()->target_address();
- Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
- if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) {
- // Step in through IC call is handled by the runtime system. Therefore make
- // sure that the any current IC is cleared and the runtime system is
- // called. If the executing code has a debug break at the location change
- // the call in the original code as it is the code there that will be
- // executed in place of the debug break call.
- Handle<Code> stub = ComputeCallDebugPrepareStepIn(
- target_code->arguments_count(), target_code->kind());
- if (IsDebugBreak()) {
- original_rinfo()->set_target_address(stub->entry());
- } else {
- rinfo()->set_target_address(stub->entry());
- }
- } else {
-#ifdef DEBUG
- // All the following stuff is needed only for assertion checks so the code
- // is wrapped in ifdef.
- Handle<Code> maybe_call_function_stub = target_code;
- if (IsDebugBreak()) {
- Address original_target = original_rinfo()->target_address();
- maybe_call_function_stub =
- Handle<Code>(Code::GetCodeFromTargetAddress(original_target));
- }
- bool is_call_function_stub =
- (maybe_call_function_stub->kind() == Code::STUB &&
- maybe_call_function_stub->major_key() == CodeStub::CallFunction);
-
- // Step in through construct call requires no changes to the running code.
- // Step in through getters/setters should already be prepared as well
- // because caller of this function (Debug::PrepareStep) is expected to
- // flood the top frame's function with one shot breakpoints.
- // Step in through CallFunction stub should also be prepared by caller of
- // this function (Debug::PrepareStep) which should flood target function
- // with breakpoints.
- ASSERT(RelocInfo::IsConstructCall(rmode()) ||
- target_code->is_inline_cache_stub() ||
- is_call_function_stub);
-#endif
- }
-}
-
-
-// Check whether the break point is at a position which will exit the function.
-bool BreakLocationIterator::IsExit() const {
- return (RelocInfo::IsJSReturn(rmode()));
-}
-
-
-bool BreakLocationIterator::HasBreakPoint() {
- return debug_info_->HasBreakPoint(code_position());
-}
-
-
-// Check whether there is a debug break at the current position.
-bool BreakLocationIterator::IsDebugBreak() {
- if (RelocInfo::IsJSReturn(rmode())) {
- return IsDebugBreakAtReturn();
- } else if (IsDebugBreakSlot()) {
- return IsDebugBreakAtSlot();
- } else {
- return Debug::IsDebugBreak(rinfo()->target_address());
- }
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtIC() {
- // Patch the original code with the current address as the current address
- // might have changed by the inline caching since the code was copied.
- original_rinfo()->set_target_address(rinfo()->target_address());
-
- RelocInfo::Mode mode = rmode();
- if (RelocInfo::IsCodeTarget(mode)) {
- Address target = rinfo()->target_address();
- Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
-
- // Patch the code to invoke the builtin debug break function matching the
- // calling convention used by the call site.
- Handle<Code> dbgbrk_code(Debug::FindDebugBreak(target_code, mode));
- rinfo()->set_target_address(dbgbrk_code->entry());
- }
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtIC() {
- // Patch the code to the original invoke.
- rinfo()->set_target_address(original_rinfo()->target_address());
-}
-
-
-bool BreakLocationIterator::IsDebuggerStatement() {
- return RelocInfo::DEBUG_BREAK == rmode();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakSlot() {
- return RelocInfo::DEBUG_BREAK_SLOT == rmode();
-}
-
-
-Object* BreakLocationIterator::BreakPointObjects() {
- return debug_info_->GetBreakPointObjects(code_position());
-}
-
-
-// Clear out all the debug break code. This is ONLY supposed to be used when
-// shutting down the debugger as it will leave the break point information in
-// DebugInfo even though the code is patched back to the non break point state.
-void BreakLocationIterator::ClearAllDebugBreak() {
- while (!Done()) {
- ClearDebugBreak();
- Next();
- }
-}
-
-
-bool BreakLocationIterator::RinfoDone() const {
- ASSERT(reloc_iterator_->done() == reloc_iterator_original_->done());
- return reloc_iterator_->done();
-}
-
-
-void BreakLocationIterator::RinfoNext() {
- reloc_iterator_->next();
- reloc_iterator_original_->next();
-#ifdef DEBUG
- ASSERT(reloc_iterator_->done() == reloc_iterator_original_->done());
- if (!reloc_iterator_->done()) {
- ASSERT(rmode() == original_rmode());
- }
-#endif
-}
-
-
-// Threading support.
-void Debug::ThreadInit() {
- thread_local_.break_count_ = 0;
- thread_local_.break_id_ = 0;
- thread_local_.break_frame_id_ = StackFrame::NO_ID;
- thread_local_.last_step_action_ = StepNone;
- thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
- thread_local_.step_count_ = 0;
- thread_local_.last_fp_ = 0;
- thread_local_.queued_step_count_ = 0;
- thread_local_.step_into_fp_ = 0;
- thread_local_.step_out_fp_ = 0;
- thread_local_.after_break_target_ = 0;
- // TODO(isolates): frames_are_dropped_?
- thread_local_.debugger_entry_ = NULL;
- thread_local_.pending_interrupts_ = 0;
- thread_local_.restarter_frame_function_pointer_ = NULL;
-}
-
-
-char* Debug::ArchiveDebug(char* storage) {
- char* to = storage;
- memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
- to += sizeof(ThreadLocal);
- memcpy(to, reinterpret_cast<char*>(&registers_), sizeof(registers_));
- ThreadInit();
- ASSERT(to <= storage + ArchiveSpacePerThread());
- return storage + ArchiveSpacePerThread();
-}
-
-
-char* Debug::RestoreDebug(char* storage) {
- char* from = storage;
- memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
- from += sizeof(ThreadLocal);
- memcpy(reinterpret_cast<char*>(&registers_), from, sizeof(registers_));
- ASSERT(from <= storage + ArchiveSpacePerThread());
- return storage + ArchiveSpacePerThread();
-}
-
-
-int Debug::ArchiveSpacePerThread() {
- return sizeof(ThreadLocal) + sizeof(JSCallerSavedBuffer);
-}
-
-
-// Frame structure (conforms InternalFrame structure):
-// -- code
-// -- SMI maker
-// -- function (slot is called "context")
-// -- frame base
-Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code) {
- ASSERT(bottom_js_frame->is_java_script());
-
- Address fp = bottom_js_frame->fp();
-
- // Move function pointer into "context" slot.
- Memory::Object_at(fp + StandardFrameConstants::kContextOffset) =
- Memory::Object_at(fp + JavaScriptFrameConstants::kFunctionOffset);
-
- Memory::Object_at(fp + InternalFrameConstants::kCodeOffset) = *code;
- Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset) =
- Smi::FromInt(StackFrame::INTERNAL);
-
- return reinterpret_cast<Object**>(&Memory::Object_at(
- fp + StandardFrameConstants::kContextOffset));
-}
-
-const int Debug::kFrameDropperFrameSize = 4;
-
-
-void ScriptCache::Add(Handle<Script> script) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- // Create an entry in the hash map for the script.
- int id = Smi::cast(script->id())->value();
- HashMap::Entry* entry =
- HashMap::Lookup(reinterpret_cast<void*>(id), Hash(id), true);
- if (entry->value != NULL) {
- ASSERT(*script == *reinterpret_cast<Script**>(entry->value));
- return;
- }
-
- // Globalize the script object, make it weak and use the location of the
- // global handle as the value in the hash map.
- Handle<Script> script_ =
- Handle<Script>::cast(
- (global_handles->Create(*script)));
- global_handles->MakeWeak(reinterpret_cast<Object**>(script_.location()),
- this,
- NULL,
- ScriptCache::HandleWeakScript);
- entry->value = script_.location();
-}
-
-
-Handle<FixedArray> ScriptCache::GetScripts() {
- Handle<FixedArray> instances = FACTORY->NewFixedArray(occupancy());
- int count = 0;
- for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
- ASSERT(entry->value != NULL);
- if (entry->value != NULL) {
- instances->set(count, *reinterpret_cast<Script**>(entry->value));
- count++;
- }
- }
- return instances;
-}
-
-
-void ScriptCache::ProcessCollectedScripts() {
- Debugger* debugger = Isolate::Current()->debugger();
- for (int i = 0; i < collected_scripts_.length(); i++) {
- debugger->OnScriptCollected(collected_scripts_[i]);
- }
- collected_scripts_.Clear();
-}
-
-
-void ScriptCache::Clear() {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- // Iterate the script cache to get rid of all the weak handles.
- for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
- ASSERT(entry != NULL);
- Object** location = reinterpret_cast<Object**>(entry->value);
- ASSERT((*location)->IsScript());
- global_handles->ClearWeakness(location);
- global_handles->Destroy(location);
- }
- // Clear the content of the hash map.
- HashMap::Clear();
-}
-
-
-void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
- void* data) {
- ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data);
- // Find the location of the global handle.
- Script** location =
- reinterpret_cast<Script**>(Utils::OpenHandle(*obj).location());
- ASSERT((*location)->IsScript());
-
- // Remove the entry from the cache.
- int id = Smi::cast((*location)->id())->value();
- script_cache->Remove(reinterpret_cast<void*>(id), Hash(id));
- script_cache->collected_scripts_.Add(id);
-
- // Clear the weak handle.
- obj.Dispose(isolate);
- obj.Clear();
-}
-
-
-void Debug::SetUp(bool create_heap_objects) {
- ThreadInit();
- if (create_heap_objects) {
- // Get code to handle debug break on return.
- debug_break_return_ =
- isolate_->builtins()->builtin(Builtins::kReturn_DebugBreak);
- ASSERT(debug_break_return_->IsCode());
- // Get code to handle debug break in debug break slots.
- debug_break_slot_ =
- isolate_->builtins()->builtin(Builtins::kSlot_DebugBreak);
- ASSERT(debug_break_slot_->IsCode());
- }
-}
-
-
-void Debug::HandleWeakDebugInfo(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
- void* data) {
- Debug* debug = reinterpret_cast<Isolate*>(isolate)->debug();
- DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
- // We need to clear all breakpoints associated with the function to restore
- // original code and avoid patching the code twice later because
- // the function will live in the heap until next gc, and can be found by
- // Debug::FindSharedFunctionInfoInScript.
- BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
- it.ClearAllDebugBreak();
- debug->RemoveDebugInfo(node->debug_info());
-#ifdef DEBUG
- node = debug->debug_info_list_;
- while (node != NULL) {
- ASSERT(node != reinterpret_cast<DebugInfoListNode*>(data));
- node = node->next();
- }
-#endif
-}
-
-
-DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- // Globalize the request debug info object and make it weak.
- debug_info_ = Handle<DebugInfo>::cast(
- (global_handles->Create(debug_info)));
- global_handles->MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
- this,
- NULL,
- Debug::HandleWeakDebugInfo);
-}
-
-
-DebugInfoListNode::~DebugInfoListNode() {
- Isolate::Current()->global_handles()->Destroy(
- reinterpret_cast<Object**>(debug_info_.location()));
-}
-
-
-bool Debug::CompileDebuggerScript(int index) {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope scope(isolate);
-
- // Bail out if the index is invalid.
- if (index == -1) {
- return false;
- }
-
- // Find source and name for the requested script.
- Handle<String> source_code =
- isolate->bootstrapper()->NativesSourceLookup(index);
- Vector<const char> name = Natives::GetScriptName(index);
- Handle<String> script_name = factory->NewStringFromAscii(name);
- Handle<Context> context = isolate->native_context();
-
- // Compile the script.
- Handle<SharedFunctionInfo> function_info;
- function_info = Compiler::Compile(source_code,
- script_name,
- 0, 0,
- context,
- NULL, NULL,
- Handle<String>::null(),
- NATIVES_CODE);
-
- // Silently ignore stack overflows during compilation.
- if (function_info.is_null()) {
- ASSERT(isolate->has_pending_exception());
- isolate->clear_pending_exception();
- return false;
- }
-
- // Execute the shared function in the debugger context.
- bool caught_exception;
- Handle<JSFunction> function =
- factory->NewFunctionFromSharedFunctionInfo(function_info, context);
-
- Handle<Object> exception =
- Execution::TryCall(function,
- Handle<Object>(context->global_object(), isolate),
- 0,
- NULL,
- &caught_exception);
-
- // Check for caught exceptions.
- if (caught_exception) {
- ASSERT(!isolate->has_pending_exception());
- MessageLocation computed_location;
- isolate->ComputeLocation(&computed_location);
- Handle<Object> message = MessageHandler::MakeMessageObject(
- "error_loading_debugger", &computed_location,
- Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
- ASSERT(!isolate->has_pending_exception());
- if (!exception.is_null()) {
- isolate->set_pending_exception(*exception);
- MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
- isolate->clear_pending_exception();
- }
- return false;
- }
-
- // Mark this script as native and return successfully.
- Handle<Script> script(Script::cast(function->shared()->script()));
- script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- return true;
-}
-
-
-bool Debug::Load() {
- // Return if debugger is already loaded.
- if (IsLoaded()) return true;
-
- Debugger* debugger = isolate_->debugger();
-
- // Bail out if we're already in the process of compiling the native
- // JavaScript source code for the debugger.
- if (debugger->compiling_natives() ||
- debugger->is_loading_debugger())
- return false;
- debugger->set_loading_debugger(true);
-
- // Disable breakpoints and interrupts while compiling and running the
- // debugger scripts including the context creation code.
- DisableBreak disable(true);
- PostponeInterruptsScope postpone(isolate_);
-
- // Create the debugger context.
- HandleScope scope(isolate_);
- Handle<Context> context =
- isolate_->bootstrapper()->CreateEnvironment(
- Handle<Object>::null(),
- v8::Handle<ObjectTemplate>(),
- NULL);
-
- // Fail if no context could be created.
- if (context.is_null()) return false;
-
- // Use the debugger context.
- SaveContext save(isolate_);
- isolate_->set_context(*context);
-
- // Expose the builtins object in the debugger context.
- Handle<String> key = isolate_->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("builtins"));
- Handle<GlobalObject> global = Handle<GlobalObject>(context->global_object());
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate_,
- JSReceiver::SetProperty(global,
- key,
- Handle<Object>(global->builtins(), isolate_),
- NONE,
- kNonStrictMode),
- false);
-
- // Compile the JavaScript for the debugger in the debugger context.
- debugger->set_compiling_natives(true);
- bool caught_exception =
- !CompileDebuggerScript(Natives::GetIndex("mirror")) ||
- !CompileDebuggerScript(Natives::GetIndex("debug"));
-
- if (FLAG_enable_liveedit) {
- caught_exception = caught_exception ||
- !CompileDebuggerScript(Natives::GetIndex("liveedit"));
- }
-
- debugger->set_compiling_natives(false);
-
- // Make sure we mark the debugger as not loading before we might
- // return.
- debugger->set_loading_debugger(false);
-
- // Check for caught exceptions.
- if (caught_exception) return false;
-
- // Debugger loaded.
- debug_context_ = context;
-
- return true;
-}
-
-
-void Debug::Unload() {
- // Return debugger is not loaded.
- if (!IsLoaded()) {
- return;
- }
-
- // Clear the script cache.
- DestroyScriptCache();
-
- // Clear debugger context global handle.
- Isolate::Current()->global_handles()->Destroy(
- reinterpret_cast<Object**>(debug_context_.location()));
- debug_context_ = Handle<Context>();
-}
-
-
-// Set the flag indicating that preemption happened during debugging.
-void Debug::PreemptionWhileInDebugger() {
- ASSERT(InDebugger());
- Debug::set_interrupts_pending(PREEMPT);
-}
-
-
-void Debug::Iterate(ObjectVisitor* v) {
- v->VisitPointer(BitCast<Object**>(&(debug_break_return_)));
- v->VisitPointer(BitCast<Object**>(&(debug_break_slot_)));
-}
-
-
-Object* Debug::Break(Arguments args) {
- Heap* heap = isolate_->heap();
- HandleScope scope(isolate_);
- ASSERT(args.length() == 0);
-
- thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
-
- // Get the top-most JavaScript frame.
- JavaScriptFrameIterator it(isolate_);
- JavaScriptFrame* frame = it.frame();
-
- // Just continue if breaks are disabled or debugger cannot be loaded.
- if (disable_break() || !Load()) {
- SetAfterBreakTarget(frame);
- return heap->undefined_value();
- }
-
- // Enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) {
- return heap->undefined_value();
- }
-
- // Postpone interrupt during breakpoint processing.
- PostponeInterruptsScope postpone(isolate_);
-
- // Get the debug info (create it if it does not exist).
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
-
- // Find the break point where execution has stopped.
- BreakLocationIterator break_location_iterator(debug_info,
- ALL_BREAK_LOCATIONS);
- break_location_iterator.FindBreakLocationFromAddress(frame->pc());
-
- // Check whether step next reached a new statement.
- if (!StepNextContinue(&break_location_iterator, frame)) {
- // Decrease steps left if performing multiple steps.
- if (thread_local_.step_count_ > 0) {
- thread_local_.step_count_--;
- }
- }
-
- // If there is one or more real break points check whether any of these are
- // triggered.
- Handle<Object> break_points_hit(heap->undefined_value(), isolate_);
- if (break_location_iterator.HasBreakPoint()) {
- Handle<Object> break_point_objects =
- Handle<Object>(break_location_iterator.BreakPointObjects(), isolate_);
- break_points_hit = CheckBreakPoints(break_point_objects);
- }
-
- // If step out is active skip everything until the frame where we need to step
- // out to is reached, unless real breakpoint is hit.
- if (StepOutActive() && frame->fp() != step_out_fp() &&
- break_points_hit->IsUndefined() ) {
- // Step count should always be 0 for StepOut.
- ASSERT(thread_local_.step_count_ == 0);
- } else if (!break_points_hit->IsUndefined() ||
- (thread_local_.last_step_action_ != StepNone &&
- thread_local_.step_count_ == 0)) {
- // Notify debugger if a real break point is triggered or if performing
- // single stepping with no more steps to perform. Otherwise do another step.
-
- // Clear all current stepping setup.
- ClearStepping();
-
- if (thread_local_.queued_step_count_ > 0) {
- // Perform queued steps
- int step_count = thread_local_.queued_step_count_;
-
- // Clear queue
- thread_local_.queued_step_count_ = 0;
-
- PrepareStep(StepNext, step_count);
- } else {
- // Notify the debug event listeners.
- isolate_->debugger()->OnDebugBreak(break_points_hit, false);
- }
- } else if (thread_local_.last_step_action_ != StepNone) {
- // Hold on to last step action as it is cleared by the call to
- // ClearStepping.
- StepAction step_action = thread_local_.last_step_action_;
- int step_count = thread_local_.step_count_;
-
- // If StepNext goes deeper in code, StepOut until original frame
- // and keep step count queued up in the meantime.
- if (step_action == StepNext && frame->fp() < thread_local_.last_fp_) {
- // Count frames until target frame
- int count = 0;
- JavaScriptFrameIterator it(isolate_);
- while (!it.done() && it.frame()->fp() < thread_local_.last_fp_) {
- count++;
- it.Advance();
- }
-
- // Check that we indeed found the frame we are looking for.
- CHECK(!it.done() && (it.frame()->fp() == thread_local_.last_fp_));
- if (step_count > 1) {
- // Save old count and action to continue stepping after StepOut.
- thread_local_.queued_step_count_ = step_count - 1;
- }
-
- // Set up for StepOut to reach target frame.
- step_action = StepOut;
- step_count = count;
- }
-
- // Clear all current stepping setup.
- ClearStepping();
-
- // Set up for the remaining steps.
- PrepareStep(step_action, step_count);
- }
-
- if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
- SetAfterBreakTarget(frame);
- } else if (thread_local_.frame_drop_mode_ ==
- FRAME_DROPPED_IN_IC_CALL) {
- // We must have been calling IC stub. Do not go there anymore.
- Code* plain_return = isolate_->builtins()->builtin(
- Builtins::kPlainReturn_LiveEdit);
- thread_local_.after_break_target_ = plain_return->entry();
- } else if (thread_local_.frame_drop_mode_ ==
- FRAME_DROPPED_IN_DEBUG_SLOT_CALL) {
- // Debug break slot stub does not return normally, instead it manually
- // cleans the stack and jumps. We should patch the jump address.
- Code* plain_return = isolate_->builtins()->builtin(
- Builtins::kFrameDropper_LiveEdit);
- thread_local_.after_break_target_ = plain_return->entry();
- } else if (thread_local_.frame_drop_mode_ ==
- FRAME_DROPPED_IN_DIRECT_CALL) {
- // Nothing to do, after_break_target is not used here.
- } else if (thread_local_.frame_drop_mode_ ==
- FRAME_DROPPED_IN_RETURN_CALL) {
- Code* plain_return = isolate_->builtins()->builtin(
- Builtins::kFrameDropper_LiveEdit);
- thread_local_.after_break_target_ = plain_return->entry();
- } else {
- UNREACHABLE();
- }
-
- return heap->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Object*, Debug_Break) {
- return isolate->debug()->Break(args);
-}
-
-
-// Check the break point objects for whether one or more are actually
-// triggered. This function returns a JSArray with the break point objects
-// which is triggered.
-Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
- Factory* factory = isolate_->factory();
-
- // Count the number of break points hit. If there are multiple break points
- // they are in a FixedArray.
- Handle<FixedArray> break_points_hit;
- int break_points_hit_count = 0;
- ASSERT(!break_point_objects->IsUndefined());
- if (break_point_objects->IsFixedArray()) {
- Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
- break_points_hit = factory->NewFixedArray(array->length());
- for (int i = 0; i < array->length(); i++) {
- Handle<Object> o(array->get(i), isolate_);
- if (CheckBreakPoint(o)) {
- break_points_hit->set(break_points_hit_count++, *o);
- }
- }
- } else {
- break_points_hit = factory->NewFixedArray(1);
- if (CheckBreakPoint(break_point_objects)) {
- break_points_hit->set(break_points_hit_count++, *break_point_objects);
- }
- }
-
- // Return undefined if no break points were triggered.
- if (break_points_hit_count == 0) {
- return factory->undefined_value();
- }
- // Return break points hit as a JSArray.
- Handle<JSArray> result = factory->NewJSArrayWithElements(break_points_hit);
- result->set_length(Smi::FromInt(break_points_hit_count));
- return result;
-}
-
-
-// Check whether a single break point object is triggered.
-bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
- Factory* factory = isolate_->factory();
- HandleScope scope(isolate_);
-
- // Ignore check if break point object is not a JSObject.
- if (!break_point_object->IsJSObject()) return true;
-
- // Get the function IsBreakPointTriggered (defined in debug-debugger.js).
- Handle<String> is_break_point_triggered_string =
- factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("IsBreakPointTriggered"));
- Handle<JSFunction> check_break_point =
- Handle<JSFunction>(JSFunction::cast(
- debug_context()->global_object()->GetPropertyNoExceptionThrown(
- *is_break_point_triggered_string)));
-
- // Get the break id as an object.
- Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
-
- // Call HandleBreakPointx.
- bool caught_exception;
- Handle<Object> argv[] = { break_id, break_point_object };
- Handle<Object> result = Execution::TryCall(check_break_point,
- isolate_->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
-
- // If exception or non boolean result handle as not triggered
- if (caught_exception || !result->IsBoolean()) {
- return false;
- }
-
- // Return whether the break point is triggered.
- ASSERT(!result.is_null());
- return (*result)->IsTrue();
-}
-
-
-// Check whether the function has debug information.
-bool Debug::HasDebugInfo(Handle<SharedFunctionInfo> shared) {
- return !shared->debug_info()->IsUndefined();
-}
-
-
-// Return the debug info for this function. EnsureDebugInfo must be called
-// prior to ensure the debug info has been generated for shared.
-Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
- ASSERT(HasDebugInfo(shared));
- return Handle<DebugInfo>(DebugInfo::cast(shared->debug_info()));
-}
-
-
-void Debug::SetBreakPoint(Handle<JSFunction> function,
- Handle<Object> break_point_object,
- int* source_position) {
- HandleScope scope(isolate_);
-
- PrepareForBreakPoints();
-
- // Make sure the function is compiled and has set up the debug info.
- Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared, function)) {
- // Return if retrieving debug info failed.
- return;
- }
-
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
- // Source positions starts with zero.
- ASSERT(*source_position >= 0);
-
- // Find the break point and change it.
- BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(*source_position);
- it.SetBreakPoint(break_point_object);
-
- *source_position = it.position();
-
- // At least one active break point now.
- ASSERT(debug_info->GetBreakPointCount() > 0);
-}
-
-
-bool Debug::SetBreakPointForScript(Handle<Script> script,
- Handle<Object> break_point_object,
- int* source_position) {
- HandleScope scope(isolate_);
-
- PrepareForBreakPoints();
-
- // Obtain shared function info for the function.
- Object* result = FindSharedFunctionInfoInScript(script, *source_position);
- if (result->IsUndefined()) return false;
-
- // Make sure the function has set up the debug info.
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
- if (!EnsureDebugInfo(shared, Handle<JSFunction>::null())) {
- // Return if retrieving debug info failed.
- return false;
- }
-
- // Find position within function. The script position might be before the
- // source position of the first function.
- int position;
- if (shared->start_position() > *source_position) {
- position = 0;
- } else {
- position = *source_position - shared->start_position();
- }
-
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
- // Source positions starts with zero.
- ASSERT(position >= 0);
-
- // Find the break point and change it.
- BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(position);
- it.SetBreakPoint(break_point_object);
-
- *source_position = it.position() + shared->start_position();
-
- // At least one active break point now.
- ASSERT(debug_info->GetBreakPointCount() > 0);
- return true;
-}
-
-
-void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
- HandleScope scope(isolate_);
-
- DebugInfoListNode* node = debug_info_list_;
- while (node != NULL) {
- Object* result = DebugInfo::FindBreakPointInfo(node->debug_info(),
- break_point_object);
- if (!result->IsUndefined()) {
- // Get information in the break point.
- BreakPointInfo* break_point_info = BreakPointInfo::cast(result);
- Handle<DebugInfo> debug_info = node->debug_info();
- Handle<SharedFunctionInfo> shared(debug_info->shared());
- int source_position = break_point_info->statement_position()->value();
-
- // Source positions starts with zero.
- ASSERT(source_position >= 0);
-
- // Find the break point and clear it.
- BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(source_position);
- it.ClearBreakPoint(break_point_object);
-
- // If there are no more break points left remove the debug info for this
- // function.
- if (debug_info->GetBreakPointCount() == 0) {
- RemoveDebugInfo(debug_info);
- }
-
- return;
- }
- node = node->next();
- }
-}
-
-
-void Debug::ClearAllBreakPoints() {
- DebugInfoListNode* node = debug_info_list_;
- while (node != NULL) {
- // Remove all debug break code.
- BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
- it.ClearAllDebugBreak();
- node = node->next();
- }
-
- // Remove all debug info.
- while (debug_info_list_ != NULL) {
- RemoveDebugInfo(debug_info_list_->debug_info());
- }
-}
-
-
-void Debug::FloodWithOneShot(Handle<JSFunction> function) {
- PrepareForBreakPoints();
-
- // Make sure the function is compiled and has set up the debug info.
- Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared, function)) {
- // Return if we failed to retrieve the debug info.
- return;
- }
-
- // Flood the function with break points.
- BreakLocationIterator it(GetDebugInfo(shared), ALL_BREAK_LOCATIONS);
- while (!it.Done()) {
- it.SetOneShot();
- it.Next();
- }
-}
-
-
-void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
- Handle<FixedArray> new_bindings(function->function_bindings());
- Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex),
- isolate_);
-
- if (!bindee.is_null() && bindee->IsJSFunction() &&
- !JSFunction::cast(*bindee)->IsBuiltin()) {
- Handle<JSFunction> bindee_function(JSFunction::cast(*bindee));
- Debug::FloodWithOneShot(bindee_function);
- }
-}
-
-
-void Debug::FloodHandlerWithOneShot() {
- // Iterate through the JavaScript stack looking for handlers.
- StackFrame::Id id = break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there is no JavaScript stack don't do anything.
- return;
- }
- for (JavaScriptFrameIterator it(isolate_, id); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->HasHandler()) {
- // Flood the function with the catch block with break points
- JSFunction* function = JSFunction::cast(frame->function());
- FloodWithOneShot(Handle<JSFunction>(function));
- return;
- }
- }
-}
-
-
-void Debug::ChangeBreakOnException(ExceptionBreakType type, bool enable) {
- if (type == BreakUncaughtException) {
- break_on_uncaught_exception_ = enable;
- } else {
- break_on_exception_ = enable;
- }
-}
-
-
-bool Debug::IsBreakOnException(ExceptionBreakType type) {
- if (type == BreakUncaughtException) {
- return break_on_uncaught_exception_;
- } else {
- return break_on_exception_;
- }
-}
-
-
-void Debug::PrepareStep(StepAction step_action, int step_count) {
- HandleScope scope(isolate_);
-
- PrepareForBreakPoints();
-
- ASSERT(Debug::InDebugger());
-
- // Remember this step action and count.
- thread_local_.last_step_action_ = step_action;
- if (step_action == StepOut) {
- // For step out target frame will be found on the stack so there is no need
- // to set step counter for it. It's expected to always be 0 for StepOut.
- thread_local_.step_count_ = 0;
- } else {
- thread_local_.step_count_ = step_count;
- }
-
- // Get the frame where the execution has stopped and skip the debug frame if
- // any. The debug frame will only be present if execution was stopped due to
- // hitting a break point. In other situations (e.g. unhandled exception) the
- // debug frame is not present.
- StackFrame::Id id = break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there is no JavaScript stack don't do anything.
- return;
- }
- JavaScriptFrameIterator frames_it(isolate_, id);
- JavaScriptFrame* frame = frames_it.frame();
-
- // First of all ensure there is one-shot break points in the top handler
- // if any.
- FloodHandlerWithOneShot();
-
- // If the function on the top frame is unresolved perform step out. This will
- // be the case when calling unknown functions and having the debugger stopped
- // in an unhandled exception.
- if (!frame->function()->IsJSFunction()) {
- // Step out: Find the calling JavaScript frame and flood it with
- // breakpoints.
- frames_it.Advance();
- // Fill the function to return to with one-shot break points.
- JSFunction* function = JSFunction::cast(frames_it.frame()->function());
- FloodWithOneShot(Handle<JSFunction>(function));
- return;
- }
-
- // Get the debug info (create it if it does not exist).
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
- Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared, function)) {
- // Return if ensuring debug info failed.
- return;
- }
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
-
- // Find the break location where execution has stopped.
- BreakLocationIterator it(debug_info, ALL_BREAK_LOCATIONS);
- it.FindBreakLocationFromAddress(frame->pc());
-
- // Compute whether or not the target is a call target.
- bool is_load_or_store = false;
- bool is_inline_cache_stub = false;
- bool is_at_restarted_function = false;
- Handle<Code> call_function_stub;
-
- if (thread_local_.restarter_frame_function_pointer_ == NULL) {
- if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
- bool is_call_target = false;
- Address target = it.rinfo()->target_address();
- Code* code = Code::GetCodeFromTargetAddress(target);
- if (code->is_call_stub() || code->is_keyed_call_stub()) {
- is_call_target = true;
- }
- if (code->is_inline_cache_stub()) {
- is_inline_cache_stub = true;
- is_load_or_store = !is_call_target;
- }
-
- // Check if target code is CallFunction stub.
- Code* maybe_call_function_stub = code;
- // If there is a breakpoint at this line look at the original code to
- // check if it is a CallFunction stub.
- if (it.IsDebugBreak()) {
- Address original_target = it.original_rinfo()->target_address();
- maybe_call_function_stub =
- Code::GetCodeFromTargetAddress(original_target);
- }
- if (maybe_call_function_stub->kind() == Code::STUB &&
- maybe_call_function_stub->major_key() == CodeStub::CallFunction) {
- // Save reference to the code as we may need it to find out arguments
- // count for 'step in' later.
- call_function_stub = Handle<Code>(maybe_call_function_stub);
- }
- }
- } else {
- is_at_restarted_function = true;
- }
-
- // If this is the last break code target step out is the only possibility.
- if (it.IsExit() || step_action == StepOut) {
- if (step_action == StepOut) {
- // Skip step_count frames starting with the current one.
- while (step_count-- > 0 && !frames_it.done()) {
- frames_it.Advance();
- }
- } else {
- ASSERT(it.IsExit());
- frames_it.Advance();
- }
- // Skip builtin functions on the stack.
- while (!frames_it.done() &&
- JSFunction::cast(frames_it.frame()->function())->IsBuiltin()) {
- frames_it.Advance();
- }
- // Step out: If there is a JavaScript caller frame, we need to
- // flood it with breakpoints.
- if (!frames_it.done()) {
- // Fill the function to return to with one-shot break points.
- JSFunction* function = JSFunction::cast(frames_it.frame()->function());
- FloodWithOneShot(Handle<JSFunction>(function));
- // Set target frame pointer.
- ActivateStepOut(frames_it.frame());
- }
- } else if (!(is_inline_cache_stub || RelocInfo::IsConstructCall(it.rmode()) ||
- !call_function_stub.is_null() || is_at_restarted_function)
- || step_action == StepNext || step_action == StepMin) {
- // Step next or step min.
-
- // Fill the current function with one-shot break points.
- FloodWithOneShot(function);
-
- // Remember source position and frame to handle step next.
- thread_local_.last_statement_position_ =
- debug_info->code()->SourceStatementPosition(frame->pc());
- thread_local_.last_fp_ = frame->UnpaddedFP();
- } else {
- // If there's restarter frame on top of the stack, just get the pointer
- // to function which is going to be restarted.
- if (is_at_restarted_function) {
- Handle<JSFunction> restarted_function(
- JSFunction::cast(*thread_local_.restarter_frame_function_pointer_));
- FloodWithOneShot(restarted_function);
- } else if (!call_function_stub.is_null()) {
- // If it's CallFunction stub ensure target function is compiled and flood
- // it with one shot breakpoints.
-
- // Find out number of arguments from the stub minor key.
- // Reverse lookup required as the minor key cannot be retrieved
- // from the code object.
- Handle<Object> obj(
- isolate_->heap()->code_stubs()->SlowReverseLookup(
- *call_function_stub),
- isolate_);
- ASSERT(!obj.is_null());
- ASSERT(!(*obj)->IsUndefined());
- ASSERT(obj->IsSmi());
- // Get the STUB key and extract major and minor key.
- uint32_t key = Smi::cast(*obj)->value();
- // Argc in the stub is the number of arguments passed - not the
- // expected arguments of the called function.
- int call_function_arg_count =
- CallFunctionStub::ExtractArgcFromMinorKey(
- CodeStub::MinorKeyFromKey(key));
- ASSERT(call_function_stub->major_key() ==
- CodeStub::MajorKeyFromKey(key));
-
- // Find target function on the expression stack.
- // Expression stack looks like this (top to bottom):
- // argN
- // ...
- // arg0
- // Receiver
- // Function to call
- int expressions_count = frame->ComputeExpressionsCount();
- ASSERT(expressions_count - 2 - call_function_arg_count >= 0);
- Object* fun = frame->GetExpression(
- expressions_count - 2 - call_function_arg_count);
- if (fun->IsJSFunction()) {
- Handle<JSFunction> js_function(JSFunction::cast(fun));
- if (js_function->shared()->bound()) {
- Debug::FloodBoundFunctionWithOneShot(js_function);
- } else if (!js_function->IsBuiltin()) {
- // Don't step into builtins.
- // It will also compile target function if it's not compiled yet.
- FloodWithOneShot(js_function);
- }
- }
- }
-
- // Fill the current function with one-shot break points even for step in on
- // a call target as the function called might be a native function for
- // which step in will not stop. It also prepares for stepping in
- // getters/setters.
- FloodWithOneShot(function);
-
- if (is_load_or_store) {
- // Remember source position and frame to handle step in getter/setter. If
- // there is a custom getter/setter it will be handled in
- // Object::Get/SetPropertyWithCallback, otherwise the step action will be
- // propagated on the next Debug::Break.
- thread_local_.last_statement_position_ =
- debug_info->code()->SourceStatementPosition(frame->pc());
- thread_local_.last_fp_ = frame->UnpaddedFP();
- }
-
- // Step in or Step in min
- it.PrepareStepIn(isolate_);
- ActivateStepIn(frame);
- }
-}
-
-
-// Check whether the current debug break should be reported to the debugger. It
-// is used to have step next and step in only report break back to the debugger
-// if on a different frame or in a different statement. In some situations
-// there will be several break points in the same statement when the code is
-// flooded with one-shot break points. This function helps to perform several
-// steps before reporting break back to the debugger.
-bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
- JavaScriptFrame* frame) {
- // StepNext and StepOut shouldn't bring us deeper in code, so last frame
- // shouldn't be a parent of current frame.
- if (thread_local_.last_step_action_ == StepNext ||
- thread_local_.last_step_action_ == StepOut) {
- if (frame->fp() < thread_local_.last_fp_) return true;
- }
-
- // If the step last action was step next or step in make sure that a new
- // statement is hit.
- if (thread_local_.last_step_action_ == StepNext ||
- thread_local_.last_step_action_ == StepIn) {
- // Never continue if returning from function.
- if (break_location_iterator->IsExit()) return false;
-
- // Continue if we are still on the same frame and in the same statement.
- int current_statement_position =
- break_location_iterator->code()->SourceStatementPosition(frame->pc());
- return thread_local_.last_fp_ == frame->UnpaddedFP() &&
- thread_local_.last_statement_position_ == current_statement_position;
- }
-
- // No step next action - don't continue.
- return false;
-}
-
-
-// Check whether the code object at the specified address is a debug break code
-// object.
-bool Debug::IsDebugBreak(Address addr) {
- Code* code = Code::GetCodeFromTargetAddress(addr);
- return code->is_debug_break();
-}
-
-
-// Check whether a code stub with the specified major key is a possible break
-// point location when looking for source break locations.
-bool Debug::IsSourceBreakStub(Code* code) {
- CodeStub::Major major_key = CodeStub::GetMajorKey(code);
- return major_key == CodeStub::CallFunction;
-}
-
-
-// Check whether a code stub with the specified major key is a possible break
-// location.
-bool Debug::IsBreakStub(Code* code) {
- CodeStub::Major major_key = CodeStub::GetMajorKey(code);
- return major_key == CodeStub::CallFunction;
-}
-
-
-// Find the builtin to use for invoking the debug break
-Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
- Isolate* isolate = Isolate::Current();
-
- // Find the builtin debug break function matching the calling convention
- // used by the call site.
- if (code->is_inline_cache_stub()) {
- switch (code->kind()) {
- case Code::CALL_IC:
- case Code::KEYED_CALL_IC:
- return isolate->stub_cache()->ComputeCallDebugBreak(
- code->arguments_count(), code->kind());
-
- case Code::LOAD_IC:
- return isolate->builtins()->LoadIC_DebugBreak();
-
- case Code::STORE_IC:
- return isolate->builtins()->StoreIC_DebugBreak();
-
- case Code::KEYED_LOAD_IC:
- return isolate->builtins()->KeyedLoadIC_DebugBreak();
-
- case Code::KEYED_STORE_IC:
- return isolate->builtins()->KeyedStoreIC_DebugBreak();
-
- default:
- UNREACHABLE();
- }
- }
- if (RelocInfo::IsConstructCall(mode)) {
- if (code->has_function_cache()) {
- return isolate->builtins()->CallConstructStub_Recording_DebugBreak();
- } else {
- return isolate->builtins()->CallConstructStub_DebugBreak();
- }
- }
- if (code->kind() == Code::STUB) {
- ASSERT(code->major_key() == CodeStub::CallFunction);
- if (code->has_function_cache()) {
- return isolate->builtins()->CallFunctionStub_Recording_DebugBreak();
- } else {
- return isolate->builtins()->CallFunctionStub_DebugBreak();
- }
- }
-
- UNREACHABLE();
- return Handle<Code>::null();
-}
-
-
-// Simple function for returning the source positions for active break points.
-Handle<Object> Debug::GetSourceBreakLocations(
- Handle<SharedFunctionInfo> shared) {
- Isolate* isolate = Isolate::Current();
- Heap* heap = isolate->heap();
- if (!HasDebugInfo(shared)) {
- return Handle<Object>(heap->undefined_value(), isolate);
- }
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
- if (debug_info->GetBreakPointCount() == 0) {
- return Handle<Object>(heap->undefined_value(), isolate);
- }
- Handle<FixedArray> locations =
- isolate->factory()->NewFixedArray(debug_info->GetBreakPointCount());
- int count = 0;
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (!debug_info->break_points()->get(i)->IsUndefined()) {
- BreakPointInfo* break_point_info =
- BreakPointInfo::cast(debug_info->break_points()->get(i));
- if (break_point_info->GetBreakPointCount() > 0) {
- locations->set(count++, break_point_info->statement_position());
- }
- }
- }
- return locations;
-}
-
-
-void Debug::NewBreak(StackFrame::Id break_frame_id) {
- thread_local_.break_frame_id_ = break_frame_id;
- thread_local_.break_id_ = ++thread_local_.break_count_;
-}
-
-
-void Debug::SetBreak(StackFrame::Id break_frame_id, int break_id) {
- thread_local_.break_frame_id_ = break_frame_id;
- thread_local_.break_id_ = break_id;
-}
-
-
-// Handle stepping into a function.
-void Debug::HandleStepIn(Handle<JSFunction> function,
- Handle<Object> holder,
- Address fp,
- bool is_constructor) {
- Isolate* isolate = function->GetIsolate();
- // If the frame pointer is not supplied by the caller find it.
- if (fp == 0) {
- StackFrameIterator it(isolate);
- it.Advance();
- // For constructor functions skip another frame.
- if (is_constructor) {
- ASSERT(it.frame()->is_construct());
- it.Advance();
- }
- fp = it.frame()->fp();
- }
-
- // Flood the function with one-shot break points if it is called from where
- // step into was requested.
- if (fp == step_in_fp()) {
- if (function->shared()->bound()) {
- // Handle Function.prototype.bind
- Debug::FloodBoundFunctionWithOneShot(function);
- } else if (!function->IsBuiltin()) {
- // Don't allow step into functions in the native context.
- if (function->shared()->code() ==
- isolate->builtins()->builtin(Builtins::kFunctionApply) ||
- function->shared()->code() ==
- isolate->builtins()->builtin(Builtins::kFunctionCall)) {
- // Handle function.apply and function.call separately to flood the
- // function to be called and not the code for Builtins::FunctionApply or
- // Builtins::FunctionCall. The receiver of call/apply is the target
- // function.
- if (!holder.is_null() && holder->IsJSFunction() &&
- !JSFunction::cast(*holder)->IsBuiltin()) {
- Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
- Debug::FloodWithOneShot(js_function);
- }
- } else {
- Debug::FloodWithOneShot(function);
- }
- }
- }
-}
-
-
-void Debug::ClearStepping() {
- // Clear the various stepping setup.
- ClearOneShot();
- ClearStepIn();
- ClearStepOut();
- ClearStepNext();
-
- // Clear multiple step counter.
- thread_local_.step_count_ = 0;
-}
-
-// Clears all the one-shot break points that are currently set. Normally this
-// function is called each time a break point is hit as one shot break points
-// are used to support stepping.
-void Debug::ClearOneShot() {
- // The current implementation just runs through all the breakpoints. When the
- // last break point for a function is removed that function is automatically
- // removed from the list.
-
- DebugInfoListNode* node = debug_info_list_;
- while (node != NULL) {
- BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
- while (!it.Done()) {
- it.ClearOneShot();
- it.Next();
- }
- node = node->next();
- }
-}
-
-
-void Debug::ActivateStepIn(StackFrame* frame) {
- ASSERT(!StepOutActive());
- thread_local_.step_into_fp_ = frame->UnpaddedFP();
-}
-
-
-void Debug::ClearStepIn() {
- thread_local_.step_into_fp_ = 0;
-}
-
-
-void Debug::ActivateStepOut(StackFrame* frame) {
- ASSERT(!StepInActive());
- thread_local_.step_out_fp_ = frame->UnpaddedFP();
-}
-
-
-void Debug::ClearStepOut() {
- thread_local_.step_out_fp_ = 0;
-}
-
-
-void Debug::ClearStepNext() {
- thread_local_.last_step_action_ = StepNone;
- thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
- thread_local_.last_fp_ = 0;
-}
-
-
-// Helper function to compile full code for debugging. This code will
-// have debug break slots and deoptimization information. Deoptimization
-// information is required in case that an optimized version of this
-// function is still activated on the stack. It will also make sure that
-// the full code is compiled with the same flags as the previous version,
-// that is flags which can change the code generated. The current method
-// of mapping from already compiled full code without debug break slots
-// to full code with debug break slots depends on the generated code is
-// otherwise exactly the same.
-static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
- Handle<Code> current_code) {
- ASSERT(!current_code->has_debug_break_slots());
-
- CompilationInfoWithZone info(function);
- info.MarkCompilingForDebugging(current_code);
- ASSERT(!info.shared_info()->is_compiled());
- ASSERT(!info.isolate()->has_pending_exception());
-
- // Use compile lazy which will end up compiling the full code in the
- // configuration configured above.
- bool result = Compiler::CompileLazy(&info);
- ASSERT(result != Isolate::Current()->has_pending_exception());
- info.isolate()->clear_pending_exception();
-#if DEBUG
- if (result) {
- Handle<Code> new_code(function->shared()->code());
- ASSERT(new_code->has_debug_break_slots());
- ASSERT(current_code->is_compiled_optimizable() ==
- new_code->is_compiled_optimizable());
- }
-#endif
- return result;
-}
-
-
-static void CollectActiveFunctionsFromThread(
- Isolate* isolate,
- ThreadLocalTop* top,
- List<Handle<JSFunction> >* active_functions,
- Object* active_code_marker) {
- // Find all non-optimized code functions with activation frames
- // on the stack. This includes functions which have optimized
- // activations (including inlined functions) on the stack as the
- // non-optimized code is needed for the lazy deoptimization.
- for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized()) {
- List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
- frame->GetFunctions(&functions);
- for (int i = 0; i < functions.length(); i++) {
- JSFunction* function = functions[i];
- active_functions->Add(Handle<JSFunction>(function));
- function->shared()->code()->set_gc_metadata(active_code_marker);
- }
- } else if (frame->function()->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(frame->function());
- ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
- active_functions->Add(Handle<JSFunction>(function));
- function->shared()->code()->set_gc_metadata(active_code_marker);
- }
- }
-}
-
-
-static void RedirectActivationsToRecompiledCodeOnThread(
- Isolate* isolate,
- ThreadLocalTop* top) {
- for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
-
- if (frame->is_optimized() || !frame->function()->IsJSFunction()) continue;
-
- JSFunction* function = JSFunction::cast(frame->function());
-
- ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
-
- Handle<Code> frame_code(frame->LookupCode());
- if (frame_code->has_debug_break_slots()) continue;
-
- Handle<Code> new_code(function->shared()->code());
- if (new_code->kind() != Code::FUNCTION ||
- !new_code->has_debug_break_slots()) {
- continue;
- }
-
- // Iterate over the RelocInfo in the original code to compute the sum of the
- // constant pools sizes. (See Assembler::CheckConstPool())
- // Note that this is only useful for architectures using constant pools.
- int constpool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL);
- int frame_const_pool_size = 0;
- for (RelocIterator it(*frame_code, constpool_mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->pc() >= frame->pc()) break;
- frame_const_pool_size += static_cast<int>(info->data());
- }
- intptr_t frame_offset =
- frame->pc() - frame_code->instruction_start() - frame_const_pool_size;
-
- // Iterate over the RelocInfo for new code to find the number of bytes
- // generated for debug slots and constant pools.
- int debug_break_slot_bytes = 0;
- int new_code_const_pool_size = 0;
- int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::CONST_POOL);
- for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
- // Check if the pc in the new code with debug break
- // slots is before this slot.
- RelocInfo* info = it.rinfo();
- intptr_t new_offset = info->pc() - new_code->instruction_start() -
- new_code_const_pool_size - debug_break_slot_bytes;
- if (new_offset >= frame_offset) {
- break;
- }
-
- if (RelocInfo::IsDebugBreakSlot(info->rmode())) {
- debug_break_slot_bytes += Assembler::kDebugBreakSlotLength;
- } else {
- ASSERT(RelocInfo::IsConstPool(info->rmode()));
- // The size of the constant pool is encoded in the data.
- new_code_const_pool_size += static_cast<int>(info->data());
- }
- }
-
- // Compute the equivalent pc in the new code.
- byte* new_pc = new_code->instruction_start() + frame_offset +
- debug_break_slot_bytes + new_code_const_pool_size;
-
- if (FLAG_trace_deopt) {
- PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
- "with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
- "for debugging, "
- "changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
- reinterpret_cast<intptr_t>(
- frame_code->instruction_start()),
- reinterpret_cast<intptr_t>(
- frame_code->instruction_start()) +
- frame_code->instruction_size(),
- frame_code->instruction_size(),
- reinterpret_cast<intptr_t>(new_code->instruction_start()),
- reinterpret_cast<intptr_t>(new_code->instruction_start()) +
- new_code->instruction_size(),
- new_code->instruction_size(),
- reinterpret_cast<intptr_t>(frame->pc()),
- reinterpret_cast<intptr_t>(new_pc));
- }
-
- // Patch the return address to return into the code with
- // debug break slots.
- frame->set_pc(new_pc);
- }
-}
-
-
-class ActiveFunctionsCollector : public ThreadVisitor {
- public:
- explicit ActiveFunctionsCollector(List<Handle<JSFunction> >* active_functions,
- Object* active_code_marker)
- : active_functions_(active_functions),
- active_code_marker_(active_code_marker) { }
-
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- CollectActiveFunctionsFromThread(isolate,
- top,
- active_functions_,
- active_code_marker_);
- }
-
- private:
- List<Handle<JSFunction> >* active_functions_;
- Object* active_code_marker_;
-};
-
-
-class ActiveFunctionsRedirector : public ThreadVisitor {
- public:
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- RedirectActivationsToRecompiledCodeOnThread(isolate, top);
- }
-};
-
-
-void Debug::PrepareForBreakPoints() {
- // If preparing for the first break point make sure to deoptimize all
- // functions as debugging does not work with optimized code.
- if (!has_break_points_) {
- Deoptimizer::DeoptimizeAll();
-
- Handle<Code> lazy_compile =
- Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
-
- // There will be at least one break point when we are done.
- has_break_points_ = true;
-
- // Keep the list of activated functions in a handlified list as it
- // is used both in GC and non-GC code.
- List<Handle<JSFunction> > active_functions(100);
-
- {
- // We are going to iterate heap to find all functions without
- // debug break slots.
- Heap* heap = isolate_->heap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "preparing for breakpoints");
-
- // Ensure no GC in this scope as we are going to use gc_metadata
- // field in the Code object to mark active functions.
- AssertNoAllocation no_allocation;
-
- Object* active_code_marker = heap->the_hole_value();
-
- CollectActiveFunctionsFromThread(isolate_,
- isolate_->thread_local_top(),
- &active_functions,
- active_code_marker);
- ActiveFunctionsCollector active_functions_collector(&active_functions,
- active_code_marker);
- isolate_->thread_manager()->IterateArchivedThreads(
- &active_functions_collector);
-
- // Scan the heap for all non-optimized functions which have no
- // debug break slots and are not active or inlined into an active
- // function and mark them for lazy compilation.
- HeapIterator iterator(heap);
- HeapObject* obj = NULL;
- while (((obj = iterator.next()) != NULL)) {
- if (obj->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(obj);
- SharedFunctionInfo* shared = function->shared();
- if (shared->allows_lazy_compilation() &&
- shared->script()->IsScript() &&
- function->code()->kind() == Code::FUNCTION &&
- !function->code()->has_debug_break_slots() &&
- shared->code()->gc_metadata() != active_code_marker) {
- function->set_code(*lazy_compile);
- function->shared()->set_code(*lazy_compile);
- }
- }
- }
-
- // Clear gc_metadata field.
- for (int i = 0; i < active_functions.length(); i++) {
- Handle<JSFunction> function = active_functions[i];
- function->shared()->code()->set_gc_metadata(Smi::FromInt(0));
- }
- }
-
- // Now recompile all functions with activation frames and and
- // patch the return address to run in the new compiled code.
- for (int i = 0; i < active_functions.length(); i++) {
- Handle<JSFunction> function = active_functions[i];
- Handle<SharedFunctionInfo> shared(function->shared());
-
- if (function->code()->kind() == Code::FUNCTION &&
- function->code()->has_debug_break_slots()) {
- // Nothing to do. Function code already had debug break slots.
- continue;
- }
-
- // If recompilation is not possible just skip it.
- if (shared->is_toplevel() ||
- !shared->allows_lazy_compilation() ||
- shared->code()->kind() == Code::BUILTIN) {
- continue;
- }
-
- // Make sure that the shared full code is compiled with debug
- // break slots.
- if (!shared->code()->has_debug_break_slots()) {
- // Try to compile the full code with debug break slots. If it
- // fails just keep the current code.
- Handle<Code> current_code(function->shared()->code());
- shared->set_code(*lazy_compile);
- bool prev_force_debugger_active =
- isolate_->debugger()->force_debugger_active();
- isolate_->debugger()->set_force_debugger_active(true);
- ASSERT(current_code->kind() == Code::FUNCTION);
- CompileFullCodeForDebugging(function, current_code);
- isolate_->debugger()->set_force_debugger_active(
- prev_force_debugger_active);
- if (!shared->is_compiled()) {
- shared->set_code(*current_code);
- continue;
- }
- }
-
- // Keep function code in sync with shared function info.
- function->set_code(shared->code());
- }
-
- RedirectActivationsToRecompiledCodeOnThread(isolate_,
- isolate_->thread_local_top());
-
- ActiveFunctionsRedirector active_functions_redirector;
- isolate_->thread_manager()->IterateArchivedThreads(
- &active_functions_redirector);
- }
-}
-
-
-Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
- int position) {
- // Iterate the heap looking for SharedFunctionInfo generated from the
- // script. The inner most SharedFunctionInfo containing the source position
- // for the requested break point is found.
- // NOTE: This might require several heap iterations. If the SharedFunctionInfo
- // which is found is not compiled it is compiled and the heap is iterated
- // again as the compilation might create inner functions from the newly
- // compiled function and the actual requested break point might be in one of
- // these functions.
- // NOTE: The below fix-point iteration depends on all functions that cannot be
- // compiled lazily without a context to not be compiled at all. Compilation
- // will be triggered at points where we do not need a context.
- bool done = false;
- // The current candidate for the source position:
- int target_start_position = RelocInfo::kNoPosition;
- Handle<JSFunction> target_function;
- Handle<SharedFunctionInfo> target;
- Heap* heap = isolate_->heap();
- while (!done) {
- { // Extra scope for iterator and no-allocation.
- heap->EnsureHeapIsIterable();
- AssertNoAllocation no_alloc_during_heap_iteration;
- HeapIterator iterator(heap);
- for (HeapObject* obj = iterator.next();
- obj != NULL; obj = iterator.next()) {
- bool found_next_candidate = false;
- Handle<JSFunction> function;
- Handle<SharedFunctionInfo> shared;
- if (obj->IsJSFunction()) {
- function = Handle<JSFunction>(JSFunction::cast(obj));
- shared = Handle<SharedFunctionInfo>(function->shared());
- ASSERT(shared->allows_lazy_compilation() || shared->is_compiled());
- found_next_candidate = true;
- } else if (obj->IsSharedFunctionInfo()) {
- shared = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(obj));
- // Skip functions that we cannot compile lazily without a context,
- // which is not available here, because there is no closure.
- found_next_candidate = shared->is_compiled() ||
- shared->allows_lazy_compilation_without_context();
- }
- if (!found_next_candidate) continue;
- if (shared->script() == *script) {
- // If the SharedFunctionInfo found has the requested script data and
- // contains the source position it is a candidate.
- int start_position = shared->function_token_position();
- if (start_position == RelocInfo::kNoPosition) {
- start_position = shared->start_position();
- }
- if (start_position <= position &&
- position <= shared->end_position()) {
- // If there is no candidate or this function is within the current
- // candidate this is the new candidate.
- if (target.is_null()) {
- target_start_position = start_position;
- target_function = function;
- target = shared;
- } else {
- if (target_start_position == start_position &&
- shared->end_position() == target->end_position()) {
- // If a top-level function contains only one function
- // declaration the source for the top-level and the function
- // is the same. In that case prefer the non top-level function.
- if (!shared->is_toplevel()) {
- target_start_position = start_position;
- target_function = function;
- target = shared;
- }
- } else if (target_start_position <= start_position &&
- shared->end_position() <= target->end_position()) {
- // This containment check includes equality as a function
- // inside a top-level function can share either start or end
- // position with the top-level function.
- target_start_position = start_position;
- target_function = function;
- target = shared;
- }
- }
- }
- }
- } // End for loop.
- } // End no-allocation scope.
-
- if (target.is_null()) return heap->undefined_value();
-
- // There will be at least one break point when we are done.
- has_break_points_ = true;
-
- // If the candidate found is compiled we are done.
- done = target->is_compiled();
- if (!done) {
- // If the candidate is not compiled, compile it to reveal any inner
- // functions which might contain the requested source position. This
- // will compile all inner functions that cannot be compiled without a
- // context, because Compiler::BuildFunctionInfo checks whether the
- // debugger is active.
- if (target_function.is_null()) {
- SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
- } else {
- JSFunction::CompileLazy(target_function, KEEP_EXCEPTION);
- }
- }
- } // End while loop.
-
- return *target;
-}
-
-
-// Ensures the debug information is present for shared.
-bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> function) {
- // Return if we already have the debug info for shared.
- if (HasDebugInfo(shared)) {
- ASSERT(shared->is_compiled());
- return true;
- }
-
- // There will be at least one break point when we are done.
- has_break_points_ = true;
-
- // Ensure function is compiled. Return false if this failed.
- if (!function.is_null() &&
- !JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION)) {
- return false;
- }
-
- // Create the debug info object.
- Handle<DebugInfo> debug_info = FACTORY->NewDebugInfo(shared);
-
- // Add debug info to the list.
- DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
- node->set_next(debug_info_list_);
- debug_info_list_ = node;
-
- return true;
-}
-
-
-void Debug::RemoveDebugInfo(Handle<DebugInfo> debug_info) {
- ASSERT(debug_info_list_ != NULL);
- // Run through the debug info objects to find this one and remove it.
- DebugInfoListNode* prev = NULL;
- DebugInfoListNode* current = debug_info_list_;
- while (current != NULL) {
- if (*current->debug_info() == *debug_info) {
- // Unlink from list. If prev is NULL we are looking at the first element.
- if (prev == NULL) {
- debug_info_list_ = current->next();
- } else {
- prev->set_next(current->next());
- }
- current->debug_info()->shared()->set_debug_info(
- isolate_->heap()->undefined_value());
- delete current;
-
- // If there are no more debug info objects there are not more break
- // points.
- has_break_points_ = debug_info_list_ != NULL;
-
- return;
- }
- // Move to next in list.
- prev = current;
- current = current->next();
- }
- UNREACHABLE();
-}
-
-
-void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
- HandleScope scope(isolate_);
-
- PrepareForBreakPoints();
-
- // Get the executing function in which the debug break occurred.
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
- Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared, function)) {
- // Return if we failed to retrieve the debug info.
- return;
- }
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
- Handle<Code> code(debug_info->code());
- Handle<Code> original_code(debug_info->original_code());
-#ifdef DEBUG
- // Get the code which is actually executing.
- Handle<Code> frame_code(frame->LookupCode());
- ASSERT(frame_code.is_identical_to(code));
-#endif
-
- // Find the call address in the running code. This address holds the call to
- // either a DebugBreakXXX or to the debug break return entry code if the
- // break point is still active after processing the break point.
- Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset;
-
- // Check if the location is at JS exit or debug break slot.
- bool at_js_return = false;
- bool break_at_js_return_active = false;
- bool at_debug_break_slot = false;
- RelocIterator it(debug_info->code());
- while (!it.done() && !at_js_return && !at_debug_break_slot) {
- if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) {
- at_js_return = (it.rinfo()->pc() ==
- addr - Assembler::kPatchReturnSequenceAddressOffset);
- break_at_js_return_active = it.rinfo()->IsPatchedReturnSequence();
- }
- if (RelocInfo::IsDebugBreakSlot(it.rinfo()->rmode())) {
- at_debug_break_slot = (it.rinfo()->pc() ==
- addr - Assembler::kPatchDebugBreakSlotAddressOffset);
- }
- it.next();
- }
-
- // Handle the jump to continue execution after break point depending on the
- // break location.
- if (at_js_return) {
- // If the break point as return is still active jump to the corresponding
- // place in the original code. If not the break point was removed during
- // break point processing.
- if (break_at_js_return_active) {
- addr += original_code->instruction_start() - code->instruction_start();
- }
-
- // Move back to where the call instruction sequence started.
- thread_local_.after_break_target_ =
- addr - Assembler::kPatchReturnSequenceAddressOffset;
- } else if (at_debug_break_slot) {
- // Address of where the debug break slot starts.
- addr = addr - Assembler::kPatchDebugBreakSlotAddressOffset;
-
- // Continue just after the slot.
- thread_local_.after_break_target_ = addr + Assembler::kDebugBreakSlotLength;
- } else if (IsDebugBreak(Assembler::target_address_at(addr))) {
- // We now know that there is still a debug break call at the target address,
- // so the break point is still there and the original code will hold the
- // address to jump to in order to complete the call which is replaced by a
- // call to DebugBreakXXX.
-
- // Find the corresponding address in the original code.
- addr += original_code->instruction_start() - code->instruction_start();
-
- // Install jump to the call address in the original code. This will be the
- // call which was overwritten by the call to DebugBreakXXX.
- thread_local_.after_break_target_ = Assembler::target_address_at(addr);
- } else {
- // There is no longer a break point present. Don't try to look in the
- // original code as the running code will have the right address. This takes
- // care of the case where the last break point is removed from the function
- // and therefore no "original code" is available.
- thread_local_.after_break_target_ = Assembler::target_address_at(addr);
- }
-}
-
-
-bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
- HandleScope scope(isolate_);
-
- // If there are no break points this cannot be break at return, as
- // the debugger statement and stack guard bebug break cannot be at
- // return.
- if (!has_break_points_) {
- return false;
- }
-
- PrepareForBreakPoints();
-
- // Get the executing function in which the debug break occurred.
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
- Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared, function)) {
- // Return if we failed to retrieve the debug info.
- return false;
- }
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
- Handle<Code> code(debug_info->code());
-#ifdef DEBUG
- // Get the code which is actually executing.
- Handle<Code> frame_code(frame->LookupCode());
- ASSERT(frame_code.is_identical_to(code));
-#endif
-
- // Find the call address in the running code.
- Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset;
-
- // Check if the location is at JS return.
- RelocIterator it(debug_info->code());
- while (!it.done()) {
- if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) {
- return (it.rinfo()->pc() ==
- addr - Assembler::kPatchReturnSequenceAddressOffset);
- }
- it.next();
- }
- return false;
-}
-
-
-void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- FrameDropMode mode,
- Object** restarter_frame_function_pointer) {
- if (mode != CURRENTLY_SET_MODE) {
- thread_local_.frame_drop_mode_ = mode;
- }
- thread_local_.break_frame_id_ = new_break_frame_id;
- thread_local_.restarter_frame_function_pointer_ =
- restarter_frame_function_pointer;
-}
-
-
-const int Debug::FramePaddingLayout::kInitialSize = 1;
-
-
-// Any even value bigger than kInitialSize as needed for stack scanning.
-const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1;
-
-
-bool Debug::IsDebugGlobal(GlobalObject* global) {
- return IsLoaded() && global == debug_context()->global_object();
-}
-
-
-void Debug::ClearMirrorCache() {
- PostponeInterruptsScope postpone(isolate_);
- HandleScope scope(isolate_);
- ASSERT(isolate_->context() == *Debug::debug_context());
-
- // Clear the mirror cache.
- Handle<String> function_name = isolate_->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("ClearMirrorCache"));
- Handle<Object> fun(
- isolate_->global_object()->GetPropertyNoExceptionThrown(*function_name),
- isolate_);
- ASSERT(fun->IsJSFunction());
- bool caught_exception;
- Execution::TryCall(Handle<JSFunction>::cast(fun),
- Handle<JSObject>(Debug::debug_context()->global_object()),
- 0, NULL, &caught_exception);
-}
-
-
-void Debug::CreateScriptCache() {
- Heap* heap = isolate_->heap();
- HandleScope scope(isolate_);
-
- // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
- // rid of all the cached script wrappers and the second gets rid of the
- // scripts which are no longer referenced. The second also sweeps precisely,
- // which saves us doing yet another GC to make the heap iterable.
- heap->CollectAllGarbage(Heap::kNoGCFlags, "Debug::CreateScriptCache");
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "Debug::CreateScriptCache");
-
- ASSERT(script_cache_ == NULL);
- script_cache_ = new ScriptCache();
-
- // Scan heap for Script objects.
- int count = 0;
- HeapIterator iterator(heap);
- AssertNoAllocation no_allocation;
-
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
- script_cache_->Add(Handle<Script>(Script::cast(obj)));
- count++;
- }
- }
-}
-
-
-void Debug::DestroyScriptCache() {
- // Get rid of the script cache if it was created.
- if (script_cache_ != NULL) {
- delete script_cache_;
- script_cache_ = NULL;
- }
-}
-
-
-void Debug::AddScriptToScriptCache(Handle<Script> script) {
- if (script_cache_ != NULL) {
- script_cache_->Add(script);
- }
-}
-
-
-Handle<FixedArray> Debug::GetLoadedScripts() {
- // Create and fill the script cache when the loaded scripts is requested for
- // the first time.
- if (script_cache_ == NULL) {
- CreateScriptCache();
- }
-
- // If the script cache is not active just return an empty array.
- ASSERT(script_cache_ != NULL);
- if (script_cache_ == NULL) {
- isolate_->factory()->NewFixedArray(0);
- }
-
- // Perform GC to get unreferenced scripts evicted from the cache before
- // returning the content.
- isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
- "Debug::GetLoadedScripts");
-
- // Get the scripts from the cache.
- return script_cache_->GetScripts();
-}
-
-
-void Debug::AfterGarbageCollection() {
- // Generate events for collected scripts.
- if (script_cache_ != NULL) {
- script_cache_->ProcessCollectedScripts();
- }
-}
-
-
-Debugger::Debugger(Isolate* isolate)
- : debugger_access_(isolate->debugger_access()),
- event_listener_(Handle<Object>()),
- event_listener_data_(Handle<Object>()),
- compiling_natives_(false),
- is_loading_debugger_(false),
- live_edit_enabled_(true),
- never_unload_debugger_(false),
- force_debugger_active_(false),
- message_handler_(NULL),
- debugger_unload_pending_(false),
- host_dispatch_handler_(NULL),
- dispatch_handler_access_(OS::CreateMutex()),
- debug_message_dispatch_handler_(NULL),
- message_dispatch_helper_thread_(NULL),
- host_dispatch_micros_(100 * 1000),
- agent_(NULL),
- command_queue_(isolate->logger(), kQueueInitialSize),
- command_received_(OS::CreateSemaphore(0)),
- event_command_queue_(isolate->logger(), kQueueInitialSize),
- isolate_(isolate) {
-}
-
-
-Debugger::~Debugger() {
- delete dispatch_handler_access_;
- dispatch_handler_access_ = 0;
- delete command_received_;
- command_received_ = 0;
-}
-
-
-Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
- int argc,
- Handle<Object> argv[],
- bool* caught_exception) {
- ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
-
- // Create the execution state object.
- Handle<String> constructor_str =
- isolate_->factory()->InternalizeUtf8String(constructor_name);
- Handle<Object> constructor(
- isolate_->global_object()->GetPropertyNoExceptionThrown(*constructor_str),
- isolate_);
- ASSERT(constructor->IsJSFunction());
- if (!constructor->IsJSFunction()) {
- *caught_exception = true;
- return isolate_->factory()->undefined_value();
- }
- Handle<Object> js_object = Execution::TryCall(
- Handle<JSFunction>::cast(constructor),
- Handle<JSObject>(isolate_->debug()->debug_context()->global_object()),
- argc,
- argv,
- caught_exception);
- return js_object;
-}
-
-
-Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
- // Create the execution state object.
- Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
- isolate_->debug()->break_id());
- Handle<Object> argv[] = { break_id };
- return MakeJSObject(CStrVector("MakeExecutionState"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
-}
-
-
-Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state,
- Handle<Object> break_points_hit,
- bool* caught_exception) {
- // Create the new break event object.
- Handle<Object> argv[] = { exec_state, break_points_hit };
- return MakeJSObject(CStrVector("MakeBreakEvent"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
-}
-
-
-Handle<Object> Debugger::MakeExceptionEvent(Handle<Object> exec_state,
- Handle<Object> exception,
- bool uncaught,
- bool* caught_exception) {
- Factory* factory = isolate_->factory();
- // Create the new exception event object.
- Handle<Object> argv[] = { exec_state,
- exception,
- factory->ToBoolean(uncaught) };
- return MakeJSObject(CStrVector("MakeExceptionEvent"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
-}
-
-
-Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
- bool* caught_exception) {
- // Create the new function event object.
- Handle<Object> argv[] = { function };
- return MakeJSObject(CStrVector("MakeNewFunctionEvent"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
-}
-
-
-Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
- bool before,
- bool* caught_exception) {
- Factory* factory = isolate_->factory();
- // Create the compile event object.
- Handle<Object> exec_state = MakeExecutionState(caught_exception);
- Handle<Object> script_wrapper = GetScriptWrapper(script);
- Handle<Object> argv[] = { exec_state,
- script_wrapper,
- factory->ToBoolean(before) };
- return MakeJSObject(CStrVector("MakeCompileEvent"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
-}
-
-
-Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
- bool* caught_exception) {
- // Create the script collected event object.
- Handle<Object> exec_state = MakeExecutionState(caught_exception);
- Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id), isolate_);
- Handle<Object> argv[] = { exec_state, id_object };
-
- return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
-}
-
-
-void Debugger::OnException(Handle<Object> exception, bool uncaught) {
- HandleScope scope(isolate_);
- Debug* debug = isolate_->debug();
-
- // Bail out based on state or if there is no listener for this event
- if (debug->InDebugger()) return;
- if (!Debugger::EventActive(v8::Exception)) return;
-
- // Bail out if exception breaks are not active
- if (uncaught) {
- // Uncaught exceptions are reported by either flags.
- if (!(debug->break_on_uncaught_exception() ||
- debug->break_on_exception())) return;
- } else {
- // Caught exceptions are reported is activated.
- if (!debug->break_on_exception()) return;
- }
-
- // Enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) return;
-
- // Clear all current stepping setup.
- debug->ClearStepping();
- // Create the event data object.
- bool caught_exception = false;
- Handle<Object> exec_state = MakeExecutionState(&caught_exception);
- Handle<Object> event_data;
- if (!caught_exception) {
- event_data = MakeExceptionEvent(exec_state, exception, uncaught,
- &caught_exception);
- }
- // Bail out and don't call debugger if exception.
- if (caught_exception) {
- return;
- }
-
- // Process debug event.
- ProcessDebugEvent(v8::Exception, Handle<JSObject>::cast(event_data), false);
- // Return to continue execution from where the exception was thrown.
-}
-
-
-void Debugger::OnDebugBreak(Handle<Object> break_points_hit,
- bool auto_continue) {
- HandleScope scope(isolate_);
-
- // Debugger has already been entered by caller.
- ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
-
- // Bail out if there is no listener for this event
- if (!Debugger::EventActive(v8::Break)) return;
-
- // Debugger must be entered in advance.
- ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
-
- // Create the event data object.
- bool caught_exception = false;
- Handle<Object> exec_state = MakeExecutionState(&caught_exception);
- Handle<Object> event_data;
- if (!caught_exception) {
- event_data = MakeBreakEvent(exec_state, break_points_hit,
- &caught_exception);
- }
- // Bail out and don't call debugger if exception.
- if (caught_exception) {
- return;
- }
-
- // Process debug event.
- ProcessDebugEvent(v8::Break,
- Handle<JSObject>::cast(event_data),
- auto_continue);
-}
-
-
-void Debugger::OnBeforeCompile(Handle<Script> script) {
- HandleScope scope(isolate_);
-
- // Bail out based on state or if there is no listener for this event
- if (isolate_->debug()->InDebugger()) return;
- if (compiling_natives()) return;
- if (!EventActive(v8::BeforeCompile)) return;
-
- // Enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) return;
-
- // Create the event data object.
- bool caught_exception = false;
- Handle<Object> event_data = MakeCompileEvent(script, true, &caught_exception);
- // Bail out and don't call debugger if exception.
- if (caught_exception) {
- return;
- }
-
- // Process debug event.
- ProcessDebugEvent(v8::BeforeCompile,
- Handle<JSObject>::cast(event_data),
- true);
-}
-
-
-// Handle debugger actions when a new script is compiled.
-void Debugger::OnAfterCompile(Handle<Script> script,
- AfterCompileFlags after_compile_flags) {
- HandleScope scope(isolate_);
- Debug* debug = isolate_->debug();
-
- // Add the newly compiled script to the script cache.
- debug->AddScriptToScriptCache(script);
-
- // No more to do if not debugging.
- if (!IsDebuggerActive()) return;
-
- // No compile events while compiling natives.
- if (compiling_natives()) return;
-
- // Store whether in debugger before entering debugger.
- bool in_debugger = debug->InDebugger();
-
- // Enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) return;
-
- // If debugging there might be script break points registered for this
- // script. Make sure that these break points are set.
-
- // Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
- Handle<String> update_script_break_points_string =
- isolate_->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("UpdateScriptBreakPoints"));
- Handle<Object> update_script_break_points =
- Handle<Object>(
- debug->debug_context()->global_object()->GetPropertyNoExceptionThrown(
- *update_script_break_points_string),
- isolate_);
- if (!update_script_break_points->IsJSFunction()) {
- return;
- }
- ASSERT(update_script_break_points->IsJSFunction());
-
- // Wrap the script object in a proper JS object before passing it
- // to JavaScript.
- Handle<JSValue> wrapper = GetScriptWrapper(script);
-
- // Call UpdateScriptBreakPoints expect no exceptions.
- bool caught_exception;
- Handle<Object> argv[] = { wrapper };
- Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
- Isolate::Current()->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
- if (caught_exception) {
- return;
- }
- // Bail out based on state or if there is no listener for this event
- if (in_debugger && (after_compile_flags & SEND_WHEN_DEBUGGING) == 0) return;
- if (!Debugger::EventActive(v8::AfterCompile)) return;
-
- // Create the compile state object.
- Handle<Object> event_data = MakeCompileEvent(script,
- false,
- &caught_exception);
- // Bail out and don't call debugger if exception.
- if (caught_exception) {
- return;
- }
- // Process debug event.
- ProcessDebugEvent(v8::AfterCompile,
- Handle<JSObject>::cast(event_data),
- true);
-}
-
-
-void Debugger::OnScriptCollected(int id) {
- HandleScope scope(isolate_);
-
- // No more to do if not debugging.
- if (isolate_->debug()->InDebugger()) return;
- if (!IsDebuggerActive()) return;
- if (!Debugger::EventActive(v8::ScriptCollected)) return;
-
- // Enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) return;
-
- // Create the script collected state object.
- bool caught_exception = false;
- Handle<Object> event_data = MakeScriptCollectedEvent(id,
- &caught_exception);
- // Bail out and don't call debugger if exception.
- if (caught_exception) {
- return;
- }
-
- // Process debug event.
- ProcessDebugEvent(v8::ScriptCollected,
- Handle<JSObject>::cast(event_data),
- true);
-}
-
-
-void Debugger::ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
- bool auto_continue) {
- HandleScope scope(isolate_);
-
- // Clear any pending debug break if this is a real break.
- if (!auto_continue) {
- isolate_->debug()->clear_interrupt_pending(DEBUGBREAK);
- }
-
- // Create the execution state.
- bool caught_exception = false;
- Handle<Object> exec_state = MakeExecutionState(&caught_exception);
- if (caught_exception) {
- return;
- }
- // First notify the message handler if any.
- if (message_handler_ != NULL) {
- NotifyMessageHandler(event,
- Handle<JSObject>::cast(exec_state),
- event_data,
- auto_continue);
- }
- // Notify registered debug event listener. This can be either a C or
- // a JavaScript function. Don't call event listener for v8::Break
- // here, if it's only a debug command -- they will be processed later.
- if ((event != v8::Break || !auto_continue) && !event_listener_.is_null()) {
- CallEventCallback(event, exec_state, event_data, NULL);
- }
- // Process pending debug commands.
- if (event == v8::Break) {
- while (!event_command_queue_.IsEmpty()) {
- CommandMessage command = event_command_queue_.Get();
- if (!event_listener_.is_null()) {
- CallEventCallback(v8::BreakForCommand,
- exec_state,
- event_data,
- command.client_data());
- }
- command.Dispose();
- }
- }
-}
-
-
-void Debugger::CallEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data) {
- if (event_listener_->IsForeign()) {
- CallCEventCallback(event, exec_state, event_data, client_data);
- } else {
- CallJSEventCallback(event, exec_state, event_data);
- }
-}
-
-
-void Debugger::CallCEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data) {
- Handle<Foreign> callback_obj(Handle<Foreign>::cast(event_listener_));
- v8::Debug::EventCallback2 callback =
- FUNCTION_CAST<v8::Debug::EventCallback2>(
- callback_obj->foreign_address());
- EventDetailsImpl event_details(
- event,
- Handle<JSObject>::cast(exec_state),
- Handle<JSObject>::cast(event_data),
- event_listener_data_,
- client_data);
- callback(event_details);
-}
-
-
-void Debugger::CallJSEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data) {
- ASSERT(event_listener_->IsJSFunction());
- Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
-
- // Invoke the JavaScript debug event listener.
- Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event), isolate_),
- exec_state,
- event_data,
- event_listener_data_ };
- bool caught_exception;
- Execution::TryCall(fun,
- isolate_->global_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
- // Silently ignore exceptions from debug event listeners.
-}
-
-
-Handle<Context> Debugger::GetDebugContext() {
- never_unload_debugger_ = true;
- EnterDebugger debugger;
- return isolate_->debug()->debug_context();
-}
-
-
-void Debugger::UnloadDebugger() {
- Debug* debug = isolate_->debug();
-
- // Make sure that there are no breakpoints left.
- debug->ClearAllBreakPoints();
-
- // Unload the debugger if feasible.
- if (!never_unload_debugger_) {
- debug->Unload();
- }
-
- // Clear the flag indicating that the debugger should be unloaded.
- debugger_unload_pending_ = false;
-}
-
-
-void Debugger::NotifyMessageHandler(v8::DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- bool auto_continue) {
- HandleScope scope(isolate_);
-
- if (!isolate_->debug()->Load()) return;
-
- // Process the individual events.
- bool sendEventMessage = false;
- switch (event) {
- case v8::Break:
- case v8::BreakForCommand:
- sendEventMessage = !auto_continue;
- break;
- case v8::Exception:
- sendEventMessage = true;
- break;
- case v8::BeforeCompile:
- break;
- case v8::AfterCompile:
- sendEventMessage = true;
- break;
- case v8::ScriptCollected:
- sendEventMessage = true;
- break;
- case v8::NewFunction:
- break;
- default:
- UNREACHABLE();
- }
-
- // The debug command interrupt flag might have been set when the command was
- // added. It should be enough to clear the flag only once while we are in the
- // debugger.
- ASSERT(isolate_->debug()->InDebugger());
- isolate_->stack_guard()->Continue(DEBUGCOMMAND);
-
- // Notify the debugger that a debug event has occurred unless auto continue is
- // active in which case no event is send.
- if (sendEventMessage) {
- MessageImpl message = MessageImpl::NewEvent(
- event,
- auto_continue,
- Handle<JSObject>::cast(exec_state),
- Handle<JSObject>::cast(event_data));
- InvokeMessageHandler(message);
- }
-
- // If auto continue don't make the event cause a break, but process messages
- // in the queue if any. For script collected events don't even process
- // messages in the queue as the execution state might not be what is expected
- // by the client.
- if ((auto_continue && !HasCommands()) || event == v8::ScriptCollected) {
- return;
- }
-
- v8::TryCatch try_catch;
-
- // DebugCommandProcessor goes here.
- v8::Local<v8::Object> cmd_processor;
- {
- v8::Local<v8::Object> api_exec_state =
- v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state));
- v8::Local<v8::String> fun_name =
- v8::String::New("debugCommandProcessor");
- v8::Local<v8::Function> fun =
- v8::Function::Cast(*api_exec_state->Get(fun_name));
-
- v8::Handle<v8::Boolean> running =
- auto_continue ? v8::True() : v8::False();
- static const int kArgc = 1;
- v8::Handle<Value> argv[kArgc] = { running };
- cmd_processor = v8::Object::Cast(*fun->Call(api_exec_state, kArgc, argv));
- if (try_catch.HasCaught()) {
- PrintLn(try_catch.Exception());
- return;
- }
- }
-
- bool running = auto_continue;
-
- // Process requests from the debugger.
- while (true) {
- // Wait for new command in the queue.
- if (Debugger::host_dispatch_handler_) {
- // In case there is a host dispatch - do periodic dispatches.
- if (!command_received_->Wait(host_dispatch_micros_)) {
- // Timout expired, do the dispatch.
- Debugger::host_dispatch_handler_();
- continue;
- }
- } else {
- // In case there is no host dispatch - just wait.
- command_received_->Wait();
- }
-
- // Get the command from the queue.
- CommandMessage command = command_queue_.Get();
- isolate_->logger()->DebugTag(
- "Got request from command queue, in interactive loop.");
- if (!Debugger::IsDebuggerActive()) {
- // Delete command text and user data.
- command.Dispose();
- return;
- }
-
- // Invoke JavaScript to process the debug request.
- v8::Local<v8::String> fun_name;
- v8::Local<v8::Function> fun;
- v8::Local<v8::Value> request;
- v8::TryCatch try_catch;
- fun_name = v8::String::New("processDebugRequest");
- fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
-
- request = v8::String::New(command.text().start(),
- command.text().length());
- static const int kArgc = 1;
- v8::Handle<Value> argv[kArgc] = { request };
- v8::Local<v8::Value> response_val = fun->Call(cmd_processor, kArgc, argv);
-
- // Get the response.
- v8::Local<v8::String> response;
- if (!try_catch.HasCaught()) {
- // Get response string.
- if (!response_val->IsUndefined()) {
- response = v8::String::Cast(*response_val);
- } else {
- response = v8::String::New("");
- }
-
- // Log the JSON request/response.
- if (FLAG_trace_debug_json) {
- PrintLn(request);
- PrintLn(response);
- }
-
- // Get the running state.
- fun_name = v8::String::New("isRunning");
- fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
- static const int kArgc = 1;
- v8::Handle<Value> argv[kArgc] = { response };
- v8::Local<v8::Value> running_val = fun->Call(cmd_processor, kArgc, argv);
- if (!try_catch.HasCaught()) {
- running = running_val->ToBoolean()->Value();
- }
- } else {
- // In case of failure the result text is the exception text.
- response = try_catch.Exception()->ToString();
- }
-
- // Return the result.
- MessageImpl message = MessageImpl::NewResponse(
- event,
- running,
- Handle<JSObject>::cast(exec_state),
- Handle<JSObject>::cast(event_data),
- Handle<String>(Utils::OpenHandle(*response)),
- command.client_data());
- InvokeMessageHandler(message);
- command.Dispose();
-
- // Return from debug event processing if either the VM is put into the
- // running state (through a continue command) or auto continue is active
- // and there are no more commands queued.
- if (running && !HasCommands()) {
- return;
- }
- }
-}
-
-
-void Debugger::SetEventListener(Handle<Object> callback,
- Handle<Object> data) {
- HandleScope scope(isolate_);
- GlobalHandles* global_handles = isolate_->global_handles();
-
- // Clear the global handles for the event listener and the event listener data
- // object.
- if (!event_listener_.is_null()) {
- global_handles->Destroy(
- reinterpret_cast<Object**>(event_listener_.location()));
- event_listener_ = Handle<Object>();
- }
- if (!event_listener_data_.is_null()) {
- global_handles->Destroy(
- reinterpret_cast<Object**>(event_listener_data_.location()));
- event_listener_data_ = Handle<Object>();
- }
-
- // If there is a new debug event listener register it together with its data
- // object.
- if (!callback->IsUndefined() && !callback->IsNull()) {
- event_listener_ = Handle<Object>::cast(
- global_handles->Create(*callback));
- if (data.is_null()) {
- data = isolate_->factory()->undefined_value();
- }
- event_listener_data_ = Handle<Object>::cast(
- global_handles->Create(*data));
- }
-
- ListenersChanged();
-}
-
-
-void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
- ScopedLock with(debugger_access_);
-
- message_handler_ = handler;
- ListenersChanged();
- if (handler == NULL) {
- // Send an empty command to the debugger if in a break to make JavaScript
- // run again if the debugger is closed.
- if (isolate_->debug()->InDebugger()) {
- ProcessCommand(Vector<const uint16_t>::empty());
- }
- }
-}
-
-
-void Debugger::ListenersChanged() {
- if (IsDebuggerActive()) {
- // Disable the compilation cache when the debugger is active.
- isolate_->compilation_cache()->Disable();
- debugger_unload_pending_ = false;
- } else {
- isolate_->compilation_cache()->Enable();
- // Unload the debugger if event listener and message handler cleared.
- // Schedule this for later, because we may be in non-V8 thread.
- debugger_unload_pending_ = true;
- }
-}
-
-
-void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- int period) {
- host_dispatch_handler_ = handler;
- host_dispatch_micros_ = period * 1000;
-}
-
-
-void Debugger::SetDebugMessageDispatchHandler(
- v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
- ScopedLock with(dispatch_handler_access_);
- debug_message_dispatch_handler_ = handler;
-
- if (provide_locker && message_dispatch_helper_thread_ == NULL) {
- message_dispatch_helper_thread_ = new MessageDispatchHelperThread(isolate_);
- message_dispatch_helper_thread_->Start();
- }
-}
-
-
-// Calls the registered debug message handler. This callback is part of the
-// public API.
-void Debugger::InvokeMessageHandler(MessageImpl message) {
- ScopedLock with(debugger_access_);
-
- if (message_handler_ != NULL) {
- message_handler_(message);
- }
-}
-
-
-// Puts a command coming from the public API on the queue. Creates
-// a copy of the command string managed by the debugger. Up to this
-// point, the command data was managed by the API client. Called
-// by the API client thread.
-void Debugger::ProcessCommand(Vector<const uint16_t> command,
- v8::Debug::ClientData* client_data) {
- // Need to cast away const.
- CommandMessage message = CommandMessage::New(
- Vector<uint16_t>(const_cast<uint16_t*>(command.start()),
- command.length()),
- client_data);
- isolate_->logger()->DebugTag("Put command on command_queue.");
- command_queue_.Put(message);
- command_received_->Signal();
-
- // Set the debug command break flag to have the command processed.
- if (!isolate_->debug()->InDebugger()) {
- isolate_->stack_guard()->DebugCommand();
- }
-
- MessageDispatchHelperThread* dispatch_thread;
- {
- ScopedLock with(dispatch_handler_access_);
- dispatch_thread = message_dispatch_helper_thread_;
- }
-
- if (dispatch_thread == NULL) {
- CallMessageDispatchHandler();
- } else {
- dispatch_thread->Schedule();
- }
-}
-
-
-bool Debugger::HasCommands() {
- return !command_queue_.IsEmpty();
-}
-
-
-void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
- CommandMessage message = CommandMessage::New(Vector<uint16_t>(), client_data);
- event_command_queue_.Put(message);
-
- // Set the debug command break flag to have the command processed.
- if (!isolate_->debug()->InDebugger()) {
- isolate_->stack_guard()->DebugCommand();
- }
-}
-
-
-bool Debugger::IsDebuggerActive() {
- ScopedLock with(debugger_access_);
-
- return message_handler_ != NULL ||
- !event_listener_.is_null() ||
- force_debugger_active_;
-}
-
-
-Handle<Object> Debugger::Call(Handle<JSFunction> fun,
- Handle<Object> data,
- bool* pending_exception) {
- // When calling functions in the debugger prevent it from beeing unloaded.
- Debugger::never_unload_debugger_ = true;
-
- // Enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) {
- return isolate_->factory()->undefined_value();
- }
-
- // Create the execution state.
- bool caught_exception = false;
- Handle<Object> exec_state = MakeExecutionState(&caught_exception);
- if (caught_exception) {
- return isolate_->factory()->undefined_value();
- }
-
- Handle<Object> argv[] = { exec_state, data };
- Handle<Object> result = Execution::Call(
- fun,
- Handle<Object>(isolate_->debug()->debug_context_->global_proxy(),
- isolate_),
- ARRAY_SIZE(argv),
- argv,
- pending_exception);
- return result;
-}
-
-
-static void StubMessageHandler2(const v8::Debug::Message& message) {
- // Simply ignore message.
-}
-
-
-bool Debugger::StartAgent(const char* name, int port,
- bool wait_for_connection) {
- ASSERT(Isolate::Current() == isolate_);
- if (wait_for_connection) {
- // Suspend V8 if it is already running or set V8 to suspend whenever
- // it starts.
- // Provide stub message handler; V8 auto-continues each suspend
- // when there is no message handler; we doesn't need it.
- // Once become suspended, V8 will stay so indefinitely long, until remote
- // debugger connects and issues "continue" command.
- Debugger::message_handler_ = StubMessageHandler2;
- v8::Debug::DebugBreak();
- }
-
- if (Socket::SetUp()) {
- if (agent_ == NULL) {
- agent_ = new DebuggerAgent(name, port);
- agent_->Start();
- }
- return true;
- }
-
- return false;
-}
-
-
-void Debugger::StopAgent() {
- ASSERT(Isolate::Current() == isolate_);
- if (agent_ != NULL) {
- agent_->Shutdown();
- agent_->Join();
- delete agent_;
- agent_ = NULL;
- }
-}
-
-
-void Debugger::WaitForAgent() {
- ASSERT(Isolate::Current() == isolate_);
- if (agent_ != NULL)
- agent_->WaitUntilListening();
-}
-
-
-void Debugger::CallMessageDispatchHandler() {
- v8::Debug::DebugMessageDispatchHandler handler;
- {
- ScopedLock with(dispatch_handler_access_);
- handler = Debugger::debug_message_dispatch_handler_;
- }
- if (handler != NULL) {
- handler();
- }
-}
-
-
-EnterDebugger::EnterDebugger()
- : isolate_(Isolate::Current()),
- prev_(isolate_->debug()->debugger_entry()),
- it_(isolate_),
- has_js_frames_(!it_.done()),
- save_(isolate_) {
- Debug* debug = isolate_->debug();
- ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
- ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
-
- // Link recursive debugger entry.
- debug->set_debugger_entry(this);
-
- // Store the previous break id and frame id.
- break_id_ = debug->break_id();
- break_frame_id_ = debug->break_frame_id();
-
- // Create the new break info. If there is no JavaScript frames there is no
- // break frame id.
- if (has_js_frames_) {
- debug->NewBreak(it_.frame()->id());
- } else {
- debug->NewBreak(StackFrame::NO_ID);
- }
-
- // Make sure that debugger is loaded and enter the debugger context.
- load_failed_ = !debug->Load();
- if (!load_failed_) {
- // NOTE the member variable save which saves the previous context before
- // this change.
- isolate_->set_context(*debug->debug_context());
- }
-}
-
-
-EnterDebugger::~EnterDebugger() {
- ASSERT(Isolate::Current() == isolate_);
- Debug* debug = isolate_->debug();
-
- // Restore to the previous break state.
- debug->SetBreak(break_frame_id_, break_id_);
-
- // Check for leaving the debugger.
- if (!load_failed_ && prev_ == NULL) {
- // Clear mirror cache when leaving the debugger. Skip this if there is a
- // pending exception as clearing the mirror cache calls back into
- // JavaScript. This can happen if the v8::Debug::Call is used in which
- // case the exception should end up in the calling code.
- if (!isolate_->has_pending_exception()) {
- // Try to avoid any pending debug break breaking in the clear mirror
- // cache JavaScript code.
- if (isolate_->stack_guard()->IsDebugBreak()) {
- debug->set_interrupts_pending(DEBUGBREAK);
- isolate_->stack_guard()->Continue(DEBUGBREAK);
- }
- debug->ClearMirrorCache();
- }
-
- // Request preemption and debug break when leaving the last debugger entry
- // if any of these where recorded while debugging.
- if (debug->is_interrupt_pending(PREEMPT)) {
- // This re-scheduling of preemption is to avoid starvation in some
- // debugging scenarios.
- debug->clear_interrupt_pending(PREEMPT);
- isolate_->stack_guard()->Preempt();
- }
- if (debug->is_interrupt_pending(DEBUGBREAK)) {
- debug->clear_interrupt_pending(DEBUGBREAK);
- isolate_->stack_guard()->DebugBreak();
- }
-
- // If there are commands in the queue when leaving the debugger request
- // that these commands are processed.
- if (isolate_->debugger()->HasCommands()) {
- isolate_->stack_guard()->DebugCommand();
- }
-
- // If leaving the debugger with the debugger no longer active unload it.
- if (!isolate_->debugger()->IsDebuggerActive()) {
- isolate_->debugger()->UnloadDebugger();
- }
- }
-
- // Leaving this debugger entry.
- debug->set_debugger_entry(prev_);
-}
-
-
-MessageImpl MessageImpl::NewEvent(DebugEvent event,
- bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data) {
- MessageImpl message(true, event, running,
- exec_state, event_data, Handle<String>(), NULL);
- return message;
-}
-
-
-MessageImpl MessageImpl::NewResponse(DebugEvent event,
- bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<String> response_json,
- v8::Debug::ClientData* client_data) {
- MessageImpl message(false, event, running,
- exec_state, event_data, response_json, client_data);
- return message;
-}
-
-
-MessageImpl::MessageImpl(bool is_event,
- DebugEvent event,
- bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<String> response_json,
- v8::Debug::ClientData* client_data)
- : is_event_(is_event),
- event_(event),
- running_(running),
- exec_state_(exec_state),
- event_data_(event_data),
- response_json_(response_json),
- client_data_(client_data) {}
-
-
-bool MessageImpl::IsEvent() const {
- return is_event_;
-}
-
-
-bool MessageImpl::IsResponse() const {
- return !is_event_;
-}
-
-
-DebugEvent MessageImpl::GetEvent() const {
- return event_;
-}
-
-
-bool MessageImpl::WillStartRunning() const {
- return running_;
-}
-
-
-v8::Handle<v8::Object> MessageImpl::GetExecutionState() const {
- return v8::Utils::ToLocal(exec_state_);
-}
-
-
-v8::Handle<v8::Object> MessageImpl::GetEventData() const {
- return v8::Utils::ToLocal(event_data_);
-}
-
-
-v8::Handle<v8::String> MessageImpl::GetJSON() const {
- v8::HandleScope scope;
-
- if (IsEvent()) {
- // Call toJSONProtocol on the debug event object.
- Handle<Object> fun = GetProperty(event_data_, "toJSONProtocol");
- if (!fun->IsJSFunction()) {
- return v8::Handle<v8::String>();
- }
- bool caught_exception;
- Handle<Object> json = Execution::TryCall(Handle<JSFunction>::cast(fun),
- event_data_,
- 0, NULL, &caught_exception);
- if (caught_exception || !json->IsString()) {
- return v8::Handle<v8::String>();
- }
- return scope.Close(v8::Utils::ToLocal(Handle<String>::cast(json)));
- } else {
- return v8::Utils::ToLocal(response_json_);
- }
-}
-
-
-v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
- Isolate* isolate = Isolate::Current();
- v8::Handle<v8::Context> context = GetDebugEventContext(isolate);
- // Isolate::context() may be NULL when "script collected" event occures.
- ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
- return context;
-}
-
-
-v8::Debug::ClientData* MessageImpl::GetClientData() const {
- return client_data_;
-}
-
-
-EventDetailsImpl::EventDetailsImpl(DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<Object> callback_data,
- v8::Debug::ClientData* client_data)
- : event_(event),
- exec_state_(exec_state),
- event_data_(event_data),
- callback_data_(callback_data),
- client_data_(client_data) {}
-
-
-DebugEvent EventDetailsImpl::GetEvent() const {
- return event_;
-}
-
-
-v8::Handle<v8::Object> EventDetailsImpl::GetExecutionState() const {
- return v8::Utils::ToLocal(exec_state_);
-}
-
-
-v8::Handle<v8::Object> EventDetailsImpl::GetEventData() const {
- return v8::Utils::ToLocal(event_data_);
-}
-
-
-v8::Handle<v8::Context> EventDetailsImpl::GetEventContext() const {
- return GetDebugEventContext(Isolate::Current());
-}
-
-
-v8::Handle<v8::Value> EventDetailsImpl::GetCallbackData() const {
- return v8::Utils::ToLocal(callback_data_);
-}
-
-
-v8::Debug::ClientData* EventDetailsImpl::GetClientData() const {
- return client_data_;
-}
-
-
-CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
- client_data_(NULL) {
-}
-
-
-CommandMessage::CommandMessage(const Vector<uint16_t>& text,
- v8::Debug::ClientData* data)
- : text_(text),
- client_data_(data) {
-}
-
-
-CommandMessage::~CommandMessage() {
-}
-
-
-void CommandMessage::Dispose() {
- text_.Dispose();
- delete client_data_;
- client_data_ = NULL;
-}
-
-
-CommandMessage CommandMessage::New(const Vector<uint16_t>& command,
- v8::Debug::ClientData* data) {
- return CommandMessage(command.Clone(), data);
-}
-
-
-CommandMessageQueue::CommandMessageQueue(int size) : start_(0), end_(0),
- size_(size) {
- messages_ = NewArray<CommandMessage>(size);
-}
-
-
-CommandMessageQueue::~CommandMessageQueue() {
- while (!IsEmpty()) {
- CommandMessage m = Get();
- m.Dispose();
- }
- DeleteArray(messages_);
-}
-
-
-CommandMessage CommandMessageQueue::Get() {
- ASSERT(!IsEmpty());
- int result = start_;
- start_ = (start_ + 1) % size_;
- return messages_[result];
-}
-
-
-void CommandMessageQueue::Put(const CommandMessage& message) {
- if ((end_ + 1) % size_ == start_) {
- Expand();
- }
- messages_[end_] = message;
- end_ = (end_ + 1) % size_;
-}
-
-
-void CommandMessageQueue::Expand() {
- CommandMessageQueue new_queue(size_ * 2);
- while (!IsEmpty()) {
- new_queue.Put(Get());
- }
- CommandMessage* array_to_free = messages_;
- *this = new_queue;
- new_queue.messages_ = array_to_free;
- // Make the new_queue empty so that it doesn't call Dispose on any messages.
- new_queue.start_ = new_queue.end_;
- // Automatic destructor called on new_queue, freeing array_to_free.
-}
-
-
-LockingCommandMessageQueue::LockingCommandMessageQueue(Logger* logger, int size)
- : logger_(logger), queue_(size) {
- lock_ = OS::CreateMutex();
-}
-
-
-LockingCommandMessageQueue::~LockingCommandMessageQueue() {
- delete lock_;
-}
-
-
-bool LockingCommandMessageQueue::IsEmpty() const {
- ScopedLock sl(lock_);
- return queue_.IsEmpty();
-}
-
-
-CommandMessage LockingCommandMessageQueue::Get() {
- ScopedLock sl(lock_);
- CommandMessage result = queue_.Get();
- logger_->DebugEvent("Get", result.text());
- return result;
-}
-
-
-void LockingCommandMessageQueue::Put(const CommandMessage& message) {
- ScopedLock sl(lock_);
- queue_.Put(message);
- logger_->DebugEvent("Put", message.text());
-}
-
-
-void LockingCommandMessageQueue::Clear() {
- ScopedLock sl(lock_);
- queue_.Clear();
-}
-
-
-MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
- : Thread("v8:MsgDispHelpr"),
- sem_(OS::CreateSemaphore(0)), mutex_(OS::CreateMutex()),
- already_signalled_(false) {
-}
-
-
-MessageDispatchHelperThread::~MessageDispatchHelperThread() {
- delete mutex_;
- delete sem_;
-}
-
-
-void MessageDispatchHelperThread::Schedule() {
- {
- ScopedLock lock(mutex_);
- if (already_signalled_) {
- return;
- }
- already_signalled_ = true;
- }
- sem_->Signal();
-}
-
-
-void MessageDispatchHelperThread::Run() {
- Isolate* isolate = Isolate::Current();
- while (true) {
- sem_->Wait();
- {
- ScopedLock lock(mutex_);
- already_signalled_ = false;
- }
- {
- Locker locker(reinterpret_cast<v8::Isolate*>(isolate));
- isolate->debugger()->CallMessageDispatchHandler();
- }
- }
-}
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/debug.h b/src/3rdparty/v8/src/debug.h
deleted file mode 100644
index c7f0681..0000000
--- a/src/3rdparty/v8/src/debug.h
+++ /dev/null
@@ -1,1056 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DEBUG_H_
-#define V8_DEBUG_H_
-
-#include "allocation.h"
-#include "arguments.h"
-#include "assembler.h"
-#include "debug-agent.h"
-#include "execution.h"
-#include "factory.h"
-#include "flags.h"
-#include "frames-inl.h"
-#include "hashmap.h"
-#include "platform.h"
-#include "string-stream.h"
-#include "v8threads.h"
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#include "../include/v8-debug.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Forward declarations.
-class EnterDebugger;
-
-
-// Step actions. NOTE: These values are in macros.py as well.
-enum StepAction {
- StepNone = -1, // Stepping not prepared.
- StepOut = 0, // Step out of the current function.
- StepNext = 1, // Step to the next statement in the current function.
- StepIn = 2, // Step into new functions invoked or the next statement
- // in the current function.
- StepMin = 3, // Perform a minimum step in the current function.
- StepInMin = 4 // Step into new functions invoked or perform a minimum step
- // in the current function.
-};
-
-
-// Type of exception break. NOTE: These values are in macros.py as well.
-enum ExceptionBreakType {
- BreakException = 0,
- BreakUncaughtException = 1
-};
-
-
-// Type of exception break. NOTE: These values are in macros.py as well.
-enum BreakLocatorType {
- ALL_BREAK_LOCATIONS = 0,
- SOURCE_BREAK_LOCATIONS = 1
-};
-
-
-// Class for iterating through the break points in a function and changing
-// them.
-class BreakLocationIterator {
- public:
- explicit BreakLocationIterator(Handle<DebugInfo> debug_info,
- BreakLocatorType type);
- virtual ~BreakLocationIterator();
-
- void Next();
- void Next(int count);
- void FindBreakLocationFromAddress(Address pc);
- void FindBreakLocationFromPosition(int position);
- void Reset();
- bool Done() const;
- void SetBreakPoint(Handle<Object> break_point_object);
- void ClearBreakPoint(Handle<Object> break_point_object);
- void SetOneShot();
- void ClearOneShot();
- void PrepareStepIn(Isolate* isolate);
- bool IsExit() const;
- bool HasBreakPoint();
- bool IsDebugBreak();
- Object* BreakPointObjects();
- void ClearAllDebugBreak();
-
-
- inline int code_position() {
- return static_cast<int>(pc() - debug_info_->code()->entry());
- }
- inline int break_point() { return break_point_; }
- inline int position() { return position_; }
- inline int statement_position() { return statement_position_; }
- inline Address pc() { return reloc_iterator_->rinfo()->pc(); }
- inline Code* code() { return debug_info_->code(); }
- inline RelocInfo* rinfo() { return reloc_iterator_->rinfo(); }
- inline RelocInfo::Mode rmode() const {
- return reloc_iterator_->rinfo()->rmode();
- }
- inline RelocInfo* original_rinfo() {
- return reloc_iterator_original_->rinfo();
- }
- inline RelocInfo::Mode original_rmode() const {
- return reloc_iterator_original_->rinfo()->rmode();
- }
-
- bool IsDebuggerStatement();
-
- protected:
- bool RinfoDone() const;
- void RinfoNext();
-
- BreakLocatorType type_;
- int break_point_;
- int position_;
- int statement_position_;
- Handle<DebugInfo> debug_info_;
- RelocIterator* reloc_iterator_;
- RelocIterator* reloc_iterator_original_;
-
- private:
- void SetDebugBreak();
- void ClearDebugBreak();
-
- void SetDebugBreakAtIC();
- void ClearDebugBreakAtIC();
-
- bool IsDebugBreakAtReturn();
- void SetDebugBreakAtReturn();
- void ClearDebugBreakAtReturn();
-
- bool IsDebugBreakSlot();
- bool IsDebugBreakAtSlot();
- void SetDebugBreakAtSlot();
- void ClearDebugBreakAtSlot();
-
- DISALLOW_COPY_AND_ASSIGN(BreakLocationIterator);
-};
-
-
-// Cache of all script objects in the heap. When a script is added a weak handle
-// to it is created and that weak handle is stored in the cache. The weak handle
-// callback takes care of removing the script from the cache. The key used in
-// the cache is the script id.
-class ScriptCache : private HashMap {
- public:
- ScriptCache() : HashMap(ScriptMatch), collected_scripts_(10) {}
- virtual ~ScriptCache() { Clear(); }
-
- // Add script to the cache.
- void Add(Handle<Script> script);
-
- // Return the scripts in the cache.
- Handle<FixedArray> GetScripts();
-
- // Generate debugger events for collected scripts.
- void ProcessCollectedScripts();
-
- private:
- // Calculate the hash value from the key (script id).
- static uint32_t Hash(int key) {
- return ComputeIntegerHash(key, v8::internal::kZeroHashSeed);
- }
-
- // Scripts match if their keys (script id) match.
- static bool ScriptMatch(void* key1, void* key2) { return key1 == key2; }
-
- // Clear the cache releasing all the weak handles.
- void Clear();
-
- // Weak handle callback for scripts in the cache.
- static void HandleWeakScript(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
- void* data);
-
- // List used during GC to temporarily store id's of collected scripts.
- List<int> collected_scripts_;
-};
-
-
-// Linked list holding debug info objects. The debug info objects are kept as
-// weak handles to avoid a debug info object to keep a function alive.
-class DebugInfoListNode {
- public:
- explicit DebugInfoListNode(DebugInfo* debug_info);
- virtual ~DebugInfoListNode();
-
- DebugInfoListNode* next() { return next_; }
- void set_next(DebugInfoListNode* next) { next_ = next; }
- Handle<DebugInfo> debug_info() { return debug_info_; }
-
- private:
- // Global (weak) handle to the debug info object.
- Handle<DebugInfo> debug_info_;
-
- // Next pointer for linked list.
- DebugInfoListNode* next_;
-};
-
-// This class contains the debugger support. The main purpose is to handle
-// setting break points in the code.
-//
-// This class controls the debug info for all functions which currently have
-// active breakpoints in them. This debug info is held in the heap root object
-// debug_info which is a FixedArray. Each entry in this list is of class
-// DebugInfo.
-class Debug {
- public:
- void SetUp(bool create_heap_objects);
- bool Load();
- void Unload();
- bool IsLoaded() { return !debug_context_.is_null(); }
- bool InDebugger() { return thread_local_.debugger_entry_ != NULL; }
- void PreemptionWhileInDebugger();
- void Iterate(ObjectVisitor* v);
-
- Object* Break(Arguments args);
- void SetBreakPoint(Handle<JSFunction> function,
- Handle<Object> break_point_object,
- int* source_position);
- bool SetBreakPointForScript(Handle<Script> script,
- Handle<Object> break_point_object,
- int* source_position);
- void ClearBreakPoint(Handle<Object> break_point_object);
- void ClearAllBreakPoints();
- void FloodWithOneShot(Handle<JSFunction> function);
- void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
- void FloodHandlerWithOneShot();
- void ChangeBreakOnException(ExceptionBreakType type, bool enable);
- bool IsBreakOnException(ExceptionBreakType type);
- void PrepareStep(StepAction step_action, int step_count);
- void ClearStepping();
- void ClearStepOut();
- bool IsStepping() { return thread_local_.step_count_ > 0; }
- bool StepNextContinue(BreakLocationIterator* break_location_iterator,
- JavaScriptFrame* frame);
- static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
- static bool HasDebugInfo(Handle<SharedFunctionInfo> shared);
-
- void PrepareForBreakPoints();
-
- // This function is used in FunctionNameUsing* tests.
- Object* FindSharedFunctionInfoInScript(Handle<Script> script, int position);
-
- // Returns whether the operation succeeded. Compilation can only be triggered
- // if a valid closure is passed as the second argument, otherwise the shared
- // function needs to be compiled already.
- bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> function);
-
- // Returns true if the current stub call is patched to call the debugger.
- static bool IsDebugBreak(Address addr);
- // Returns true if the current return statement has been patched to be
- // a debugger breakpoint.
- static bool IsDebugBreakAtReturn(RelocInfo* rinfo);
-
- // Check whether a code stub with the specified major key is a possible break
- // point location.
- static bool IsSourceBreakStub(Code* code);
- static bool IsBreakStub(Code* code);
-
- // Find the builtin to use for invoking the debug break
- static Handle<Code> FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode);
-
- static Handle<Object> GetSourceBreakLocations(
- Handle<SharedFunctionInfo> shared);
-
- // Getter for the debug_context.
- inline Handle<Context> debug_context() { return debug_context_; }
-
- // Check whether a global object is the debug global object.
- bool IsDebugGlobal(GlobalObject* global);
-
- // Check whether this frame is just about to return.
- bool IsBreakAtReturn(JavaScriptFrame* frame);
-
- // Fast check to see if any break points are active.
- inline bool has_break_points() { return has_break_points_; }
-
- void NewBreak(StackFrame::Id break_frame_id);
- void SetBreak(StackFrame::Id break_frame_id, int break_id);
- StackFrame::Id break_frame_id() {
- return thread_local_.break_frame_id_;
- }
- int break_id() { return thread_local_.break_id_; }
-
- bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
- void HandleStepIn(Handle<JSFunction> function,
- Handle<Object> holder,
- Address fp,
- bool is_constructor);
- Address step_in_fp() { return thread_local_.step_into_fp_; }
- Address* step_in_fp_addr() { return &thread_local_.step_into_fp_; }
-
- bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
- Address step_out_fp() { return thread_local_.step_out_fp_; }
-
- EnterDebugger* debugger_entry() {
- return thread_local_.debugger_entry_;
- }
- void set_debugger_entry(EnterDebugger* entry) {
- thread_local_.debugger_entry_ = entry;
- }
-
- // Check whether any of the specified interrupts are pending.
- bool is_interrupt_pending(InterruptFlag what) {
- return (thread_local_.pending_interrupts_ & what) != 0;
- }
-
- // Set specified interrupts as pending.
- void set_interrupts_pending(InterruptFlag what) {
- thread_local_.pending_interrupts_ |= what;
- }
-
- // Clear specified interrupts from pending.
- void clear_interrupt_pending(InterruptFlag what) {
- thread_local_.pending_interrupts_ &= ~static_cast<int>(what);
- }
-
- // Getter and setter for the disable break state.
- bool disable_break() { return disable_break_; }
- void set_disable_break(bool disable_break) {
- disable_break_ = disable_break;
- }
-
- // Getters for the current exception break state.
- bool break_on_exception() { return break_on_exception_; }
- bool break_on_uncaught_exception() {
- return break_on_uncaught_exception_;
- }
-
- enum AddressId {
- k_after_break_target_address,
- k_debug_break_return_address,
- k_debug_break_slot_address,
- k_restarter_frame_function_pointer
- };
-
- // Support for setting the address to jump to when returning from break point.
- Address* after_break_target_address() {
- return reinterpret_cast<Address*>(&thread_local_.after_break_target_);
- }
- Address* restarter_frame_function_pointer_address() {
- Object*** address = &thread_local_.restarter_frame_function_pointer_;
- return reinterpret_cast<Address*>(address);
- }
-
- // Support for saving/restoring registers when handling debug break calls.
- Object** register_address(int r) {
- return &registers_[r];
- }
-
- // Access to the debug break on return code.
- Code* debug_break_return() { return debug_break_return_; }
- Code** debug_break_return_address() {
- return &debug_break_return_;
- }
-
- // Access to the debug break in debug break slot code.
- Code* debug_break_slot() { return debug_break_slot_; }
- Code** debug_break_slot_address() {
- return &debug_break_slot_;
- }
-
- static const int kEstimatedNofDebugInfoEntries = 16;
- static const int kEstimatedNofBreakPointsInFunction = 16;
-
- // Passed to MakeWeak.
- static void HandleWeakDebugInfo(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
- void* data);
-
- friend class Debugger;
- friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc
- friend void CheckDebuggerUnloaded(bool check_functions); // In test-debug.cc
-
- // Threading support.
- char* ArchiveDebug(char* to);
- char* RestoreDebug(char* from);
- static int ArchiveSpacePerThread();
- void FreeThreadResources() { }
-
- // Mirror cache handling.
- void ClearMirrorCache();
-
- // Script cache handling.
- void CreateScriptCache();
- void DestroyScriptCache();
- void AddScriptToScriptCache(Handle<Script> script);
- Handle<FixedArray> GetLoadedScripts();
-
- // Garbage collection notifications.
- void AfterGarbageCollection();
-
- // Code generator routines.
- static void GenerateSlot(MacroAssembler* masm);
- static void GenerateLoadICDebugBreak(MacroAssembler* masm);
- static void GenerateStoreICDebugBreak(MacroAssembler* masm);
- static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
- static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
- static void GenerateReturnDebugBreak(MacroAssembler* masm);
- static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
- static void GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm);
- static void GenerateCallConstructStubDebugBreak(MacroAssembler* masm);
- static void GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm);
- static void GenerateSlotDebugBreak(MacroAssembler* masm);
- static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
-
- // FrameDropper is a code replacement for a JavaScript frame with possibly
- // several frames above.
- // There is no calling conventions here, because it never actually gets
- // called, it only gets returned to.
- static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
-
- // Called from stub-cache.cc.
- static void GenerateCallICDebugBreak(MacroAssembler* masm);
-
- // Describes how exactly a frame has been dropped from stack.
- enum FrameDropMode {
- // No frame has been dropped.
- FRAMES_UNTOUCHED,
- // The top JS frame had been calling IC stub. IC stub mustn't be called now.
- FRAME_DROPPED_IN_IC_CALL,
- // The top JS frame had been calling debug break slot stub. Patch the
- // address this stub jumps to in the end.
- FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
- // The top JS frame had been calling some C++ function. The return address
- // gets patched automatically.
- FRAME_DROPPED_IN_DIRECT_CALL,
- FRAME_DROPPED_IN_RETURN_CALL,
- CURRENTLY_SET_MODE
- };
-
- void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- FrameDropMode mode,
- Object** restarter_frame_function_pointer);
-
- // Initializes an artificial stack frame. The data it contains is used for:
- // a. successful work of frame dropper code which eventually gets control,
- // b. being compatible with regular stack structure for various stack
- // iterators.
- // Returns address of stack allocated pointer to restarted function,
- // the value that is called 'restarter_frame_function_pointer'. The value
- // at this address (possibly updated by GC) may be used later when preparing
- // 'step in' operation.
- static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code);
-
- static const int kFrameDropperFrameSize;
-
- // Architecture-specific constant.
- static const bool kFrameDropperSupported;
-
- /**
- * Defines layout of a stack frame that supports padding. This is a regular
- * internal frame that has a flexible stack structure. LiveEdit can shift
- * its lower part up the stack, taking up the 'padding' space when additional
- * stack memory is required.
- * Such frame is expected immediately above the topmost JavaScript frame.
- *
- * Stack Layout:
- * --- Top
- * LiveEdit routine frames
- * ---
- * C frames of debug handler
- * ---
- * ...
- * ---
- * An internal frame that has n padding words:
- * - any number of words as needed by code -- upper part of frame
- * - padding size: a Smi storing n -- current size of padding
- * - padding: n words filled with kPaddingValue in form of Smi
- * - 3 context/type words of a regular InternalFrame
- * - fp
- * ---
- * Topmost JavaScript frame
- * ---
- * ...
- * --- Bottom
- */
- class FramePaddingLayout : public AllStatic {
- public:
- // Architecture-specific constant.
- static const bool kIsSupported;
-
- // A size of frame base including fp. Padding words starts right above
- // the base.
- static const int kFrameBaseSize = 4;
-
- // A number of words that should be reserved on stack for the LiveEdit use.
- // Normally equals 1. Stored on stack in form of Smi.
- static const int kInitialSize;
- // A value that padding words are filled with (in form of Smi). Going
- // bottom-top, the first word not having this value is a counter word.
- static const int kPaddingValue;
- };
-
- private:
- explicit Debug(Isolate* isolate);
- ~Debug();
-
- static bool CompileDebuggerScript(int index);
- void ClearOneShot();
- void ActivateStepIn(StackFrame* frame);
- void ClearStepIn();
- void ActivateStepOut(StackFrame* frame);
- void ClearStepNext();
- // Returns whether the compile succeeded.
- void RemoveDebugInfo(Handle<DebugInfo> debug_info);
- void SetAfterBreakTarget(JavaScriptFrame* frame);
- Handle<Object> CheckBreakPoints(Handle<Object> break_point);
- bool CheckBreakPoint(Handle<Object> break_point_object);
-
- // Global handle to debug context where all the debugger JavaScript code is
- // loaded.
- Handle<Context> debug_context_;
-
- // Boolean state indicating whether any break points are set.
- bool has_break_points_;
-
- // Cache of all scripts in the heap.
- ScriptCache* script_cache_;
-
- // List of active debug info objects.
- DebugInfoListNode* debug_info_list_;
-
- bool disable_break_;
- bool break_on_exception_;
- bool break_on_uncaught_exception_;
-
- // Per-thread data.
- class ThreadLocal {
- public:
- // Counter for generating next break id.
- int break_count_;
-
- // Current break id.
- int break_id_;
-
- // Frame id for the frame of the current break.
- StackFrame::Id break_frame_id_;
-
- // Step action for last step performed.
- StepAction last_step_action_;
-
- // Source statement position from last step next action.
- int last_statement_position_;
-
- // Number of steps left to perform before debug event.
- int step_count_;
-
- // Frame pointer from last step next action.
- Address last_fp_;
-
- // Number of queued steps left to perform before debug event.
- int queued_step_count_;
-
- // Frame pointer for frame from which step in was performed.
- Address step_into_fp_;
-
- // Frame pointer for the frame where debugger should be called when current
- // step out action is completed.
- Address step_out_fp_;
-
- // Storage location for jump when exiting debug break calls.
- Address after_break_target_;
-
- // Stores the way how LiveEdit has patched the stack. It is used when
- // debugger returns control back to user script.
- FrameDropMode frame_drop_mode_;
-
- // Top debugger entry.
- EnterDebugger* debugger_entry_;
-
- // Pending interrupts scheduled while debugging.
- int pending_interrupts_;
-
- // When restarter frame is on stack, stores the address
- // of the pointer to function being restarted. Otherwise (most of the time)
- // stores NULL. This pointer is used with 'step in' implementation.
- Object** restarter_frame_function_pointer_;
- };
-
- // Storage location for registers when handling debug break calls
- JSCallerSavedBuffer registers_;
- ThreadLocal thread_local_;
- void ThreadInit();
-
- // Code to call for handling debug break on return.
- Code* debug_break_return_;
-
- // Code to call for handling debug break in debug break slots.
- Code* debug_break_slot_;
-
- Isolate* isolate_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(Debug);
-};
-
-
-DECLARE_RUNTIME_FUNCTION(Object*, Debug_Break);
-
-
-// Message delivered to the message handler callback. This is either a debugger
-// event or the response to a command.
-class MessageImpl: public v8::Debug::Message {
- public:
- // Create a message object for a debug event.
- static MessageImpl NewEvent(DebugEvent event,
- bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data);
-
- // Create a message object for the response to a debug command.
- static MessageImpl NewResponse(DebugEvent event,
- bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<String> response_json,
- v8::Debug::ClientData* client_data);
-
- // Implementation of interface v8::Debug::Message.
- virtual bool IsEvent() const;
- virtual bool IsResponse() const;
- virtual DebugEvent GetEvent() const;
- virtual bool WillStartRunning() const;
- virtual v8::Handle<v8::Object> GetExecutionState() const;
- virtual v8::Handle<v8::Object> GetEventData() const;
- virtual v8::Handle<v8::String> GetJSON() const;
- virtual v8::Handle<v8::Context> GetEventContext() const;
- virtual v8::Debug::ClientData* GetClientData() const;
-
- private:
- MessageImpl(bool is_event,
- DebugEvent event,
- bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<String> response_json,
- v8::Debug::ClientData* client_data);
-
- bool is_event_; // Does this message represent a debug event?
- DebugEvent event_; // Debug event causing the break.
- bool running_; // Will the VM start running after this event?
- Handle<JSObject> exec_state_; // Current execution state.
- Handle<JSObject> event_data_; // Data associated with the event.
- Handle<String> response_json_; // Response JSON if message holds a response.
- v8::Debug::ClientData* client_data_; // Client data passed with the request.
-};
-
-
-// Details of the debug event delivered to the debug event listener.
-class EventDetailsImpl : public v8::Debug::EventDetails {
- public:
- EventDetailsImpl(DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<Object> callback_data,
- v8::Debug::ClientData* client_data);
- virtual DebugEvent GetEvent() const;
- virtual v8::Handle<v8::Object> GetExecutionState() const;
- virtual v8::Handle<v8::Object> GetEventData() const;
- virtual v8::Handle<v8::Context> GetEventContext() const;
- virtual v8::Handle<v8::Value> GetCallbackData() const;
- virtual v8::Debug::ClientData* GetClientData() const;
- private:
- DebugEvent event_; // Debug event causing the break.
- Handle<JSObject> exec_state_; // Current execution state.
- Handle<JSObject> event_data_; // Data associated with the event.
- Handle<Object> callback_data_; // User data passed with the callback
- // when it was registered.
- v8::Debug::ClientData* client_data_; // Data passed to DebugBreakForCommand.
-};
-
-
-// Message send by user to v8 debugger or debugger output message.
-// In addition to command text it may contain a pointer to some user data
-// which are expected to be passed along with the command reponse to message
-// handler.
-class CommandMessage {
- public:
- static CommandMessage New(const Vector<uint16_t>& command,
- v8::Debug::ClientData* data);
- CommandMessage();
- ~CommandMessage();
-
- // Deletes user data and disposes of the text.
- void Dispose();
- Vector<uint16_t> text() const { return text_; }
- v8::Debug::ClientData* client_data() const { return client_data_; }
- private:
- CommandMessage(const Vector<uint16_t>& text,
- v8::Debug::ClientData* data);
-
- Vector<uint16_t> text_;
- v8::Debug::ClientData* client_data_;
-};
-
-// A Queue of CommandMessage objects. A thread-safe version is
-// LockingCommandMessageQueue, based on this class.
-class CommandMessageQueue BASE_EMBEDDED {
- public:
- explicit CommandMessageQueue(int size);
- ~CommandMessageQueue();
- bool IsEmpty() const { return start_ == end_; }
- CommandMessage Get();
- void Put(const CommandMessage& message);
- void Clear() { start_ = end_ = 0; } // Queue is empty after Clear().
- private:
- // Doubles the size of the message queue, and copies the messages.
- void Expand();
-
- CommandMessage* messages_;
- int start_;
- int end_;
- int size_; // The size of the queue buffer. Queue can hold size-1 messages.
-};
-
-
-class MessageDispatchHelperThread;
-
-
-// LockingCommandMessageQueue is a thread-safe circular buffer of CommandMessage
-// messages. The message data is not managed by LockingCommandMessageQueue.
-// Pointers to the data are passed in and out. Implemented by adding a
-// Mutex to CommandMessageQueue. Includes logging of all puts and gets.
-class LockingCommandMessageQueue BASE_EMBEDDED {
- public:
- LockingCommandMessageQueue(Logger* logger, int size);
- ~LockingCommandMessageQueue();
- bool IsEmpty() const;
- CommandMessage Get();
- void Put(const CommandMessage& message);
- void Clear();
- private:
- Logger* logger_;
- CommandMessageQueue queue_;
- Mutex* lock_;
- DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
-};
-
-
-class Debugger {
- public:
- ~Debugger();
-
- void DebugRequest(const uint16_t* json_request, int length);
-
- Handle<Object> MakeJSObject(Vector<const char> constructor_name,
- int argc,
- Handle<Object> argv[],
- bool* caught_exception);
- Handle<Object> MakeExecutionState(bool* caught_exception);
- Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
- Handle<Object> break_points_hit,
- bool* caught_exception);
- Handle<Object> MakeExceptionEvent(Handle<Object> exec_state,
- Handle<Object> exception,
- bool uncaught,
- bool* caught_exception);
- Handle<Object> MakeNewFunctionEvent(Handle<Object> func,
- bool* caught_exception);
- Handle<Object> MakeCompileEvent(Handle<Script> script,
- bool before,
- bool* caught_exception);
- Handle<Object> MakeScriptCollectedEvent(int id,
- bool* caught_exception);
- void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
- void OnException(Handle<Object> exception, bool uncaught);
- void OnBeforeCompile(Handle<Script> script);
-
- enum AfterCompileFlags {
- NO_AFTER_COMPILE_FLAGS,
- SEND_WHEN_DEBUGGING
- };
- void OnAfterCompile(Handle<Script> script,
- AfterCompileFlags after_compile_flags);
- void OnScriptCollected(int id);
- void ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
- bool auto_continue);
- void NotifyMessageHandler(v8::DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- bool auto_continue);
- void SetEventListener(Handle<Object> callback, Handle<Object> data);
- void SetMessageHandler(v8::Debug::MessageHandler2 handler);
- void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- int period);
- void SetDebugMessageDispatchHandler(
- v8::Debug::DebugMessageDispatchHandler handler,
- bool provide_locker);
-
- // Invoke the message handler function.
- void InvokeMessageHandler(MessageImpl message);
-
- // Add a debugger command to the command queue.
- void ProcessCommand(Vector<const uint16_t> command,
- v8::Debug::ClientData* client_data = NULL);
-
- // Check whether there are commands in the command queue.
- bool HasCommands();
-
- // Enqueue a debugger command to the command queue for event listeners.
- void EnqueueDebugCommand(v8::Debug::ClientData* client_data = NULL);
-
- Handle<Object> Call(Handle<JSFunction> fun,
- Handle<Object> data,
- bool* pending_exception);
-
- // Start the debugger agent listening on the provided port.
- bool StartAgent(const char* name, int port,
- bool wait_for_connection = false);
-
- // Stop the debugger agent.
- void StopAgent();
-
- // Blocks until the agent has started listening for connections
- void WaitForAgent();
-
- void CallMessageDispatchHandler();
-
- Handle<Context> GetDebugContext();
-
- // Unload the debugger if possible. Only called when no debugger is currently
- // active.
- void UnloadDebugger();
- friend void ForceUnloadDebugger(); // In test-debug.cc
-
- inline bool EventActive(v8::DebugEvent event) {
- ScopedLock with(debugger_access_);
-
- // Check whether the message handler was been cleared.
- if (debugger_unload_pending_) {
- if (isolate_->debug()->debugger_entry() == NULL) {
- UnloadDebugger();
- }
- }
-
- if (((event == v8::BeforeCompile) || (event == v8::AfterCompile)) &&
- !FLAG_debug_compile_events) {
- return false;
-
- } else if ((event == v8::ScriptCollected) &&
- !FLAG_debug_script_collected_events) {
- return false;
- }
-
- // Currently argument event is not used.
- return !compiling_natives_ && Debugger::IsDebuggerActive();
- }
-
- void set_compiling_natives(bool compiling_natives) {
- compiling_natives_ = compiling_natives;
- }
- bool compiling_natives() const { return compiling_natives_; }
- void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
- bool is_loading_debugger() const { return is_loading_debugger_; }
- void set_live_edit_enabled(bool v) { live_edit_enabled_ = v; }
- bool live_edit_enabled() const {
- return FLAG_enable_liveedit && live_edit_enabled_ ;
- }
- void set_force_debugger_active(bool force_debugger_active) {
- force_debugger_active_ = force_debugger_active;
- }
- bool force_debugger_active() const { return force_debugger_active_; }
-
- bool IsDebuggerActive();
-
- private:
- explicit Debugger(Isolate* isolate);
-
- void CallEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data);
- void CallCEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data);
- void CallJSEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data);
- void ListenersChanged();
-
- Mutex* debugger_access_; // Mutex guarding debugger variables.
- Handle<Object> event_listener_; // Global handle to listener.
- Handle<Object> event_listener_data_;
- bool compiling_natives_; // Are we compiling natives?
- bool is_loading_debugger_; // Are we loading the debugger?
- bool live_edit_enabled_; // Enable LiveEdit.
- bool never_unload_debugger_; // Can we unload the debugger?
- bool force_debugger_active_; // Activate debugger without event listeners.
- v8::Debug::MessageHandler2 message_handler_;
- bool debugger_unload_pending_; // Was message handler cleared?
- v8::Debug::HostDispatchHandler host_dispatch_handler_;
- Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
- v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
- MessageDispatchHelperThread* message_dispatch_helper_thread_;
- int host_dispatch_micros_;
-
- DebuggerAgent* agent_;
-
- static const int kQueueInitialSize = 4;
- LockingCommandMessageQueue command_queue_;
- Semaphore* command_received_; // Signaled for each command received.
- LockingCommandMessageQueue event_command_queue_;
-
- Isolate* isolate_;
-
- friend class EnterDebugger;
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(Debugger);
-};
-
-
-// This class is used for entering the debugger. Create an instance in the stack
-// to enter the debugger. This will set the current break state, make sure the
-// debugger is loaded and switch to the debugger context. If the debugger for
-// some reason could not be entered FailedToEnter will return true.
-class EnterDebugger BASE_EMBEDDED {
- public:
- EnterDebugger();
- ~EnterDebugger();
-
- // Check whether the debugger could be entered.
- inline bool FailedToEnter() { return load_failed_; }
-
- // Check whether there are any JavaScript frames on the stack.
- inline bool HasJavaScriptFrames() { return has_js_frames_; }
-
- // Get the active context from before entering the debugger.
- inline Handle<Context> GetContext() { return save_.context(); }
-
- private:
- Isolate* isolate_;
- EnterDebugger* prev_; // Previous debugger entry if entered recursively.
- JavaScriptFrameIterator it_;
- const bool has_js_frames_; // Were there any JavaScript frames?
- StackFrame::Id break_frame_id_; // Previous break frame id.
- int break_id_; // Previous break id.
- bool load_failed_; // Did the debugger fail to load?
- SaveContext save_; // Saves previous context.
-};
-
-
-// Stack allocated class for disabling break.
-class DisableBreak BASE_EMBEDDED {
- public:
- explicit DisableBreak(bool disable_break) : isolate_(Isolate::Current()) {
- prev_disable_break_ = isolate_->debug()->disable_break();
- isolate_->debug()->set_disable_break(disable_break);
- }
- ~DisableBreak() {
- ASSERT(Isolate::Current() == isolate_);
- isolate_->debug()->set_disable_break(prev_disable_break_);
- }
-
- private:
- Isolate* isolate_;
- // The previous state of the disable break used to restore the value when this
- // object is destructed.
- bool prev_disable_break_;
-};
-
-
-// Debug_Address encapsulates the Address pointers used in generating debug
-// code.
-class Debug_Address {
- public:
- explicit Debug_Address(Debug::AddressId id) : id_(id) { }
-
- static Debug_Address AfterBreakTarget() {
- return Debug_Address(Debug::k_after_break_target_address);
- }
-
- static Debug_Address DebugBreakReturn() {
- return Debug_Address(Debug::k_debug_break_return_address);
- }
-
- static Debug_Address RestarterFrameFunctionPointer() {
- return Debug_Address(Debug::k_restarter_frame_function_pointer);
- }
-
- Address address(Isolate* isolate) const {
- Debug* debug = isolate->debug();
- switch (id_) {
- case Debug::k_after_break_target_address:
- return reinterpret_cast<Address>(debug->after_break_target_address());
- case Debug::k_debug_break_return_address:
- return reinterpret_cast<Address>(debug->debug_break_return_address());
- case Debug::k_debug_break_slot_address:
- return reinterpret_cast<Address>(debug->debug_break_slot_address());
- case Debug::k_restarter_frame_function_pointer:
- return reinterpret_cast<Address>(
- debug->restarter_frame_function_pointer_address());
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-
- private:
- Debug::AddressId id_;
-};
-
-// The optional thread that Debug Agent may use to temporary call V8 to process
-// pending debug requests if debuggee is not running V8 at the moment.
-// Techincally it does not call V8 itself, rather it asks embedding program
-// to do this via v8::Debug::HostDispatchHandler
-class MessageDispatchHelperThread: public Thread {
- public:
- explicit MessageDispatchHelperThread(Isolate* isolate);
- ~MessageDispatchHelperThread();
-
- void Schedule();
-
- private:
- void Run();
-
- Semaphore* const sem_;
- Mutex* const mutex_;
- bool already_signalled_;
-
- DISALLOW_COPY_AND_ASSIGN(MessageDispatchHelperThread);
-};
-
-
-} } // namespace v8::internal
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-#endif // V8_DEBUG_H_
diff --git a/src/3rdparty/v8/src/deoptimizer.cc b/src/3rdparty/v8/src/deoptimizer.cc
deleted file mode 100644
index c0b5945..0000000
--- a/src/3rdparty/v8/src/deoptimizer.cc
+++ /dev/null
@@ -1,2375 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "disasm.h"
-#include "full-codegen.h"
-#include "global-handles.h"
-#include "macro-assembler.h"
-#include "prettyprinter.h"
-
-
-namespace v8 {
-namespace internal {
-
-DeoptimizerData::DeoptimizerData() {
- eager_deoptimization_entry_code_entries_ = -1;
- lazy_deoptimization_entry_code_entries_ = -1;
- size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
- MemoryAllocator* allocator = Isolate::Current()->memory_allocator();
- size_t initial_commit_size = OS::CommitPageSize();
- eager_deoptimization_entry_code_ =
- allocator->AllocateChunk(deopt_table_size,
- initial_commit_size,
- EXECUTABLE,
- NULL);
- lazy_deoptimization_entry_code_ =
- allocator->AllocateChunk(deopt_table_size,
- initial_commit_size,
- EXECUTABLE,
- NULL);
- current_ = NULL;
- deoptimizing_code_list_ = NULL;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- deoptimized_frame_info_ = NULL;
-#endif
-}
-
-
-DeoptimizerData::~DeoptimizerData() {
- Isolate::Current()->memory_allocator()->Free(
- eager_deoptimization_entry_code_);
- eager_deoptimization_entry_code_ = NULL;
- Isolate::Current()->memory_allocator()->Free(
- lazy_deoptimization_entry_code_);
- lazy_deoptimization_entry_code_ = NULL;
-
- DeoptimizingCodeListNode* current = deoptimizing_code_list_;
- while (current != NULL) {
- DeoptimizingCodeListNode* prev = current;
- current = current->next();
- delete prev;
- }
- deoptimizing_code_list_ = NULL;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void DeoptimizerData::Iterate(ObjectVisitor* v) {
- if (deoptimized_frame_info_ != NULL) {
- deoptimized_frame_info_->Iterate(v);
- }
-}
-#endif
-
-
-Code* DeoptimizerData::FindDeoptimizingCode(Address addr) {
- for (DeoptimizingCodeListNode* node = deoptimizing_code_list_;
- node != NULL;
- node = node->next()) {
- if (node->code()->contains(addr)) return *node->code();
- }
- return NULL;
-}
-
-
-void DeoptimizerData::RemoveDeoptimizingCode(Code* code) {
- for (DeoptimizingCodeListNode *prev = NULL, *cur = deoptimizing_code_list_;
- cur != NULL;
- prev = cur, cur = cur->next()) {
- if (*cur->code() == code) {
- if (prev == NULL) {
- deoptimizing_code_list_ = cur->next();
- } else {
- prev->set_next(cur->next());
- }
- delete cur;
- return;
- }
- }
- // Deoptimizing code is removed through weak callback. Each object is expected
- // to be removed once and only once.
- UNREACHABLE();
-}
-
-
-// We rely on this function not causing a GC. It is called from generated code
-// without having a real stack frame in place.
-Deoptimizer* Deoptimizer::New(JSFunction* function,
- BailoutType type,
- unsigned bailout_id,
- Address from,
- int fp_to_sp_delta,
- Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- Deoptimizer* deoptimizer = new Deoptimizer(isolate,
- function,
- type,
- bailout_id,
- from,
- fp_to_sp_delta,
- NULL);
- ASSERT(isolate->deoptimizer_data()->current_ == NULL);
- isolate->deoptimizer_data()->current_ = deoptimizer;
- return deoptimizer;
-}
-
-
-// No larger than 2K on all platforms
-static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
-
-
-size_t Deoptimizer::GetMaxDeoptTableSize() {
- int entries_size =
- Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
- int commit_page_size = static_cast<int>(OS::CommitPageSize());
- int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
- commit_page_size) + 1;
- return static_cast<size_t>(commit_page_size * page_count);
-}
-
-
-Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- Deoptimizer* result = isolate->deoptimizer_data()->current_;
- ASSERT(result != NULL);
- result->DeleteFrameDescriptions();
- isolate->deoptimizer_data()->current_ = NULL;
- return result;
-}
-
-
-int Deoptimizer::ConvertJSFrameIndexToFrameIndex(int jsframe_index) {
- if (jsframe_index == 0) return 0;
-
- int frame_index = 0;
- while (jsframe_index >= 0) {
- FrameDescription* frame = output_[frame_index];
- if (frame->GetFrameType() == StackFrame::JAVA_SCRIPT) {
- jsframe_index--;
- }
- frame_index++;
- }
-
- return frame_index - 1;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
- JavaScriptFrame* frame,
- int jsframe_index,
- Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- ASSERT(frame->is_optimized());
- ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
-
- // Get the function and code from the frame.
- JSFunction* function = JSFunction::cast(frame->function());
- Code* code = frame->LookupCode();
-
- // Locate the deoptimization point in the code. As we are at a call the
- // return address must be at a place in the code with deoptimization support.
- SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
- int deoptimization_index = safepoint_entry.deoptimization_index();
- ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
-
- // Always use the actual stack slots when calculating the fp to sp
- // delta adding two for the function and context.
- unsigned stack_slots = code->stack_slots();
- unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize);
-
- Deoptimizer* deoptimizer = new Deoptimizer(isolate,
- function,
- Deoptimizer::DEBUGGER,
- deoptimization_index,
- frame->pc(),
- fp_to_sp_delta,
- code);
- Address tos = frame->fp() - fp_to_sp_delta;
- deoptimizer->FillInputFrame(tos, frame);
-
- // Calculate the output frames.
- Deoptimizer::ComputeOutputFrames(deoptimizer);
-
- // Create the GC safe output frame information and register it for GC
- // handling.
- ASSERT_LT(jsframe_index, deoptimizer->jsframe_count());
-
- // Convert JS frame index into frame index.
- int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index);
-
- bool has_arguments_adaptor =
- frame_index > 0 &&
- deoptimizer->output_[frame_index - 1]->GetFrameType() ==
- StackFrame::ARGUMENTS_ADAPTOR;
-
- int construct_offset = has_arguments_adaptor ? 2 : 1;
- bool has_construct_stub =
- frame_index >= construct_offset &&
- deoptimizer->output_[frame_index - construct_offset]->GetFrameType() ==
- StackFrame::CONSTRUCT;
-
- DeoptimizedFrameInfo* info = new DeoptimizedFrameInfo(deoptimizer,
- frame_index,
- has_arguments_adaptor,
- has_construct_stub);
- isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
-
- // Get the "simulated" top and size for the requested frame.
- FrameDescription* parameters_frame =
- deoptimizer->output_[
- has_arguments_adaptor ? (frame_index - 1) : frame_index];
-
- uint32_t parameters_size = (info->parameters_count() + 1) * kPointerSize;
- Address parameters_top = reinterpret_cast<Address>(
- parameters_frame->GetTop() + (parameters_frame->GetFrameSize() -
- parameters_size));
-
- uint32_t expressions_size = info->expression_count() * kPointerSize;
- Address expressions_top = reinterpret_cast<Address>(
- deoptimizer->output_[frame_index]->GetTop());
-
- // Done with the GC-unsafe frame descriptions. This re-enables allocation.
- deoptimizer->DeleteFrameDescriptions();
-
- // Allocate a heap number for the doubles belonging to this frame.
- deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
- parameters_top, parameters_size, expressions_top, expressions_size, info);
-
- // Finished using the deoptimizer instance.
- delete deoptimizer;
-
- return info;
-}
-
-
-void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
- Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info);
- delete info;
- isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
-}
-#endif
-
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- int count,
- BailoutType type) {
- TableEntryGenerator generator(masm, type, count);
- generator.Generate();
-}
-
-
-void Deoptimizer::VisitAllOptimizedFunctionsForContext(
- Context* context, OptimizedFunctionVisitor* visitor) {
- Isolate* isolate = context->GetIsolate();
- ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
- AssertNoAllocation no_allocation;
-
- ASSERT(context->IsNativeContext());
-
- visitor->EnterContext(context);
-
- // Create a snapshot of the optimized functions list. This is needed because
- // visitors might remove more than one link from the list at once.
- ZoneList<JSFunction*> snapshot(1, isolate->runtime_zone());
- Object* element = context->OptimizedFunctionsListHead();
- while (!element->IsUndefined()) {
- JSFunction* element_function = JSFunction::cast(element);
- snapshot.Add(element_function, isolate->runtime_zone());
- element = element_function->next_function_link();
- }
-
- // Run through the snapshot of optimized functions and visit them.
- for (int i = 0; i < snapshot.length(); ++i) {
- visitor->VisitFunction(snapshot.at(i));
- }
-
- visitor->LeaveContext(context);
-}
-
-
-void Deoptimizer::VisitAllOptimizedFunctions(
- OptimizedFunctionVisitor* visitor) {
- AssertNoAllocation no_allocation;
-
- // Run through the list of all native contexts and deoptimize.
- Object* context = Isolate::Current()->heap()->native_contexts_list();
- while (!context->IsUndefined()) {
- VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
- }
-}
-
-
-// Removes the functions selected by the given filter from the optimized
-// function list of the given context and partitions the removed functions
-// into one or more lists such that all functions in a list share the same
-// code. The head of each list is written in the deoptimizing_functions field
-// of the corresponding code object.
-// The found code objects are returned in the given zone list.
-static void PartitionOptimizedFunctions(Context* context,
- OptimizedFunctionFilter* filter,
- ZoneList<Code*>* partitions,
- Zone* zone,
- Object* undefined) {
- AssertNoAllocation no_allocation;
- Object* current = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
- Object* remainder_head = undefined;
- Object* remainder_tail = undefined;
- ASSERT_EQ(0, partitions->length());
- while (current != undefined) {
- JSFunction* function = JSFunction::cast(current);
- current = function->next_function_link();
- if (filter->TakeFunction(function)) {
- Code* code = function->code();
- if (code->deoptimizing_functions() == undefined) {
- partitions->Add(code, zone);
- } else {
- ASSERT(partitions->Contains(code));
- }
- function->set_next_function_link(code->deoptimizing_functions());
- code->set_deoptimizing_functions(function);
- } else {
- if (remainder_head == undefined) {
- remainder_head = function;
- } else {
- JSFunction::cast(remainder_tail)->set_next_function_link(function);
- }
- remainder_tail = function;
- }
- }
- if (remainder_tail != undefined) {
- JSFunction::cast(remainder_tail)->set_next_function_link(undefined);
- }
- context->set(Context::OPTIMIZED_FUNCTIONS_LIST, remainder_head);
-}
-
-
-class DeoptimizeAllFilter : public OptimizedFunctionFilter {
- public:
- virtual bool TakeFunction(JSFunction* function) {
- return true;
- }
-};
-
-
-class DeoptimizeWithMatchingCodeFilter : public OptimizedFunctionFilter {
- public:
- explicit DeoptimizeWithMatchingCodeFilter(Code* code) : code_(code) {}
- virtual bool TakeFunction(JSFunction* function) {
- return function->code() == code_;
- }
- private:
- Code* code_;
-};
-
-
-void Deoptimizer::DeoptimizeAll() {
- AssertNoAllocation no_allocation;
-
- if (FLAG_trace_deopt) {
- PrintF("[deoptimize all contexts]\n");
- }
-
- DeoptimizeAllFilter filter;
- DeoptimizeAllFunctionsWith(&filter);
-}
-
-
-void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
- AssertNoAllocation no_allocation;
- DeoptimizeAllFilter filter;
- if (object->IsJSGlobalProxy()) {
- Object* proto = object->GetPrototype();
- ASSERT(proto->IsJSGlobalObject());
- DeoptimizeAllFunctionsForContext(
- GlobalObject::cast(proto)->native_context(), &filter);
- } else if (object->IsGlobalObject()) {
- DeoptimizeAllFunctionsForContext(
- GlobalObject::cast(object)->native_context(), &filter);
- }
-}
-
-
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- if (!function->IsOptimized()) return;
- Code* code = function->code();
- Context* context = function->context()->native_context();
- Isolate* isolate = context->GetIsolate();
- Object* undefined = isolate->heap()->undefined_value();
- Zone* zone = isolate->runtime_zone();
- ZoneScope zone_scope(zone, DELETE_ON_EXIT);
- ZoneList<Code*> codes(1, zone);
- DeoptimizeWithMatchingCodeFilter filter(code);
- PartitionOptimizedFunctions(context, &filter, &codes, zone, undefined);
- ASSERT_EQ(1, codes.length());
- DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction::cast(codes.at(0)->deoptimizing_functions()));
- codes.at(0)->set_deoptimizing_functions(undefined);
-}
-
-
-void Deoptimizer::DeoptimizeAllFunctionsForContext(
- Context* context, OptimizedFunctionFilter* filter) {
- ASSERT(context->IsNativeContext());
- Isolate* isolate = context->GetIsolate();
- Object* undefined = isolate->heap()->undefined_value();
- Zone* zone = isolate->runtime_zone();
- ZoneScope zone_scope(zone, DELETE_ON_EXIT);
- ZoneList<Code*> codes(1, zone);
- PartitionOptimizedFunctions(context, filter, &codes, zone, undefined);
- for (int i = 0; i < codes.length(); ++i) {
- DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction::cast(codes.at(i)->deoptimizing_functions()));
- codes.at(i)->set_deoptimizing_functions(undefined);
- }
-}
-
-
-void Deoptimizer::DeoptimizeAllFunctionsWith(OptimizedFunctionFilter* filter) {
- AssertNoAllocation no_allocation;
-
- // Run through the list of all native contexts and deoptimize.
- Object* context = Isolate::Current()->heap()->native_contexts_list();
- while (!context->IsUndefined()) {
- DeoptimizeAllFunctionsForContext(Context::cast(context), filter);
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
- }
-}
-
-
-void Deoptimizer::HandleWeakDeoptimizedCode(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
- void* parameter) {
- DeoptimizingCodeListNode* node =
- reinterpret_cast<DeoptimizingCodeListNode*>(parameter);
- DeoptimizerData* data =
- reinterpret_cast<Isolate*>(isolate)->deoptimizer_data();
- data->RemoveDeoptimizingCode(*node->code());
-#ifdef DEBUG
- for (DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
- current != NULL;
- current = current->next()) {
- ASSERT(current != node);
- }
-#endif
-}
-
-
-void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
- deoptimizer->DoComputeOutputFrames();
-}
-
-
-bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
- StackFrame::Type frame_type) {
- switch (deopt_type) {
- case EAGER:
- case LAZY:
- case DEBUGGER:
- return (frame_type == StackFrame::STUB)
- ? FLAG_trace_stub_failures
- : FLAG_trace_deopt;
- case OSR:
- return FLAG_trace_osr;
- }
- UNREACHABLE();
- return false;
-}
-
-
-const char* Deoptimizer::MessageFor(BailoutType type) {
- switch (type) {
- case EAGER:
- case LAZY:
- return "DEOPT";
- case DEBUGGER:
- return "DEOPT FOR DEBUGGER";
- case OSR:
- return "OSR";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-Deoptimizer::Deoptimizer(Isolate* isolate,
- JSFunction* function,
- BailoutType type,
- unsigned bailout_id,
- Address from,
- int fp_to_sp_delta,
- Code* optimized_code)
- : isolate_(isolate),
- function_(function),
- bailout_id_(bailout_id),
- bailout_type_(type),
- from_(from),
- fp_to_sp_delta_(fp_to_sp_delta),
- has_alignment_padding_(0),
- input_(NULL),
- output_count_(0),
- jsframe_count_(0),
- output_(NULL),
- deferred_arguments_objects_values_(0),
- deferred_arguments_objects_(0),
- deferred_heap_numbers_(0),
- trace_(false) {
- // For COMPILED_STUBs called from builtins, the function pointer is a SMI
- // indicating an internal frame.
- if (function->IsSmi()) {
- function = NULL;
- }
- if (function != NULL && function->IsOptimized()) {
- function->shared()->increment_deopt_count();
- }
- compiled_code_ = FindOptimizedCode(function, optimized_code);
- StackFrame::Type frame_type = function == NULL
- ? StackFrame::STUB
- : StackFrame::JAVA_SCRIPT;
- trace_ = TraceEnabledFor(type, frame_type);
- if (trace_) Trace();
- ASSERT(HEAP->allow_allocation(false));
- unsigned size = ComputeInputFrameSize();
- input_ = new(size) FrameDescription(size, function);
- input_->SetFrameType(frame_type);
-}
-
-
-Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
- Code* optimized_code) {
- switch (bailout_type_) {
- case Deoptimizer::EAGER:
- ASSERT(from_ == NULL);
- return function->code();
- case Deoptimizer::LAZY: {
- Code* compiled_code =
- isolate_->deoptimizer_data()->FindDeoptimizingCode(from_);
- return (compiled_code == NULL)
- ? static_cast<Code*>(isolate_->heap()->FindCodeObject(from_))
- : compiled_code;
- }
- case Deoptimizer::OSR: {
- // The function has already been optimized and we're transitioning
- // from the unoptimized shared version to the optimized one in the
- // function. The return address (from_) points to unoptimized code.
- Code* compiled_code = function->code();
- ASSERT(compiled_code->kind() == Code::OPTIMIZED_FUNCTION);
- ASSERT(!compiled_code->contains(from_));
- return compiled_code;
- }
- case Deoptimizer::DEBUGGER:
- ASSERT(optimized_code->contains(from_));
- return optimized_code;
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-void Deoptimizer::Trace() {
- PrintF("**** %s: ", Deoptimizer::MessageFor(bailout_type_));
- PrintFunctionName();
- PrintF(" at id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
- bailout_id_,
- reinterpret_cast<intptr_t>(from_),
- fp_to_sp_delta_ - (2 * kPointerSize));
- if (bailout_type_ == EAGER) compiled_code_->PrintDeoptLocation(bailout_id_);
-}
-
-
-void Deoptimizer::PrintFunctionName() {
- if (function_->IsJSFunction()) {
- function_->PrintName();
- } else {
- PrintF("%s", Code::Kind2String(compiled_code_->kind()));
- }
-}
-
-
-Deoptimizer::~Deoptimizer() {
- ASSERT(input_ == NULL && output_ == NULL);
-}
-
-
-void Deoptimizer::DeleteFrameDescriptions() {
- delete input_;
- for (int i = 0; i < output_count_; ++i) {
- if (output_[i] != input_) delete output_[i];
- }
- delete[] output_;
- input_ = NULL;
- output_ = NULL;
- ASSERT(!HEAP->allow_allocation(true));
-}
-
-
-Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
- int id,
- BailoutType type,
- GetEntryMode mode) {
- ASSERT(id >= 0);
- if (id >= kMaxNumberOfEntries) return NULL;
- MemoryChunk* base = NULL;
- if (mode == ENSURE_ENTRY_CODE) {
- EnsureCodeForDeoptimizationEntry(isolate, type, id);
- } else {
- ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
- }
- DeoptimizerData* data = isolate->deoptimizer_data();
- if (type == EAGER) {
- base = data->eager_deoptimization_entry_code_;
- } else {
- base = data->lazy_deoptimization_entry_code_;
- }
- return base->area_start() + (id * table_entry_size_);
-}
-
-
-int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
- MemoryChunk* base = NULL;
- DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
- if (type == EAGER) {
- base = data->eager_deoptimization_entry_code_;
- } else {
- base = data->lazy_deoptimization_entry_code_;
- }
- Address start = base->area_start();
- if (base == NULL ||
- addr < start ||
- addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
- return kNotDeoptimizationEntry;
- }
- ASSERT_EQ(0,
- static_cast<int>(addr - start) % table_entry_size_);
- return static_cast<int>(addr - start) / table_entry_size_;
-}
-
-
-int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
- BailoutId id,
- SharedFunctionInfo* shared) {
- // TODO(kasperl): For now, we do a simple linear search for the PC
- // offset associated with the given node id. This should probably be
- // changed to a binary search.
- int length = data->DeoptPoints();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == id) {
- return data->PcAndState(i)->value();
- }
- }
- PrintF("[couldn't find pc offset for node=%d]\n", id.ToInt());
- PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
- // Print the source code if available.
- HeapStringAllocator string_allocator;
- StringStream stream(&string_allocator);
- shared->SourceCodePrint(&stream, -1);
- PrintF("[source:\n%s\n]", *stream.ToCString());
-
- FATAL("unable to find pc offset during deoptimization");
- return -1;
-}
-
-
-int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
- int length = 0;
- DeoptimizingCodeListNode* node =
- isolate->deoptimizer_data()->deoptimizing_code_list_;
- while (node != NULL) {
- length++;
- node = node->next();
- }
- return length;
-}
-
-
-// We rely on this function not causing a GC. It is called from generated code
-// without having a real stack frame in place.
-void Deoptimizer::DoComputeOutputFrames() {
- if (bailout_type_ == OSR) {
- DoComputeOsrOutputFrame();
- return;
- }
-
- // Print some helpful diagnostic information.
- int64_t start = OS::Ticks();
- if (trace_) {
- PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
- (bailout_type_ == LAZY ? " (lazy)" : ""),
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" @%d]\n", bailout_id_);
- }
-
- // Determine basic deoptimization information. The optimized frame is
- // described by the input data.
- DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
- BailoutId node_id = input_data->AstId(bailout_id_);
- ByteArray* translations = input_data->TranslationByteArray();
- unsigned translation_index =
- input_data->TranslationIndex(bailout_id_)->value();
-
- // Do the input frame to output frame(s) translation.
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- // Read the number of output frames and allocate an array for their
- // descriptions.
- int count = iterator.Next();
- iterator.Next(); // Drop JS frames count.
- ASSERT(output_ == NULL);
- output_ = new FrameDescription*[count];
- for (int i = 0; i < count; ++i) {
- output_[i] = NULL;
- }
- output_count_ = count;
-
- // Translate each output frame.
- for (int i = 0; i < count; ++i) {
- // Read the ast node id, function, and frame height for this output frame.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- switch (opcode) {
- case Translation::JS_FRAME:
- DoComputeJSFrame(&iterator, i);
- jsframe_count_++;
- break;
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- DoComputeArgumentsAdaptorFrame(&iterator, i);
- break;
- case Translation::CONSTRUCT_STUB_FRAME:
- DoComputeConstructStubFrame(&iterator, i);
- break;
- case Translation::GETTER_STUB_FRAME:
- DoComputeAccessorStubFrame(&iterator, i, false);
- break;
- case Translation::SETTER_STUB_FRAME:
- DoComputeAccessorStubFrame(&iterator, i, true);
- break;
- case Translation::COMPILED_STUB_FRAME:
- DoComputeCompiledStubFrame(&iterator, i);
- break;
- case Translation::BEGIN:
- case Translation::REGISTER:
- case Translation::INT32_REGISTER:
- case Translation::UINT32_REGISTER:
- case Translation::DOUBLE_REGISTER:
- case Translation::STACK_SLOT:
- case Translation::INT32_STACK_SLOT:
- case Translation::UINT32_STACK_SLOT:
- case Translation::DOUBLE_STACK_SLOT:
- case Translation::LITERAL:
- case Translation::ARGUMENTS_OBJECT:
- case Translation::DUPLICATE:
- default:
- UNREACHABLE();
- break;
- }
- }
-
- // Print some helpful diagnostic information.
- if (trace_) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
- int index = output_count_ - 1; // Index of the topmost frame.
- JSFunction* function = output_[index]->GetFunction();
- PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function));
- if (function != NULL) function->PrintName();
- PrintF(" => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
- " took %0.3f ms]\n",
- node_id.ToInt(),
- output_[index]->GetPc(),
- FullCodeGenerator::State2String(
- static_cast<FullCodeGenerator::State>(
- output_[index]->GetState()->value())),
- has_alignment_padding_ ? "with padding" : "no padding",
- ms);
- }
-}
-
-
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index) {
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
- // Arguments adaptor can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- intptr_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // A marker value is used in place of the context.
- output_offset -= kPointerSize;
- intptr_t context = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, context);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; context (adaptor sentinel)\n",
- top_address + output_offset, output_offset, context);
- }
-
- // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- ASSERT(0 == output_offset);
-
- Builtins* builtins = isolate_->builtins();
- Code* adaptor_trampoline =
- builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
- intptr_t pc_value = reinterpret_cast<intptr_t>(
- adaptor_trampoline->instruction_start() +
- isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
- output_frame->SetPc(pc_value);
-}
-
-
-void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
- bool is_setter_stub_frame) {
- JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
- // The receiver (and the implicit return value, if any) are expected in
- // registers by the LoadIC/StoreIC, so they don't belong to the output stack
- // frame. This means that we have to use a height of 0.
- unsigned height = 0;
- unsigned height_in_bytes = height * kPointerSize;
- const char* kind = is_setter_stub_frame ? "setter" : "getter";
- if (trace_) {
- PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
- }
-
- // We need 1 stack entry for the return address + 4 stack entries from
- // StackFrame::INTERNAL (FP, context, frame type, code object, see
- // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
- // entry for the implicit return value, see
- // StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
- unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, accessor);
- output_frame->SetFrameType(StackFrame::INTERNAL);
-
- // A frame for an accessor stub can not be the topmost or bottommost one.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous frame's top and
- // this frame's size.
- intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- unsigned output_offset = output_frame_size;
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; function (%s sentinel)\n",
- top_address + output_offset, output_offset, value, kind);
- }
-
- // Get Code object from accessor stub.
- output_offset -= kPointerSize;
- Builtins::Name name = is_setter_stub_frame ?
- Builtins::kStoreIC_Setter_ForDeopt :
- Builtins::kLoadIC_Getter_ForDeopt;
- Code* accessor_stub = isolate_->builtins()->builtin(name);
- value = reinterpret_cast<intptr_t>(accessor_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Skip receiver.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
-
- if (is_setter_stub_frame) {
- // The implicit return value was part of the artificial setter stub
- // environment.
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- ASSERT(0 == output_offset);
-
- Smi* offset = is_setter_stub_frame ?
- isolate_->heap()->setter_stub_deopt_pc_offset() :
- isolate_->heap()->getter_stub_deopt_pc_offset();
- intptr_t pc = reinterpret_cast<intptr_t>(
- accessor_stub->instruction_start() + offset->value());
- output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
- ASSERT_NE(DEBUGGER, bailout_type_);
-
- // Handlify all argument object values before triggering any allocation.
- List<Handle<Object> > values(deferred_arguments_objects_values_.length());
- for (int i = 0; i < deferred_arguments_objects_values_.length(); ++i) {
- values.Add(Handle<Object>(deferred_arguments_objects_values_[i],
- isolate_));
- }
-
- // Play it safe and clear all unhandlified values before we continue.
- deferred_arguments_objects_values_.Clear();
-
- // Materialize all heap numbers before looking at arguments because when the
- // output frames are used to materialize arguments objects later on they need
- // to already contain valid heap numbers.
- for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
- HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
- Handle<Object> num = isolate_->factory()->NewNumber(d.value());
- if (trace_) {
- PrintF("Materializing a new heap number %p [%e] in slot %p\n",
- reinterpret_cast<void*>(*num),
- d.value(),
- d.slot_address());
- }
- Memory::Object_at(d.slot_address()) = *num;
- }
-
- // Materialize arguments objects one frame at a time.
- for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
- if (frame_index != 0) it->Advance();
- JavaScriptFrame* frame = it->frame();
- Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate_);
- Handle<JSObject> arguments;
- for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
- if (frame->GetExpression(i) == isolate_->heap()->arguments_marker()) {
- ArgumentsObjectMaterializationDescriptor descriptor =
- deferred_arguments_objects_.RemoveLast();
- const int length = descriptor.arguments_length();
- if (arguments.is_null()) {
- if (frame->has_adapted_arguments()) {
- // Use the arguments adapter frame we just built to materialize the
- // arguments object. FunctionGetArguments can't throw an exception,
- // so cast away the doubt with an assert.
- arguments = Handle<JSObject>(JSObject::cast(
- Accessors::FunctionGetArguments(*function,
- NULL)->ToObjectUnchecked()));
- values.RewindBy(length);
- } else {
- // Construct an arguments object and copy the parameters to a newly
- // allocated arguments object backing store.
- arguments =
- isolate_->factory()->NewArgumentsObject(function, length);
- Handle<FixedArray> array =
- isolate_->factory()->NewFixedArray(length);
- ASSERT(array->length() == length);
- for (int i = length - 1; i >= 0 ; --i) {
- array->set(i, *values.RemoveLast());
- }
- arguments->set_elements(*array);
- }
- }
- frame->SetExpression(i, *arguments);
- ASSERT_EQ(Memory::Object_at(descriptor.slot_address()), *arguments);
- if (trace_) {
- PrintF("Materializing %sarguments object of length %d for %p: ",
- frame->has_adapted_arguments() ? "(adapted) " : "",
- arguments->elements()->length(),
- reinterpret_cast<void*>(descriptor.slot_address()));
- arguments->ShortPrint();
- PrintF("\n");
- }
- }
- }
- }
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
- Address parameters_top,
- uint32_t parameters_size,
- Address expressions_top,
- uint32_t expressions_size,
- DeoptimizedFrameInfo* info) {
- ASSERT_EQ(DEBUGGER, bailout_type_);
- Address parameters_bottom = parameters_top + parameters_size;
- Address expressions_bottom = expressions_top + expressions_size;
- for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
- HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
-
- // Check of the heap number to materialize actually belong to the frame
- // being extracted.
- Address slot = d.slot_address();
- if (parameters_top <= slot && slot < parameters_bottom) {
- Handle<Object> num = isolate_->factory()->NewNumber(d.value());
-
- int index = (info->parameters_count() - 1) -
- static_cast<int>(slot - parameters_top) / kPointerSize;
-
- if (trace_) {
- PrintF("Materializing a new heap number %p [%e] in slot %p"
- "for parameter slot #%d\n",
- reinterpret_cast<void*>(*num),
- d.value(),
- d.slot_address(),
- index);
- }
-
- info->SetParameter(index, *num);
- } else if (expressions_top <= slot && slot < expressions_bottom) {
- Handle<Object> num = isolate_->factory()->NewNumber(d.value());
-
- int index = info->expression_count() - 1 -
- static_cast<int>(slot - expressions_top) / kPointerSize;
-
- if (trace_) {
- PrintF("Materializing a new heap number %p [%e] in slot %p"
- "for expression slot #%d\n",
- reinterpret_cast<void*>(*num),
- d.value(),
- d.slot_address(),
- index);
- }
-
- info->SetExpression(index, *num);
- }
- }
-}
-#endif
-
-
-void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
- int frame_index,
- unsigned output_offset) {
- disasm::NameConverter converter;
- // A GC-safe temporary placeholder that we can put in the output frame.
- const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
-
- // Ignore commands marked as duplicate and act on the first non-duplicate.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- while (opcode == Translation::DUPLICATE) {
- opcode = static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
- opcode = static_cast<Translation::Opcode>(iterator->Next());
- }
-
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::JS_FRAME:
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- case Translation::CONSTRUCT_STUB_FRAME:
- case Translation::GETTER_STUB_FRAME:
- case Translation::SETTER_STUB_FRAME:
- case Translation::COMPILED_STUB_FRAME:
- case Translation::DUPLICATE:
- UNREACHABLE();
- return;
-
- case Translation::REGISTER: {
- int input_reg = iterator->Next();
- intptr_t input_value = input_->GetRegister(input_reg);
- if (trace_) {
- PrintF(
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- input_value,
- converter.NameOfCPURegister(input_reg));
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
- }
- output_[frame_index]->SetFrameSlot(output_offset, input_value);
- return;
- }
-
- case Translation::INT32_REGISTER: {
- int input_reg = iterator->Next();
- intptr_t value = input_->GetRegister(input_reg);
- bool is_smi = Smi::IsValid(value);
- if (trace_) {
- PrintF(
- " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- value,
- converter.NameOfCPURegister(input_reg),
- is_smi ? "smi" : "heap number");
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else {
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
- static_cast<double>(static_cast<int32_t>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- }
- return;
- }
-
- case Translation::UINT32_REGISTER: {
- int input_reg = iterator->Next();
- uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
- bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_) {
- PrintF(
- " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
- " ; uint %s (%s)\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- value,
- converter.NameOfCPURegister(input_reg),
- is_smi ? "smi" : "heap number");
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else {
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
- static_cast<double>(static_cast<uint32_t>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- }
- return;
- }
-
- case Translation::DOUBLE_REGISTER: {
- int input_reg = iterator->Next();
- double value = input_->GetDoubleRegister(input_reg);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- value,
- DoubleRegister::AllocationIndexToString(input_reg));
- }
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- return;
- }
-
- case Translation::STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(input_slot_index);
- intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": ",
- output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
- output_offset,
- input_value,
- input_offset);
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
- }
- output_[frame_index]->SetFrameSlot(output_offset, input_value);
- return;
- }
-
- case Translation::INT32_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(input_slot_index);
- intptr_t value = input_->GetFrameSlot(input_offset);
- bool is_smi = Smi::IsValid(value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": ",
- output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
- output_offset,
- value,
- input_offset,
- is_smi ? "smi" : "heap number");
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else {
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
- static_cast<double>(static_cast<int32_t>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- }
- return;
- }
-
- case Translation::UINT32_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(input_slot_index);
- uintptr_t value =
- static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
- bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": ",
- output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
- output_offset,
- value,
- input_offset,
- is_smi ? "smi" : "heap number");
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else {
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
- static_cast<double>(static_cast<uint32_t>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- }
- return;
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(input_slot_index);
- double value = input_->GetDoubleFrameSlot(input_offset);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- value,
- input_offset);
- }
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- return;
- }
-
- case Translation::LITERAL: {
- Object* literal = ComputeLiteral(iterator->Next());
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
- output_[frame_index]->GetTop() + output_offset,
- output_offset);
- literal->ShortPrint();
- PrintF(" ; literal\n");
- }
- intptr_t value = reinterpret_cast<intptr_t>(literal);
- output_[frame_index]->SetFrameSlot(output_offset, value);
- return;
- }
-
- case Translation::ARGUMENTS_OBJECT: {
- bool args_known = iterator->Next();
- int args_index = iterator->Next() + 1; // Skip receiver.
- int args_length = iterator->Next() - 1; // Skip receiver.
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
- output_[frame_index]->GetTop() + output_offset,
- output_offset);
- isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; %sarguments object\n", args_known ? "" : "dummy ");
- }
- // Use the arguments marker value as a sentinel and fill in the arguments
- // object after the deoptimized frame is built.
- intptr_t value = reinterpret_cast<intptr_t>(
- isolate_->heap()->arguments_marker());
- AddArgumentsObject(
- output_[frame_index]->GetTop() + output_offset, args_length);
- output_[frame_index]->SetFrameSlot(output_offset, value);
- // We save the tagged argument values on the side and materialize the
- // actual arguments object after the deoptimized frame is built.
- for (int i = 0; i < args_length; i++) {
- unsigned input_offset = input_->GetOffsetFromSlotIndex(args_index + i);
- intptr_t input_value = args_known
- ? input_->GetFrameSlot(input_offset)
- : reinterpret_cast<intptr_t>(isolate_->heap()->the_hole_value());
- AddArgumentsObjectValue(input_value);
- }
- return;
- }
- }
-}
-
-
-static bool ObjectToInt32(Object* obj, int32_t* value) {
- if (obj->IsSmi()) {
- *value = Smi::cast(obj)->value();
- return true;
- }
-
- if (obj->IsHeapNumber()) {
- double num = HeapNumber::cast(obj)->value();
- if (FastI2D(FastD2I(num)) != num) {
- if (FLAG_trace_osr) {
- PrintF("**** %g could not be converted to int32 ****\n",
- HeapNumber::cast(obj)->value());
- }
- return false;
- }
-
- *value = FastD2I(num);
- return true;
- }
-
- return false;
-}
-
-
-static bool ObjectToUint32(Object* obj, uint32_t* value) {
- if (obj->IsSmi()) {
- if (Smi::cast(obj)->value() < 0) return false;
-
- *value = static_cast<uint32_t>(Smi::cast(obj)->value());
- return true;
- }
-
- if (obj->IsHeapNumber()) {
- double num = HeapNumber::cast(obj)->value();
- if ((num < 0) || (FastUI2D(FastD2UI(num)) != num)) {
- if (FLAG_trace_osr) {
- PrintF("**** %g could not be converted to uint32 ****\n",
- HeapNumber::cast(obj)->value());
- }
- return false;
- }
-
- *value = FastD2UI(num);
- return true;
- }
-
- return false;
-}
-
-
-bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
- int* input_offset) {
- disasm::NameConverter converter;
- FrameDescription* output = output_[0];
-
- // The input values are all part of the unoptimized frame so they
- // are all tagged pointers.
- uintptr_t input_value = input_->GetFrameSlot(*input_offset);
- Object* input_object = reinterpret_cast<Object*>(input_value);
-
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- bool duplicate = (opcode == Translation::DUPLICATE);
- if (duplicate) {
- opcode = static_cast<Translation::Opcode>(iterator->Next());
- }
-
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::JS_FRAME:
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- case Translation::CONSTRUCT_STUB_FRAME:
- case Translation::GETTER_STUB_FRAME:
- case Translation::SETTER_STUB_FRAME:
- case Translation::COMPILED_STUB_FRAME:
- case Translation::DUPLICATE:
- UNREACHABLE(); // Malformed input.
- return false;
-
- case Translation::REGISTER: {
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- input_value,
- *input_offset);
- }
- output->SetRegister(output_reg, input_value);
- break;
- }
-
- case Translation::INT32_REGISTER: {
- int32_t int32_value = 0;
- if (!ObjectToInt32(input_object, &int32_value)) return false;
-
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- %d (int32) ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- int32_value,
- *input_offset);
- }
- output->SetRegister(output_reg, int32_value);
- break;
- }
-
- case Translation::UINT32_REGISTER: {
- uint32_t uint32_value = 0;
- if (!ObjectToUint32(input_object, &uint32_value)) return false;
-
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- %u (uint32) ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- uint32_value,
- *input_offset);
- }
- output->SetRegister(output_reg, static_cast<int32_t>(uint32_value));
- }
-
-
- case Translation::DOUBLE_REGISTER: {
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
-
- int output_reg = iterator->Next();
- double double_value = input_object->Number();
- if (FLAG_trace_osr) {
- PrintF(" %s <- %g (double) ; [sp + %d]\n",
- DoubleRegister::AllocationIndexToString(output_reg),
- double_value,
- *input_offset);
- }
- output->SetDoubleRegister(output_reg, double_value);
- break;
- }
-
- case Translation::STACK_SLOT: {
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
- output_offset,
- input_value,
- *input_offset);
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
- }
- output->SetFrameSlot(output_offset, input_value);
- break;
- }
-
- case Translation::INT32_STACK_SLOT: {
- int32_t int32_value = 0;
- if (!ObjectToInt32(input_object, &int32_value)) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
- output_offset,
- int32_value,
- *input_offset);
- }
- output->SetFrameSlot(output_offset, int32_value);
- break;
- }
-
- case Translation::UINT32_STACK_SLOT: {
- uint32_t uint32_value = 0;
- if (!ObjectToUint32(input_object, &uint32_value)) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- %u (uint32) ; [sp + %d]\n",
- output_offset,
- uint32_value,
- *input_offset);
- }
- output->SetFrameSlot(output_offset, static_cast<int32_t>(uint32_value));
- break;
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- static const int kLowerOffset = 0 * kPointerSize;
- static const int kUpperOffset = 1 * kPointerSize;
-
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- double double_value = input_object->Number();
- uint64_t int_value = BitCast<uint64_t, double>(double_value);
- int32_t lower = static_cast<int32_t>(int_value);
- int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n",
- output_offset + kUpperOffset,
- upper,
- double_value,
- *input_offset);
- PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n",
- output_offset + kLowerOffset,
- lower,
- double_value,
- *input_offset);
- }
- output->SetFrameSlot(output_offset + kLowerOffset, lower);
- output->SetFrameSlot(output_offset + kUpperOffset, upper);
- break;
- }
-
- case Translation::LITERAL: {
- // Just ignore non-materialized literals.
- iterator->Next();
- break;
- }
-
- case Translation::ARGUMENTS_OBJECT: {
- // Optimized code assumes that the argument object has not been
- // materialized and so bypasses it when doing arguments access.
- // We should have bailed out before starting the frame
- // translation.
- UNREACHABLE();
- return false;
- }
- }
-
- if (!duplicate) *input_offset -= kPointerSize;
- return true;
-}
-
-
-void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code) {
- // Iterate over the stack check table and patch every stack check
- // call to an unconditional call to the replacement code.
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Address stack_check_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->stack_check_table_offset();
- uint32_t table_length = Memory::uint32_at(stack_check_cursor);
- stack_check_cursor += kIntSize;
- for (uint32_t i = 0; i < table_length; ++i) {
- uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
- Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- PatchStackCheckCodeAt(unoptimized_code,
- pc_after,
- check_code,
- replacement_code);
- stack_check_cursor += 2 * kIntSize;
- }
-}
-
-
-void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code) {
- // Iterate over the stack check table and revert the patched
- // stack check calls.
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Address stack_check_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->stack_check_table_offset();
- uint32_t table_length = Memory::uint32_at(stack_check_cursor);
- stack_check_cursor += kIntSize;
- for (uint32_t i = 0; i < table_length; ++i) {
- uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
- Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- RevertStackCheckCodeAt(unoptimized_code,
- pc_after,
- check_code,
- replacement_code);
- stack_check_cursor += 2 * kIntSize;
- }
-}
-
-
-unsigned Deoptimizer::ComputeInputFrameSize() const {
- unsigned fixed_size = ComputeFixedSize(function_);
- // The fp-to-sp delta already takes the context and the function
- // into account so we have to avoid double counting them (-2).
- unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
-#ifdef DEBUG
- if (bailout_type_ == OSR) {
- // TODO(kasperl): It would be nice if we could verify that the
- // size matches with the stack height we can compute based on the
- // environment at the OSR entry. The code for that his built into
- // the DoComputeOsrOutputFrame function for now.
- } else if (compiled_code_->kind() != Code::COMPILED_STUB) {
- unsigned stack_slots = compiled_code_->stack_slots();
- unsigned outgoing_size = ComputeOutgoingArgumentSize();
- ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
- }
-#endif
- return result;
-}
-
-
-unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
- // The fixed part of the frame consists of the return address, frame
- // pointer, function, context, and all the incoming arguments.
- return ComputeIncomingArgumentSize(function) +
- StandardFrameConstants::kFixedFrameSize;
-}
-
-
-unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
- // The incoming arguments is the values for formal parameters and
- // the receiver. Every slot contains a pointer.
- if (function->IsSmi()) {
- ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB));
- return 0;
- }
- unsigned arguments = function->shared()->formal_parameter_count() + 1;
- return arguments * kPointerSize;
-}
-
-
-unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
- return height * kPointerSize;
-}
-
-
-Object* Deoptimizer::ComputeLiteral(int index) const {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- FixedArray* literals = data->LiteralArray();
- return literals->get(index);
-}
-
-
-void Deoptimizer::AddArgumentsObject(intptr_t slot_address, int argc) {
- ArgumentsObjectMaterializationDescriptor object_desc(
- reinterpret_cast<Address>(slot_address), argc);
- deferred_arguments_objects_.Add(object_desc);
-}
-
-
-void Deoptimizer::AddArgumentsObjectValue(intptr_t value) {
- deferred_arguments_objects_values_.Add(reinterpret_cast<Object*>(value));
-}
-
-
-void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
- HeapNumberMaterializationDescriptor value_desc(
- reinterpret_cast<Address>(slot_address), value);
- deferred_heap_numbers_.Add(value_desc);
-}
-
-
-void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
- BailoutType type,
- int max_entry_id) {
- // We cannot run this if the serializer is enabled because this will
- // cause us to emit relocation information for the external
- // references. This is fine because the deoptimizer's code section
- // isn't meant to be serialized at all.
- ASSERT(type == EAGER || type == LAZY);
- DeoptimizerData* data = isolate->deoptimizer_data();
- int entry_count = (type == EAGER)
- ? data->eager_deoptimization_entry_code_entries_
- : data->lazy_deoptimization_entry_code_entries_;
- if (max_entry_id < entry_count) return;
- entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
- while (max_entry_id >= entry_count) entry_count *= 2;
- ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries);
-
- MacroAssembler masm(isolate, NULL, 16 * KB);
- masm.set_emit_debug_code(false);
- GenerateDeoptimizationEntries(&masm, entry_count, type);
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- MemoryChunk* chunk = type == EAGER
- ? data->eager_deoptimization_entry_code_
- : data->lazy_deoptimization_entry_code_;
- ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
- desc.instr_size);
- chunk->CommitArea(desc.instr_size);
- memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
- CPU::FlushICache(chunk->area_start(), desc.instr_size);
-
- if (type == EAGER) {
- data->eager_deoptimization_entry_code_entries_ = entry_count;
- } else {
- data->lazy_deoptimization_entry_code_entries_ = entry_count;
- }
-}
-
-
-void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function,
- Code* code) {
- SharedFunctionInfo* shared = function->shared();
- Object* undefined = Isolate::Current()->heap()->undefined_value();
- Object* current = function;
-
- while (current != undefined) {
- JSFunction* func = JSFunction::cast(current);
- current = func->next_function_link();
- func->set_code(shared->code());
- func->set_next_function_link(undefined);
- }
-}
-
-
-FrameDescription::FrameDescription(uint32_t frame_size,
- JSFunction* function)
- : frame_size_(frame_size),
- function_(function),
- top_(kZapUint32),
- pc_(kZapUint32),
- fp_(kZapUint32),
- context_(kZapUint32) {
- // Zap all the registers.
- for (int r = 0; r < Register::kNumRegisters; r++) {
- SetRegister(r, kZapUint32);
- }
-
- // Zap all the slots.
- for (unsigned o = 0; o < frame_size; o += kPointerSize) {
- SetFrameSlot(o, kZapUint32);
- }
-}
-
-
-int FrameDescription::ComputeFixedSize() {
- return StandardFrameConstants::kFixedFrameSize +
- (ComputeParametersCount() + 1) * kPointerSize;
-}
-
-
-unsigned FrameDescription::GetOffsetFromSlotIndex(int slot_index) {
- if (slot_index >= 0) {
- // Local or spill slots. Skip the fixed part of the frame
- // including all arguments.
- unsigned base = GetFrameSize() - ComputeFixedSize();
- return base - ((slot_index + 1) * kPointerSize);
- } else {
- // Incoming parameter.
- int arg_size = (ComputeParametersCount() + 1) * kPointerSize;
- unsigned base = GetFrameSize() - arg_size;
- return base - ((slot_index + 1) * kPointerSize);
- }
-}
-
-
-int FrameDescription::ComputeParametersCount() {
- switch (type_) {
- case StackFrame::JAVA_SCRIPT:
- return function_->shared()->formal_parameter_count();
- case StackFrame::ARGUMENTS_ADAPTOR: {
- // Last slot contains number of incomming arguments as a smi.
- // Can't use GetExpression(0) because it would cause infinite recursion.
- return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value();
- }
- case StackFrame::STUB:
- return -1; // Minus receiver.
- default:
- UNREACHABLE();
- return 0;
- }
-}
-
-
-Object* FrameDescription::GetParameter(int index) {
- ASSERT(index >= 0);
- ASSERT(index < ComputeParametersCount());
- // The slot indexes for incoming arguments are negative.
- unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount());
- return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
-}
-
-
-unsigned FrameDescription::GetExpressionCount() {
- ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
- unsigned size = GetFrameSize() - ComputeFixedSize();
- return size / kPointerSize;
-}
-
-
-Object* FrameDescription::GetExpression(int index) {
- ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
- unsigned offset = GetOffsetFromSlotIndex(index);
- return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
-}
-
-
-void TranslationBuffer::Add(int32_t value, Zone* zone) {
- // Encode the sign bit in the least significant bit.
- bool is_negative = (value < 0);
- uint32_t bits = ((is_negative ? -value : value) << 1) |
- static_cast<int32_t>(is_negative);
- // Encode the individual bytes using the least significant bit of
- // each byte to indicate whether or not more bytes follow.
- do {
- uint32_t next = bits >> 7;
- contents_.Add(((bits << 1) & 0xFF) | (next != 0), zone);
- bits = next;
- } while (bits != 0);
-}
-
-
-int32_t TranslationIterator::Next() {
- // Run through the bytes until we reach one with a least significant
- // bit of zero (marks the end).
- uint32_t bits = 0;
- for (int i = 0; true; i += 7) {
- ASSERT(HasNext());
- uint8_t next = buffer_->get(index_++);
- bits |= (next >> 1) << i;
- if ((next & 1) == 0) break;
- }
- // The bits encode the sign in the least significant bit.
- bool is_negative = (bits & 1) == 1;
- int32_t result = bits >> 1;
- return is_negative ? -result : result;
-}
-
-
-Handle<ByteArray> TranslationBuffer::CreateByteArray() {
- int length = contents_.length();
- Handle<ByteArray> result =
- Isolate::Current()->factory()->NewByteArray(length, TENURED);
- memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
- return result;
-}
-
-
-void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
- buffer_->Add(CONSTRUCT_STUB_FRAME, zone());
- buffer_->Add(literal_id, zone());
- buffer_->Add(height, zone());
-}
-
-
-void Translation::BeginGetterStubFrame(int literal_id) {
- buffer_->Add(GETTER_STUB_FRAME, zone());
- buffer_->Add(literal_id, zone());
-}
-
-
-void Translation::BeginSetterStubFrame(int literal_id) {
- buffer_->Add(SETTER_STUB_FRAME, zone());
- buffer_->Add(literal_id, zone());
-}
-
-
-void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
- buffer_->Add(ARGUMENTS_ADAPTOR_FRAME, zone());
- buffer_->Add(literal_id, zone());
- buffer_->Add(height, zone());
-}
-
-
-void Translation::BeginJSFrame(BailoutId node_id,
- int literal_id,
- unsigned height) {
- buffer_->Add(JS_FRAME, zone());
- buffer_->Add(node_id.ToInt(), zone());
- buffer_->Add(literal_id, zone());
- buffer_->Add(height, zone());
-}
-
-
-void Translation::BeginCompiledStubFrame() {
- buffer_->Add(COMPILED_STUB_FRAME, zone());
-}
-
-
-void Translation::StoreRegister(Register reg) {
- buffer_->Add(REGISTER, zone());
- buffer_->Add(reg.code(), zone());
-}
-
-
-void Translation::StoreInt32Register(Register reg) {
- buffer_->Add(INT32_REGISTER, zone());
- buffer_->Add(reg.code(), zone());
-}
-
-
-void Translation::StoreUint32Register(Register reg) {
- buffer_->Add(UINT32_REGISTER, zone());
- buffer_->Add(reg.code(), zone());
-}
-
-
-void Translation::StoreDoubleRegister(DoubleRegister reg) {
- buffer_->Add(DOUBLE_REGISTER, zone());
- buffer_->Add(DoubleRegister::ToAllocationIndex(reg), zone());
-}
-
-
-void Translation::StoreStackSlot(int index) {
- buffer_->Add(STACK_SLOT, zone());
- buffer_->Add(index, zone());
-}
-
-
-void Translation::StoreInt32StackSlot(int index) {
- buffer_->Add(INT32_STACK_SLOT, zone());
- buffer_->Add(index, zone());
-}
-
-
-void Translation::StoreUint32StackSlot(int index) {
- buffer_->Add(UINT32_STACK_SLOT, zone());
- buffer_->Add(index, zone());
-}
-
-
-void Translation::StoreDoubleStackSlot(int index) {
- buffer_->Add(DOUBLE_STACK_SLOT, zone());
- buffer_->Add(index, zone());
-}
-
-
-void Translation::StoreLiteral(int literal_id) {
- buffer_->Add(LITERAL, zone());
- buffer_->Add(literal_id, zone());
-}
-
-
-void Translation::StoreArgumentsObject(bool args_known,
- int args_index,
- int args_length) {
- buffer_->Add(ARGUMENTS_OBJECT, zone());
- buffer_->Add(args_known, zone());
- buffer_->Add(args_index, zone());
- buffer_->Add(args_length, zone());
-}
-
-
-void Translation::MarkDuplicate() {
- buffer_->Add(DUPLICATE, zone());
-}
-
-
-int Translation::NumberOfOperandsFor(Opcode opcode) {
- switch (opcode) {
- case DUPLICATE:
- return 0;
- case GETTER_STUB_FRAME:
- case SETTER_STUB_FRAME:
- case REGISTER:
- case INT32_REGISTER:
- case UINT32_REGISTER:
- case DOUBLE_REGISTER:
- case STACK_SLOT:
- case INT32_STACK_SLOT:
- case UINT32_STACK_SLOT:
- case DOUBLE_STACK_SLOT:
- case LITERAL:
- case COMPILED_STUB_FRAME:
- return 1;
- case BEGIN:
- case ARGUMENTS_ADAPTOR_FRAME:
- case CONSTRUCT_STUB_FRAME:
- return 2;
- case JS_FRAME:
- case ARGUMENTS_OBJECT:
- return 3;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
-
-const char* Translation::StringFor(Opcode opcode) {
- switch (opcode) {
- case BEGIN:
- return "BEGIN";
- case JS_FRAME:
- return "JS_FRAME";
- case ARGUMENTS_ADAPTOR_FRAME:
- return "ARGUMENTS_ADAPTOR_FRAME";
- case CONSTRUCT_STUB_FRAME:
- return "CONSTRUCT_STUB_FRAME";
- case GETTER_STUB_FRAME:
- return "GETTER_STUB_FRAME";
- case SETTER_STUB_FRAME:
- return "SETTER_STUB_FRAME";
- case COMPILED_STUB_FRAME:
- return "COMPILED_STUB_FRAME";
- case REGISTER:
- return "REGISTER";
- case INT32_REGISTER:
- return "INT32_REGISTER";
- case UINT32_REGISTER:
- return "UINT32_REGISTER";
- case DOUBLE_REGISTER:
- return "DOUBLE_REGISTER";
- case STACK_SLOT:
- return "STACK_SLOT";
- case INT32_STACK_SLOT:
- return "INT32_STACK_SLOT";
- case UINT32_STACK_SLOT:
- return "UINT32_STACK_SLOT";
- case DOUBLE_STACK_SLOT:
- return "DOUBLE_STACK_SLOT";
- case LITERAL:
- return "LITERAL";
- case ARGUMENTS_OBJECT:
- return "ARGUMENTS_OBJECT";
- case DUPLICATE:
- return "DUPLICATE";
- }
- UNREACHABLE();
- return "";
-}
-
-#endif
-
-
-DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- // Globalize the code object and make it weak.
- code_ = Handle<Code>::cast(global_handles->Create(code));
- global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
- this,
- NULL,
- Deoptimizer::HandleWeakDeoptimizedCode);
-}
-
-
-DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
-}
-
-
-// We can't intermix stack decoding and allocations because
-// deoptimization infrastracture is not GC safe.
-// Thus we build a temporary structure in malloced space.
-SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame) {
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
-
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::JS_FRAME:
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- case Translation::CONSTRUCT_STUB_FRAME:
- case Translation::GETTER_STUB_FRAME:
- case Translation::SETTER_STUB_FRAME:
- // Peeled off before getting here.
- break;
-
- case Translation::ARGUMENTS_OBJECT:
- // This can be only emitted for local slots not for argument slots.
- break;
-
- case Translation::REGISTER:
- case Translation::INT32_REGISTER:
- case Translation::UINT32_REGISTER:
- case Translation::DOUBLE_REGISTER:
- case Translation::DUPLICATE:
- // We are at safepoint which corresponds to call. All registers are
- // saved by caller so there would be no live registers at this
- // point. Thus these translation commands should not be used.
- break;
-
- case Translation::STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::TAGGED);
- }
-
- case Translation::INT32_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::INT32);
- }
-
- case Translation::UINT32_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::UINT32);
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::DOUBLE);
- }
-
- case Translation::LITERAL: {
- int literal_index = iterator->Next();
- return SlotRef(data->GetIsolate(),
- data->LiteralArray()->get(literal_index));
- }
-
- case Translation::COMPILED_STUB_FRAME:
- UNREACHABLE();
- break;
- }
-
- UNREACHABLE();
- return SlotRef();
-}
-
-
-void SlotRef::ComputeSlotsForArguments(Vector<SlotRef>* args_slots,
- TranslationIterator* it,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame) {
- // Process the translation commands for the arguments.
-
- // Skip the translation command for the receiver.
- it->Skip(Translation::NumberOfOperandsFor(
- static_cast<Translation::Opcode>(it->Next())));
-
- // Compute slots for arguments.
- for (int i = 0; i < args_slots->length(); ++i) {
- (*args_slots)[i] = ComputeSlotForNextArgument(it, data, frame);
- }
-}
-
-
-Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
- JavaScriptFrame* frame,
- int inlined_jsframe_index,
- int formal_parameter_count) {
- AssertNoAllocation no_gc;
- int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationInputData* data =
- static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
- TranslationIterator it(data->TranslationByteArray(),
- data->TranslationIndex(deopt_index)->value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- ASSERT(opcode == Translation::BEGIN);
- it.Next(); // Drop frame count.
- int jsframe_count = it.Next();
- USE(jsframe_count);
- ASSERT(jsframe_count > inlined_jsframe_index);
- int jsframes_to_skip = inlined_jsframe_index;
- while (true) {
- opcode = static_cast<Translation::Opcode>(it.Next());
- if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) {
- if (jsframes_to_skip == 0) {
- ASSERT(Translation::NumberOfOperandsFor(opcode) == 2);
-
- it.Skip(1); // literal id
- int height = it.Next();
-
- // We reached the arguments adaptor frame corresponding to the
- // inlined function in question. Number of arguments is height - 1.
- Vector<SlotRef> args_slots =
- Vector<SlotRef>::New(height - 1); // Minus receiver.
- ComputeSlotsForArguments(&args_slots, &it, data, frame);
- return args_slots;
- }
- } else if (opcode == Translation::JS_FRAME) {
- if (jsframes_to_skip == 0) {
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
-
- // We reached the frame corresponding to the inlined function
- // in question. Process the translation commands for the
- // arguments. Number of arguments is equal to the number of
- // format parameter count.
- Vector<SlotRef> args_slots =
- Vector<SlotRef>::New(formal_parameter_count);
- ComputeSlotsForArguments(&args_slots, &it, data, frame);
- return args_slots;
- }
- jsframes_to_skip--;
- }
-
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- }
-
- UNREACHABLE();
- return Vector<SlotRef>();
-}
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
- int frame_index,
- bool has_arguments_adaptor,
- bool has_construct_stub) {
- FrameDescription* output_frame = deoptimizer->output_[frame_index];
- function_ = output_frame->GetFunction();
- has_construct_stub_ = has_construct_stub;
- expression_count_ = output_frame->GetExpressionCount();
- expression_stack_ = new Object*[expression_count_];
- // Get the source position using the unoptimized code.
- Address pc = reinterpret_cast<Address>(output_frame->GetPc());
- Code* code = Code::cast(Isolate::Current()->heap()->FindCodeObject(pc));
- source_position_ = code->SourcePosition(pc);
-
- for (int i = 0; i < expression_count_; i++) {
- SetExpression(i, output_frame->GetExpression(i));
- }
-
- if (has_arguments_adaptor) {
- output_frame = deoptimizer->output_[frame_index - 1];
- ASSERT(output_frame->GetFrameType() == StackFrame::ARGUMENTS_ADAPTOR);
- }
-
- parameters_count_ = output_frame->ComputeParametersCount();
- parameters_ = new Object*[parameters_count_];
- for (int i = 0; i < parameters_count_; i++) {
- SetParameter(i, output_frame->GetParameter(i));
- }
-}
-
-
-DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
- delete[] expression_stack_;
- delete[] parameters_;
-}
-
-
-void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
- v->VisitPointer(BitCast<Object**>(&function_));
- v->VisitPointers(parameters_, parameters_ + parameters_count_);
- v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
-}
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/deoptimizer.h b/src/3rdparty/v8/src/deoptimizer.h
deleted file mode 100644
index b4d8873..0000000
--- a/src/3rdparty/v8/src/deoptimizer.h
+++ /dev/null
@@ -1,888 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DEOPTIMIZER_H_
-#define V8_DEOPTIMIZER_H_
-
-#include "v8.h"
-
-#include "allocation.h"
-#include "macro-assembler.h"
-#include "zone-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-class FrameDescription;
-class TranslationIterator;
-class DeoptimizingCodeListNode;
-class DeoptimizedFrameInfo;
-
-class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
- public:
- HeapNumberMaterializationDescriptor(Address slot_address, double val)
- : slot_address_(slot_address), val_(val) { }
-
- Address slot_address() const { return slot_address_; }
- double value() const { return val_; }
-
- private:
- Address slot_address_;
- double val_;
-};
-
-
-class ArgumentsObjectMaterializationDescriptor BASE_EMBEDDED {
- public:
- ArgumentsObjectMaterializationDescriptor(Address slot_address, int argc)
- : slot_address_(slot_address), arguments_length_(argc) { }
-
- Address slot_address() const { return slot_address_; }
- int arguments_length() const { return arguments_length_; }
-
- private:
- Address slot_address_;
- int arguments_length_;
-};
-
-
-class OptimizedFunctionVisitor BASE_EMBEDDED {
- public:
- virtual ~OptimizedFunctionVisitor() {}
-
- // Function which is called before iteration of any optimized functions
- // from given native context.
- virtual void EnterContext(Context* context) = 0;
-
- virtual void VisitFunction(JSFunction* function) = 0;
-
- // Function which is called after iteration of all optimized functions
- // from given native context.
- virtual void LeaveContext(Context* context) = 0;
-};
-
-
-class OptimizedFunctionFilter BASE_EMBEDDED {
- public:
- virtual ~OptimizedFunctionFilter() {}
-
- virtual bool TakeFunction(JSFunction* function) = 0;
-};
-
-
-class Deoptimizer;
-
-
-class DeoptimizerData {
- public:
- DeoptimizerData();
- ~DeoptimizerData();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void Iterate(ObjectVisitor* v);
-#endif
-
- Code* FindDeoptimizingCode(Address addr);
- void RemoveDeoptimizingCode(Code* code);
-
- private:
- int eager_deoptimization_entry_code_entries_;
- int lazy_deoptimization_entry_code_entries_;
- MemoryChunk* eager_deoptimization_entry_code_;
- MemoryChunk* lazy_deoptimization_entry_code_;
- Deoptimizer* current_;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- DeoptimizedFrameInfo* deoptimized_frame_info_;
-#endif
-
- // List of deoptimized code which still have references from active stack
- // frames. These code objects are needed by the deoptimizer when deoptimizing
- // a frame for which the code object for the function function has been
- // changed from the code present when deoptimizing was done.
- DeoptimizingCodeListNode* deoptimizing_code_list_;
-
- friend class Deoptimizer;
-
- DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
-};
-
-
-class Deoptimizer : public Malloced {
- public:
- enum BailoutType {
- EAGER,
- LAZY,
- OSR,
- // This last bailout type is not really a bailout, but used by the
- // debugger to deoptimize stack frames to allow inspection.
- DEBUGGER
- };
-
- static bool TraceEnabledFor(BailoutType deopt_type,
- StackFrame::Type frame_type);
- static const char* MessageFor(BailoutType type);
-
- int output_count() const { return output_count_; }
-
- Code::Kind compiled_code_kind() const { return compiled_code_->kind(); }
-
- // Number of created JS frames. Not all created frames are necessarily JS.
- int jsframe_count() const { return jsframe_count_; }
-
- static Deoptimizer* New(JSFunction* function,
- BailoutType type,
- unsigned bailout_id,
- Address from,
- int fp_to_sp_delta,
- Isolate* isolate);
- static Deoptimizer* Grab(Isolate* isolate);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // The returned object with information on the optimized frame needs to be
- // freed before another one can be generated.
- static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
- int jsframe_index,
- Isolate* isolate);
- static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
- Isolate* isolate);
-#endif
-
- // Makes sure that there is enough room in the relocation
- // information of a code object to perform lazy deoptimization
- // patching. If there is not enough room a new relocation
- // information object is allocated and comments are added until it
- // is big enough.
- static void EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code);
-
- // Deoptimize the function now. Its current optimized code will never be run
- // again and any activations of the optimized code will get deoptimized when
- // execution returns.
- static void DeoptimizeFunction(JSFunction* function);
-
- // Iterate over all the functions which share the same code object
- // and make them use unoptimized version.
- static void ReplaceCodeForRelatedFunctions(JSFunction* function, Code* code);
-
- // Deoptimize all functions in the heap.
- static void DeoptimizeAll();
-
- static void DeoptimizeGlobalObject(JSObject* object);
-
- static void DeoptimizeAllFunctionsWith(OptimizedFunctionFilter* filter);
-
- static void DeoptimizeAllFunctionsForContext(
- Context* context, OptimizedFunctionFilter* filter);
-
- static void VisitAllOptimizedFunctionsForContext(
- Context* context, OptimizedFunctionVisitor* visitor);
-
- static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
-
- // The size in bytes of the code required at a lazy deopt patch site.
- static int patch_size();
-
- // Patch all stack guard checks in the unoptimized code to
- // unconditionally call replacement_code.
- static void PatchStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code);
-
- // Patch stack guard check at instruction before pc_after in
- // the unoptimized code to unconditionally call replacement_code.
- static void PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code);
-
- // Change all patched stack guard checks in the unoptimized code
- // back to a normal stack guard check.
- static void RevertStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code);
-
- // Change all patched stack guard checks in the unoptimized code
- // back to a normal stack guard check.
- static void RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code);
-
- ~Deoptimizer();
-
- void MaterializeHeapObjects(JavaScriptFrameIterator* it);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void MaterializeHeapNumbersForDebuggerInspectableFrame(
- Address parameters_top,
- uint32_t parameters_size,
- Address expressions_top,
- uint32_t expressions_size,
- DeoptimizedFrameInfo* info);
-#endif
-
- static void ComputeOutputFrames(Deoptimizer* deoptimizer);
-
-
- enum GetEntryMode {
- CALCULATE_ENTRY_ADDRESS,
- ENSURE_ENTRY_CODE
- };
-
-
- static Address GetDeoptimizationEntry(
- Isolate* isolate,
- int id,
- BailoutType type,
- GetEntryMode mode = ENSURE_ENTRY_CODE);
- static int GetDeoptimizationId(Address addr, BailoutType type);
- static int GetOutputInfo(DeoptimizationOutputData* data,
- BailoutId node_id,
- SharedFunctionInfo* shared);
-
- // Code generation support.
- static int input_offset() { return OFFSET_OF(Deoptimizer, input_); }
- static int output_count_offset() {
- return OFFSET_OF(Deoptimizer, output_count_);
- }
- static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
-
- static int has_alignment_padding_offset() {
- return OFFSET_OF(Deoptimizer, has_alignment_padding_);
- }
-
- static int GetDeoptimizedCodeCount(Isolate* isolate);
-
- static const int kNotDeoptimizationEntry = -1;
-
- // Generators for the deoptimization entry code.
- class EntryGenerator BASE_EMBEDDED {
- public:
- EntryGenerator(MacroAssembler* masm, BailoutType type)
- : masm_(masm), type_(type) { }
- virtual ~EntryGenerator() { }
-
- void Generate();
-
- protected:
- MacroAssembler* masm() const { return masm_; }
- BailoutType type() const { return type_; }
-
- virtual void GeneratePrologue() { }
-
- private:
- MacroAssembler* masm_;
- Deoptimizer::BailoutType type_;
- };
-
- class TableEntryGenerator : public EntryGenerator {
- public:
- TableEntryGenerator(MacroAssembler* masm, BailoutType type, int count)
- : EntryGenerator(masm, type), count_(count) { }
-
- protected:
- virtual void GeneratePrologue();
-
- private:
- int count() const { return count_; }
-
- int count_;
- };
-
- int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
-
- static size_t GetMaxDeoptTableSize();
-
- static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
- BailoutType type,
- int max_entry_id);
-
- private:
- static const int kMinNumberOfEntries = 64;
- static const int kMaxNumberOfEntries = 16384;
-
- Deoptimizer(Isolate* isolate,
- JSFunction* function,
- BailoutType type,
- unsigned bailout_id,
- Address from,
- int fp_to_sp_delta,
- Code* optimized_code);
- Code* FindOptimizedCode(JSFunction* function, Code* optimized_code);
- void Trace();
- void PrintFunctionName();
- void DeleteFrameDescriptions();
-
- void DoComputeOutputFrames();
- void DoComputeOsrOutputFrame();
- void DoComputeJSFrame(TranslationIterator* iterator, int frame_index);
- void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index);
- void DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index);
- void DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
- bool is_setter_stub_frame);
- void DoComputeCompiledStubFrame(TranslationIterator* iterator,
- int frame_index);
- void DoTranslateCommand(TranslationIterator* iterator,
- int frame_index,
- unsigned output_offset);
- // Translate a command for OSR. Updates the input offset to be used for
- // the next command. Returns false if translation of the command failed
- // (e.g., a number conversion failed) and may or may not have updated the
- // input offset.
- bool DoOsrTranslateCommand(TranslationIterator* iterator,
- int* input_offset);
-
- unsigned ComputeInputFrameSize() const;
- unsigned ComputeFixedSize(JSFunction* function) const;
-
- unsigned ComputeIncomingArgumentSize(JSFunction* function) const;
- unsigned ComputeOutgoingArgumentSize() const;
-
- Object* ComputeLiteral(int index) const;
-
- void AddArgumentsObject(intptr_t slot_address, int argc);
- void AddArgumentsObjectValue(intptr_t value);
- void AddDoubleValue(intptr_t slot_address, double value);
-
- static void GenerateDeoptimizationEntries(
- MacroAssembler* masm, int count, BailoutType type);
-
- // Weak handle callback for deoptimizing code objects.
- static void HandleWeakDeoptimizedCode(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
- void* data);
-
- // Deoptimize function assuming that function->next_function_link() points
- // to a list that contains all functions that share the same optimized code.
- static void DeoptimizeFunctionWithPreparedFunctionList(JSFunction* function);
-
- // Fill the input from from a JavaScript frame. This is used when
- // the debugger needs to inspect an optimized frame. For normal
- // deoptimizations the input frame is filled in generated code.
- void FillInputFrame(Address tos, JavaScriptFrame* frame);
-
- Isolate* isolate_;
- JSFunction* function_;
- Code* compiled_code_;
- unsigned bailout_id_;
- BailoutType bailout_type_;
- Address from_;
- int fp_to_sp_delta_;
- int has_alignment_padding_;
-
- // Input frame description.
- FrameDescription* input_;
- // Number of output frames.
- int output_count_;
- // Number of output js frames.
- int jsframe_count_;
- // Array of output frame descriptions.
- FrameDescription** output_;
-
- List<Object*> deferred_arguments_objects_values_;
- List<ArgumentsObjectMaterializationDescriptor> deferred_arguments_objects_;
- List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
-
- bool trace_;
-
- static const int table_entry_size_;
-
- friend class FrameDescription;
- friend class DeoptimizingCodeListNode;
- friend class DeoptimizedFrameInfo;
-};
-
-
-class FrameDescription {
- public:
- FrameDescription(uint32_t frame_size,
- JSFunction* function);
-
- void* operator new(size_t size, uint32_t frame_size) {
- // Subtracts kPointerSize, as the member frame_content_ already supplies
- // the first element of the area to store the frame.
- return malloc(size + frame_size - kPointerSize);
- }
-
- void operator delete(void* pointer, uint32_t frame_size) {
- free(pointer);
- }
-
- void operator delete(void* description) {
- free(description);
- }
-
- uint32_t GetFrameSize() const {
- ASSERT(static_cast<uint32_t>(frame_size_) == frame_size_);
- return static_cast<uint32_t>(frame_size_);
- }
-
- JSFunction* GetFunction() const { return function_; }
-
- unsigned GetOffsetFromSlotIndex(int slot_index);
-
- intptr_t GetFrameSlot(unsigned offset) {
- return *GetFrameSlotPointer(offset);
- }
-
- double GetDoubleFrameSlot(unsigned offset) {
- intptr_t* ptr = GetFrameSlotPointer(offset);
-#if V8_TARGET_ARCH_MIPS
- // Prevent gcc from using load-double (mips ldc1) on (possibly)
- // non-64-bit aligned double. Uses two lwc1 instructions.
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- c.u[0] = *reinterpret_cast<uint32_t*>(ptr);
- c.u[1] = *(reinterpret_cast<uint32_t*>(ptr) + 1);
- return c.d;
-#else
- return *reinterpret_cast<double*>(ptr);
-#endif
- }
-
- void SetFrameSlot(unsigned offset, intptr_t value) {
- *GetFrameSlotPointer(offset) = value;
- }
-
- intptr_t GetRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(registers_));
- return registers_[n];
- }
-
- double GetDoubleRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(double_registers_));
- return double_registers_[n];
- }
-
- void SetRegister(unsigned n, intptr_t value) {
- ASSERT(n < ARRAY_SIZE(registers_));
- registers_[n] = value;
- }
-
- void SetDoubleRegister(unsigned n, double value) {
- ASSERT(n < ARRAY_SIZE(double_registers_));
- double_registers_[n] = value;
- }
-
- intptr_t GetTop() const { return top_; }
- void SetTop(intptr_t top) { top_ = top; }
-
- intptr_t GetPc() const { return pc_; }
- void SetPc(intptr_t pc) { pc_ = pc; }
-
- intptr_t GetFp() const { return fp_; }
- void SetFp(intptr_t fp) { fp_ = fp; }
-
- intptr_t GetContext() const { return context_; }
- void SetContext(intptr_t context) { context_ = context; }
-
- Smi* GetState() const { return state_; }
- void SetState(Smi* state) { state_ = state; }
-
- void SetContinuation(intptr_t pc) { continuation_ = pc; }
-
- StackFrame::Type GetFrameType() const { return type_; }
- void SetFrameType(StackFrame::Type type) { type_ = type; }
-
- // Get the incoming arguments count.
- int ComputeParametersCount();
-
- // Get a parameter value for an unoptimized frame.
- Object* GetParameter(int index);
-
- // Get the expression stack height for a unoptimized frame.
- unsigned GetExpressionCount();
-
- // Get the expression stack value for an unoptimized frame.
- Object* GetExpression(int index);
-
- static int registers_offset() {
- return OFFSET_OF(FrameDescription, registers_);
- }
-
- static int double_registers_offset() {
- return OFFSET_OF(FrameDescription, double_registers_);
- }
-
- static int frame_size_offset() {
- return OFFSET_OF(FrameDescription, frame_size_);
- }
-
- static int pc_offset() {
- return OFFSET_OF(FrameDescription, pc_);
- }
-
- static int state_offset() {
- return OFFSET_OF(FrameDescription, state_);
- }
-
- static int continuation_offset() {
- return OFFSET_OF(FrameDescription, continuation_);
- }
-
- static int frame_content_offset() {
- return OFFSET_OF(FrameDescription, frame_content_);
- }
-
- private:
- static const uint32_t kZapUint32 = 0xbeeddead;
-
- // Frame_size_ must hold a uint32_t value. It is only a uintptr_t to
- // keep the variable-size array frame_content_ of type intptr_t at
- // the end of the structure aligned.
- uintptr_t frame_size_; // Number of bytes.
- JSFunction* function_;
- intptr_t registers_[Register::kNumRegisters];
- double double_registers_[DoubleRegister::kMaxNumRegisters];
- intptr_t top_;
- intptr_t pc_;
- intptr_t fp_;
- intptr_t context_;
- StackFrame::Type type_;
- Smi* state_;
-
- // Continuation is the PC where the execution continues after
- // deoptimizing.
- intptr_t continuation_;
-
- // This must be at the end of the object as the object is allocated larger
- // than it's definition indicate to extend this array.
- intptr_t frame_content_[1];
-
- intptr_t* GetFrameSlotPointer(unsigned offset) {
- ASSERT(offset < frame_size_);
- return reinterpret_cast<intptr_t*>(
- reinterpret_cast<Address>(this) + frame_content_offset() + offset);
- }
-
- int ComputeFixedSize();
-};
-
-
-class TranslationBuffer BASE_EMBEDDED {
- public:
- explicit TranslationBuffer(Zone* zone) : contents_(256, zone) { }
-
- int CurrentIndex() const { return contents_.length(); }
- void Add(int32_t value, Zone* zone);
-
- Handle<ByteArray> CreateByteArray();
-
- private:
- ZoneList<uint8_t> contents_;
-};
-
-
-class TranslationIterator BASE_EMBEDDED {
- public:
- TranslationIterator(ByteArray* buffer, int index)
- : buffer_(buffer), index_(index) {
- ASSERT(index >= 0 && index < buffer->length());
- }
-
- int32_t Next();
-
- bool HasNext() const { return index_ < buffer_->length(); }
-
- void Skip(int n) {
- for (int i = 0; i < n; i++) Next();
- }
-
- private:
- ByteArray* buffer_;
- int index_;
-};
-
-
-class Translation BASE_EMBEDDED {
- public:
- enum Opcode {
- BEGIN,
- JS_FRAME,
- CONSTRUCT_STUB_FRAME,
- GETTER_STUB_FRAME,
- SETTER_STUB_FRAME,
- ARGUMENTS_ADAPTOR_FRAME,
- COMPILED_STUB_FRAME,
- REGISTER,
- INT32_REGISTER,
- UINT32_REGISTER,
- DOUBLE_REGISTER,
- STACK_SLOT,
- INT32_STACK_SLOT,
- UINT32_STACK_SLOT,
- DOUBLE_STACK_SLOT,
- LITERAL,
- ARGUMENTS_OBJECT,
-
- // A prefix indicating that the next command is a duplicate of the one
- // that follows it.
- DUPLICATE
- };
-
- Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
- Zone* zone)
- : buffer_(buffer),
- index_(buffer->CurrentIndex()),
- zone_(zone) {
- buffer_->Add(BEGIN, zone);
- buffer_->Add(frame_count, zone);
- buffer_->Add(jsframe_count, zone);
- }
-
- int index() const { return index_; }
-
- // Commands.
- void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
- void BeginCompiledStubFrame();
- void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
- void BeginConstructStubFrame(int literal_id, unsigned height);
- void BeginGetterStubFrame(int literal_id);
- void BeginSetterStubFrame(int literal_id);
- void StoreRegister(Register reg);
- void StoreInt32Register(Register reg);
- void StoreUint32Register(Register reg);
- void StoreDoubleRegister(DoubleRegister reg);
- void StoreStackSlot(int index);
- void StoreInt32StackSlot(int index);
- void StoreUint32StackSlot(int index);
- void StoreDoubleStackSlot(int index);
- void StoreLiteral(int literal_id);
- void StoreArgumentsObject(bool args_known, int args_index, int args_length);
- void MarkDuplicate();
-
- Zone* zone() const { return zone_; }
-
- static int NumberOfOperandsFor(Opcode opcode);
-
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
- static const char* StringFor(Opcode opcode);
-#endif
-
- // A literal id which refers to the JSFunction itself.
- static const int kSelfLiteralId = -239;
-
- private:
- TranslationBuffer* buffer_;
- int index_;
- Zone* zone_;
-};
-
-
-// Linked list holding deoptimizing code objects. The deoptimizing code objects
-// are kept as weak handles until they are no longer activated on the stack.
-class DeoptimizingCodeListNode : public Malloced {
- public:
- explicit DeoptimizingCodeListNode(Code* code);
- ~DeoptimizingCodeListNode();
-
- DeoptimizingCodeListNode* next() const { return next_; }
- void set_next(DeoptimizingCodeListNode* next) { next_ = next; }
- Handle<Code> code() const { return code_; }
-
- private:
- // Global (weak) handle to the deoptimizing code object.
- Handle<Code> code_;
-
- // Next pointer for linked list.
- DeoptimizingCodeListNode* next_;
-};
-
-
-class SlotRef BASE_EMBEDDED {
- public:
- enum SlotRepresentation {
- UNKNOWN,
- TAGGED,
- INT32,
- UINT32,
- DOUBLE,
- LITERAL
- };
-
- SlotRef()
- : addr_(NULL), representation_(UNKNOWN) { }
-
- SlotRef(Address addr, SlotRepresentation representation)
- : addr_(addr), representation_(representation) { }
-
- SlotRef(Isolate* isolate, Object* literal)
- : literal_(literal, isolate), representation_(LITERAL) { }
-
- Handle<Object> GetValue(Isolate* isolate) {
- switch (representation_) {
- case TAGGED:
- return Handle<Object>(Memory::Object_at(addr_), isolate);
-
- case INT32: {
- int value = Memory::int32_at(addr_);
- if (Smi::IsValid(value)) {
- return Handle<Object>(Smi::FromInt(value), isolate);
- } else {
- return isolate->factory()->NewNumberFromInt(value);
- }
- }
-
- case UINT32: {
- uint32_t value = Memory::uint32_at(addr_);
- if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
- } else {
- return isolate->factory()->NewNumber(static_cast<double>(value));
- }
- }
-
- case DOUBLE: {
- double value = Memory::double_at(addr_);
- return isolate->factory()->NewNumber(value);
- }
-
- case LITERAL:
- return literal_;
-
- default:
- UNREACHABLE();
- return Handle<Object>::null();
- }
- }
-
- static Vector<SlotRef> ComputeSlotMappingForArguments(
- JavaScriptFrame* frame,
- int inlined_frame_index,
- int formal_parameter_count);
-
- private:
- Address addr_;
- Handle<Object> literal_;
- SlotRepresentation representation_;
-
- static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
- if (slot_index >= 0) {
- const int offset = JavaScriptFrameConstants::kLocal0Offset;
- return frame->fp() + offset - (slot_index * kPointerSize);
- } else {
- const int offset = JavaScriptFrameConstants::kLastParameterOffset;
- return frame->fp() + offset - ((slot_index + 1) * kPointerSize);
- }
- }
-
- static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame);
-
- static void ComputeSlotsForArguments(
- Vector<SlotRef>* args_slots,
- TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame);
-};
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-// Class used to represent an unoptimized frame when the debugger
-// needs to inspect a frame that is part of an optimized frame. The
-// internally used FrameDescription objects are not GC safe so for use
-// by the debugger frame information is copied to an object of this type.
-// Represents parameters in unadapted form so their number might mismatch
-// formal parameter count.
-class DeoptimizedFrameInfo : public Malloced {
- public:
- DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
- int frame_index,
- bool has_arguments_adaptor,
- bool has_construct_stub);
- virtual ~DeoptimizedFrameInfo();
-
- // GC support.
- void Iterate(ObjectVisitor* v);
-
- // Return the number of incoming arguments.
- int parameters_count() { return parameters_count_; }
-
- // Return the height of the expression stack.
- int expression_count() { return expression_count_; }
-
- // Get the frame function.
- JSFunction* GetFunction() {
- return function_;
- }
-
- // Check if this frame is preceded by construct stub frame. The bottom-most
- // inlined frame might still be called by an uninlined construct stub.
- bool HasConstructStub() {
- return has_construct_stub_;
- }
-
- // Get an incoming argument.
- Object* GetParameter(int index) {
- ASSERT(0 <= index && index < parameters_count());
- return parameters_[index];
- }
-
- // Get an expression from the expression stack.
- Object* GetExpression(int index) {
- ASSERT(0 <= index && index < expression_count());
- return expression_stack_[index];
- }
-
- int GetSourcePosition() {
- return source_position_;
- }
-
- private:
- // Set an incoming argument.
- void SetParameter(int index, Object* obj) {
- ASSERT(0 <= index && index < parameters_count());
- parameters_[index] = obj;
- }
-
- // Set an expression on the expression stack.
- void SetExpression(int index, Object* obj) {
- ASSERT(0 <= index && index < expression_count());
- expression_stack_[index] = obj;
- }
-
- JSFunction* function_;
- bool has_construct_stub_;
- int parameters_count_;
- int expression_count_;
- Object** parameters_;
- Object** expression_stack_;
- int source_position_;
-
- friend class Deoptimizer;
-};
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_DEOPTIMIZER_H_
diff --git a/src/3rdparty/v8/src/disasm.h b/src/3rdparty/v8/src/disasm.h
deleted file mode 100644
index f7f2d41..0000000
--- a/src/3rdparty/v8/src/disasm.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DISASM_H_
-#define V8_DISASM_H_
-
-namespace disasm {
-
-typedef unsigned char byte;
-
-// Interface and default implementation for converting addresses and
-// register-numbers to text. The default implementation is machine
-// specific.
-class NameConverter {
- public:
- virtual ~NameConverter() {}
- virtual const char* NameOfCPURegister(int reg) const;
- virtual const char* NameOfByteCPURegister(int reg) const;
- virtual const char* NameOfXMMRegister(int reg) const;
- virtual const char* NameOfAddress(byte* addr) const;
- virtual const char* NameOfConstant(byte* addr) const;
- virtual const char* NameInCode(byte* addr) const;
-
- protected:
- v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
-};
-
-
-// A generic Disassembler interface
-class Disassembler {
- public:
- // Caller deallocates converter.
- explicit Disassembler(const NameConverter& converter);
-
- virtual ~Disassembler();
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
-
- // Returns -1 if instruction does not mark the beginning of a constant pool,
- // or the number of entries in the constant pool beginning here.
- int ConstantPoolSizeAt(byte* instruction);
-
- // Write disassembly into specified file 'f' using specified NameConverter
- // (see constructor).
- static void Disassemble(FILE* f, byte* begin, byte* end);
- private:
- const NameConverter& converter_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Disassembler);
-};
-
-} // namespace disasm
-
-#endif // V8_DISASM_H_
diff --git a/src/3rdparty/v8/src/disassembler.cc b/src/3rdparty/v8/src/disassembler.cc
deleted file mode 100644
index 5d18d68..0000000
--- a/src/3rdparty/v8/src/disassembler.cc
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "code-stubs.h"
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "disasm.h"
-#include "disassembler.h"
-#include "macro-assembler.h"
-#include "serialize.h"
-#include "string-stream.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DISASSEMBLER
-
-void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
- for (byte* pc = begin; pc < end; pc++) {
- if (f == NULL) {
- PrintF("%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
- reinterpret_cast<intptr_t>(pc),
- pc - begin,
- *pc);
- } else {
- fprintf(f, "%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
- reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
- }
- }
-}
-
-
-class V8NameConverter: public disasm::NameConverter {
- public:
- explicit V8NameConverter(Code* code) : code_(code) {}
- virtual const char* NameOfAddress(byte* pc) const;
- virtual const char* NameInCode(byte* addr) const;
- Code* code() const { return code_; }
- private:
- Code* code_;
-
- EmbeddedVector<char, 128> v8_buffer_;
-};
-
-
-const char* V8NameConverter::NameOfAddress(byte* pc) const {
- const char* name = Isolate::Current()->builtins()->Lookup(pc);
- if (name != NULL) {
- OS::SNPrintF(v8_buffer_, "%s (%p)", name, pc);
- return v8_buffer_.start();
- }
-
- if (code_ != NULL) {
- int offs = static_cast<int>(pc - code_->instruction_start());
- // print as code offset, if it seems reasonable
- if (0 <= offs && offs < code_->instruction_size()) {
- OS::SNPrintF(v8_buffer_, "%d (%p)", offs, pc);
- return v8_buffer_.start();
- }
- }
-
- return disasm::NameConverter::NameOfAddress(pc);
-}
-
-
-const char* V8NameConverter::NameInCode(byte* addr) const {
- // The V8NameConverter is used for well known code, so we can "safely"
- // dereference pointers in generated code.
- return (code_ != NULL) ? reinterpret_cast<const char*>(addr) : "";
-}
-
-
-static void DumpBuffer(FILE* f, StringBuilder* out) {
- if (f == NULL) {
- PrintF("%s\n", out->Finalize());
- } else {
- fprintf(f, "%s\n", out->Finalize());
- }
- out->Reset();
-}
-
-
-
-static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
-static const int kRelocInfoPosition = 57;
-
-static int DecodeIt(Isolate* isolate,
- FILE* f,
- const V8NameConverter& converter,
- byte* begin,
- byte* end) {
- NoHandleAllocation ha(isolate);
- AssertNoAllocation no_alloc;
- ExternalReferenceEncoder ref_encoder;
- Heap* heap = HEAP;
-
- v8::internal::EmbeddedVector<char, 128> decode_buffer;
- v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
- StringBuilder out(out_buffer.start(), out_buffer.length());
- byte* pc = begin;
- disasm::Disassembler d(converter);
- RelocIterator* it = NULL;
- if (converter.code() != NULL) {
- it = new RelocIterator(converter.code());
- } else {
- // No relocation information when printing code stubs.
- }
- int constants = -1; // no constants being decoded at the start
-
- while (pc < end) {
- // First decode instruction so that we know its length.
- byte* prev_pc = pc;
- if (constants > 0) {
- OS::SNPrintF(decode_buffer,
- "%08x constant",
- *reinterpret_cast<int32_t*>(pc));
- constants--;
- pc += 4;
- } else {
- int num_const = d.ConstantPoolSizeAt(pc);
- if (num_const >= 0) {
- OS::SNPrintF(decode_buffer,
- "%08x constant pool begin",
- *reinterpret_cast<int32_t*>(pc));
- constants = num_const;
- pc += 4;
- } else if (it != NULL && !it->done() && it->rinfo()->pc() == pc &&
- it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
- // raw pointer embedded in code stream, e.g., jump table
- byte* ptr = *reinterpret_cast<byte**>(pc);
- OS::SNPrintF(decode_buffer,
- "%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR,
- ptr,
- ptr - begin);
- pc += 4;
- } else {
- decode_buffer[0] = '\0';
- pc += d.InstructionDecode(decode_buffer, pc);
- }
- }
-
- // Collect RelocInfo for this instruction (prev_pc .. pc-1)
- List<const char*> comments(4);
- List<byte*> pcs(1);
- List<RelocInfo::Mode> rmodes(1);
- List<intptr_t> datas(1);
- if (it != NULL) {
- while (!it->done() && it->rinfo()->pc() < pc) {
- if (RelocInfo::IsComment(it->rinfo()->rmode())) {
- // For comments just collect the text.
- comments.Add(reinterpret_cast<const char*>(it->rinfo()->data()));
- } else {
- // For other reloc info collect all data.
- pcs.Add(it->rinfo()->pc());
- rmodes.Add(it->rinfo()->rmode());
- datas.Add(it->rinfo()->data());
- }
- it->next();
- }
- }
-
- // Comments.
- for (int i = 0; i < comments.length(); i++) {
- out.AddFormatted(" %s", comments[i]);
- DumpBuffer(f, &out);
- }
-
- // Instruction address and instruction offset.
- out.AddFormatted("%p %4d ", prev_pc, prev_pc - begin);
-
- // Instruction.
- out.AddFormatted("%s", decode_buffer.start());
-
- // Print all the reloc info for this instruction which are not comments.
- for (int i = 0; i < pcs.length(); i++) {
- // Put together the reloc info
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL);
-
- // Indent the printing of the reloc info.
- if (i == 0) {
- // The first reloc info is printed after the disassembled instruction.
- out.AddPadding(' ', kRelocInfoPosition - out.position());
- } else {
- // Additional reloc infos are printed on separate lines.
- DumpBuffer(f, &out);
- out.AddPadding(' ', kRelocInfoPosition);
- }
-
- RelocInfo::Mode rmode = relocinfo.rmode();
- if (RelocInfo::IsPosition(rmode)) {
- if (RelocInfo::IsStatementPosition(rmode)) {
- out.AddFormatted(" ;; debug: statement %d", relocinfo.data());
- } else {
- out.AddFormatted(" ;; debug: position %d", relocinfo.data());
- }
- } else if (rmode == RelocInfo::EMBEDDED_OBJECT) {
- HeapStringAllocator allocator;
- StringStream accumulator(&allocator);
- relocinfo.target_object()->ShortPrint(&accumulator);
- SmartArrayPointer<const char> obj_name = accumulator.ToCString();
- out.AddFormatted(" ;; object: %s", *obj_name);
- } else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- const char* reference_name =
- ref_encoder.NameOfAddress(*relocinfo.target_reference_address());
- out.AddFormatted(" ;; external reference (%s)", reference_name);
- } else if (RelocInfo::IsCodeTarget(rmode)) {
- out.AddFormatted(" ;; code:");
- if (rmode == RelocInfo::CONSTRUCT_CALL) {
- out.AddFormatted(" constructor,");
- }
- Code* code = Code::GetCodeFromTargetAddress(relocinfo.target_address());
- Code::Kind kind = code->kind();
- if (code->is_inline_cache_stub()) {
- if (rmode == RelocInfo::CODE_TARGET_CONTEXT) {
- out.AddFormatted(" contextual,");
- }
- InlineCacheState ic_state = code->ic_state();
- out.AddFormatted(" %s, %s", Code::Kind2String(kind),
- Code::ICState2String(ic_state));
- if (ic_state == MONOMORPHIC) {
- Code::StubType type = code->type();
- out.AddFormatted(", %s", Code::StubType2String(type));
- }
- if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
- out.AddFormatted(", argc = %d", code->arguments_count());
- }
- } else if (kind == Code::STUB) {
- // Reverse lookup required as the minor key cannot be retrieved
- // from the code object.
- Object* obj = heap->code_stubs()->SlowReverseLookup(code);
- if (obj != heap->undefined_value()) {
- ASSERT(obj->IsSmi());
- // Get the STUB key and extract major and minor key.
- uint32_t key = Smi::cast(obj)->value();
- uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
- CodeStub::Major major_key = CodeStub::GetMajorKey(code);
- ASSERT(major_key == CodeStub::MajorKeyFromKey(key));
- out.AddFormatted(" %s, %s, ",
- Code::Kind2String(kind),
- CodeStub::MajorName(major_key, false));
- switch (major_key) {
- case CodeStub::CallFunction: {
- int argc =
- CallFunctionStub::ExtractArgcFromMinorKey(minor_key);
- out.AddFormatted("argc = %d", argc);
- break;
- }
- default:
- out.AddFormatted("minor: %d", minor_key);
- }
- }
- } else {
- out.AddFormatted(" %s", Code::Kind2String(kind));
- }
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- out.AddFormatted(" (id = %d)", static_cast<int>(relocinfo.data()));
- }
- } else if (rmode == RelocInfo::RUNTIME_ENTRY &&
- isolate->deoptimizer_data() != NULL) {
- // A runtime entry reloinfo might be a deoptimization bailout.
- Address addr = relocinfo.target_address();
- int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::LAZY);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
- } else {
- out.AddFormatted(" ;; lazy deoptimization bailout %d", id);
- }
- } else {
- out.AddFormatted(" ;; deoptimization bailout %d", id);
- }
- } else {
- out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
- }
- }
- DumpBuffer(f, &out);
- }
-
- // Emit comments following the last instruction (if any).
- if (it != NULL) {
- for ( ; !it->done(); it->next()) {
- if (RelocInfo::IsComment(it->rinfo()->rmode())) {
- out.AddFormatted(" %s",
- reinterpret_cast<const char*>(it->rinfo()->data()));
- DumpBuffer(f, &out);
- }
- }
- }
-
- delete it;
- return static_cast<int>(pc - begin);
-}
-
-
-int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
- V8NameConverter defaultConverter(NULL);
- return DecodeIt(isolate, f, defaultConverter, begin, end);
-}
-
-
-// Called by Code::CodePrint.
-void Disassembler::Decode(FILE* f, Code* code) {
- Isolate* isolate = code->GetIsolate();
- int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION ||
- code->kind() == Code::COMPILED_STUB)
- ? static_cast<int>(code->safepoint_table_offset())
- : code->instruction_size();
- // If there might be a stack check table, stop before reaching it.
- if (code->kind() == Code::FUNCTION) {
- decode_size =
- Min(decode_size, static_cast<int>(code->stack_check_table_offset()));
- }
-
- byte* begin = code->instruction_start();
- byte* end = begin + decode_size;
- V8NameConverter v8NameConverter(code);
- DecodeIt(isolate, f, v8NameConverter, begin, end);
-}
-
-#else // ENABLE_DISASSEMBLER
-
-void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
-int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
- return 0;
-}
-void Disassembler::Decode(FILE* f, Code* code) {}
-
-#endif // ENABLE_DISASSEMBLER
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/disassembler.h b/src/3rdparty/v8/src/disassembler.h
deleted file mode 100644
index 8789150..0000000
--- a/src/3rdparty/v8/src/disassembler.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DISASSEMBLER_H_
-#define V8_DISASSEMBLER_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-class Disassembler : public AllStatic {
- public:
- // Print the bytes in the interval [begin, end) into f.
- static void Dump(FILE* f, byte* begin, byte* end);
-
- // Decode instructions in the the interval [begin, end) and print the
- // code into f. Returns the number of bytes disassembled or 1 if no
- // instruction could be decoded.
- static int Decode(Isolate* isolate, FILE* f, byte* begin, byte* end);
-
- // Decode instructions in code.
- static void Decode(FILE* f, Code* code);
- private:
- // Decode instruction at pc and print disassembled instruction into f.
- // Returns the instruction length in bytes, or 1 if the instruction could
- // not be decoded. The number of characters written is written into
- // the out parameter char_count.
- static int Decode(FILE* f, byte* pc, int* char_count);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_DISASSEMBLER_H_
diff --git a/src/3rdparty/v8/src/diy-fp.cc b/src/3rdparty/v8/src/diy-fp.cc
deleted file mode 100644
index 4913877..0000000
--- a/src/3rdparty/v8/src/diy-fp.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
-#include "diy-fp.h"
-
-namespace v8 {
-namespace internal {
-
-void DiyFp::Multiply(const DiyFp& other) {
- // Simply "emulates" a 128 bit multiplication.
- // However: the resulting number only contains 64 bits. The least
- // significant 64 bits are only used for rounding the most significant 64
- // bits.
- const uint64_t kM32 = 0xFFFFFFFFu;
- uint64_t a = f_ >> 32;
- uint64_t b = f_ & kM32;
- uint64_t c = other.f_ >> 32;
- uint64_t d = other.f_ & kM32;
- uint64_t ac = a * c;
- uint64_t bc = b * c;
- uint64_t ad = a * d;
- uint64_t bd = b * d;
- uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32);
- // By adding 1U << 31 to tmp we round the final result.
- // Halfway cases will be round up.
- tmp += 1U << 31;
- uint64_t result_f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
- e_ += other.e_ + 64;
- f_ = result_f;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/diy-fp.h b/src/3rdparty/v8/src/diy-fp.h
deleted file mode 100644
index 26ff1a2..0000000
--- a/src/3rdparty/v8/src/diy-fp.h
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DIY_FP_H_
-#define V8_DIY_FP_H_
-
-namespace v8 {
-namespace internal {
-
-// This "Do It Yourself Floating Point" class implements a floating-point number
-// with a uint64 significand and an int exponent. Normalized DiyFp numbers will
-// have the most significant bit of the significand set.
-// Multiplication and Subtraction do not normalize their results.
-// DiyFp are not designed to contain special doubles (NaN and Infinity).
-class DiyFp {
- public:
- static const int kSignificandSize = 64;
-
- DiyFp() : f_(0), e_(0) {}
- DiyFp(uint64_t f, int e) : f_(f), e_(e) {}
-
- // this = this - other.
- // The exponents of both numbers must be the same and the significand of this
- // must be bigger than the significand of other.
- // The result will not be normalized.
- void Subtract(const DiyFp& other) {
- ASSERT(e_ == other.e_);
- ASSERT(f_ >= other.f_);
- f_ -= other.f_;
- }
-
- // Returns a - b.
- // The exponents of both numbers must be the same and this must be bigger
- // than other. The result will not be normalized.
- static DiyFp Minus(const DiyFp& a, const DiyFp& b) {
- DiyFp result = a;
- result.Subtract(b);
- return result;
- }
-
-
- // this = this * other.
- void Multiply(const DiyFp& other);
-
- // returns a * b;
- static DiyFp Times(const DiyFp& a, const DiyFp& b) {
- DiyFp result = a;
- result.Multiply(b);
- return result;
- }
-
- void Normalize() {
- ASSERT(f_ != 0);
- uint64_t f = f_;
- int e = e_;
-
- // This method is mainly called for normalizing boundaries. In general
- // boundaries need to be shifted by 10 bits. We thus optimize for this case.
- const uint64_t k10MSBits = static_cast<uint64_t>(0x3FF) << 54;
- while ((f & k10MSBits) == 0) {
- f <<= 10;
- e -= 10;
- }
- while ((f & kUint64MSB) == 0) {
- f <<= 1;
- e--;
- }
- f_ = f;
- e_ = e;
- }
-
- static DiyFp Normalize(const DiyFp& a) {
- DiyFp result = a;
- result.Normalize();
- return result;
- }
-
- uint64_t f() const { return f_; }
- int e() const { return e_; }
-
- void set_f(uint64_t new_value) { f_ = new_value; }
- void set_e(int new_value) { e_ = new_value; }
-
- private:
- static const uint64_t kUint64MSB = static_cast<uint64_t>(1) << 63;
-
- uint64_t f_;
- int e_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_DIY_FP_H_
diff --git a/src/3rdparty/v8/src/double.h b/src/3rdparty/v8/src/double.h
deleted file mode 100644
index fcf6906..0000000
--- a/src/3rdparty/v8/src/double.h
+++ /dev/null
@@ -1,232 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DOUBLE_H_
-#define V8_DOUBLE_H_
-
-#include "diy-fp.h"
-
-namespace v8 {
-namespace internal {
-
-// We assume that doubles and uint64_t have the same endianness.
-inline uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
-inline double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
-
-// Helper functions for doubles.
-class Double {
- public:
- static const uint64_t kSignMask = V8_2PART_UINT64_C(0x80000000, 00000000);
- static const uint64_t kExponentMask = V8_2PART_UINT64_C(0x7FF00000, 00000000);
- static const uint64_t kSignificandMask =
- V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
- static const uint64_t kHiddenBit = V8_2PART_UINT64_C(0x00100000, 00000000);
- static const int kPhysicalSignificandSize = 52; // Excludes the hidden bit.
- static const int kSignificandSize = 53;
-
- Double() : d64_(0) {}
- explicit Double(double d) : d64_(double_to_uint64(d)) {}
- explicit Double(uint64_t d64) : d64_(d64) {}
- explicit Double(DiyFp diy_fp)
- : d64_(DiyFpToUint64(diy_fp)) {}
-
- // The value encoded by this Double must be greater or equal to +0.0.
- // It must not be special (infinity, or NaN).
- DiyFp AsDiyFp() const {
- ASSERT(Sign() > 0);
- ASSERT(!IsSpecial());
- return DiyFp(Significand(), Exponent());
- }
-
- // The value encoded by this Double must be strictly greater than 0.
- DiyFp AsNormalizedDiyFp() const {
- ASSERT(value() > 0.0);
- uint64_t f = Significand();
- int e = Exponent();
-
- // The current double could be a denormal.
- while ((f & kHiddenBit) == 0) {
- f <<= 1;
- e--;
- }
- // Do the final shifts in one go.
- f <<= DiyFp::kSignificandSize - kSignificandSize;
- e -= DiyFp::kSignificandSize - kSignificandSize;
- return DiyFp(f, e);
- }
-
- // Returns the double's bit as uint64.
- uint64_t AsUint64() const {
- return d64_;
- }
-
- // Returns the next greater double. Returns +infinity on input +infinity.
- double NextDouble() const {
- if (d64_ == kInfinity) return Double(kInfinity).value();
- if (Sign() < 0 && Significand() == 0) {
- // -0.0
- return 0.0;
- }
- if (Sign() < 0) {
- return Double(d64_ - 1).value();
- } else {
- return Double(d64_ + 1).value();
- }
- }
-
- int Exponent() const {
- if (IsDenormal()) return kDenormalExponent;
-
- uint64_t d64 = AsUint64();
- int biased_e =
- static_cast<int>((d64 & kExponentMask) >> kPhysicalSignificandSize);
- return biased_e - kExponentBias;
- }
-
- uint64_t Significand() const {
- uint64_t d64 = AsUint64();
- uint64_t significand = d64 & kSignificandMask;
- if (!IsDenormal()) {
- return significand + kHiddenBit;
- } else {
- return significand;
- }
- }
-
- // Returns true if the double is a denormal.
- bool IsDenormal() const {
- uint64_t d64 = AsUint64();
- return (d64 & kExponentMask) == 0;
- }
-
- // We consider denormals not to be special.
- // Hence only Infinity and NaN are special.
- bool IsSpecial() const {
- uint64_t d64 = AsUint64();
- return (d64 & kExponentMask) == kExponentMask;
- }
-
- bool IsInfinite() const {
- uint64_t d64 = AsUint64();
- return ((d64 & kExponentMask) == kExponentMask) &&
- ((d64 & kSignificandMask) == 0);
- }
-
- int Sign() const {
- uint64_t d64 = AsUint64();
- return (d64 & kSignMask) == 0? 1: -1;
- }
-
- // Precondition: the value encoded by this Double must be greater or equal
- // than +0.0.
- DiyFp UpperBoundary() const {
- ASSERT(Sign() > 0);
- return DiyFp(Significand() * 2 + 1, Exponent() - 1);
- }
-
- // Returns the two boundaries of this.
- // The bigger boundary (m_plus) is normalized. The lower boundary has the same
- // exponent as m_plus.
- // Precondition: the value encoded by this Double must be greater than 0.
- void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
- ASSERT(value() > 0.0);
- DiyFp v = this->AsDiyFp();
- bool significand_is_zero = (v.f() == kHiddenBit);
- DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
- DiyFp m_minus;
- if (significand_is_zero && v.e() != kDenormalExponent) {
- // The boundary is closer. Think of v = 1000e10 and v- = 9999e9.
- // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
- // at a distance of 1e8.
- // The only exception is for the smallest normal: the largest denormal is
- // at the same distance as its successor.
- // Note: denormals have the same exponent as the smallest normals.
- m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
- } else {
- m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
- }
- m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
- m_minus.set_e(m_plus.e());
- *out_m_plus = m_plus;
- *out_m_minus = m_minus;
- }
-
- double value() const { return uint64_to_double(d64_); }
-
- // Returns the significand size for a given order of magnitude.
- // If v = f*2^e with 2^p-1 <= f <= 2^p then p+e is v's order of magnitude.
- // This function returns the number of significant binary digits v will have
- // once its encoded into a double. In almost all cases this is equal to
- // kSignificandSize. The only exception are denormals. They start with leading
- // zeroes and their effective significand-size is hence smaller.
- static int SignificandSizeForOrderOfMagnitude(int order) {
- if (order >= (kDenormalExponent + kSignificandSize)) {
- return kSignificandSize;
- }
- if (order <= kDenormalExponent) return 0;
- return order - kDenormalExponent;
- }
-
- private:
- static const int kExponentBias = 0x3FF + kPhysicalSignificandSize;
- static const int kDenormalExponent = -kExponentBias + 1;
- static const int kMaxExponent = 0x7FF - kExponentBias;
- static const uint64_t kInfinity = V8_2PART_UINT64_C(0x7FF00000, 00000000);
-
- const uint64_t d64_;
-
- static uint64_t DiyFpToUint64(DiyFp diy_fp) {
- uint64_t significand = diy_fp.f();
- int exponent = diy_fp.e();
- while (significand > kHiddenBit + kSignificandMask) {
- significand >>= 1;
- exponent++;
- }
- if (exponent >= kMaxExponent) {
- return kInfinity;
- }
- if (exponent < kDenormalExponent) {
- return 0;
- }
- while (exponent > kDenormalExponent && (significand & kHiddenBit) == 0) {
- significand <<= 1;
- exponent--;
- }
- uint64_t biased_exponent;
- if (exponent == kDenormalExponent && (significand & kHiddenBit) == 0) {
- biased_exponent = 0;
- } else {
- biased_exponent = static_cast<uint64_t>(exponent + kExponentBias);
- }
- return (significand & kSignificandMask) |
- (biased_exponent << kPhysicalSignificandSize);
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_DOUBLE_H_
diff --git a/src/3rdparty/v8/src/dtoa.cc b/src/3rdparty/v8/src/dtoa.cc
deleted file mode 100644
index 00233a8..0000000
--- a/src/3rdparty/v8/src/dtoa.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <math.h>
-
-#include "../include/v8stdint.h"
-#include "checks.h"
-#include "utils.h"
-
-#include "dtoa.h"
-
-#include "bignum-dtoa.h"
-#include "double.h"
-#include "fast-dtoa.h"
-#include "fixed-dtoa.h"
-
-namespace v8 {
-namespace internal {
-
-static BignumDtoaMode DtoaToBignumDtoaMode(DtoaMode dtoa_mode) {
- switch (dtoa_mode) {
- case DTOA_SHORTEST: return BIGNUM_DTOA_SHORTEST;
- case DTOA_FIXED: return BIGNUM_DTOA_FIXED;
- case DTOA_PRECISION: return BIGNUM_DTOA_PRECISION;
- default:
- UNREACHABLE();
- return BIGNUM_DTOA_SHORTEST; // To silence compiler.
- }
-}
-
-
-void DoubleToAscii(double v, DtoaMode mode, int requested_digits,
- Vector<char> buffer, int* sign, int* length, int* point) {
- ASSERT(!Double(v).IsSpecial());
- ASSERT(mode == DTOA_SHORTEST || requested_digits >= 0);
-
- if (Double(v).Sign() < 0) {
- *sign = 1;
- v = -v;
- } else {
- *sign = 0;
- }
-
- if (v == 0) {
- buffer[0] = '0';
- buffer[1] = '\0';
- *length = 1;
- *point = 1;
- return;
- }
-
- if (mode == DTOA_PRECISION && requested_digits == 0) {
- buffer[0] = '\0';
- *length = 0;
- return;
- }
-
- bool fast_worked;
- switch (mode) {
- case DTOA_SHORTEST:
- fast_worked = FastDtoa(v, FAST_DTOA_SHORTEST, 0, buffer, length, point);
- break;
- case DTOA_FIXED:
- fast_worked = FastFixedDtoa(v, requested_digits, buffer, length, point);
- break;
- case DTOA_PRECISION:
- fast_worked = FastDtoa(v, FAST_DTOA_PRECISION, requested_digits,
- buffer, length, point);
- break;
- default:
- UNREACHABLE();
- fast_worked = false;
- }
- if (fast_worked) return;
-
- // If the fast dtoa didn't succeed use the slower bignum version.
- BignumDtoaMode bignum_mode = DtoaToBignumDtoaMode(mode);
- BignumDtoa(v, bignum_mode, requested_digits, buffer, length, point);
- buffer[*length] = '\0';
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/dtoa.h b/src/3rdparty/v8/src/dtoa.h
deleted file mode 100644
index 948a079..0000000
--- a/src/3rdparty/v8/src/dtoa.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DTOA_H_
-#define V8_DTOA_H_
-
-namespace v8 {
-namespace internal {
-
-enum DtoaMode {
- // Return the shortest correct representation.
- // For example the output of 0.299999999999999988897 is (the less accurate but
- // correct) 0.3.
- DTOA_SHORTEST,
- // Return a fixed number of digits after the decimal point.
- // For instance fixed(0.1, 4) becomes 0.1000
- // If the input number is big, the output will be big.
- DTOA_FIXED,
- // Return a fixed number of digits, no matter what the exponent is.
- DTOA_PRECISION
-};
-
-// The maximal length of digits a double can have in base 10.
-// Note that DoubleToAscii null-terminates its input. So the given buffer should
-// be at least kBase10MaximalLength + 1 characters long.
-const int kBase10MaximalLength = 17;
-
-// Converts the given double 'v' to ASCII.
-// The result should be interpreted as buffer * 10^(point-length).
-//
-// The output depends on the given mode:
-// - SHORTEST: produce the least amount of digits for which the internal
-// identity requirement is still satisfied. If the digits are printed
-// (together with the correct exponent) then reading this number will give
-// 'v' again. The buffer will choose the representation that is closest to
-// 'v'. If there are two at the same distance, than the one farther away
-// from 0 is chosen (halfway cases - ending with 5 - are rounded up).
-// In this mode the 'requested_digits' parameter is ignored.
-// - FIXED: produces digits necessary to print a given number with
-// 'requested_digits' digits after the decimal point. The produced digits
-// might be too short in which case the caller has to fill the gaps with '0's.
-// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
-// Halfway cases are rounded towards +/-Infinity (away from 0). The call
-// toFixed(0.15, 2) thus returns buffer="2", point=0.
-// The returned buffer may contain digits that would be truncated from the
-// shortest representation of the input.
-// - PRECISION: produces 'requested_digits' where the first digit is not '0'.
-// Even though the length of produced digits usually equals
-// 'requested_digits', the function is allowed to return fewer digits, in
-// which case the caller has to fill the missing digits with '0's.
-// Halfway cases are again rounded away from 0.
-// 'DoubleToAscii' expects the given buffer to be big enough to hold all digits
-// and a terminating null-character. In SHORTEST-mode it expects a buffer of
-// at least kBase10MaximalLength + 1. Otherwise, the size of the output is
-// limited to requested_digits digits plus the null terminator.
-void DoubleToAscii(double v, DtoaMode mode, int requested_digits,
- Vector<char> buffer, int* sign, int* length, int* point);
-
-} } // namespace v8::internal
-
-#endif // V8_DTOA_H_
diff --git a/src/3rdparty/v8/src/elements-kind.cc b/src/3rdparty/v8/src/elements-kind.cc
deleted file mode 100644
index 19fac44..0000000
--- a/src/3rdparty/v8/src/elements-kind.cc
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "elements-kind.h"
-
-#include "api.h"
-#include "elements.h"
-#include "objects.h"
-
-namespace v8 {
-namespace internal {
-
-
-const char* ElementsKindToString(ElementsKind kind) {
- ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
- return accessor->name();
-}
-
-
-void PrintElementsKind(FILE* out, ElementsKind kind) {
- FPrintF(out, "%s", ElementsKindToString(kind));
-}
-
-
-ElementsKind GetInitialFastElementsKind() {
- if (FLAG_packed_arrays) {
- return FAST_SMI_ELEMENTS;
- } else {
- return FAST_HOLEY_SMI_ELEMENTS;
- }
-}
-
-
-struct InitializeFastElementsKindSequence {
- static void Construct(
- ElementsKind** fast_elements_kind_sequence_ptr) {
- ElementsKind* fast_elements_kind_sequence =
- new ElementsKind[kFastElementsKindCount];
- *fast_elements_kind_sequence_ptr = fast_elements_kind_sequence;
- STATIC_ASSERT(FAST_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
- fast_elements_kind_sequence[0] = FAST_SMI_ELEMENTS;
- fast_elements_kind_sequence[1] = FAST_HOLEY_SMI_ELEMENTS;
- fast_elements_kind_sequence[2] = FAST_DOUBLE_ELEMENTS;
- fast_elements_kind_sequence[3] = FAST_HOLEY_DOUBLE_ELEMENTS;
- fast_elements_kind_sequence[4] = FAST_ELEMENTS;
- fast_elements_kind_sequence[5] = FAST_HOLEY_ELEMENTS;
- }
-};
-
-
-static LazyInstance<ElementsKind*,
- InitializeFastElementsKindSequence>::type
- fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER;
-
-
-ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
- ASSERT(sequence_number >= 0 &&
- sequence_number < kFastElementsKindCount);
- return fast_elements_kind_sequence.Get()[sequence_number];
-}
-
-int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
- for (int i = 0; i < kFastElementsKindCount; ++i) {
- if (fast_elements_kind_sequence.Get()[i] == elements_kind) {
- return i;
- }
- }
- UNREACHABLE();
- return 0;
-}
-
-
-ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
- bool allow_only_packed) {
- ASSERT(IsFastElementsKind(elements_kind));
- ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
- while (true) {
- int index =
- GetSequenceIndexFromFastElementsKind(elements_kind) + 1;
- elements_kind = GetFastElementsKindFromSequenceIndex(index);
- if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
- return elements_kind;
- }
- }
- UNREACHABLE();
- return TERMINAL_FAST_ELEMENTS_KIND;
-}
-
-
-bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
- ElementsKind to_kind) {
- switch (from_kind) {
- case FAST_SMI_ELEMENTS:
- return to_kind != FAST_SMI_ELEMENTS;
- case FAST_HOLEY_SMI_ELEMENTS:
- return to_kind != FAST_SMI_ELEMENTS &&
- to_kind != FAST_HOLEY_SMI_ELEMENTS;
- case FAST_DOUBLE_ELEMENTS:
- return to_kind != FAST_SMI_ELEMENTS &&
- to_kind != FAST_HOLEY_SMI_ELEMENTS &&
- to_kind != FAST_DOUBLE_ELEMENTS;
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return to_kind == FAST_ELEMENTS ||
- to_kind == FAST_HOLEY_ELEMENTS;
- case FAST_ELEMENTS:
- return to_kind == FAST_HOLEY_ELEMENTS;
- case FAST_HOLEY_ELEMENTS:
- return false;
- default:
- return false;
- }
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/elements-kind.h b/src/3rdparty/v8/src/elements-kind.h
deleted file mode 100644
index cb3bb9c..0000000
--- a/src/3rdparty/v8/src/elements-kind.h
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ELEMENTS_KIND_H_
-#define V8_ELEMENTS_KIND_H_
-
-#include "v8checks.h"
-
-namespace v8 {
-namespace internal {
-
-enum ElementsKind {
- // The "fast" kind for elements that only contain SMI values. Must be first
- // to make it possible to efficiently check maps for this kind.
- FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
-
- // The "fast" kind for tagged values. Must be second to make it possible to
- // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
- // together at once.
- FAST_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
-
- // The "fast" kind for unwrapped, non-tagged double values.
- FAST_DOUBLE_ELEMENTS,
- FAST_HOLEY_DOUBLE_ELEMENTS,
-
- // The "slow" kind.
- DICTIONARY_ELEMENTS,
- NON_STRICT_ARGUMENTS_ELEMENTS,
- // The "fast" kind for external arrays
- EXTERNAL_BYTE_ELEMENTS,
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
- EXTERNAL_SHORT_ELEMENTS,
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
- EXTERNAL_INT_ELEMENTS,
- EXTERNAL_UNSIGNED_INT_ELEMENTS,
- EXTERNAL_FLOAT_ELEMENTS,
- EXTERNAL_DOUBLE_ELEMENTS,
- EXTERNAL_PIXEL_ELEMENTS,
-
- // Derived constants from ElementsKind
- FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
- LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
- FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
- LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
- FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
- LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
- TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
-};
-
-const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
-const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
- FIRST_FAST_ELEMENTS_KIND + 1;
-
-const char* ElementsKindToString(ElementsKind kind);
-void PrintElementsKind(FILE* out, ElementsKind kind);
-
-ElementsKind GetInitialFastElementsKind();
-
-ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_index);
-
-int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
-
-
-inline bool IsDictionaryElementsKind(ElementsKind kind) {
- return kind == DICTIONARY_ELEMENTS;
-}
-
-
-inline bool IsExternalArrayElementsKind(ElementsKind kind) {
- return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
- kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
-}
-
-
-inline bool IsFastElementsKind(ElementsKind kind) {
- ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
- return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
-}
-
-
-inline bool IsFastDoubleElementsKind(ElementsKind kind) {
- return kind == FAST_DOUBLE_ELEMENTS ||
- kind == FAST_HOLEY_DOUBLE_ELEMENTS;
-}
-
-
-inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
- return IsFastDoubleElementsKind(kind) ||
- kind == EXTERNAL_DOUBLE_ELEMENTS ||
- kind == EXTERNAL_FLOAT_ELEMENTS;
-}
-
-
-inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
- return kind == FAST_SMI_ELEMENTS ||
- kind == FAST_HOLEY_SMI_ELEMENTS ||
- kind == FAST_ELEMENTS ||
- kind == FAST_HOLEY_ELEMENTS;
-}
-
-
-inline bool IsFastSmiElementsKind(ElementsKind kind) {
- return kind == FAST_SMI_ELEMENTS ||
- kind == FAST_HOLEY_SMI_ELEMENTS;
-}
-
-
-inline bool IsFastObjectElementsKind(ElementsKind kind) {
- return kind == FAST_ELEMENTS ||
- kind == FAST_HOLEY_ELEMENTS;
-}
-
-
-inline bool IsFastHoleyElementsKind(ElementsKind kind) {
- return kind == FAST_HOLEY_SMI_ELEMENTS ||
- kind == FAST_HOLEY_DOUBLE_ELEMENTS ||
- kind == FAST_HOLEY_ELEMENTS;
-}
-
-
-inline bool IsHoleyElementsKind(ElementsKind kind) {
- return IsFastHoleyElementsKind(kind) ||
- kind == DICTIONARY_ELEMENTS;
-}
-
-
-inline bool IsFastPackedElementsKind(ElementsKind kind) {
- return kind == FAST_SMI_ELEMENTS ||
- kind == FAST_DOUBLE_ELEMENTS ||
- kind == FAST_ELEMENTS;
-}
-
-
-inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) {
- if (holey_kind == FAST_HOLEY_SMI_ELEMENTS) {
- return FAST_SMI_ELEMENTS;
- }
- if (holey_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
- return FAST_DOUBLE_ELEMENTS;
- }
- if (holey_kind == FAST_HOLEY_ELEMENTS) {
- return FAST_ELEMENTS;
- }
- return holey_kind;
-}
-
-
-inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
- if (packed_kind == FAST_SMI_ELEMENTS) {
- return FAST_HOLEY_SMI_ELEMENTS;
- }
- if (packed_kind == FAST_DOUBLE_ELEMENTS) {
- return FAST_HOLEY_DOUBLE_ELEMENTS;
- }
- if (packed_kind == FAST_ELEMENTS) {
- return FAST_HOLEY_ELEMENTS;
- }
- return packed_kind;
-}
-
-
-inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
- ASSERT(IsFastSmiElementsKind(from_kind));
- return (from_kind == FAST_SMI_ELEMENTS)
- ? FAST_ELEMENTS
- : FAST_HOLEY_ELEMENTS;
-}
-
-
-inline bool IsSimpleMapChangeTransition(ElementsKind from_kind,
- ElementsKind to_kind) {
- return (GetHoleyElementsKind(from_kind) == to_kind) ||
- (IsFastSmiElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind));
-}
-
-
-bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
- ElementsKind to_kind);
-
-
-inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
- return IsFastElementsKind(from_kind) &&
- from_kind != TERMINAL_FAST_ELEMENTS_KIND;
-}
-
-
-ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
- bool allow_only_packed);
-
-
-inline bool CanTransitionToMoreGeneralFastElementsKind(
- ElementsKind elements_kind,
- bool allow_only_packed) {
- return IsFastElementsKind(elements_kind) &&
- (elements_kind != TERMINAL_FAST_ELEMENTS_KIND &&
- (!allow_only_packed || elements_kind != FAST_ELEMENTS));
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ELEMENTS_KIND_H_
diff --git a/src/3rdparty/v8/src/elements.cc b/src/3rdparty/v8/src/elements.cc
deleted file mode 100644
index 9deef60..0000000
--- a/src/3rdparty/v8/src/elements.cc
+++ /dev/null
@@ -1,2073 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "arguments.h"
-#include "objects.h"
-#include "elements.h"
-#include "utils.h"
-#include "v8conversions.h"
-
-// Each concrete ElementsAccessor can handle exactly one ElementsKind,
-// several abstract ElementsAccessor classes are used to allow sharing
-// common code.
-//
-// Inheritance hierarchy:
-// - ElementsAccessorBase (abstract)
-// - FastElementsAccessor (abstract)
-// - FastSmiOrObjectElementsAccessor
-// - FastPackedSmiElementsAccessor
-// - FastHoleySmiElementsAccessor
-// - FastPackedObjectElementsAccessor
-// - FastHoleyObjectElementsAccessor
-// - FastDoubleElementsAccessor
-// - FastPackedDoubleElementsAccessor
-// - FastHoleyDoubleElementsAccessor
-// - ExternalElementsAccessor (abstract)
-// - ExternalByteElementsAccessor
-// - ExternalUnsignedByteElementsAccessor
-// - ExternalShortElementsAccessor
-// - ExternalUnsignedShortElementsAccessor
-// - ExternalIntElementsAccessor
-// - ExternalUnsignedIntElementsAccessor
-// - ExternalFloatElementsAccessor
-// - ExternalDoubleElementsAccessor
-// - PixelElementsAccessor
-// - DictionaryElementsAccessor
-// - NonStrictArgumentsElementsAccessor
-
-
-namespace v8 {
-namespace internal {
-
-
-static const int kPackedSizeNotKnown = -1;
-
-
-// First argument in list is the accessor class, the second argument is the
-// accessor ElementsKind, and the third is the backing store class. Use the
-// fast element handler for smi-only arrays. The implementation is currently
-// identical. Note that the order must match that of the ElementsKind enum for
-// the |accessor_array[]| below to work.
-#define ELEMENTS_LIST(V) \
- V(FastPackedSmiElementsAccessor, FAST_SMI_ELEMENTS, FixedArray) \
- V(FastHoleySmiElementsAccessor, FAST_HOLEY_SMI_ELEMENTS, \
- FixedArray) \
- V(FastPackedObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
- V(FastHoleyObjectElementsAccessor, FAST_HOLEY_ELEMENTS, FixedArray) \
- V(FastPackedDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, \
- FixedDoubleArray) \
- V(FastHoleyDoubleElementsAccessor, FAST_HOLEY_DOUBLE_ELEMENTS, \
- FixedDoubleArray) \
- V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \
- SeededNumberDictionary) \
- V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \
- FixedArray) \
- V(ExternalByteElementsAccessor, EXTERNAL_BYTE_ELEMENTS, \
- ExternalByteArray) \
- V(ExternalUnsignedByteElementsAccessor, \
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS, ExternalUnsignedByteArray) \
- V(ExternalShortElementsAccessor, EXTERNAL_SHORT_ELEMENTS, \
- ExternalShortArray) \
- V(ExternalUnsignedShortElementsAccessor, \
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS, ExternalUnsignedShortArray) \
- V(ExternalIntElementsAccessor, EXTERNAL_INT_ELEMENTS, \
- ExternalIntArray) \
- V(ExternalUnsignedIntElementsAccessor, \
- EXTERNAL_UNSIGNED_INT_ELEMENTS, ExternalUnsignedIntArray) \
- V(ExternalFloatElementsAccessor, \
- EXTERNAL_FLOAT_ELEMENTS, ExternalFloatArray) \
- V(ExternalDoubleElementsAccessor, \
- EXTERNAL_DOUBLE_ELEMENTS, ExternalDoubleArray) \
- V(PixelElementsAccessor, EXTERNAL_PIXEL_ELEMENTS, ExternalPixelArray)
-
-
-template<ElementsKind Kind> class ElementsKindTraits {
- public:
- typedef FixedArrayBase BackingStore;
-};
-
-#define ELEMENTS_TRAITS(Class, KindParam, Store) \
-template<> class ElementsKindTraits<KindParam> { \
- public: \
- static const ElementsKind Kind = KindParam; \
- typedef Store BackingStore; \
-};
-ELEMENTS_LIST(ELEMENTS_TRAITS)
-#undef ELEMENTS_TRAITS
-
-
-ElementsAccessor** ElementsAccessor::elements_accessors_;
-
-
-static bool HasKey(FixedArray* array, Object* key) {
- int len0 = array->length();
- for (int i = 0; i < len0; i++) {
- Object* element = array->get(i);
- if (element->IsSmi() && element == key) return true;
- if (element->IsString() &&
- key->IsString() && String::cast(element)->Equals(String::cast(key))) {
- return true;
- }
- }
- return false;
-}
-
-
-static Failure* ThrowArrayLengthRangeError(Heap* heap) {
- HandleScope scope(heap->isolate());
- return heap->isolate()->Throw(
- *heap->isolate()->factory()->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
-}
-
-
-static void CopyObjectToObjectElements(FixedArrayBase* from_base,
- ElementsKind from_kind,
- uint32_t from_start,
- FixedArrayBase* to_base,
- ElementsKind to_kind,
- uint32_t to_start,
- int raw_copy_size) {
- ASSERT(to_base->map() != HEAP->fixed_cow_array_map());
- AssertNoAllocation no_allocation;
- int copy_size = raw_copy_size;
- if (raw_copy_size < 0) {
- ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from_base->length() - from_start,
- to_base->length() - to_start);
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- int start = to_start + copy_size;
- int length = to_base->length() - start;
- if (length > 0) {
- Heap* heap = from_base->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
- heap->the_hole_value(), length);
- }
- }
- }
- ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
- if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedArray* to = FixedArray::cast(to_base);
- ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
- ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
- Address to_address = to->address() + FixedArray::kHeaderSize;
- Address from_address = from->address() + FixedArray::kHeaderSize;
- CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
- reinterpret_cast<Object**>(from_address) + from_start,
- copy_size);
- if (IsFastObjectElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Heap* heap = from->GetHeap();
- if (!heap->InNewSpace(to)) {
- heap->RecordWrites(to->address(),
- to->OffsetOfElementAt(to_start),
- copy_size);
- }
- heap->incremental_marking()->RecordWrites(to);
- }
-}
-
-
-static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
- uint32_t from_start,
- FixedArrayBase* to_base,
- ElementsKind to_kind,
- uint32_t to_start,
- int raw_copy_size) {
- SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
- AssertNoAllocation no_allocation;
- int copy_size = raw_copy_size;
- Heap* heap = from->GetHeap();
- if (raw_copy_size < 0) {
- ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from->max_number_key() + 1 - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- int start = to_start + copy_size;
- int length = to_base->length() - start;
- if (length > 0) {
- Heap* heap = from->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
- heap->the_hole_value(), length);
- }
- }
- }
- ASSERT(to_base != from_base);
- ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
- if (copy_size == 0) return;
- FixedArray* to = FixedArray::cast(to_base);
- uint32_t to_length = to->length();
- if (to_start + copy_size > to_length) {
- copy_size = to_length - to_start;
- }
- for (int i = 0; i < copy_size; i++) {
- int entry = from->FindEntry(i + from_start);
- if (entry != SeededNumberDictionary::kNotFound) {
- Object* value = from->ValueAt(entry);
- ASSERT(!value->IsTheHole());
- to->set(i + to_start, value, SKIP_WRITE_BARRIER);
- } else {
- to->set_the_hole(i + to_start);
- }
- }
- if (IsFastObjectElementsKind(to_kind)) {
- if (!heap->InNewSpace(to)) {
- heap->RecordWrites(to->address(),
- to->OffsetOfElementAt(to_start),
- copy_size);
- }
- heap->incremental_marking()->RecordWrites(to);
- }
-}
-
-
-MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
- FixedArrayBase* from_base,
- uint32_t from_start,
- FixedArrayBase* to_base,
- ElementsKind to_kind,
- uint32_t to_start,
- int raw_copy_size) {
- ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
- int copy_size = raw_copy_size;
- if (raw_copy_size < 0) {
- ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from_base->length() - from_start,
- to_base->length() - to_start);
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- // Also initialize the area that will be copied over since HeapNumber
- // allocation below can cause an incremental marking step, requiring all
- // existing heap objects to be propertly initialized.
- int start = to_start;
- int length = to_base->length() - start;
- if (length > 0) {
- Heap* heap = from_base->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
- heap->the_hole_value(), length);
- }
- }
- }
- ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
- if (copy_size == 0) return from_base;
- FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
- FixedArray* to = FixedArray::cast(to_base);
- for (int i = 0; i < copy_size; ++i) {
- if (IsFastSmiElementsKind(to_kind)) {
- UNIMPLEMENTED();
- return Failure::Exception();
- } else {
- MaybeObject* maybe_value = from->get(i + from_start);
- Object* value;
- ASSERT(IsFastObjectElementsKind(to_kind));
- // Because Double -> Object elements transitions allocate HeapObjects
- // iteratively, the allocate must succeed within a single GC cycle,
- // otherwise the retry after the GC will also fail. In order to ensure
- // that no GC is triggered, allocate HeapNumbers from old space if they
- // can't be taken from new space.
- if (!maybe_value->ToObject(&value)) {
- ASSERT(maybe_value->IsRetryAfterGC() || maybe_value->IsOutOfMemory());
- Heap* heap = from->GetHeap();
- MaybeObject* maybe_value_object =
- heap->AllocateHeapNumber(from->get_scalar(i + from_start),
- TENURED);
- if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
- }
- to->set(i + to_start, value, UPDATE_WRITE_BARRIER);
- }
- }
- return to;
-}
-
-
-static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
- uint32_t from_start,
- FixedArrayBase* to_base,
- uint32_t to_start,
- int raw_copy_size) {
- int copy_size = raw_copy_size;
- if (raw_copy_size < 0) {
- ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from_base->length() - from_start,
- to_base->length() - to_start);
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
- }
- }
- }
- ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
- if (copy_size == 0) return;
- FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
- Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
- to_address += kDoubleSize * to_start;
- from_address += kDoubleSize * from_start;
- int words_per_double = (kDoubleSize / kPointerSize);
- CopyWords(reinterpret_cast<Object**>(to_address),
- reinterpret_cast<Object**>(from_address),
- words_per_double * copy_size);
-}
-
-
-static void CopySmiToDoubleElements(FixedArrayBase* from_base,
- uint32_t from_start,
- FixedArrayBase* to_base,
- uint32_t to_start,
- int raw_copy_size) {
- int copy_size = raw_copy_size;
- if (raw_copy_size < 0) {
- ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from_base->length() - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
- }
- }
- }
- ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
- if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- Object* the_hole = from->GetHeap()->the_hole_value();
- for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
- from_start < from_end; from_start++, to_start++) {
- Object* hole_or_smi = from->get(from_start);
- if (hole_or_smi == the_hole) {
- to->set_the_hole(to_start);
- } else {
- to->set(to_start, Smi::cast(hole_or_smi)->value());
- }
- }
-}
-
-
-static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
- uint32_t from_start,
- FixedArrayBase* to_base,
- uint32_t to_start,
- int packed_size,
- int raw_copy_size) {
- int copy_size = raw_copy_size;
- uint32_t to_end;
- if (raw_copy_size < 0) {
- ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = packed_size - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- to_end = to_base->length();
- for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
- }
- } else {
- to_end = to_start + static_cast<uint32_t>(copy_size);
- }
- } else {
- to_end = to_start + static_cast<uint32_t>(copy_size);
- }
- ASSERT(static_cast<int>(to_end) <= to_base->length());
- ASSERT(packed_size >= 0 && packed_size <= copy_size);
- ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
- if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
- from_start < from_end; from_start++, to_start++) {
- Object* smi = from->get(from_start);
- ASSERT(!smi->IsTheHole());
- to->set(to_start, Smi::cast(smi)->value());
- }
-}
-
-
-static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
- uint32_t from_start,
- FixedArrayBase* to_base,
- uint32_t to_start,
- int raw_copy_size) {
- int copy_size = raw_copy_size;
- if (raw_copy_size < 0) {
- ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from_base->length() - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
- }
- }
- }
- ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
- if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- Object* the_hole = from->GetHeap()->the_hole_value();
- for (uint32_t from_end = from_start + copy_size;
- from_start < from_end; from_start++, to_start++) {
- Object* hole_or_object = from->get(from_start);
- if (hole_or_object == the_hole) {
- to->set_the_hole(to_start);
- } else {
- to->set(to_start, hole_or_object->Number());
- }
- }
-}
-
-
-static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
- uint32_t from_start,
- FixedArrayBase* to_base,
- uint32_t to_start,
- int raw_copy_size) {
- SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
- int copy_size = raw_copy_size;
- if (copy_size < 0) {
- ASSERT(copy_size == ElementsAccessor::kCopyToEnd ||
- copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from->max_number_key() + 1 - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
- }
- }
- }
- if (copy_size == 0) return;
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- uint32_t to_length = to->length();
- if (to_start + copy_size > to_length) {
- copy_size = to_length - to_start;
- }
- for (int i = 0; i < copy_size; i++) {
- int entry = from->FindEntry(i + from_start);
- if (entry != SeededNumberDictionary::kNotFound) {
- to->set(i + to_start, from->ValueAt(entry)->Number());
- } else {
- to->set_the_hole(i + to_start);
- }
- }
-}
-
-
-static void TraceTopFrame(Isolate* isolate) {
- StackFrameIterator it(isolate);
- if (it.done()) {
- PrintF("unknown location (no JavaScript frames present)");
- return;
- }
- StackFrame* raw_frame = it.frame();
- if (raw_frame->is_internal()) {
- Isolate* isolate = Isolate::Current();
- Code* apply_builtin = isolate->builtins()->builtin(
- Builtins::kFunctionApply);
- if (raw_frame->unchecked_code() == apply_builtin) {
- PrintF("apply from ");
- it.Advance();
- raw_frame = it.frame();
- }
- }
- JavaScriptFrame::PrintTop(isolate, stdout, false, true);
-}
-
-
-void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
- bool allow_appending) {
- Object* raw_length = NULL;
- const char* elements_type = "array";
- if (obj->IsJSArray()) {
- JSArray* array = JSArray::cast(obj);
- raw_length = array->length();
- } else {
- raw_length = Smi::FromInt(obj->elements()->length());
- elements_type = "object";
- }
-
- if (raw_length->IsNumber()) {
- double n = raw_length->Number();
- if (FastI2D(FastD2UI(n)) == n) {
- int32_t int32_length = DoubleToInt32(n);
- uint32_t compare_length = static_cast<uint32_t>(int32_length);
- if (allow_appending) compare_length++;
- if (key >= compare_length) {
- PrintF("[OOB %s %s (%s length = %d, element accessed = %d) in ",
- elements_type, op, elements_type,
- static_cast<int>(int32_length),
- static_cast<int>(key));
- TraceTopFrame(obj->GetIsolate());
- PrintF("]\n");
- }
- } else {
- PrintF("[%s elements length not integer value in ", elements_type);
- TraceTopFrame(obj->GetIsolate());
- PrintF("]\n");
- }
- } else {
- PrintF("[%s elements length not a number in ", elements_type);
- TraceTopFrame(obj->GetIsolate());
- PrintF("]\n");
- }
-}
-
-
-// Base class for element handler implementations. Contains the
-// the common logic for objects with different ElementsKinds.
-// Subclasses must specialize method for which the element
-// implementation differs from the base class implementation.
-//
-// This class is intended to be used in the following way:
-//
-// class SomeElementsAccessor :
-// public ElementsAccessorBase<SomeElementsAccessor,
-// BackingStoreClass> {
-// ...
-// }
-//
-// This is an example of the Curiously Recurring Template Pattern (see
-// http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern). We use
-// CRTP to guarantee aggressive compile time optimizations (i.e. inlining and
-// specialization of SomeElementsAccessor methods).
-template <typename ElementsAccessorSubclass,
- typename ElementsTraitsParam>
-class ElementsAccessorBase : public ElementsAccessor {
- protected:
- explicit ElementsAccessorBase(const char* name)
- : ElementsAccessor(name) { }
-
- typedef ElementsTraitsParam ElementsTraits;
- typedef typename ElementsTraitsParam::BackingStore BackingStore;
-
- virtual ElementsKind kind() const { return ElementsTraits::Kind; }
-
- static void ValidateContents(JSObject* holder, int length) {
- }
-
- static void ValidateImpl(JSObject* holder) {
- FixedArrayBase* fixed_array_base = holder->elements();
- // When objects are first allocated, its elements are Failures.
- if (fixed_array_base->IsFailure()) return;
- if (!fixed_array_base->IsHeapObject()) return;
- Map* map = fixed_array_base->map();
- // Arrays that have been shifted in place can't be verified.
- Heap* heap = holder->GetHeap();
- if (map == heap->one_pointer_filler_map() ||
- map == heap->two_pointer_filler_map() ||
- map == heap->free_space_map()) {
- return;
- }
- int length = 0;
- if (holder->IsJSArray()) {
- Object* length_obj = JSArray::cast(holder)->length();
- if (length_obj->IsSmi()) {
- length = Smi::cast(length_obj)->value();
- }
- } else {
- length = fixed_array_base->length();
- }
- ElementsAccessorSubclass::ValidateContents(holder, length);
- }
-
- virtual void Validate(JSObject* holder) {
- ElementsAccessorSubclass::ValidateImpl(holder);
- }
-
- static bool HasElementImpl(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- return ElementsAccessorSubclass::GetAttributesImpl(
- receiver, holder, key, backing_store) != ABSENT;
- }
-
- virtual bool HasElement(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (backing_store == NULL) {
- backing_store = holder->elements();
- }
- return ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, backing_store);
- }
-
- MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (backing_store == NULL) {
- backing_store = holder->elements();
- }
-
- if (!IsExternalArrayElementsKind(ElementsTraits::Kind) &&
- FLAG_trace_js_array_abuse) {
- CheckArrayAbuse(holder, "elements read", key);
- }
-
- if (IsExternalArrayElementsKind(ElementsTraits::Kind) &&
- FLAG_trace_external_array_abuse) {
- CheckArrayAbuse(holder, "external elements read", key);
- }
-
- return ElementsAccessorSubclass::GetImpl(
- receiver, holder, key, backing_store);
- }
-
- MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
- ? BackingStore::cast(backing_store)->get(key)
- : backing_store->GetHeap()->the_hole_value();
- }
-
- MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (backing_store == NULL) {
- backing_store = holder->elements();
- }
- return ElementsAccessorSubclass::GetAttributesImpl(
- receiver, holder, key, backing_store);
- }
-
- MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
- return ABSENT;
- }
- return BackingStore::cast(backing_store)->is_the_hole(key) ? ABSENT : NONE;
- }
-
- MUST_USE_RESULT virtual PropertyType GetType(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (backing_store == NULL) {
- backing_store = holder->elements();
- }
- return ElementsAccessorSubclass::GetTypeImpl(
- receiver, holder, key, backing_store);
- }
-
- MUST_USE_RESULT static PropertyType GetTypeImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
- return NONEXISTENT;
- }
- return BackingStore::cast(backing_store)->is_the_hole(key)
- ? NONEXISTENT : FIELD;
- }
-
- MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (backing_store == NULL) {
- backing_store = holder->elements();
- }
- return ElementsAccessorSubclass::GetAccessorPairImpl(
- receiver, holder, key, backing_store);
- }
-
- MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- return NULL;
- }
-
- MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
- Object* length) {
- return ElementsAccessorSubclass::SetLengthImpl(
- array, length, array->elements());
- }
-
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- FixedArrayBase* backing_store);
-
- MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(
- JSArray* array,
- int capacity,
- int length) {
- return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
- array,
- capacity,
- length);
- }
-
- MUST_USE_RESULT static MaybeObject* SetFastElementsCapacityAndLength(
- JSObject* obj,
- int capacity,
- int length) {
- UNIMPLEMENTED();
- return obj;
- }
-
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) = 0;
-
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
- UNREACHABLE();
- return NULL;
- }
-
- MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder,
- uint32_t from_start,
- ElementsKind from_kind,
- FixedArrayBase* to,
- uint32_t to_start,
- int copy_size,
- FixedArrayBase* from) {
- int packed_size = kPackedSizeNotKnown;
- if (from == NULL) {
- from = from_holder->elements();
- }
-
- if (from_holder) {
- bool is_packed = IsFastPackedElementsKind(from_kind) &&
- from_holder->IsJSArray();
- if (is_packed) {
- packed_size = Smi::cast(JSArray::cast(from_holder)->length())->value();
- if (copy_size >= 0 && packed_size > copy_size) {
- packed_size = copy_size;
- }
- }
- }
- return ElementsAccessorSubclass::CopyElementsImpl(
- from, from_start, to, from_kind, to_start, packed_size, copy_size);
- }
-
- MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
- Object* receiver,
- JSObject* holder,
- FixedArray* to,
- FixedArrayBase* from) {
- int len0 = to->length();
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- for (int i = 0; i < len0; i++) {
- ASSERT(!to->get(i)->IsTheHole());
- }
- }
-#endif
- if (from == NULL) {
- from = holder->elements();
- }
-
- // Optimize if 'other' is empty.
- // We cannot optimize if 'this' is empty, as other may have holes.
- uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(from);
- if (len1 == 0) return to;
-
- // Compute how many elements are not in other.
- uint32_t extra = 0;
- for (uint32_t y = 0; y < len1; y++) {
- uint32_t key = ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
- if (ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, from)) {
- MaybeObject* maybe_value =
- ElementsAccessorSubclass::GetImpl(receiver, holder, key, from);
- Object* value;
- if (!maybe_value->To(&value)) return maybe_value;
- ASSERT(!value->IsTheHole());
- if (!HasKey(to, value)) {
- extra++;
- }
- }
- }
-
- if (extra == 0) return to;
-
- // Allocate the result
- FixedArray* result;
- MaybeObject* maybe_obj = from->GetHeap()->AllocateFixedArray(len0 + extra);
- if (!maybe_obj->To(&result)) return maybe_obj;
-
- // Fill in the content
- {
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < len0; i++) {
- Object* e = to->get(i);
- ASSERT(e->IsString() || e->IsNumber());
- result->set(i, e, mode);
- }
- }
- // Fill in the extra values.
- uint32_t index = 0;
- for (uint32_t y = 0; y < len1; y++) {
- uint32_t key =
- ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
- if (ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, from)) {
- MaybeObject* maybe_value =
- ElementsAccessorSubclass::GetImpl(receiver, holder, key, from);
- Object* value;
- if (!maybe_value->To(&value)) return maybe_value;
- if (!value->IsTheHole() && !HasKey(to, value)) {
- result->set(len0 + index, value);
- index++;
- }
- }
- }
- ASSERT(extra == index);
- return result;
- }
-
- protected:
- static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
- return backing_store->length();
- }
-
- virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
- return ElementsAccessorSubclass::GetCapacityImpl(backing_store);
- }
-
- static uint32_t GetKeyForIndexImpl(FixedArrayBase* backing_store,
- uint32_t index) {
- return index;
- }
-
- virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
- uint32_t index) {
- return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ElementsAccessorBase);
-};
-
-
-// Super class for all fast element arrays.
-template<typename FastElementsAccessorSubclass,
- typename KindTraits,
- int ElementSize>
-class FastElementsAccessor
- : public ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits> {
- public:
- explicit FastElementsAccessor(const char* name)
- : ElementsAccessorBase<FastElementsAccessorSubclass,
- KindTraits>(name) {}
- protected:
- friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
- friend class NonStrictArgumentsElementsAccessor;
-
- typedef typename KindTraits::BackingStore BackingStore;
-
- // Adjusts the length of the fast backing store or returns the new length or
- // undefined in case conversion to a slow backing store should be performed.
- static MaybeObject* SetLengthWithoutNormalize(FixedArrayBase* backing_store,
- JSArray* array,
- Object* length_object,
- uint32_t length) {
- uint32_t old_capacity = backing_store->length();
- Object* old_length = array->length();
- bool same_or_smaller_size = old_length->IsSmi() &&
- static_cast<uint32_t>(Smi::cast(old_length)->value()) >= length;
- ElementsKind kind = array->GetElementsKind();
-
- if (!same_or_smaller_size && IsFastElementsKind(kind) &&
- !IsFastHoleyElementsKind(kind)) {
- kind = GetHoleyElementsKind(kind);
- MaybeObject* maybe_obj = array->TransitionElementsKind(kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
- }
-
- // Check whether the backing store should be shrunk.
- if (length <= old_capacity) {
- if (array->HasFastSmiOrObjectElements()) {
- MaybeObject* maybe_obj = array->EnsureWritableFastElements();
- if (!maybe_obj->To(&backing_store)) return maybe_obj;
- }
- if (2 * length <= old_capacity) {
- // If more than half the elements won't be used, trim the array.
- if (length == 0) {
- array->initialize_elements();
- } else {
- backing_store->set_length(length);
- Address filler_start = backing_store->address() +
- BackingStore::OffsetOfElementAt(length);
- int filler_size = (old_capacity - length) * ElementSize;
- array->GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
- }
- } else {
- // Otherwise, fill the unused tail with holes.
- int old_length = FastD2IChecked(array->length()->Number());
- for (int i = length; i < old_length; i++) {
- BackingStore::cast(backing_store)->set_the_hole(i);
- }
- }
- return length_object;
- }
-
- // Check whether the backing store should be expanded.
- uint32_t min = JSObject::NewElementsCapacity(old_capacity);
- uint32_t new_capacity = length > min ? length : min;
- if (!array->ShouldConvertToSlowElements(new_capacity)) {
- MaybeObject* result = FastElementsAccessorSubclass::
- SetFastElementsCapacityAndLength(array, new_capacity, length);
- if (result->IsFailure()) return result;
- array->ValidateElements();
- return length_object;
- }
-
- // Request conversion to slow elements.
- return array->GetHeap()->undefined_value();
- }
-
- static MaybeObject* DeleteCommon(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
- ASSERT(obj->HasFastSmiOrObjectElements() ||
- obj->HasFastDoubleElements() ||
- obj->HasFastArgumentsElements());
- Heap* heap = obj->GetHeap();
- Object* elements = obj->elements();
- if (elements == heap->empty_fixed_array()) {
- return heap->true_value();
- }
- typename KindTraits::BackingStore* backing_store =
- KindTraits::BackingStore::cast(elements);
- bool is_non_strict_arguments_elements_map =
- backing_store->map() == heap->non_strict_arguments_elements_map();
- if (is_non_strict_arguments_elements_map) {
- backing_store = KindTraits::BackingStore::cast(
- FixedArray::cast(backing_store)->get(1));
- }
- uint32_t length = static_cast<uint32_t>(
- obj->IsJSArray()
- ? Smi::cast(JSArray::cast(obj)->length())->value()
- : backing_store->length());
- if (key < length) {
- if (!is_non_strict_arguments_elements_map) {
- ElementsKind kind = KindTraits::Kind;
- if (IsFastPackedElementsKind(kind)) {
- MaybeObject* transitioned =
- obj->TransitionElementsKind(GetHoleyElementsKind(kind));
- if (transitioned->IsFailure()) return transitioned;
- }
- if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
- Object* writable;
- MaybeObject* maybe = obj->EnsureWritableFastElements();
- if (!maybe->ToObject(&writable)) return maybe;
- backing_store = KindTraits::BackingStore::cast(writable);
- }
- }
- backing_store->set_the_hole(key);
- // If an old space backing store is larger than a certain size and
- // has too few used values, normalize it.
- // To avoid doing the check on every delete we require at least
- // one adjacent hole to the value being deleted.
- const int kMinLengthForSparsenessCheck = 64;
- if (backing_store->length() >= kMinLengthForSparsenessCheck &&
- !heap->InNewSpace(backing_store) &&
- ((key > 0 && backing_store->is_the_hole(key - 1)) ||
- (key + 1 < length && backing_store->is_the_hole(key + 1)))) {
- int num_used = 0;
- for (int i = 0; i < backing_store->length(); ++i) {
- if (!backing_store->is_the_hole(i)) ++num_used;
- // Bail out early if more than 1/4 is used.
- if (4 * num_used > backing_store->length()) break;
- }
- if (4 * num_used <= backing_store->length()) {
- MaybeObject* result = obj->NormalizeElements();
- if (result->IsFailure()) return result;
- }
- }
- }
- return heap->true_value();
- }
-
- virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
- return DeleteCommon(obj, key, mode);
- }
-
- static bool HasElementImpl(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (key >= static_cast<uint32_t>(backing_store->length())) {
- return false;
- }
- return !BackingStore::cast(backing_store)->is_the_hole(key);
- }
-
- static void ValidateContents(JSObject* holder, int length) {
-#if DEBUG
- FixedArrayBase* elements = holder->elements();
- Heap* heap = elements->GetHeap();
- Map* map = elements->map();
- ASSERT((IsFastSmiOrObjectElementsKind(KindTraits::Kind) &&
- (map == heap->fixed_array_map() ||
- map == heap->fixed_cow_array_map())) ||
- (IsFastDoubleElementsKind(KindTraits::Kind) ==
- ((map == heap->fixed_array_map() && length == 0) ||
- map == heap->fixed_double_array_map())));
- for (int i = 0; i < length; i++) {
- typename KindTraits::BackingStore* backing_store =
- KindTraits::BackingStore::cast(elements);
- ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) ||
- static_cast<Object*>(backing_store->get(i))->IsSmi()) ||
- (IsFastHoleyElementsKind(KindTraits::Kind) ==
- backing_store->is_the_hole(i)));
- }
-#endif
- }
-};
-
-
-static inline ElementsKind ElementsKindForArray(FixedArrayBase* array) {
- switch (array->map()->instance_type()) {
- case FIXED_ARRAY_TYPE:
- if (array->IsDictionary()) {
- return DICTIONARY_ELEMENTS;
- } else {
- return FAST_HOLEY_ELEMENTS;
- }
- case FIXED_DOUBLE_ARRAY_TYPE:
- return FAST_HOLEY_DOUBLE_ELEMENTS;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- return EXTERNAL_BYTE_ELEMENTS;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- return EXTERNAL_SHORT_ELEMENTS;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
- case EXTERNAL_INT_ARRAY_TYPE:
- return EXTERNAL_INT_ELEMENTS;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_INT_ELEMENTS;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- return EXTERNAL_FLOAT_ELEMENTS;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- return EXTERNAL_DOUBLE_ELEMENTS;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- return EXTERNAL_PIXEL_ELEMENTS;
- default:
- UNREACHABLE();
- }
- return FAST_HOLEY_ELEMENTS;
-}
-
-
-template<typename FastElementsAccessorSubclass,
- typename KindTraits>
-class FastSmiOrObjectElementsAccessor
- : public FastElementsAccessor<FastElementsAccessorSubclass,
- KindTraits,
- kPointerSize> {
- public:
- explicit FastSmiOrObjectElementsAccessor(const char* name)
- : FastElementsAccessor<FastElementsAccessorSubclass,
- KindTraits,
- kPointerSize>(name) {}
-
- static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
- ElementsKind to_kind = KindTraits::Kind;
- switch (from_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- CopyObjectToObjectElements(
- from, from_kind, from_start, to, to_kind, to_start, copy_size);
- return to->GetHeap()->undefined_value();
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return CopyDoubleToObjectElements(
- from, from_start, to, to_kind, to_start, copy_size);
- case DICTIONARY_ELEMENTS:
- CopyDictionaryToObjectElements(
- from, from_start, to, to_kind, to_start, copy_size);
- return to->GetHeap()->undefined_value();
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
- // TODO(verwaest): This is a temporary hack to support extending
- // NON_STRICT_ARGUMENTS_ELEMENTS in SetFastElementsCapacityAndLength.
- // This case should be UNREACHABLE().
- FixedArray* parameter_map = FixedArray::cast(from);
- FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
- ElementsKind from_kind = ElementsKindForArray(arguments);
- return CopyElementsImpl(arguments, from_start, to, from_kind,
- to_start, packed_size, copy_size);
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- UNREACHABLE();
- }
- return NULL;
- }
-
-
- static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
- uint32_t capacity,
- uint32_t length) {
- JSObject::SetFastElementsCapacitySmiMode set_capacity_mode =
- obj->HasFastSmiElements()
- ? JSObject::kAllowSmiElements
- : JSObject::kDontAllowSmiElements;
- return obj->SetFastElementsCapacityAndLength(capacity,
- length,
- set_capacity_mode);
- }
-};
-
-
-class FastPackedSmiElementsAccessor
- : public FastSmiOrObjectElementsAccessor<
- FastPackedSmiElementsAccessor,
- ElementsKindTraits<FAST_SMI_ELEMENTS> > {
- public:
- explicit FastPackedSmiElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastPackedSmiElementsAccessor,
- ElementsKindTraits<FAST_SMI_ELEMENTS> >(name) {}
-};
-
-
-class FastHoleySmiElementsAccessor
- : public FastSmiOrObjectElementsAccessor<
- FastHoleySmiElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> > {
- public:
- explicit FastHoleySmiElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastHoleySmiElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> >(name) {}
-};
-
-
-class FastPackedObjectElementsAccessor
- : public FastSmiOrObjectElementsAccessor<
- FastPackedObjectElementsAccessor,
- ElementsKindTraits<FAST_ELEMENTS> > {
- public:
- explicit FastPackedObjectElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastPackedObjectElementsAccessor,
- ElementsKindTraits<FAST_ELEMENTS> >(name) {}
-};
-
-
-class FastHoleyObjectElementsAccessor
- : public FastSmiOrObjectElementsAccessor<
- FastHoleyObjectElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_ELEMENTS> > {
- public:
- explicit FastHoleyObjectElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastHoleyObjectElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_ELEMENTS> >(name) {}
-};
-
-
-template<typename FastElementsAccessorSubclass,
- typename KindTraits>
-class FastDoubleElementsAccessor
- : public FastElementsAccessor<FastElementsAccessorSubclass,
- KindTraits,
- kDoubleSize> {
- public:
- explicit FastDoubleElementsAccessor(const char* name)
- : FastElementsAccessor<FastElementsAccessorSubclass,
- KindTraits,
- kDoubleSize>(name) {}
-
- static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
- uint32_t capacity,
- uint32_t length) {
- return obj->SetFastDoubleElementsCapacityAndLength(capacity,
- length);
- }
-
- protected:
- static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
- switch (from_kind) {
- case FAST_SMI_ELEMENTS:
- CopyPackedSmiToDoubleElements(
- from, from_start, to, to_start, packed_size, copy_size);
- break;
- case FAST_HOLEY_SMI_ELEMENTS:
- CopySmiToDoubleElements(from, from_start, to, to_start, copy_size);
- break;
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- CopyDoubleToDoubleElements(from, from_start, to, to_start, copy_size);
- break;
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
- break;
- case DICTIONARY_ELEMENTS:
- CopyDictionaryToDoubleElements(
- from, from_start, to, to_start, copy_size);
- break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- UNREACHABLE();
- }
- return to->GetHeap()->undefined_value();
- }
-};
-
-
-class FastPackedDoubleElementsAccessor
- : public FastDoubleElementsAccessor<
- FastPackedDoubleElementsAccessor,
- ElementsKindTraits<FAST_DOUBLE_ELEMENTS> > {
- public:
- friend class ElementsAccessorBase<FastPackedDoubleElementsAccessor,
- ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
- explicit FastPackedDoubleElementsAccessor(const char* name)
- : FastDoubleElementsAccessor<
- FastPackedDoubleElementsAccessor,
- ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >(name) {}
-};
-
-
-class FastHoleyDoubleElementsAccessor
- : public FastDoubleElementsAccessor<
- FastHoleyDoubleElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> > {
- public:
- friend class ElementsAccessorBase<
- FastHoleyDoubleElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >;
- explicit FastHoleyDoubleElementsAccessor(const char* name)
- : FastDoubleElementsAccessor<
- FastHoleyDoubleElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >(name) {}
-};
-
-
-// Super class for all external element arrays.
-template<typename ExternalElementsAccessorSubclass,
- ElementsKind Kind>
-class ExternalElementsAccessor
- : public ElementsAccessorBase<ExternalElementsAccessorSubclass,
- ElementsKindTraits<Kind> > {
- public:
- explicit ExternalElementsAccessor(const char* name)
- : ElementsAccessorBase<ExternalElementsAccessorSubclass,
- ElementsKindTraits<Kind> >(name) {}
-
- protected:
- typedef typename ElementsKindTraits<Kind>::BackingStore BackingStore;
-
- friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
- ElementsKindTraits<Kind> >;
-
- MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- return
- key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
- ? BackingStore::cast(backing_store)->get(key)
- : backing_store->GetHeap()->undefined_value();
- }
-
- MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- return
- key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
- ? NONE : ABSENT;
- }
-
- MUST_USE_RESULT static PropertyType GetTypeImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- return
- key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
- ? FIELD : NONEXISTENT;
- }
-
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- FixedArrayBase* backing_store) {
- // External arrays do not support changing their length.
- UNREACHABLE();
- return obj;
- }
-
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
- // External arrays always ignore deletes.
- return obj->GetHeap()->true_value();
- }
-
- static bool HasElementImpl(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- uint32_t capacity =
- ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store);
- return key < capacity;
- }
-};
-
-
-class ExternalByteElementsAccessor
- : public ExternalElementsAccessor<ExternalByteElementsAccessor,
- EXTERNAL_BYTE_ELEMENTS> {
- public:
- explicit ExternalByteElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalByteElementsAccessor,
- EXTERNAL_BYTE_ELEMENTS>(name) {}
-};
-
-
-class ExternalUnsignedByteElementsAccessor
- : public ExternalElementsAccessor<ExternalUnsignedByteElementsAccessor,
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS> {
- public:
- explicit ExternalUnsignedByteElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalUnsignedByteElementsAccessor,
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS>(name) {}
-};
-
-
-class ExternalShortElementsAccessor
- : public ExternalElementsAccessor<ExternalShortElementsAccessor,
- EXTERNAL_SHORT_ELEMENTS> {
- public:
- explicit ExternalShortElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalShortElementsAccessor,
- EXTERNAL_SHORT_ELEMENTS>(name) {}
-};
-
-
-class ExternalUnsignedShortElementsAccessor
- : public ExternalElementsAccessor<ExternalUnsignedShortElementsAccessor,
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS> {
- public:
- explicit ExternalUnsignedShortElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalUnsignedShortElementsAccessor,
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS>(name) {}
-};
-
-
-class ExternalIntElementsAccessor
- : public ExternalElementsAccessor<ExternalIntElementsAccessor,
- EXTERNAL_INT_ELEMENTS> {
- public:
- explicit ExternalIntElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalIntElementsAccessor,
- EXTERNAL_INT_ELEMENTS>(name) {}
-};
-
-
-class ExternalUnsignedIntElementsAccessor
- : public ExternalElementsAccessor<ExternalUnsignedIntElementsAccessor,
- EXTERNAL_UNSIGNED_INT_ELEMENTS> {
- public:
- explicit ExternalUnsignedIntElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalUnsignedIntElementsAccessor,
- EXTERNAL_UNSIGNED_INT_ELEMENTS>(name) {}
-};
-
-
-class ExternalFloatElementsAccessor
- : public ExternalElementsAccessor<ExternalFloatElementsAccessor,
- EXTERNAL_FLOAT_ELEMENTS> {
- public:
- explicit ExternalFloatElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalFloatElementsAccessor,
- EXTERNAL_FLOAT_ELEMENTS>(name) {}
-};
-
-
-class ExternalDoubleElementsAccessor
- : public ExternalElementsAccessor<ExternalDoubleElementsAccessor,
- EXTERNAL_DOUBLE_ELEMENTS> {
- public:
- explicit ExternalDoubleElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalDoubleElementsAccessor,
- EXTERNAL_DOUBLE_ELEMENTS>(name) {}
-};
-
-
-class PixelElementsAccessor
- : public ExternalElementsAccessor<PixelElementsAccessor,
- EXTERNAL_PIXEL_ELEMENTS> {
- public:
- explicit PixelElementsAccessor(const char* name)
- : ExternalElementsAccessor<PixelElementsAccessor,
- EXTERNAL_PIXEL_ELEMENTS>(name) {}
-};
-
-
-class DictionaryElementsAccessor
- : public ElementsAccessorBase<DictionaryElementsAccessor,
- ElementsKindTraits<DICTIONARY_ELEMENTS> > {
- public:
- explicit DictionaryElementsAccessor(const char* name)
- : ElementsAccessorBase<DictionaryElementsAccessor,
- ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
-
- // Adjusts the length of the dictionary backing store and returns the new
- // length according to ES5 section 15.4.5.2 behavior.
- MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize(
- FixedArrayBase* store,
- JSArray* array,
- Object* length_object,
- uint32_t length) {
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
- Heap* heap = array->GetHeap();
- int capacity = dict->Capacity();
- uint32_t new_length = length;
- uint32_t old_length = static_cast<uint32_t>(array->length()->Number());
- if (new_length < old_length) {
- // Find last non-deletable element in range of elements to be
- // deleted and adjust range accordingly.
- for (int i = 0; i < capacity; i++) {
- Object* key = dict->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t number = static_cast<uint32_t>(key->Number());
- if (new_length <= number && number < old_length) {
- PropertyDetails details = dict->DetailsAt(i);
- if (details.IsDontDelete()) new_length = number + 1;
- }
- }
- }
- if (new_length != length) {
- MaybeObject* maybe_object = heap->NumberFromUint32(new_length);
- if (!maybe_object->To(&length_object)) return maybe_object;
- }
- }
-
- if (new_length == 0) {
- // If the length of a slow array is reset to zero, we clear
- // the array and flush backing storage. This has the added
- // benefit that the array returns to fast mode.
- Object* obj;
- MaybeObject* maybe_obj = array->ResetElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- } else {
- // Remove elements that should be deleted.
- int removed_entries = 0;
- Object* the_hole_value = heap->the_hole_value();
- for (int i = 0; i < capacity; i++) {
- Object* key = dict->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t number = static_cast<uint32_t>(key->Number());
- if (new_length <= number && number < old_length) {
- dict->SetEntry(i, the_hole_value, the_hole_value);
- removed_entries++;
- }
- }
- }
-
- // Update the number of elements.
- dict->ElementsRemoved(removed_entries);
- }
- return length_object;
- }
-
- MUST_USE_RESULT static MaybeObject* DeleteCommon(
- JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
- Isolate* isolate = obj->GetIsolate();
- Heap* heap = isolate->heap();
- FixedArray* backing_store = FixedArray::cast(obj->elements());
- bool is_arguments =
- (obj->GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS);
- if (is_arguments) {
- backing_store = FixedArray::cast(backing_store->get(1));
- }
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(backing_store);
- int entry = dictionary->FindEntry(key);
- if (entry != SeededNumberDictionary::kNotFound) {
- Object* result = dictionary->DeleteProperty(entry, mode);
- if (result == heap->false_value()) {
- if (mode == JSObject::STRICT_DELETION) {
- // Deleting a non-configurable property in strict mode.
- HandleScope scope(isolate);
- Handle<Object> holder(obj, isolate);
- Handle<Object> name = isolate->factory()->NewNumberFromUint(key);
- Handle<Object> args[2] = { name, holder };
- Handle<Object> error =
- isolate->factory()->NewTypeError("strict_delete_property",
- HandleVector(args, 2));
- return isolate->Throw(*error);
- }
- return heap->false_value();
- }
- MaybeObject* maybe_elements = dictionary->Shrink(key);
- FixedArray* new_elements = NULL;
- if (!maybe_elements->To(&new_elements)) {
- return maybe_elements;
- }
- if (is_arguments) {
- FixedArray::cast(obj->elements())->set(1, new_elements);
- } else {
- obj->set_elements(new_elements);
- }
- }
- return heap->true_value();
- }
-
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
- UNREACHABLE();
- return NULL;
- }
-
-
- protected:
- friend class ElementsAccessorBase<DictionaryElementsAccessor,
- ElementsKindTraits<DICTIONARY_ELEMENTS> >;
-
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
- return DeleteCommon(obj, key, mode);
- }
-
- MUST_USE_RESULT static MaybeObject* GetImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* store) {
- SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
- int entry = backing_store->FindEntry(key);
- if (entry != SeededNumberDictionary::kNotFound) {
- Object* element = backing_store->ValueAt(entry);
- PropertyDetails details = backing_store->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
- return obj->GetElementWithCallback(receiver,
- element,
- key,
- obj);
- } else {
- return element;
- }
- }
- return obj->GetHeap()->the_hole_value();
- }
-
- MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(backing_store);
- int entry = dictionary->FindEntry(key);
- if (entry != SeededNumberDictionary::kNotFound) {
- return dictionary->DetailsAt(entry).attributes();
- }
- return ABSENT;
- }
-
- MUST_USE_RESULT static PropertyType GetTypeImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* store) {
- SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
- int entry = backing_store->FindEntry(key);
- if (entry != SeededNumberDictionary::kNotFound) {
- return backing_store->DetailsAt(entry).type();
- }
- return NONEXISTENT;
- }
-
- MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* store) {
- SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
- int entry = backing_store->FindEntry(key);
- if (entry != SeededNumberDictionary::kNotFound &&
- backing_store->DetailsAt(entry).type() == CALLBACKS &&
- backing_store->ValueAt(entry)->IsAccessorPair()) {
- return AccessorPair::cast(backing_store->ValueAt(entry));
- }
- return NULL;
- }
-
- static bool HasElementImpl(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- return SeededNumberDictionary::cast(backing_store)->FindEntry(key) !=
- SeededNumberDictionary::kNotFound;
- }
-
- static uint32_t GetKeyForIndexImpl(FixedArrayBase* store,
- uint32_t index) {
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
- Object* key = dict->KeyAt(index);
- return Smi::cast(key)->value();
- }
-};
-
-
-class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
- NonStrictArgumentsElementsAccessor,
- ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> > {
- public:
- explicit NonStrictArgumentsElementsAccessor(const char* name)
- : ElementsAccessorBase<
- NonStrictArgumentsElementsAccessor,
- ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >(name) {}
- protected:
- friend class ElementsAccessorBase<
- NonStrictArgumentsElementsAccessor,
- ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >;
-
- MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* parameters) {
- FixedArray* parameter_map = FixedArray::cast(parameters);
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
- if (!probe->IsTheHole()) {
- Context* context = Context::cast(parameter_map->get(0));
- int context_index = Smi::cast(probe)->value();
- ASSERT(!context->get(context_index)->IsTheHole());
- return context->get(context_index);
- } else {
- // Object is not mapped, defer to the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- MaybeObject* maybe_result = ElementsAccessor::ForArray(arguments)->Get(
- receiver, obj, key, arguments);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // Elements of the arguments object in slow mode might be slow aliases.
- if (result->IsAliasedArgumentsEntry()) {
- AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(result);
- Context* context = Context::cast(parameter_map->get(0));
- int context_index = entry->aliased_context_slot();
- ASSERT(!context->get(context_index)->IsTheHole());
- return context->get(context_index);
- } else {
- return result;
- }
- }
- }
-
- MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- FixedArray* parameter_map = FixedArray::cast(backing_store);
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
- if (!probe->IsTheHole()) {
- return NONE;
- } else {
- // If not aliased, check the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- return ElementsAccessor::ForArray(arguments)->GetAttributes(
- receiver, obj, key, arguments);
- }
- }
-
- MUST_USE_RESULT static PropertyType GetTypeImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* parameters) {
- FixedArray* parameter_map = FixedArray::cast(parameters);
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
- if (!probe->IsTheHole()) {
- return FIELD;
- } else {
- // If not aliased, check the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- return ElementsAccessor::ForArray(arguments)->GetType(
- receiver, obj, key, arguments);
- }
- }
-
- MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* parameters) {
- FixedArray* parameter_map = FixedArray::cast(parameters);
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
- if (!probe->IsTheHole()) {
- return NULL;
- } else {
- // If not aliased, check the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- return ElementsAccessor::ForArray(arguments)->GetAccessorPair(
- receiver, obj, key, arguments);
- }
- }
-
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- FixedArrayBase* parameter_map) {
- // TODO(mstarzinger): This was never implemented but will be used once we
- // correctly implement [[DefineOwnProperty]] on arrays.
- UNIMPLEMENTED();
- return obj;
- }
-
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
- FixedArray* parameter_map = FixedArray::cast(obj->elements());
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
- if (!probe->IsTheHole()) {
- // TODO(kmillikin): We could check if this was the last aliased
- // parameter, and revert to normal elements in that case. That
- // would enable GC of the context.
- parameter_map->set_the_hole(key + 2);
- } else {
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- if (arguments->IsDictionary()) {
- return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
- } else {
- // It's difficult to access the version of DeleteCommon that is declared
- // in the templatized super class, call the concrete implementation in
- // the class for the most generalized ElementsKind subclass.
- return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key, mode);
- }
- }
- return obj->GetHeap()->true_value();
- }
-
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
- UNREACHABLE();
- return NULL;
- }
-
- static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
- FixedArray* parameter_map = FixedArray::cast(backing_store);
- FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
- return Max(static_cast<uint32_t>(parameter_map->length() - 2),
- ForArray(arguments)->GetCapacity(arguments));
- }
-
- static uint32_t GetKeyForIndexImpl(FixedArrayBase* dict,
- uint32_t index) {
- return index;
- }
-
- static bool HasElementImpl(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* parameters) {
- FixedArray* parameter_map = FixedArray::cast(parameters);
- Object* probe = GetParameterMapArg(holder, parameter_map, key);
- if (!probe->IsTheHole()) {
- return true;
- } else {
- FixedArrayBase* arguments =
- FixedArrayBase::cast(FixedArray::cast(parameter_map)->get(1));
- ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
- return !accessor->Get(receiver, holder, key, arguments)->IsTheHole();
- }
- }
-
- private:
- static Object* GetParameterMapArg(JSObject* holder,
- FixedArray* parameter_map,
- uint32_t key) {
- uint32_t length = holder->IsJSArray()
- ? Smi::cast(JSArray::cast(holder)->length())->value()
- : parameter_map->length();
- return key < (length - 2)
- ? parameter_map->get(key + 2)
- : parameter_map->GetHeap()->the_hole_value();
- }
-};
-
-
-ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
- return elements_accessors_[ElementsKindForArray(array)];
-}
-
-
-void ElementsAccessor::InitializeOncePerProcess() {
- static ElementsAccessor* accessor_array[] = {
-#define ACCESSOR_ARRAY(Class, Kind, Store) new Class(#Kind),
- ELEMENTS_LIST(ACCESSOR_ARRAY)
-#undef ACCESSOR_ARRAY
- };
-
- STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
- kElementsKindCount);
-
- elements_accessors_ = accessor_array;
-}
-
-
-void ElementsAccessor::TearDown() {
-#define ACCESSOR_DELETE(Class, Kind, Store) delete elements_accessors_[Kind];
- ELEMENTS_LIST(ACCESSOR_DELETE)
-#undef ACCESSOR_DELETE
- elements_accessors_ = NULL;
-}
-
-
-template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
-MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
- ElementsKindTraits>::
- SetLengthImpl(JSObject* obj,
- Object* length,
- FixedArrayBase* backing_store) {
- JSArray* array = JSArray::cast(obj);
-
- // Fast case: The new length fits into a Smi.
- MaybeObject* maybe_smi_length = length->ToSmi();
- Object* smi_length = Smi::FromInt(0);
- if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
- const int value = Smi::cast(smi_length)->value();
- if (value >= 0) {
- Object* new_length;
- MaybeObject* result = ElementsAccessorSubclass::
- SetLengthWithoutNormalize(backing_store, array, smi_length, value);
- if (!result->ToObject(&new_length)) return result;
- ASSERT(new_length->IsSmi() || new_length->IsUndefined());
- if (new_length->IsSmi()) {
- array->set_length(Smi::cast(new_length));
- return array;
- }
- } else {
- return ThrowArrayLengthRangeError(array->GetHeap());
- }
- }
-
- // Slow case: The new length does not fit into a Smi or conversion
- // to slow elements is needed for other reasons.
- if (length->IsNumber()) {
- uint32_t value;
- if (length->ToArrayIndex(&value)) {
- SeededNumberDictionary* dictionary;
- MaybeObject* maybe_object = array->NormalizeElements();
- if (!maybe_object->To(&dictionary)) return maybe_object;
- Object* new_length;
- MaybeObject* result = DictionaryElementsAccessor::
- SetLengthWithoutNormalize(dictionary, array, length, value);
- if (!result->ToObject(&new_length)) return result;
- ASSERT(new_length->IsNumber());
- array->set_length(new_length);
- return array;
- } else {
- return ThrowArrayLengthRangeError(array->GetHeap());
- }
- }
-
- // Fall-back case: The new length is not a number so make the array
- // size one and set only element to length.
- FixedArray* new_backing_store;
- MaybeObject* maybe_obj = array->GetHeap()->AllocateFixedArray(1);
- if (!maybe_obj->To(&new_backing_store)) return maybe_obj;
- new_backing_store->set(0, length);
- { MaybeObject* result = array->SetContent(new_backing_store);
- if (result->IsFailure()) return result;
- }
- return array;
-}
-
-
-MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
- JSArray* array, Arguments* args) {
- Heap* heap = array->GetIsolate()->heap();
-
- // Optimize the case where there is one argument and the argument is a
- // small smi.
- if (args->length() == 1) {
- Object* obj = (*args)[0];
- if (obj->IsSmi()) {
- int len = Smi::cast(obj)->value();
- if (len > 0 && len < JSObject::kInitialMaxFastElementArray) {
- ElementsKind elements_kind = array->GetElementsKind();
- MaybeObject* maybe_array = array->Initialize(len, len);
- if (maybe_array->IsFailure()) return maybe_array;
-
- if (!IsFastHoleyElementsKind(elements_kind)) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- maybe_array = array->TransitionElementsKind(elements_kind);
- if (maybe_array->IsFailure()) return maybe_array;
- }
-
- return array;
- } else if (len == 0) {
- return array->Initialize(JSArray::kPreallocatedArrayElements);
- }
- }
-
- // Take the argument as the length.
- MaybeObject* maybe_obj = array->Initialize(0);
- if (!maybe_obj->To(&obj)) return maybe_obj;
-
- return array->SetElementsLength((*args)[0]);
- }
-
- // Optimize the case where there are no parameters passed.
- if (args->length() == 0) {
- return array->Initialize(JSArray::kPreallocatedArrayElements);
- }
-
- // Set length and elements on the array.
- int number_of_elements = args->length();
- MaybeObject* maybe_object =
- array->EnsureCanContainElements(args, 0, number_of_elements,
- ALLOW_CONVERTED_DOUBLE_ELEMENTS);
- if (maybe_object->IsFailure()) return maybe_object;
-
- // Allocate an appropriately typed elements array.
- MaybeObject* maybe_elms;
- ElementsKind elements_kind = array->GetElementsKind();
- if (IsFastDoubleElementsKind(elements_kind)) {
- maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
- number_of_elements);
- } else {
- maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
- }
- FixedArrayBase* elms;
- if (!maybe_elms->To(&elms)) return maybe_elms;
-
- // Fill in the content
- switch (array->GetElementsKind()) {
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS: {
- FixedArray* smi_elms = FixedArray::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- smi_elms->set(index, (*args)[index], SKIP_WRITE_BARRIER);
- }
- break;
- }
- case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS: {
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- FixedArray* object_elms = FixedArray::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- object_elms->set(index, (*args)[index], mode);
- }
- break;
- }
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- double_elms->set(index, (*args)[index]->Number());
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- array->set_elements(elms);
- array->set_length(Smi::FromInt(number_of_elements));
- return array;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/elements.h b/src/3rdparty/v8/src/elements.h
deleted file mode 100644
index 6353aae..0000000
--- a/src/3rdparty/v8/src/elements.h
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ELEMENTS_H_
-#define V8_ELEMENTS_H_
-
-#include "elements-kind.h"
-#include "objects.h"
-#include "heap.h"
-#include "isolate.h"
-
-namespace v8 {
-namespace internal {
-
-// Abstract base class for handles that can operate on objects with differing
-// ElementsKinds.
-class ElementsAccessor {
- public:
- explicit ElementsAccessor(const char* name) : name_(name) { }
- virtual ~ElementsAccessor() { }
-
- virtual ElementsKind kind() const = 0;
- const char* name() const { return name_; }
-
- // Checks the elements of an object for consistency, asserting when a problem
- // is found.
- virtual void Validate(JSObject* obj) = 0;
-
- // Returns true if a holder contains an element with the specified key
- // without iterating up the prototype chain. The caller can optionally pass
- // in the backing store to use for the check, which must be compatible with
- // the ElementsKind of the ElementsAccessor. If backing_store is NULL, the
- // holder->elements() is used as the backing store.
- virtual bool HasElement(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
-
- // Returns the element with the specified key or undefined if there is no such
- // element. This method doesn't iterate up the prototype chain. The caller
- // can optionally pass in the backing store to use for the check, which must
- // be compatible with the ElementsKind of the ElementsAccessor. If
- // backing_store is NULL, the holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual MaybeObject* Get(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
-
- // Returns an element's attributes, or ABSENT if there is no such
- // element. This method doesn't iterate up the prototype chain. The caller
- // can optionally pass in the backing store to use for the check, which must
- // be compatible with the ElementsKind of the ElementsAccessor. If
- // backing_store is NULL, the holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
-
- // Returns an element's type, or NONEXISTENT if there is no such
- // element. This method doesn't iterate up the prototype chain. The caller
- // can optionally pass in the backing store to use for the check, which must
- // be compatible with the ElementsKind of the ElementsAccessor. If
- // backing_store is NULL, the holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual PropertyType GetType(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
-
- // Returns an element's accessors, or NULL if the element does not exist or
- // is plain. This method doesn't iterate up the prototype chain. The caller
- // can optionally pass in the backing store to use for the check, which must
- // be compatible with the ElementsKind of the ElementsAccessor. If
- // backing_store is NULL, the holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
-
- // Modifies the length data property as specified for JSArrays and resizes the
- // underlying backing store accordingly. The method honors the semantics of
- // changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
- // have non-deletable elements can only be shrunk to the size of highest
- // element that is non-deletable.
- MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* holder,
- Object* new_length) = 0;
-
- // Modifies both the length and capacity of a JSArray, resizing the underlying
- // backing store as necessary. This method does NOT honor the semantics of
- // EcmaScript 5.1 15.4.5.2, arrays can be shrunk beyond non-deletable
- // elements. This method should only be called for array expansion OR by
- // runtime JavaScript code that use InternalArrays and don't care about
- // EcmaScript 5.1 semantics.
- MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
- int capacity,
- int length) = 0;
-
- // Deletes an element in an object, returning a new elements backing store.
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* holder,
- uint32_t key,
- JSReceiver::DeleteMode mode) = 0;
-
- // If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
- // of elements from source after source_start to the destination array.
- static const int kCopyToEnd = -1;
- // If kCopyToEndAndInitializeToHole is specified as the copy_size to
- // CopyElements, it copies all of elements from source after source_start to
- // destination array, padding any remaining uninitialized elements in the
- // destination array with the hole.
- static const int kCopyToEndAndInitializeToHole = -2;
-
- // Copy elements from one backing store to another. Typically, callers specify
- // the source JSObject or JSArray in source_holder. If the holder's backing
- // store is available, it can be passed in source and source_holder is
- // ignored.
- MUST_USE_RESULT virtual MaybeObject* CopyElements(
- JSObject* source_holder,
- uint32_t source_start,
- ElementsKind source_kind,
- FixedArrayBase* destination,
- uint32_t destination_start,
- int copy_size,
- FixedArrayBase* source = NULL) = 0;
-
- MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
- FixedArrayBase* to,
- ElementsKind from_kind,
- FixedArrayBase* from = NULL) {
- return CopyElements(from_holder, 0, from_kind, to, 0,
- kCopyToEndAndInitializeToHole, from);
- }
-
- MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
- Object* receiver,
- JSObject* holder,
- FixedArray* to,
- FixedArrayBase* from = NULL) = 0;
-
- // Returns a shared ElementsAccessor for the specified ElementsKind.
- static ElementsAccessor* ForKind(ElementsKind elements_kind) {
- ASSERT(elements_kind < kElementsKindCount);
- return elements_accessors_[elements_kind];
- }
-
- static ElementsAccessor* ForArray(FixedArrayBase* array);
-
- static void InitializeOncePerProcess();
- static void TearDown();
-
- protected:
- friend class NonStrictArgumentsElementsAccessor;
-
- virtual uint32_t GetCapacity(FixedArrayBase* backing_store) = 0;
-
- // Element handlers distinguish between indexes and keys when they manipulate
- // elements. Indexes refer to elements in terms of their location in the
- // underlying storage's backing store representation, and are between 0 and
- // GetCapacity. Keys refer to elements in terms of the value that would be
- // specified in JavaScript to access the element. In most implementations,
- // keys are equivalent to indexes, and GetKeyForIndex returns the same value
- // it is passed. In the NumberDictionary ElementsAccessor, GetKeyForIndex maps
- // the index to a key using the KeyAt method on the NumberDictionary.
- virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
- uint32_t index) = 0;
-
- private:
- static ElementsAccessor** elements_accessors_;
- const char* name_;
-
- DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
-};
-
-void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
- bool allow_appending = false);
-
-MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
- JSArray* array, Arguments* args);
-
-} } // namespace v8::internal
-
-#endif // V8_ELEMENTS_H_
diff --git a/src/3rdparty/v8/src/execution.cc b/src/3rdparty/v8/src/execution.cc
deleted file mode 100644
index fc153b4..0000000
--- a/src/3rdparty/v8/src/execution.cc
+++ /dev/null
@@ -1,972 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "api.h"
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "debug.h"
-#include "isolate-inl.h"
-#include "runtime-profiler.h"
-#include "simulator.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-StackGuard::StackGuard()
- : isolate_(NULL) {
-}
-
-
-void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
- ASSERT(isolate_ != NULL);
- // Ignore attempts to interrupt when interrupts are postponed.
- if (should_postpone_interrupts(lock)) return;
- thread_local_.jslimit_ = kInterruptLimit;
- thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
-}
-
-
-void StackGuard::reset_limits(const ExecutionAccess& lock) {
- ASSERT(isolate_ != NULL);
- thread_local_.jslimit_ = thread_local_.real_jslimit_;
- thread_local_.climit_ = thread_local_.real_climit_;
- isolate_->heap()->SetStackLimits();
-}
-
-
-static Handle<Object> Invoke(bool is_construct,
- Handle<JSFunction> function,
- Handle<Object> receiver,
- int argc,
- Handle<Object> args[],
- bool* has_pending_exception,
- Handle<Object> qml) {
- Isolate* isolate = function->GetIsolate();
-
- // Entering JavaScript.
- VMState state(isolate, JS);
-
- // Placeholder for return value.
- MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
-
- typedef Object* (*JSEntryFunction)(byte* entry,
- Object* function,
- Object* receiver,
- int argc,
- Object*** args);
-
- Handle<Code> code = is_construct
- ? isolate->factory()->js_construct_entry_code()
- : isolate->factory()->js_entry_code();
-
- // Convert calls on global objects to be calls on the global
- // receiver instead to avoid having a 'this' pointer which refers
- // directly to a global object.
- if (receiver->IsGlobalObject()) {
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
- receiver = Handle<JSObject>(global->global_receiver());
- }
-
- // Make sure that the global object of the context we're about to
- // make the current one is indeed a global object.
- ASSERT(function->context()->global_object()->IsGlobalObject());
-
- Handle<JSObject> oldqml;
- if (!qml.is_null()) {
- oldqml = Handle<JSObject>(function->context()->qml_global_object());
- function->context()->set_qml_global_object(JSObject::cast(*qml));
- }
-
- {
- // Save and restore context around invocation and block the
- // allocation of handles without explicit handle scopes.
- SaveContext save(isolate);
- NoHandleAllocation na(isolate);
- JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
-
- // Call the function through the right JS entry stub.
- byte* function_entry = function->code()->entry();
- JSFunction* func = *function;
- Object* recv = *receiver;
- Object*** argv = reinterpret_cast<Object***>(args);
- value =
- CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
- }
-
- if (!qml.is_null())
- function->context()->set_qml_global_object(*oldqml);
-
-#ifdef VERIFY_HEAP
- value->Verify();
-#endif
-
- // Update the pending exception flag and return the value.
- *has_pending_exception = value->IsException();
- ASSERT(*has_pending_exception == isolate->has_pending_exception());
- if (*has_pending_exception) {
- isolate->ReportPendingMessages();
- if (isolate->pending_exception()->IsOutOfMemory()) {
- if (!isolate->ignore_out_of_memory()) {
- V8::FatalProcessOutOfMemory("JS", true);
- }
- }
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Reset stepping state when script exits with uncaught exception.
- if (isolate->debugger()->IsDebuggerActive()) {
- isolate->debug()->ClearStepping();
- }
-#endif // ENABLE_DEBUGGER_SUPPORT
- return Handle<Object>();
- } else {
- isolate->clear_pending_message();
- }
-
- return Handle<Object>(value->ToObjectUnchecked(), isolate);
-}
-
-
-Handle<Object> Execution::Call(Handle<Object> callable,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[],
- bool* pending_exception,
- bool convert_receiver) {
- return Call(callable, receiver, argc, argv, pending_exception,
- convert_receiver, Handle<Object>());
-}
-
-Handle<Object> Execution::Call(Handle<Object> callable,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[],
- bool* pending_exception,
- bool convert_receiver,
- Handle<Object> qml) {
- *pending_exception = false;
-
- if (!callable->IsJSFunction()) {
- callable = TryGetFunctionDelegate(callable, pending_exception);
- if (*pending_exception) return callable;
- }
- Handle<JSFunction> func = Handle<JSFunction>::cast(callable);
-
- // In non-strict mode, convert receiver.
- if (convert_receiver && !receiver->IsJSReceiver() &&
- !func->shared()->native() && func->shared()->is_classic_mode()) {
- if (receiver->IsUndefined() || receiver->IsNull()) {
- Object* global = func->context()->global_object()->global_receiver();
- // Under some circumstances, 'global' can be the JSBuiltinsObject
- // In that case, don't rewrite. (FWIW, the same holds for
- // GetIsolate()->global_object()->global_receiver().)
- if (!global->IsJSBuiltinsObject()) {
- receiver = Handle<Object>(global, func->GetIsolate());
- }
- } else {
- receiver = ToObject(receiver, pending_exception);
- }
- if (*pending_exception) return callable;
- }
-
- return Invoke(false, func, receiver, argc, argv, pending_exception, qml);
-}
-
-
-Handle<Object> Execution::New(Handle<JSFunction> func,
- int argc,
- Handle<Object> argv[],
- bool* pending_exception) {
- return Invoke(true, func, func->GetIsolate()->global_object(), argc, argv,
- pending_exception, Handle<Object>());
-}
-
-
-Handle<Object> Execution::TryCall(Handle<JSFunction> func,
- Handle<Object> receiver,
- int argc,
- Handle<Object> args[],
- bool* caught_exception) {
- // Enter a try-block while executing the JavaScript code. To avoid
- // duplicate error printing it must be non-verbose. Also, to avoid
- // creating message objects during stack overflow we shouldn't
- // capture messages.
- v8::TryCatch catcher;
- catcher.SetVerbose(false);
- catcher.SetCaptureMessage(false);
- *caught_exception = false;
-
- Handle<Object> result = Invoke(false, func, receiver, argc, args,
- caught_exception, Handle<Object>());
-
- Isolate* isolate = func->GetIsolate();
- if (*caught_exception) {
- ASSERT(catcher.HasCaught());
- ASSERT(isolate->has_pending_exception());
- ASSERT(isolate->external_caught_exception());
- if (isolate->is_out_of_memory() && !isolate->ignore_out_of_memory()) {
- V8::FatalProcessOutOfMemory("OOM during Execution::TryCall");
- }
- if (isolate->pending_exception() ==
- isolate->heap()->termination_exception()) {
- result = isolate->factory()->termination_exception();
- } else {
- result = v8::Utils::OpenHandle(*catcher.Exception());
- }
- isolate->OptionalRescheduleException(true);
- }
-
- ASSERT(!isolate->has_pending_exception());
- ASSERT(!isolate->external_caught_exception());
- return result;
-}
-
-
-Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
- ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- // If you return a function from here, it will be called when an
- // attempt is made to call the given object as a function.
-
- // If object is a function proxy, get its handler. Iterate if necessary.
- Object* fun = *object;
- while (fun->IsJSFunctionProxy()) {
- fun = JSFunctionProxy::cast(fun)->call_trap();
- }
- if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
-
- // Objects created through the API can have an instance-call handler
- // that should be used when calling the object as a function.
- if (fun->IsHeapObject() &&
- HeapObject::cast(fun)->map()->has_instance_call_handler()) {
- return Handle<JSFunction>(
- isolate->native_context()->call_as_function_delegate());
- }
-
- return factory->undefined_value();
-}
-
-
-Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
- bool* has_pending_exception) {
- ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
-
- // If object is a function proxy, get its handler. Iterate if necessary.
- Object* fun = *object;
- while (fun->IsJSFunctionProxy()) {
- fun = JSFunctionProxy::cast(fun)->call_trap();
- }
- if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
-
- // Objects created through the API can have an instance-call handler
- // that should be used when calling the object as a function.
- if (fun->IsHeapObject() &&
- HeapObject::cast(fun)->map()->has_instance_call_handler()) {
- return Handle<JSFunction>(
- isolate->native_context()->call_as_function_delegate());
- }
-
- // If the Object doesn't have an instance-call handler we should
- // throw a non-callable exception.
- i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
- "called_non_callable", i::HandleVector<i::Object>(&object, 1));
- isolate->Throw(*error_obj);
- *has_pending_exception = true;
-
- return isolate->factory()->undefined_value();
-}
-
-
-Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
- ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
-
- // If you return a function from here, it will be called when an
- // attempt is made to call the given object as a constructor.
-
- // If object is a function proxies, get its handler. Iterate if necessary.
- Object* fun = *object;
- while (fun->IsJSFunctionProxy()) {
- fun = JSFunctionProxy::cast(fun)->call_trap();
- }
- if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
-
- // Objects created through the API can have an instance-call handler
- // that should be used when calling the object as a function.
- if (fun->IsHeapObject() &&
- HeapObject::cast(fun)->map()->has_instance_call_handler()) {
- return Handle<JSFunction>(
- isolate->native_context()->call_as_constructor_delegate());
- }
-
- return isolate->factory()->undefined_value();
-}
-
-
-Handle<Object> Execution::TryGetConstructorDelegate(
- Handle<Object> object,
- bool* has_pending_exception) {
- ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
-
- // If you return a function from here, it will be called when an
- // attempt is made to call the given object as a constructor.
-
- // If object is a function proxies, get its handler. Iterate if necessary.
- Object* fun = *object;
- while (fun->IsJSFunctionProxy()) {
- fun = JSFunctionProxy::cast(fun)->call_trap();
- }
- if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
-
- // Objects created through the API can have an instance-call handler
- // that should be used when calling the object as a function.
- if (fun->IsHeapObject() &&
- HeapObject::cast(fun)->map()->has_instance_call_handler()) {
- return Handle<JSFunction>(
- isolate->native_context()->call_as_constructor_delegate());
- }
-
- // If the Object doesn't have an instance-call handler we should
- // throw a non-callable exception.
- i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
- "called_non_callable", i::HandleVector<i::Object>(&object, 1));
- isolate->Throw(*error_obj);
- *has_pending_exception = true;
-
- return isolate->factory()->undefined_value();
-}
-
-
-bool StackGuard::IsStackOverflow() {
- ExecutionAccess access(isolate_);
- return (thread_local_.jslimit_ != kInterruptLimit &&
- thread_local_.climit_ != kInterruptLimit);
-}
-
-
-void StackGuard::EnableInterrupts() {
- ExecutionAccess access(isolate_);
- if (has_pending_interrupts(access)) {
- set_interrupt_limits(access);
- }
-}
-
-
-void StackGuard::SetStackLimit(uintptr_t limit) {
- ExecutionAccess access(isolate_);
- // If the current limits are special (e.g. due to a pending interrupt) then
- // leave them alone.
- uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
- if (thread_local_.jslimit_ == thread_local_.real_jslimit_) {
- thread_local_.jslimit_ = jslimit;
- }
- if (thread_local_.climit_ == thread_local_.real_climit_) {
- thread_local_.climit_ = limit;
- }
- thread_local_.real_climit_ = limit;
- thread_local_.real_jslimit_ = jslimit;
-}
-
-
-void StackGuard::DisableInterrupts() {
- ExecutionAccess access(isolate_);
- reset_limits(access);
-}
-
-
-bool StackGuard::ShouldPostponeInterrupts() {
- ExecutionAccess access(isolate_);
- return should_postpone_interrupts(access);
-}
-
-
-bool StackGuard::IsInterrupted() {
- ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & INTERRUPT) != 0;
-}
-
-
-void StackGuard::Interrupt() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= INTERRUPT;
- set_interrupt_limits(access);
-}
-
-
-bool StackGuard::IsPreempted() {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & PREEMPT;
-}
-
-
-void StackGuard::Preempt() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= PREEMPT;
- set_interrupt_limits(access);
-}
-
-
-bool StackGuard::IsTerminateExecution() {
- ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & TERMINATE) != 0;
-}
-
-
-void StackGuard::TerminateExecution() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= TERMINATE;
- set_interrupt_limits(access);
-}
-
-
-void StackGuard::RequestCodeReadyEvent() {
- ASSERT(FLAG_parallel_recompilation);
- if (ExecutionAccess::TryLock(isolate_)) {
- thread_local_.interrupt_flags_ |= CODE_READY;
- if (thread_local_.postpone_interrupts_nesting_ == 0) {
- thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
- }
- ExecutionAccess::Unlock(isolate_);
- }
-}
-
-
-bool StackGuard::IsCodeReadyEvent() {
- ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & CODE_READY) != 0;
-}
-
-
-bool StackGuard::IsGCRequest() {
- ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
-}
-
-
-void StackGuard::RequestGC() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= GC_REQUEST;
- if (thread_local_.postpone_interrupts_nesting_ == 0) {
- thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
- }
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-bool StackGuard::IsDebugBreak() {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & DEBUGBREAK;
-}
-
-
-void StackGuard::DebugBreak() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= DEBUGBREAK;
- set_interrupt_limits(access);
-}
-
-
-bool StackGuard::IsDebugCommand() {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & DEBUGCOMMAND;
-}
-
-
-void StackGuard::DebugCommand() {
- if (FLAG_debugger_auto_break) {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= DEBUGCOMMAND;
- set_interrupt_limits(access);
- }
-}
-#endif
-
-void StackGuard::Continue(InterruptFlag after_what) {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ &= ~static_cast<int>(after_what);
- if (!should_postpone_interrupts(access) && !has_pending_interrupts(access)) {
- reset_limits(access);
- }
-}
-
-
-char* StackGuard::ArchiveStackGuard(char* to) {
- ExecutionAccess access(isolate_);
- memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
- ThreadLocal blank;
-
- // Set the stack limits using the old thread_local_.
- // TODO(isolates): This was the old semantics of constructing a ThreadLocal
- // (as the ctor called SetStackLimits, which looked at the
- // current thread_local_ from StackGuard)-- but is this
- // really what was intended?
- isolate_->heap()->SetStackLimits();
- thread_local_ = blank;
-
- return to + sizeof(ThreadLocal);
-}
-
-
-char* StackGuard::RestoreStackGuard(char* from) {
- ExecutionAccess access(isolate_);
- memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
- isolate_->heap()->SetStackLimits();
- return from + sizeof(ThreadLocal);
-}
-
-
-void StackGuard::FreeThreadResources() {
- Isolate::PerIsolateThreadData* per_thread =
- isolate_->FindOrAllocatePerThreadDataForThisThread();
- per_thread->set_stack_limit(thread_local_.real_climit_);
-}
-
-
-void StackGuard::ThreadLocal::Clear() {
- real_jslimit_ = kIllegalLimit;
- jslimit_ = kIllegalLimit;
- real_climit_ = kIllegalLimit;
- climit_ = kIllegalLimit;
- nesting_ = 0;
- postpone_interrupts_nesting_ = 0;
- interrupt_flags_ = 0;
-}
-
-
-bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
- bool should_set_stack_limits = false;
- if (real_climit_ == kIllegalLimit) {
- // Takes the address of the limit variable in order to find out where
- // the top of stack is right now.
- const uintptr_t kLimitSize = FLAG_stack_size * KB;
- uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
- ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
- real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
- jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
- real_climit_ = limit;
- climit_ = limit;
- should_set_stack_limits = true;
- }
- nesting_ = 0;
- postpone_interrupts_nesting_ = 0;
- interrupt_flags_ = 0;
- return should_set_stack_limits;
-}
-
-
-void StackGuard::ClearThread(const ExecutionAccess& lock) {
- thread_local_.Clear();
- isolate_->heap()->SetStackLimits();
-}
-
-
-void StackGuard::InitThread(const ExecutionAccess& lock) {
- if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
- Isolate::PerIsolateThreadData* per_thread =
- isolate_->FindOrAllocatePerThreadDataForThisThread();
- uintptr_t stored_limit = per_thread->stack_limit();
- // You should hold the ExecutionAccess lock when you call this.
- if (stored_limit != 0) {
- SetStackLimit(stored_limit);
- }
-}
-
-
-// --- C a l l s t o n a t i v e s ---
-
-#define RETURN_NATIVE_CALL(name, args, has_pending_exception) \
- do { \
- Isolate* isolate = Isolate::Current(); \
- Handle<Object> argv[] = args; \
- ASSERT(has_pending_exception != NULL); \
- return Call(isolate->name##_fun(), \
- isolate->js_builtins_object(), \
- ARRAY_SIZE(argv), argv, \
- has_pending_exception); \
- } while (false)
-
-
-Handle<Object> Execution::ToBoolean(Isolate* isolate, Handle<Object> obj) {
- // See the similar code in runtime.js:ToBoolean.
- if (obj->IsBoolean()) return obj;
- bool result = true;
- if (obj->IsString()) {
- result = Handle<String>::cast(obj)->length() != 0;
- } else if (obj->IsNull() || obj->IsUndefined()) {
- result = false;
- } else if (obj->IsNumber()) {
- double value = obj->Number();
- result = !((value == 0) || isnan(value));
- }
- return Handle<Object>(isolate->heap()->ToBoolean(result), isolate);
-}
-
-
-Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_number, { obj }, exc);
-}
-
-
-Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_string, { obj }, exc);
-}
-
-
-Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
-}
-
-
-Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
- if (obj->IsSpecObject()) return obj;
- RETURN_NATIVE_CALL(to_object, { obj }, exc);
-}
-
-
-Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_integer, { obj }, exc);
-}
-
-
-Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
-}
-
-
-Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_int32, { obj }, exc);
-}
-
-
-Handle<Object> Execution::NewDate(double time, bool* exc) {
- Handle<Object> time_obj = FACTORY->NewNumber(time);
- RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
-}
-
-
-#undef RETURN_NATIVE_CALL
-
-
-Handle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
- Handle<String> flags,
- bool* exc) {
- Handle<JSFunction> function = Handle<JSFunction>(
- pattern->GetIsolate()->native_context()->regexp_function());
- Handle<Object> re_obj = RegExpImpl::CreateRegExpLiteral(
- function, pattern, flags, exc);
- if (*exc) return Handle<JSRegExp>();
- return Handle<JSRegExp>::cast(re_obj);
-}
-
-
-Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
- Isolate* isolate = string->GetIsolate();
- Factory* factory = isolate->factory();
-
- int int_index = static_cast<int>(index);
- if (int_index < 0 || int_index >= string->length()) {
- return factory->undefined_value();
- }
-
- Handle<Object> char_at = GetProperty(
- isolate, isolate->js_builtins_object(), factory->char_at_string());
- if (!char_at->IsJSFunction()) {
- return factory->undefined_value();
- }
-
- bool caught_exception;
- Handle<Object> index_object = factory->NewNumberFromInt(int_index);
- Handle<Object> index_arg[] = { index_object };
- Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
- string,
- ARRAY_SIZE(index_arg),
- index_arg,
- &caught_exception);
- if (caught_exception) {
- return factory->undefined_value();
- }
- return result;
-}
-
-
-Handle<JSFunction> Execution::InstantiateFunction(
- Handle<FunctionTemplateInfo> data,
- bool* exc) {
- Isolate* isolate = data->GetIsolate();
- // Fast case: see if the function has already been instantiated
- int serial_number = Smi::cast(data->serial_number())->value();
- Object* elm =
- isolate->native_context()->function_cache()->
- GetElementNoExceptionThrown(serial_number);
- if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
- // The function has not yet been instantiated in this context; do it.
- Handle<Object> args[] = { data };
- Handle<Object> result = Call(isolate->instantiate_fun(),
- isolate->js_builtins_object(),
- ARRAY_SIZE(args),
- args,
- exc);
- if (*exc) return Handle<JSFunction>::null();
- return Handle<JSFunction>::cast(result);
-}
-
-
-Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
- bool* exc) {
- Isolate* isolate = data->GetIsolate();
- if (data->property_list()->IsUndefined() &&
- !data->constructor()->IsUndefined()) {
- // Initialization to make gcc happy.
- Object* result = NULL;
- {
- HandleScope scope(isolate);
- Handle<FunctionTemplateInfo> cons_template =
- Handle<FunctionTemplateInfo>(
- FunctionTemplateInfo::cast(data->constructor()));
- Handle<JSFunction> cons = InstantiateFunction(cons_template, exc);
- if (*exc) return Handle<JSObject>::null();
- Handle<Object> value = New(cons, 0, NULL, exc);
- if (*exc) return Handle<JSObject>::null();
- result = *value;
- }
- ASSERT(!*exc);
- return Handle<JSObject>(JSObject::cast(result));
- } else {
- Handle<Object> args[] = { data };
- Handle<Object> result = Call(isolate->instantiate_fun(),
- isolate->js_builtins_object(),
- ARRAY_SIZE(args),
- args,
- exc);
- if (*exc) return Handle<JSObject>::null();
- return Handle<JSObject>::cast(result);
- }
-}
-
-
-void Execution::ConfigureInstance(Handle<Object> instance,
- Handle<Object> instance_template,
- bool* exc) {
- Isolate* isolate = Isolate::Current();
- Handle<Object> args[] = { instance, instance_template };
- Execution::Call(isolate->configure_instance_fun(),
- isolate->js_builtins_object(),
- ARRAY_SIZE(args),
- args,
- exc);
-}
-
-
-Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
- Handle<JSFunction> fun,
- Handle<Object> pos,
- Handle<Object> is_global) {
- Isolate* isolate = fun->GetIsolate();
- Handle<Object> args[] = { recv, fun, pos, is_global };
- bool caught_exception;
- Handle<Object> result = TryCall(isolate->get_stack_trace_line_fun(),
- isolate->js_builtins_object(),
- ARRAY_SIZE(args),
- args,
- &caught_exception);
- if (caught_exception || !result->IsString()) {
- return isolate->factory()->empty_string();
- }
-
- return Handle<String>::cast(result);
-}
-
-
-static Object* RuntimePreempt() {
- Isolate* isolate = Isolate::Current();
-
- // Clear the preempt request flag.
- isolate->stack_guard()->Continue(PREEMPT);
-
- ContextSwitcher::PreemptionReceived();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (isolate->debug()->InDebugger()) {
- // If currently in the debugger don't do any actual preemption but record
- // that preemption occoured while in the debugger.
- isolate->debug()->PreemptionWhileInDebugger();
- } else {
- // Perform preemption.
- v8::Unlocker unlocker(reinterpret_cast<v8::Isolate*>(isolate));
- Thread::YieldCPU();
- }
-#else
- { // NOLINT
- // Perform preemption.
- v8::Unlocker unlocker(reinterpret_cast<v8::Isolate*>(isolate));
- Thread::YieldCPU();
- }
-#endif
-
- return isolate->heap()->undefined_value();
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Object* Execution::DebugBreakHelper() {
- Isolate* isolate = Isolate::Current();
-
- // Just continue if breaks are disabled.
- if (isolate->debug()->disable_break()) {
- return isolate->heap()->undefined_value();
- }
-
- // Ignore debug break during bootstrapping.
- if (isolate->bootstrapper()->IsActive()) {
- return isolate->heap()->undefined_value();
- }
-
- // Ignore debug break if debugger is not active.
- if (!isolate->debugger()->IsDebuggerActive()) {
- return isolate->heap()->undefined_value();
- }
-
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) {
- return isolate->heap()->undefined_value();
- }
-
- {
- JavaScriptFrameIterator it(isolate);
- ASSERT(!it.done());
- Object* fun = it.frame()->function();
- if (fun && fun->IsJSFunction()) {
- // Don't stop in builtin functions.
- if (JSFunction::cast(fun)->IsBuiltin()) {
- return isolate->heap()->undefined_value();
- }
- GlobalObject* global = JSFunction::cast(fun)->context()->global_object();
- // Don't stop in debugger functions.
- if (isolate->debug()->IsDebugGlobal(global)) {
- return isolate->heap()->undefined_value();
- }
- }
- }
-
- // Collect the break state before clearing the flags.
- bool debug_command_only =
- isolate->stack_guard()->IsDebugCommand() &&
- !isolate->stack_guard()->IsDebugBreak();
-
- // Clear the debug break request flag.
- isolate->stack_guard()->Continue(DEBUGBREAK);
-
- ProcessDebugMessages(debug_command_only);
-
- // Return to continue execution.
- return isolate->heap()->undefined_value();
-}
-
-void Execution::ProcessDebugMessages(bool debug_command_only) {
- Isolate* isolate = Isolate::Current();
- // Clear the debug command request flag.
- isolate->stack_guard()->Continue(DEBUGCOMMAND);
-
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) {
- return;
- }
-
- HandleScope scope(isolate);
- // Enter the debugger. Just continue if we fail to enter the debugger.
- EnterDebugger debugger;
- if (debugger.FailedToEnter()) {
- return;
- }
-
- // Notify the debug event listeners. Indicate auto continue if the break was
- // a debug command break.
- isolate->debugger()->OnDebugBreak(isolate->factory()->undefined_value(),
- debug_command_only);
-}
-
-
-#endif
-
-MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
- StackGuard* stack_guard = isolate->stack_guard();
- if (stack_guard->ShouldPostponeInterrupts()) {
- return isolate->heap()->undefined_value();
- }
-
- if (stack_guard->IsGCRequest()) {
- isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
- "StackGuard GC request");
- stack_guard->Continue(GC_REQUEST);
- }
-
- if (stack_guard->IsCodeReadyEvent()) {
- ASSERT(FLAG_parallel_recompilation);
- if (FLAG_trace_parallel_recompilation) {
- PrintF(" ** CODE_READY event received.\n");
- }
- stack_guard->Continue(CODE_READY);
- }
- if (!stack_guard->IsTerminateExecution() &&
- !FLAG_manual_parallel_recompilation) {
- isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
- }
-
- isolate->counters()->stack_interrupts()->Increment();
- isolate->counters()->runtime_profiler_ticks()->Increment();
- isolate->runtime_profiler()->OptimizeNow();
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
- DebugBreakHelper();
- }
-#endif
- if (stack_guard->IsPreempted()) RuntimePreempt();
- if (stack_guard->IsTerminateExecution()) {
- stack_guard->Continue(TERMINATE);
- return isolate->TerminateExecution();
- }
- if (stack_guard->IsInterrupted()) {
- stack_guard->Continue(INTERRUPT);
- return isolate->StackOverflow();
- }
- return isolate->heap()->undefined_value();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/execution.h b/src/3rdparty/v8/src/execution.h
deleted file mode 100644
index 7fc822e..0000000
--- a/src/3rdparty/v8/src/execution.h
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXECUTION_H_
-#define V8_EXECUTION_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Flag used to set the interrupt causes.
-enum InterruptFlag {
- INTERRUPT = 1 << 0,
- DEBUGBREAK = 1 << 1,
- DEBUGCOMMAND = 1 << 2,
- PREEMPT = 1 << 3,
- TERMINATE = 1 << 4,
- GC_REQUEST = 1 << 5,
- CODE_READY = 1 << 6
-};
-
-
-class Isolate;
-
-
-class Execution : public AllStatic {
- public:
- // Call a function, the caller supplies a receiver and an array
- // of arguments. Arguments are Object* type. After function returns,
- // pointers in 'args' might be invalid.
- //
- // *pending_exception tells whether the invoke resulted in
- // a pending exception.
- //
- // When convert_receiver is set, and the receiver is not an object,
- // and the function called is not in strict mode, receiver is converted to
- // an object.
- //
- static Handle<Object> Call(Handle<Object> callable,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[],
- bool* pending_exception,
- bool convert_receiver = false);
-
- static Handle<Object> Call(Handle<Object> callable,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[],
- bool* pending_exception,
- bool convert_receiver,
- Handle<Object> qml);
-
- // Construct object from function, the caller supplies an array of
- // arguments. Arguments are Object* type. After function returns,
- // pointers in 'args' might be invalid.
- //
- // *pending_exception tells whether the invoke resulted in
- // a pending exception.
- //
- static Handle<Object> New(Handle<JSFunction> func,
- int argc,
- Handle<Object> argv[],
- bool* pending_exception);
-
- // Call a function, just like Call(), but make sure to silently catch
- // any thrown exceptions. The return value is either the result of
- // calling the function (if caught exception is false) or the exception
- // that occurred (if caught exception is true).
- static Handle<Object> TryCall(Handle<JSFunction> func,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[],
- bool* caught_exception);
-
- // ECMA-262 9.2
- static Handle<Object> ToBoolean(Isolate* isolate, Handle<Object> obj);
-
- // ECMA-262 9.3
- static Handle<Object> ToNumber(Handle<Object> obj, bool* exc);
-
- // ECMA-262 9.4
- static Handle<Object> ToInteger(Handle<Object> obj, bool* exc);
-
- // ECMA-262 9.5
- static Handle<Object> ToInt32(Handle<Object> obj, bool* exc);
-
- // ECMA-262 9.6
- static Handle<Object> ToUint32(Handle<Object> obj, bool* exc);
-
- // ECMA-262 9.8
- static Handle<Object> ToString(Handle<Object> obj, bool* exc);
-
- // ECMA-262 9.8
- static Handle<Object> ToDetailString(Handle<Object> obj, bool* exc);
-
- // ECMA-262 9.9
- static Handle<Object> ToObject(Handle<Object> obj, bool* exc);
-
- // Create a new date object from 'time'.
- static Handle<Object> NewDate(double time, bool* exc);
-
- // Create a new regular expression object from 'pattern' and 'flags'.
- static Handle<JSRegExp> NewJSRegExp(Handle<String> pattern,
- Handle<String> flags,
- bool* exc);
-
- // Used to implement [] notation on strings (calls JS code)
- static Handle<Object> CharAt(Handle<String> str, uint32_t index);
-
- static Handle<Object> GetFunctionFor();
- static Handle<JSFunction> InstantiateFunction(
- Handle<FunctionTemplateInfo> data, bool* exc);
- static Handle<JSObject> InstantiateObject(Handle<ObjectTemplateInfo> data,
- bool* exc);
- static void ConfigureInstance(Handle<Object> instance,
- Handle<Object> data,
- bool* exc);
- static Handle<String> GetStackTraceLine(Handle<Object> recv,
- Handle<JSFunction> fun,
- Handle<Object> pos,
- Handle<Object> is_global);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- static Object* DebugBreakHelper();
- static void ProcessDebugMessages(bool debug_command_only);
-#endif
-
- // If the stack guard is triggered, but it is not an actual
- // stack overflow, then handle the interruption accordingly.
- MUST_USE_RESULT static MaybeObject* HandleStackGuardInterrupt(
- Isolate* isolate);
-
- // Get a function delegate (or undefined) for the given non-function
- // object. Used for support calling objects as functions.
- static Handle<Object> GetFunctionDelegate(Handle<Object> object);
- static Handle<Object> TryGetFunctionDelegate(Handle<Object> object,
- bool* has_pending_exception);
-
- // Get a function delegate (or undefined) for the given non-function
- // object. Used for support calling objects as constructors.
- static Handle<Object> GetConstructorDelegate(Handle<Object> object);
- static Handle<Object> TryGetConstructorDelegate(Handle<Object> object,
- bool* has_pending_exception);
-};
-
-
-class ExecutionAccess;
-
-
-// StackGuard contains the handling of the limits that are used to limit the
-// number of nested invocations of JavaScript and the stack size used in each
-// invocation.
-class StackGuard {
- public:
- // Pass the address beyond which the stack should not grow. The stack
- // is assumed to grow downwards.
- void SetStackLimit(uintptr_t limit);
-
- // Threading support.
- char* ArchiveStackGuard(char* to);
- char* RestoreStackGuard(char* from);
- static int ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
- void FreeThreadResources();
- // Sets up the default stack guard for this thread if it has not
- // already been set up.
- void InitThread(const ExecutionAccess& lock);
- // Clears the stack guard for this thread so it does not look as if
- // it has been set up.
- void ClearThread(const ExecutionAccess& lock);
-
- bool IsStackOverflow();
- bool IsPreempted();
- void Preempt();
- bool IsInterrupted();
- void Interrupt();
- bool IsTerminateExecution();
- void TerminateExecution();
- bool IsCodeReadyEvent();
- void RequestCodeReadyEvent();
-#ifdef ENABLE_DEBUGGER_SUPPORT
- bool IsDebugBreak();
- void DebugBreak();
- bool IsDebugCommand();
- void DebugCommand();
-#endif
- bool IsGCRequest();
- void RequestGC();
- void Continue(InterruptFlag after_what);
-
- // This provides an asynchronous read of the stack limits for the current
- // thread. There are no locks protecting this, but it is assumed that you
- // have the global V8 lock if you are using multiple V8 threads.
- uintptr_t climit() {
- return thread_local_.climit_;
- }
- uintptr_t real_climit() {
- return thread_local_.real_climit_;
- }
- uintptr_t jslimit() {
- return thread_local_.jslimit_;
- }
- uintptr_t real_jslimit() {
- return thread_local_.real_jslimit_;
- }
- Address address_of_jslimit() {
- return reinterpret_cast<Address>(&thread_local_.jslimit_);
- }
- Address address_of_real_jslimit() {
- return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
- }
- bool ShouldPostponeInterrupts();
-
- private:
- StackGuard();
-
- // You should hold the ExecutionAccess lock when calling this method.
- bool has_pending_interrupts(const ExecutionAccess& lock) {
- // Sanity check: We shouldn't be asking about pending interrupts
- // unless we're not postponing them anymore.
- ASSERT(!should_postpone_interrupts(lock));
- return thread_local_.interrupt_flags_ != 0;
- }
-
- // You should hold the ExecutionAccess lock when calling this method.
- bool should_postpone_interrupts(const ExecutionAccess& lock) {
- return thread_local_.postpone_interrupts_nesting_ > 0;
- }
-
- // You should hold the ExecutionAccess lock when calling this method.
- inline void set_interrupt_limits(const ExecutionAccess& lock);
-
- // Reset limits to actual values. For example after handling interrupt.
- // You should hold the ExecutionAccess lock when calling this method.
- inline void reset_limits(const ExecutionAccess& lock);
-
- // Enable or disable interrupts.
- void EnableInterrupts();
- void DisableInterrupts();
-
-#ifdef V8_TARGET_ARCH_X64
- static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
- static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
-#else
- static const uintptr_t kInterruptLimit = 0xfffffffe;
- static const uintptr_t kIllegalLimit = 0xfffffff8;
-#endif
-
- class ThreadLocal {
- public:
- ThreadLocal() { Clear(); }
- // You should hold the ExecutionAccess lock when you call Initialize or
- // Clear.
- void Clear();
-
- // Returns true if the heap's stack limits should be set, false if not.
- bool Initialize(Isolate* isolate);
-
- // The stack limit is split into a JavaScript and a C++ stack limit. These
- // two are the same except when running on a simulator where the C++ and
- // JavaScript stacks are separate. Each of the two stack limits have two
- // values. The one eith the real_ prefix is the actual stack limit
- // set for the VM. The one without the real_ prefix has the same value as
- // the actual stack limit except when there is an interruption (e.g. debug
- // break or preemption) in which case it is lowered to make stack checks
- // fail. Both the generated code and the runtime system check against the
- // one without the real_ prefix.
- uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM.
- uintptr_t jslimit_;
- uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
- uintptr_t climit_;
-
- int nesting_;
- int postpone_interrupts_nesting_;
- int interrupt_flags_;
- };
-
- // TODO(isolates): Technically this could be calculated directly from a
- // pointer to StackGuard.
- Isolate* isolate_;
- ThreadLocal thread_local_;
-
- friend class Isolate;
- friend class StackLimitCheck;
- friend class PostponeInterruptsScope;
-
- DISALLOW_COPY_AND_ASSIGN(StackGuard);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_EXECUTION_H_
diff --git a/src/3rdparty/v8/src/extensions/externalize-string-extension.cc b/src/3rdparty/v8/src/extensions/externalize-string-extension.cc
deleted file mode 100644
index 76d2030..0000000
--- a/src/3rdparty/v8/src/extensions/externalize-string-extension.cc
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "externalize-string-extension.h"
-
-namespace v8 {
-namespace internal {
-
-template <typename Char, typename Base>
-class SimpleStringResource : public Base {
- public:
- // Takes ownership of |data|.
- SimpleStringResource(Char* data, size_t length)
- : data_(data),
- length_(length) {}
-
- virtual ~SimpleStringResource() { delete[] data_; }
-
- virtual const Char* data() const { return data_; }
-
- virtual size_t length() const { return length_; }
-
- private:
- Char* const data_;
- const size_t length_;
-};
-
-
-typedef SimpleStringResource<char, v8::String::ExternalAsciiStringResource>
- SimpleAsciiStringResource;
-typedef SimpleStringResource<uc16, v8::String::ExternalStringResource>
- SimpleTwoByteStringResource;
-
-
-const char* const ExternalizeStringExtension::kSource =
- "native function externalizeString();"
- "native function isAsciiString();";
-
-
-v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
- v8::Handle<v8::String> str) {
- if (strcmp(*v8::String::AsciiValue(str), "externalizeString") == 0) {
- return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize);
- } else {
- ASSERT(strcmp(*v8::String::AsciiValue(str), "isAsciiString") == 0);
- return v8::FunctionTemplate::New(ExternalizeStringExtension::IsAscii);
- }
-}
-
-
-v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
- const v8::Arguments& args) {
- if (args.Length() < 1 || !args[0]->IsString()) {
- return v8::ThrowException(v8::String::New(
- "First parameter to externalizeString() must be a string."));
- }
- bool force_two_byte = false;
- if (args.Length() >= 2) {
- if (args[1]->IsBoolean()) {
- force_two_byte = args[1]->BooleanValue();
- } else {
- return v8::ThrowException(v8::String::New(
- "Second parameter to externalizeString() must be a boolean."));
- }
- }
- bool result = false;
- Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
- if (string->IsExternalString()) {
- return v8::ThrowException(v8::String::New(
- "externalizeString() can't externalize twice."));
- }
- if (string->IsOneByteRepresentation() && !force_two_byte) {
- uint8_t* data = new uint8_t[string->length()];
- String::WriteToFlat(*string, data, 0, string->length());
- SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
- reinterpret_cast<char*>(data), string->length());
- result = string->MakeExternal(resource);
- if (result && !string->IsInternalizedString()) {
- HEAP->external_string_table()->AddString(*string);
- }
- if (!result) delete resource;
- } else {
- uc16* data = new uc16[string->length()];
- String::WriteToFlat(*string, data, 0, string->length());
- SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
- data, string->length());
- result = string->MakeExternal(resource);
- if (result && !string->IsInternalizedString()) {
- HEAP->external_string_table()->AddString(*string);
- }
- if (!result) delete resource;
- }
- if (!result) {
- return v8::ThrowException(v8::String::New("externalizeString() failed."));
- }
- return v8::Undefined();
-}
-
-
-v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
- const v8::Arguments& args) {
- if (args.Length() != 1 || !args[0]->IsString()) {
- return v8::ThrowException(v8::String::New(
- "isAsciiString() requires a single string argument."));
- }
- return
- Utils::OpenHandle(*args[0].As<v8::String>())->IsOneByteRepresentation() ?
- v8::True() : v8::False();
-}
-
-
-void ExternalizeStringExtension::Register() {
- static ExternalizeStringExtension externalize_extension;
- static v8::DeclareExtension declaration(&externalize_extension);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/extensions/externalize-string-extension.h b/src/3rdparty/v8/src/extensions/externalize-string-extension.h
deleted file mode 100644
index b97b496..0000000
--- a/src/3rdparty/v8/src/extensions/externalize-string-extension.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
-#define V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-class ExternalizeStringExtension : public v8::Extension {
- public:
- ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
- v8::Handle<v8::String> name);
- static v8::Handle<v8::Value> Externalize(const v8::Arguments& args);
- static v8::Handle<v8::Value> IsAscii(const v8::Arguments& args);
- static void Register();
- private:
- static const char* const kSource;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
diff --git a/src/3rdparty/v8/src/extensions/gc-extension.cc b/src/3rdparty/v8/src/extensions/gc-extension.cc
deleted file mode 100644
index 813b921..0000000
--- a/src/3rdparty/v8/src/extensions/gc-extension.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "gc-extension.h"
-
-namespace v8 {
-namespace internal {
-
-const char* const GCExtension::kSource = "native function gc();";
-
-
-v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
- v8::Handle<v8::String> str) {
- return v8::FunctionTemplate::New(GCExtension::GC);
-}
-
-
-v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
- if (args[0]->BooleanValue()) {
- HEAP->CollectGarbage(NEW_SPACE, "gc extension");
- } else {
- HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
- }
- return v8::Undefined();
-}
-
-
-void GCExtension::Register() {
- static GCExtension gc_extension;
- static v8::DeclareExtension declaration(&gc_extension);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/extensions/gc-extension.h b/src/3rdparty/v8/src/extensions/gc-extension.h
deleted file mode 100644
index 06ea4ed..0000000
--- a/src/3rdparty/v8/src/extensions/gc-extension.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_GC_EXTENSION_H_
-#define V8_EXTENSIONS_GC_EXTENSION_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-class GCExtension : public v8::Extension {
- public:
- GCExtension() : v8::Extension("v8/gc", kSource) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
- v8::Handle<v8::String> name);
- static v8::Handle<v8::Value> GC(const v8::Arguments& args);
- static void Register();
- private:
- static const char* const kSource;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_GC_EXTENSION_H_
diff --git a/src/3rdparty/v8/src/extensions/statistics-extension.cc b/src/3rdparty/v8/src/extensions/statistics-extension.cc
deleted file mode 100644
index 7ae090c..0000000
--- a/src/3rdparty/v8/src/extensions/statistics-extension.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "statistics-extension.h"
-
-namespace v8 {
-namespace internal {
-
-const char* const StatisticsExtension::kSource =
- "native function getV8Statistics();";
-
-
-v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunction(
- v8::Handle<v8::String> str) {
- ASSERT(strcmp(*v8::String::AsciiValue(str), "getV8Statistics") == 0);
- return v8::FunctionTemplate::New(StatisticsExtension::GetCounters);
-}
-
-
-static void AddCounter(v8::Local<v8::Object> object,
- StatsCounter* counter,
- const char* name) {
- if (counter->Enabled()) {
- object->Set(v8::String::New(name),
- v8::Number::New(*counter->GetInternalPointer()));
- }
-}
-
-static void AddNumber(v8::Local<v8::Object> object,
- intptr_t value,
- const char* name) {
- object->Set(v8::String::New(name),
- v8::Number::New(static_cast<double>(value)));
-}
-
-
-v8::Handle<v8::Value> StatisticsExtension::GetCounters(
- const v8::Arguments& args) {
- Isolate* isolate = Isolate::Current();
- Heap* heap = isolate->heap();
-
- if (args.Length() > 0) { // GC if first argument evaluates to true.
- if (args[0]->IsBoolean() && args[0]->ToBoolean()->Value()) {
- heap->CollectAllGarbage(Heap::kNoGCFlags, "counters extension");
- }
- }
-
- Counters* counters = isolate->counters();
- v8::Local<v8::Object> result = v8::Object::New();
-
-#define ADD_COUNTER(name, caption) \
- AddCounter(result, counters->name(), #name);
-
- STATS_COUNTER_LIST_1(ADD_COUNTER)
- STATS_COUNTER_LIST_2(ADD_COUNTER)
-#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- AddCounter(result, counters->count_of_##name(), "count_of_" #name); \
- AddCounter(result, counters->size_of_##name(), "size_of_" #name);
-
- INSTANCE_TYPE_LIST(ADD_COUNTER)
-#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- AddCounter(result, counters->count_of_CODE_TYPE_##name(), \
- "count_of_CODE_TYPE_" #name); \
- AddCounter(result, counters->size_of_CODE_TYPE_##name(), \
- "size_of_CODE_TYPE_" #name);
-
- CODE_KIND_LIST(ADD_COUNTER)
-#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- AddCounter(result, counters->count_of_FIXED_ARRAY_##name(), \
- "count_of_FIXED_ARRAY_" #name); \
- AddCounter(result, counters->size_of_FIXED_ARRAY_##name(), \
- "size_of_FIXED_ARRAY_" #name);
-
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADD_COUNTER)
-#undef ADD_COUNTER
-
- AddNumber(result, isolate->memory_allocator()->Size(),
- "total_committed_bytes");
- AddNumber(result, heap->new_space()->Size(),
- "new_space_live_bytes");
- AddNumber(result, heap->new_space()->Available(),
- "new_space_available_bytes");
- AddNumber(result, heap->new_space()->CommittedMemory(),
- "new_space_commited_bytes");
- AddNumber(result, heap->old_pointer_space()->Size(),
- "old_pointer_space_live_bytes");
- AddNumber(result, heap->old_pointer_space()->Available(),
- "old_pointer_space_available_bytes");
- AddNumber(result, heap->old_pointer_space()->CommittedMemory(),
- "old_pointer_space_commited_bytes");
- AddNumber(result, heap->old_data_space()->Size(),
- "old_data_space_live_bytes");
- AddNumber(result, heap->old_data_space()->Available(),
- "old_data_space_available_bytes");
- AddNumber(result, heap->old_data_space()->CommittedMemory(),
- "old_data_space_commited_bytes");
- AddNumber(result, heap->code_space()->Size(),
- "code_space_live_bytes");
- AddNumber(result, heap->code_space()->Available(),
- "code_space_available_bytes");
- AddNumber(result, heap->code_space()->CommittedMemory(),
- "code_space_commited_bytes");
- AddNumber(result, heap->cell_space()->Size(),
- "cell_space_live_bytes");
- AddNumber(result, heap->cell_space()->Available(),
- "cell_space_available_bytes");
- AddNumber(result, heap->cell_space()->CommittedMemory(),
- "cell_space_commited_bytes");
- AddNumber(result, heap->lo_space()->Size(),
- "lo_space_live_bytes");
- AddNumber(result, heap->lo_space()->Available(),
- "lo_space_available_bytes");
- AddNumber(result, heap->lo_space()->CommittedMemory(),
- "lo_space_commited_bytes");
- AddNumber(result, heap->amount_of_external_allocated_memory(),
- "amount_of_external_allocated_memory");
- return result;
-}
-
-
-void StatisticsExtension::Register() {
- static StatisticsExtension statistics_extension;
- static v8::DeclareExtension declaration(&statistics_extension);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/extensions/statistics-extension.h b/src/3rdparty/v8/src/extensions/statistics-extension.h
deleted file mode 100644
index 433c4cf..0000000
--- a/src/3rdparty/v8/src/extensions/statistics-extension.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_STATISTICS_EXTENSION_H_
-#define V8_EXTENSIONS_STATISTICS_EXTENSION_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-class StatisticsExtension : public v8::Extension {
- public:
- StatisticsExtension() : v8::Extension("v8/statistics", kSource) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
- v8::Handle<v8::String> name);
- static v8::Handle<v8::Value> GetCounters(const v8::Arguments& args);
- static void Register();
- private:
- static const char* const kSource;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_STATISTICS_EXTENSION_H_
diff --git a/src/3rdparty/v8/src/factory.cc b/src/3rdparty/v8/src/factory.cc
deleted file mode 100644
index 4c6af40..0000000
--- a/src/3rdparty/v8/src/factory.cc
+++ /dev/null
@@ -1,1496 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "debug.h"
-#include "execution.h"
-#include "factory.h"
-#include "macro-assembler.h"
-#include "objects.h"
-#include "objects-visiting.h"
-#include "platform.h"
-#include "scopeinfo.h"
-
-namespace v8 {
-namespace internal {
-
-
-Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
- ASSERT(0 <= size);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFixedArray(size, pretenure),
- FixedArray);
-}
-
-
-Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
- PretenureFlag pretenure) {
- ASSERT(0 <= size);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFixedArrayWithHoles(size, pretenure),
- FixedArray);
-}
-
-
-Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
- PretenureFlag pretenure) {
- ASSERT(0 <= size);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure),
- FixedDoubleArray);
-}
-
-
-Handle<StringDictionary> Factory::NewStringDictionary(int at_least_space_for) {
- ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(isolate(),
- StringDictionary::Allocate(at_least_space_for),
- StringDictionary);
-}
-
-
-Handle<SeededNumberDictionary> Factory::NewSeededNumberDictionary(
- int at_least_space_for) {
- ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(isolate(),
- SeededNumberDictionary::Allocate(at_least_space_for),
- SeededNumberDictionary);
-}
-
-
-Handle<UnseededNumberDictionary> Factory::NewUnseededNumberDictionary(
- int at_least_space_for) {
- ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(isolate(),
- UnseededNumberDictionary::Allocate(at_least_space_for),
- UnseededNumberDictionary);
-}
-
-
-Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) {
- ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(isolate(),
- ObjectHashSet::Allocate(at_least_space_for),
- ObjectHashSet);
-}
-
-
-Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
- ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(isolate(),
- ObjectHashTable::Allocate(at_least_space_for),
- ObjectHashTable);
-}
-
-
-Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
- int slack) {
- ASSERT(0 <= number_of_descriptors);
- CALL_HEAP_FUNCTION(isolate(),
- DescriptorArray::Allocate(number_of_descriptors, slack),
- DescriptorArray);
-}
-
-
-Handle<DeoptimizationInputData> Factory::NewDeoptimizationInputData(
- int deopt_entry_count,
- PretenureFlag pretenure) {
- ASSERT(deopt_entry_count > 0);
- CALL_HEAP_FUNCTION(isolate(),
- DeoptimizationInputData::Allocate(deopt_entry_count,
- pretenure),
- DeoptimizationInputData);
-}
-
-
-Handle<DeoptimizationOutputData> Factory::NewDeoptimizationOutputData(
- int deopt_entry_count,
- PretenureFlag pretenure) {
- ASSERT(deopt_entry_count > 0);
- CALL_HEAP_FUNCTION(isolate(),
- DeoptimizationOutputData::Allocate(deopt_entry_count,
- pretenure),
- DeoptimizationOutputData);
-}
-
-
-Handle<AccessorPair> Factory::NewAccessorPair() {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateAccessorPair(),
- AccessorPair);
-}
-
-
-Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateTypeFeedbackInfo(),
- TypeFeedbackInfo);
-}
-
-
-// Internalized strings are created in the old generation (data space).
-Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeUtf8String(string),
- String);
-}
-
-// Internalized strings are created in the old generation (data space).
-Handle<String> Factory::InternalizeString(Handle<String> string) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeString(*string),
- String);
-}
-
-Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeOneByteString(string),
- String);
-}
-
-
-Handle<String> Factory::InternalizeOneByteString(
- Handle<SeqOneByteString> string, int from, int length) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeOneByteString(
- string, from, length),
- String);
-}
-
-
-Handle<String> Factory::InternalizeTwoByteString(Vector<const uc16> string) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeTwoByteString(string),
- String);
-}
-
-
-Handle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateStringFromOneByte(string, pretenure),
- String);
-}
-
-Handle<String> Factory::NewStringFromUtf8(Vector<const char> string,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateStringFromUtf8(string, pretenure),
- String);
-}
-
-
-Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateStringFromTwoByte(string, pretenure),
- String);
-}
-
-
-Handle<SeqOneByteString> Factory::NewRawOneByteString(int length,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateRawOneByteString(length, pretenure),
- SeqOneByteString);
-}
-
-
-Handle<SeqTwoByteString> Factory::NewRawTwoByteString(int length,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateRawTwoByteString(length, pretenure),
- SeqTwoByteString);
-}
-
-
-Handle<String> Factory::NewConsString(Handle<String> first,
- Handle<String> second) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateConsString(*first, *second),
- String);
-}
-
-
-Handle<String> Factory::NewSubString(Handle<String> str,
- int begin,
- int end) {
- CALL_HEAP_FUNCTION(isolate(),
- str->SubString(begin, end),
- String);
-}
-
-
-Handle<String> Factory::NewProperSubString(Handle<String> str,
- int begin,
- int end) {
- ASSERT(begin > 0 || end < str->length());
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateSubString(*str, begin, end),
- String);
-}
-
-
-Handle<String> Factory::NewExternalStringFromAscii(
- const ExternalAsciiString::Resource* resource) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateExternalStringFromAscii(resource),
- String);
-}
-
-
-Handle<String> Factory::NewExternalStringFromTwoByte(
- const ExternalTwoByteString::Resource* resource) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
- String);
-}
-
-
-Handle<Symbol> Factory::NewSymbol() {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateSymbol(),
- Symbol);
-}
-
-
-Handle<Context> Factory::NewNativeContext() {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateNativeContext(),
- Context);
-}
-
-
-Handle<Context> Factory::NewGlobalContext(Handle<JSFunction> function,
- Handle<ScopeInfo> scope_info) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateGlobalContext(*function, *scope_info),
- Context);
-}
-
-
-Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateModuleContext(*scope_info),
- Context);
-}
-
-
-Handle<Context> Factory::NewFunctionContext(int length,
- Handle<JSFunction> function) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFunctionContext(length, *function),
- Context);
-}
-
-
-Handle<Context> Factory::NewCatchContext(Handle<JSFunction> function,
- Handle<Context> previous,
- Handle<String> name,
- Handle<Object> thrown_object) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateCatchContext(*function,
- *previous,
- *name,
- *thrown_object),
- Context);
-}
-
-
-Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
- Handle<Context> previous,
- Handle<JSObject> extension) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateWithContext(*function, *previous, *extension),
- Context);
-}
-
-
-Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
- Handle<Context> previous,
- Handle<ScopeInfo> scope_info) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateBlockContext(*function,
- *previous,
- *scope_info),
- Context);
-}
-
-
-Handle<Struct> Factory::NewStruct(InstanceType type) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateStruct(type),
- Struct);
-}
-
-
-Handle<DeclaredAccessorInfo> Factory::NewDeclaredAccessorInfo() {
- Handle<DeclaredAccessorInfo> info =
- Handle<DeclaredAccessorInfo>::cast(
- NewStruct(DECLARED_ACCESSOR_INFO_TYPE));
- info->set_flag(0); // Must clear the flag, it was initialized as undefined.
- return info;
-}
-
-
-Handle<ExecutableAccessorInfo> Factory::NewExecutableAccessorInfo() {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(
- NewStruct(EXECUTABLE_ACCESSOR_INFO_TYPE));
- info->set_flag(0); // Must clear the flag, it was initialized as undefined.
- return info;
-}
-
-
-Handle<Script> Factory::NewScript(Handle<String> source) {
- // Generate id for this script.
- int id;
- Heap* heap = isolate()->heap();
- if (heap->last_script_id()->IsUndefined()) {
- // Script ids start from one.
- id = 1;
- } else {
- // Increment id, wrap when positive smi is exhausted.
- id = Smi::cast(heap->last_script_id())->value();
- id++;
- if (!Smi::IsValid(id)) {
- id = 0;
- }
- }
- heap->SetLastScriptId(Smi::FromInt(id));
-
- // Create and initialize script object.
- Handle<Foreign> wrapper = NewForeign(0, TENURED);
- Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
- script->set_source(*source);
- script->set_name(heap->undefined_value());
- script->set_id(heap->last_script_id());
- script->set_line_offset(Smi::FromInt(0));
- script->set_column_offset(Smi::FromInt(0));
- script->set_data(heap->undefined_value());
- script->set_context_data(heap->undefined_value());
- script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
- script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
- script->set_compilation_state(
- Smi::FromInt(Script::COMPILATION_STATE_INITIAL));
- script->set_wrapper(*wrapper);
- script->set_line_ends(heap->undefined_value());
- script->set_eval_from_shared(heap->undefined_value());
- script->set_eval_from_instructions_offset(Smi::FromInt(0));
-
- return script;
-}
-
-
-Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateForeign(addr, pretenure),
- Foreign);
-}
-
-
-Handle<Foreign> Factory::NewForeign(const AccessorDescriptor* desc) {
- return NewForeign((Address) desc, TENURED);
-}
-
-
-Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
- ASSERT(0 <= length);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateByteArray(length, pretenure),
- ByteArray);
-}
-
-
-Handle<ExternalArray> Factory::NewExternalArray(int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure) {
- ASSERT(0 <= length);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateExternalArray(length,
- array_type,
- external_pointer,
- pretenure),
- ExternalArray);
-}
-
-
-Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSGlobalPropertyCell(*value),
- JSGlobalPropertyCell);
-}
-
-
-Handle<Map> Factory::NewMap(InstanceType type,
- int instance_size,
- ElementsKind elements_kind) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateMap(type, instance_size, elements_kind),
- Map);
-}
-
-
-Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFunctionPrototype(*function),
- JSObject);
-}
-
-
-Handle<Map> Factory::CopyWithPreallocatedFieldDescriptors(Handle<Map> src) {
- CALL_HEAP_FUNCTION(
- isolate(), src->CopyWithPreallocatedFieldDescriptors(), Map);
-}
-
-
-Handle<Map> Factory::CopyMap(Handle<Map> src,
- int extra_inobject_properties) {
- Handle<Map> copy = CopyWithPreallocatedFieldDescriptors(src);
- // Check that we do not overflow the instance size when adding the
- // extra inobject properties.
- int instance_size_delta = extra_inobject_properties * kPointerSize;
- int max_instance_size_delta =
- JSObject::kMaxInstanceSize - copy->instance_size();
- if (instance_size_delta > max_instance_size_delta) {
- // If the instance size overflows, we allocate as many properties
- // as we can as inobject properties.
- instance_size_delta = max_instance_size_delta;
- extra_inobject_properties = max_instance_size_delta >> kPointerSizeLog2;
- }
- // Adjust the map with the extra inobject properties.
- int inobject_properties =
- copy->inobject_properties() + extra_inobject_properties;
- copy->set_inobject_properties(inobject_properties);
- copy->set_unused_property_fields(inobject_properties);
- copy->set_instance_size(copy->instance_size() + instance_size_delta);
- copy->set_visitor_id(StaticVisitorBase::GetVisitorId(*copy));
- return copy;
-}
-
-
-Handle<Map> Factory::CopyMap(Handle<Map> src) {
- CALL_HEAP_FUNCTION(isolate(), src->Copy(), Map);
-}
-
-
-Handle<Map> Factory::GetElementsTransitionMap(
- Handle<JSObject> src,
- ElementsKind elements_kind) {
- Isolate* i = isolate();
- CALL_HEAP_FUNCTION(i,
- src->GetElementsTransitionMap(i, elements_kind),
- Map);
-}
-
-
-Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
- CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedArray);
-}
-
-
-Handle<FixedArray> Factory::CopySizeFixedArray(Handle<FixedArray> array,
- int new_length) {
- CALL_HEAP_FUNCTION(isolate(), array->CopySize(new_length), FixedArray);
-}
-
-
-Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
- Handle<FixedDoubleArray> array) {
- CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray);
-}
-
-
-Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> function_info,
- Handle<Map> function_map,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFunction(*function_map,
- *function_info,
- isolate()->heap()->the_hole_value(),
- pretenure),
- JSFunction);
-}
-
-
-Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> function_info,
- Handle<Context> context,
- PretenureFlag pretenure) {
- Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
- function_info,
- function_info->is_classic_mode()
- ? isolate()->function_map()
- : isolate()->strict_mode_function_map(),
- pretenure);
-
- if (function_info->ic_age() != isolate()->heap()->global_ic_age()) {
- function_info->ResetForNewContext(isolate()->heap()->global_ic_age());
- }
-
- result->set_context(*context);
-
- int index = function_info->SearchOptimizedCodeMap(context->native_context());
- if (!function_info->bound() && index < 0) {
- int number_of_literals = function_info->num_literals();
- Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
- if (number_of_literals > 0) {
- // Store the native context in the literals array prefix. This
- // context will be used when creating object, regexp and array
- // literals in this function.
- literals->set(JSFunction::kLiteralNativeContextIndex,
- context->native_context());
- }
- result->set_literals(*literals);
- }
-
- if (index > 0) {
- // Caching of optimized code enabled and optimized code found.
- function_info->InstallFromOptimizedCodeMap(*result, index);
- return result;
- }
-
- if (V8::UseCrankshaft() &&
- FLAG_always_opt &&
- result->is_compiled() &&
- !function_info->is_toplevel() &&
- function_info->allows_lazy_compilation() &&
- !function_info->optimization_disabled()) {
- result->MarkForLazyRecompilation();
- }
- return result;
-}
-
-
-Handle<Object> Factory::NewNumber(double value,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->NumberFromDouble(value, pretenure), Object);
-}
-
-
-Handle<Object> Factory::NewNumberFromInt(int32_t value,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->NumberFromInt32(value, pretenure), Object);
-}
-
-
-Handle<Object> Factory::NewNumberFromUint(uint32_t value,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->NumberFromUint32(value, pretenure), Object);
-}
-
-
-Handle<JSObject> Factory::NewNeanderObject() {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(
- isolate()->heap()->neander_map()),
- JSObject);
-}
-
-
-Handle<Object> Factory::NewTypeError(const char* type,
- Vector< Handle<Object> > args) {
- return NewError("MakeTypeError", type, args);
-}
-
-
-Handle<Object> Factory::NewTypeError(Handle<String> message) {
- return NewError("$TypeError", message);
-}
-
-
-Handle<Object> Factory::NewRangeError(const char* type,
- Vector< Handle<Object> > args) {
- return NewError("MakeRangeError", type, args);
-}
-
-
-Handle<Object> Factory::NewRangeError(Handle<String> message) {
- return NewError("$RangeError", message);
-}
-
-
-Handle<Object> Factory::NewSyntaxError(const char* type, Handle<JSArray> args) {
- return NewError("MakeSyntaxError", type, args);
-}
-
-
-Handle<Object> Factory::NewSyntaxError(Handle<String> message) {
- return NewError("$SyntaxError", message);
-}
-
-
-Handle<Object> Factory::NewReferenceError(const char* type,
- Vector< Handle<Object> > args) {
- return NewError("MakeReferenceError", type, args);
-}
-
-
-Handle<Object> Factory::NewReferenceError(Handle<String> message) {
- return NewError("$ReferenceError", message);
-}
-
-
-Handle<Object> Factory::NewError(const char* maker, const char* type,
- Vector< Handle<Object> > args) {
- v8::HandleScope scope; // Instantiate a closeable HandleScope for EscapeFrom.
- Handle<FixedArray> array = NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- array->set(i, *args[i]);
- }
- Handle<JSArray> object = NewJSArrayWithElements(array);
- Handle<Object> result = NewError(maker, type, object);
- return result.EscapeFrom(&scope);
-}
-
-
-Handle<Object> Factory::NewEvalError(const char* type,
- Vector< Handle<Object> > args) {
- return NewError("MakeEvalError", type, args);
-}
-
-
-Handle<Object> Factory::NewError(const char* type,
- Vector< Handle<Object> > args) {
- return NewError("MakeError", type, args);
-}
-
-
-Handle<String> Factory::EmergencyNewError(const char* type,
- Handle<JSArray> args) {
- const int kBufferSize = 1000;
- char buffer[kBufferSize];
- size_t space = kBufferSize;
- char* p = &buffer[0];
-
- Vector<char> v(buffer, kBufferSize);
- OS::StrNCpy(v, type, space);
- space -= Min(space, strlen(type));
- p = &buffer[kBufferSize] - space;
-
- for (unsigned i = 0; i < ARRAY_SIZE(args); i++) {
- if (space > 0) {
- *p++ = ' ';
- space--;
- if (space > 0) {
- MaybeObject* maybe_arg = args->GetElement(i);
- Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
- const char* arg = *arg_str->ToCString();
- Vector<char> v2(p, static_cast<int>(space));
- OS::StrNCpy(v2, arg, space);
- space -= Min(space, strlen(arg));
- p = &buffer[kBufferSize] - space;
- }
- }
- }
- if (space > 0) {
- *p = '\0';
- } else {
- buffer[kBufferSize - 1] = '\0';
- }
- Handle<String> error_string = NewStringFromUtf8(CStrVector(buffer), TENURED);
- return error_string;
-}
-
-
-Handle<Object> Factory::NewError(const char* maker,
- const char* type,
- Handle<JSArray> args) {
- Handle<String> make_str = InternalizeUtf8String(maker);
- Handle<Object> fun_obj(
- isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str),
- isolate());
- // If the builtins haven't been properly configured yet this error
- // constructor may not have been defined. Bail out.
- if (!fun_obj->IsJSFunction()) {
- return EmergencyNewError(type, args);
- }
- Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
- Handle<Object> type_obj = InternalizeUtf8String(type);
- Handle<Object> argv[] = { type_obj, args };
-
- // Invoke the JavaScript factory method. If an exception is thrown while
- // running the factory method, use the exception as the result.
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(fun,
- isolate()->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
- return result;
-}
-
-
-Handle<Object> Factory::NewError(Handle<String> message) {
- return NewError("$Error", message);
-}
-
-
-Handle<Object> Factory::NewError(const char* constructor,
- Handle<String> message) {
- Handle<String> constr = InternalizeUtf8String(constructor);
- Handle<JSFunction> fun = Handle<JSFunction>(
- JSFunction::cast(isolate()->js_builtins_object()->
- GetPropertyNoExceptionThrown(*constr)));
- Handle<Object> argv[] = { message };
-
- // Invoke the JavaScript factory method. If an exception is thrown while
- // running the factory method, use the exception as the result.
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(fun,
- isolate()->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
- return result;
-}
-
-
-Handle<JSFunction> Factory::NewFunction(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<Code> code,
- bool force_initial_map) {
- // Allocate the function
- Handle<JSFunction> function = NewFunction(name, the_hole_value());
-
- // Set up the code pointer in both the shared function info and in
- // the function itself.
- function->shared()->set_code(*code);
- function->set_code(*code);
-
- if (force_initial_map ||
- type != JS_OBJECT_TYPE ||
- instance_size != JSObject::kHeaderSize) {
- Handle<Map> initial_map = NewMap(type, instance_size);
- Handle<JSObject> prototype = NewFunctionPrototype(function);
- initial_map->set_prototype(*prototype);
- function->set_initial_map(*initial_map);
- initial_map->set_constructor(*function);
- } else {
- ASSERT(!function->has_initial_map());
- ASSERT(!function->has_prototype());
- }
-
- return function;
-}
-
-
-Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<JSObject> prototype,
- Handle<Code> code,
- bool force_initial_map) {
- // Allocate the function.
- Handle<JSFunction> function = NewFunction(name, prototype);
-
- // Set up the code pointer in both the shared function info and in
- // the function itself.
- function->shared()->set_code(*code);
- function->set_code(*code);
-
- if (force_initial_map ||
- type != JS_OBJECT_TYPE ||
- instance_size != JSObject::kHeaderSize) {
- Handle<Map> initial_map = NewMap(type,
- instance_size,
- GetInitialFastElementsKind());
- function->set_initial_map(*initial_map);
- initial_map->set_constructor(*function);
- }
-
- // Set function.prototype and give the prototype a constructor
- // property that refers to the function.
- SetPrototypeProperty(function, prototype);
- // Currently safe because it is only invoked from Genesis.
- CHECK_NOT_EMPTY_HANDLE(isolate(),
- JSObject::SetLocalPropertyIgnoreAttributes(
- prototype, constructor_string(),
- function, DONT_ENUM));
- return function;
-}
-
-
-Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
- Handle<Code> code) {
- Handle<JSFunction> function = NewFunctionWithoutPrototype(name,
- CLASSIC_MODE);
- function->shared()->set_code(*code);
- function->set_code(*code);
- ASSERT(!function->has_initial_map());
- ASSERT(!function->has_prototype());
- return function;
-}
-
-
-Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateScopeInfo(length),
- ScopeInfo);
-}
-
-
-Handle<JSObject> Factory::NewExternal(void* value) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateExternal(value),
- JSObject);
-}
-
-
-Handle<Code> Factory::NewCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_ref,
- bool immovable) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CreateCode(
- desc, flags, self_ref, immovable),
- Code);
-}
-
-
-Handle<Code> Factory::CopyCode(Handle<Code> code) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyCode(*code),
- Code);
-}
-
-
-Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyCode(*code, reloc_info),
- Code);
-}
-
-
-Handle<String> Factory::InternalizedStringFromString(Handle<String> value) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeString(*value), String);
-}
-
-
-Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObject(*constructor, pretenure), JSObject);
-}
-
-
-Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
- Handle<ScopeInfo> scope_info) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSModule(*context, *scope_info), JSModule);
-}
-
-
-Handle<GlobalObject> Factory::NewGlobalObject(
- Handle<JSFunction> constructor) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateGlobalObject(*constructor),
- GlobalObject);
-}
-
-
-
-Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(*map, pretenure),
- JSObject);
-}
-
-
-Handle<JSArray> Factory::NewJSArray(int capacity,
- ElementsKind elements_kind,
- PretenureFlag pretenure) {
- if (capacity != 0) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- }
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateJSArrayAndStorage(
- elements_kind,
- 0,
- capacity,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
- pretenure),
- JSArray);
-}
-
-
-Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
- ElementsKind elements_kind,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSArrayWithElements(*elements,
- elements_kind,
- elements->length(),
- pretenure),
- JSArray);
-}
-
-
-void Factory::SetElementsCapacityAndLength(Handle<JSArray> array,
- int capacity,
- int length) {
- ElementsAccessor* accessor = array->GetElementsAccessor();
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- accessor->SetCapacityAndLength(*array, capacity, length));
-}
-
-
-void Factory::SetContent(Handle<JSArray> array,
- Handle<FixedArrayBase> elements) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- array->SetContent(*elements));
-}
-
-
-void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- array->EnsureCanContainHeapObjectElements());
-}
-
-
-void Factory::EnsureCanContainElements(Handle<JSArray> array,
- Handle<FixedArrayBase> elements,
- uint32_t length,
- EnsureElementsMode mode) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- array->EnsureCanContainElements(*elements, length, mode));
-}
-
-
-Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
- Handle<Object> prototype) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSProxy(*handler, *prototype),
- JSProxy);
-}
-
-
-void Factory::BecomeJSObject(Handle<JSReceiver> object) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- isolate()->heap()->ReinitializeJSReceiver(
- *object, JS_OBJECT_TYPE, JSObject::kHeaderSize));
-}
-
-
-void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- isolate()->heap()->ReinitializeJSReceiver(
- *object, JS_FUNCTION_TYPE, JSFunction::kSize));
-}
-
-
-void Factory::SetIdentityHash(Handle<JSObject> object, Smi* hash) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- object->SetIdentityHash(hash, ALLOW_CREATION));
-}
-
-
-Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
- Handle<String> name,
- int number_of_literals,
- Handle<Code> code,
- Handle<ScopeInfo> scope_info) {
- Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
- shared->set_code(*code);
- shared->set_scope_info(*scope_info);
- int literals_array_size = number_of_literals;
- // If the function contains object, regexp or array literals,
- // allocate extra space for a literals array prefix containing the
- // context.
- if (number_of_literals > 0) {
- literals_array_size += JSFunction::kLiteralsPrefixSize;
- }
- shared->set_num_literals(literals_array_size);
- return shared;
-}
-
-
-Handle<JSMessageObject> Factory::NewJSMessageObject(
- Handle<String> type,
- Handle<JSArray> arguments,
- int start_position,
- int end_position,
- Handle<Object> script,
- Handle<Object> stack_trace,
- Handle<Object> stack_frames) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateJSMessageObject(*type,
- *arguments,
- start_position,
- end_position,
- *script,
- *stack_trace,
- *stack_frames),
- JSMessageObject);
-}
-
-Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateSharedFunctionInfo(*name),
- SharedFunctionInfo);
-}
-
-
-Handle<String> Factory::NumberToString(Handle<Object> number) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->NumberToString(*number), String);
-}
-
-
-Handle<String> Factory::Uint32ToString(uint32_t value) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->Uint32ToString(value), String);
-}
-
-
-Handle<SeededNumberDictionary> Factory::DictionaryAtNumberPut(
- Handle<SeededNumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(isolate(),
- dictionary->AtNumberPut(key, *value),
- SeededNumberDictionary);
-}
-
-
-Handle<UnseededNumberDictionary> Factory::DictionaryAtNumberPut(
- Handle<UnseededNumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(isolate(),
- dictionary->AtNumberPut(key, *value),
- UnseededNumberDictionary);
-}
-
-
-Handle<JSFunction> Factory::NewFunctionHelper(Handle<String> name,
- Handle<Object> prototype) {
- Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFunction(*isolate()->function_map(),
- *function_share,
- *prototype),
- JSFunction);
-}
-
-
-Handle<JSFunction> Factory::NewFunction(Handle<String> name,
- Handle<Object> prototype) {
- Handle<JSFunction> fun = NewFunctionHelper(name, prototype);
- fun->set_context(isolate()->context()->native_context());
- return fun;
-}
-
-
-Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
- Handle<String> name,
- LanguageMode language_mode) {
- Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
- Handle<Map> map = (language_mode == CLASSIC_MODE)
- ? isolate()->function_without_prototype_map()
- : isolate()->strict_mode_function_without_prototype_map();
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateFunction(
- *map,
- *function_share,
- *the_hole_value()),
- JSFunction);
-}
-
-
-Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
- Handle<String> name,
- LanguageMode language_mode) {
- Handle<JSFunction> fun =
- NewFunctionWithoutPrototypeHelper(name, language_mode);
- fun->set_context(isolate()->context()->native_context());
- return fun;
-}
-
-
-Handle<Object> Factory::ToObject(Handle<Object> object) {
- CALL_HEAP_FUNCTION(isolate(), object->ToObject(), Object);
-}
-
-
-Handle<Object> Factory::ToObject(Handle<Object> object,
- Handle<Context> native_context) {
- CALL_HEAP_FUNCTION(isolate(), object->ToObject(*native_context), Object);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
- // Get the original code of the function.
- Handle<Code> code(shared->code());
-
- // Create a copy of the code before allocating the debug info object to avoid
- // allocation while setting up the debug info object.
- Handle<Code> original_code(*Factory::CopyCode(code));
-
- // Allocate initial fixed array for active break points before allocating the
- // debug info object to avoid allocation while setting up the debug info
- // object.
- Handle<FixedArray> break_points(
- NewFixedArray(Debug::kEstimatedNofBreakPointsInFunction));
-
- // Create and set up the debug info object. Debug info contains function, a
- // copy of the original code, the executing code and initial fixed array for
- // active break points.
- Handle<DebugInfo> debug_info =
- Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE));
- debug_info->set_shared(*shared);
- debug_info->set_original_code(*original_code);
- debug_info->set_code(*code);
- debug_info->set_break_points(*break_points);
-
- // Link debug info to function.
- shared->set_debug_info(*debug_info);
-
- return debug_info;
-}
-#endif
-
-
-Handle<JSObject> Factory::NewArgumentsObject(Handle<Object> callee,
- int length) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateArgumentsObject(*callee, length), JSObject);
-}
-
-
-Handle<JSFunction> Factory::CreateApiFunction(
- Handle<FunctionTemplateInfo> obj, ApiInstanceType instance_type) {
- Handle<Code> code = isolate()->builtins()->HandleApiCall();
- Handle<Code> construct_stub = isolate()->builtins()->JSConstructStubApi();
-
- int internal_field_count = 0;
- bool has_external_resource = false;
- bool use_user_object_comparison = false;
-
- if (!obj->instance_template()->IsUndefined()) {
- Handle<ObjectTemplateInfo> instance_template =
- Handle<ObjectTemplateInfo>(
- ObjectTemplateInfo::cast(obj->instance_template()));
- internal_field_count =
- Smi::cast(instance_template->internal_field_count())->value();
- has_external_resource =
- !instance_template->has_external_resource()->IsUndefined();
- use_user_object_comparison =
- !instance_template->use_user_object_comparison()->IsUndefined();
- }
-
- int instance_size = kPointerSize * internal_field_count;
- if (has_external_resource) instance_size += kPointerSize;
-
- InstanceType type = INVALID_TYPE;
- switch (instance_type) {
- case JavaScriptObject:
- type = JS_OBJECT_TYPE;
- instance_size += JSObject::kHeaderSize;
- break;
- case InnerGlobalObject:
- type = JS_GLOBAL_OBJECT_TYPE;
- instance_size += JSGlobalObject::kSize;
- break;
- case OuterGlobalObject:
- type = JS_GLOBAL_PROXY_TYPE;
- instance_size += JSGlobalProxy::kSize;
- break;
- default:
- break;
- }
- ASSERT(type != INVALID_TYPE);
-
- Handle<JSFunction> result =
- NewFunction(Factory::empty_string(),
- type,
- instance_size,
- code,
- true);
-
- // Set length.
- result->shared()->set_length(obj->length());
-
- // Set class name.
- Handle<Object> class_name = Handle<Object>(obj->class_name(), isolate());
- if (class_name->IsString()) {
- result->shared()->set_instance_class_name(*class_name);
- result->shared()->set_name(*class_name);
- }
-
- Handle<Map> map = Handle<Map>(result->initial_map());
-
- // Mark as having external data object if needed
- if (has_external_resource) {
- map->set_has_external_resource(true);
- }
-
- // Mark as using user object comparison if needed
- if (use_user_object_comparison) {
- map->set_use_user_object_comparison(true);
- }
-
- // Mark as undetectable if needed.
- if (obj->undetectable()) {
- map->set_is_undetectable();
- }
-
- // Mark as hidden for the __proto__ accessor if needed.
- if (obj->hidden_prototype()) {
- map->set_is_hidden_prototype();
- }
-
- // Mark as needs_access_check if needed.
- if (obj->needs_access_check()) {
- map->set_is_access_check_needed(true);
- }
-
- // Set interceptor information in the map.
- if (!obj->named_property_handler()->IsUndefined()) {
- map->set_has_named_interceptor();
- InterceptorInfo *nph = InterceptorInfo::cast(obj->named_property_handler());
- bool is_fallback =
- nph->is_fallback()->IsUndefined()?false:nph->is_fallback()->value();
- map->set_named_interceptor_is_fallback(is_fallback);
- }
- if (!obj->indexed_property_handler()->IsUndefined()) {
- map->set_has_indexed_interceptor();
- }
-
- // Set instance call-as-function information in the map.
- if (!obj->instance_call_handler()->IsUndefined()) {
- map->set_has_instance_call_handler();
- }
-
- result->shared()->set_function_data(*obj);
- result->shared()->set_construct_stub(*construct_stub);
- result->shared()->DontAdaptArguments();
-
- // Recursively copy parent templates' accessors, 'data' may be modified.
- int max_number_of_additional_properties = 0;
- FunctionTemplateInfo* info = *obj;
- while (true) {
- Object* props = info->property_accessors();
- if (!props->IsUndefined()) {
- Handle<Object> props_handle(props, isolate());
- NeanderArray props_array(props_handle);
- max_number_of_additional_properties += props_array.length();
- }
- Object* parent = info->parent_template();
- if (parent->IsUndefined()) break;
- info = FunctionTemplateInfo::cast(parent);
- }
-
- Map::EnsureDescriptorSlack(map, max_number_of_additional_properties);
-
- while (true) {
- Handle<Object> props = Handle<Object>(obj->property_accessors(),
- isolate());
- if (!props->IsUndefined()) {
- Map::AppendCallbackDescriptors(map, props);
- }
- Handle<Object> parent = Handle<Object>(obj->parent_template(), isolate());
- if (parent->IsUndefined()) break;
- obj = Handle<FunctionTemplateInfo>::cast(parent);
- }
-
- ASSERT(result->shared()->IsApiFunction());
- return result;
-}
-
-
-Handle<MapCache> Factory::NewMapCache(int at_least_space_for) {
- CALL_HEAP_FUNCTION(isolate(),
- MapCache::Allocate(at_least_space_for), MapCache);
-}
-
-
-MUST_USE_RESULT static MaybeObject* UpdateMapCacheWith(Context* context,
- FixedArray* keys,
- Map* map) {
- Object* result;
- { MaybeObject* maybe_result =
- MapCache::cast(context->map_cache())->Put(keys, map);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- context->set_map_cache(MapCache::cast(result));
- return result;
-}
-
-
-Handle<MapCache> Factory::AddToMapCache(Handle<Context> context,
- Handle<FixedArray> keys,
- Handle<Map> map) {
- CALL_HEAP_FUNCTION(isolate(),
- UpdateMapCacheWith(*context, *keys, *map), MapCache);
-}
-
-
-Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
- Handle<FixedArray> keys) {
- if (context->map_cache()->IsUndefined()) {
- // Allocate the new map cache for the native context.
- Handle<MapCache> new_cache = NewMapCache(24);
- context->set_map_cache(*new_cache);
- }
- // Check to see whether there is a matching element in the cache.
- Handle<MapCache> cache =
- Handle<MapCache>(MapCache::cast(context->map_cache()));
- Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate());
- if (result->IsMap()) return Handle<Map>::cast(result);
- // Create a new map and add it to the cache.
- Handle<Map> map =
- CopyMap(Handle<Map>(context->object_function()->initial_map()),
- keys->length());
- AddToMapCache(context, keys, map);
- return Handle<Map>(map);
-}
-
-
-void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
- Handle<Object> data) {
- Handle<FixedArray> store = NewFixedArray(JSRegExp::kAtomDataSize);
-
- store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
- store->set(JSRegExp::kSourceIndex, *source);
- store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
- store->set(JSRegExp::kAtomPatternIndex, *data);
- regexp->set_data(*store);
-}
-
-void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
- int capture_count) {
- Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize);
- Smi* uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
- store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
- store->set(JSRegExp::kSourceIndex, *source);
- store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
- store->set(JSRegExp::kIrregexpASCIICodeIndex, uninitialized);
- store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
- store->set(JSRegExp::kIrregexpASCIICodeSavedIndex, uninitialized);
- store->set(JSRegExp::kIrregexpUC16CodeSavedIndex, uninitialized);
- store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
- store->set(JSRegExp::kIrregexpCaptureCountIndex,
- Smi::FromInt(capture_count));
- regexp->set_data(*store);
-}
-
-
-
-void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
- Handle<JSObject> instance,
- bool* pending_exception) {
- // Configure the instance by adding the properties specified by the
- // instance template.
- Handle<Object> instance_template(desc->instance_template(), isolate());
- if (!instance_template->IsUndefined()) {
- Execution::ConfigureInstance(instance,
- instance_template,
- pending_exception);
- } else {
- *pending_exception = false;
- }
-}
-
-
-Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
- Heap* h = isolate()->heap();
- if (name->Equals(h->undefined_string())) return undefined_value();
- if (name->Equals(h->nan_string())) return nan_value();
- if (name->Equals(h->infinity_string())) return infinity_value();
- return Handle<Object>::null();
-}
-
-
-Handle<Object> Factory::ToBoolean(bool value) {
- return value ? true_value() : false_value();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/factory.h b/src/3rdparty/v8/src/factory.h
deleted file mode 100644
index 3651d36..0000000
--- a/src/3rdparty/v8/src/factory.h
+++ /dev/null
@@ -1,533 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FACTORY_H_
-#define V8_FACTORY_H_
-
-#include "globals.h"
-#include "handles.h"
-#include "heap.h"
-
-namespace v8 {
-namespace internal {
-
-// Interface for handle based allocation.
-
-class Factory {
- public:
- // Allocate a new uninitialized fixed array.
- Handle<FixedArray> NewFixedArray(
- int size,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a new fixed array with non-existing entries (the hole).
- Handle<FixedArray> NewFixedArrayWithHoles(
- int size,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a new uninitialized fixed double array.
- Handle<FixedDoubleArray> NewFixedDoubleArray(
- int size,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<SeededNumberDictionary> NewSeededNumberDictionary(
- int at_least_space_for);
-
- Handle<UnseededNumberDictionary> NewUnseededNumberDictionary(
- int at_least_space_for);
-
- Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
-
- Handle<ObjectHashSet> NewObjectHashSet(int at_least_space_for);
-
- Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
-
- Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors,
- int slack = 0);
- Handle<DeoptimizationInputData> NewDeoptimizationInputData(
- int deopt_entry_count,
- PretenureFlag pretenure);
- Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
- int deopt_entry_count,
- PretenureFlag pretenure);
- // Allocates a pre-tenured empty AccessorPair.
- Handle<AccessorPair> NewAccessorPair();
-
- Handle<TypeFeedbackInfo> NewTypeFeedbackInfo();
-
- Handle<String> InternalizeUtf8String(Vector<const char> str);
- Handle<String> InternalizeUtf8String(const char* str) {
- return InternalizeUtf8String(CStrVector(str));
- }
- Handle<String> InternalizeString(Handle<String> str);
- Handle<String> InternalizeOneByteString(Vector<const uint8_t> str);
- Handle<String> InternalizeOneByteString(Handle<SeqOneByteString>,
- int from,
- int length);
- Handle<String> InternalizeTwoByteString(Vector<const uc16> str);
-
-
- // String creation functions. Most of the string creation functions take
- // a Heap::PretenureFlag argument to optionally request that they be
- // allocated in the old generation. The pretenure flag defaults to
- // DONT_TENURE.
- //
- // Creates a new String object. There are two String encodings: ASCII and
- // two byte. One should choose between the three string factory functions
- // based on the encoding of the string buffer that the string is
- // initialized from.
- // - ...FromAscii initializes the string from a buffer that is ASCII
- // encoded (it does not check that the buffer is ASCII encoded) and
- // the result will be ASCII encoded.
- // - ...FromUtf8 initializes the string from a buffer that is UTF-8
- // encoded. If the characters are all single-byte characters, the
- // result will be ASCII encoded, otherwise it will converted to two
- // byte.
- // - ...FromTwoByte initializes the string from a buffer that is two
- // byte encoded. If the characters are all single-byte characters,
- // the result will be converted to ASCII, otherwise it will be left as
- // two byte.
- //
- // ASCII strings are pretenured when used as keys in the SourceCodeCache.
- Handle<String> NewStringFromOneByte(
- Vector<const uint8_t> str,
- PretenureFlag pretenure = NOT_TENURED);
- // TODO(dcarney): remove this function.
- inline Handle<String> NewStringFromAscii(
- Vector<const char> str,
- PretenureFlag pretenure = NOT_TENURED) {
- return NewStringFromOneByte(Vector<const uint8_t>::cast(str), pretenure);
- }
-
- // UTF8 strings are pretenured when used for regexp literal patterns and
- // flags in the parser.
- Handle<String> NewStringFromUtf8(
- Vector<const char> str,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<String> NewStringFromTwoByte(
- Vector<const uc16> str,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates and partially initializes an ASCII or TwoByte String. The
- // characters of the string are uninitialized. Currently used in regexp code
- // only, where they are pretenured.
- Handle<SeqOneByteString> NewRawOneByteString(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
- Handle<SeqTwoByteString> NewRawTwoByteString(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Create a new cons string object which consists of a pair of strings.
- Handle<String> NewConsString(Handle<String> first,
- Handle<String> second);
-
- // Create a new string object which holds a substring of a string.
- Handle<String> NewSubString(Handle<String> str,
- int begin,
- int end);
-
- // Create a new string object which holds a proper substring of a string.
- Handle<String> NewProperSubString(Handle<String> str,
- int begin,
- int end);
-
- // Creates a new external String object. There are two String encodings
- // in the system: ASCII and two byte. Unlike other String types, it does
- // not make sense to have a UTF-8 factory function for external strings,
- // because we cannot change the underlying buffer.
- Handle<String> NewExternalStringFromAscii(
- const ExternalAsciiString::Resource* resource);
- Handle<String> NewExternalStringFromTwoByte(
- const ExternalTwoByteString::Resource* resource);
-
- // Create a symbol.
- Handle<Symbol> NewSymbol();
-
- // Create a global (but otherwise uninitialized) context.
- Handle<Context> NewNativeContext();
-
- // Create a global context.
- Handle<Context> NewGlobalContext(Handle<JSFunction> function,
- Handle<ScopeInfo> scope_info);
-
- // Create a module context.
- Handle<Context> NewModuleContext(Handle<ScopeInfo> scope_info);
-
- // Create a function context.
- Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function);
-
- // Create a catch context.
- Handle<Context> NewCatchContext(Handle<JSFunction> function,
- Handle<Context> previous,
- Handle<String> name,
- Handle<Object> thrown_object);
-
- // Create a 'with' context.
- Handle<Context> NewWithContext(Handle<JSFunction> function,
- Handle<Context> previous,
- Handle<JSObject> extension);
-
- // Create a block context.
- Handle<Context> NewBlockContext(Handle<JSFunction> function,
- Handle<Context> previous,
- Handle<ScopeInfo> scope_info);
-
- // Return the internalized version of the passed in string.
- Handle<String> InternalizedStringFromString(Handle<String> value);
-
- // Allocate a new struct. The struct is pretenured (allocated directly in
- // the old generation).
- Handle<Struct> NewStruct(InstanceType type);
-
- Handle<DeclaredAccessorInfo> NewDeclaredAccessorInfo();
-
- Handle<ExecutableAccessorInfo> NewExecutableAccessorInfo();
-
- Handle<Script> NewScript(Handle<String> source);
-
- // Foreign objects are pretenured when allocated by the bootstrapper.
- Handle<Foreign> NewForeign(Address addr,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a new foreign object. The foreign is pretenured (allocated
- // directly in the old generation).
- Handle<Foreign> NewForeign(const AccessorDescriptor* foreign);
-
- Handle<ByteArray> NewByteArray(int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<ExternalArray> NewExternalArray(
- int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
- Handle<Object> value);
-
- Handle<Map> NewMap(
- InstanceType type,
- int instance_size,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
-
- Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
-
- Handle<Map> CopyWithPreallocatedFieldDescriptors(Handle<Map> map);
-
- // Copy the map adding more inobject properties if possible without
- // overflowing the instance size.
- Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props);
- Handle<Map> CopyMap(Handle<Map> map);
-
- Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
- ElementsKind elements_kind);
-
- Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
-
- Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array,
- int new_length);
-
- Handle<FixedDoubleArray> CopyFixedDoubleArray(
- Handle<FixedDoubleArray> array);
-
- // Numbers (e.g. literals) are pretenured by the parser.
- Handle<Object> NewNumber(double value,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<Object> NewNumberFromInt(int32_t value,
- PretenureFlag pretenure = NOT_TENURED);
- Handle<Object> NewNumberFromUint(uint32_t value,
- PretenureFlag pretenure = NOT_TENURED);
-
- // These objects are used by the api to create env-independent data
- // structures in the heap.
- Handle<JSObject> NewNeanderObject();
-
- Handle<JSObject> NewArgumentsObject(Handle<Object> callee, int length);
-
- // JS objects are pretenured when allocated by the bootstrapper and
- // runtime.
- Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Global objects are pretenured.
- Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
-
- // JS objects are pretenured when allocated by the bootstrapper and
- // runtime.
- Handle<JSObject> NewJSObjectFromMap(Handle<Map> map,
- PretenureFlag pretenure = NOT_TENURED);
-
- // JS modules are pretenured.
- Handle<JSModule> NewJSModule(Handle<Context> context,
- Handle<ScopeInfo> scope_info);
-
- // JS arrays are pretenured when allocated by the parser.
- Handle<JSArray> NewJSArray(
- int capacity,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<JSArray> NewJSArrayWithElements(
- Handle<FixedArrayBase> elements,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
- PretenureFlag pretenure = NOT_TENURED);
-
- void SetElementsCapacityAndLength(Handle<JSArray> array,
- int capacity,
- int length);
-
- void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
-
- void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
- void EnsureCanContainElements(Handle<JSArray> array,
- Handle<FixedArrayBase> elements,
- uint32_t length,
- EnsureElementsMode mode);
-
- Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
-
- // Change the type of the argument into a JS object/function and reinitialize.
- void BecomeJSObject(Handle<JSReceiver> object);
- void BecomeJSFunction(Handle<JSReceiver> object);
-
- void SetIdentityHash(Handle<JSObject> object, Smi* hash);
-
- Handle<JSFunction> NewFunction(Handle<String> name,
- Handle<Object> prototype);
-
- Handle<JSFunction> NewFunctionWithoutPrototype(
- Handle<String> name,
- LanguageMode language_mode);
-
- Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
-
- Handle<JSFunction> BaseNewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> function_info,
- Handle<Map> function_map,
- PretenureFlag pretenure);
-
- Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> function_info,
- Handle<Context> context,
- PretenureFlag pretenure = TENURED);
-
- Handle<ScopeInfo> NewScopeInfo(int length);
-
- Handle<JSObject> NewExternal(void* value);
-
- Handle<Code> NewCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference,
- bool immovable = false);
-
- Handle<Code> CopyCode(Handle<Code> code);
-
- Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
-
- Handle<Object> ToObject(Handle<Object> object);
- Handle<Object> ToObject(Handle<Object> object,
- Handle<Context> native_context);
-
- // Interface for creating error objects.
-
- Handle<Object> NewError(const char* maker, const char* type,
- Handle<JSArray> args);
- Handle<String> EmergencyNewError(const char* type, Handle<JSArray> args);
- Handle<Object> NewError(const char* maker, const char* type,
- Vector< Handle<Object> > args);
- Handle<Object> NewError(const char* type,
- Vector< Handle<Object> > args);
- Handle<Object> NewError(Handle<String> message);
- Handle<Object> NewError(const char* constructor,
- Handle<String> message);
-
- Handle<Object> NewTypeError(const char* type,
- Vector< Handle<Object> > args);
- Handle<Object> NewTypeError(Handle<String> message);
-
- Handle<Object> NewRangeError(const char* type,
- Vector< Handle<Object> > args);
- Handle<Object> NewRangeError(Handle<String> message);
-
- Handle<Object> NewSyntaxError(const char* type, Handle<JSArray> args);
- Handle<Object> NewSyntaxError(Handle<String> message);
-
- Handle<Object> NewReferenceError(const char* type,
- Vector< Handle<Object> > args);
- Handle<Object> NewReferenceError(Handle<String> message);
-
- Handle<Object> NewEvalError(const char* type,
- Vector< Handle<Object> > args);
-
-
- Handle<JSFunction> NewFunction(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<Code> code,
- bool force_initial_map);
-
- Handle<JSFunction> NewFunction(Handle<Map> function_map,
- Handle<SharedFunctionInfo> shared, Handle<Object> prototype);
-
-
- Handle<JSFunction> NewFunctionWithPrototype(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<JSObject> prototype,
- Handle<Code> code,
- bool force_initial_map);
-
- Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
- Handle<Code> code);
-
- Handle<String> NumberToString(Handle<Object> number);
- Handle<String> Uint32ToString(uint32_t value);
-
- enum ApiInstanceType {
- JavaScriptObject,
- InnerGlobalObject,
- OuterGlobalObject
- };
-
- Handle<JSFunction> CreateApiFunction(
- Handle<FunctionTemplateInfo> data,
- ApiInstanceType type = JavaScriptObject);
-
- Handle<JSFunction> InstallMembers(Handle<JSFunction> function);
-
- // Installs interceptors on the instance. 'desc' is a function template,
- // and instance is an object instance created by the function of this
- // function template.
- void ConfigureInstance(Handle<FunctionTemplateInfo> desc,
- Handle<JSObject> instance,
- bool* pending_exception);
-
-#define ROOT_ACCESSOR(type, name, camel_name) \
- inline Handle<type> name() { \
- return Handle<type>(BitCast<type**>( \
- &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
- }
- ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR_ACCESSOR
-
-#define STRING_ACCESSOR(name, str) \
- inline Handle<String> name() { \
- return Handle<String>(BitCast<String**>( \
- &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
- }
- INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
-#undef STRING_ACCESSOR
-
- Handle<String> hidden_string() {
- return Handle<String>(&isolate()->heap()->hidden_string_);
- }
-
- Handle<SharedFunctionInfo> NewSharedFunctionInfo(
- Handle<String> name,
- int number_of_literals,
- Handle<Code> code,
- Handle<ScopeInfo> scope_info);
- Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
-
- Handle<JSMessageObject> NewJSMessageObject(
- Handle<String> type,
- Handle<JSArray> arguments,
- int start_position,
- int end_position,
- Handle<Object> script,
- Handle<Object> stack_trace,
- Handle<Object> stack_frames);
-
- Handle<SeededNumberDictionary> DictionaryAtNumberPut(
- Handle<SeededNumberDictionary>,
- uint32_t key,
- Handle<Object> value);
-
- Handle<UnseededNumberDictionary> DictionaryAtNumberPut(
- Handle<UnseededNumberDictionary>,
- uint32_t key,
- Handle<Object> value);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
-#endif
-
- // Return a map using the map cache in the native context.
- // The key the an ordered set of property names.
- Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
- Handle<FixedArray> keys);
-
- // Creates a new FixedArray that holds the data associated with the
- // atom regexp and stores it in the regexp.
- void SetRegExpAtomData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
- Handle<Object> match_pattern);
-
- // Creates a new FixedArray that holds the data associated with the
- // irregexp regexp and stores it in the regexp.
- void SetRegExpIrregexpData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
- int capture_count);
-
- // Returns the value for a known global constant (a property of the global
- // object which is neither configurable nor writable) like 'undefined'.
- // Returns a null handle when the given name is unknown.
- Handle<Object> GlobalConstantFor(Handle<String> name);
-
- // Converts the given boolean condition to JavaScript boolean value.
- Handle<Object> ToBoolean(bool value);
-
- private:
- Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
-
- Handle<JSFunction> NewFunctionHelper(Handle<String> name,
- Handle<Object> prototype);
-
- Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
- Handle<String> name,
- LanguageMode language_mode);
-
- // Create a new map cache.
- Handle<MapCache> NewMapCache(int at_least_space_for);
-
- // Update the map cache in the native context with (keys, map)
- Handle<MapCache> AddToMapCache(Handle<Context> context,
- Handle<FixedArray> keys,
- Handle<Map> map);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_FACTORY_H_
diff --git a/src/3rdparty/v8/src/fast-dtoa.cc b/src/3rdparty/v8/src/fast-dtoa.cc
deleted file mode 100644
index e62bd01..0000000
--- a/src/3rdparty/v8/src/fast-dtoa.cc
+++ /dev/null
@@ -1,738 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "checks.h"
-#include "utils.h"
-
-#include "fast-dtoa.h"
-
-#include "cached-powers.h"
-#include "diy-fp.h"
-#include "double.h"
-
-namespace v8 {
-namespace internal {
-
-// The minimal and maximal target exponent define the range of w's binary
-// exponent, where 'w' is the result of multiplying the input by a cached power
-// of ten.
-//
-// A different range might be chosen on a different platform, to optimize digit
-// generation, but a smaller range requires more powers of ten to be cached.
-static const int kMinimalTargetExponent = -60;
-static const int kMaximalTargetExponent = -32;
-
-
-// Adjusts the last digit of the generated number, and screens out generated
-// solutions that may be inaccurate. A solution may be inaccurate if it is
-// outside the safe interval, or if we ctannot prove that it is closer to the
-// input than a neighboring representation of the same length.
-//
-// Input: * buffer containing the digits of too_high / 10^kappa
-// * the buffer's length
-// * distance_too_high_w == (too_high - w).f() * unit
-// * unsafe_interval == (too_high - too_low).f() * unit
-// * rest = (too_high - buffer * 10^kappa).f() * unit
-// * ten_kappa = 10^kappa * unit
-// * unit = the common multiplier
-// Output: returns true if the buffer is guaranteed to contain the closest
-// representable number to the input.
-// Modifies the generated digits in the buffer to approach (round towards) w.
-static bool RoundWeed(Vector<char> buffer,
- int length,
- uint64_t distance_too_high_w,
- uint64_t unsafe_interval,
- uint64_t rest,
- uint64_t ten_kappa,
- uint64_t unit) {
- uint64_t small_distance = distance_too_high_w - unit;
- uint64_t big_distance = distance_too_high_w + unit;
- // Let w_low = too_high - big_distance, and
- // w_high = too_high - small_distance.
- // Note: w_low < w < w_high
- //
- // The real w (* unit) must lie somewhere inside the interval
- // ]w_low; w_high[ (often written as "(w_low; w_high)")
-
- // Basically the buffer currently contains a number in the unsafe interval
- // ]too_low; too_high[ with too_low < w < too_high
- //
- // too_high - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- // ^v 1 unit ^ ^ ^ ^
- // boundary_high --------------------- . . . .
- // ^v 1 unit . . . .
- // - - - - - - - - - - - - - - - - - - - + - - + - - - - - - . .
- // . . ^ . .
- // . big_distance . . .
- // . . . . rest
- // small_distance . . . .
- // v . . . .
- // w_high - - - - - - - - - - - - - - - - - - . . . .
- // ^v 1 unit . . . .
- // w ---------------------------------------- . . . .
- // ^v 1 unit v . . .
- // w_low - - - - - - - - - - - - - - - - - - - - - . . .
- // . . v
- // buffer --------------------------------------------------+-------+--------
- // . .
- // safe_interval .
- // v .
- // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - .
- // ^v 1 unit .
- // boundary_low ------------------------- unsafe_interval
- // ^v 1 unit v
- // too_low - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- //
- //
- // Note that the value of buffer could lie anywhere inside the range too_low
- // to too_high.
- //
- // boundary_low, boundary_high and w are approximations of the real boundaries
- // and v (the input number). They are guaranteed to be precise up to one unit.
- // In fact the error is guaranteed to be strictly less than one unit.
- //
- // Anything that lies outside the unsafe interval is guaranteed not to round
- // to v when read again.
- // Anything that lies inside the safe interval is guaranteed to round to v
- // when read again.
- // If the number inside the buffer lies inside the unsafe interval but not
- // inside the safe interval then we simply do not know and bail out (returning
- // false).
- //
- // Similarly we have to take into account the imprecision of 'w' when finding
- // the closest representation of 'w'. If we have two potential
- // representations, and one is closer to both w_low and w_high, then we know
- // it is closer to the actual value v.
- //
- // By generating the digits of too_high we got the largest (closest to
- // too_high) buffer that is still in the unsafe interval. In the case where
- // w_high < buffer < too_high we try to decrement the buffer.
- // This way the buffer approaches (rounds towards) w.
- // There are 3 conditions that stop the decrementation process:
- // 1) the buffer is already below w_high
- // 2) decrementing the buffer would make it leave the unsafe interval
- // 3) decrementing the buffer would yield a number below w_high and farther
- // away than the current number. In other words:
- // (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high
- // Instead of using the buffer directly we use its distance to too_high.
- // Conceptually rest ~= too_high - buffer
- // We need to do the following tests in this order to avoid over- and
- // underflows.
- ASSERT(rest <= unsafe_interval);
- while (rest < small_distance && // Negated condition 1
- unsafe_interval - rest >= ten_kappa && // Negated condition 2
- (rest + ten_kappa < small_distance || // buffer{-1} > w_high
- small_distance - rest >= rest + ten_kappa - small_distance)) {
- buffer[length - 1]--;
- rest += ten_kappa;
- }
-
- // We have approached w+ as much as possible. We now test if approaching w-
- // would require changing the buffer. If yes, then we have two possible
- // representations close to w, but we cannot decide which one is closer.
- if (rest < big_distance &&
- unsafe_interval - rest >= ten_kappa &&
- (rest + ten_kappa < big_distance ||
- big_distance - rest > rest + ten_kappa - big_distance)) {
- return false;
- }
-
- // Weeding test.
- // The safe interval is [too_low + 2 ulp; too_high - 2 ulp]
- // Since too_low = too_high - unsafe_interval this is equivalent to
- // [too_high - unsafe_interval + 4 ulp; too_high - 2 ulp]
- // Conceptually we have: rest ~= too_high - buffer
- return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit);
-}
-
-
-// Rounds the buffer upwards if the result is closer to v by possibly adding
-// 1 to the buffer. If the precision of the calculation is not sufficient to
-// round correctly, return false.
-// The rounding might shift the whole buffer in which case the kappa is
-// adjusted. For example "99", kappa = 3 might become "10", kappa = 4.
-//
-// If 2*rest > ten_kappa then the buffer needs to be round up.
-// rest can have an error of +/- 1 unit. This function accounts for the
-// imprecision and returns false, if the rounding direction cannot be
-// unambiguously determined.
-//
-// Precondition: rest < ten_kappa.
-static bool RoundWeedCounted(Vector<char> buffer,
- int length,
- uint64_t rest,
- uint64_t ten_kappa,
- uint64_t unit,
- int* kappa) {
- ASSERT(rest < ten_kappa);
- // The following tests are done in a specific order to avoid overflows. They
- // will work correctly with any uint64 values of rest < ten_kappa and unit.
- //
- // If the unit is too big, then we don't know which way to round. For example
- // a unit of 50 means that the real number lies within rest +/- 50. If
- // 10^kappa == 40 then there is no way to tell which way to round.
- if (unit >= ten_kappa) return false;
- // Even if unit is just half the size of 10^kappa we are already completely
- // lost. (And after the previous test we know that the expression will not
- // over/underflow.)
- if (ten_kappa - unit <= unit) return false;
- // If 2 * (rest + unit) <= 10^kappa we can safely round down.
- if ((ten_kappa - rest > rest) && (ten_kappa - 2 * rest >= 2 * unit)) {
- return true;
- }
- // If 2 * (rest - unit) >= 10^kappa, then we can safely round up.
- if ((rest > unit) && (ten_kappa - (rest - unit) <= (rest - unit))) {
- // Increment the last digit recursively until we find a non '9' digit.
- buffer[length - 1]++;
- for (int i = length - 1; i > 0; --i) {
- if (buffer[i] != '0' + 10) break;
- buffer[i] = '0';
- buffer[i - 1]++;
- }
- // If the first digit is now '0'+ 10 we had a buffer with all '9's. With the
- // exception of the first digit all digits are now '0'. Simply switch the
- // first digit to '1' and adjust the kappa. Example: "99" becomes "10" and
- // the power (the kappa) is increased.
- if (buffer[0] == '0' + 10) {
- buffer[0] = '1';
- (*kappa) += 1;
- }
- return true;
- }
- return false;
-}
-
-
-static const uint32_t kTen4 = 10000;
-static const uint32_t kTen5 = 100000;
-static const uint32_t kTen6 = 1000000;
-static const uint32_t kTen7 = 10000000;
-static const uint32_t kTen8 = 100000000;
-static const uint32_t kTen9 = 1000000000;
-
-// Returns the biggest power of ten that is less than or equal than the given
-// number. We furthermore receive the maximum number of bits 'number' has.
-// If number_bits == 0 then 0^-1 is returned
-// The number of bits must be <= 32.
-// Precondition: number < (1 << (number_bits + 1)).
-static void BiggestPowerTen(uint32_t number,
- int number_bits,
- uint32_t* power,
- int* exponent) {
- switch (number_bits) {
- case 32:
- case 31:
- case 30:
- if (kTen9 <= number) {
- *power = kTen9;
- *exponent = 9;
- break;
- } // else fallthrough
- case 29:
- case 28:
- case 27:
- if (kTen8 <= number) {
- *power = kTen8;
- *exponent = 8;
- break;
- } // else fallthrough
- case 26:
- case 25:
- case 24:
- if (kTen7 <= number) {
- *power = kTen7;
- *exponent = 7;
- break;
- } // else fallthrough
- case 23:
- case 22:
- case 21:
- case 20:
- if (kTen6 <= number) {
- *power = kTen6;
- *exponent = 6;
- break;
- } // else fallthrough
- case 19:
- case 18:
- case 17:
- if (kTen5 <= number) {
- *power = kTen5;
- *exponent = 5;
- break;
- } // else fallthrough
- case 16:
- case 15:
- case 14:
- if (kTen4 <= number) {
- *power = kTen4;
- *exponent = 4;
- break;
- } // else fallthrough
- case 13:
- case 12:
- case 11:
- case 10:
- if (1000 <= number) {
- *power = 1000;
- *exponent = 3;
- break;
- } // else fallthrough
- case 9:
- case 8:
- case 7:
- if (100 <= number) {
- *power = 100;
- *exponent = 2;
- break;
- } // else fallthrough
- case 6:
- case 5:
- case 4:
- if (10 <= number) {
- *power = 10;
- *exponent = 1;
- break;
- } // else fallthrough
- case 3:
- case 2:
- case 1:
- if (1 <= number) {
- *power = 1;
- *exponent = 0;
- break;
- } // else fallthrough
- case 0:
- *power = 0;
- *exponent = -1;
- break;
- default:
- // Following assignments are here to silence compiler warnings.
- *power = 0;
- *exponent = 0;
- UNREACHABLE();
- }
-}
-
-
-// Generates the digits of input number w.
-// w is a floating-point number (DiyFp), consisting of a significand and an
-// exponent. Its exponent is bounded by kMinimalTargetExponent and
-// kMaximalTargetExponent.
-// Hence -60 <= w.e() <= -32.
-//
-// Returns false if it fails, in which case the generated digits in the buffer
-// should not be used.
-// Preconditions:
-// * low, w and high are correct up to 1 ulp (unit in the last place). That
-// is, their error must be less than a unit of their last digits.
-// * low.e() == w.e() == high.e()
-// * low < w < high, and taking into account their error: low~ <= high~
-// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
-// Postconditions: returns false if procedure fails.
-// otherwise:
-// * buffer is not null-terminated, but len contains the number of digits.
-// * buffer contains the shortest possible decimal digit-sequence
-// such that LOW < buffer * 10^kappa < HIGH, where LOW and HIGH are the
-// correct values of low and high (without their error).
-// * if more than one decimal representation gives the minimal number of
-// decimal digits then the one closest to W (where W is the correct value
-// of w) is chosen.
-// Remark: this procedure takes into account the imprecision of its input
-// numbers. If the precision is not enough to guarantee all the postconditions
-// then false is returned. This usually happens rarely (~0.5%).
-//
-// Say, for the sake of example, that
-// w.e() == -48, and w.f() == 0x1234567890abcdef
-// w's value can be computed by w.f() * 2^w.e()
-// We can obtain w's integral digits by simply shifting w.f() by -w.e().
-// -> w's integral part is 0x1234
-// w's fractional part is therefore 0x567890abcdef.
-// Printing w's integral part is easy (simply print 0x1234 in decimal).
-// In order to print its fraction we repeatedly multiply the fraction by 10 and
-// get each digit. Example the first digit after the point would be computed by
-// (0x567890abcdef * 10) >> 48. -> 3
-// The whole thing becomes slightly more complicated because we want to stop
-// once we have enough digits. That is, once the digits inside the buffer
-// represent 'w' we can stop. Everything inside the interval low - high
-// represents w. However we have to pay attention to low, high and w's
-// imprecision.
-static bool DigitGen(DiyFp low,
- DiyFp w,
- DiyFp high,
- Vector<char> buffer,
- int* length,
- int* kappa) {
- ASSERT(low.e() == w.e() && w.e() == high.e());
- ASSERT(low.f() + 1 <= high.f() - 1);
- ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
- // low, w and high are imprecise, but by less than one ulp (unit in the last
- // place).
- // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
- // the new numbers are outside of the interval we want the final
- // representation to lie in.
- // Inversely adding (resp. removing) 1 ulp from low (resp. high) would yield
- // numbers that are certain to lie in the interval. We will use this fact
- // later on.
- // We will now start by generating the digits within the uncertain
- // interval. Later we will weed out representations that lie outside the safe
- // interval and thus _might_ lie outside the correct interval.
- uint64_t unit = 1;
- DiyFp too_low = DiyFp(low.f() - unit, low.e());
- DiyFp too_high = DiyFp(high.f() + unit, high.e());
- // too_low and too_high are guaranteed to lie outside the interval we want the
- // generated number in.
- DiyFp unsafe_interval = DiyFp::Minus(too_high, too_low);
- // We now cut the input number into two parts: the integral digits and the
- // fractionals. We will not write any decimal separator though, but adapt
- // kappa instead.
- // Reminder: we are currently computing the digits (stored inside the buffer)
- // such that: too_low < buffer * 10^kappa < too_high
- // We use too_high for the digit_generation and stop as soon as possible.
- // If we stop early we effectively round down.
- DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
- // Division by one is a shift.
- uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
- // Modulo by one is an and.
- uint64_t fractionals = too_high.f() & (one.f() - 1);
- uint32_t divisor;
- int divisor_exponent;
- BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
- &divisor, &divisor_exponent);
- *kappa = divisor_exponent + 1;
- *length = 0;
- // Loop invariant: buffer = too_high / 10^kappa (integer division)
- // The invariant holds for the first iteration: kappa has been initialized
- // with the divisor exponent + 1. And the divisor is the biggest power of ten
- // that is smaller than integrals.
- while (*kappa > 0) {
- int digit = integrals / divisor;
- buffer[*length] = '0' + digit;
- (*length)++;
- integrals %= divisor;
- (*kappa)--;
- // Note that kappa now equals the exponent of the divisor and that the
- // invariant thus holds again.
- uint64_t rest =
- (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
- // Invariant: too_high = buffer * 10^kappa + DiyFp(rest, one.e())
- // Reminder: unsafe_interval.e() == one.e()
- if (rest < unsafe_interval.f()) {
- // Rounding down (by not emitting the remaining digits) yields a number
- // that lies within the unsafe interval.
- return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
- unsafe_interval.f(), rest,
- static_cast<uint64_t>(divisor) << -one.e(), unit);
- }
- divisor /= 10;
- }
-
- // The integrals have been generated. We are at the point of the decimal
- // separator. In the following loop we simply multiply the remaining digits by
- // 10 and divide by one. We just need to pay attention to multiply associated
- // data (like the interval or 'unit'), too.
- // Note that the multiplication by 10 does not overflow, because w.e >= -60
- // and thus one.e >= -60.
- ASSERT(one.e() >= -60);
- ASSERT(fractionals < one.f());
- ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
- while (true) {
- fractionals *= 10;
- unit *= 10;
- unsafe_interval.set_f(unsafe_interval.f() * 10);
- // Integer division by one.
- int digit = static_cast<int>(fractionals >> -one.e());
- buffer[*length] = '0' + digit;
- (*length)++;
- fractionals &= one.f() - 1; // Modulo by one.
- (*kappa)--;
- if (fractionals < unsafe_interval.f()) {
- return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit,
- unsafe_interval.f(), fractionals, one.f(), unit);
- }
- }
-}
-
-
-
-// Generates (at most) requested_digits of input number w.
-// w is a floating-point number (DiyFp), consisting of a significand and an
-// exponent. Its exponent is bounded by kMinimalTargetExponent and
-// kMaximalTargetExponent.
-// Hence -60 <= w.e() <= -32.
-//
-// Returns false if it fails, in which case the generated digits in the buffer
-// should not be used.
-// Preconditions:
-// * w is correct up to 1 ulp (unit in the last place). That
-// is, its error must be strictly less than a unit of its last digit.
-// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
-//
-// Postconditions: returns false if procedure fails.
-// otherwise:
-// * buffer is not null-terminated, but length contains the number of
-// digits.
-// * the representation in buffer is the most precise representation of
-// requested_digits digits.
-// * buffer contains at most requested_digits digits of w. If there are less
-// than requested_digits digits then some trailing '0's have been removed.
-// * kappa is such that
-// w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2.
-//
-// Remark: This procedure takes into account the imprecision of its input
-// numbers. If the precision is not enough to guarantee all the postconditions
-// then false is returned. This usually happens rarely, but the failure-rate
-// increases with higher requested_digits.
-static bool DigitGenCounted(DiyFp w,
- int requested_digits,
- Vector<char> buffer,
- int* length,
- int* kappa) {
- ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
- ASSERT(kMinimalTargetExponent >= -60);
- ASSERT(kMaximalTargetExponent <= -32);
- // w is assumed to have an error less than 1 unit. Whenever w is scaled we
- // also scale its error.
- uint64_t w_error = 1;
- // We cut the input number into two parts: the integral digits and the
- // fractional digits. We don't emit any decimal separator, but adapt kappa
- // instead. Example: instead of writing "1.2" we put "12" into the buffer and
- // increase kappa by 1.
- DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
- // Division by one is a shift.
- uint32_t integrals = static_cast<uint32_t>(w.f() >> -one.e());
- // Modulo by one is an and.
- uint64_t fractionals = w.f() & (one.f() - 1);
- uint32_t divisor;
- int divisor_exponent;
- BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
- &divisor, &divisor_exponent);
- *kappa = divisor_exponent + 1;
- *length = 0;
-
- // Loop invariant: buffer = w / 10^kappa (integer division)
- // The invariant holds for the first iteration: kappa has been initialized
- // with the divisor exponent + 1. And the divisor is the biggest power of ten
- // that is smaller than 'integrals'.
- while (*kappa > 0) {
- int digit = integrals / divisor;
- buffer[*length] = '0' + digit;
- (*length)++;
- requested_digits--;
- integrals %= divisor;
- (*kappa)--;
- // Note that kappa now equals the exponent of the divisor and that the
- // invariant thus holds again.
- if (requested_digits == 0) break;
- divisor /= 10;
- }
-
- if (requested_digits == 0) {
- uint64_t rest =
- (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
- return RoundWeedCounted(buffer, *length, rest,
- static_cast<uint64_t>(divisor) << -one.e(), w_error,
- kappa);
- }
-
- // The integrals have been generated. We are at the point of the decimal
- // separator. In the following loop we simply multiply the remaining digits by
- // 10 and divide by one. We just need to pay attention to multiply associated
- // data (the 'unit'), too.
- // Note that the multiplication by 10 does not overflow, because w.e >= -60
- // and thus one.e >= -60.
- ASSERT(one.e() >= -60);
- ASSERT(fractionals < one.f());
- ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
- while (requested_digits > 0 && fractionals > w_error) {
- fractionals *= 10;
- w_error *= 10;
- // Integer division by one.
- int digit = static_cast<int>(fractionals >> -one.e());
- buffer[*length] = '0' + digit;
- (*length)++;
- requested_digits--;
- fractionals &= one.f() - 1; // Modulo by one.
- (*kappa)--;
- }
- if (requested_digits != 0) return false;
- return RoundWeedCounted(buffer, *length, fractionals, one.f(), w_error,
- kappa);
-}
-
-
-// Provides a decimal representation of v.
-// Returns true if it succeeds, otherwise the result cannot be trusted.
-// There will be *length digits inside the buffer (not null-terminated).
-// If the function returns true then
-// v == (double) (buffer * 10^decimal_exponent).
-// The digits in the buffer are the shortest representation possible: no
-// 0.09999999999999999 instead of 0.1. The shorter representation will even be
-// chosen even if the longer one would be closer to v.
-// The last digit will be closest to the actual v. That is, even if several
-// digits might correctly yield 'v' when read again, the closest will be
-// computed.
-static bool Grisu3(double v,
- Vector<char> buffer,
- int* length,
- int* decimal_exponent) {
- DiyFp w = Double(v).AsNormalizedDiyFp();
- // boundary_minus and boundary_plus are the boundaries between v and its
- // closest floating-point neighbors. Any number strictly between
- // boundary_minus and boundary_plus will round to v when convert to a double.
- // Grisu3 will never output representations that lie exactly on a boundary.
- DiyFp boundary_minus, boundary_plus;
- Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
- ASSERT(boundary_plus.e() == w.e());
- DiyFp ten_mk; // Cached power of ten: 10^-k
- int mk; // -k
- int ten_mk_minimal_binary_exponent =
- kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
- int ten_mk_maximal_binary_exponent =
- kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
- PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
- ten_mk_minimal_binary_exponent,
- ten_mk_maximal_binary_exponent,
- &ten_mk, &mk);
- ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize) &&
- (kMaximalTargetExponent >= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize));
- // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
- // 64 bit significand and ten_mk is thus only precise up to 64 bits.
-
- // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
- // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
- // off by a small amount.
- // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
- // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
- // (f-1) * 2^e < w*10^k < (f+1) * 2^e
- DiyFp scaled_w = DiyFp::Times(w, ten_mk);
- ASSERT(scaled_w.e() ==
- boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize);
- // In theory it would be possible to avoid some recomputations by computing
- // the difference between w and boundary_minus/plus (a power of 2) and to
- // compute scaled_boundary_minus/plus by subtracting/adding from
- // scaled_w. However the code becomes much less readable and the speed
- // enhancements are not terriffic.
- DiyFp scaled_boundary_minus = DiyFp::Times(boundary_minus, ten_mk);
- DiyFp scaled_boundary_plus = DiyFp::Times(boundary_plus, ten_mk);
-
- // DigitGen will generate the digits of scaled_w. Therefore we have
- // v == (double) (scaled_w * 10^-mk).
- // Set decimal_exponent == -mk and pass it to DigitGen. If scaled_w is not an
- // integer than it will be updated. For instance if scaled_w == 1.23 then
- // the buffer will be filled with "123" und the decimal_exponent will be
- // decreased by 2.
- int kappa;
- bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus,
- buffer, length, &kappa);
- *decimal_exponent = -mk + kappa;
- return result;
-}
-
-
-// The "counted" version of grisu3 (see above) only generates requested_digits
-// number of digits. This version does not generate the shortest representation,
-// and with enough requested digits 0.1 will at some point print as 0.9999999...
-// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and
-// therefore the rounding strategy for halfway cases is irrelevant.
-static bool Grisu3Counted(double v,
- int requested_digits,
- Vector<char> buffer,
- int* length,
- int* decimal_exponent) {
- DiyFp w = Double(v).AsNormalizedDiyFp();
- DiyFp ten_mk; // Cached power of ten: 10^-k
- int mk; // -k
- int ten_mk_minimal_binary_exponent =
- kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
- int ten_mk_maximal_binary_exponent =
- kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
- PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
- ten_mk_minimal_binary_exponent,
- ten_mk_maximal_binary_exponent,
- &ten_mk, &mk);
- ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize) &&
- (kMaximalTargetExponent >= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize));
- // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
- // 64 bit significand and ten_mk is thus only precise up to 64 bits.
-
- // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
- // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
- // off by a small amount.
- // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
- // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
- // (f-1) * 2^e < w*10^k < (f+1) * 2^e
- DiyFp scaled_w = DiyFp::Times(w, ten_mk);
-
- // We now have (double) (scaled_w * 10^-mk).
- // DigitGen will generate the first requested_digits digits of scaled_w and
- // return together with a kappa such that scaled_w ~= buffer * 10^kappa. (It
- // will not always be exactly the same since DigitGenCounted only produces a
- // limited number of digits.)
- int kappa;
- bool result = DigitGenCounted(scaled_w, requested_digits,
- buffer, length, &kappa);
- *decimal_exponent = -mk + kappa;
- return result;
-}
-
-
-bool FastDtoa(double v,
- FastDtoaMode mode,
- int requested_digits,
- Vector<char> buffer,
- int* length,
- int* decimal_point) {
- ASSERT(v > 0);
- ASSERT(!Double(v).IsSpecial());
-
- bool result = false;
- int decimal_exponent = 0;
- switch (mode) {
- case FAST_DTOA_SHORTEST:
- result = Grisu3(v, buffer, length, &decimal_exponent);
- break;
- case FAST_DTOA_PRECISION:
- result = Grisu3Counted(v, requested_digits,
- buffer, length, &decimal_exponent);
- break;
- default:
- UNREACHABLE();
- }
- if (result) {
- *decimal_point = *length + decimal_exponent;
- buffer[*length] = '\0';
- }
- return result;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/fast-dtoa.h b/src/3rdparty/v8/src/fast-dtoa.h
deleted file mode 100644
index ef28557..0000000
--- a/src/3rdparty/v8/src/fast-dtoa.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FAST_DTOA_H_
-#define V8_FAST_DTOA_H_
-
-namespace v8 {
-namespace internal {
-
-enum FastDtoaMode {
- // Computes the shortest representation of the given input. The returned
- // result will be the most accurate number of this length. Longer
- // representations might be more accurate.
- FAST_DTOA_SHORTEST,
- // Computes a representation where the precision (number of digits) is
- // given as input. The precision is independent of the decimal point.
- FAST_DTOA_PRECISION
-};
-
-// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
-// include the terminating '\0' character.
-const int kFastDtoaMaximalLength = 17;
-
-// Provides a decimal representation of v.
-// The result should be interpreted as buffer * 10^(point - length).
-//
-// Precondition:
-// * v must be a strictly positive finite double.
-//
-// Returns true if it succeeds, otherwise the result can not be trusted.
-// There will be *length digits inside the buffer followed by a null terminator.
-// If the function returns true and mode equals
-// - FAST_DTOA_SHORTEST, then
-// the parameter requested_digits is ignored.
-// The result satisfies
-// v == (double) (buffer * 10^(point - length)).
-// The digits in the buffer are the shortest representation possible. E.g.
-// if 0.099999999999 and 0.1 represent the same double then "1" is returned
-// with point = 0.
-// The last digit will be closest to the actual v. That is, even if several
-// digits might correctly yield 'v' when read again, the buffer will contain
-// the one closest to v.
-// - FAST_DTOA_PRECISION, then
-// the buffer contains requested_digits digits.
-// the difference v - (buffer * 10^(point-length)) is closest to zero for
-// all possible representations of requested_digits digits.
-// If there are two values that are equally close, then FastDtoa returns
-// false.
-// For both modes the buffer must be large enough to hold the result.
-bool FastDtoa(double d,
- FastDtoaMode mode,
- int requested_digits,
- Vector<char> buffer,
- int* length,
- int* decimal_point);
-
-} } // namespace v8::internal
-
-#endif // V8_FAST_DTOA_H_
diff --git a/src/3rdparty/v8/src/fixed-dtoa.cc b/src/3rdparty/v8/src/fixed-dtoa.cc
deleted file mode 100644
index 1fd974c..0000000
--- a/src/3rdparty/v8/src/fixed-dtoa.cc
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <math.h>
-
-#include "../include/v8stdint.h"
-#include "checks.h"
-#include "utils.h"
-
-#include "double.h"
-#include "fixed-dtoa.h"
-
-namespace v8 {
-namespace internal {
-
-// Represents a 128bit type. This class should be replaced by a native type on
-// platforms that support 128bit integers.
-class UInt128 {
- public:
- UInt128() : high_bits_(0), low_bits_(0) { }
- UInt128(uint64_t high, uint64_t low) : high_bits_(high), low_bits_(low) { }
-
- void Multiply(uint32_t multiplicand) {
- uint64_t accumulator;
-
- accumulator = (low_bits_ & kMask32) * multiplicand;
- uint32_t part = static_cast<uint32_t>(accumulator & kMask32);
- accumulator >>= 32;
- accumulator = accumulator + (low_bits_ >> 32) * multiplicand;
- low_bits_ = (accumulator << 32) + part;
- accumulator >>= 32;
- accumulator = accumulator + (high_bits_ & kMask32) * multiplicand;
- part = static_cast<uint32_t>(accumulator & kMask32);
- accumulator >>= 32;
- accumulator = accumulator + (high_bits_ >> 32) * multiplicand;
- high_bits_ = (accumulator << 32) + part;
- ASSERT((accumulator >> 32) == 0);
- }
-
- void Shift(int shift_amount) {
- ASSERT(-64 <= shift_amount && shift_amount <= 64);
- if (shift_amount == 0) {
- return;
- } else if (shift_amount == -64) {
- high_bits_ = low_bits_;
- low_bits_ = 0;
- } else if (shift_amount == 64) {
- low_bits_ = high_bits_;
- high_bits_ = 0;
- } else if (shift_amount <= 0) {
- high_bits_ <<= -shift_amount;
- high_bits_ += low_bits_ >> (64 + shift_amount);
- low_bits_ <<= -shift_amount;
- } else {
- low_bits_ >>= shift_amount;
- low_bits_ += high_bits_ << (64 - shift_amount);
- high_bits_ >>= shift_amount;
- }
- }
-
- // Modifies *this to *this MOD (2^power).
- // Returns *this DIV (2^power).
- int DivModPowerOf2(int power) {
- if (power >= 64) {
- int result = static_cast<int>(high_bits_ >> (power - 64));
- high_bits_ -= static_cast<uint64_t>(result) << (power - 64);
- return result;
- } else {
- uint64_t part_low = low_bits_ >> power;
- uint64_t part_high = high_bits_ << (64 - power);
- int result = static_cast<int>(part_low + part_high);
- high_bits_ = 0;
- low_bits_ -= part_low << power;
- return result;
- }
- }
-
- bool IsZero() const {
- return high_bits_ == 0 && low_bits_ == 0;
- }
-
- int BitAt(int position) {
- if (position >= 64) {
- return static_cast<int>(high_bits_ >> (position - 64)) & 1;
- } else {
- return static_cast<int>(low_bits_ >> position) & 1;
- }
- }
-
- private:
- static const uint64_t kMask32 = 0xFFFFFFFF;
- // Value == (high_bits_ << 64) + low_bits_
- uint64_t high_bits_;
- uint64_t low_bits_;
-};
-
-
-static const int kDoubleSignificandSize = 53; // Includes the hidden bit.
-
-
-static void FillDigits32FixedLength(uint32_t number, int requested_length,
- Vector<char> buffer, int* length) {
- for (int i = requested_length - 1; i >= 0; --i) {
- buffer[(*length) + i] = '0' + number % 10;
- number /= 10;
- }
- *length += requested_length;
-}
-
-
-static void FillDigits32(uint32_t number, Vector<char> buffer, int* length) {
- int number_length = 0;
- // We fill the digits in reverse order and exchange them afterwards.
- while (number != 0) {
- int digit = number % 10;
- number /= 10;
- buffer[(*length) + number_length] = '0' + digit;
- number_length++;
- }
- // Exchange the digits.
- int i = *length;
- int j = *length + number_length - 1;
- while (i < j) {
- char tmp = buffer[i];
- buffer[i] = buffer[j];
- buffer[j] = tmp;
- i++;
- j--;
- }
- *length += number_length;
-}
-
-
-static void FillDigits64FixedLength(uint64_t number, int requested_length,
- Vector<char> buffer, int* length) {
- const uint32_t kTen7 = 10000000;
- // For efficiency cut the number into 3 uint32_t parts, and print those.
- uint32_t part2 = static_cast<uint32_t>(number % kTen7);
- number /= kTen7;
- uint32_t part1 = static_cast<uint32_t>(number % kTen7);
- uint32_t part0 = static_cast<uint32_t>(number / kTen7);
-
- FillDigits32FixedLength(part0, 3, buffer, length);
- FillDigits32FixedLength(part1, 7, buffer, length);
- FillDigits32FixedLength(part2, 7, buffer, length);
-}
-
-
-static void FillDigits64(uint64_t number, Vector<char> buffer, int* length) {
- const uint32_t kTen7 = 10000000;
- // For efficiency cut the number into 3 uint32_t parts, and print those.
- uint32_t part2 = static_cast<uint32_t>(number % kTen7);
- number /= kTen7;
- uint32_t part1 = static_cast<uint32_t>(number % kTen7);
- uint32_t part0 = static_cast<uint32_t>(number / kTen7);
-
- if (part0 != 0) {
- FillDigits32(part0, buffer, length);
- FillDigits32FixedLength(part1, 7, buffer, length);
- FillDigits32FixedLength(part2, 7, buffer, length);
- } else if (part1 != 0) {
- FillDigits32(part1, buffer, length);
- FillDigits32FixedLength(part2, 7, buffer, length);
- } else {
- FillDigits32(part2, buffer, length);
- }
-}
-
-
-static void RoundUp(Vector<char> buffer, int* length, int* decimal_point) {
- // An empty buffer represents 0.
- if (*length == 0) {
- buffer[0] = '1';
- *decimal_point = 1;
- *length = 1;
- return;
- }
- // Round the last digit until we either have a digit that was not '9' or until
- // we reached the first digit.
- buffer[(*length) - 1]++;
- for (int i = (*length) - 1; i > 0; --i) {
- if (buffer[i] != '0' + 10) {
- return;
- }
- buffer[i] = '0';
- buffer[i - 1]++;
- }
- // If the first digit is now '0' + 10, we would need to set it to '0' and add
- // a '1' in front. However we reach the first digit only if all following
- // digits had been '9' before rounding up. Now all trailing digits are '0' and
- // we simply switch the first digit to '1' and update the decimal-point
- // (indicating that the point is now one digit to the right).
- if (buffer[0] == '0' + 10) {
- buffer[0] = '1';
- (*decimal_point)++;
- }
-}
-
-
-// The given fractionals number represents a fixed-point number with binary
-// point at bit (-exponent).
-// Preconditions:
-// -128 <= exponent <= 0.
-// 0 <= fractionals * 2^exponent < 1
-// The buffer holds the result.
-// The function will round its result. During the rounding-process digits not
-// generated by this function might be updated, and the decimal-point variable
-// might be updated. If this function generates the digits 99 and the buffer
-// already contained "199" (thus yielding a buffer of "19999") then a
-// rounding-up will change the contents of the buffer to "20000".
-static void FillFractionals(uint64_t fractionals, int exponent,
- int fractional_count, Vector<char> buffer,
- int* length, int* decimal_point) {
- ASSERT(-128 <= exponent && exponent <= 0);
- // 'fractionals' is a fixed-point number, with binary point at bit
- // (-exponent). Inside the function the non-converted remainder of fractionals
- // is a fixed-point number, with binary point at bit 'point'.
- if (-exponent <= 64) {
- // One 64 bit number is sufficient.
- ASSERT(fractionals >> 56 == 0);
- int point = -exponent;
- for (int i = 0; i < fractional_count; ++i) {
- if (fractionals == 0) break;
- // Instead of multiplying by 10 we multiply by 5 and adjust the point
- // location. This way the fractionals variable will not overflow.
- // Invariant at the beginning of the loop: fractionals < 2^point.
- // Initially we have: point <= 64 and fractionals < 2^56
- // After each iteration the point is decremented by one.
- // Note that 5^3 = 125 < 128 = 2^7.
- // Therefore three iterations of this loop will not overflow fractionals
- // (even without the subtraction at the end of the loop body). At this
- // time point will satisfy point <= 61 and therefore fractionals < 2^point
- // and any further multiplication of fractionals by 5 will not overflow.
- fractionals *= 5;
- point--;
- int digit = static_cast<int>(fractionals >> point);
- buffer[*length] = '0' + digit;
- (*length)++;
- fractionals -= static_cast<uint64_t>(digit) << point;
- }
- // If the first bit after the point is set we have to round up.
- if (((fractionals >> (point - 1)) & 1) == 1) {
- RoundUp(buffer, length, decimal_point);
- }
- } else { // We need 128 bits.
- ASSERT(64 < -exponent && -exponent <= 128);
- UInt128 fractionals128 = UInt128(fractionals, 0);
- fractionals128.Shift(-exponent - 64);
- int point = 128;
- for (int i = 0; i < fractional_count; ++i) {
- if (fractionals128.IsZero()) break;
- // As before: instead of multiplying by 10 we multiply by 5 and adjust the
- // point location.
- // This multiplication will not overflow for the same reasons as before.
- fractionals128.Multiply(5);
- point--;
- int digit = fractionals128.DivModPowerOf2(point);
- buffer[*length] = '0' + digit;
- (*length)++;
- }
- if (fractionals128.BitAt(point - 1) == 1) {
- RoundUp(buffer, length, decimal_point);
- }
- }
-}
-
-
-// Removes leading and trailing zeros.
-// If leading zeros are removed then the decimal point position is adjusted.
-static void TrimZeros(Vector<char> buffer, int* length, int* decimal_point) {
- while (*length > 0 && buffer[(*length) - 1] == '0') {
- (*length)--;
- }
- int first_non_zero = 0;
- while (first_non_zero < *length && buffer[first_non_zero] == '0') {
- first_non_zero++;
- }
- if (first_non_zero != 0) {
- for (int i = first_non_zero; i < *length; ++i) {
- buffer[i - first_non_zero] = buffer[i];
- }
- *length -= first_non_zero;
- *decimal_point -= first_non_zero;
- }
-}
-
-
-bool FastFixedDtoa(double v,
- int fractional_count,
- Vector<char> buffer,
- int* length,
- int* decimal_point) {
- const uint32_t kMaxUInt32 = 0xFFFFFFFF;
- uint64_t significand = Double(v).Significand();
- int exponent = Double(v).Exponent();
- // v = significand * 2^exponent (with significand a 53bit integer).
- // If the exponent is larger than 20 (i.e. we may have a 73bit number) then we
- // don't know how to compute the representation. 2^73 ~= 9.5*10^21.
- // If necessary this limit could probably be increased, but we don't need
- // more.
- if (exponent > 20) return false;
- if (fractional_count > 20) return false;
- *length = 0;
- // At most kDoubleSignificandSize bits of the significand are non-zero.
- // Given a 64 bit integer we have 11 0s followed by 53 potentially non-zero
- // bits: 0..11*..0xxx..53*..xx
- if (exponent + kDoubleSignificandSize > 64) {
- // The exponent must be > 11.
- //
- // We know that v = significand * 2^exponent.
- // And the exponent > 11.
- // We simplify the task by dividing v by 10^17.
- // The quotient delivers the first digits, and the remainder fits into a 64
- // bit number.
- // Dividing by 10^17 is equivalent to dividing by 5^17*2^17.
- const uint64_t kFive17 = V8_2PART_UINT64_C(0xB1, A2BC2EC5); // 5^17
- uint64_t divisor = kFive17;
- int divisor_power = 17;
- uint64_t dividend = significand;
- uint32_t quotient;
- uint64_t remainder;
- // Let v = f * 2^e with f == significand and e == exponent.
- // Then need q (quotient) and r (remainder) as follows:
- // v = q * 10^17 + r
- // f * 2^e = q * 10^17 + r
- // f * 2^e = q * 5^17 * 2^17 + r
- // If e > 17 then
- // f * 2^(e-17) = q * 5^17 + r/2^17
- // else
- // f = q * 5^17 * 2^(17-e) + r/2^e
- if (exponent > divisor_power) {
- // We only allow exponents of up to 20 and therefore (17 - e) <= 3
- dividend <<= exponent - divisor_power;
- quotient = static_cast<uint32_t>(dividend / divisor);
- remainder = (dividend % divisor) << divisor_power;
- } else {
- divisor <<= divisor_power - exponent;
- quotient = static_cast<uint32_t>(dividend / divisor);
- remainder = (dividend % divisor) << exponent;
- }
- FillDigits32(quotient, buffer, length);
- FillDigits64FixedLength(remainder, divisor_power, buffer, length);
- *decimal_point = *length;
- } else if (exponent >= 0) {
- // 0 <= exponent <= 11
- significand <<= exponent;
- FillDigits64(significand, buffer, length);
- *decimal_point = *length;
- } else if (exponent > -kDoubleSignificandSize) {
- // We have to cut the number.
- uint64_t integrals = significand >> -exponent;
- uint64_t fractionals = significand - (integrals << -exponent);
- if (integrals > kMaxUInt32) {
- FillDigits64(integrals, buffer, length);
- } else {
- FillDigits32(static_cast<uint32_t>(integrals), buffer, length);
- }
- *decimal_point = *length;
- FillFractionals(fractionals, exponent, fractional_count,
- buffer, length, decimal_point);
- } else if (exponent < -128) {
- // This configuration (with at most 20 digits) means that all digits must be
- // 0.
- ASSERT(fractional_count <= 20);
- buffer[0] = '\0';
- *length = 0;
- *decimal_point = -fractional_count;
- } else {
- *decimal_point = 0;
- FillFractionals(significand, exponent, fractional_count,
- buffer, length, decimal_point);
- }
- TrimZeros(buffer, length, decimal_point);
- buffer[*length] = '\0';
- if ((*length) == 0) {
- // The string is empty and the decimal_point thus has no importance. Mimick
- // Gay's dtoa and and set it to -fractional_count.
- *decimal_point = -fractional_count;
- }
- return true;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/fixed-dtoa.h b/src/3rdparty/v8/src/fixed-dtoa.h
deleted file mode 100644
index 93f826f..0000000
--- a/src/3rdparty/v8/src/fixed-dtoa.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FIXED_DTOA_H_
-#define V8_FIXED_DTOA_H_
-
-namespace v8 {
-namespace internal {
-
-// Produces digits necessary to print a given number with
-// 'fractional_count' digits after the decimal point.
-// The buffer must be big enough to hold the result plus one terminating null
-// character.
-//
-// The produced digits might be too short in which case the caller has to fill
-// the gaps with '0's.
-// Example: FastFixedDtoa(0.001, 5, ...) is allowed to return buffer = "1", and
-// decimal_point = -2.
-// Halfway cases are rounded towards +/-Infinity (away from 0). The call
-// FastFixedDtoa(0.15, 2, ...) thus returns buffer = "2", decimal_point = 0.
-// The returned buffer may contain digits that would be truncated from the
-// shortest representation of the input.
-//
-// This method only works for some parameters. If it can't handle the input it
-// returns false. The output is null-terminated when the function succeeds.
-bool FastFixedDtoa(double v, int fractional_count,
- Vector<char> buffer, int* length, int* decimal_point);
-
-} } // namespace v8::internal
-
-#endif // V8_FIXED_DTOA_H_
diff --git a/src/3rdparty/v8/src/flag-definitions.h b/src/3rdparty/v8/src/flag-definitions.h
deleted file mode 100644
index b4184ff..0000000
--- a/src/3rdparty/v8/src/flag-definitions.h
+++ /dev/null
@@ -1,764 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file defines all of the flags. It is separated into different section,
-// for Debug, Release, Logging and Profiling, etc. To add a new flag, find the
-// correct section, and use one of the DEFINE_ macros, without a trailing ';'.
-//
-// This include does not have a guard, because it is a template-style include,
-// which can be included multiple times in different modes. It expects to have
-// a mode defined before it's included. The modes are FLAG_MODE_... below:
-
-// We want to declare the names of the variables for the header file. Normally
-// this will just be an extern declaration, but for a readonly flag we let the
-// compiler make better optimizations by giving it the value.
-#if defined(FLAG_MODE_DECLARE)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
- extern ctype FLAG_##nam;
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
- static ctype const FLAG_##nam = def;
-#define DEFINE_implication(whenflag, thenflag)
-
-// We want to supply the actual storage and value for the flag variable in the
-// .cc file. We only do this for writable flags.
-#elif defined(FLAG_MODE_DEFINE)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
- ctype FLAG_##nam = def;
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
-
-// We need to define all of our default values so that the Flag structure can
-// access them by pointer. These are just used internally inside of one .cc,
-// for MODE_META, so there is no impact on the flags interface.
-#elif defined(FLAG_MODE_DEFINE_DEFAULTS)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
- static ctype const FLAGDEFAULT_##nam = def;
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
-
-// We want to write entries into our meta data table, for internal parsing and
-// printing / etc in the flag parser code. We only do this for writable flags.
-#elif defined(FLAG_MODE_META)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
- { Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false },
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
-
-// We produce the code to set flags when it is implied by another flag.
-#elif defined(FLAG_MODE_DEFINE_IMPLICATIONS)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt)
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag) \
- if (FLAG_##whenflag) FLAG_##thenflag = true;
-
-#else
-#error No mode supplied when including flags.defs
-#endif
-
-#ifdef FLAG_MODE_DECLARE
-// Structure used to hold a collection of arguments to the JavaScript code.
-#define JSARGUMENTS_INIT {{}}
-struct JSArguments {
-public:
- inline int argc() const {
- return static_cast<int>(storage_[0]);
- }
- inline const char** argv() const {
- return reinterpret_cast<const char**>(storage_[1]);
- }
- inline const char*& operator[] (int idx) const {
- return argv()[idx];
- }
- inline JSArguments& operator=(JSArguments args) {
- set_argc(args.argc());
- set_argv(args.argv());
- return *this;
- }
- static JSArguments Create(int argc, const char** argv) {
- JSArguments args;
- args.set_argc(argc);
- args.set_argv(argv);
- return args;
- }
-private:
- void set_argc(int argc) {
- storage_[0] = argc;
- }
- void set_argv(const char** argv) {
- storage_[1] = reinterpret_cast<AtomicWord>(argv);
- }
-public:
- // Contains argc and argv. Unfortunately we have to store these two fields
- // into a single one to avoid making the initialization macro (which would be
- // "{ 0, NULL }") contain a coma.
- AtomicWord storage_[2];
-};
-#endif
-
-#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
-#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
-#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
-#define DEFINE_string(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
-#define DEFINE_args(nam, def, cmt) FLAG(ARGS, JSArguments, nam, def, cmt)
-
-//
-// Flags in all modes.
-//
-#define FLAG FLAG_FULL
-
-// Flags for language modes and experimental language features.
-DEFINE_bool(use_strict, false, "enforce strict mode")
-DEFINE_bool(es5_readonly, true,
- "activate correct semantics for inheriting readonliness")
-DEFINE_bool(es52_globals, true,
- "activate new semantics for global var declarations")
-
-DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
-DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
-DEFINE_bool(harmony_modules, false,
- "enable harmony modules (implies block scoping)")
-DEFINE_bool(harmony_symbols, false,
- "enable harmony symbols (a.k.a. private names)")
-DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
-DEFINE_bool(harmony_collections, false,
- "enable harmony collections (sets, maps, and weak maps)")
-DEFINE_bool(harmony_observation, false,
- "enable harmony object observation (implies harmony collections")
-DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
-DEFINE_implication(harmony, harmony_scoping)
-DEFINE_implication(harmony, harmony_modules)
-DEFINE_implication(harmony, harmony_symbols)
-DEFINE_implication(harmony, harmony_proxies)
-DEFINE_implication(harmony, harmony_collections)
-DEFINE_implication(harmony, harmony_observation)
-DEFINE_implication(harmony_modules, harmony_scoping)
-DEFINE_implication(harmony_observation, harmony_collections)
-
-// Flags for experimental implementation features.
-DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
-DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
-DEFINE_bool(compiled_transitions, false, "use optimizing compiler to "
- "generate array elements transition stubs")
-DEFINE_bool(clever_optimizations,
- true,
- "Optimize object size, Array shift, DOM strings and string +")
-
-// Flags for data representation optimizations
-DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
-DEFINE_bool(string_slices, true, "use string slices")
-
-// Flags for Crankshaft.
-DEFINE_bool(crankshaft, true, "use crankshaft")
-DEFINE_string(hydrogen_filter, "", "optimization filter")
-DEFINE_bool(use_range, true, "use hydrogen range analysis")
-DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
-DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
-DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
-DEFINE_bool(use_inlining, true, "use function inlining")
-DEFINE_int(max_inlined_source_size, 600,
- "maximum source size in bytes considered for a single inlining")
-DEFINE_int(max_inlined_nodes, 196,
- "maximum number of AST nodes considered for a single inlining")
-DEFINE_int(max_inlined_nodes_cumulative, 196,
- "maximum cumulative number of AST nodes considered for inlining")
-DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
-DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions")
-DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
- true,
- "crankshaft harvests type feedback from stub cache")
-DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
-DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
-DEFINE_string(trace_phase, "Z", "trace generated IR for specified phases")
-DEFINE_bool(trace_inlining, false, "trace inlining decisions")
-DEFINE_bool(trace_alloc, false, "trace register allocator")
-DEFINE_bool(trace_all_uses, false, "trace all use positions")
-DEFINE_bool(trace_range, false, "trace range analysis")
-DEFINE_bool(trace_gvn, false, "trace global value numbering")
-DEFINE_bool(trace_representation, false, "trace representation types")
-DEFINE_bool(trace_track_allocation_sites, false,
- "trace the tracking of allocation sites")
-DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
-DEFINE_bool(stress_environments, false, "environment for every instruction")
-DEFINE_int(deopt_every_n_times,
- 0,
- "deoptimize every n times a deopt point is passed")
-DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
-DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
-DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
-DEFINE_bool(use_osr, true, "use on-stack replacement")
-DEFINE_bool(idefs, false, "use informative definitions")
-DEFINE_bool(array_bounds_checks_elimination, true,
- "perform array bounds checks elimination")
-DEFINE_bool(array_index_dehoisting, true,
- "perform array index dehoisting")
-DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
-DEFINE_bool(fold_constants, true, "use constant folding")
-DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
-DEFINE_bool(unreachable_code_elimination, false,
- "eliminate unreachable code (hidden behind soft deopts)")
-DEFINE_bool(track_allocation_sites, true,
- "Use allocation site info to reduce transitions")
-DEFINE_bool(optimize_constructed_arrays, false,
- "Use allocation site info on constructed arrays")
-DEFINE_bool(trace_osr, false, "trace on-stack replacement")
-DEFINE_int(stress_runs, 0, "number of stress runs")
-DEFINE_bool(optimize_closures, true, "optimize closures")
-DEFINE_bool(lookup_sample_by_shared, true,
- "when picking a function to optimize, watch for shared function "
- "info, not JSFunction itself")
-DEFINE_bool(cache_optimized_code, true,
- "cache optimized code for closures")
-DEFINE_bool(inline_construct, true, "inline constructor calls")
-DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
-DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
-DEFINE_int(loop_weight, 1, "loop weight for representation inference")
-
-DEFINE_bool(optimize_for_in, true,
- "optimize functions containing for-in loops")
-DEFINE_bool(opt_safe_uint32_operations, true,
- "allow uint32 values on optimize frames if they are used only in "
- "safe operations")
-
-DEFINE_bool(parallel_recompilation, false,
- "optimizing hot functions asynchronously on a separate thread")
-DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
-DEFINE_int(parallel_recompilation_queue_length, 2,
- "the length of the parallel compilation queue")
-DEFINE_bool(manual_parallel_recompilation, false,
- "disable automatic optimization")
-DEFINE_implication(manual_parallel_recompilation, parallel_recompilation)
-DEFINE_bool(omit_prototype_checks_for_leaf_maps, true,
- "do not emit prototype checks if all prototypes have leaf maps, "
- "deoptimize the optimized code if the layout of the maps changes.")
-
-// Experimental profiler changes.
-DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
-DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
-DEFINE_int(frame_count, 1, "number of stack frames inspected by the profiler")
-DEFINE_bool(self_optimization, false,
- "primitive functions trigger their own optimization")
-DEFINE_bool(direct_self_opt, false,
- "call recompile stub directly when self-optimizing")
-DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed")
-DEFINE_bool(interrupt_at_exit, false,
- "insert an interrupt check at function exit")
-DEFINE_bool(weighted_back_edges, false,
- "weight back edges by jump distance for interrupt triggering")
- // 0x1700 fits in the immediate field of an ARM instruction.
-DEFINE_int(interrupt_budget, 0x1700,
- "execution budget before interrupt is triggered")
-DEFINE_int(type_info_threshold, 15,
- "percentage of ICs that must have type info to allow optimization")
-DEFINE_int(self_opt_count, 130, "call count before self-optimization")
-
-DEFINE_implication(experimental_profiler, watch_ic_patching)
-DEFINE_implication(experimental_profiler, self_optimization)
-// Not implying direct_self_opt here because it seems to be a bad idea.
-DEFINE_implication(experimental_profiler, retry_self_opt)
-DEFINE_implication(experimental_profiler, interrupt_at_exit)
-DEFINE_implication(experimental_profiler, weighted_back_edges)
-
-DEFINE_bool(trace_opt_verbose, false, "extra verbose compilation tracing")
-DEFINE_implication(trace_opt_verbose, trace_opt)
-
-// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
-DEFINE_bool(debug_code, false,
- "generate extra code (assertions) for debugging")
-DEFINE_bool(code_comments, false, "emit comments in code disassembly")
-DEFINE_bool(enable_sse2, true,
- "enable use of SSE2 instructions if available")
-DEFINE_bool(enable_sse3, true,
- "enable use of SSE3 instructions if available")
-DEFINE_bool(enable_sse4_1, true,
- "enable use of SSE4.1 instructions if available")
-DEFINE_bool(enable_cmov, true,
- "enable use of CMOV instruction if available")
-DEFINE_bool(enable_rdtsc, true,
- "enable use of RDTSC instruction if available")
-DEFINE_bool(enable_sahf, true,
- "enable use of SAHF instruction if available (X64 only)")
-DEFINE_bool(enable_vfp3, true,
- "enable use of VFP3 instructions if available - this implies "
- "enabling ARMv7 and VFP2 instructions (ARM only)")
-DEFINE_bool(enable_vfp2, true,
- "enable use of VFP2 instructions if available")
-DEFINE_bool(enable_armv7, true,
- "enable use of ARMv7 instructions if available (ARM only)")
-DEFINE_bool(enable_sudiv, true,
- "enable use of SDIV and UDIV instructions if available (ARM only)")
-DEFINE_bool(enable_movw_movt, false,
- "enable loading 32-bit constant by means of movw/movt "
- "instruction pairs (ARM only)")
-DEFINE_bool(enable_unaligned_accesses, true,
- "enable unaligned accesses for ARMv7 (ARM only)")
-DEFINE_bool(enable_32dregs, true,
- "enable use of d16-d31 registers on ARM - this requires VFP3")
-DEFINE_bool(enable_fpu, true,
- "enable use of MIPS FPU instructions if available (MIPS only)")
-DEFINE_bool(enable_vldr_imm, false,
- "enable use of constant pools for double immediate (ARM only)")
-
-// bootstrapper.cc
-DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
-DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
-DEFINE_bool(expose_gc, false, "expose gc extension")
-DEFINE_bool(expose_externalize_string, false,
- "expose externalize string extension")
-DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
-DEFINE_bool(builtins_in_stack_traces, false,
- "show built-in functions in stack traces")
-DEFINE_bool(disable_native_files, false, "disable builtin natives files")
-
-// builtins-ia32.cc
-DEFINE_bool(inline_new, true, "use fast inline allocation")
-
-// checks.cc
-DEFINE_bool(stack_trace_on_abort, true,
- "print a stack trace if an assertion failure occurs")
-
-// codegen-ia32.cc / codegen-arm.cc
-DEFINE_bool(trace, false, "trace function calls")
-DEFINE_bool(mask_constants_with_cookie,
- true,
- "use random jit cookie to mask large constants")
-
-// codegen.cc
-DEFINE_bool(lazy, true, "use lazy compilation")
-DEFINE_bool(trace_opt, false, "trace lazy optimization")
-DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics")
-DEFINE_bool(opt, true, "use adaptive optimizations")
-DEFINE_bool(always_opt, false, "always try to optimize functions")
-DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
-DEFINE_bool(trace_deopt, false, "trace optimize function deoptimization")
-DEFINE_bool(trace_stub_failures, false,
- "trace deoptimization of generated code stubs")
-
-// compiler.cc
-DEFINE_int(min_preparse_length, 1024,
- "minimum length for automatic enable preparsing")
-DEFINE_bool(always_full_compiler, false,
- "try to use the dedicated run-once backend for all code")
-DEFINE_int(max_opt_count, 10,
- "maximum number of optimization attempts before giving up.")
-
-// compilation-cache.cc
-DEFINE_bool(compilation_cache, true, "enable compilation cache")
-
-DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
-
-// cpu-profiler.cc
-DEFINE_int(cpu_profiler_sampling_period, 1000,
- "CPU profiler sampling period in microseconds")
-
-// debug.cc
-DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
-DEFINE_bool(trace_js_array_abuse, false,
- "trace out-of-bounds accesses to JS arrays")
-DEFINE_bool(trace_external_array_abuse, false,
- "trace out-of-bounds-accesses to external arrays")
-DEFINE_bool(trace_array_abuse, false,
- "trace out-of-bounds accesses to all arrays")
-DEFINE_implication(trace_array_abuse, trace_js_array_abuse)
-DEFINE_implication(trace_array_abuse, trace_external_array_abuse)
-DEFINE_bool(debugger_auto_break, true,
- "automatically set the debug break flag when debugger commands are "
- "in the queue")
-DEFINE_bool(breakpoint_relocation, true, "relocate breakpoints to the next executable line")
-DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
-DEFINE_bool(break_on_abort, true, "always cause a debug break before aborting")
-
-// execution.cc
-// Slightly less than 1MB on 64-bit, since Windows' default stack size for
-// the main execution thread is 1MB for both 32 and 64-bit.
-DEFINE_int(stack_size, kPointerSize * 123,
- "default size of stack region v8 is allowed to use (in kBytes)")
-
-// frames.cc
-DEFINE_int(max_stack_trace_source_length, 300,
- "maximum length of function source code printed in a stack trace.")
-
-// full-codegen.cc
-DEFINE_bool(always_inline_smi_code, false,
- "always inline smi code in non-opt code")
-
-// heap.cc
-DEFINE_int(max_new_space_size, 0, "max size of the new generation (in kBytes)")
-DEFINE_int(max_old_space_size, 0, "max size of the old generation (in Mbytes)")
-DEFINE_int(max_executable_size, 0, "max size of executable memory (in Mbytes)")
-DEFINE_bool(gc_global, false, "always perform global GCs")
-DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
-DEFINE_bool(trace_gc, false,
- "print one trace line following each garbage collection")
-DEFINE_bool(trace_gc_nvp, false,
- "print one detailed trace line in name=value format "
- "after each garbage collection")
-DEFINE_bool(trace_gc_ignore_scavenger, false,
- "do not print trace line after scavenger collection")
-DEFINE_bool(print_cumulative_gc_stat, false,
- "print cumulative GC statistics in name=value format on exit")
-DEFINE_bool(trace_gc_verbose, false,
- "print more details following each garbage collection")
-DEFINE_bool(trace_fragmentation, false,
- "report fragmentation for old pointer and data pages")
-DEFINE_bool(trace_external_memory, false,
- "print amount of external allocated memory after each time "
- "it is adjusted.")
-DEFINE_bool(collect_maps, true,
- "garbage collect maps from which no objects can be reached")
-DEFINE_bool(weak_embedded_maps_in_optimized_code, true,
- "make maps embedded in optimized code weak")
-DEFINE_bool(flush_code, true,
- "flush code that we expect not to use again (during full gc)")
-DEFINE_bool(flush_code_incrementally, true,
- "flush code that we expect not to use again (incrementally)")
-DEFINE_bool(age_code, true,
- "track un-executed functions to age code and flush only "
- "old code")
-DEFINE_bool(incremental_marking, true, "use incremental marking")
-DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
-DEFINE_bool(trace_incremental_marking, false,
- "trace progress of the incremental marking")
-DEFINE_bool(track_gc_object_stats, false,
- "track object counts and memory usage")
-DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping")
-DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
-DEFINE_int(sweeper_threads, 0,
- "number of parallel and concurrent sweeping threads")
-DEFINE_bool(parallel_marking, false, "enable parallel marking")
-DEFINE_int(marking_threads, 0, "number of parallel marking threads")
-#ifdef VERIFY_HEAP
-DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
-#endif
-
-// v8.cc
-DEFINE_bool(use_idle_notification, true,
- "Use idle notification to reduce memory footprint.")
-// ic.cc
-DEFINE_bool(use_ic, true, "use inline caching")
-
-// macro-assembler-ia32.cc
-DEFINE_bool(native_code_counters, false,
- "generate extra code for manipulating stats counters")
-
-// mark-compact.cc
-DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
-DEFINE_bool(lazy_sweeping, true,
- "Use lazy sweeping for old pointer and data spaces")
-DEFINE_bool(never_compact, false,
- "Never perform compaction on full GC - testing only")
-DEFINE_bool(compact_code_space, true,
- "Compact code space on full non-incremental collections")
-DEFINE_bool(incremental_code_compaction, true,
- "Compact code space on full incremental collections")
-DEFINE_bool(cleanup_code_caches_at_gc, true,
- "Flush inline caches prior to mark compact collection and "
- "flush code caches in maps during mark compact cycle.")
-DEFINE_bool(use_marking_progress_bar, true,
- "Use a progress bar to scan large objects in increments when "
- "incremental marking is active.")
-DEFINE_int(random_seed, 0,
- "Default seed for initializing random generator "
- "(0, the default, means to use system random).")
-
-// objects.cc
-DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
-
-// parser.cc
-DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
-DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
-
-// simulator-arm.cc and simulator-mips.cc
-DEFINE_bool(trace_sim, false, "Trace simulator execution")
-DEFINE_bool(check_icache, false,
- "Check icache flushes in ARM and MIPS simulator")
-DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
-DEFINE_int(sim_stack_alignment, 8,
- "Stack alingment in bytes in simulator (4 or 8, 8 is default)")
-
-// isolate.cc
-DEFINE_bool(trace_exception, false,
- "print stack trace when throwing exceptions")
-DEFINE_bool(preallocate_message_memory, false,
- "preallocate some memory to build stack traces.")
-DEFINE_bool(randomize_hashes,
- false,
- "randomize hashes to avoid predictable hash collisions "
- "(with snapshots this option cannot override the baked-in seed)")
-DEFINE_int(hash_seed,
- 0,
- "Fixed seed to use to hash property keys (0 means random)"
- "(with snapshots this option cannot override the baked-in seed)")
-
-// v8.cc
-DEFINE_bool(preemption, false,
- "activate a 100ms timer that switches between V8 threads")
-
-// Regexp
-DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
-
-// Testing flags test/cctest/test-{flags,api,serialization}.cc
-DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
-DEFINE_int(testing_int_flag, 13, "testing_int_flag")
-DEFINE_float(testing_float_flag, 2.5, "float-flag")
-DEFINE_string(testing_string_flag, "Hello, world!", "string-flag")
-DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness")
-#ifdef WIN32
-DEFINE_string(testing_serialization_file, "C:\\Windows\\Temp\\serdes",
- "file in which to testing_serialize heap")
-#else
-DEFINE_string(testing_serialization_file, "/tmp/serdes",
- "file in which to serialize heap")
-#endif
-
-// mksnapshot.cc
-DEFINE_string(extra_code, NULL, "A filename with extra code to be included in"
- " the snapshot (mksnapshot only)")
-
-//
-// Dev shell flags
-//
-
-DEFINE_bool(help, false, "Print usage message, including flags, on console")
-DEFINE_bool(dump_counters, false, "Dump counters on exit")
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-DEFINE_bool(debugger, false, "Enable JavaScript debugger")
-DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
- "debugger agent in another process")
-DEFINE_bool(debugger_agent, false, "Enable debugger agent")
-DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-DEFINE_string(map_counters, "", "Map counters to a file")
-DEFINE_args(js_arguments, JSARGUMENTS_INIT,
- "Pass all remaining arguments to the script. Alias for \"--\".")
-
-#if defined(WEBOS__)
-DEFINE_bool(debug_compile_events, false, "Enable debugger compile events")
-DEFINE_bool(debug_script_collected_events, false,
- "Enable debugger script collected events")
-#else
-DEFINE_bool(debug_compile_events, true, "Enable debugger compile events")
-DEFINE_bool(debug_script_collected_events, true,
- "Enable debugger script collected events")
-#endif
-
-
-//
-// GDB JIT integration flags.
-//
-
-DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
-DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects")
-DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk")
-DEFINE_string(gdbjit_dump_filter, "",
- "dump only objects containing this substring")
-
-// mark-compact.cc
-DEFINE_bool(force_marking_deque_overflows, false,
- "force overflows of marking deque by reducing it's size "
- "to 64 words")
-
-DEFINE_bool(stress_compaction, false,
- "stress the GC compactor to flush out bugs (implies "
- "--force_marking_deque_overflows)")
-
-//
-// Debug only flags
-//
-#undef FLAG
-#ifdef DEBUG
-#define FLAG FLAG_FULL
-#else
-#define FLAG FLAG_READONLY
-#endif
-
-// checks.cc
-DEFINE_bool(enable_slow_asserts, false,
- "enable asserts that are slow to execute")
-
-// codegen-ia32.cc / codegen-arm.cc
-DEFINE_bool(trace_codegen, false,
- "print name of functions for which code is generated")
-DEFINE_bool(print_source, false, "pretty print source code")
-DEFINE_bool(print_builtin_source, false,
- "pretty print source code for builtins")
-DEFINE_bool(print_ast, false, "print source AST")
-DEFINE_bool(print_builtin_ast, false, "print source AST for builtins")
-DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
-
-// compiler.cc
-DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
-DEFINE_bool(print_scopes, false, "print scopes")
-
-// contexts.cc
-DEFINE_bool(trace_contexts, false, "trace contexts operations")
-
-// heap.cc
-DEFINE_bool(gc_greedy, false, "perform GC prior to some allocations")
-DEFINE_bool(gc_verbose, false, "print stuff during garbage collection")
-DEFINE_bool(heap_stats, false, "report heap statistics before and after GC")
-DEFINE_bool(code_stats, false, "report code statistics after GC")
-DEFINE_bool(verify_native_context_separation, false,
- "verify that code holds on to at most one native context after GC")
-DEFINE_bool(print_handles, false, "report handles after GC")
-DEFINE_bool(print_global_handles, false, "report global handles after GC")
-
-// ic.cc
-DEFINE_bool(trace_ic, false, "trace inline cache state transitions")
-
-// interface.cc
-DEFINE_bool(print_interfaces, false, "print interfaces")
-DEFINE_bool(print_interface_details, false, "print interface inference details")
-DEFINE_int(print_interface_depth, 5, "depth for printing interfaces")
-
-// objects.cc
-DEFINE_bool(trace_normalization,
- false,
- "prints when objects are turned into dictionaries.")
-
-// runtime.cc
-DEFINE_bool(trace_lazy, false, "trace lazy compilation")
-
-// spaces.cc
-DEFINE_bool(collect_heap_spill_statistics, false,
- "report heap spill statistics along with heap_stats "
- "(requires heap_stats)")
-
-DEFINE_bool(trace_isolates, false, "trace isolate state changes")
-
-// VM state
-DEFINE_bool(log_state_changes, false, "Log state changes.")
-
-// Regexp
-DEFINE_bool(regexp_possessive_quantifier,
- false,
- "enable possessive quantifier syntax for testing")
-DEFINE_bool(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
-DEFINE_bool(trace_regexp_assembler,
- false,
- "trace regexp macro assembler calls.")
-
-//
-// Logging and profiling flags
-//
-#undef FLAG
-#define FLAG FLAG_FULL
-
-// log.cc
-DEFINE_bool(log, false,
- "Minimal logging (no API, code, GC, suspect, or handles samples).")
-DEFINE_bool(log_all, false, "Log all events to the log file.")
-DEFINE_bool(log_runtime, false, "Activate runtime system %Log call.")
-DEFINE_bool(log_api, false, "Log API events to the log file.")
-DEFINE_bool(log_code, false,
- "Log code events to the log file without profiling.")
-DEFINE_bool(log_gc, false,
- "Log heap samples on garbage collection for the hp2ps tool.")
-DEFINE_bool(log_handles, false, "Log global handle events.")
-DEFINE_bool(log_snapshot_positions, false,
- "log positions of (de)serialized objects in the snapshot.")
-DEFINE_bool(log_suspect, false, "Log suspect operations.")
-DEFINE_bool(prof, false,
- "Log statistical profiling information (implies --log-code).")
-DEFINE_bool(prof_auto, true,
- "Used with --prof, starts profiling automatically")
-DEFINE_bool(prof_lazy, false,
- "Used with --prof, only does sampling and logging"
- " when profiler is active (implies --noprof_auto).")
-DEFINE_bool(prof_browser_mode, true,
- "Used with --prof, turns on browser-compatible mode for profiling.")
-DEFINE_bool(log_regexp, false, "Log regular expression execution.")
-DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
-DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
-DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
- "Specify the name of the file for fake gc mmap used in ll_prof")
-DEFINE_bool(log_internal_timer_events, false, "Time internal events.")
-DEFINE_bool(log_timer_events, false,
- "Time events including external callbacks.")
-DEFINE_implication(log_timer_events, log_internal_timer_events)
-
-//
-// Disassembler only flags
-//
-#undef FLAG
-#ifdef ENABLE_DISASSEMBLER
-#define FLAG FLAG_FULL
-#else
-#define FLAG FLAG_READONLY
-#endif
-
-// elements.cc
-DEFINE_bool(trace_elements_transitions, false, "trace elements transitions")
-
-// code-stubs.cc
-DEFINE_bool(print_code_stubs, false, "print code stubs")
-DEFINE_bool(test_secondary_stub_cache,
- false,
- "test secondary stub cache by disabling the primary one")
-
-DEFINE_bool(test_primary_stub_cache,
- false,
- "test primary stub cache by disabling the secondary one")
-
-// codegen-ia32.cc / codegen-arm.cc
-DEFINE_bool(print_code, false, "print generated code")
-DEFINE_bool(print_opt_code, false, "print optimized code")
-DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
- "printing optimized code based on it")
-DEFINE_bool(print_code_verbose, false, "print more information for code")
-DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
-
-#ifdef ENABLE_DISASSEMBLER
-DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
-DEFINE_implication(print_all_code, print_code)
-DEFINE_implication(print_all_code, print_opt_code)
-DEFINE_implication(print_all_code, print_unopt_code)
-DEFINE_implication(print_all_code, print_code_verbose)
-DEFINE_implication(print_all_code, print_builtin_code)
-DEFINE_implication(print_all_code, print_code_stubs)
-DEFINE_implication(print_all_code, code_comments)
-#ifdef DEBUG
-DEFINE_implication(print_all_code, trace_codegen)
-#endif
-#endif
-
-// Cleanup...
-#undef FLAG_FULL
-#undef FLAG_READONLY
-#undef FLAG
-
-#undef DEFINE_bool
-#undef DEFINE_int
-#undef DEFINE_string
-#undef DEFINE_implication
-
-#undef FLAG_MODE_DECLARE
-#undef FLAG_MODE_DEFINE
-#undef FLAG_MODE_DEFINE_DEFAULTS
-#undef FLAG_MODE_META
-#undef FLAG_MODE_DEFINE_IMPLICATIONS
diff --git a/src/3rdparty/v8/src/flags.cc b/src/3rdparty/v8/src/flags.cc
deleted file mode 100644
index bca0eff..0000000
--- a/src/3rdparty/v8/src/flags.cc
+++ /dev/null
@@ -1,547 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <ctype.h>
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "platform.h"
-#include "smart-pointers.h"
-#include "string-stream.h"
-
-
-namespace v8 {
-namespace internal {
-
-// Define all of our flags.
-#define FLAG_MODE_DEFINE
-#include "flag-definitions.h"
-
-// Define all of our flags default values.
-#define FLAG_MODE_DEFINE_DEFAULTS
-#include "flag-definitions.h"
-
-namespace {
-
-// This structure represents a single entry in the flag system, with a pointer
-// to the actual flag, default value, comment, etc. This is designed to be POD
-// initialized as to avoid requiring static constructors.
-struct Flag {
- enum FlagType { TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS };
-
- FlagType type_; // What type of flag, bool, int, or string.
- const char* name_; // Name of the flag, ex "my_flag".
- void* valptr_; // Pointer to the global flag variable.
- const void* defptr_; // Pointer to the default value.
- const char* cmt_; // A comment about the flags purpose.
- bool owns_ptr_; // Does the flag own its string value?
-
- FlagType type() const { return type_; }
-
- const char* name() const { return name_; }
-
- const char* comment() const { return cmt_; }
-
- bool* bool_variable() const {
- ASSERT(type_ == TYPE_BOOL);
- return reinterpret_cast<bool*>(valptr_);
- }
-
- int* int_variable() const {
- ASSERT(type_ == TYPE_INT);
- return reinterpret_cast<int*>(valptr_);
- }
-
- double* float_variable() const {
- ASSERT(type_ == TYPE_FLOAT);
- return reinterpret_cast<double*>(valptr_);
- }
-
- const char* string_value() const {
- ASSERT(type_ == TYPE_STRING);
- return *reinterpret_cast<const char**>(valptr_);
- }
-
- void set_string_value(const char* value, bool owns_ptr) {
- ASSERT(type_ == TYPE_STRING);
- const char** ptr = reinterpret_cast<const char**>(valptr_);
- if (owns_ptr_ && *ptr != NULL) DeleteArray(*ptr);
- *ptr = value;
- owns_ptr_ = owns_ptr;
- }
-
- JSArguments* args_variable() const {
- ASSERT(type_ == TYPE_ARGS);
- return reinterpret_cast<JSArguments*>(valptr_);
- }
-
- bool bool_default() const {
- ASSERT(type_ == TYPE_BOOL);
- return *reinterpret_cast<const bool*>(defptr_);
- }
-
- int int_default() const {
- ASSERT(type_ == TYPE_INT);
- return *reinterpret_cast<const int*>(defptr_);
- }
-
- double float_default() const {
- ASSERT(type_ == TYPE_FLOAT);
- return *reinterpret_cast<const double*>(defptr_);
- }
-
- const char* string_default() const {
- ASSERT(type_ == TYPE_STRING);
- return *reinterpret_cast<const char* const *>(defptr_);
- }
-
- JSArguments args_default() const {
- ASSERT(type_ == TYPE_ARGS);
- return *reinterpret_cast<const JSArguments*>(defptr_);
- }
-
- // Compare this flag's current value against the default.
- bool IsDefault() const {
- switch (type_) {
- case TYPE_BOOL:
- return *bool_variable() == bool_default();
- case TYPE_INT:
- return *int_variable() == int_default();
- case TYPE_FLOAT:
- return *float_variable() == float_default();
- case TYPE_STRING: {
- const char* str1 = string_value();
- const char* str2 = string_default();
- if (str2 == NULL) return str1 == NULL;
- if (str1 == NULL) return str2 == NULL;
- return strcmp(str1, str2) == 0;
- }
- case TYPE_ARGS:
- return args_variable()->argc() == 0;
- }
- UNREACHABLE();
- return true;
- }
-
- // Set a flag back to it's default value.
- void Reset() {
- switch (type_) {
- case TYPE_BOOL:
- *bool_variable() = bool_default();
- break;
- case TYPE_INT:
- *int_variable() = int_default();
- break;
- case TYPE_FLOAT:
- *float_variable() = float_default();
- break;
- case TYPE_STRING:
- set_string_value(string_default(), false);
- break;
- case TYPE_ARGS:
- *args_variable() = args_default();
- break;
- }
- }
-};
-
-Flag flags[] = {
-#define FLAG_MODE_META
-#include "flag-definitions.h"
-};
-
-const size_t num_flags = sizeof(flags) / sizeof(*flags);
-
-} // namespace
-
-
-static const char* Type2String(Flag::FlagType type) {
- switch (type) {
- case Flag::TYPE_BOOL: return "bool";
- case Flag::TYPE_INT: return "int";
- case Flag::TYPE_FLOAT: return "float";
- case Flag::TYPE_STRING: return "string";
- case Flag::TYPE_ARGS: return "arguments";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-static SmartArrayPointer<const char> ToString(Flag* flag) {
- HeapStringAllocator string_allocator;
- StringStream buffer(&string_allocator);
- switch (flag->type()) {
- case Flag::TYPE_BOOL:
- buffer.Add("%s", (*flag->bool_variable() ? "true" : "false"));
- break;
- case Flag::TYPE_INT:
- buffer.Add("%d", *flag->int_variable());
- break;
- case Flag::TYPE_FLOAT:
- buffer.Add("%f", FmtElm(*flag->float_variable()));
- break;
- case Flag::TYPE_STRING: {
- const char* str = flag->string_value();
- buffer.Add("%s", str ? str : "NULL");
- break;
- }
- case Flag::TYPE_ARGS: {
- JSArguments args = *flag->args_variable();
- if (args.argc() > 0) {
- buffer.Add("%s", args[0]);
- for (int i = 1; i < args.argc(); i++) {
- buffer.Add(" %s", args[i]);
- }
- }
- break;
- }
- }
- return buffer.ToCString();
-}
-
-
-// static
-List<const char*>* FlagList::argv() {
- List<const char*>* args = new List<const char*>(8);
- Flag* args_flag = NULL;
- for (size_t i = 0; i < num_flags; ++i) {
- Flag* f = &flags[i];
- if (!f->IsDefault()) {
- if (f->type() == Flag::TYPE_ARGS) {
- ASSERT(args_flag == NULL);
- args_flag = f; // Must be last in arguments.
- continue;
- }
- HeapStringAllocator string_allocator;
- StringStream buffer(&string_allocator);
- if (f->type() != Flag::TYPE_BOOL || *(f->bool_variable())) {
- buffer.Add("--%s", f->name());
- } else {
- buffer.Add("--no%s", f->name());
- }
- args->Add(buffer.ToCString().Detach());
- if (f->type() != Flag::TYPE_BOOL) {
- args->Add(ToString(f).Detach());
- }
- }
- }
- if (args_flag != NULL) {
- HeapStringAllocator string_allocator;
- StringStream buffer(&string_allocator);
- buffer.Add("--%s", args_flag->name());
- args->Add(buffer.ToCString().Detach());
- JSArguments jsargs = *args_flag->args_variable();
- for (int j = 0; j < jsargs.argc(); j++) {
- args->Add(StrDup(jsargs[j]));
- }
- }
- return args;
-}
-
-
-// Helper function to parse flags: Takes an argument arg and splits it into
-// a flag name and flag value (or NULL if they are missing). is_bool is set
-// if the arg started with "-no" or "--no". The buffer may be used to NUL-
-// terminate the name, it must be large enough to hold any possible name.
-static void SplitArgument(const char* arg,
- char* buffer,
- int buffer_size,
- const char** name,
- const char** value,
- bool* is_bool) {
- *name = NULL;
- *value = NULL;
- *is_bool = false;
-
- if (arg != NULL && *arg == '-') {
- // find the begin of the flag name
- arg++; // remove 1st '-'
- if (*arg == '-') {
- arg++; // remove 2nd '-'
- if (arg[0] == '\0') {
- const char* kJSArgumentsFlagName = "js_arguments";
- *name = kJSArgumentsFlagName;
- return;
- }
- }
- if (arg[0] == 'n' && arg[1] == 'o') {
- arg += 2; // remove "no"
- *is_bool = true;
- }
- *name = arg;
-
- // find the end of the flag name
- while (*arg != '\0' && *arg != '=')
- arg++;
-
- // get the value if any
- if (*arg == '=') {
- // make a copy so we can NUL-terminate flag name
- size_t n = arg - *name;
- CHECK(n < static_cast<size_t>(buffer_size)); // buffer is too small
- memcpy(buffer, *name, n);
- buffer[n] = '\0';
- *name = buffer;
- // get the value
- *value = arg + 1;
- }
- }
-}
-
-
-inline char NormalizeChar(char ch) {
- return ch == '_' ? '-' : ch;
-}
-
-
-static bool EqualNames(const char* a, const char* b) {
- for (int i = 0; NormalizeChar(a[i]) == NormalizeChar(b[i]); i++) {
- if (a[i] == '\0') {
- return true;
- }
- }
- return false;
-}
-
-
-static Flag* FindFlag(const char* name) {
- for (size_t i = 0; i < num_flags; ++i) {
- if (EqualNames(name, flags[i].name()))
- return &flags[i];
- }
- return NULL;
-}
-
-
-// static
-int FlagList::SetFlagsFromCommandLine(int* argc,
- char** argv,
- bool remove_flags) {
- int return_code = 0;
- // parse arguments
- for (int i = 1; i < *argc;) {
- int j = i; // j > 0
- const char* arg = argv[i++];
-
- // split arg into flag components
- char buffer[1*KB];
- const char* name;
- const char* value;
- bool is_bool;
- SplitArgument(arg, buffer, sizeof buffer, &name, &value, &is_bool);
-
- if (name != NULL) {
- // lookup the flag
- Flag* flag = FindFlag(name);
- if (flag == NULL) {
- if (remove_flags) {
- // We don't recognize this flag but since we're removing
- // the flags we recognize we assume that the remaining flags
- // will be processed somewhere else so this flag might make
- // sense there.
- continue;
- } else {
- fprintf(stderr, "Error: unrecognized flag %s\n"
- "Try --help for options\n", arg);
- return_code = j;
- break;
- }
- }
-
- // if we still need a flag value, use the next argument if available
- if (flag->type() != Flag::TYPE_BOOL &&
- flag->type() != Flag::TYPE_ARGS &&
- value == NULL) {
- if (i < *argc) {
- value = argv[i++];
- } else {
- fprintf(stderr, "Error: missing value for flag %s of type %s\n"
- "Try --help for options\n",
- arg, Type2String(flag->type()));
- return_code = j;
- break;
- }
- }
-
- // set the flag
- char* endp = const_cast<char*>(""); // *endp is only read
- switch (flag->type()) {
- case Flag::TYPE_BOOL:
- *flag->bool_variable() = !is_bool;
- break;
- case Flag::TYPE_INT:
- *flag->int_variable() = strtol(value, &endp, 10); // NOLINT
- break;
- case Flag::TYPE_FLOAT:
- *flag->float_variable() = strtod(value, &endp);
- break;
- case Flag::TYPE_STRING:
- flag->set_string_value(value ? StrDup(value) : NULL, true);
- break;
- case Flag::TYPE_ARGS: {
- int start_pos = (value == NULL) ? i : i - 1;
- int js_argc = *argc - start_pos;
- const char** js_argv = NewArray<const char*>(js_argc);
- if (value != NULL) {
- js_argv[0] = StrDup(value);
- }
- for (int k = i; k < *argc; k++) {
- js_argv[k - start_pos] = StrDup(argv[k]);
- }
- *flag->args_variable() = JSArguments::Create(js_argc, js_argv);
- i = *argc; // Consume all arguments
- break;
- }
- }
-
- // handle errors
- if ((flag->type() == Flag::TYPE_BOOL && value != NULL) ||
- (flag->type() != Flag::TYPE_BOOL && is_bool) ||
- *endp != '\0') {
- fprintf(stderr, "Error: illegal value for flag %s of type %s\n"
- "Try --help for options\n",
- arg, Type2String(flag->type()));
- return_code = j;
- break;
- }
-
- // remove the flag & value from the command
- if (remove_flags) {
- while (j < i) {
- argv[j++] = NULL;
- }
- }
- }
- }
-
- // shrink the argument list
- if (remove_flags) {
- int j = 1;
- for (int i = 1; i < *argc; i++) {
- if (argv[i] != NULL)
- argv[j++] = argv[i];
- }
- *argc = j;
- }
-
- if (FLAG_help) {
- PrintHelp();
- exit(0);
- }
- // parsed all flags successfully
- return return_code;
-}
-
-
-static char* SkipWhiteSpace(char* p) {
- while (*p != '\0' && isspace(*p) != 0) p++;
- return p;
-}
-
-
-static char* SkipBlackSpace(char* p) {
- while (*p != '\0' && isspace(*p) == 0) p++;
- return p;
-}
-
-
-// static
-int FlagList::SetFlagsFromString(const char* str, int len) {
- // make a 0-terminated copy of str
- ScopedVector<char> copy0(len + 1);
- memcpy(copy0.start(), str, len);
- copy0[len] = '\0';
-
- // strip leading white space
- char* copy = SkipWhiteSpace(copy0.start());
-
- // count the number of 'arguments'
- int argc = 1; // be compatible with SetFlagsFromCommandLine()
- for (char* p = copy; *p != '\0'; argc++) {
- p = SkipBlackSpace(p);
- p = SkipWhiteSpace(p);
- }
-
- // allocate argument array
- ScopedVector<char*> argv(argc);
-
- // split the flags string into arguments
- argc = 1; // be compatible with SetFlagsFromCommandLine()
- for (char* p = copy; *p != '\0'; argc++) {
- argv[argc] = p;
- p = SkipBlackSpace(p);
- if (*p != '\0') *p++ = '\0'; // 0-terminate argument
- p = SkipWhiteSpace(p);
- }
-
- // set the flags
- int result = SetFlagsFromCommandLine(&argc, argv.start(), false);
-
- return result;
-}
-
-
-// static
-void FlagList::ResetAllFlags() {
- for (size_t i = 0; i < num_flags; ++i) {
- flags[i].Reset();
- }
-}
-
-
-// static
-void FlagList::PrintHelp() {
- printf("Usage:\n");
- printf(" shell [options] -e string\n");
- printf(" execute string in V8\n");
- printf(" shell [options] file1 file2 ... filek\n");
- printf(" run JavaScript scripts in file1, file2, ..., filek\n");
- printf(" shell [options]\n");
- printf(" shell [options] --shell [file1 file2 ... filek]\n");
- printf(" run an interactive JavaScript shell\n");
- printf(" d8 [options] file1 file2 ... filek\n");
- printf(" d8 [options]\n");
- printf(" d8 [options] --shell [file1 file2 ... filek]\n");
- printf(" run the new debugging shell\n\n");
- printf("Options:\n");
- for (size_t i = 0; i < num_flags; ++i) {
- Flag* f = &flags[i];
- SmartArrayPointer<const char> value = ToString(f);
- printf(" --%s (%s)\n type: %s default: %s\n",
- f->name(), f->comment(), Type2String(f->type()), *value);
- }
-}
-
-
-void FlagList::EnforceFlagImplications() {
-#define FLAG_MODE_DEFINE_IMPLICATIONS
-#include "flag-definitions.h"
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/flags.h b/src/3rdparty/v8/src/flags.h
deleted file mode 100644
index f0b239b..0000000
--- a/src/3rdparty/v8/src/flags.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_FLAGS_H_
-#define V8_FLAGS_H_
-
-namespace v8 {
-namespace internal {
-
-// Declare all of our flags.
-#define FLAG_MODE_DECLARE
-#include "flag-definitions.h"
-
-// The global list of all flags.
-class FlagList {
- public:
- // The list of all flags with a value different from the default
- // and their values. The format of the list is like the format of the
- // argv array passed to the main function, e.g.
- // ("--prof", "--log-file", "v8.prof", "--nolazy").
- //
- // The caller is responsible for disposing the list, as well
- // as every element of it.
- static List<const char*>* argv();
-
- // Set the flag values by parsing the command line. If remove_flags is
- // set, the flags and associated values are removed from (argc,
- // argv). Returns 0 if no error occurred. Otherwise, returns the argv
- // index > 0 for the argument where an error occurred. In that case,
- // (argc, argv) will remain unchanged independent of the remove_flags
- // value, and no assumptions about flag settings should be made.
- //
- // The following syntax for flags is accepted (both '-' and '--' are ok):
- //
- // --flag (bool flags only)
- // --noflag (bool flags only)
- // --flag=value (non-bool flags only, no spaces around '=')
- // --flag value (non-bool flags only)
- // -- (equivalent to --js_arguments, captures all remaining args)
- static int SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags);
-
- // Set the flag values by parsing the string str. Splits string into argc
- // substrings argv[], each of which consisting of non-white-space chars,
- // and then calls SetFlagsFromCommandLine() and returns its result.
- static int SetFlagsFromString(const char* str, int len);
-
- // Reset all flags to their default value.
- static void ResetAllFlags();
-
- // Print help to stdout with flags, types, and default values.
- static void PrintHelp();
-
- // Set flags as consequence of being implied by another flag.
- static void EnforceFlagImplications();
-};
-
-} } // namespace v8::internal
-
-#endif // V8_FLAGS_H_
diff --git a/src/3rdparty/v8/src/frames-inl.h b/src/3rdparty/v8/src/frames-inl.h
deleted file mode 100644
index 83b37a5..0000000
--- a/src/3rdparty/v8/src/frames-inl.h
+++ /dev/null
@@ -1,338 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FRAMES_INL_H_
-#define V8_FRAMES_INL_H_
-
-#include "frames.h"
-#include "isolate.h"
-#include "v8memory.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/frames-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/frames-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/frames-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/frames-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-inline Address StackHandler::address() const {
- return reinterpret_cast<Address>(const_cast<StackHandler*>(this));
-}
-
-
-inline StackHandler* StackHandler::next() const {
- const int offset = StackHandlerConstants::kNextOffset;
- return FromAddress(Memory::Address_at(address() + offset));
-}
-
-
-inline bool StackHandler::includes(Address address) const {
- Address start = this->address();
- Address end = start + StackHandlerConstants::kSize;
- return start <= address && address <= end;
-}
-
-
-inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const {
- v->VisitPointer(context_address());
- v->VisitPointer(code_address());
-}
-
-
-inline StackHandler* StackHandler::FromAddress(Address address) {
- return reinterpret_cast<StackHandler*>(address);
-}
-
-
-inline bool StackHandler::is_js_entry() const {
- return kind() == JS_ENTRY;
-}
-
-
-inline bool StackHandler::is_catch() const {
- return kind() == CATCH;
-}
-
-
-inline bool StackHandler::is_finally() const {
- return kind() == FINALLY;
-}
-
-
-inline StackHandler::Kind StackHandler::kind() const {
- const int offset = StackHandlerConstants::kStateOffset;
- return KindField::decode(Memory::unsigned_at(address() + offset));
-}
-
-
-inline Object** StackHandler::context_address() const {
- const int offset = StackHandlerConstants::kContextOffset;
- return reinterpret_cast<Object**>(address() + offset);
-}
-
-
-inline Object** StackHandler::code_address() const {
- const int offset = StackHandlerConstants::kCodeOffset;
- return reinterpret_cast<Object**>(address() + offset);
-}
-
-
-inline StackFrame::StackFrame(StackFrameIterator* iterator)
- : iterator_(iterator), isolate_(iterator_->isolate()) {
-}
-
-
-inline StackHandler* StackFrame::top_handler() const {
- return iterator_->handler();
-}
-
-
-inline Code* StackFrame::LookupCode() const {
- return GetContainingCode(isolate(), pc());
-}
-
-
-inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
- return isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
-}
-
-
-inline EntryFrame::EntryFrame(StackFrameIterator* iterator)
- : StackFrame(iterator) {
-}
-
-
-inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator)
- : EntryFrame(iterator) {
-}
-
-
-inline ExitFrame::ExitFrame(StackFrameIterator* iterator)
- : StackFrame(iterator) {
-}
-
-
-inline StandardFrame::StandardFrame(StackFrameIterator* iterator)
- : StackFrame(iterator) {
-}
-
-
-inline Object* StandardFrame::GetExpression(int index) const {
- return Memory::Object_at(GetExpressionAddress(index));
-}
-
-
-inline void StandardFrame::SetExpression(int index, Object* value) {
- Memory::Object_at(GetExpressionAddress(index)) = value;
-}
-
-
-inline Object* StandardFrame::context() const {
- const int offset = StandardFrameConstants::kContextOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-inline Address StandardFrame::caller_fp() const {
- return Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset);
-}
-
-
-inline Address StandardFrame::caller_pc() const {
- return Memory::Address_at(ComputePCAddress(fp()));
-}
-
-
-inline Address StandardFrame::ComputePCAddress(Address fp) {
- return fp + StandardFrameConstants::kCallerPCOffset;
-}
-
-
-inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
- Object* marker =
- Memory::Object_at(fp + StandardFrameConstants::kContextOffset);
- return marker == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);
-}
-
-
-inline bool StandardFrame::IsConstructFrame(Address fp) {
- Object* marker =
- Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset);
- return marker == Smi::FromInt(StackFrame::CONSTRUCT);
-}
-
-
-inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator)
- : StandardFrame(iterator) {
-}
-
-
-Address JavaScriptFrame::GetParameterSlot(int index) const {
- int param_count = ComputeParametersCount();
- ASSERT(-1 <= index && index < param_count);
- int parameter_offset = (param_count - index - 1) * kPointerSize;
- return caller_sp() + parameter_offset;
-}
-
-
-Object* JavaScriptFrame::GetParameter(int index) const {
- return Memory::Object_at(GetParameterSlot(index));
-}
-
-
-inline Object* JavaScriptFrame::receiver() const {
- return GetParameter(-1);
-}
-
-
-inline void JavaScriptFrame::set_receiver(Object* value) {
- Memory::Object_at(GetParameterSlot(-1)) = value;
-}
-
-
-inline bool JavaScriptFrame::has_adapted_arguments() const {
- return IsArgumentsAdaptorFrame(caller_fp());
-}
-
-
-inline Object* JavaScriptFrame::function() const {
- Object* result = function_slot_object();
- ASSERT(result->IsJSFunction());
- return result;
-}
-
-
-inline StubFrame::StubFrame(StackFrameIterator* iterator)
- : StandardFrame(iterator) {
-}
-
-
-inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
- : JavaScriptFrame(iterator) {
-}
-
-
-inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
- StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
-}
-
-
-inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
- : StandardFrame(iterator) {
-}
-
-
-inline StubFailureTrampolineFrame::StubFailureTrampolineFrame(
- StackFrameIterator* iterator) : StandardFrame(iterator) {
-}
-
-
-inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
- : InternalFrame(iterator) {
-}
-
-
-template<typename Iterator>
-inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
- Isolate* isolate)
- : iterator_(isolate) {
- if (!done()) Advance();
-}
-
-
-template<typename Iterator>
-inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
- Isolate* isolate, ThreadLocalTop* top)
- : iterator_(isolate, top) {
- if (!done()) Advance();
-}
-
-
-template<typename Iterator>
-inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
- // TODO(1233797): The frame hierarchy needs to change. It's
- // problematic that we can't use the safe-cast operator to cast to
- // the JavaScript frame type, because we may encounter arguments
- // adaptor frames.
- StackFrame* frame = iterator_.frame();
- ASSERT(frame->is_java_script() || frame->is_arguments_adaptor());
- return static_cast<JavaScriptFrame*>(frame);
-}
-
-
-template<typename Iterator>
-JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
- Isolate* isolate, StackFrame::Id id)
- : iterator_(isolate) {
- AdvanceToId(id);
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::Advance() {
- do {
- iterator_.Advance();
- } while (!iterator_.done() && !iterator_.frame()->is_java_script());
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToArgumentsFrame() {
- if (!frame()->has_adapted_arguments()) return;
- iterator_.Advance();
- ASSERT(iterator_.frame()->is_arguments_adaptor());
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToId(StackFrame::Id id) {
- while (!done()) {
- Advance();
- if (frame()->id() == id) return;
- }
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::Reset() {
- iterator_.Reset();
- if (!done()) Advance();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_FRAMES_INL_H_
diff --git a/src/3rdparty/v8/src/frames.cc b/src/3rdparty/v8/src/frames.cc
deleted file mode 100644
index 7dcf540..0000000
--- a/src/3rdparty/v8/src/frames.cc
+++ /dev/null
@@ -1,1502 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "deoptimizer.h"
-#include "frames-inl.h"
-#include "full-codegen.h"
-#include "lazy-instance.h"
-#include "mark-compact.h"
-#include "safepoint-table.h"
-#include "scopeinfo.h"
-#include "string-stream.h"
-
-#include "allocation-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-static ReturnAddressLocationResolver return_address_location_resolver = NULL;
-
-
-// Resolves pc_address through the resolution address function if one is set.
-static inline Address* ResolveReturnAddressLocation(Address* pc_address) {
- if (return_address_location_resolver == NULL) {
- return pc_address;
- } else {
- return reinterpret_cast<Address*>(
- return_address_location_resolver(
- reinterpret_cast<uintptr_t>(pc_address)));
- }
-}
-
-
-// Iterator that supports traversing the stack handlers of a
-// particular frame. Needs to know the top of the handler chain.
-class StackHandlerIterator BASE_EMBEDDED {
- public:
- StackHandlerIterator(const StackFrame* frame, StackHandler* handler)
- : limit_(frame->fp()), handler_(handler) {
- // Make sure the handler has already been unwound to this frame.
- ASSERT(frame->sp() <= handler->address());
- }
-
- StackHandler* handler() const { return handler_; }
-
- bool done() {
- return handler_ == NULL || handler_->address() > limit_;
- }
- void Advance() {
- ASSERT(!done());
- handler_ = handler_->next();
- }
-
- private:
- const Address limit_;
- StackHandler* handler_;
-};
-
-
-// -------------------------------------------------------------------------
-
-
-#define INITIALIZE_SINGLETON(type, field) field##_(this),
-StackFrameIterator::StackFrameIterator(Isolate* isolate)
- : isolate_(isolate),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL),
- thread_(isolate_->thread_local_top()),
- fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
- Reset();
-}
-StackFrameIterator::StackFrameIterator(Isolate* isolate, ThreadLocalTop* t)
- : isolate_(isolate),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL), thread_(t),
- fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
- Reset();
-}
-StackFrameIterator::StackFrameIterator(Isolate* isolate,
- bool use_top, Address fp, Address sp)
- : isolate_(isolate),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL),
- thread_(use_top ? isolate_->thread_local_top() : NULL),
- fp_(use_top ? NULL : fp), sp_(sp),
- advance_(use_top ? &StackFrameIterator::AdvanceWithHandler :
- &StackFrameIterator::AdvanceWithoutHandler) {
- if (use_top || fp != NULL) {
- Reset();
- }
-}
-
-#undef INITIALIZE_SINGLETON
-
-
-void StackFrameIterator::AdvanceWithHandler() {
- ASSERT(!done());
- // Compute the state of the calling frame before restoring
- // callee-saved registers and unwinding handlers. This allows the
- // frame code that computes the caller state to access the top
- // handler and the value of any callee-saved register if needed.
- StackFrame::State state;
- StackFrame::Type type = frame_->GetCallerState(&state);
-
- // Unwind handlers corresponding to the current frame.
- StackHandlerIterator it(frame_, handler_);
- while (!it.done()) it.Advance();
- handler_ = it.handler();
-
- // Advance to the calling frame.
- frame_ = SingletonFor(type, &state);
-
- // When we're done iterating over the stack frames, the handler
- // chain must have been completely unwound.
- ASSERT(!done() || handler_ == NULL);
-}
-
-
-void StackFrameIterator::AdvanceWithoutHandler() {
- // A simpler version of Advance which doesn't care about handler.
- ASSERT(!done());
- StackFrame::State state;
- StackFrame::Type type = frame_->GetCallerState(&state);
- frame_ = SingletonFor(type, &state);
-}
-
-
-void StackFrameIterator::Reset() {
- StackFrame::State state;
- StackFrame::Type type;
- if (thread_ != NULL) {
- type = ExitFrame::GetStateForFramePointer(
- Isolate::c_entry_fp(thread_), &state);
- handler_ = StackHandler::FromAddress(
- Isolate::handler(thread_));
- } else {
- ASSERT(fp_ != NULL);
- state.fp = fp_;
- state.sp = sp_;
- state.pc_address = ResolveReturnAddressLocation(
- reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_)));
- type = StackFrame::ComputeType(isolate(), &state);
- }
- if (SingletonFor(type) == NULL) return;
- frame_ = SingletonFor(type, &state);
-}
-
-
-StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type,
- StackFrame::State* state) {
- if (type == StackFrame::NONE) return NULL;
- StackFrame* result = SingletonFor(type);
- ASSERT(result != NULL);
- result->state_ = *state;
- return result;
-}
-
-
-StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type) {
-#define FRAME_TYPE_CASE(type, field) \
- case StackFrame::type: result = &field##_; break;
-
- StackFrame* result = NULL;
- switch (type) {
- case StackFrame::NONE: return NULL;
- STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
- default: break;
- }
- return result;
-
-#undef FRAME_TYPE_CASE
-}
-
-
-// -------------------------------------------------------------------------
-
-
-StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
- : JavaScriptFrameIterator(isolate) {
- if (!done() && !IsValidFrame()) Advance();
-}
-
-
-void StackTraceFrameIterator::Advance() {
- while (true) {
- JavaScriptFrameIterator::Advance();
- if (done()) return;
- if (IsValidFrame()) return;
- }
-}
-
-bool StackTraceFrameIterator::IsValidFrame() {
- if (!frame()->function()->IsJSFunction()) return false;
- Object* script = JSFunction::cast(frame()->function())->shared()->script();
- // Don't show functions from native scripts to user.
- return (script->IsScript() &&
- Script::TYPE_NATIVE != Script::cast(script)->type()->value());
-}
-
-
-// -------------------------------------------------------------------------
-
-
-bool SafeStackFrameIterator::ExitFrameValidator::IsValidFP(Address fp) {
- if (!validator_.IsValid(fp)) return false;
- Address sp = ExitFrame::ComputeStackPointer(fp);
- if (!validator_.IsValid(sp)) return false;
- StackFrame::State state;
- ExitFrame::FillState(fp, sp, &state);
- if (!validator_.IsValid(reinterpret_cast<Address>(state.pc_address))) {
- return false;
- }
- return *state.pc_address != NULL;
-}
-
-
-SafeStackFrameIterator::ActiveCountMaintainer::ActiveCountMaintainer(
- Isolate* isolate)
- : isolate_(isolate) {
- isolate_->set_safe_stack_iterator_counter(
- isolate_->safe_stack_iterator_counter() + 1);
-}
-
-
-SafeStackFrameIterator::ActiveCountMaintainer::~ActiveCountMaintainer() {
- isolate_->set_safe_stack_iterator_counter(
- isolate_->safe_stack_iterator_counter() - 1);
-}
-
-
-SafeStackFrameIterator::SafeStackFrameIterator(
- Isolate* isolate,
- Address fp, Address sp, Address low_bound, Address high_bound) :
- maintainer_(isolate),
- stack_validator_(low_bound, high_bound),
- is_valid_top_(IsValidTop(isolate, low_bound, high_bound)),
- is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
- is_working_iterator_(is_valid_top_ || is_valid_fp_),
- iteration_done_(!is_working_iterator_),
- iterator_(isolate, is_valid_top_, is_valid_fp_ ? fp : NULL, sp) {
-}
-
-bool SafeStackFrameIterator::is_active(Isolate* isolate) {
- return isolate->safe_stack_iterator_counter() > 0;
-}
-
-
-bool SafeStackFrameIterator::IsValidTop(Isolate* isolate,
- Address low_bound, Address high_bound) {
- ThreadLocalTop* top = isolate->thread_local_top();
- Address fp = Isolate::c_entry_fp(top);
- ExitFrameValidator validator(low_bound, high_bound);
- if (!validator.IsValidFP(fp)) return false;
- return Isolate::handler(top) != NULL;
-}
-
-
-void SafeStackFrameIterator::Advance() {
- ASSERT(is_working_iterator_);
- ASSERT(!done());
- StackFrame* last_frame = iterator_.frame();
- Address last_sp = last_frame->sp(), last_fp = last_frame->fp();
- // Before advancing to the next stack frame, perform pointer validity tests
- iteration_done_ = !IsValidFrame(last_frame) ||
- !CanIterateHandles(last_frame, iterator_.handler()) ||
- !IsValidCaller(last_frame);
- if (iteration_done_) return;
-
- iterator_.Advance();
- if (iterator_.done()) return;
- // Check that we have actually moved to the previous frame in the stack
- StackFrame* prev_frame = iterator_.frame();
- iteration_done_ = prev_frame->sp() < last_sp || prev_frame->fp() < last_fp;
-}
-
-
-bool SafeStackFrameIterator::CanIterateHandles(StackFrame* frame,
- StackHandler* handler) {
- // If StackIterator iterates over StackHandles, verify that
- // StackHandlerIterator can be instantiated (see StackHandlerIterator
- // constructor.)
- return !is_valid_top_ || (frame->sp() <= handler->address());
-}
-
-
-bool SafeStackFrameIterator::IsValidFrame(StackFrame* frame) const {
- return IsValidStackAddress(frame->sp()) && IsValidStackAddress(frame->fp());
-}
-
-
-bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
- StackFrame::State state;
- if (frame->is_entry() || frame->is_entry_construct()) {
- // See EntryFrame::GetCallerState. It computes the caller FP address
- // and calls ExitFrame::GetStateForFramePointer on it. We need to be
- // sure that caller FP address is valid.
- Address caller_fp = Memory::Address_at(
- frame->fp() + EntryFrameConstants::kCallerFPOffset);
- ExitFrameValidator validator(stack_validator_);
- if (!validator.IsValidFP(caller_fp)) return false;
- } else if (frame->is_arguments_adaptor()) {
- // See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
- // the number of arguments is stored on stack as Smi. We need to check
- // that it really an Smi.
- Object* number_of_args = reinterpret_cast<ArgumentsAdaptorFrame*>(frame)->
- GetExpression(0);
- if (!number_of_args->IsSmi()) {
- return false;
- }
- }
- frame->ComputeCallerState(&state);
- return IsValidStackAddress(state.sp) && IsValidStackAddress(state.fp) &&
- iterator_.SingletonFor(frame->GetCallerState(&state)) != NULL;
-}
-
-
-void SafeStackFrameIterator::Reset() {
- if (is_working_iterator_) {
- iterator_.Reset();
- iteration_done_ = false;
- }
-}
-
-
-// -------------------------------------------------------------------------
-
-
-SafeStackTraceFrameIterator::SafeStackTraceFrameIterator(
- Isolate* isolate,
- Address fp, Address sp, Address low_bound, Address high_bound) :
- SafeJavaScriptFrameIterator(isolate, fp, sp, low_bound, high_bound) {
- if (!done() && !frame()->is_java_script()) Advance();
-}
-
-
-void SafeStackTraceFrameIterator::Advance() {
- while (true) {
- SafeJavaScriptFrameIterator::Advance();
- if (done()) return;
- if (frame()->is_java_script()) return;
- }
-}
-
-
-Code* StackFrame::GetSafepointData(Isolate* isolate,
- Address inner_pointer,
- SafepointEntry* safepoint_entry,
- unsigned* stack_slots) {
- InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
- isolate->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
- if (!entry->safepoint_entry.is_valid()) {
- entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer);
- ASSERT(entry->safepoint_entry.is_valid());
- } else {
- ASSERT(entry->safepoint_entry.Equals(
- entry->code->GetSafepointEntry(inner_pointer)));
- }
-
- // Fill in the results and return the code.
- Code* code = entry->code;
- *safepoint_entry = entry->safepoint_entry;
- *stack_slots = code->stack_slots();
- return code;
-}
-
-
-bool StackFrame::HasHandler() const {
- StackHandlerIterator it(this, top_handler());
- return !it.done();
-}
-
-
-#ifdef DEBUG
-static bool GcSafeCodeContains(HeapObject* object, Address addr);
-#endif
-
-
-void StackFrame::IteratePc(ObjectVisitor* v,
- Address* pc_address,
- Code* holder) {
- Address pc = *pc_address;
- ASSERT(GcSafeCodeContains(holder, pc));
- unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
- Object* code = holder;
- v->VisitPointer(&code);
- if (code != holder) {
- holder = reinterpret_cast<Code*>(code);
- pc = holder->instruction_start() + pc_offset;
- *pc_address = pc;
- }
-}
-
-
-void StackFrame::SetReturnAddressLocationResolver(
- ReturnAddressLocationResolver resolver) {
- ASSERT(return_address_location_resolver == NULL);
- return_address_location_resolver = resolver;
-}
-
-
-StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) {
- // If we're using a "safe" stack iterator, we treat optimized
- // frames as normal JavaScript frames to avoid having to look
- // into the heap to determine the state. This is safe as long
- // as nobody tries to GC...
- if (SafeStackFrameIterator::is_active(isolate)) return JAVA_SCRIPT;
- Code::Kind kind = GetContainingCode(isolate, *(state->pc_address))->kind();
- ASSERT(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
- return (kind == Code::OPTIMIZED_FUNCTION) ? OPTIMIZED : JAVA_SCRIPT;
- }
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
-
-
-StackFrame::Type StackFrame::GetCallerState(State* state) const {
- ComputeCallerState(state);
- return ComputeType(isolate(), state);
-}
-
-
-Address StackFrame::UnpaddedFP() const {
-#if defined(V8_TARGET_ARCH_IA32)
- if (!is_optimized()) return fp();
- int32_t alignment_state = Memory::int32_at(
- fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset);
-
- return (alignment_state == kAlignmentPaddingPushed) ?
- (fp() + kPointerSize) : fp();
-#else
- return fp();
-#endif
-}
-
-
-Code* EntryFrame::unchecked_code() const {
- return HEAP->js_entry_code();
-}
-
-
-void EntryFrame::ComputeCallerState(State* state) const {
- GetCallerState(state);
-}
-
-
-void EntryFrame::SetCallerFp(Address caller_fp) {
- const int offset = EntryFrameConstants::kCallerFPOffset;
- Memory::Address_at(this->fp() + offset) = caller_fp;
-}
-
-
-StackFrame::Type EntryFrame::GetCallerState(State* state) const {
- const int offset = EntryFrameConstants::kCallerFPOffset;
- Address fp = Memory::Address_at(this->fp() + offset);
- return ExitFrame::GetStateForFramePointer(fp, state);
-}
-
-
-Code* EntryConstructFrame::unchecked_code() const {
- return HEAP->js_construct_entry_code();
-}
-
-
-Object*& ExitFrame::code_slot() const {
- const int offset = ExitFrameConstants::kCodeOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-Code* ExitFrame::unchecked_code() const {
- return reinterpret_cast<Code*>(code_slot());
-}
-
-
-void ExitFrame::ComputeCallerState(State* state) const {
- // Set up the caller state.
- state->sp = caller_sp();
- state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
- state->pc_address = ResolveReturnAddressLocation(
- reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset));
-}
-
-
-void ExitFrame::SetCallerFp(Address caller_fp) {
- Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset) = caller_fp;
-}
-
-
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- // The arguments are traversed as part of the expression stack of
- // the calling frame.
- IteratePc(v, pc_address(), LookupCode());
- v->VisitPointer(&code_slot());
-}
-
-
-Address ExitFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kCallerSPDisplacement;
-}
-
-
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
- if (fp == 0) return NONE;
- Address sp = ComputeStackPointer(fp);
- FillState(fp, sp, state);
- ASSERT(*state->pc_address != NULL);
- return EXIT;
-}
-
-
-void ExitFrame::FillState(Address fp, Address sp, State* state) {
- state->sp = sp;
- state->fp = fp;
- state->pc_address = ResolveReturnAddressLocation(
- reinterpret_cast<Address*>(sp - 1 * kPointerSize));
-}
-
-
-Address StandardFrame::GetExpressionAddress(int n) const {
- const int offset = StandardFrameConstants::kExpressionsOffset;
- return fp() + offset - n * kPointerSize;
-}
-
-
-Object* StandardFrame::GetExpression(Address fp, int index) {
- return Memory::Object_at(GetExpressionAddress(fp, index));
-}
-
-
-Address StandardFrame::GetExpressionAddress(Address fp, int n) {
- const int offset = StandardFrameConstants::kExpressionsOffset;
- return fp + offset - n * kPointerSize;
-}
-
-
-int StandardFrame::ComputeExpressionsCount() const {
- const int offset =
- StandardFrameConstants::kExpressionsOffset + kPointerSize;
- Address base = fp() + offset;
- Address limit = sp();
- ASSERT(base >= limit); // stack grows downwards
- // Include register-allocated locals in number of expressions.
- return static_cast<int>((base - limit) / kPointerSize);
-}
-
-
-void StandardFrame::ComputeCallerState(State* state) const {
- state->sp = caller_sp();
- state->fp = caller_fp();
- state->pc_address = ResolveReturnAddressLocation(
- reinterpret_cast<Address*>(ComputePCAddress(fp())));
-}
-
-
-void StandardFrame::SetCallerFp(Address caller_fp) {
- Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset) =
- caller_fp;
-}
-
-
-bool StandardFrame::IsExpressionInsideHandler(int n) const {
- Address address = GetExpressionAddress(n);
- for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
- if (it.handler()->includes(address)) return true;
- }
- return false;
-}
-
-
-void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
- // Make sure that we're not doing "safe" stack frame iteration. We cannot
- // possibly find pointers in optimized frames in that state.
- ASSERT(!SafeStackFrameIterator::is_active(isolate()));
-
- // Compute the safepoint information.
- unsigned stack_slots = 0;
- SafepointEntry safepoint_entry;
- Code* code = StackFrame::GetSafepointData(
- isolate(), pc(), &safepoint_entry, &stack_slots);
- unsigned slot_space = stack_slots * kPointerSize;
-
- // Visit the outgoing parameters.
- Object** parameters_base = &Memory::Object_at(sp());
- Object** parameters_limit = &Memory::Object_at(
- fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
-
- // Visit the parameters that may be on top of the saved registers.
- if (safepoint_entry.argument_count() > 0) {
- v->VisitPointers(parameters_base,
- parameters_base + safepoint_entry.argument_count());
- parameters_base += safepoint_entry.argument_count();
- }
-
- // Skip saved double registers.
- if (safepoint_entry.has_doubles()) {
- // Number of doubles not known at snapshot time.
- ASSERT(!Serializer::enabled());
- parameters_base += DoubleRegister::NumAllocatableRegisters() *
- kDoubleSize / kPointerSize;
- }
-
- // Visit the registers that contain pointers if any.
- if (safepoint_entry.HasRegisters()) {
- for (int i = kNumSafepointRegisters - 1; i >=0; i--) {
- if (safepoint_entry.HasRegisterAt(i)) {
- int reg_stack_index = MacroAssembler::SafepointRegisterStackIndex(i);
- v->VisitPointer(parameters_base + reg_stack_index);
- }
- }
- // Skip the words containing the register values.
- parameters_base += kNumSafepointRegisters;
- }
-
- // We're done dealing with the register bits.
- uint8_t* safepoint_bits = safepoint_entry.bits();
- safepoint_bits += kNumSafepointRegisters >> kBitsPerByteLog2;
-
- // Visit the rest of the parameters.
- v->VisitPointers(parameters_base, parameters_limit);
-
- // Visit pointer spill slots and locals.
- for (unsigned index = 0; index < stack_slots; index++) {
- int byte_index = index >> kBitsPerByteLog2;
- int bit_index = index & (kBitsPerByte - 1);
- if ((safepoint_bits[byte_index] & (1U << bit_index)) != 0) {
- v->VisitPointer(parameters_limit + index);
- }
- }
-
- // Visit the return address in the callee and incoming arguments.
- IteratePc(v, pc_address(), code);
-
- // Visit the context in stub frame and JavaScript frame.
- // Visit the function in JavaScript frame.
- Object** fixed_base = &Memory::Object_at(
- fp() + StandardFrameConstants::kMarkerOffset);
- Object** fixed_limit = &Memory::Object_at(fp());
- v->VisitPointers(fixed_base, fixed_limit);
-}
-
-
-void StubFrame::Iterate(ObjectVisitor* v) const {
- IterateCompiledFrame(v);
-}
-
-
-Code* StubFrame::unchecked_code() const {
- return static_cast<Code*>(isolate()->heap()->FindCodeObject(pc()));
-}
-
-
-Address StubFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kCallerSPDisplacement;
-}
-
-
-int StubFrame::GetNumberOfIncomingArguments() const {
- return 0;
-}
-
-
-void OptimizedFrame::Iterate(ObjectVisitor* v) const {
-#ifdef DEBUG
- // Make sure that optimized frames do not contain any stack handlers.
- StackHandlerIterator it(this, top_handler());
- ASSERT(it.done());
-#endif
-
- IterateCompiledFrame(v);
-}
-
-
-void JavaScriptFrame::SetParameterValue(int index, Object* value) const {
- Memory::Object_at(GetParameterSlot(index)) = value;
-}
-
-
-bool JavaScriptFrame::IsConstructor() const {
- Address fp = caller_fp();
- if (has_adapted_arguments()) {
- // Skip the arguments adaptor frame and look at the real caller.
- fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
- }
- return IsConstructFrame(fp);
-}
-
-
-int JavaScriptFrame::GetArgumentsLength() const {
- // If there is an arguments adaptor frame get the arguments length from it.
- if (has_adapted_arguments()) {
- return Smi::cast(GetExpression(caller_fp(), 0))->value();
- } else {
- return GetNumberOfIncomingArguments();
- }
-}
-
-
-Code* JavaScriptFrame::unchecked_code() const {
- JSFunction* function = JSFunction::cast(this->function());
- return function->unchecked_code();
-}
-
-
-int JavaScriptFrame::GetNumberOfIncomingArguments() const {
- ASSERT(!SafeStackFrameIterator::is_active(isolate()) &&
- isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
-
- JSFunction* function = JSFunction::cast(this->function());
- return function->shared()->formal_parameter_count();
-}
-
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-
-void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) {
- ASSERT(functions->length() == 0);
- functions->Add(JSFunction::cast(function()));
-}
-
-
-void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
- ASSERT(functions->length() == 0);
- Code* code_pointer = LookupCode();
- int offset = static_cast<int>(pc() - code_pointer->address());
- FrameSummary summary(receiver(),
- JSFunction::cast(function()),
- code_pointer,
- offset,
- IsConstructor());
- functions->Add(summary);
-}
-
-
-void JavaScriptFrame::PrintTop(Isolate* isolate,
- FILE* file,
- bool print_args,
- bool print_line_number) {
- // constructor calls
- HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
- JavaScriptFrameIterator it(isolate);
- while (!it.done()) {
- if (it.frame()->is_java_script()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->IsConstructor()) FPrintF(file, "new ");
- // function name
- Object* maybe_fun = frame->function();
- if (maybe_fun->IsJSFunction()) {
- JSFunction* fun = JSFunction::cast(maybe_fun);
- fun->PrintName();
- Code* js_code = frame->unchecked_code();
- Address pc = frame->pc();
- int code_offset =
- static_cast<int>(pc - js_code->instruction_start());
- PrintF("+%d", code_offset);
- SharedFunctionInfo* shared = fun->shared();
- if (print_line_number) {
- Code* code = Code::cast(
- v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
- int source_pos = code->SourcePosition(pc);
- Object* maybe_script = shared->script();
- if (maybe_script->IsScript()) {
- Handle<Script> script(Script::cast(maybe_script));
- int line = GetScriptLineNumberSafe(script, source_pos) + 1;
- Object* script_name_raw = script->name();
- if (script_name_raw->IsString()) {
- String* script_name = String::cast(script->name());
- SmartArrayPointer<char> c_script_name =
- script_name->ToCString(DISALLOW_NULLS,
- ROBUST_STRING_TRAVERSAL);
- FPrintF(file, " at %s:%d", *c_script_name, line);
- } else {
- FPrintF(file, " at <unknown>:%d", line);
- }
- } else {
- FPrintF(file, " at <unknown>:<unknown>");
- }
- }
- } else {
- PrintF("<unknown>");
- }
-
- if (print_args) {
- // function arguments
- // (we are intentionally only printing the actually
- // supplied parameters, not all parameters required)
- FPrintF(file, "(this=");
- frame->receiver()->ShortPrint(file);
- const int length = frame->ComputeParametersCount();
- for (int i = 0; i < length; i++) {
- FPrintF(file, ", ");
- frame->GetParameter(i)->ShortPrint(file);
- }
- FPrintF(file, ")");
- }
- break;
- }
- it.Advance();
- }
-}
-
-
-void FrameSummary::Print() {
- PrintF("receiver: ");
- receiver_->ShortPrint();
- PrintF("\nfunction: ");
- function_->shared()->DebugName()->ShortPrint();
- PrintF("\ncode: ");
- code_->ShortPrint();
- if (code_->kind() == Code::FUNCTION) PrintF(" NON-OPT");
- if (code_->kind() == Code::OPTIMIZED_FUNCTION) PrintF(" OPT");
- PrintF("\npc: %d\n", offset_);
-}
-
-
-JSFunction* OptimizedFrame::LiteralAt(FixedArray* literal_array,
- int literal_id) {
- if (literal_id == Translation::kSelfLiteralId) {
- return JSFunction::cast(function());
- }
-
- return JSFunction::cast(literal_array->get(literal_id));
-}
-
-
-void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
- ASSERT(frames->length() == 0);
- ASSERT(is_optimized());
-
- int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
- FixedArray* literal_array = data->LiteralArray();
-
- // BUG(3243555): Since we don't have a lazy-deopt registered at
- // throw-statements, we can't use the translation at the call-site of
- // throw. An entry with no deoptimization index indicates a call-site
- // without a lazy-deopt. As a consequence we are not allowed to inline
- // functions containing throw.
- if (deopt_index == Safepoint::kNoDeoptimizationIndex) {
- JavaScriptFrame::Summarize(frames);
- return;
- }
-
- TranslationIterator it(data->TranslationByteArray(),
- data->TranslationIndex(deopt_index)->value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- ASSERT(opcode == Translation::BEGIN);
- it.Next(); // Drop frame count.
- int jsframe_count = it.Next();
-
- // We create the summary in reverse order because the frames
- // in the deoptimization translation are ordered bottom-to-top.
- bool is_constructor = IsConstructor();
- int i = jsframe_count;
- while (i > 0) {
- opcode = static_cast<Translation::Opcode>(it.Next());
- if (opcode == Translation::JS_FRAME) {
- i--;
- BailoutId ast_id = BailoutId(it.Next());
- JSFunction* function = LiteralAt(literal_array, it.Next());
- it.Next(); // Skip height.
-
- // The translation commands are ordered and the receiver is always
- // at the first position. Since we are always at a call when we need
- // to construct a stack trace, the receiver is always in a stack slot.
- opcode = static_cast<Translation::Opcode>(it.Next());
- ASSERT(opcode == Translation::STACK_SLOT ||
- opcode == Translation::LITERAL);
- int index = it.Next();
-
- // Get the correct receiver in the optimized frame.
- Object* receiver = NULL;
- if (opcode == Translation::LITERAL) {
- receiver = data->LiteralArray()->get(index);
- } else {
- // Positive index means the value is spilled to the locals
- // area. Negative means it is stored in the incoming parameter
- // area.
- if (index >= 0) {
- receiver = GetExpression(index);
- } else {
- // Index -1 overlaps with last parameter, -n with the first parameter,
- // (-n - 1) with the receiver with n being the number of parameters
- // of the outermost, optimized frame.
- int parameter_count = ComputeParametersCount();
- int parameter_index = index + parameter_count;
- receiver = (parameter_index == -1)
- ? this->receiver()
- : this->GetParameter(parameter_index);
- }
- }
-
- Code* code = function->shared()->code();
- DeoptimizationOutputData* output_data =
- DeoptimizationOutputData::cast(code->deoptimization_data());
- unsigned entry = Deoptimizer::GetOutputInfo(output_data,
- ast_id,
- function->shared());
- unsigned pc_offset =
- FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
- ASSERT(pc_offset > 0);
-
- FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
- frames->Add(summary);
- is_constructor = false;
- } else if (opcode == Translation::CONSTRUCT_STUB_FRAME) {
- // The next encountered JS_FRAME will be marked as a constructor call.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- ASSERT(!is_constructor);
- is_constructor = true;
- } else {
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- }
- }
- ASSERT(!is_constructor);
-}
-
-
-DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
- int* deopt_index) {
- ASSERT(is_optimized());
-
- JSFunction* opt_function = JSFunction::cast(function());
- Code* code = opt_function->code();
-
- // The code object may have been replaced by lazy deoptimization. Fall
- // back to a slow search in this case to find the original optimized
- // code object.
- if (!code->contains(pc())) {
- code = isolate()->inner_pointer_to_code_cache()->
- GcSafeFindCodeForInnerPointer(pc());
- }
- ASSERT(code != NULL);
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
-
- SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
- *deopt_index = safepoint_entry.deoptimization_index();
- ASSERT(*deopt_index != Safepoint::kNoDeoptimizationIndex);
-
- return DeoptimizationInputData::cast(code->deoptimization_data());
-}
-
-
-int OptimizedFrame::GetInlineCount() {
- ASSERT(is_optimized());
-
- int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
-
- TranslationIterator it(data->TranslationByteArray(),
- data->TranslationIndex(deopt_index)->value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- ASSERT(opcode == Translation::BEGIN);
- USE(opcode);
- it.Next(); // Drop frame count.
- int jsframe_count = it.Next();
- return jsframe_count;
-}
-
-
-void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
- ASSERT(functions->length() == 0);
- ASSERT(is_optimized());
-
- int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
- FixedArray* literal_array = data->LiteralArray();
-
- TranslationIterator it(data->TranslationByteArray(),
- data->TranslationIndex(deopt_index)->value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- ASSERT(opcode == Translation::BEGIN);
- it.Next(); // Drop frame count.
- int jsframe_count = it.Next();
-
- // We insert the frames in reverse order because the frames
- // in the deoptimization translation are ordered bottom-to-top.
- while (jsframe_count > 0) {
- opcode = static_cast<Translation::Opcode>(it.Next());
- if (opcode == Translation::JS_FRAME) {
- jsframe_count--;
- it.Next(); // Skip ast id.
- JSFunction* function = LiteralAt(literal_array, it.Next());
- it.Next(); // Skip height.
- functions->Add(function);
- } else {
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- }
- }
-}
-
-
-int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
- return Smi::cast(GetExpression(0))->value();
-}
-
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-
-Address InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-
-Code* ArgumentsAdaptorFrame::unchecked_code() const {
- return isolate()->builtins()->builtin(
- Builtins::kArgumentsAdaptorTrampoline);
-}
-
-
-Code* InternalFrame::unchecked_code() const {
- const int offset = InternalFrameConstants::kCodeOffset;
- Object* code = Memory::Object_at(fp() + offset);
- ASSERT(code != NULL);
- return reinterpret_cast<Code*>(code);
-}
-
-
-void StackFrame::PrintIndex(StringStream* accumulator,
- PrintMode mode,
- int index) {
- accumulator->Add((mode == OVERVIEW) ? "%5d: " : "[%d]: ", index);
-}
-
-
-void JavaScriptFrame::Print(StringStream* accumulator,
- PrintMode mode,
- int index) const {
- HandleScope scope(isolate());
- Object* receiver = this->receiver();
- Object* function = this->function();
-
- accumulator->PrintSecurityTokenIfChanged(function);
- PrintIndex(accumulator, mode, index);
- Code* code = NULL;
- if (IsConstructor()) accumulator->Add("new ");
- accumulator->PrintFunction(function, receiver, &code);
-
- // Get scope information for nicer output, if possible. If code is NULL, or
- // doesn't contain scope info, scope_info will return 0 for the number of
- // parameters, stack local variables, context local variables, stack slots,
- // or context slots.
- Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate()));
-
- if (function->IsJSFunction()) {
- Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
- scope_info = Handle<ScopeInfo>(shared->scope_info());
- Object* script_obj = shared->script();
- if (script_obj->IsScript()) {
- Handle<Script> script(Script::cast(script_obj));
- accumulator->Add(" [");
- accumulator->PrintName(script->name());
-
- Address pc = this->pc();
- if (code != NULL && code->kind() == Code::FUNCTION &&
- pc >= code->instruction_start() && pc < code->instruction_end()) {
- int source_pos = code->SourcePosition(pc);
- int line = GetScriptLineNumberSafe(script, source_pos) + 1;
- accumulator->Add(":%d", line);
- } else {
- int function_start_pos = shared->start_position();
- int line = GetScriptLineNumberSafe(script, function_start_pos) + 1;
- accumulator->Add(":~%d", line);
- }
-
- accumulator->Add("] ");
- }
- }
-
- accumulator->Add("(this=%o", receiver);
-
- // Print the parameters.
- int parameters_count = ComputeParametersCount();
- for (int i = 0; i < parameters_count; i++) {
- accumulator->Add(",");
- // If we have a name for the parameter we print it. Nameless
- // parameters are either because we have more actual parameters
- // than formal parameters or because we have no scope information.
- if (i < scope_info->ParameterCount()) {
- accumulator->PrintName(scope_info->ParameterName(i));
- accumulator->Add("=");
- }
- accumulator->Add("%o", GetParameter(i));
- }
-
- accumulator->Add(")");
- if (mode == OVERVIEW) {
- accumulator->Add("\n");
- return;
- }
- if (is_optimized()) {
- accumulator->Add(" {\n// optimized frame\n}\n");
- return;
- }
- accumulator->Add(" {\n");
-
- // Compute the number of locals and expression stack elements.
- int stack_locals_count = scope_info->StackLocalCount();
- int heap_locals_count = scope_info->ContextLocalCount();
- int expressions_count = ComputeExpressionsCount();
-
- // Print stack-allocated local variables.
- if (stack_locals_count > 0) {
- accumulator->Add(" // stack-allocated locals\n");
- }
- for (int i = 0; i < stack_locals_count; i++) {
- accumulator->Add(" var ");
- accumulator->PrintName(scope_info->StackLocalName(i));
- accumulator->Add(" = ");
- if (i < expressions_count) {
- accumulator->Add("%o", GetExpression(i));
- } else {
- accumulator->Add("// no expression found - inconsistent frame?");
- }
- accumulator->Add("\n");
- }
-
- // Try to get hold of the context of this frame.
- Context* context = NULL;
- if (this->context() != NULL && this->context()->IsContext()) {
- context = Context::cast(this->context());
- }
-
- // Print heap-allocated local variables.
- if (heap_locals_count > 0) {
- accumulator->Add(" // heap-allocated locals\n");
- }
- for (int i = 0; i < heap_locals_count; i++) {
- accumulator->Add(" var ");
- accumulator->PrintName(scope_info->ContextLocalName(i));
- accumulator->Add(" = ");
- if (context != NULL) {
- if (i < context->length()) {
- accumulator->Add("%o", context->get(Context::MIN_CONTEXT_SLOTS + i));
- } else {
- accumulator->Add(
- "// warning: missing context slot - inconsistent frame?");
- }
- } else {
- accumulator->Add("// warning: no context found - inconsistent frame?");
- }
- accumulator->Add("\n");
- }
-
- // Print the expression stack.
- int expressions_start = stack_locals_count;
- if (expressions_start < expressions_count) {
- accumulator->Add(" // expression stack (top to bottom)\n");
- }
- for (int i = expressions_count - 1; i >= expressions_start; i--) {
- if (IsExpressionInsideHandler(i)) continue;
- accumulator->Add(" [%02d] : %o\n", i, GetExpression(i));
- }
-
- // Print details about the function.
- if (FLAG_max_stack_trace_source_length != 0 && code != NULL) {
- SharedFunctionInfo* shared = JSFunction::cast(function)->shared();
- accumulator->Add("--------- s o u r c e c o d e ---------\n");
- shared->SourceCodePrint(accumulator, FLAG_max_stack_trace_source_length);
- accumulator->Add("\n-----------------------------------------\n");
- }
-
- accumulator->Add("}\n\n");
-}
-
-
-void ArgumentsAdaptorFrame::Print(StringStream* accumulator,
- PrintMode mode,
- int index) const {
- int actual = ComputeParametersCount();
- int expected = -1;
- Object* function = this->function();
- if (function->IsJSFunction()) {
- expected = JSFunction::cast(function)->shared()->formal_parameter_count();
- }
-
- PrintIndex(accumulator, mode, index);
- accumulator->Add("arguments adaptor frame: %d->%d", actual, expected);
- if (mode == OVERVIEW) {
- accumulator->Add("\n");
- return;
- }
- accumulator->Add(" {\n");
-
- // Print actual arguments.
- if (actual > 0) accumulator->Add(" // actual arguments\n");
- for (int i = 0; i < actual; i++) {
- accumulator->Add(" [%02d] : %o", i, GetParameter(i));
- if (expected != -1 && i >= expected) {
- accumulator->Add(" // not passed to callee");
- }
- accumulator->Add("\n");
- }
-
- accumulator->Add("}\n\n");
-}
-
-
-void EntryFrame::Iterate(ObjectVisitor* v) const {
- StackHandlerIterator it(this, top_handler());
- ASSERT(!it.done());
- StackHandler* handler = it.handler();
- ASSERT(handler->is_js_entry());
- handler->Iterate(v, LookupCode());
-#ifdef DEBUG
- // Make sure that the entry frame does not contain more than one
- // stack handler.
- it.Advance();
- ASSERT(it.done());
-#endif
- IteratePc(v, pc_address(), LookupCode());
-}
-
-
-void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
- const int offset = StandardFrameConstants::kContextOffset;
- Object** base = &Memory::Object_at(sp());
- Object** limit = &Memory::Object_at(fp() + offset) + 1;
- for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
- StackHandler* handler = it.handler();
- // Traverse pointers down to - but not including - the next
- // handler in the handler chain. Update the base to skip the
- // handler and allow the handler to traverse its own pointers.
- const Address address = handler->address();
- v->VisitPointers(base, reinterpret_cast<Object**>(address));
- base = reinterpret_cast<Object**>(address + StackHandlerConstants::kSize);
- // Traverse the pointers in the handler itself.
- handler->Iterate(v, LookupCode());
- }
- v->VisitPointers(base, limit);
-}
-
-
-void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
- IterateExpressions(v);
- IteratePc(v, pc_address(), LookupCode());
-}
-
-
-void InternalFrame::Iterate(ObjectVisitor* v) const {
- // Internal frames only have object pointers on the expression stack
- // as they never have any arguments.
- IterateExpressions(v);
- IteratePc(v, pc_address(), LookupCode());
-}
-
-
-void StubFailureTrampolineFrame::Iterate(ObjectVisitor* v) const {
- Object** base = &Memory::Object_at(sp());
- Object** limit = &Memory::Object_at(fp() +
- kFirstRegisterParameterFrameOffset);
- v->VisitPointers(base, limit);
- base = &Memory::Object_at(fp() + StandardFrameConstants::kMarkerOffset);
- const int offset = StandardFrameConstants::kContextOffset;
- limit = &Memory::Object_at(fp() + offset) + 1;
- v->VisitPointers(base, limit);
- IteratePc(v, pc_address(), LookupCode());
-}
-
-
-Address StubFailureTrampolineFrame::GetCallerStackPointer() const {
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-
-Code* StubFailureTrampolineFrame::unchecked_code() const {
- int i = 0;
- for (; i <= StubFailureTrampolineStub::kMaxExtraExpressionStackCount; ++i) {
- Code* trampoline;
- StubFailureTrampolineStub(i).FindCodeInCache(&trampoline, isolate());
- ASSERT(trampoline != NULL);
- Address current_pc = pc();
- Address code_start = trampoline->instruction_start();
- Address code_end = code_start + trampoline->instruction_size();
- if (code_start <= current_pc && current_pc < code_end) {
- return trampoline;
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-// -------------------------------------------------------------------------
-
-
-JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
- ASSERT(n >= 0);
- for (int i = 0; i <= n; i++) {
- while (!iterator_.frame()->is_java_script()) iterator_.Advance();
- if (i == n) return JavaScriptFrame::cast(iterator_.frame());
- iterator_.Advance();
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-// -------------------------------------------------------------------------
-
-
-static Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
- MapWord map_word = object->map_word();
- return map_word.IsForwardingAddress() ?
- map_word.ToForwardingAddress()->map() : map_word.ToMap();
-}
-
-
-static int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
- return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
-}
-
-
-#ifdef DEBUG
-static bool GcSafeCodeContains(HeapObject* code, Address addr) {
- Map* map = GcSafeMapOfCodeSpaceObject(code);
- ASSERT(map == code->GetHeap()->code_map());
- Address start = code->address();
- Address end = code->address() + code->SizeFromMap(map);
- return start <= addr && addr < end;
-}
-#endif
-
-
-Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
- Address inner_pointer) {
- Code* code = reinterpret_cast<Code*>(object);
- ASSERT(code != NULL && GcSafeCodeContains(code, inner_pointer));
- return code;
-}
-
-
-Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
- Address inner_pointer) {
- Heap* heap = isolate_->heap();
- // Check if the inner pointer points into a large object chunk.
- LargePage* large_page = heap->lo_space()->FindPage(inner_pointer);
- if (large_page != NULL) {
- return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
- }
-
- // Iterate through the page until we reach the end or find an object starting
- // after the inner pointer.
- Page* page = Page::FromAddress(inner_pointer);
-
- Address addr = page->skip_list()->StartFor(inner_pointer);
-
- Address top = heap->code_space()->top();
- Address limit = heap->code_space()->limit();
-
- while (true) {
- if (addr == top && addr != limit) {
- addr = limit;
- continue;
- }
-
- HeapObject* obj = HeapObject::FromAddress(addr);
- int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
- Address next_addr = addr + obj_size;
- if (next_addr > inner_pointer) return GcSafeCastToCode(obj, inner_pointer);
- addr = next_addr;
- }
-}
-
-
-InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
- InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
- isolate_->counters()->pc_to_code()->Increment();
- ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize));
- uint32_t hash = ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)),
- v8::internal::kZeroHashSeed);
- uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
- InnerPointerToCodeCacheEntry* entry = cache(index);
- if (entry->inner_pointer == inner_pointer) {
- isolate_->counters()->pc_to_code_cached()->Increment();
- ASSERT(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer));
- } else {
- // Because this code may be interrupted by a profiling signal that
- // also queries the cache, we cannot update inner_pointer before the code
- // has been set. Otherwise, we risk trying to use a cache entry before
- // the code has been computed.
- entry->code = GcSafeFindCodeForInnerPointer(inner_pointer);
- entry->safepoint_entry.Reset();
- entry->inner_pointer = inner_pointer;
- }
- return entry;
-}
-
-
-// -------------------------------------------------------------------------
-
-int NumRegs(RegList reglist) {
- return CompilerIntrinsics::CountSetBits(reglist);
-}
-
-
-struct JSCallerSavedCodeData {
- int reg_code[kNumJSCallerSaved];
-};
-
-JSCallerSavedCodeData caller_saved_code_data;
-
-void SetUpJSCallerSavedCodeData() {
- int i = 0;
- for (int r = 0; r < kNumRegs; r++)
- if ((kJSCallerSaved & (1 << r)) != 0)
- caller_saved_code_data.reg_code[i++] = r;
-
- ASSERT(i == kNumJSCallerSaved);
-}
-
-int JSCallerSavedCode(int n) {
- ASSERT(0 <= n && n < kNumJSCallerSaved);
- return caller_saved_code_data.reg_code[n];
-}
-
-
-#define DEFINE_WRAPPER(type, field) \
-class field##_Wrapper : public ZoneObject { \
- public: /* NOLINT */ \
- field##_Wrapper(const field& original) : frame_(original) { \
- } \
- field frame_; \
-};
-STACK_FRAME_TYPE_LIST(DEFINE_WRAPPER)
-#undef DEFINE_WRAPPER
-
-static StackFrame* AllocateFrameCopy(StackFrame* frame, Zone* zone) {
-#define FRAME_TYPE_CASE(type, field) \
- case StackFrame::type: { \
- field##_Wrapper* wrapper = \
- new(zone) field##_Wrapper(*(reinterpret_cast<field*>(frame))); \
- return &wrapper->frame_; \
- }
-
- switch (frame->type()) {
- STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
- default: UNREACHABLE();
- }
-#undef FRAME_TYPE_CASE
- return NULL;
-}
-
-Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone) {
- ZoneList<StackFrame*> list(10, zone);
- for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
- StackFrame* frame = AllocateFrameCopy(it.frame(), zone);
- list.Add(frame, zone);
- }
- return list.ToVector();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/frames.h b/src/3rdparty/v8/src/frames.h
deleted file mode 100644
index a91d004..0000000
--- a/src/3rdparty/v8/src/frames.h
+++ /dev/null
@@ -1,978 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FRAMES_H_
-#define V8_FRAMES_H_
-
-#include "allocation.h"
-#include "handles.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-typedef uint32_t RegList;
-
-// Get the number of registers in a given register list.
-int NumRegs(RegList list);
-
-void SetUpJSCallerSavedCodeData();
-
-// Return the code of the n-th saved register available to JavaScript.
-int JSCallerSavedCode(int n);
-
-
-// Forward declarations.
-class StackFrameIterator;
-class ThreadLocalTop;
-class Isolate;
-
-class InnerPointerToCodeCache {
- public:
- struct InnerPointerToCodeCacheEntry {
- Address inner_pointer;
- Code* code;
- SafepointEntry safepoint_entry;
- };
-
- explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) {
- Flush();
- }
-
- Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
- Code* GcSafeCastToCode(HeapObject* object, Address inner_pointer);
-
- void Flush() {
- memset(&cache_[0], 0, sizeof(cache_));
- }
-
- InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
-
- private:
- InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; }
-
- Isolate* isolate_;
-
- static const int kInnerPointerToCodeCacheSize = 1024;
- InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize];
-
- DISALLOW_COPY_AND_ASSIGN(InnerPointerToCodeCache);
-};
-
-
-class StackHandler BASE_EMBEDDED {
- public:
- enum Kind {
- JS_ENTRY,
- CATCH,
- FINALLY,
- LAST_KIND = FINALLY
- };
-
- static const int kKindWidth = 2;
- STATIC_ASSERT(LAST_KIND < (1 << kKindWidth));
- static const int kIndexWidth = 32 - kKindWidth;
- class KindField: public BitField<StackHandler::Kind, 0, kKindWidth> {};
- class IndexField: public BitField<unsigned, kKindWidth, kIndexWidth> {};
-
- // Get the address of this stack handler.
- inline Address address() const;
-
- // Get the next stack handler in the chain.
- inline StackHandler* next() const;
-
- // Tells whether the given address is inside this handler.
- inline bool includes(Address address) const;
-
- // Garbage collection support.
- inline void Iterate(ObjectVisitor* v, Code* holder) const;
-
- // Conversion support.
- static inline StackHandler* FromAddress(Address address);
-
- // Testers
- inline bool is_js_entry() const;
- inline bool is_catch() const;
- inline bool is_finally() const;
-
- private:
- // Accessors.
- inline Kind kind() const;
-
- inline Object** context_address() const;
- inline Object** code_address() const;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
-};
-
-
-#define STACK_FRAME_TYPE_LIST(V) \
- V(ENTRY, EntryFrame) \
- V(ENTRY_CONSTRUCT, EntryConstructFrame) \
- V(EXIT, ExitFrame) \
- V(JAVA_SCRIPT, JavaScriptFrame) \
- V(OPTIMIZED, OptimizedFrame) \
- V(STUB, StubFrame) \
- V(STUB_FAILURE_TRAMPOLINE, StubFailureTrampolineFrame) \
- V(INTERNAL, InternalFrame) \
- V(CONSTRUCT, ConstructFrame) \
- V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
-
-
-class StandardFrameConstants : public AllStatic {
- public:
- // Fixed part of the frame consists of return address, caller fp,
- // context and function.
- // StandardFrame::IterateExpressions assumes that kContextOffset is the last
- // object pointer.
- static const int kFixedFrameSize = 4 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
-};
-
-
-// Abstract base class for all stack frames.
-class StackFrame BASE_EMBEDDED {
- public:
-#define DECLARE_TYPE(type, ignore) type,
- enum Type {
- NONE = 0,
- STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
- NUMBER_OF_TYPES,
- // Used by FrameScope to indicate that the stack frame is constructed
- // manually and the FrameScope does not need to emit code.
- MANUAL
- };
-#undef DECLARE_TYPE
-
- // Opaque data type for identifying stack frames. Used extensively
- // by the debugger.
- // ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that enumeration type
- // has correct value range (see Issue 830 for more details).
- enum Id {
- ID_MIN_VALUE = kMinInt,
- ID_MAX_VALUE = kMaxInt,
- NO_ID = 0
- };
-
- // Used to mark the outermost JS entry frame.
- enum JsFrameMarker {
- INNER_JSENTRY_FRAME = 0,
- OUTERMOST_JSENTRY_FRAME = 1
- };
-
- struct State {
- State() : sp(NULL), fp(NULL), pc_address(NULL) { }
- Address sp;
- Address fp;
- Address* pc_address;
- };
-
- // Copy constructor; it breaks the connection to host iterator
- // (as an iterator usually lives on stack).
- StackFrame(const StackFrame& original) {
- this->state_ = original.state_;
- this->iterator_ = NULL;
- this->isolate_ = original.isolate_;
- }
-
- // Type testers.
- bool is_entry() const { return type() == ENTRY; }
- bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
- bool is_exit() const { return type() == EXIT; }
- bool is_optimized() const { return type() == OPTIMIZED; }
- bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
- bool is_internal() const { return type() == INTERNAL; }
- bool is_stub_failure_trampoline() const {
- return type() == STUB_FAILURE_TRAMPOLINE;
- }
- bool is_construct() const { return type() == CONSTRUCT; }
- virtual bool is_standard() const { return false; }
-
- bool is_java_script() const {
- Type type = this->type();
- return (type == JAVA_SCRIPT) || (type == OPTIMIZED);
- }
-
- // Accessors.
- Address sp() const { return state_.sp; }
- Address fp() const { return state_.fp; }
- Address caller_sp() const { return GetCallerStackPointer(); }
-
- // If this frame is optimized and was dynamically aligned return its old
- // unaligned frame pointer. When the frame is deoptimized its FP will shift
- // up one word and become unaligned.
- Address UnpaddedFP() const;
-
- Address pc() const { return *pc_address(); }
- void set_pc(Address pc) { *pc_address() = pc; }
-
- virtual void SetCallerFp(Address caller_fp) = 0;
-
- // Manually changes value of fp in this object.
- void UpdateFp(Address fp) { state_.fp = fp; }
-
- Address* pc_address() const { return state_.pc_address; }
-
- // Get the id of this stack frame.
- Id id() const { return static_cast<Id>(OffsetFrom(caller_sp())); }
-
- // Checks if this frame includes any stack handlers.
- bool HasHandler() const;
-
- // Get the type of this frame.
- virtual Type type() const = 0;
-
- // Get the code associated with this frame.
- // This method could be called during marking phase of GC.
- virtual Code* unchecked_code() const = 0;
-
- // Get the code associated with this frame.
- inline Code* LookupCode() const;
-
- // Get the code object that contains the given pc.
- static inline Code* GetContainingCode(Isolate* isolate, Address pc);
-
- // Get the code object containing the given pc and fill in the
- // safepoint entry and the number of stack slots. The pc must be at
- // a safepoint.
- static Code* GetSafepointData(Isolate* isolate,
- Address pc,
- SafepointEntry* safepoint_entry,
- unsigned* stack_slots);
-
- virtual void Iterate(ObjectVisitor* v) const = 0;
- static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
-
- // Sets a callback function for return-address rewriting profilers
- // to resolve the location of a return address to the location of the
- // profiler's stashed return address.
- static void SetReturnAddressLocationResolver(
- ReturnAddressLocationResolver resolver);
-
- // Printing support.
- enum PrintMode { OVERVIEW, DETAILS };
- virtual void Print(StringStream* accumulator,
- PrintMode mode,
- int index) const { }
-
- Isolate* isolate() const { return isolate_; }
-
- protected:
- inline explicit StackFrame(StackFrameIterator* iterator);
- virtual ~StackFrame() { }
-
- // Compute the stack pointer for the calling frame.
- virtual Address GetCallerStackPointer() const = 0;
-
- // Printing support.
- static void PrintIndex(StringStream* accumulator,
- PrintMode mode,
- int index);
-
- // Get the top handler from the current stack iterator.
- inline StackHandler* top_handler() const;
-
- // Compute the stack frame type for the given state.
- static Type ComputeType(Isolate* isolate, State* state);
-
- private:
- const StackFrameIterator* iterator_;
- Isolate* isolate_;
- State state_;
-
- // Fill in the state of the calling frame.
- virtual void ComputeCallerState(State* state) const = 0;
-
- // Get the type and the state of the calling frame.
- virtual Type GetCallerState(State* state) const;
-
- static const intptr_t kIsolateTag = 1;
-
- friend class StackFrameIterator;
- friend class StackHandlerIterator;
- friend class SafeStackFrameIterator;
-
- private:
- void operator=(const StackFrame& original);
-};
-
-
-// Entry frames are used to enter JavaScript execution from C.
-class EntryFrame: public StackFrame {
- public:
- virtual Type type() const { return ENTRY; }
-
- virtual Code* unchecked_code() const;
-
- // Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
-
- static EntryFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_entry());
- return static_cast<EntryFrame*>(frame);
- }
- virtual void SetCallerFp(Address caller_fp);
-
- protected:
- inline explicit EntryFrame(StackFrameIterator* iterator);
-
- // The caller stack pointer for entry frames is always zero. The
- // real information about the caller frame is available through the
- // link to the top exit frame.
- virtual Address GetCallerStackPointer() const { return 0; }
-
- private:
- virtual void ComputeCallerState(State* state) const;
- virtual Type GetCallerState(State* state) const;
-
- friend class StackFrameIterator;
-};
-
-
-class EntryConstructFrame: public EntryFrame {
- public:
- virtual Type type() const { return ENTRY_CONSTRUCT; }
-
- virtual Code* unchecked_code() const;
-
- static EntryConstructFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_entry_construct());
- return static_cast<EntryConstructFrame*>(frame);
- }
-
- protected:
- inline explicit EntryConstructFrame(StackFrameIterator* iterator);
-
- private:
- friend class StackFrameIterator;
-};
-
-
-// Exit frames are used to exit JavaScript execution and go to C.
-class ExitFrame: public StackFrame {
- public:
- virtual Type type() const { return EXIT; }
-
- virtual Code* unchecked_code() const;
-
- Object*& code_slot() const;
-
- // Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
-
- virtual void SetCallerFp(Address caller_fp);
-
- static ExitFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_exit());
- return static_cast<ExitFrame*>(frame);
- }
-
- // Compute the state and type of an exit frame given a frame
- // pointer. Used when constructing the first stack frame seen by an
- // iterator and the frames following entry frames.
- static Type GetStateForFramePointer(Address fp, State* state);
- static Address ComputeStackPointer(Address fp);
- static void FillState(Address fp, Address sp, State* state);
-
- protected:
- inline explicit ExitFrame(StackFrameIterator* iterator);
-
- virtual Address GetCallerStackPointer() const;
-
- private:
- virtual void ComputeCallerState(State* state) const;
-
- friend class StackFrameIterator;
-};
-
-
-class StandardFrame: public StackFrame {
- public:
- // Testers.
- virtual bool is_standard() const { return true; }
-
- // Accessors.
- inline Object* context() const;
-
- // Access the expressions in the stack frame including locals.
- inline Object* GetExpression(int index) const;
- inline void SetExpression(int index, Object* value);
- int ComputeExpressionsCount() const;
- static Object* GetExpression(Address fp, int index);
-
- virtual void SetCallerFp(Address caller_fp);
-
- static StandardFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_standard());
- return static_cast<StandardFrame*>(frame);
- }
-
- protected:
- inline explicit StandardFrame(StackFrameIterator* iterator);
-
- virtual void ComputeCallerState(State* state) const;
-
- // Accessors.
- inline Address caller_fp() const;
- inline Address caller_pc() const;
-
- // Computes the address of the PC field in the standard frame given
- // by the provided frame pointer.
- static inline Address ComputePCAddress(Address fp);
-
- // Iterate over expression stack including stack handlers, locals,
- // and parts of the fixed part including context and code fields.
- void IterateExpressions(ObjectVisitor* v) const;
-
- // Returns the address of the n'th expression stack element.
- Address GetExpressionAddress(int n) const;
- static Address GetExpressionAddress(Address fp, int n);
-
- // Determines if the n'th expression stack element is in a stack
- // handler or not. Requires traversing all handlers in this frame.
- bool IsExpressionInsideHandler(int n) const;
-
- // Determines if the standard frame for the given frame pointer is
- // an arguments adaptor frame.
- static inline bool IsArgumentsAdaptorFrame(Address fp);
-
- // Determines if the standard frame for the given frame pointer is a
- // construct frame.
- static inline bool IsConstructFrame(Address fp);
-
- // Used by OptimizedFrames and StubFrames.
- void IterateCompiledFrame(ObjectVisitor* v) const;
-
- private:
- friend class StackFrame;
- friend class StackFrameIterator;
-};
-
-
-class FrameSummary BASE_EMBEDDED {
- public:
- FrameSummary(Object* receiver,
- JSFunction* function,
- Code* code,
- int offset,
- bool is_constructor)
- : receiver_(receiver, function->GetIsolate()),
- function_(function),
- code_(code),
- offset_(offset),
- is_constructor_(is_constructor) { }
- Handle<Object> receiver() { return receiver_; }
- Handle<JSFunction> function() { return function_; }
- Handle<Code> code() { return code_; }
- Address pc() { return code_->address() + offset_; }
- int offset() { return offset_; }
- bool is_constructor() { return is_constructor_; }
-
- void Print();
-
- private:
- Handle<Object> receiver_;
- Handle<JSFunction> function_;
- Handle<Code> code_;
- int offset_;
- bool is_constructor_;
-};
-
-
-class JavaScriptFrame: public StandardFrame {
- public:
- virtual Type type() const { return JAVA_SCRIPT; }
-
- // Accessors.
- inline Object* function() const;
- inline Object* receiver() const;
- inline void set_receiver(Object* value);
-
- // Access the parameters.
- inline Address GetParameterSlot(int index) const;
- inline Object* GetParameter(int index) const;
- inline int ComputeParametersCount() const {
- return GetNumberOfIncomingArguments();
- }
-
- // Debugger access.
- void SetParameterValue(int index, Object* value) const;
-
- // Check if this frame is a constructor frame invoked through 'new'.
- bool IsConstructor() const;
-
- // Check if this frame has "adapted" arguments in the sense that the
- // actual passed arguments are available in an arguments adaptor
- // frame below it on the stack.
- inline bool has_adapted_arguments() const;
- int GetArgumentsLength() const;
-
- // Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
-
- // Printing support.
- virtual void Print(StringStream* accumulator,
- PrintMode mode,
- int index) const;
-
- // Determine the code for the frame.
- virtual Code* unchecked_code() const;
-
- // Returns the levels of inlining for this frame.
- virtual int GetInlineCount() { return 1; }
-
- // Return a list with JSFunctions of this frame.
- virtual void GetFunctions(List<JSFunction*>* functions);
-
- // Build a list with summaries for this frame including all inlined frames.
- virtual void Summarize(List<FrameSummary>* frames);
-
- static JavaScriptFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_java_script());
- return static_cast<JavaScriptFrame*>(frame);
- }
-
- static void PrintTop(Isolate* isolate,
- FILE* file,
- bool print_args,
- bool print_line_number);
-
- protected:
- inline explicit JavaScriptFrame(StackFrameIterator* iterator);
-
- virtual Address GetCallerStackPointer() const;
-
- virtual int GetNumberOfIncomingArguments() const;
-
- // Garbage collection support. Iterates over incoming arguments,
- // receiver, and any callee-saved registers.
- void IterateArguments(ObjectVisitor* v) const;
-
- private:
- inline Object* function_slot_object() const;
-
- friend class StackFrameIterator;
- friend class StackTracer;
-};
-
-
-class StubFrame : public StandardFrame {
- public:
- virtual Type type() const { return STUB; }
-
- // GC support.
- virtual void Iterate(ObjectVisitor* v) const;
-
- // Determine the code for the frame.
- virtual Code* unchecked_code() const;
-
- protected:
- inline explicit StubFrame(StackFrameIterator* iterator);
-
- virtual Address GetCallerStackPointer() const;
-
- virtual int GetNumberOfIncomingArguments() const;
-
- friend class StackFrameIterator;
-};
-
-
-class OptimizedFrame : public JavaScriptFrame {
- public:
- virtual Type type() const { return OPTIMIZED; }
-
- // GC support.
- virtual void Iterate(ObjectVisitor* v) const;
-
- virtual int GetInlineCount();
-
- // Return a list with JSFunctions of this frame.
- // The functions are ordered bottom-to-top (i.e. functions.last()
- // is the top-most activation)
- virtual void GetFunctions(List<JSFunction*>* functions);
-
- virtual void Summarize(List<FrameSummary>* frames);
-
- DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
-
- protected:
- inline explicit OptimizedFrame(StackFrameIterator* iterator);
-
- private:
- JSFunction* LiteralAt(FixedArray* literal_array, int literal_id);
-
- friend class StackFrameIterator;
-};
-
-
-// Arguments adaptor frames are automatically inserted below
-// JavaScript frames when the actual number of parameters does not
-// match the formal number of parameters.
-class ArgumentsAdaptorFrame: public JavaScriptFrame {
- public:
- virtual Type type() const { return ARGUMENTS_ADAPTOR; }
-
- // Determine the code for the frame.
- virtual Code* unchecked_code() const;
-
- static ArgumentsAdaptorFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_arguments_adaptor());
- return static_cast<ArgumentsAdaptorFrame*>(frame);
- }
-
- // Printing support.
- virtual void Print(StringStream* accumulator,
- PrintMode mode,
- int index) const;
-
- protected:
- inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator);
-
- virtual int GetNumberOfIncomingArguments() const;
-
- virtual Address GetCallerStackPointer() const;
-
- private:
- friend class StackFrameIterator;
-};
-
-
-class InternalFrame: public StandardFrame {
- public:
- virtual Type type() const { return INTERNAL; }
-
- // Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
-
- // Determine the code for the frame.
- virtual Code* unchecked_code() const;
-
- static InternalFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_internal());
- return static_cast<InternalFrame*>(frame);
- }
-
- protected:
- inline explicit InternalFrame(StackFrameIterator* iterator);
-
- virtual Address GetCallerStackPointer() const;
-
- private:
- friend class StackFrameIterator;
-};
-
-
-class StubFailureTrampolineFrame: public StandardFrame {
- public:
- // sizeof(Arguments) - sizeof(Arguments*) is 3 * kPointerSize), but the
- // presubmit script complains about using sizeof() on a type.
- static const int kFirstRegisterParameterFrameOffset =
- StandardFrameConstants::kMarkerOffset - 3 * kPointerSize;
-
- static const int kCallerStackParameterCountFrameOffset =
- StandardFrameConstants::kMarkerOffset - 2 * kPointerSize;
-
- virtual Type type() const { return STUB_FAILURE_TRAMPOLINE; }
-
- // Get the code associated with this frame.
- // This method could be called during marking phase of GC.
- virtual Code* unchecked_code() const;
-
- virtual void Iterate(ObjectVisitor* v) const;
-
- protected:
- inline explicit StubFailureTrampolineFrame(
- StackFrameIterator* iterator);
-
- virtual Address GetCallerStackPointer() const;
-
- private:
- friend class StackFrameIterator;
-};
-
-
-// Construct frames are special trampoline frames introduced to handle
-// function invocations through 'new'.
-class ConstructFrame: public InternalFrame {
- public:
- virtual Type type() const { return CONSTRUCT; }
-
- static ConstructFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_construct());
- return static_cast<ConstructFrame*>(frame);
- }
-
- protected:
- inline explicit ConstructFrame(StackFrameIterator* iterator);
-
- private:
- friend class StackFrameIterator;
-};
-
-
-class StackFrameIterator BASE_EMBEDDED {
- public:
- // An iterator that iterates over the isolate's current thread's stack,
- explicit StackFrameIterator(Isolate* isolate);
-
- // An iterator that iterates over a given thread's stack.
- StackFrameIterator(Isolate* isolate, ThreadLocalTop* t);
-
- // An iterator that can start from a given FP address.
- // If use_top, then work as usual, if fp isn't NULL, use it,
- // otherwise, do nothing.
- StackFrameIterator(Isolate* isolate, bool use_top, Address fp, Address sp);
-
- StackFrame* frame() const {
- ASSERT(!done());
- return frame_;
- }
-
- Isolate* isolate() const { return isolate_; }
-
- bool done() const { return frame_ == NULL; }
- void Advance() { (this->*advance_)(); }
-
- // Go back to the first frame.
- void Reset();
-
- private:
- Isolate* isolate_;
-#define DECLARE_SINGLETON(ignore, type) type type##_;
- STACK_FRAME_TYPE_LIST(DECLARE_SINGLETON)
-#undef DECLARE_SINGLETON
- StackFrame* frame_;
- StackHandler* handler_;
- ThreadLocalTop* thread_;
- Address fp_;
- Address sp_;
- void (StackFrameIterator::*advance_)();
-
- StackHandler* handler() const {
- ASSERT(!done());
- return handler_;
- }
-
- // Get the type-specific frame singleton in a given state.
- StackFrame* SingletonFor(StackFrame::Type type, StackFrame::State* state);
- // A helper function, can return a NULL pointer.
- StackFrame* SingletonFor(StackFrame::Type type);
-
- void AdvanceWithHandler();
- void AdvanceWithoutHandler();
-
- friend class StackFrame;
- friend class SafeStackFrameIterator;
- DISALLOW_COPY_AND_ASSIGN(StackFrameIterator);
-};
-
-
-// Iterator that supports iterating through all JavaScript frames.
-template<typename Iterator>
-class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
- public:
- inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
-
- inline JavaScriptFrameIteratorTemp(Isolate* isolate, ThreadLocalTop* top);
-
- // Skip frames until the frame with the given id is reached.
- explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); }
-
- inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
-
- JavaScriptFrameIteratorTemp(Address fp,
- Address sp,
- Address low_bound,
- Address high_bound) :
- iterator_(fp, sp, low_bound, high_bound) {
- if (!done()) Advance();
- }
-
- JavaScriptFrameIteratorTemp(Isolate* isolate,
- Address fp,
- Address sp,
- Address low_bound,
- Address high_bound) :
- iterator_(isolate, fp, sp, low_bound, high_bound) {
- if (!done()) Advance();
- }
-
- inline JavaScriptFrame* frame() const;
-
- bool done() const { return iterator_.done(); }
- void Advance();
-
- // Advance to the frame holding the arguments for the current
- // frame. This only affects the current frame if it has adapted
- // arguments.
- void AdvanceToArgumentsFrame();
-
- // Go back to the first frame.
- void Reset();
-
- private:
- inline void AdvanceToId(StackFrame::Id id);
-
- Iterator iterator_;
-};
-
-
-typedef JavaScriptFrameIteratorTemp<StackFrameIterator> JavaScriptFrameIterator;
-
-
-// NOTE: The stack trace frame iterator is an iterator that only
-// traverse proper JavaScript frames; that is JavaScript frames that
-// have proper JavaScript functions. This excludes the problematic
-// functions in runtime.js.
-class StackTraceFrameIterator: public JavaScriptFrameIterator {
- public:
- StackTraceFrameIterator();
- explicit StackTraceFrameIterator(Isolate* isolate);
- void Advance();
-
- private:
- bool IsValidFrame();
-};
-
-
-class SafeStackFrameIterator BASE_EMBEDDED {
- public:
- SafeStackFrameIterator(Isolate* isolate,
- Address fp, Address sp,
- Address low_bound, Address high_bound);
-
- StackFrame* frame() const {
- ASSERT(is_working_iterator_);
- return iterator_.frame();
- }
-
- bool done() const { return iteration_done_ ? true : iterator_.done(); }
-
- void Advance();
- void Reset();
-
- static bool is_active(Isolate* isolate);
-
- static bool IsWithinBounds(
- Address low_bound, Address high_bound, Address addr) {
- return low_bound <= addr && addr <= high_bound;
- }
-
- private:
- class StackAddressValidator {
- public:
- StackAddressValidator(Address low_bound, Address high_bound)
- : low_bound_(low_bound), high_bound_(high_bound) { }
- bool IsValid(Address addr) const {
- return IsWithinBounds(low_bound_, high_bound_, addr);
- }
- private:
- Address low_bound_;
- Address high_bound_;
- };
-
- class ExitFrameValidator {
- public:
- explicit ExitFrameValidator(const StackAddressValidator& validator)
- : validator_(validator) { }
- ExitFrameValidator(Address low_bound, Address high_bound)
- : validator_(low_bound, high_bound) { }
- bool IsValidFP(Address fp);
- private:
- StackAddressValidator validator_;
- };
-
- bool IsValidStackAddress(Address addr) const {
- return stack_validator_.IsValid(addr);
- }
- bool CanIterateHandles(StackFrame* frame, StackHandler* handler);
- bool IsValidFrame(StackFrame* frame) const;
- bool IsValidCaller(StackFrame* frame);
- static bool IsValidTop(Isolate* isolate,
- Address low_bound, Address high_bound);
-
- // This is a nasty hack to make sure the active count is incremented
- // before the constructor for the embedded iterator is invoked. This
- // is needed because the constructor will start looking at frames
- // right away and we need to make sure it doesn't start inspecting
- // heap objects.
- class ActiveCountMaintainer BASE_EMBEDDED {
- public:
- explicit ActiveCountMaintainer(Isolate* isolate);
- ~ActiveCountMaintainer();
- private:
- Isolate* isolate_;
- };
-
- ActiveCountMaintainer maintainer_;
- StackAddressValidator stack_validator_;
- const bool is_valid_top_;
- const bool is_valid_fp_;
- const bool is_working_iterator_;
- bool iteration_done_;
- StackFrameIterator iterator_;
-};
-
-
-typedef JavaScriptFrameIteratorTemp<SafeStackFrameIterator>
- SafeJavaScriptFrameIterator;
-
-
-class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator {
- public:
- explicit SafeStackTraceFrameIterator(Isolate* isolate,
- Address fp, Address sp,
- Address low_bound, Address high_bound);
- void Advance();
-};
-
-
-class StackFrameLocator BASE_EMBEDDED {
- public:
- explicit StackFrameLocator(Isolate* isolate) : iterator_(isolate) {}
-
- // Find the nth JavaScript frame on the stack. The caller must
- // guarantee that such a frame exists.
- JavaScriptFrame* FindJavaScriptFrame(int n);
-
- private:
- StackFrameIterator iterator_;
-};
-
-
-// Reads all frames on the current stack and copies them into the current
-// zone memory.
-Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone);
-
-} } // namespace v8::internal
-
-#endif // V8_FRAMES_H_
diff --git a/src/3rdparty/v8/src/full-codegen.cc b/src/3rdparty/v8/src/full-codegen.cc
deleted file mode 100644
index a43f674..0000000
--- a/src/3rdparty/v8/src/full-codegen.cc
+++ /dev/null
@@ -1,1584 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "liveedit.h"
-#include "macro-assembler.h"
-#include "prettyprinter.h"
-#include "scopes.h"
-#include "scopeinfo.h"
-#include "snapshot.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-void BreakableStatementChecker::Check(Statement* stmt) {
- Visit(stmt);
-}
-
-
-void BreakableStatementChecker::Check(Expression* expr) {
- Visit(expr);
-}
-
-
-void BreakableStatementChecker::VisitVariableDeclaration(
- VariableDeclaration* decl) {
-}
-
-void BreakableStatementChecker::VisitFunctionDeclaration(
- FunctionDeclaration* decl) {
-}
-
-void BreakableStatementChecker::VisitModuleDeclaration(
- ModuleDeclaration* decl) {
-}
-
-void BreakableStatementChecker::VisitImportDeclaration(
- ImportDeclaration* decl) {
-}
-
-void BreakableStatementChecker::VisitExportDeclaration(
- ExportDeclaration* decl) {
-}
-
-
-void BreakableStatementChecker::VisitModuleLiteral(ModuleLiteral* module) {
-}
-
-void BreakableStatementChecker::VisitModuleVariable(ModuleVariable* module) {
-}
-
-void BreakableStatementChecker::VisitModulePath(ModulePath* module) {
-}
-
-void BreakableStatementChecker::VisitModuleUrl(ModuleUrl* module) {
-}
-
-
-void BreakableStatementChecker::VisitModuleStatement(ModuleStatement* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitBlock(Block* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- // Check if expression is breakable.
- Visit(stmt->expression());
-}
-
-
-void BreakableStatementChecker::VisitEmptyStatement(EmptyStatement* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitIfStatement(IfStatement* stmt) {
- // If the condition is breakable the if statement is breakable.
- Visit(stmt->condition());
-}
-
-
-void BreakableStatementChecker::VisitContinueStatement(
- ContinueStatement* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitBreakStatement(BreakStatement* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitReturnStatement(ReturnStatement* stmt) {
- // Return is breakable if the expression is.
- Visit(stmt->expression());
-}
-
-
-void BreakableStatementChecker::VisitWithStatement(WithStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void BreakableStatementChecker::VisitSwitchStatement(SwitchStatement* stmt) {
- // Switch statements breakable if the tag expression is.
- Visit(stmt->tag());
-}
-
-
-void BreakableStatementChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
- // Mark do while as breakable to avoid adding a break slot in front of it.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitWhileStatement(WhileStatement* stmt) {
- // Mark while statements breakable if the condition expression is.
- Visit(stmt->cond());
-}
-
-
-void BreakableStatementChecker::VisitForStatement(ForStatement* stmt) {
- // Mark for statements breakable if the condition expression is.
- if (stmt->cond() != NULL) {
- Visit(stmt->cond());
- }
-}
-
-
-void BreakableStatementChecker::VisitForInStatement(ForInStatement* stmt) {
- // Mark for in statements breakable if the enumerable expression is.
- Visit(stmt->enumerable());
-}
-
-
-void BreakableStatementChecker::VisitTryCatchStatement(
- TryCatchStatement* stmt) {
- // Mark try catch as breakable to avoid adding a break slot in front of it.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- // Mark try finally as breakable to avoid adding a break slot in front of it.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- // The debugger statement is breakable.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitConditional(Conditional* expr) {
-}
-
-
-void BreakableStatementChecker::VisitVariableProxy(VariableProxy* expr) {
-}
-
-
-void BreakableStatementChecker::VisitLiteral(Literal* expr) {
-}
-
-
-void BreakableStatementChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitObjectLiteral(ObjectLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitArrayLiteral(ArrayLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitAssignment(Assignment* expr) {
- // If assigning to a property (including a global property) the assignment is
- // breakable.
- VariableProxy* proxy = expr->target()->AsVariableProxy();
- Property* prop = expr->target()->AsProperty();
- if (prop != NULL || (proxy != NULL && proxy->var()->IsUnallocated())) {
- is_breakable_ = true;
- return;
- }
-
- // Otherwise the assignment is breakable if the assigned value is.
- Visit(expr->value());
-}
-
-
-void BreakableStatementChecker::VisitThrow(Throw* expr) {
- // Throw is breakable if the expression is.
- Visit(expr->exception());
-}
-
-
-void BreakableStatementChecker::VisitProperty(Property* expr) {
- // Property load is breakable.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitCall(Call* expr) {
- // Function calls both through IC and call stub are breakable.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitCallNew(CallNew* expr) {
- // Function calls through new are breakable.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitCallRuntime(CallRuntime* expr) {
-}
-
-
-void BreakableStatementChecker::VisitUnaryOperation(UnaryOperation* expr) {
- Visit(expr->expression());
-}
-
-
-void BreakableStatementChecker::VisitCountOperation(CountOperation* expr) {
- Visit(expr->expression());
-}
-
-
-void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) {
- Visit(expr->left());
- if (expr->op() != Token::AND &&
- expr->op() != Token::OR) {
- Visit(expr->right());
- }
-}
-
-
-void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
- Visit(expr->left());
- Visit(expr->right());
-}
-
-
-void BreakableStatementChecker::VisitThisFunction(ThisFunction* expr) {
-}
-
-
-#define __ ACCESS_MASM(masm())
-
-bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
- Isolate* isolate = info->isolate();
- Handle<Script> script = info->script();
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- int len = String::cast(script->source())->length();
- isolate->counters()->total_full_codegen_source_size()->Increment(len);
- }
- if (FLAG_trace_codegen) {
- PrintF("Full Compiler - ");
- }
- CodeGenerator::MakeCodePrologue(info);
- const int kInitialBufferSize = 4 * KB;
- MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
-#ifdef ENABLE_GDB_JIT_INTERFACE
- masm.positions_recorder()->StartGDBJITLineInfoRecording();
-#endif
- LOG_CODE_EVENT(isolate,
- CodeStartLinePosInfoRecordEvent(masm.positions_recorder()));
-
- FullCodeGenerator cgen(&masm, info);
- cgen.Generate();
- if (cgen.HasStackOverflow()) {
- ASSERT(!isolate->has_pending_exception());
- return false;
- }
- unsigned table_offset = cgen.EmitStackCheckTable();
-
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
- Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
- code->set_optimizable(info->IsOptimizable() &&
- !info->function()->flags()->Contains(kDontOptimize) &&
- info->function()->scope()->AllowsLazyCompilation());
- cgen.PopulateDeoptimizationData(code);
- cgen.PopulateTypeFeedbackInfo(code);
- cgen.PopulateTypeFeedbackCells(code);
- code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
- code->set_handler_table(*cgen.handler_table());
-#ifdef ENABLE_DEBUGGER_SUPPORT
- code->set_has_debug_break_slots(
- info->isolate()->debugger()->IsDebuggerActive());
- code->set_compiled_optimizable(info->IsOptimizable());
-#endif // ENABLE_DEBUGGER_SUPPORT
- code->set_allow_osr_at_loop_nesting_level(0);
- code->set_profiler_ticks(0);
- code->set_stack_check_table_offset(table_offset);
- CodeGenerator::PrintCode(code, info);
- info->SetCode(code); // May be an empty handle.
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (FLAG_gdbjit && !code.is_null()) {
- GDBJITLineInfo* lineinfo =
- masm.positions_recorder()->DetachGDBJITLineInfo();
-
- GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
- }
-#endif
- if (!code.is_null()) {
- void* line_info =
- masm.positions_recorder()->DetachJITHandlerData();
- LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(*code, line_info));
- }
- return !code.is_null();
-}
-
-
-unsigned FullCodeGenerator::EmitStackCheckTable() {
- // The stack check table consists of a length (in number of entries)
- // field, and then a sequence of entries. Each entry is a pair of AST id
- // and code-relative pc offset.
- masm()->Align(kIntSize);
- unsigned offset = masm()->pc_offset();
- unsigned length = stack_checks_.length();
- __ dd(length);
- for (unsigned i = 0; i < length; ++i) {
- __ dd(stack_checks_[i].id.ToInt());
- __ dd(stack_checks_[i].pc_and_state);
- }
- return offset;
-}
-
-
-void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
- // Fill in the deoptimization information.
- ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
- if (!info_->HasDeoptimizationSupport()) return;
- int length = bailout_entries_.length();
- Handle<DeoptimizationOutputData> data = isolate()->factory()->
- NewDeoptimizationOutputData(length, TENURED);
- for (int i = 0; i < length; i++) {
- data->SetAstId(i, bailout_entries_[i].id);
- data->SetPcAndState(i, Smi::FromInt(bailout_entries_[i].pc_and_state));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
- Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo();
- info->set_ic_total_count(ic_total_count_);
- ASSERT(!isolate()->heap()->InNewSpace(*info));
- code->set_type_feedback_info(*info);
-}
-
-
-void FullCodeGenerator::Initialize() {
- // The generation of debug code must match between the snapshot code and the
- // code that is generated later. This is assumed by the debugger when it is
- // calculating PC offsets after generating a debug version of code. Therefore
- // we disable the production of debug code in the full compiler if we are
- // either generating a snapshot or we booted from a snapshot.
- generate_debug_code_ = FLAG_debug_code &&
- !Serializer::enabled() &&
- !Snapshot::HaveASnapshotToStartFrom();
- masm_->set_emit_debug_code(generate_debug_code_);
- masm_->set_predictable_code_size(true);
- InitializeAstVisitor();
-}
-
-
-void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
- if (type_feedback_cells_.is_empty()) return;
- int length = type_feedback_cells_.length();
- int array_size = TypeFeedbackCells::LengthOfFixedArray(length);
- Handle<TypeFeedbackCells> cache = Handle<TypeFeedbackCells>::cast(
- isolate()->factory()->NewFixedArray(array_size, TENURED));
- for (int i = 0; i < length; i++) {
- cache->SetAstId(i, type_feedback_cells_[i].ast_id);
- cache->SetCell(i, *type_feedback_cells_[i].cell);
- }
- TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells(
- *cache);
-}
-
-
-
-void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
- PrepareForBailoutForId(node->id(), state);
-}
-
-
-void FullCodeGenerator::RecordJSReturnSite(Call* call) {
- // We record the offset of the function return so we can rebuild the frame
- // if the function was inlined, i.e., this is the return address in the
- // inlined function's frame.
- //
- // The state is ignored. We defensively set it to TOS_REG, which is the
- // real state of the unoptimized code at the return site.
- PrepareForBailoutForId(call->ReturnId(), TOS_REG);
-#ifdef DEBUG
- // In debug builds, mark the return so we can verify that this function
- // was called.
- ASSERT(!call->return_is_recorded_);
- call->return_is_recorded_ = true;
-#endif
-}
-
-
-void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
- // There's no need to prepare this code for bailouts from already optimized
- // code or code that can't be optimized.
- if (!info_->HasDeoptimizationSupport()) return;
- unsigned pc_and_state =
- StateField::encode(state) | PcField::encode(masm_->pc_offset());
- ASSERT(Smi::IsValid(pc_and_state));
- BailoutEntry entry = { id, pc_and_state };
- ASSERT(!prepared_bailout_ids_.Contains(id.ToInt()));
- prepared_bailout_ids_.Add(id.ToInt(), zone());
- bailout_entries_.Add(entry, zone());
-}
-
-
-void FullCodeGenerator::RecordTypeFeedbackCell(
- TypeFeedbackId id, Handle<JSGlobalPropertyCell> cell) {
- TypeFeedbackCellEntry entry = { id, cell };
- type_feedback_cells_.Add(entry, zone());
-}
-
-
-void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
- // The pc offset does not need to be encoded and packed together with a state.
- ASSERT(masm_->pc_offset() > 0);
- BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
- stack_checks_.Add(entry, zone());
-}
-
-
-bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) {
- // Inline smi case inside loops, but not division and modulo which
- // are too complicated and take up too much space.
- if (op == Token::DIV ||op == Token::MOD) return false;
- if (FLAG_always_inline_smi_code) return true;
- return loop_depth_ > 0;
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Register reg) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
- __ push(reg);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Register reg) const {
- // For simplicity we always test the accumulator register.
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::PlugTOS() const {
- __ Drop(1);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
- __ pop(result_register());
-}
-
-
-void FullCodeGenerator::StackValueContext::PlugTOS() const {
-}
-
-
-void FullCodeGenerator::TestContext::PlugTOS() const {
- // For simplicity we always test the accumulator register.
- __ pop(result_register());
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::PrepareTest(
- Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const {
- // In an effect context, the true and the false case branch to the
- // same label.
- *if_true = *if_false = *fall_through = materialize_true;
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::PrepareTest(
- Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const {
- *if_true = *fall_through = materialize_true;
- *if_false = materialize_false;
-}
-
-
-void FullCodeGenerator::StackValueContext::PrepareTest(
- Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const {
- *if_true = *fall_through = materialize_true;
- *if_false = materialize_false;
-}
-
-
-void FullCodeGenerator::TestContext::PrepareTest(
- Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const {
- *if_true = true_label_;
- *if_false = false_label_;
- *fall_through = fall_through_;
-}
-
-
-void FullCodeGenerator::DoTest(const TestContext* context) {
- DoTest(context->condition(),
- context->true_label(),
- context->false_label(),
- context->fall_through());
-}
-
-
-void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
- ASSERT(scope_->is_global_scope());
-
- for (int i = 0; i < declarations->length(); i++) {
- ModuleDeclaration* declaration = declarations->at(i)->AsModuleDeclaration();
- if (declaration != NULL) {
- ModuleLiteral* module = declaration->module()->AsModuleLiteral();
- if (module != NULL) {
- Comment cmnt(masm_, "[ Link nested modules");
- Scope* scope = module->body()->scope();
- Interface* interface = scope->interface();
- ASSERT(interface->IsModule() && interface->IsFrozen());
-
- interface->Allocate(scope->module_var()->index());
-
- // Set up module context.
- ASSERT(scope->interface()->Index() >= 0);
- __ Push(Smi::FromInt(scope->interface()->Index()));
- __ Push(scope->GetScopeInfo());
- __ CallRuntime(Runtime::kPushModuleContext, 2);
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
-
- AllocateModules(scope->declarations());
-
- // Pop module context.
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
- }
- }
- }
-}
-
-
-// Modules have their own local scope, represented by their own context.
-// Module instance objects have an accessor for every export that forwards
-// access to the respective slot from the module's context. (Exports that are
-// modules themselves, however, are simple data properties.)
-//
-// All modules have a _hosting_ scope/context, which (currently) is the
-// (innermost) enclosing global scope. To deal with recursion, nested modules
-// are hosted by the same scope as global ones.
-//
-// For every (global or nested) module literal, the hosting context has an
-// internal slot that points directly to the respective module context. This
-// enables quick access to (statically resolved) module members by 2-dimensional
-// access through the hosting context. For example,
-//
-// module A {
-// let x;
-// module B { let y; }
-// }
-// module C { let z; }
-//
-// allocates contexts as follows:
-//
-// [header| .A | .B | .C | A | C ] (global)
-// | | |
-// | | +-- [header| z ] (module)
-// | |
-// | +------- [header| y ] (module)
-// |
-// +------------ [header| x | B ] (module)
-//
-// Here, .A, .B, .C are the internal slots pointing to the hosted module
-// contexts, whereas A, B, C hold the actual instance objects (note that every
-// module context also points to the respective instance object through its
-// extension slot in the header).
-//
-// To deal with arbitrary recursion and aliases between modules,
-// they are created and initialized in several stages. Each stage applies to
-// all modules in the hosting global scope, including nested ones.
-//
-// 1. Allocate: for each module _literal_, allocate the module contexts and
-// respective instance object and wire them up. This happens in the
-// PushModuleContext runtime function, as generated by AllocateModules
-// (invoked by VisitDeclarations in the hosting scope).
-//
-// 2. Bind: for each module _declaration_ (i.e. literals as well as aliases),
-// assign the respective instance object to respective local variables. This
-// happens in VisitModuleDeclaration, and uses the instance objects created
-// in the previous stage.
-// For each module _literal_, this phase also constructs a module descriptor
-// for the next stage. This happens in VisitModuleLiteral.
-//
-// 3. Populate: invoke the DeclareModules runtime function to populate each
-// _instance_ object with accessors for it exports. This is generated by
-// DeclareModules (invoked by VisitDeclarations in the hosting scope again),
-// and uses the descriptors generated in the previous stage.
-//
-// 4. Initialize: execute the module bodies (and other code) in sequence. This
-// happens by the separate statements generated for module bodies. To reenter
-// the module scopes properly, the parser inserted ModuleStatements.
-
-void FullCodeGenerator::VisitDeclarations(
- ZoneList<Declaration*>* declarations) {
- Handle<FixedArray> saved_modules = modules_;
- int saved_module_index = module_index_;
- ZoneList<Handle<Object> >* saved_globals = globals_;
- ZoneList<Handle<Object> > inner_globals(10, zone());
- globals_ = &inner_globals;
-
- if (scope_->num_modules() != 0) {
- // This is a scope hosting modules. Allocate a descriptor array to pass
- // to the runtime for initialization.
- Comment cmnt(masm_, "[ Allocate modules");
- ASSERT(scope_->is_global_scope());
- modules_ =
- isolate()->factory()->NewFixedArray(scope_->num_modules(), TENURED);
- module_index_ = 0;
-
- // Generate code for allocating all modules, including nested ones.
- // The allocated contexts are stored in internal variables in this scope.
- AllocateModules(declarations);
- }
-
- AstVisitor::VisitDeclarations(declarations);
-
- if (scope_->num_modules() != 0) {
- // Initialize modules from descriptor array.
- ASSERT(module_index_ == modules_->length());
- DeclareModules(modules_);
- modules_ = saved_modules;
- module_index_ = saved_module_index;
- }
-
- if (!globals_->is_empty()) {
- // Invoke the platform-dependent code generator to do the actual
- // declaration of the global functions and variables.
- Handle<FixedArray> array =
- isolate()->factory()->NewFixedArray(globals_->length(), TENURED);
- for (int i = 0; i < globals_->length(); ++i)
- array->set(i, *globals_->at(i));
- DeclareGlobals(array);
- }
-
- globals_ = saved_globals;
-}
-
-
-void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
- Block* block = module->body();
- Scope* saved_scope = scope();
- scope_ = block->scope();
- Interface* interface = scope_->interface();
-
- Comment cmnt(masm_, "[ ModuleLiteral");
- SetStatementPosition(block);
-
- ASSERT(!modules_.is_null());
- ASSERT(module_index_ < modules_->length());
- int index = module_index_++;
-
- // Set up module context.
- ASSERT(interface->Index() >= 0);
- __ Push(Smi::FromInt(interface->Index()));
- __ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kPushModuleContext, 2);
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
-
- {
- Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope_->declarations());
- }
-
- // Populate the module description.
- Handle<ModuleInfo> description =
- ModuleInfo::Create(isolate(), interface, scope_);
- modules_->set(index, *description);
-
- scope_ = saved_scope;
- // Pop module context.
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
-}
-
-
-void FullCodeGenerator::VisitModuleVariable(ModuleVariable* module) {
- // Nothing to do.
- // The instance object is resolved statically through the module's interface.
-}
-
-
-void FullCodeGenerator::VisitModulePath(ModulePath* module) {
- // Nothing to do.
- // The instance object is resolved statically through the module's interface.
-}
-
-
-void FullCodeGenerator::VisitModuleUrl(ModuleUrl* module) {
- // TODO(rossberg): dummy allocation for now.
- Scope* scope = module->body()->scope();
- Interface* interface = scope_->interface();
-
- ASSERT(interface->IsModule() && interface->IsFrozen());
- ASSERT(!modules_.is_null());
- ASSERT(module_index_ < modules_->length());
- interface->Allocate(scope->module_var()->index());
- int index = module_index_++;
-
- Handle<ModuleInfo> description =
- ModuleInfo::Create(isolate(), interface, scope_);
- modules_->set(index, *description);
-}
-
-
-int FullCodeGenerator::DeclareGlobalsFlags() {
- ASSERT(DeclareGlobalsLanguageMode::is_valid(language_mode()));
- return DeclareGlobalsEvalFlag::encode(is_eval()) |
- DeclareGlobalsNativeFlag::encode(is_native()) |
- DeclareGlobalsLanguageMode::encode(language_mode());
-}
-
-
-void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
- CodeGenerator::RecordPositions(masm_, fun->start_position());
-}
-
-
-void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
- CodeGenerator::RecordPositions(masm_, fun->end_position() - 1);
-}
-
-
-void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!isolate()->debugger()->IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
- } else {
- // Check if the statement will be breakable without adding a debug break
- // slot.
- BreakableStatementChecker checker;
- checker.Check(stmt);
- // Record the statement position right here if the statement is not
- // breakable. For breakable statements the actual recording of the
- // position will be postponed to the breakable code (typically an IC).
- bool position_recorded = CodeGenerator::RecordPositions(
- masm_, stmt->statement_pos(), !checker.is_breakable());
- // If the position recording did record a new position generate a debug
- // break slot to make the statement breakable.
- if (position_recorded) {
- Debug::GenerateSlot(masm_);
- }
- }
-#else
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
-#endif
-}
-
-
-void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!isolate()->debugger()->IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, pos);
- } else {
- // Check if the expression will be breakable without adding a debug break
- // slot.
- BreakableStatementChecker checker;
- checker.Check(expr);
- // Record a statement position right here if the expression is not
- // breakable. For breakable expressions the actual recording of the
- // position will be postponed to the breakable code (typically an IC).
- // NOTE this will record a statement position for something which might
- // not be a statement. As stepping in the debugger will only stop at
- // statement positions this is used for e.g. the condition expression of
- // a do while loop.
- bool position_recorded = CodeGenerator::RecordPositions(
- masm_, pos, !checker.is_breakable());
- // If the position recording did record a new position generate a debug
- // break slot to make the statement breakable.
- if (position_recorded) {
- Debug::GenerateSlot(masm_);
- }
- }
-#else
- CodeGenerator::RecordPositions(masm_, pos);
-#endif
-}
-
-
-void FullCodeGenerator::SetStatementPosition(int pos) {
- CodeGenerator::RecordPositions(masm_, pos);
-}
-
-
-void FullCodeGenerator::SetSourcePosition(int pos) {
- if (pos != RelocInfo::kNoPosition) {
- masm_->positions_recorder()->RecordPosition(pos);
- }
-}
-
-
-// Lookup table for code generators for special runtime calls which are
-// generated inline.
-#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
- &FullCodeGenerator::Emit##Name,
-
-const FullCodeGenerator::InlineFunctionGenerator
- FullCodeGenerator::kInlineFunctionGenerators[] = {
- INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- };
-#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-
-
-FullCodeGenerator::InlineFunctionGenerator
- FullCodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) {
- int lookup_index =
- static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction);
- ASSERT(lookup_index >= 0);
- ASSERT(static_cast<size_t>(lookup_index) <
- ARRAY_SIZE(kInlineFunctionGenerators));
- return kInlineFunctionGenerators[lookup_index];
-}
-
-
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
- const Runtime::Function* function = expr->function();
- ASSERT(function != NULL);
- ASSERT(function->intrinsic_type == Runtime::INLINE);
- InlineFunctionGenerator generator =
- FindInlineFunctionGenerator(function->function_id);
- ((*this).*(generator))(expr);
-}
-
-
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- switch (expr->op()) {
- case Token::COMMA:
- return VisitComma(expr);
- case Token::OR:
- case Token::AND:
- return VisitLogicalExpression(expr);
- default:
- return VisitArithmeticExpression(expr);
- }
-}
-
-
-void FullCodeGenerator::VisitInDuplicateContext(Expression* expr) {
- if (context()->IsEffect()) {
- VisitForEffect(expr);
- } else if (context()->IsAccumulatorValue()) {
- VisitForAccumulatorValue(expr);
- } else if (context()->IsStackValue()) {
- VisitForStackValue(expr);
- } else if (context()->IsTest()) {
- const TestContext* test = TestContext::cast(context());
- VisitForControl(expr, test->true_label(), test->false_label(),
- test->fall_through());
- }
-}
-
-
-void FullCodeGenerator::VisitComma(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ Comma");
- VisitForEffect(expr->left());
- VisitInDuplicateContext(expr->right());
-}
-
-
-void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
- bool is_logical_and = expr->op() == Token::AND;
- Comment cmnt(masm_, is_logical_and ? "[ Logical AND" : "[ Logical OR");
- Expression* left = expr->left();
- Expression* right = expr->right();
- BailoutId right_id = expr->RightId();
- Label done;
-
- if (context()->IsTest()) {
- Label eval_right;
- const TestContext* test = TestContext::cast(context());
- if (is_logical_and) {
- VisitForControl(left, &eval_right, test->false_label(), &eval_right);
- } else {
- VisitForControl(left, test->true_label(), &eval_right, &eval_right);
- }
- PrepareForBailoutForId(right_id, NO_REGISTERS);
- __ bind(&eval_right);
-
- } else if (context()->IsAccumulatorValue()) {
- VisitForAccumulatorValue(left);
- // We want the value in the accumulator for the test, and on the stack in
- // case we need it.
- __ push(result_register());
- Label discard, restore;
- if (is_logical_and) {
- DoTest(left, &discard, &restore, &restore);
- } else {
- DoTest(left, &restore, &discard, &restore);
- }
- __ bind(&restore);
- __ pop(result_register());
- __ jmp(&done);
- __ bind(&discard);
- __ Drop(1);
- PrepareForBailoutForId(right_id, NO_REGISTERS);
-
- } else if (context()->IsStackValue()) {
- VisitForAccumulatorValue(left);
- // We want the value in the accumulator for the test, and on the stack in
- // case we need it.
- __ push(result_register());
- Label discard;
- if (is_logical_and) {
- DoTest(left, &discard, &done, &discard);
- } else {
- DoTest(left, &done, &discard, &discard);
- }
- __ bind(&discard);
- __ Drop(1);
- PrepareForBailoutForId(right_id, NO_REGISTERS);
-
- } else {
- ASSERT(context()->IsEffect());
- Label eval_right;
- if (is_logical_and) {
- VisitForControl(left, &eval_right, &done, &eval_right);
- } else {
- VisitForControl(left, &done, &eval_right, &eval_right);
- }
- PrepareForBailoutForId(right_id, NO_REGISTERS);
- __ bind(&eval_right);
- }
-
- VisitInDuplicateContext(right);
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
- Token::Value op = expr->op();
- Comment cmnt(masm_, "[ ArithmeticExpression");
- Expression* left = expr->left();
- Expression* right = expr->right();
- OverwriteMode mode =
- left->ResultOverwriteAllowed()
- ? OVERWRITE_LEFT
- : (right->ResultOverwriteAllowed() ? OVERWRITE_RIGHT : NO_OVERWRITE);
-
- VisitForStackValue(left);
- VisitForAccumulatorValue(right);
-
- SetSourcePosition(expr->position());
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr, op, mode, left, right);
- } else {
- EmitBinaryOp(expr, op, mode);
- }
-}
-
-
-void FullCodeGenerator::VisitBlock(Block* stmt) {
- Comment cmnt(masm_, "[ Block");
- NestedBlock nested_block(this, stmt);
- SetStatementPosition(stmt);
-
- Scope* saved_scope = scope();
- // Push a block context when entering a block with block scoped variables.
- if (stmt->scope() != NULL) {
- scope_ = stmt->scope();
- ASSERT(!scope_->is_module_scope());
- { Comment cmnt(masm_, "[ Extend block context");
- Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
- int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
- __ Push(scope_info);
- PushFunctionArgumentForContextAllocation();
- if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
- FastNewBlockContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kPushBlockContext, 2);
- }
-
- // Replace the context stored in the frame.
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
- }
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope_->declarations());
- }
- }
-
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- VisitStatements(stmt->statements());
- scope_ = saved_scope;
- __ bind(nested_block.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-
- // Pop block context if necessary.
- if (stmt->scope() != NULL) {
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
- }
-}
-
-
-void FullCodeGenerator::VisitModuleStatement(ModuleStatement* stmt) {
- Comment cmnt(masm_, "[ Module context");
-
- __ Push(Smi::FromInt(stmt->proxy()->interface()->Index()));
- __ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kPushModuleContext, 2);
- StoreToFrameField(
- StandardFrameConstants::kContextOffset, context_register());
-
- Scope* saved_scope = scope_;
- scope_ = stmt->body()->scope();
- VisitStatements(stmt->body()->statements());
- scope_ = saved_scope;
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
-}
-
-
-void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Comment cmnt(masm_, "[ ExpressionStatement");
- SetStatementPosition(stmt);
- VisitForEffect(stmt->expression());
-}
-
-
-void FullCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
- Comment cmnt(masm_, "[ EmptyStatement");
- SetStatementPosition(stmt);
-}
-
-
-void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
- Comment cmnt(masm_, "[ IfStatement");
- SetStatementPosition(stmt);
- Label then_part, else_part, done;
-
- if (stmt->HasElseStatement()) {
- VisitForControl(stmt->condition(), &then_part, &else_part, &then_part);
- PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
- __ bind(&then_part);
- Visit(stmt->then_statement());
- __ jmp(&done);
-
- PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
- __ bind(&else_part);
- Visit(stmt->else_statement());
- } else {
- VisitForControl(stmt->condition(), &then_part, &done, &then_part);
- PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
- __ bind(&then_part);
- Visit(stmt->then_statement());
-
- PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
- }
- __ bind(&done);
- PrepareForBailoutForId(stmt->IfId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
- Comment cmnt(masm_, "[ ContinueStatement");
- SetStatementPosition(stmt);
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- // When continuing, we clobber the unpredictable value in the accumulator
- // with one that's safe for GC. If we hit an exit from the try block of
- // try...finally on our way out, we will unconditionally preserve the
- // accumulator on the stack.
- ClearAccumulator();
- while (!current->IsContinueTarget(stmt->target())) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- if (context_length > 0) {
- while (context_length > 0) {
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- --context_length;
- }
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
- }
-
- __ jmp(current->AsIteration()->continue_label());
-}
-
-
-void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
- Comment cmnt(masm_, "[ BreakStatement");
- SetStatementPosition(stmt);
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- // When breaking, we clobber the unpredictable value in the accumulator
- // with one that's safe for GC. If we hit an exit from the try block of
- // try...finally on our way out, we will unconditionally preserve the
- // accumulator on the stack.
- ClearAccumulator();
- while (!current->IsBreakTarget(stmt->target())) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- if (context_length > 0) {
- while (context_length > 0) {
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- --context_length;
- }
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
- }
-
- __ jmp(current->AsBreakable()->break_label());
-}
-
-
-void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- Comment cmnt(masm_, "[ ReturnStatement");
- SetStatementPosition(stmt);
- Expression* expr = stmt->expression();
- VisitForAccumulatorValue(expr);
-
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
-
- EmitReturnSequence();
-}
-
-
-void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
- Comment cmnt(masm_, "[ WithStatement");
- SetStatementPosition(stmt);
-
- VisitForStackValue(stmt->expression());
- PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushWithContext, 2);
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
-
- { WithOrCatch body(this);
- Visit(stmt->statement());
- }
-
- // Pop context.
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
-}
-
-
-void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
- Comment cmnt(masm_, "[ DoWhileStatement");
- SetStatementPosition(stmt);
- Label body, stack_check;
-
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
-
- __ bind(&body);
- Visit(stmt->body());
-
- // Record the position of the do while condition and make sure it is
- // possible to break on the condition.
- __ bind(loop_statement.continue_label());
- PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
- SetExpressionPosition(stmt->cond(), stmt->condition_position());
- VisitForControl(stmt->cond(),
- &stack_check,
- loop_statement.break_label(),
- &stack_check);
-
- // Check stack before looping.
- PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
- __ bind(&stack_check);
- EmitBackEdgeBookkeeping(stmt, &body);
- __ jmp(&body);
-
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(loop_statement.break_label());
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
- Comment cmnt(masm_, "[ WhileStatement");
- Label test, body;
-
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
-
- // Emit the test at the bottom of the loop.
- __ jmp(&test);
-
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- __ bind(&body);
- Visit(stmt->body());
-
- // Emit the statement position here as this is where the while
- // statement code starts.
- __ bind(loop_statement.continue_label());
- SetStatementPosition(stmt);
-
- // Check stack before looping.
- EmitBackEdgeBookkeeping(stmt, &body);
-
- __ bind(&test);
- VisitForControl(stmt->cond(),
- &body,
- loop_statement.break_label(),
- loop_statement.break_label());
-
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(loop_statement.break_label());
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
- Comment cmnt(masm_, "[ ForStatement");
- Label test, body;
-
- Iteration loop_statement(this, stmt);
-
- // Set statement position for a break slot before entering the for-body.
- SetStatementPosition(stmt);
-
- if (stmt->init() != NULL) {
- Visit(stmt->init());
- }
-
- increment_loop_depth();
- // Emit the test at the bottom of the loop (even if empty).
- __ jmp(&test);
-
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- __ bind(&body);
- Visit(stmt->body());
-
- PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
- __ bind(loop_statement.continue_label());
- if (stmt->next() != NULL) {
- Visit(stmt->next());
- }
-
- // Emit the statement position here as this is where the for
- // statement code starts.
- SetStatementPosition(stmt);
-
- // Check stack before looping.
- EmitBackEdgeBookkeeping(stmt, &body);
-
- __ bind(&test);
- if (stmt->cond() != NULL) {
- VisitForControl(stmt->cond(),
- &body,
- loop_statement.break_label(),
- loop_statement.break_label());
- } else {
- __ jmp(&body);
- }
-
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(loop_statement.break_label());
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
- Comment cmnt(masm_, "[ TryCatchStatement");
- SetStatementPosition(stmt);
- // The try block adds a handler to the exception handler chain before
- // entering, and removes it again when exiting normally. If an exception
- // is thrown during execution of the try block, the handler is consumed
- // and control is passed to the catch block with the exception in the
- // result register.
-
- Label try_entry, handler_entry, exit;
- __ jmp(&try_entry);
- __ bind(&handler_entry);
- handler_table()->set(stmt->index(), Smi::FromInt(handler_entry.pos()));
- // Exception handler code, the exception is in the result register.
- // Extend the context before executing the catch block.
- { Comment cmnt(masm_, "[ Extend catch context");
- __ Push(stmt->variable()->name());
- __ push(result_register());
- PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushCatchContext, 3);
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
- }
-
- Scope* saved_scope = scope();
- scope_ = stmt->scope();
- ASSERT(scope_->declarations()->is_empty());
- { WithOrCatch catch_body(this);
- Visit(stmt->catch_block());
- }
- // Restore the context.
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
- scope_ = saved_scope;
- __ jmp(&exit);
-
- // Try block code. Sets up the exception handler chain.
- __ bind(&try_entry);
- __ PushTryHandler(StackHandler::CATCH, stmt->index());
- { TryCatch try_body(this);
- Visit(stmt->try_block());
- }
- __ PopTryHandler();
- __ bind(&exit);
-}
-
-
-void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- Comment cmnt(masm_, "[ TryFinallyStatement");
- SetStatementPosition(stmt);
- // Try finally is compiled by setting up a try-handler on the stack while
- // executing the try body, and removing it again afterwards.
- //
- // The try-finally construct can enter the finally block in three ways:
- // 1. By exiting the try-block normally. This removes the try-handler and
- // calls the finally block code before continuing.
- // 2. By exiting the try-block with a function-local control flow transfer
- // (break/continue/return). The site of the, e.g., break removes the
- // try handler and calls the finally block code before continuing
- // its outward control transfer.
- // 3. By exiting the try-block with a thrown exception.
- // This can happen in nested function calls. It traverses the try-handler
- // chain and consumes the try-handler entry before jumping to the
- // handler code. The handler code then calls the finally-block before
- // rethrowing the exception.
- //
- // The finally block must assume a return address on top of the stack
- // (or in the link register on ARM chips) and a value (return value or
- // exception) in the result register (rax/eax/r0), both of which must
- // be preserved. The return address isn't GC-safe, so it should be
- // cooked before GC.
- Label try_entry, handler_entry, finally_entry;
-
- // Jump to try-handler setup and try-block code.
- __ jmp(&try_entry);
- __ bind(&handler_entry);
- handler_table()->set(stmt->index(), Smi::FromInt(handler_entry.pos()));
- // Exception handler code. This code is only executed when an exception
- // is thrown. The exception is in the result register, and must be
- // preserved by the finally block. Call the finally block and then
- // rethrow the exception if it returns.
- __ Call(&finally_entry);
- __ push(result_register());
- __ CallRuntime(Runtime::kReThrow, 1);
-
- // Finally block implementation.
- __ bind(&finally_entry);
- EnterFinallyBlock();
- { Finally finally_body(this);
- Visit(stmt->finally_block());
- }
- ExitFinallyBlock(); // Return to the calling code.
-
- // Set up try handler.
- __ bind(&try_entry);
- __ PushTryHandler(StackHandler::FINALLY, stmt->index());
- { TryFinally try_body(this, &finally_entry);
- Visit(stmt->try_block());
- }
- __ PopTryHandler();
- // Execute the finally block on the way out. Clobber the unpredictable
- // value in the result register with one that's safe for GC because the
- // finally block will unconditionally preserve the result register on the
- // stack.
- ClearAccumulator();
- __ Call(&finally_entry);
-}
-
-
-void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Comment cmnt(masm_, "[ DebuggerStatement");
- SetStatementPosition(stmt);
-
- __ DebugBreak();
- // Ignore the return value.
-#endif
-}
-
-
-void FullCodeGenerator::VisitConditional(Conditional* expr) {
- Comment cmnt(masm_, "[ Conditional");
- Label true_case, false_case, done;
- VisitForControl(expr->condition(), &true_case, &false_case, &true_case);
-
- PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS);
- __ bind(&true_case);
- SetExpressionPosition(expr->then_expression(),
- expr->then_expression_position());
- if (context()->IsTest()) {
- const TestContext* for_test = TestContext::cast(context());
- VisitForControl(expr->then_expression(),
- for_test->true_label(),
- for_test->false_label(),
- NULL);
- } else {
- VisitInDuplicateContext(expr->then_expression());
- __ jmp(&done);
- }
-
- PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
- __ bind(&false_case);
- SetExpressionPosition(expr->else_expression(),
- expr->else_expression_position());
- VisitInDuplicateContext(expr->else_expression());
- // If control flow falls through Visit, merge it with true case here.
- if (!context()->IsTest()) {
- __ bind(&done);
- }
-}
-
-
-void FullCodeGenerator::VisitLiteral(Literal* expr) {
- Comment cmnt(masm_, "[ Literal");
- context()->Plug(expr->handle());
-}
-
-
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- Comment cmnt(masm_, "[ FunctionLiteral");
-
- // Build the function boilerplate and instantiate it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(expr, script());
- if (function_info.is_null()) {
- SetStackOverflow();
- return;
- }
- EmitNewClosure(function_info, expr->pretenure());
-}
-
-
-void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- EmitNewClosure(expr->shared_function_info(), false);
-}
-
-
-void FullCodeGenerator::VisitThrow(Throw* expr) {
- Comment cmnt(masm_, "[ Throw");
- VisitForStackValue(expr->exception());
- __ CallRuntime(Runtime::kThrow, 1);
- // Never returns here.
-}
-
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryCatch::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
- __ Drop(*stack_depth);
- __ PopTryHandler();
- *stack_depth = 0;
- return previous_;
-}
-
-
-bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
- Expression* sub_expr;
- Handle<String> check;
- if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
- EmitLiteralCompareTypeof(expr, sub_expr, check);
- return true;
- }
-
- if (expr->IsLiteralCompareUndefined(&sub_expr)) {
- EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
- return true;
- }
-
- if (expr->IsLiteralCompareNull(&sub_expr)) {
- EmitLiteralCompareNil(expr, sub_expr, kNullValue);
- return true;
- }
-
- return false;
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/full-codegen.h b/src/3rdparty/v8/src/full-codegen.h
deleted file mode 100644
index b9285c7..0000000
--- a/src/3rdparty/v8/src/full-codegen.h
+++ /dev/null
@@ -1,863 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FULL_CODEGEN_H_
-#define V8_FULL_CODEGEN_H_
-
-#include "v8.h"
-
-#include "allocation.h"
-#include "ast.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "data-flow.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class JumpPatchSite;
-
-// AST node visitor which can tell whether a given statement will be breakable
-// when the code is compiled by the full compiler in the debugger. This means
-// that there will be an IC (load/store/call) in the code generated for the
-// debugger to piggybag on.
-class BreakableStatementChecker: public AstVisitor {
- public:
- BreakableStatementChecker() : is_breakable_(false) {
- InitializeAstVisitor();
- }
-
- void Check(Statement* stmt);
- void Check(Expression* stmt);
-
- bool is_breakable() { return is_breakable_; }
-
- private:
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- bool is_breakable_;
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
- DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker);
-};
-
-
-// -----------------------------------------------------------------------------
-// Full code generator.
-
-class FullCodeGenerator: public AstVisitor {
- public:
- enum State {
- NO_REGISTERS,
- TOS_REG
- };
-
- FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
- : masm_(masm),
- info_(info),
- scope_(info->scope()),
- nesting_stack_(NULL),
- loop_depth_(0),
- globals_(NULL),
- context_(NULL),
- bailout_entries_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count() : 0,
- info->zone()),
- stack_checks_(2, info->zone()), // There's always at least one.
- type_feedback_cells_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count() : 0,
- info->zone()),
- ic_total_count_(0),
- zone_(info->zone()) {
- Initialize();
- }
-
- void Initialize();
-
- static bool MakeCode(CompilationInfo* info);
-
- // Encode state and pc-offset as a BitField<type, start, size>.
- // Only use 30 bits because we encode the result as a smi.
- class StateField : public BitField<State, 0, 1> { };
- class PcField : public BitField<unsigned, 1, 30-1> { };
-
- static const char* State2String(State state) {
- switch (state) {
- case NO_REGISTERS: return "NO_REGISTERS";
- case TOS_REG: return "TOS_REG";
- }
- UNREACHABLE();
- return NULL;
- }
-
- Zone* zone() const { return zone_; }
-
- static const int kMaxBackEdgeWeight = 127;
-
-#if V8_TARGET_ARCH_IA32
- static const int kBackEdgeDistanceUnit = 100;
-#elif V8_TARGET_ARCH_X64
- static const int kBackEdgeDistanceUnit = 162;
-#elif V8_TARGET_ARCH_ARM
- static const int kBackEdgeDistanceUnit = 142;
-#elif V8_TARGET_ARCH_MIPS
- static const int kBackEdgeDistanceUnit = 142;
-#else
-#error Unsupported target architecture.
-#endif
-
-
- private:
- class Breakable;
- class Iteration;
-
- class TestContext;
-
- class NestedStatement BASE_EMBEDDED {
- public:
- explicit NestedStatement(FullCodeGenerator* codegen) : codegen_(codegen) {
- // Link into codegen's nesting stack.
- previous_ = codegen->nesting_stack_;
- codegen->nesting_stack_ = this;
- }
- virtual ~NestedStatement() {
- // Unlink from codegen's nesting stack.
- ASSERT_EQ(this, codegen_->nesting_stack_);
- codegen_->nesting_stack_ = previous_;
- }
-
- virtual Breakable* AsBreakable() { return NULL; }
- virtual Iteration* AsIteration() { return NULL; }
-
- virtual bool IsContinueTarget(Statement* target) { return false; }
- virtual bool IsBreakTarget(Statement* target) { return false; }
-
- // Notify the statement that we are exiting it via break, continue, or
- // return and give it a chance to generate cleanup code. Return the
- // next outer statement in the nesting stack. We accumulate in
- // *stack_depth the amount to drop the stack and in *context_length the
- // number of context chain links to unwind as we traverse the nesting
- // stack from an exit to its target.
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
- return previous_;
- }
-
- protected:
- MacroAssembler* masm() { return codegen_->masm(); }
-
- FullCodeGenerator* codegen_;
- NestedStatement* previous_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(NestedStatement);
- };
-
- // A breakable statement such as a block.
- class Breakable : public NestedStatement {
- public:
- Breakable(FullCodeGenerator* codegen, BreakableStatement* statement)
- : NestedStatement(codegen), statement_(statement) {
- }
- virtual ~Breakable() {}
-
- virtual Breakable* AsBreakable() { return this; }
- virtual bool IsBreakTarget(Statement* target) {
- return statement() == target;
- }
-
- BreakableStatement* statement() { return statement_; }
- Label* break_label() { return &break_label_; }
-
- private:
- BreakableStatement* statement_;
- Label break_label_;
- };
-
- // An iteration statement such as a while, for, or do loop.
- class Iteration : public Breakable {
- public:
- Iteration(FullCodeGenerator* codegen, IterationStatement* statement)
- : Breakable(codegen, statement) {
- }
- virtual ~Iteration() {}
-
- virtual Iteration* AsIteration() { return this; }
- virtual bool IsContinueTarget(Statement* target) {
- return statement() == target;
- }
-
- Label* continue_label() { return &continue_label_; }
-
- private:
- Label continue_label_;
- };
-
- // A nested block statement.
- class NestedBlock : public Breakable {
- public:
- NestedBlock(FullCodeGenerator* codegen, Block* block)
- : Breakable(codegen, block) {
- }
- virtual ~NestedBlock() {}
-
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
- if (statement()->AsBlock()->scope() != NULL) {
- ++(*context_length);
- }
- return previous_;
- };
- };
-
- // The try block of a try/catch statement.
- class TryCatch : public NestedStatement {
- public:
- explicit TryCatch(FullCodeGenerator* codegen) : NestedStatement(codegen) {
- }
- virtual ~TryCatch() {}
-
- virtual NestedStatement* Exit(int* stack_depth, int* context_length);
- };
-
- // The try block of a try/finally statement.
- class TryFinally : public NestedStatement {
- public:
- TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
- : NestedStatement(codegen), finally_entry_(finally_entry) {
- }
- virtual ~TryFinally() {}
-
- virtual NestedStatement* Exit(int* stack_depth, int* context_length);
-
- private:
- Label* finally_entry_;
- };
-
- // The finally block of a try/finally statement.
- class Finally : public NestedStatement {
- public:
- static const int kElementCount = 5;
-
- explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
- virtual ~Finally() {}
-
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
- *stack_depth += kElementCount;
- return previous_;
- }
- };
-
- // The body of a for/in loop.
- class ForIn : public Iteration {
- public:
- static const int kElementCount = 5;
-
- ForIn(FullCodeGenerator* codegen, ForInStatement* statement)
- : Iteration(codegen, statement) {
- }
- virtual ~ForIn() {}
-
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
- *stack_depth += kElementCount;
- return previous_;
- }
- };
-
-
- // The body of a with or catch.
- class WithOrCatch : public NestedStatement {
- public:
- explicit WithOrCatch(FullCodeGenerator* codegen)
- : NestedStatement(codegen) {
- }
- virtual ~WithOrCatch() {}
-
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
- ++(*context_length);
- return previous_;
- }
- };
-
- // Type of a member function that generates inline code for a native function.
- typedef void (FullCodeGenerator::*InlineFunctionGenerator)(CallRuntime* expr);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
- // A platform-specific utility to overwrite the accumulator register
- // with a GC-safe value.
- void ClearAccumulator();
-
- // Determine whether or not to inline the smi case for the given
- // operation.
- bool ShouldInlineSmiCase(Token::Value op);
-
- // Helper function to convert a pure value into a test context. The value
- // is expected on the stack or the accumulator, depending on the platform.
- // See the platform-specific implementation for details.
- void DoTest(Expression* condition,
- Label* if_true,
- Label* if_false,
- Label* fall_through);
- void DoTest(const TestContext* context);
-
- // Helper function to split control flow and avoid a branch to the
- // fall-through label if it is set up.
-#ifdef V8_TARGET_ARCH_MIPS
- void Split(Condition cc,
- Register lhs,
- const Operand& rhs,
- Label* if_true,
- Label* if_false,
- Label* fall_through);
-#else // All non-mips arch.
- void Split(Condition cc,
- Label* if_true,
- Label* if_false,
- Label* fall_through);
-#endif // V8_TARGET_ARCH_MIPS
-
- // Load the value of a known (PARAMETER, LOCAL, or CONTEXT) variable into
- // a register. Emits a context chain walk if if necessary (so does
- // SetVar) so avoid calling both on the same variable.
- void GetVar(Register destination, Variable* var);
-
- // Assign to a known (PARAMETER, LOCAL, or CONTEXT) variable. If it's in
- // the context, the write barrier will be emitted and source, scratch0,
- // scratch1 will be clobbered. Emits a context chain walk if if necessary
- // (so does GetVar) so avoid calling both on the same variable.
- void SetVar(Variable* var,
- Register source,
- Register scratch0,
- Register scratch1);
-
- // An operand used to read/write a stack-allocated (PARAMETER or LOCAL)
- // variable. Writing does not need the write barrier.
- MemOperand StackOperand(Variable* var);
-
- // An operand used to read/write a known (PARAMETER, LOCAL, or CONTEXT)
- // variable. May emit code to traverse the context chain, loading the
- // found context into the scratch register. Writing to this operand will
- // need the write barrier if location is CONTEXT.
- MemOperand VarOperand(Variable* var, Register scratch);
-
- void VisitForEffect(Expression* expr) {
- EffectContext context(this);
- Visit(expr);
- PrepareForBailout(expr, NO_REGISTERS);
- }
-
- void VisitForAccumulatorValue(Expression* expr) {
- AccumulatorValueContext context(this);
- Visit(expr);
- PrepareForBailout(expr, TOS_REG);
- }
-
- void VisitForStackValue(Expression* expr) {
- StackValueContext context(this);
- Visit(expr);
- PrepareForBailout(expr, NO_REGISTERS);
- }
-
- void VisitForControl(Expression* expr,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- TestContext context(this, expr, if_true, if_false, fall_through);
- Visit(expr);
- // For test contexts, we prepare for bailout before branching, not at
- // the end of the entire expression. This happens as part of visiting
- // the expression.
- }
-
- void VisitInDuplicateContext(Expression* expr);
-
- void VisitDeclarations(ZoneList<Declaration*>* declarations);
- void DeclareModules(Handle<FixedArray> descriptions);
- void DeclareGlobals(Handle<FixedArray> pairs);
- int DeclareGlobalsFlags();
-
- // Generate code to allocate all (including nested) modules and contexts.
- // Because of recursive linking and the presence of module alias declarations,
- // this has to be a separate pass _before_ populating or executing any module.
- void AllocateModules(ZoneList<Declaration*>* declarations);
-
- // Try to perform a comparison as a fast inlined literal compare if
- // the operands allow it. Returns true if the compare operations
- // has been matched and all code generated; false otherwise.
- bool TryLiteralCompare(CompareOperation* compare);
-
- // Platform-specific code for comparing the type of a value with
- // a given literal string.
- void EmitLiteralCompareTypeof(Expression* expr,
- Expression* sub_expr,
- Handle<String> check);
-
- // Platform-specific code for equality comparison with a nil-like value.
- void EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil);
-
- // Bailout support.
- void PrepareForBailout(Expression* node, State state);
- void PrepareForBailoutForId(BailoutId id, State state);
-
- // Cache cell support. This associates AST ids with global property cells
- // that will be cleared during GC and collected by the type-feedback oracle.
- void RecordTypeFeedbackCell(TypeFeedbackId id,
- Handle<JSGlobalPropertyCell> cell);
-
- // Record a call's return site offset, used to rebuild the frame if the
- // called function was inlined at the site.
- void RecordJSReturnSite(Call* call);
-
- // Prepare for bailout before a test (or compare) and branch. If
- // should_normalize, then the following comparison will not handle the
- // canonical JS true value so we will insert a (dead) test against true at
- // the actual bailout target from the optimized code. If not
- // should_normalize, the true and false labels are ignored.
- void PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false);
-
- // If enabled, emit debug code for checking that the current context is
- // neither a with nor a catch context.
- void EmitDebugCheckDeclarationContext(Variable* variable);
-
- // This is meant to be called at loop back edges, |back_edge_target| is
- // the jump target of the back edge and is used to approximate the amount
- // of code inside the loop.
- void EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target);
- // Record the OSR AST id corresponding to a back edge in the code.
- void RecordBackEdge(BailoutId osr_ast_id);
- // Emit a table of stack check ids and pcs into the code stream. Return
- // the offset of the start of the table.
- unsigned EmitStackCheckTable();
-
- void EmitProfilingCounterDecrement(int delta);
- void EmitProfilingCounterReset();
-
- // Platform-specific return sequence
- void EmitReturnSequence();
-
- // Platform-specific code sequences for calls
- void EmitCallWithStub(Call* expr, CallFunctionFlags flags);
- void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
- void EmitKeyedCallWithIC(Call* expr, Expression* key);
-
- // Platform-specific code for inline runtime calls.
- InlineFunctionGenerator FindInlineFunctionGenerator(Runtime::FunctionId id);
-
- void EmitInlineRuntimeCall(CallRuntime* expr);
-
-#define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
- void Emit##name(CallRuntime* expr);
- INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
- INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
-#undef EMIT_INLINE_RUNTIME_CALL
-
- // Platform-specific code for loading variables.
- void EmitLoadGlobalCheckExtensions(Variable* var,
- TypeofState typeof_state,
- Label* slow);
- MemOperand ContextSlotOperandCheckExtensions(Variable* var, Label* slow);
- void EmitDynamicLookupFastCase(Variable* var,
- TypeofState typeof_state,
- Label* slow,
- Label* done);
- void EmitVariableLoad(VariableProxy* proxy);
-
- void EmitAccessor(Expression* expression);
-
- // Expects the arguments and the function already pushed.
- void EmitResolvePossiblyDirectEval(int arg_count);
-
- // Platform-specific support for allocating a new closure based on
- // the given function info.
- void EmitNewClosure(Handle<SharedFunctionInfo> info, bool pretenure);
-
- // Platform-specific support for compiling assignments.
-
- // Load a value from a named property.
- // The receiver is left on the stack by the IC.
- void EmitNamedPropertyLoad(Property* expr);
-
- // Load a value from a keyed property.
- // The receiver and the key is left on the stack by the IC.
- void EmitKeyedPropertyLoad(Property* expr);
-
- // Apply the compound assignment operator. Expects the left operand on top
- // of the stack and the right one in the accumulator.
- void EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode);
-
- // Helper functions for generating inlined smi code for certain
- // binary operations.
- void EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left,
- Expression* right);
-
- // Assign to the given expression as if via '='. The right-hand-side value
- // is expected in the accumulator.
- void EmitAssignment(Expression* expr);
-
- // Complete a variable assignment. The right-hand-side value is expected
- // in the accumulator.
- void EmitVariableAssignment(Variable* var,
- Token::Value op);
-
- // Complete a named property assignment. The receiver is expected on top
- // of the stack and the right-hand-side value in the accumulator.
- void EmitNamedPropertyAssignment(Assignment* expr);
-
- // Complete a keyed property assignment. The receiver and key are
- // expected on top of the stack and the right-hand-side value in the
- // accumulator.
- void EmitKeyedPropertyAssignment(Assignment* expr);
-
- void CallIC(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId id = TypeFeedbackId::None());
-
- void SetFunctionPosition(FunctionLiteral* fun);
- void SetReturnPosition(FunctionLiteral* fun);
- void SetStatementPosition(Statement* stmt);
- void SetExpressionPosition(Expression* expr, int pos);
- void SetStatementPosition(int pos);
- void SetSourcePosition(int pos);
-
- // Non-local control flow support.
- void EnterFinallyBlock();
- void ExitFinallyBlock();
-
- // Loop nesting counter.
- int loop_depth() { return loop_depth_; }
- void increment_loop_depth() { loop_depth_++; }
- void decrement_loop_depth() {
- ASSERT(loop_depth_ > 0);
- loop_depth_--;
- }
-
- MacroAssembler* masm() { return masm_; }
-
- class ExpressionContext;
- const ExpressionContext* context() { return context_; }
- void set_new_context(const ExpressionContext* context) { context_ = context; }
-
- Handle<Script> script() { return info_->script(); }
- bool is_eval() { return info_->is_eval(); }
- bool is_native() { return info_->is_native(); }
- bool is_classic_mode() { return language_mode() == CLASSIC_MODE; }
- LanguageMode language_mode() { return function()->language_mode(); }
- bool is_qml_mode() { return function()->qml_mode(); }
- FunctionLiteral* function() { return info_->function(); }
- Scope* scope() { return scope_; }
-
- static Register result_register();
- static Register context_register();
-
- // Set fields in the stack frame. Offsets are the frame pointer relative
- // offsets defined in, e.g., StandardFrameConstants.
- void StoreToFrameField(int frame_offset, Register value);
-
- // Load a value from the current context. Indices are defined as an enum
- // in v8::internal::Context.
- void LoadContextField(Register dst, int context_index);
-
- // Push the function argument for the runtime functions PushWithContext
- // and PushCatchContext.
- void PushFunctionArgumentForContextAllocation();
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- void EmitUnaryOperation(UnaryOperation* expr, const char* comment);
-
- void VisitComma(BinaryOperation* expr);
- void VisitLogicalExpression(BinaryOperation* expr);
- void VisitArithmeticExpression(BinaryOperation* expr);
-
- void VisitForTypeofValue(Expression* expr);
-
- void Generate();
- void PopulateDeoptimizationData(Handle<Code> code);
- void PopulateTypeFeedbackInfo(Handle<Code> code);
- void PopulateTypeFeedbackCells(Handle<Code> code);
-
- Handle<FixedArray> handler_table() { return handler_table_; }
-
- struct BailoutEntry {
- BailoutId id;
- unsigned pc_and_state;
- };
-
- struct TypeFeedbackCellEntry {
- TypeFeedbackId ast_id;
- Handle<JSGlobalPropertyCell> cell;
- };
-
-
- class ExpressionContext BASE_EMBEDDED {
- public:
- explicit ExpressionContext(FullCodeGenerator* codegen)
- : masm_(codegen->masm()), old_(codegen->context()), codegen_(codegen) {
- codegen->set_new_context(this);
- }
-
- virtual ~ExpressionContext() {
- codegen_->set_new_context(old_);
- }
-
- Isolate* isolate() const { return codegen_->isolate(); }
-
- // Convert constant control flow (true or false) to the result expected for
- // this expression context.
- virtual void Plug(bool flag) const = 0;
-
- // Emit code to convert a pure value (in a register, known variable
- // location, as a literal, or on top of the stack) into the result
- // expected according to this expression context.
- virtual void Plug(Register reg) const = 0;
- virtual void Plug(Variable* var) const = 0;
- virtual void Plug(Handle<Object> lit) const = 0;
- virtual void Plug(Heap::RootListIndex index) const = 0;
- virtual void PlugTOS() const = 0;
-
- // Emit code to convert pure control flow to a pair of unbound labels into
- // the result expected according to this expression context. The
- // implementation will bind both labels unless it's a TestContext, which
- // won't bind them at this point.
- virtual void Plug(Label* materialize_true,
- Label* materialize_false) const = 0;
-
- // Emit code to discard count elements from the top of stack, then convert
- // a pure value into the result expected according to this expression
- // context.
- virtual void DropAndPlug(int count, Register reg) const = 0;
-
- // Set up branch labels for a test expression. The three Label** parameters
- // are output parameters.
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const = 0;
-
- // Returns true if we are evaluating only for side effects (i.e. if the
- // result will be discarded).
- virtual bool IsEffect() const { return false; }
-
- // Returns true if we are evaluating for the value (in accu/on stack).
- virtual bool IsAccumulatorValue() const { return false; }
- virtual bool IsStackValue() const { return false; }
-
- // Returns true if we are branching on the value rather than materializing
- // it. Only used for asserts.
- virtual bool IsTest() const { return false; }
-
- protected:
- FullCodeGenerator* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return masm_; }
- MacroAssembler* masm_;
-
- private:
- const ExpressionContext* old_;
- FullCodeGenerator* codegen_;
- };
-
- class AccumulatorValueContext : public ExpressionContext {
- public:
- explicit AccumulatorValueContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) { }
-
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Variable* var) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual bool IsAccumulatorValue() const { return true; }
- };
-
- class StackValueContext : public ExpressionContext {
- public:
- explicit StackValueContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) { }
-
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Variable* var) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual bool IsStackValue() const { return true; }
- };
-
- class TestContext : public ExpressionContext {
- public:
- TestContext(FullCodeGenerator* codegen,
- Expression* condition,
- Label* true_label,
- Label* false_label,
- Label* fall_through)
- : ExpressionContext(codegen),
- condition_(condition),
- true_label_(true_label),
- false_label_(false_label),
- fall_through_(fall_through) { }
-
- static const TestContext* cast(const ExpressionContext* context) {
- ASSERT(context->IsTest());
- return reinterpret_cast<const TestContext*>(context);
- }
-
- Expression* condition() const { return condition_; }
- Label* true_label() const { return true_label_; }
- Label* false_label() const { return false_label_; }
- Label* fall_through() const { return fall_through_; }
-
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Variable* var) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual bool IsTest() const { return true; }
-
- private:
- Expression* condition_;
- Label* true_label_;
- Label* false_label_;
- Label* fall_through_;
- };
-
- class EffectContext : public ExpressionContext {
- public:
- explicit EffectContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) { }
-
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Variable* var) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual bool IsEffect() const { return true; }
- };
-
- MacroAssembler* masm_;
- CompilationInfo* info_;
- Scope* scope_;
- Label return_label_;
- NestedStatement* nesting_stack_;
- int loop_depth_;
- ZoneList<Handle<Object> >* globals_;
- Handle<FixedArray> modules_;
- int module_index_;
- const ExpressionContext* context_;
- ZoneList<BailoutEntry> bailout_entries_;
- GrowableBitVector prepared_bailout_ids_;
- // TODO(svenpanne) Rename this to something like back_edges_ and rename
- // related functions accordingly.
- ZoneList<BailoutEntry> stack_checks_;
- ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
- int ic_total_count_;
- Handle<FixedArray> handler_table_;
- Handle<JSGlobalPropertyCell> profiling_counter_;
- bool generate_debug_code_;
- Zone* zone_;
-
- friend class NestedStatement;
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
- DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
-};
-
-
-// A map from property names to getter/setter pairs allocated in the zone.
-class AccessorTable: public TemplateHashMap<Literal,
- ObjectLiteral::Accessors,
- ZoneAllocationPolicy> {
- public:
- explicit AccessorTable(Zone* zone) :
- TemplateHashMap<Literal, ObjectLiteral::Accessors,
- ZoneAllocationPolicy>(Literal::Match,
- ZoneAllocationPolicy(zone)),
- zone_(zone) { }
-
- Iterator lookup(Literal* literal) {
- Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
- if (it->second == NULL) it->second = new(zone_) ObjectLiteral::Accessors();
- return it;
- }
-
- private:
- Zone* zone_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_FULL_CODEGEN_H_
diff --git a/src/3rdparty/v8/src/func-name-inferrer.cc b/src/3rdparty/v8/src/func-name-inferrer.cc
deleted file mode 100644
index 84d3bf0..0000000
--- a/src/3rdparty/v8/src/func-name-inferrer.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "func-name-inferrer.h"
-#include "list-inl.h"
-
-namespace v8 {
-namespace internal {
-
-FuncNameInferrer::FuncNameInferrer(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- entries_stack_(10, zone),
- names_stack_(5, zone),
- funcs_to_infer_(4, zone),
- zone_(zone) {
-}
-
-
-void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
- // Enclosing name is a name of a constructor function. To check
- // that it is really a constructor, we check that it is not empty
- // and starts with a capital letter.
- if (name->length() > 0 && Runtime::IsUpperCaseChar(
- isolate()->runtime_state(), name->Get(0))) {
- names_stack_.Add(Name(name, kEnclosingConstructorName), zone());
- }
-}
-
-
-void FuncNameInferrer::PushLiteralName(Handle<String> name) {
- if (IsOpen() && !isolate()->heap()->prototype_string()->Equals(*name)) {
- names_stack_.Add(Name(name, kLiteralName), zone());
- }
-}
-
-
-void FuncNameInferrer::PushVariableName(Handle<String> name) {
- if (IsOpen() && !isolate()->heap()->result_string()->Equals(*name)) {
- names_stack_.Add(Name(name, kVariableName), zone());
- }
-}
-
-
-Handle<String> FuncNameInferrer::MakeNameFromStack() {
- return MakeNameFromStackHelper(0, isolate()->factory()->empty_string());
-}
-
-
-Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
- Handle<String> prev) {
- if (pos >= names_stack_.length()) return prev;
- if (pos < names_stack_.length() - 1 &&
- names_stack_.at(pos).type == kVariableName &&
- names_stack_.at(pos + 1).type == kVariableName) {
- // Skip consecutive variable declarations.
- return MakeNameFromStackHelper(pos + 1, prev);
- } else {
- if (prev->length() > 0) {
- Factory* factory = isolate()->factory();
- Handle<String> curr = factory->NewConsString(
- factory->dot_string(), names_stack_.at(pos).name);
- return MakeNameFromStackHelper(pos + 1,
- factory->NewConsString(prev, curr));
- } else {
- return MakeNameFromStackHelper(pos + 1, names_stack_.at(pos).name);
- }
- }
-}
-
-
-void FuncNameInferrer::InferFunctionsNames() {
- Handle<String> func_name = MakeNameFromStack();
- for (int i = 0; i < funcs_to_infer_.length(); ++i) {
- funcs_to_infer_[i]->set_inferred_name(func_name);
- }
- funcs_to_infer_.Rewind(0);
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/func-name-inferrer.h b/src/3rdparty/v8/src/func-name-inferrer.h
deleted file mode 100644
index f57e778..0000000
--- a/src/3rdparty/v8/src/func-name-inferrer.h
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FUNC_NAME_INFERRER_H_
-#define V8_FUNC_NAME_INFERRER_H_
-
-namespace v8 {
-namespace internal {
-
-class Isolate;
-
-// FuncNameInferrer is a stateful class that is used to perform name
-// inference for anonymous functions during static analysis of source code.
-// Inference is performed in cases when an anonymous function is assigned
-// to a variable or a property (see test-func-name-inference.cc for examples.)
-//
-// The basic idea is that during parsing of LHSs of certain expressions
-// (assignments, declarations, object literals) we collect name strings,
-// and during parsing of the RHS, a function literal can be collected. After
-// parsing the RHS we can infer a name for function literals that do not have
-// a name.
-class FuncNameInferrer : public ZoneObject {
- public:
- FuncNameInferrer(Isolate* isolate, Zone* zone);
-
- // Returns whether we have entered name collection state.
- bool IsOpen() const { return !entries_stack_.is_empty(); }
-
- // Pushes an enclosing the name of enclosing function onto names stack.
- void PushEnclosingName(Handle<String> name);
-
- // Enters name collection state.
- void Enter() {
- entries_stack_.Add(names_stack_.length(), zone());
- }
-
- // Pushes an encountered name onto names stack when in collection state.
- void PushLiteralName(Handle<String> name);
-
- void PushVariableName(Handle<String> name);
-
- // Adds a function to infer name for.
- void AddFunction(FunctionLiteral* func_to_infer) {
- if (IsOpen()) {
- funcs_to_infer_.Add(func_to_infer, zone());
- }
- }
-
- void RemoveLastFunction() {
- if (IsOpen() && !funcs_to_infer_.is_empty()) {
- funcs_to_infer_.RemoveLast();
- }
- }
-
- // Infers a function name and leaves names collection state.
- void Infer() {
- ASSERT(IsOpen());
- if (!funcs_to_infer_.is_empty()) {
- InferFunctionsNames();
- }
- }
-
- // Leaves names collection state.
- void Leave() {
- ASSERT(IsOpen());
- names_stack_.Rewind(entries_stack_.RemoveLast());
- if (entries_stack_.is_empty())
- funcs_to_infer_.Clear();
- }
-
- private:
- enum NameType {
- kEnclosingConstructorName,
- kLiteralName,
- kVariableName
- };
- struct Name {
- Name(Handle<String> name, NameType type) : name(name), type(type) { }
- Handle<String> name;
- NameType type;
- };
-
- Isolate* isolate() { return isolate_; }
- Zone* zone() const { return zone_; }
-
- // Constructs a full name in dotted notation from gathered names.
- Handle<String> MakeNameFromStack();
-
- // A helper function for MakeNameFromStack.
- Handle<String> MakeNameFromStackHelper(int pos, Handle<String> prev);
-
- // Performs name inferring for added functions.
- void InferFunctionsNames();
-
- Isolate* isolate_;
- ZoneList<int> entries_stack_;
- ZoneList<Name> names_stack_;
- ZoneList<FunctionLiteral*> funcs_to_infer_;
- Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_FUNC_NAME_INFERRER_H_
diff --git a/src/3rdparty/v8/src/gdb-jit.cc b/src/3rdparty/v8/src/gdb-jit.cc
deleted file mode 100644
index dde6bbd..0000000
--- a/src/3rdparty/v8/src/gdb-jit.cc
+++ /dev/null
@@ -1,2173 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef ENABLE_GDB_JIT_INTERFACE
-#include "v8.h"
-#include "gdb-jit.h"
-
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "frames.h"
-#include "frames-inl.h"
-#include "global-handles.h"
-#include "messages.h"
-#include "natives.h"
-#include "platform.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef __APPLE__
-#define __MACH_O
-class MachO;
-class MachOSection;
-typedef MachO DebugObject;
-typedef MachOSection DebugSection;
-#else
-#define __ELF
-class ELF;
-class ELFSection;
-typedef ELF DebugObject;
-typedef ELFSection DebugSection;
-#endif
-
-class Writer BASE_EMBEDDED {
- public:
- explicit Writer(DebugObject* debug_object)
- : debug_object_(debug_object),
- position_(0),
- capacity_(1024),
- buffer_(reinterpret_cast<byte*>(malloc(capacity_))) {
- }
-
- ~Writer() {
- free(buffer_);
- }
-
- uintptr_t position() const {
- return position_;
- }
-
- template<typename T>
- class Slot {
- public:
- Slot(Writer* w, uintptr_t offset) : w_(w), offset_(offset) { }
-
- T* operator-> () {
- return w_->RawSlotAt<T>(offset_);
- }
-
- void set(const T& value) {
- *w_->RawSlotAt<T>(offset_) = value;
- }
-
- Slot<T> at(int i) {
- return Slot<T>(w_, offset_ + sizeof(T) * i);
- }
-
- private:
- Writer* w_;
- uintptr_t offset_;
- };
-
- template<typename T>
- void Write(const T& val) {
- Ensure(position_ + sizeof(T));
- *RawSlotAt<T>(position_) = val;
- position_ += sizeof(T);
- }
-
- template<typename T>
- Slot<T> SlotAt(uintptr_t offset) {
- Ensure(offset + sizeof(T));
- return Slot<T>(this, offset);
- }
-
- template<typename T>
- Slot<T> CreateSlotHere() {
- return CreateSlotsHere<T>(1);
- }
-
- template<typename T>
- Slot<T> CreateSlotsHere(uint32_t count) {
- uintptr_t slot_position = position_;
- position_ += sizeof(T) * count;
- Ensure(position_);
- return SlotAt<T>(slot_position);
- }
-
- void Ensure(uintptr_t pos) {
- if (capacity_ < pos) {
- while (capacity_ < pos) capacity_ *= 2;
- buffer_ = reinterpret_cast<byte*>(realloc(buffer_, capacity_));
- }
- }
-
- DebugObject* debug_object() { return debug_object_; }
-
- byte* buffer() { return buffer_; }
-
- void Align(uintptr_t align) {
- uintptr_t delta = position_ % align;
- if (delta == 0) return;
- uintptr_t padding = align - delta;
- Ensure(position_ += padding);
- ASSERT((position_ % align) == 0);
- }
-
- void WriteULEB128(uintptr_t value) {
- do {
- uint8_t byte = value & 0x7F;
- value >>= 7;
- if (value != 0) byte |= 0x80;
- Write<uint8_t>(byte);
- } while (value != 0);
- }
-
- void WriteSLEB128(intptr_t value) {
- bool more = true;
- while (more) {
- int8_t byte = value & 0x7F;
- bool byte_sign = byte & 0x40;
- value >>= 7;
-
- if ((value == 0 && !byte_sign) || (value == -1 && byte_sign)) {
- more = false;
- } else {
- byte |= 0x80;
- }
-
- Write<int8_t>(byte);
- }
- }
-
- void WriteString(const char* str) {
- do {
- Write<char>(*str);
- } while (*str++);
- }
-
- private:
- template<typename T> friend class Slot;
-
- template<typename T>
- T* RawSlotAt(uintptr_t offset) {
- ASSERT(offset < capacity_ && offset + sizeof(T) <= capacity_);
- return reinterpret_cast<T*>(&buffer_[offset]);
- }
-
- DebugObject* debug_object_;
- uintptr_t position_;
- uintptr_t capacity_;
- byte* buffer_;
-};
-
-class StringTable;
-
-template<typename THeader>
-class DebugSectionBase : public ZoneObject {
- public:
- virtual ~DebugSectionBase() { }
-
- virtual void WriteBody(Writer::Slot<THeader> header, Writer* writer) {
- uintptr_t start = writer->position();
- if (WriteBodyInternal(writer)) {
- uintptr_t end = writer->position();
- header->offset = start;
-#if defined(__MACH_O)
- header->addr = 0;
-#endif
- header->size = end - start;
- }
- }
-
- virtual bool WriteBodyInternal(Writer* writer) {
- return false;
- }
-
- typedef THeader Header;
-};
-
-
-struct MachOSectionHeader {
- char sectname[16];
- char segname[16];
-#if defined(V8_TARGET_ARCH_IA32)
- uint32_t addr;
- uint32_t size;
-#else
- uint64_t addr;
- uint64_t size;
-#endif
- uint32_t offset;
- uint32_t align;
- uint32_t reloff;
- uint32_t nreloc;
- uint32_t flags;
- uint32_t reserved1;
- uint32_t reserved2;
-};
-
-
-class MachOSection : public DebugSectionBase<MachOSectionHeader> {
- public:
- enum Type {
- S_REGULAR = 0x0u,
- S_ATTR_COALESCED = 0xbu,
- S_ATTR_SOME_INSTRUCTIONS = 0x400u,
- S_ATTR_DEBUG = 0x02000000u,
- S_ATTR_PURE_INSTRUCTIONS = 0x80000000u
- };
-
- MachOSection(const char* name,
- const char* segment,
- uintptr_t align,
- uint32_t flags)
- : name_(name),
- segment_(segment),
- align_(align),
- flags_(flags) {
- ASSERT(IsPowerOf2(align));
- if (align_ != 0) {
- align_ = WhichPowerOf2(align_);
- }
- }
-
- virtual ~MachOSection() { }
-
- virtual void PopulateHeader(Writer::Slot<Header> header) {
- header->addr = 0;
- header->size = 0;
- header->offset = 0;
- header->align = align_;
- header->reloff = 0;
- header->nreloc = 0;
- header->flags = flags_;
- header->reserved1 = 0;
- header->reserved2 = 0;
- memset(header->sectname, 0, sizeof(header->sectname));
- memset(header->segname, 0, sizeof(header->segname));
- ASSERT(strlen(name_) < sizeof(header->sectname));
- ASSERT(strlen(segment_) < sizeof(header->segname));
- strncpy(header->sectname, name_, sizeof(header->sectname));
- strncpy(header->segname, segment_, sizeof(header->segname));
- }
-
- private:
- const char* name_;
- const char* segment_;
- uintptr_t align_;
- uint32_t flags_;
-};
-
-
-struct ELFSectionHeader {
- uint32_t name;
- uint32_t type;
- uintptr_t flags;
- uintptr_t address;
- uintptr_t offset;
- uintptr_t size;
- uint32_t link;
- uint32_t info;
- uintptr_t alignment;
- uintptr_t entry_size;
-};
-
-
-#if defined(__ELF)
-class ELFSection : public DebugSectionBase<ELFSectionHeader> {
- public:
- enum Type {
- TYPE_NULL = 0,
- TYPE_PROGBITS = 1,
- TYPE_SYMTAB = 2,
- TYPE_STRTAB = 3,
- TYPE_RELA = 4,
- TYPE_HASH = 5,
- TYPE_DYNAMIC = 6,
- TYPE_NOTE = 7,
- TYPE_NOBITS = 8,
- TYPE_REL = 9,
- TYPE_SHLIB = 10,
- TYPE_DYNSYM = 11,
- TYPE_LOPROC = 0x70000000,
- TYPE_X86_64_UNWIND = 0x70000001,
- TYPE_HIPROC = 0x7fffffff,
- TYPE_LOUSER = 0x80000000,
- TYPE_HIUSER = 0xffffffff
- };
-
- enum Flags {
- FLAG_WRITE = 1,
- FLAG_ALLOC = 2,
- FLAG_EXEC = 4
- };
-
- enum SpecialIndexes {
- INDEX_ABSOLUTE = 0xfff1
- };
-
- ELFSection(const char* name, Type type, uintptr_t align)
- : name_(name), type_(type), align_(align) { }
-
- virtual ~ELFSection() { }
-
- void PopulateHeader(Writer::Slot<Header> header, StringTable* strtab);
-
- virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
- uintptr_t start = w->position();
- if (WriteBodyInternal(w)) {
- uintptr_t end = w->position();
- header->offset = start;
- header->size = end - start;
- }
- }
-
- virtual bool WriteBodyInternal(Writer* w) {
- return false;
- }
-
- uint16_t index() const { return index_; }
- void set_index(uint16_t index) { index_ = index; }
-
- protected:
- virtual void PopulateHeader(Writer::Slot<Header> header) {
- header->flags = 0;
- header->address = 0;
- header->offset = 0;
- header->size = 0;
- header->link = 0;
- header->info = 0;
- header->entry_size = 0;
- }
-
- private:
- const char* name_;
- Type type_;
- uintptr_t align_;
- uint16_t index_;
-};
-#endif // defined(__ELF)
-
-
-#if defined(__MACH_O)
-class MachOTextSection : public MachOSection {
- public:
- MachOTextSection(uintptr_t align,
- uintptr_t addr,
- uintptr_t size)
- : MachOSection("__text",
- "__TEXT",
- align,
- MachOSection::S_REGULAR |
- MachOSection::S_ATTR_SOME_INSTRUCTIONS |
- MachOSection::S_ATTR_PURE_INSTRUCTIONS),
- addr_(addr),
- size_(size) { }
-
- protected:
- virtual void PopulateHeader(Writer::Slot<Header> header) {
- MachOSection::PopulateHeader(header);
- header->addr = addr_;
- header->size = size_;
- }
-
- private:
- uintptr_t addr_;
- uintptr_t size_;
-};
-#endif // defined(__MACH_O)
-
-
-#if defined(__ELF)
-class FullHeaderELFSection : public ELFSection {
- public:
- FullHeaderELFSection(const char* name,
- Type type,
- uintptr_t align,
- uintptr_t addr,
- uintptr_t offset,
- uintptr_t size,
- uintptr_t flags)
- : ELFSection(name, type, align),
- addr_(addr),
- offset_(offset),
- size_(size),
- flags_(flags) { }
-
- protected:
- virtual void PopulateHeader(Writer::Slot<Header> header) {
- ELFSection::PopulateHeader(header);
- header->address = addr_;
- header->offset = offset_;
- header->size = size_;
- header->flags = flags_;
- }
-
- private:
- uintptr_t addr_;
- uintptr_t offset_;
- uintptr_t size_;
- uintptr_t flags_;
-};
-
-
-class StringTable : public ELFSection {
- public:
- explicit StringTable(const char* name)
- : ELFSection(name, TYPE_STRTAB, 1), writer_(NULL), offset_(0), size_(0) {
- }
-
- uintptr_t Add(const char* str) {
- if (*str == '\0') return 0;
-
- uintptr_t offset = size_;
- WriteString(str);
- return offset;
- }
-
- void AttachWriter(Writer* w) {
- writer_ = w;
- offset_ = writer_->position();
-
- // First entry in the string table should be an empty string.
- WriteString("");
- }
-
- void DetachWriter() {
- writer_ = NULL;
- }
-
- virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
- ASSERT(writer_ == NULL);
- header->offset = offset_;
- header->size = size_;
- }
-
- private:
- void WriteString(const char* str) {
- uintptr_t written = 0;
- do {
- writer_->Write(*str);
- written++;
- } while (*str++);
- size_ += written;
- }
-
- Writer* writer_;
-
- uintptr_t offset_;
- uintptr_t size_;
-};
-
-
-void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
- StringTable* strtab) {
- header->name = strtab->Add(name_);
- header->type = type_;
- header->alignment = align_;
- PopulateHeader(header);
-}
-#endif // defined(__ELF)
-
-
-#if defined(__MACH_O)
-class MachO BASE_EMBEDDED {
- public:
- MachO() : sections_(6) { }
-
- uint32_t AddSection(MachOSection* section) {
- sections_.Add(section);
- return sections_.length() - 1;
- }
-
- void Write(Writer* w, uintptr_t code_start, uintptr_t code_size) {
- Writer::Slot<MachOHeader> header = WriteHeader(w);
- uintptr_t load_command_start = w->position();
- Writer::Slot<MachOSegmentCommand> cmd = WriteSegmentCommand(w,
- code_start,
- code_size);
- WriteSections(w, cmd, header, load_command_start);
- }
-
- private:
- struct MachOHeader {
- uint32_t magic;
- uint32_t cputype;
- uint32_t cpusubtype;
- uint32_t filetype;
- uint32_t ncmds;
- uint32_t sizeofcmds;
- uint32_t flags;
-#if defined(V8_TARGET_ARCH_X64)
- uint32_t reserved;
-#endif
- };
-
- struct MachOSegmentCommand {
- uint32_t cmd;
- uint32_t cmdsize;
- char segname[16];
-#if defined(V8_TARGET_ARCH_IA32)
- uint32_t vmaddr;
- uint32_t vmsize;
- uint32_t fileoff;
- uint32_t filesize;
-#else
- uint64_t vmaddr;
- uint64_t vmsize;
- uint64_t fileoff;
- uint64_t filesize;
-#endif
- uint32_t maxprot;
- uint32_t initprot;
- uint32_t nsects;
- uint32_t flags;
- };
-
- enum MachOLoadCommandCmd {
- LC_SEGMENT_32 = 0x00000001u,
- LC_SEGMENT_64 = 0x00000019u
- };
-
-
- Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
- ASSERT(w->position() == 0);
- Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
-#if defined(V8_TARGET_ARCH_IA32)
- header->magic = 0xFEEDFACEu;
- header->cputype = 7; // i386
- header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
-#elif defined(V8_TARGET_ARCH_X64)
- header->magic = 0xFEEDFACFu;
- header->cputype = 7 | 0x01000000; // i386 | 64-bit ABI
- header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
- header->reserved = 0;
-#else
-#error Unsupported target architecture.
-#endif
- header->filetype = 0x1; // MH_OBJECT
- header->ncmds = 1;
- header->sizeofcmds = 0;
- header->flags = 0;
- return header;
- }
-
-
- Writer::Slot<MachOSegmentCommand> WriteSegmentCommand(Writer* w,
- uintptr_t code_start,
- uintptr_t code_size) {
- Writer::Slot<MachOSegmentCommand> cmd =
- w->CreateSlotHere<MachOSegmentCommand>();
-#if defined(V8_TARGET_ARCH_IA32)
- cmd->cmd = LC_SEGMENT_32;
-#else
- cmd->cmd = LC_SEGMENT_64;
-#endif
- cmd->vmaddr = code_start;
- cmd->vmsize = code_size;
- cmd->fileoff = 0;
- cmd->filesize = 0;
- cmd->maxprot = 7;
- cmd->initprot = 7;
- cmd->flags = 0;
- cmd->nsects = sections_.length();
- memset(cmd->segname, 0, 16);
- cmd->cmdsize = sizeof(MachOSegmentCommand) + sizeof(MachOSection::Header) *
- cmd->nsects;
- return cmd;
- }
-
-
- void WriteSections(Writer* w,
- Writer::Slot<MachOSegmentCommand> cmd,
- Writer::Slot<MachOHeader> header,
- uintptr_t load_command_start) {
- Writer::Slot<MachOSection::Header> headers =
- w->CreateSlotsHere<MachOSection::Header>(sections_.length());
- cmd->fileoff = w->position();
- header->sizeofcmds = w->position() - load_command_start;
- for (int section = 0; section < sections_.length(); ++section) {
- sections_[section]->PopulateHeader(headers.at(section));
- sections_[section]->WriteBody(headers.at(section), w);
- }
- cmd->filesize = w->position() - (uintptr_t)cmd->fileoff;
- }
-
-
- ZoneList<MachOSection*> sections_;
-};
-#endif // defined(__MACH_O)
-
-
-#if defined(__ELF)
-class ELF BASE_EMBEDDED {
- public:
- ELF(Zone* zone) : sections_(6, zone) {
- sections_.Add(new(zone) ELFSection("", ELFSection::TYPE_NULL, 0), zone);
- sections_.Add(new(zone) StringTable(".shstrtab"), zone);
- }
-
- void Write(Writer* w) {
- WriteHeader(w);
- WriteSectionTable(w);
- WriteSections(w);
- }
-
- ELFSection* SectionAt(uint32_t index) {
- return sections_[index];
- }
-
- uint32_t AddSection(ELFSection* section, Zone* zone) {
- sections_.Add(section, zone);
- section->set_index(sections_.length() - 1);
- return sections_.length() - 1;
- }
-
- private:
- struct ELFHeader {
- uint8_t ident[16];
- uint16_t type;
- uint16_t machine;
- uint32_t version;
- uintptr_t entry;
- uintptr_t pht_offset;
- uintptr_t sht_offset;
- uint32_t flags;
- uint16_t header_size;
- uint16_t pht_entry_size;
- uint16_t pht_entry_num;
- uint16_t sht_entry_size;
- uint16_t sht_entry_num;
- uint16_t sht_strtab_index;
- };
-
-
- void WriteHeader(Writer* w) {
- ASSERT(w->position() == 0);
- Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
- const uint8_t ident[16] =
- { 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-#elif defined(V8_TARGET_ARCH_X64)
- const uint8_t ident[16] =
- { 0x7f, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-#else
-#error Unsupported target architecture.
-#endif
- memcpy(header->ident, ident, 16);
- header->type = 1;
-#if defined(V8_TARGET_ARCH_IA32)
- header->machine = 3;
-#elif defined(V8_TARGET_ARCH_X64)
- // Processor identification value for x64 is 62 as defined in
- // System V ABI, AMD64 Supplement
- // http://www.x86-64.org/documentation/abi.pdf
- header->machine = 62;
-#elif defined(V8_TARGET_ARCH_ARM)
- // Set to EM_ARM, defined as 40, in "ARM ELF File Format" at
- // infocenter.arm.com/help/topic/com.arm.doc.dui0101a/DUI0101A_Elf.pdf
- header->machine = 40;
-#else
-#error Unsupported target architecture.
-#endif
- header->version = 1;
- header->entry = 0;
- header->pht_offset = 0;
- header->sht_offset = sizeof(ELFHeader); // Section table follows header.
- header->flags = 0;
- header->header_size = sizeof(ELFHeader);
- header->pht_entry_size = 0;
- header->pht_entry_num = 0;
- header->sht_entry_size = sizeof(ELFSection::Header);
- header->sht_entry_num = sections_.length();
- header->sht_strtab_index = 1;
- }
-
- void WriteSectionTable(Writer* w) {
- // Section headers table immediately follows file header.
- ASSERT(w->position() == sizeof(ELFHeader));
-
- Writer::Slot<ELFSection::Header> headers =
- w->CreateSlotsHere<ELFSection::Header>(sections_.length());
-
- // String table for section table is the first section.
- StringTable* strtab = static_cast<StringTable*>(SectionAt(1));
- strtab->AttachWriter(w);
- for (int i = 0, length = sections_.length();
- i < length;
- i++) {
- sections_[i]->PopulateHeader(headers.at(i), strtab);
- }
- strtab->DetachWriter();
- }
-
- int SectionHeaderPosition(uint32_t section_index) {
- return sizeof(ELFHeader) + sizeof(ELFSection::Header) * section_index;
- }
-
- void WriteSections(Writer* w) {
- Writer::Slot<ELFSection::Header> headers =
- w->SlotAt<ELFSection::Header>(sizeof(ELFHeader));
-
- for (int i = 0, length = sections_.length();
- i < length;
- i++) {
- sections_[i]->WriteBody(headers.at(i), w);
- }
- }
-
- ZoneList<ELFSection*> sections_;
-};
-
-
-class ELFSymbol BASE_EMBEDDED {
- public:
- enum Type {
- TYPE_NOTYPE = 0,
- TYPE_OBJECT = 1,
- TYPE_FUNC = 2,
- TYPE_SECTION = 3,
- TYPE_FILE = 4,
- TYPE_LOPROC = 13,
- TYPE_HIPROC = 15
- };
-
- enum Binding {
- BIND_LOCAL = 0,
- BIND_GLOBAL = 1,
- BIND_WEAK = 2,
- BIND_LOPROC = 13,
- BIND_HIPROC = 15
- };
-
- ELFSymbol(const char* name,
- uintptr_t value,
- uintptr_t size,
- Binding binding,
- Type type,
- uint16_t section)
- : name(name),
- value(value),
- size(size),
- info((binding << 4) | type),
- other(0),
- section(section) {
- }
-
- Binding binding() const {
- return static_cast<Binding>(info >> 4);
- }
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
- struct SerializedLayout {
- SerializedLayout(uint32_t name,
- uintptr_t value,
- uintptr_t size,
- Binding binding,
- Type type,
- uint16_t section)
- : name(name),
- value(value),
- size(size),
- info((binding << 4) | type),
- other(0),
- section(section) {
- }
-
- uint32_t name;
- uintptr_t value;
- uintptr_t size;
- uint8_t info;
- uint8_t other;
- uint16_t section;
- };
-#elif defined(V8_TARGET_ARCH_X64)
- struct SerializedLayout {
- SerializedLayout(uint32_t name,
- uintptr_t value,
- uintptr_t size,
- Binding binding,
- Type type,
- uint16_t section)
- : name(name),
- info((binding << 4) | type),
- other(0),
- section(section),
- value(value),
- size(size) {
- }
-
- uint32_t name;
- uint8_t info;
- uint8_t other;
- uint16_t section;
- uintptr_t value;
- uintptr_t size;
- };
-#endif
-
- void Write(Writer::Slot<SerializedLayout> s, StringTable* t) {
- // Convert symbol names from strings to indexes in the string table.
- s->name = t->Add(name);
- s->value = value;
- s->size = size;
- s->info = info;
- s->other = other;
- s->section = section;
- }
-
- private:
- const char* name;
- uintptr_t value;
- uintptr_t size;
- uint8_t info;
- uint8_t other;
- uint16_t section;
-};
-
-
-class ELFSymbolTable : public ELFSection {
- public:
- ELFSymbolTable(const char* name, Zone* zone)
- : ELFSection(name, TYPE_SYMTAB, sizeof(uintptr_t)),
- locals_(1, zone),
- globals_(1, zone) {
- }
-
- virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
- w->Align(header->alignment);
- int total_symbols = locals_.length() + globals_.length() + 1;
- header->offset = w->position();
-
- Writer::Slot<ELFSymbol::SerializedLayout> symbols =
- w->CreateSlotsHere<ELFSymbol::SerializedLayout>(total_symbols);
-
- header->size = w->position() - header->offset;
-
- // String table for this symbol table should follow it in the section table.
- StringTable* strtab =
- static_cast<StringTable*>(w->debug_object()->SectionAt(index() + 1));
- strtab->AttachWriter(w);
- symbols.at(0).set(ELFSymbol::SerializedLayout(0,
- 0,
- 0,
- ELFSymbol::BIND_LOCAL,
- ELFSymbol::TYPE_NOTYPE,
- 0));
- WriteSymbolsList(&locals_, symbols.at(1), strtab);
- WriteSymbolsList(&globals_, symbols.at(locals_.length() + 1), strtab);
- strtab->DetachWriter();
- }
-
- void Add(const ELFSymbol& symbol, Zone* zone) {
- if (symbol.binding() == ELFSymbol::BIND_LOCAL) {
- locals_.Add(symbol, zone);
- } else {
- globals_.Add(symbol, zone);
- }
- }
-
- protected:
- virtual void PopulateHeader(Writer::Slot<Header> header) {
- ELFSection::PopulateHeader(header);
- // We are assuming that string table will follow symbol table.
- header->link = index() + 1;
- header->info = locals_.length() + 1;
- header->entry_size = sizeof(ELFSymbol::SerializedLayout);
- }
-
- private:
- void WriteSymbolsList(const ZoneList<ELFSymbol>* src,
- Writer::Slot<ELFSymbol::SerializedLayout> dst,
- StringTable* strtab) {
- for (int i = 0, len = src->length();
- i < len;
- i++) {
- src->at(i).Write(dst.at(i), strtab);
- }
- }
-
- ZoneList<ELFSymbol> locals_;
- ZoneList<ELFSymbol> globals_;
-};
-#endif // defined(__ELF)
-
-
-class CodeDescription BASE_EMBEDDED {
- public:
-#ifdef V8_TARGET_ARCH_X64
- enum StackState {
- POST_RBP_PUSH,
- POST_RBP_SET,
- POST_RBP_POP,
- STACK_STATE_MAX
- };
-#endif
-
- CodeDescription(const char* name,
- Code* code,
- Handle<Script> script,
- GDBJITLineInfo* lineinfo,
- GDBJITInterface::CodeTag tag,
- CompilationInfo* info)
- : name_(name),
- code_(code),
- script_(script),
- lineinfo_(lineinfo),
- tag_(tag),
- info_(info) {
- }
-
- const char* name() const {
- return name_;
- }
-
- GDBJITLineInfo* lineinfo() const {
- return lineinfo_;
- }
-
- GDBJITInterface::CodeTag tag() const {
- return tag_;
- }
-
- CompilationInfo* info() const {
- return info_;
- }
-
- bool IsInfoAvailable() const {
- return info_ != NULL;
- }
-
- uintptr_t CodeStart() const {
- return reinterpret_cast<uintptr_t>(code_->instruction_start());
- }
-
- uintptr_t CodeEnd() const {
- return reinterpret_cast<uintptr_t>(code_->instruction_end());
- }
-
- uintptr_t CodeSize() const {
- return CodeEnd() - CodeStart();
- }
-
- bool IsLineInfoAvailable() {
- return !script_.is_null() &&
- script_->source()->IsString() &&
- script_->HasValidSource() &&
- script_->name()->IsString() &&
- lineinfo_ != NULL;
- }
-
-#ifdef V8_TARGET_ARCH_X64
- uintptr_t GetStackStateStartAddress(StackState state) const {
- ASSERT(state < STACK_STATE_MAX);
- return stack_state_start_addresses_[state];
- }
-
- void SetStackStateStartAddress(StackState state, uintptr_t addr) {
- ASSERT(state < STACK_STATE_MAX);
- stack_state_start_addresses_[state] = addr;
- }
-#endif
-
- SmartArrayPointer<char> GetFilename() {
- return String::cast(script_->name())->ToCString();
- }
-
- int GetScriptLineNumber(int pos) {
- return GetScriptLineNumberSafe(script_, pos) + 1;
- }
-
-
- private:
- const char* name_;
- Code* code_;
- Handle<Script> script_;
- GDBJITLineInfo* lineinfo_;
- GDBJITInterface::CodeTag tag_;
- CompilationInfo* info_;
-#ifdef V8_TARGET_ARCH_X64
- uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
-#endif
-};
-
-#if defined(__ELF)
-static void CreateSymbolsTable(CodeDescription* desc,
- ELF* elf,
- int text_section_index) {
- Zone* zone = desc->info()->zone();
- ELFSymbolTable* symtab = new(zone) ELFSymbolTable(".symtab", zone);
- StringTable* strtab = new(zone) StringTable(".strtab");
-
- // Symbol table should be followed by the linked string table.
- elf->AddSection(symtab, zone);
- elf->AddSection(strtab, zone);
-
- symtab->Add(ELFSymbol("V8 Code",
- 0,
- 0,
- ELFSymbol::BIND_LOCAL,
- ELFSymbol::TYPE_FILE,
- ELFSection::INDEX_ABSOLUTE),
- zone);
-
- symtab->Add(ELFSymbol(desc->name(),
- 0,
- desc->CodeSize(),
- ELFSymbol::BIND_GLOBAL,
- ELFSymbol::TYPE_FUNC,
- text_section_index),
- zone);
-}
-#endif // defined(__ELF)
-
-
-class DebugInfoSection : public DebugSection {
- public:
- explicit DebugInfoSection(CodeDescription* desc)
-#if defined(__ELF)
- : ELFSection(".debug_info", TYPE_PROGBITS, 1),
-#else
- : MachOSection("__debug_info",
- "__DWARF",
- 1,
- MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
-#endif
- desc_(desc) { }
-
- // DWARF2 standard
- enum DWARF2LocationOp {
- DW_OP_reg0 = 0x50,
- DW_OP_reg1 = 0x51,
- DW_OP_reg2 = 0x52,
- DW_OP_reg3 = 0x53,
- DW_OP_reg4 = 0x54,
- DW_OP_reg5 = 0x55,
- DW_OP_reg6 = 0x56,
- DW_OP_reg7 = 0x57,
- DW_OP_fbreg = 0x91 // 1 param: SLEB128 offset
- };
-
- enum DWARF2Encoding {
- DW_ATE_ADDRESS = 0x1,
- DW_ATE_SIGNED = 0x5
- };
-
- bool WriteBodyInternal(Writer* w) {
- uintptr_t cu_start = w->position();
- Writer::Slot<uint32_t> size = w->CreateSlotHere<uint32_t>();
- uintptr_t start = w->position();
- w->Write<uint16_t>(2); // DWARF version.
- w->Write<uint32_t>(0); // Abbreviation table offset.
- w->Write<uint8_t>(sizeof(intptr_t));
-
- w->WriteULEB128(1); // Abbreviation code.
- w->WriteString(*desc_->GetFilename());
- w->Write<intptr_t>(desc_->CodeStart());
- w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
- w->Write<uint32_t>(0);
-
- uint32_t ty_offset = static_cast<uint32_t>(w->position() - cu_start);
- w->WriteULEB128(3);
- w->Write<uint8_t>(kPointerSize);
- w->WriteString("v8value");
-
- if (desc_->IsInfoAvailable()) {
- Scope* scope = desc_->info()->scope();
- w->WriteULEB128(2);
- w->WriteString(desc_->name());
- w->Write<intptr_t>(desc_->CodeStart());
- w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
- Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
- uintptr_t fb_block_start = w->position();
-#if defined(V8_TARGET_ARCH_IA32)
- w->Write<uint8_t>(DW_OP_reg5); // The frame pointer's here on ia32
-#elif defined(V8_TARGET_ARCH_X64)
- w->Write<uint8_t>(DW_OP_reg6); // and here on x64.
-#elif defined(V8_TARGET_ARCH_ARM)
- UNIMPLEMENTED();
-#elif defined(V8_TARGET_ARCH_MIPS)
- UNIMPLEMENTED();
-#else
-#error Unsupported target architecture.
-#endif
- fb_block_size.set(static_cast<uint32_t>(w->position() - fb_block_start));
-
- int params = scope->num_parameters();
- int slots = scope->num_stack_slots();
- int context_slots = scope->ContextLocalCount();
- // The real slot ID is internal_slots + context_slot_id.
- int internal_slots = Context::MIN_CONTEXT_SLOTS;
- int locals = scope->StackLocalCount();
- int current_abbreviation = 4;
-
- for (int param = 0; param < params; ++param) {
- w->WriteULEB128(current_abbreviation++);
- w->WriteString(
- *scope->parameter(param)->name()->ToCString(DISALLOW_NULLS));
- w->Write<uint32_t>(ty_offset);
- Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
- uintptr_t block_start = w->position();
- w->Write<uint8_t>(DW_OP_fbreg);
- w->WriteSLEB128(
- JavaScriptFrameConstants::kLastParameterOffset +
- kPointerSize * (params - param - 1));
- block_size.set(static_cast<uint32_t>(w->position() - block_start));
- }
-
- EmbeddedVector<char, 256> buffer;
- StringBuilder builder(buffer.start(), buffer.length());
-
- for (int slot = 0; slot < slots; ++slot) {
- w->WriteULEB128(current_abbreviation++);
- builder.Reset();
- builder.AddFormatted("slot%d", slot);
- w->WriteString(builder.Finalize());
- }
-
- // See contexts.h for more information.
- ASSERT(Context::MIN_CONTEXT_SLOTS == 4);
- ASSERT(Context::CLOSURE_INDEX == 0);
- ASSERT(Context::PREVIOUS_INDEX == 1);
- ASSERT(Context::EXTENSION_INDEX == 2);
- ASSERT(Context::GLOBAL_OBJECT_INDEX == 3);
- w->WriteULEB128(current_abbreviation++);
- w->WriteString(".closure");
- w->WriteULEB128(current_abbreviation++);
- w->WriteString(".previous");
- w->WriteULEB128(current_abbreviation++);
- w->WriteString(".extension");
- w->WriteULEB128(current_abbreviation++);
- w->WriteString(".global");
-
- for (int context_slot = 0;
- context_slot < context_slots;
- ++context_slot) {
- w->WriteULEB128(current_abbreviation++);
- builder.Reset();
- builder.AddFormatted("context_slot%d", context_slot + internal_slots);
- w->WriteString(builder.Finalize());
- }
-
- ZoneList<Variable*> stack_locals(locals, scope->zone());
- ZoneList<Variable*> context_locals(context_slots, scope->zone());
- scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
- for (int local = 0; local < locals; ++local) {
- w->WriteULEB128(current_abbreviation++);
- w->WriteString(
- *stack_locals[local]->name()->ToCString(DISALLOW_NULLS));
- w->Write<uint32_t>(ty_offset);
- Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
- uintptr_t block_start = w->position();
- w->Write<uint8_t>(DW_OP_fbreg);
- w->WriteSLEB128(
- JavaScriptFrameConstants::kLocal0Offset -
- kPointerSize * local);
- block_size.set(static_cast<uint32_t>(w->position() - block_start));
- }
-
- {
- w->WriteULEB128(current_abbreviation++);
- w->WriteString("__function");
- w->Write<uint32_t>(ty_offset);
- Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
- uintptr_t block_start = w->position();
- w->Write<uint8_t>(DW_OP_fbreg);
- w->WriteSLEB128(JavaScriptFrameConstants::kFunctionOffset);
- block_size.set(static_cast<uint32_t>(w->position() - block_start));
- }
-
- {
- w->WriteULEB128(current_abbreviation++);
- w->WriteString("__context");
- w->Write<uint32_t>(ty_offset);
- Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
- uintptr_t block_start = w->position();
- w->Write<uint8_t>(DW_OP_fbreg);
- w->WriteSLEB128(StandardFrameConstants::kContextOffset);
- block_size.set(static_cast<uint32_t>(w->position() - block_start));
- }
- }
-
- size.set(static_cast<uint32_t>(w->position() - start));
- return true;
- }
-
- private:
- CodeDescription* desc_;
-};
-
-
-class DebugAbbrevSection : public DebugSection {
- public:
- explicit DebugAbbrevSection(CodeDescription* desc)
-#ifdef __ELF
- : ELFSection(".debug_abbrev", TYPE_PROGBITS, 1),
-#else
- : MachOSection("__debug_abbrev",
- "__DWARF",
- 1,
- MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
-#endif
- desc_(desc) { }
-
- // DWARF2 standard, figure 14.
- enum DWARF2Tags {
- DW_TAG_FORMAL_PARAMETER = 0x05,
- DW_TAG_POINTER_TYPE = 0xf,
- DW_TAG_COMPILE_UNIT = 0x11,
- DW_TAG_STRUCTURE_TYPE = 0x13,
- DW_TAG_BASE_TYPE = 0x24,
- DW_TAG_SUBPROGRAM = 0x2e,
- DW_TAG_VARIABLE = 0x34
- };
-
- // DWARF2 standard, figure 16.
- enum DWARF2ChildrenDetermination {
- DW_CHILDREN_NO = 0,
- DW_CHILDREN_YES = 1
- };
-
- // DWARF standard, figure 17.
- enum DWARF2Attribute {
- DW_AT_LOCATION = 0x2,
- DW_AT_NAME = 0x3,
- DW_AT_BYTE_SIZE = 0xb,
- DW_AT_STMT_LIST = 0x10,
- DW_AT_LOW_PC = 0x11,
- DW_AT_HIGH_PC = 0x12,
- DW_AT_ENCODING = 0x3e,
- DW_AT_FRAME_BASE = 0x40,
- DW_AT_TYPE = 0x49
- };
-
- // DWARF2 standard, figure 19.
- enum DWARF2AttributeForm {
- DW_FORM_ADDR = 0x1,
- DW_FORM_BLOCK4 = 0x4,
- DW_FORM_STRING = 0x8,
- DW_FORM_DATA4 = 0x6,
- DW_FORM_BLOCK = 0x9,
- DW_FORM_DATA1 = 0xb,
- DW_FORM_FLAG = 0xc,
- DW_FORM_REF4 = 0x13
- };
-
- void WriteVariableAbbreviation(Writer* w,
- int abbreviation_code,
- bool has_value,
- bool is_parameter) {
- w->WriteULEB128(abbreviation_code);
- w->WriteULEB128(is_parameter ? DW_TAG_FORMAL_PARAMETER : DW_TAG_VARIABLE);
- w->Write<uint8_t>(DW_CHILDREN_NO);
- w->WriteULEB128(DW_AT_NAME);
- w->WriteULEB128(DW_FORM_STRING);
- if (has_value) {
- w->WriteULEB128(DW_AT_TYPE);
- w->WriteULEB128(DW_FORM_REF4);
- w->WriteULEB128(DW_AT_LOCATION);
- w->WriteULEB128(DW_FORM_BLOCK4);
- }
- w->WriteULEB128(0);
- w->WriteULEB128(0);
- }
-
- bool WriteBodyInternal(Writer* w) {
- int current_abbreviation = 1;
- bool extra_info = desc_->IsInfoAvailable();
- ASSERT(desc_->IsLineInfoAvailable());
- w->WriteULEB128(current_abbreviation++);
- w->WriteULEB128(DW_TAG_COMPILE_UNIT);
- w->Write<uint8_t>(extra_info ? DW_CHILDREN_YES : DW_CHILDREN_NO);
- w->WriteULEB128(DW_AT_NAME);
- w->WriteULEB128(DW_FORM_STRING);
- w->WriteULEB128(DW_AT_LOW_PC);
- w->WriteULEB128(DW_FORM_ADDR);
- w->WriteULEB128(DW_AT_HIGH_PC);
- w->WriteULEB128(DW_FORM_ADDR);
- w->WriteULEB128(DW_AT_STMT_LIST);
- w->WriteULEB128(DW_FORM_DATA4);
- w->WriteULEB128(0);
- w->WriteULEB128(0);
-
- if (extra_info) {
- Scope* scope = desc_->info()->scope();
- int params = scope->num_parameters();
- int slots = scope->num_stack_slots();
- int context_slots = scope->ContextLocalCount();
- // The real slot ID is internal_slots + context_slot_id.
- int internal_slots = Context::MIN_CONTEXT_SLOTS;
- int locals = scope->StackLocalCount();
- int total_children =
- params + slots + context_slots + internal_slots + locals + 2;
-
- // The extra duplication below seems to be necessary to keep
- // gdb from getting upset on OSX.
- w->WriteULEB128(current_abbreviation++); // Abbreviation code.
- w->WriteULEB128(DW_TAG_SUBPROGRAM);
- w->Write<uint8_t>(
- total_children != 0 ? DW_CHILDREN_YES : DW_CHILDREN_NO);
- w->WriteULEB128(DW_AT_NAME);
- w->WriteULEB128(DW_FORM_STRING);
- w->WriteULEB128(DW_AT_LOW_PC);
- w->WriteULEB128(DW_FORM_ADDR);
- w->WriteULEB128(DW_AT_HIGH_PC);
- w->WriteULEB128(DW_FORM_ADDR);
- w->WriteULEB128(DW_AT_FRAME_BASE);
- w->WriteULEB128(DW_FORM_BLOCK4);
- w->WriteULEB128(0);
- w->WriteULEB128(0);
-
- w->WriteULEB128(current_abbreviation++);
- w->WriteULEB128(DW_TAG_STRUCTURE_TYPE);
- w->Write<uint8_t>(DW_CHILDREN_NO);
- w->WriteULEB128(DW_AT_BYTE_SIZE);
- w->WriteULEB128(DW_FORM_DATA1);
- w->WriteULEB128(DW_AT_NAME);
- w->WriteULEB128(DW_FORM_STRING);
- w->WriteULEB128(0);
- w->WriteULEB128(0);
-
- for (int param = 0; param < params; ++param) {
- WriteVariableAbbreviation(w, current_abbreviation++, true, true);
- }
-
- for (int slot = 0; slot < slots; ++slot) {
- WriteVariableAbbreviation(w, current_abbreviation++, false, false);
- }
-
- for (int internal_slot = 0;
- internal_slot < internal_slots;
- ++internal_slot) {
- WriteVariableAbbreviation(w, current_abbreviation++, false, false);
- }
-
- for (int context_slot = 0;
- context_slot < context_slots;
- ++context_slot) {
- WriteVariableAbbreviation(w, current_abbreviation++, false, false);
- }
-
- for (int local = 0; local < locals; ++local) {
- WriteVariableAbbreviation(w, current_abbreviation++, true, false);
- }
-
- // The function.
- WriteVariableAbbreviation(w, current_abbreviation++, true, false);
-
- // The context.
- WriteVariableAbbreviation(w, current_abbreviation++, true, false);
-
- if (total_children != 0) {
- w->WriteULEB128(0); // Terminate the sibling list.
- }
- }
-
- w->WriteULEB128(0); // Terminate the table.
- return true;
- }
-
- private:
- CodeDescription* desc_;
-};
-
-
-class DebugLineSection : public DebugSection {
- public:
- explicit DebugLineSection(CodeDescription* desc)
-#ifdef __ELF
- : ELFSection(".debug_line", TYPE_PROGBITS, 1),
-#else
- : MachOSection("__debug_line",
- "__DWARF",
- 1,
- MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
-#endif
- desc_(desc) { }
-
- // DWARF2 standard, figure 34.
- enum DWARF2Opcodes {
- DW_LNS_COPY = 1,
- DW_LNS_ADVANCE_PC = 2,
- DW_LNS_ADVANCE_LINE = 3,
- DW_LNS_SET_FILE = 4,
- DW_LNS_SET_COLUMN = 5,
- DW_LNS_NEGATE_STMT = 6
- };
-
- // DWARF2 standard, figure 35.
- enum DWARF2ExtendedOpcode {
- DW_LNE_END_SEQUENCE = 1,
- DW_LNE_SET_ADDRESS = 2,
- DW_LNE_DEFINE_FILE = 3
- };
-
- bool WriteBodyInternal(Writer* w) {
- // Write prologue.
- Writer::Slot<uint32_t> total_length = w->CreateSlotHere<uint32_t>();
- uintptr_t start = w->position();
-
- // Used for special opcodes
- const int8_t line_base = 1;
- const uint8_t line_range = 7;
- const int8_t max_line_incr = (line_base + line_range - 1);
- const uint8_t opcode_base = DW_LNS_NEGATE_STMT + 1;
-
- w->Write<uint16_t>(2); // Field version.
- Writer::Slot<uint32_t> prologue_length = w->CreateSlotHere<uint32_t>();
- uintptr_t prologue_start = w->position();
- w->Write<uint8_t>(1); // Field minimum_instruction_length.
- w->Write<uint8_t>(1); // Field default_is_stmt.
- w->Write<int8_t>(line_base); // Field line_base.
- w->Write<uint8_t>(line_range); // Field line_range.
- w->Write<uint8_t>(opcode_base); // Field opcode_base.
- w->Write<uint8_t>(0); // DW_LNS_COPY operands count.
- w->Write<uint8_t>(1); // DW_LNS_ADVANCE_PC operands count.
- w->Write<uint8_t>(1); // DW_LNS_ADVANCE_LINE operands count.
- w->Write<uint8_t>(1); // DW_LNS_SET_FILE operands count.
- w->Write<uint8_t>(1); // DW_LNS_SET_COLUMN operands count.
- w->Write<uint8_t>(0); // DW_LNS_NEGATE_STMT operands count.
- w->Write<uint8_t>(0); // Empty include_directories sequence.
- w->WriteString(*desc_->GetFilename()); // File name.
- w->WriteULEB128(0); // Current directory.
- w->WriteULEB128(0); // Unknown modification time.
- w->WriteULEB128(0); // Unknown file size.
- w->Write<uint8_t>(0);
- prologue_length.set(static_cast<uint32_t>(w->position() - prologue_start));
-
- WriteExtendedOpcode(w, DW_LNE_SET_ADDRESS, sizeof(intptr_t));
- w->Write<intptr_t>(desc_->CodeStart());
- w->Write<uint8_t>(DW_LNS_COPY);
-
- intptr_t pc = 0;
- intptr_t line = 1;
- bool is_statement = true;
-
- List<GDBJITLineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info();
- pc_info->Sort(&ComparePCInfo);
-
- int pc_info_length = pc_info->length();
- for (int i = 0; i < pc_info_length; i++) {
- GDBJITLineInfo::PCInfo* info = &pc_info->at(i);
- ASSERT(info->pc_ >= pc);
-
- // Reduce bloating in the debug line table by removing duplicate line
- // entries (per DWARF2 standard).
- intptr_t new_line = desc_->GetScriptLineNumber(info->pos_);
- if (new_line == line) {
- continue;
- }
-
- // Mark statement boundaries. For a better debugging experience, mark
- // the last pc address in the function as a statement (e.g. "}"), so that
- // a user can see the result of the last line executed in the function,
- // should control reach the end.
- if ((i+1) == pc_info_length) {
- if (!is_statement) {
- w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
- }
- } else if (is_statement != info->is_statement_) {
- w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
- is_statement = !is_statement;
- }
-
- // Generate special opcodes, if possible. This results in more compact
- // debug line tables. See the DWARF 2.0 standard to learn more about
- // special opcodes.
- uintptr_t pc_diff = info->pc_ - pc;
- intptr_t line_diff = new_line - line;
-
- // Compute special opcode (see DWARF 2.0 standard)
- intptr_t special_opcode = (line_diff - line_base) +
- (line_range * pc_diff) + opcode_base;
-
- // If special_opcode is less than or equal to 255, it can be used as a
- // special opcode. If line_diff is larger than the max line increment
- // allowed for a special opcode, or if line_diff is less than the minimum
- // line that can be added to the line register (i.e. line_base), then
- // special_opcode can't be used.
- if ((special_opcode >= opcode_base) && (special_opcode <= 255) &&
- (line_diff <= max_line_incr) && (line_diff >= line_base)) {
- w->Write<uint8_t>(special_opcode);
- } else {
- w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
- w->WriteSLEB128(pc_diff);
- w->Write<uint8_t>(DW_LNS_ADVANCE_LINE);
- w->WriteSLEB128(line_diff);
- w->Write<uint8_t>(DW_LNS_COPY);
- }
-
- // Increment the pc and line operands.
- pc += pc_diff;
- line += line_diff;
- }
- // Advance the pc to the end of the routine, since the end sequence opcode
- // requires this.
- w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
- w->WriteSLEB128(desc_->CodeSize() - pc);
- WriteExtendedOpcode(w, DW_LNE_END_SEQUENCE, 0);
- total_length.set(static_cast<uint32_t>(w->position() - start));
- return true;
- }
-
- private:
- void WriteExtendedOpcode(Writer* w,
- DWARF2ExtendedOpcode op,
- size_t operands_size) {
- w->Write<uint8_t>(0);
- w->WriteULEB128(operands_size + 1);
- w->Write<uint8_t>(op);
- }
-
- static int ComparePCInfo(const GDBJITLineInfo::PCInfo* a,
- const GDBJITLineInfo::PCInfo* b) {
- if (a->pc_ == b->pc_) {
- if (a->is_statement_ != b->is_statement_) {
- return b->is_statement_ ? +1 : -1;
- }
- return 0;
- } else if (a->pc_ > b->pc_) {
- return +1;
- } else {
- return -1;
- }
- }
-
- CodeDescription* desc_;
-};
-
-
-#ifdef V8_TARGET_ARCH_X64
-
-class UnwindInfoSection : public DebugSection {
- public:
- explicit UnwindInfoSection(CodeDescription* desc);
- virtual bool WriteBodyInternal(Writer* w);
-
- int WriteCIE(Writer* w);
- void WriteFDE(Writer* w, int);
-
- void WriteFDEStateOnEntry(Writer* w);
- void WriteFDEStateAfterRBPPush(Writer* w);
- void WriteFDEStateAfterRBPSet(Writer* w);
- void WriteFDEStateAfterRBPPop(Writer* w);
-
- void WriteLength(Writer* w,
- Writer::Slot<uint32_t>* length_slot,
- int initial_position);
-
- private:
- CodeDescription* desc_;
-
- // DWARF3 Specification, Table 7.23
- enum CFIInstructions {
- DW_CFA_ADVANCE_LOC = 0x40,
- DW_CFA_OFFSET = 0x80,
- DW_CFA_RESTORE = 0xC0,
- DW_CFA_NOP = 0x00,
- DW_CFA_SET_LOC = 0x01,
- DW_CFA_ADVANCE_LOC1 = 0x02,
- DW_CFA_ADVANCE_LOC2 = 0x03,
- DW_CFA_ADVANCE_LOC4 = 0x04,
- DW_CFA_OFFSET_EXTENDED = 0x05,
- DW_CFA_RESTORE_EXTENDED = 0x06,
- DW_CFA_UNDEFINED = 0x07,
- DW_CFA_SAME_VALUE = 0x08,
- DW_CFA_REGISTER = 0x09,
- DW_CFA_REMEMBER_STATE = 0x0A,
- DW_CFA_RESTORE_STATE = 0x0B,
- DW_CFA_DEF_CFA = 0x0C,
- DW_CFA_DEF_CFA_REGISTER = 0x0D,
- DW_CFA_DEF_CFA_OFFSET = 0x0E,
-
- DW_CFA_DEF_CFA_EXPRESSION = 0x0F,
- DW_CFA_EXPRESSION = 0x10,
- DW_CFA_OFFSET_EXTENDED_SF = 0x11,
- DW_CFA_DEF_CFA_SF = 0x12,
- DW_CFA_DEF_CFA_OFFSET_SF = 0x13,
- DW_CFA_VAL_OFFSET = 0x14,
- DW_CFA_VAL_OFFSET_SF = 0x15,
- DW_CFA_VAL_EXPRESSION = 0x16
- };
-
- // System V ABI, AMD64 Supplement, Version 0.99.5, Figure 3.36
- enum RegisterMapping {
- // Only the relevant ones have been added to reduce clutter.
- AMD64_RBP = 6,
- AMD64_RSP = 7,
- AMD64_RA = 16
- };
-
- enum CFIConstants {
- CIE_ID = 0,
- CIE_VERSION = 1,
- CODE_ALIGN_FACTOR = 1,
- DATA_ALIGN_FACTOR = 1,
- RETURN_ADDRESS_REGISTER = AMD64_RA
- };
-};
-
-
-void UnwindInfoSection::WriteLength(Writer* w,
- Writer::Slot<uint32_t>* length_slot,
- int initial_position) {
- uint32_t align = (w->position() - initial_position) % kPointerSize;
-
- if (align != 0) {
- for (uint32_t i = 0; i < (kPointerSize - align); i++) {
- w->Write<uint8_t>(DW_CFA_NOP);
- }
- }
-
- ASSERT((w->position() - initial_position) % kPointerSize == 0);
- length_slot->set(w->position() - initial_position);
-}
-
-
-UnwindInfoSection::UnwindInfoSection(CodeDescription* desc)
-#ifdef __ELF
- : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1),
-#else
- : MachOSection("__eh_frame", "__TEXT", sizeof(uintptr_t),
- MachOSection::S_REGULAR),
-#endif
- desc_(desc) { }
-
-int UnwindInfoSection::WriteCIE(Writer* w) {
- Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
- uint32_t cie_position = w->position();
-
- // Write out the CIE header. Currently no 'common instructions' are
- // emitted onto the CIE; every FDE has its own set of instructions.
-
- w->Write<uint32_t>(CIE_ID);
- w->Write<uint8_t>(CIE_VERSION);
- w->Write<uint8_t>(0); // Null augmentation string.
- w->WriteSLEB128(CODE_ALIGN_FACTOR);
- w->WriteSLEB128(DATA_ALIGN_FACTOR);
- w->Write<uint8_t>(RETURN_ADDRESS_REGISTER);
-
- WriteLength(w, &cie_length_slot, cie_position);
-
- return cie_position;
-}
-
-
-void UnwindInfoSection::WriteFDE(Writer* w, int cie_position) {
- // The only FDE for this function. The CFA is the current RBP.
- Writer::Slot<uint32_t> fde_length_slot = w->CreateSlotHere<uint32_t>();
- int fde_position = w->position();
- w->Write<int32_t>(fde_position - cie_position + 4);
-
- w->Write<uintptr_t>(desc_->CodeStart());
- w->Write<uintptr_t>(desc_->CodeSize());
-
- WriteFDEStateOnEntry(w);
- WriteFDEStateAfterRBPPush(w);
- WriteFDEStateAfterRBPSet(w);
- WriteFDEStateAfterRBPPop(w);
-
- WriteLength(w, &fde_length_slot, fde_position);
-}
-
-
-void UnwindInfoSection::WriteFDEStateOnEntry(Writer* w) {
- // The first state, just after the control has been transferred to the the
- // function.
-
- // RBP for this function will be the value of RSP after pushing the RBP
- // for the previous function. The previous RBP has not been pushed yet.
- w->Write<uint8_t>(DW_CFA_DEF_CFA_SF);
- w->WriteULEB128(AMD64_RSP);
- w->WriteSLEB128(-kPointerSize);
-
- // The RA is stored at location CFA + kCallerPCOffset. This is an invariant,
- // and hence omitted from the next states.
- w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED);
- w->WriteULEB128(AMD64_RA);
- w->WriteSLEB128(StandardFrameConstants::kCallerPCOffset);
-
- // The RBP of the previous function is still in RBP.
- w->Write<uint8_t>(DW_CFA_SAME_VALUE);
- w->WriteULEB128(AMD64_RBP);
-
- // Last location described by this entry.
- w->Write<uint8_t>(DW_CFA_SET_LOC);
- w->Write<uint64_t>(
- desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_PUSH));
-}
-
-
-void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer* w) {
- // The second state, just after RBP has been pushed.
-
- // RBP / CFA for this function is now the current RSP, so just set the
- // offset from the previous rule (from -8) to 0.
- w->Write<uint8_t>(DW_CFA_DEF_CFA_OFFSET);
- w->WriteULEB128(0);
-
- // The previous RBP is stored at CFA + kCallerFPOffset. This is an invariant
- // in this and the next state, and hence omitted in the next state.
- w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED);
- w->WriteULEB128(AMD64_RBP);
- w->WriteSLEB128(StandardFrameConstants::kCallerFPOffset);
-
- // Last location described by this entry.
- w->Write<uint8_t>(DW_CFA_SET_LOC);
- w->Write<uint64_t>(
- desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_SET));
-}
-
-
-void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer* w) {
- // The third state, after the RBP has been set.
-
- // The CFA can now directly be set to RBP.
- w->Write<uint8_t>(DW_CFA_DEF_CFA);
- w->WriteULEB128(AMD64_RBP);
- w->WriteULEB128(0);
-
- // Last location described by this entry.
- w->Write<uint8_t>(DW_CFA_SET_LOC);
- w->Write<uint64_t>(
- desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_POP));
-}
-
-
-void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) {
- // The fourth (final) state. The RBP has been popped (just before issuing a
- // return).
-
- // The CFA can is now calculated in the same way as in the first state.
- w->Write<uint8_t>(DW_CFA_DEF_CFA_SF);
- w->WriteULEB128(AMD64_RSP);
- w->WriteSLEB128(-kPointerSize);
-
- // The RBP
- w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED);
- w->WriteULEB128(AMD64_RBP);
- w->WriteSLEB128(StandardFrameConstants::kCallerFPOffset);
-
- // Last location described by this entry.
- w->Write<uint8_t>(DW_CFA_SET_LOC);
- w->Write<uint64_t>(desc_->CodeEnd());
-}
-
-
-bool UnwindInfoSection::WriteBodyInternal(Writer* w) {
- uint32_t cie_position = WriteCIE(w);
- WriteFDE(w, cie_position);
- return true;
-}
-
-
-#endif // V8_TARGET_ARCH_X64
-
-static void CreateDWARFSections(CodeDescription* desc, DebugObject* obj) {
- Zone* zone = desc->info()->zone();
- if (desc->IsLineInfoAvailable()) {
- obj->AddSection(new(zone) DebugInfoSection(desc), zone);
- obj->AddSection(new(zone) DebugAbbrevSection(desc), zone);
- obj->AddSection(new(zone) DebugLineSection(desc), zone);
- }
-#ifdef V8_TARGET_ARCH_X64
- obj->AddSection(new(zone) UnwindInfoSection(desc), zone);
-#endif
-}
-
-
-// -------------------------------------------------------------------
-// Binary GDB JIT Interface as described in
-// http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
-extern "C" {
- typedef enum {
- JIT_NOACTION = 0,
- JIT_REGISTER_FN,
- JIT_UNREGISTER_FN
- } JITAction;
-
- struct JITCodeEntry {
- JITCodeEntry* next_;
- JITCodeEntry* prev_;
- Address symfile_addr_;
- uint64_t symfile_size_;
- };
-
- struct JITDescriptor {
- uint32_t version_;
- uint32_t action_flag_;
- JITCodeEntry* relevant_entry_;
- JITCodeEntry* first_entry_;
- };
-
- // GDB will place breakpoint into this function.
- // To prevent GCC from inlining or removing it we place noinline attribute
- // and inline assembler statement inside.
- void __attribute__((noinline)) __jit_debug_register_code() {
- __asm__("");
- }
-
- // GDB will inspect contents of this descriptor.
- // Static initialization is necessary to prevent GDB from seeing
- // uninitialized descriptor.
- JITDescriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
-
-#ifdef OBJECT_PRINT
- void __gdb_print_v8_object(MaybeObject* object) {
- object->Print();
- fprintf(stdout, "\n");
- }
-#endif
-}
-
-
-static JITCodeEntry* CreateCodeEntry(Address symfile_addr,
- uintptr_t symfile_size) {
- JITCodeEntry* entry = static_cast<JITCodeEntry*>(
- malloc(sizeof(JITCodeEntry) + symfile_size));
-
- entry->symfile_addr_ = reinterpret_cast<Address>(entry + 1);
- entry->symfile_size_ = symfile_size;
- memcpy(entry->symfile_addr_, symfile_addr, symfile_size);
-
- entry->prev_ = entry->next_ = NULL;
-
- return entry;
-}
-
-
-static void DestroyCodeEntry(JITCodeEntry* entry) {
- free(entry);
-}
-
-
-static void RegisterCodeEntry(JITCodeEntry* entry,
- bool dump_if_enabled,
- const char* name_hint) {
-#if defined(DEBUG) && !defined(WIN32)
- static int file_num = 0;
- if (FLAG_gdbjit_dump && dump_if_enabled) {
- static const int kMaxFileNameSize = 64;
- static const char* kElfFilePrefix = "/tmp/elfdump";
- static const char* kObjFileExt = ".o";
- char file_name[64];
-
- OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize),
- "%s%s%d%s",
- kElfFilePrefix,
- (name_hint != NULL) ? name_hint : "",
- file_num++,
- kObjFileExt);
- WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_);
- }
-#endif
-
- entry->next_ = __jit_debug_descriptor.first_entry_;
- if (entry->next_ != NULL) entry->next_->prev_ = entry;
- __jit_debug_descriptor.first_entry_ =
- __jit_debug_descriptor.relevant_entry_ = entry;
-
- __jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN;
- __jit_debug_register_code();
-}
-
-
-static void UnregisterCodeEntry(JITCodeEntry* entry) {
- if (entry->prev_ != NULL) {
- entry->prev_->next_ = entry->next_;
- } else {
- __jit_debug_descriptor.first_entry_ = entry->next_;
- }
-
- if (entry->next_ != NULL) {
- entry->next_->prev_ = entry->prev_;
- }
-
- __jit_debug_descriptor.relevant_entry_ = entry;
- __jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
- __jit_debug_register_code();
-}
-
-
-static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
- Zone* zone = desc->info()->zone();
- ZoneScope zone_scope(zone, DELETE_ON_EXIT);
-#ifdef __MACH_O
- MachO mach_o;
- Writer w(&mach_o);
-
- mach_o.AddSection(new MachOTextSection(kCodeAlignment,
- desc->CodeStart(),
- desc->CodeSize()));
-
- CreateDWARFSections(desc, &mach_o);
-
- mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
-#else
- ELF elf(zone);
- Writer w(&elf);
-
- int text_section_index = elf.AddSection(
- new(zone) FullHeaderELFSection(
- ".text",
- ELFSection::TYPE_NOBITS,
- kCodeAlignment,
- desc->CodeStart(),
- 0,
- desc->CodeSize(),
- ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC),
- zone);
-
- CreateSymbolsTable(desc, &elf, text_section_index);
-
- CreateDWARFSections(desc, &elf);
-
- elf.Write(&w);
-#endif
-
- return CreateCodeEntry(w.buffer(), w.position());
-}
-
-
-static bool SameCodeObjects(void* key1, void* key2) {
- return key1 == key2;
-}
-
-
-static HashMap* GetEntries() {
- static HashMap* entries = NULL;
- if (entries == NULL) {
- entries = new HashMap(&SameCodeObjects);
- }
- return entries;
-}
-
-
-static uint32_t HashForCodeObject(Code* code) {
- static const uintptr_t kGoldenRatio = 2654435761u;
- uintptr_t hash = reinterpret_cast<uintptr_t>(code->address());
- return static_cast<uint32_t>((hash >> kCodeAlignmentBits) * kGoldenRatio);
-}
-
-
-static const intptr_t kLineInfoTag = 0x1;
-
-
-static bool IsLineInfoTagged(void* ptr) {
- return 0 != (reinterpret_cast<intptr_t>(ptr) & kLineInfoTag);
-}
-
-
-static void* TagLineInfo(GDBJITLineInfo* ptr) {
- return reinterpret_cast<void*>(
- reinterpret_cast<intptr_t>(ptr) | kLineInfoTag);
-}
-
-
-static GDBJITLineInfo* UntagLineInfo(void* ptr) {
- return reinterpret_cast<GDBJITLineInfo*>(
- reinterpret_cast<intptr_t>(ptr) & ~kLineInfoTag);
-}
-
-
-void GDBJITInterface::AddCode(Handle<String> name,
- Handle<Script> script,
- Handle<Code> code,
- CompilationInfo* info) {
- if (!FLAG_gdbjit) return;
-
- // Force initialization of line_ends array.
- GetScriptLineNumber(script, 0);
-
- if (!name.is_null()) {
- SmartArrayPointer<char> name_cstring = name->ToCString(DISALLOW_NULLS);
- AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script, info);
- } else {
- AddCode("", *code, GDBJITInterface::FUNCTION, *script, info);
- }
-}
-
-static void AddUnwindInfo(CodeDescription* desc) {
-#ifdef V8_TARGET_ARCH_X64
- if (desc->tag() == GDBJITInterface::FUNCTION) {
- // To avoid propagating unwinding information through
- // compilation pipeline we use an approximation.
- // For most use cases this should not affect usability.
- static const int kFramePointerPushOffset = 1;
- static const int kFramePointerSetOffset = 4;
- static const int kFramePointerPopOffset = -3;
-
- uintptr_t frame_pointer_push_address =
- desc->CodeStart() + kFramePointerPushOffset;
-
- uintptr_t frame_pointer_set_address =
- desc->CodeStart() + kFramePointerSetOffset;
-
- uintptr_t frame_pointer_pop_address =
- desc->CodeEnd() + kFramePointerPopOffset;
-
- desc->SetStackStateStartAddress(CodeDescription::POST_RBP_PUSH,
- frame_pointer_push_address);
- desc->SetStackStateStartAddress(CodeDescription::POST_RBP_SET,
- frame_pointer_set_address);
- desc->SetStackStateStartAddress(CodeDescription::POST_RBP_POP,
- frame_pointer_pop_address);
- } else {
- desc->SetStackStateStartAddress(CodeDescription::POST_RBP_PUSH,
- desc->CodeStart());
- desc->SetStackStateStartAddress(CodeDescription::POST_RBP_SET,
- desc->CodeStart());
- desc->SetStackStateStartAddress(CodeDescription::POST_RBP_POP,
- desc->CodeEnd());
- }
-#endif // V8_TARGET_ARCH_X64
-}
-
-
-static LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
-
-
-void GDBJITInterface::AddCode(const char* name,
- Code* code,
- GDBJITInterface::CodeTag tag,
- Script* script,
- CompilationInfo* info) {
- if (!FLAG_gdbjit) return;
-
- ScopedLock lock(mutex.Pointer());
- AssertNoAllocation no_gc;
-
- HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
- if (e->value != NULL && !IsLineInfoTagged(e->value)) return;
-
- GDBJITLineInfo* lineinfo = UntagLineInfo(e->value);
- CodeDescription code_desc(name,
- code,
- script != NULL ? Handle<Script>(script)
- : Handle<Script>(),
- lineinfo,
- tag,
- info);
-
- if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
- delete lineinfo;
- GetEntries()->Remove(code, HashForCodeObject(code));
- return;
- }
-
- AddUnwindInfo(&code_desc);
- JITCodeEntry* entry = CreateELFObject(&code_desc);
- ASSERT(!IsLineInfoTagged(entry));
-
- delete lineinfo;
- e->value = entry;
-
- const char* name_hint = NULL;
- bool should_dump = false;
- if (FLAG_gdbjit_dump) {
- if (strlen(FLAG_gdbjit_dump_filter) == 0) {
- name_hint = name;
- should_dump = true;
- } else if (name != NULL) {
- name_hint = strstr(name, FLAG_gdbjit_dump_filter);
- should_dump = (name_hint != NULL);
- }
- }
- RegisterCodeEntry(entry, should_dump, name_hint);
-}
-
-
-void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
- const char* name,
- Code* code) {
- if (!FLAG_gdbjit) return;
-
- EmbeddedVector<char, 256> buffer;
- StringBuilder builder(buffer.start(), buffer.length());
-
- builder.AddString(Tag2String(tag));
- if ((name != NULL) && (*name != '\0')) {
- builder.AddString(": ");
- builder.AddString(name);
- } else {
- builder.AddFormatted(": code object %p", static_cast<void*>(code));
- }
-
- AddCode(builder.Finalize(), code, tag, NULL, NULL);
-}
-
-
-void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
- String* name,
- Code* code) {
- if (!FLAG_gdbjit) return;
- AddCode(tag, name != NULL ? *name->ToCString(DISALLOW_NULLS) : NULL, code);
-}
-
-
-void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) {
- if (!FLAG_gdbjit) return;
-
- AddCode(tag, "", code);
-}
-
-
-void GDBJITInterface::RemoveCode(Code* code) {
- if (!FLAG_gdbjit) return;
-
- ScopedLock lock(mutex.Pointer());
- HashMap::Entry* e = GetEntries()->Lookup(code,
- HashForCodeObject(code),
- false);
- if (e == NULL) return;
-
- if (IsLineInfoTagged(e->value)) {
- delete UntagLineInfo(e->value);
- } else {
- JITCodeEntry* entry = static_cast<JITCodeEntry*>(e->value);
- UnregisterCodeEntry(entry);
- DestroyCodeEntry(entry);
- }
- e->value = NULL;
- GetEntries()->Remove(code, HashForCodeObject(code));
-}
-
-
-void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
- GDBJITLineInfo* line_info) {
- ScopedLock lock(mutex.Pointer());
- ASSERT(!IsLineInfoTagged(line_info));
- HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
- ASSERT(e->value == NULL);
- e->value = TagLineInfo(line_info);
-}
-
-
-} } // namespace v8::internal
-#endif
diff --git a/src/3rdparty/v8/src/gdb-jit.h b/src/3rdparty/v8/src/gdb-jit.h
deleted file mode 100644
index 0eca938..0000000
--- a/src/3rdparty/v8/src/gdb-jit.h
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_GDB_JIT_H_
-#define V8_GDB_JIT_H_
-
-#include "allocation.h"
-
-//
-// Basic implementation of GDB JIT Interface client.
-// GBD JIT Interface is supported in GDB 7.0 and above.
-// Currently on x64 and ia32 architectures and Linux OS are supported.
-//
-
-#ifdef ENABLE_GDB_JIT_INTERFACE
-#include "v8.h"
-#include "factory.h"
-
-namespace v8 {
-namespace internal {
-
-class CompilationInfo;
-
-#define CODE_TAGS_LIST(V) \
- V(LOAD_IC) \
- V(KEYED_LOAD_IC) \
- V(STORE_IC) \
- V(KEYED_STORE_IC) \
- V(CALL_IC) \
- V(CALL_INITIALIZE) \
- V(CALL_PRE_MONOMORPHIC) \
- V(CALL_NORMAL) \
- V(CALL_MEGAMORPHIC) \
- V(CALL_MISS) \
- V(STUB) \
- V(BUILTIN) \
- V(SCRIPT) \
- V(EVAL) \
- V(FUNCTION)
-
-class GDBJITLineInfo : public Malloced {
- public:
- GDBJITLineInfo()
- : pc_info_(10) { }
-
- void SetPosition(intptr_t pc, int pos, bool is_statement) {
- AddPCInfo(PCInfo(pc, pos, is_statement));
- }
-
- struct PCInfo {
- PCInfo(intptr_t pc, int pos, bool is_statement)
- : pc_(pc), pos_(pos), is_statement_(is_statement) { }
-
- intptr_t pc_;
- int pos_;
- bool is_statement_;
- };
-
- List<PCInfo>* pc_info() {
- return &pc_info_;
- }
-
- private:
- void AddPCInfo(const PCInfo& pc_info) {
- pc_info_.Add(pc_info);
- }
-
- List<PCInfo> pc_info_;
-};
-
-
-class GDBJITInterface: public AllStatic {
- public:
- enum CodeTag {
-#define V(x) x,
- CODE_TAGS_LIST(V)
-#undef V
- TAG_COUNT
- };
-
- static const char* Tag2String(CodeTag tag) {
- switch (tag) {
-#define V(x) case x: return #x;
- CODE_TAGS_LIST(V)
-#undef V
- default:
- return NULL;
- }
- }
-
- static void AddCode(const char* name,
- Code* code,
- CodeTag tag,
- Script* script,
- CompilationInfo* info);
-
- static void AddCode(Handle<String> name,
- Handle<Script> script,
- Handle<Code> code,
- CompilationInfo* info);
-
- static void AddCode(CodeTag tag, String* name, Code* code);
-
- static void AddCode(CodeTag tag, const char* name, Code* code);
-
- static void AddCode(CodeTag tag, Code* code);
-
- static void RemoveCode(Code* code);
-
- static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
-};
-
-#define GDBJIT(action) GDBJITInterface::action
-
-} } // namespace v8::internal
-#else
-#define GDBJIT(action) ((void) 0)
-#endif
-
-#endif
diff --git a/src/3rdparty/v8/src/global-handles.cc b/src/3rdparty/v8/src/global-handles.cc
deleted file mode 100644
index 299449a..0000000
--- a/src/3rdparty/v8/src/global-handles.cc
+++ /dev/null
@@ -1,863 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "global-handles.h"
-
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-ObjectGroup::~ObjectGroup() {
- if (info_ != NULL) info_->Dispose();
-}
-
-
-class GlobalHandles::Node {
- public:
- // State transition diagram:
- // FREE -> NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, FREE }
- enum State {
- FREE = 0,
- NORMAL, // Normal global handle.
- WEAK, // Flagged as weak but not yet finalized.
- PENDING, // Has been recognized as only reachable by weak handles.
- NEAR_DEATH // Callback has informed the handle is near death.
- };
-
- // Maps handle location (slot) to the containing node.
- static Node* FromLocation(Object** location) {
- ASSERT(OFFSET_OF(Node, object_) == 0);
- return reinterpret_cast<Node*>(location);
- }
-
- Node() {
- ASSERT(OFFSET_OF(Node, class_id_) == Internals::kNodeClassIdOffset);
- ASSERT(OFFSET_OF(Node, flags_) == Internals::kNodeFlagsOffset);
- STATIC_ASSERT(static_cast<int>(NodeState::kMask) ==
- Internals::kNodeStateMask);
- STATIC_ASSERT(WEAK == Internals::kNodeStateIsWeakValue);
- STATIC_ASSERT(NEAR_DEATH == Internals::kNodeStateIsNearDeathValue);
- STATIC_ASSERT(static_cast<int>(IsIndependent::kShift) ==
- Internals::kNodeIsIndependentShift);
- STATIC_ASSERT(static_cast<int>(IsPartiallyDependent::kShift) ==
- Internals::kNodeIsPartiallyDependentShift);
- }
-
-#ifdef DEBUG
- ~Node() {
- // TODO(1428): if it's a weak handle we should have invoked its callback.
- // Zap the values for eager trapping.
- object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
- class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
- index_ = 0;
- set_independent(false);
- set_partially_dependent(false);
- set_in_new_space_list(false);
- parameter_or_next_free_.next_free = NULL;
- weak_reference_callback_ = NULL;
- near_death_callback_ = NULL;
- }
-#endif
-
- void Initialize(int index, Node** first_free) {
- index_ = static_cast<uint8_t>(index);
- ASSERT(static_cast<int>(index_) == index);
- set_state(FREE);
- set_in_new_space_list(false);
- parameter_or_next_free_.next_free = *first_free;
- *first_free = this;
- }
-
- void Acquire(Object* object, GlobalHandles* global_handles) {
- ASSERT(state() == FREE);
- object_ = object;
- class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
- set_independent(false);
- set_partially_dependent(false);
- set_state(NORMAL);
- parameter_or_next_free_.parameter = NULL;
- weak_reference_callback_ = NULL;
- near_death_callback_ = NULL;
- IncreaseBlockUses(global_handles);
- }
-
- void Release(GlobalHandles* global_handles) {
- ASSERT(state() != FREE);
- set_state(FREE);
- // TODO(176056): Enable as soon as WebKit bindings are fixed.
-#ifdef DEBUG_TODO
- // Zap the values for eager trapping.
- object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
- class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
- set_independent(false);
- set_partially_dependent(false);
- weak_reference_callback_ = NULL;
- near_death_callback_ = NULL;
-#endif
- parameter_or_next_free_.next_free = global_handles->first_free_;
- global_handles->first_free_ = this;
- DecreaseBlockUses(global_handles);
- }
-
- // Object slot accessors.
- Object* object() const { return object_; }
- Object** location() { return &object_; }
- Handle<Object> handle() { return Handle<Object>(location()); }
-
- // Wrapper class ID accessors.
- bool has_wrapper_class_id() const {
- return class_id_ != v8::HeapProfiler::kPersistentHandleNoClassId;
- }
-
- uint16_t wrapper_class_id() const { return class_id_; }
-
- // State and flag accessors.
-
- State state() const {
- return NodeState::decode(flags_);
- }
- void set_state(State state) {
- flags_ = NodeState::update(flags_, state);
- }
-
- bool is_independent() {
- return IsIndependent::decode(flags_);
- }
- void set_independent(bool v) {
- flags_ = IsIndependent::update(flags_, v);
- }
-
- bool is_partially_dependent() {
- return IsPartiallyDependent::decode(flags_);
- }
- void set_partially_dependent(bool v) {
- flags_ = IsPartiallyDependent::update(flags_, v);
- }
-
- bool is_in_new_space_list() {
- return IsInNewSpaceList::decode(flags_);
- }
- void set_in_new_space_list(bool v) {
- flags_ = IsInNewSpaceList::update(flags_, v);
- }
-
- bool IsNearDeath() const {
- // Check for PENDING to ensure correct answer when processing callbacks.
- return state() == PENDING || state() == NEAR_DEATH;
- }
-
- bool IsWeak() const { return state() == WEAK; }
-
- bool IsRetainer() const { return state() != FREE; }
-
- bool IsStrongRetainer() const { return state() == NORMAL; }
-
- bool IsWeakRetainer() const {
- return state() == WEAK || state() == PENDING || state() == NEAR_DEATH;
- }
-
- void MarkPending() {
- ASSERT(state() == WEAK);
- set_state(PENDING);
- }
-
- // Independent flag accessors.
- void MarkIndependent() {
- ASSERT(state() != FREE);
- set_independent(true);
- }
-
- void MarkPartiallyDependent(GlobalHandles* global_handles) {
- ASSERT(state() != FREE);
- if (global_handles->isolate()->heap()->InNewSpace(object_)) {
- set_partially_dependent(true);
- }
- }
- void clear_partially_dependent() { set_partially_dependent(false); }
-
- // Callback accessor.
- // TODO(svenpanne) Re-enable or nuke later.
- // WeakReferenceCallback callback() { return callback_; }
-
- // Callback parameter accessors.
- void set_parameter(void* parameter) {
- ASSERT(state() != FREE);
- parameter_or_next_free_.parameter = parameter;
- }
- void* parameter() const {
- ASSERT(state() != FREE);
- return parameter_or_next_free_.parameter;
- }
-
- // Accessors for next free node in the free list.
- Node* next_free() {
- ASSERT(state() == FREE);
- return parameter_or_next_free_.next_free;
- }
- void set_next_free(Node* value) {
- ASSERT(state() == FREE);
- parameter_or_next_free_.next_free = value;
- }
-
- void MakeWeak(GlobalHandles* global_handles,
- void* parameter,
- WeakReferenceCallback weak_reference_callback,
- NearDeathCallback near_death_callback) {
- ASSERT(state() != FREE);
- set_state(WEAK);
- set_parameter(parameter);
- weak_reference_callback_ = weak_reference_callback;
- near_death_callback_ = near_death_callback;
- }
-
- void ClearWeakness(GlobalHandles* global_handles) {
- ASSERT(state() != FREE);
- set_state(NORMAL);
- set_parameter(NULL);
- }
-
- bool PostGarbageCollectionProcessing(Isolate* isolate,
- GlobalHandles* global_handles) {
- if (state() != Node::PENDING) return false;
- if (weak_reference_callback_ == NULL &&
- near_death_callback_ == NULL) {
- Release(global_handles);
- return false;
- }
- void* par = parameter();
- set_state(NEAR_DEATH);
- set_parameter(NULL);
-
- v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
- {
- // Check that we are not passing a finalized external string to
- // the callback.
- ASSERT(!object_->IsExternalAsciiString() ||
- ExternalAsciiString::cast(object_)->resource() != NULL);
- ASSERT(!object_->IsExternalTwoByteString() ||
- ExternalTwoByteString::cast(object_)->resource() != NULL);
- // Leaving V8.
- VMState state(isolate, EXTERNAL);
- if (weak_reference_callback_ != NULL) {
- weak_reference_callback_(object, par);
- }
- if (near_death_callback_ != NULL) {
- near_death_callback_(reinterpret_cast<v8::Isolate*>(isolate),
- object,
- par);
- }
- }
- // Absence of explicit cleanup or revival of weak handle
- // in most of the cases would lead to memory leak.
- ASSERT(state() != NEAR_DEATH);
- return true;
- }
-
- private:
- inline NodeBlock* FindBlock();
- inline void IncreaseBlockUses(GlobalHandles* global_handles);
- inline void DecreaseBlockUses(GlobalHandles* global_handles);
-
- // Storage for object pointer.
- // Placed first to avoid offset computation.
- Object* object_;
-
- // Next word stores class_id, index, state, and independent.
- // Note: the most aligned fields should go first.
-
- // Wrapper class ID.
- uint16_t class_id_;
-
- // Index in the containing handle block.
- uint8_t index_;
-
- // This stores three flags (independent, partially_dependent and
- // in_new_space_list) and a State.
- class NodeState: public BitField<State, 0, 4> {};
- class IsIndependent: public BitField<bool, 4, 1> {};
- class IsPartiallyDependent: public BitField<bool, 5, 1> {};
- class IsInNewSpaceList: public BitField<bool, 6, 1> {};
-
- uint8_t flags_;
-
- // Handle specific callback.
- WeakReferenceCallback weak_reference_callback_;
- NearDeathCallback near_death_callback_;
-
- // Provided data for callback. In FREE state, this is used for
- // the free list link.
- union {
- void* parameter;
- Node* next_free;
- } parameter_or_next_free_;
-
- DISALLOW_COPY_AND_ASSIGN(Node);
-};
-
-
-class GlobalHandles::NodeBlock {
- public:
- static const int kSize = 256;
-
- explicit NodeBlock(NodeBlock* next)
- : next_(next), used_nodes_(0), next_used_(NULL), prev_used_(NULL) {}
-
- void PutNodesOnFreeList(Node** first_free) {
- for (int i = kSize - 1; i >= 0; --i) {
- nodes_[i].Initialize(i, first_free);
- }
- }
-
- Node* node_at(int index) {
- ASSERT(0 <= index && index < kSize);
- return &nodes_[index];
- }
-
- void IncreaseUses(GlobalHandles* global_handles) {
- ASSERT(used_nodes_ < kSize);
- if (used_nodes_++ == 0) {
- NodeBlock* old_first = global_handles->first_used_block_;
- global_handles->first_used_block_ = this;
- next_used_ = old_first;
- prev_used_ = NULL;
- if (old_first == NULL) return;
- old_first->prev_used_ = this;
- }
- }
-
- void DecreaseUses(GlobalHandles* global_handles) {
- ASSERT(used_nodes_ > 0);
- if (--used_nodes_ == 0) {
- if (next_used_ != NULL) next_used_->prev_used_ = prev_used_;
- if (prev_used_ != NULL) prev_used_->next_used_ = next_used_;
- if (this == global_handles->first_used_block_) {
- global_handles->first_used_block_ = next_used_;
- }
- }
- }
-
- // Next block in the list of all blocks.
- NodeBlock* next() const { return next_; }
-
- // Next/previous block in the list of blocks with used nodes.
- NodeBlock* next_used() const { return next_used_; }
- NodeBlock* prev_used() const { return prev_used_; }
-
- private:
- Node nodes_[kSize];
- NodeBlock* const next_;
- int used_nodes_;
- NodeBlock* next_used_;
- NodeBlock* prev_used_;
-};
-
-
-GlobalHandles::NodeBlock* GlobalHandles::Node::FindBlock() {
- intptr_t ptr = reinterpret_cast<intptr_t>(this);
- ptr = ptr - index_ * sizeof(Node);
- NodeBlock* block = reinterpret_cast<NodeBlock*>(ptr);
- ASSERT(block->node_at(index_) == this);
- return block;
-}
-
-
-void GlobalHandles::Node::IncreaseBlockUses(GlobalHandles* global_handles) {
- FindBlock()->IncreaseUses(global_handles);
-}
-
-
-void GlobalHandles::Node::DecreaseBlockUses(GlobalHandles* global_handles) {
- FindBlock()->DecreaseUses(global_handles);
-}
-
-
-class GlobalHandles::NodeIterator {
- public:
- explicit NodeIterator(GlobalHandles* global_handles)
- : block_(global_handles->first_used_block_),
- index_(0) {}
-
- bool done() const { return block_ == NULL; }
-
- Node* node() const {
- ASSERT(!done());
- return block_->node_at(index_);
- }
-
- void Advance() {
- ASSERT(!done());
- if (++index_ < NodeBlock::kSize) return;
- index_ = 0;
- block_ = block_->next_used();
- }
-
- private:
- NodeBlock* block_;
- int index_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeIterator);
-};
-
-
-GlobalHandles::GlobalHandles(Isolate* isolate)
- : isolate_(isolate),
- number_of_global_handles_(0),
- first_block_(NULL),
- first_used_block_(NULL),
- first_free_(NULL),
- post_gc_processing_count_(0) {}
-
-
-GlobalHandles::~GlobalHandles() {
- NodeBlock* block = first_block_;
- while (block != NULL) {
- NodeBlock* tmp = block->next();
- delete block;
- block = tmp;
- }
- first_block_ = NULL;
-}
-
-
-Handle<Object> GlobalHandles::Create(Object* value) {
- isolate_->counters()->global_handles()->Increment();
- number_of_global_handles_++;
- if (first_free_ == NULL) {
- first_block_ = new NodeBlock(first_block_);
- first_block_->PutNodesOnFreeList(&first_free_);
- }
- ASSERT(first_free_ != NULL);
- // Take the first node in the free list.
- Node* result = first_free_;
- first_free_ = result->next_free();
- result->Acquire(value, this);
- if (isolate_->heap()->InNewSpace(value) &&
- !result->is_in_new_space_list()) {
- new_space_nodes_.Add(result);
- result->set_in_new_space_list(true);
- }
- return result->handle();
-}
-
-
-void GlobalHandles::Destroy(Object** location) {
- isolate_->counters()->global_handles()->Decrement();
- number_of_global_handles_--;
- if (location == NULL) return;
- Node::FromLocation(location)->Release(this);
-}
-
-
-void GlobalHandles::MakeWeak(Object** location,
- void* parameter,
- WeakReferenceCallback weak_reference_callback,
- NearDeathCallback near_death_callback) {
- ASSERT((weak_reference_callback != NULL) !=
- (near_death_callback != NULL));
- Node::FromLocation(location)->MakeWeak(this,
- parameter,
- weak_reference_callback,
- near_death_callback);
-}
-
-
-void GlobalHandles::ClearWeakness(Object** location) {
- Node::FromLocation(location)->ClearWeakness(this);
-}
-
-
-void GlobalHandles::MarkIndependent(Object** location) {
- Node::FromLocation(location)->MarkIndependent();
-}
-
-
-void GlobalHandles::MarkPartiallyDependent(Object** location) {
- Node::FromLocation(location)->MarkPartiallyDependent(this);
-}
-
-
-bool GlobalHandles::IsIndependent(Object** location) {
- return Node::FromLocation(location)->is_independent();
-}
-
-
-bool GlobalHandles::IsNearDeath(Object** location) {
- return Node::FromLocation(location)->IsNearDeath();
-}
-
-
-bool GlobalHandles::IsWeak(Object** location) {
- return Node::FromLocation(location)->IsWeak();
-}
-
-
-void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsWeakRetainer()) v->VisitPointer(it.node()->location());
- }
-}
-
-
-void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsWeak() && f(it.node()->location())) {
- it.node()->MarkPending();
- }
- }
-}
-
-
-void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
- if (node->IsStrongRetainer() ||
- (node->IsWeakRetainer() && !node->is_independent() &&
- !node->is_partially_dependent())) {
- v->VisitPointer(node->location());
- }
- }
-}
-
-
-void GlobalHandles::IdentifyNewSpaceWeakIndependentHandles(
- WeakSlotCallbackWithHeap f) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
- ASSERT(node->is_in_new_space_list());
- if ((node->is_independent() || node->is_partially_dependent()) &&
- node->IsWeak() && f(isolate_->heap(), node->location())) {
- node->MarkPending();
- }
- }
-}
-
-
-void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
- ASSERT(node->is_in_new_space_list());
- if ((node->is_independent() || node->is_partially_dependent()) &&
- node->IsWeakRetainer()) {
- v->VisitPointer(node->location());
- }
- }
-}
-
-
-bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
- WeakSlotCallbackWithHeap can_skip) {
- int last = 0;
- bool any_group_was_visited = false;
- for (int i = 0; i < object_groups_.length(); i++) {
- ObjectGroup* entry = object_groups_.at(i);
- ASSERT(entry != NULL);
-
- Object*** objects = entry->objects_;
- bool group_should_be_visited = false;
- for (size_t j = 0; j < entry->length_; j++) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- if (!can_skip(isolate_->heap(), &object)) {
- group_should_be_visited = true;
- break;
- }
- }
- }
-
- if (!group_should_be_visited) {
- object_groups_[last++] = entry;
- continue;
- }
-
- // An object in the group requires visiting, so iterate over all
- // objects in the group.
- for (size_t j = 0; j < entry->length_; ++j) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- v->VisitPointer(&object);
- any_group_was_visited = true;
- }
- }
-
- // Once the entire group has been iterated over, set the object
- // group to NULL so it won't be processed again.
- entry->Dispose();
- object_groups_.at(i) = NULL;
- }
- object_groups_.Rewind(last);
- return any_group_was_visited;
-}
-
-
-bool GlobalHandles::PostGarbageCollectionProcessing(
- GarbageCollector collector, GCTracer* tracer) {
- // Process weak global handle callbacks. This must be done after the
- // GC is completely done, because the callbacks may invoke arbitrary
- // API functions.
- ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
- const int initial_post_gc_processing_count = ++post_gc_processing_count_;
- bool next_gc_likely_to_collect_more = false;
- if (collector == SCAVENGER) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
- ASSERT(node->is_in_new_space_list());
- // Skip dependent handles. Their weak callbacks might expect to be
- // called between two global garbage collection callbacks which
- // are not called for minor collections.
- if (!node->is_independent() && !node->is_partially_dependent()) {
- continue;
- }
- node->clear_partially_dependent();
- if (node->PostGarbageCollectionProcessing(isolate_, this)) {
- if (initial_post_gc_processing_count != post_gc_processing_count_) {
- // Weak callback triggered another GC and another round of
- // PostGarbageCollection processing. The current node might
- // have been deleted in that round, so we need to bail out (or
- // restart the processing).
- return next_gc_likely_to_collect_more;
- }
- }
- if (!node->IsRetainer()) {
- next_gc_likely_to_collect_more = true;
- }
- }
- } else {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- it.node()->clear_partially_dependent();
- if (it.node()->PostGarbageCollectionProcessing(isolate_, this)) {
- if (initial_post_gc_processing_count != post_gc_processing_count_) {
- // See the comment above.
- return next_gc_likely_to_collect_more;
- }
- }
- if (!it.node()->IsRetainer()) {
- next_gc_likely_to_collect_more = true;
- }
- }
- }
- // Update the list of new space nodes.
- int last = 0;
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
- ASSERT(node->is_in_new_space_list());
- if (node->IsRetainer()) {
- if (isolate_->heap()->InNewSpace(node->object())) {
- new_space_nodes_[last++] = node;
- tracer->increment_nodes_copied_in_new_space();
- } else {
- node->set_in_new_space_list(false);
- tracer->increment_nodes_promoted();
- }
- } else {
- node->set_in_new_space_list(false);
- tracer->increment_nodes_died_in_new_space();
- }
- }
- new_space_nodes_.Rewind(last);
- return next_gc_likely_to_collect_more;
-}
-
-
-void GlobalHandles::IterateStrongRoots(ObjectVisitor* v) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsStrongRetainer()) {
- v->VisitPointer(it.node()->location());
- }
- }
-}
-
-
-void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsRetainer()) {
- v->VisitPointer(it.node()->location());
- }
- }
-}
-
-
-void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsRetainer() && it.node()->has_wrapper_class_id()) {
- v->VisitEmbedderReference(it.node()->location(),
- it.node()->wrapper_class_id());
- }
- }
-}
-
-
-void GlobalHandles::IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
- if (node->IsRetainer() && node->has_wrapper_class_id()) {
- v->VisitEmbedderReference(node->location(),
- node->wrapper_class_id());
- }
- }
-}
-
-
-int GlobalHandles::NumberOfWeakHandles() {
- int count = 0;
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsWeakRetainer()) {
- count++;
- }
- }
- return count;
-}
-
-
-int GlobalHandles::NumberOfGlobalObjectWeakHandles() {
- int count = 0;
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsWeakRetainer() &&
- it.node()->object()->IsJSGlobalObject()) {
- count++;
- }
- }
- return count;
-}
-
-
-void GlobalHandles::RecordStats(HeapStats* stats) {
- *stats->global_handle_count = 0;
- *stats->weak_global_handle_count = 0;
- *stats->pending_global_handle_count = 0;
- *stats->near_death_global_handle_count = 0;
- *stats->free_global_handle_count = 0;
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- *stats->global_handle_count += 1;
- if (it.node()->state() == Node::WEAK) {
- *stats->weak_global_handle_count += 1;
- } else if (it.node()->state() == Node::PENDING) {
- *stats->pending_global_handle_count += 1;
- } else if (it.node()->state() == Node::NEAR_DEATH) {
- *stats->near_death_global_handle_count += 1;
- } else if (it.node()->state() == Node::FREE) {
- *stats->free_global_handle_count += 1;
- }
- }
-}
-
-#ifdef DEBUG
-
-void GlobalHandles::PrintStats() {
- int total = 0;
- int weak = 0;
- int pending = 0;
- int near_death = 0;
- int destroyed = 0;
-
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- total++;
- if (it.node()->state() == Node::WEAK) weak++;
- if (it.node()->state() == Node::PENDING) pending++;
- if (it.node()->state() == Node::NEAR_DEATH) near_death++;
- if (it.node()->state() == Node::FREE) destroyed++;
- }
-
- PrintF("Global Handle Statistics:\n");
- PrintF(" allocated memory = %" V8_PTR_PREFIX "dB\n", sizeof(Node) * total);
- PrintF(" # weak = %d\n", weak);
- PrintF(" # pending = %d\n", pending);
- PrintF(" # near_death = %d\n", near_death);
- PrintF(" # free = %d\n", destroyed);
- PrintF(" # total = %d\n", total);
-}
-
-void GlobalHandles::Print() {
- PrintF("Global handles:\n");
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- PrintF(" handle %p to %p%s\n",
- reinterpret_cast<void*>(it.node()->location()),
- reinterpret_cast<void*>(it.node()->object()),
- it.node()->IsWeak() ? " (weak)" : "");
- }
-}
-
-#endif
-
-
-
-void GlobalHandles::AddObjectGroup(Object*** handles,
- size_t length,
- v8::RetainedObjectInfo* info) {
-#ifdef DEBUG
- for (size_t i = 0; i < length; ++i) {
- ASSERT(!Node::FromLocation(handles[i])->is_independent());
- }
-#endif
- if (length == 0) {
- if (info != NULL) info->Dispose();
- return;
- }
- object_groups_.Add(ObjectGroup::New(handles, length, info));
-}
-
-
-void GlobalHandles::AddImplicitReferences(HeapObject** parent,
- Object*** children,
- size_t length) {
-#ifdef DEBUG
- ASSERT(!Node::FromLocation(BitCast<Object**>(parent))->is_independent());
- for (size_t i = 0; i < length; ++i) {
- ASSERT(!Node::FromLocation(children[i])->is_independent());
- }
-#endif
- if (length == 0) return;
- implicit_ref_groups_.Add(ImplicitRefGroup::New(parent, children, length));
-}
-
-
-void GlobalHandles::RemoveObjectGroups() {
- for (int i = 0; i < object_groups_.length(); i++) {
- object_groups_.at(i)->Dispose();
- }
- object_groups_.Clear();
-}
-
-
-void GlobalHandles::RemoveImplicitRefGroups() {
- for (int i = 0; i < implicit_ref_groups_.length(); i++) {
- implicit_ref_groups_.at(i)->Dispose();
- }
- implicit_ref_groups_.Clear();
-}
-
-
-void GlobalHandles::TearDown() {
- // TODO(1428): invoke weak callbacks.
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/global-handles.h b/src/3rdparty/v8/src/global-handles.h
deleted file mode 100644
index 9900144..0000000
--- a/src/3rdparty/v8/src/global-handles.h
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_GLOBAL_HANDLES_H_
-#define V8_GLOBAL_HANDLES_H_
-
-#include "../include/v8-profiler.h"
-
-#include "list.h"
-
-namespace v8 {
-namespace internal {
-
-// Structure for tracking global handles.
-// A single list keeps all the allocated global handles.
-// Destroyed handles stay in the list but is added to the free list.
-// At GC the destroyed global handles are removed from the free list
-// and deallocated.
-
-// An object group is treated like a single JS object: if one of object in
-// the group is alive, all objects in the same group are considered alive.
-// An object group is used to simulate object relationship in a DOM tree.
-class ObjectGroup {
- public:
- static ObjectGroup* New(Object*** handles,
- size_t length,
- v8::RetainedObjectInfo* info) {
- ASSERT(length > 0);
- ObjectGroup* group = reinterpret_cast<ObjectGroup*>(
- malloc(OFFSET_OF(ObjectGroup, objects_[length])));
- group->length_ = length;
- group->info_ = info;
- CopyWords(group->objects_, handles, static_cast<int>(length));
- return group;
- }
-
- void Dispose() {
- if (info_ != NULL) info_->Dispose();
- free(this);
- }
-
- size_t length_;
- v8::RetainedObjectInfo* info_;
- Object** objects_[1]; // Variable sized array.
-
- private:
- void* operator new(size_t size);
- void operator delete(void* p);
- ~ObjectGroup();
- DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectGroup);
-};
-
-
-// An implicit references group consists of two parts: a parent object and
-// a list of children objects. If the parent is alive, all the children
-// are alive too.
-class ImplicitRefGroup {
- public:
- static ImplicitRefGroup* New(HeapObject** parent,
- Object*** children,
- size_t length) {
- ASSERT(length > 0);
- ImplicitRefGroup* group = reinterpret_cast<ImplicitRefGroup*>(
- malloc(OFFSET_OF(ImplicitRefGroup, children_[length])));
- group->parent_ = parent;
- group->length_ = length;
- CopyWords(group->children_, children, static_cast<int>(length));
- return group;
- }
-
- void Dispose() {
- free(this);
- }
-
- HeapObject** parent_;
- size_t length_;
- Object** children_[1]; // Variable sized array.
-
- private:
- void* operator new(size_t size);
- void operator delete(void* p);
- ~ImplicitRefGroup();
- DISALLOW_IMPLICIT_CONSTRUCTORS(ImplicitRefGroup);
-};
-
-
-class GlobalHandles {
- public:
- ~GlobalHandles();
-
- // Creates a new global handle that is alive until Destroy is called.
- Handle<Object> Create(Object* value);
-
- // Destroy a global handle.
- void Destroy(Object** location);
-
- // Make the global handle weak and set the callback parameter for the
- // handle. When the garbage collector recognizes that only weak global
- // handles point to an object the handles are cleared and the callback
- // function is invoked (for each handle) with the handle and corresponding
- // parameter as arguments. Note: cleared means set to Smi::FromInt(0). The
- // reason is that Smi::FromInt(0) does not change during garage collection.
- void MakeWeak(Object** location,
- void* parameter,
- WeakReferenceCallback weak_reference_callback,
- NearDeathCallback near_death_callback);
-
- void RecordStats(HeapStats* stats);
-
- // Returns the current number of weak handles.
- int NumberOfWeakHandles();
-
- // Returns the current number of weak handles to global objects.
- // These handles are also included in NumberOfWeakHandles().
- int NumberOfGlobalObjectWeakHandles();
-
- // Returns the current number of handles to global objects.
- int NumberOfGlobalHandles() {
- return number_of_global_handles_;
- }
-
- // Clear the weakness of a global handle.
- void ClearWeakness(Object** location);
-
- // Clear the weakness of a global handle.
- void MarkIndependent(Object** location);
-
- // Mark the reference to this object externaly unreachable.
- void MarkPartiallyDependent(Object** location);
-
- static bool IsIndependent(Object** location);
-
- // Tells whether global handle is near death.
- static bool IsNearDeath(Object** location);
-
- // Tells whether global handle is weak.
- static bool IsWeak(Object** location);
-
- // Process pending weak handles.
- // Returns true if next major GC is likely to collect more garbage.
- bool PostGarbageCollectionProcessing(GarbageCollector collector,
- GCTracer* tracer);
-
- // Iterates over all strong handles.
- void IterateStrongRoots(ObjectVisitor* v);
-
- // Iterates over all handles.
- void IterateAllRoots(ObjectVisitor* v);
-
- // Iterates over all handles that have embedder-assigned class ID.
- void IterateAllRootsWithClassIds(ObjectVisitor* v);
-
- // Iterates over all handles in the new space that have embedder-assigned
- // class ID.
- void IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v);
-
- // Iterates over all weak roots in heap.
- void IterateWeakRoots(ObjectVisitor* v);
-
- // Find all weak handles satisfying the callback predicate, mark
- // them as pending.
- void IdentifyWeakHandles(WeakSlotCallback f);
-
- // NOTE: Three ...NewSpace... functions below are used during
- // scavenge collections and iterate over sets of handles that are
- // guaranteed to contain all handles holding new space objects (but
- // may also include old space objects).
-
- // Iterates over strong and dependent handles. See the node above.
- void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v);
-
- // Finds weak independent or partially independent handles satisfying
- // the callback predicate and marks them as pending. See the note above.
- void IdentifyNewSpaceWeakIndependentHandles(WeakSlotCallbackWithHeap f);
-
- // Iterates over weak independent or partially independent handles.
- // See the note above.
- void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
-
- // Iterate over objects in object groups that have at least one object
- // which requires visiting. The callback has to return true if objects
- // can be skipped and false otherwise.
- bool IterateObjectGroups(ObjectVisitor* v, WeakSlotCallbackWithHeap can_skip);
-
- // Add an object group.
- // Should be only used in GC callback function before a collection.
- // All groups are destroyed after a garbage collection.
- void AddObjectGroup(Object*** handles,
- size_t length,
- v8::RetainedObjectInfo* info);
-
- // Add an implicit references' group.
- // Should be only used in GC callback function before a collection.
- // All groups are destroyed after a mark-compact collection.
- void AddImplicitReferences(HeapObject** parent,
- Object*** children,
- size_t length);
-
- // Returns the object groups.
- List<ObjectGroup*>* object_groups() { return &object_groups_; }
-
- // Returns the implicit references' groups.
- List<ImplicitRefGroup*>* implicit_ref_groups() {
- return &implicit_ref_groups_;
- }
-
- // Remove bags, this should only happen after GC.
- void RemoveObjectGroups();
- void RemoveImplicitRefGroups();
-
- // Tear down the global handle structure.
- void TearDown();
-
- Isolate* isolate() { return isolate_; }
-
-#ifdef DEBUG
- void PrintStats();
- void Print();
-#endif
-
- private:
- explicit GlobalHandles(Isolate* isolate);
-
- // Internal node structures.
- class Node;
- class NodeBlock;
- class NodeIterator;
-
- Isolate* isolate_;
-
- // Field always containing the number of handles to global objects.
- int number_of_global_handles_;
-
- // List of all allocated node blocks.
- NodeBlock* first_block_;
-
- // List of node blocks with used nodes.
- NodeBlock* first_used_block_;
-
- // Free list of nodes.
- Node* first_free_;
-
- // Contains all nodes holding new space objects. Note: when the list
- // is accessed, some of the objects may have been promoted already.
- List<Node*> new_space_nodes_;
-
- int post_gc_processing_count_;
-
- List<ObjectGroup*> object_groups_;
- List<ImplicitRefGroup*> implicit_ref_groups_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(GlobalHandles);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_GLOBAL_HANDLES_H_
diff --git a/src/3rdparty/v8/src/globals.h b/src/3rdparty/v8/src/globals.h
deleted file mode 100644
index 7205361..0000000
--- a/src/3rdparty/v8/src/globals.h
+++ /dev/null
@@ -1,409 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_GLOBALS_H_
-#define V8_GLOBALS_H_
-
-// Define V8_INFINITY
-#define V8_INFINITY INFINITY
-
-// GCC specific stuff
-#ifdef __GNUC__
-
-#define __GNUC_VERSION_FOR_INFTY__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
-
-// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
-// warning flag and certain versions of GCC due to a bug:
-// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
-// For now, we use the more involved template-based version from <limits>, but
-// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
-// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro
-#if __GNUC_VERSION_FOR_INFTY__ >= 29600 && __GNUC_VERSION_FOR_INFTY__ < 40100
-#include <limits>
-#undef V8_INFINITY
-#define V8_INFINITY std::numeric_limits<double>::infinity()
-#endif
-#undef __GNUC_VERSION_FOR_INFTY__
-
-#endif // __GNUC__
-
-#ifdef _MSC_VER
-#undef V8_INFINITY
-#define V8_INFINITY HUGE_VAL
-#endif
-
-
-#include "../include/v8stdint.h"
-
-namespace v8 {
-namespace internal {
-
-// Processor architecture detection. For more info on what's defined, see:
-// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
-// http://www.agner.org/optimize/calling_conventions.pdf
-// or with gcc, run: "echo | gcc -E -dM -"
-#if defined(_M_X64) || defined(__x86_64__)
-#define V8_HOST_ARCH_X64 1
-#define V8_HOST_ARCH_64_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
-#elif defined(_M_IX86) || defined(__i386__)
-#define V8_HOST_ARCH_IA32 1
-#define V8_HOST_ARCH_32_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
-#elif defined(__ARMEL__) || defined(_M_ARM)
-#define V8_HOST_ARCH_ARM 1
-#define V8_HOST_ARCH_32_BIT 1
-// Some CPU-OS combinations allow unaligned access on ARM. We assume
-// that unaligned accesses are not allowed unless the build system
-// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_HOST_CAN_READ_UNALIGNED 1
-#endif
-#elif defined(__MIPSEL__)
-#define V8_HOST_ARCH_MIPS 1
-#define V8_HOST_ARCH_32_BIT 1
-#else
-#error Host architecture was not detected as supported by v8
-#endif
-
-// Target architecture detection. This may be set externally. If not, detect
-// in the same way as the host architecture, that is, target the native
-// environment as presented by the compiler.
-#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_IA32) && \
- !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
-#if defined(_M_X64) || defined(__x86_64__)
-#define V8_TARGET_ARCH_X64 1
-#elif defined(_M_IX86) || defined(__i386__)
-#define V8_TARGET_ARCH_IA32 1
-#elif defined(__ARMEL__)
-#define V8_TARGET_ARCH_ARM 1
-#elif defined(__MIPSEL__)
-#define V8_TARGET_ARCH_MIPS 1
-#else
-#error Target architecture was not detected as supported by v8
-#endif
-#endif
-
-// Check for supported combinations of host and target architectures.
-#if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32)
-#error Target architecture ia32 is only supported on ia32 host
-#endif
-#if defined(V8_TARGET_ARCH_X64) && !defined(V8_HOST_ARCH_X64)
-#error Target architecture x64 is only supported on x64 host
-#endif
-#if (defined(V8_TARGET_ARCH_ARM) && \
- !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_ARM)))
-#error Target architecture arm is only supported on arm and ia32 host
-#endif
-#if (defined(V8_TARGET_ARCH_MIPS) && \
- !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_MIPS)))
-#error Target architecture mips is only supported on mips and ia32 host
-#endif
-
-// Determine whether we are running in a simulated environment.
-// Setting USE_SIMULATOR explicitly from the build script will force
-// the use of a simulated environment.
-#if !defined(USE_SIMULATOR)
-#if (defined(V8_TARGET_ARCH_ARM) && !defined(V8_HOST_ARCH_ARM) && !defined(_WIN32_WCE))
-#define USE_SIMULATOR 1
-#endif
-#if (defined(V8_TARGET_ARCH_MIPS) && !defined(V8_HOST_ARCH_MIPS))
-#define USE_SIMULATOR 1
-#endif
-#endif
-
-// Support for alternative bool type. This is only enabled if the code is
-// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
-// For instance, 'bool b = "false";' results in b == true! This is a hidden
-// source of bugs.
-// However, redefining the bool type does have some negative impact on some
-// platforms. It gives rise to compiler warnings (i.e. with
-// MSVC) in the API header files when mixing code that uses the standard
-// bool with code that uses the redefined version.
-// This does not actually belong in the platform code, but needs to be
-// defined here because the platform code uses bool, and platform.h is
-// include very early in the main include file.
-
-#ifdef USE_MYBOOL
-typedef unsigned int __my_bool__;
-#define bool __my_bool__ // use 'indirection' to avoid name clashes
-#endif
-
-typedef uint8_t byte;
-typedef byte* Address;
-
-// Define our own macros for writing 64-bit constants. This is less fragile
-// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
-// works on compilers that don't have it (like MSVC).
-#if V8_HOST_ARCH_64_BIT
-#if defined(_MSC_VER)
-#define V8_UINT64_C(x) (x ## UI64)
-#define V8_INT64_C(x) (x ## I64)
-#define V8_INTPTR_C(x) (x ## I64)
-#define V8_PTR_PREFIX "ll"
-#elif defined(__MINGW64__)
-#define V8_UINT64_C(x) (x ## ULL)
-#define V8_INT64_C(x) (x ## LL)
-#define V8_INTPTR_C(x) (x ## LL)
-#define V8_PTR_PREFIX "I64"
-#else
-#define V8_UINT64_C(x) (x ## UL)
-#define V8_INT64_C(x) (x ## L)
-#define V8_INTPTR_C(x) (x ## L)
-#define V8_PTR_PREFIX "l"
-#endif
-#else // V8_HOST_ARCH_64_BIT
-#define V8_INTPTR_C(x) (x)
-#define V8_PTR_PREFIX ""
-#endif // V8_HOST_ARCH_64_BIT
-
-// The following macro works on both 32 and 64-bit platforms.
-// Usage: instead of writing 0x1234567890123456
-// write V8_2PART_UINT64_C(0x12345678,90123456);
-#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
-
-#define V8PRIxPTR V8_PTR_PREFIX "x"
-#define V8PRIdPTR V8_PTR_PREFIX "d"
-#define V8PRIuPTR V8_PTR_PREFIX "u"
-
-// Fix for Mac OS X defining uintptr_t as "unsigned long":
-#if defined(__APPLE__) && defined(__MACH__)
-#undef V8PRIxPTR
-#define V8PRIxPTR "lx"
-#endif
-
-#if (defined(__APPLE__) && defined(__MACH__)) || \
- defined(__FreeBSD__) || defined(__OpenBSD__)
-#define USING_BSD_ABI
-#endif
-
-// -----------------------------------------------------------------------------
-// Constants
-
-const int KB = 1024;
-const int MB = KB * KB;
-const int GB = KB * KB * KB;
-const int kMaxInt = 0x7FFFFFFF;
-const int kMinInt = -kMaxInt - 1;
-
-const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
-
-const int kCharSize = sizeof(char); // NOLINT
-const int kShortSize = sizeof(short); // NOLINT
-const int kIntSize = sizeof(int); // NOLINT
-const int kDoubleSize = sizeof(double); // NOLINT
-const int kIntptrSize = sizeof(intptr_t); // NOLINT
-const int kPointerSize = sizeof(void*); // NOLINT
-
-const int kDoubleSizeLog2 = 3;
-
-// Size of the state of a the random number generator.
-const int kRandomStateSize = 2 * kIntSize;
-
-#if V8_HOST_ARCH_64_BIT
-const int kPointerSizeLog2 = 3;
-const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
-const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
-#else
-const int kPointerSizeLog2 = 2;
-const intptr_t kIntptrSignBit = 0x80000000;
-const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
-#endif
-
-const int kBitsPerByte = 8;
-const int kBitsPerByteLog2 = 3;
-const int kBitsPerPointer = kPointerSize * kBitsPerByte;
-const int kBitsPerInt = kIntSize * kBitsPerByte;
-
-// IEEE 754 single precision floating point number bit layout.
-const uint32_t kBinary32SignMask = 0x80000000u;
-const uint32_t kBinary32ExponentMask = 0x7f800000u;
-const uint32_t kBinary32MantissaMask = 0x007fffffu;
-const int kBinary32ExponentBias = 127;
-const int kBinary32MaxExponent = 0xFE;
-const int kBinary32MinExponent = 0x01;
-const int kBinary32MantissaBits = 23;
-const int kBinary32ExponentShift = 23;
-
-// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
-// other bits set.
-const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
-
-// Latin1/UTF-16 constants
-// Code-point values in Unicode 4.0 are 21 bits wide.
-// Code units in UTF-16 are 16 bits wide.
-typedef uint16_t uc16;
-typedef int32_t uc32;
-const int kOneByteSize = kCharSize;
-const int kUC16Size = sizeof(uc16); // NOLINT
-
-
-// The expression OFFSET_OF(type, field) computes the byte-offset
-// of the specified field relative to the containing type. This
-// corresponds to 'offsetof' (in stddef.h), except that it doesn't
-// use 0 or NULL, which causes a problem with the compiler warnings
-// we have enabled (which is also why 'offsetof' doesn't seem to work).
-// Here we simply use the non-zero value 4, which seems to work.
-#define OFFSET_OF(type, field) \
- (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
-
-
-// The expression ARRAY_SIZE(a) is a compile-time constant of type
-// size_t which represents the number of elements of the given
-// array. You should only use ARRAY_SIZE on statically allocated
-// arrays.
-#define ARRAY_SIZE(a) \
- ((sizeof(a) / sizeof(*(a))) / \
- static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
-
-
-// The USE(x) template is used to silence C++ compiler warnings
-// issued for (yet) unused variables (typically parameters).
-template <typename T>
-inline void USE(T) { }
-
-
-// FUNCTION_ADDR(f) gets the address of a C function f.
-#define FUNCTION_ADDR(f) \
- (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))
-
-
-// FUNCTION_CAST<F>(addr) casts an address into a function
-// of type F. Used to invoke generated code from within C.
-template <typename F>
-F FUNCTION_CAST(Address addr) {
- return reinterpret_cast<F>(reinterpret_cast<intptr_t>(addr));
-}
-
-
-// A macro to disallow the evil copy constructor and operator= functions
-// This should be used in the private: declarations for a class
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
-
-
-// A macro to disallow all the implicit constructors, namely the
-// default constructor, copy constructor and operator= functions.
-//
-// This should be used in the private: declarations for a class
-// that wants to prevent anyone from instantiating it. This is
-// especially useful for classes containing only static methods.
-#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
- TypeName(); \
- DISALLOW_COPY_AND_ASSIGN(TypeName)
-
-
-// Define used for helping GCC to make better inlining. Don't bother for debug
-// builds. On GCC 3.4.5 using __attribute__((always_inline)) causes compilation
-// errors in debug build.
-#if defined(__GNUC__) && !defined(DEBUG)
-#if (__GNUC__ >= 4)
-#define INLINE(header) inline header __attribute__((always_inline))
-#define NO_INLINE(header) header __attribute__((noinline))
-#else
-#define INLINE(header) inline __attribute__((always_inline)) header
-#define NO_INLINE(header) __attribute__((noinline)) header
-#endif
-#elif defined(_MSC_VER) && !defined(DEBUG)
-#define INLINE(header) __forceinline header
-#define NO_INLINE(header) header
-#else
-#define INLINE(header) inline header
-#define NO_INLINE(header) header
-#endif
-
-
-#if defined(__GNUC__) && __GNUC__ >= 4
-#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
-#else
-#define MUST_USE_RESULT
-#endif
-
-
-// Define DISABLE_ASAN macros.
-#if defined(__has_feature)
-#if __has_feature(address_sanitizer)
-#define DISABLE_ASAN __attribute__((no_address_safety_analysis))
-#endif
-#endif
-
-
-#ifndef DISABLE_ASAN
-#define DISABLE_ASAN
-#endif
-
-
-// -----------------------------------------------------------------------------
-// Forward declarations for frequently used classes
-// (sorted alphabetically)
-
-class FreeStoreAllocationPolicy;
-template <typename T, class P = FreeStoreAllocationPolicy> class List;
-
-// -----------------------------------------------------------------------------
-// Declarations for use in both the preparser and the rest of V8.
-
-// The different language modes that V8 implements. ES5 defines two language
-// modes: an unrestricted mode respectively a strict mode which are indicated by
-// CLASSIC_MODE respectively STRICT_MODE in the enum. The harmony spec drafts
-// for the next ES standard specify a new third mode which is called 'extended
-// mode'. The extended mode is only available if the harmony flag is set. It is
-// based on the 'strict mode' and adds new functionality to it. This means that
-// most of the semantics of these two modes coincide.
-//
-// In the current draft the term 'base code' is used to refer to code that is
-// neither in strict nor extended mode. However, the more distinguishing term
-// 'classic mode' is used in V8 instead to avoid mix-ups.
-
-enum LanguageMode {
- CLASSIC_MODE,
- STRICT_MODE,
- EXTENDED_MODE
-};
-
-
-// The Strict Mode (ECMA-262 5th edition, 4.2.2).
-//
-// This flag is used in the backend to represent the language mode. So far
-// there is no semantic difference between the strict and the extended mode in
-// the backend, so both modes are represented by the kStrictMode value.
-enum StrictModeFlag {
- kNonStrictMode,
- kStrictMode
-};
-
-// The QML Compilation Mode
-enum QmlModeFlag {
- kNonQmlMode,
- kQmlMode
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_GLOBALS_H_
diff --git a/src/3rdparty/v8/src/handles-inl.h b/src/3rdparty/v8/src/handles-inl.h
deleted file mode 100644
index c1daae2..0000000
--- a/src/3rdparty/v8/src/handles-inl.h
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#ifndef V8_HANDLES_INL_H_
-#define V8_HANDLES_INL_H_
-
-#include "api.h"
-#include "apiutils.h"
-#include "handles.h"
-#include "isolate.h"
-
-namespace v8 {
-namespace internal {
-
-template<typename T>
-Handle<T>::Handle(T* obj) {
- ASSERT(!obj->IsFailure());
- location_ = HandleScope::CreateHandle(obj->GetIsolate(), obj);
-}
-
-
-template<typename T>
-Handle<T>::Handle(T* obj, Isolate* isolate) {
- ASSERT(!obj->IsFailure());
- location_ = HandleScope::CreateHandle(isolate, obj);
-}
-
-
-template <typename T>
-inline T* Handle<T>::operator*() const {
- ASSERT(location_ != NULL);
- ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
- SLOW_ASSERT(ISOLATE->allow_handle_deref());
- return *BitCast<T**>(location_);
-}
-
-template <typename T>
-inline T** Handle<T>::location() const {
- ASSERT(location_ == NULL ||
- reinterpret_cast<Address>(*location_) != kZapValue);
- SLOW_ASSERT(ISOLATE->allow_handle_deref());
- return location_;
-}
-
-
-HandleScope::HandleScope(Isolate* isolate) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
- isolate_ = isolate;
- prev_next_ = current->next;
- prev_limit_ = current->limit;
- current->level++;
-}
-
-
-HandleScope::~HandleScope() {
- CloseScope();
-}
-
-void HandleScope::CloseScope() {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- current->next = prev_next_;
- current->level--;
- if (current->limit != prev_limit_) {
- current->limit = prev_limit_;
- DeleteExtensions(isolate_);
- }
-#ifdef DEBUG
- ZapRange(prev_next_, prev_limit_);
-#endif
-}
-
-
-template <typename T>
-Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
- T* value = *handle_value;
- // Throw away all handles in the current scope.
- CloseScope();
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- // Allocate one handle in the parent scope.
- ASSERT(current->level > 0);
- Handle<T> result(CreateHandle<T>(isolate_, value));
- // Reinitialize the current scope (so that it's ready
- // to be used or closed again).
- prev_next_ = current->next;
- prev_limit_ = current->limit;
- current->level++;
- return result;
-}
-
-
-template <typename T>
-T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
-
- internal::Object** cur = current->next;
- if (cur == current->limit) cur = Extend(isolate);
- // Update the current next field, set the value in the created
- // handle, and return the result.
- ASSERT(cur < current->limit);
- current->next = cur + 1;
-
- T** result = reinterpret_cast<T**>(cur);
- *result = value;
- return result;
-}
-
-
-#ifdef DEBUG
-inline NoHandleAllocation::NoHandleAllocation(Isolate* isolate)
- : isolate_(isolate) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
-
- active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active_) {
- // Shrink the current handle scope to make it impossible to do
- // handle allocations without an explicit handle scope.
- current->limit = current->next;
-
- level_ = current->level;
- current->level = 0;
- }
-}
-
-
-inline NoHandleAllocation::~NoHandleAllocation() {
- if (active_) {
- // Restore state in current handle scope to re-enable handle
- // allocations.
- v8::ImplementationUtilities::HandleScopeData* data =
- isolate_->handle_scope_data();
- ASSERT_EQ(0, data->level);
- data->level = level_;
- }
-}
-
-
-NoHandleDereference::NoHandleDereference(Isolate* isolate)
- : isolate_(isolate) {
- // The guard is set on a per-isolate basis, so it affects all threads.
- // That's why we can only use it when running without parallel recompilation.
- if (FLAG_parallel_recompilation) return;
- old_state_ = isolate->allow_handle_deref();
- isolate_->set_allow_handle_deref(false);
-}
-
-
-NoHandleDereference::~NoHandleDereference() {
- if (FLAG_parallel_recompilation) return;
- isolate_->set_allow_handle_deref(old_state_);
-}
-
-
-AllowHandleDereference::AllowHandleDereference(Isolate* isolate)
- : isolate_(isolate) {
- // The guard is set on a per-isolate basis, so it affects all threads.
- // That's why we can only use it when running without parallel recompilation.
- if (FLAG_parallel_recompilation) return;
- old_state_ = isolate->allow_handle_deref();
- isolate_->set_allow_handle_deref(true);
-}
-
-
-AllowHandleDereference::~AllowHandleDereference() {
- if (FLAG_parallel_recompilation) return;
- isolate_->set_allow_handle_deref(old_state_);
-}
-#endif
-
-
-} } // namespace v8::internal
-
-#endif // V8_HANDLES_INL_H_
diff --git a/src/3rdparty/v8/src/handles.cc b/src/3rdparty/v8/src/handles.cc
deleted file mode 100644
index 7496cc1..0000000
--- a/src/3rdparty/v8/src/handles.cc
+++ /dev/null
@@ -1,935 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "debug.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "natives.h"
-#include "runtime.h"
-#include "string-search.h"
-#include "stub-cache.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-int HandleScope::NumberOfHandles(Isolate* isolate) {
- HandleScopeImplementer* impl = isolate->handle_scope_implementer();
- int n = impl->blocks()->length();
- if (n == 0) return 0;
- return ((n - 1) * kHandleBlockSize) + static_cast<int>(
- (isolate->handle_scope_data()->next - impl->blocks()->last()));
-}
-
-
-Object** HandleScope::Extend(Isolate* isolate) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
-
- Object** result = current->next;
-
- ASSERT(result == current->limit);
- // Make sure there's at least one scope on the stack and that the
- // top of the scope stack isn't a barrier.
- if (current->level == 0) {
- Utils::ReportApiFailure("v8::HandleScope::CreateHandle()",
- "Cannot create a handle without a HandleScope");
- return NULL;
- }
- HandleScopeImplementer* impl = isolate->handle_scope_implementer();
- // If there's more room in the last block, we use that. This is used
- // for fast creation of scopes after scope barriers.
- if (!impl->blocks()->is_empty()) {
- Object** limit = &impl->blocks()->last()[kHandleBlockSize];
- if (current->limit != limit) {
- current->limit = limit;
- ASSERT(limit - current->next < kHandleBlockSize);
- }
- }
-
- // If we still haven't found a slot for the handle, we extend the
- // current handle scope by allocating a new handle block.
- if (result == current->limit) {
- // If there's a spare block, use it for growing the current scope.
- result = impl->GetSpareOrNewBlock();
- // Add the extension to the global list of blocks, but count the
- // extension as part of the current scope.
- impl->blocks()->Add(result);
- current->limit = &result[kHandleBlockSize];
- }
-
- return result;
-}
-
-
-void HandleScope::DeleteExtensions(Isolate* isolate) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
- isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
-}
-
-
-void HandleScope::ZapRange(Object** start, Object** end) {
- ASSERT(end - start <= kHandleBlockSize);
- for (Object** p = start; p != end; p++) {
- *reinterpret_cast<Address*>(p) = v8::internal::kHandleZapValue;
- }
-}
-
-
-Address HandleScope::current_level_address(Isolate* isolate) {
- return reinterpret_cast<Address>(&isolate->handle_scope_data()->level);
-}
-
-
-Address HandleScope::current_next_address(Isolate* isolate) {
- return reinterpret_cast<Address>(&isolate->handle_scope_data()->next);
-}
-
-
-Address HandleScope::current_limit_address(Isolate* isolate) {
- return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit);
-}
-
-
-Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content,
- Handle<JSArray> array) {
- CALL_HEAP_FUNCTION(content->GetIsolate(),
- content->AddKeysFromJSArray(*array), FixedArray);
-}
-
-
-Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
- Handle<FixedArray> second) {
- CALL_HEAP_FUNCTION(first->GetIsolate(),
- first->UnionOfKeys(*second), FixedArray);
-}
-
-
-Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
- Handle<JSFunction> constructor,
- Handle<JSGlobalProxy> global) {
- CALL_HEAP_FUNCTION(
- constructor->GetIsolate(),
- constructor->GetHeap()->ReinitializeJSGlobalProxy(*constructor, *global),
- JSGlobalProxy);
-}
-
-
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
- // If objects constructed from this function exist then changing
- // 'estimated_nof_properties' is dangerous since the previous value might
- // have been compiled into the fast construct stub. More over, the inobject
- // slack tracking logic might have adjusted the previous value, so even
- // passing the same value is risky.
- if (func->shared()->live_objects_may_exist()) return;
-
- func->shared()->set_expected_nof_properties(nof);
- if (func->has_initial_map()) {
- Handle<Map> new_initial_map =
- func->GetIsolate()->factory()->CopyMap(
- Handle<Map>(func->initial_map()));
- new_initial_map->set_unused_property_fields(nof);
- func->set_initial_map(*new_initial_map);
- }
-}
-
-
-void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value) {
- CALL_HEAP_FUNCTION_VOID(func->GetIsolate(),
- func->SetPrototype(*value));
-}
-
-
-static int ExpectedNofPropertiesFromEstimate(int estimate) {
- // If no properties are added in the constructor, they are more likely
- // to be added later.
- if (estimate == 0) estimate = 2;
-
- // We do not shrink objects that go into a snapshot (yet), so we adjust
- // the estimate conservatively.
- if (Serializer::enabled()) return estimate + 2;
-
- // Inobject slack tracking will reclaim redundant inobject space later,
- // so we can afford to adjust the estimate generously.
- if (FLAG_clever_optimizations) {
- return estimate + 8;
- } else {
- return estimate + 3;
- }
-}
-
-
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
- int estimate) {
- // See the comment in SetExpectedNofProperties.
- if (shared->live_objects_may_exist()) return;
-
- shared->set_expected_nof_properties(
- ExpectedNofPropertiesFromEstimate(estimate));
-}
-
-
-void FlattenString(Handle<String> string) {
- CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten());
-}
-
-
-Handle<String> FlattenGetString(Handle<String> string) {
- CALL_HEAP_FUNCTION(string->GetIsolate(), string->TryFlatten(), String);
-}
-
-
-Handle<Object> SetPrototype(Handle<JSFunction> function,
- Handle<Object> prototype) {
- ASSERT(function->should_have_prototype());
- CALL_HEAP_FUNCTION(function->GetIsolate(),
- Accessors::FunctionSetPrototype(*function,
- *prototype,
- NULL),
- Object);
-}
-
-
-Handle<Object> SetProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(
- isolate,
- Runtime::SetObjectProperty(
- isolate, object, key, value, attributes, strict_mode),
- Object);
-}
-
-
-Handle<Object> ForceSetProperty(Handle<JSObject> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION(
- isolate,
- Runtime::ForceSetObjectProperty(
- isolate, object, key, value, attributes),
- Object);
-}
-
-
-Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
- Handle<Object> key) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- Runtime::ForceDeleteObjectProperty(isolate, object, key),
- Object);
-}
-
-
-Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetPropertyWithInterceptor(*key,
- *value,
- attributes,
- strict_mode),
- Object);
-}
-
-
-Handle<Object> GetProperty(Handle<JSReceiver> obj,
- const char* name) {
- Isolate* isolate = obj->GetIsolate();
- Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
- CALL_HEAP_FUNCTION(isolate, obj->GetProperty(*str), Object);
-}
-
-
-Handle<Object> GetProperty(Isolate* isolate,
- Handle<Object> obj,
- Handle<Object> key) {
- CALL_HEAP_FUNCTION(isolate,
- Runtime::GetObjectProperty(isolate, obj, key), Object);
-}
-
-
-Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name,
- PropertyAttributes* attributes) {
- Isolate* isolate = receiver->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- holder->GetPropertyWithInterceptor(*receiver,
- *name,
- attributes),
- Object);
-}
-
-
-Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
- const bool skip_hidden_prototypes = false;
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->SetPrototype(*value, skip_hidden_prototypes), Object);
-}
-
-
-Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
- uint32_t index) {
- CALL_HEAP_FUNCTION(
- isolate,
- isolate->heap()->LookupSingleCharacterStringFromCode(index), Object);
-}
-
-
-Handle<String> SubString(Handle<String> str,
- int start,
- int end,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(str->GetIsolate(),
- str->SubString(start, end, pretenure), String);
-}
-
-
-Handle<JSObject> Copy(Handle<JSObject> obj) {
- Isolate* isolate = obj->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- isolate->heap()->CopyJSObject(*obj), JSObject);
-}
-
-
-Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(), obj->DefineAccessor(*info), Object);
-}
-
-
-// Wrappers for scripts are kept alive and cached in weak global
-// handles referred from foreign objects held by the scripts as long as
-// they are used. When they are not used anymore, the garbage
-// collector will call the weak callback on the global handle
-// associated with the wrapper and get rid of both the wrapper and the
-// handle.
-static void ClearWrapperCache(v8::Isolate* v8_isolate,
- Persistent<v8::Value> handle,
- void*) {
- Handle<Object> cache = Utils::OpenHandle(*handle);
- JSValue* wrapper = JSValue::cast(*cache);
- Foreign* foreign = Script::cast(wrapper->value())->wrapper();
- ASSERT(foreign->foreign_address() ==
- reinterpret_cast<Address>(cache.location()));
- foreign->set_foreign_address(0);
- Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- isolate->global_handles()->Destroy(cache.location());
- isolate->counters()->script_wrappers()->Decrement();
-}
-
-
-Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
- if (script->wrapper()->foreign_address() != NULL) {
- // Return the script wrapper directly from the cache.
- return Handle<JSValue>(
- reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
- }
- Isolate* isolate = script->GetIsolate();
- // Construct a new script wrapper.
- isolate->counters()->script_wrappers()->Increment();
- Handle<JSFunction> constructor = isolate->script_function();
- Handle<JSValue> result =
- Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
-
- // The allocation might have triggered a GC, which could have called this
- // function recursively, and a wrapper has already been created and cached.
- // In that case, simply return the cached wrapper.
- if (script->wrapper()->foreign_address() != NULL) {
- return Handle<JSValue>(
- reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
- }
-
- result->set_value(*script);
-
- // Create a new weak global handle and use it to cache the wrapper
- // for future use. The cache will automatically be cleared by the
- // garbage collector when it is not used anymore.
- Handle<Object> handle = isolate->global_handles()->Create(*result);
- isolate->global_handles()->MakeWeak(handle.location(),
- NULL,
- NULL,
- &ClearWrapperCache);
- script->wrapper()->set_foreign_address(
- reinterpret_cast<Address>(handle.location()));
- return result;
-}
-
-
-// Init line_ends array with code positions of line ends inside script
-// source.
-void InitScriptLineEnds(Handle<Script> script) {
- if (!script->line_ends()->IsUndefined()) return;
-
- Isolate* isolate = script->GetIsolate();
-
- if (!script->source()->IsString()) {
- ASSERT(script->source()->IsUndefined());
- Handle<FixedArray> empty = isolate->factory()->NewFixedArray(0);
- script->set_line_ends(*empty);
- ASSERT(script->line_ends()->IsFixedArray());
- return;
- }
-
- Handle<String> src(String::cast(script->source()), isolate);
-
- Handle<FixedArray> array = CalculateLineEnds(src, true);
-
- if (*array != isolate->heap()->empty_fixed_array()) {
- array->set_map(isolate->heap()->fixed_cow_array_map());
- }
-
- script->set_line_ends(*array);
- ASSERT(script->line_ends()->IsFixedArray());
-}
-
-
-template <typename SourceChar>
-static void CalculateLineEnds(Isolate* isolate,
- List<int>* line_ends,
- Vector<const SourceChar> src,
- bool with_last_line) {
- const int src_len = src.length();
- StringSearch<uint8_t, SourceChar> search(isolate, STATIC_ASCII_VECTOR("\n"));
-
- // Find and record line ends.
- int position = 0;
- while (position != -1 && position < src_len) {
- position = search.Search(src, position);
- if (position != -1) {
- line_ends->Add(position);
- position++;
- } else if (with_last_line) {
- // Even if the last line misses a line end, it is counted.
- line_ends->Add(src_len);
- return;
- }
- }
-}
-
-
-Handle<FixedArray> CalculateLineEnds(Handle<String> src,
- bool with_last_line) {
- src = FlattenGetString(src);
- // Rough estimate of line count based on a roughly estimated average
- // length of (unpacked) code.
- int line_count_estimate = src->length() >> 4;
- List<int> line_ends(line_count_estimate);
- Isolate* isolate = src->GetIsolate();
- {
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid.
- // Dispatch on type of strings.
- String::FlatContent content = src->GetFlatContent();
- ASSERT(content.IsFlat());
- if (content.IsAscii()) {
- CalculateLineEnds(isolate,
- &line_ends,
- content.ToOneByteVector(),
- with_last_line);
- } else {
- CalculateLineEnds(isolate,
- &line_ends,
- content.ToUC16Vector(),
- with_last_line);
- }
- }
- int line_count = line_ends.length();
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(line_count);
- for (int i = 0; i < line_count; i++) {
- array->set(i, Smi::FromInt(line_ends[i]));
- }
- return array;
-}
-
-
-// Convert code position into line number.
-int GetScriptLineNumber(Handle<Script> script, int code_pos) {
- InitScriptLineEnds(script);
- AssertNoAllocation no_allocation;
- FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
- const int line_ends_len = line_ends_array->length();
-
- if (!line_ends_len) return -1;
-
- if ((Smi::cast(line_ends_array->get(0)))->value() >= code_pos) {
- return script->line_offset()->value();
- }
-
- int left = 0;
- int right = line_ends_len;
- while (int half = (right - left) / 2) {
- if ((Smi::cast(line_ends_array->get(left + half)))->value() > code_pos) {
- right -= half;
- } else {
- left += half;
- }
- }
- return right + script->line_offset()->value();
-}
-
-// Convert code position into column number.
-int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
- int line_number = GetScriptLineNumber(script, code_pos);
- if (line_number == -1) return -1;
-
- AssertNoAllocation no_allocation;
- FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
- line_number = line_number - script->line_offset()->value();
- if (line_number == 0) return code_pos + script->column_offset()->value();
- int prev_line_end_pos =
- Smi::cast(line_ends_array->get(line_number - 1))->value();
- return code_pos - (prev_line_end_pos + 1);
-}
-
-int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
- AssertNoAllocation no_allocation;
- if (!script->line_ends()->IsUndefined()) {
- return GetScriptLineNumber(script, code_pos);
- }
- // Slow mode: we do not have line_ends. We have to iterate through source.
- if (!script->source()->IsString()) {
- return -1;
- }
- String* source = String::cast(script->source());
- int line = 0;
- int len = source->length();
- for (int pos = 0; pos < len; pos++) {
- if (pos == code_pos) {
- break;
- }
- if (source->Get(pos) == '\n') {
- line++;
- }
- }
- return line;
-}
-
-
-void CustomArguments::IterateInstance(ObjectVisitor* v) {
- v->VisitPointers(values_, values_ + ARRAY_SIZE(values_));
-}
-
-
-// Compute the property keys from the interceptor.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
- Handle<JSObject> object) {
- Isolate* isolate = receiver->GetIsolate();
- Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
- CustomArguments args(isolate, interceptor->data(), *receiver, *object);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Array> result;
- if (!interceptor->enumerator()->IsUndefined()) {
- v8::NamedPropertyEnumerator enum_fun =
- v8::ToCData<v8::NamedPropertyEnumerator>(interceptor->enumerator());
- LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = enum_fun(info);
- }
- }
-#if ENABLE_EXTRA_CHECKS
- CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
-#endif
- return result;
-}
-
-
-// Compute the element keys from the interceptor.
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
- Handle<JSObject> object) {
- Isolate* isolate = receiver->GetIsolate();
- Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- CustomArguments args(isolate, interceptor->data(), *receiver, *object);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Array> result;
- if (!interceptor->enumerator()->IsUndefined()) {
- v8::IndexedPropertyEnumerator enum_fun =
- v8::ToCData<v8::IndexedPropertyEnumerator>(interceptor->enumerator());
- LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = enum_fun(info);
-#if ENABLE_EXTRA_CHECKS
- CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
-#endif
- }
- }
- return result;
-}
-
-
-Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script) {
- Isolate* isolate = script->GetIsolate();
- Handle<String> name_or_source_url_key =
- isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("nameOrSourceURL"));
- Handle<JSValue> script_wrapper = GetScriptWrapper(script);
- Handle<Object> property = GetProperty(isolate,
- script_wrapper,
- name_or_source_url_key);
- ASSERT(property->IsJSFunction());
- Handle<JSFunction> method = Handle<JSFunction>::cast(property);
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
- NULL, &caught_exception);
- if (caught_exception) {
- result = isolate->factory()->undefined_value();
- }
- return result;
-}
-
-
-static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
- int len = array->length();
- for (int i = 0; i < len; i++) {
- Object* e = array->get(i);
- if (!(e->IsString() || e->IsNumber())) return false;
- }
- return true;
-}
-
-
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
- KeyCollectionType type,
- bool* threw) {
- USE(ContainsOnlyValidKeys);
- Isolate* isolate = object->GetIsolate();
- Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
- Handle<JSObject> arguments_boilerplate = Handle<JSObject>(
- isolate->context()->native_context()->arguments_boilerplate(),
- isolate);
- Handle<JSFunction> arguments_function = Handle<JSFunction>(
- JSFunction::cast(arguments_boilerplate->map()->constructor()),
- isolate);
-
- // Only collect keys if access is permitted.
- for (Handle<Object> p = object;
- *p != isolate->heap()->null_value();
- p = Handle<Object>(p->GetPrototype(isolate), isolate)) {
- if (p->IsJSProxy()) {
- Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
- Handle<Object> args[] = { proxy };
- Handle<Object> names = Execution::Call(
- isolate->proxy_enumerate(), object, ARRAY_SIZE(args), args, threw);
- if (*threw) return content;
- content = AddKeysFromJSArray(content, Handle<JSArray>::cast(names));
- break;
- }
-
- Handle<JSObject> current(JSObject::cast(*p), isolate);
-
- // Check access rights if required.
- if (current->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*current,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
- break;
- }
-
- // Compute the element keys.
- Handle<FixedArray> element_keys =
- isolate->factory()->NewFixedArray(current->NumberOfEnumElements());
- current->GetEnumElementKeys(*element_keys);
- content = UnionOfKeys(content, element_keys);
- ASSERT(ContainsOnlyValidKeys(content));
-
- // Add the element keys from the interceptor.
- if (current->HasIndexedInterceptor()) {
- v8::Handle<v8::Array> result =
- GetKeysForIndexedInterceptor(object, current);
- if (!result.IsEmpty())
- content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
- ASSERT(ContainsOnlyValidKeys(content));
- }
-
- // We can cache the computed property keys if access checks are
- // not needed and no interceptors are involved.
- //
- // We do not use the cache if the object has elements and
- // therefore it does not make sense to cache the property names
- // for arguments objects. Arguments objects will always have
- // elements.
- // Wrapped strings have elements, but don't have an elements
- // array or dictionary. So the fast inline test for whether to
- // use the cache says yes, so we should not create a cache.
- bool cache_enum_keys =
- ((current->map()->constructor() != *arguments_function) &&
- !current->IsJSValue() &&
- !current->IsAccessCheckNeeded() &&
- !current->HasNamedInterceptor() &&
- !current->HasIndexedInterceptor());
- // Compute the property keys and cache them if possible.
- content =
- UnionOfKeys(content, GetEnumPropertyKeys(current, cache_enum_keys));
- ASSERT(ContainsOnlyValidKeys(content));
-
- // Add the property keys from the interceptor.
- if (current->HasNamedInterceptor()) {
- v8::Handle<v8::Array> result =
- GetKeysForNamedInterceptor(object, current);
- if (!result.IsEmpty())
- content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
- ASSERT(ContainsOnlyValidKeys(content));
- }
-
- // If we only want local properties we bail out after the first
- // iteration.
- if (type == LOCAL_ONLY)
- break;
- }
- return content;
-}
-
-
-Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw) {
- Isolate* isolate = object->GetIsolate();
- isolate->counters()->for_in()->Increment();
- Handle<FixedArray> elements =
- GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, threw);
- return isolate->factory()->NewJSArrayWithElements(elements);
-}
-
-
-Handle<FixedArray> ReduceFixedArrayTo(Handle<FixedArray> array, int length) {
- ASSERT(array->length() >= length);
- if (array->length() == length) return array;
-
- Handle<FixedArray> new_array =
- array->GetIsolate()->factory()->NewFixedArray(length);
- for (int i = 0; i < length; ++i) new_array->set(i, array->get(i));
- return new_array;
-}
-
-
-Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
- bool cache_result) {
- Isolate* isolate = object->GetIsolate();
- if (object->HasFastProperties()) {
- if (object->map()->instance_descriptors()->HasEnumCache()) {
- int own_property_count = object->map()->EnumLength();
- // If we have an enum cache, but the enum length of the given map is set
- // to kInvalidEnumCache, this means that the map itself has never used the
- // present enum cache. The first step to using the cache is to set the
- // enum length of the map by counting the number of own descriptors that
- // are not DONT_ENUM.
- if (own_property_count == Map::kInvalidEnumCache) {
- own_property_count = object->map()->NumberOfDescribedProperties(
- OWN_DESCRIPTORS, DONT_ENUM);
-
- if (cache_result) object->map()->SetEnumLength(own_property_count);
- }
-
- DescriptorArray* desc = object->map()->instance_descriptors();
- Handle<FixedArray> keys(desc->GetEnumCache(), isolate);
-
- // In case the number of properties required in the enum are actually
- // present, we can reuse the enum cache. Otherwise, this means that the
- // enum cache was generated for a previous (smaller) version of the
- // Descriptor Array. In that case we regenerate the enum cache.
- if (own_property_count <= keys->length()) {
- isolate->counters()->enum_cache_hits()->Increment();
- return ReduceFixedArrayTo(keys, own_property_count);
- }
- }
-
- Handle<Map> map(object->map());
-
- if (map->instance_descriptors()->IsEmpty()) {
- isolate->counters()->enum_cache_hits()->Increment();
- if (cache_result) map->SetEnumLength(0);
- return isolate->factory()->empty_fixed_array();
- }
-
- isolate->counters()->enum_cache_misses()->Increment();
- int num_enum = map->NumberOfDescribedProperties(ALL_DESCRIPTORS, DONT_ENUM);
-
- Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
- Handle<FixedArray> indices = isolate->factory()->NewFixedArray(num_enum);
-
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
-
- int real_size = map->NumberOfOwnDescriptors();
- int enum_size = 0;
- int index = 0;
-
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
- if (!details.IsDontEnum()) {
- if (i < real_size) ++enum_size;
- storage->set(index, descs->GetKey(i));
- if (!indices.is_null()) {
- if (details.type() != FIELD) {
- indices = Handle<FixedArray>();
- } else {
- int field_index = Descriptor::IndexFromValue(descs->GetValue(i));
- if (field_index >= map->inobject_properties()) {
- field_index = -(field_index - map->inobject_properties() + 1);
- }
- indices->set(index, Smi::FromInt(field_index));
- }
- }
- index++;
- }
- }
- ASSERT(index == storage->length());
-
- Handle<FixedArray> bridge_storage =
- isolate->factory()->NewFixedArray(
- DescriptorArray::kEnumCacheBridgeLength);
- DescriptorArray* desc = object->map()->instance_descriptors();
- desc->SetEnumCache(*bridge_storage,
- *storage,
- indices.is_null() ? Object::cast(Smi::FromInt(0))
- : Object::cast(*indices));
- if (cache_result) {
- object->map()->SetEnumLength(enum_size);
- }
-
- return ReduceFixedArrayTo(storage, enum_size);
- } else {
- Handle<StringDictionary> dictionary(object->property_dictionary());
-
- int length = dictionary->NumberOfElements();
- if (length == 0) {
- return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
- }
-
- // The enumeration array is generated by allocating an array big enough to
- // hold all properties that have been seen, whether they are are deleted or
- // not. Subsequently all visible properties are added to the array. If some
- // properties were not visible, the array is trimmed so it only contains
- // visible properties. This improves over adding elements and sorting by
- // index by having linear complexity rather than n*log(n).
-
- // By comparing the monotonous NextEnumerationIndex to the NumberOfElements,
- // we can predict the number of holes in the final array. If there will be
- // more than 50% holes, regenerate the enumeration indices to reduce the
- // number of holes to a minimum. This avoids allocating a large array if
- // many properties were added but subsequently deleted.
- int next_enumeration = dictionary->NextEnumerationIndex();
- if (!object->IsGlobalObject() && next_enumeration > (length * 3) / 2) {
- StringDictionary::DoGenerateNewEnumerationIndices(dictionary);
- next_enumeration = dictionary->NextEnumerationIndex();
- }
-
- Handle<FixedArray> storage =
- isolate->factory()->NewFixedArray(next_enumeration);
-
- storage = Handle<FixedArray>(dictionary->CopyEnumKeysTo(*storage));
- ASSERT(storage->length() == object->NumberOfLocalProperties(DONT_ENUM));
- return storage;
- }
-}
-
-
-Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
- Handle<Object> key) {
- CALL_HEAP_FUNCTION(table->GetIsolate(),
- table->Add(*key),
- ObjectHashSet);
-}
-
-
-Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
- Handle<Object> key) {
- CALL_HEAP_FUNCTION(table->GetIsolate(),
- table->Remove(*key),
- ObjectHashSet);
-}
-
-
-Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
- Handle<Object> key,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(table->GetIsolate(),
- table->Put(*key, *value),
- ObjectHashTable);
-}
-
-
-DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
- : impl_(isolate->handle_scope_implementer()) {
- impl_->BeginDeferredScope();
- v8::ImplementationUtilities::HandleScopeData* data =
- impl_->isolate()->handle_scope_data();
- Object** new_next = impl_->GetSpareOrNewBlock();
- Object** new_limit = &new_next[kHandleBlockSize];
- ASSERT(data->limit == &impl_->blocks()->last()[kHandleBlockSize]);
- impl_->blocks()->Add(new_next);
-
-#ifdef DEBUG
- prev_level_ = data->level;
-#endif
- data->level++;
- prev_limit_ = data->limit;
- prev_next_ = data->next;
- data->next = new_next;
- data->limit = new_limit;
-}
-
-
-DeferredHandleScope::~DeferredHandleScope() {
- impl_->isolate()->handle_scope_data()->level--;
- ASSERT(handles_detached_);
- ASSERT(impl_->isolate()->handle_scope_data()->level == prev_level_);
-}
-
-
-DeferredHandles* DeferredHandleScope::Detach() {
- DeferredHandles* deferred = impl_->Detach(prev_limit_);
- v8::ImplementationUtilities::HandleScopeData* data =
- impl_->isolate()->handle_scope_data();
- data->next = prev_next_;
- data->limit = prev_limit_;
-#ifdef DEBUG
- handles_detached_ = true;
-#endif
- return deferred;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/handles.h b/src/3rdparty/v8/src/handles.h
deleted file mode 100644
index b0b271c..0000000
--- a/src/3rdparty/v8/src/handles.h
+++ /dev/null
@@ -1,375 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HANDLES_H_
-#define V8_HANDLES_H_
-
-#include "allocation.h"
-#include "apiutils.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// A Handle provides a reference to an object that survives relocation by
-// the garbage collector.
-// Handles are only valid within a HandleScope.
-// When a handle is created for an object a cell is allocated in the heap.
-
-template<typename T>
-class Handle {
- public:
- INLINE(explicit Handle(T** location)) { location_ = location; }
- INLINE(explicit Handle(T* obj));
- INLINE(Handle(T* obj, Isolate* isolate));
-
- INLINE(Handle()) : location_(NULL) {}
-
- // Constructor for handling automatic up casting.
- // Ex. Handle<JSFunction> can be passed when Handle<Object> is expected.
- template <class S> Handle(Handle<S> handle) {
-#ifdef DEBUG
- T* a = NULL;
- S* b = NULL;
- a = b; // Fake assignment to enforce type checks.
- USE(a);
-#endif
- location_ = reinterpret_cast<T**>(handle.location_);
- }
-
- INLINE(T* operator ->() const) { return operator*(); }
-
- // Check if this handle refers to the exact same object as the other handle.
- bool is_identical_to(const Handle<T> other) const {
- return *location_ == *other.location_;
- }
-
- // Provides the C++ dereference operator.
- INLINE(T* operator*() const);
-
- // Returns the address to where the raw pointer is stored.
- INLINE(T** location() const);
-
- template <class S> static Handle<T> cast(Handle<S> that) {
- T::cast(*that);
- return Handle<T>(reinterpret_cast<T**>(that.location()));
- }
-
- static Handle<T> null() { return Handle<T>(); }
- bool is_null() const { return location_ == NULL; }
-
- // Closes the given scope, but lets this handle escape. See
- // implementation in api.h.
- inline Handle<T> EscapeFrom(v8::HandleScope* scope);
-
- private:
- T** location_;
-
- // Handles of different classes are allowed to access each other's location_.
- template<class S> friend class Handle;
-};
-
-
-// Convenience wrapper.
-template<class T>
-inline Handle<T> handle(T* t, Isolate* isolate) {
- return Handle<T>(t, isolate);
-}
-
-
-class DeferredHandles;
-class HandleScopeImplementer;
-
-
-// A stack-allocated class that governs a number of local handles.
-// After a handle scope has been created, all local handles will be
-// allocated within that handle scope until either the handle scope is
-// deleted or another handle scope is created. If there is already a
-// handle scope and a new one is created, all allocations will take
-// place in the new handle scope until it is deleted. After that,
-// new handles will again be allocated in the original handle scope.
-//
-// After the handle scope of a local handle has been deleted the
-// garbage collector will no longer track the object stored in the
-// handle and may deallocate it. The behavior of accessing a handle
-// for which the handle scope has been deleted is undefined.
-class HandleScope {
- public:
- explicit inline HandleScope(Isolate* isolate);
-
- inline ~HandleScope();
-
- // Counts the number of allocated handles.
- static int NumberOfHandles(Isolate* isolate);
-
- // Creates a new handle with the given value.
- template <typename T>
- static inline T** CreateHandle(Isolate* isolate, T* value);
-
- // Deallocates any extensions used by the current scope.
- static void DeleteExtensions(Isolate* isolate);
-
- static Address current_next_address(Isolate* isolate);
- static Address current_limit_address(Isolate* isolate);
- static Address current_level_address(Isolate* isolate);
-
- // Closes the HandleScope (invalidating all handles
- // created in the scope of the HandleScope) and returns
- // a Handle backed by the parent scope holding the
- // value of the argument handle.
- template <typename T>
- Handle<T> CloseAndEscape(Handle<T> handle_value);
-
- Isolate* isolate() { return isolate_; }
-
- private:
- // Prevent heap allocation or illegal handle scopes.
- HandleScope(const HandleScope&);
- void operator=(const HandleScope&);
- void* operator new(size_t size);
- void operator delete(void* size_t);
-
- inline void CloseScope();
-
- Isolate* isolate_;
- Object** prev_next_;
- Object** prev_limit_;
-
- // Extend the handle scope making room for more handles.
- static internal::Object** Extend(Isolate* isolate);
-
- // Zaps the handles in the half-open interval [start, end).
- static void ZapRange(internal::Object** start, internal::Object** end);
-
- friend class v8::internal::DeferredHandles;
- friend class v8::HandleScope;
- friend class v8::internal::HandleScopeImplementer;
- friend class v8::ImplementationUtilities;
- friend class v8::internal::Isolate;
-};
-
-
-class DeferredHandles;
-
-
-class DeferredHandleScope {
- public:
- explicit DeferredHandleScope(Isolate* isolate);
- // The DeferredHandles object returned stores the Handles created
- // since the creation of this DeferredHandleScope. The Handles are
- // alive as long as the DeferredHandles object is alive.
- DeferredHandles* Detach();
- ~DeferredHandleScope();
-
- private:
- Object** prev_limit_;
- Object** prev_next_;
- HandleScopeImplementer* impl_;
-
-#ifdef DEBUG
- bool handles_detached_;
- int prev_level_;
-#endif
-
- friend class HandleScopeImplementer;
-};
-
-
-// ----------------------------------------------------------------------------
-// Handle operations.
-// They might invoke garbage collection. The result is an handle to
-// an object of expected type, or the handle is an error if running out
-// of space or encountering an internal error.
-
-// Flattens a string.
-void FlattenString(Handle<String> str);
-
-// Flattens a string and returns the underlying external or sequential
-// string.
-Handle<String> FlattenGetString(Handle<String> str);
-
-Handle<Object> SetProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
-
-Handle<Object> ForceSetProperty(Handle<JSObject> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes);
-
-Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
- Handle<Object> key);
-
-Handle<Object> GetProperty(Handle<JSReceiver> obj,
- const char* name);
-
-Handle<Object> GetProperty(Isolate* isolate,
- Handle<Object> obj,
- Handle<Object> key);
-
-Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name,
- PropertyAttributes* attributes);
-
-Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
-
-Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
- uint32_t index);
-
-Handle<JSObject> Copy(Handle<JSObject> obj);
-
-Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info);
-
-Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
- Handle<JSArray> array);
-
-// Get the JS object corresponding to the given script; create it
-// if none exists.
-Handle<JSValue> GetScriptWrapper(Handle<Script> script);
-
-// Script line number computations. Note that the line number is zero-based.
-void InitScriptLineEnds(Handle<Script> script);
-// For string calculates an array of line end positions. If the string
-// does not end with a new line character, this character may optionally be
-// imagined.
-Handle<FixedArray> CalculateLineEnds(Handle<String> string,
- bool with_imaginary_last_new_line);
-int GetScriptLineNumber(Handle<Script> script, int code_position);
-// The safe version does not make heap allocations but may work much slower.
-int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
-int GetScriptColumnNumber(Handle<Script> script, int code_position);
-Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script);
-
-// Computes the enumerable keys from interceptors. Used for debug mirrors and
-// by GetKeysInFixedArrayFor below.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
- Handle<JSObject> object);
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
- Handle<JSObject> object);
-
-enum KeyCollectionType { LOCAL_ONLY, INCLUDE_PROTOS };
-
-// Computes the enumerable keys for a JSObject. Used for implementing
-// "for (n in object) { }".
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
- KeyCollectionType type,
- bool* threw);
-Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw);
-Handle<FixedArray> ReduceFixedArrayTo(Handle<FixedArray> array, int length);
-Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
- bool cache_result);
-
-// Computes the union of keys and return the result.
-// Used for implementing "for (n in object) { }"
-Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
- Handle<FixedArray> second);
-
-Handle<String> SubString(Handle<String> str,
- int start,
- int end,
- PretenureFlag pretenure = NOT_TENURED);
-
-// Sets the expected number of properties for the function's instances.
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
-
-// Sets the prototype property for a function instance.
-void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value);
-
-// Sets the expected number of properties based on estimate from compiler.
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
- int estimate);
-
-
-Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
- Handle<JSFunction> constructor,
- Handle<JSGlobalProxy> global);
-
-Handle<Object> SetPrototype(Handle<JSFunction> function,
- Handle<Object> prototype);
-
-Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
- Handle<Object> key);
-
-Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
- Handle<Object> key);
-
-Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
- Handle<Object> key,
- Handle<Object> value);
-
-class NoHandleAllocation BASE_EMBEDDED {
- public:
-#ifndef DEBUG
- explicit NoHandleAllocation(Isolate* isolate) {}
- ~NoHandleAllocation() {}
-#else
- explicit inline NoHandleAllocation(Isolate* isolate);
- inline ~NoHandleAllocation();
- private:
- Isolate* isolate_;
- int level_;
- bool active_;
-#endif
-};
-
-
-class NoHandleDereference BASE_EMBEDDED {
- public:
-#ifndef DEBUG
- explicit NoHandleDereference(Isolate* isolate) {}
- ~NoHandleDereference() {}
-#else
- explicit inline NoHandleDereference(Isolate* isolate);
- inline ~NoHandleDereference();
- private:
- Isolate* isolate_;
- bool old_state_;
-#endif
-};
-
-
-class AllowHandleDereference BASE_EMBEDDED {
- public:
-#ifndef DEBUG
- explicit AllowHandleDereference(Isolate* isolate) {}
- ~AllowHandleDereference() {}
-#else
- explicit inline AllowHandleDereference(Isolate* isolate);
- inline ~AllowHandleDereference();
- private:
- Isolate* isolate_;
- bool old_state_;
-#endif
-};
-
-} } // namespace v8::internal
-
-#endif // V8_HANDLES_H_
diff --git a/src/3rdparty/v8/src/hashmap.h b/src/3rdparty/v8/src/hashmap.h
deleted file mode 100644
index 11f6ace..0000000
--- a/src/3rdparty/v8/src/hashmap.h
+++ /dev/null
@@ -1,364 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HASHMAP_H_
-#define V8_HASHMAP_H_
-
-#include "allocation.h"
-#include "checks.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-template<class AllocationPolicy>
-class TemplateHashMapImpl {
- public:
- typedef bool (*MatchFun) (void* key1, void* key2);
-
- // The default capacity. This is used by the call sites which want
- // to pass in a non-default AllocationPolicy but want to use the
- // default value of capacity specified by the implementation.
- static const uint32_t kDefaultHashMapCapacity = 8;
-
- // initial_capacity is the size of the initial hash map;
- // it must be a power of 2 (and thus must not be 0).
- TemplateHashMapImpl(MatchFun match,
- uint32_t capacity = kDefaultHashMapCapacity,
- AllocationPolicy allocator = AllocationPolicy());
-
- ~TemplateHashMapImpl();
-
- // HashMap entries are (key, value, hash) triplets.
- // Some clients may not need to use the value slot
- // (e.g. implementers of sets, where the key is the value).
- struct Entry {
- void* key;
- void* value;
- uint32_t hash; // The full hash value for key
- int order; // If you never remove entries this is the insertion order.
- };
-
- // If an entry with matching key is found, Lookup()
- // returns that entry. If no matching entry is found,
- // but insert is set, a new entry is inserted with
- // corresponding key, key hash, and NULL value.
- // Otherwise, NULL is returned.
- Entry* Lookup(void* key, uint32_t hash, bool insert,
- AllocationPolicy allocator = AllocationPolicy());
-
- // Removes the entry with matching key.
- // It returns the value of the deleted entry
- // or null if there is no value for such key.
- void* Remove(void* key, uint32_t hash);
-
- // Empties the hash map (occupancy() == 0).
- void Clear();
-
- // The number of (non-empty) entries in the table.
- uint32_t occupancy() const { return occupancy_; }
-
- // The capacity of the table. The implementation
- // makes sure that occupancy is at most 80% of
- // the table capacity.
- uint32_t capacity() const { return capacity_; }
-
- // Iteration
- //
- // for (Entry* p = map.Start(); p != NULL; p = map.Next(p)) {
- // ...
- // }
- //
- // If entries are inserted during iteration, the effect of
- // calling Next() is undefined.
- Entry* Start() const;
- Entry* Next(Entry* p) const;
-
- private:
- MatchFun match_;
- Entry* map_;
- uint32_t capacity_;
- uint32_t occupancy_;
-
- Entry* map_end() const { return map_ + capacity_; }
- Entry* Probe(void* key, uint32_t hash);
- void Initialize(uint32_t capacity, AllocationPolicy allocator);
- void Resize(AllocationPolicy allocator);
-};
-
-typedef TemplateHashMapImpl<FreeStoreAllocationPolicy> HashMap;
-
-template<class AllocationPolicy>
-TemplateHashMapImpl<AllocationPolicy>::TemplateHashMapImpl(
- MatchFun match, uint32_t initial_capacity, AllocationPolicy allocator) {
- match_ = match;
- Initialize(initial_capacity, allocator);
-}
-
-
-template<class AllocationPolicy>
-TemplateHashMapImpl<AllocationPolicy>::~TemplateHashMapImpl() {
- AllocationPolicy::Delete(map_);
-}
-
-
-template<class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::Lookup(
- void* key, uint32_t hash, bool insert, AllocationPolicy allocator) {
- // Find a matching entry.
- Entry* p = Probe(key, hash);
- if (p->key != NULL) {
- return p;
- }
-
- // No entry found; insert one if necessary.
- if (insert) {
- p->key = key;
- p->value = NULL;
- p->hash = hash;
- p->order = occupancy_;
- occupancy_++;
-
- // Grow the map if we reached >= 80% occupancy.
- if (occupancy_ + occupancy_/4 >= capacity_) {
- Resize(allocator);
- p = Probe(key, hash);
- }
-
- return p;
- }
-
- // No entry found and none inserted.
- return NULL;
-}
-
-
-template<class AllocationPolicy>
-void* TemplateHashMapImpl<AllocationPolicy>::Remove(void* key, uint32_t hash) {
- // Lookup the entry for the key to remove.
- Entry* p = Probe(key, hash);
- if (p->key == NULL) {
- // Key not found nothing to remove.
- return NULL;
- }
-
- void* value = p->value;
- // To remove an entry we need to ensure that it does not create an empty
- // entry that will cause the search for another entry to stop too soon. If all
- // the entries between the entry to remove and the next empty slot have their
- // initial position inside this interval, clearing the entry to remove will
- // not break the search. If, while searching for the next empty entry, an
- // entry is encountered which does not have its initial position between the
- // entry to remove and the position looked at, then this entry can be moved to
- // the place of the entry to remove without breaking the search for it. The
- // entry made vacant by this move is now the entry to remove and the process
- // starts over.
- // Algorithm from http://en.wikipedia.org/wiki/Open_addressing.
-
- // This guarantees loop termination as there is at least one empty entry so
- // eventually the removed entry will have an empty entry after it.
- ASSERT(occupancy_ < capacity_);
-
- // p is the candidate entry to clear. q is used to scan forwards.
- Entry* q = p; // Start at the entry to remove.
- while (true) {
- // Move q to the next entry.
- q = q + 1;
- if (q == map_end()) {
- q = map_;
- }
-
- // All entries between p and q have their initial position between p and q
- // and the entry p can be cleared without breaking the search for these
- // entries.
- if (q->key == NULL) {
- break;
- }
-
- // Find the initial position for the entry at position q.
- Entry* r = map_ + (q->hash & (capacity_ - 1));
-
- // If the entry at position q has its initial position outside the range
- // between p and q it can be moved forward to position p and will still be
- // found. There is now a new candidate entry for clearing.
- if ((q > p && (r <= p || r > q)) ||
- (q < p && (r <= p && r > q))) {
- *p = *q;
- p = q;
- }
- }
-
- // Clear the entry which is allowed to en emptied.
- p->key = NULL;
- occupancy_--;
- return value;
-}
-
-
-template<class AllocationPolicy>
-void TemplateHashMapImpl<AllocationPolicy>::Clear() {
- // Mark all entries as empty.
- const Entry* end = map_end();
- for (Entry* p = map_; p < end; p++) {
- p->key = NULL;
- }
- occupancy_ = 0;
-}
-
-
-template<class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
- TemplateHashMapImpl<AllocationPolicy>::Start() const {
- return Next(map_ - 1);
-}
-
-
-template<class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
- TemplateHashMapImpl<AllocationPolicy>::Next(Entry* p) const {
- const Entry* end = map_end();
- ASSERT(map_ - 1 <= p && p < end);
- for (p++; p < end; p++) {
- if (p->key != NULL) {
- return p;
- }
- }
- return NULL;
-}
-
-
-template<class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
- TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) {
- ASSERT(key != NULL);
-
- ASSERT(IsPowerOf2(capacity_));
- Entry* p = map_ + (hash & (capacity_ - 1));
- const Entry* end = map_end();
- ASSERT(map_ <= p && p < end);
-
- ASSERT(occupancy_ < capacity_); // Guarantees loop termination.
- while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
- p++;
- if (p >= end) {
- p = map_;
- }
- }
-
- return p;
-}
-
-
-template<class AllocationPolicy>
-void TemplateHashMapImpl<AllocationPolicy>::Initialize(
- uint32_t capacity, AllocationPolicy allocator) {
- ASSERT(IsPowerOf2(capacity));
- map_ = reinterpret_cast<Entry*>(allocator.New(capacity * sizeof(Entry)));
- if (map_ == NULL) {
- v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
- return;
- }
- capacity_ = capacity;
- Clear();
-}
-
-
-template<class AllocationPolicy>
-void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
- Entry* map = map_;
- uint32_t n = occupancy_;
-
- // Allocate larger map.
- Initialize(capacity_ * 2, allocator);
-
- // Rehash all current entries.
- for (Entry* p = map; n > 0; p++) {
- if (p->key != NULL) {
- Entry* entry = Lookup(p->key, p->hash, true, allocator);
- entry->value = p->value;
- entry->order = p->order;
- n--;
- }
- }
-
- // Delete old map.
- AllocationPolicy::Delete(map);
-}
-
-
-// A hash map for pointer keys and values with an STL-like interface.
-template<class Key, class Value, class AllocationPolicy>
-class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
- public:
- STATIC_ASSERT(sizeof(Key*) == sizeof(void*)); // NOLINT
- STATIC_ASSERT(sizeof(Value*) == sizeof(void*)); // NOLINT
- struct value_type {
- Key* first;
- Value* second;
- };
-
- class Iterator {
- public:
- Iterator& operator++() {
- entry_ = map_->Next(entry_);
- return *this;
- }
-
- value_type* operator->() { return reinterpret_cast<value_type*>(entry_); }
- bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
-
- private:
- Iterator(const TemplateHashMapImpl<AllocationPolicy>* map,
- typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry) :
- map_(map), entry_(entry) { }
-
- const TemplateHashMapImpl<AllocationPolicy>* map_;
- typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry_;
-
- friend class TemplateHashMap;
- };
-
- TemplateHashMap(
- typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match,
- AllocationPolicy allocator = AllocationPolicy())
- : TemplateHashMapImpl<AllocationPolicy>(
- match,
- TemplateHashMapImpl<AllocationPolicy>::kDefaultHashMapCapacity,
- allocator) { }
-
- Iterator begin() const { return Iterator(this, this->Start()); }
- Iterator end() const { return Iterator(this, NULL); }
- Iterator find(Key* key, bool insert = false,
- AllocationPolicy allocator = AllocationPolicy()) {
- return Iterator(this, this->Lookup(key, key->Hash(), insert, allocator));
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_HASHMAP_H_
diff --git a/src/3rdparty/v8/src/heap-inl.h b/src/3rdparty/v8/src/heap-inl.h
deleted file mode 100644
index c3dff99..0000000
--- a/src/3rdparty/v8/src/heap-inl.h
+++ /dev/null
@@ -1,901 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HEAP_INL_H_
-#define V8_HEAP_INL_H_
-
-#include "heap.h"
-#include "isolate.h"
-#include "list-inl.h"
-#include "objects.h"
-#include "platform.h"
-#include "v8-counters.h"
-#include "store-buffer.h"
-#include "store-buffer-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void PromotionQueue::insert(HeapObject* target, int size) {
- if (emergency_stack_ != NULL) {
- emergency_stack_->Add(Entry(target, size));
- return;
- }
-
- if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
- NewSpacePage* rear_page =
- NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
- ASSERT(!rear_page->prev_page()->is_anchor());
- rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
- ActivateGuardIfOnTheSamePage();
- }
-
- if (guard_) {
- ASSERT(GetHeadPage() ==
- Page::FromAllocationTop(reinterpret_cast<Address>(limit_)));
-
- if ((rear_ - 2) < limit_) {
- RelocateQueueHead();
- emergency_stack_->Add(Entry(target, size));
- return;
- }
- }
-
- *(--rear_) = reinterpret_cast<intptr_t>(target);
- *(--rear_) = size;
- // Assert no overflow into live objects.
-#ifdef DEBUG
- SemiSpace::AssertValidRange(HEAP->new_space()->top(),
- reinterpret_cast<Address>(rear_));
-#endif
-}
-
-
-void PromotionQueue::ActivateGuardIfOnTheSamePage() {
- guard_ = guard_ ||
- heap_->new_space()->active_space()->current_page()->address() ==
- GetHeadPage()->address();
-}
-
-
-MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
- PretenureFlag pretenure) {
- // Check for ASCII first since this is the common case.
- const char* start = str.start();
- int length = str.length();
- int non_ascii_start = String::NonAsciiStart(start, length);
- if (non_ascii_start >= length) {
- // If the string is ASCII, we do not need to convert the characters
- // since UTF8 is backwards compatible with ASCII.
- return AllocateStringFromOneByte(str, pretenure);
- }
- // Non-ASCII and we need to decode.
- return AllocateStringFromUtf8Slow(str, non_ascii_start, pretenure);
-}
-
-
-template<>
-bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
- // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
- // ASCII only check.
- return chars == str.length();
-}
-
-
-template<>
-bool inline Heap::IsOneByte(String* str, int chars) {
- return str->IsOneByteRepresentation();
-}
-
-
-MaybeObject* Heap::AllocateInternalizedStringFromUtf8(
- Vector<const char> str, int chars, uint32_t hash_field) {
- if (IsOneByte(str, chars)) {
- return AllocateOneByteInternalizedString(
- Vector<const uint8_t>::cast(str), hash_field);
- }
- return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
-}
-
-
-template<typename T>
-MaybeObject* Heap::AllocateInternalizedStringImpl(
- T t, int chars, uint32_t hash_field) {
- if (IsOneByte(t, chars)) {
- return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
- }
- return AllocateInternalizedStringImpl<false>(t, chars, hash_field);
-}
-
-
-MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
- uint32_t hash_field) {
- if (str.length() > SeqOneByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0x2);
- }
- // Compute map and object size.
- Map* map = ascii_internalized_string_map();
- int size = SeqOneByteString::SizeFor(str.length());
-
- // Allocate string.
- Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // String maps are all immortal immovable objects.
- reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
- // Set length and hash fields of the allocated string.
- String* answer = String::cast(result);
- answer->set_length(str.length());
- answer->set_hash_field(hash_field);
- SeqString::cast(answer)->set_symbol_id(0);
-
- ASSERT_EQ(size, answer->Size());
-
- // Fill in the characters.
- memcpy(answer->address() + SeqOneByteString::kHeaderSize,
- str.start(), str.length());
-
- return answer;
-}
-
-
-MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
- uint32_t hash_field) {
- if (str.length() > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0x3);
- }
- // Compute map and object size.
- Map* map = internalized_string_map();
- int size = SeqTwoByteString::SizeFor(str.length());
-
- // Allocate string.
- Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<HeapObject*>(result)->set_map(map);
- // Set length and hash fields of the allocated string.
- String* answer = String::cast(result);
- answer->set_length(str.length());
- answer->set_hash_field(hash_field);
-
- ASSERT_EQ(size, answer->Size());
-
- // Fill in the characters.
- memcpy(answer->address() + SeqTwoByteString::kHeaderSize,
- str.start(), str.length() * kUC16Size);
-
- return answer;
-}
-
-MaybeObject* Heap::CopyFixedArray(FixedArray* src) {
- return CopyFixedArrayWithMap(src, src->map());
-}
-
-
-MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
- return CopyFixedDoubleArrayWithMap(src, src->map());
-}
-
-
-MaybeObject* Heap::AllocateRaw(int size_in_bytes,
- AllocationSpace space,
- AllocationSpace retry_space) {
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
- ASSERT(space != NEW_SPACE ||
- retry_space == OLD_POINTER_SPACE ||
- retry_space == OLD_DATA_SPACE ||
- retry_space == LO_SPACE);
-#ifdef DEBUG
- if (FLAG_gc_interval >= 0 &&
- !disallow_allocation_failure_ &&
- Heap::allocation_timeout_-- <= 0) {
- return Failure::RetryAfterGC(space);
- }
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result;
- if (NEW_SPACE == space) {
- result = new_space_.AllocateRaw(size_in_bytes);
- if (always_allocate() && result->IsFailure()) {
- space = retry_space;
- } else {
- return result;
- }
- }
-
- if (OLD_POINTER_SPACE == space) {
- result = old_pointer_space_->AllocateRaw(size_in_bytes);
- } else if (OLD_DATA_SPACE == space) {
- result = old_data_space_->AllocateRaw(size_in_bytes);
- } else if (CODE_SPACE == space) {
- result = code_space_->AllocateRaw(size_in_bytes);
- } else if (LO_SPACE == space) {
- result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
- } else if (CELL_SPACE == space) {
- result = cell_space_->AllocateRaw(size_in_bytes);
- } else {
- ASSERT(MAP_SPACE == space);
- result = map_space_->AllocateRaw(size_in_bytes);
- }
- if (result->IsFailure()) old_gen_exhausted_ = true;
- return result;
-}
-
-
-MaybeObject* Heap::NumberFromInt32(
- int32_t value, PretenureFlag pretenure) {
- if (Smi::IsValid(value)) return Smi::FromInt(value);
- // Bypass NumberFromDouble to avoid various redundant checks.
- return AllocateHeapNumber(FastI2D(value), pretenure);
-}
-
-
-MaybeObject* Heap::NumberFromUint32(
- uint32_t value, PretenureFlag pretenure) {
- if (static_cast<int32_t>(value) >= 0 &&
- Smi::IsValid(static_cast<int32_t>(value))) {
- return Smi::FromInt(static_cast<int32_t>(value));
- }
- // Bypass NumberFromDouble to avoid various redundant checks.
- return AllocateHeapNumber(FastUI2D(value), pretenure);
-}
-
-
-void Heap::FinalizeExternalString(HeapObject* string) {
- ASSERT(string->IsExternalString() || string->map()->has_external_resource());
-
- if (string->IsExternalString()) {
- v8::String::ExternalStringResourceBase** resource_addr =
- reinterpret_cast<v8::String::ExternalStringResourceBase**>(
- reinterpret_cast<byte*>(string) +
- ExternalString::kResourceOffset -
- kHeapObjectTag);
-
- // Dispose of the C++ object if it has not already been disposed.
- if (*resource_addr != NULL) {
- (*resource_addr)->Dispose();
- *resource_addr = NULL;
- }
- } else {
- JSObject *object = JSObject::cast(string);
- Object *value = object->GetExternalResourceObject();
- if (value->IsForeign()) {
- v8::Object::ExternalResource* resource =
- reinterpret_cast<v8::Object::ExternalResource*>(
- Foreign::cast(value)->foreign_address());
- ASSERT(resource != NULL);
- resource->Dispose();
- }
- }
-}
-
-
-MaybeObject* Heap::AllocateRawMap() {
-#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result = map_space_->AllocateRaw(Map::kSize);
- if (result->IsFailure()) old_gen_exhausted_ = true;
- return result;
-}
-
-
-MaybeObject* Heap::AllocateRawCell() {
-#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
- if (result->IsFailure()) old_gen_exhausted_ = true;
- return result;
-}
-
-
-bool Heap::InNewSpace(Object* object) {
- bool result = new_space_.Contains(object);
- ASSERT(!result || // Either not in new space
- gc_state_ != NOT_IN_GC || // ... or in the middle of GC
- InToSpace(object)); // ... or in to-space (where we allocate).
- return result;
-}
-
-
-bool Heap::InNewSpace(Address addr) {
- return new_space_.Contains(addr);
-}
-
-
-bool Heap::InFromSpace(Object* object) {
- return new_space_.FromSpaceContains(object);
-}
-
-
-bool Heap::InToSpace(Object* object) {
- return new_space_.ToSpaceContains(object);
-}
-
-
-bool Heap::OldGenerationAllocationLimitReached() {
- if (!incremental_marking()->IsStopped()) return false;
- return OldGenerationSpaceAvailable() < 0;
-}
-
-
-bool Heap::ShouldBePromoted(Address old_address, int object_size) {
- // An object should be promoted if:
- // - the object has survived a scavenge operation or
- // - to space is already 25% full.
- NewSpacePage* page = NewSpacePage::FromAddress(old_address);
- Address age_mark = new_space_.age_mark();
- bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
- (!page->ContainsLimit(age_mark) || old_address < age_mark);
- return below_mark || (new_space_.Size() + object_size) >=
- (new_space_.EffectiveCapacity() >> 2);
-}
-
-
-void Heap::RecordWrite(Address address, int offset) {
- if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
-}
-
-
-void Heap::RecordWrites(Address address, int start, int len) {
- if (!InNewSpace(address)) {
- for (int i = 0; i < len; i++) {
- store_buffer_.Mark(address + start + i * kPointerSize);
- }
- }
-}
-
-
-OldSpace* Heap::TargetSpace(HeapObject* object) {
- InstanceType type = object->map()->instance_type();
- AllocationSpace space = TargetSpaceId(type);
- return (space == OLD_POINTER_SPACE)
- ? old_pointer_space_
- : old_data_space_;
-}
-
-
-AllocationSpace Heap::TargetSpaceId(InstanceType type) {
- // Heap numbers and sequential strings are promoted to old data space, all
- // other object types are promoted to old pointer space. We do not use
- // object->IsHeapNumber() and object->IsSeqString() because we already
- // know that object has the heap object tag.
-
- // These objects are never allocated in new space.
- ASSERT(type != MAP_TYPE);
- ASSERT(type != CODE_TYPE);
- ASSERT(type != ODDBALL_TYPE);
- ASSERT(type != JS_GLOBAL_PROPERTY_CELL_TYPE);
-
- if (type < FIRST_NONSTRING_TYPE) {
- // There are four string representations: sequential strings, external
- // strings, cons strings, and sliced strings.
- // Only the latter two contain non-map-word pointers to heap objects.
- return ((type & kIsIndirectStringMask) == kIsIndirectStringTag)
- ? OLD_POINTER_SPACE
- : OLD_DATA_SPACE;
- } else {
- return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
- }
-}
-
-
-void Heap::CopyBlock(Address dst, Address src, int byte_size) {
- CopyWords(reinterpret_cast<Object**>(dst),
- reinterpret_cast<Object**>(src),
- byte_size / kPointerSize);
-}
-
-
-void Heap::MoveBlock(Address dst, Address src, int byte_size) {
- ASSERT(IsAligned(byte_size, kPointerSize));
-
- int size_in_words = byte_size / kPointerSize;
-
- if ((dst < src) || (dst >= (src + byte_size))) {
- Object** src_slot = reinterpret_cast<Object**>(src);
- Object** dst_slot = reinterpret_cast<Object**>(dst);
- Object** end_slot = src_slot + size_in_words;
-
- while (src_slot != end_slot) {
- *dst_slot++ = *src_slot++;
- }
- } else {
- memmove(dst, src, byte_size);
- }
-}
-
-
-void Heap::ScavengePointer(HeapObject** p) {
- ScavengeObject(p, *p);
-}
-
-
-void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
- ASSERT(HEAP->InFromSpace(object));
-
- // We use the first word (where the map pointer usually is) of a heap
- // object to record the forwarding pointer. A forwarding pointer can
- // point to an old space, the code space, or the to space of the new
- // generation.
- MapWord first_word = object->map_word();
-
- // If the first word is a forwarding address, the object has already been
- // copied.
- if (first_word.IsForwardingAddress()) {
- HeapObject* dest = first_word.ToForwardingAddress();
- ASSERT(HEAP->InFromSpace(*p));
- *p = dest;
- return;
- }
-
- // Call the slow part of scavenge object.
- return ScavengeObjectSlow(p, object);
-}
-
-
-MaybeObject* Heap::AllocateEmptyJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<Object> allocation_site_payload) {
- return AllocateJSArrayAndStorageWithAllocationSite(elements_kind, 0, 0,
- allocation_site_payload,
- DONT_INITIALIZE_ARRAY_ELEMENTS);
-}
-
-
-bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
- const char* collector_reason = NULL;
- GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
- return CollectGarbage(space, collector, gc_reason, collector_reason);
-}
-
-
-MaybeObject* Heap::PrepareForCompare(String* str) {
- // Always flatten small strings and force flattening of long strings
- // after we have accumulated a certain amount we failed to flatten.
- static const int kMaxAlwaysFlattenLength = 32;
- static const int kFlattenLongThreshold = 16*KB;
-
- const int length = str->length();
- MaybeObject* obj = str->TryFlatten();
- if (length <= kMaxAlwaysFlattenLength ||
- unflattened_strings_length_ >= kFlattenLongThreshold) {
- return obj;
- }
- if (obj->IsFailure()) {
- unflattened_strings_length_ += length;
- }
- return str;
-}
-
-
-intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes) {
- ASSERT(HasBeenSetUp());
- intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
- if (change_in_bytes > 0) {
- // Avoid overflow.
- if (amount > amount_of_external_allocated_memory_) {
- amount_of_external_allocated_memory_ = amount;
- } else {
- // Give up and reset the counters in case of an overflow.
- amount_of_external_allocated_memory_ = 0;
- amount_of_external_allocated_memory_at_last_global_gc_ = 0;
- }
- intptr_t amount_since_last_global_gc = PromotedExternalMemorySize();
- if (amount_since_last_global_gc > external_allocation_limit_) {
- CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached");
- }
- } else {
- // Avoid underflow.
- if (amount >= 0) {
- amount_of_external_allocated_memory_ = amount;
- } else {
- // Give up and reset the counters in case of an overflow.
- amount_of_external_allocated_memory_ = 0;
- amount_of_external_allocated_memory_at_last_global_gc_ = 0;
- }
- }
- if (FLAG_trace_external_memory) {
- PrintPID("%8.0f ms: ", isolate()->time_millis_since_init());
- PrintF("Adjust amount of external memory: delta=%6" V8_PTR_PREFIX "d KB, "
- " amount=%6" V8_PTR_PREFIX "d KB, isolate=0x%08" V8PRIxPTR ".\n",
- change_in_bytes / 1024, amount_of_external_allocated_memory_ / 1024,
- reinterpret_cast<intptr_t>(isolate()));
- }
- ASSERT(amount_of_external_allocated_memory_ >= 0);
- return amount_of_external_allocated_memory_;
-}
-
-
-void Heap::SetLastScriptId(Object* last_script_id) {
- roots_[kLastScriptIdRootIndex] = last_script_id;
-}
-
-
-Isolate* Heap::isolate() {
- return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
- reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
-}
-
-
-#ifdef DEBUG
-#define GC_GREEDY_CHECK() \
- if (FLAG_gc_greedy) HEAP->GarbageCollectionGreedyCheck()
-#else
-#define GC_GREEDY_CHECK() { }
-#endif
-
-// Calls the FUNCTION_CALL function and retries it up to three times
-// to guarantee that any allocations performed during the call will
-// succeed if there's enough memory.
-
-// Warning: Do not use the identifiers __object__, __maybe_object__ or
-// __scope__ in a call to this macro.
-
-#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)\
- do { \
- GC_GREEDY_CHECK(); \
- MaybeObject* __maybe_object__ = FUNCTION_CALL; \
- Object* __object__ = NULL; \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\
- } \
- if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
- allocation_space(), \
- "allocation failure"); \
- __maybe_object__ = FUNCTION_CALL; \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1", true);\
- } \
- if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \
- ISOLATE->heap()->CollectAllAvailableGarbage("last resort gc"); \
- { \
- AlwaysAllocateScope __scope__; \
- __maybe_object__ = FUNCTION_CALL; \
- } \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory() || \
- __maybe_object__->IsRetryAfterGC()) { \
- /* TODO(1181417): Fix this. */ \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2", true);\
- } \
- RETURN_EMPTY; \
- } while (false)
-
-
-#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
- CALL_AND_RETRY(ISOLATE, \
- FUNCTION_CALL, \
- return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
- return Handle<TYPE>())
-
-
-#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
- CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, return, return)
-
-
-#ifdef DEBUG
-
-inline bool Heap::allow_allocation(bool new_state) {
- bool old = allocation_allowed_;
- allocation_allowed_ = new_state;
- return old;
-}
-
-#endif
-
-
-void ExternalStringTable::AddString(String* string) {
- ASSERT(string->IsExternalString());
- if (heap_->InNewSpace(string)) {
- new_space_strings_.Add(string);
- } else {
- old_space_strings_.Add(string);
- }
-}
-
-
-void ExternalStringTable::AddObject(HeapObject* object) {
- ASSERT(object->map()->has_external_resource());
- if (heap_->InNewSpace(object)) {
- new_space_strings_.Add(object);
- } else {
- old_space_strings_.Add(object);
- }
-}
-
-
-void ExternalStringTable::Iterate(ObjectVisitor* v) {
- if (!new_space_strings_.is_empty()) {
- Object** start = &new_space_strings_[0];
- v->VisitPointers(start, start + new_space_strings_.length());
- }
- if (!old_space_strings_.is_empty()) {
- Object** start = &old_space_strings_[0];
- v->VisitPointers(start, start + old_space_strings_.length());
- }
-}
-
-
-// Verify() is inline to avoid ifdef-s around its calls in release
-// mode.
-void ExternalStringTable::Verify() {
-#ifdef DEBUG
- for (int i = 0; i < new_space_strings_.length(); ++i) {
- Object* obj = Object::cast(new_space_strings_[i]);
- // TODO(yangguo): check that the object is indeed an external string.
- ASSERT(heap_->InNewSpace(obj));
- ASSERT(obj != HEAP->the_hole_value());
-#ifndef ENABLE_LATIN_1
- if (obj->IsExternalAsciiString()) {
- ExternalAsciiString* string = ExternalAsciiString::cast(obj);
- ASSERT(String::IsAscii(string->GetChars(), string->length()));
- }
-#endif
- }
- for (int i = 0; i < old_space_strings_.length(); ++i) {
- Object* obj = Object::cast(old_space_strings_[i]);
- // TODO(yangguo): check that the object is indeed an external string.
- ASSERT(!heap_->InNewSpace(obj));
- ASSERT(obj != HEAP->the_hole_value());
-#ifndef ENABLE_LATIN_1
- if (obj->IsExternalAsciiString()) {
- ExternalAsciiString* string = ExternalAsciiString::cast(obj);
- ASSERT(String::IsAscii(string->GetChars(), string->length()));
- }
-#endif
- }
-#endif
-}
-
-
-void ExternalStringTable::AddOldObject(HeapObject* object) {
- ASSERT(object->IsExternalString() || object->map()->has_external_resource());
- ASSERT(!heap_->InNewSpace(object));
- old_space_strings_.Add(object);
-}
-
-
-void ExternalStringTable::ShrinkNewObjects(int position) {
- new_space_strings_.Rewind(position);
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Verify();
- }
-#endif
-}
-
-
-void ErrorObjectList::Add(JSObject* object) {
- list_.Add(object);
-}
-
-
-void ErrorObjectList::Iterate(ObjectVisitor* v) {
- if (!list_.is_empty()) {
- Object** start = &list_[0];
- v->VisitPointers(start, start + list_.length());
- }
-}
-
-
-void Heap::ClearInstanceofCache() {
- set_instanceof_cache_function(the_hole_value());
-}
-
-
-Object* Heap::ToBoolean(bool condition) {
- return condition ? true_value() : false_value();
-}
-
-
-void Heap::CompletelyClearInstanceofCache() {
- set_instanceof_cache_map(the_hole_value());
- set_instanceof_cache_function(the_hole_value());
-}
-
-
-MaybeObject* TranscendentalCache::Get(Type type, double input) {
- SubCache* cache = caches_[type];
- if (cache == NULL) {
- caches_[type] = cache = new SubCache(type);
- }
- return cache->Get(input);
-}
-
-
-Address TranscendentalCache::cache_array_address() {
- return reinterpret_cast<Address>(caches_);
-}
-
-
-double TranscendentalCache::SubCache::Calculate(double input) {
- switch (type_) {
- case ACOS:
- return acos(input);
- case ASIN:
- return asin(input);
- case ATAN:
- return atan(input);
- case COS:
- return fast_cos(input);
- case EXP:
- return exp(input);
- case LOG:
- return fast_log(input);
- case SIN:
- return fast_sin(input);
- case TAN:
- return fast_tan(input);
- default:
- return 0.0; // Never happens.
- }
-}
-
-
-MaybeObject* TranscendentalCache::SubCache::Get(double input) {
- Converter c;
- c.dbl = input;
- int hash = Hash(c);
- Element e = elements_[hash];
- if (e.in[0] == c.integers[0] &&
- e.in[1] == c.integers[1]) {
- ASSERT(e.output != NULL);
- isolate_->counters()->transcendental_cache_hit()->Increment();
- return e.output;
- }
- double answer = Calculate(input);
- isolate_->counters()->transcendental_cache_miss()->Increment();
- Object* heap_number;
- { MaybeObject* maybe_heap_number =
- isolate_->heap()->AllocateHeapNumber(answer);
- if (!maybe_heap_number->ToObject(&heap_number)) return maybe_heap_number;
- }
- elements_[hash].in[0] = c.integers[0];
- elements_[hash].in[1] = c.integers[1];
- elements_[hash].output = heap_number;
- return heap_number;
-}
-
-
-AlwaysAllocateScope::AlwaysAllocateScope() {
- // We shouldn't hit any nested scopes, because that requires
- // non-handle code to call handle code. The code still works but
- // performance will degrade, so we want to catch this situation
- // in debug mode.
- ASSERT(HEAP->always_allocate_scope_depth_ == 0);
- HEAP->always_allocate_scope_depth_++;
-}
-
-
-AlwaysAllocateScope::~AlwaysAllocateScope() {
- HEAP->always_allocate_scope_depth_--;
- ASSERT(HEAP->always_allocate_scope_depth_ == 0);
-}
-
-
-#ifdef VERIFY_HEAP
-NoWeakEmbeddedMapsVerificationScope::NoWeakEmbeddedMapsVerificationScope() {
- HEAP->no_weak_embedded_maps_verification_scope_depth_++;
-}
-
-
-NoWeakEmbeddedMapsVerificationScope::~NoWeakEmbeddedMapsVerificationScope() {
- HEAP->no_weak_embedded_maps_verification_scope_depth_--;
-}
-#endif
-
-
-void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- CHECK(HEAP->Contains(object));
- CHECK(object->map()->IsMap());
- }
- }
-}
-
-
-double GCTracer::SizeOfHeapObjects() {
- return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
-}
-
-
-DisallowAllocationFailure::DisallowAllocationFailure() {
-#ifdef DEBUG
- old_state_ = HEAP->disallow_allocation_failure_;
- HEAP->disallow_allocation_failure_ = true;
-#endif
-}
-
-
-DisallowAllocationFailure::~DisallowAllocationFailure() {
-#ifdef DEBUG
- HEAP->disallow_allocation_failure_ = old_state_;
-#endif
-}
-
-
-#ifdef DEBUG
-AssertNoAllocation::AssertNoAllocation() {
- Isolate* isolate = ISOLATE;
- active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active_) {
- old_state_ = isolate->heap()->allow_allocation(false);
- }
-}
-
-
-AssertNoAllocation::~AssertNoAllocation() {
- if (active_) HEAP->allow_allocation(old_state_);
-}
-
-
-DisableAssertNoAllocation::DisableAssertNoAllocation() {
- Isolate* isolate = ISOLATE;
- active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active_) {
- old_state_ = isolate->heap()->allow_allocation(true);
- }
-}
-
-
-DisableAssertNoAllocation::~DisableAssertNoAllocation() {
- if (active_) HEAP->allow_allocation(old_state_);
-}
-
-#else
-
-AssertNoAllocation::AssertNoAllocation() { }
-AssertNoAllocation::~AssertNoAllocation() { }
-DisableAssertNoAllocation::DisableAssertNoAllocation() { }
-DisableAssertNoAllocation::~DisableAssertNoAllocation() { }
-
-#endif
-
-
-} } // namespace v8::internal
-
-#endif // V8_HEAP_INL_H_
diff --git a/src/3rdparty/v8/src/heap-profiler.cc b/src/3rdparty/v8/src/heap-profiler.cc
deleted file mode 100644
index c9f1d50..0000000
--- a/src/3rdparty/v8/src/heap-profiler.cc
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2009-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "heap-profiler.h"
-#include "heap-snapshot-generator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-HeapProfiler::HeapProfiler(Heap* heap)
- : snapshots_(new HeapSnapshotsCollection(heap)),
- next_snapshot_uid_(1) {
-}
-
-
-HeapProfiler::~HeapProfiler() {
- delete snapshots_;
-}
-
-
-void HeapProfiler::ResetSnapshots() {
- Heap* the_heap = heap();
- delete snapshots_;
- snapshots_ = new HeapSnapshotsCollection(the_heap);
-}
-
-
-void HeapProfiler::SetUp() {
- Isolate* isolate = Isolate::Current();
- if (isolate->heap_profiler() == NULL) {
- isolate->set_heap_profiler(new HeapProfiler(isolate->heap()));
- }
-}
-
-
-void HeapProfiler::TearDown() {
- Isolate* isolate = Isolate::Current();
- delete isolate->heap_profiler();
- isolate->set_heap_profiler(NULL);
-}
-
-
-HeapSnapshot* HeapProfiler::TakeSnapshot(
- const char* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver) {
- ASSERT(Isolate::Current()->heap_profiler() != NULL);
- return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
- type,
- control,
- resolver);
-}
-
-
-HeapSnapshot* HeapProfiler::TakeSnapshot(
- String* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver) {
- ASSERT(Isolate::Current()->heap_profiler() != NULL);
- return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
- type,
- control,
- resolver);
-}
-
-
-void HeapProfiler::StartHeapObjectsTracking() {
- ASSERT(Isolate::Current()->heap_profiler() != NULL);
- Isolate::Current()->heap_profiler()->StartHeapObjectsTrackingImpl();
-}
-
-
-void HeapProfiler::StopHeapObjectsTracking() {
- ASSERT(Isolate::Current()->heap_profiler() != NULL);
- Isolate::Current()->heap_profiler()->StopHeapObjectsTrackingImpl();
-}
-
-
-SnapshotObjectId HeapProfiler::PushHeapObjectsStats(v8::OutputStream* stream) {
- ASSERT(Isolate::Current()->heap_profiler() != NULL);
- return Isolate::Current()->heap_profiler()->PushHeapObjectsStatsImpl(stream);
-}
-
-
-void HeapProfiler::DefineWrapperClass(
- uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
- ASSERT(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
- if (wrapper_callbacks_.length() <= class_id) {
- wrapper_callbacks_.AddBlock(
- NULL, class_id - wrapper_callbacks_.length() + 1);
- }
- wrapper_callbacks_[class_id] = callback;
-}
-
-
-v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
- uint16_t class_id, Object** wrapper) {
- if (wrapper_callbacks_.length() <= class_id) return NULL;
- return wrapper_callbacks_[class_id](
- class_id, Utils::ToLocal(Handle<Object>(wrapper)));
-}
-
-
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
- const char* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver) {
- HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
- HeapSnapshot* result =
- snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
- bool generation_completed = true;
- switch (s_type) {
- case HeapSnapshot::kFull: {
- HeapSnapshotGenerator generator(result, control, resolver, heap());
- generation_completed = generator.GenerateSnapshot();
- break;
- }
- default:
- UNREACHABLE();
- }
- if (!generation_completed) {
- delete result;
- result = NULL;
- }
- snapshots_->SnapshotGenerationFinished(result);
- return result;
-}
-
-
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
- String* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver) {
- return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control,
- resolver);
-}
-
-void HeapProfiler::StartHeapObjectsTrackingImpl() {
- snapshots_->StartHeapObjectsTracking();
-}
-
-
-SnapshotObjectId HeapProfiler::PushHeapObjectsStatsImpl(OutputStream* stream) {
- return snapshots_->PushHeapObjectsStats(stream);
-}
-
-
-void HeapProfiler::StopHeapObjectsTrackingImpl() {
- snapshots_->StopHeapObjectsTracking();
-}
-
-
-size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- size_t size = profiler->snapshots_->GetUsedMemorySize();
- return size;
-}
-
-
-int HeapProfiler::GetSnapshotsCount() {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- return profiler->snapshots_->snapshots()->length();
-}
-
-
-HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- return profiler->snapshots_->snapshots()->at(index);
-}
-
-
-HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- return profiler->snapshots_->GetSnapshot(uid);
-}
-
-
-SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
- if (!obj->IsHeapObject())
- return v8::HeapProfiler::kUnknownObjectId;
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- return profiler->snapshots_->FindObjectId(HeapObject::cast(*obj)->address());
-}
-
-
-void HeapProfiler::DeleteAllSnapshots() {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- profiler->ResetSnapshots();
-}
-
-
-void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
- snapshots_->ObjectMoveEvent(from, to);
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/heap-profiler.h b/src/3rdparty/v8/src/heap-profiler.h
deleted file mode 100644
index c8c94f5..0000000
--- a/src/3rdparty/v8/src/heap-profiler.h
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2009-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HEAP_PROFILER_H_
-#define V8_HEAP_PROFILER_H_
-
-#include "isolate.h"
-
-namespace v8 {
-namespace internal {
-
-class HeapSnapshot;
-class HeapSnapshotsCollection;
-
-#define HEAP_PROFILE(heap, call) \
- do { \
- v8::internal::HeapProfiler* profiler = heap->isolate()->heap_profiler(); \
- if (profiler != NULL && profiler->is_profiling()) { \
- profiler->call; \
- } \
- } while (false)
-
-class HeapProfiler {
- public:
- static void SetUp();
- static void TearDown();
-
- static size_t GetMemorySizeUsedByProfiler();
-
- static HeapSnapshot* TakeSnapshot(
- const char* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver);
- static HeapSnapshot* TakeSnapshot(
- String* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver);
-
- static void StartHeapObjectsTracking();
- static void StopHeapObjectsTracking();
- static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
- static int GetSnapshotsCount();
- static HeapSnapshot* GetSnapshot(int index);
- static HeapSnapshot* FindSnapshot(unsigned uid);
- static SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
- static void DeleteAllSnapshots();
-
- void ObjectMoveEvent(Address from, Address to);
-
- void DefineWrapperClass(
- uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback);
-
- v8::RetainedObjectInfo* ExecuteWrapperClassCallback(uint16_t class_id,
- Object** wrapper);
- INLINE(bool is_profiling()) {
- return snapshots_->is_tracking_objects();
- }
-
- private:
- explicit HeapProfiler(Heap* heap);
- ~HeapProfiler();
- HeapSnapshot* TakeSnapshotImpl(
- const char* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver);
- HeapSnapshot* TakeSnapshotImpl(
- String* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver);
- void ResetSnapshots();
-
- void StartHeapObjectsTrackingImpl();
- void StopHeapObjectsTrackingImpl();
- SnapshotObjectId PushHeapObjectsStatsImpl(OutputStream* stream);
-
- Heap* heap() const { return snapshots_->heap(); }
-
- HeapSnapshotsCollection* snapshots_;
- unsigned next_snapshot_uid_;
- List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_HEAP_PROFILER_H_
diff --git a/src/3rdparty/v8/src/heap-snapshot-generator-inl.h b/src/3rdparty/v8/src/heap-snapshot-generator-inl.h
deleted file mode 100644
index 1a878c6..0000000
--- a/src/3rdparty/v8/src/heap-snapshot-generator-inl.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
-#define V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
-
-#include "heap-snapshot-generator.h"
-
-namespace v8 {
-namespace internal {
-
-
-HeapEntry* HeapGraphEdge::from() const {
- return &snapshot()->entries()[from_index_];
-}
-
-
-HeapSnapshot* HeapGraphEdge::snapshot() const {
- return to_entry_->snapshot();
-}
-
-
-int HeapEntry::index() const {
- return static_cast<int>(this - &snapshot_->entries().first());
-}
-
-
-int HeapEntry::set_children_index(int index) {
- children_index_ = index;
- int next_index = index + children_count_;
- children_count_ = 0;
- return next_index;
-}
-
-
-HeapGraphEdge** HeapEntry::children_arr() {
- ASSERT(children_index_ >= 0);
- return &snapshot_->children()[children_index_];
-}
-
-
-SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) {
- return kGcRootsFirstSubrootId + delta * kObjectIdStep;
-}
-
-
-HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
- return reinterpret_cast<HeapObject*>(
- reinterpret_cast<char*>(kFirstGcSubrootObject) +
- delta * HeapObjectsMap::kObjectIdStep);
-}
-
-
-int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
- return static_cast<int>(
- (reinterpret_cast<char*>(subroot) -
- reinterpret_cast<char*>(kFirstGcSubrootObject)) /
- HeapObjectsMap::kObjectIdStep);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
-
diff --git a/src/3rdparty/v8/src/heap-snapshot-generator.cc b/src/3rdparty/v8/src/heap-snapshot-generator.cc
deleted file mode 100644
index 9e96685..0000000
--- a/src/3rdparty/v8/src/heap-snapshot-generator.cc
+++ /dev/null
@@ -1,2703 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "heap-snapshot-generator-inl.h"
-
-#include "heap-profiler.h"
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-
-HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
- : type_(type),
- from_index_(from),
- to_index_(to),
- name_(name) {
- ASSERT(type == kContextVariable
- || type == kProperty
- || type == kInternal
- || type == kShortcut);
-}
-
-
-HeapGraphEdge::HeapGraphEdge(Type type, int index, int from, int to)
- : type_(type),
- from_index_(from),
- to_index_(to),
- index_(index) {
- ASSERT(type == kElement || type == kHidden || type == kWeak);
-}
-
-
-void HeapGraphEdge::ReplaceToIndexWithEntry(HeapSnapshot* snapshot) {
- to_entry_ = &snapshot->entries()[to_index_];
-}
-
-
-const int HeapEntry::kNoEntry = -1;
-
-HeapEntry::HeapEntry(HeapSnapshot* snapshot,
- Type type,
- const char* name,
- SnapshotObjectId id,
- int self_size)
- : type_(type),
- children_count_(0),
- children_index_(-1),
- self_size_(self_size),
- id_(id),
- snapshot_(snapshot),
- name_(name) { }
-
-
-void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
- const char* name,
- HeapEntry* entry) {
- HeapGraphEdge edge(type, name, this->index(), entry->index());
- snapshot_->edges().Add(edge);
- ++children_count_;
-}
-
-
-void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
- int index,
- HeapEntry* entry) {
- HeapGraphEdge edge(type, index, this->index(), entry->index());
- snapshot_->edges().Add(edge);
- ++children_count_;
-}
-
-
-Handle<HeapObject> HeapEntry::GetHeapObject() {
- return snapshot_->collection()->FindHeapObjectById(id());
-}
-
-
-void HeapEntry::Print(
- const char* prefix, const char* edge_name, int max_depth, int indent) {
- STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
- OS::Print("%6d @%6u %*c %s%s: ",
- self_size(), id(), indent, ' ', prefix, edge_name);
- if (type() != kString) {
- OS::Print("%s %.40s\n", TypeAsString(), name_);
- } else {
- OS::Print("\"");
- const char* c = name_;
- while (*c && (c - name_) <= 40) {
- if (*c != '\n')
- OS::Print("%c", *c);
- else
- OS::Print("\\n");
- ++c;
- }
- OS::Print("\"\n");
- }
- if (--max_depth == 0) return;
- Vector<HeapGraphEdge*> ch = children();
- for (int i = 0; i < ch.length(); ++i) {
- HeapGraphEdge& edge = *ch[i];
- const char* edge_prefix = "";
- EmbeddedVector<char, 64> index;
- const char* edge_name = index.start();
- switch (edge.type()) {
- case HeapGraphEdge::kContextVariable:
- edge_prefix = "#";
- edge_name = edge.name();
- break;
- case HeapGraphEdge::kElement:
- OS::SNPrintF(index, "%d", edge.index());
- break;
- case HeapGraphEdge::kInternal:
- edge_prefix = "$";
- edge_name = edge.name();
- break;
- case HeapGraphEdge::kProperty:
- edge_name = edge.name();
- break;
- case HeapGraphEdge::kHidden:
- edge_prefix = "$";
- OS::SNPrintF(index, "%d", edge.index());
- break;
- case HeapGraphEdge::kShortcut:
- edge_prefix = "^";
- edge_name = edge.name();
- break;
- case HeapGraphEdge::kWeak:
- edge_prefix = "w";
- OS::SNPrintF(index, "%d", edge.index());
- break;
- default:
- OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
- }
- edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2);
- }
-}
-
-
-const char* HeapEntry::TypeAsString() {
- switch (type()) {
- case kHidden: return "/hidden/";
- case kObject: return "/object/";
- case kClosure: return "/closure/";
- case kString: return "/string/";
- case kCode: return "/code/";
- case kArray: return "/array/";
- case kRegExp: return "/regexp/";
- case kHeapNumber: return "/number/";
- case kNative: return "/native/";
- case kSynthetic: return "/synthetic/";
- default: return "???";
- }
-}
-
-
-// It is very important to keep objects that form a heap snapshot
-// as small as possible.
-namespace { // Avoid littering the global namespace.
-
-template <size_t ptr_size> struct SnapshotSizeConstants;
-
-template <> struct SnapshotSizeConstants<4> {
- static const int kExpectedHeapGraphEdgeSize = 12;
- static const int kExpectedHeapEntrySize = 24;
- static const int kExpectedHeapSnapshotsCollectionSize = 100;
- static const int kExpectedHeapSnapshotSize = 136;
- static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
-};
-
-template <> struct SnapshotSizeConstants<8> {
- static const int kExpectedHeapGraphEdgeSize = 24;
- static const int kExpectedHeapEntrySize = 32;
- static const int kExpectedHeapSnapshotsCollectionSize = 152;
- static const int kExpectedHeapSnapshotSize = 168;
- static const uint64_t kMaxSerializableSnapshotRawSize =
- static_cast<uint64_t>(6000) * MB;
-};
-
-} // namespace
-
-HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
- HeapSnapshot::Type type,
- const char* title,
- unsigned uid)
- : collection_(collection),
- type_(type),
- title_(title),
- uid_(uid),
- root_index_(HeapEntry::kNoEntry),
- gc_roots_index_(HeapEntry::kNoEntry),
- natives_root_index_(HeapEntry::kNoEntry),
- max_snapshot_js_object_id_(0) {
- STATIC_CHECK(
- sizeof(HeapGraphEdge) ==
- SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
- STATIC_CHECK(
- sizeof(HeapEntry) ==
- SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
- for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
- gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
- }
-}
-
-
-void HeapSnapshot::Delete() {
- collection_->RemoveSnapshot(this);
- delete this;
-}
-
-
-void HeapSnapshot::RememberLastJSObjectId() {
- max_snapshot_js_object_id_ = collection_->last_assigned_id();
-}
-
-
-HeapEntry* HeapSnapshot::AddRootEntry() {
- ASSERT(root_index_ == HeapEntry::kNoEntry);
- ASSERT(entries_.is_empty()); // Root entry must be the first one.
- HeapEntry* entry = AddEntry(HeapEntry::kObject,
- "",
- HeapObjectsMap::kInternalRootObjectId,
- 0);
- root_index_ = entry->index();
- ASSERT(root_index_ == 0);
- return entry;
-}
-
-
-HeapEntry* HeapSnapshot::AddGcRootsEntry() {
- ASSERT(gc_roots_index_ == HeapEntry::kNoEntry);
- HeapEntry* entry = AddEntry(HeapEntry::kObject,
- "(GC roots)",
- HeapObjectsMap::kGcRootsObjectId,
- 0);
- gc_roots_index_ = entry->index();
- return entry;
-}
-
-
-HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
- ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
- ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
- HeapEntry* entry = AddEntry(
- HeapEntry::kObject,
- VisitorSynchronization::kTagNames[tag],
- HeapObjectsMap::GetNthGcSubrootId(tag),
- 0);
- gc_subroot_indexes_[tag] = entry->index();
- return entry;
-}
-
-
-HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
- const char* name,
- SnapshotObjectId id,
- int size) {
- HeapEntry entry(this, type, name, id, size);
- entries_.Add(entry);
- return &entries_.last();
-}
-
-
-void HeapSnapshot::FillChildren() {
- ASSERT(children().is_empty());
- children().Allocate(edges().length());
- int children_index = 0;
- for (int i = 0; i < entries().length(); ++i) {
- HeapEntry* entry = &entries()[i];
- children_index = entry->set_children_index(children_index);
- }
- ASSERT(edges().length() == children_index);
- for (int i = 0; i < edges().length(); ++i) {
- HeapGraphEdge* edge = &edges()[i];
- edge->ReplaceToIndexWithEntry(this);
- edge->from()->add_child(edge);
- }
-}
-
-
-class FindEntryById {
- public:
- explicit FindEntryById(SnapshotObjectId id) : id_(id) { }
- int operator()(HeapEntry* const* entry) {
- if ((*entry)->id() == id_) return 0;
- return (*entry)->id() < id_ ? -1 : 1;
- }
- private:
- SnapshotObjectId id_;
-};
-
-
-HeapEntry* HeapSnapshot::GetEntryById(SnapshotObjectId id) {
- List<HeapEntry*>* entries_by_id = GetSortedEntriesList();
- // Perform a binary search by id.
- int index = SortedListBSearch(*entries_by_id, FindEntryById(id));
- if (index == -1)
- return NULL;
- return entries_by_id->at(index);
-}
-
-
-template<class T>
-static int SortByIds(const T* entry1_ptr,
- const T* entry2_ptr) {
- if ((*entry1_ptr)->id() == (*entry2_ptr)->id()) return 0;
- return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1;
-}
-
-
-List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
- if (sorted_entries_.is_empty()) {
- sorted_entries_.Allocate(entries_.length());
- for (int i = 0; i < entries_.length(); ++i) {
- sorted_entries_[i] = &entries_[i];
- }
- sorted_entries_.Sort(SortByIds);
- }
- return &sorted_entries_;
-}
-
-
-void HeapSnapshot::Print(int max_depth) {
- root()->Print("", "", max_depth, 0);
-}
-
-
-template<typename T, class P>
-static size_t GetMemoryUsedByList(const List<T, P>& list) {
- return list.length() * sizeof(T) + sizeof(list);
-}
-
-
-size_t HeapSnapshot::RawSnapshotSize() const {
- STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::kExpectedHeapSnapshotSize ==
- sizeof(HeapSnapshot)); // NOLINT
- return
- sizeof(*this) +
- GetMemoryUsedByList(entries_) +
- GetMemoryUsedByList(edges_) +
- GetMemoryUsedByList(children_) +
- GetMemoryUsedByList(sorted_entries_);
-}
-
-
-// We split IDs on evens for embedder objects (see
-// HeapObjectsMap::GenerateId) and odds for native objects.
-const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1;
-const SnapshotObjectId HeapObjectsMap::kGcRootsObjectId =
- HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep;
-const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId =
- HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
-const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId =
- HeapObjectsMap::kGcRootsFirstSubrootId +
- VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
-
-HeapObjectsMap::HeapObjectsMap(Heap* heap)
- : next_id_(kFirstAvailableObjectId),
- entries_map_(AddressesMatch),
- heap_(heap) {
- // This dummy element solves a problem with entries_map_.
- // When we do lookup in HashMap we see no difference between two cases:
- // it has an entry with NULL as the value or it has created
- // a new entry on the fly with NULL as the default value.
- // With such dummy element we have a guaranty that all entries_map_ entries
- // will have the value field grater than 0.
- // This fact is using in MoveObject method.
- entries_.Add(EntryInfo(0, NULL, 0));
-}
-
-
-void HeapObjectsMap::SnapshotGenerationFinished() {
- RemoveDeadEntries();
-}
-
-
-void HeapObjectsMap::MoveObject(Address from, Address to) {
- ASSERT(to != NULL);
- ASSERT(from != NULL);
- if (from == to) return;
- void* from_value = entries_map_.Remove(from, AddressHash(from));
- if (from_value == NULL) {
- // It may occur that some untracked object moves to an address X and there
- // is a tracked object at that address. In this case we should remove the
- // entry as we know that the object has died.
- void* to_value = entries_map_.Remove(to, AddressHash(to));
- if (to_value != NULL) {
- int to_entry_info_index =
- static_cast<int>(reinterpret_cast<intptr_t>(to_value));
- entries_.at(to_entry_info_index).addr = NULL;
- }
- } else {
- HashMap::Entry* to_entry = entries_map_.Lookup(to, AddressHash(to), true);
- if (to_entry->value != NULL) {
- // We found the existing entry with to address for an old object.
- // Without this operation we will have two EntryInfo's with the same
- // value in addr field. It is bad because later at RemoveDeadEntries
- // one of this entry will be removed with the corresponding entries_map_
- // entry.
- int to_entry_info_index =
- static_cast<int>(reinterpret_cast<intptr_t>(to_entry->value));
- entries_.at(to_entry_info_index).addr = NULL;
- }
- int from_entry_info_index =
- static_cast<int>(reinterpret_cast<intptr_t>(from_value));
- entries_.at(from_entry_info_index).addr = to;
- to_entry->value = from_value;
- }
-}
-
-
-SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
- HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false);
- if (entry == NULL) return 0;
- int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- EntryInfo& entry_info = entries_.at(entry_index);
- ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
- return entry_info.id;
-}
-
-
-SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
- unsigned int size) {
- ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
- HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true);
- if (entry->value != NULL) {
- int entry_index =
- static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- EntryInfo& entry_info = entries_.at(entry_index);
- entry_info.accessed = true;
- entry_info.size = size;
- return entry_info.id;
- }
- entry->value = reinterpret_cast<void*>(entries_.length());
- SnapshotObjectId id = next_id_;
- next_id_ += kObjectIdStep;
- entries_.Add(EntryInfo(id, addr, size));
- ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
- return id;
-}
-
-
-void HeapObjectsMap::StopHeapObjectsTracking() {
- time_intervals_.Clear();
-}
-
-void HeapObjectsMap::UpdateHeapObjectsMap() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "HeapSnapshotsCollection::UpdateHeapObjectsMap");
- HeapIterator iterator(heap_);
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next()) {
- FindOrAddEntry(obj->address(), obj->Size());
- }
- RemoveDeadEntries();
-}
-
-
-SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
- UpdateHeapObjectsMap();
- time_intervals_.Add(TimeInterval(next_id_));
- int prefered_chunk_size = stream->GetChunkSize();
- List<v8::HeapStatsUpdate> stats_buffer;
- ASSERT(!entries_.is_empty());
- EntryInfo* entry_info = &entries_.first();
- EntryInfo* end_entry_info = &entries_.last() + 1;
- for (int time_interval_index = 0;
- time_interval_index < time_intervals_.length();
- ++time_interval_index) {
- TimeInterval& time_interval = time_intervals_[time_interval_index];
- SnapshotObjectId time_interval_id = time_interval.id;
- uint32_t entries_size = 0;
- EntryInfo* start_entry_info = entry_info;
- while (entry_info < end_entry_info && entry_info->id < time_interval_id) {
- entries_size += entry_info->size;
- ++entry_info;
- }
- uint32_t entries_count =
- static_cast<uint32_t>(entry_info - start_entry_info);
- if (time_interval.count != entries_count ||
- time_interval.size != entries_size) {
- stats_buffer.Add(v8::HeapStatsUpdate(
- time_interval_index,
- time_interval.count = entries_count,
- time_interval.size = entries_size));
- if (stats_buffer.length() >= prefered_chunk_size) {
- OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
- &stats_buffer.first(), stats_buffer.length());
- if (result == OutputStream::kAbort) return last_assigned_id();
- stats_buffer.Clear();
- }
- }
- }
- ASSERT(entry_info == end_entry_info);
- if (!stats_buffer.is_empty()) {
- OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
- &stats_buffer.first(), stats_buffer.length());
- if (result == OutputStream::kAbort) return last_assigned_id();
- }
- stream->EndOfStream();
- return last_assigned_id();
-}
-
-
-void HeapObjectsMap::RemoveDeadEntries() {
- ASSERT(entries_.length() > 0 &&
- entries_.at(0).id == 0 &&
- entries_.at(0).addr == NULL);
- int first_free_entry = 1;
- for (int i = 1; i < entries_.length(); ++i) {
- EntryInfo& entry_info = entries_.at(i);
- if (entry_info.accessed) {
- if (first_free_entry != i) {
- entries_.at(first_free_entry) = entry_info;
- }
- entries_.at(first_free_entry).accessed = false;
- HashMap::Entry* entry = entries_map_.Lookup(
- entry_info.addr, AddressHash(entry_info.addr), false);
- ASSERT(entry);
- entry->value = reinterpret_cast<void*>(first_free_entry);
- ++first_free_entry;
- } else {
- if (entry_info.addr) {
- entries_map_.Remove(entry_info.addr, AddressHash(entry_info.addr));
- }
- }
- }
- entries_.Rewind(first_free_entry);
- ASSERT(static_cast<uint32_t>(entries_.length()) - 1 ==
- entries_map_.occupancy());
-}
-
-
-SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
- SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash());
- const char* label = info->GetLabel();
- id ^= StringHasher::HashSequentialString(label,
- static_cast<int>(strlen(label)),
- HEAP->HashSeed());
- intptr_t element_count = info->GetElementCount();
- if (element_count != -1)
- id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
- v8::internal::kZeroHashSeed);
- return id << 1;
-}
-
-
-size_t HeapObjectsMap::GetUsedMemorySize() const {
- return
- sizeof(*this) +
- sizeof(HashMap::Entry) * entries_map_.capacity() +
- GetMemoryUsedByList(entries_) +
- GetMemoryUsedByList(time_intervals_);
-}
-
-
-HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
- : is_tracking_objects_(false),
- snapshots_uids_(HeapSnapshotsMatch),
- token_enumerator_(new TokenEnumerator()),
- ids_(heap) {
-}
-
-
-static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
- delete *snapshot_ptr;
-}
-
-
-HeapSnapshotsCollection::~HeapSnapshotsCollection() {
- delete token_enumerator_;
- snapshots_.Iterate(DeleteHeapSnapshot);
-}
-
-
-HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(HeapSnapshot::Type type,
- const char* name,
- unsigned uid) {
- is_tracking_objects_ = true; // Start watching for heap objects moves.
- return new HeapSnapshot(this, type, name, uid);
-}
-
-
-void HeapSnapshotsCollection::SnapshotGenerationFinished(
- HeapSnapshot* snapshot) {
- ids_.SnapshotGenerationFinished();
- if (snapshot != NULL) {
- snapshots_.Add(snapshot);
- HashMap::Entry* entry =
- snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
- static_cast<uint32_t>(snapshot->uid()),
- true);
- ASSERT(entry->value == NULL);
- entry->value = snapshot;
- }
-}
-
-
-HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) {
- HashMap::Entry* entry = snapshots_uids_.Lookup(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid),
- false);
- return entry != NULL ? reinterpret_cast<HeapSnapshot*>(entry->value) : NULL;
-}
-
-
-void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
- snapshots_.RemoveElement(snapshot);
- unsigned uid = snapshot->uid();
- snapshots_uids_.Remove(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid));
-}
-
-
-Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
- SnapshotObjectId id) {
- // First perform a full GC in order to avoid dead objects.
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "HeapSnapshotsCollection::FindHeapObjectById");
- AssertNoAllocation no_allocation;
- HeapObject* object = NULL;
- HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
- // Make sure that object with the given id is still reachable.
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next()) {
- if (ids_.FindEntry(obj->address()) == id) {
- ASSERT(object == NULL);
- object = obj;
- // Can't break -- kFilterUnreachable requires full heap traversal.
- }
- }
- return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
-}
-
-
-size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
- STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::
- kExpectedHeapSnapshotsCollectionSize ==
- sizeof(HeapSnapshotsCollection)); // NOLINT
- size_t size = sizeof(*this);
- size += names_.GetUsedMemorySize();
- size += ids_.GetUsedMemorySize();
- size += sizeof(HashMap::Entry) * snapshots_uids_.capacity();
- size += GetMemoryUsedByList(snapshots_);
- for (int i = 0; i < snapshots_.length(); ++i) {
- size += snapshots_[i]->RawSnapshotSize();
- }
- return size;
-}
-
-
-HeapEntriesMap::HeapEntriesMap()
- : entries_(HeapThingsMatch) {
-}
-
-
-int HeapEntriesMap::Map(HeapThing thing) {
- HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false);
- if (cache_entry == NULL) return HeapEntry::kNoEntry;
- return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
-}
-
-
-void HeapEntriesMap::Pair(HeapThing thing, int entry) {
- HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true);
- ASSERT(cache_entry->value == NULL);
- cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
-}
-
-
-HeapObjectsSet::HeapObjectsSet()
- : entries_(HeapEntriesMap::HeapThingsMatch) {
-}
-
-
-void HeapObjectsSet::Clear() {
- entries_.Clear();
-}
-
-
-bool HeapObjectsSet::Contains(Object* obj) {
- if (!obj->IsHeapObject()) return false;
- HeapObject* object = HeapObject::cast(obj);
- return entries_.Lookup(object, HeapEntriesMap::Hash(object), false) != NULL;
-}
-
-
-void HeapObjectsSet::Insert(Object* obj) {
- if (!obj->IsHeapObject()) return;
- HeapObject* object = HeapObject::cast(obj);
- entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
-}
-
-
-const char* HeapObjectsSet::GetTag(Object* obj) {
- HeapObject* object = HeapObject::cast(obj);
- HashMap::Entry* cache_entry =
- entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
- return cache_entry != NULL
- ? reinterpret_cast<const char*>(cache_entry->value)
- : NULL;
-}
-
-
-void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
- if (!obj->IsHeapObject()) return;
- HeapObject* object = HeapObject::cast(obj);
- HashMap::Entry* cache_entry =
- entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
- cache_entry->value = const_cast<char*>(tag);
-}
-
-
-HeapObject* const V8HeapExplorer::kInternalRootObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
-HeapObject* const V8HeapExplorer::kGcRootsObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
-HeapObject* const V8HeapExplorer::kFirstGcSubrootObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
-HeapObject* const V8HeapExplorer::kLastGcSubrootObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
-
-
-V8HeapExplorer::V8HeapExplorer(
- HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress,
- v8::HeapProfiler::ObjectNameResolver* resolver)
- : heap_(Isolate::Current()->heap()),
- snapshot_(snapshot),
- collection_(snapshot_->collection()),
- progress_(progress),
- filler_(NULL),
- global_object_name_resolver_(resolver) {
-}
-
-
-V8HeapExplorer::~V8HeapExplorer() {
-}
-
-
-HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
- return AddEntry(reinterpret_cast<HeapObject*>(ptr));
-}
-
-
-HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
- if (object == kInternalRootObject) {
- snapshot_->AddRootEntry();
- return snapshot_->root();
- } else if (object == kGcRootsObject) {
- HeapEntry* entry = snapshot_->AddGcRootsEntry();
- return entry;
- } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
- HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object));
- return entry;
- } else if (object->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(object);
- SharedFunctionInfo* shared = func->shared();
- const char* name = shared->bound() ? "native_bind" :
- collection_->names()->GetName(String::cast(shared->name()));
- return AddEntry(object, HeapEntry::kClosure, name);
- } else if (object->IsJSRegExp()) {
- JSRegExp* re = JSRegExp::cast(object);
- return AddEntry(object,
- HeapEntry::kRegExp,
- collection_->names()->GetName(re->Pattern()));
- } else if (object->IsJSObject()) {
- const char* name = collection_->names()->GetName(
- GetConstructorName(JSObject::cast(object)));
- if (object->IsJSGlobalObject()) {
- const char* tag = objects_tags_.GetTag(object);
- if (tag != NULL) {
- name = collection_->names()->GetFormatted("%s / %s", name, tag);
- }
- }
- return AddEntry(object, HeapEntry::kObject, name);
- } else if (object->IsString()) {
- return AddEntry(object,
- HeapEntry::kString,
- collection_->names()->GetName(String::cast(object)));
- } else if (object->IsCode()) {
- return AddEntry(object, HeapEntry::kCode, "");
- } else if (object->IsSharedFunctionInfo()) {
- String* name = String::cast(SharedFunctionInfo::cast(object)->name());
- return AddEntry(object,
- HeapEntry::kCode,
- collection_->names()->GetName(name));
- } else if (object->IsScript()) {
- Object* name = Script::cast(object)->name();
- return AddEntry(object,
- HeapEntry::kCode,
- name->IsString()
- ? collection_->names()->GetName(String::cast(name))
- : "");
- } else if (object->IsNativeContext()) {
- return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
- } else if (object->IsContext()) {
- return AddEntry(object, HeapEntry::kHidden, "system / Context");
- } else if (object->IsFixedArray() ||
- object->IsFixedDoubleArray() ||
- object->IsByteArray() ||
- object->IsExternalArray()) {
- return AddEntry(object, HeapEntry::kArray, "");
- } else if (object->IsHeapNumber()) {
- return AddEntry(object, HeapEntry::kHeapNumber, "number");
- }
- return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
-}
-
-
-HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
- HeapEntry::Type type,
- const char* name) {
- int object_size = object->Size();
- SnapshotObjectId object_id =
- collection_->GetObjectId(object->address(), object_size);
- return snapshot_->AddEntry(type, name, object_id, object_size);
-}
-
-
-class GcSubrootsEnumerator : public ObjectVisitor {
- public:
- GcSubrootsEnumerator(
- SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
- : filler_(filler),
- explorer_(explorer),
- previous_object_count_(0),
- object_count_(0) {
- }
- void VisitPointers(Object** start, Object** end) {
- object_count_ += end - start;
- }
- void Synchronize(VisitorSynchronization::SyncTag tag) {
- // Skip empty subroots.
- if (previous_object_count_ != object_count_) {
- previous_object_count_ = object_count_;
- filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
- }
- }
- private:
- SnapshotFillerInterface* filler_;
- V8HeapExplorer* explorer_;
- intptr_t previous_object_count_;
- intptr_t object_count_;
-};
-
-
-void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
- filler->AddEntry(kInternalRootObject, this);
- filler->AddEntry(kGcRootsObject, this);
- GcSubrootsEnumerator enumerator(filler, this);
- heap_->IterateRoots(&enumerator, VISIT_ALL);
-}
-
-
-const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
- switch (object->map()->instance_type()) {
- case MAP_TYPE:
- switch (Map::cast(object)->instance_type()) {
-#define MAKE_STRING_MAP_CASE(instance_type, size, name, Name) \
- case instance_type: return "system / Map (" #Name ")";
- STRING_TYPE_LIST(MAKE_STRING_MAP_CASE)
-#undef MAKE_STRING_MAP_CASE
- default: return "system / Map";
- }
- case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell";
- case FOREIGN_TYPE: return "system / Foreign";
- case ODDBALL_TYPE: return "system / Oddball";
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: return "system / "#Name;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- default: return "system";
- }
-}
-
-
-int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
- int objects_count = 0;
- for (HeapObject* obj = iterator->next();
- obj != NULL;
- obj = iterator->next()) {
- objects_count++;
- }
- return objects_count;
-}
-
-
-class IndexedReferencesExtractor : public ObjectVisitor {
- public:
- IndexedReferencesExtractor(V8HeapExplorer* generator,
- HeapObject* parent_obj,
- int parent)
- : generator_(generator),
- parent_obj_(parent_obj),
- parent_(parent),
- next_index_(1) {
- }
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if (CheckVisitedAndUnmark(p)) continue;
- generator_->SetHiddenReference(parent_obj_, parent_, next_index_++, *p);
- }
- }
- static void MarkVisitedField(HeapObject* obj, int offset) {
- if (offset < 0) return;
- Address field = obj->address() + offset;
- ASSERT(!Memory::Object_at(field)->IsFailure());
- ASSERT(Memory::Object_at(field)->IsHeapObject());
- *field |= kFailureTag;
- }
-
- private:
- bool CheckVisitedAndUnmark(Object** field) {
- if ((*field)->IsFailure()) {
- intptr_t untagged = reinterpret_cast<intptr_t>(*field) & ~kFailureTagMask;
- *field = reinterpret_cast<Object*>(untagged | kHeapObjectTag);
- ASSERT((*field)->IsHeapObject());
- return true;
- }
- return false;
- }
- V8HeapExplorer* generator_;
- HeapObject* parent_obj_;
- int parent_;
- int next_index_;
-};
-
-
-void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
- HeapEntry* heap_entry = GetEntry(obj);
- if (heap_entry == NULL) return; // No interest in this object.
- int entry = heap_entry->index();
-
- bool extract_indexed_refs = true;
- if (obj->IsJSGlobalProxy()) {
- ExtractJSGlobalProxyReferences(JSGlobalProxy::cast(obj));
- } else if (obj->IsJSObject()) {
- ExtractJSObjectReferences(entry, JSObject::cast(obj));
- } else if (obj->IsString()) {
- ExtractStringReferences(entry, String::cast(obj));
- } else if (obj->IsContext()) {
- ExtractContextReferences(entry, Context::cast(obj));
- } else if (obj->IsMap()) {
- ExtractMapReferences(entry, Map::cast(obj));
- } else if (obj->IsSharedFunctionInfo()) {
- ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj));
- } else if (obj->IsScript()) {
- ExtractScriptReferences(entry, Script::cast(obj));
- } else if (obj->IsCodeCache()) {
- ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
- } else if (obj->IsCode()) {
- ExtractCodeReferences(entry, Code::cast(obj));
- } else if (obj->IsJSGlobalPropertyCell()) {
- ExtractJSGlobalPropertyCellReferences(
- entry, JSGlobalPropertyCell::cast(obj));
- extract_indexed_refs = false;
- }
- if (extract_indexed_refs) {
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
- }
-}
-
-
-void V8HeapExplorer::ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy) {
- // We need to reference JS global objects from snapshot's root.
- // We use JSGlobalProxy because this is what embedder (e.g. browser)
- // uses for the global object.
- Object* object = proxy->map()->prototype();
- bool is_debug_object = false;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- is_debug_object = object->IsGlobalObject() &&
- Isolate::Current()->debug()->IsDebugGlobal(GlobalObject::cast(object));
-#endif
- if (!is_debug_object) {
- SetUserGlobalReference(object);
- }
-}
-
-
-void V8HeapExplorer::ExtractJSObjectReferences(
- int entry, JSObject* js_obj) {
- HeapObject* obj = js_obj;
- ExtractClosureReferences(js_obj, entry);
- ExtractPropertyReferences(js_obj, entry);
- ExtractElementReferences(js_obj, entry);
- ExtractInternalReferences(js_obj, entry);
- SetPropertyReference(
- obj, entry, heap_->proto_string(), js_obj->GetPrototype());
- if (obj->IsJSFunction()) {
- JSFunction* js_fun = JSFunction::cast(js_obj);
- Object* proto_or_map = js_fun->prototype_or_initial_map();
- if (!proto_or_map->IsTheHole()) {
- if (!proto_or_map->IsMap()) {
- SetPropertyReference(
- obj, entry,
- heap_->prototype_string(), proto_or_map,
- NULL,
- JSFunction::kPrototypeOrInitialMapOffset);
- } else {
- SetPropertyReference(
- obj, entry,
- heap_->prototype_string(), js_fun->prototype());
- }
- }
- SharedFunctionInfo* shared_info = js_fun->shared();
- // JSFunction has either bindings or literals and never both.
- bool bound = shared_info->bound();
- TagObject(js_fun->literals_or_bindings(),
- bound ? "(function bindings)" : "(function literals)");
- SetInternalReference(js_fun, entry,
- bound ? "bindings" : "literals",
- js_fun->literals_or_bindings(),
- JSFunction::kLiteralsOffset);
- TagObject(shared_info, "(shared function info)");
- SetInternalReference(js_fun, entry,
- "shared", shared_info,
- JSFunction::kSharedFunctionInfoOffset);
- TagObject(js_fun->unchecked_context(), "(context)");
- SetInternalReference(js_fun, entry,
- "context", js_fun->unchecked_context(),
- JSFunction::kContextOffset);
- for (int i = JSFunction::kNonWeakFieldsEndOffset;
- i < JSFunction::kSize;
- i += kPointerSize) {
- SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
- }
- } else if (obj->IsGlobalObject()) {
- GlobalObject* global_obj = GlobalObject::cast(obj);
- SetInternalReference(global_obj, entry,
- "builtins", global_obj->builtins(),
- GlobalObject::kBuiltinsOffset);
- SetInternalReference(global_obj, entry,
- "native_context", global_obj->native_context(),
- GlobalObject::kNativeContextOffset);
- SetInternalReference(global_obj, entry,
- "global_receiver", global_obj->global_receiver(),
- GlobalObject::kGlobalReceiverOffset);
- }
- TagObject(js_obj->properties(), "(object properties)");
- SetInternalReference(obj, entry,
- "properties", js_obj->properties(),
- JSObject::kPropertiesOffset);
- TagObject(js_obj->elements(), "(object elements)");
- SetInternalReference(obj, entry,
- "elements", js_obj->elements(),
- JSObject::kElementsOffset);
-}
-
-
-void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
- if (string->IsConsString()) {
- ConsString* cs = ConsString::cast(string);
- SetInternalReference(cs, entry, "first", cs->first(),
- ConsString::kFirstOffset);
- SetInternalReference(cs, entry, "second", cs->second(),
- ConsString::kSecondOffset);
- } else if (string->IsSlicedString()) {
- SlicedString* ss = SlicedString::cast(string);
- SetInternalReference(ss, entry, "parent", ss->parent(),
- SlicedString::kParentOffset);
- }
-}
-
-
-void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
-#define EXTRACT_CONTEXT_FIELD(index, type, name) \
- SetInternalReference(context, entry, #name, context->get(Context::index), \
- FixedArray::OffsetOfElementAt(Context::index));
- EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
- EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
- EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
- EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, GlobalObject, global);
- if (context->IsNativeContext()) {
- TagObject(context->jsfunction_result_caches(),
- "(context func. result caches)");
- TagObject(context->normalized_map_cache(), "(context norm. map cache)");
- TagObject(context->runtime_context(), "(runtime context)");
- TagObject(context->embedder_data(), "(context data)");
- NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
-#undef EXTRACT_CONTEXT_FIELD
- for (int i = Context::FIRST_WEAK_SLOT;
- i < Context::NATIVE_CONTEXT_SLOTS;
- ++i) {
- SetWeakReference(context, entry, i, context->get(i),
- FixedArray::OffsetOfElementAt(i));
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
- SetInternalReference(map, entry,
- "prototype", map->prototype(), Map::kPrototypeOffset);
- SetInternalReference(map, entry,
- "constructor", map->constructor(),
- Map::kConstructorOffset);
- if (map->HasTransitionArray()) {
- TransitionArray* transitions = map->transitions();
-
- Object* back_pointer = transitions->back_pointer_storage();
- TagObject(transitions->back_pointer_storage(), "(back pointer)");
- SetInternalReference(transitions, entry,
- "backpointer", back_pointer,
- TransitionArray::kBackPointerStorageOffset);
- IndexedReferencesExtractor transitions_refs(this, transitions, entry);
- transitions->Iterate(&transitions_refs);
-
- TagObject(transitions, "(transition array)");
- SetInternalReference(map, entry,
- "transitions", transitions,
- Map::kTransitionsOrBackPointerOffset);
- } else {
- Object* back_pointer = map->GetBackPointer();
- TagObject(back_pointer, "(back pointer)");
- SetInternalReference(map, entry,
- "backpointer", back_pointer,
- Map::kTransitionsOrBackPointerOffset);
- }
- DescriptorArray* descriptors = map->instance_descriptors();
- TagObject(descriptors, "(map descriptors)");
- SetInternalReference(map, entry,
- "descriptors", descriptors,
- Map::kDescriptorsOffset);
-
- SetInternalReference(map, entry,
- "code_cache", map->code_cache(),
- Map::kCodeCacheOffset);
-}
-
-
-void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
- int entry, SharedFunctionInfo* shared) {
- HeapObject* obj = shared;
- SetInternalReference(obj, entry,
- "name", shared->name(),
- SharedFunctionInfo::kNameOffset);
- TagObject(shared->code(), "(code)");
- SetInternalReference(obj, entry,
- "code", shared->code(),
- SharedFunctionInfo::kCodeOffset);
- TagObject(shared->scope_info(), "(function scope info)");
- SetInternalReference(obj, entry,
- "scope_info", shared->scope_info(),
- SharedFunctionInfo::kScopeInfoOffset);
- SetInternalReference(obj, entry,
- "instance_class_name", shared->instance_class_name(),
- SharedFunctionInfo::kInstanceClassNameOffset);
- SetInternalReference(obj, entry,
- "script", shared->script(),
- SharedFunctionInfo::kScriptOffset);
- TagObject(shared->construct_stub(), "(code)");
- SetInternalReference(obj, entry,
- "construct_stub", shared->construct_stub(),
- SharedFunctionInfo::kConstructStubOffset);
- SetInternalReference(obj, entry,
- "function_data", shared->function_data(),
- SharedFunctionInfo::kFunctionDataOffset);
- SetInternalReference(obj, entry,
- "debug_info", shared->debug_info(),
- SharedFunctionInfo::kDebugInfoOffset);
- SetInternalReference(obj, entry,
- "inferred_name", shared->inferred_name(),
- SharedFunctionInfo::kInferredNameOffset);
- SetInternalReference(obj, entry,
- "this_property_assignments",
- shared->this_property_assignments(),
- SharedFunctionInfo::kThisPropertyAssignmentsOffset);
- SetWeakReference(obj, entry,
- 1, shared->initial_map(),
- SharedFunctionInfo::kInitialMapOffset);
-}
-
-
-void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
- HeapObject* obj = script;
- SetInternalReference(obj, entry,
- "source", script->source(),
- Script::kSourceOffset);
- SetInternalReference(obj, entry,
- "name", script->name(),
- Script::kNameOffset);
- SetInternalReference(obj, entry,
- "data", script->data(),
- Script::kDataOffset);
- SetInternalReference(obj, entry,
- "context_data", script->context_data(),
- Script::kContextOffset);
- TagObject(script->line_ends(), "(script line ends)");
- SetInternalReference(obj, entry,
- "line_ends", script->line_ends(),
- Script::kLineEndsOffset);
-}
-
-
-void V8HeapExplorer::ExtractCodeCacheReferences(
- int entry, CodeCache* code_cache) {
- TagObject(code_cache->default_cache(), "(default code cache)");
- SetInternalReference(code_cache, entry,
- "default_cache", code_cache->default_cache(),
- CodeCache::kDefaultCacheOffset);
- TagObject(code_cache->normal_type_cache(), "(code type cache)");
- SetInternalReference(code_cache, entry,
- "type_cache", code_cache->normal_type_cache(),
- CodeCache::kNormalTypeCacheOffset);
-}
-
-
-void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
- TagObject(code->relocation_info(), "(code relocation info)");
- SetInternalReference(code, entry,
- "relocation_info", code->relocation_info(),
- Code::kRelocationInfoOffset);
- SetInternalReference(code, entry,
- "handler_table", code->handler_table(),
- Code::kHandlerTableOffset);
- TagObject(code->deoptimization_data(), "(code deopt data)");
- SetInternalReference(code, entry,
- "deoptimization_data", code->deoptimization_data(),
- Code::kDeoptimizationDataOffset);
- if (code->kind() == Code::FUNCTION) {
- SetInternalReference(code, entry,
- "type_feedback_info", code->type_feedback_info(),
- Code::kTypeFeedbackInfoOffset);
- }
- SetInternalReference(code, entry,
- "gc_metadata", code->gc_metadata(),
- Code::kGCMetadataOffset);
-}
-
-
-void V8HeapExplorer::ExtractJSGlobalPropertyCellReferences(
- int entry, JSGlobalPropertyCell* cell) {
- SetInternalReference(cell, entry, "value", cell->value());
-}
-
-
-void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
- if (!js_obj->IsJSFunction()) return;
-
- JSFunction* func = JSFunction::cast(js_obj);
- if (func->shared()->bound()) {
- FixedArray* bindings = func->function_bindings();
- SetNativeBindReference(js_obj, entry, "bound_this",
- bindings->get(JSFunction::kBoundThisIndex));
- SetNativeBindReference(js_obj, entry, "bound_function",
- bindings->get(JSFunction::kBoundFunctionIndex));
- for (int i = JSFunction::kBoundArgumentsStartIndex;
- i < bindings->length(); i++) {
- const char* reference_name = collection_->names()->GetFormatted(
- "bound_argument_%d",
- i - JSFunction::kBoundArgumentsStartIndex);
- SetNativeBindReference(js_obj, entry, reference_name,
- bindings->get(i));
- }
- } else {
- Context* context = func->context()->declaration_context();
- ScopeInfo* scope_info = context->closure()->shared()->scope_info();
- // Add context allocated locals.
- int context_locals = scope_info->ContextLocalCount();
- for (int i = 0; i < context_locals; ++i) {
- String* local_name = scope_info->ContextLocalName(i);
- int idx = Context::MIN_CONTEXT_SLOTS + i;
- SetClosureReference(js_obj, entry, local_name, context->get(idx));
- }
-
- // Add function variable.
- if (scope_info->HasFunctionName()) {
- String* name = scope_info->FunctionName();
- VariableMode mode;
- int idx = scope_info->FunctionContextSlotIndex(name, &mode);
- if (idx >= 0) {
- SetClosureReference(js_obj, entry, name, context->get(idx));
- }
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
- if (js_obj->HasFastProperties()) {
- DescriptorArray* descs = js_obj->map()->instance_descriptors();
- int real_size = js_obj->map()->NumberOfOwnDescriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->GetDetails(i).descriptor_index() > real_size) continue;
- switch (descs->GetType(i)) {
- case FIELD: {
- int index = descs->GetFieldIndex(i);
-
- String* k = descs->GetKey(i);
- if (index < js_obj->map()->inobject_properties()) {
- Object* value = js_obj->InObjectPropertyAt(index);
- if (k != heap_->hidden_string()) {
- SetPropertyReference(
- js_obj, entry,
- k, value,
- NULL,
- js_obj->GetInObjectPropertyOffset(index));
- } else {
- TagObject(value, "(hidden properties)");
- SetInternalReference(
- js_obj, entry,
- "hidden_properties", value,
- js_obj->GetInObjectPropertyOffset(index));
- }
- } else {
- Object* value = js_obj->FastPropertyAt(index);
- if (k != heap_->hidden_string()) {
- SetPropertyReference(js_obj, entry, k, value);
- } else {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value);
- }
- }
- break;
- }
- case CONSTANT_FUNCTION:
- SetPropertyReference(
- js_obj, entry,
- descs->GetKey(i), descs->GetConstantFunction(i));
- break;
- case CALLBACKS: {
- Object* callback_obj = descs->GetValue(i);
- if (callback_obj->IsAccessorPair()) {
- AccessorPair* accessors = AccessorPair::cast(callback_obj);
- if (Object* getter = accessors->getter()) {
- SetPropertyReference(js_obj, entry, descs->GetKey(i),
- getter, "get-%s");
- }
- if (Object* setter = accessors->setter()) {
- SetPropertyReference(js_obj, entry, descs->GetKey(i),
- setter, "set-%s");
- }
- }
- break;
- }
- case NORMAL: // only in slow mode
- case HANDLER: // only in lookup results, not in descriptors
- case INTERCEPTOR: // only in lookup results, not in descriptors
- break;
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- }
- } else {
- StringDictionary* dictionary = js_obj->property_dictionary();
- int length = dictionary->Capacity();
- for (int i = 0; i < length; ++i) {
- Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
- Object* target = dictionary->ValueAt(i);
- // We assume that global objects can only have slow properties.
- Object* value = target->IsJSGlobalPropertyCell()
- ? JSGlobalPropertyCell::cast(target)->value()
- : target;
- if (k != heap_->hidden_string()) {
- SetPropertyReference(js_obj, entry, String::cast(k), value);
- } else {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value);
- }
- }
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
- if (js_obj->HasFastObjectElements()) {
- FixedArray* elements = FixedArray::cast(js_obj->elements());
- int length = js_obj->IsJSArray() ?
- Smi::cast(JSArray::cast(js_obj)->length())->value() :
- elements->length();
- for (int i = 0; i < length; ++i) {
- if (!elements->get(i)->IsTheHole()) {
- SetElementReference(js_obj, entry, i, elements->get(i));
- }
- }
- } else if (js_obj->HasDictionaryElements()) {
- SeededNumberDictionary* dictionary = js_obj->element_dictionary();
- int length = dictionary->Capacity();
- for (int i = 0; i < length; ++i) {
- Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
- ASSERT(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
- SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
- }
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) {
- int length = js_obj->GetInternalFieldCount();
- for (int i = 0; i < length; ++i) {
- Object* o = js_obj->GetInternalField(i);
- SetInternalReference(
- js_obj, entry, i, o, js_obj->GetInternalFieldOffset(i));
- }
-}
-
-
-String* V8HeapExplorer::GetConstructorName(JSObject* object) {
- Heap* heap = object->GetHeap();
- if (object->IsJSFunction()) return heap->closure_string();
- String* constructor_name = object->constructor_name();
- if (constructor_name == heap->Object_string()) {
- // Look up an immediate "constructor" property, if it is a function,
- // return its name. This is for instances of binding objects, which
- // have prototype constructor type "Object".
- Object* constructor_prop = NULL;
- LookupResult result(heap->isolate());
- object->LocalLookupRealNamedProperty(heap->constructor_string(), &result);
- if (!result.IsFound()) return object->constructor_name();
-
- constructor_prop = result.GetLazyValue();
- if (constructor_prop->IsJSFunction()) {
- Object* maybe_name =
- JSFunction::cast(constructor_prop)->shared()->name();
- if (maybe_name->IsString()) {
- String* name = String::cast(maybe_name);
- if (name->length() > 0) return name;
- }
- }
- }
- return object->constructor_name();
-}
-
-
-HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
- if (!obj->IsHeapObject()) return NULL;
- return filler_->FindOrAddEntry(obj, this);
-}
-
-
-class RootsReferencesExtractor : public ObjectVisitor {
- private:
- struct IndexTag {
- IndexTag(int index, VisitorSynchronization::SyncTag tag)
- : index(index), tag(tag) { }
- int index;
- VisitorSynchronization::SyncTag tag;
- };
-
- public:
- RootsReferencesExtractor()
- : collecting_all_references_(false),
- previous_reference_count_(0) {
- }
-
- void VisitPointers(Object** start, Object** end) {
- if (collecting_all_references_) {
- for (Object** p = start; p < end; p++) all_references_.Add(*p);
- } else {
- for (Object** p = start; p < end; p++) strong_references_.Add(*p);
- }
- }
-
- void SetCollectingAllReferences() { collecting_all_references_ = true; }
-
- void FillReferences(V8HeapExplorer* explorer) {
- ASSERT(strong_references_.length() <= all_references_.length());
- for (int i = 0; i < reference_tags_.length(); ++i) {
- explorer->SetGcRootsReference(reference_tags_[i].tag);
- }
- int strong_index = 0, all_index = 0, tags_index = 0;
- while (all_index < all_references_.length()) {
- if (strong_index < strong_references_.length() &&
- strong_references_[strong_index] == all_references_[all_index]) {
- explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
- false,
- all_references_[all_index++]);
- ++strong_index;
- } else {
- explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
- true,
- all_references_[all_index++]);
- }
- if (reference_tags_[tags_index].index == all_index) ++tags_index;
- }
- }
-
- void Synchronize(VisitorSynchronization::SyncTag tag) {
- if (collecting_all_references_ &&
- previous_reference_count_ != all_references_.length()) {
- previous_reference_count_ = all_references_.length();
- reference_tags_.Add(IndexTag(previous_reference_count_, tag));
- }
- }
-
- private:
- bool collecting_all_references_;
- List<Object*> strong_references_;
- List<Object*> all_references_;
- int previous_reference_count_;
- List<IndexTag> reference_tags_;
-};
-
-
-bool V8HeapExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
- HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
-
- filler_ = filler;
- bool interrupted = false;
-
- // Heap iteration with filtering must be finished in any case.
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next(), progress_->ProgressStep()) {
- if (!interrupted) {
- ExtractReferences(obj);
- if (!progress_->ProgressReport(false)) interrupted = true;
- }
- }
- if (interrupted) {
- filler_ = NULL;
- return false;
- }
-
- SetRootGcRootsReference();
- RootsReferencesExtractor extractor;
- heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
- extractor.SetCollectingAllReferences();
- heap_->IterateRoots(&extractor, VISIT_ALL);
- extractor.FillReferences(this);
- filler_ = NULL;
- return progress_->ProgressReport(true);
-}
-
-
-bool V8HeapExplorer::IsEssentialObject(Object* object) {
- return object->IsHeapObject()
- && !object->IsOddball()
- && object != heap_->empty_byte_array()
- && object != heap_->empty_fixed_array()
- && object != heap_->empty_descriptor_array()
- && object != heap_->fixed_array_map()
- && object != heap_->global_property_cell_map()
- && object != heap_->shared_function_info_map()
- && object != heap_->free_space_map()
- && object != heap_->one_pointer_filler_map()
- && object != heap_->two_pointer_filler_map();
-}
-
-
-void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj,
- int parent_entry,
- String* reference_name,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
- parent_entry,
- collection_->names()->GetName(reference_name),
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
- int parent_entry,
- const char* reference_name,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetNamedReference(HeapGraphEdge::kShortcut,
- parent_entry,
- reference_name,
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetIndexedReference(HeapGraphEdge::kElement,
- parent_entry,
- index,
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
- int parent_entry,
- const char* reference_name,
- Object* child_obj,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry == NULL) return;
- if (IsEssentialObject(child_obj)) {
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- parent_entry,
- reference_name,
- child_entry);
- }
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
-}
-
-
-void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry == NULL) return;
- if (IsEssentialObject(child_obj)) {
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- parent_entry,
- collection_->names()->GetName(index),
- child_entry);
- }
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
-}
-
-
-void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL && IsEssentialObject(child_obj)) {
- filler_->SetIndexedReference(HeapGraphEdge::kHidden,
- parent_entry,
- index,
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetIndexedReference(HeapGraphEdge::kWeak,
- parent_entry,
- index,
- child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
- }
-}
-
-
-void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
- int parent_entry,
- String* reference_name,
- Object* child_obj,
- const char* name_format_string,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- HeapGraphEdge::Type type = reference_name->length() > 0 ?
- HeapGraphEdge::kProperty : HeapGraphEdge::kInternal;
- const char* name = name_format_string != NULL ?
- collection_->names()->GetFormatted(
- name_format_string,
- *reference_name->ToCString(DISALLOW_NULLS,
- ROBUST_STRING_TRAVERSAL)) :
- collection_->names()->GetName(reference_name);
-
- filler_->SetNamedReference(type,
- parent_entry,
- name,
- child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
- }
-}
-
-
-void V8HeapExplorer::SetRootGcRootsReference() {
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- snapshot_->root()->index(),
- snapshot_->gc_roots());
-}
-
-
-void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- ASSERT(child_entry != NULL);
- filler_->SetNamedAutoIndexReference(
- HeapGraphEdge::kShortcut,
- snapshot_->root()->index(),
- child_entry);
-}
-
-
-void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- snapshot_->gc_roots()->index(),
- snapshot_->gc_subroot(tag));
-}
-
-
-void V8HeapExplorer::SetGcSubrootReference(
- VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- const char* name = GetStrongGcSubrootName(child_obj);
- if (name != NULL) {
- filler_->SetNamedReference(
- HeapGraphEdge::kInternal,
- snapshot_->gc_subroot(tag)->index(),
- name,
- child_entry);
- } else {
- filler_->SetIndexedAutoIndexReference(
- is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
- snapshot_->gc_subroot(tag)->index(),
- child_entry);
- }
- }
-}
-
-
-const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
- if (strong_gc_subroot_names_.is_empty()) {
-#define NAME_ENTRY(name) strong_gc_subroot_names_.SetTag(heap_->name(), #name);
-#define ROOT_NAME(type, name, camel_name) NAME_ENTRY(name)
- STRONG_ROOT_LIST(ROOT_NAME)
-#undef ROOT_NAME
-#define STRUCT_MAP_NAME(NAME, Name, name) NAME_ENTRY(name##_map)
- STRUCT_LIST(STRUCT_MAP_NAME)
-#undef STRUCT_MAP_NAME
-#define STRING_NAME(name, str) NAME_ENTRY(name)
- INTERNALIZED_STRING_LIST(STRING_NAME)
-#undef STRING_NAME
-#undef NAME_ENTRY
- CHECK(!strong_gc_subroot_names_.is_empty());
- }
- return strong_gc_subroot_names_.GetTag(object);
-}
-
-
-void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
- if (IsEssentialObject(obj)) {
- HeapEntry* entry = GetEntry(obj);
- if (entry->name()[0] == '\0') {
- entry->set_name(tag);
- }
- }
-}
-
-
-class GlobalObjectsEnumerator : public ObjectVisitor {
- public:
- virtual void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsNativeContext()) {
- Context* context = Context::cast(*p);
- JSObject* proxy = context->global_proxy();
- if (proxy->IsJSGlobalProxy()) {
- Object* global = proxy->map()->prototype();
- if (global->IsJSGlobalObject()) {
- objects_.Add(Handle<JSGlobalObject>(JSGlobalObject::cast(global)));
- }
- }
- }
- }
- }
- int count() { return objects_.length(); }
- Handle<JSGlobalObject>& at(int i) { return objects_[i]; }
-
- private:
- List<Handle<JSGlobalObject> > objects_;
-};
-
-
-// Modifies heap. Must not be run during heap traversal.
-void V8HeapExplorer::TagGlobalObjects() {
- Isolate* isolate = Isolate::Current();
- HandleScope scope(isolate);
- GlobalObjectsEnumerator enumerator;
- isolate->global_handles()->IterateAllRoots(&enumerator);
- const char** urls = NewArray<const char*>(enumerator.count());
- for (int i = 0, l = enumerator.count(); i < l; ++i) {
- if (global_object_name_resolver_) {
- HandleScope scope(isolate);
- Handle<JSGlobalObject> global_obj = enumerator.at(i);
- urls[i] = global_object_name_resolver_->GetName(
- Utils::ToLocal(Handle<JSObject>::cast(global_obj)));
- } else {
- urls[i] = NULL;
- }
- }
-
- AssertNoAllocation no_allocation;
- for (int i = 0, l = enumerator.count(); i < l; ++i) {
- objects_tags_.SetTag(*enumerator.at(i), urls[i]);
- }
-
- DeleteArray(urls);
-}
-
-
-class GlobalHandlesExtractor : public ObjectVisitor {
- public:
- explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
- : explorer_(explorer) {}
- virtual ~GlobalHandlesExtractor() {}
- virtual void VisitPointers(Object** start, Object** end) {
- UNREACHABLE();
- }
- virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {
- explorer_->VisitSubtreeWrapper(p, class_id);
- }
- private:
- NativeObjectsExplorer* explorer_;
-};
-
-
-class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
- public:
- BasicHeapEntriesAllocator(
- HeapSnapshot* snapshot,
- HeapEntry::Type entries_type)
- : snapshot_(snapshot),
- collection_(snapshot_->collection()),
- entries_type_(entries_type) {
- }
- virtual HeapEntry* AllocateEntry(HeapThing ptr);
- private:
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- HeapEntry::Type entries_type_;
-};
-
-
-HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
- v8::RetainedObjectInfo* info = reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
- intptr_t elements = info->GetElementCount();
- intptr_t size = info->GetSizeInBytes();
- const char* name = elements != -1
- ? collection_->names()->GetFormatted(
- "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
- : collection_->names()->GetCopy(info->GetLabel());
- return snapshot_->AddEntry(
- entries_type_,
- name,
- HeapObjectsMap::GenerateId(info),
- size != -1 ? static_cast<int>(size) : 0);
-}
-
-
-NativeObjectsExplorer::NativeObjectsExplorer(
- HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
- : snapshot_(snapshot),
- collection_(snapshot_->collection()),
- progress_(progress),
- embedder_queried_(false),
- objects_by_info_(RetainedInfosMatch),
- native_groups_(StringsMatch),
- filler_(NULL) {
- synthetic_entries_allocator_ =
- new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic);
- native_entries_allocator_ =
- new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative);
-}
-
-
-NativeObjectsExplorer::~NativeObjectsExplorer() {
- for (HashMap::Entry* p = objects_by_info_.Start();
- p != NULL;
- p = objects_by_info_.Next(p)) {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
- info->Dispose();
- List<HeapObject*>* objects =
- reinterpret_cast<List<HeapObject*>* >(p->value);
- delete objects;
- }
- for (HashMap::Entry* p = native_groups_.Start();
- p != NULL;
- p = native_groups_.Next(p)) {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
- info->Dispose();
- }
- delete synthetic_entries_allocator_;
- delete native_entries_allocator_;
-}
-
-
-int NativeObjectsExplorer::EstimateObjectsCount() {
- FillRetainedObjects();
- return objects_by_info_.occupancy();
-}
-
-
-void NativeObjectsExplorer::FillRetainedObjects() {
- if (embedder_queried_) return;
- Isolate* isolate = Isolate::Current();
- const GCType major_gc_type = kGCTypeMarkSweepCompact;
- // Record objects that are joined into ObjectGroups.
- isolate->heap()->CallGCPrologueCallbacks(major_gc_type);
- List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
- for (int i = 0; i < groups->length(); ++i) {
- ObjectGroup* group = groups->at(i);
- if (group->info_ == NULL) continue;
- List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info_);
- for (size_t j = 0; j < group->length_; ++j) {
- HeapObject* obj = HeapObject::cast(*group->objects_[j]);
- list->Add(obj);
- in_groups_.Insert(obj);
- }
- group->info_ = NULL; // Acquire info object ownership.
- }
- isolate->global_handles()->RemoveObjectGroups();
- isolate->heap()->CallGCEpilogueCallbacks(major_gc_type);
- // Record objects that are not in ObjectGroups, but have class ID.
- GlobalHandlesExtractor extractor(this);
- isolate->global_handles()->IterateAllRootsWithClassIds(&extractor);
- embedder_queried_ = true;
-}
-
-void NativeObjectsExplorer::FillImplicitReferences() {
- Isolate* isolate = Isolate::Current();
- List<ImplicitRefGroup*>* groups =
- isolate->global_handles()->implicit_ref_groups();
- for (int i = 0; i < groups->length(); ++i) {
- ImplicitRefGroup* group = groups->at(i);
- HeapObject* parent = *group->parent_;
- int parent_entry =
- filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
- ASSERT(parent_entry != HeapEntry::kNoEntry);
- Object*** children = group->children_;
- for (size_t j = 0; j < group->length_; ++j) {
- Object* child = *children[j];
- HeapEntry* child_entry =
- filler_->FindOrAddEntry(child, native_entries_allocator_);
- filler_->SetNamedReference(
- HeapGraphEdge::kInternal,
- parent_entry,
- "native",
- child_entry);
- }
- }
- isolate->global_handles()->RemoveImplicitRefGroups();
-}
-
-List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
- v8::RetainedObjectInfo* info) {
- HashMap::Entry* entry =
- objects_by_info_.Lookup(info, InfoHash(info), true);
- if (entry->value != NULL) {
- info->Dispose();
- } else {
- entry->value = new List<HeapObject*>(4);
- }
- return reinterpret_cast<List<HeapObject*>* >(entry->value);
-}
-
-
-bool NativeObjectsExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
- filler_ = filler;
- FillRetainedObjects();
- FillImplicitReferences();
- if (EstimateObjectsCount() > 0) {
- for (HashMap::Entry* p = objects_by_info_.Start();
- p != NULL;
- p = objects_by_info_.Next(p)) {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
- SetNativeRootReference(info);
- List<HeapObject*>* objects =
- reinterpret_cast<List<HeapObject*>* >(p->value);
- for (int i = 0; i < objects->length(); ++i) {
- SetWrapperNativeReferences(objects->at(i), info);
- }
- }
- SetRootNativeRootsReference();
- }
- filler_ = NULL;
- return true;
-}
-
-
-class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo {
- public:
- explicit NativeGroupRetainedObjectInfo(const char* label)
- : disposed_(false),
- hash_(reinterpret_cast<intptr_t>(label)),
- label_(label) {
- }
-
- virtual ~NativeGroupRetainedObjectInfo() {}
- virtual void Dispose() {
- CHECK(!disposed_);
- disposed_ = true;
- delete this;
- }
- virtual bool IsEquivalent(RetainedObjectInfo* other) {
- return hash_ == other->GetHash() && !strcmp(label_, other->GetLabel());
- }
- virtual intptr_t GetHash() { return hash_; }
- virtual const char* GetLabel() { return label_; }
-
- private:
- bool disposed_;
- intptr_t hash_;
- const char* label_;
-};
-
-
-NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
- const char* label) {
- const char* label_copy = collection_->names()->GetCopy(label);
- uint32_t hash = StringHasher::HashSequentialString(
- label_copy,
- static_cast<int>(strlen(label_copy)),
- HEAP->HashSeed());
- HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
- hash, true);
- if (entry->value == NULL) {
- entry->value = new NativeGroupRetainedObjectInfo(label);
- }
- return static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
-}
-
-
-void NativeObjectsExplorer::SetNativeRootReference(
- v8::RetainedObjectInfo* info) {
- HeapEntry* child_entry =
- filler_->FindOrAddEntry(info, native_entries_allocator_);
- ASSERT(child_entry != NULL);
- NativeGroupRetainedObjectInfo* group_info =
- FindOrAddGroupInfo(info->GetGroupLabel());
- HeapEntry* group_entry =
- filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_);
- filler_->SetNamedAutoIndexReference(
- HeapGraphEdge::kInternal,
- group_entry->index(),
- child_entry);
-}
-
-
-void NativeObjectsExplorer::SetWrapperNativeReferences(
- HeapObject* wrapper, v8::RetainedObjectInfo* info) {
- HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
- ASSERT(wrapper_entry != NULL);
- HeapEntry* info_entry =
- filler_->FindOrAddEntry(info, native_entries_allocator_);
- ASSERT(info_entry != NULL);
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- wrapper_entry->index(),
- "native",
- info_entry);
- filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
- info_entry->index(),
- wrapper_entry);
-}
-
-
-void NativeObjectsExplorer::SetRootNativeRootsReference() {
- for (HashMap::Entry* entry = native_groups_.Start();
- entry;
- entry = native_groups_.Next(entry)) {
- NativeGroupRetainedObjectInfo* group_info =
- static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
- HeapEntry* group_entry =
- filler_->FindOrAddEntry(group_info, native_entries_allocator_);
- ASSERT(group_entry != NULL);
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- snapshot_->root()->index(),
- group_entry);
- }
-}
-
-
-void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
- if (in_groups_.Contains(*p)) return;
- Isolate* isolate = Isolate::Current();
- v8::RetainedObjectInfo* info =
- isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
- if (info == NULL) return;
- GetListMaybeDisposeInfo(info)->Add(HeapObject::cast(*p));
-}
-
-
-class SnapshotFiller : public SnapshotFillerInterface {
- public:
- explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
- : snapshot_(snapshot),
- collection_(snapshot->collection()),
- entries_(entries) { }
- HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = allocator->AllocateEntry(ptr);
- entries_->Pair(ptr, entry->index());
- return entry;
- }
- HeapEntry* FindEntry(HeapThing ptr) {
- int index = entries_->Map(ptr);
- return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
- }
- HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = FindEntry(ptr);
- return entry != NULL ? entry : AddEntry(ptr, allocator);
- }
- void SetIndexedReference(HeapGraphEdge::Type type,
- int parent,
- int index,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetNamedReference(HeapGraphEdge::Type type,
- int parent,
- const char* reference_name,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetNamedReference(type, reference_name, child_entry);
- }
- void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetNamedReference(
- type,
- collection_->names()->GetName(index),
- child_entry);
- }
-
- private:
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- HeapEntriesMap* entries_;
-};
-
-
-HeapSnapshotGenerator::HeapSnapshotGenerator(
- HeapSnapshot* snapshot,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver,
- Heap* heap)
- : snapshot_(snapshot),
- control_(control),
- v8_heap_explorer_(snapshot_, this, resolver),
- dom_explorer_(snapshot_, this),
- heap_(heap) {
-}
-
-
-bool HeapSnapshotGenerator::GenerateSnapshot() {
- v8_heap_explorer_.TagGlobalObjects();
-
- // TODO(1562) Profiler assumes that any object that is in the heap after
- // full GC is reachable from the root when computing dominators.
- // This is not true for weakly reachable objects.
- // As a temporary solution we call GC twice.
- Isolate::Current()->heap()->CollectAllGarbage(
- Heap::kMakeHeapIterableMask,
- "HeapSnapshotGenerator::GenerateSnapshot");
- Isolate::Current()->heap()->CollectAllGarbage(
- Heap::kMakeHeapIterableMask,
- "HeapSnapshotGenerator::GenerateSnapshot");
-
-#ifdef VERIFY_HEAP
- Heap* debug_heap = Isolate::Current()->heap();
- CHECK(!debug_heap->old_data_space()->was_swept_conservatively());
- CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively());
- CHECK(!debug_heap->code_space()->was_swept_conservatively());
- CHECK(!debug_heap->cell_space()->was_swept_conservatively());
- CHECK(!debug_heap->map_space()->was_swept_conservatively());
-#endif
-
- // The following code uses heap iterators, so we want the heap to be
- // stable. It should follow TagGlobalObjects as that can allocate.
- AssertNoAllocation no_alloc;
-
-#ifdef VERIFY_HEAP
- debug_heap->Verify();
-#endif
-
- SetProgressTotal(1); // 1 pass.
-
-#ifdef VERIFY_HEAP
- debug_heap->Verify();
-#endif
-
- if (!FillReferences()) return false;
-
- snapshot_->FillChildren();
- snapshot_->RememberLastJSObjectId();
-
- progress_counter_ = progress_total_;
- if (!ProgressReport(true)) return false;
- return true;
-}
-
-
-void HeapSnapshotGenerator::ProgressStep() {
- ++progress_counter_;
-}
-
-
-bool HeapSnapshotGenerator::ProgressReport(bool force) {
- const int kProgressReportGranularity = 10000;
- if (control_ != NULL
- && (force || progress_counter_ % kProgressReportGranularity == 0)) {
- return
- control_->ReportProgressValue(progress_counter_, progress_total_) ==
- v8::ActivityControl::kContinue;
- }
- return true;
-}
-
-
-void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
- if (control_ == NULL) return;
- HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
- progress_total_ = iterations_count * (
- v8_heap_explorer_.EstimateObjectsCount(&iterator) +
- dom_explorer_.EstimateObjectsCount());
- progress_counter_ = 0;
-}
-
-
-bool HeapSnapshotGenerator::FillReferences() {
- SnapshotFiller filler(snapshot_, &entries_);
- v8_heap_explorer_.AddRootEntries(&filler);
- return v8_heap_explorer_.IterateAndExtractReferences(&filler)
- && dom_explorer_.IterateAndExtractReferences(&filler);
-}
-
-
-template<int bytes> struct MaxDecimalDigitsIn;
-template<> struct MaxDecimalDigitsIn<4> {
- static const int kSigned = 11;
- static const int kUnsigned = 10;
-};
-template<> struct MaxDecimalDigitsIn<8> {
- static const int kSigned = 20;
- static const int kUnsigned = 20;
-};
-
-
-class OutputStreamWriter {
- public:
- explicit OutputStreamWriter(v8::OutputStream* stream)
- : stream_(stream),
- chunk_size_(stream->GetChunkSize()),
- chunk_(chunk_size_),
- chunk_pos_(0),
- aborted_(false) {
- ASSERT(chunk_size_ > 0);
- }
- bool aborted() { return aborted_; }
- void AddCharacter(char c) {
- ASSERT(c != '\0');
- ASSERT(chunk_pos_ < chunk_size_);
- chunk_[chunk_pos_++] = c;
- MaybeWriteChunk();
- }
- void AddString(const char* s) {
- AddSubstring(s, StrLength(s));
- }
- void AddSubstring(const char* s, int n) {
- if (n <= 0) return;
- ASSERT(static_cast<size_t>(n) <= strlen(s));
- const char* s_end = s + n;
- while (s < s_end) {
- int s_chunk_size = Min(
- chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
- ASSERT(s_chunk_size > 0);
- memcpy(chunk_.start() + chunk_pos_, s, s_chunk_size);
- s += s_chunk_size;
- chunk_pos_ += s_chunk_size;
- MaybeWriteChunk();
- }
- }
- void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); }
- void Finalize() {
- if (aborted_) return;
- ASSERT(chunk_pos_ < chunk_size_);
- if (chunk_pos_ != 0) {
- WriteChunk();
- }
- stream_->EndOfStream();
- }
-
- private:
- template<typename T>
- void AddNumberImpl(T n, const char* format) {
- // Buffer for the longest value plus trailing \0
- static const int kMaxNumberSize =
- MaxDecimalDigitsIn<sizeof(T)>::kUnsigned + 1;
- if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) {
- int result = OS::SNPrintF(
- chunk_.SubVector(chunk_pos_, chunk_size_), format, n);
- ASSERT(result != -1);
- chunk_pos_ += result;
- MaybeWriteChunk();
- } else {
- EmbeddedVector<char, kMaxNumberSize> buffer;
- int result = OS::SNPrintF(buffer, format, n);
- USE(result);
- ASSERT(result != -1);
- AddString(buffer.start());
- }
- }
- void MaybeWriteChunk() {
- ASSERT(chunk_pos_ <= chunk_size_);
- if (chunk_pos_ == chunk_size_) {
- WriteChunk();
- }
- }
- void WriteChunk() {
- if (aborted_) return;
- if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) ==
- v8::OutputStream::kAbort) aborted_ = true;
- chunk_pos_ = 0;
- }
-
- v8::OutputStream* stream_;
- int chunk_size_;
- ScopedVector<char> chunk_;
- int chunk_pos_;
- bool aborted_;
-};
-
-
-// type, name|index, to_node.
-const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
-// type, name, id, self_size, children_index.
-const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
-
-void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
- ASSERT(writer_ == NULL);
- writer_ = new OutputStreamWriter(stream);
-
- HeapSnapshot* original_snapshot = NULL;
- if (snapshot_->RawSnapshotSize() >=
- SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) {
- // The snapshot is too big. Serialize a fake snapshot.
- original_snapshot = snapshot_;
- snapshot_ = CreateFakeSnapshot();
- }
-
- SerializeImpl();
-
- delete writer_;
- writer_ = NULL;
-
- if (original_snapshot != NULL) {
- delete snapshot_;
- snapshot_ = original_snapshot;
- }
-}
-
-
-HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() {
- HeapSnapshot* result = new HeapSnapshot(snapshot_->collection(),
- HeapSnapshot::kFull,
- snapshot_->title(),
- snapshot_->uid());
- result->AddRootEntry();
- const char* text = snapshot_->collection()->names()->GetFormatted(
- "The snapshot is too big. "
- "Maximum snapshot size is %" V8_PTR_PREFIX "u MB. "
- "Actual snapshot size is %" V8_PTR_PREFIX "u MB.",
- SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB,
- (snapshot_->RawSnapshotSize() + MB - 1) / MB);
- HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4);
- result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message);
- result->FillChildren();
- return result;
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeImpl() {
- ASSERT(0 == snapshot_->root()->index());
- writer_->AddCharacter('{');
- writer_->AddString("\"snapshot\":{");
- SerializeSnapshot();
- if (writer_->aborted()) return;
- writer_->AddString("},\n");
- writer_->AddString("\"nodes\":[");
- SerializeNodes();
- if (writer_->aborted()) return;
- writer_->AddString("],\n");
- writer_->AddString("\"edges\":[");
- SerializeEdges();
- if (writer_->aborted()) return;
- writer_->AddString("],\n");
- writer_->AddString("\"strings\":[");
- SerializeStrings();
- if (writer_->aborted()) return;
- writer_->AddCharacter(']');
- writer_->AddCharacter('}');
- writer_->Finalize();
-}
-
-
-int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
- HashMap::Entry* cache_entry = strings_.Lookup(
- const_cast<char*>(s), ObjectHash(s), true);
- if (cache_entry->value == NULL) {
- cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
- }
- return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
-}
-
-
-static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
- int number_of_digits = 0;
- unsigned t = value;
- do {
- ++number_of_digits;
- } while (t /= 10);
-
- buffer_pos += number_of_digits;
- int result = buffer_pos;
- do {
- int last_digit = value % 10;
- buffer[--buffer_pos] = '0' + last_digit;
- value /= 10;
- } while (value);
- return result;
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
- bool first_edge) {
- // The buffer needs space for 3 unsigned ints, 3 commas, \n and \0
- static const int kBufferSize =
- MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned * 3 + 3 + 2; // NOLINT
- EmbeddedVector<char, kBufferSize> buffer;
- int edge_name_or_index = edge->type() == HeapGraphEdge::kElement
- || edge->type() == HeapGraphEdge::kHidden
- || edge->type() == HeapGraphEdge::kWeak
- ? edge->index() : GetStringId(edge->name());
- int buffer_pos = 0;
- if (!first_edge) {
- buffer[buffer_pos++] = ',';
- }
- buffer_pos = utoa(edge->type(), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(edge_name_or_index, buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry_index(edge->to()), buffer, buffer_pos);
- buffer[buffer_pos++] = '\n';
- buffer[buffer_pos++] = '\0';
- writer_->AddString(buffer.start());
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeEdges() {
- List<HeapGraphEdge*>& edges = snapshot_->children();
- for (int i = 0; i < edges.length(); ++i) {
- ASSERT(i == 0 ||
- edges[i - 1]->from()->index() <= edges[i]->from()->index());
- SerializeEdge(edges[i], i == 0);
- if (writer_->aborted()) return;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
- // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0
- static const int kBufferSize =
- 5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
- + 5 + 1 + 1;
- EmbeddedVector<char, kBufferSize> buffer;
- int buffer_pos = 0;
- if (entry_index(entry) != 0) {
- buffer[buffer_pos++] = ',';
- }
- buffer_pos = utoa(entry->type(), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(GetStringId(entry->name()), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry->id(), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry->self_size(), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry->children_count(), buffer, buffer_pos);
- buffer[buffer_pos++] = '\n';
- buffer[buffer_pos++] = '\0';
- writer_->AddString(buffer.start());
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeNodes() {
- List<HeapEntry>& entries = snapshot_->entries();
- for (int i = 0; i < entries.length(); ++i) {
- SerializeNode(&entries[i]);
- if (writer_->aborted()) return;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeSnapshot() {
- writer_->AddString("\"title\":\"");
- writer_->AddString(snapshot_->title());
- writer_->AddString("\"");
- writer_->AddString(",\"uid\":");
- writer_->AddNumber(snapshot_->uid());
- writer_->AddString(",\"meta\":");
- // The object describing node serialization layout.
- // We use a set of macros to improve readability.
-#define JSON_A(s) "[" s "]"
-#define JSON_O(s) "{" s "}"
-#define JSON_S(s) "\"" s "\""
- writer_->AddString(JSON_O(
- JSON_S("node_fields") ":" JSON_A(
- JSON_S("type") ","
- JSON_S("name") ","
- JSON_S("id") ","
- JSON_S("self_size") ","
- JSON_S("edge_count")) ","
- JSON_S("node_types") ":" JSON_A(
- JSON_A(
- JSON_S("hidden") ","
- JSON_S("array") ","
- JSON_S("string") ","
- JSON_S("object") ","
- JSON_S("code") ","
- JSON_S("closure") ","
- JSON_S("regexp") ","
- JSON_S("number") ","
- JSON_S("native") ","
- JSON_S("synthetic")) ","
- JSON_S("string") ","
- JSON_S("number") ","
- JSON_S("number") ","
- JSON_S("number") ","
- JSON_S("number") ","
- JSON_S("number")) ","
- JSON_S("edge_fields") ":" JSON_A(
- JSON_S("type") ","
- JSON_S("name_or_index") ","
- JSON_S("to_node")) ","
- JSON_S("edge_types") ":" JSON_A(
- JSON_A(
- JSON_S("context") ","
- JSON_S("element") ","
- JSON_S("property") ","
- JSON_S("internal") ","
- JSON_S("hidden") ","
- JSON_S("shortcut") ","
- JSON_S("weak")) ","
- JSON_S("string_or_number") ","
- JSON_S("node"))));
-#undef JSON_S
-#undef JSON_O
-#undef JSON_A
- writer_->AddString(",\"node_count\":");
- writer_->AddNumber(snapshot_->entries().length());
- writer_->AddString(",\"edge_count\":");
- writer_->AddNumber(snapshot_->edges().length());
-}
-
-
-static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
- static const char hex_chars[] = "0123456789ABCDEF";
- w->AddString("\\u");
- w->AddCharacter(hex_chars[(u >> 12) & 0xf]);
- w->AddCharacter(hex_chars[(u >> 8) & 0xf]);
- w->AddCharacter(hex_chars[(u >> 4) & 0xf]);
- w->AddCharacter(hex_chars[u & 0xf]);
-}
-
-void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
- writer_->AddCharacter('\n');
- writer_->AddCharacter('\"');
- for ( ; *s != '\0'; ++s) {
- switch (*s) {
- case '\b':
- writer_->AddString("\\b");
- continue;
- case '\f':
- writer_->AddString("\\f");
- continue;
- case '\n':
- writer_->AddString("\\n");
- continue;
- case '\r':
- writer_->AddString("\\r");
- continue;
- case '\t':
- writer_->AddString("\\t");
- continue;
- case '\"':
- case '\\':
- writer_->AddCharacter('\\');
- writer_->AddCharacter(*s);
- continue;
- default:
- if (*s > 31 && *s < 128) {
- writer_->AddCharacter(*s);
- } else if (*s <= 31) {
- // Special character with no dedicated literal.
- WriteUChar(writer_, *s);
- } else {
- // Convert UTF-8 into \u UTF-16 literal.
- unsigned length = 1, cursor = 0;
- for ( ; length <= 4 && *(s + length) != '\0'; ++length) { }
- unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor);
- if (c != unibrow::Utf8::kBadChar) {
- WriteUChar(writer_, c);
- ASSERT(cursor != 0);
- s += cursor - 1;
- } else {
- writer_->AddCharacter('?');
- }
- }
- }
- }
- writer_->AddCharacter('\"');
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeStrings() {
- List<HashMap::Entry*> sorted_strings;
- SortHashMap(&strings_, &sorted_strings);
- writer_->AddString("\"<dummy>\"");
- for (int i = 0; i < sorted_strings.length(); ++i) {
- writer_->AddCharacter(',');
- SerializeString(
- reinterpret_cast<const unsigned char*>(sorted_strings[i]->key));
- if (writer_->aborted()) return;
- }
-}
-
-
-template<typename T>
-inline static int SortUsingEntryValue(const T* x, const T* y) {
- uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value);
- uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value);
- if (x_uint > y_uint) {
- return 1;
- } else if (x_uint == y_uint) {
- return 0;
- } else {
- return -1;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SortHashMap(
- HashMap* map, List<HashMap::Entry*>* sorted_entries) {
- for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p))
- sorted_entries->Add(p);
- sorted_entries->Sort(SortUsingEntryValue);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/heap-snapshot-generator.h b/src/3rdparty/v8/src/heap-snapshot-generator.h
deleted file mode 100644
index 77c659a..0000000
--- a/src/3rdparty/v8/src/heap-snapshot-generator.h
+++ /dev/null
@@ -1,697 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HEAP_SNAPSHOT_GENERATOR_H_
-#define V8_HEAP_SNAPSHOT_GENERATOR_H_
-
-namespace v8 {
-namespace internal {
-
-class HeapEntry;
-class HeapSnapshot;
-
-class HeapGraphEdge BASE_EMBEDDED {
- public:
- enum Type {
- kContextVariable = v8::HeapGraphEdge::kContextVariable,
- kElement = v8::HeapGraphEdge::kElement,
- kProperty = v8::HeapGraphEdge::kProperty,
- kInternal = v8::HeapGraphEdge::kInternal,
- kHidden = v8::HeapGraphEdge::kHidden,
- kShortcut = v8::HeapGraphEdge::kShortcut,
- kWeak = v8::HeapGraphEdge::kWeak
- };
-
- HeapGraphEdge() { }
- HeapGraphEdge(Type type, const char* name, int from, int to);
- HeapGraphEdge(Type type, int index, int from, int to);
- void ReplaceToIndexWithEntry(HeapSnapshot* snapshot);
-
- Type type() const { return static_cast<Type>(type_); }
- int index() const {
- ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
- return index_;
- }
- const char* name() const {
- ASSERT(type_ == kContextVariable
- || type_ == kProperty
- || type_ == kInternal
- || type_ == kShortcut);
- return name_;
- }
- INLINE(HeapEntry* from() const);
- HeapEntry* to() const { return to_entry_; }
-
- private:
- INLINE(HeapSnapshot* snapshot() const);
-
- unsigned type_ : 3;
- int from_index_ : 29;
- union {
- // During entries population |to_index_| is used for storing the index,
- // afterwards it is replaced with a pointer to the entry.
- int to_index_;
- HeapEntry* to_entry_;
- };
- union {
- int index_;
- const char* name_;
- };
-};
-
-
-// HeapEntry instances represent an entity from the heap (or a special
-// virtual node, e.g. root).
-class HeapEntry BASE_EMBEDDED {
- public:
- enum Type {
- kHidden = v8::HeapGraphNode::kHidden,
- kArray = v8::HeapGraphNode::kArray,
- kString = v8::HeapGraphNode::kString,
- kObject = v8::HeapGraphNode::kObject,
- kCode = v8::HeapGraphNode::kCode,
- kClosure = v8::HeapGraphNode::kClosure,
- kRegExp = v8::HeapGraphNode::kRegExp,
- kHeapNumber = v8::HeapGraphNode::kHeapNumber,
- kNative = v8::HeapGraphNode::kNative,
- kSynthetic = v8::HeapGraphNode::kSynthetic
- };
- static const int kNoEntry;
-
- HeapEntry() { }
- HeapEntry(HeapSnapshot* snapshot,
- Type type,
- const char* name,
- SnapshotObjectId id,
- int self_size);
-
- HeapSnapshot* snapshot() { return snapshot_; }
- Type type() { return static_cast<Type>(type_); }
- const char* name() { return name_; }
- void set_name(const char* name) { name_ = name; }
- inline SnapshotObjectId id() { return id_; }
- int self_size() { return self_size_; }
- INLINE(int index() const);
- int children_count() const { return children_count_; }
- INLINE(int set_children_index(int index));
- void add_child(HeapGraphEdge* edge) {
- children_arr()[children_count_++] = edge;
- }
- Vector<HeapGraphEdge*> children() {
- return Vector<HeapGraphEdge*>(children_arr(), children_count_); }
-
- void SetIndexedReference(
- HeapGraphEdge::Type type, int index, HeapEntry* entry);
- void SetNamedReference(
- HeapGraphEdge::Type type, const char* name, HeapEntry* entry);
-
- void Print(
- const char* prefix, const char* edge_name, int max_depth, int indent);
-
- Handle<HeapObject> GetHeapObject();
-
- private:
- INLINE(HeapGraphEdge** children_arr());
- const char* TypeAsString();
-
- unsigned type_: 4;
- int children_count_: 28;
- int children_index_;
- int self_size_;
- SnapshotObjectId id_;
- HeapSnapshot* snapshot_;
- const char* name_;
-};
-
-
-class HeapSnapshotsCollection;
-
-// HeapSnapshot represents a single heap snapshot. It is stored in
-// HeapSnapshotsCollection, which is also a factory for
-// HeapSnapshots. All HeapSnapshots share strings copied from JS heap
-// to be able to return them even if they were collected.
-// HeapSnapshotGenerator fills in a HeapSnapshot.
-class HeapSnapshot {
- public:
- enum Type {
- kFull = v8::HeapSnapshot::kFull
- };
-
- HeapSnapshot(HeapSnapshotsCollection* collection,
- Type type,
- const char* title,
- unsigned uid);
- void Delete();
-
- HeapSnapshotsCollection* collection() { return collection_; }
- Type type() { return type_; }
- const char* title() { return title_; }
- unsigned uid() { return uid_; }
- size_t RawSnapshotSize() const;
- HeapEntry* root() { return &entries_[root_index_]; }
- HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
- HeapEntry* natives_root() { return &entries_[natives_root_index_]; }
- HeapEntry* gc_subroot(int index) {
- return &entries_[gc_subroot_indexes_[index]];
- }
- List<HeapEntry>& entries() { return entries_; }
- List<HeapGraphEdge>& edges() { return edges_; }
- List<HeapGraphEdge*>& children() { return children_; }
- void RememberLastJSObjectId();
- SnapshotObjectId max_snapshot_js_object_id() const {
- return max_snapshot_js_object_id_;
- }
-
- HeapEntry* AddEntry(HeapEntry::Type type,
- const char* name,
- SnapshotObjectId id,
- int size);
- HeapEntry* AddRootEntry();
- HeapEntry* AddGcRootsEntry();
- HeapEntry* AddGcSubrootEntry(int tag);
- HeapEntry* AddNativesRootEntry();
- HeapEntry* GetEntryById(SnapshotObjectId id);
- List<HeapEntry*>* GetSortedEntriesList();
- void FillChildren();
-
- void Print(int max_depth);
- void PrintEntriesSize();
-
- private:
- HeapSnapshotsCollection* collection_;
- Type type_;
- const char* title_;
- unsigned uid_;
- int root_index_;
- int gc_roots_index_;
- int natives_root_index_;
- int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
- List<HeapEntry> entries_;
- List<HeapGraphEdge> edges_;
- List<HeapGraphEdge*> children_;
- List<HeapEntry*> sorted_entries_;
- SnapshotObjectId max_snapshot_js_object_id_;
-
- friend class HeapSnapshotTester;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
-};
-
-
-class HeapObjectsMap {
- public:
- explicit HeapObjectsMap(Heap* heap);
-
- Heap* heap() const { return heap_; }
-
- void SnapshotGenerationFinished();
- SnapshotObjectId FindEntry(Address addr);
- SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size);
- void MoveObject(Address from, Address to);
- SnapshotObjectId last_assigned_id() const {
- return next_id_ - kObjectIdStep;
- }
-
- void StopHeapObjectsTracking();
- SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
- size_t GetUsedMemorySize() const;
-
- static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
- static inline SnapshotObjectId GetNthGcSubrootId(int delta);
-
- static const int kObjectIdStep = 2;
- static const SnapshotObjectId kInternalRootObjectId;
- static const SnapshotObjectId kGcRootsObjectId;
- static const SnapshotObjectId kNativesRootObjectId;
- static const SnapshotObjectId kGcRootsFirstSubrootId;
- static const SnapshotObjectId kFirstAvailableObjectId;
-
- private:
- struct EntryInfo {
- EntryInfo(SnapshotObjectId id, Address addr, unsigned int size)
- : id(id), addr(addr), size(size), accessed(true) { }
- EntryInfo(SnapshotObjectId id, Address addr, unsigned int size, bool accessed)
- : id(id), addr(addr), size(size), accessed(accessed) { }
- SnapshotObjectId id;
- Address addr;
- unsigned int size;
- bool accessed;
- };
- struct TimeInterval {
- explicit TimeInterval(SnapshotObjectId id) : id(id), size(0), count(0) { }
- SnapshotObjectId id;
- uint32_t size;
- uint32_t count;
- };
-
- void UpdateHeapObjectsMap();
- void RemoveDeadEntries();
-
- static bool AddressesMatch(void* key1, void* key2) {
- return key1 == key2;
- }
-
- static uint32_t AddressHash(Address addr) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)),
- v8::internal::kZeroHashSeed);
- }
-
- SnapshotObjectId next_id_;
- HashMap entries_map_;
- List<EntryInfo> entries_;
- List<TimeInterval> time_intervals_;
- Heap* heap_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
-};
-
-
-class HeapSnapshotsCollection {
- public:
- explicit HeapSnapshotsCollection(Heap* heap);
- ~HeapSnapshotsCollection();
-
- Heap* heap() const { return ids_.heap(); }
-
- bool is_tracking_objects() { return is_tracking_objects_; }
- SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) {
- return ids_.PushHeapObjectsStats(stream);
- }
- void StartHeapObjectsTracking() { is_tracking_objects_ = true; }
- void StopHeapObjectsTracking() { ids_.StopHeapObjectsTracking(); }
-
- HeapSnapshot* NewSnapshot(
- HeapSnapshot::Type type, const char* name, unsigned uid);
- void SnapshotGenerationFinished(HeapSnapshot* snapshot);
- List<HeapSnapshot*>* snapshots() { return &snapshots_; }
- HeapSnapshot* GetSnapshot(unsigned uid);
- void RemoveSnapshot(HeapSnapshot* snapshot);
-
- StringsStorage* names() { return &names_; }
- TokenEnumerator* token_enumerator() { return token_enumerator_; }
-
- SnapshotObjectId FindObjectId(Address object_addr) {
- return ids_.FindEntry(object_addr);
- }
- SnapshotObjectId GetObjectId(Address object_addr, int object_size) {
- return ids_.FindOrAddEntry(object_addr, object_size);
- }
- Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
- void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
- SnapshotObjectId last_assigned_id() const {
- return ids_.last_assigned_id();
- }
- size_t GetUsedMemorySize() const;
-
- private:
- INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
-
- bool is_tracking_objects_; // Whether tracking object moves is needed.
- List<HeapSnapshot*> snapshots_;
- // Mapping from snapshots' uids to HeapSnapshot* pointers.
- HashMap snapshots_uids_;
- StringsStorage names_;
- TokenEnumerator* token_enumerator_;
- // Mapping from HeapObject addresses to objects' uids.
- HeapObjectsMap ids_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
-};
-
-
-// A typedef for referencing anything that can be snapshotted living
-// in any kind of heap memory.
-typedef void* HeapThing;
-
-
-// An interface that creates HeapEntries by HeapThings.
-class HeapEntriesAllocator {
- public:
- virtual ~HeapEntriesAllocator() { }
- virtual HeapEntry* AllocateEntry(HeapThing ptr) = 0;
-};
-
-
-// The HeapEntriesMap instance is used to track a mapping between
-// real heap objects and their representations in heap snapshots.
-class HeapEntriesMap {
- public:
- HeapEntriesMap();
-
- int Map(HeapThing thing);
- void Pair(HeapThing thing, int entry);
-
- private:
- static uint32_t Hash(HeapThing thing) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
- v8::internal::kZeroHashSeed);
- }
- static bool HeapThingsMatch(HeapThing key1, HeapThing key2) {
- return key1 == key2;
- }
-
- HashMap entries_;
-
- friend class HeapObjectsSet;
-
- DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
-};
-
-
-class HeapObjectsSet {
- public:
- HeapObjectsSet();
- void Clear();
- bool Contains(Object* object);
- void Insert(Object* obj);
- const char* GetTag(Object* obj);
- void SetTag(Object* obj, const char* tag);
- bool is_empty() const { return entries_.occupancy() == 0; }
-
- private:
- HashMap entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapObjectsSet);
-};
-
-
-// An interface used to populate a snapshot with nodes and edges.
-class SnapshotFillerInterface {
- public:
- virtual ~SnapshotFillerInterface() { }
- virtual HeapEntry* AddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual HeapEntry* FindEntry(HeapThing ptr) = 0;
- virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual void SetIndexedReference(HeapGraphEdge::Type type,
- int parent_entry,
- int index,
- HeapEntry* child_entry) = 0;
- virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedReference(HeapGraphEdge::Type type,
- int parent_entry,
- const char* reference_name,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
- HeapEntry* child_entry) = 0;
-};
-
-
-class SnapshottingProgressReportingInterface {
- public:
- virtual ~SnapshottingProgressReportingInterface() { }
- virtual void ProgressStep() = 0;
- virtual bool ProgressReport(bool force) = 0;
-};
-
-
-// An implementation of V8 heap graph extractor.
-class V8HeapExplorer : public HeapEntriesAllocator {
- public:
- V8HeapExplorer(HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress,
- v8::HeapProfiler::ObjectNameResolver* resolver);
- virtual ~V8HeapExplorer();
- virtual HeapEntry* AllocateEntry(HeapThing ptr);
- void AddRootEntries(SnapshotFillerInterface* filler);
- int EstimateObjectsCount(HeapIterator* iterator);
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
- void TagGlobalObjects();
-
- static String* GetConstructorName(JSObject* object);
-
- static HeapObject* const kInternalRootObject;
-
- private:
- HeapEntry* AddEntry(HeapObject* object);
- HeapEntry* AddEntry(HeapObject* object,
- HeapEntry::Type type,
- const char* name);
- const char* GetSystemEntryName(HeapObject* object);
-
- void ExtractReferences(HeapObject* obj);
- void ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy);
- void ExtractJSObjectReferences(int entry, JSObject* js_obj);
- void ExtractStringReferences(int entry, String* obj);
- void ExtractContextReferences(int entry, Context* context);
- void ExtractMapReferences(int entry, Map* map);
- void ExtractSharedFunctionInfoReferences(int entry,
- SharedFunctionInfo* shared);
- void ExtractScriptReferences(int entry, Script* script);
- void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
- void ExtractCodeReferences(int entry, Code* code);
- void ExtractJSGlobalPropertyCellReferences(int entry,
- JSGlobalPropertyCell* cell);
- void ExtractClosureReferences(JSObject* js_obj, int entry);
- void ExtractPropertyReferences(JSObject* js_obj, int entry);
- void ExtractElementReferences(JSObject* js_obj, int entry);
- void ExtractInternalReferences(JSObject* js_obj, int entry);
- bool IsEssentialObject(Object* object);
- void SetClosureReference(HeapObject* parent_obj,
- int parent,
- String* reference_name,
- Object* child);
- void SetNativeBindReference(HeapObject* parent_obj,
- int parent,
- const char* reference_name,
- Object* child);
- void SetElementReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child);
- void SetInternalReference(HeapObject* parent_obj,
- int parent,
- const char* reference_name,
- Object* child,
- int field_offset = -1);
- void SetInternalReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child,
- int field_offset = -1);
- void SetHiddenReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child);
- void SetWeakReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child_obj,
- int field_offset);
- void SetPropertyReference(HeapObject* parent_obj,
- int parent,
- String* reference_name,
- Object* child,
- const char* name_format_string = NULL,
- int field_offset = -1);
- void SetUserGlobalReference(Object* user_global);
- void SetRootGcRootsReference();
- void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
- void SetGcSubrootReference(
- VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
- const char* GetStrongGcSubrootName(Object* object);
- void TagObject(Object* obj, const char* tag);
-
- HeapEntry* GetEntry(Object* obj);
-
- static inline HeapObject* GetNthGcSubrootObject(int delta);
- static inline int GetGcSubrootOrder(HeapObject* subroot);
-
- Heap* heap_;
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- SnapshottingProgressReportingInterface* progress_;
- SnapshotFillerInterface* filler_;
- HeapObjectsSet objects_tags_;
- HeapObjectsSet strong_gc_subroot_names_;
- v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
-
- static HeapObject* const kGcRootsObject;
- static HeapObject* const kFirstGcSubrootObject;
- static HeapObject* const kLastGcSubrootObject;
-
- friend class IndexedReferencesExtractor;
- friend class GcSubrootsEnumerator;
- friend class RootsReferencesExtractor;
-
- DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
-};
-
-
-class NativeGroupRetainedObjectInfo;
-
-
-// An implementation of retained native objects extractor.
-class NativeObjectsExplorer {
- public:
- NativeObjectsExplorer(HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress);
- virtual ~NativeObjectsExplorer();
- void AddRootEntries(SnapshotFillerInterface* filler);
- int EstimateObjectsCount();
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
-
- private:
- void FillRetainedObjects();
- void FillImplicitReferences();
- List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
- void SetNativeRootReference(v8::RetainedObjectInfo* info);
- void SetRootNativeRootsReference();
- void SetWrapperNativeReferences(HeapObject* wrapper,
- v8::RetainedObjectInfo* info);
- void VisitSubtreeWrapper(Object** p, uint16_t class_id);
-
- static uint32_t InfoHash(v8::RetainedObjectInfo* info) {
- return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()),
- v8::internal::kZeroHashSeed);
- }
- static bool RetainedInfosMatch(void* key1, void* key2) {
- return key1 == key2 ||
- (reinterpret_cast<v8::RetainedObjectInfo*>(key1))->IsEquivalent(
- reinterpret_cast<v8::RetainedObjectInfo*>(key2));
- }
- INLINE(static bool StringsMatch(void* key1, void* key2)) {
- return strcmp(reinterpret_cast<char*>(key1),
- reinterpret_cast<char*>(key2)) == 0;
- }
-
- NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
-
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- SnapshottingProgressReportingInterface* progress_;
- bool embedder_queried_;
- HeapObjectsSet in_groups_;
- // RetainedObjectInfo* -> List<HeapObject*>*
- HashMap objects_by_info_;
- HashMap native_groups_;
- HeapEntriesAllocator* synthetic_entries_allocator_;
- HeapEntriesAllocator* native_entries_allocator_;
- // Used during references extraction.
- SnapshotFillerInterface* filler_;
-
- static HeapThing const kNativesRootObject;
-
- friend class GlobalHandlesExtractor;
-
- DISALLOW_COPY_AND_ASSIGN(NativeObjectsExplorer);
-};
-
-
-class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
- public:
- HeapSnapshotGenerator(HeapSnapshot* snapshot,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver,
- Heap* heap);
- bool GenerateSnapshot();
-
- private:
- bool FillReferences();
- void ProgressStep();
- bool ProgressReport(bool force = false);
- void SetProgressTotal(int iterations_count);
-
- HeapSnapshot* snapshot_;
- v8::ActivityControl* control_;
- V8HeapExplorer v8_heap_explorer_;
- NativeObjectsExplorer dom_explorer_;
- // Mapping from HeapThing pointers to HeapEntry* pointers.
- HeapEntriesMap entries_;
- // Used during snapshot generation.
- int progress_counter_;
- int progress_total_;
- Heap* heap_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
-};
-
-class OutputStreamWriter;
-
-class HeapSnapshotJSONSerializer {
- public:
- explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot)
- : snapshot_(snapshot),
- strings_(ObjectsMatch),
- next_node_id_(1),
- next_string_id_(1),
- writer_(NULL) {
- }
- void Serialize(v8::OutputStream* stream);
-
- private:
- INLINE(static bool ObjectsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
-
- INLINE(static uint32_t ObjectHash(const void* key)) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)),
- v8::internal::kZeroHashSeed);
- }
-
- HeapSnapshot* CreateFakeSnapshot();
- int GetStringId(const char* s);
- int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; }
- void SerializeEdge(HeapGraphEdge* edge, bool first_edge);
- void SerializeEdges();
- void SerializeImpl();
- void SerializeNode(HeapEntry* entry);
- void SerializeNodes();
- void SerializeSnapshot();
- void SerializeString(const unsigned char* s);
- void SerializeStrings();
- void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
-
- static const int kEdgeFieldsCount;
- static const int kNodeFieldsCount;
-
- HeapSnapshot* snapshot_;
- HashMap strings_;
- int next_node_id_;
- int next_string_id_;
- OutputStreamWriter* writer_;
-
- friend class HeapSnapshotJSONSerializerEnumerator;
- friend class HeapSnapshotJSONSerializerIterator;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotJSONSerializer);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_HEAP_SNAPSHOT_GENERATOR_H_
-
diff --git a/src/3rdparty/v8/src/heap.cc b/src/3rdparty/v8/src/heap.cc
deleted file mode 100644
index 5b61436..0000000
--- a/src/3rdparty/v8/src/heap.cc
+++ /dev/null
@@ -1,7842 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "compilation-cache.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "global-handles.h"
-#include "heap-profiler.h"
-#include "incremental-marking.h"
-#include "mark-compact.h"
-#include "natives.h"
-#include "objects-visiting.h"
-#include "objects-visiting-inl.h"
-#include "once.h"
-#include "runtime-profiler.h"
-#include "scopeinfo.h"
-#include "snapshot.h"
-#include "store-buffer.h"
-#include "v8threads.h"
-#include "v8utils.h"
-#include "vm-state-inl.h"
-#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
-#include "regexp-macro-assembler.h"
-#include "arm/regexp-macro-assembler-arm.h"
-#endif
-#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
-#include "regexp-macro-assembler.h"
-#include "mips/regexp-macro-assembler-mips.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-Heap::Heap()
- : isolate_(NULL),
-// semispace_size_ should be a power of 2 and old_generation_size_ should be
-// a multiple of Page::kPageSize.
-#if defined(V8_TARGET_ARCH_X64)
-#define LUMP_OF_MEMORY (2 * MB)
- code_range_size_(512*MB),
-#else
-#define LUMP_OF_MEMORY MB
- code_range_size_(0),
-#endif
-#if defined(ANDROID)
- reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- initial_semispace_size_(Page::kPageSize),
- max_old_generation_size_(192*MB),
- max_executable_size_(max_old_generation_size_),
-#else
- reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- initial_semispace_size_(Page::kPageSize),
- max_old_generation_size_(700ul * LUMP_OF_MEMORY),
- max_executable_size_(256l * LUMP_OF_MEMORY),
-#endif
-
-// Variables set based on semispace_size_ and old_generation_size_ in
-// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
-// Will be 4 * reserved_semispace_size_ to ensure that young
-// generation can be aligned to its size.
- survived_since_last_expansion_(0),
- sweep_generation_(0),
- always_allocate_scope_depth_(0),
- linear_allocation_scope_depth_(0),
- contexts_disposed_(0),
- global_ic_age_(0),
- flush_monomorphic_ics_(false),
- scan_on_scavenge_pages_(0),
- new_space_(this),
- old_pointer_space_(NULL),
- old_data_space_(NULL),
- code_space_(NULL),
- map_space_(NULL),
- cell_space_(NULL),
- lo_space_(NULL),
- gc_state_(NOT_IN_GC),
- gc_post_processing_depth_(0),
- ms_count_(0),
- gc_count_(0),
- remembered_unmapped_pages_index_(0),
- unflattened_strings_length_(0),
-#ifdef DEBUG
- allocation_allowed_(true),
- allocation_timeout_(0),
- disallow_allocation_failure_(false),
-#endif // DEBUG
- new_space_high_promotion_mode_active_(false),
- old_gen_promotion_limit_(kMinimumPromotionLimit),
- old_gen_allocation_limit_(kMinimumAllocationLimit),
- old_gen_limit_factor_(1),
- size_of_old_gen_at_last_old_space_gc_(0),
- external_allocation_limit_(0),
- amount_of_external_allocated_memory_(0),
- amount_of_external_allocated_memory_at_last_global_gc_(0),
- old_gen_exhausted_(false),
- store_buffer_rebuilder_(store_buffer()),
- hidden_string_(NULL),
- global_gc_prologue_callback_(NULL),
- global_gc_epilogue_callback_(NULL),
- gc_safe_size_of_old_object_(NULL),
- total_regexp_code_generated_(0),
- tracer_(NULL),
- young_survivors_after_last_gc_(0),
- high_survival_rate_period_length_(0),
- low_survival_rate_period_length_(0),
- survival_rate_(0),
- previous_survival_rate_trend_(Heap::STABLE),
- survival_rate_trend_(Heap::STABLE),
- max_gc_pause_(0.0),
- total_gc_time_ms_(0.0),
- max_alive_after_gc_(0),
- min_in_mutator_(kMaxInt),
- alive_after_last_gc_(0),
- last_gc_end_timestamp_(0.0),
- marking_time_(0.0),
- sweeping_time_(0.0),
- store_buffer_(this),
- marking_(this),
- incremental_marking_(this),
- number_idle_notifications_(0),
- last_idle_notification_gc_count_(0),
- last_idle_notification_gc_count_init_(false),
- mark_sweeps_since_idle_round_started_(0),
- ms_count_at_last_idle_notification_(0),
- gc_count_at_last_idle_gc_(0),
- scavenges_since_last_idle_round_(kIdleScavengeThreshold),
-#ifdef VERIFY_HEAP
- no_weak_embedded_maps_verification_scope_depth_(0),
-#endif
- promotion_queue_(this),
- configured_(false),
- chunks_queued_for_free_(NULL),
- relocation_mutex_(NULL) {
- // Allow build-time customization of the max semispace size. Building
- // V8 with snapshots and a non-default max semispace size is much
- // easier if you can define it as part of the build environment.
-#if defined(V8_MAX_SEMISPACE_SIZE)
- max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
-#endif
-
- intptr_t max_virtual = OS::MaxVirtualMemory();
-
- if (max_virtual > 0) {
- if (code_range_size_ > 0) {
- // Reserve no more than 1/8 of the memory for the code range.
- code_range_size_ = Min(code_range_size_, max_virtual >> 3);
- }
- }
-
- memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
- native_contexts_list_ = NULL;
- mark_compact_collector_.heap_ = this;
- external_string_table_.heap_ = this;
- // Put a dummy entry in the remembered pages so we can find the list the
- // minidump even if there are no real unmapped pages.
- RememberUnmappedPage(NULL, false);
-
- ClearObjectStats(true);
-}
-
-
-intptr_t Heap::Capacity() {
- if (!HasBeenSetUp()) return 0;
-
- return new_space_.Capacity() +
- old_pointer_space_->Capacity() +
- old_data_space_->Capacity() +
- code_space_->Capacity() +
- map_space_->Capacity() +
- cell_space_->Capacity();
-}
-
-
-intptr_t Heap::CommittedMemory() {
- if (!HasBeenSetUp()) return 0;
-
- return new_space_.CommittedMemory() +
- old_pointer_space_->CommittedMemory() +
- old_data_space_->CommittedMemory() +
- code_space_->CommittedMemory() +
- map_space_->CommittedMemory() +
- cell_space_->CommittedMemory() +
- lo_space_->Size();
-}
-
-
-size_t Heap::CommittedPhysicalMemory() {
- if (!HasBeenSetUp()) return 0;
-
- return new_space_.CommittedPhysicalMemory() +
- old_pointer_space_->CommittedPhysicalMemory() +
- old_data_space_->CommittedPhysicalMemory() +
- code_space_->CommittedPhysicalMemory() +
- map_space_->CommittedPhysicalMemory() +
- cell_space_->CommittedPhysicalMemory() +
- lo_space_->CommittedPhysicalMemory();
-}
-
-
-intptr_t Heap::CommittedMemoryExecutable() {
- if (!HasBeenSetUp()) return 0;
-
- return isolate()->memory_allocator()->SizeExecutable();
-}
-
-
-intptr_t Heap::Available() {
- if (!HasBeenSetUp()) return 0;
-
- return new_space_.Available() +
- old_pointer_space_->Available() +
- old_data_space_->Available() +
- code_space_->Available() +
- map_space_->Available() +
- cell_space_->Available();
-}
-
-
-bool Heap::HasBeenSetUp() {
- return old_pointer_space_ != NULL &&
- old_data_space_ != NULL &&
- code_space_ != NULL &&
- map_space_ != NULL &&
- cell_space_ != NULL &&
- lo_space_ != NULL;
-}
-
-
-int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
- if (IntrusiveMarking::IsMarked(object)) {
- return IntrusiveMarking::SizeOfMarkedObject(object);
- }
- return object->SizeFromMap(object->map());
-}
-
-
-GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
- const char** reason) {
- // Is global GC requested?
- if (space != NEW_SPACE) {
- isolate_->counters()->gc_compactor_caused_by_request()->Increment();
- *reason = "GC in old space requested";
- return MARK_COMPACTOR;
- }
-
- if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
- *reason = "GC in old space forced by flags";
- return MARK_COMPACTOR;
- }
-
- // Is enough data promoted to justify a global GC?
- if (OldGenerationPromotionLimitReached()) {
- isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
- *reason = "promotion limit reached";
- return MARK_COMPACTOR;
- }
-
- // Have allocation in OLD and LO failed?
- if (old_gen_exhausted_) {
- isolate_->counters()->
- gc_compactor_caused_by_oldspace_exhaustion()->Increment();
- *reason = "old generations exhausted";
- return MARK_COMPACTOR;
- }
-
- // Is there enough space left in OLD to guarantee that a scavenge can
- // succeed?
- //
- // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
- // for object promotion. It counts only the bytes that the memory
- // allocator has not yet allocated from the OS and assigned to any space,
- // and does not count available bytes already in the old space or code
- // space. Undercounting is safe---we may get an unrequested full GC when
- // a scavenge would have succeeded.
- if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
- isolate_->counters()->
- gc_compactor_caused_by_oldspace_exhaustion()->Increment();
- *reason = "scavenge might not succeed";
- return MARK_COMPACTOR;
- }
-
- // Default
- *reason = NULL;
- return SCAVENGER;
-}
-
-
-// TODO(1238405): Combine the infrastructure for --heap-stats and
-// --log-gc to avoid the complicated preprocessor and flag testing.
-void Heap::ReportStatisticsBeforeGC() {
- // Heap::ReportHeapStatistics will also log NewSpace statistics when
- // compiled --log-gc is set. The following logic is used to avoid
- // double logging.
-#ifdef DEBUG
- if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
- if (FLAG_heap_stats) {
- ReportHeapStatistics("Before GC");
- } else if (FLAG_log_gc) {
- new_space_.ReportStatistics();
- }
- if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
-#else
- if (FLAG_log_gc) {
- new_space_.CollectStatistics();
- new_space_.ReportStatistics();
- new_space_.ClearHistograms();
- }
-#endif // DEBUG
-}
-
-
-void Heap::PrintShortHeapStatistics() {
- if (!FLAG_trace_gc_verbose) return;
- PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB\n",
- isolate_->memory_allocator()->Size() / KB,
- isolate_->memory_allocator()->Available() / KB);
- PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- new_space_.Size() / KB,
- new_space_.Available() / KB,
- new_space_.CommittedMemory() / KB);
- PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- old_pointer_space_->SizeOfObjects() / KB,
- old_pointer_space_->Available() / KB,
- old_pointer_space_->CommittedMemory() / KB);
- PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- old_data_space_->SizeOfObjects() / KB,
- old_data_space_->Available() / KB,
- old_data_space_->CommittedMemory() / KB);
- PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- code_space_->SizeOfObjects() / KB,
- code_space_->Available() / KB,
- code_space_->CommittedMemory() / KB);
- PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- map_space_->SizeOfObjects() / KB,
- map_space_->Available() / KB,
- map_space_->CommittedMemory() / KB);
- PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- cell_space_->SizeOfObjects() / KB,
- cell_space_->Available() / KB,
- cell_space_->CommittedMemory() / KB);
- PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- lo_space_->SizeOfObjects() / KB,
- lo_space_->Available() / KB,
- lo_space_->CommittedMemory() / KB);
- PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- this->SizeOfObjects() / KB,
- this->Available() / KB,
- this->CommittedMemory() / KB);
- PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
-}
-
-
-// TODO(1238405): Combine the infrastructure for --heap-stats and
-// --log-gc to avoid the complicated preprocessor and flag testing.
-void Heap::ReportStatisticsAfterGC() {
- // Similar to the before GC, we use some complicated logic to ensure that
- // NewSpace statistics are logged exactly once when --log-gc is turned on.
-#if defined(DEBUG)
- if (FLAG_heap_stats) {
- new_space_.CollectStatistics();
- ReportHeapStatistics("After GC");
- } else if (FLAG_log_gc) {
- new_space_.ReportStatistics();
- }
-#else
- if (FLAG_log_gc) new_space_.ReportStatistics();
-#endif // DEBUG
-}
-
-
-void Heap::GarbageCollectionPrologue() {
- isolate_->transcendental_cache()->Clear();
- ClearJSFunctionResultCaches();
- gc_count_++;
- unflattened_strings_length_ = 0;
-
- if (FLAG_flush_code && FLAG_flush_code_incrementally) {
- mark_compact_collector()->EnableCodeFlushing(true);
- }
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Verify();
- }
-#endif
-
-#ifdef DEBUG
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
- allow_allocation(false);
-
- if (FLAG_gc_verbose) Print();
-
- ReportStatisticsBeforeGC();
-#endif // DEBUG
-
- store_buffer()->GCPrologue();
-}
-
-
-intptr_t Heap::SizeOfObjects() {
- intptr_t total = 0;
- AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
- total += space->SizeOfObjects();
- }
- return total;
-}
-
-
-void Heap::RepairFreeListsAfterBoot() {
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next();
- space != NULL;
- space = spaces.next()) {
- space->RepairFreeListsAfterBoot();
- }
-}
-
-
-void Heap::GarbageCollectionEpilogue() {
- store_buffer()->GCEpilogue();
-
- // In release mode, we only zap the from space under heap verification.
- if (Heap::ShouldZapGarbage()) {
- ZapFromSpace();
- }
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Verify();
- }
-#endif
-
-#ifdef DEBUG
- allow_allocation(true);
- if (FLAG_print_global_handles) isolate_->global_handles()->Print();
- if (FLAG_print_handles) PrintHandles();
- if (FLAG_gc_verbose) Print();
- if (FLAG_code_stats) ReportCodeStatistics("After GC");
-#endif
-
- isolate_->counters()->alive_after_last_gc()->Set(
- static_cast<int>(SizeOfObjects()));
-
- isolate_->counters()->string_table_capacity()->Set(
- string_table()->Capacity());
- isolate_->counters()->number_of_symbols()->Set(
- string_table()->NumberOfElements());
-
- if (CommittedMemory() > 0) {
- isolate_->counters()->external_fragmentation_total()->AddSample(
- static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
-
- isolate_->counters()->heap_fraction_map_space()->AddSample(
- static_cast<int>(
- (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
- isolate_->counters()->heap_fraction_cell_space()->AddSample(
- static_cast<int>(
- (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
-
- isolate_->counters()->heap_sample_total_committed()->AddSample(
- static_cast<int>(CommittedMemory() / KB));
- isolate_->counters()->heap_sample_total_used()->AddSample(
- static_cast<int>(SizeOfObjects() / KB));
- isolate_->counters()->heap_sample_map_space_committed()->AddSample(
- static_cast<int>(map_space()->CommittedMemory() / KB));
- isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
- static_cast<int>(cell_space()->CommittedMemory() / KB));
- }
-
-#define UPDATE_COUNTERS_FOR_SPACE(space) \
- isolate_->counters()->space##_bytes_available()->Set( \
- static_cast<int>(space()->Available())); \
- isolate_->counters()->space##_bytes_committed()->Set( \
- static_cast<int>(space()->CommittedMemory())); \
- isolate_->counters()->space##_bytes_used()->Set( \
- static_cast<int>(space()->SizeOfObjects()));
-#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
- if (space()->CommittedMemory() > 0) { \
- isolate_->counters()->external_fragmentation_##space()->AddSample( \
- static_cast<int>(100 - \
- (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
- }
-#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
- UPDATE_COUNTERS_FOR_SPACE(space) \
- UPDATE_FRAGMENTATION_FOR_SPACE(space)
-
- UPDATE_COUNTERS_FOR_SPACE(new_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
-#undef UPDATE_COUNTERS_FOR_SPACE
-#undef UPDATE_FRAGMENTATION_FOR_SPACE
-#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
-
-#if defined(DEBUG)
- ReportStatisticsAfterGC();
-#endif // DEBUG
-#ifdef ENABLE_DEBUGGER_SUPPORT
- isolate_->debug()->AfterGarbageCollection();
-#endif // ENABLE_DEBUGGER_SUPPORT
-
- error_object_list_.DeferredFormatStackTrace(isolate());
-}
-
-
-void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
- // Since we are ignoring the return value, the exact choice of space does
- // not matter, so long as we do not specify NEW_SPACE, which would not
- // cause a full GC.
- mark_compact_collector_.SetFlags(flags);
- CollectGarbage(OLD_POINTER_SPACE, gc_reason);
- mark_compact_collector_.SetFlags(kNoGCFlags);
-}
-
-
-void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
- // Since we are ignoring the return value, the exact choice of space does
- // not matter, so long as we do not specify NEW_SPACE, which would not
- // cause a full GC.
- // Major GC would invoke weak handle callbacks on weakly reachable
- // handles, but won't collect weakly reachable objects until next
- // major GC. Therefore if we collect aggressively and weak handle callback
- // has been invoked, we rerun major GC to release objects which become
- // garbage.
- // Note: as weak callbacks can execute arbitrary code, we cannot
- // hope that eventually there will be no weak callbacks invocations.
- // Therefore stop recollecting after several attempts.
- mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
- kReduceMemoryFootprintMask);
- isolate_->compilation_cache()->Clear();
- const int kMaxNumberOfAttempts = 7;
- for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
- break;
- }
- }
- mark_compact_collector()->SetFlags(kNoGCFlags);
- new_space_.Shrink();
- UncommitFromSpace();
- incremental_marking()->UncommitMarkingDeque();
-}
-
-
-bool Heap::CollectGarbage(AllocationSpace space,
- GarbageCollector collector,
- const char* gc_reason,
- const char* collector_reason) {
- // The VM is in the GC state until exiting this function.
- VMState state(isolate_, GC);
-
-#ifdef DEBUG
- // Reset the allocation timeout to the GC interval, but make sure to
- // allow at least a few allocations after a collection. The reason
- // for this is that we have a lot of allocation sequences and we
- // assume that a garbage collection will allow the subsequent
- // allocation attempts to go through.
- allocation_timeout_ = Max(6, FLAG_gc_interval);
-#endif
-
- if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Scavenge during marking.\n");
- }
- }
-
- if (collector == MARK_COMPACTOR &&
- !mark_compact_collector()->abort_incremental_marking() &&
- !incremental_marking()->IsStopped() &&
- !incremental_marking()->should_hurry() &&
- FLAG_incremental_marking_steps) {
- // Make progress in incremental marking.
- const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
- incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
- IncrementalMarking::NO_GC_VIA_STACK_GUARD);
- if (!incremental_marking()->IsComplete()) {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
- }
- collector = SCAVENGER;
- collector_reason = "incremental marking delaying mark-sweep";
- }
- }
-
- bool next_gc_likely_to_collect_more = false;
-
- { GCTracer tracer(this, gc_reason, collector_reason);
- GarbageCollectionPrologue();
- // The GC count was incremented in the prologue. Tell the tracer about
- // it.
- tracer.set_gc_count(gc_count_);
-
- // Tell the tracer which collector we've selected.
- tracer.set_collector(collector);
-
- {
- HistogramTimerScope histogram_timer_scope(
- (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
- : isolate_->counters()->gc_compactor());
- next_gc_likely_to_collect_more =
- PerformGarbageCollection(collector, &tracer);
- }
-
- GarbageCollectionEpilogue();
- }
-
- // Start incremental marking for the next cycle. The heap snapshot
- // generator needs incremental marking to stay off after it aborted.
- if (!mark_compact_collector()->abort_incremental_marking() &&
- incremental_marking()->IsStopped() &&
- incremental_marking()->WorthActivating() &&
- NextGCIsLikelyToBeFull()) {
- incremental_marking()->Start();
- }
-
- return next_gc_likely_to_collect_more;
-}
-
-
-void Heap::PerformScavenge() {
- GCTracer tracer(this, NULL, NULL);
- if (incremental_marking()->IsStopped()) {
- PerformGarbageCollection(SCAVENGER, &tracer);
- } else {
- PerformGarbageCollection(MARK_COMPACTOR, &tracer);
- }
-}
-
-
-void Heap::MoveElements(FixedArray* array,
- int dst_index,
- int src_index,
- int len) {
- if (len == 0) return;
-
- ASSERT(array->map() != HEAP->fixed_cow_array_map());
- Object** dst_objects = array->data_start() + dst_index;
- memmove(dst_objects,
- array->data_start() + src_index,
- len * kPointerSize);
- if (!InNewSpace(array)) {
- for (int i = 0; i < len; i++) {
- // TODO(hpayer): check store buffer for entries
- if (InNewSpace(dst_objects[i])) {
- RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
- }
- }
- }
- incremental_marking()->RecordWrites(array);
-}
-
-
-#ifdef VERIFY_HEAP
-// Helper class for verifying the string table.
-class StringTableVerifier : public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- // Visit all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject()) {
- // Check that the string is actually internalized.
- CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
- (*p)->IsInternalizedString());
- }
- }
- }
-};
-
-
-static void VerifyStringTable() {
- StringTableVerifier verifier;
- HEAP->string_table()->IterateElements(&verifier);
-}
-#endif // VERIFY_HEAP
-
-
-static bool AbortIncrementalMarkingAndCollectGarbage(
- Heap* heap,
- AllocationSpace space,
- const char* gc_reason = NULL) {
- heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
- bool result = heap->CollectGarbage(space, gc_reason);
- heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
- return result;
-}
-
-
-void Heap::ReserveSpace(
- int *sizes,
- Address *locations_out) {
- bool gc_performed = true;
- int counter = 0;
- static const int kThreshold = 20;
- while (gc_performed && counter++ < kThreshold) {
- gc_performed = false;
- ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
- for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
- if (sizes[space] != 0) {
- MaybeObject* allocation;
- if (space == NEW_SPACE) {
- allocation = new_space()->AllocateRaw(sizes[space]);
- } else {
- allocation = paged_space(space)->AllocateRaw(sizes[space]);
- }
- FreeListNode* node;
- if (!allocation->To<FreeListNode>(&node)) {
- if (space == NEW_SPACE) {
- Heap::CollectGarbage(NEW_SPACE,
- "failed to reserve space in the new space");
- } else {
- AbortIncrementalMarkingAndCollectGarbage(
- this,
- static_cast<AllocationSpace>(space),
- "failed to reserve space in paged space");
- }
- gc_performed = true;
- break;
- } else {
- // Mark with a free list node, in case we have a GC before
- // deserializing.
- node->set_size(this, sizes[space]);
- locations_out[space] = node->address();
- }
- }
- }
- }
-
- if (gc_performed) {
- // Failed to reserve the space after several attempts.
- V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
- }
-}
-
-
-void Heap::EnsureFromSpaceIsCommitted() {
- if (new_space_.CommitFromSpaceIfNeeded()) return;
-
- // Committing memory to from space failed.
- // Memory is exhausted and we will die.
- V8::FatalProcessOutOfMemory("Committing semi space failed.");
-}
-
-
-void Heap::ClearJSFunctionResultCaches() {
- if (isolate_->bootstrapper()->IsActive()) return;
-
- Object* context = native_contexts_list_;
- while (!context->IsUndefined()) {
- // Get the caches for this context. GC can happen when the context
- // is not fully initialized, so the caches can be undefined.
- Object* caches_or_undefined =
- Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
- if (!caches_or_undefined->IsUndefined()) {
- FixedArray* caches = FixedArray::cast(caches_or_undefined);
- // Clear the caches:
- int length = caches->length();
- for (int i = 0; i < length; i++) {
- JSFunctionResultCache::cast(caches->get(i))->Clear();
- }
- }
- // Get the next context:
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
- }
-}
-
-
-void Heap::ClearNormalizedMapCaches() {
- if (isolate_->bootstrapper()->IsActive() &&
- !incremental_marking()->IsMarking()) {
- return;
- }
-
- Object* context = native_contexts_list_;
- while (!context->IsUndefined()) {
- // GC can happen when the context is not fully initialized,
- // so the cache can be undefined.
- Object* cache =
- Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
- if (!cache->IsUndefined()) {
- NormalizedMapCache::cast(cache)->Clear();
- }
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
- }
-}
-
-
-void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
- double survival_rate =
- (static_cast<double>(young_survivors_after_last_gc_) * 100) /
- start_new_space_size;
-
- if (survival_rate > kYoungSurvivalRateHighThreshold) {
- high_survival_rate_period_length_++;
- } else {
- high_survival_rate_period_length_ = 0;
- }
-
- if (survival_rate < kYoungSurvivalRateLowThreshold) {
- low_survival_rate_period_length_++;
- } else {
- low_survival_rate_period_length_ = 0;
- }
-
- double survival_rate_diff = survival_rate_ - survival_rate;
-
- if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
- set_survival_rate_trend(DECREASING);
- } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
- set_survival_rate_trend(INCREASING);
- } else {
- set_survival_rate_trend(STABLE);
- }
-
- survival_rate_ = survival_rate;
-}
-
-bool Heap::PerformGarbageCollection(GarbageCollector collector,
- GCTracer* tracer) {
- bool next_gc_likely_to_collect_more = false;
-
- if (collector != SCAVENGER) {
- PROFILE(isolate_, CodeMovingGCEvent());
- }
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- VerifyStringTable();
- }
-#endif
-
- GCType gc_type =
- collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
-
- {
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- VMState state(isolate_, EXTERNAL);
- CallGCPrologueCallbacks(gc_type);
- }
-
- EnsureFromSpaceIsCommitted();
-
- int start_new_space_size = Heap::new_space()->SizeAsInt();
-
- if (IsHighSurvivalRate()) {
- // We speed up the incremental marker if it is running so that it
- // does not fall behind the rate of promotion, which would cause a
- // constantly growing old space.
- incremental_marking()->NotifyOfHighPromotionRate();
- }
-
- if (collector == MARK_COMPACTOR) {
- // Perform mark-sweep with optional compaction.
- MarkCompact(tracer);
- sweep_generation_++;
- bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
- IsStableOrIncreasingSurvivalTrend();
-
- UpdateSurvivalRateTrend(start_new_space_size);
-
- size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
-
- if (high_survival_rate_during_scavenges &&
- IsStableOrIncreasingSurvivalTrend()) {
- // Stable high survival rates of young objects both during partial and
- // full collection indicate that mutator is either building or modifying
- // a structure with a long lifetime.
- // In this case we aggressively raise old generation memory limits to
- // postpone subsequent mark-sweep collection and thus trade memory
- // space for the mutation speed.
- old_gen_limit_factor_ = 2;
- } else {
- old_gen_limit_factor_ = 1;
- }
-
- old_gen_promotion_limit_ =
- OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
- old_gen_allocation_limit_ =
- OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-
- old_gen_exhausted_ = false;
- } else {
- tracer_ = tracer;
- Scavenge();
- tracer_ = NULL;
-
- UpdateSurvivalRateTrend(start_new_space_size);
- }
-
- if (!new_space_high_promotion_mode_active_ &&
- new_space_.Capacity() == new_space_.MaximumCapacity() &&
- IsStableOrIncreasingSurvivalTrend() &&
- IsHighSurvivalRate()) {
- // Stable high survival rates even though young generation is at
- // maximum capacity indicates that most objects will be promoted.
- // To decrease scavenger pauses and final mark-sweep pauses, we
- // have to limit maximal capacity of the young generation.
- new_space_high_promotion_mode_active_ = true;
- if (FLAG_trace_gc) {
- PrintPID("Limited new space size due to high promotion rate: %d MB\n",
- new_space_.InitialCapacity() / MB);
- }
- } else if (new_space_high_promotion_mode_active_ &&
- IsStableOrDecreasingSurvivalTrend() &&
- IsLowSurvivalRate()) {
- // Decreasing low survival rates might indicate that the above high
- // promotion mode is over and we should allow the young generation
- // to grow again.
- new_space_high_promotion_mode_active_ = false;
- if (FLAG_trace_gc) {
- PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
- new_space_.MaximumCapacity() / MB);
- }
- }
-
- if (new_space_high_promotion_mode_active_ &&
- new_space_.Capacity() > new_space_.InitialCapacity()) {
- new_space_.Shrink();
- }
-
- isolate_->counters()->objs_since_last_young()->Set(0);
-
- // Callbacks that fire after this point might trigger nested GCs and
- // restart incremental marking, the assertion can't be moved down.
- ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
-
- gc_post_processing_depth_++;
- { DisableAssertNoAllocation allow_allocation;
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- next_gc_likely_to_collect_more =
- isolate_->global_handles()->PostGarbageCollectionProcessing(
- collector, tracer);
- }
- gc_post_processing_depth_--;
-
- // Update relocatables.
- Relocatable::PostGarbageCollectionProcessing();
-
- if (collector == MARK_COMPACTOR) {
- // Register the amount of external allocated memory.
- amount_of_external_allocated_memory_at_last_global_gc_ =
- amount_of_external_allocated_memory_;
- }
-
- {
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- VMState state(isolate_, EXTERNAL);
- CallGCEpilogueCallbacks(gc_type);
- }
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- VerifyStringTable();
- }
-#endif
-
- return next_gc_likely_to_collect_more;
-}
-
-
-void Heap::CallGCPrologueCallbacks(GCType gc_type) {
- if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
- global_gc_prologue_callback_();
- }
- for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
- if (gc_type & gc_prologue_callbacks_[i].gc_type) {
- gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
- }
- }
-}
-
-
-void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
- for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
- if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
- gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
- }
- }
- if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
- global_gc_epilogue_callback_();
- }
-}
-
-
-void Heap::MarkCompact(GCTracer* tracer) {
- gc_state_ = MARK_COMPACT;
- LOG(isolate_, ResourceEvent("markcompact", "begin"));
-
- mark_compact_collector_.Prepare(tracer);
-
- ms_count_++;
- tracer->set_full_gc_count(ms_count_);
-
- MarkCompactPrologue();
-
- mark_compact_collector_.CollectGarbage();
-
- LOG(isolate_, ResourceEvent("markcompact", "end"));
-
- gc_state_ = NOT_IN_GC;
-
- isolate_->counters()->objs_since_last_full()->Set(0);
-
- contexts_disposed_ = 0;
-
- flush_monomorphic_ics_ = false;
-}
-
-
-void Heap::MarkCompactPrologue() {
- // At any old GC clear the keyed lookup cache to enable collection of unused
- // maps.
- isolate_->keyed_lookup_cache()->Clear();
- isolate_->context_slot_cache()->Clear();
- isolate_->descriptor_lookup_cache()->Clear();
- RegExpResultsCache::Clear(string_split_cache());
- RegExpResultsCache::Clear(regexp_multiple_cache());
-
- isolate_->compilation_cache()->MarkCompactPrologue();
-
- CompletelyClearInstanceofCache();
-
- FlushNumberStringCache();
- if (FLAG_cleanup_code_caches_at_gc) {
- polymorphic_code_cache()->set_cache(undefined_value());
- }
-
- ClearNormalizedMapCaches();
-}
-
-
-Object* Heap::FindCodeObject(Address a) {
- return isolate()->inner_pointer_to_code_cache()->
- GcSafeFindCodeForInnerPointer(a);
-}
-
-
-// Helper class for copying HeapObjects
-class ScavengeVisitor: public ObjectVisitor {
- public:
- explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
-
- void VisitPointer(Object** p) { ScavengePointer(p); }
-
- void VisitPointers(Object** start, Object** end) {
- // Copy all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) ScavengePointer(p);
- }
-
- private:
- void ScavengePointer(Object** p) {
- Object* object = *p;
- if (!heap_->InNewSpace(object)) return;
- Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
- reinterpret_cast<HeapObject*>(object));
- }
-
- Heap* heap_;
-};
-
-
-#ifdef VERIFY_HEAP
-// Visitor class to verify pointers in code or data space do not point into
-// new space.
-class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object**end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
- }
- }
- }
-};
-
-
-static void VerifyNonPointerSpacePointers() {
- // Verify that there are no pointers to new space in spaces where we
- // do not expect them.
- VerifyNonPointerSpacePointersVisitor v;
- HeapObjectIterator code_it(HEAP->code_space());
- for (HeapObject* object = code_it.Next();
- object != NULL; object = code_it.Next())
- object->Iterate(&v);
-
- // The old data space was normally swept conservatively so that the iterator
- // doesn't work, so we normally skip the next bit.
- if (!HEAP->old_data_space()->was_swept_conservatively()) {
- HeapObjectIterator data_it(HEAP->old_data_space());
- for (HeapObject* object = data_it.Next();
- object != NULL; object = data_it.Next())
- object->Iterate(&v);
- }
-}
-#endif // VERIFY_HEAP
-
-
-void Heap::CheckNewSpaceExpansionCriteria() {
- if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
- survived_since_last_expansion_ > new_space_.Capacity() &&
- !new_space_high_promotion_mode_active_) {
- // Grow the size of new space if there is room to grow, enough data
- // has survived scavenge since the last expansion and we are not in
- // high promotion mode.
- new_space_.Grow();
- survived_since_last_expansion_ = 0;
- }
-}
-
-
-static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
- return heap->InNewSpace(*p) &&
- !HeapObject::cast(*p)->map_word().IsForwardingAddress();
-}
-
-
-void Heap::ScavengeStoreBufferCallback(
- Heap* heap,
- MemoryChunk* page,
- StoreBufferEvent event) {
- heap->store_buffer_rebuilder_.Callback(page, event);
-}
-
-
-void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
- if (event == kStoreBufferStartScanningPagesEvent) {
- start_of_current_page_ = NULL;
- current_page_ = NULL;
- } else if (event == kStoreBufferScanningPageEvent) {
- if (current_page_ != NULL) {
- // If this page already overflowed the store buffer during this iteration.
- if (current_page_->scan_on_scavenge()) {
- // Then we should wipe out the entries that have been added for it.
- store_buffer_->SetTop(start_of_current_page_);
- } else if (store_buffer_->Top() - start_of_current_page_ >=
- (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
- // Did we find too many pointers in the previous page? The heuristic is
- // that no page can take more then 1/5 the remaining slots in the store
- // buffer.
- current_page_->set_scan_on_scavenge(true);
- store_buffer_->SetTop(start_of_current_page_);
- } else {
- // In this case the page we scanned took a reasonable number of slots in
- // the store buffer. It has now been rehabilitated and is no longer
- // marked scan_on_scavenge.
- ASSERT(!current_page_->scan_on_scavenge());
- }
- }
- start_of_current_page_ = store_buffer_->Top();
- current_page_ = page;
- } else if (event == kStoreBufferFullEvent) {
- // The current page overflowed the store buffer again. Wipe out its entries
- // in the store buffer and mark it scan-on-scavenge again. This may happen
- // several times while scanning.
- if (current_page_ == NULL) {
- // Store Buffer overflowed while scanning promoted objects. These are not
- // in any particular page, though they are likely to be clustered by the
- // allocation routines.
- store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
- } else {
- // Store Buffer overflowed while scanning a particular old space page for
- // pointers to new space.
- ASSERT(current_page_ == page);
- ASSERT(page != NULL);
- current_page_->set_scan_on_scavenge(true);
- ASSERT(start_of_current_page_ != store_buffer_->Top());
- store_buffer_->SetTop(start_of_current_page_);
- }
- } else {
- UNREACHABLE();
- }
-}
-
-
-void PromotionQueue::Initialize() {
- // Assumes that a NewSpacePage exactly fits a number of promotion queue
- // entries (where each is a pair of intptr_t). This allows us to simplify
- // the test fpr when to switch pages.
- ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
- == 0);
- limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
- front_ = rear_ =
- reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
- emergency_stack_ = NULL;
- guard_ = false;
-}
-
-
-void PromotionQueue::RelocateQueueHead() {
- ASSERT(emergency_stack_ == NULL);
-
- Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
- intptr_t* head_start = rear_;
- intptr_t* head_end =
- Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
-
- int entries_count =
- static_cast<int>(head_end - head_start) / kEntrySizeInWords;
-
- emergency_stack_ = new List<Entry>(2 * entries_count);
-
- while (head_start != head_end) {
- int size = static_cast<int>(*(head_start++));
- HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
- emergency_stack_->Add(Entry(obj, size));
- }
- rear_ = head_end;
-}
-
-
-class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
- public:
- explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
-
- virtual Object* RetainAs(Object* object) {
- if (!heap_->InFromSpace(object)) {
- return object;
- }
-
- MapWord map_word = HeapObject::cast(object)->map_word();
- if (map_word.IsForwardingAddress()) {
- return map_word.ToForwardingAddress();
- }
- return NULL;
- }
-
- private:
- Heap* heap_;
-};
-
-
-void Heap::Scavenge() {
- RelocationLock relocation_lock(this);
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
-#endif
-
- gc_state_ = SCAVENGE;
-
- // Implements Cheney's copying algorithm
- LOG(isolate_, ResourceEvent("scavenge", "begin"));
-
- // Clear descriptor cache.
- isolate_->descriptor_lookup_cache()->Clear();
-
- // Used for updating survived_since_last_expansion_ at function end.
- intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
-
- CheckNewSpaceExpansionCriteria();
-
- SelectScavengingVisitorsTable();
-
- incremental_marking()->PrepareForScavenge();
-
- paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
- paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
-
- // Flip the semispaces. After flipping, to space is empty, from space has
- // live objects.
- new_space_.Flip();
- new_space_.ResetAllocationInfo();
-
- // We need to sweep newly copied objects which can be either in the
- // to space or promoted to the old generation. For to-space
- // objects, we treat the bottom of the to space as a queue. Newly
- // copied and unswept objects lie between a 'front' mark and the
- // allocation pointer.
- //
- // Promoted objects can go into various old-generation spaces, and
- // can be allocated internally in the spaces (from the free list).
- // We treat the top of the to space as a queue of addresses of
- // promoted objects. The addresses of newly promoted and unswept
- // objects lie between a 'front' mark and a 'rear' mark that is
- // updated as a side effect of promoting an object.
- //
- // There is guaranteed to be enough room at the top of the to space
- // for the addresses of promoted objects: every object promoted
- // frees up its size in bytes from the top of the new space, and
- // objects are at least one pointer in size.
- Address new_space_front = new_space_.ToSpaceStart();
- promotion_queue_.Initialize();
-
-#ifdef DEBUG
- store_buffer()->Clean();
-#endif
-
- ScavengeVisitor scavenge_visitor(this);
- // Copy roots.
- IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
-
- // Copy objects reachable from the old generation.
- {
- StoreBufferRebuildScope scope(this,
- store_buffer(),
- &ScavengeStoreBufferCallback);
- store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
- }
-
- // Copy objects reachable from cells by scavenging cell values directly.
- HeapObjectIterator cell_iterator(cell_space_);
- for (HeapObject* heap_object = cell_iterator.Next();
- heap_object != NULL;
- heap_object = cell_iterator.Next()) {
- if (heap_object->IsJSGlobalPropertyCell()) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
- Address value_address = cell->ValueAddress();
- scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
- }
- }
-
- // Copy objects reachable from the code flushing candidates list.
- MarkCompactCollector* collector = mark_compact_collector();
- if (collector->is_code_flushing_enabled()) {
- collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
- }
-
- // Scavenge object reachable from the native contexts list directly.
- scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
-
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
-
- while (isolate()->global_handles()->IterateObjectGroups(
- &scavenge_visitor, &IsUnscavengedHeapObject)) {
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
- }
- isolate()->global_handles()->RemoveObjectGroups();
- isolate()->global_handles()->RemoveImplicitRefGroups();
-
- isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
- &IsUnscavengedHeapObject);
- isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
- &scavenge_visitor);
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
-
- UpdateNewSpaceReferencesInExternalStringTable(
- &UpdateNewSpaceReferenceInExternalStringTableEntry);
-
- error_object_list_.UpdateReferencesInNewSpace(this);
-
- promotion_queue_.Destroy();
-
- if (!FLAG_watch_ic_patching) {
- isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
- }
- incremental_marking()->UpdateMarkingDequeAfterScavenge();
-
- ScavengeWeakObjectRetainer weak_object_retainer(this);
- ProcessWeakReferences(&weak_object_retainer);
-
- ASSERT(new_space_front == new_space_.top());
-
- // Set age mark.
- new_space_.set_age_mark(new_space_.top());
-
- new_space_.LowerInlineAllocationLimit(
- new_space_.inline_allocation_limit_step());
-
- // Update how much has survived scavenge.
- IncrementYoungSurvivorsCounter(static_cast<int>(
- (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
-
- LOG(isolate_, ResourceEvent("scavenge", "end"));
-
- gc_state_ = NOT_IN_GC;
-
- scavenges_since_last_idle_round_++;
-}
-
-
-HeapObject* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(
- Heap* heap,
- Object** p) {
- MapWord first_word = HeapObject::cast(*p)->map_word();
-
- if (!first_word.IsForwardingAddress()) {
- // Unreachable external string can be finalized.
- heap->FinalizeExternalString(HeapObject::cast(*p));
- return NULL;
- }
-
- // String is still reachable.
- return HeapObject::cast(first_word.ToForwardingAddress());
-}
-
-
-void Heap::UpdateNewSpaceReferencesInExternalStringTable(
- ExternalStringTableUpdaterCallback updater_func) {
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- external_string_table_.Verify();
- }
-#endif
-
- if (external_string_table_.new_space_strings_.is_empty()) return;
-
- Object** start = &external_string_table_.new_space_strings_[0];
- Object** end = start + external_string_table_.new_space_strings_.length();
- Object** last = start;
-
- for (Object** p = start; p < end; ++p) {
- ASSERT(InFromSpace(*p));
- HeapObject* target = updater_func(this, p);
-
- if (target == NULL) continue;
-
- ASSERT(target->IsExternalString() ||
- target->map()->has_external_resource());
-
- if (InNewSpace(target)) {
- // String is still in new space. Update the table entry.
- *last = target;
- ++last;
- } else {
- // String got promoted. Move it to the old string list.
- external_string_table_.AddOldObject(target);
- }
- }
-
- ASSERT(last <= end);
- external_string_table_.ShrinkNewObjects(static_cast<int>(last - start));
-}
-
-
-void Heap::UpdateReferencesInExternalStringTable(
- ExternalStringTableUpdaterCallback updater_func) {
-
- // Update old space string references.
- if (external_string_table_.old_space_strings_.length() > 0) {
- Object** start = &external_string_table_.old_space_strings_[0];
- Object** end = start + external_string_table_.old_space_strings_.length();
- for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
- }
-
- UpdateNewSpaceReferencesInExternalStringTable(updater_func);
-}
-
-
-static Object* ProcessFunctionWeakReferences(Heap* heap,
- Object* function,
- WeakObjectRetainer* retainer,
- bool record_slots) {
- Object* undefined = heap->undefined_value();
- Object* head = undefined;
- JSFunction* tail = NULL;
- Object* candidate = function;
- while (candidate != undefined) {
- // Check whether to keep the candidate in the list.
- JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
- Object* retain = retainer->RetainAs(candidate);
- if (retain != NULL) {
- if (head == undefined) {
- // First element in the list.
- head = retain;
- } else {
- // Subsequent elements in the list.
- ASSERT(tail != NULL);
- tail->set_next_function_link(retain);
- if (record_slots) {
- Object** next_function =
- HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
- heap->mark_compact_collector()->RecordSlot(
- next_function, next_function, retain);
- }
- }
- // Retained function is new tail.
- candidate_function = reinterpret_cast<JSFunction*>(retain);
- tail = candidate_function;
-
- ASSERT(retain->IsUndefined() || retain->IsJSFunction());
-
- if (retain == undefined) break;
- }
-
- // Move to next element in the list.
- candidate = candidate_function->next_function_link();
- }
-
- // Terminate the list if there is one or more elements.
- if (tail != NULL) {
- tail->set_next_function_link(undefined);
- }
-
- return head;
-}
-
-
-void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
- Object* undefined = undefined_value();
- Object* head = undefined;
- Context* tail = NULL;
- Object* candidate = native_contexts_list_;
-
- // We don't record weak slots during marking or scavenges.
- // Instead we do it once when we complete mark-compact cycle.
- // Note that write barrier has no effect if we are already in the middle of
- // compacting mark-sweep cycle and we have to record slots manually.
- bool record_slots =
- gc_state() == MARK_COMPACT &&
- mark_compact_collector()->is_compacting();
-
- while (candidate != undefined) {
- // Check whether to keep the candidate in the list.
- Context* candidate_context = reinterpret_cast<Context*>(candidate);
- Object* retain = retainer->RetainAs(candidate);
- if (retain != NULL) {
- if (head == undefined) {
- // First element in the list.
- head = retain;
- } else {
- // Subsequent elements in the list.
- ASSERT(tail != NULL);
- tail->set_unchecked(this,
- Context::NEXT_CONTEXT_LINK,
- retain,
- UPDATE_WRITE_BARRIER);
-
- if (record_slots) {
- Object** next_context =
- HeapObject::RawField(
- tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
- mark_compact_collector()->RecordSlot(
- next_context, next_context, retain);
- }
- }
- // Retained context is new tail.
- candidate_context = reinterpret_cast<Context*>(retain);
- tail = candidate_context;
-
- if (retain == undefined) break;
-
- // Process the weak list of optimized functions for the context.
- Object* function_list_head =
- ProcessFunctionWeakReferences(
- this,
- candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
- retainer,
- record_slots);
- candidate_context->set_unchecked(this,
- Context::OPTIMIZED_FUNCTIONS_LIST,
- function_list_head,
- UPDATE_WRITE_BARRIER);
- if (record_slots) {
- Object** optimized_functions =
- HeapObject::RawField(
- tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
- mark_compact_collector()->RecordSlot(
- optimized_functions, optimized_functions, function_list_head);
- }
- }
-
- // Move to next element in the list.
- candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
- }
-
- // Terminate the list if there is one or more elements.
- if (tail != NULL) {
- tail->set_unchecked(this,
- Context::NEXT_CONTEXT_LINK,
- Heap::undefined_value(),
- UPDATE_WRITE_BARRIER);
- }
-
- // Update the head of the list of contexts.
- native_contexts_list_ = head;
-}
-
-
-void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
- AssertNoAllocation no_allocation;
-
- // Both the external string table and the string table may contain
- // external strings, but neither lists them exhaustively, nor is the
- // intersection set empty. Therefore we iterate over the external string
- // table first, ignoring internalized strings, and then over the
- // internalized string table.
-
- class ExternalStringTableVisitorAdapter : public ObjectVisitor {
- public:
- explicit ExternalStringTableVisitorAdapter(
- v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
- virtual void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- // Visit non-internalized external strings,
- // since internalized strings are listed in the string table.
- if (!(*p)->IsInternalizedString()) {
- ASSERT((*p)->IsExternalString());
- visitor_->VisitExternalString(Utils::ToLocal(
- Handle<String>(String::cast(*p))));
- }
- }
- }
- private:
- v8::ExternalResourceVisitor* visitor_;
- } external_string_table_visitor(visitor);
-
- external_string_table_.Iterate(&external_string_table_visitor);
-
- class StringTableVisitorAdapter : public ObjectVisitor {
- public:
- explicit StringTableVisitorAdapter(
- v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
- virtual void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsExternalString()) {
- ASSERT((*p)->IsInternalizedString());
- visitor_->VisitExternalString(Utils::ToLocal(
- Handle<String>(String::cast(*p))));
- }
- }
- }
- private:
- v8::ExternalResourceVisitor* visitor_;
- } string_table_visitor(visitor);
-
- string_table()->IterateElements(&string_table_visitor);
-}
-
-
-class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
- public:
- static inline void VisitPointer(Heap* heap, Object** p) {
- Object* object = *p;
- if (!heap->InNewSpace(object)) return;
- Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
- reinterpret_cast<HeapObject*>(object));
- }
-};
-
-
-Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
- Address new_space_front) {
- do {
- SemiSpace::AssertValidRange(new_space_front, new_space_.top());
- // The addresses new_space_front and new_space_.top() define a
- // queue of unprocessed copied objects. Process them until the
- // queue is empty.
- while (new_space_front != new_space_.top()) {
- if (!NewSpacePage::IsAtEnd(new_space_front)) {
- HeapObject* object = HeapObject::FromAddress(new_space_front);
- new_space_front +=
- NewSpaceScavenger::IterateBody(object->map(), object);
- } else {
- new_space_front =
- NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
- }
- }
-
- // Promote and process all the to-be-promoted objects.
- {
- StoreBufferRebuildScope scope(this,
- store_buffer(),
- &ScavengeStoreBufferCallback);
- while (!promotion_queue()->is_empty()) {
- HeapObject* target;
- int size;
- promotion_queue()->remove(&target, &size);
-
- // Promoted object might be already partially visited
- // during old space pointer iteration. Thus we search specificly
- // for pointers to from semispace instead of looking for pointers
- // to new space.
- ASSERT(!target->IsMap());
- IterateAndMarkPointersToFromSpace(target->address(),
- target->address() + size,
- &ScavengeObject);
- }
- }
-
- // Take another spin if there are now unswept objects in new space
- // (there are currently no more unswept promoted objects).
- } while (new_space_front != new_space_.top());
-
- return new_space_front;
-}
-
-
-STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
-
-
-INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
- HeapObject* object,
- int size));
-
-static HeapObject* EnsureDoubleAligned(Heap* heap,
- HeapObject* object,
- int size) {
- if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
- heap->CreateFillerObjectAt(object->address(), kPointerSize);
- return HeapObject::FromAddress(object->address() + kPointerSize);
- } else {
- heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
- kPointerSize);
- return object;
- }
-}
-
-
-enum LoggingAndProfiling {
- LOGGING_AND_PROFILING_ENABLED,
- LOGGING_AND_PROFILING_DISABLED
-};
-
-
-enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
-
-
-template<MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
-class ScavengingVisitor : public StaticVisitorBase {
- public:
- static void Initialize() {
- table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
- table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
- table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
- table_.Register(kVisitByteArray, &EvacuateByteArray);
- table_.Register(kVisitFixedArray, &EvacuateFixedArray);
- table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
-
- table_.Register(kVisitNativeContext,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<Context::kSize>);
-
- table_.Register(kVisitConsString,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<ConsString::kSize>);
-
- table_.Register(kVisitSlicedString,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<SlicedString::kSize>);
-
- table_.Register(kVisitSharedFunctionInfo,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<SharedFunctionInfo::kSize>);
-
- table_.Register(kVisitJSWeakMap,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- Visit);
-
- table_.Register(kVisitJSRegExp,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- Visit);
-
- if (marks_handling == IGNORE_MARKS) {
- table_.Register(kVisitJSFunction,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<JSFunction::kSize>);
- } else {
- table_.Register(kVisitJSFunction, &EvacuateJSFunction);
- }
-
- table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
- kVisitDataObject,
- kVisitDataObjectGeneric>();
-
- table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
- kVisitJSObject,
- kVisitJSObjectGeneric>();
-
- table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
- kVisitStruct,
- kVisitStructGeneric>();
- }
-
- static VisitorDispatchTable<ScavengingCallback>* GetTable() {
- return &table_;
- }
-
- private:
- enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
- enum SizeRestriction { SMALL, UNKNOWN_SIZE };
-
- static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
- bool should_record = false;
-#ifdef DEBUG
- should_record = FLAG_heap_stats;
-#endif
- should_record = should_record || FLAG_log_gc;
- if (should_record) {
- if (heap->new_space()->Contains(obj)) {
- heap->new_space()->RecordAllocation(obj);
- } else {
- heap->new_space()->RecordPromotion(obj);
- }
- }
- }
-
- // Helper function used by CopyObject to copy a source object to an
- // allocated target object and update the forwarding pointer in the source
- // object. Returns the target object.
- INLINE(static void MigrateObject(Heap* heap,
- HeapObject* source,
- HeapObject* target,
- int size)) {
- // Copy the content of source to target.
- heap->CopyBlock(target->address(), source->address(), size);
-
- // Set the forwarding address.
- source->set_map_word(MapWord::FromForwardingAddress(target));
-
- if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
- // Update NewSpace stats if necessary.
- RecordCopiedObject(heap, target);
- HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
- Isolate* isolate = heap->isolate();
- if (isolate->logger()->is_logging_code_events() ||
- CpuProfiler::is_profiling(isolate)) {
- if (target->IsSharedFunctionInfo()) {
- PROFILE(isolate, SharedFunctionInfoMoveEvent(
- source->address(), target->address()));
- }
- }
- }
-
- if (marks_handling == TRANSFER_MARKS) {
- if (Marking::TransferColor(source, target)) {
- MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
- }
- }
- }
-
-
- template<ObjectContents object_contents,
- SizeRestriction size_restriction,
- int alignment>
- static inline void EvacuateObject(Map* map,
- HeapObject** slot,
- HeapObject* object,
- int object_size) {
- SLOW_ASSERT((size_restriction != SMALL) ||
- (object_size <= Page::kMaxNonCodeHeapObjectSize));
- SLOW_ASSERT(object->Size() == object_size);
-
- int allocation_size = object_size;
- if (alignment != kObjectAlignment) {
- ASSERT(alignment == kDoubleAlignment);
- allocation_size += kPointerSize;
- }
-
- Heap* heap = map->GetHeap();
- if (heap->ShouldBePromoted(object->address(), object_size)) {
- MaybeObject* maybe_result;
-
- if ((size_restriction != SMALL) &&
- (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
- maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
- NOT_EXECUTABLE);
- } else {
- if (object_contents == DATA_OBJECT) {
- maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
- } else {
- maybe_result =
- heap->old_pointer_space()->AllocateRaw(allocation_size);
- }
- }
-
- Object* result = NULL; // Initialization to please compiler.
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
-
- if (alignment != kObjectAlignment) {
- target = EnsureDoubleAligned(heap, target, allocation_size);
- }
-
- // Order is important: slot might be inside of the target if target
- // was allocated over a dead object and slot comes from the store
- // buffer.
- *slot = target;
- MigrateObject(heap, object, target, object_size);
-
- if (object_contents == POINTER_OBJECT) {
- if (map->instance_type() == JS_FUNCTION_TYPE) {
- heap->promotion_queue()->insert(
- target, JSFunction::kNonWeakFieldsEndOffset);
- } else {
- heap->promotion_queue()->insert(target, object_size);
- }
- }
-
- heap->tracer()->increment_promoted_objects_size(object_size);
- return;
- }
- }
- MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
- heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
- Object* result = allocation->ToObjectUnchecked();
- HeapObject* target = HeapObject::cast(result);
-
- if (alignment != kObjectAlignment) {
- target = EnsureDoubleAligned(heap, target, allocation_size);
- }
-
- // Order is important: slot might be inside of the target if target
- // was allocated over a dead object and slot comes from the store
- // buffer.
- *slot = target;
- MigrateObject(heap, object, target, object_size);
- return;
- }
-
-
- static inline void EvacuateJSFunction(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<JSFunction::kSize>(map, slot, object);
-
- HeapObject* target = *slot;
- MarkBit mark_bit = Marking::MarkBitFrom(target);
- if (Marking::IsBlack(mark_bit)) {
- // This object is black and it might not be rescanned by marker.
- // We should explicitly record code entry slot for compaction because
- // promotion queue processing (IterateAndMarkPointersToFromSpace) will
- // miss it as it is not HeapObject-tagged.
- Address code_entry_slot =
- target->address() + JSFunction::kCodeEntryOffset;
- Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
- map->GetHeap()->mark_compact_collector()->
- RecordCodeEntrySlot(code_entry_slot, code);
- }
- }
-
-
- static inline void EvacuateFixedArray(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
- EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
- slot,
- object,
- object_size);
- }
-
-
- static inline void EvacuateFixedDoubleArray(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
- int object_size = FixedDoubleArray::SizeFor(length);
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
- map,
- slot,
- object,
- object_size);
- }
-
-
- static inline void EvacuateByteArray(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
- map, slot, object, object_size);
- }
-
-
- static inline void EvacuateSeqOneByteString(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = SeqOneByteString::cast(object)->
- SeqOneByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
- map, slot, object, object_size);
- }
-
-
- static inline void EvacuateSeqTwoByteString(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = SeqTwoByteString::cast(object)->
- SeqTwoByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
- map, slot, object, object_size);
- }
-
-
- static inline bool IsShortcutCandidate(int type) {
- return ((type & kShortcutTypeMask) == kShortcutTypeTag);
- }
-
- static inline void EvacuateShortcutCandidate(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- ASSERT(IsShortcutCandidate(map->instance_type()));
-
- Heap* heap = map->GetHeap();
-
- if (marks_handling == IGNORE_MARKS &&
- ConsString::cast(object)->unchecked_second() ==
- heap->empty_string()) {
- HeapObject* first =
- HeapObject::cast(ConsString::cast(object)->unchecked_first());
-
- *slot = first;
-
- if (!heap->InNewSpace(first)) {
- object->set_map_word(MapWord::FromForwardingAddress(first));
- return;
- }
-
- MapWord first_word = first->map_word();
- if (first_word.IsForwardingAddress()) {
- HeapObject* target = first_word.ToForwardingAddress();
-
- *slot = target;
- object->set_map_word(MapWord::FromForwardingAddress(target));
- return;
- }
-
- heap->DoScavengeObject(first->map(), slot, first);
- object->set_map_word(MapWord::FromForwardingAddress(*slot));
- return;
- }
-
- int object_size = ConsString::kSize;
- EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
- map, slot, object, object_size);
- }
-
- template<ObjectContents object_contents>
- class ObjectEvacuationStrategy {
- public:
- template<int object_size>
- static inline void VisitSpecialized(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- EvacuateObject<object_contents, SMALL, kObjectAlignment>(
- map, slot, object, object_size);
- }
-
- static inline void Visit(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = map->instance_size();
- EvacuateObject<object_contents, SMALL, kObjectAlignment>(
- map, slot, object, object_size);
- }
- };
-
- static VisitorDispatchTable<ScavengingCallback> table_;
-};
-
-
-template<MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
-VisitorDispatchTable<ScavengingCallback>
- ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
-
-
-static void InitializeScavengingVisitorsTables() {
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
-}
-
-
-void Heap::SelectScavengingVisitorsTable() {
- bool logging_and_profiling =
- isolate()->logger()->is_logging() ||
- CpuProfiler::is_profiling(isolate()) ||
- (isolate()->heap_profiler() != NULL &&
- isolate()->heap_profiler()->is_profiling());
-
- if (!incremental_marking()->IsMarking()) {
- if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::GetTable());
- } else {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::GetTable());
- }
- } else {
- if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::GetTable());
- } else {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::GetTable());
- }
-
- if (incremental_marking()->IsCompacting()) {
- // When compacting forbid short-circuiting of cons-strings.
- // Scavenging code relies on the fact that new space object
- // can't be evacuated into evacuation candidate but
- // short-circuiting violates this assumption.
- scavenging_visitors_table_.Register(
- StaticVisitorBase::kVisitShortcutCandidate,
- scavenging_visitors_table_.GetVisitorById(
- StaticVisitorBase::kVisitConsString));
- }
- }
-}
-
-
-void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- SLOW_ASSERT(HEAP->InFromSpace(object));
- MapWord first_word = object->map_word();
- SLOW_ASSERT(!first_word.IsForwardingAddress());
- Map* map = first_word.ToMap();
- map->GetHeap()->DoScavengeObject(map, p, object);
-}
-
-
-MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
- int instance_size) {
- Object* result;
- MaybeObject* maybe_result = AllocateRawMap();
- if (!maybe_result->ToObject(&result)) return maybe_result;
-
- // Map::cast cannot be used due to uninitialized map field.
- reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
- reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
- reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
- reinterpret_cast<Map*>(result)->set_visitor_id(
- StaticVisitorBase::GetVisitorId(instance_type, instance_size));
- reinterpret_cast<Map*>(result)->set_inobject_properties(0);
- reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
- reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
- reinterpret_cast<Map*>(result)->set_bit_field(0);
- reinterpret_cast<Map*>(result)->set_bit_field2(0);
- int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
- Map::OwnsDescriptors::encode(true);
- reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateMap(InstanceType instance_type,
- int instance_size,
- ElementsKind elements_kind) {
- Object* result;
- MaybeObject* maybe_result = AllocateRawMap();
- if (!maybe_result->To(&result)) return maybe_result;
-
- Map* map = reinterpret_cast<Map*>(result);
- map->set_map_no_write_barrier(meta_map());
- map->set_instance_type(instance_type);
- map->set_visitor_id(
- StaticVisitorBase::GetVisitorId(instance_type, instance_size));
- map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
- map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
- map->set_instance_size(instance_size);
- map->set_inobject_properties(0);
- map->set_pre_allocated_property_fields(0);
- map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
- map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
- SKIP_WRITE_BARRIER);
- map->init_back_pointer(undefined_value());
- map->set_unused_property_fields(0);
- map->set_instance_descriptors(empty_descriptor_array());
- map->set_bit_field(0);
- map->set_bit_field2(1 << Map::kIsExtensible);
- int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
- Map::OwnsDescriptors::encode(true);
- map->set_bit_field3(bit_field3);
- map->set_elements_kind(elements_kind);
-
- return map;
-}
-
-
-MaybeObject* Heap::AllocateCodeCache() {
- CodeCache* code_cache;
- { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
- if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
- }
- code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
- return code_cache;
-}
-
-
-MaybeObject* Heap::AllocatePolymorphicCodeCache() {
- return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
-}
-
-
-MaybeObject* Heap::AllocateAccessorPair() {
- AccessorPair* accessors;
- { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
- if (!maybe_accessors->To(&accessors)) return maybe_accessors;
- }
- accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
- accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
- return accessors;
-}
-
-
-MaybeObject* Heap::AllocateTypeFeedbackInfo() {
- TypeFeedbackInfo* info;
- { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
- if (!maybe_info->To(&info)) return maybe_info;
- }
- info->initialize_storage();
- info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
- SKIP_WRITE_BARRIER);
- return info;
-}
-
-
-MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
- AliasedArgumentsEntry* entry;
- { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
- if (!maybe_entry->To(&entry)) return maybe_entry;
- }
- entry->set_aliased_context_slot(aliased_context_slot);
- return entry;
-}
-
-
-const Heap::StringTypeTable Heap::string_type_table[] = {
-#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
- {type, size, k##camel_name##MapRootIndex},
- STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
-#undef STRING_TYPE_ELEMENT
-};
-
-
-const Heap::ConstantStringTable Heap::constant_string_table[] = {
-#define CONSTANT_STRING_ELEMENT(name, contents) \
- {contents, k##name##RootIndex},
- INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
-#undef CONSTANT_STRING_ELEMENT
-};
-
-
-const Heap::StructTable Heap::struct_table[] = {
-#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
- { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
- STRUCT_LIST(STRUCT_TABLE_ELEMENT)
-#undef STRUCT_TABLE_ELEMENT
-};
-
-
-bool Heap::CreateInitialMaps() {
- Object* obj;
- { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- // Map::cast cannot be used due to uninitialized map field.
- Map* new_meta_map = reinterpret_cast<Map*>(obj);
- set_meta_map(new_meta_map);
- new_meta_map->set_map(new_meta_map);
-
- { MaybeObject* maybe_obj =
- AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_fixed_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_oddball_map(Map::cast(obj));
-
- // Allocate the empty array.
- { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_fixed_array(FixedArray::cast(obj));
-
- { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_null_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kNull);
-
- { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_undefined_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kUndefined);
- ASSERT(!InNewSpace(undefined_value()));
-
- // Allocate the empty descriptor array.
- { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_descriptor_array(DescriptorArray::cast(obj));
-
- // Fix the instance_descriptors for the existing maps.
- meta_map()->set_code_cache(empty_fixed_array());
- meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
- meta_map()->init_back_pointer(undefined_value());
- meta_map()->set_instance_descriptors(empty_descriptor_array());
-
- fixed_array_map()->set_code_cache(empty_fixed_array());
- fixed_array_map()->set_dependent_code(
- DependentCode::cast(empty_fixed_array()));
- fixed_array_map()->init_back_pointer(undefined_value());
- fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
-
- oddball_map()->set_code_cache(empty_fixed_array());
- oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
- oddball_map()->init_back_pointer(undefined_value());
- oddball_map()->set_instance_descriptors(empty_descriptor_array());
-
- // Fix prototype object for existing maps.
- meta_map()->set_prototype(null_value());
- meta_map()->set_constructor(null_value());
-
- fixed_array_map()->set_prototype(null_value());
- fixed_array_map()->set_constructor(null_value());
-
- oddball_map()->set_prototype(null_value());
- oddball_map()->set_constructor(null_value());
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_fixed_cow_array_map(Map::cast(obj));
- ASSERT(fixed_array_map() != fixed_cow_array_map());
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_scope_info_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_heap_number_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_symbol_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_foreign_map(Map::cast(obj));
-
- for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
- const StringTypeTable& entry = string_type_table[i];
- { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- roots_[entry.index] = Map::cast(obj);
- }
-
- { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_undetectable_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- { MaybeObject* maybe_obj =
- AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_undetectable_ascii_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_fixed_double_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_byte_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_free_space_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_byte_array(ByteArray::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_pixel_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_byte_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_unsigned_byte_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_short_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_unsigned_short_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_int_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_unsigned_int_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_float_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_non_strict_arguments_elements_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_double_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_code_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
- JSGlobalPropertyCell::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_global_property_cell_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_one_pointer_filler_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_two_pointer_filler_map(Map::cast(obj));
-
- for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
- const StructTable& entry = struct_table[i];
- { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- roots_[entry.index] = Map::cast(obj);
- }
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_hash_table_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_function_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_catch_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_with_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_block_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_module_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_global_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- Map* native_context_map = Map::cast(obj);
- native_context_map->set_dictionary_map(true);
- native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
- set_native_context_map(native_context_map);
-
- { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
- SharedFunctionInfo::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_shared_function_info_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
- JSMessageObject::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_message_object_map(Map::cast(obj));
-
- Map* external_map;
- { MaybeObject* maybe_obj =
- AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
- if (!maybe_obj->To(&external_map)) return false;
- }
- external_map->set_is_extensible(false);
- set_external_map(external_map);
-
- ASSERT(!InNewSpace(empty_fixed_array()));
- return true;
-}
-
-
-MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
- // Statically ensure that it is safe to allocate heap numbers in paged
- // spaces.
- STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-
- Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
- HeapNumber::cast(result)->set_value(value);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateHeapNumber(double value) {
- // Use general version, if we're forced to always allocate.
- if (always_allocate()) return AllocateHeapNumber(value, TENURED);
-
- // This version of AllocateHeapNumber is optimized for
- // allocation in new space.
- STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
- Object* result;
- { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
- HeapNumber::cast(result)->set_value(value);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
- Object* result;
- { MaybeObject* maybe_result = AllocateRawCell();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- HeapObject::cast(result)->set_map_no_write_barrier(
- global_property_cell_map());
- JSGlobalPropertyCell::cast(result)->set_value(value);
- return result;
-}
-
-
-MaybeObject* Heap::CreateOddball(const char* to_string,
- Object* to_number,
- byte kind) {
- Object* result;
- { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- return Oddball::cast(result)->Initialize(to_string, to_number, kind);
-}
-
-
-bool Heap::CreateApiObjects() {
- Object* obj;
-
- { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- // Don't use Smi-only elements optimizations for objects with the neander
- // map. There are too many cases where element values are set directly with a
- // bottleneck to trap the Smi-only -> fast elements transition, and there
- // appears to be no benefit for optimize this case.
- Map* new_neander_map = Map::cast(obj);
- new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
- set_neander_map(new_neander_map);
-
- { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- Object* elements;
- { MaybeObject* maybe_elements = AllocateFixedArray(2);
- if (!maybe_elements->ToObject(&elements)) return false;
- }
- FixedArray::cast(elements)->set(0, Smi::FromInt(0));
- JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
- set_message_listeners(JSObject::cast(obj));
-
- return true;
-}
-
-
-void Heap::CreateJSEntryStub() {
- JSEntryStub stub;
- set_js_entry_code(*stub.GetCode(isolate()));
-}
-
-
-void Heap::CreateJSConstructEntryStub() {
- JSConstructEntryStub stub;
- set_js_construct_entry_code(*stub.GetCode(isolate()));
-}
-
-
-void Heap::CreateFixedStubs() {
- // Here we create roots for fixed stubs. They are needed at GC
- // for cooking and uncooking (check out frames.cc).
- // The eliminates the need for doing dictionary lookup in the
- // stub cache for these stubs.
- HandleScope scope(isolate());
- // gcc-4.4 has problem generating correct code of following snippet:
- // { JSEntryStub stub;
- // js_entry_code_ = *stub.GetCode();
- // }
- // { JSConstructEntryStub stub;
- // js_construct_entry_code_ = *stub.GetCode();
- // }
- // To workaround the problem, make separate functions without inlining.
- Heap::CreateJSEntryStub();
- Heap::CreateJSConstructEntryStub();
-
- // Create stubs that should be there, so we don't unexpectedly have to
- // create them if we need them during the creation of another stub.
- // Stub creation mixes raw pointers and handles in an unsafe manner so
- // we cannot create stubs while we are creating stubs.
- CodeStub::GenerateStubsAheadOfTime(isolate());
-}
-
-
-bool Heap::CreateInitialObjects() {
- Object* obj;
-
- // The -0 value must be set before NumberFromDouble works.
- { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_minus_zero_value(HeapNumber::cast(obj));
- ASSERT(signbit(minus_zero_value()->Number()) != 0);
-
- { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_nan_value(HeapNumber::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_infinity_value(HeapNumber::cast(obj));
-
- // The hole has not been created yet, but we want to put something
- // predictable in the gaps in the string table, so lets make that Smi zero.
- set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
-
- // Allocate initial string table.
- { MaybeObject* maybe_obj = StringTable::Allocate(kInitialStringTableSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- // Don't use set_string_table() due to asserts.
- roots_[kStringTableRootIndex] = obj;
-
- // Finish initializing oddballs after creating the string table.
- { MaybeObject* maybe_obj =
- undefined_value()->Initialize("undefined",
- nan_value(),
- Oddball::kUndefined);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
-
- // Initialize the null_value.
- { MaybeObject* maybe_obj =
- null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
-
- { MaybeObject* maybe_obj = CreateOddball("true",
- Smi::FromInt(1),
- Oddball::kTrue);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_true_value(Oddball::cast(obj));
-
- { MaybeObject* maybe_obj = CreateOddball("false",
- Smi::FromInt(0),
- Oddball::kFalse);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_false_value(Oddball::cast(obj));
-
- { MaybeObject* maybe_obj = CreateOddball("hole",
- Smi::FromInt(-1),
- Oddball::kTheHole);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_the_hole_value(Oddball::cast(obj));
-
- { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
- Smi::FromInt(-4),
- Oddball::kArgumentMarker);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_arguments_marker(Oddball::cast(obj));
-
- { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
- Smi::FromInt(-2),
- Oddball::kOther);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_no_interceptor_result_sentinel(obj);
-
- { MaybeObject* maybe_obj = CreateOddball("termination_exception",
- Smi::FromInt(-3),
- Oddball::kOther);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_termination_exception(obj);
-
- for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
- { MaybeObject* maybe_obj =
- InternalizeUtf8String(constant_string_table[i].contents);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- roots_[constant_string_table[i].index] = String::cast(obj);
- }
-
- // Allocate the hidden string which is used to identify the hidden properties
- // in JSObjects. The hash code has a special value so that it will not match
- // the empty string when searching for the property. It cannot be part of the
- // loop above because it needs to be allocated manually with the special
- // hash code in place. The hash code for the hidden_string is zero to ensure
- // that it will always be at the first entry in property descriptors.
- { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
- OneByteVector("", 0), String::kEmptyStringHash);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- hidden_string_ = String::cast(obj);
-
- // Allocate the foreign for __proto__.
- { MaybeObject* maybe_obj =
- AllocateForeign((Address) &Accessors::ObjectPrototype);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_prototype_accessors(Foreign::cast(obj));
-
- // Allocate the code_stubs dictionary. The initial size is set to avoid
- // expanding the dictionary during bootstrapping.
- { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_code_stubs(UnseededNumberDictionary::cast(obj));
-
-
- // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
- // is set to avoid expanding the dictionary during bootstrapping.
- { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
-
- { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
-
- set_instanceof_cache_function(Smi::FromInt(0));
- set_instanceof_cache_map(Smi::FromInt(0));
- set_instanceof_cache_answer(Smi::FromInt(0));
-
- CreateFixedStubs();
-
- // Allocate the dictionary of intrinsic function names.
- { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
- obj);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_intrinsic_function_names(StringDictionary::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_number_string_cache(FixedArray::cast(obj));
-
- // Allocate cache for single character one byte strings.
- { MaybeObject* maybe_obj =
- AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_single_character_string_cache(FixedArray::cast(obj));
-
- // Allocate cache for string split.
- { MaybeObject* maybe_obj = AllocateFixedArray(
- RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_string_split_cache(FixedArray::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateFixedArray(
- RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_regexp_multiple_cache(FixedArray::cast(obj));
-
- // Allocate cache for external strings pointing to native source code.
- { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_natives_source_cache(FixedArray::cast(obj));
-
- // Allocate object to hold object observation state.
- { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_observation_state(JSObject::cast(obj));
-
- // Handling of script id generation is in FACTORY->NewScript.
- set_last_script_id(undefined_value());
-
- // Initialize keyed lookup cache.
- isolate_->keyed_lookup_cache()->Clear();
-
- // Initialize context slot cache.
- isolate_->context_slot_cache()->Clear();
-
- // Initialize descriptor cache.
- isolate_->descriptor_lookup_cache()->Clear();
-
- // Initialize compilation cache.
- isolate_->compilation_cache()->Clear();
-
- return true;
-}
-
-
-bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
- RootListIndex writable_roots[] = {
- kStoreBufferTopRootIndex,
- kStackLimitRootIndex,
- kNumberStringCacheRootIndex,
- kInstanceofCacheFunctionRootIndex,
- kInstanceofCacheMapRootIndex,
- kInstanceofCacheAnswerRootIndex,
- kCodeStubsRootIndex,
- kNonMonomorphicCacheRootIndex,
- kPolymorphicCodeCacheRootIndex,
- kLastScriptIdRootIndex,
- kEmptyScriptRootIndex,
- kRealStackLimitRootIndex,
- kArgumentsAdaptorDeoptPCOffsetRootIndex,
- kConstructStubDeoptPCOffsetRootIndex,
- kGetterStubDeoptPCOffsetRootIndex,
- kSetterStubDeoptPCOffsetRootIndex,
- kStringTableRootIndex,
- };
-
- for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
- if (root_index == writable_roots[i])
- return true;
- }
- return false;
-}
-
-
-Object* RegExpResultsCache::Lookup(Heap* heap,
- String* key_string,
- Object* key_pattern,
- ResultsCacheType type) {
- FixedArray* cache;
- if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
- if (type == STRING_SPLIT_SUBSTRINGS) {
- ASSERT(key_pattern->IsString());
- if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
- cache = heap->string_split_cache();
- } else {
- ASSERT(type == REGEXP_MULTIPLE_INDICES);
- ASSERT(key_pattern->IsFixedArray());
- cache = heap->regexp_multiple_cache();
- }
-
- uint32_t hash = key_string->Hash();
- uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
- ~(kArrayEntriesPerCacheEntry - 1));
- if (cache->get(index + kStringOffset) == key_string &&
- cache->get(index + kPatternOffset) == key_pattern) {
- return cache->get(index + kArrayOffset);
- }
- index =
- ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
- if (cache->get(index + kStringOffset) == key_string &&
- cache->get(index + kPatternOffset) == key_pattern) {
- return cache->get(index + kArrayOffset);
- }
- return Smi::FromInt(0);
-}
-
-
-void RegExpResultsCache::Enter(Heap* heap,
- String* key_string,
- Object* key_pattern,
- FixedArray* value_array,
- ResultsCacheType type) {
- FixedArray* cache;
- if (!key_string->IsInternalizedString()) return;
- if (type == STRING_SPLIT_SUBSTRINGS) {
- ASSERT(key_pattern->IsString());
- if (!key_pattern->IsInternalizedString()) return;
- cache = heap->string_split_cache();
- } else {
- ASSERT(type == REGEXP_MULTIPLE_INDICES);
- ASSERT(key_pattern->IsFixedArray());
- cache = heap->regexp_multiple_cache();
- }
-
- uint32_t hash = key_string->Hash();
- uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
- ~(kArrayEntriesPerCacheEntry - 1));
- if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
- cache->set(index + kStringOffset, key_string);
- cache->set(index + kPatternOffset, key_pattern);
- cache->set(index + kArrayOffset, value_array);
- } else {
- uint32_t index2 =
- ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
- if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
- cache->set(index2 + kStringOffset, key_string);
- cache->set(index2 + kPatternOffset, key_pattern);
- cache->set(index2 + kArrayOffset, value_array);
- } else {
- cache->set(index2 + kStringOffset, Smi::FromInt(0));
- cache->set(index2 + kPatternOffset, Smi::FromInt(0));
- cache->set(index2 + kArrayOffset, Smi::FromInt(0));
- cache->set(index + kStringOffset, key_string);
- cache->set(index + kPatternOffset, key_pattern);
- cache->set(index + kArrayOffset, value_array);
- }
- }
- // If the array is a reasonably short list of substrings, convert it into a
- // list of internalized strings.
- if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
- for (int i = 0; i < value_array->length(); i++) {
- String* str = String::cast(value_array->get(i));
- Object* internalized_str;
- MaybeObject* maybe_string = heap->InternalizeString(str);
- if (maybe_string->ToObject(&internalized_str)) {
- value_array->set(i, internalized_str);
- }
- }
- }
- // Convert backing store to a copy-on-write array.
- value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
-}
-
-
-void RegExpResultsCache::Clear(FixedArray* cache) {
- for (int i = 0; i < kRegExpResultsCacheSize; i++) {
- cache->set(i, Smi::FromInt(0));
- }
-}
-
-
-MaybeObject* Heap::AllocateInitialNumberStringCache() {
- MaybeObject* maybe_obj =
- AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
- return maybe_obj;
-}
-
-
-int Heap::FullSizeNumberStringCacheLength() {
- // Compute the size of the number string cache based on the max newspace size.
- // The number string cache has a minimum size based on twice the initial cache
- // size to ensure that it is bigger after being made 'full size'.
- int number_string_cache_size = max_semispace_size_ / 512;
- number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
- Min(0x4000, number_string_cache_size));
- // There is a string and a number per entry so the length is twice the number
- // of entries.
- return number_string_cache_size * 2;
-}
-
-
-void Heap::AllocateFullSizeNumberStringCache() {
- // The idea is to have a small number string cache in the snapshot to keep
- // boot-time memory usage down. If we expand the number string cache already
- // while creating the snapshot then that didn't work out.
- ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
- MaybeObject* maybe_obj =
- AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
- Object* new_cache;
- if (maybe_obj->ToObject(&new_cache)) {
- // We don't bother to repopulate the cache with entries from the old cache.
- // It will be repopulated soon enough with new strings.
- set_number_string_cache(FixedArray::cast(new_cache));
- }
- // If allocation fails then we just return without doing anything. It is only
- // a cache, so best effort is OK here.
-}
-
-
-void Heap::FlushNumberStringCache() {
- // Flush the number to string cache.
- int len = number_string_cache()->length();
- for (int i = 0; i < len; i++) {
- number_string_cache()->set_undefined(this, i);
- }
-}
-
-
-static inline int double_get_hash(double d) {
- DoubleRepresentation rep(d);
- return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
-}
-
-
-static inline int smi_get_hash(Smi* smi) {
- return smi->value();
-}
-
-
-Object* Heap::GetNumberStringCache(Object* number) {
- int hash;
- int mask = (number_string_cache()->length() >> 1) - 1;
- if (number->IsSmi()) {
- hash = smi_get_hash(Smi::cast(number)) & mask;
- } else {
- hash = double_get_hash(number->Number()) & mask;
- }
- Object* key = number_string_cache()->get(hash * 2);
- if (key == number) {
- return String::cast(number_string_cache()->get(hash * 2 + 1));
- } else if (key->IsHeapNumber() &&
- number->IsHeapNumber() &&
- key->Number() == number->Number()) {
- return String::cast(number_string_cache()->get(hash * 2 + 1));
- }
- return undefined_value();
-}
-
-
-void Heap::SetNumberStringCache(Object* number, String* string) {
- int hash;
- int mask = (number_string_cache()->length() >> 1) - 1;
- if (number->IsSmi()) {
- hash = smi_get_hash(Smi::cast(number)) & mask;
- } else {
- hash = double_get_hash(number->Number()) & mask;
- }
- if (number_string_cache()->get(hash * 2) != undefined_value() &&
- number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
- // The first time we have a hash collision, we move to the full sized
- // number string cache.
- AllocateFullSizeNumberStringCache();
- return;
- }
- number_string_cache()->set(hash * 2, number);
- number_string_cache()->set(hash * 2 + 1, string);
-}
-
-
-MaybeObject* Heap::NumberToString(Object* number,
- bool check_number_string_cache) {
- isolate_->counters()->number_to_string_runtime()->Increment();
- if (check_number_string_cache) {
- Object* cached = GetNumberStringCache(number);
- if (cached != undefined_value()) {
- return cached;
- }
- }
-
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- const char* str;
- if (number->IsSmi()) {
- int num = Smi::cast(number)->value();
- str = IntToCString(num, buffer);
- } else {
- double num = HeapNumber::cast(number)->value();
- str = DoubleToCString(num, buffer);
- }
-
- Object* js_string;
- MaybeObject* maybe_js_string = AllocateStringFromOneByte(CStrVector(str));
- if (maybe_js_string->ToObject(&js_string)) {
- SetNumberStringCache(number, String::cast(js_string));
- }
- return maybe_js_string;
-}
-
-
-MaybeObject* Heap::Uint32ToString(uint32_t value,
- bool check_number_string_cache) {
- Object* number;
- MaybeObject* maybe = NumberFromUint32(value);
- if (!maybe->To<Object>(&number)) return maybe;
- return NumberToString(number, check_number_string_cache);
-}
-
-
-Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
- return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
-}
-
-
-Heap::RootListIndex Heap::RootIndexForExternalArrayType(
- ExternalArrayType array_type) {
- switch (array_type) {
- case kExternalByteArray:
- return kExternalByteArrayMapRootIndex;
- case kExternalUnsignedByteArray:
- return kExternalUnsignedByteArrayMapRootIndex;
- case kExternalShortArray:
- return kExternalShortArrayMapRootIndex;
- case kExternalUnsignedShortArray:
- return kExternalUnsignedShortArrayMapRootIndex;
- case kExternalIntArray:
- return kExternalIntArrayMapRootIndex;
- case kExternalUnsignedIntArray:
- return kExternalUnsignedIntArrayMapRootIndex;
- case kExternalFloatArray:
- return kExternalFloatArrayMapRootIndex;
- case kExternalDoubleArray:
- return kExternalDoubleArrayMapRootIndex;
- case kExternalPixelArray:
- return kExternalPixelArrayMapRootIndex;
- default:
- UNREACHABLE();
- return kUndefinedValueRootIndex;
- }
-}
-
-
-MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
- // We need to distinguish the minus zero value and this cannot be
- // done after conversion to int. Doing this by comparing bit
- // patterns is faster than using fpclassify() et al.
- static const DoubleRepresentation minus_zero(-0.0);
-
- DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) {
- return AllocateHeapNumber(-0.0, pretenure);
- }
-
- int int_value = FastD2I(value);
- if (value == int_value && Smi::IsValid(int_value)) {
- return Smi::FromInt(int_value);
- }
-
- // Materialize the value in the heap.
- return AllocateHeapNumber(value, pretenure);
-}
-
-
-MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
- // Statically ensure that it is safe to allocate foreigns in paged spaces.
- STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- Foreign* result;
- MaybeObject* maybe_result = Allocate(foreign_map(), space);
- if (!maybe_result->To(&result)) return maybe_result;
- result->set_foreign_address(address);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
- SharedFunctionInfo* share;
- MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
- if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
-
- // Set pointer fields.
- share->set_name(name);
- Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
- share->set_code(illegal);
- share->ClearOptimizedCodeMap();
- share->set_scope_info(ScopeInfo::Empty(isolate_));
- Code* construct_stub =
- isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
- share->set_construct_stub(construct_stub);
- share->set_instance_class_name(Object_string());
- share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
- share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
- share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
- share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
- share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
- share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
- share->set_ast_node_count(0);
- share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
- share->set_counters(0);
-
- // Set integer fields (smi or int, depending on the architecture).
- share->set_length(0);
- share->set_formal_parameter_count(0);
- share->set_expected_nof_properties(0);
- share->set_num_literals(0);
- share->set_start_position_and_type(0);
- share->set_end_position(0);
- share->set_function_token_position(0);
- // All compiler hints default to false or 0.
- share->set_compiler_hints(0);
- share->set_this_property_assignments_count(0);
- share->set_opt_count(0);
-
- return share;
-}
-
-
-MaybeObject* Heap::AllocateJSMessageObject(String* type,
- JSArray* arguments,
- int start_position,
- int end_position,
- Object* script,
- Object* stack_trace,
- Object* stack_frames) {
- Object* result;
- { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSMessageObject* message = JSMessageObject::cast(result);
- message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
- message->initialize_elements();
- message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
- message->set_type(type);
- message->set_arguments(arguments);
- message->set_start_position(start_position);
- message->set_end_position(end_position);
- message->set_script(script);
- message->set_stack_trace(stack_trace);
- message->set_stack_frames(stack_frames);
- return result;
-}
-
-
-
-// Returns true for a character in a range. Both limits are inclusive.
-static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
- // This makes uses of the the unsigned wraparound.
- return character - from <= to - from;
-}
-
-
-MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
- Heap* heap,
- uint16_t c1,
- uint16_t c2) {
- String* result;
- // Numeric strings have a different hash algorithm not known by
- // LookupTwoCharsStringIfExists, so we skip this step for such strings.
- if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
- heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
- return result;
- // Now we know the length is 2, we might as well make use of that fact
- // when building the new string.
- } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
- // We can do this.
- ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
- Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
- dest[0] = static_cast<uint8_t>(c1);
- dest[1] = static_cast<uint8_t>(c2);
- return result;
- } else {
- Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- uc16* dest = SeqTwoByteString::cast(result)->GetChars();
- dest[0] = c1;
- dest[1] = c2;
- return result;
- }
-}
-
-
-MaybeObject* Heap::AllocateConsString(String* first, String* second) {
- int first_length = first->length();
- if (first_length == 0) {
- return second;
- }
-
- int second_length = second->length();
- if (second_length == 0) {
- return first;
- }
-
- int length = first_length + second_length;
-
- // Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. Check whether we already have the string in the string
- // table to prevent creation of many unneccesary strings.
- if (length == 2) {
- uint16_t c1 = first->Get(0);
- uint16_t c2 = second->Get(0);
- return MakeOrFindTwoCharacterString(this, c1, c2);
- }
-
- bool first_is_one_byte = first->IsOneByteRepresentation();
- bool second_is_one_byte = second->IsOneByteRepresentation();
- bool is_one_byte = first_is_one_byte && second_is_one_byte;
- // Make sure that an out of memory exception is thrown if the length
- // of the new cons string is too large.
- if (length > String::kMaxLength || length < 0) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x4);
- }
-
- bool is_ascii_data_in_two_byte_string = false;
- if (!is_one_byte) {
- // At least one of the strings uses two-byte representation so we
- // can't use the fast case code for short ASCII strings below, but
- // we can try to save memory if all chars actually fit in ASCII.
- is_ascii_data_in_two_byte_string =
- first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
- if (is_ascii_data_in_two_byte_string) {
- isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
- }
- }
-
- // If the resulting string is small make a flat string.
- if (length < ConsString::kMinLength) {
- // Note that neither of the two inputs can be a slice because:
- STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
- ASSERT(first->IsFlat());
- ASSERT(second->IsFlat());
- if (is_one_byte) {
- Object* result;
- { MaybeObject* maybe_result = AllocateRawOneByteString(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Copy the characters into the new object.
- uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
- // Copy first part.
- const uint8_t* src;
- if (first->IsExternalString()) {
- src = ExternalAsciiString::cast(first)->GetChars();
- } else {
- src = SeqOneByteString::cast(first)->GetChars();
- }
- for (int i = 0; i < first_length; i++) *dest++ = src[i];
- // Copy second part.
- if (second->IsExternalString()) {
- src = ExternalAsciiString::cast(second)->GetChars();
- } else {
- src = SeqOneByteString::cast(second)->GetChars();
- }
- for (int i = 0; i < second_length; i++) *dest++ = src[i];
- return result;
- } else {
- if (is_ascii_data_in_two_byte_string) {
- Object* result;
- { MaybeObject* maybe_result = AllocateRawOneByteString(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Copy the characters into the new object.
- uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
- String::WriteToFlat(first, dest, 0, first_length);
- String::WriteToFlat(second, dest + first_length, 0, second_length);
- isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
- return result;
- }
-
- Object* result;
- { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Copy the characters into the new object.
- uc16* dest = SeqTwoByteString::cast(result)->GetChars();
- String::WriteToFlat(first, dest, 0, first_length);
- String::WriteToFlat(second, dest + first_length, 0, second_length);
- return result;
- }
- }
-
- Map* map = (is_one_byte || is_ascii_data_in_two_byte_string) ?
- cons_ascii_string_map() : cons_string_map();
-
- Object* result;
- { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- AssertNoAllocation no_gc;
- ConsString* cons_string = ConsString::cast(result);
- WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
- cons_string->set_length(length);
- cons_string->set_hash_field(String::kEmptyHashField);
- cons_string->set_first(first, mode);
- cons_string->set_second(second, mode);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateSubString(String* buffer,
- int start,
- int end,
- PretenureFlag pretenure) {
- int length = end - start;
- if (length <= 0) {
- return empty_string();
- } else if (length == 1) {
- return LookupSingleCharacterStringFromCode(buffer->Get(start));
- } else if (length == 2) {
- // Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. Check whether we already have the string in the string
- // table to prevent creation of many unnecessary strings.
- uint16_t c1 = buffer->Get(start);
- uint16_t c2 = buffer->Get(start + 1);
- return MakeOrFindTwoCharacterString(this, c1, c2);
- }
-
- // Make an attempt to flatten the buffer to reduce access time.
- buffer = buffer->TryFlattenGetString();
-
- if (!FLAG_string_slices ||
- !buffer->IsFlat() ||
- length < SlicedString::kMinLength ||
- pretenure == TENURED) {
- Object* result;
- // WriteToFlat takes care of the case when an indirect string has a
- // different encoding from its underlying string. These encodings may
- // differ because of externalization.
- bool is_one_byte = buffer->IsOneByteRepresentation();
- { MaybeObject* maybe_result = is_one_byte
- ? AllocateRawOneByteString(length, pretenure)
- : AllocateRawTwoByteString(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- String* string_result = String::cast(result);
- // Copy the characters into the new object.
- if (is_one_byte) {
- ASSERT(string_result->IsOneByteRepresentation());
- uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
- String::WriteToFlat(buffer, dest, start, end);
- } else {
- ASSERT(string_result->IsTwoByteRepresentation());
- uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
- String::WriteToFlat(buffer, dest, start, end);
- }
- return result;
- }
-
- ASSERT(buffer->IsFlat());
-#if VERIFY_HEAP
- if (FLAG_verify_heap) {
- buffer->StringVerify();
- }
-#endif
-
- Object* result;
- // When slicing an indirect string we use its encoding for a newly created
- // slice and don't check the encoding of the underlying string. This is safe
- // even if the encodings are different because of externalization. If an
- // indirect ASCII string is pointing to a two-byte string, the two-byte char
- // codes of the underlying string must still fit into ASCII (because
- // externalization must not change char codes).
- { Map* map = buffer->IsOneByteRepresentation()
- ? sliced_ascii_string_map()
- : sliced_string_map();
- MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- AssertNoAllocation no_gc;
- SlicedString* sliced_string = SlicedString::cast(result);
- sliced_string->set_length(length);
- sliced_string->set_hash_field(String::kEmptyHashField);
- if (buffer->IsConsString()) {
- ConsString* cons = ConsString::cast(buffer);
- ASSERT(cons->second()->length() == 0);
- sliced_string->set_parent(cons->first());
- sliced_string->set_offset(start);
- } else if (buffer->IsSlicedString()) {
- // Prevent nesting sliced strings.
- SlicedString* parent_slice = SlicedString::cast(buffer);
- sliced_string->set_parent(parent_slice->parent());
- sliced_string->set_offset(start + parent_slice->offset());
- } else {
- sliced_string->set_parent(buffer);
- sliced_string->set_offset(start);
- }
- ASSERT(sliced_string->parent()->IsSeqString() ||
- sliced_string->parent()->IsExternalString());
- return result;
-}
-
-
-MaybeObject* Heap::AllocateExternalStringFromAscii(
- const ExternalAsciiString::Resource* resource) {
- size_t length = resource->length();
- if (length > static_cast<size_t>(String::kMaxLength)) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x5);
- }
-
-#ifndef ENABLE_LATIN_1
- ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
-#endif // ENABLE_LATIN_1
-
- Map* map = external_ascii_string_map();
- Object* result;
- { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
- external_string->set_length(static_cast<int>(length));
- external_string->set_hash_field(String::kEmptyHashField);
- external_string->set_resource(resource);
-
- return result;
-}
-
-
-MaybeObject* Heap::AllocateExternalStringFromTwoByte(
- const ExternalTwoByteString::Resource* resource) {
- size_t length = resource->length();
- if (length > static_cast<size_t>(String::kMaxLength)) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x6);
- }
-
- // For small strings we check whether the resource contains only
- // one byte characters. If yes, we use a different string map.
- static const size_t kAsciiCheckLengthLimit = 32;
- bool is_one_byte = length <= kAsciiCheckLengthLimit &&
- String::IsOneByte(resource->data(), static_cast<int>(length));
- Map* map = is_one_byte ?
- external_string_with_ascii_data_map() : external_string_map();
- Object* result;
- { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
- external_string->set_length(static_cast<int>(length));
- external_string->set_hash_field(String::kEmptyHashField);
- external_string->set_resource(resource);
-
- return result;
-}
-
-
-MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
- if (code <= String::kMaxOneByteCharCode) {
- Object* value = single_character_string_cache()->get(code);
- if (value != undefined_value()) return value;
-
- uint8_t buffer[1];
- buffer[0] = static_cast<uint8_t>(code);
- Object* result;
- MaybeObject* maybe_result =
- InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
-
- if (!maybe_result->ToObject(&result)) return maybe_result;
- single_character_string_cache()->set(code, result);
- return result;
- }
-
- Object* result;
- { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- String* answer = String::cast(result);
- answer->Set(0, code);
- return answer;
-}
-
-
-MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
- if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException(0x7);
- }
- if (pretenure == NOT_TENURED) {
- return AllocateByteArray(length);
- }
- int size = ByteArray::SizeFor(length);
- Object* result;
- { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
- ? old_data_space_->AllocateRaw(size)
- : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
- byte_array_map());
- reinterpret_cast<ByteArray*>(result)->set_length(length);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateByteArray(int length) {
- if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException(0x8);
- }
- int size = ByteArray::SizeFor(length);
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
- byte_array_map());
- reinterpret_cast<ByteArray*>(result)->set_length(length);
- return result;
-}
-
-
-void Heap::CreateFillerObjectAt(Address addr, int size) {
- if (size == 0) return;
- HeapObject* filler = HeapObject::FromAddress(addr);
- if (size == kPointerSize) {
- filler->set_map_no_write_barrier(one_pointer_filler_map());
- } else if (size == 2 * kPointerSize) {
- filler->set_map_no_write_barrier(two_pointer_filler_map());
- } else {
- filler->set_map_no_write_barrier(free_space_map());
- FreeSpace::cast(filler)->set_size(size);
- }
-}
-
-
-MaybeObject* Heap::AllocateExternalArray(int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
- space,
- OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
- MapForExternalArrayType(array_type));
- reinterpret_cast<ExternalArray*>(result)->set_length(length);
- reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
- external_pointer);
-
- return result;
-}
-
-
-MaybeObject* Heap::CreateCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference,
- bool immovable) {
- // Allocate ByteArray before the Code object, so that we do not risk
- // leaving uninitialized Code object (and breaking the heap).
- ByteArray* reloc_info;
- MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
- if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
-
- // Compute size.
- int body_size = RoundUp(desc.instr_size, kObjectAlignment);
- int obj_size = Code::SizeFor(body_size);
- ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
- MaybeObject* maybe_result;
- // Large code objects and code objects which should stay at a fixed address
- // are allocated in large object space.
- HeapObject* result;
- bool force_lo_space = obj_size > code_space()->AreaSize();
- if (force_lo_space) {
- maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
- } else {
- maybe_result = code_space_->AllocateRaw(obj_size);
- }
- if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
-
- if (immovable && !force_lo_space &&
- // Objects on the first page of each space are never moved.
- !code_space_->FirstPage()->Contains(result->address())) {
- // Discard the first code allocation, which was on a page where it could be
- // moved.
- CreateFillerObjectAt(result->address(), obj_size);
- maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
- if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
- }
-
- // Initialize the object
- result->set_map_no_write_barrier(code_map());
- Code* code = Code::cast(result);
- ASSERT(!isolate_->code_range()->exists() ||
- isolate_->code_range()->contains(code->address()));
- code->set_instruction_size(desc.instr_size);
- code->set_relocation_info(reloc_info);
- code->set_flags(flags);
- if (code->is_call_stub() || code->is_keyed_call_stub()) {
- code->set_check_type(RECEIVER_MAP_CHECK);
- }
- code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
- code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->set_gc_metadata(Smi::FromInt(0));
- code->set_ic_age(global_ic_age_);
- code->set_prologue_offset(kPrologueOffsetNotSet);
- if (code->kind() == Code::OPTIMIZED_FUNCTION) {
- code->set_marked_for_deoptimization(false);
- }
- // Allow self references to created code object by patching the handle to
- // point to the newly allocated Code object.
- if (!self_reference.is_null()) {
- *(self_reference.location()) = code;
- }
- // Migrate generated code.
- // The generated code can contain Object** values (typically from handles)
- // that are dereferenced during the copy to point directly to the actual heap
- // objects. These pointers can include references to the code object itself,
- // through the self_reference parameter.
- code->CopyFrom(desc);
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- code->Verify();
- }
-#endif
- return code;
-}
-
-
-MaybeObject* Heap::CopyCode(Code* code) {
- // Allocate an object the same size as the code object.
- int obj_size = code->Size();
- MaybeObject* maybe_result;
- if (obj_size > code_space()->AreaSize()) {
- maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
- } else {
- maybe_result = code_space_->AllocateRaw(obj_size);
- }
-
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
-
- // Copy code object.
- Address old_addr = code->address();
- Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
- CopyBlock(new_addr, old_addr, obj_size);
- // Relocate the copy.
- Code* new_code = Code::cast(result);
- ASSERT(!isolate_->code_range()->exists() ||
- isolate_->code_range()->contains(code->address()));
- new_code->Relocate(new_addr - old_addr);
- return new_code;
-}
-
-
-MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
- // Allocate ByteArray before the Code object, so that we do not risk
- // leaving uninitialized Code object (and breaking the heap).
- Object* reloc_info_array;
- { MaybeObject* maybe_reloc_info_array =
- AllocateByteArray(reloc_info.length(), TENURED);
- if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
- return maybe_reloc_info_array;
- }
- }
-
- int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
-
- int new_obj_size = Code::SizeFor(new_body_size);
-
- Address old_addr = code->address();
-
- size_t relocation_offset =
- static_cast<size_t>(code->instruction_end() - old_addr);
-
- MaybeObject* maybe_result;
- if (new_obj_size > code_space()->AreaSize()) {
- maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
- } else {
- maybe_result = code_space_->AllocateRaw(new_obj_size);
- }
-
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
-
- // Copy code object.
- Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
-
- // Copy header and instructions.
- memcpy(new_addr, old_addr, relocation_offset);
-
- Code* new_code = Code::cast(result);
- new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
-
- // Copy patched rinfo.
- memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
-
- // Relocate the copy.
- ASSERT(!isolate_->code_range()->exists() ||
- isolate_->code_range()->contains(code->address()));
- new_code->Relocate(new_addr - old_addr);
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- code->Verify();
- }
-#endif
- return new_code;
-}
-
-
-MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
- Handle<Object> allocation_site_info_payload) {
- ASSERT(gc_state_ == NOT_IN_GC);
- ASSERT(map->instance_type() != MAP_TYPE);
- // If allocation failures are disallowed, we may allocate in a different
- // space when new space is full and the object is not a large object.
- AllocationSpace retry_space =
- (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
- int size = map->instance_size() + AllocationSiteInfo::kSize;
- Object* result;
- MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // No need for write barrier since object is white and map is in old space.
- HeapObject::cast(result)->set_map_no_write_barrier(map);
- AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
- reinterpret_cast<Address>(result) + map->instance_size());
- alloc_info->set_map_no_write_barrier(allocation_site_info_map());
- alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
- return result;
-}
-
-
-MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
- ASSERT(gc_state_ == NOT_IN_GC);
- ASSERT(map->instance_type() != MAP_TYPE);
- // If allocation failures are disallowed, we may allocate in a different
- // space when new space is full and the object is not a large object.
- AllocationSpace retry_space =
- (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
- int size = map->instance_size();
- Object* result;
- MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // No need for write barrier since object is white and map is in old space.
- HeapObject::cast(result)->set_map_no_write_barrier(map);
- return result;
-}
-
-
-void Heap::InitializeFunction(JSFunction* function,
- SharedFunctionInfo* shared,
- Object* prototype) {
- ASSERT(!prototype->IsMap());
- function->initialize_properties();
- function->initialize_elements();
- function->set_shared(shared);
- function->set_code(shared->code());
- function->set_prototype_or_initial_map(prototype);
- function->set_context(undefined_value());
- function->set_literals_or_bindings(empty_fixed_array());
- function->set_next_function_link(undefined_value());
-}
-
-
-MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
- // Allocate the prototype. Make sure to use the object function
- // from the function's context, since the function can be from a
- // different context.
- JSFunction* object_function =
- function->context()->native_context()->object_function();
-
- // Each function prototype gets a copy of the object function map.
- // This avoid unwanted sharing of maps between prototypes of different
- // constructors.
- Map* new_map;
- ASSERT(object_function->has_initial_map());
- MaybeObject* maybe_map = object_function->initial_map()->Copy();
- if (!maybe_map->To(&new_map)) return maybe_map;
-
- Object* prototype;
- MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
-
- // When creating the prototype for the function we must set its
- // constructor to the function.
- MaybeObject* maybe_failure =
- JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
- constructor_string(), function, DONT_ENUM);
- if (maybe_failure->IsFailure()) return maybe_failure;
-
- return prototype;
-}
-
-
-MaybeObject* Heap::AllocateFunction(Map* function_map,
- SharedFunctionInfo* shared,
- Object* prototype,
- PretenureFlag pretenure) {
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- Object* result;
- { MaybeObject* maybe_result = Allocate(function_map, space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- InitializeFunction(JSFunction::cast(result), shared, prototype);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
- // To get fast allocation and map sharing for arguments objects we
- // allocate them based on an arguments boilerplate.
-
- JSObject* boilerplate;
- int arguments_object_size;
- bool strict_mode_callee = callee->IsJSFunction() &&
- !JSFunction::cast(callee)->shared()->is_classic_mode();
- if (strict_mode_callee) {
- boilerplate =
- isolate()->context()->native_context()->
- strict_mode_arguments_boilerplate();
- arguments_object_size = kArgumentsObjectSizeStrict;
- } else {
- boilerplate =
- isolate()->context()->native_context()->arguments_boilerplate();
- arguments_object_size = kArgumentsObjectSize;
- }
-
- // This calls Copy directly rather than using Heap::AllocateRaw so we
- // duplicate the check here.
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
-
- // Check that the size of the boilerplate matches our
- // expectations. The ArgumentsAccessStub::GenerateNewObject relies
- // on the size being a known constant.
- ASSERT(arguments_object_size == boilerplate->map()->instance_size());
-
- // Do the allocation.
- Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Copy the content. The arguments boilerplate doesn't have any
- // fields that point to new space so it's safe to skip the write
- // barrier here.
- CopyBlock(HeapObject::cast(result)->address(),
- boilerplate->address(),
- JSObject::kHeaderSize);
-
- // Set the length property.
- JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
- Smi::FromInt(length),
- SKIP_WRITE_BARRIER);
- // Set the callee property for non-strict mode arguments object only.
- if (!strict_mode_callee) {
- JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
- callee);
- }
-
- // Check the state of the object
- ASSERT(JSObject::cast(result)->HasFastProperties());
- ASSERT(JSObject::cast(result)->HasFastObjectElements());
-
- return result;
-}
-
-
-static bool HasDuplicates(DescriptorArray* descriptors) {
- int count = descriptors->number_of_descriptors();
- if (count > 1) {
- String* prev_key = descriptors->GetKey(0);
- for (int i = 1; i != count; i++) {
- String* current_key = descriptors->GetKey(i);
- if (prev_key == current_key) return true;
- prev_key = current_key;
- }
- }
- return false;
-}
-
-
-MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
- ASSERT(!fun->has_initial_map());
-
- // First create a new map with the size and number of in-object properties
- // suggested by the function.
- int instance_size = fun->shared()->CalculateInstanceSize();
- int in_object_properties = fun->shared()->CalculateInObjectProperties();
- Map* map;
- MaybeObject* maybe_map = AllocateMap(JS_OBJECT_TYPE, instance_size);
- if (!maybe_map->To(&map)) return maybe_map;
-
- // Fetch or allocate prototype.
- Object* prototype;
- if (fun->has_instance_prototype()) {
- prototype = fun->instance_prototype();
- } else {
- MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
- if (!maybe_prototype->To(&prototype)) return maybe_prototype;
- }
- map->set_inobject_properties(in_object_properties);
- map->set_unused_property_fields(in_object_properties);
- map->set_prototype(prototype);
- ASSERT(map->has_fast_object_elements());
-
- // If the function has only simple this property assignments add
- // field descriptors for these to the initial map as the object
- // cannot be constructed without having these properties. Guard by
- // the inline_new flag so we only change the map if we generate a
- // specialized construct stub.
- ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
- if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
- int count = fun->shared()->this_property_assignments_count();
- if (count > in_object_properties) {
- // Inline constructor can only handle inobject properties.
- fun->shared()->ForbidInlineConstructor();
- } else {
- DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
- if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
-
- DescriptorArray::WhitenessWitness witness(descriptors);
- for (int i = 0; i < count; i++) {
- String* name = fun->shared()->GetThisPropertyAssignmentName(i);
- ASSERT(name->IsInternalizedString());
- FieldDescriptor field(name, i, NONE, i + 1);
- descriptors->Set(i, &field, witness);
- }
- descriptors->Sort();
-
- // The descriptors may contain duplicates because the compiler does not
- // guarantee the uniqueness of property names (it would have required
- // quadratic time). Once the descriptors are sorted we can check for
- // duplicates in linear time.
- if (HasDuplicates(descriptors)) {
- fun->shared()->ForbidInlineConstructor();
- } else {
- map->InitializeDescriptors(descriptors);
- map->set_pre_allocated_property_fields(count);
- map->set_unused_property_fields(in_object_properties - count);
- }
- }
- }
-
- fun->shared()->StartInobjectSlackTracking(map);
-
- return map;
-}
-
-
-void Heap::InitializeJSObjectFromMap(JSObject* obj,
- FixedArray* properties,
- Map* map) {
- obj->set_properties(properties);
- obj->initialize_elements();
- // TODO(1240798): Initialize the object's body using valid initial values
- // according to the object's initial map. For example, if the map's
- // instance type is JS_ARRAY_TYPE, the length field should be initialized
- // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
- // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
- // verification code has to cope with (temporarily) invalid objects. See
- // for example, JSArray::JSArrayVerify).
- Object* filler;
- // We cannot always fill with one_pointer_filler_map because objects
- // created from API functions expect their internal fields to be initialized
- // with undefined_value.
- // Pre-allocated fields need to be initialized with undefined_value as well
- // so that object accesses before the constructor completes (e.g. in the
- // debugger) will not cause a crash.
- if (map->constructor()->IsJSFunction() &&
- JSFunction::cast(map->constructor())->shared()->
- IsInobjectSlackTrackingInProgress()) {
- // We might want to shrink the object later.
- ASSERT(obj->GetInternalFieldCount() == 0);
- filler = Heap::one_pointer_filler_map();
- } else {
- filler = Heap::undefined_value();
- }
- obj->InitializeBody(map, Heap::undefined_value(), filler);
-}
-
-
-MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
- // JSFunctions should be allocated using AllocateFunction to be
- // properly initialized.
- ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
-
- // Both types of global objects should be allocated using
- // AllocateGlobalObject to be properly initialized.
- ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
- ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
-
- // Allocate the backing storage for the properties.
- int prop_size =
- map->pre_allocated_property_fields() +
- map->unused_property_fields() -
- map->inobject_properties();
- ASSERT(prop_size >= 0);
- Object* properties;
- { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
- if (!maybe_properties->ToObject(&properties)) return maybe_properties;
- }
-
- // Allocate the JSObject.
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
- Object* obj;
- MaybeObject* maybe_obj = Allocate(map, space);
- if (!maybe_obj->To(&obj)) return maybe_obj;
-
- // Initialize the JSObject.
- InitializeJSObjectFromMap(JSObject::cast(obj),
- FixedArray::cast(properties),
- map);
- ASSERT(JSObject::cast(obj)->HasFastElements());
- return obj;
-}
-
-
-MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
- Handle<Object> allocation_site_info_payload) {
- // JSFunctions should be allocated using AllocateFunction to be
- // properly initialized.
- ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
-
- // Both types of global objects should be allocated using
- // AllocateGlobalObject to be properly initialized.
- ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
- ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
-
- // Allocate the backing storage for the properties.
- int prop_size =
- map->pre_allocated_property_fields() +
- map->unused_property_fields() -
- map->inobject_properties();
- ASSERT(prop_size >= 0);
- Object* properties;
- { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
- if (!maybe_properties->ToObject(&properties)) return maybe_properties;
- }
-
- // Allocate the JSObject.
- AllocationSpace space = NEW_SPACE;
- if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
- Object* obj;
- MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
- allocation_site_info_payload);
- if (!maybe_obj->To(&obj)) return maybe_obj;
-
- // Initialize the JSObject.
- InitializeJSObjectFromMap(JSObject::cast(obj),
- FixedArray::cast(properties),
- map);
- ASSERT(JSObject::cast(obj)->HasFastElements());
- return obj;
-}
-
-
-MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
- PretenureFlag pretenure) {
- // Allocate the initial map if absent.
- if (!constructor->has_initial_map()) {
- Object* initial_map;
- { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
- if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
- }
- constructor->set_initial_map(Map::cast(initial_map));
- Map::cast(initial_map)->set_constructor(constructor);
- }
- // Allocate the object based on the constructors initial map.
- MaybeObject* result = AllocateJSObjectFromMap(
- constructor->initial_map(), pretenure);
-#ifdef DEBUG
- // Make sure result is NOT a global object if valid.
- Object* non_failure;
- ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
-#endif
- return result;
-}
-
-
-MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
- Handle<Object> allocation_site_info_payload) {
- // Allocate the initial map if absent.
- if (!constructor->has_initial_map()) {
- Object* initial_map;
- { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
- if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
- }
- constructor->set_initial_map(Map::cast(initial_map));
- Map::cast(initial_map)->set_constructor(constructor);
- }
- // Allocate the object based on the constructors initial map, or the payload
- // advice
- Map* initial_map = constructor->initial_map();
-
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
- *allocation_site_info_payload);
- Smi* smi = Smi::cast(cell->value());
- ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
- AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
- if (to_kind != initial_map->elements_kind()) {
- MaybeObject* maybe_new_map = constructor->GetElementsTransitionMap(
- isolate(), to_kind);
- if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
- // Possibly alter the mode, since we found an updated elements kind
- // in the type info cell.
- mode = AllocationSiteInfo::GetMode(to_kind);
- }
-
- MaybeObject* result;
- if (mode == TRACK_ALLOCATION_SITE) {
- result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
- allocation_site_info_payload);
- } else {
- result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
- }
-#ifdef DEBUG
- // Make sure result is NOT a global object if valid.
- Object* non_failure;
- ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
-#endif
- return result;
-}
-
-
-MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
- // Allocate a fresh map. Modules do not have a prototype.
- Map* map;
- MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
- if (!maybe_map->To(&map)) return maybe_map;
- // Allocate the object based on the map.
- JSModule* module;
- MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
- if (!maybe_module->To(&module)) return maybe_module;
- module->set_context(context);
- module->set_scope_info(scope_info);
- return module;
-}
-
-
-MaybeObject* Heap::AllocateJSArrayAndStorage(
- ElementsKind elements_kind,
- int length,
- int capacity,
- ArrayStorageAllocationMode mode,
- PretenureFlag pretenure) {
- MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
- JSArray* array;
- if (!maybe_array->To(&array)) return maybe_array;
-
- // TODO(mvstanton): this body of code is duplicate with AllocateJSArrayStorage
- // for performance reasons.
- ASSERT(capacity >= length);
-
- if (capacity == 0) {
- array->set_length(Smi::FromInt(0));
- array->set_elements(empty_fixed_array());
- return array;
- }
-
- FixedArrayBase* elms;
- MaybeObject* maybe_elms = NULL;
- if (IsFastDoubleElementsKind(elements_kind)) {
- if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
- maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
- } else {
- ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
- maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
- }
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
- if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
- maybe_elms = AllocateUninitializedFixedArray(capacity);
- } else {
- ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
- maybe_elms = AllocateFixedArrayWithHoles(capacity);
- }
- }
- if (!maybe_elms->To(&elms)) return maybe_elms;
-
- array->set_elements(elms);
- array->set_length(Smi::FromInt(length));
- return array;
-}
-
-
-MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
- ElementsKind elements_kind,
- int length,
- int capacity,
- Handle<Object> allocation_site_payload,
- ArrayStorageAllocationMode mode) {
- MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
- allocation_site_payload);
- JSArray* array;
- if (!maybe_array->To(&array)) return maybe_array;
- return AllocateJSArrayStorage(array, length, capacity, mode);
-}
-
-
-MaybeObject* Heap::AllocateJSArrayStorage(
- JSArray* array,
- int length,
- int capacity,
- ArrayStorageAllocationMode mode) {
- ASSERT(capacity >= length);
-
- if (capacity == 0) {
- array->set_length(Smi::FromInt(0));
- array->set_elements(empty_fixed_array());
- return array;
- }
-
- FixedArrayBase* elms;
- MaybeObject* maybe_elms = NULL;
- ElementsKind elements_kind = array->GetElementsKind();
- if (IsFastDoubleElementsKind(elements_kind)) {
- if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
- maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
- } else {
- ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
- maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
- }
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
- if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
- maybe_elms = AllocateUninitializedFixedArray(capacity);
- } else {
- ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
- maybe_elms = AllocateFixedArrayWithHoles(capacity);
- }
- }
- if (!maybe_elms->To(&elms)) return maybe_elms;
-
- array->set_elements(elms);
- array->set_length(Smi::FromInt(length));
- return array;
-}
-
-
-MaybeObject* Heap::AllocateJSArrayWithElements(
- FixedArrayBase* elements,
- ElementsKind elements_kind,
- int length,
- PretenureFlag pretenure) {
- MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
- JSArray* array;
- if (!maybe_array->To(&array)) return maybe_array;
-
- array->set_elements(elements);
- array->set_length(Smi::FromInt(length));
- array->ValidateElements();
- return array;
-}
-
-
-MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
- // Allocate map.
- // TODO(rossberg): Once we optimize proxies, think about a scheme to share
- // maps. Will probably depend on the identity of the handler object, too.
- Map* map;
- MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
- if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
- map->set_prototype(prototype);
-
- // Allocate the proxy object.
- JSProxy* result;
- MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
- result->InitializeBody(map->instance_size(), Smi::FromInt(0));
- result->set_handler(handler);
- result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
- Object* call_trap,
- Object* construct_trap,
- Object* prototype) {
- // Allocate map.
- // TODO(rossberg): Once we optimize proxies, think about a scheme to share
- // maps. Will probably depend on the identity of the handler object, too.
- Map* map;
- MaybeObject* maybe_map_obj =
- AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
- if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
- map->set_prototype(prototype);
-
- // Allocate the proxy object.
- JSFunctionProxy* result;
- MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
- result->InitializeBody(map->instance_size(), Smi::FromInt(0));
- result->set_handler(handler);
- result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
- result->set_call_trap(call_trap);
- result->set_construct_trap(construct_trap);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
- ASSERT(constructor->has_initial_map());
- Map* map = constructor->initial_map();
- ASSERT(map->is_dictionary_map());
-
- // Make sure no field properties are described in the initial map.
- // This guarantees us that normalizing the properties does not
- // require us to change property values to JSGlobalPropertyCells.
- ASSERT(map->NextFreePropertyIndex() == 0);
-
- // Make sure we don't have a ton of pre-allocated slots in the
- // global objects. They will be unused once we normalize the object.
- ASSERT(map->unused_property_fields() == 0);
- ASSERT(map->inobject_properties() == 0);
-
- // Initial size of the backing store to avoid resize of the storage during
- // bootstrapping. The size differs between the JS global object ad the
- // builtins object.
- int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
-
- // Allocate a dictionary object for backing storage.
- StringDictionary* dictionary;
- MaybeObject* maybe_dictionary =
- StringDictionary::Allocate(
- map->NumberOfOwnDescriptors() * 2 + initial_size);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
-
- // The global object might be created from an object template with accessors.
- // Fill these accessors into the dictionary.
- DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
- ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
- PropertyDetails d = PropertyDetails(details.attributes(),
- CALLBACKS,
- details.descriptor_index());
- Object* value = descs->GetCallbacksObject(i);
- MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
- if (!maybe_value->ToObject(&value)) return maybe_value;
-
- MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_added->To(&dictionary)) return maybe_added;
- }
-
- // Allocate the global object and initialize it with the backing store.
- JSObject* global;
- MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
- if (!maybe_global->To(&global)) return maybe_global;
-
- InitializeJSObjectFromMap(global, dictionary, map);
-
- // Create a new map for the global object.
- Map* new_map;
- MaybeObject* maybe_map = map->CopyDropDescriptors();
- if (!maybe_map->To(&new_map)) return maybe_map;
- new_map->set_dictionary_map(true);
-
- // Set up the global object as a normalized object.
- global->set_map(new_map);
- global->set_properties(dictionary);
-
- // Make sure result is a global object with properties in dictionary.
- ASSERT(global->IsGlobalObject());
- ASSERT(!global->HasFastProperties());
- return global;
-}
-
-
-MaybeObject* Heap::CopyJSObject(JSObject* source) {
- // Never used to copy functions. If functions need to be copied we
- // have to be careful to clear the literals array.
- SLOW_ASSERT(!source->IsJSFunction());
-
- // Make the clone.
- Map* map = source->map();
- int object_size = map->instance_size();
- Object* clone;
-
- WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
-
- // If we're forced to always allocate, we use the general allocation
- // functions which may leave us with an object in old space.
- if (always_allocate()) {
- { MaybeObject* maybe_clone =
- AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- Address clone_address = HeapObject::cast(clone)->address();
- CopyBlock(clone_address,
- source->address(),
- object_size);
- // Update write barrier for all fields that lie beyond the header.
- RecordWrites(clone_address,
- JSObject::kHeaderSize,
- (object_size - JSObject::kHeaderSize) / kPointerSize);
- } else {
- wb_mode = SKIP_WRITE_BARRIER;
-
- { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- SLOW_ASSERT(InNewSpace(clone));
- // Since we know the clone is allocated in new space, we can copy
- // the contents without worrying about updating the write barrier.
- CopyBlock(HeapObject::cast(clone)->address(),
- source->address(),
- object_size);
- }
-
- SLOW_ASSERT(
- JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
- FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
- FixedArray* properties = FixedArray::cast(source->properties());
- // Update elements if necessary.
- if (elements->length() > 0) {
- Object* elem;
- { MaybeObject* maybe_elem;
- if (elements->map() == fixed_cow_array_map()) {
- maybe_elem = FixedArray::cast(elements);
- } else if (source->HasFastDoubleElements()) {
- maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
- } else {
- maybe_elem = CopyFixedArray(FixedArray::cast(elements));
- }
- if (!maybe_elem->ToObject(&elem)) return maybe_elem;
- }
- JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
- }
- // Update properties if necessary.
- if (properties->length() > 0) {
- Object* prop;
- { MaybeObject* maybe_prop = CopyFixedArray(properties);
- if (!maybe_prop->ToObject(&prop)) return maybe_prop;
- }
- JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
- }
- // Return the new clone.
- return clone;
-}
-
-
-MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
- // Never used to copy functions. If functions need to be copied we
- // have to be careful to clear the literals array.
- SLOW_ASSERT(!source->IsJSFunction());
-
- // Make the clone.
- Map* map = source->map();
- int object_size = map->instance_size();
- Object* clone;
-
- ASSERT(map->CanTrackAllocationSite());
- ASSERT(map->instance_type() == JS_ARRAY_TYPE);
- WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
-
- // If we're forced to always allocate, we use the general allocation
- // functions which may leave us with an object in old space.
- int adjusted_object_size = object_size;
- if (always_allocate()) {
- // We'll only track origin if we are certain to allocate in new space
- const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
- if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
- adjusted_object_size += AllocationSiteInfo::kSize;
- }
-
- { MaybeObject* maybe_clone =
- AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- Address clone_address = HeapObject::cast(clone)->address();
- CopyBlock(clone_address,
- source->address(),
- object_size);
- // Update write barrier for all fields that lie beyond the header.
- int write_barrier_offset = adjusted_object_size > object_size
- ? JSArray::kSize + AllocationSiteInfo::kSize
- : JSObject::kHeaderSize;
- if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
- RecordWrites(clone_address,
- write_barrier_offset,
- (object_size - write_barrier_offset) / kPointerSize);
- }
-
- // Track allocation site information, if we failed to allocate it inline.
- if (InNewSpace(clone) &&
- adjusted_object_size == object_size) {
- MaybeObject* maybe_alloc_info =
- AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
- AllocationSiteInfo* alloc_info;
- if (maybe_alloc_info->To(&alloc_info)) {
- alloc_info->set_map_no_write_barrier(allocation_site_info_map());
- alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
- }
- }
- } else {
- wb_mode = SKIP_WRITE_BARRIER;
- adjusted_object_size += AllocationSiteInfo::kSize;
-
- { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- SLOW_ASSERT(InNewSpace(clone));
- // Since we know the clone is allocated in new space, we can copy
- // the contents without worrying about updating the write barrier.
- CopyBlock(HeapObject::cast(clone)->address(),
- source->address(),
- object_size);
- }
-
- if (adjusted_object_size > object_size) {
- AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
- reinterpret_cast<Address>(clone) + object_size);
- alloc_info->set_map_no_write_barrier(allocation_site_info_map());
- alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
- }
-
- SLOW_ASSERT(
- JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
- FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
- FixedArray* properties = FixedArray::cast(source->properties());
- // Update elements if necessary.
- if (elements->length() > 0) {
- Object* elem;
- { MaybeObject* maybe_elem;
- if (elements->map() == fixed_cow_array_map()) {
- maybe_elem = FixedArray::cast(elements);
- } else if (source->HasFastDoubleElements()) {
- maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
- } else {
- maybe_elem = CopyFixedArray(FixedArray::cast(elements));
- }
- if (!maybe_elem->ToObject(&elem)) return maybe_elem;
- }
- JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
- }
- // Update properties if necessary.
- if (properties->length() > 0) {
- Object* prop;
- { MaybeObject* maybe_prop = CopyFixedArray(properties);
- if (!maybe_prop->ToObject(&prop)) return maybe_prop;
- }
- JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
- }
- // Return the new clone.
- return clone;
-}
-
-
-MaybeObject* Heap::ReinitializeJSReceiver(
- JSReceiver* object, InstanceType type, int size) {
- ASSERT(type >= FIRST_JS_OBJECT_TYPE);
-
- // Allocate fresh map.
- // TODO(rossberg): Once we optimize proxies, cache these maps.
- Map* map;
- MaybeObject* maybe = AllocateMap(type, size);
- if (!maybe->To<Map>(&map)) return maybe;
-
- // Check that the receiver has at least the size of the fresh object.
- int size_difference = object->map()->instance_size() - map->instance_size();
- ASSERT(size_difference >= 0);
-
- map->set_prototype(object->map()->prototype());
-
- // Allocate the backing storage for the properties.
- int prop_size = map->unused_property_fields() - map->inobject_properties();
- Object* properties;
- maybe = AllocateFixedArray(prop_size, TENURED);
- if (!maybe->ToObject(&properties)) return maybe;
-
- // Functions require some allocation, which might fail here.
- SharedFunctionInfo* shared = NULL;
- if (type == JS_FUNCTION_TYPE) {
- String* name;
- maybe =
- InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
- if (!maybe->To<String>(&name)) return maybe;
- maybe = AllocateSharedFunctionInfo(name);
- if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
- }
-
- // Because of possible retries of this function after failure,
- // we must NOT fail after this point, where we have changed the type!
-
- // Reset the map for the object.
- object->set_map(map);
- JSObject* jsobj = JSObject::cast(object);
-
- // Reinitialize the object from the constructor map.
- InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
-
- // Functions require some minimal initialization.
- if (type == JS_FUNCTION_TYPE) {
- map->set_function_with_prototype(true);
- InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
- JSFunction::cast(object)->set_context(
- isolate()->context()->native_context());
- }
-
- // Put in filler if the new object is smaller than the old.
- if (size_difference > 0) {
- CreateFillerObjectAt(
- object->address() + map->instance_size(), size_difference);
- }
-
- return object;
-}
-
-
-MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
- JSGlobalProxy* object) {
- ASSERT(constructor->has_initial_map());
- Map* map = constructor->initial_map();
-
- // Check that the already allocated object has the same size and type as
- // objects allocated using the constructor.
- ASSERT(map->instance_size() == object->map()->instance_size());
- ASSERT(map->instance_type() == object->map()->instance_type());
-
- // Allocate the backing storage for the properties.
- int prop_size = map->unused_property_fields() - map->inobject_properties();
- Object* properties;
- { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
- if (!maybe_properties->ToObject(&properties)) return maybe_properties;
- }
-
- // Reset the map for the object.
- object->set_map(constructor->initial_map());
-
- // Reinitialize the object from the constructor map.
- InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
- return object;
-}
-
-
-MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
- PretenureFlag pretenure) {
- int length = string.length();
- if (length == 1) {
- return Heap::LookupSingleCharacterStringFromCode(string[0]);
- }
- Object* result;
- { MaybeObject* maybe_result =
- AllocateRawOneByteString(string.length(), pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Copy the characters into the new object.
- CopyChars(SeqOneByteString::cast(result)->GetChars(),
- string.start(),
- length);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
- int non_ascii_start,
- PretenureFlag pretenure) {
- // Continue counting the number of characters in the UTF-8 string, starting
- // from the first non-ascii character or word.
- Access<UnicodeCache::Utf8Decoder>
- decoder(isolate_->unicode_cache()->utf8_decoder());
- decoder->Reset(string.start() + non_ascii_start,
- string.length() - non_ascii_start);
- int utf16_length = decoder->Utf16Length();
- ASSERT(utf16_length > 0);
- // Allocate string.
- Object* result;
- {
- int chars = non_ascii_start + utf16_length;
- MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Convert and copy the characters into the new object.
- SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
- // Copy ascii portion.
- uint16_t* data = twobyte->GetChars();
- if (non_ascii_start != 0) {
- const char* ascii_data = string.start();
- for (int i = 0; i < non_ascii_start; i++) {
- *data++ = *ascii_data++;
- }
- }
- // Now write the remainder.
- decoder->WriteUtf16(data, utf16_length);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
- PretenureFlag pretenure) {
- // Check if the string is an ASCII string.
- Object* result;
- int length = string.length();
- const uc16* start = string.start();
-
- if (String::IsOneByte(start, length)) {
- MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
- } else { // It's not a one byte string.
- MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
- }
- return result;
-}
-
-
-Map* Heap::InternalizedStringMapForString(String* string) {
- // If the string is in new space it cannot be used as internalized.
- if (InNewSpace(string)) return NULL;
-
- // Find the corresponding internalized string map for strings.
- switch (string->map()->instance_type()) {
- case STRING_TYPE: return internalized_string_map();
- case ASCII_STRING_TYPE: return ascii_internalized_string_map();
- case CONS_STRING_TYPE: return cons_internalized_string_map();
- case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
- case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
- case EXTERNAL_ASCII_STRING_TYPE:
- return external_ascii_internalized_string_map();
- case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
- return external_internalized_string_with_ascii_data_map();
- case SHORT_EXTERNAL_STRING_TYPE:
- return short_external_internalized_string_map();
- case SHORT_EXTERNAL_ASCII_STRING_TYPE:
- return short_external_ascii_internalized_string_map();
- case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
- return short_external_internalized_string_with_ascii_data_map();
- default: return NULL; // No match found.
- }
-}
-
-
-static inline void WriteOneByteData(Vector<const char> vector,
- uint8_t* chars,
- int len) {
- // Only works for ascii.
- ASSERT(vector.length() == len);
- memcpy(chars, vector.start(), len);
-}
-
-static inline void WriteTwoByteData(Vector<const char> vector,
- uint16_t* chars,
- int len) {
- const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
- unsigned stream_length = vector.length();
- while (stream_length != 0) {
- unsigned consumed = 0;
- uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
- ASSERT(c != unibrow::Utf8::kBadChar);
- ASSERT(consumed <= stream_length);
- stream_length -= consumed;
- stream += consumed;
- if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- len -= 2;
- if (len < 0) break;
- *chars++ = unibrow::Utf16::LeadSurrogate(c);
- *chars++ = unibrow::Utf16::TrailSurrogate(c);
- } else {
- len -= 1;
- if (len < 0) break;
- *chars++ = c;
- }
- }
- ASSERT(stream_length == 0);
- ASSERT(len == 0);
-}
-
-
-static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
- ASSERT(s->length() == len);
- String::WriteToFlat(s, chars, 0, len);
-}
-
-static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
- ASSERT(s->length() == len);
- String::WriteToFlat(s, chars, 0, len);
-}
-
-
-template<bool is_one_byte, typename T>
-MaybeObject* Heap::AllocateInternalizedStringImpl(
- T t, int chars, uint32_t hash_field) {
- ASSERT(chars >= 0);
- // Compute map and object size.
- int size;
- Map* map;
-
- if (is_one_byte) {
- if (chars > SeqOneByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0x9);
- }
- map = ascii_internalized_string_map();
- size = SeqOneByteString::SizeFor(chars);
- } else {
- if (chars > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0xa);
- }
- map = internalized_string_map();
- size = SeqTwoByteString::SizeFor(chars);
- }
-
- // Allocate string.
- Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
- // Set length and hash fields of the allocated string.
- String* answer = String::cast(result);
- answer->set_length(chars);
- answer->set_hash_field(hash_field);
- SeqString::cast(answer)->set_symbol_id(0);
-
- ASSERT_EQ(size, answer->Size());
-
- if (is_one_byte) {
- WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
- } else {
- WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
- }
- return answer;
-}
-
-
-// Need explicit instantiations.
-template
-MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
-template
-MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
- String*, int, uint32_t);
-template
-MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
- Vector<const char>, int, uint32_t);
-
-
-MaybeObject* Heap::AllocateRawOneByteString(int length,
- PretenureFlag pretenure) {
- if (length < 0 || length > SeqOneByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0xb);
- }
-
- int size = SeqOneByteString::SizeFor(length);
- ASSERT(size <= SeqOneByteString::kMaxSize);
-
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (space == NEW_SPACE) {
- if (size > kMaxObjectSizeInNewSpace) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- } else if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in new space, retry in large object space.
- retry_space = LO_SPACE;
- }
- } else if (space == OLD_DATA_SPACE &&
- size > Page::kMaxNonCodeHeapObjectSize) {
- space = LO_SPACE;
- }
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Partially initialize the object.
- HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
- String::cast(result)->set_length(length);
- String::cast(result)->set_hash_field(String::kEmptyHashField);
- ASSERT_EQ(size, HeapObject::cast(result)->Size());
-
-#ifndef ENABLE_LATIN_1
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- // Initialize string's content to ensure ASCII-ness (character range 0-127)
- // as required when verifying the heap.
- uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
- memset(dest, 0x0F, length * kCharSize);
- }
-#endif
-#endif
-
- return result;
-}
-
-
-MaybeObject* Heap::AllocateRawTwoByteString(int length,
- PretenureFlag pretenure) {
- if (length < 0 || length > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0xc);
- }
- int size = SeqTwoByteString::SizeFor(length);
- ASSERT(size <= SeqTwoByteString::kMaxSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (space == NEW_SPACE) {
- if (size > kMaxObjectSizeInNewSpace) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- } else if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in new space, retry in large object space.
- retry_space = LO_SPACE;
- }
- } else if (space == OLD_DATA_SPACE &&
- size > Page::kMaxNonCodeHeapObjectSize) {
- space = LO_SPACE;
- }
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Partially initialize the object.
- HeapObject::cast(result)->set_map_no_write_barrier(string_map());
- String::cast(result)->set_length(length);
- String::cast(result)->set_hash_field(String::kEmptyHashField);
- ASSERT_EQ(size, HeapObject::cast(result)->Size());
- return result;
-}
-
-
-MaybeObject* Heap::AllocateJSArray(
- ElementsKind elements_kind,
- PretenureFlag pretenure) {
- Context* native_context = isolate()->context()->native_context();
- JSFunction* array_function = native_context->array_function();
- Map* map = array_function->initial_map();
- Object* maybe_map_array = native_context->js_array_maps();
- if (!maybe_map_array->IsUndefined()) {
- Object* maybe_transitioned_map =
- FixedArray::cast(maybe_map_array)->get(elements_kind);
- if (!maybe_transitioned_map->IsUndefined()) {
- map = Map::cast(maybe_transitioned_map);
- }
- }
-
- return AllocateJSObjectFromMap(map, pretenure);
-}
-
-
-MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<Object> allocation_site_info_payload) {
- Context* native_context = isolate()->context()->native_context();
- JSFunction* array_function = native_context->array_function();
- Map* map = array_function->initial_map();
- Object* maybe_map_array = native_context->js_array_maps();
- if (!maybe_map_array->IsUndefined()) {
- Object* maybe_transitioned_map =
- FixedArray::cast(maybe_map_array)->get(elements_kind);
- if (!maybe_transitioned_map->IsUndefined()) {
- map = Map::cast(maybe_transitioned_map);
- }
- }
- return AllocateJSObjectFromMapWithAllocationSite(map,
- allocation_site_info_payload);
-}
-
-
-MaybeObject* Heap::AllocateEmptyFixedArray() {
- int size = FixedArray::SizeFor(0);
- Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Initialize the object.
- reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
- fixed_array_map());
- reinterpret_cast<FixedArray*>(result)->set_length(0);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateRawFixedArray(int length) {
- if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException(0xd);
- }
- ASSERT(length > 0);
- // Use the general function if we're forced to always allocate.
- if (always_allocate()) return AllocateFixedArray(length, TENURED);
- // Allocate the raw data for a fixed array.
- int size = FixedArray::SizeFor(length);
- return size <= kMaxObjectSizeInNewSpace
- ? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
-}
-
-
-MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
- int len = src->length();
- Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- if (InNewSpace(obj)) {
- HeapObject* dst = HeapObject::cast(obj);
- dst->set_map_no_write_barrier(map);
- CopyBlock(dst->address() + kPointerSize,
- src->address() + kPointerSize,
- FixedArray::SizeFor(len) - kPointerSize);
- return obj;
- }
- HeapObject::cast(obj)->set_map_no_write_barrier(map);
- FixedArray* result = FixedArray::cast(obj);
- result->set_length(len);
-
- // Copy the content
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
- return result;
-}
-
-
-MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
- Map* map) {
- int len = src->length();
- Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- HeapObject* dst = HeapObject::cast(obj);
- dst->set_map_no_write_barrier(map);
- CopyBlock(
- dst->address() + FixedDoubleArray::kLengthOffset,
- src->address() + FixedDoubleArray::kLengthOffset,
- FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
- return obj;
-}
-
-
-MaybeObject* Heap::AllocateFixedArray(int length) {
- ASSERT(length >= 0);
- if (length == 0) return empty_fixed_array();
- Object* result;
- { MaybeObject* maybe_result = AllocateRawFixedArray(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Initialize header.
- FixedArray* array = reinterpret_cast<FixedArray*>(result);
- array->set_map_no_write_barrier(fixed_array_map());
- array->set_length(length);
- // Initialize body.
- ASSERT(!InNewSpace(undefined_value()));
- MemsetPointer(array->data_start(), undefined_value(), length);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
- if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException(0xe);
- }
-
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- int size = FixedArray::SizeFor(length);
- if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
- // Too big for new space.
- space = LO_SPACE;
- } else if (space == OLD_POINTER_SPACE &&
- size > Page::kMaxNonCodeHeapObjectSize) {
- // Too big for old pointer space.
- space = LO_SPACE;
- }
-
- AllocationSpace retry_space =
- (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
-
- return AllocateRaw(size, space, retry_space);
-}
-
-
-MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
- Heap* heap,
- int length,
- PretenureFlag pretenure,
- Object* filler) {
- ASSERT(length >= 0);
- ASSERT(heap->empty_fixed_array()->IsFixedArray());
- if (length == 0) return heap->empty_fixed_array();
-
- ASSERT(!heap->InNewSpace(filler));
- Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
- FixedArray* array = FixedArray::cast(result);
- array->set_length(length);
- MemsetPointer(array->data_start(), filler, length);
- return array;
-}
-
-
-MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(this,
- length,
- pretenure,
- undefined_value());
-}
-
-
-MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
- PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(this,
- length,
- pretenure,
- the_hole_value());
-}
-
-
-MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
- if (length == 0) return empty_fixed_array();
-
- Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
- fixed_array_map());
- FixedArray::cast(obj)->set_length(length);
- return obj;
-}
-
-
-MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
- int size = FixedDoubleArray::SizeFor(0);
- Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Initialize the object.
- reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
- fixed_double_array_map());
- reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
- int length,
- PretenureFlag pretenure) {
- if (length == 0) return empty_fixed_array();
-
- Object* elements_object;
- MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
- if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
- FixedDoubleArray* elements =
- reinterpret_cast<FixedDoubleArray*>(elements_object);
-
- elements->set_map_no_write_barrier(fixed_double_array_map());
- elements->set_length(length);
- return elements;
-}
-
-
-MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
- int length,
- PretenureFlag pretenure) {
- if (length == 0) return empty_fixed_array();
-
- Object* elements_object;
- MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
- if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
- FixedDoubleArray* elements =
- reinterpret_cast<FixedDoubleArray*>(elements_object);
-
- for (int i = 0; i < length; ++i) {
- elements->set_the_hole(i);
- }
-
- elements->set_map_no_write_barrier(fixed_double_array_map());
- elements->set_length(length);
- return elements;
-}
-
-
-MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
- PretenureFlag pretenure) {
- if (length < 0 || length > FixedDoubleArray::kMaxLength) {
- return Failure::OutOfMemoryException(0xf);
- }
-
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- int size = FixedDoubleArray::SizeFor(length);
-
-#ifndef V8_HOST_ARCH_64_BIT
- size += kPointerSize;
-#endif
-
- if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
- // Too big for new space.
- space = LO_SPACE;
- } else if (space == OLD_DATA_SPACE &&
- size > Page::kMaxNonCodeHeapObjectSize) {
- // Too big for old data space.
- space = LO_SPACE;
- }
-
- AllocationSpace retry_space =
- (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
-
- HeapObject* object;
- { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
- if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
- }
-
- return EnsureDoubleAligned(this, object, size);
-}
-
-
-MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
- Object* result;
- { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
- hash_table_map());
- ASSERT(result->IsHashTable());
- return result;
-}
-
-
-MaybeObject* Heap::AllocateSymbol(PretenureFlag pretenure) {
- // Statically ensure that it is safe to allocate symbols in paged spaces.
- STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-
- Object* result;
- MaybeObject* maybe = AllocateRaw(Symbol::kSize, space, OLD_DATA_SPACE);
- if (!maybe->ToObject(&result)) return maybe;
-
- HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
-
- // Generate a random hash value.
- int hash;
- int attempts = 0;
- do {
- hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
- attempts++;
- } while (hash == 0 && attempts < 30);
- if (hash == 0) hash = 1; // never return 0
-
- Symbol::cast(result)->set_hash_field(
- Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
-
- ASSERT(result->IsSymbol());
- return result;
-}
-
-
-MaybeObject* Heap::AllocateNativeContext() {
- Object* result;
- { MaybeObject* maybe_result =
- AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(native_context_map());
- context->set_js_array_maps(undefined_value());
- ASSERT(context->IsNativeContext());
- ASSERT(result->IsContext());
- return result;
-}
-
-
-MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
- ScopeInfo* scope_info) {
- Object* result;
- { MaybeObject* maybe_result =
- AllocateFixedArray(scope_info->ContextLength(), TENURED);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(global_context_map());
- context->set_closure(function);
- context->set_previous(function->context());
- context->set_extension(scope_info);
- context->set_global_object(function->context()->global_object());
- ASSERT(context->IsGlobalContext());
- ASSERT(result->IsContext());
- return context;
-}
-
-
-MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
- Object* result;
- { MaybeObject* maybe_result =
- AllocateFixedArray(scope_info->ContextLength(), TENURED);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(module_context_map());
- // Instance link will be set later.
- context->set_extension(Smi::FromInt(0));
- return context;
-}
-
-
-MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
- ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
- Object* result;
- { MaybeObject* maybe_result = AllocateFixedArray(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(function_context_map());
- context->set_closure(function);
- context->set_previous(function->context());
- context->set_extension(Smi::FromInt(0));
- context->set_global_object(function->context()->global_object());
- context->set_qml_global_object(function->context()->qml_global_object());
- return context;
-}
-
-
-MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
- Context* previous,
- String* name,
- Object* thrown_object) {
- STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
- Object* result;
- { MaybeObject* maybe_result =
- AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(catch_context_map());
- context->set_closure(function);
- context->set_previous(previous);
- context->set_extension(name);
- context->set_global_object(previous->global_object());
- context->set_qml_global_object(previous->qml_global_object());
- context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
- return context;
-}
-
-
-MaybeObject* Heap::AllocateWithContext(JSFunction* function,
- Context* previous,
- JSObject* extension) {
- Object* result;
- { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(with_context_map());
- context->set_closure(function);
- context->set_previous(previous);
- context->set_extension(extension);
- context->set_global_object(previous->global_object());
- context->set_qml_global_object(previous->qml_global_object());
- return context;
-}
-
-
-MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
- Context* previous,
- ScopeInfo* scope_info) {
- Object* result;
- { MaybeObject* maybe_result =
- AllocateFixedArrayWithHoles(scope_info->ContextLength());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(block_context_map());
- context->set_closure(function);
- context->set_previous(previous);
- context->set_extension(scope_info);
- context->set_global_object(previous->global_object());
- context->set_qml_global_object(previous->qml_global_object());
- return context;
-}
-
-
-MaybeObject* Heap::AllocateScopeInfo(int length) {
- FixedArray* scope_info;
- MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
- if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
- scope_info->set_map_no_write_barrier(scope_info_map());
- return scope_info;
-}
-
-
-MaybeObject* Heap::AllocateExternal(void* value) {
- Foreign* foreign;
- { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
- if (!maybe_result->To(&foreign)) return maybe_result;
- }
- JSObject* external;
- { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
- if (!maybe_result->To(&external)) return maybe_result;
- }
- external->SetInternalField(0, foreign);
- return external;
-}
-
-
-MaybeObject* Heap::AllocateStruct(InstanceType type) {
- Map* map;
- switch (type) {
-#define MAKE_CASE(NAME, Name, name) \
- case NAME##_TYPE: map = name##_map(); break;
-STRUCT_LIST(MAKE_CASE)
-#undef MAKE_CASE
- default:
- UNREACHABLE();
- return Failure::InternalError();
- }
- int size = map->instance_size();
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
- Object* result;
- { MaybeObject* maybe_result = Allocate(map, space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Struct::cast(result)->InitializeBody(size);
- return result;
-}
-
-
-bool Heap::IsHeapIterable() {
- return (!old_pointer_space()->was_swept_conservatively() &&
- !old_data_space()->was_swept_conservatively());
-}
-
-
-void Heap::EnsureHeapIsIterable() {
- ASSERT(IsAllocationAllowed());
- if (!IsHeapIterable()) {
- CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
- }
- ASSERT(IsHeapIterable());
-}
-
-
-void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
- incremental_marking()->Step(step_size,
- IncrementalMarking::NO_GC_VIA_STACK_GUARD);
-
- if (incremental_marking()->IsComplete()) {
- bool uncommit = false;
- if (gc_count_at_last_idle_gc_ == gc_count_) {
- // No GC since the last full GC, the mutator is probably not active.
- isolate_->compilation_cache()->Clear();
- uncommit = true;
- }
- CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
- gc_count_at_last_idle_gc_ = gc_count_;
- if (uncommit) {
- new_space_.Shrink();
- UncommitFromSpace();
- }
- }
-}
-
-
-bool Heap::IdleNotification(int hint) {
- // Hints greater than this value indicate that
- // the embedder is requesting a lot of GC work.
- const int kMaxHint = 1000;
- // Minimal hint that allows to do full GC.
- const int kMinHintForFullGC = 100;
- intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
- // The size factor is in range [5..250]. The numbers here are chosen from
- // experiments. If you changes them, make sure to test with
- // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
- intptr_t step_size =
- size_factor * IncrementalMarking::kAllocatedThreshold;
-
- if (contexts_disposed_ > 0) {
- if (hint >= kMaxHint) {
- // The embedder is requesting a lot of GC work after context disposal,
- // we age inline caches so that they don't keep objects from
- // the old context alive.
- AgeInlineCaches();
- }
- int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
- if (hint >= mark_sweep_time && !FLAG_expose_gc &&
- incremental_marking()->IsStopped()) {
- HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(kReduceMemoryFootprintMask,
- "idle notification: contexts disposed");
- } else {
- AdvanceIdleIncrementalMarking(step_size);
- contexts_disposed_ = 0;
- }
- // After context disposal there is likely a lot of garbage remaining, reset
- // the idle notification counters in order to trigger more incremental GCs
- // on subsequent idle notifications.
- StartIdleRound();
- return false;
- }
-
- if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
- return IdleGlobalGC();
- }
-
- // By doing small chunks of GC work in each IdleNotification,
- // perform a round of incremental GCs and after that wait until
- // the mutator creates enough garbage to justify a new round.
- // An incremental GC progresses as follows:
- // 1. many incremental marking steps,
- // 2. one old space mark-sweep-compact,
- // 3. many lazy sweep steps.
- // Use mark-sweep-compact events to count incremental GCs in a round.
-
- if (incremental_marking()->IsStopped()) {
- if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
- !IsSweepingComplete() &&
- !AdvanceSweepers(static_cast<int>(step_size))) {
- return false;
- }
- }
-
- if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
- if (EnoughGarbageSinceLastIdleRound()) {
- StartIdleRound();
- } else {
- return true;
- }
- }
-
- int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
- mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
- ms_count_at_last_idle_notification_ = ms_count_;
-
- int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
- mark_sweeps_since_idle_round_started_;
-
- if (remaining_mark_sweeps <= 0) {
- FinishIdleRound();
- return true;
- }
-
- if (incremental_marking()->IsStopped()) {
- // If there are no more than two GCs left in this idle round and we are
- // allowed to do a full GC, then make those GCs full in order to compact
- // the code space.
- // TODO(ulan): Once we enable code compaction for incremental marking,
- // we can get rid of this special case and always start incremental marking.
- if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
- CollectAllGarbage(kReduceMemoryFootprintMask,
- "idle notification: finalize idle round");
- } else {
- incremental_marking()->Start();
- }
- }
- if (!incremental_marking()->IsStopped()) {
- AdvanceIdleIncrementalMarking(step_size);
- }
- return false;
-}
-
-
-bool Heap::IdleGlobalGC() {
- static const int kIdlesBeforeScavenge = 4;
- static const int kIdlesBeforeMarkSweep = 7;
- static const int kIdlesBeforeMarkCompact = 8;
- static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
- static const unsigned int kGCsBetweenCleanup = 4;
-
- if (!last_idle_notification_gc_count_init_) {
- last_idle_notification_gc_count_ = gc_count_;
- last_idle_notification_gc_count_init_ = true;
- }
-
- bool uncommit = true;
- bool finished = false;
-
- // Reset the number of idle notifications received when a number of
- // GCs have taken place. This allows another round of cleanup based
- // on idle notifications if enough work has been carried out to
- // provoke a number of garbage collections.
- if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
- number_idle_notifications_ =
- Min(number_idle_notifications_ + 1, kMaxIdleCount);
- } else {
- number_idle_notifications_ = 0;
- last_idle_notification_gc_count_ = gc_count_;
- }
-
- if (number_idle_notifications_ == kIdlesBeforeScavenge) {
- CollectGarbage(NEW_SPACE, "idle notification");
- new_space_.Shrink();
- last_idle_notification_gc_count_ = gc_count_;
- } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
- // Before doing the mark-sweep collections we clear the
- // compilation cache to avoid hanging on to source code and
- // generated code for cached functions.
- isolate_->compilation_cache()->Clear();
-
- CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
- new_space_.Shrink();
- last_idle_notification_gc_count_ = gc_count_;
-
- } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
- CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
- new_space_.Shrink();
- last_idle_notification_gc_count_ = gc_count_;
- number_idle_notifications_ = 0;
- finished = true;
- } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
- // If we have received more than kIdlesBeforeMarkCompact idle
- // notifications we do not perform any cleanup because we don't
- // expect to gain much by doing so.
- finished = true;
- }
-
- if (uncommit) UncommitFromSpace();
-
- return finished;
-}
-
-
-#ifdef DEBUG
-
-void Heap::Print() {
- if (!HasBeenSetUp()) return;
- isolate()->PrintStack();
- AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
- space->Print();
- }
-}
-
-
-void Heap::ReportCodeStatistics(const char* title) {
- PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
- PagedSpace::ResetCodeStatistics();
- // We do not look for code in new space, map space, or old space. If code
- // somehow ends up in those spaces, we would miss it here.
- code_space_->CollectCodeStatistics();
- lo_space_->CollectCodeStatistics();
- PagedSpace::ReportCodeStatistics();
-}
-
-
-// This function expects that NewSpace's allocated objects histogram is
-// populated (via a call to CollectStatistics or else as a side effect of a
-// just-completed scavenge collection).
-void Heap::ReportHeapStatistics(const char* title) {
- USE(title);
- PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
- title, gc_count_);
- PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
- old_gen_promotion_limit_);
- PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
- old_gen_allocation_limit_);
- PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
-
- PrintF("\n");
- PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
- isolate_->global_handles()->PrintStats();
- PrintF("\n");
-
- PrintF("Heap statistics : ");
- isolate_->memory_allocator()->ReportStatistics();
- PrintF("To space : ");
- new_space_.ReportStatistics();
- PrintF("Old pointer space : ");
- old_pointer_space_->ReportStatistics();
- PrintF("Old data space : ");
- old_data_space_->ReportStatistics();
- PrintF("Code space : ");
- code_space_->ReportStatistics();
- PrintF("Map space : ");
- map_space_->ReportStatistics();
- PrintF("Cell space : ");
- cell_space_->ReportStatistics();
- PrintF("Large object space : ");
- lo_space_->ReportStatistics();
- PrintF(">>>>>> ========================================= >>>>>>\n");
-}
-
-#endif // DEBUG
-
-bool Heap::Contains(HeapObject* value) {
- return Contains(value->address());
-}
-
-
-bool Heap::Contains(Address addr) {
- if (OS::IsOutsideAllocatedSpace(addr)) return false;
- return HasBeenSetUp() &&
- (new_space_.ToSpaceContains(addr) ||
- old_pointer_space_->Contains(addr) ||
- old_data_space_->Contains(addr) ||
- code_space_->Contains(addr) ||
- map_space_->Contains(addr) ||
- cell_space_->Contains(addr) ||
- lo_space_->SlowContains(addr));
-}
-
-
-bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
- return InSpace(value->address(), space);
-}
-
-
-bool Heap::InSpace(Address addr, AllocationSpace space) {
- if (OS::IsOutsideAllocatedSpace(addr)) return false;
- if (!HasBeenSetUp()) return false;
-
- switch (space) {
- case NEW_SPACE:
- return new_space_.ToSpaceContains(addr);
- case OLD_POINTER_SPACE:
- return old_pointer_space_->Contains(addr);
- case OLD_DATA_SPACE:
- return old_data_space_->Contains(addr);
- case CODE_SPACE:
- return code_space_->Contains(addr);
- case MAP_SPACE:
- return map_space_->Contains(addr);
- case CELL_SPACE:
- return cell_space_->Contains(addr);
- case LO_SPACE:
- return lo_space_->SlowContains(addr);
- }
-
- return false;
-}
-
-
-#ifdef VERIFY_HEAP
-void Heap::Verify() {
- CHECK(HasBeenSetUp());
-
- store_buffer()->Verify();
-
- VerifyPointersVisitor visitor;
- IterateRoots(&visitor, VISIT_ONLY_STRONG);
-
- new_space_.Verify();
-
- old_pointer_space_->Verify(&visitor);
- map_space_->Verify(&visitor);
-
- VerifyPointersVisitor no_dirty_regions_visitor;
- old_data_space_->Verify(&no_dirty_regions_visitor);
- code_space_->Verify(&no_dirty_regions_visitor);
- cell_space_->Verify(&no_dirty_regions_visitor);
-
- lo_space_->Verify();
-}
-#endif
-
-
-MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
- Object* result = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- string_table()->LookupUtf8String(string, &result);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_string_table because StringTable::cast knows that
- // StringTable is a singleton and checks for identity.
- roots_[kStringTableRootIndex] = new_table;
- ASSERT(result != NULL);
- return result;
-}
-
-
-MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
- Object* result = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- string_table()->LookupOneByteString(string, &result);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_string_table because StringTable::cast knows that
- // StringTable is a singleton and checks for identity.
- roots_[kStringTableRootIndex] = new_table;
- ASSERT(result != NULL);
- return result;
-}
-
-
-MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
- int from,
- int length) {
- Object* result = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- string_table()->LookupSubStringOneByteString(string,
- from,
- length,
- &result);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_string_table because StringTable::cast knows that
- // StringTable is a singleton and checks for identity.
- roots_[kStringTableRootIndex] = new_table;
- ASSERT(result != NULL);
- return result;
-}
-
-
-MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
- Object* result = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- string_table()->LookupTwoByteString(string, &result);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_string_table because StringTable::cast knows that
- // StringTable is a singleton and checks for identity.
- roots_[kStringTableRootIndex] = new_table;
- ASSERT(result != NULL);
- return result;
-}
-
-
-MaybeObject* Heap::InternalizeString(String* string) {
- if (string->IsInternalizedString()) return string;
- Object* result = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- string_table()->LookupString(string, &result);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_string_table because StringTable::cast knows that
- // StringTable is a singleton and checks for identity.
- roots_[kStringTableRootIndex] = new_table;
- ASSERT(result != NULL);
- return result;
-}
-
-
-bool Heap::InternalizeStringIfExists(String* string, String** result) {
- if (string->IsInternalizedString()) {
- *result = string;
- return true;
- }
- return string_table()->LookupStringIfExists(string, result);
-}
-
-
-void Heap::ZapFromSpace() {
- NewSpacePageIterator it(new_space_.FromSpaceStart(),
- new_space_.FromSpaceEnd());
- while (it.has_next()) {
- NewSpacePage* page = it.next();
- for (Address cursor = page->area_start(), limit = page->area_end();
- cursor < limit;
- cursor += kPointerSize) {
- Memory::Address_at(cursor) = kFromSpaceZapValue;
- }
- }
-}
-
-
-void Heap::IterateAndMarkPointersToFromSpace(Address start,
- Address end,
- ObjectSlotCallback callback) {
- Address slot_address = start;
-
- // We are not collecting slots on new space objects during mutation
- // thus we have to scan for pointers to evacuation candidates when we
- // promote objects. But we should not record any slots in non-black
- // objects. Grey object's slots would be rescanned.
- // White object might not survive until the end of collection
- // it would be a violation of the invariant to record it's slots.
- bool record_slots = false;
- if (incremental_marking()->IsCompacting()) {
- MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
- record_slots = Marking::IsBlack(mark_bit);
- }
-
- while (slot_address < end) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* object = *slot;
- // If the store buffer becomes overfull we mark pages as being exempt from
- // the store buffer. These pages are scanned to find pointers that point
- // to the new space. In that case we may hit newly promoted objects and
- // fix the pointers before the promotion queue gets to them. Thus the 'if'.
- if (object->IsHeapObject()) {
- if (Heap::InFromSpace(object)) {
- callback(reinterpret_cast<HeapObject**>(slot),
- HeapObject::cast(object));
- Object* new_object = *slot;
- if (InNewSpace(new_object)) {
- SLOW_ASSERT(Heap::InToSpace(new_object));
- SLOW_ASSERT(new_object->IsHeapObject());
- store_buffer_.EnterDirectlyIntoStoreBuffer(
- reinterpret_cast<Address>(slot));
- }
- SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
- } else if (record_slots &&
- MarkCompactCollector::IsOnEvacuationCandidate(object)) {
- mark_compact_collector()->RecordSlot(slot, slot, object);
- }
- }
- slot_address += kPointerSize;
- }
-}
-
-
-#ifdef DEBUG
-typedef bool (*CheckStoreBufferFilter)(Object** addr);
-
-
-bool IsAMapPointerAddress(Object** addr) {
- uintptr_t a = reinterpret_cast<uintptr_t>(addr);
- int mod = a % Map::kSize;
- return mod >= Map::kPointerFieldsBeginOffset &&
- mod < Map::kPointerFieldsEndOffset;
-}
-
-
-bool EverythingsAPointer(Object** addr) {
- return true;
-}
-
-
-static void CheckStoreBuffer(Heap* heap,
- Object** current,
- Object** limit,
- Object**** store_buffer_position,
- Object*** store_buffer_top,
- CheckStoreBufferFilter filter,
- Address special_garbage_start,
- Address special_garbage_end) {
- Map* free_space_map = heap->free_space_map();
- for ( ; current < limit; current++) {
- Object* o = *current;
- Address current_address = reinterpret_cast<Address>(current);
- // Skip free space.
- if (o == free_space_map) {
- Address current_address = reinterpret_cast<Address>(current);
- FreeSpace* free_space =
- FreeSpace::cast(HeapObject::FromAddress(current_address));
- int skip = free_space->Size();
- ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
- ASSERT(skip > 0);
- current_address += skip - kPointerSize;
- current = reinterpret_cast<Object**>(current_address);
- continue;
- }
- // Skip the current linear allocation space between top and limit which is
- // unmarked with the free space map, but can contain junk.
- if (current_address == special_garbage_start &&
- special_garbage_end != special_garbage_start) {
- current_address = special_garbage_end - kPointerSize;
- current = reinterpret_cast<Object**>(current_address);
- continue;
- }
- if (!(*filter)(current)) continue;
- ASSERT(current_address < special_garbage_start ||
- current_address >= special_garbage_end);
- ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
- // We have to check that the pointer does not point into new space
- // without trying to cast it to a heap object since the hash field of
- // a string can contain values like 1 and 3 which are tagged null
- // pointers.
- if (!heap->InNewSpace(o)) continue;
- while (**store_buffer_position < current &&
- *store_buffer_position < store_buffer_top) {
- (*store_buffer_position)++;
- }
- if (**store_buffer_position != current ||
- *store_buffer_position == store_buffer_top) {
- Object** obj_start = current;
- while (!(*obj_start)->IsMap()) obj_start--;
- UNREACHABLE();
- }
- }
-}
-
-
-// Check that the store buffer contains all intergenerational pointers by
-// scanning a page and ensuring that all pointers to young space are in the
-// store buffer.
-void Heap::OldPointerSpaceCheckStoreBuffer() {
- OldSpace* space = old_pointer_space();
- PageIterator pages(space);
-
- store_buffer()->SortUniq();
-
- while (pages.has_next()) {
- Page* page = pages.next();
- Object** current = reinterpret_cast<Object**>(page->area_start());
-
- Address end = page->area_end();
-
- Object*** store_buffer_position = store_buffer()->Start();
- Object*** store_buffer_top = store_buffer()->Top();
-
- Object** limit = reinterpret_cast<Object**>(end);
- CheckStoreBuffer(this,
- current,
- limit,
- &store_buffer_position,
- store_buffer_top,
- &EverythingsAPointer,
- space->top(),
- space->limit());
- }
-}
-
-
-void Heap::MapSpaceCheckStoreBuffer() {
- MapSpace* space = map_space();
- PageIterator pages(space);
-
- store_buffer()->SortUniq();
-
- while (pages.has_next()) {
- Page* page = pages.next();
- Object** current = reinterpret_cast<Object**>(page->area_start());
-
- Address end = page->area_end();
-
- Object*** store_buffer_position = store_buffer()->Start();
- Object*** store_buffer_top = store_buffer()->Top();
-
- Object** limit = reinterpret_cast<Object**>(end);
- CheckStoreBuffer(this,
- current,
- limit,
- &store_buffer_position,
- store_buffer_top,
- &IsAMapPointerAddress,
- space->top(),
- space->limit());
- }
-}
-
-
-void Heap::LargeObjectSpaceCheckStoreBuffer() {
- LargeObjectIterator it(lo_space());
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- // We only have code, sequential strings, or fixed arrays in large
- // object space, and only fixed arrays can possibly contain pointers to
- // the young generation.
- if (object->IsFixedArray()) {
- Object*** store_buffer_position = store_buffer()->Start();
- Object*** store_buffer_top = store_buffer()->Top();
- Object** current = reinterpret_cast<Object**>(object->address());
- Object** limit =
- reinterpret_cast<Object**>(object->address() + object->Size());
- CheckStoreBuffer(this,
- current,
- limit,
- &store_buffer_position,
- store_buffer_top,
- &EverythingsAPointer,
- NULL,
- NULL);
- }
- }
-}
-#endif
-
-
-void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
- IterateStrongRoots(v, mode);
- IterateWeakRoots(v, mode);
-}
-
-
-void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
- v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
- v->Synchronize(VisitorSynchronization::kStringTable);
- if (mode != VISIT_ALL_IN_SCAVENGE &&
- mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
- // Scavenge collections have special processing for this.
- external_string_table_.Iterate(v);
- error_object_list_.Iterate(v);
- }
- v->Synchronize(VisitorSynchronization::kExternalStringsTable);
-}
-
-
-void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
- v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
- v->Synchronize(VisitorSynchronization::kStrongRootList);
-
- v->VisitPointer(BitCast<Object**>(&hidden_string_));
- v->Synchronize(VisitorSynchronization::kInternalizedString);
-
- isolate_->bootstrapper()->Iterate(v);
- v->Synchronize(VisitorSynchronization::kBootstrapper);
- isolate_->Iterate(v);
- v->Synchronize(VisitorSynchronization::kTop);
- Relocatable::Iterate(v);
- v->Synchronize(VisitorSynchronization::kRelocatable);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- isolate_->debug()->Iterate(v);
- if (isolate_->deoptimizer_data() != NULL) {
- isolate_->deoptimizer_data()->Iterate(v);
- }
-#endif
- v->Synchronize(VisitorSynchronization::kDebug);
- isolate_->compilation_cache()->Iterate(v);
- v->Synchronize(VisitorSynchronization::kCompilationCache);
-
- // Iterate over local handles in handle scopes.
- isolate_->handle_scope_implementer()->Iterate(v);
- isolate_->IterateDeferredHandles(v);
- v->Synchronize(VisitorSynchronization::kHandleScope);
-
- // Iterate over the builtin code objects and code stubs in the
- // heap. Note that it is not necessary to iterate over code objects
- // on scavenge collections.
- if (mode != VISIT_ALL_IN_SCAVENGE) {
- isolate_->builtins()->IterateBuiltins(v);
- }
- v->Synchronize(VisitorSynchronization::kBuiltins);
-
- // Iterate over global handles.
- switch (mode) {
- case VISIT_ONLY_STRONG:
- isolate_->global_handles()->IterateStrongRoots(v);
- break;
- case VISIT_ALL_IN_SCAVENGE:
- isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
- break;
- case VISIT_ALL_IN_SWEEP_NEWSPACE:
- case VISIT_ALL:
- isolate_->global_handles()->IterateAllRoots(v);
- break;
- }
- v->Synchronize(VisitorSynchronization::kGlobalHandles);
-
- // Iterate over pointers being held by inactive threads.
- isolate_->thread_manager()->Iterate(v);
- v->Synchronize(VisitorSynchronization::kThreadManager);
-
- // Iterate over the pointers the Serialization/Deserialization code is
- // holding.
- // During garbage collection this keeps the partial snapshot cache alive.
- // During deserialization of the startup snapshot this creates the partial
- // snapshot cache and deserializes the objects it refers to. During
- // serialization this does nothing, since the partial snapshot cache is
- // empty. However the next thing we do is create the partial snapshot,
- // filling up the partial snapshot cache with objects it needs as we go.
- SerializerDeserializer::Iterate(v);
- // We don't do a v->Synchronize call here, because in debug mode that will
- // output a flag to the snapshot. However at this point the serializer and
- // deserializer are deliberately a little unsynchronized (see above) so the
- // checking of the sync flag in the snapshot would fail.
-}
-
-
-// TODO(1236194): Since the heap size is configurable on the command line
-// and through the API, we should gracefully handle the case that the heap
-// size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int max_semispace_size,
- intptr_t max_old_gen_size,
- intptr_t max_executable_size) {
- if (HasBeenSetUp()) return false;
-
- if (FLAG_stress_compaction) {
- // This will cause more frequent GCs when stressing.
- max_semispace_size_ = Page::kPageSize;
- }
-
- if (max_semispace_size > 0) {
- if (max_semispace_size < Page::kPageSize) {
- max_semispace_size = Page::kPageSize;
- if (FLAG_trace_gc) {
- PrintPID("Max semispace size cannot be less than %dkbytes\n",
- Page::kPageSize >> 10);
- }
- }
- max_semispace_size_ = max_semispace_size;
- }
-
- if (Snapshot::IsEnabled()) {
- // If we are using a snapshot we always reserve the default amount
- // of memory for each semispace because code in the snapshot has
- // write-barrier code that relies on the size and alignment of new
- // space. We therefore cannot use a larger max semispace size
- // than the default reserved semispace size.
- if (max_semispace_size_ > reserved_semispace_size_) {
- max_semispace_size_ = reserved_semispace_size_;
- if (FLAG_trace_gc) {
- PrintPID("Max semispace size cannot be more than %dkbytes\n",
- reserved_semispace_size_ >> 10);
- }
- }
- } else {
- // If we are not using snapshots we reserve space for the actual
- // max semispace size.
- reserved_semispace_size_ = max_semispace_size_;
- }
-
- if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
- if (max_executable_size > 0) {
- max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
- }
-
- // The max executable size must be less than or equal to the max old
- // generation size.
- if (max_executable_size_ > max_old_generation_size_) {
- max_executable_size_ = max_old_generation_size_;
- }
-
- // The new space size must be a power of two to support single-bit testing
- // for containment.
- max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
- reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
- initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
- external_allocation_limit_ = 16 * max_semispace_size_;
-
- // The old generation is paged and needs at least one page for each space.
- int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
- max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
- Page::kPageSize),
- RoundUp(max_old_generation_size_,
- Page::kPageSize));
-
- configured_ = true;
- return true;
-}
-
-
-bool Heap::ConfigureHeapDefault() {
- return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
- static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
- static_cast<intptr_t>(FLAG_max_executable_size) * MB);
-}
-
-
-void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
- *stats->start_marker = HeapStats::kStartMarker;
- *stats->end_marker = HeapStats::kEndMarker;
- *stats->new_space_size = new_space_.SizeAsInt();
- *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
- *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
- *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
- *stats->old_data_space_size = old_data_space_->SizeOfObjects();
- *stats->old_data_space_capacity = old_data_space_->Capacity();
- *stats->code_space_size = code_space_->SizeOfObjects();
- *stats->code_space_capacity = code_space_->Capacity();
- *stats->map_space_size = map_space_->SizeOfObjects();
- *stats->map_space_capacity = map_space_->Capacity();
- *stats->cell_space_size = cell_space_->SizeOfObjects();
- *stats->cell_space_capacity = cell_space_->Capacity();
- *stats->lo_space_size = lo_space_->Size();
- isolate_->global_handles()->RecordStats(stats);
- *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
- *stats->memory_allocator_capacity =
- isolate()->memory_allocator()->Size() +
- isolate()->memory_allocator()->Available();
- *stats->os_error = OS::GetLastError();
- isolate()->memory_allocator()->Available();
- if (take_snapshot) {
- HeapIterator iterator(this);
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next()) {
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- stats->objects_per_type[type]++;
- stats->size_per_type[type] += obj->Size();
- }
- }
-}
-
-
-intptr_t Heap::PromotedSpaceSizeOfObjects() {
- return old_pointer_space_->SizeOfObjects()
- + old_data_space_->SizeOfObjects()
- + code_space_->SizeOfObjects()
- + map_space_->SizeOfObjects()
- + cell_space_->SizeOfObjects()
- + lo_space_->SizeOfObjects();
-}
-
-
-intptr_t Heap::PromotedExternalMemorySize() {
- if (amount_of_external_allocated_memory_
- <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
- return amount_of_external_allocated_memory_
- - amount_of_external_allocated_memory_at_last_global_gc_;
-}
-
-
-V8_DECLARE_ONCE(initialize_gc_once);
-
-static void InitializeGCOnce() {
- InitializeScavengingVisitorsTables();
- NewSpaceScavenger::Initialize();
- MarkCompactCollector::Initialize();
-}
-
-bool Heap::SetUp() {
-#ifdef DEBUG
- allocation_timeout_ = FLAG_gc_interval;
-#endif
-
- // Initialize heap spaces and initial maps and objects. Whenever something
- // goes wrong, just return false. The caller should check the results and
- // call Heap::TearDown() to release allocated memory.
- //
- // If the heap is not yet configured (e.g. through the API), configure it.
- // Configuration is based on the flags new-space-size (really the semispace
- // size) and old-space-size if set or the initial values of semispace_size_
- // and old_generation_size_ otherwise.
- if (!configured_) {
- if (!ConfigureHeapDefault()) return false;
- }
-
- CallOnce(&initialize_gc_once, &InitializeGCOnce);
-
- MarkMapPointersAsEncoded(false);
-
- // Set up memory allocator.
- if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
- return false;
-
- // Set up new space.
- if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
- return false;
- }
-
- // Initialize old pointer space.
- old_pointer_space_ =
- new OldSpace(this,
- max_old_generation_size_,
- OLD_POINTER_SPACE,
- NOT_EXECUTABLE);
- if (old_pointer_space_ == NULL) return false;
- if (!old_pointer_space_->SetUp()) return false;
-
- // Initialize old data space.
- old_data_space_ =
- new OldSpace(this,
- max_old_generation_size_,
- OLD_DATA_SPACE,
- NOT_EXECUTABLE);
- if (old_data_space_ == NULL) return false;
- if (!old_data_space_->SetUp()) return false;
-
- // Initialize the code space, set its maximum capacity to the old
- // generation size. It needs executable memory.
- // On 64-bit platform(s), we put all code objects in a 2 GB range of
- // virtual address space, so that they can call each other with near calls.
- if (code_range_size_ > 0) {
- if (!isolate_->code_range()->SetUp(code_range_size_)) {
- return false;
- }
- }
-
- code_space_ =
- new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
- if (code_space_ == NULL) return false;
- if (!code_space_->SetUp()) return false;
-
- // Initialize map space.
- map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
- if (map_space_ == NULL) return false;
- if (!map_space_->SetUp()) return false;
-
- // Initialize global property cell space.
- cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
- if (cell_space_ == NULL) return false;
- if (!cell_space_->SetUp()) return false;
-
- // The large object code space may contain code or data. We set the memory
- // to be non-executable here for safety, but this means we need to enable it
- // explicitly when allocating large code objects.
- lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
- if (lo_space_ == NULL) return false;
- if (!lo_space_->SetUp()) return false;
-
- // Set up the seed that is used to randomize the string hash function.
- ASSERT(hash_seed() == 0);
- if (FLAG_randomize_hashes) {
- if (FLAG_hash_seed == 0) {
- set_hash_seed(
- Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
- } else {
- set_hash_seed(Smi::FromInt(FLAG_hash_seed));
- }
- }
-
- LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
- LOG(isolate_, IntPtrTEvent("heap-available", Available()));
-
- store_buffer()->SetUp();
-
- if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
-
- return true;
-}
-
-bool Heap::CreateHeapObjects() {
- // Create initial maps.
- if (!CreateInitialMaps()) return false;
- if (!CreateApiObjects()) return false;
-
- // Create initial objects
- if (!CreateInitialObjects()) return false;
-
- native_contexts_list_ = undefined_value();
- return true;
-}
-
-
-void Heap::SetStackLimits() {
- ASSERT(isolate_ != NULL);
- ASSERT(isolate_ == isolate());
- // On 64 bit machines, pointers are generally out of range of Smis. We write
- // something that looks like an out of range Smi to the GC.
-
- // Set up the special root array entries containing the stack limits.
- // These are actually addresses, but the tag makes the GC ignore it.
- roots_[kStackLimitRootIndex] =
- reinterpret_cast<Object*>(
- (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
- roots_[kRealStackLimitRootIndex] =
- reinterpret_cast<Object*>(
- (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
-}
-
-
-void Heap::TearDown() {
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Verify();
- }
-#endif
-
- if (FLAG_print_cumulative_gc_stat) {
- PrintF("\n");
- PrintF("gc_count=%d ", gc_count_);
- PrintF("mark_sweep_count=%d ", ms_count_);
- PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
- PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
- PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
- PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
- get_max_alive_after_gc());
- PrintF("total_marking_time=%.1f ", marking_time());
- PrintF("total_sweeping_time=%.1f ", sweeping_time());
- PrintF("\n\n");
- }
-
- isolate_->global_handles()->TearDown();
-
- external_string_table_.TearDown();
-
- error_object_list_.TearDown();
-
- new_space_.TearDown();
-
- if (old_pointer_space_ != NULL) {
- old_pointer_space_->TearDown();
- delete old_pointer_space_;
- old_pointer_space_ = NULL;
- }
-
- if (old_data_space_ != NULL) {
- old_data_space_->TearDown();
- delete old_data_space_;
- old_data_space_ = NULL;
- }
-
- if (code_space_ != NULL) {
- code_space_->TearDown();
- delete code_space_;
- code_space_ = NULL;
- }
-
- if (map_space_ != NULL) {
- map_space_->TearDown();
- delete map_space_;
- map_space_ = NULL;
- }
-
- if (cell_space_ != NULL) {
- cell_space_->TearDown();
- delete cell_space_;
- cell_space_ = NULL;
- }
-
- if (lo_space_ != NULL) {
- lo_space_->TearDown();
- delete lo_space_;
- lo_space_ = NULL;
- }
-
- store_buffer()->TearDown();
- incremental_marking()->TearDown();
-
- isolate_->memory_allocator()->TearDown();
-
- delete relocation_mutex_;
-}
-
-
-void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
- ASSERT(callback != NULL);
- GCPrologueCallbackPair pair(callback, gc_type);
- ASSERT(!gc_prologue_callbacks_.Contains(pair));
- return gc_prologue_callbacks_.Add(pair);
-}
-
-
-void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
- ASSERT(callback != NULL);
- for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
- if (gc_prologue_callbacks_[i].callback == callback) {
- gc_prologue_callbacks_.Remove(i);
- return;
- }
- }
- UNREACHABLE();
-}
-
-
-void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
- ASSERT(callback != NULL);
- GCEpilogueCallbackPair pair(callback, gc_type);
- ASSERT(!gc_epilogue_callbacks_.Contains(pair));
- return gc_epilogue_callbacks_.Add(pair);
-}
-
-
-void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
- ASSERT(callback != NULL);
- for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
- if (gc_epilogue_callbacks_[i].callback == callback) {
- gc_epilogue_callbacks_.Remove(i);
- return;
- }
- }
- UNREACHABLE();
-}
-
-
-#ifdef DEBUG
-
-class PrintHandleVisitor: public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++)
- PrintF(" handle %p to %p\n",
- reinterpret_cast<void*>(p),
- reinterpret_cast<void*>(*p));
- }
-};
-
-void Heap::PrintHandles() {
- PrintF("Handles:\n");
- PrintHandleVisitor v;
- isolate_->handle_scope_implementer()->Iterate(&v);
-}
-
-#endif
-
-
-Space* AllSpaces::next() {
- switch (counter_++) {
- case NEW_SPACE:
- return heap_->new_space();
- case OLD_POINTER_SPACE:
- return heap_->old_pointer_space();
- case OLD_DATA_SPACE:
- return heap_->old_data_space();
- case CODE_SPACE:
- return heap_->code_space();
- case MAP_SPACE:
- return heap_->map_space();
- case CELL_SPACE:
- return heap_->cell_space();
- case LO_SPACE:
- return heap_->lo_space();
- default:
- return NULL;
- }
-}
-
-
-PagedSpace* PagedSpaces::next() {
- switch (counter_++) {
- case OLD_POINTER_SPACE:
- return heap_->old_pointer_space();
- case OLD_DATA_SPACE:
- return heap_->old_data_space();
- case CODE_SPACE:
- return heap_->code_space();
- case MAP_SPACE:
- return heap_->map_space();
- case CELL_SPACE:
- return heap_->cell_space();
- default:
- return NULL;
- }
-}
-
-
-
-OldSpace* OldSpaces::next() {
- switch (counter_++) {
- case OLD_POINTER_SPACE:
- return heap_->old_pointer_space();
- case OLD_DATA_SPACE:
- return heap_->old_data_space();
- case CODE_SPACE:
- return heap_->code_space();
- default:
- return NULL;
- }
-}
-
-
-SpaceIterator::SpaceIterator(Heap* heap)
- : heap_(heap),
- current_space_(FIRST_SPACE),
- iterator_(NULL),
- size_func_(NULL) {
-}
-
-
-SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
- : heap_(heap),
- current_space_(FIRST_SPACE),
- iterator_(NULL),
- size_func_(size_func) {
-}
-
-
-SpaceIterator::~SpaceIterator() {
- // Delete active iterator if any.
- delete iterator_;
-}
-
-
-bool SpaceIterator::has_next() {
- // Iterate until no more spaces.
- return current_space_ != LAST_SPACE;
-}
-
-
-ObjectIterator* SpaceIterator::next() {
- if (iterator_ != NULL) {
- delete iterator_;
- iterator_ = NULL;
- // Move to the next space
- current_space_++;
- if (current_space_ > LAST_SPACE) {
- return NULL;
- }
- }
-
- // Return iterator for the new current space.
- return CreateIterator();
-}
-
-
-// Create an iterator for the space to iterate.
-ObjectIterator* SpaceIterator::CreateIterator() {
- ASSERT(iterator_ == NULL);
-
- switch (current_space_) {
- case NEW_SPACE:
- iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
- break;
- case OLD_POINTER_SPACE:
- iterator_ =
- new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
- break;
- case OLD_DATA_SPACE:
- iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
- break;
- case CODE_SPACE:
- iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
- break;
- case MAP_SPACE:
- iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
- break;
- case CELL_SPACE:
- iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
- break;
- case LO_SPACE:
- iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
- break;
- }
-
- // Return the newly allocated iterator;
- ASSERT(iterator_ != NULL);
- return iterator_;
-}
-
-
-class HeapObjectsFilter {
- public:
- virtual ~HeapObjectsFilter() {}
- virtual bool SkipObject(HeapObject* object) = 0;
-};
-
-
-class UnreachableObjectsFilter : public HeapObjectsFilter {
- public:
- UnreachableObjectsFilter() {
- MarkReachableObjects();
- }
-
- ~UnreachableObjectsFilter() {
- Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
- }
-
- bool SkipObject(HeapObject* object) {
- MarkBit mark_bit = Marking::MarkBitFrom(object);
- return !mark_bit.Get();
- }
-
- private:
- class MarkingVisitor : public ObjectVisitor {
- public:
- MarkingVisitor() : marking_stack_(10) {}
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if (!(*p)->IsHeapObject()) continue;
- HeapObject* obj = HeapObject::cast(*p);
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- if (!mark_bit.Get()) {
- mark_bit.Set();
- marking_stack_.Add(obj);
- }
- }
- }
-
- void TransitiveClosure() {
- while (!marking_stack_.is_empty()) {
- HeapObject* obj = marking_stack_.RemoveLast();
- obj->Iterate(this);
- }
- }
-
- private:
- List<HeapObject*> marking_stack_;
- };
-
- void MarkReachableObjects() {
- Heap* heap = Isolate::Current()->heap();
- MarkingVisitor visitor;
- heap->IterateRoots(&visitor, VISIT_ALL);
- visitor.TransitiveClosure();
- }
-
- AssertNoAllocation no_alloc;
-};
-
-
-HeapIterator::HeapIterator(Heap* heap)
- : heap_(heap),
- filtering_(HeapIterator::kNoFiltering),
- filter_(NULL) {
- Init();
-}
-
-
-HeapIterator::HeapIterator(Heap* heap,
- HeapIterator::HeapObjectsFiltering filtering)
- : heap_(heap),
- filtering_(filtering),
- filter_(NULL) {
- Init();
-}
-
-
-HeapIterator::~HeapIterator() {
- Shutdown();
-}
-
-
-void HeapIterator::Init() {
- // Start the iteration.
- space_iterator_ = new SpaceIterator(heap_);
- switch (filtering_) {
- case kFilterUnreachable:
- filter_ = new UnreachableObjectsFilter;
- break;
- default:
- break;
- }
- object_iterator_ = space_iterator_->next();
-}
-
-
-void HeapIterator::Shutdown() {
-#ifdef DEBUG
- // Assert that in filtering mode we have iterated through all
- // objects. Otherwise, heap will be left in an inconsistent state.
- if (filtering_ != kNoFiltering) {
- ASSERT(object_iterator_ == NULL);
- }
-#endif
- // Make sure the last iterator is deallocated.
- delete space_iterator_;
- space_iterator_ = NULL;
- object_iterator_ = NULL;
- delete filter_;
- filter_ = NULL;
-}
-
-
-HeapObject* HeapIterator::next() {
- if (filter_ == NULL) return NextObject();
-
- HeapObject* obj = NextObject();
- while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
- return obj;
-}
-
-
-HeapObject* HeapIterator::NextObject() {
- // No iterator means we are done.
- if (object_iterator_ == NULL) return NULL;
-
- if (HeapObject* obj = object_iterator_->next_object()) {
- // If the current iterator has more objects we are fine.
- return obj;
- } else {
- // Go though the spaces looking for one that has objects.
- while (space_iterator_->has_next()) {
- object_iterator_ = space_iterator_->next();
- if (HeapObject* obj = object_iterator_->next_object()) {
- return obj;
- }
- }
- }
- // Done with the last space.
- object_iterator_ = NULL;
- return NULL;
-}
-
-
-void HeapIterator::reset() {
- // Restart the iterator.
- Shutdown();
- Init();
-}
-
-
-#ifdef DEBUG
-
-Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
-
-class PathTracer::MarkVisitor: public ObjectVisitor {
- public:
- explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
- void VisitPointers(Object** start, Object** end) {
- // Scan all HeapObject pointers in [start, end)
- for (Object** p = start; !tracer_->found() && (p < end); p++) {
- if ((*p)->IsHeapObject())
- tracer_->MarkRecursively(p, this);
- }
- }
-
- private:
- PathTracer* tracer_;
-};
-
-
-class PathTracer::UnmarkVisitor: public ObjectVisitor {
- public:
- explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
- void VisitPointers(Object** start, Object** end) {
- // Scan all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- tracer_->UnmarkRecursively(p, this);
- }
- }
-
- private:
- PathTracer* tracer_;
-};
-
-
-void PathTracer::VisitPointers(Object** start, Object** end) {
- bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
- // Visit all HeapObject pointers in [start, end)
- for (Object** p = start; !done && (p < end); p++) {
- if ((*p)->IsHeapObject()) {
- TracePathFrom(p);
- done = ((what_to_find_ == FIND_FIRST) && found_target_);
- }
- }
-}
-
-
-void PathTracer::Reset() {
- found_target_ = false;
- object_stack_.Clear();
-}
-
-
-void PathTracer::TracePathFrom(Object** root) {
- ASSERT((search_target_ == kAnyGlobalObject) ||
- search_target_->IsHeapObject());
- found_target_in_trace_ = false;
- Reset();
-
- MarkVisitor mark_visitor(this);
- MarkRecursively(root, &mark_visitor);
-
- UnmarkVisitor unmark_visitor(this);
- UnmarkRecursively(root, &unmark_visitor);
-
- ProcessResults();
-}
-
-
-static bool SafeIsNativeContext(HeapObject* obj) {
- return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
-}
-
-
-void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- Object* map = obj->map();
-
- if (!map->IsHeapObject()) return; // visited before
-
- if (found_target_in_trace_) return; // stop if target found
- object_stack_.Add(obj);
- if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
- (obj == search_target_)) {
- found_target_in_trace_ = true;
- found_target_ = true;
- return;
- }
-
- bool is_native_context = SafeIsNativeContext(obj);
-
- // not visited yet
- Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
-
- Address map_addr = map_p->address();
-
- obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
-
- // Scan the object body.
- if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
- // This is specialized to scan Context's properly.
- Object** start = reinterpret_cast<Object**>(obj->address() +
- Context::kHeaderSize);
- Object** end = reinterpret_cast<Object**>(obj->address() +
- Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
- mark_visitor->VisitPointers(start, end);
- } else {
- obj->IterateBody(map_p->instance_type(),
- obj->SizeFromMap(map_p),
- mark_visitor);
- }
-
- // Scan the map after the body because the body is a lot more interesting
- // when doing leak detection.
- MarkRecursively(&map, mark_visitor);
-
- if (!found_target_in_trace_) // don't pop if found the target
- object_stack_.RemoveLast();
-}
-
-
-void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- Object* map = obj->map();
-
- if (map->IsHeapObject()) return; // unmarked already
-
- Address map_addr = reinterpret_cast<Address>(map);
-
- map_addr -= kMarkTag;
-
- ASSERT_TAG_ALIGNED(map_addr);
-
- HeapObject* map_p = HeapObject::FromAddress(map_addr);
-
- obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
-
- UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
-
- obj->IterateBody(Map::cast(map_p)->instance_type(),
- obj->SizeFromMap(Map::cast(map_p)),
- unmark_visitor);
-}
-
-
-void PathTracer::ProcessResults() {
- if (found_target_) {
- PrintF("=====================================\n");
- PrintF("==== Path to object ====\n");
- PrintF("=====================================\n\n");
-
- ASSERT(!object_stack_.is_empty());
- for (int i = 0; i < object_stack_.length(); i++) {
- if (i > 0) PrintF("\n |\n |\n V\n\n");
- Object* obj = object_stack_[i];
- obj->Print();
- }
- PrintF("=====================================\n");
- }
-}
-
-
-// Triggers a depth-first traversal of reachable objects from one
-// given root object and finds a path to a specific heap object and
-// prints it.
-void Heap::TracePathToObjectFrom(Object* target, Object* root) {
- PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
- tracer.VisitPointer(&root);
-}
-
-
-// Triggers a depth-first traversal of reachable objects from roots
-// and finds a path to a specific heap object and prints it.
-void Heap::TracePathToObject(Object* target) {
- PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
- IterateRoots(&tracer, VISIT_ONLY_STRONG);
-}
-
-
-// Triggers a depth-first traversal of reachable objects from roots
-// and finds a path to any global object and prints it. Useful for
-// determining the source for leaks of global objects.
-void Heap::TracePathToGlobal() {
- PathTracer tracer(PathTracer::kAnyGlobalObject,
- PathTracer::FIND_ALL,
- VISIT_ALL);
- IterateRoots(&tracer, VISIT_ONLY_STRONG);
-}
-#endif
-
-
-static intptr_t CountTotalHolesSize(Heap* heap) {
- intptr_t holes_size = 0;
- OldSpaces spaces(heap);
- for (OldSpace* space = spaces.next();
- space != NULL;
- space = spaces.next()) {
- holes_size += space->Waste() + space->Available();
- }
- return holes_size;
-}
-
-
-GCTracer::GCTracer(Heap* heap,
- const char* gc_reason,
- const char* collector_reason)
- : start_time_(0.0),
- start_object_size_(0),
- start_memory_size_(0),
- gc_count_(0),
- full_gc_count_(0),
- allocated_since_last_gc_(0),
- spent_in_mutator_(0),
- promoted_objects_size_(0),
- nodes_died_in_new_space_(0),
- nodes_copied_in_new_space_(0),
- nodes_promoted_(0),
- heap_(heap),
- gc_reason_(gc_reason),
- collector_reason_(collector_reason) {
- if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
- start_time_ = OS::TimeCurrentMillis();
- start_object_size_ = heap_->SizeOfObjects();
- start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
-
- for (int i = 0; i < Scope::kNumberOfScopes; i++) {
- scopes_[i] = 0;
- }
-
- in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
-
- allocated_since_last_gc_ =
- heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
-
- if (heap_->last_gc_end_timestamp_ > 0) {
- spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
- }
-
- steps_count_ = heap_->incremental_marking()->steps_count();
- steps_took_ = heap_->incremental_marking()->steps_took();
- longest_step_ = heap_->incremental_marking()->longest_step();
- steps_count_since_last_gc_ =
- heap_->incremental_marking()->steps_count_since_last_gc();
- steps_took_since_last_gc_ =
- heap_->incremental_marking()->steps_took_since_last_gc();
-}
-
-
-GCTracer::~GCTracer() {
- // Printf ONE line iff flag is set.
- if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
-
- bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
-
- heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
- heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
-
- double time = heap_->last_gc_end_timestamp_ - start_time_;
-
- // Update cumulative GC statistics if required.
- if (FLAG_print_cumulative_gc_stat) {
- heap_->total_gc_time_ms_ += time;
- heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
- heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
- heap_->alive_after_last_gc_);
- if (!first_gc) {
- heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
- spent_in_mutator_);
- }
- } else if (FLAG_trace_gc_verbose) {
- heap_->total_gc_time_ms_ += time;
- }
-
- if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
-
- heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
-
- if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
- PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
-
- if (!FLAG_trace_gc_nvp) {
- int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
-
- double end_memory_size_mb =
- static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
-
- PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
- CollectorString(),
- static_cast<double>(start_object_size_) / MB,
- static_cast<double>(start_memory_size_) / MB,
- SizeOfHeapObjects(),
- end_memory_size_mb);
-
- if (external_time > 0) PrintF("%d / ", external_time);
- PrintF("%.1f ms", time);
- if (steps_count_ > 0) {
- if (collector_ == SCAVENGER) {
- PrintF(" (+ %.1f ms in %d steps since last GC)",
- steps_took_since_last_gc_,
- steps_count_since_last_gc_);
- } else {
- PrintF(" (+ %.1f ms in %d steps since start of marking, "
- "biggest step %.1f ms)",
- steps_took_,
- steps_count_,
- longest_step_);
- }
- }
-
- if (gc_reason_ != NULL) {
- PrintF(" [%s]", gc_reason_);
- }
-
- if (collector_reason_ != NULL) {
- PrintF(" [%s]", collector_reason_);
- }
-
- PrintF(".\n");
- } else {
- PrintF("pause=%.1f ", time);
- PrintF("mutator=%.1f ", spent_in_mutator_);
- PrintF("gc=");
- switch (collector_) {
- case SCAVENGER:
- PrintF("s");
- break;
- case MARK_COMPACTOR:
- PrintF("ms");
- break;
- default:
- UNREACHABLE();
- }
- PrintF(" ");
-
- PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
- PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
- PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
- PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
- PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
- PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
- PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
- PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
- PrintF("compaction_ptrs=%.1f ",
- scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
- PrintF("intracompaction_ptrs=%.1f ",
- scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
- PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
-
- PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
- PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
- PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
- in_free_list_or_wasted_before_gc_);
- PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
-
- PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
- PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
- PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
- PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
- PrintF("nodes_promoted=%d ", nodes_promoted_);
-
- if (collector_ == SCAVENGER) {
- PrintF("stepscount=%d ", steps_count_since_last_gc_);
- PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
- } else {
- PrintF("stepscount=%d ", steps_count_);
- PrintF("stepstook=%.1f ", steps_took_);
- PrintF("longeststep=%.1f ", longest_step_);
- }
-
- PrintF("\n");
- }
-
- heap_->PrintShortHeapStatistics();
-}
-
-
-const char* GCTracer::CollectorString() {
- switch (collector_) {
- case SCAVENGER:
- return "Scavenge";
- case MARK_COMPACTOR:
- return "Mark-sweep";
- }
- return "Unknown GC";
-}
-
-
-int KeyedLookupCache::Hash(Map* map, String* name) {
- // Uses only lower 32 bits if pointers are larger.
- uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
- return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
-}
-
-
-int KeyedLookupCache::Lookup(Map* map, String* name) {
- int index = (Hash(map, name) & kHashMask);
- for (int i = 0; i < kEntriesPerBucket; i++) {
- Key& key = keys_[index + i];
- if ((key.map == map) && key.name->Equals(name)) {
- return field_offsets_[index + i];
- }
- }
- return kNotFound;
-}
-
-
-void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
- String* internalized_name;
- if (HEAP->InternalizeStringIfExists(name, &internalized_name)) {
- int index = (Hash(map, internalized_name) & kHashMask);
- // After a GC there will be free slots, so we use them in order (this may
- // help to get the most frequently used one in position 0).
- for (int i = 0; i< kEntriesPerBucket; i++) {
- Key& key = keys_[index];
- Object* free_entry_indicator = NULL;
- if (key.map == free_entry_indicator) {
- key.map = map;
- key.name = internalized_name;
- field_offsets_[index + i] = field_offset;
- return;
- }
- }
- // No free entry found in this bucket, so we move them all down one and
- // put the new entry at position zero.
- for (int i = kEntriesPerBucket - 1; i > 0; i--) {
- Key& key = keys_[index + i];
- Key& key2 = keys_[index + i - 1];
- key = key2;
- field_offsets_[index + i] = field_offsets_[index + i - 1];
- }
-
- // Write the new first entry.
- Key& key = keys_[index];
- key.map = map;
- key.name = internalized_name;
- field_offsets_[index] = field_offset;
- }
-}
-
-
-void KeyedLookupCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
-}
-
-
-void DescriptorLookupCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
-}
-
-
-#ifdef DEBUG
-void Heap::GarbageCollectionGreedyCheck() {
- ASSERT(FLAG_gc_greedy);
- if (isolate_->bootstrapper()->IsActive()) return;
- if (disallow_allocation_failure()) return;
- CollectGarbage(NEW_SPACE);
-}
-#endif
-
-
-TranscendentalCache::SubCache::SubCache(Type t)
- : type_(t),
- isolate_(Isolate::Current()) {
- uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
- uint32_t in1 = 0xffffffffu; // generated by the FPU.
- for (int i = 0; i < kCacheSize; i++) {
- elements_[i].in[0] = in0;
- elements_[i].in[1] = in1;
- elements_[i].output = NULL;
- }
-}
-
-
-void TranscendentalCache::Clear() {
- for (int i = 0; i < kNumberOfCaches; i++) {
- if (caches_[i] != NULL) {
- delete caches_[i];
- caches_[i] = NULL;
- }
- }
-}
-
-
-void ExternalStringTable::CleanUp() {
- int last = 0;
- for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i] == heap_->the_hole_value()) {
- continue;
- }
- if (heap_->InNewSpace(new_space_strings_[i])) {
- new_space_strings_[last++] = new_space_strings_[i];
- } else {
- old_space_strings_.Add(new_space_strings_[i]);
- }
- }
- new_space_strings_.Rewind(last);
- new_space_strings_.Trim();
-
- last = 0;
- for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i] == heap_->the_hole_value()) {
- continue;
- }
- ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
- old_space_strings_[last++] = old_space_strings_[i];
- }
- old_space_strings_.Rewind(last);
- old_space_strings_.Trim();
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Verify();
- }
-#endif
-}
-
-
-void ExternalStringTable::TearDown() {
- for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
- HeapObject *object = HeapObject::cast(new_space_strings_[i]);
- if (!object->IsExternalString())
- heap_->FinalizeExternalString(object);
- }
- for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
- HeapObject *object = HeapObject::cast(old_space_strings_[i]);
- if (!object->IsExternalString())
- heap_->FinalizeExternalString(object);
- }
-
- new_space_strings_.Free();
- old_space_strings_.Free();
-}
-
-
-// Update all references.
-void ErrorObjectList::UpdateReferences() {
- for (int i = 0; i < list_.length(); i++) {
- HeapObject* object = HeapObject::cast(list_[i]);
- MapWord first_word = object->map_word();
- if (first_word.IsForwardingAddress()) {
- list_[i] = first_word.ToForwardingAddress();
- }
- }
-}
-
-
-// Unforwarded objects in new space are dead and removed from the list.
-void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
- if (list_.is_empty()) return;
- if (!nested_) {
- int write_index = 0;
- for (int i = 0; i < list_.length(); i++) {
- MapWord first_word = HeapObject::cast(list_[i])->map_word();
- if (first_word.IsForwardingAddress()) {
- list_[write_index++] = first_word.ToForwardingAddress();
- }
- }
- list_.Rewind(write_index);
- } else {
- // If a GC is triggered during DeferredFormatStackTrace, we do not move
- // objects in the list, just remove dead ones, as to not confuse the
- // loop in DeferredFormatStackTrace.
- for (int i = 0; i < list_.length(); i++) {
- MapWord first_word = HeapObject::cast(list_[i])->map_word();
- list_[i] = first_word.IsForwardingAddress()
- ? first_word.ToForwardingAddress()
- : heap->the_hole_value();
- }
- }
-}
-
-
-void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
- // If formatting the stack trace causes a GC, this method will be
- // recursively called. In that case, skip the recursive call, since
- // the loop modifies the list while iterating over it.
- if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
- nested_ = true;
- HandleScope scope(isolate);
- Handle<String> stack_key = isolate->factory()->stack_string();
- int write_index = 0;
- int budget = kBudgetPerGC;
- for (int i = 0; i < list_.length(); i++) {
- Object* object = list_[i];
- JSFunction* getter_fun;
-
- { AssertNoAllocation assert;
- // Skip possible holes in the list.
- if (object->IsTheHole()) continue;
- if (isolate->heap()->InNewSpace(object) || budget == 0) {
- list_[write_index++] = object;
- continue;
- }
-
- // Check whether the stack property is backed by the original getter.
- LookupResult lookup(isolate);
- JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
- if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
- Object* callback = lookup.GetCallbackObject();
- if (!callback->IsAccessorPair()) continue;
- Object* getter_obj = AccessorPair::cast(callback)->getter();
- if (!getter_obj->IsJSFunction()) continue;
- getter_fun = JSFunction::cast(getter_obj);
- String* key = isolate->heap()->hidden_stack_trace_string();
- if (key != getter_fun->GetHiddenProperty(key)) continue;
- }
-
- budget--;
- HandleScope scope(isolate);
- bool has_exception = false;
-#ifdef DEBUG
- Handle<Map> map(HeapObject::cast(object)->map(), isolate);
-#endif
- Handle<Object> object_handle(object, isolate);
- Handle<Object> getter_handle(getter_fun, isolate);
- Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
- ASSERT(*map == HeapObject::cast(*object_handle)->map());
- if (has_exception) {
- // Hit an exception (most likely a stack overflow).
- // Wrap up this pass and retry after another GC.
- isolate->clear_pending_exception();
- // We use the handle since calling the getter might have caused a GC.
- list_[write_index++] = *object_handle;
- budget = 0;
- }
- }
- list_.Rewind(write_index);
- list_.Trim();
- nested_ = false;
-}
-
-
-void ErrorObjectList::RemoveUnmarked(Heap* heap) {
- for (int i = 0; i < list_.length(); i++) {
- HeapObject* object = HeapObject::cast(list_[i]);
- if (!Marking::MarkBitFrom(object).Get()) {
- list_[i] = heap->the_hole_value();
- }
- }
-}
-
-
-void ErrorObjectList::TearDown() {
- list_.Free();
-}
-
-
-void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
- chunk->set_next_chunk(chunks_queued_for_free_);
- chunks_queued_for_free_ = chunk;
-}
-
-
-void Heap::FreeQueuedChunks() {
- if (chunks_queued_for_free_ == NULL) return;
- MemoryChunk* next;
- MemoryChunk* chunk;
- for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
- next = chunk->next_chunk();
- chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
-
- if (chunk->owner()->identity() == LO_SPACE) {
- // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
- // If FromAnyPointerAddress encounters a slot that belongs to a large
- // chunk queued for deletion it will fail to find the chunk because
- // it try to perform a search in the list of pages owned by of the large
- // object space and queued chunks were detached from that list.
- // To work around this we split large chunk into normal kPageSize aligned
- // pieces and initialize size, owner and flags field of every piece.
- // If FromAnyPointerAddress encounters a slot that belongs to one of
- // these smaller pieces it will treat it as a slot on a normal Page.
- Address chunk_end = chunk->address() + chunk->size();
- MemoryChunk* inner = MemoryChunk::FromAddress(
- chunk->address() + Page::kPageSize);
- MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
- while (inner <= inner_last) {
- // Size of a large chunk is always a multiple of
- // OS::AllocateAlignment() so there is always
- // enough space for a fake MemoryChunk header.
- Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
- // Guard against overflow.
- if (area_end < inner->address()) area_end = chunk_end;
- inner->SetArea(inner->address(), area_end);
- inner->set_size(Page::kPageSize);
- inner->set_owner(lo_space());
- inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
- inner = MemoryChunk::FromAddress(
- inner->address() + Page::kPageSize);
- }
- }
- }
- isolate_->heap()->store_buffer()->Compact();
- isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
- for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
- next = chunk->next_chunk();
- isolate_->memory_allocator()->Free(chunk);
- }
- chunks_queued_for_free_ = NULL;
-}
-
-
-void Heap::RememberUnmappedPage(Address page, bool compacted) {
- uintptr_t p = reinterpret_cast<uintptr_t>(page);
- // Tag the page pointer to make it findable in the dump file.
- if (compacted) {
- p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
- } else {
- p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
- }
- remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
- reinterpret_cast<Address>(p);
- remembered_unmapped_pages_index_++;
- remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
-}
-
-
-void Heap::ClearObjectStats(bool clear_last_time_stats) {
- memset(object_counts_, 0, sizeof(object_counts_));
- memset(object_sizes_, 0, sizeof(object_sizes_));
- if (clear_last_time_stats) {
- memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
- memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
- }
-}
-
-
-static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
-
-
-void Heap::CheckpointObjectStats() {
- ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
- Counters* counters = isolate()->counters();
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- counters->count_of_##name()->Increment( \
- static_cast<int>(object_counts_[name])); \
- counters->count_of_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[name])); \
- counters->size_of_##name()->Increment( \
- static_cast<int>(object_sizes_[name])); \
- counters->size_of_##name()->Decrement( \
- static_cast<int>(object_sizes_last_time_[name]));
- INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
- int index;
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
- counters->count_of_CODE_TYPE_##name()->Increment( \
- static_cast<int>(object_counts_[index])); \
- counters->count_of_CODE_TYPE_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[index])); \
- counters->size_of_CODE_TYPE_##name()->Increment( \
- static_cast<int>(object_sizes_[index])); \
- counters->size_of_CODE_TYPE_##name()->Decrement( \
- static_cast<int>(object_sizes_last_time_[index]));
- CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
- counters->count_of_FIXED_ARRAY_##name()->Increment( \
- static_cast<int>(object_counts_[index])); \
- counters->count_of_FIXED_ARRAY_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[index])); \
- counters->size_of_FIXED_ARRAY_##name()->Increment( \
- static_cast<int>(object_sizes_[index])); \
- counters->size_of_FIXED_ARRAY_##name()->Decrement( \
- static_cast<int>(object_sizes_last_time_[index]));
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-
- memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
- memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
- ClearObjectStats();
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/heap.h b/src/3rdparty/v8/src/heap.h
deleted file mode 100644
index d690e18..0000000
--- a/src/3rdparty/v8/src/heap.h
+++ /dev/null
@@ -1,3009 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HEAP_H_
-#define V8_HEAP_H_
-
-#include <math.h>
-
-#include "allocation.h"
-#include "globals.h"
-#include "incremental-marking.h"
-#include "list.h"
-#include "mark-compact.h"
-#include "objects-visiting.h"
-#include "spaces.h"
-#include "splay-tree-inl.h"
-#include "store-buffer.h"
-#include "v8-counters.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-// Defines all the roots in Heap.
-#define STRONG_ROOT_LIST(V) \
- V(Map, byte_array_map, ByteArrayMap) \
- V(Map, free_space_map, FreeSpaceMap) \
- V(Map, one_pointer_filler_map, OnePointerFillerMap) \
- V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
- /* Cluster the most popular ones in a few cache lines here at the top. */ \
- V(Smi, store_buffer_top, StoreBufferTop) \
- V(Oddball, undefined_value, UndefinedValue) \
- V(Oddball, the_hole_value, TheHoleValue) \
- V(Oddball, null_value, NullValue) \
- V(Oddball, true_value, TrueValue) \
- V(Oddball, false_value, FalseValue) \
- V(Map, global_property_cell_map, GlobalPropertyCellMap) \
- V(Map, shared_function_info_map, SharedFunctionInfoMap) \
- V(Map, meta_map, MetaMap) \
- V(Map, heap_number_map, HeapNumberMap) \
- V(Map, native_context_map, NativeContextMap) \
- V(Map, fixed_array_map, FixedArrayMap) \
- V(Map, code_map, CodeMap) \
- V(Map, scope_info_map, ScopeInfoMap) \
- V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
- V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
- V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
- V(Map, hash_table_map, HashTableMap) \
- V(FixedArray, empty_fixed_array, EmptyFixedArray) \
- V(ByteArray, empty_byte_array, EmptyByteArray) \
- V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
- V(Smi, stack_limit, StackLimit) \
- V(Oddball, arguments_marker, ArgumentsMarker) \
- /* The first 32 roots above this line should be boring from a GC point of */ \
- /* view. This means they are never in new space and never on a page that */ \
- /* is being compacted. */ \
- V(FixedArray, number_string_cache, NumberStringCache) \
- V(Object, instanceof_cache_function, InstanceofCacheFunction) \
- V(Object, instanceof_cache_map, InstanceofCacheMap) \
- V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
- V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
- V(FixedArray, string_split_cache, StringSplitCache) \
- V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
- V(Object, termination_exception, TerminationException) \
- V(Smi, hash_seed, HashSeed) \
- V(Map, symbol_map, SymbolMap) \
- V(Map, string_map, StringMap) \
- V(Map, ascii_string_map, AsciiStringMap) \
- V(Map, cons_string_map, ConsStringMap) \
- V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
- V(Map, sliced_string_map, SlicedStringMap) \
- V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \
- V(Map, external_string_map, ExternalStringMap) \
- V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \
- V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
- V(Map, short_external_string_map, ShortExternalStringMap) \
- V(Map, \
- short_external_string_with_ascii_data_map, \
- ShortExternalStringWithAsciiDataMap) \
- V(Map, internalized_string_map, InternalizedStringMap) \
- V(Map, ascii_internalized_string_map, AsciiInternalizedStringMap) \
- V(Map, cons_internalized_string_map, ConsInternalizedStringMap) \
- V(Map, cons_ascii_internalized_string_map, ConsAsciiInternalizedStringMap) \
- V(Map, \
- external_internalized_string_map, \
- ExternalInternalizedStringMap) \
- V(Map, \
- external_internalized_string_with_ascii_data_map, \
- ExternalInternalizedStringWithAsciiDataMap) \
- V(Map, \
- external_ascii_internalized_string_map, \
- ExternalAsciiInternalizedStringMap) \
- V(Map, \
- short_external_internalized_string_map, \
- ShortExternalInternalizedStringMap) \
- V(Map, \
- short_external_internalized_string_with_ascii_data_map, \
- ShortExternalInternalizedStringWithAsciiDataMap) \
- V(Map, \
- short_external_ascii_internalized_string_map, \
- ShortExternalAsciiInternalizedStringMap) \
- V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
- V(Map, undetectable_string_map, UndetectableStringMap) \
- V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
- V(Map, external_pixel_array_map, ExternalPixelArrayMap) \
- V(Map, external_byte_array_map, ExternalByteArrayMap) \
- V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
- V(Map, external_short_array_map, ExternalShortArrayMap) \
- V(Map, external_unsigned_short_array_map, ExternalUnsignedShortArrayMap) \
- V(Map, external_int_array_map, ExternalIntArrayMap) \
- V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap) \
- V(Map, external_float_array_map, ExternalFloatArrayMap) \
- V(Map, external_double_array_map, ExternalDoubleArrayMap) \
- V(Map, non_strict_arguments_elements_map, NonStrictArgumentsElementsMap) \
- V(Map, function_context_map, FunctionContextMap) \
- V(Map, catch_context_map, CatchContextMap) \
- V(Map, with_context_map, WithContextMap) \
- V(Map, block_context_map, BlockContextMap) \
- V(Map, module_context_map, ModuleContextMap) \
- V(Map, global_context_map, GlobalContextMap) \
- V(Map, oddball_map, OddballMap) \
- V(Map, message_object_map, JSMessageObjectMap) \
- V(Map, foreign_map, ForeignMap) \
- V(HeapNumber, nan_value, NanValue) \
- V(HeapNumber, infinity_value, InfinityValue) \
- V(HeapNumber, minus_zero_value, MinusZeroValue) \
- V(Map, neander_map, NeanderMap) \
- V(JSObject, message_listeners, MessageListeners) \
- V(Foreign, prototype_accessors, PrototypeAccessors) \
- V(UnseededNumberDictionary, code_stubs, CodeStubs) \
- V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
- V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
- V(Code, js_entry_code, JsEntryCode) \
- V(Code, js_construct_entry_code, JsConstructEntryCode) \
- V(FixedArray, natives_source_cache, NativesSourceCache) \
- V(Object, last_script_id, LastScriptId) \
- V(Script, empty_script, EmptyScript) \
- V(Smi, real_stack_limit, RealStackLimit) \
- V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
- V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
- V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
- V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
- V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
- V(JSObject, observation_state, ObservationState) \
- V(Map, external_map, ExternalMap)
-
-#define ROOT_LIST(V) \
- STRONG_ROOT_LIST(V) \
- V(StringTable, string_table, StringTable)
-
-#define INTERNALIZED_STRING_LIST(V) \
- V(Array_string, "Array") \
- V(Object_string, "Object") \
- V(proto_string, "__proto__") \
- V(StringImpl_string, "StringImpl") \
- V(arguments_string, "arguments") \
- V(Arguments_string, "Arguments") \
- V(call_string, "call") \
- V(apply_string, "apply") \
- V(caller_string, "caller") \
- V(boolean_string, "boolean") \
- V(Boolean_string, "Boolean") \
- V(callee_string, "callee") \
- V(constructor_string, "constructor") \
- V(code_string, ".code") \
- V(result_string, ".result") \
- V(dot_for_string, ".for.") \
- V(catch_var_string, ".catch-var") \
- V(empty_string, "") \
- V(eval_string, "eval") \
- V(function_string, "function") \
- V(length_string, "length") \
- V(module_string, "module") \
- V(name_string, "name") \
- V(native_string, "native") \
- V(null_string, "null") \
- V(number_string, "number") \
- V(Number_string, "Number") \
- V(nan_string, "NaN") \
- V(RegExp_string, "RegExp") \
- V(source_string, "source") \
- V(global_string, "global") \
- V(ignore_case_string, "ignoreCase") \
- V(multiline_string, "multiline") \
- V(input_string, "input") \
- V(index_string, "index") \
- V(last_index_string, "lastIndex") \
- V(object_string, "object") \
- V(prototype_string, "prototype") \
- V(string_string, "string") \
- V(String_string, "String") \
- V(Date_string, "Date") \
- V(Error_string, "Error") \
- V(this_string, "this") \
- V(to_string_string, "toString") \
- V(char_at_string, "CharAt") \
- V(undefined_string, "undefined") \
- V(value_of_string, "valueOf") \
- V(stack_string, "stack") \
- V(InitializeVarGlobal_string, "InitializeVarGlobal") \
- V(InitializeConstGlobal_string, "InitializeConstGlobal") \
- V(KeyedLoadElementMonomorphic_string, \
- "KeyedLoadElementMonomorphic") \
- V(KeyedStoreElementMonomorphic_string, \
- "KeyedStoreElementMonomorphic") \
- V(KeyedStoreAndGrowElementMonomorphic_string, \
- "KeyedStoreAndGrowElementMonomorphic") \
- V(stack_overflow_string, "kStackOverflowBoilerplate") \
- V(illegal_access_string, "illegal access") \
- V(out_of_memory_string, "out-of-memory") \
- V(illegal_execution_state_string, "illegal execution state") \
- V(get_string, "get") \
- V(set_string, "set") \
- V(map_field_string, "%map") \
- V(elements_field_string, "%elements") \
- V(length_field_string, "%length") \
- V(function_class_string, "Function") \
- V(illegal_argument_string, "illegal argument") \
- V(MakeReferenceError_string, "MakeReferenceError") \
- V(MakeSyntaxError_string, "MakeSyntaxError") \
- V(MakeTypeError_string, "MakeTypeError") \
- V(invalid_lhs_in_assignment_string, "invalid_lhs_in_assignment") \
- V(invalid_lhs_in_for_in_string, "invalid_lhs_in_for_in") \
- V(invalid_lhs_in_postfix_op_string, "invalid_lhs_in_postfix_op") \
- V(invalid_lhs_in_prefix_op_string, "invalid_lhs_in_prefix_op") \
- V(illegal_return_string, "illegal_return") \
- V(illegal_break_string, "illegal_break") \
- V(illegal_continue_string, "illegal_continue") \
- V(unknown_label_string, "unknown_label") \
- V(redeclaration_string, "redeclaration") \
- V(failure_string, "<failure>") \
- V(space_string, " ") \
- V(exec_string, "exec") \
- V(zero_string, "0") \
- V(global_eval_string, "GlobalEval") \
- V(identity_hash_string, "v8::IdentityHash") \
- V(closure_string, "(closure)") \
- V(use_strict_string, "use strict") \
- V(dot_string, ".") \
- V(anonymous_function_string, "(anonymous function)") \
- V(compare_ic_string, "==") \
- V(strict_compare_ic_string, "===") \
- V(infinity_string, "Infinity") \
- V(minus_infinity_string, "-Infinity") \
- V(hidden_stack_trace_string, "v8::hidden_stack_trace") \
- V(query_colon_string, "(?:)") \
-
-// Forward declarations.
-class GCTracer;
-class HeapStats;
-class Isolate;
-class WeakObjectRetainer;
-
-
-typedef HeapObject* (*ExternalStringTableUpdaterCallback)(Heap* heap,
- Object** pointer);
-
-class StoreBufferRebuilder {
- public:
- explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
- : store_buffer_(store_buffer) {
- }
-
- void Callback(MemoryChunk* page, StoreBufferEvent event);
-
- private:
- StoreBuffer* store_buffer_;
-
- // We record in this variable how full the store buffer was when we started
- // iterating over the current page, finding pointers to new space. If the
- // store buffer overflows again we can exempt the page from the store buffer
- // by rewinding to this point instead of having to search the store buffer.
- Object*** start_of_current_page_;
- // The current page we are scanning in the store buffer iterator.
- MemoryChunk* current_page_;
-};
-
-
-
-// A queue of objects promoted during scavenge. Each object is accompanied
-// by it's size to avoid dereferencing a map pointer for scanning.
-class PromotionQueue {
- public:
- explicit PromotionQueue(Heap* heap)
- : front_(NULL),
- rear_(NULL),
- limit_(NULL),
- emergency_stack_(0),
- heap_(heap) { }
-
- void Initialize();
-
- void Destroy() {
- ASSERT(is_empty());
- delete emergency_stack_;
- emergency_stack_ = NULL;
- }
-
- inline void ActivateGuardIfOnTheSamePage();
-
- Page* GetHeadPage() {
- return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
- }
-
- void SetNewLimit(Address limit) {
- if (!guard_) {
- return;
- }
-
- ASSERT(GetHeadPage() == Page::FromAllocationTop(limit));
- limit_ = reinterpret_cast<intptr_t*>(limit);
-
- if (limit_ <= rear_) {
- return;
- }
-
- RelocateQueueHead();
- }
-
- bool is_empty() {
- return (front_ == rear_) &&
- (emergency_stack_ == NULL || emergency_stack_->length() == 0);
- }
-
- inline void insert(HeapObject* target, int size);
-
- void remove(HeapObject** target, int* size) {
- ASSERT(!is_empty());
- if (front_ == rear_) {
- Entry e = emergency_stack_->RemoveLast();
- *target = e.obj_;
- *size = e.size_;
- return;
- }
-
- if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
- NewSpacePage* front_page =
- NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
- ASSERT(!front_page->prev_page()->is_anchor());
- front_ =
- reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
- }
- *target = reinterpret_cast<HeapObject*>(*(--front_));
- *size = static_cast<int>(*(--front_));
- // Assert no underflow.
- SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
- reinterpret_cast<Address>(front_));
- }
-
- private:
- // The front of the queue is higher in the memory page chain than the rear.
- intptr_t* front_;
- intptr_t* rear_;
- intptr_t* limit_;
-
- bool guard_;
-
- static const int kEntrySizeInWords = 2;
-
- struct Entry {
- Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }
-
- HeapObject* obj_;
- int size_;
- };
- List<Entry>* emergency_stack_;
-
- Heap* heap_;
-
- void RelocateQueueHead();
-
- DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
-};
-
-
-typedef void (*ScavengingCallback)(Map* map,
- HeapObject** slot,
- HeapObject* object);
-
-
-// External strings table is a place where all external strings are
-// registered. We need to keep track of such strings to properly
-// finalize them.
-// The ExternalStringTable can contain both strings and objects with
-// external resources. It was not renamed to make the patch simpler.
-class ExternalStringTable {
- public:
- // Registers an external string.
- inline void AddString(String* string);
- // Registers an external object.
- inline void AddObject(HeapObject* string);
-
- inline void Iterate(ObjectVisitor* v);
-
- // Restores internal invariant and gets rid of collected strings.
- // Must be called after each Iterate() that modified the strings.
- void CleanUp();
-
- // Destroys all allocated memory.
- void TearDown();
-
- private:
- ExternalStringTable() { }
-
- friend class Heap;
-
- inline void Verify();
-
- inline void AddOldObject(HeapObject* string);
-
- // Notifies the table that only a prefix of the new list is valid.
- inline void ShrinkNewObjects(int position);
-
- // To speed up scavenge collections new space string are kept
- // separate from old space strings.
- List<Object*> new_space_strings_;
- List<Object*> old_space_strings_;
-
- Heap* heap_;
-
- DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
-};
-
-
-// The stack property of an error object is implemented as a getter that
-// formats the attached raw stack trace into a string. This raw stack trace
-// keeps code and function objects alive until the getter is called the first
-// time. To release those objects, we call the getter after each GC for
-// newly tenured error objects that are kept in a list.
-class ErrorObjectList {
- public:
- inline void Add(JSObject* object);
-
- inline void Iterate(ObjectVisitor* v);
-
- void TearDown();
-
- void RemoveUnmarked(Heap* heap);
-
- void DeferredFormatStackTrace(Isolate* isolate);
-
- void UpdateReferences();
-
- void UpdateReferencesInNewSpace(Heap* heap);
-
- private:
- static const int kBudgetPerGC = 16;
-
- ErrorObjectList() : nested_(false) { }
-
- friend class Heap;
-
- List<Object*> list_;
- bool nested_;
-
- DISALLOW_COPY_AND_ASSIGN(ErrorObjectList);
-};
-
-
-enum ArrayStorageAllocationMode {
- DONT_INITIALIZE_ARRAY_ELEMENTS,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
-};
-
-class Heap {
- public:
- // Configure heap size before setup. Return false if the heap has been
- // set up already.
- bool ConfigureHeap(int max_semispace_size,
- intptr_t max_old_gen_size,
- intptr_t max_executable_size);
- bool ConfigureHeapDefault();
-
- // Prepares the heap, setting up memory areas that are needed in the isolate
- // without actually creating any objects.
- bool SetUp();
-
- // Bootstraps the object heap with the core set of objects required to run.
- // Returns whether it succeeded.
- bool CreateHeapObjects();
-
- // Destroys all memory allocated by the heap.
- void TearDown();
-
- // Set the stack limit in the roots_ array. Some architectures generate
- // code that looks here, because it is faster than loading from the static
- // jslimit_/real_jslimit_ variable in the StackGuard.
- void SetStackLimits();
-
- // Returns whether SetUp has been called.
- bool HasBeenSetUp();
-
- // Returns the maximum amount of memory reserved for the heap. For
- // the young generation, we reserve 4 times the amount needed for a
- // semi space. The young generation consists of two semi spaces and
- // we reserve twice the amount needed for those in order to ensure
- // that new space can be aligned to its size.
- intptr_t MaxReserved() {
- return 4 * reserved_semispace_size_ + max_old_generation_size_;
- }
- int MaxSemiSpaceSize() { return max_semispace_size_; }
- int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
- int InitialSemiSpaceSize() { return initial_semispace_size_; }
- intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
- intptr_t MaxExecutableSize() { return max_executable_size_; }
-
- // Returns the capacity of the heap in bytes w/o growing. Heap grows when
- // more spaces are needed until it reaches the limit.
- intptr_t Capacity();
-
- // Returns the amount of memory currently committed for the heap.
- intptr_t CommittedMemory();
-
- // Returns the amount of executable memory currently committed for the heap.
- intptr_t CommittedMemoryExecutable();
-
- // Returns the amount of phyical memory currently committed for the heap.
- size_t CommittedPhysicalMemory();
-
- // Returns the available bytes in space w/o growing.
- // Heap doesn't guarantee that it can allocate an object that requires
- // all available bytes. Check MaxHeapObjectSize() instead.
- intptr_t Available();
-
- // Returns of size of all objects residing in the heap.
- intptr_t SizeOfObjects();
-
- // Return the starting address and a mask for the new space. And-masking an
- // address with the mask will result in the start address of the new space
- // for all addresses in either semispace.
- Address NewSpaceStart() { return new_space_.start(); }
- uintptr_t NewSpaceMask() { return new_space_.mask(); }
- Address NewSpaceTop() { return new_space_.top(); }
-
- NewSpace* new_space() { return &new_space_; }
- OldSpace* old_pointer_space() { return old_pointer_space_; }
- OldSpace* old_data_space() { return old_data_space_; }
- OldSpace* code_space() { return code_space_; }
- MapSpace* map_space() { return map_space_; }
- CellSpace* cell_space() { return cell_space_; }
- LargeObjectSpace* lo_space() { return lo_space_; }
- PagedSpace* paged_space(int idx) {
- switch (idx) {
- case OLD_POINTER_SPACE:
- return old_pointer_space();
- case OLD_DATA_SPACE:
- return old_data_space();
- case MAP_SPACE:
- return map_space();
- case CELL_SPACE:
- return cell_space();
- case CODE_SPACE:
- return code_space();
- case NEW_SPACE:
- case LO_SPACE:
- UNREACHABLE();
- }
- return NULL;
- }
-
- bool always_allocate() { return always_allocate_scope_depth_ != 0; }
- Address always_allocate_scope_depth_address() {
- return reinterpret_cast<Address>(&always_allocate_scope_depth_);
- }
- bool linear_allocation() {
- return linear_allocation_scope_depth_ != 0;
- }
-
- Address* NewSpaceAllocationTopAddress() {
- return new_space_.allocation_top_address();
- }
- Address* NewSpaceAllocationLimitAddress() {
- return new_space_.allocation_limit_address();
- }
-
- // Uncommit unused semi space.
- bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
-
- // Allocates and initializes a new JavaScript object based on a
- // constructor.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSObject(
- JSFunction* constructor,
- PretenureFlag pretenure = NOT_TENURED);
-
- MUST_USE_RESULT MaybeObject* AllocateJSObjectWithAllocationSite(
- JSFunction* constructor,
- Handle<Object> allocation_site_info_payload);
-
- MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
- ScopeInfo* scope_info);
-
- // Allocate a JSArray with no elements
- MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
- ElementsKind elements_kind,
- PretenureFlag pretenure = NOT_TENURED) {
- return AllocateJSArrayAndStorage(elements_kind, 0, 0,
- DONT_INITIALIZE_ARRAY_ELEMENTS,
- pretenure);
- }
-
- inline MUST_USE_RESULT MaybeObject* AllocateEmptyJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<Object> allocation_site_payload);
-
- // Allocate a JSArray with a specified length but elements that are left
- // uninitialized.
- MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage(
- ElementsKind elements_kind,
- int length,
- int capacity,
- ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
- PretenureFlag pretenure = NOT_TENURED);
-
- MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorageWithAllocationSite(
- ElementsKind elements_kind,
- int length,
- int capacity,
- Handle<Object> allocation_site_payload,
- ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
-
- MUST_USE_RESULT MaybeObject* AllocateJSArrayStorage(
- JSArray* array,
- int length,
- int capacity,
- ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
-
- // Allocate a JSArray with no elements
- MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
- FixedArrayBase* array_base,
- ElementsKind elements_kind,
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates and initializes a new global object based on a constructor.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateGlobalObject(JSFunction* constructor);
-
- // Returns a deep copy of the JavaScript object.
- // Properties and elements are copied too.
- // Returns failure if allocation failed.
- MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source);
-
- MUST_USE_RESULT MaybeObject* CopyJSObjectWithAllocationSite(JSObject* source);
-
- // Allocates the function prototype.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFunctionPrototype(JSFunction* function);
-
- // Allocates a Harmony proxy or function proxy.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSProxy(Object* handler,
- Object* prototype);
-
- MUST_USE_RESULT MaybeObject* AllocateJSFunctionProxy(Object* handler,
- Object* call_trap,
- Object* construct_trap,
- Object* prototype);
-
- // Reinitialize a JSReceiver into an (empty) JS object of respective type and
- // size, but keeping the original prototype. The receiver must have at least
- // the size of the new object. The object is reinitialized and behaves as an
- // object that has been freshly allocated.
- // Returns failure if an error occured, otherwise object.
- MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
- InstanceType type,
- int size);
-
- // Reinitialize an JSGlobalProxy based on a constructor. The object
- // must have the same size as objects allocated using the
- // constructor. The object is reinitialized and behaves as an
- // object that has been freshly allocated using the constructor.
- MUST_USE_RESULT MaybeObject* ReinitializeJSGlobalProxy(
- JSFunction* constructor, JSGlobalProxy* global);
-
- // Allocates and initializes a new JavaScript object based on a map.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
- Map* map, PretenureFlag pretenure = NOT_TENURED);
-
- MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMapWithAllocationSite(
- Map* map, Handle<Object> allocation_site_info_payload);
-
- // Allocates a heap object based on the map.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space);
-
- MUST_USE_RESULT MaybeObject* AllocateWithAllocationSite(Map* map,
- AllocationSpace space, Handle<Object> allocation_site_info_payload);
-
- // Allocates a JS Map in the heap.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateMap(
- InstanceType instance_type,
- int instance_size,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
-
- // Allocates a partial map for bootstrapping.
- MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
- int instance_size);
-
- // Allocate a map for the specified function
- MUST_USE_RESULT MaybeObject* AllocateInitialMap(JSFunction* fun);
-
- // Allocates an empty code cache.
- MUST_USE_RESULT MaybeObject* AllocateCodeCache();
-
- // Allocates a serialized scope info.
- MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);
-
- // Allocates an External object for v8's external API.
- MUST_USE_RESULT MaybeObject* AllocateExternal(void* value);
-
- // Allocates an empty PolymorphicCodeCache.
- MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
-
- // Allocates a pre-tenured empty AccessorPair.
- MUST_USE_RESULT MaybeObject* AllocateAccessorPair();
-
- // Allocates an empty TypeFeedbackInfo.
- MUST_USE_RESULT MaybeObject* AllocateTypeFeedbackInfo();
-
- // Allocates an AliasedArgumentsEntry.
- MUST_USE_RESULT MaybeObject* AllocateAliasedArgumentsEntry(int slot);
-
- // Clear the Instanceof cache (used when a prototype changes).
- inline void ClearInstanceofCache();
-
- // For use during bootup.
- void RepairFreeListsAfterBoot();
-
- // Allocates and fully initializes a String. There are two String
- // encodings: ASCII and two byte. One should choose between the three string
- // allocation functions based on the encoding of the string buffer used to
- // initialized the string.
- // - ...FromAscii initializes the string from a buffer that is ASCII
- // encoded (it does not check that the buffer is ASCII encoded) and the
- // result will be ASCII encoded.
- // - ...FromUTF8 initializes the string from a buffer that is UTF-8
- // encoded. If the characters are all single-byte characters, the
- // result will be ASCII encoded, otherwise it will converted to two
- // byte.
- // - ...FromTwoByte initializes the string from a buffer that is two-byte
- // encoded. If the characters are all single-byte characters, the
- // result will be converted to ASCII, otherwise it will be left as
- // two-byte.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateStringFromOneByte(
- Vector<const uint8_t> str,
- PretenureFlag pretenure = NOT_TENURED);
- // TODO(dcarney): remove this function.
- MUST_USE_RESULT inline MaybeObject* AllocateStringFromOneByte(
- Vector<const char> str,
- PretenureFlag pretenure = NOT_TENURED) {
- return AllocateStringFromOneByte(Vector<const uint8_t>::cast(str),
- pretenure);
- }
- MUST_USE_RESULT inline MaybeObject* AllocateStringFromUtf8(
- Vector<const char> str,
- PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateStringFromUtf8Slow(
- Vector<const char> str,
- int non_ascii_start,
- PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateStringFromTwoByte(
- Vector<const uc16> str,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates an internalized string in old space based on the character
- // stream. Returns Failure::RetryAfterGC(requested_bytes, space) if the
- // allocation failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* AllocateInternalizedStringFromUtf8(
- Vector<const char> str,
- int chars,
- uint32_t hash_field);
-
- MUST_USE_RESULT inline MaybeObject* AllocateOneByteInternalizedString(
- Vector<const uint8_t> str,
- uint32_t hash_field);
-
- MUST_USE_RESULT inline MaybeObject* AllocateTwoByteInternalizedString(
- Vector<const uc16> str,
- uint32_t hash_field);
-
- template<typename T>
- static inline bool IsOneByte(T t, int chars);
-
- template<typename T>
- MUST_USE_RESULT inline MaybeObject* AllocateInternalizedStringImpl(
- T t, int chars, uint32_t hash_field);
-
- template<bool is_one_byte, typename T>
- MUST_USE_RESULT MaybeObject* AllocateInternalizedStringImpl(
- T t, int chars, uint32_t hash_field);
-
- // Allocates and partially initializes a String. There are two String
- // encodings: ASCII and two byte. These functions allocate a string of the
- // given length and set its map and length fields. The characters of the
- // string are uninitialized.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateRawOneByteString(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateRawTwoByteString(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Computes a single character string where the character has code.
- // A cache is used for ASCII codes.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed. Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* LookupSingleCharacterStringFromCode(
- uint16_t code);
-
- // Allocate a byte array of the specified length
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateByteArray(int length,
- PretenureFlag pretenure);
-
- // Allocate a non-tenured byte array of the specified length
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateByteArray(int length);
-
- // Allocates an external array of the specified length and type.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateExternalArray(
- int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure);
-
- // Allocate a symbol.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateSymbol(
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a tenured JS global property cell.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSGlobalPropertyCell(Object* value);
-
- // Allocates a fixed array initialized with undefined values
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length,
- PretenureFlag pretenure);
- // Allocates a fixed array initialized with undefined values
- MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length);
-
- // Allocates an uninitialized fixed array. It must be filled by the caller.
- //
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedArray(int length);
-
- // Move len elements within a given array from src_index index to dst_index
- // index.
- void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
-
- // Make a copy of src and return it. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT inline MaybeObject* CopyFixedArray(FixedArray* src);
-
- // Make a copy of src, set the map, and return the copy. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT MaybeObject* CopyFixedArrayWithMap(FixedArray* src, Map* map);
-
- // Make a copy of src and return it. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT inline MaybeObject* CopyFixedDoubleArray(
- FixedDoubleArray* src);
-
- // Make a copy of src, set the map, and return the copy. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT MaybeObject* CopyFixedDoubleArrayWithMap(
- FixedDoubleArray* src, Map* map);
-
- // Allocates a fixed array initialized with the hole values.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFixedArrayWithHoles(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray(
- int length,
- PretenureFlag pretenure);
-
- // Allocates a fixed double array with uninitialized values. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedDoubleArray(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a fixed double array with hole values. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFixedDoubleArrayWithHoles(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // AllocateHashTable is identical to AllocateFixedArray except
- // that the resulting object has hash_table_map as map.
- MUST_USE_RESULT MaybeObject* AllocateHashTable(
- int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a native (but otherwise uninitialized) context.
- MUST_USE_RESULT MaybeObject* AllocateNativeContext();
-
- // Allocate a global context.
- MUST_USE_RESULT MaybeObject* AllocateGlobalContext(JSFunction* function,
- ScopeInfo* scope_info);
-
- // Allocate a module context.
- MUST_USE_RESULT MaybeObject* AllocateModuleContext(ScopeInfo* scope_info);
-
- // Allocate a function context.
- MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
- JSFunction* function);
-
- // Allocate a catch context.
- MUST_USE_RESULT MaybeObject* AllocateCatchContext(JSFunction* function,
- Context* previous,
- String* name,
- Object* thrown_object);
- // Allocate a 'with' context.
- MUST_USE_RESULT MaybeObject* AllocateWithContext(JSFunction* function,
- Context* previous,
- JSObject* extension);
-
- // Allocate a block context.
- MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function,
- Context* previous,
- ScopeInfo* info);
-
- // Allocates a new utility object in the old generation.
- MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);
-
- // Allocates a function initialized with a shared part.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFunction(
- Map* function_map,
- SharedFunctionInfo* shared,
- Object* prototype,
- PretenureFlag pretenure = TENURED);
-
- // Arguments object size.
- static const int kArgumentsObjectSize =
- JSObject::kHeaderSize + 2 * kPointerSize;
- // Strict mode arguments has no callee so it is smaller.
- static const int kArgumentsObjectSizeStrict =
- JSObject::kHeaderSize + 1 * kPointerSize;
- // Indicies for direct access into argument objects.
- static const int kArgumentsLengthIndex = 0;
- // callee is only valid in non-strict mode.
- static const int kArgumentsCalleeIndex = 1;
-
- // Allocates an arguments object - optionally with an elements array.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateArgumentsObject(
- Object* callee, int length);
-
- // Same as NewNumberFromDouble, but may return a preallocated/immutable
- // number object (e.g., minus_zero_value_, nan_value_)
- MUST_USE_RESULT MaybeObject* NumberFromDouble(
- double value, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocated a HeapNumber from value.
- MUST_USE_RESULT MaybeObject* AllocateHeapNumber(
- double value,
- PretenureFlag pretenure);
- // pretenure = NOT_TENURED
- MUST_USE_RESULT MaybeObject* AllocateHeapNumber(double value);
-
- // Converts an int into either a Smi or a HeapNumber object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* NumberFromInt32(
- int32_t value, PretenureFlag pretenure = NOT_TENURED);
-
- // Converts an int into either a Smi or a HeapNumber object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* NumberFromUint32(
- uint32_t value, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a new foreign object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateForeign(
- Address address, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a new SharedFunctionInfo object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateSharedFunctionInfo(Object* name);
-
- // Allocates a new JSMessageObject object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note that this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSMessageObject(
- String* type,
- JSArray* arguments,
- int start_position,
- int end_position,
- Object* script,
- Object* stack_trace,
- Object* stack_frames);
-
- // Allocates a new cons string object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateConsString(String* first,
- String* second);
-
- // Allocates a new sub string object which is a substring of an underlying
- // string buffer stretching from the index start (inclusive) to the index
- // end (exclusive).
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateSubString(
- String* buffer,
- int start,
- int end,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a new external string object, which is backed by a string
- // resource that resides outside the V8 heap.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
- const ExternalAsciiString::Resource* resource);
- MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
- const ExternalTwoByteString::Resource* resource);
-
- // Finalizes an external string by deleting the associated external
- // data and clearing the resource pointer.
- inline void FinalizeExternalString(HeapObject* string);
-
- // Allocates an uninitialized object. The memory is non-executable if the
- // hardware and OS allow.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes,
- AllocationSpace space,
- AllocationSpace retry_space);
-
- // Initialize a filler object to keep the ability to iterate over the heap
- // when shortening objects.
- void CreateFillerObjectAt(Address addr, int size);
-
- // Makes a new native code object
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed. On success, the pointer to the Code object is stored in the
- // self_reference. This allows generated code to reference its own Code
- // object by containing this pointer.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* CreateCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference,
- bool immovable = false);
-
- MUST_USE_RESULT MaybeObject* CopyCode(Code* code);
-
- // Copy the code and scope info part of the code object, but insert
- // the provided data as the relocation information.
- MUST_USE_RESULT MaybeObject* CopyCode(Code* code, Vector<byte> reloc_info);
-
- // Finds the internalized copy for string in the string table.
- // If not found, a new string is added to the table and returned.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* InternalizeUtf8String(Vector<const char> str);
- MUST_USE_RESULT MaybeObject* InternalizeUtf8String(const char* str) {
- return InternalizeUtf8String(CStrVector(str));
- }
- MUST_USE_RESULT MaybeObject* InternalizeOneByteString(
- Vector<const uint8_t> str);
- MUST_USE_RESULT MaybeObject* InternalizeTwoByteString(Vector<const uc16> str);
- MUST_USE_RESULT MaybeObject* InternalizeString(String* str);
- MUST_USE_RESULT MaybeObject* InternalizeOneByteString(
- Handle<SeqOneByteString> string, int from, int length);
-
- bool InternalizeStringIfExists(String* str, String** result);
- bool InternalizeTwoCharsStringIfExists(String* str, String** result);
-
- // Compute the matching internalized string map for a string if possible.
- // NULL is returned if string is in new space or not flattened.
- Map* InternalizedStringMapForString(String* str);
-
- // Tries to flatten a string before compare operation.
- //
- // Returns a failure in case it was decided that flattening was
- // necessary and failed. Note, if flattening is not necessary the
- // string might stay non-flat even when not a failure is returned.
- //
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* PrepareForCompare(String* str);
-
- // Converts the given boolean condition to JavaScript boolean value.
- inline Object* ToBoolean(bool condition);
-
- // Code that should be run before and after each GC. Includes some
- // reporting/verification activities when compiled with DEBUG set.
- void GarbageCollectionPrologue();
- void GarbageCollectionEpilogue();
-
- // Performs garbage collection operation.
- // Returns whether there is a chance that another major GC could
- // collect more garbage.
- bool CollectGarbage(AllocationSpace space,
- GarbageCollector collector,
- const char* gc_reason,
- const char* collector_reason);
-
- // Performs garbage collection operation.
- // Returns whether there is a chance that another major GC could
- // collect more garbage.
- inline bool CollectGarbage(AllocationSpace space,
- const char* gc_reason = NULL);
-
- static const int kNoGCFlags = 0;
- static const int kSweepPreciselyMask = 1;
- static const int kReduceMemoryFootprintMask = 2;
- static const int kAbortIncrementalMarkingMask = 4;
-
- // Making the heap iterable requires us to sweep precisely and abort any
- // incremental marking as well.
- static const int kMakeHeapIterableMask =
- kSweepPreciselyMask | kAbortIncrementalMarkingMask;
-
- // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
- // non-zero, then the slower precise sweeper is used, which leaves the heap
- // in a state where we can iterate over the heap visiting all objects.
- void CollectAllGarbage(int flags, const char* gc_reason = NULL);
-
- // Last hope GC, should try to squeeze as much as possible.
- void CollectAllAvailableGarbage(const char* gc_reason = NULL);
-
- // Check whether the heap is currently iterable.
- bool IsHeapIterable();
-
- // Ensure that we have swept all spaces in such a way that we can iterate
- // over all objects. May cause a GC.
- void EnsureHeapIsIterable();
-
- // Notify the heap that a context has been disposed.
- int NotifyContextDisposed() {
- flush_monomorphic_ics_ = true;
- return ++contexts_disposed_;
- }
-
- // Utility to invoke the scavenger. This is needed in test code to
- // ensure correct callback for weak global handles.
- void PerformScavenge();
-
- inline void increment_scan_on_scavenge_pages() {
- scan_on_scavenge_pages_++;
- if (FLAG_gc_verbose) {
- PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
- }
- }
-
- inline void decrement_scan_on_scavenge_pages() {
- scan_on_scavenge_pages_--;
- if (FLAG_gc_verbose) {
- PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
- }
- }
-
- PromotionQueue* promotion_queue() { return &promotion_queue_; }
-
-#ifdef DEBUG
- // Utility used with flag gc-greedy.
- void GarbageCollectionGreedyCheck();
-#endif
-
- void AddGCPrologueCallback(
- GCPrologueCallback callback, GCType gc_type_filter);
- void RemoveGCPrologueCallback(GCPrologueCallback callback);
-
- void AddGCEpilogueCallback(
- GCEpilogueCallback callback, GCType gc_type_filter);
- void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
-
- void SetGlobalGCPrologueCallback(GCCallback callback) {
- ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
- global_gc_prologue_callback_ = callback;
- }
- void SetGlobalGCEpilogueCallback(GCCallback callback) {
- ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
- global_gc_epilogue_callback_ = callback;
- }
-
- // Heap root getters. We have versions with and without type::cast() here.
- // You can't use type::cast during GC because the assert fails.
- // TODO(1490): Try removing the unchecked accessors, now that GC marking does
- // not corrupt the map.
-#define ROOT_ACCESSOR(type, name, camel_name) \
- type* name() { \
- return type::cast(roots_[k##camel_name##RootIndex]); \
- } \
- type* raw_unchecked_##name() { \
- return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
- }
- ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR
-
-// Utility type maps
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
- Map* name##_map() { \
- return Map::cast(roots_[k##Name##MapRootIndex]); \
- }
- STRUCT_LIST(STRUCT_MAP_ACCESSOR)
-#undef STRUCT_MAP_ACCESSOR
-
-#define STRING_ACCESSOR(name, str) String* name() { \
- return String::cast(roots_[k##name##RootIndex]); \
- }
- INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
-#undef STRING_ACCESSOR
-
- // The hidden_string is special because it is the empty string, but does
- // not match the empty string.
- String* hidden_string() { return hidden_string_; }
-
- void set_native_contexts_list(Object* object) {
- native_contexts_list_ = object;
- }
- Object* native_contexts_list() { return native_contexts_list_; }
-
- // Number of mark-sweeps.
- unsigned int ms_count() { return ms_count_; }
-
- // Iterates over all roots in the heap.
- void IterateRoots(ObjectVisitor* v, VisitMode mode);
- // Iterates over all strong roots in the heap.
- void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
- // Iterates over all the other roots in the heap.
- void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
-
- // Iterate pointers to from semispace of new space found in memory interval
- // from start to end.
- void IterateAndMarkPointersToFromSpace(Address start,
- Address end,
- ObjectSlotCallback callback);
-
- // Returns whether the object resides in new space.
- inline bool InNewSpace(Object* object);
- inline bool InNewSpace(Address addr);
- inline bool InNewSpacePage(Address addr);
- inline bool InFromSpace(Object* object);
- inline bool InToSpace(Object* object);
-
- // Checks whether an address/object in the heap (including auxiliary
- // area and unused area).
- bool Contains(Address addr);
- bool Contains(HeapObject* value);
-
- // Checks whether an address/object in a space.
- // Currently used by tests, serialization and heap verification only.
- bool InSpace(Address addr, AllocationSpace space);
- bool InSpace(HeapObject* value, AllocationSpace space);
-
- // Finds out which space an object should get promoted to based on its type.
- inline OldSpace* TargetSpace(HeapObject* object);
- inline AllocationSpace TargetSpaceId(InstanceType type);
-
- // Sets the stub_cache_ (only used when expanding the dictionary).
- void public_set_code_stubs(UnseededNumberDictionary* value) {
- roots_[kCodeStubsRootIndex] = value;
- }
-
- // Support for computing object sizes for old objects during GCs. Returns
- // a function that is guaranteed to be safe for computing object sizes in
- // the current GC phase.
- HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
- return gc_safe_size_of_old_object_;
- }
-
- // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
- void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
- roots_[kNonMonomorphicCacheRootIndex] = value;
- }
-
- void public_set_empty_script(Script* script) {
- roots_[kEmptyScriptRootIndex] = script;
- }
-
- void public_set_store_buffer_top(Address* top) {
- roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
- }
-
- // Update the next script id.
- inline void SetLastScriptId(Object* last_script_id);
-
- // Generated code can embed this address to get access to the roots.
- Object** roots_array_start() { return roots_; }
-
- Address* store_buffer_top_address() {
- return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
- }
-
- // Get address of native contexts list for serialization support.
- Object** native_contexts_list_address() {
- return &native_contexts_list_;
- }
-
-#ifdef VERIFY_HEAP
- // Verify the heap is in its normal state before or after a GC.
- void Verify();
-
-
- bool weak_embedded_maps_verification_enabled() {
- return no_weak_embedded_maps_verification_scope_depth_ == 0;
- }
-#endif
-
-#ifdef DEBUG
- void Print();
- void PrintHandles();
-
- void OldPointerSpaceCheckStoreBuffer();
- void MapSpaceCheckStoreBuffer();
- void LargeObjectSpaceCheckStoreBuffer();
-
- // Report heap statistics.
- void ReportHeapStatistics(const char* title);
- void ReportCodeStatistics(const char* title);
-#endif
-
- // Zapping is needed for verify heap, and always done in debug builds.
- static inline bool ShouldZapGarbage() {
-#ifdef DEBUG
- return true;
-#else
-#ifdef VERIFY_HEAP
- return FLAG_verify_heap;
-#else
- return false;
-#endif
-#endif
- }
-
- // Fill in bogus values in from space
- void ZapFromSpace();
-
- // Print short heap statistics.
- void PrintShortHeapStatistics();
-
- // Makes a new internalized string object
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* CreateInternalizedString(
- const char* str, int length, int hash);
- MUST_USE_RESULT MaybeObject* CreateInternalizedString(String* str);
-
- // Write barrier support for address[offset] = o.
- INLINE(void RecordWrite(Address address, int offset));
-
- // Write barrier support for address[start : start + len[ = o.
- INLINE(void RecordWrites(Address address, int start, int len));
-
- // Given an address occupied by a live code object, return that object.
- Object* FindCodeObject(Address a);
-
- enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
- inline HeapState gc_state() { return gc_state_; }
-
- inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
-
-#ifdef DEBUG
- bool IsAllocationAllowed() { return allocation_allowed_; }
- inline bool allow_allocation(bool enable);
-
- bool disallow_allocation_failure() {
- return disallow_allocation_failure_;
- }
-
- void TracePathToObjectFrom(Object* target, Object* root);
- void TracePathToObject(Object* target);
- void TracePathToGlobal();
-#endif
-
- // Callback function passed to Heap::Iterate etc. Copies an object if
- // necessary, the object might be promoted to an old space. The caller must
- // ensure the precondition that the object is (a) a heap object and (b) in
- // the heap's from space.
- static inline void ScavengePointer(HeapObject** p);
- static inline void ScavengeObject(HeapObject** p, HeapObject* object);
-
- // Commits from space if it is uncommitted.
- void EnsureFromSpaceIsCommitted();
-
- // Support for partial snapshots. After calling this we have a linear
- // space to write objects in each space.
- void ReserveSpace(int *sizes, Address* addresses);
-
- //
- // Support for the API.
- //
-
- bool CreateApiObjects();
-
- // Attempt to find the number in a small cache. If we finds it, return
- // the string representation of the number. Otherwise return undefined.
- Object* GetNumberStringCache(Object* number);
-
- // Update the cache with a new number-string pair.
- void SetNumberStringCache(Object* number, String* str);
-
- // Adjusts the amount of registered external memory.
- // Returns the adjusted value.
- inline intptr_t AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes);
-
- // Allocate uninitialized fixed array.
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length);
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
- PretenureFlag pretenure);
-
- inline intptr_t PromotedTotalSize() {
- return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
- }
-
- // True if we have reached the allocation limit in the old generation that
- // should force the next GC (caused normally) to be a full one.
- inline bool OldGenerationPromotionLimitReached() {
- return PromotedTotalSize() > old_gen_promotion_limit_;
- }
-
- inline intptr_t OldGenerationSpaceAvailable() {
- return old_gen_allocation_limit_ - PromotedTotalSize();
- }
-
- inline intptr_t OldGenerationCapacityAvailable() {
- return max_old_generation_size_ - PromotedTotalSize();
- }
-
- static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
- static const intptr_t kMinimumAllocationLimit =
- 8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
-
- intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
- const int divisor = FLAG_stress_compaction ? 10 : 3;
- intptr_t limit =
- Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
- limit += new_space_.Capacity();
- limit *= old_gen_limit_factor_;
- intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
- return Min(limit, halfway_to_the_max);
- }
-
- intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
- const int divisor = FLAG_stress_compaction ? 8 : 2;
- intptr_t limit =
- Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
- limit += new_space_.Capacity();
- limit *= old_gen_limit_factor_;
- intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
- return Min(limit, halfway_to_the_max);
- }
-
- // Implements the corresponding V8 API function.
- bool IdleNotification(int hint);
-
- // Declare all the root indices.
- enum RootListIndex {
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
- STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
-
-#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
- INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
-#undef STRING_DECLARATION
-
- // Utility type maps
-#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
- STRUCT_LIST(DECLARE_STRUCT_MAP)
-#undef DECLARE_STRUCT_MAP
-
- kStringTableRootIndex,
- kStrongRootListLength = kStringTableRootIndex,
- kRootListLength
- };
-
- STATIC_CHECK(kUndefinedValueRootIndex == Internals::kUndefinedValueRootIndex);
- STATIC_CHECK(kNullValueRootIndex == Internals::kNullValueRootIndex);
- STATIC_CHECK(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
- STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
- STATIC_CHECK(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
-
- // Generated code can embed direct references to non-writable roots if
- // they are in new space.
- static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
-
- MUST_USE_RESULT MaybeObject* NumberToString(
- Object* number, bool check_number_string_cache = true);
- MUST_USE_RESULT MaybeObject* Uint32ToString(
- uint32_t value, bool check_number_string_cache = true);
-
- Map* MapForExternalArrayType(ExternalArrayType array_type);
- RootListIndex RootIndexForExternalArrayType(
- ExternalArrayType array_type);
-
- void RecordStats(HeapStats* stats, bool take_snapshot = false);
-
- // Copy block of memory from src to dst. Size of block should be aligned
- // by pointer size.
- static inline void CopyBlock(Address dst, Address src, int byte_size);
-
- // Optimized version of memmove for blocks with pointer size aligned sizes and
- // pointer size aligned addresses.
- static inline void MoveBlock(Address dst, Address src, int byte_size);
-
- // Check new space expansion criteria and expand semispaces if it was hit.
- void CheckNewSpaceExpansionCriteria();
-
- inline void IncrementYoungSurvivorsCounter(int survived) {
- ASSERT(survived >= 0);
- young_survivors_after_last_gc_ = survived;
- survived_since_last_expansion_ += survived;
- }
-
- inline bool NextGCIsLikelyToBeFull() {
- if (FLAG_gc_global) return true;
-
- if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
-
- intptr_t total_promoted = PromotedTotalSize();
-
- intptr_t adjusted_promotion_limit =
- old_gen_promotion_limit_ - new_space_.Capacity();
-
- if (total_promoted >= adjusted_promotion_limit) return true;
-
- intptr_t adjusted_allocation_limit =
- old_gen_allocation_limit_ - new_space_.Capacity() / 5;
-
- if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;
-
- return false;
- }
-
-
- void UpdateNewSpaceReferencesInExternalStringTable(
- ExternalStringTableUpdaterCallback updater_func);
-
- void UpdateReferencesInExternalStringTable(
- ExternalStringTableUpdaterCallback updater_func);
-
- void ProcessWeakReferences(WeakObjectRetainer* retainer);
-
- void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
-
- // Helper function that governs the promotion policy from new space to
- // old. If the object's old address lies below the new space's age
- // mark or if we've already filled the bottom 1/16th of the to space,
- // we try to promote this object.
- inline bool ShouldBePromoted(Address old_address, int object_size);
-
- int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
-
- void ClearJSFunctionResultCaches();
-
- void ClearNormalizedMapCaches();
-
- GCTracer* tracer() { return tracer_; }
-
- // Returns the size of objects residing in non new spaces.
- intptr_t PromotedSpaceSizeOfObjects();
-
- double total_regexp_code_generated() { return total_regexp_code_generated_; }
- void IncreaseTotalRegexpCodeGenerated(int size) {
- total_regexp_code_generated_ += size;
- }
-
- // Returns maximum GC pause.
- double get_max_gc_pause() { return max_gc_pause_; }
-
- // Returns maximum size of objects alive after GC.
- intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
-
- // Returns minimal interval between two subsequent collections.
- double get_min_in_mutator() { return min_in_mutator_; }
-
- // TODO(hpayer): remove, should be handled by GCTracer
- void AddMarkingTime(double marking_time) {
- marking_time_ += marking_time;
- }
-
- double marking_time() const {
- return marking_time_;
- }
-
- // TODO(hpayer): remove, should be handled by GCTracer
- void AddSweepingTime(double sweeping_time) {
- sweeping_time_ += sweeping_time;
- }
-
- double sweeping_time() const {
- return sweeping_time_;
- }
-
- MarkCompactCollector* mark_compact_collector() {
- return &mark_compact_collector_;
- }
-
- StoreBuffer* store_buffer() {
- return &store_buffer_;
- }
-
- Marking* marking() {
- return &marking_;
- }
-
- IncrementalMarking* incremental_marking() {
- return &incremental_marking_;
- }
-
- bool IsSweepingComplete() {
- return !mark_compact_collector()->IsConcurrentSweepingInProgress() &&
- old_data_space()->IsLazySweepingComplete() &&
- old_pointer_space()->IsLazySweepingComplete();
- }
-
- bool AdvanceSweepers(int step_size) {
- ASSERT(!FLAG_parallel_sweeping && !FLAG_concurrent_sweeping);
- bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
- sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
- return sweeping_complete;
- }
-
- ExternalStringTable* external_string_table() {
- return &external_string_table_;
- }
-
- ErrorObjectList* error_object_list() {
- return &error_object_list_;
- }
-
- // Returns the current sweep generation.
- int sweep_generation() {
- return sweep_generation_;
- }
-
- inline Isolate* isolate();
-
- void CallGCPrologueCallbacks(GCType gc_type);
- void CallGCEpilogueCallbacks(GCType gc_type);
-
- inline bool OldGenerationAllocationLimitReached();
-
- inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
- scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
- }
-
- void QueueMemoryChunkForFree(MemoryChunk* chunk);
- void FreeQueuedChunks();
-
- // Completely clear the Instanceof cache (to stop it keeping objects alive
- // around a GC).
- inline void CompletelyClearInstanceofCache();
-
- // The roots that have an index less than this are always in old space.
- static const int kOldSpaceRoots = 0x20;
-
- uint32_t HashSeed() {
- uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
- ASSERT(FLAG_randomize_hashes || seed == 0);
- return seed;
- }
-
- void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
- ASSERT(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
- set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
- }
-
- void SetConstructStubDeoptPCOffset(int pc_offset) {
- ASSERT(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
- set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
- }
-
- void SetGetterStubDeoptPCOffset(int pc_offset) {
- ASSERT(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
- set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
- }
-
- void SetSetterStubDeoptPCOffset(int pc_offset) {
- ASSERT(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
- set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
- }
-
- // For post mortem debugging.
- void RememberUnmappedPage(Address page, bool compacted);
-
- // Global inline caching age: it is incremented on some GCs after context
- // disposal. We use it to flush inline caches.
- int global_ic_age() {
- return global_ic_age_;
- }
-
- void AgeInlineCaches() {
- global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
- }
-
- bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }
-
- intptr_t amount_of_external_allocated_memory() {
- return amount_of_external_allocated_memory_;
- }
-
- // ObjectStats are kept in two arrays, counts and sizes. Related stats are
- // stored in a contiguous linear buffer. Stats groups are stored one after
- // another.
- enum {
- FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
- FIRST_FIXED_ARRAY_SUB_TYPE =
- FIRST_CODE_KIND_SUB_TYPE + Code::LAST_CODE_KIND + 1,
- OBJECT_STATS_COUNT =
- FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1
- };
-
- void RecordObjectStats(InstanceType type, int sub_type, size_t size) {
- ASSERT(type <= LAST_TYPE);
- if (sub_type < 0) {
- object_counts_[type]++;
- object_sizes_[type] += size;
- } else {
- if (type == CODE_TYPE) {
- ASSERT(sub_type <= Code::LAST_CODE_KIND);
- object_counts_[FIRST_CODE_KIND_SUB_TYPE + sub_type]++;
- object_sizes_[FIRST_CODE_KIND_SUB_TYPE + sub_type] += size;
- } else if (type == FIXED_ARRAY_TYPE) {
- ASSERT(sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
- object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type]++;
- object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type] += size;
- }
- }
- }
-
- void CheckpointObjectStats();
-
- // We don't use a ScopedLock here since we want to lock the heap
- // only when FLAG_parallel_recompilation is true.
- class RelocationLock {
- public:
- explicit RelocationLock(Heap* heap) : heap_(heap) {
- if (FLAG_parallel_recompilation) {
- heap_->relocation_mutex_->Lock();
- }
- }
- ~RelocationLock() {
- if (FLAG_parallel_recompilation) {
- heap_->relocation_mutex_->Unlock();
- }
- }
-
- private:
- Heap* heap_;
- };
-
- private:
- Heap();
-
- // This can be calculated directly from a pointer to the heap; however, it is
- // more expedient to get at the isolate directly from within Heap methods.
- Isolate* isolate_;
-
- Object* roots_[kRootListLength];
-
- intptr_t code_range_size_;
- int reserved_semispace_size_;
- int max_semispace_size_;
- int initial_semispace_size_;
- intptr_t max_old_generation_size_;
- intptr_t max_executable_size_;
-
- // For keeping track of how much data has survived
- // scavenge since last new space expansion.
- int survived_since_last_expansion_;
-
- // For keeping track on when to flush RegExp code.
- int sweep_generation_;
-
- int always_allocate_scope_depth_;
- int linear_allocation_scope_depth_;
-
- // For keeping track of context disposals.
- int contexts_disposed_;
-
- int global_ic_age_;
-
- bool flush_monomorphic_ics_;
-
- int scan_on_scavenge_pages_;
-
-#if defined(V8_TARGET_ARCH_X64)
- static const int kMaxObjectSizeInNewSpace = 1024*KB;
-#else
- static const int kMaxObjectSizeInNewSpace = 512*KB;
-#endif
-
- NewSpace new_space_;
- OldSpace* old_pointer_space_;
- OldSpace* old_data_space_;
- OldSpace* code_space_;
- MapSpace* map_space_;
- CellSpace* cell_space_;
- LargeObjectSpace* lo_space_;
- HeapState gc_state_;
- int gc_post_processing_depth_;
-
- // Returns the amount of external memory registered since last global gc.
- intptr_t PromotedExternalMemorySize();
-
- unsigned int ms_count_; // how many mark-sweep collections happened
- unsigned int gc_count_; // how many gc happened
-
- // For post mortem debugging.
- static const int kRememberedUnmappedPages = 128;
- int remembered_unmapped_pages_index_;
- Address remembered_unmapped_pages_[kRememberedUnmappedPages];
-
- // Total length of the strings we failed to flatten since the last GC.
- int unflattened_strings_length_;
-
-#define ROOT_ACCESSOR(type, name, camel_name) \
- inline void set_##name(type* value) { \
- /* The deserializer makes use of the fact that these common roots are */ \
- /* never in new space and never on a page that is being compacted. */ \
- ASSERT(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
- roots_[k##camel_name##RootIndex] = value; \
- }
- ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR
-
-#ifdef DEBUG
- bool allocation_allowed_;
-
- // If the --gc-interval flag is set to a positive value, this
- // variable holds the value indicating the number of allocations
- // remain until the next failure and garbage collection.
- int allocation_timeout_;
-
- // Do we expect to be able to handle allocation failure at this
- // time?
- bool disallow_allocation_failure_;
-#endif // DEBUG
-
- // Indicates that the new space should be kept small due to high promotion
- // rates caused by the mutator allocating a lot of long-lived objects.
- bool new_space_high_promotion_mode_active_;
-
- // Limit that triggers a global GC on the next (normally caused) GC. This
- // is checked when we have already decided to do a GC to help determine
- // which collector to invoke.
- intptr_t old_gen_promotion_limit_;
-
- // Limit that triggers a global GC as soon as is reasonable. This is
- // checked before expanding a paged space in the old generation and on
- // every allocation in large object space.
- intptr_t old_gen_allocation_limit_;
-
- // Sometimes the heuristics dictate that those limits are increased. This
- // variable records that fact.
- int old_gen_limit_factor_;
-
- // Used to adjust the limits that control the timing of the next GC.
- intptr_t size_of_old_gen_at_last_old_space_gc_;
-
- // Limit on the amount of externally allocated memory allowed
- // between global GCs. If reached a global GC is forced.
- intptr_t external_allocation_limit_;
-
- // The amount of external memory registered through the API kept alive
- // by global handles
- intptr_t amount_of_external_allocated_memory_;
-
- // Caches the amount of external memory registered at the last global gc.
- intptr_t amount_of_external_allocated_memory_at_last_global_gc_;
-
- // Indicates that an allocation has failed in the old generation since the
- // last GC.
- int old_gen_exhausted_;
-
- Object* native_contexts_list_;
-
- StoreBufferRebuilder store_buffer_rebuilder_;
-
- struct StringTypeTable {
- InstanceType type;
- int size;
- RootListIndex index;
- };
-
- struct ConstantStringTable {
- const char* contents;
- RootListIndex index;
- };
-
- struct StructTable {
- InstanceType type;
- int size;
- RootListIndex index;
- };
-
- static const StringTypeTable string_type_table[];
- static const ConstantStringTable constant_string_table[];
- static const StructTable struct_table[];
-
- // The special hidden string which is an empty string, but does not match
- // any string when looked up in properties.
- String* hidden_string_;
-
- // GC callback function, called before and after mark-compact GC.
- // Allocations in the callback function are disallowed.
- struct GCPrologueCallbackPair {
- GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type)
- : callback(callback), gc_type(gc_type) {
- }
- bool operator==(const GCPrologueCallbackPair& pair) const {
- return pair.callback == callback;
- }
- GCPrologueCallback callback;
- GCType gc_type;
- };
- List<GCPrologueCallbackPair> gc_prologue_callbacks_;
-
- struct GCEpilogueCallbackPair {
- GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
- : callback(callback), gc_type(gc_type) {
- }
- bool operator==(const GCEpilogueCallbackPair& pair) const {
- return pair.callback == callback;
- }
- GCEpilogueCallback callback;
- GCType gc_type;
- };
- List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
-
- GCCallback global_gc_prologue_callback_;
- GCCallback global_gc_epilogue_callback_;
-
- // Support for computing object sizes during GC.
- HeapObjectCallback gc_safe_size_of_old_object_;
- static int GcSafeSizeOfOldObject(HeapObject* object);
-
- // Update the GC state. Called from the mark-compact collector.
- void MarkMapPointersAsEncoded(bool encoded) {
- ASSERT(!encoded);
- gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
- }
-
- // Checks whether a global GC is necessary
- GarbageCollector SelectGarbageCollector(AllocationSpace space,
- const char** reason);
-
- // Performs garbage collection
- // Returns whether there is a chance another major GC could
- // collect more garbage.
- bool PerformGarbageCollection(GarbageCollector collector,
- GCTracer* tracer);
-
- inline void UpdateOldSpaceLimits();
-
- // Allocate an uninitialized object in map space. The behavior is identical
- // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
- // have to test the allocation space argument and (b) can reduce code size
- // (since both AllocateRaw and AllocateRawMap are inlined).
- MUST_USE_RESULT inline MaybeObject* AllocateRawMap();
-
- // Allocate an uninitialized object in the global property cell space.
- MUST_USE_RESULT inline MaybeObject* AllocateRawCell();
-
- // Initializes a JSObject based on its map.
- void InitializeJSObjectFromMap(JSObject* obj,
- FixedArray* properties,
- Map* map);
-
- bool CreateInitialMaps();
- bool CreateInitialObjects();
-
- // These five Create*EntryStub functions are here and forced to not be inlined
- // because of a gcc-4.4 bug that assigns wrong vtable entries.
- NO_INLINE(void CreateJSEntryStub());
- NO_INLINE(void CreateJSConstructEntryStub());
-
- void CreateFixedStubs();
-
- MUST_USE_RESULT MaybeObject* CreateOddball(const char* to_string,
- Object* to_number,
- byte kind);
-
- // Allocate a JSArray with no elements
- MUST_USE_RESULT MaybeObject* AllocateJSArray(
- ElementsKind elements_kind,
- PretenureFlag pretenure = NOT_TENURED);
-
- MUST_USE_RESULT MaybeObject* AllocateJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<Object> allocation_site_info_payload);
-
- // Allocate empty fixed array.
- MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
-
- // Allocate empty fixed double array.
- MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
-
- // Performs a minor collection in new generation.
- void Scavenge();
-
- static HeapObject* UpdateNewSpaceReferenceInExternalStringTableEntry(
- Heap* heap,
- Object** pointer);
-
- Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
- static void ScavengeStoreBufferCallback(Heap* heap,
- MemoryChunk* page,
- StoreBufferEvent event);
-
- // Performs a major collection in the whole heap.
- void MarkCompact(GCTracer* tracer);
-
- // Code to be run before and after mark-compact.
- void MarkCompactPrologue();
-
- // Record statistics before and after garbage collection.
- void ReportStatisticsBeforeGC();
- void ReportStatisticsAfterGC();
-
- // Slow part of scavenge object.
- static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
-
- // Initializes a function with a shared part and prototype.
- // Note: this code was factored out of AllocateFunction such that
- // other parts of the VM could use it. Specifically, a function that creates
- // instances of type JS_FUNCTION_TYPE benefit from the use of this function.
- // Please note this does not perform a garbage collection.
- inline void InitializeFunction(
- JSFunction* function,
- SharedFunctionInfo* shared,
- Object* prototype);
-
- // Total RegExp code ever generated
- double total_regexp_code_generated_;
-
- GCTracer* tracer_;
-
- // Allocates a small number to string cache.
- MUST_USE_RESULT MaybeObject* AllocateInitialNumberStringCache();
- // Creates and installs the full-sized number string cache.
- void AllocateFullSizeNumberStringCache();
- // Get the length of the number to string cache based on the max semispace
- // size.
- int FullSizeNumberStringCacheLength();
- // Flush the number to string cache.
- void FlushNumberStringCache();
-
- void UpdateSurvivalRateTrend(int start_new_space_size);
-
- enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
-
- static const int kYoungSurvivalRateHighThreshold = 90;
- static const int kYoungSurvivalRateLowThreshold = 10;
- static const int kYoungSurvivalRateAllowedDeviation = 15;
-
- int young_survivors_after_last_gc_;
- int high_survival_rate_period_length_;
- int low_survival_rate_period_length_;
- double survival_rate_;
- SurvivalRateTrend previous_survival_rate_trend_;
- SurvivalRateTrend survival_rate_trend_;
-
- void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
- ASSERT(survival_rate_trend != FLUCTUATING);
- previous_survival_rate_trend_ = survival_rate_trend_;
- survival_rate_trend_ = survival_rate_trend;
- }
-
- SurvivalRateTrend survival_rate_trend() {
- if (survival_rate_trend_ == STABLE) {
- return STABLE;
- } else if (previous_survival_rate_trend_ == STABLE) {
- return survival_rate_trend_;
- } else if (survival_rate_trend_ != previous_survival_rate_trend_) {
- return FLUCTUATING;
- } else {
- return survival_rate_trend_;
- }
- }
-
- bool IsStableOrIncreasingSurvivalTrend() {
- switch (survival_rate_trend()) {
- case STABLE:
- case INCREASING:
- return true;
- default:
- return false;
- }
- }
-
- bool IsStableOrDecreasingSurvivalTrend() {
- switch (survival_rate_trend()) {
- case STABLE:
- case DECREASING:
- return true;
- default:
- return false;
- }
- }
-
- bool IsIncreasingSurvivalTrend() {
- return survival_rate_trend() == INCREASING;
- }
-
- bool IsHighSurvivalRate() {
- return high_survival_rate_period_length_ > 0;
- }
-
- bool IsLowSurvivalRate() {
- return low_survival_rate_period_length_ > 0;
- }
-
- void SelectScavengingVisitorsTable();
-
- void StartIdleRound() {
- mark_sweeps_since_idle_round_started_ = 0;
- ms_count_at_last_idle_notification_ = ms_count_;
- }
-
- void FinishIdleRound() {
- mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound;
- scavenges_since_last_idle_round_ = 0;
- }
-
- bool EnoughGarbageSinceLastIdleRound() {
- return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
- }
-
- // Estimates how many milliseconds a Mark-Sweep would take to complete.
- // In idle notification handler we assume that this function will return:
- // - a number less than 10 for small heaps, which are less than 8Mb.
- // - a number greater than 10 for large heaps, which are greater than 32Mb.
- int TimeMarkSweepWouldTakeInMs() {
- // Rough estimate of how many megabytes of heap can be processed in 1 ms.
- static const int kMbPerMs = 2;
-
- int heap_size_mb = static_cast<int>(SizeOfObjects() / MB);
- return heap_size_mb / kMbPerMs;
- }
-
- // Returns true if no more GC work is left.
- bool IdleGlobalGC();
-
- void AdvanceIdleIncrementalMarking(intptr_t step_size);
-
- void ClearObjectStats(bool clear_last_time_stats = false);
-
- static const int kInitialStringTableSize = 2048;
- static const int kInitialEvalCacheSize = 64;
- static const int kInitialNumberStringCacheSize = 256;
-
- // Object counts and used memory by InstanceType
- size_t object_counts_[OBJECT_STATS_COUNT];
- size_t object_counts_last_time_[OBJECT_STATS_COUNT];
- size_t object_sizes_[OBJECT_STATS_COUNT];
- size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
-
- // Maximum GC pause.
- double max_gc_pause_;
-
- // Total time spent in GC.
- double total_gc_time_ms_;
-
- // Maximum size of objects alive after GC.
- intptr_t max_alive_after_gc_;
-
- // Minimal interval between two subsequent collections.
- double min_in_mutator_;
-
- // Size of objects alive after last GC.
- intptr_t alive_after_last_gc_;
-
- double last_gc_end_timestamp_;
-
- // Cumulative GC time spent in marking
- double marking_time_;
-
- // Cumulative GC time spent in sweeping
- double sweeping_time_;
-
- MarkCompactCollector mark_compact_collector_;
-
- StoreBuffer store_buffer_;
-
- Marking marking_;
-
- IncrementalMarking incremental_marking_;
-
- int number_idle_notifications_;
- unsigned int last_idle_notification_gc_count_;
- bool last_idle_notification_gc_count_init_;
-
- int mark_sweeps_since_idle_round_started_;
- int ms_count_at_last_idle_notification_;
- unsigned int gc_count_at_last_idle_gc_;
- int scavenges_since_last_idle_round_;
-
-#ifdef VERIFY_HEAP
- int no_weak_embedded_maps_verification_scope_depth_;
-#endif
-
- static const int kMaxMarkSweepsInIdleRound = 7;
- static const int kIdleScavengeThreshold = 5;
-
- // Shared state read by the scavenge collector and set by ScavengeObject.
- PromotionQueue promotion_queue_;
-
- // Flag is set when the heap has been configured. The heap can be repeatedly
- // configured through the API until it is set up.
- bool configured_;
-
- ExternalStringTable external_string_table_;
-
- ErrorObjectList error_object_list_;
-
- VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
-
- MemoryChunk* chunks_queued_for_free_;
-
- Mutex* relocation_mutex_;
-
- friend class Factory;
- friend class GCTracer;
- friend class DisallowAllocationFailure;
- friend class AlwaysAllocateScope;
- friend class Page;
- friend class Isolate;
- friend class MarkCompactCollector;
- friend class MarkCompactMarkingVisitor;
- friend class MapCompact;
-#ifdef VERIFY_HEAP
- friend class NoWeakEmbeddedMapsVerificationScope;
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(Heap);
-};
-
-
-class HeapStats {
- public:
- static const int kStartMarker = 0xDECADE00;
- static const int kEndMarker = 0xDECADE01;
-
- int* start_marker; // 0
- int* new_space_size; // 1
- int* new_space_capacity; // 2
- intptr_t* old_pointer_space_size; // 3
- intptr_t* old_pointer_space_capacity; // 4
- intptr_t* old_data_space_size; // 5
- intptr_t* old_data_space_capacity; // 6
- intptr_t* code_space_size; // 7
- intptr_t* code_space_capacity; // 8
- intptr_t* map_space_size; // 9
- intptr_t* map_space_capacity; // 10
- intptr_t* cell_space_size; // 11
- intptr_t* cell_space_capacity; // 12
- intptr_t* lo_space_size; // 13
- int* global_handle_count; // 14
- int* weak_global_handle_count; // 15
- int* pending_global_handle_count; // 16
- int* near_death_global_handle_count; // 17
- int* free_global_handle_count; // 18
- intptr_t* memory_allocator_size; // 19
- intptr_t* memory_allocator_capacity; // 20
- int* objects_per_type; // 21
- int* size_per_type; // 22
- int* os_error; // 23
- int* end_marker; // 24
-};
-
-
-class DisallowAllocationFailure {
- public:
- inline DisallowAllocationFailure();
- inline ~DisallowAllocationFailure();
-
-#ifdef DEBUG
- private:
- bool old_state_;
-#endif
-};
-
-
-class AlwaysAllocateScope {
- public:
- inline AlwaysAllocateScope();
- inline ~AlwaysAllocateScope();
-
- private:
- // Implicitly disable artificial allocation failures.
- DisallowAllocationFailure disallow_allocation_failure_;
-};
-
-#ifdef VERIFY_HEAP
-class NoWeakEmbeddedMapsVerificationScope {
- public:
- inline NoWeakEmbeddedMapsVerificationScope();
- inline ~NoWeakEmbeddedMapsVerificationScope();
-};
-#endif
-
-
-// Visitor class to verify interior pointers in spaces that do not contain
-// or care about intergenerational references. All heap object pointers have to
-// point into the heap to a location that has a map pointer at its first word.
-// Caveat: Heap::Contains is an approximation because it can return true for
-// objects in a heap space but above the allocation pointer.
-class VerifyPointersVisitor: public ObjectVisitor {
- public:
- inline void VisitPointers(Object** start, Object** end);
-};
-
-
-// Space iterator for iterating over all spaces of the heap. Returns each space
-// in turn, and null when it is done.
-class AllSpaces BASE_EMBEDDED {
- public:
- explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
- Space* next();
- private:
- Heap* heap_;
- int counter_;
-};
-
-
-// Space iterator for iterating over all old spaces of the heap: Old pointer
-// space, old data space and code space. Returns each space in turn, and null
-// when it is done.
-class OldSpaces BASE_EMBEDDED {
- public:
- explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
- OldSpace* next();
- private:
- Heap* heap_;
- int counter_;
-};
-
-
-// Space iterator for iterating over all the paged spaces of the heap: Map
-// space, old pointer space, old data space, code space and cell space. Returns
-// each space in turn, and null when it is done.
-class PagedSpaces BASE_EMBEDDED {
- public:
- explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
- PagedSpace* next();
- private:
- Heap* heap_;
- int counter_;
-};
-
-
-// Space iterator for iterating over all spaces of the heap.
-// For each space an object iterator is provided. The deallocation of the
-// returned object iterators is handled by the space iterator.
-class SpaceIterator : public Malloced {
- public:
- explicit SpaceIterator(Heap* heap);
- SpaceIterator(Heap* heap, HeapObjectCallback size_func);
- virtual ~SpaceIterator();
-
- bool has_next();
- ObjectIterator* next();
-
- private:
- ObjectIterator* CreateIterator();
-
- Heap* heap_;
- int current_space_; // from enum AllocationSpace.
- ObjectIterator* iterator_; // object iterator for the current space.
- HeapObjectCallback size_func_;
-};
-
-
-// A HeapIterator provides iteration over the whole heap. It
-// aggregates the specific iterators for the different spaces as
-// these can only iterate over one space only.
-//
-// HeapIterator can skip free list nodes (that is, de-allocated heap
-// objects that still remain in the heap). As implementation of free
-// nodes filtering uses GC marks, it can't be used during MS/MC GC
-// phases. Also, it is forbidden to interrupt iteration in this mode,
-// as this will leave heap objects marked (and thus, unusable).
-class HeapObjectsFilter;
-
-class HeapIterator BASE_EMBEDDED {
- public:
- enum HeapObjectsFiltering {
- kNoFiltering,
- kFilterUnreachable
- };
-
- explicit HeapIterator(Heap* heap);
- HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
- ~HeapIterator();
-
- HeapObject* next();
- void reset();
-
- private:
- // Perform the initialization.
- void Init();
- // Perform all necessary shutdown (destruction) work.
- void Shutdown();
- HeapObject* NextObject();
-
- Heap* heap_;
- HeapObjectsFiltering filtering_;
- HeapObjectsFilter* filter_;
- // Space iterator for iterating all the spaces.
- SpaceIterator* space_iterator_;
- // Object iterator for the space currently being iterated.
- ObjectIterator* object_iterator_;
-};
-
-
-// Cache for mapping (map, property name) into field offset.
-// Cleared at startup and prior to mark sweep collection.
-class KeyedLookupCache {
- public:
- // Lookup field offset for (map, name). If absent, -1 is returned.
- int Lookup(Map* map, String* name);
-
- // Update an element in the cache.
- void Update(Map* map, String* name, int field_offset);
-
- // Clear the cache.
- void Clear();
-
- static const int kLength = 256;
- static const int kCapacityMask = kLength - 1;
- static const int kMapHashShift = 5;
- static const int kHashMask = -4; // Zero the last two bits.
- static const int kEntriesPerBucket = 4;
- static const int kNotFound = -1;
-
- // kEntriesPerBucket should be a power of 2.
- STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
- STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
-
- private:
- KeyedLookupCache() {
- for (int i = 0; i < kLength; ++i) {
- keys_[i].map = NULL;
- keys_[i].name = NULL;
- field_offsets_[i] = kNotFound;
- }
- }
-
- static inline int Hash(Map* map, String* name);
-
- // Get the address of the keys and field_offsets arrays. Used in
- // generated code to perform cache lookups.
- Address keys_address() {
- return reinterpret_cast<Address>(&keys_);
- }
-
- Address field_offsets_address() {
- return reinterpret_cast<Address>(&field_offsets_);
- }
-
- struct Key {
- Map* map;
- String* name;
- };
-
- Key keys_[kLength];
- int field_offsets_[kLength];
-
- friend class ExternalReference;
- friend class Isolate;
- DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
-};
-
-
-// Cache for mapping (map, property name) into descriptor index.
-// The cache contains both positive and negative results.
-// Descriptor index equals kNotFound means the property is absent.
-// Cleared at startup and prior to any gc.
-class DescriptorLookupCache {
- public:
- // Lookup descriptor index for (map, name).
- // If absent, kAbsent is returned.
- int Lookup(Map* source, String* name) {
- if (!StringShape(name).IsInternalized()) return kAbsent;
- int index = Hash(source, name);
- Key& key = keys_[index];
- if ((key.source == source) && (key.name == name)) return results_[index];
- return kAbsent;
- }
-
- // Update an element in the cache.
- void Update(Map* source, String* name, int result) {
- ASSERT(result != kAbsent);
- if (StringShape(name).IsInternalized()) {
- int index = Hash(source, name);
- Key& key = keys_[index];
- key.source = source;
- key.name = name;
- results_[index] = result;
- }
- }
-
- // Clear the cache.
- void Clear();
-
- static const int kAbsent = -2;
-
- private:
- DescriptorLookupCache() {
- for (int i = 0; i < kLength; ++i) {
- keys_[i].source = NULL;
- keys_[i].name = NULL;
- results_[i] = kAbsent;
- }
- }
-
- static int Hash(Object* source, String* name) {
- // Uses only lower 32 bits if pointers are larger.
- uint32_t source_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source))
- >> kPointerSizeLog2;
- uint32_t name_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name))
- >> kPointerSizeLog2;
- return (source_hash ^ name_hash) % kLength;
- }
-
- static const int kLength = 64;
- struct Key {
- Map* source;
- String* name;
- };
-
- Key keys_[kLength];
- int results_[kLength];
-
- friend class Isolate;
- DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
-};
-
-
-// A helper class to document/test C++ scopes where we do not
-// expect a GC. Usage:
-//
-// /* Allocation not allowed: we cannot handle a GC in this scope. */
-// { AssertNoAllocation nogc;
-// ...
-// }
-class AssertNoAllocation {
- public:
- inline AssertNoAllocation();
- inline ~AssertNoAllocation();
-
-#ifdef DEBUG
- private:
- bool old_state_;
- bool active_;
-#endif
-};
-
-
-class DisableAssertNoAllocation {
- public:
- inline DisableAssertNoAllocation();
- inline ~DisableAssertNoAllocation();
-
-#ifdef DEBUG
- private:
- bool old_state_;
- bool active_;
-#endif
-};
-
-// GCTracer collects and prints ONE line after each garbage collector
-// invocation IFF --trace_gc is used.
-
-class GCTracer BASE_EMBEDDED {
- public:
- class Scope BASE_EMBEDDED {
- public:
- enum ScopeId {
- EXTERNAL,
- MC_MARK,
- MC_SWEEP,
- MC_SWEEP_NEWSPACE,
- MC_EVACUATE_PAGES,
- MC_UPDATE_NEW_TO_NEW_POINTERS,
- MC_UPDATE_ROOT_TO_NEW_POINTERS,
- MC_UPDATE_OLD_TO_NEW_POINTERS,
- MC_UPDATE_POINTERS_TO_EVACUATED,
- MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
- MC_UPDATE_MISC_POINTERS,
- MC_FLUSH_CODE,
- kNumberOfScopes
- };
-
- Scope(GCTracer* tracer, ScopeId scope)
- : tracer_(tracer),
- scope_(scope) {
- start_time_ = OS::TimeCurrentMillis();
- }
-
- ~Scope() {
- ASSERT(scope_ < kNumberOfScopes); // scope_ is unsigned.
- tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_;
- }
-
- private:
- GCTracer* tracer_;
- ScopeId scope_;
- double start_time_;
- };
-
- explicit GCTracer(Heap* heap,
- const char* gc_reason,
- const char* collector_reason);
- ~GCTracer();
-
- // Sets the collector.
- void set_collector(GarbageCollector collector) { collector_ = collector; }
-
- // Sets the GC count.
- void set_gc_count(unsigned int count) { gc_count_ = count; }
-
- // Sets the full GC count.
- void set_full_gc_count(int count) { full_gc_count_ = count; }
-
- void increment_promoted_objects_size(int object_size) {
- promoted_objects_size_ += object_size;
- }
-
- void increment_nodes_died_in_new_space() {
- nodes_died_in_new_space_++;
- }
-
- void increment_nodes_copied_in_new_space() {
- nodes_copied_in_new_space_++;
- }
-
- void increment_nodes_promoted() {
- nodes_promoted_++;
- }
-
- private:
- // Returns a string matching the collector.
- const char* CollectorString();
-
- // Returns size of object in heap (in MB).
- inline double SizeOfHeapObjects();
-
- // Timestamp set in the constructor.
- double start_time_;
-
- // Size of objects in heap set in constructor.
- intptr_t start_object_size_;
-
- // Size of memory allocated from OS set in constructor.
- intptr_t start_memory_size_;
-
- // Type of collector.
- GarbageCollector collector_;
-
- // A count (including this one, e.g. the first collection is 1) of the
- // number of garbage collections.
- unsigned int gc_count_;
-
- // A count (including this one) of the number of full garbage collections.
- int full_gc_count_;
-
- // Amounts of time spent in different scopes during GC.
- double scopes_[Scope::kNumberOfScopes];
-
- // Total amount of space either wasted or contained in one of free lists
- // before the current GC.
- intptr_t in_free_list_or_wasted_before_gc_;
-
- // Difference between space used in the heap at the beginning of the current
- // collection and the end of the previous collection.
- intptr_t allocated_since_last_gc_;
-
- // Amount of time spent in mutator that is time elapsed between end of the
- // previous collection and the beginning of the current one.
- double spent_in_mutator_;
-
- // Size of objects promoted during the current collection.
- intptr_t promoted_objects_size_;
-
- // Number of died nodes in the new space.
- int nodes_died_in_new_space_;
-
- // Number of copied nodes to the new space.
- int nodes_copied_in_new_space_;
-
- // Number of promoted nodes to the old space.
- int nodes_promoted_;
-
- // Incremental marking steps counters.
- int steps_count_;
- double steps_took_;
- double longest_step_;
- int steps_count_since_last_gc_;
- double steps_took_since_last_gc_;
-
- Heap* heap_;
-
- const char* gc_reason_;
- const char* collector_reason_;
-};
-
-
-class RegExpResultsCache {
- public:
- enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
-
- // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
- // On success, the returned result is guaranteed to be a COW-array.
- static Object* Lookup(Heap* heap,
- String* key_string,
- Object* key_pattern,
- ResultsCacheType type);
- // Attempt to add value_array to the cache specified by type. On success,
- // value_array is turned into a COW-array.
- static void Enter(Heap* heap,
- String* key_string,
- Object* key_pattern,
- FixedArray* value_array,
- ResultsCacheType type);
- static void Clear(FixedArray* cache);
- static const int kRegExpResultsCacheSize = 0x100;
-
- private:
- static const int kArrayEntriesPerCacheEntry = 4;
- static const int kStringOffset = 0;
- static const int kPatternOffset = 1;
- static const int kArrayOffset = 2;
-};
-
-
-class TranscendentalCache {
- public:
- enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};
- static const int kTranscendentalTypeBits = 3;
- STATIC_ASSERT((1 << kTranscendentalTypeBits) >= kNumberOfCaches);
-
- // Returns a heap number with f(input), where f is a math function specified
- // by the 'type' argument.
- MUST_USE_RESULT inline MaybeObject* Get(Type type, double input);
-
- // The cache contains raw Object pointers. This method disposes of
- // them before a garbage collection.
- void Clear();
-
- private:
- class SubCache {
- static const int kCacheSize = 512;
-
- explicit SubCache(Type t);
-
- MUST_USE_RESULT inline MaybeObject* Get(double input);
-
- inline double Calculate(double input);
-
- struct Element {
- uint32_t in[2];
- Object* output;
- };
-
- union Converter {
- double dbl;
- uint32_t integers[2];
- };
-
- inline static int Hash(const Converter& c) {
- uint32_t hash = (c.integers[0] ^ c.integers[1]);
- hash ^= static_cast<int32_t>(hash) >> 16;
- hash ^= static_cast<int32_t>(hash) >> 8;
- return (hash & (kCacheSize - 1));
- }
-
- Element elements_[kCacheSize];
- Type type_;
- Isolate* isolate_;
-
- // Allow access to the caches_ array as an ExternalReference.
- friend class ExternalReference;
- // Inline implementation of the cache.
- friend class TranscendentalCacheStub;
- // For evaluating value.
- friend class TranscendentalCache;
-
- DISALLOW_COPY_AND_ASSIGN(SubCache);
- };
-
- TranscendentalCache() {
- for (int i = 0; i < kNumberOfCaches; ++i) caches_[i] = NULL;
- }
-
- // Used to create an external reference.
- inline Address cache_array_address();
-
- // Instantiation
- friend class Isolate;
- // Inline implementation of the caching.
- friend class TranscendentalCacheStub;
- // Allow access to the caches_ array as an ExternalReference.
- friend class ExternalReference;
-
- SubCache* caches_[kNumberOfCaches];
- DISALLOW_COPY_AND_ASSIGN(TranscendentalCache);
-};
-
-
-// Abstract base class for checking whether a weak object should be retained.
-class WeakObjectRetainer {
- public:
- virtual ~WeakObjectRetainer() {}
-
- // Return whether this object should be retained. If NULL is returned the
- // object has no references. Otherwise the address of the retained object
- // should be returned as in some GC situations the object has been moved.
- virtual Object* RetainAs(Object* object) = 0;
-};
-
-
-// Intrusive object marking uses least significant bit of
-// heap object's map word to mark objects.
-// Normally all map words have least significant bit set
-// because they contain tagged map pointer.
-// If the bit is not set object is marked.
-// All objects should be unmarked before resuming
-// JavaScript execution.
-class IntrusiveMarking {
- public:
- static bool IsMarked(HeapObject* object) {
- return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
- }
-
- static void ClearMark(HeapObject* object) {
- uintptr_t map_word = object->map_word().ToRawValue();
- object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
- ASSERT(!IsMarked(object));
- }
-
- static void SetMark(HeapObject* object) {
- uintptr_t map_word = object->map_word().ToRawValue();
- object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
- ASSERT(IsMarked(object));
- }
-
- static Map* MapOfMarkedObject(HeapObject* object) {
- uintptr_t map_word = object->map_word().ToRawValue();
- return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
- }
-
- static int SizeOfMarkedObject(HeapObject* object) {
- return object->SizeFromMap(MapOfMarkedObject(object));
- }
-
- private:
- static const uintptr_t kNotMarkedBit = 0x1;
- STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
-};
-
-
-#ifdef DEBUG
-// Helper class for tracing paths to a search target Object from all roots.
-// The TracePathFrom() method can be used to trace paths from a specific
-// object to the search target object.
-class PathTracer : public ObjectVisitor {
- public:
- enum WhatToFind {
- FIND_ALL, // Will find all matches.
- FIND_FIRST // Will stop the search after first match.
- };
-
- // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
- // after the first match. If FIND_ALL is specified, then tracing will be
- // done for all matches.
- PathTracer(Object* search_target,
- WhatToFind what_to_find,
- VisitMode visit_mode)
- : search_target_(search_target),
- found_target_(false),
- found_target_in_trace_(false),
- what_to_find_(what_to_find),
- visit_mode_(visit_mode),
- object_stack_(20),
- no_alloc() {}
-
- virtual void VisitPointers(Object** start, Object** end);
-
- void Reset();
- void TracePathFrom(Object** root);
-
- bool found() const { return found_target_; }
-
- static Object* const kAnyGlobalObject;
-
- protected:
- class MarkVisitor;
- class UnmarkVisitor;
-
- void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
- void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
- virtual void ProcessResults();
-
- // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
- static const int kMarkTag = 2;
-
- Object* search_target_;
- bool found_target_;
- bool found_target_in_trace_;
- WhatToFind what_to_find_;
- VisitMode visit_mode_;
- List<Object*> object_stack_;
-
- AssertNoAllocation no_alloc; // i.e. no gc allowed.
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
-};
-#endif // DEBUG
-
-} } // namespace v8::internal
-
-#endif // V8_HEAP_H_
diff --git a/src/3rdparty/v8/src/hydrogen-instructions.cc b/src/3rdparty/v8/src/hydrogen-instructions.cc
deleted file mode 100644
index 37bfb8f..0000000
--- a/src/3rdparty/v8/src/hydrogen-instructions.cc
+++ /dev/null
@@ -1,3277 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "double.h"
-#include "factory.h"
-#include "hydrogen.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- LInstruction* H##type::CompileToLithium(LChunkBuilder* builder) { \
- return builder->Do##type(this); \
- }
-HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-
-const char* Representation::Mnemonic() const {
- switch (kind_) {
- case kNone: return "v";
- case kTagged: return "t";
- case kDouble: return "d";
- case kInteger32: return "i";
- case kExternal: return "x";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-int HValue::LoopWeight() const {
- const int w = FLAG_loop_weight;
- static const int weights[] = { 1, w, w*w, w*w*w, w*w*w*w };
- return weights[Min(block()->LoopNestingDepth(),
- static_cast<int>(ARRAY_SIZE(weights)-1))];
-}
-
-
-Isolate* HValue::isolate() const {
- ASSERT(block() != NULL);
- return block()->graph()->isolate();
-}
-
-
-void HValue::AssumeRepresentation(Representation r) {
- if (CheckFlag(kFlexibleRepresentation)) {
- ChangeRepresentation(r);
- // The representation of the value is dictated by type feedback and
- // will not be changed later.
- ClearFlag(kFlexibleRepresentation);
- }
-}
-
-
-void HValue::InferRepresentation(HInferRepresentation* h_infer) {
- ASSERT(CheckFlag(kFlexibleRepresentation));
- Representation new_rep = RepresentationFromInputs();
- UpdateRepresentation(new_rep, h_infer, "inputs");
- new_rep = RepresentationFromUses();
- UpdateRepresentation(new_rep, h_infer, "uses");
-}
-
-
-Representation HValue::RepresentationFromUses() {
- if (HasNoUses()) return Representation::None();
-
- // Array of use counts for each representation.
- int use_count[Representation::kNumRepresentations] = { 0 };
-
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- Representation rep = use->observed_input_representation(it.index());
- if (rep.IsNone()) continue;
- if (FLAG_trace_representation) {
- PrintF("#%d %s is used by #%d %s as %s%s\n",
- id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
- (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
- }
- use_count[rep.kind()] += use->LoopWeight();
- }
- if (IsPhi()) HPhi::cast(this)->AddIndirectUsesTo(&use_count[0]);
- int tagged_count = use_count[Representation::kTagged];
- int double_count = use_count[Representation::kDouble];
- int int32_count = use_count[Representation::kInteger32];
-
- if (tagged_count > 0) return Representation::Tagged();
- if (double_count > 0) return Representation::Double();
- if (int32_count > 0) return Representation::Integer32();
-
- return Representation::None();
-}
-
-
-void HValue::UpdateRepresentation(Representation new_rep,
- HInferRepresentation* h_infer,
- const char* reason) {
- Representation r = representation();
- if (new_rep.is_more_general_than(r)) {
- // When an HConstant is marked "not convertible to integer", then
- // never try to represent it as an integer.
- if (new_rep.IsInteger32() && !IsConvertibleToInteger()) {
- new_rep = Representation::Tagged();
- if (FLAG_trace_representation) {
- PrintF("Changing #%d %s representation %s -> %s because it's NCTI"
- " (%s want i)\n",
- id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
- }
- } else {
- if (FLAG_trace_representation) {
- PrintF("Changing #%d %s representation %s -> %s based on %s\n",
- id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
- }
- }
- ChangeRepresentation(new_rep);
- AddDependantsToWorklist(h_infer);
- }
-}
-
-
-void HValue::AddDependantsToWorklist(HInferRepresentation* h_infer) {
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- h_infer->AddToWorklist(it.value());
- }
- for (int i = 0; i < OperandCount(); ++i) {
- h_infer->AddToWorklist(OperandAt(i));
- }
-}
-
-
-static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
- if (result > kMaxInt) {
- *overflow = true;
- return kMaxInt;
- }
- if (result < kMinInt) {
- *overflow = true;
- return kMinInt;
- }
- return static_cast<int32_t>(result);
-}
-
-
-static int32_t AddWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
- int64_t result = static_cast<int64_t>(a) + static_cast<int64_t>(b);
- return ConvertAndSetOverflow(result, overflow);
-}
-
-
-static int32_t SubWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
- int64_t result = static_cast<int64_t>(a) - static_cast<int64_t>(b);
- return ConvertAndSetOverflow(result, overflow);
-}
-
-
-static int32_t MulWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
- int64_t result = static_cast<int64_t>(a) * static_cast<int64_t>(b);
- return ConvertAndSetOverflow(result, overflow);
-}
-
-
-int32_t Range::Mask() const {
- if (lower_ == upper_) return lower_;
- if (lower_ >= 0) {
- int32_t res = 1;
- while (res < upper_) {
- res = (res << 1) | 1;
- }
- return res;
- }
- return 0xffffffff;
-}
-
-
-void Range::AddConstant(int32_t value) {
- if (value == 0) return;
- bool may_overflow = false; // Overflow is ignored here.
- lower_ = AddWithoutOverflow(lower_, value, &may_overflow);
- upper_ = AddWithoutOverflow(upper_, value, &may_overflow);
-#ifdef DEBUG
- Verify();
-#endif
-}
-
-
-void Range::Intersect(Range* other) {
- upper_ = Min(upper_, other->upper_);
- lower_ = Max(lower_, other->lower_);
- bool b = CanBeMinusZero() && other->CanBeMinusZero();
- set_can_be_minus_zero(b);
-}
-
-
-void Range::Union(Range* other) {
- upper_ = Max(upper_, other->upper_);
- lower_ = Min(lower_, other->lower_);
- bool b = CanBeMinusZero() || other->CanBeMinusZero();
- set_can_be_minus_zero(b);
-}
-
-
-void Range::CombinedMax(Range* other) {
- upper_ = Max(upper_, other->upper_);
- lower_ = Max(lower_, other->lower_);
- set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
-}
-
-
-void Range::CombinedMin(Range* other) {
- upper_ = Min(upper_, other->upper_);
- lower_ = Min(lower_, other->lower_);
- set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
-}
-
-
-void Range::Sar(int32_t value) {
- int32_t bits = value & 0x1F;
- lower_ = lower_ >> bits;
- upper_ = upper_ >> bits;
- set_can_be_minus_zero(false);
-}
-
-
-void Range::Shl(int32_t value) {
- int32_t bits = value & 0x1F;
- int old_lower = lower_;
- int old_upper = upper_;
- lower_ = lower_ << bits;
- upper_ = upper_ << bits;
- if (old_lower != lower_ >> bits || old_upper != upper_ >> bits) {
- upper_ = kMaxInt;
- lower_ = kMinInt;
- }
- set_can_be_minus_zero(false);
-}
-
-
-bool Range::AddAndCheckOverflow(Range* other) {
- bool may_overflow = false;
- lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
- upper_ = AddWithoutOverflow(upper_, other->upper(), &may_overflow);
- KeepOrder();
-#ifdef DEBUG
- Verify();
-#endif
- return may_overflow;
-}
-
-
-bool Range::SubAndCheckOverflow(Range* other) {
- bool may_overflow = false;
- lower_ = SubWithoutOverflow(lower_, other->upper(), &may_overflow);
- upper_ = SubWithoutOverflow(upper_, other->lower(), &may_overflow);
- KeepOrder();
-#ifdef DEBUG
- Verify();
-#endif
- return may_overflow;
-}
-
-
-void Range::KeepOrder() {
- if (lower_ > upper_) {
- int32_t tmp = lower_;
- lower_ = upper_;
- upper_ = tmp;
- }
-}
-
-
-#ifdef DEBUG
-void Range::Verify() const {
- ASSERT(lower_ <= upper_);
-}
-#endif
-
-
-bool Range::MulAndCheckOverflow(Range* other) {
- bool may_overflow = false;
- int v1 = MulWithoutOverflow(lower_, other->lower(), &may_overflow);
- int v2 = MulWithoutOverflow(lower_, other->upper(), &may_overflow);
- int v3 = MulWithoutOverflow(upper_, other->lower(), &may_overflow);
- int v4 = MulWithoutOverflow(upper_, other->upper(), &may_overflow);
- lower_ = Min(Min(v1, v2), Min(v3, v4));
- upper_ = Max(Max(v1, v2), Max(v3, v4));
-#ifdef DEBUG
- Verify();
-#endif
- return may_overflow;
-}
-
-
-const char* HType::ToString() {
- switch (type_) {
- case kTagged: return "tagged";
- case kTaggedPrimitive: return "primitive";
- case kTaggedNumber: return "number";
- case kSmi: return "smi";
- case kHeapNumber: return "heap-number";
- case kString: return "string";
- case kBoolean: return "boolean";
- case kNonPrimitive: return "non-primitive";
- case kJSArray: return "array";
- case kJSObject: return "object";
- case kUninitialized: return "uninitialized";
- }
- UNREACHABLE();
- return "Unreachable code";
-}
-
-
-HType HType::TypeFromValue(Isolate* isolate, Handle<Object> value) {
- // Handle dereferencing is safe here: an object's type as checked below
- // never changes.
- AllowHandleDereference allow_handle_deref(isolate);
-
- HType result = HType::Tagged();
- if (value->IsSmi()) {
- result = HType::Smi();
- } else if (value->IsHeapNumber()) {
- result = HType::HeapNumber();
- } else if (value->IsString()) {
- result = HType::String();
- } else if (value->IsBoolean()) {
- result = HType::Boolean();
- } else if (value->IsJSObject()) {
- result = HType::JSObject();
- } else if (value->IsJSArray()) {
- result = HType::JSArray();
- }
- return result;
-}
-
-
-bool HValue::Dominates(HValue* dominator, HValue* dominated) {
- if (dominator->block() != dominated->block()) {
- // If they are in different blocks we can use the dominance relation
- // between the blocks.
- return dominator->block()->Dominates(dominated->block());
- } else {
- // Otherwise we must see which instruction comes first, considering
- // that phis always precede regular instructions.
- if (dominator->IsInstruction()) {
- if (dominated->IsInstruction()) {
- for (HInstruction* next = HInstruction::cast(dominator)->next();
- next != NULL;
- next = next->next()) {
- if (next == dominated) return true;
- }
- return false;
- } else if (dominated->IsPhi()) {
- return false;
- } else {
- UNREACHABLE();
- }
- } else if (dominator->IsPhi()) {
- if (dominated->IsInstruction()) {
- return true;
- } else {
- // We cannot compare which phi comes first.
- UNREACHABLE();
- }
- } else {
- UNREACHABLE();
- }
- return false;
- }
-}
-
-
-bool HValue::TestDominanceUsingProcessedFlag(HValue* dominator,
- HValue* dominated) {
- if (dominator->block() != dominated->block()) {
- return dominator->block()->Dominates(dominated->block());
- } else {
- // If both arguments are in the same block we check if dominator is a phi
- // or if dominated has not already been processed: in either case we know
- // that dominator precedes dominated.
- return dominator->IsPhi() || !dominated->CheckFlag(kIDefsProcessingDone);
- }
-}
-
-
-bool HValue::IsDefinedAfter(HBasicBlock* other) const {
- return block()->block_id() > other->block_id();
-}
-
-
-HUseListNode* HUseListNode::tail() {
- // Skip and remove dead items in the use list.
- while (tail_ != NULL && tail_->value()->CheckFlag(HValue::kIsDead)) {
- tail_ = tail_->tail_;
- }
- return tail_;
-}
-
-
-bool HValue::CheckUsesForFlag(Flag f) {
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- if (it.value()->IsSimulate()) continue;
- if (!it.value()->CheckFlag(f)) return false;
- }
- return true;
-}
-
-
-HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
- Advance();
-}
-
-
-void HUseIterator::Advance() {
- current_ = next_;
- if (current_ != NULL) {
- next_ = current_->tail();
- value_ = current_->value();
- index_ = current_->index();
- }
-}
-
-
-int HValue::UseCount() const {
- int count = 0;
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) ++count;
- return count;
-}
-
-
-HUseListNode* HValue::RemoveUse(HValue* value, int index) {
- HUseListNode* previous = NULL;
- HUseListNode* current = use_list_;
- while (current != NULL) {
- if (current->value() == value && current->index() == index) {
- if (previous == NULL) {
- use_list_ = current->tail();
- } else {
- previous->set_tail(current->tail());
- }
- break;
- }
-
- previous = current;
- current = current->tail();
- }
-
-#ifdef DEBUG
- // Do not reuse use list nodes in debug mode, zap them.
- if (current != NULL) {
- HUseListNode* temp =
- new(block()->zone())
- HUseListNode(current->value(), current->index(), NULL);
- current->Zap();
- current = temp;
- }
-#endif
- return current;
-}
-
-
-bool HValue::Equals(HValue* other) {
- if (other->opcode() != opcode()) return false;
- if (!other->representation().Equals(representation())) return false;
- if (!other->type_.Equals(type_)) return false;
- if (other->flags() != flags()) return false;
- if (OperandCount() != other->OperandCount()) return false;
- for (int i = 0; i < OperandCount(); ++i) {
- if (OperandAt(i)->id() != other->OperandAt(i)->id()) return false;
- }
- bool result = DataEquals(other);
- ASSERT(!result || Hashcode() == other->Hashcode());
- return result;
-}
-
-
-intptr_t HValue::Hashcode() {
- intptr_t result = opcode();
- int count = OperandCount();
- for (int i = 0; i < count; ++i) {
- result = result * 19 + OperandAt(i)->id() + (result >> 7);
- }
- return result;
-}
-
-
-const char* HValue::Mnemonic() const {
- switch (opcode()) {
-#define MAKE_CASE(type) case k##type: return #type;
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(MAKE_CASE)
-#undef MAKE_CASE
- case kPhi: return "Phi";
- default: return "";
- }
-}
-
-
-bool HValue::IsInteger32Constant() {
- return IsConstant() && HConstant::cast(this)->HasInteger32Value();
-}
-
-
-int32_t HValue::GetInteger32Constant() {
- return HConstant::cast(this)->Integer32Value();
-}
-
-
-void HValue::SetOperandAt(int index, HValue* value) {
- RegisterUse(index, value);
- InternalSetOperandAt(index, value);
-}
-
-
-void HValue::DeleteAndReplaceWith(HValue* other) {
- // We replace all uses first, so Delete can assert that there are none.
- if (other != NULL) ReplaceAllUsesWith(other);
- ASSERT(HasNoUses());
- Kill();
- DeleteFromGraph();
-}
-
-
-void HValue::ReplaceAllUsesWith(HValue* other) {
- while (use_list_ != NULL) {
- HUseListNode* list_node = use_list_;
- HValue* value = list_node->value();
- ASSERT(!value->block()->IsStartBlock());
- value->InternalSetOperandAt(list_node->index(), other);
- use_list_ = list_node->tail();
- list_node->set_tail(other->use_list_);
- other->use_list_ = list_node;
- }
-}
-
-
-void HValue::Kill() {
- // Instead of going through the entire use list of each operand, we only
- // check the first item in each use list and rely on the tail() method to
- // skip dead items, removing them lazily next time we traverse the list.
- SetFlag(kIsDead);
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* operand = OperandAt(i);
- if (operand == NULL) continue;
- HUseListNode* first = operand->use_list_;
- if (first != NULL && first->value() == this && first->index() == i) {
- operand->use_list_ = first->tail();
- }
- }
-}
-
-
-void HValue::SetBlock(HBasicBlock* block) {
- ASSERT(block_ == NULL || block == NULL);
- block_ = block;
- if (id_ == kNoNumber && block != NULL) {
- id_ = block->graph()->GetNextValueID(this);
- }
-}
-
-
-void HValue::PrintTypeTo(StringStream* stream) {
- if (!representation().IsTagged() || type().Equals(HType::Tagged())) return;
- stream->Add(" type[%s]", type().ToString());
-}
-
-
-void HValue::PrintRangeTo(StringStream* stream) {
- if (range() == NULL || range()->IsMostGeneric()) return;
- stream->Add(" range[%d,%d,m0=%d]",
- range()->lower(),
- range()->upper(),
- static_cast<int>(range()->CanBeMinusZero()));
-}
-
-
-void HValue::PrintChangesTo(StringStream* stream) {
- GVNFlagSet changes_flags = ChangesFlags();
- if (changes_flags.IsEmpty()) return;
- stream->Add(" changes[");
- if (changes_flags == AllSideEffectsFlagSet()) {
- stream->Add("*");
- } else {
- bool add_comma = false;
-#define PRINT_DO(type) \
- if (changes_flags.Contains(kChanges##type)) { \
- if (add_comma) stream->Add(","); \
- add_comma = true; \
- stream->Add(#type); \
- }
- GVN_TRACKED_FLAG_LIST(PRINT_DO);
- GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
-#undef PRINT_DO
- }
- stream->Add("]");
-}
-
-
-void HValue::PrintNameTo(StringStream* stream) {
- stream->Add("%s%d", representation_.Mnemonic(), id());
-}
-
-
-bool HValue::HasMonomorphicJSObjectType() {
- return !GetMonomorphicJSObjectMap().is_null();
-}
-
-
-bool HValue::UpdateInferredType() {
- HType type = CalculateInferredType();
- bool result = (!type.Equals(type_));
- type_ = type;
- return result;
-}
-
-
-void HValue::RegisterUse(int index, HValue* new_value) {
- HValue* old_value = OperandAt(index);
- if (old_value == new_value) return;
-
- HUseListNode* removed = NULL;
- if (old_value != NULL) {
- removed = old_value->RemoveUse(this, index);
- }
-
- if (new_value != NULL) {
- if (removed == NULL) {
- new_value->use_list_ = new(new_value->block()->zone()) HUseListNode(
- this, index, new_value->use_list_);
- } else {
- removed->set_tail(new_value->use_list_);
- new_value->use_list_ = removed;
- }
- }
-}
-
-
-void HValue::AddNewRange(Range* r, Zone* zone) {
- if (!HasRange()) ComputeInitialRange(zone);
- if (!HasRange()) range_ = new(zone) Range();
- ASSERT(HasRange());
- r->StackUpon(range_);
- range_ = r;
-}
-
-
-void HValue::RemoveLastAddedRange() {
- ASSERT(HasRange());
- ASSERT(range_->next() != NULL);
- range_ = range_->next();
-}
-
-
-void HValue::ComputeInitialRange(Zone* zone) {
- ASSERT(!HasRange());
- range_ = InferRange(zone);
- ASSERT(HasRange());
-}
-
-
-void HInstruction::PrintTo(StringStream* stream) {
- PrintMnemonicTo(stream);
- PrintDataTo(stream);
- PrintRangeTo(stream);
- PrintChangesTo(stream);
- PrintTypeTo(stream);
-}
-
-
-void HInstruction::PrintMnemonicTo(StringStream* stream) {
- stream->Add("%s ", Mnemonic());
-}
-
-
-void HInstruction::Unlink() {
- ASSERT(IsLinked());
- ASSERT(!IsControlInstruction()); // Must never move control instructions.
- ASSERT(!IsBlockEntry()); // Doesn't make sense to delete these.
- ASSERT(previous_ != NULL);
- previous_->next_ = next_;
- if (next_ == NULL) {
- ASSERT(block()->last() == this);
- block()->set_last(previous_);
- } else {
- next_->previous_ = previous_;
- }
- clear_block();
-}
-
-
-void HInstruction::InsertBefore(HInstruction* next) {
- ASSERT(!IsLinked());
- ASSERT(!next->IsBlockEntry());
- ASSERT(!IsControlInstruction());
- ASSERT(!next->block()->IsStartBlock());
- ASSERT(next->previous_ != NULL);
- HInstruction* prev = next->previous();
- prev->next_ = this;
- next->previous_ = this;
- next_ = next;
- previous_ = prev;
- SetBlock(next->block());
-}
-
-
-void HInstruction::InsertAfter(HInstruction* previous) {
- ASSERT(!IsLinked());
- ASSERT(!previous->IsControlInstruction());
- ASSERT(!IsControlInstruction() || previous->next_ == NULL);
- HBasicBlock* block = previous->block();
- // Never insert anything except constants into the start block after finishing
- // it.
- if (block->IsStartBlock() && block->IsFinished() && !IsConstant()) {
- ASSERT(block->end()->SecondSuccessor() == NULL);
- InsertAfter(block->end()->FirstSuccessor()->first());
- return;
- }
-
- // If we're inserting after an instruction with side-effects that is
- // followed by a simulate instruction, we need to insert after the
- // simulate instruction instead.
- HInstruction* next = previous->next_;
- if (previous->HasObservableSideEffects() && next != NULL) {
- ASSERT(next->IsSimulate());
- previous = next;
- next = previous->next_;
- }
-
- previous_ = previous;
- next_ = next;
- SetBlock(block);
- previous->next_ = this;
- if (next != NULL) next->previous_ = this;
- if (block->last() == previous) {
- block->set_last(this);
- }
-}
-
-
-#ifdef DEBUG
-void HInstruction::Verify() {
- // Verify that input operands are defined before use.
- HBasicBlock* cur_block = block();
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* other_operand = OperandAt(i);
- if (other_operand == NULL) continue;
- HBasicBlock* other_block = other_operand->block();
- if (cur_block == other_block) {
- if (!other_operand->IsPhi()) {
- HInstruction* cur = this->previous();
- while (cur != NULL) {
- if (cur == other_operand) break;
- cur = cur->previous();
- }
- // Must reach other operand in the same block!
- ASSERT(cur == other_operand);
- }
- } else {
- // If the following assert fires, you may have forgotten an
- // AddInstruction.
- ASSERT(other_block->Dominates(cur_block));
- }
- }
-
- // Verify that instructions that may have side-effects are followed
- // by a simulate instruction.
- if (HasObservableSideEffects() && !IsOsrEntry()) {
- ASSERT(next()->IsSimulate());
- }
-
- // Verify that instructions that can be eliminated by GVN have overridden
- // HValue::DataEquals. The default implementation is UNREACHABLE. We
- // don't actually care whether DataEquals returns true or false here.
- if (CheckFlag(kUseGVN)) DataEquals(this);
-
- // Verify that all uses are in the graph.
- for (HUseIterator use = uses(); !use.Done(); use.Advance()) {
- if (use.value()->IsInstruction()) {
- ASSERT(HInstruction::cast(use.value())->IsLinked());
- }
- }
-}
-#endif
-
-
-HNumericConstraint* HNumericConstraint::AddToGraph(
- HValue* constrained_value,
- NumericRelation relation,
- HValue* related_value,
- HInstruction* insertion_point) {
- if (insertion_point == NULL) {
- if (constrained_value->IsInstruction()) {
- insertion_point = HInstruction::cast(constrained_value);
- } else if (constrained_value->IsPhi()) {
- insertion_point = constrained_value->block()->first();
- } else {
- UNREACHABLE();
- }
- }
- HNumericConstraint* result =
- new(insertion_point->block()->zone()) HNumericConstraint(
- constrained_value, relation, related_value);
- result->InsertAfter(insertion_point);
- return result;
-}
-
-
-void HNumericConstraint::PrintDataTo(StringStream* stream) {
- stream->Add("(");
- constrained_value()->PrintNameTo(stream);
- stream->Add(" %s ", relation().Mnemonic());
- related_value()->PrintNameTo(stream);
- stream->Add(")");
-}
-
-
-HInductionVariableAnnotation* HInductionVariableAnnotation::AddToGraph(
- HPhi* phi,
- NumericRelation relation,
- int operand_index) {
- HInductionVariableAnnotation* result =
- new(phi->block()->zone()) HInductionVariableAnnotation(phi, relation,
- operand_index);
- result->InsertAfter(phi->block()->first());
- return result;
-}
-
-
-void HInductionVariableAnnotation::PrintDataTo(StringStream* stream) {
- stream->Add("(");
- RedefinedOperand()->PrintNameTo(stream);
- stream->Add(" %s ", relation().Mnemonic());
- induction_base()->PrintNameTo(stream);
- stream->Add(")");
-}
-
-
-void HDummyUse::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
-}
-
-
-void HUnaryCall::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" ");
- stream->Add("#%d", argument_count());
-}
-
-
-void HBinaryCall::PrintDataTo(StringStream* stream) {
- first()->PrintNameTo(stream);
- stream->Add(" ");
- second()->PrintNameTo(stream);
- stream->Add(" ");
- stream->Add("#%d", argument_count());
-}
-
-
-void HBoundsCheck::AddInformativeDefinitions() {
- // TODO(mmassi): Executing this code during AddInformativeDefinitions
- // is a hack. Move it to some other HPhase.
- if (index()->IsRelationTrue(NumericRelation::Ge(),
- block()->graph()->GetConstant0()) &&
- index()->IsRelationTrue(NumericRelation::Lt(), length())) {
- set_skip_check(true);
- }
-}
-
-
-bool HBoundsCheck::IsRelationTrueInternal(NumericRelation relation,
- HValue* related_value) {
- if (related_value == length()) {
- // A HBoundsCheck is smaller than the length it compared against.
- return NumericRelation::Lt().Implies(relation);
- } else if (related_value == block()->graph()->GetConstant0()) {
- // A HBoundsCheck is greater than or equal to zero.
- return NumericRelation::Ge().Implies(relation);
- } else {
- return false;
- }
-}
-
-
-void HBoundsCheck::PrintDataTo(StringStream* stream) {
- index()->PrintNameTo(stream);
- stream->Add(" ");
- length()->PrintNameTo(stream);
- if (skip_check()) {
- stream->Add(" [DISABLED]");
- }
-}
-
-
-void HBoundsCheck::InferRepresentation(HInferRepresentation* h_infer) {
- ASSERT(CheckFlag(kFlexibleRepresentation));
- Representation r;
- if (key_mode_ == DONT_ALLOW_SMI_KEY ||
- !length()->representation().IsTagged()) {
- r = Representation::Integer32();
- } else if (index()->representation().IsTagged() ||
- (index()->ActualValue()->IsConstant() &&
- HConstant::cast(index()->ActualValue())->HasSmiValue())) {
- // If the index is tagged, or a constant that holds a Smi, allow the length
- // to be tagged, since it is usually already tagged from loading it out of
- // the length field of a JSArray. This allows for direct comparison without
- // untagging.
- r = Representation::Tagged();
- } else {
- r = Representation::Integer32();
- }
- UpdateRepresentation(r, h_infer, "boundscheck");
-}
-
-
-void HCallConstantFunction::PrintDataTo(StringStream* stream) {
- if (IsApplyFunction()) {
- stream->Add("optimized apply ");
- } else {
- stream->Add("%o ", function()->shared()->DebugName());
- }
- stream->Add("#%d", argument_count());
-}
-
-
-void HCallNamed::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", *name());
- HUnaryCall::PrintDataTo(stream);
-}
-
-
-void HGlobalObject::PrintDataTo(StringStream* stream) {
- stream->Add("qml_global: %s ", qml_global()?"true":"false");
- HUnaryOperation::PrintDataTo(stream);
-}
-
-void HCallGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", *name());
- HUnaryCall::PrintDataTo(stream);
-}
-
-
-void HCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", target()->shared()->DebugName());
- stream->Add("#%d", argument_count());
-}
-
-
-void HCallRuntime::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", *name());
- stream->Add("#%d", argument_count());
-}
-
-
-void HClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("class_of_test(");
- value()->PrintNameTo(stream);
- stream->Add(", \"%o\")", *class_name());
-}
-
-
-void HWrapReceiver::PrintDataTo(StringStream* stream) {
- receiver()->PrintNameTo(stream);
- stream->Add(" ");
- function()->PrintNameTo(stream);
-}
-
-
-void HAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintNameTo(stream);
- stream->Add("[");
- index()->PrintNameTo(stream);
- stream->Add("], length ");
- length()->PrintNameTo(stream);
-}
-
-
-void HControlInstruction::PrintDataTo(StringStream* stream) {
- stream->Add(" goto (");
- bool first_block = true;
- for (HSuccessorIterator it(this); !it.Done(); it.Advance()) {
- stream->Add(first_block ? "B%d" : ", B%d", it.Current()->block_id());
- first_block = false;
- }
- stream->Add(")");
-}
-
-
-void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- HControlInstruction::PrintDataTo(stream);
-}
-
-
-void HIsNilAndBranch::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- HControlInstruction::PrintDataTo(stream);
-}
-
-
-void HReturn::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
-}
-
-
-Representation HBranch::observed_input_representation(int index) {
- static const ToBooleanStub::Types tagged_types(
- ToBooleanStub::UNDEFINED |
- ToBooleanStub::NULL_TYPE |
- ToBooleanStub::SPEC_OBJECT |
- ToBooleanStub::STRING);
- if (expected_input_types_.ContainsAnyOf(tagged_types)) {
- return Representation::Tagged();
- } else if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
- return Representation::Double();
- } else if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
- return Representation::Integer32();
- } else {
- return Representation::None();
- }
-}
-
-
-void HCompareMap::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" (%p)", *map());
- HControlInstruction::PrintDataTo(stream);
-}
-
-
-const char* HUnaryMathOperation::OpName() const {
- switch (op()) {
- case kMathFloor: return "floor";
- case kMathRound: return "round";
- case kMathCeil: return "ceil";
- case kMathAbs: return "abs";
- case kMathLog: return "log";
- case kMathSin: return "sin";
- case kMathCos: return "cos";
- case kMathTan: return "tan";
- case kMathASin: return "asin";
- case kMathACos: return "acos";
- case kMathATan: return "atan";
- case kMathExp: return "exp";
- case kMathSqrt: return "sqrt";
- default: break;
- }
- return "(unknown operation)";
-}
-
-
-void HUnaryMathOperation::PrintDataTo(StringStream* stream) {
- const char* name = OpName();
- stream->Add("%s ", name);
- value()->PrintNameTo(stream);
-}
-
-
-void HUnaryOperation::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
-}
-
-
-void HHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- switch (from_) {
- case FIRST_JS_RECEIVER_TYPE:
- if (to_ == LAST_TYPE) stream->Add(" spec_object");
- break;
- case JS_REGEXP_TYPE:
- if (to_ == JS_REGEXP_TYPE) stream->Add(" reg_exp");
- break;
- case JS_ARRAY_TYPE:
- if (to_ == JS_ARRAY_TYPE) stream->Add(" array");
- break;
- case JS_FUNCTION_TYPE:
- if (to_ == JS_FUNCTION_TYPE) stream->Add(" function");
- break;
- default:
- break;
- }
-}
-
-
-void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" == %o", *type_literal_);
- HControlInstruction::PrintDataTo(stream);
-}
-
-
-void HCheckMapValue::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" ");
- map()->PrintNameTo(stream);
-}
-
-
-void HForInPrepareMap::PrintDataTo(StringStream* stream) {
- enumerable()->PrintNameTo(stream);
-}
-
-
-void HForInCacheArray::PrintDataTo(StringStream* stream) {
- enumerable()->PrintNameTo(stream);
- stream->Add(" ");
- map()->PrintNameTo(stream);
- stream->Add("[%d]", idx_);
-}
-
-
-void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add(" ");
- index()->PrintNameTo(stream);
-}
-
-
-HValue* HBitwise::Canonicalize() {
- if (!representation().IsInteger32()) return this;
- // If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
- int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0;
- if (left()->IsConstant() &&
- HConstant::cast(left())->HasInteger32Value() &&
- HConstant::cast(left())->Integer32Value() == nop_constant &&
- !right()->CheckFlag(kUint32)) {
- return right();
- }
- if (right()->IsConstant() &&
- HConstant::cast(right())->HasInteger32Value() &&
- HConstant::cast(right())->Integer32Value() == nop_constant &&
- !left()->CheckFlag(kUint32)) {
- return left();
- }
- return this;
-}
-
-
-HValue* HBitNot::Canonicalize() {
- // Optimize ~~x, a common pattern used for ToInt32(x).
- if (value()->IsBitNot()) {
- HValue* result = HBitNot::cast(value())->value();
- ASSERT(result->representation().IsInteger32());
- if (!result->CheckFlag(kUint32)) {
- return result;
- }
- }
- return this;
-}
-
-
-HValue* HAdd::Canonicalize() {
- if (!representation().IsInteger32()) return this;
- if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);
- return this;
-}
-
-
-HValue* HSub::Canonicalize() {
- if (!representation().IsInteger32()) return this;
- if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);
- return this;
-}
-
-
-HValue* HChange::Canonicalize() {
- return (from().Equals(to())) ? value() : this;
-}
-
-
-HValue* HWrapReceiver::Canonicalize() {
- if (HasNoUses()) return NULL;
- if (receiver()->type().IsJSObject()) {
- return receiver();
- }
- return this;
-}
-
-
-void HTypeof::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
-}
-
-
-void HForceRepresentation::PrintDataTo(StringStream* stream) {
- stream->Add("%s ", representation().Mnemonic());
- value()->PrintNameTo(stream);
-}
-
-
-void HChange::PrintDataTo(StringStream* stream) {
- HUnaryOperation::PrintDataTo(stream);
- stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
-
- if (CanTruncateToInt32()) stream->Add(" truncating-int32");
- if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
- if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined");
-}
-
-
-void HJSArrayLength::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- if (HasTypeCheck()) {
- stream->Add(" ");
- typecheck()->PrintNameTo(stream);
- }
-}
-
-
-HValue* HUnaryMathOperation::Canonicalize() {
- if (op() == kMathFloor) {
- // If the input is integer32 then we replace the floor instruction
- // with its input. This happens before the representation changes are
- // introduced.
- if (value()->representation().IsInteger32()) return value();
-
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \
- defined(V8_TARGET_ARCH_X64)
- if (value()->IsDiv() && (value()->UseCount() == 1)) {
- // TODO(2038): Implement this optimization for non ARM architectures.
- HDiv* hdiv = HDiv::cast(value());
- HValue* left = hdiv->left();
- HValue* right = hdiv->right();
- // Try to simplify left and right values of the division.
- HValue* new_left =
- LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(left);
- HValue* new_right =
- LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(right);
-
- // Return if left or right are not optimizable.
- if ((new_left == NULL) || (new_right == NULL)) return this;
-
- // Insert the new values in the graph.
- if (new_left->IsInstruction() &&
- !HInstruction::cast(new_left)->IsLinked()) {
- HInstruction::cast(new_left)->InsertBefore(this);
- }
- if (new_right->IsInstruction() &&
- !HInstruction::cast(new_right)->IsLinked()) {
- HInstruction::cast(new_right)->InsertBefore(this);
- }
- HMathFloorOfDiv* instr = new(block()->zone()) HMathFloorOfDiv(context(),
- new_left,
- new_right);
- // Replace this HMathFloor instruction by the new HMathFloorOfDiv.
- instr->InsertBefore(this);
- ReplaceAllUsesWith(instr);
- Kill();
- // We know the division had no other uses than this HMathFloor. Delete it.
- // Also delete the arguments of the division if they are not used any
- // more.
- hdiv->DeleteAndReplaceWith(NULL);
- ASSERT(left->IsChange() || left->IsConstant());
- ASSERT(right->IsChange() || right->IsConstant());
- if (left->HasNoUses()) left->DeleteAndReplaceWith(NULL);
- if (right->HasNoUses()) right->DeleteAndReplaceWith(NULL);
-
- // Return NULL to remove this instruction from the graph.
- return NULL;
- }
-#endif // V8_TARGET_ARCH_ARM
- }
- return this;
-}
-
-
-HValue* HCheckInstanceType::Canonicalize() {
- if (check_ == IS_STRING &&
- !value()->type().IsUninitialized() &&
- value()->type().IsString()) {
- return NULL;
- }
-
- if (check_ == IS_INTERNALIZED_STRING && value()->IsConstant()) {
- // Dereferencing is safe here:
- // an internalized string cannot become non-internalized.
- AllowHandleDereference allow_handle_deref(isolate());
- if (HConstant::cast(value())->handle()->IsInternalizedString()) return NULL;
- }
- return this;
-}
-
-
-void HCheckInstanceType::GetCheckInterval(InstanceType* first,
- InstanceType* last) {
- ASSERT(is_interval_check());
- switch (check_) {
- case IS_SPEC_OBJECT:
- *first = FIRST_SPEC_OBJECT_TYPE;
- *last = LAST_SPEC_OBJECT_TYPE;
- return;
- case IS_JS_ARRAY:
- *first = *last = JS_ARRAY_TYPE;
- return;
- default:
- UNREACHABLE();
- }
-}
-
-
-void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
- ASSERT(!is_interval_check());
- switch (check_) {
- case IS_STRING:
- *mask = kIsNotStringMask;
- *tag = kStringTag;
- return;
- case IS_INTERNALIZED_STRING:
- *mask = kIsInternalizedMask;
- *tag = kInternalizedTag;
- return;
- default:
- UNREACHABLE();
- }
-}
-
-
-void HCheckMaps::SetSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) {
- ASSERT(side_effect == kChangesMaps);
- // TODO(mstarzinger): For now we specialize on HStoreNamedField, but once
- // type information is rich enough we should generalize this to any HType
- // for which the map is known.
- if (HasNoUses() && dominator->IsStoreNamedField()) {
- HStoreNamedField* store = HStoreNamedField::cast(dominator);
- Handle<Map> map = store->transition();
- if (map.is_null() || store->object() != value()) return;
- for (int i = 0; i < map_set()->length(); i++) {
- if (map.is_identical_to(map_set()->at(i))) {
- DeleteAndReplaceWith(NULL);
- return;
- }
- }
- }
-}
-
-
-void HLoadElements::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- if (HasTypeCheck()) {
- stream->Add(" ");
- typecheck()->PrintNameTo(stream);
- }
-}
-
-
-void HCheckMaps::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" [%p", *map_set()->first());
- for (int i = 1; i < map_set()->length(); ++i) {
- stream->Add(",%p", *map_set()->at(i));
- }
- stream->Add("]");
-}
-
-
-void HCheckFunction::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" %p", *target());
-}
-
-
-const char* HCheckInstanceType::GetCheckName() {
- switch (check_) {
- case IS_SPEC_OBJECT: return "object";
- case IS_JS_ARRAY: return "array";
- case IS_STRING: return "string";
- case IS_INTERNALIZED_STRING: return "internalized_string";
- }
- UNREACHABLE();
- return "";
-}
-
-void HCheckInstanceType::PrintDataTo(StringStream* stream) {
- stream->Add("%s ", GetCheckName());
- HUnaryOperation::PrintDataTo(stream);
-}
-
-
-void HCheckPrototypeMaps::PrintDataTo(StringStream* stream) {
- stream->Add("[receiver_prototype=%p,holder=%p]",
- *prototypes_.first(), *prototypes_.last());
-}
-
-
-void HCallStub::PrintDataTo(StringStream* stream) {
- stream->Add("%s ",
- CodeStub::MajorName(major_key_, false));
- HUnaryCall::PrintDataTo(stream);
-}
-
-
-void HInstanceOf::PrintDataTo(StringStream* stream) {
- left()->PrintNameTo(stream);
- stream->Add(" ");
- right()->PrintNameTo(stream);
- stream->Add(" ");
- context()->PrintNameTo(stream);
-}
-
-
-Range* HValue::InferRange(Zone* zone) {
- // Untagged integer32 cannot be -0, all other representations can.
- Range* result = new(zone) Range();
- result->set_can_be_minus_zero(!representation().IsInteger32());
- return result;
-}
-
-
-Range* HChange::InferRange(Zone* zone) {
- Range* input_range = value()->range();
- if (from().IsInteger32() &&
- to().IsTagged() &&
- !value()->CheckFlag(HInstruction::kUint32) &&
- input_range != NULL && input_range->IsInSmiRange()) {
- set_type(HType::Smi());
- }
- Range* result = (input_range != NULL)
- ? input_range->Copy(zone)
- : HValue::InferRange(zone);
- if (to().IsInteger32()) result->set_can_be_minus_zero(false);
- return result;
-}
-
-
-Range* HConstant::InferRange(Zone* zone) {
- if (has_int32_value_) {
- Range* result = new(zone) Range(int32_value_, int32_value_);
- result->set_can_be_minus_zero(false);
- return result;
- }
- return HValue::InferRange(zone);
-}
-
-
-Range* HPhi::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
- if (block()->IsLoopHeader()) {
- Range* range = new(zone) Range(kMinInt, kMaxInt);
- return range;
- } else {
- Range* range = OperandAt(0)->range()->Copy(zone);
- for (int i = 1; i < OperandCount(); ++i) {
- range->Union(OperandAt(i)->range());
- }
- return range;
- }
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-Range* HAdd::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
- Range* res = a->Copy(zone);
- if (!res->AddAndCheckOverflow(b)) {
- ClearFlag(kCanOverflow);
- }
- bool m0 = a->CanBeMinusZero() && b->CanBeMinusZero();
- res->set_can_be_minus_zero(m0);
- return res;
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-Range* HSub::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
- Range* res = a->Copy(zone);
- if (!res->SubAndCheckOverflow(b)) {
- ClearFlag(kCanOverflow);
- }
- res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
- return res;
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-Range* HMul::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
- Range* res = a->Copy(zone);
- if (!res->MulAndCheckOverflow(b)) {
- ClearFlag(kCanOverflow);
- }
- bool m0 = (a->CanBeZero() && b->CanBeNegative()) ||
- (a->CanBeNegative() && b->CanBeZero());
- res->set_can_be_minus_zero(m0);
- return res;
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-Range* HDiv::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
- Range* result = new(zone) Range();
- if (left()->range()->CanBeMinusZero()) {
- result->set_can_be_minus_zero(true);
- }
-
- if (left()->range()->CanBeZero() && right()->range()->CanBeNegative()) {
- result->set_can_be_minus_zero(true);
- }
-
- if (right()->range()->Includes(-1) && left()->range()->Includes(kMinInt)) {
- SetFlag(HValue::kCanOverflow);
- }
-
- if (!right()->range()->CanBeZero()) {
- ClearFlag(HValue::kCanBeDivByZero);
- }
- return result;
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-Range* HMod::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
- Range* a = left()->range();
- Range* result = new(zone) Range();
- if (a->CanBeMinusZero() || a->CanBeNegative()) {
- result->set_can_be_minus_zero(true);
- }
-
- if (right()->range()->Includes(-1) && left()->range()->Includes(kMinInt)) {
- SetFlag(HValue::kCanOverflow);
- }
-
- if (!right()->range()->CanBeZero()) {
- ClearFlag(HValue::kCanBeDivByZero);
- }
- return result;
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-void HPhi::AddInformativeDefinitions() {
- if (OperandCount() == 2) {
- // If one of the operands is an OSR block give up (this cannot be an
- // induction variable).
- if (OperandAt(0)->block()->is_osr_entry() ||
- OperandAt(1)->block()->is_osr_entry()) return;
-
- for (int operand_index = 0; operand_index < 2; operand_index++) {
- int other_operand_index = (operand_index + 1) % 2;
-
- static NumericRelation relations[] = {
- NumericRelation::Ge(),
- NumericRelation::Le()
- };
-
- // Check if this phi is an induction variable. If, e.g., we know that
- // its first input is greater than the phi itself, then that must be
- // the back edge, and the phi is always greater than its second input.
- for (int relation_index = 0; relation_index < 2; relation_index++) {
- if (OperandAt(operand_index)->IsRelationTrue(relations[relation_index],
- this)) {
- HInductionVariableAnnotation::AddToGraph(this,
- relations[relation_index],
- other_operand_index);
- }
- }
- }
- }
-}
-
-
-bool HPhi::IsRelationTrueInternal(NumericRelation relation, HValue* other) {
- if (CheckFlag(kNumericConstraintEvaluationInProgress)) return false;
-
- SetFlag(kNumericConstraintEvaluationInProgress);
- bool result = true;
- for (int i = 0; i < OperandCount(); i++) {
- // Skip OSR entry blocks
- if (OperandAt(i)->block()->is_osr_entry()) continue;
-
- if (!OperandAt(i)->IsRelationTrue(relation, other)) {
- result = false;
- break;
- }
- }
- ClearFlag(kNumericConstraintEvaluationInProgress);
-
- return result;
-}
-
-
-Range* HMathMinMax::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
- Range* res = a->Copy(zone);
- if (operation_ == kMathMax) {
- res->CombinedMax(b);
- } else {
- ASSERT(operation_ == kMathMin);
- res->CombinedMin(b);
- }
- return res;
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-void HPhi::PrintTo(StringStream* stream) {
- stream->Add("[");
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* value = OperandAt(i);
- stream->Add(" ");
- value->PrintNameTo(stream);
- stream->Add(" ");
- }
- stream->Add(" uses%d_%di_%dd_%dt",
- UseCount(),
- int32_non_phi_uses() + int32_indirect_uses(),
- double_non_phi_uses() + double_indirect_uses(),
- tagged_non_phi_uses() + tagged_indirect_uses());
- stream->Add("%s%s]",
- is_live() ? "_live" : "",
- IsConvertibleToInteger() ? "" : "_ncti");
-}
-
-
-void HPhi::AddInput(HValue* value) {
- inputs_.Add(NULL, value->block()->zone());
- SetOperandAt(OperandCount() - 1, value);
- // Mark phis that may have 'arguments' directly or indirectly as an operand.
- if (!CheckFlag(kIsArguments) && value->CheckFlag(kIsArguments)) {
- SetFlag(kIsArguments);
- }
-}
-
-
-bool HPhi::HasRealUses() {
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsPhi()) return true;
- }
- return false;
-}
-
-
-HValue* HPhi::GetRedundantReplacement() {
- HValue* candidate = NULL;
- int count = OperandCount();
- int position = 0;
- while (position < count && candidate == NULL) {
- HValue* current = OperandAt(position++);
- if (current != this) candidate = current;
- }
- while (position < count) {
- HValue* current = OperandAt(position++);
- if (current != this && current != candidate) return NULL;
- }
- ASSERT(candidate != this);
- return candidate;
-}
-
-
-void HPhi::DeleteFromGraph() {
- ASSERT(block() != NULL);
- block()->RemovePhi(this);
- ASSERT(block() == NULL);
-}
-
-
-void HPhi::InitRealUses(int phi_id) {
- // Initialize real uses.
- phi_id_ = phi_id;
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- HValue* value = it.value();
- if (!value->IsPhi()) {
- Representation rep = value->observed_input_representation(it.index());
- non_phi_uses_[rep.kind()] += value->LoopWeight();
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is used by real #%d %s as %s\n",
- id(), value->id(), value->Mnemonic(), rep.Mnemonic());
- }
- }
- }
-}
-
-
-void HPhi::AddNonPhiUsesFrom(HPhi* other) {
- if (FLAG_trace_representation) {
- PrintF("adding to #%d Phi uses of #%d Phi: i%d d%d t%d\n",
- id(), other->id(),
- other->non_phi_uses_[Representation::kInteger32],
- other->non_phi_uses_[Representation::kDouble],
- other->non_phi_uses_[Representation::kTagged]);
- }
-
- for (int i = 0; i < Representation::kNumRepresentations; i++) {
- indirect_uses_[i] += other->non_phi_uses_[i];
- }
-}
-
-
-void HPhi::AddIndirectUsesTo(int* dest) {
- for (int i = 0; i < Representation::kNumRepresentations; i++) {
- dest[i] += indirect_uses_[i];
- }
-}
-
-
-void HSimulate::MergeInto(HSimulate* other) {
- for (int i = 0; i < values_.length(); ++i) {
- HValue* value = values_[i];
- if (HasAssignedIndexAt(i)) {
- other->AddAssignedValue(GetAssignedIndexAt(i), value);
- } else {
- if (other->pop_count_ > 0) {
- other->pop_count_--;
- } else {
- other->AddPushedValue(value);
- }
- }
- }
- other->pop_count_ += pop_count();
-}
-
-
-void HSimulate::PrintDataTo(StringStream* stream) {
- stream->Add("id=%d", ast_id().ToInt());
- if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
- if (values_.length() > 0) {
- if (pop_count_ > 0) stream->Add(" /");
- for (int i = values_.length() - 1; i >= 0; --i) {
- if (i > 0) stream->Add(",");
- if (HasAssignedIndexAt(i)) {
- stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
- } else {
- stream->Add(" push ");
- }
- values_[i]->PrintNameTo(stream);
- }
- }
-}
-
-
-void HDeoptimize::PrintDataTo(StringStream* stream) {
- if (OperandCount() == 0) return;
- OperandAt(0)->PrintNameTo(stream);
- for (int i = 1; i < OperandCount(); ++i) {
- stream->Add(" ");
- OperandAt(i)->PrintNameTo(stream);
- }
-}
-
-
-void HEnterInlined::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name = function()->debug_name()->ToCString();
- stream->Add("%s, id=%d", *name, function()->id().ToInt());
-}
-
-
-static bool IsInteger32(double value) {
- double roundtrip_value = static_cast<double>(static_cast<int32_t>(value));
- return BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(value);
-}
-
-
-HConstant::HConstant(Handle<Object> handle, Representation r)
- : handle_(handle),
- has_int32_value_(false),
- has_double_value_(false) {
- // Dereferencing here is safe: the value of a number object does not change.
- AllowHandleDereference allow_handle_deref(Isolate::Current());
- if (handle_->IsNumber()) {
- double n = handle_->Number();
- has_int32_value_ = IsInteger32(n);
- int32_value_ = DoubleToInt32(n);
- double_value_ = n;
- has_double_value_ = true;
- }
- if (r.IsNone()) {
- if (has_int32_value_) {
- r = Representation::Integer32();
- } else if (has_double_value_) {
- r = Representation::Double();
- } else {
- r = Representation::Tagged();
- }
- }
- Initialize(r);
-}
-
-
-HConstant::HConstant(int32_t integer_value, Representation r)
- : has_int32_value_(true),
- has_double_value_(true),
- int32_value_(integer_value),
- double_value_(FastI2D(integer_value)) {
- Initialize(r);
-}
-
-
-HConstant::HConstant(double double_value, Representation r)
- : has_int32_value_(IsInteger32(double_value)),
- has_double_value_(true),
- int32_value_(DoubleToInt32(double_value)),
- double_value_(double_value) {
- Initialize(r);
-}
-
-
-void HConstant::Initialize(Representation r) {
- set_representation(r);
- SetFlag(kUseGVN);
- if (representation().IsInteger32()) {
- ClearGVNFlag(kDependsOnOsrEntries);
- }
-}
-
-
-HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
- if (r.IsInteger32() && !has_int32_value_) return NULL;
- if (r.IsDouble() && !has_double_value_) return NULL;
- if (handle_.is_null()) {
- ASSERT(has_int32_value_ || has_double_value_);
- if (has_int32_value_) return new(zone) HConstant(int32_value_, r);
- return new(zone) HConstant(double_value_, r);
- }
- return new(zone) HConstant(handle_, r);
-}
-
-
-HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const {
- if (has_int32_value_) {
- if (handle_.is_null()) {
- return new(zone) HConstant(int32_value_, Representation::Integer32());
- } else {
- // Re-use the existing Handle if possible.
- return new(zone) HConstant(handle_, Representation::Integer32());
- }
- } else if (has_double_value_) {
- return new(zone) HConstant(DoubleToInt32(double_value_),
- Representation::Integer32());
- } else {
- return NULL;
- }
-}
-
-
-bool HConstant::ToBoolean() {
- // Converts the constant's boolean value according to
- // ECMAScript section 9.2 ToBoolean conversion.
- if (HasInteger32Value()) return Integer32Value() != 0;
- if (HasDoubleValue()) {
- double v = DoubleValue();
- return v != 0 && !isnan(v);
- }
- // Dereferencing is safe: singletons do not change and strings are
- // immutable.
- AllowHandleDereference allow_handle_deref(isolate());
- if (handle_->IsTrue()) return true;
- if (handle_->IsFalse()) return false;
- if (handle_->IsUndefined()) return false;
- if (handle_->IsNull()) return false;
- if (handle_->IsString() && String::cast(*handle_)->length() == 0) {
- return false;
- }
- return true;
-}
-
-void HConstant::PrintDataTo(StringStream* stream) {
- if (has_int32_value_) {
- stream->Add("%d ", int32_value_);
- } else if (has_double_value_) {
- stream->Add("%f ", FmtElm(double_value_));
- } else {
- handle()->ShortPrint(stream);
- }
-}
-
-
-bool HArrayLiteral::IsCopyOnWrite() const {
- if (!boilerplate_object_->IsJSObject()) return false;
- return Handle<JSObject>::cast(boilerplate_object_)->elements()->map() ==
- HEAP->fixed_cow_array_map();
-}
-
-
-void HBinaryOperation::PrintDataTo(StringStream* stream) {
- left()->PrintNameTo(stream);
- stream->Add(" ");
- right()->PrintNameTo(stream);
- if (CheckFlag(kCanOverflow)) stream->Add(" !");
- if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
-}
-
-
-void HBinaryOperation::InferRepresentation(HInferRepresentation* h_infer) {
- ASSERT(CheckFlag(kFlexibleRepresentation));
- Representation new_rep = RepresentationFromInputs();
- UpdateRepresentation(new_rep, h_infer, "inputs");
- // When the operation has information about its own output type, don't look
- // at uses.
- if (!observed_output_representation_.IsNone()) return;
- new_rep = RepresentationFromUses();
- UpdateRepresentation(new_rep, h_infer, "uses");
-}
-
-
-Representation HBinaryOperation::RepresentationFromInputs() {
- // Determine the worst case of observed input representations and
- // the currently assumed output representation.
- Representation rep = representation();
- if (observed_output_representation_.is_more_general_than(rep)) {
- rep = observed_output_representation_;
- }
- for (int i = 1; i <= 2; ++i) {
- Representation input_rep = observed_input_representation(i);
- if (input_rep.is_more_general_than(rep)) rep = input_rep;
- }
- // If any of the actual input representation is more general than what we
- // have so far but not Tagged, use that representation instead.
- Representation left_rep = left()->representation();
- Representation right_rep = right()->representation();
-
- if (left_rep.is_more_general_than(rep) &&
- left()->CheckFlag(kFlexibleRepresentation)) {
- rep = left_rep;
- }
- if (right_rep.is_more_general_than(rep) &&
- right()->CheckFlag(kFlexibleRepresentation)) {
- rep = right_rep;
- }
- return rep;
-}
-
-
-void HBinaryOperation::AssumeRepresentation(Representation r) {
- set_observed_input_representation(r, r);
- HValue::AssumeRepresentation(r);
-}
-
-
-void HMathMinMax::InferRepresentation(HInferRepresentation* h_infer) {
- ASSERT(CheckFlag(kFlexibleRepresentation));
- Representation new_rep = RepresentationFromInputs();
- UpdateRepresentation(new_rep, h_infer, "inputs");
- // Do not care about uses.
-}
-
-
-Range* HBitwise::InferRange(Zone* zone) {
- if (op() == Token::BIT_XOR) return HValue::InferRange(zone);
- const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
- int32_t left_mask = (left()->range() != NULL)
- ? left()->range()->Mask()
- : kDefaultMask;
- int32_t right_mask = (right()->range() != NULL)
- ? right()->range()->Mask()
- : kDefaultMask;
- int32_t result_mask = (op() == Token::BIT_AND)
- ? left_mask & right_mask
- : left_mask | right_mask;
- return (result_mask >= 0)
- ? new(zone) Range(0, result_mask)
- : HValue::InferRange(zone);
-}
-
-
-Range* HSar::InferRange(Zone* zone) {
- if (right()->IsConstant()) {
- HConstant* c = HConstant::cast(right());
- if (c->HasInteger32Value()) {
- Range* result = (left()->range() != NULL)
- ? left()->range()->Copy(zone)
- : new(zone) Range();
- result->Sar(c->Integer32Value());
- result->set_can_be_minus_zero(false);
- return result;
- }
- }
- return HValue::InferRange(zone);
-}
-
-
-Range* HShr::InferRange(Zone* zone) {
- if (right()->IsConstant()) {
- HConstant* c = HConstant::cast(right());
- if (c->HasInteger32Value()) {
- int shift_count = c->Integer32Value() & 0x1f;
- if (left()->range()->CanBeNegative()) {
- // Only compute bounds if the result always fits into an int32.
- return (shift_count >= 1)
- ? new(zone) Range(0,
- static_cast<uint32_t>(0xffffffff) >> shift_count)
- : new(zone) Range();
- } else {
- // For positive inputs we can use the >> operator.
- Range* result = (left()->range() != NULL)
- ? left()->range()->Copy(zone)
- : new(zone) Range();
- result->Sar(c->Integer32Value());
- result->set_can_be_minus_zero(false);
- return result;
- }
- }
- }
- return HValue::InferRange(zone);
-}
-
-
-Range* HShl::InferRange(Zone* zone) {
- if (right()->IsConstant()) {
- HConstant* c = HConstant::cast(right());
- if (c->HasInteger32Value()) {
- Range* result = (left()->range() != NULL)
- ? left()->range()->Copy(zone)
- : new(zone) Range();
- result->Shl(c->Integer32Value());
- result->set_can_be_minus_zero(false);
- return result;
- }
- }
- return HValue::InferRange(zone);
-}
-
-
-Range* HLoadKeyed::InferRange(Zone* zone) {
- switch (elements_kind()) {
- case EXTERNAL_PIXEL_ELEMENTS:
- return new(zone) Range(0, 255);
- case EXTERNAL_BYTE_ELEMENTS:
- return new(zone) Range(-128, 127);
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return new(zone) Range(0, 255);
- case EXTERNAL_SHORT_ELEMENTS:
- return new(zone) Range(-32768, 32767);
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return new(zone) Range(0, 65535);
- default:
- return HValue::InferRange(zone);
- }
-}
-
-
-void HCompareGeneric::PrintDataTo(StringStream* stream) {
- stream->Add(Token::Name(token()));
- stream->Add(" ");
- HBinaryOperation::PrintDataTo(stream);
-}
-
-
-void HStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add(Token::Name(token()));
- stream->Add(" ");
- HControlInstruction::PrintDataTo(stream);
-}
-
-
-void HCompareIDAndBranch::AddInformativeDefinitions() {
- NumericRelation r = NumericRelation::FromToken(token());
- if (r.IsNone()) return;
-
- HNumericConstraint::AddToGraph(left(), r, right(), SuccessorAt(0)->first());
- HNumericConstraint::AddToGraph(
- left(), r.Negated(), right(), SuccessorAt(1)->first());
-}
-
-
-void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add(Token::Name(token()));
- stream->Add(" ");
- left()->PrintNameTo(stream);
- stream->Add(" ");
- right()->PrintNameTo(stream);
- HControlInstruction::PrintDataTo(stream);
-}
-
-
-void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
- left()->PrintNameTo(stream);
- stream->Add(" ");
- right()->PrintNameTo(stream);
- HControlInstruction::PrintDataTo(stream);
-}
-
-
-void HGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", SuccessorAt(0)->block_id());
-}
-
-
-void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
- Representation rep = Representation::None();
- Representation left_rep = left()->representation();
- Representation right_rep = right()->representation();
- bool observed_integers =
- observed_input_representation(0).IsInteger32() &&
- observed_input_representation(1).IsInteger32();
- bool inputs_are_not_doubles =
- !left_rep.IsDouble() && !right_rep.IsDouble();
- if (observed_integers && inputs_are_not_doubles) {
- rep = Representation::Integer32();
- } else {
- rep = Representation::Double();
- // According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, ===
- // and !=) have special handling of undefined, e.g. undefined == undefined
- // is 'true'. Relational comparisons have a different semantic, first
- // calling ToPrimitive() on their arguments. The standard Crankshaft
- // tagged-to-double conversion to ensure the HCompareIDAndBranch's inputs
- // are doubles caused 'undefined' to be converted to NaN. That's compatible
- // out-of-the box with ordered relational comparisons (<, >, <=,
- // >=). However, for equality comparisons (and for 'in' and 'instanceof'),
- // it is not consistent with the spec. For example, it would cause undefined
- // == undefined (should be true) to be evaluated as NaN == NaN
- // (false). Therefore, any comparisons other than ordered relational
- // comparisons must cause a deopt when one of their arguments is undefined.
- // See also v8:1434
- if (!Token::IsOrderedRelationalCompareOp(token_)) {
- SetFlag(kDeoptimizeOnUndefined);
- }
- }
- ChangeRepresentation(rep);
-}
-
-
-void HParameter::PrintDataTo(StringStream* stream) {
- stream->Add("%u", index());
-}
-
-
-void HLoadNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
-}
-
-
-// Returns true if an instance of this map can never find a property with this
-// name in its prototype chain. This means all prototypes up to the top are
-// fast and don't have the name in them. It would be good if we could optimize
-// polymorphic loads where the property is sometimes found in the prototype
-// chain.
-static bool PrototypeChainCanNeverResolve(
- Handle<Map> map, Handle<String> name) {
- Isolate* isolate = map->GetIsolate();
- Object* current = map->prototype();
- while (current != isolate->heap()->null_value()) {
- if (current->IsJSGlobalProxy() ||
- current->IsGlobalObject() ||
- !current->IsJSObject() ||
- JSObject::cast(current)->map()->has_named_interceptor() ||
- JSObject::cast(current)->IsAccessCheckNeeded() ||
- !JSObject::cast(current)->HasFastProperties()) {
- return false;
- }
-
- LookupResult lookup(isolate);
- Map* map = JSObject::cast(current)->map();
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsFound()) return false;
- if (!lookup.IsCacheable()) return false;
- current = JSObject::cast(current)->GetPrototype();
- }
- return true;
-}
-
-
-HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
- HValue* object,
- SmallMapList* types,
- Handle<String> name,
- Zone* zone)
- : types_(Min(types->length(), kMaxLoadPolymorphism), zone),
- name_(name),
- need_generic_(false) {
- SetOperandAt(0, context);
- SetOperandAt(1, object);
- set_representation(Representation::Tagged());
- SetGVNFlag(kDependsOnMaps);
- SmallMapList negative_lookups;
- for (int i = 0;
- i < types->length() && types_.length() < kMaxLoadPolymorphism;
- ++i) {
- Handle<Map> map = types->at(i);
- LookupResult lookup(map->GetIsolate());
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsFound()) {
- switch (lookup.type()) {
- case FIELD: {
- int index = lookup.GetLocalFieldIndexFromMap(*map);
- if (index < 0) {
- SetGVNFlag(kDependsOnInobjectFields);
- } else {
- SetGVNFlag(kDependsOnBackingStoreFields);
- }
- types_.Add(types->at(i), zone);
- break;
- }
- case CONSTANT_FUNCTION:
- types_.Add(types->at(i), zone);
- break;
- case CALLBACKS:
- break;
- case TRANSITION:
- case INTERCEPTOR:
- case NONEXISTENT:
- case NORMAL:
- case HANDLER:
- UNREACHABLE();
- break;
- }
- } else if (lookup.IsCacheable() &&
- // For dicts the lookup on the map will fail, but the object may
- // contain the property so we cannot generate a negative lookup
- // (which would just be a map check and return undefined).
- !map->is_dictionary_map() &&
- !map->has_named_interceptor() &&
- PrototypeChainCanNeverResolve(map, name)) {
- negative_lookups.Add(types->at(i), zone);
- }
- }
-
- bool need_generic =
- (types->length() != negative_lookups.length() + types_.length());
- if (!need_generic && FLAG_deoptimize_uncommon_cases) {
- SetFlag(kUseGVN);
- for (int i = 0; i < negative_lookups.length(); i++) {
- types_.Add(negative_lookups.at(i), zone);
- }
- } else {
- // We don't have an easy way to handle both a call (to the generic stub) and
- // a deopt in the same hydrogen instruction, so in this case we don't add
- // the negative lookups which can deopt - just let the generic stub handle
- // them.
- SetAllSideEffects();
- need_generic_ = true;
- }
-}
-
-
-bool HLoadNamedFieldPolymorphic::DataEquals(HValue* value) {
- HLoadNamedFieldPolymorphic* other = HLoadNamedFieldPolymorphic::cast(value);
- if (types_.length() != other->types()->length()) return false;
- if (!name_.is_identical_to(other->name())) return false;
- if (need_generic_ != other->need_generic_) return false;
- for (int i = 0; i < types_.length(); i++) {
- bool found = false;
- for (int j = 0; j < types_.length(); j++) {
- if (types_.at(j).is_identical_to(other->types()->at(i))) {
- found = true;
- break;
- }
- }
- if (!found) return false;
- }
- return true;
-}
-
-
-void HLoadNamedFieldPolymorphic::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
-}
-
-
-void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
-}
-
-
-void HLoadKeyed::PrintDataTo(StringStream* stream) {
- if (!is_external()) {
- elements()->PrintNameTo(stream);
- } else {
- ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
- elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- elements()->PrintNameTo(stream);
- stream->Add(".");
- stream->Add(ElementsKindToString(elements_kind()));
- }
-
- stream->Add("[");
- key()->PrintNameTo(stream);
- if (IsDehoisted()) {
- stream->Add(" + %d]", index_offset());
- } else {
- stream->Add("]");
- }
-
- if (HasDependency()) {
- stream->Add(" ");
- dependency()->PrintNameTo(stream);
- }
-
- if (RequiresHoleCheck()) {
- stream->Add(" check_hole");
- }
-}
-
-
-bool HLoadKeyed::UsesMustHandleHole() const {
- if (IsFastPackedElementsKind(elements_kind())) {
- return false;
- }
-
- if (hole_mode() == ALLOW_RETURN_HOLE) return true;
-
- if (IsFastDoubleElementsKind(elements_kind())) {
- return false;
- }
-
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (!use->IsChange()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-bool HLoadKeyed::RequiresHoleCheck() const {
- if (IsFastPackedElementsKind(elements_kind())) {
- return false;
- }
-
- return !UsesMustHandleHole();
-}
-
-
-void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("]");
-}
-
-
-HValue* HLoadKeyedGeneric::Canonicalize() {
- // Recognize generic keyed loads that use property name generated
- // by for-in statement as a key and rewrite them into fast property load
- // by index.
- if (key()->IsLoadKeyed()) {
- HLoadKeyed* key_load = HLoadKeyed::cast(key());
- if (key_load->elements()->IsForInCacheArray()) {
- HForInCacheArray* names_cache =
- HForInCacheArray::cast(key_load->elements());
-
- if (names_cache->enumerable() == object()) {
- HForInCacheArray* index_cache =
- names_cache->index_cache();
- HCheckMapValue* map_check =
- new(block()->zone()) HCheckMapValue(object(), names_cache->map());
- HInstruction* index = new(block()->zone()) HLoadKeyed(
- index_cache,
- key_load->key(),
- key_load->key(),
- key_load->elements_kind());
- map_check->InsertBefore(this);
- index->InsertBefore(this);
- HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
- object(), index);
- load->InsertBefore(this);
- return load;
- }
- }
- }
-
- return this;
-}
-
-
-void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add(".");
- ASSERT(name()->IsString());
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" = ");
- value()->PrintNameTo(stream);
-}
-
-
-void HStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" = ");
- value()->PrintNameTo(stream);
- stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
- if (NeedsWriteBarrier()) {
- stream->Add(" (write-barrier)");
- }
- if (!transition().is_null()) {
- stream->Add(" (transition map %p)", *transition());
- }
-}
-
-
-void HStoreKeyed::PrintDataTo(StringStream* stream) {
- if (!is_external()) {
- elements()->PrintNameTo(stream);
- } else {
- elements()->PrintNameTo(stream);
- stream->Add(".");
- stream->Add(ElementsKindToString(elements_kind()));
- ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
- elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- }
-
- stream->Add("[");
- key()->PrintNameTo(stream);
- if (IsDehoisted()) {
- stream->Add(" + %d] = ", index_offset());
- } else {
- stream->Add("] = ");
- }
-
- value()->PrintNameTo(stream);
-}
-
-
-void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("] = ");
- value()->PrintNameTo(stream);
-}
-
-
-void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- ElementsKind from_kind = original_map()->elements_kind();
- ElementsKind to_kind = transitioned_map()->elements_kind();
- stream->Add(" %p [%s] -> %p [%s]",
- *original_map(),
- ElementsAccessor::ForKind(from_kind)->name(),
- *transitioned_map(),
- ElementsAccessor::ForKind(to_kind)->name());
-}
-
-
-void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
- stream->Add("[%p]", *cell());
- if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
- if (details_.IsReadOnly()) stream->Add(" (read-only)");
-}
-
-
-bool HLoadGlobalCell::RequiresHoleCheck() const {
- if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (!use->IsChange()) return true;
- }
- return false;
-}
-
-
-void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", *name());
-}
-
-
-void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
- stream->Add("[%p] = ", *cell());
- value()->PrintNameTo(stream);
- if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
- if (details_.IsReadOnly()) stream->Add(" (read-only)");
-}
-
-
-void HStoreGlobalGeneric::PrintDataTo(StringStream* stream) {
- stream->Add("%o = ", *name());
- value()->PrintNameTo(stream);
-}
-
-
-void HLoadContextSlot::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void HStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintNameTo(stream);
- stream->Add("[%d] = ", slot_index());
- value()->PrintNameTo(stream);
-}
-
-
-// Implementation of type inference and type conversions. Calculates
-// the inferred type of this instruction based on the input operands.
-
-HType HValue::CalculateInferredType() {
- return type_;
-}
-
-
-HType HCheckMaps::CalculateInferredType() {
- return value()->type();
-}
-
-
-HType HCheckFunction::CalculateInferredType() {
- return value()->type();
-}
-
-
-HType HCheckNonSmi::CalculateInferredType() {
- // TODO(kasperl): Is there any way to signal that this isn't a smi?
- return HType::Tagged();
-}
-
-
-HType HCheckSmi::CalculateInferredType() {
- return HType::Smi();
-}
-
-
-void HCheckSmiOrInt32::InferRepresentation(HInferRepresentation* h_infer) {
- ASSERT(CheckFlag(kFlexibleRepresentation));
- Representation r = value()->representation().IsTagged()
- ? Representation::Tagged() : Representation::Integer32();
- UpdateRepresentation(r, h_infer, "checksmiorint32");
-}
-
-
-HType HPhi::CalculateInferredType() {
- HType result = HType::Uninitialized();
- for (int i = 0; i < OperandCount(); ++i) {
- HType current = OperandAt(i)->type();
- result = result.Combine(current);
- }
- return result;
-}
-
-
-HType HConstant::CalculateInferredType() {
- if (has_int32_value_) {
- return Smi::IsValid(int32_value_) ? HType::Smi() : HType::HeapNumber();
- }
- if (has_double_value_) return HType::HeapNumber();
- return HType::TypeFromValue(isolate(), handle_);
-}
-
-
-HType HCompareGeneric::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
-HType HInstanceOf::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
-HType HDeleteProperty::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
-HType HInstanceOfKnownGlobal::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
-HType HChange::CalculateInferredType() {
- if (from().IsDouble() && to().IsTagged()) return HType::HeapNumber();
- return type();
-}
-
-
-HType HBitwiseBinaryOperation::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HArithmeticBinaryOperation::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HAdd::CalculateInferredType() {
- return HType::Tagged();
-}
-
-
-HType HBitNot::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HUnaryMathOperation::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HStringCharFromCode::CalculateInferredType() {
- return HType::String();
-}
-
-
-HType HAllocateObject::CalculateInferredType() {
- return HType::JSObject();
-}
-
-
-HType HAllocate::CalculateInferredType() {
- return type_;
-}
-
-
-HType HFastLiteral::CalculateInferredType() {
- // TODO(mstarzinger): Be smarter, could also be JSArray here.
- return HType::JSObject();
-}
-
-
-HType HArrayLiteral::CalculateInferredType() {
- return HType::JSArray();
-}
-
-
-HType HObjectLiteral::CalculateInferredType() {
- return HType::JSObject();
-}
-
-
-HType HRegExpLiteral::CalculateInferredType() {
- return HType::JSObject();
-}
-
-
-HType HFunctionLiteral::CalculateInferredType() {
- return HType::JSObject();
-}
-
-
-HValue* HUnaryMathOperation::EnsureAndPropagateNotMinusZero(
- BitVector* visited) {
- visited->Add(id());
- if (representation().IsInteger32() &&
- !value()->representation().IsInteger32()) {
- if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- }
- if (RequiredInputRepresentation(0).IsInteger32() &&
- representation().IsInteger32()) {
- return value();
- }
- return NULL;
-}
-
-
-
-HValue* HChange::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (from().IsInteger32()) return NULL;
- if (CanTruncateToInt32()) return NULL;
- if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- ASSERT(!from().IsInteger32() || !to().IsInteger32());
- return NULL;
-}
-
-
-HValue* HForceRepresentation::EnsureAndPropagateNotMinusZero(
- BitVector* visited) {
- visited->Add(id());
- return value();
-}
-
-
-HValue* HMod::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- return left();
- }
- return NULL;
-}
-
-
-HValue* HDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- return NULL;
-}
-
-
-HValue* HMathFloorOfDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- SetFlag(kBailoutOnMinusZero);
- return NULL;
-}
-
-
-HValue* HMul::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- return NULL;
-}
-
-
-HValue* HSub::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- // Propagate to the left argument. If the left argument cannot be -0, then
- // the result of the add operation cannot be either.
- if (range() == NULL || range()->CanBeMinusZero()) {
- return left();
- }
- return NULL;
-}
-
-
-HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- // Propagate to the left argument. If the left argument cannot be -0, then
- // the result of the sub operation cannot be either.
- if (range() == NULL || range()->CanBeMinusZero()) {
- return left();
- }
- return NULL;
-}
-
-
-bool HStoreKeyed::NeedsCanonicalization() {
- // If value is an integer or smi or comes from the result of a keyed load or
- // constant then it is either be a non-hole value or in the case of a constant
- // the hole is only being stored explicitly: no need for canonicalization.
- if (value()->IsLoadKeyed() || value()->IsConstant()) {
- return false;
- }
-
- if (value()->IsChange()) {
- if (HChange::cast(value())->from().IsInteger32()) {
- return false;
- }
- if (HChange::cast(value())->value()->type().IsSmi()) {
- return false;
- }
- }
- return true;
-}
-
-
-#define H_CONSTANT_INT32(val) \
-new(zone) HConstant(static_cast<int32_t>(val), Representation::Integer32())
-#define H_CONSTANT_DOUBLE(val) \
-new(zone) HConstant(static_cast<double>(val), Representation::Double())
-
-#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \
-HInstruction* HInstr::New( \
- Zone* zone, HValue* context, HValue* left, HValue* right) { \
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
- HConstant* c_left = HConstant::cast(left); \
- HConstant* c_right = HConstant::cast(right); \
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
- double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
- if (TypeInfo::IsInt32Double(double_res)) { \
- return H_CONSTANT_INT32(double_res); \
- } \
- return H_CONSTANT_DOUBLE(double_res); \
- } \
- } \
- return new(zone) HInstr(context, left, right); \
-}
-
-
-DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HAdd, +)
-DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HMul, *)
-DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
-
-#undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR
-
-
-HInstruction* HStringAdd::New(
- Zone* zone, HValue* context, HValue* left, HValue* right) {
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_right = HConstant::cast(right);
- HConstant* c_left = HConstant::cast(left);
- if (c_left->HasStringValue() && c_right->HasStringValue()) {
- return new(zone) HConstant(FACTORY->NewConsString(c_left->StringValue(),
- c_right->StringValue()),
- Representation::Tagged());
- }
- }
- return new(zone) HStringAdd(context, left, right);
-}
-
-
-HInstruction* HStringCharFromCode::New(
- Zone* zone, HValue* context, HValue* char_code) {
- if (FLAG_fold_constants && char_code->IsConstant()) {
- HConstant* c_code = HConstant::cast(char_code);
- Isolate* isolate = Isolate::Current();
- if (c_code->HasNumberValue()) {
- if (isfinite(c_code->DoubleValue())) {
- uint32_t code = c_code->NumberValueAsInteger32() & 0xffff;
- return new(zone) HConstant(LookupSingleCharacterStringFromCode(isolate,
- code),
- Representation::Tagged());
- }
- return new(zone) HConstant(isolate->factory()->empty_string(),
- Representation::Tagged());
- }
- }
- return new(zone) HStringCharFromCode(context, char_code);
-}
-
-
-HInstruction* HStringLength::New(Zone* zone, HValue* string) {
- if (FLAG_fold_constants && string->IsConstant()) {
- HConstant* c_string = HConstant::cast(string);
- if (c_string->HasStringValue()) {
- return H_CONSTANT_INT32(c_string->StringValue()->length());
- }
- }
- return new(zone) HStringLength(string);
-}
-
-
-HInstruction* HUnaryMathOperation::New(
- Zone* zone, HValue* context, HValue* value, BuiltinFunctionId op) {
- do {
- if (!FLAG_fold_constants) break;
- if (!value->IsConstant()) break;
- HConstant* constant = HConstant::cast(value);
- if (!constant->HasNumberValue()) break;
- double d = constant->DoubleValue();
- if (isnan(d)) { // NaN poisons everything.
- return H_CONSTANT_DOUBLE(OS::nan_value());
- }
- if (isinf(d)) { // +Infinity and -Infinity.
- switch (op) {
- case kMathSin:
- case kMathCos:
- case kMathTan:
- return H_CONSTANT_DOUBLE(OS::nan_value());
- case kMathExp:
- return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
- case kMathLog:
- case kMathSqrt:
- return H_CONSTANT_DOUBLE((d > 0.0) ? d : OS::nan_value());
- case kMathPowHalf:
- case kMathAbs:
- return H_CONSTANT_DOUBLE((d > 0.0) ? d : -d);
- case kMathRound:
- case kMathFloor:
- return H_CONSTANT_DOUBLE(d);
- default:
- UNREACHABLE();
- break;
- }
- }
- switch (op) {
- case kMathSin:
- return H_CONSTANT_DOUBLE(fast_sin(d));
- case kMathCos:
- return H_CONSTANT_DOUBLE(fast_cos(d));
- case kMathTan:
- return H_CONSTANT_DOUBLE(fast_tan(d));
- case kMathExp:
- return H_CONSTANT_DOUBLE(fast_exp(d));
- case kMathLog:
- return H_CONSTANT_DOUBLE(fast_log(d));
- case kMathSqrt:
- return H_CONSTANT_DOUBLE(fast_sqrt(d));
- case kMathPowHalf:
- return H_CONSTANT_DOUBLE(power_double_double(d, 0.5));
- case kMathAbs:
- return H_CONSTANT_DOUBLE((d >= 0.0) ? d + 0.0 : -d);
- case kMathRound:
- // -0.5 .. -0.0 round to -0.0.
- if ((d >= -0.5 && Double(d).Sign() < 0)) return H_CONSTANT_DOUBLE(-0.0);
- // Doubles are represented as Significant * 2 ^ Exponent. If the
- // Exponent is not negative, the double value is already an integer.
- if (Double(d).Exponent() >= 0) return H_CONSTANT_DOUBLE(d);
- return H_CONSTANT_DOUBLE(floor(d + 0.5));
- case kMathFloor:
- return H_CONSTANT_DOUBLE(floor(d));
- default:
- UNREACHABLE();
- break;
- }
- } while (false);
- return new(zone) HUnaryMathOperation(context, value, op);
-}
-
-
-HInstruction* HPower::New(Zone* zone, HValue* left, HValue* right) {
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_left = HConstant::cast(left);
- HConstant* c_right = HConstant::cast(right);
- if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
- double result = power_helper(c_left->DoubleValue(),
- c_right->DoubleValue());
- return H_CONSTANT_DOUBLE(isnan(result) ? OS::nan_value() : result);
- }
- }
- return new(zone) HPower(left, right);
-}
-
-
-HInstruction* HMathMinMax::New(
- Zone* zone, HValue* context, HValue* left, HValue* right, Operation op) {
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_left = HConstant::cast(left);
- HConstant* c_right = HConstant::cast(right);
- if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
- double d_left = c_left->DoubleValue();
- double d_right = c_right->DoubleValue();
- if (op == kMathMin) {
- if (d_left > d_right) return H_CONSTANT_DOUBLE(d_right);
- if (d_left < d_right) return H_CONSTANT_DOUBLE(d_left);
- if (d_left == d_right) {
- // Handle +0 and -0.
- return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_left
- : d_right);
- }
- } else {
- if (d_left < d_right) return H_CONSTANT_DOUBLE(d_right);
- if (d_left > d_right) return H_CONSTANT_DOUBLE(d_left);
- if (d_left == d_right) {
- // Handle +0 and -0.
- return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_right
- : d_left);
- }
- }
- // All comparisons failed, must be NaN.
- return H_CONSTANT_DOUBLE(OS::nan_value());
- }
- }
- return new(zone) HMathMinMax(context, left, right, op);
-}
-
-
-HInstruction* HMod::New(
- Zone* zone, HValue* context, HValue* left, HValue* right) {
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_left = HConstant::cast(left);
- HConstant* c_right = HConstant::cast(right);
- if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) {
- int32_t dividend = c_left->Integer32Value();
- int32_t divisor = c_right->Integer32Value();
- if (divisor != 0) {
- int32_t res = dividend % divisor;
- if ((res == 0) && (dividend < 0)) {
- return H_CONSTANT_DOUBLE(-0.0);
- }
- return H_CONSTANT_INT32(res);
- }
- }
- }
- return new(zone) HMod(context, left, right);
-}
-
-
-HInstruction* HDiv::New(
- Zone* zone, HValue* context, HValue* left, HValue* right) {
- // If left and right are constant values, try to return a constant value.
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_left = HConstant::cast(left);
- HConstant* c_right = HConstant::cast(right);
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
- if (c_right->DoubleValue() != 0) {
- double double_res = c_left->DoubleValue() / c_right->DoubleValue();
- if (TypeInfo::IsInt32Double(double_res)) {
- return H_CONSTANT_INT32(double_res);
- }
- return H_CONSTANT_DOUBLE(double_res);
- } else {
- int sign = Double(c_left->DoubleValue()).Sign() *
- Double(c_right->DoubleValue()).Sign(); // Right could be -0.
- return H_CONSTANT_DOUBLE(sign * V8_INFINITY);
- }
- }
- }
- return new(zone) HDiv(context, left, right);
-}
-
-
-HInstruction* HBitwise::New(
- Zone* zone, Token::Value op, HValue* context, HValue* left, HValue* right) {
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_left = HConstant::cast(left);
- HConstant* c_right = HConstant::cast(right);
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
- int32_t result;
- int32_t v_left = c_left->NumberValueAsInteger32();
- int32_t v_right = c_right->NumberValueAsInteger32();
- switch (op) {
- case Token::BIT_XOR:
- result = v_left ^ v_right;
- break;
- case Token::BIT_AND:
- result = v_left & v_right;
- break;
- case Token::BIT_OR:
- result = v_left | v_right;
- break;
- default:
- result = 0; // Please the compiler.
- UNREACHABLE();
- }
- return H_CONSTANT_INT32(result);
- }
- }
- return new(zone) HBitwise(op, context, left, right);
-}
-
-
-#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \
-HInstruction* HInstr::New( \
- Zone* zone, HValue* context, HValue* left, HValue* right) { \
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
- HConstant* c_left = HConstant::cast(left); \
- HConstant* c_right = HConstant::cast(right); \
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
- return H_CONSTANT_INT32(result); \
- } \
- } \
- return new(zone) HInstr(context, left, right); \
-}
-
-
-DEFINE_NEW_H_BITWISE_INSTR(HSar,
-c_left->NumberValueAsInteger32() >> (c_right->NumberValueAsInteger32() & 0x1f))
-DEFINE_NEW_H_BITWISE_INSTR(HShl,
-c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f))
-
-#undef DEFINE_NEW_H_BITWISE_INSTR
-
-
-HInstruction* HShr::New(
- Zone* zone, HValue* context, HValue* left, HValue* right) {
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_left = HConstant::cast(left);
- HConstant* c_right = HConstant::cast(right);
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
- int32_t left_val = c_left->NumberValueAsInteger32();
- int32_t right_val = c_right->NumberValueAsInteger32() & 0x1f;
- if ((right_val == 0) && (left_val < 0)) {
- return H_CONSTANT_DOUBLE(static_cast<uint32_t>(left_val));
- }
- return H_CONSTANT_INT32(static_cast<uint32_t>(left_val) >> right_val);
- }
- }
- return new(zone) HShr(context, left, right);
-}
-
-
-#undef H_CONSTANT_INT32
-#undef H_CONSTANT_DOUBLE
-
-
-void HIn::PrintDataTo(StringStream* stream) {
- key()->PrintNameTo(stream);
- stream->Add(" ");
- object()->PrintNameTo(stream);
-}
-
-
-void HBitwise::PrintDataTo(StringStream* stream) {
- stream->Add(Token::Name(op_));
- stream->Add(" ");
- HBitwiseBinaryOperation::PrintDataTo(stream);
-}
-
-
-void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
- ASSERT(CheckFlag(kFlexibleRepresentation));
- // If there are non-Phi uses, and all of them have observed the same
- // representation, than that's what this Phi is going to use.
- Representation new_rep = RepresentationObservedByAllNonPhiUses();
- if (!new_rep.IsNone()) {
- UpdateRepresentation(new_rep, h_infer, "unanimous use observations");
- return;
- }
- new_rep = RepresentationFromInputs();
- UpdateRepresentation(new_rep, h_infer, "inputs");
- new_rep = RepresentationFromUses();
- UpdateRepresentation(new_rep, h_infer, "uses");
- new_rep = RepresentationFromUseRequirements();
- UpdateRepresentation(new_rep, h_infer, "use requirements");
-}
-
-
-Representation HPhi::RepresentationObservedByAllNonPhiUses() {
- int non_phi_use_count = 0;
- for (int i = Representation::kInteger32;
- i < Representation::kNumRepresentations; ++i) {
- non_phi_use_count += non_phi_uses_[i];
- }
- if (non_phi_use_count <= 1) return Representation::None();
- for (int i = 0; i < Representation::kNumRepresentations; ++i) {
- if (non_phi_uses_[i] == non_phi_use_count) {
- return Representation::FromKind(static_cast<Representation::Kind>(i));
- }
- }
- return Representation::None();
-}
-
-
-Representation HPhi::RepresentationFromInputs() {
- bool double_occurred = false;
- bool int32_occurred = false;
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* value = OperandAt(i);
- if (value->IsUnknownOSRValue()) {
- HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
- if (hint_value != NULL) {
- Representation hint = hint_value->representation();
- if (hint.IsTagged()) return hint;
- if (hint.IsDouble()) double_occurred = true;
- if (hint.IsInteger32()) int32_occurred = true;
- }
- continue;
- }
- if (value->representation().IsDouble()) double_occurred = true;
- if (value->representation().IsInteger32()) int32_occurred = true;
- if (value->representation().IsTagged()) {
- if (value->IsConstant()) {
- HConstant* constant = HConstant::cast(value);
- if (constant->IsConvertibleToInteger()) {
- int32_occurred = true;
- } else if (constant->HasNumberValue()) {
- double_occurred = true;
- } else {
- return Representation::Tagged();
- }
- } else {
- if (value->IsPhi() && !IsConvertibleToInteger()) {
- return Representation::Tagged();
- }
- }
- }
- }
-
- if (double_occurred) return Representation::Double();
-
- if (int32_occurred) return Representation::Integer32();
-
- return Representation::None();
-}
-
-
-Representation HPhi::RepresentationFromUseRequirements() {
- Representation all_uses_require = Representation::None();
- bool all_uses_require_the_same = true;
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- // We check for observed_input_representation elsewhere.
- Representation use_rep =
- it.value()->RequiredInputRepresentation(it.index());
- // No useful info from this use -> look at the next one.
- if (use_rep.IsNone()) {
- continue;
- }
- if (use_rep.Equals(all_uses_require)) {
- continue;
- }
- // This use's representation contradicts what we've seen so far.
- if (!all_uses_require.IsNone()) {
- ASSERT(!use_rep.Equals(all_uses_require));
- all_uses_require_the_same = false;
- break;
- }
- // Otherwise, initialize observed representation.
- all_uses_require = use_rep;
- }
- if (all_uses_require_the_same) {
- return all_uses_require;
- }
-
- return Representation::None();
-}
-
-
-// Node-specific verification code is only included in debug mode.
-#ifdef DEBUG
-
-void HPhi::Verify() {
- ASSERT(OperandCount() == block()->predecessors()->length());
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* value = OperandAt(i);
- HBasicBlock* defining_block = value->block();
- HBasicBlock* predecessor_block = block()->predecessors()->at(i);
- ASSERT(defining_block == predecessor_block ||
- defining_block->Dominates(predecessor_block));
- }
-}
-
-
-void HSimulate::Verify() {
- HInstruction::Verify();
- ASSERT(HasAstId());
-}
-
-
-void HCheckSmi::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
-
-void HCheckNonSmi::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
-
-void HCheckFunction::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
-#endif
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/hydrogen-instructions.h b/src/3rdparty/v8/src/hydrogen-instructions.h
deleted file mode 100644
index 7c2135d..0000000
--- a/src/3rdparty/v8/src/hydrogen-instructions.h
+++ /dev/null
@@ -1,6186 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HYDROGEN_INSTRUCTIONS_H_
-#define V8_HYDROGEN_INSTRUCTIONS_H_
-
-#include "v8.h"
-
-#include "allocation.h"
-#include "code-stubs.h"
-#include "data-flow.h"
-#include "small-pointer-list.h"
-#include "string-stream.h"
-#include "v8conversions.h"
-#include "v8utils.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class HBasicBlock;
-class HEnvironment;
-class HInferRepresentation;
-class HInstruction;
-class HLoopInformation;
-class HValue;
-class LInstruction;
-class LChunkBuilder;
-
-
-#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
- V(BinaryOperation) \
- V(BitwiseBinaryOperation) \
- V(ControlInstruction) \
- V(Instruction) \
-
-
-#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
- V(AbnormalExit) \
- V(AccessArgumentsAt) \
- V(Add) \
- V(Allocate) \
- V(AllocateObject) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArgumentsObject) \
- V(ArrayLiteral) \
- V(Bitwise) \
- V(BitNot) \
- V(BlockEntry) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallConstantFunction) \
- V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
- V(CallNew) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CallStub) \
- V(Change) \
- V(CheckFunction) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(CheckSmiOrInt32) \
- V(ClampToUint8) \
- V(ClassOfTestAndBranch) \
- V(CompareIDAndBranch) \
- V(CompareGeneric) \
- V(CompareObjectEqAndBranch) \
- V(CompareMap) \
- V(CompareConstantEqAndBranch) \
- V(Constant) \
- V(Context) \
- V(DeclareGlobals) \
- V(DeleteProperty) \
- V(Deoptimize) \
- V(Div) \
- V(DummyUse) \
- V(ElementsKind) \
- V(EnterInlined) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
- V(ForceRepresentation) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
- V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InductionVariableAnnotation) \
- V(In) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
- V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
- V(IsObjectAndBranch) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(JSArrayLength) \
- V(LeaveInlined) \
- V(LoadContextSlot) \
- V(LoadElements) \
- V(LoadExternalArrayPointer) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyed) \
- V(LoadKeyedGeneric) \
- V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
- V(LoadNamedGeneric) \
- V(MapEnumLength) \
- V(MathFloorOfDiv) \
- V(MathMinMax) \
- V(Mod) \
- V(Mul) \
- V(NumericConstraint) \
- V(ObjectLiteral) \
- V(OsrEntry) \
- V(OuterContext) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(Random) \
- V(RegExpLiteral) \
- V(Return) \
- V(Ror) \
- V(Sar) \
- V(SeqStringSetChar) \
- V(Shl) \
- V(Shr) \
- V(Simulate) \
- V(SoftDeoptimize) \
- V(StackCheck) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
- V(StoreKeyed) \
- V(StoreKeyedGeneric) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(StringLength) \
- V(Sub) \
- V(ThisFunction) \
- V(Throw) \
- V(ToFastProperties) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(UseConst) \
- V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver)
-
-#define GVN_TRACKED_FLAG_LIST(V) \
- V(Maps) \
- V(NewSpacePromotion)
-
-#define GVN_UNTRACKED_FLAG_LIST(V) \
- V(Calls) \
- V(InobjectFields) \
- V(BackingStoreFields) \
- V(ElementsKind) \
- V(ElementsPointer) \
- V(ArrayElements) \
- V(DoubleArrayElements) \
- V(SpecializedArrayElements) \
- V(GlobalVars) \
- V(ArrayLengths) \
- V(ContextSlots) \
- V(OsrEntries)
-
-#define DECLARE_ABSTRACT_INSTRUCTION(type) \
- virtual bool Is##type() const { return true; } \
- static H##type* cast(HValue* value) { \
- ASSERT(value->Is##type()); \
- return reinterpret_cast<H##type*>(value); \
- }
-
-
-#define DECLARE_CONCRETE_INSTRUCTION(type) \
- virtual LInstruction* CompileToLithium(LChunkBuilder* builder); \
- static H##type* cast(HValue* value) { \
- ASSERT(value->Is##type()); \
- return reinterpret_cast<H##type*>(value); \
- } \
- virtual Opcode opcode() const { return HValue::k##type; }
-
-
-#ifdef DEBUG
-#define ASSERT_ALLOCATION_DISABLED \
- ASSERT(isolate()->optimizing_compiler_thread()->IsOptimizerThread() || \
- !isolate()->heap()->IsAllocationAllowed())
-#else
-#define ASSERT_ALLOCATION_DISABLED do {} while (0)
-#endif
-
-class Range: public ZoneObject {
- public:
- Range()
- : lower_(kMinInt),
- upper_(kMaxInt),
- next_(NULL),
- can_be_minus_zero_(false) { }
-
- Range(int32_t lower, int32_t upper)
- : lower_(lower),
- upper_(upper),
- next_(NULL),
- can_be_minus_zero_(false) { }
-
- int32_t upper() const { return upper_; }
- int32_t lower() const { return lower_; }
- Range* next() const { return next_; }
- Range* CopyClearLower(Zone* zone) const {
- return new(zone) Range(kMinInt, upper_);
- }
- Range* CopyClearUpper(Zone* zone) const {
- return new(zone) Range(lower_, kMaxInt);
- }
- Range* Copy(Zone* zone) const {
- Range* result = new(zone) Range(lower_, upper_);
- result->set_can_be_minus_zero(CanBeMinusZero());
- return result;
- }
- int32_t Mask() const;
- void set_can_be_minus_zero(bool b) { can_be_minus_zero_ = b; }
- bool CanBeMinusZero() const { return CanBeZero() && can_be_minus_zero_; }
- bool CanBeZero() const { return upper_ >= 0 && lower_ <= 0; }
- bool CanBeNegative() const { return lower_ < 0; }
- bool Includes(int value) const { return lower_ <= value && upper_ >= value; }
- bool IsMostGeneric() const {
- return lower_ == kMinInt && upper_ == kMaxInt && CanBeMinusZero();
- }
- bool IsInSmiRange() const {
- return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
- }
- void KeepOrder();
-#ifdef DEBUG
- void Verify() const;
-#endif
-
- void StackUpon(Range* other) {
- Intersect(other);
- next_ = other;
- }
-
- void Intersect(Range* other);
- void Union(Range* other);
- void CombinedMax(Range* other);
- void CombinedMin(Range* other);
-
- void AddConstant(int32_t value);
- void Sar(int32_t value);
- void Shl(int32_t value);
- bool AddAndCheckOverflow(Range* other);
- bool SubAndCheckOverflow(Range* other);
- bool MulAndCheckOverflow(Range* other);
-
- private:
- int32_t lower_;
- int32_t upper_;
- Range* next_;
- bool can_be_minus_zero_;
-};
-
-
-class Representation {
- public:
- enum Kind {
- kNone,
- kInteger32,
- kDouble,
- kTagged,
- kExternal,
- kNumRepresentations
- };
-
- Representation() : kind_(kNone) { }
-
- static Representation None() { return Representation(kNone); }
- static Representation Tagged() { return Representation(kTagged); }
- static Representation Integer32() { return Representation(kInteger32); }
- static Representation Double() { return Representation(kDouble); }
- static Representation External() { return Representation(kExternal); }
-
- static Representation FromKind(Kind kind) { return Representation(kind); }
-
- bool Equals(const Representation& other) {
- return kind_ == other.kind_;
- }
-
- bool is_more_general_than(const Representation& other) {
- ASSERT(kind_ != kExternal);
- ASSERT(other.kind_ != kExternal);
- return kind_ > other.kind_;
- }
-
- Kind kind() const { return static_cast<Kind>(kind_); }
- bool IsNone() const { return kind_ == kNone; }
- bool IsTagged() const { return kind_ == kTagged; }
- bool IsInteger32() const { return kind_ == kInteger32; }
- bool IsDouble() const { return kind_ == kDouble; }
- bool IsExternal() const { return kind_ == kExternal; }
- bool IsSpecialization() const {
- return kind_ == kInteger32 || kind_ == kDouble;
- }
- const char* Mnemonic() const;
-
- private:
- explicit Representation(Kind k) : kind_(k) { }
-
- // Make sure kind fits in int8.
- STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
-
- int8_t kind_;
-};
-
-
-class HType {
- public:
- HType() : type_(kUninitialized) { }
-
- static HType Tagged() { return HType(kTagged); }
- static HType TaggedPrimitive() { return HType(kTaggedPrimitive); }
- static HType TaggedNumber() { return HType(kTaggedNumber); }
- static HType Smi() { return HType(kSmi); }
- static HType HeapNumber() { return HType(kHeapNumber); }
- static HType String() { return HType(kString); }
- static HType Boolean() { return HType(kBoolean); }
- static HType NonPrimitive() { return HType(kNonPrimitive); }
- static HType JSArray() { return HType(kJSArray); }
- static HType JSObject() { return HType(kJSObject); }
- static HType Uninitialized() { return HType(kUninitialized); }
-
- // Return the weakest (least precise) common type.
- HType Combine(HType other) {
- return HType(static_cast<Type>(type_ & other.type_));
- }
-
- bool Equals(const HType& other) {
- return type_ == other.type_;
- }
-
- bool IsSubtypeOf(const HType& other) {
- return Combine(other).Equals(other);
- }
-
- bool IsTagged() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kTagged) == kTagged);
- }
-
- bool IsTaggedPrimitive() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kTaggedPrimitive) == kTaggedPrimitive);
- }
-
- bool IsTaggedNumber() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kTaggedNumber) == kTaggedNumber);
- }
-
- bool IsSmi() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kSmi) == kSmi);
- }
-
- bool IsHeapNumber() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kHeapNumber) == kHeapNumber);
- }
-
- bool IsString() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kString) == kString);
- }
-
- bool IsBoolean() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kBoolean) == kBoolean);
- }
-
- bool IsNonPrimitive() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kNonPrimitive) == kNonPrimitive);
- }
-
- bool IsJSArray() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kJSArray) == kJSArray);
- }
-
- bool IsJSObject() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kJSObject) == kJSObject);
- }
-
- bool IsUninitialized() {
- return type_ == kUninitialized;
- }
-
- bool IsHeapObject() {
- ASSERT(type_ != kUninitialized);
- return IsHeapNumber() || IsString() || IsNonPrimitive();
- }
-
- static HType TypeFromValue(Isolate* isolate, Handle<Object> value);
-
- const char* ToString();
-
- private:
- enum Type {
- kTagged = 0x1, // 0000 0000 0000 0001
- kTaggedPrimitive = 0x5, // 0000 0000 0000 0101
- kTaggedNumber = 0xd, // 0000 0000 0000 1101
- kSmi = 0x1d, // 0000 0000 0001 1101
- kHeapNumber = 0x2d, // 0000 0000 0010 1101
- kString = 0x45, // 0000 0000 0100 0101
- kBoolean = 0x85, // 0000 0000 1000 0101
- kNonPrimitive = 0x101, // 0000 0001 0000 0001
- kJSObject = 0x301, // 0000 0011 0000 0001
- kJSArray = 0x701, // 0000 0111 0000 0001
- kUninitialized = 0x1fff // 0001 1111 1111 1111
- };
-
- // Make sure type fits in int16.
- STATIC_ASSERT(kUninitialized < (1 << (2 * kBitsPerByte)));
-
- explicit HType(Type t) : type_(t) { }
-
- int16_t type_;
-};
-
-
-class HUseListNode: public ZoneObject {
- public:
- HUseListNode(HValue* value, int index, HUseListNode* tail)
- : tail_(tail), value_(value), index_(index) {
- }
-
- HUseListNode* tail();
- HValue* value() const { return value_; }
- int index() const { return index_; }
-
- void set_tail(HUseListNode* list) { tail_ = list; }
-
-#ifdef DEBUG
- void Zap() {
- tail_ = reinterpret_cast<HUseListNode*>(1);
- value_ = NULL;
- index_ = -1;
- }
-#endif
-
- private:
- HUseListNode* tail_;
- HValue* value_;
- int index_;
-};
-
-
-// We reuse use list nodes behind the scenes as uses are added and deleted.
-// This class is the safe way to iterate uses while deleting them.
-class HUseIterator BASE_EMBEDDED {
- public:
- bool Done() { return current_ == NULL; }
- void Advance();
-
- HValue* value() {
- ASSERT(!Done());
- return value_;
- }
-
- int index() {
- ASSERT(!Done());
- return index_;
- }
-
- private:
- explicit HUseIterator(HUseListNode* head);
-
- HUseListNode* current_;
- HUseListNode* next_;
- HValue* value_;
- int index_;
-
- friend class HValue;
-};
-
-
-// There must be one corresponding kDepends flag for every kChanges flag and
-// the order of the kChanges flags must be exactly the same as of the kDepends
-// flags. All tracked flags should appear before untracked ones.
-enum GVNFlag {
- // Declare global value numbering flags.
-#define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
- GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
- GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- kAfterLastFlag,
- kLastFlag = kAfterLastFlag - 1,
-#define COUNT_FLAG(type) + 1
- kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG)
-#undef COUNT_FLAG
-};
-
-
-class NumericRelation {
- public:
- enum Kind { NONE, EQ, GT, GE, LT, LE, NE };
- static const char* MnemonicFromKind(Kind kind) {
- switch (kind) {
- case NONE: return "NONE";
- case EQ: return "EQ";
- case GT: return "GT";
- case GE: return "GE";
- case LT: return "LT";
- case LE: return "LE";
- case NE: return "NE";
- }
- UNREACHABLE();
- return NULL;
- }
- const char* Mnemonic() const { return MnemonicFromKind(kind_); }
-
- static NumericRelation None() { return NumericRelation(NONE); }
- static NumericRelation Eq() { return NumericRelation(EQ); }
- static NumericRelation Gt() { return NumericRelation(GT); }
- static NumericRelation Ge() { return NumericRelation(GE); }
- static NumericRelation Lt() { return NumericRelation(LT); }
- static NumericRelation Le() { return NumericRelation(LE); }
- static NumericRelation Ne() { return NumericRelation(NE); }
-
- bool IsNone() { return kind_ == NONE; }
-
- static NumericRelation FromToken(Token::Value token) {
- switch (token) {
- case Token::EQ: return Eq();
- case Token::EQ_STRICT: return Eq();
- case Token::LT: return Lt();
- case Token::GT: return Gt();
- case Token::LTE: return Le();
- case Token::GTE: return Ge();
- case Token::NE: return Ne();
- case Token::NE_STRICT: return Ne();
- default: return None();
- }
- }
-
- // The semantics of "Reversed" is that if "x rel y" is true then also
- // "y rel.Reversed() x" is true, and that rel.Reversed().Reversed() == rel.
- NumericRelation Reversed() {
- switch (kind_) {
- case NONE: return None();
- case EQ: return Eq();
- case GT: return Lt();
- case GE: return Le();
- case LT: return Gt();
- case LE: return Ge();
- case NE: return Ne();
- }
- UNREACHABLE();
- return None();
- }
-
- // The semantics of "Negated" is that if "x rel y" is true then also
- // "!(x rel.Negated() y)" is true.
- NumericRelation Negated() {
- switch (kind_) {
- case NONE: return None();
- case EQ: return Ne();
- case GT: return Le();
- case GE: return Lt();
- case LT: return Ge();
- case LE: return Gt();
- case NE: return Eq();
- }
- UNREACHABLE();
- return None();
- }
-
- // The semantics of "Implies" is that if "x rel y" is true
- // then also "x other_relation y" is true.
- bool Implies(NumericRelation other_relation) {
- switch (kind_) {
- case NONE: return false;
- case EQ: return (other_relation.kind_ == EQ)
- || (other_relation.kind_ == GE)
- || (other_relation.kind_ == LE);
- case GT: return (other_relation.kind_ == GT)
- || (other_relation.kind_ == GE)
- || (other_relation.kind_ == NE);
- case LT: return (other_relation.kind_ == LT)
- || (other_relation.kind_ == LE)
- || (other_relation.kind_ == NE);
- case GE: return (other_relation.kind_ == GE);
- case LE: return (other_relation.kind_ == LE);
- case NE: return (other_relation.kind_ == NE);
- }
- UNREACHABLE();
- return false;
- }
-
- // The semantics of "IsExtendable" is that if
- // "rel.IsExtendable(direction)" is true then
- // "x rel y" implies "(x + direction) rel y" .
- bool IsExtendable(int direction) {
- switch (kind_) {
- case NONE: return false;
- case EQ: return false;
- case GT: return (direction >= 0);
- case GE: return (direction >= 0);
- case LT: return (direction <= 0);
- case LE: return (direction <= 0);
- case NE: return false;
- }
- UNREACHABLE();
- return false;
- }
-
- private:
- explicit NumericRelation(Kind kind) : kind_(kind) {}
-
- Kind kind_;
-};
-
-
-typedef EnumSet<GVNFlag> GVNFlagSet;
-
-
-class HValue: public ZoneObject {
- public:
- static const int kNoNumber = -1;
-
- enum Flag {
- kFlexibleRepresentation,
- // Participate in Global Value Numbering, i.e. elimination of
- // unnecessary recomputations. If an instruction sets this flag, it must
- // implement DataEquals(), which will be used to determine if other
- // occurrences of the instruction are indeed the same.
- kUseGVN,
- // Track instructions that are dominating side effects. If an instruction
- // sets this flag, it must implement SetSideEffectDominator() and should
- // indicate which side effects to track by setting GVN flags.
- kTrackSideEffectDominators,
- kCanOverflow,
- kBailoutOnMinusZero,
- kCanBeDivByZero,
- kDeoptimizeOnUndefined,
- kIsArguments,
- kTruncatingToInt32,
- kIsDead,
- // Instructions that are allowed to produce full range unsigned integer
- // values are marked with kUint32 flag. If arithmetic shift or a load from
- // EXTERNAL_UNSIGNED_INT_ELEMENTS array is not marked with this flag
- // it will deoptimize if result does not fit into signed integer range.
- // HGraph::ComputeSafeUint32Operations is responsible for setting this
- // flag.
- kUint32,
- // If a phi is involved in the evaluation of a numeric constraint the
- // recursion can cause an endless cycle: we use this flag to exit the loop.
- kNumericConstraintEvaluationInProgress,
- // This flag is set to true after the SetupInformativeDefinitions() pass
- // has processed this instruction.
- kIDefsProcessingDone,
- kLastFlag = kIDefsProcessingDone
- };
-
- STATIC_ASSERT(kLastFlag < kBitsPerInt);
-
- static const int kChangesToDependsFlagsLeftShift = 1;
-
- static GVNFlag ChangesFlagFromInt(int x) {
- return static_cast<GVNFlag>(x * 2);
- }
- static GVNFlag DependsOnFlagFromInt(int x) {
- return static_cast<GVNFlag>(x * 2 + 1);
- }
- static GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet flags) {
- return GVNFlagSet(flags.ToIntegral() << kChangesToDependsFlagsLeftShift);
- }
-
- static HValue* cast(HValue* value) { return value; }
-
- enum Opcode {
- // Declare a unique enum value for each hydrogen instruction.
- #define DECLARE_OPCODE(type) k##type,
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kPhi
- #undef DECLARE_OPCODE
- };
- virtual Opcode opcode() const = 0;
-
- // Declare a non-virtual predicates for each concrete HInstruction or HValue.
- #define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
- #undef DECLARE_PREDICATE
- bool IsPhi() const { return opcode() == kPhi; }
-
- // Declare virtual predicates for abstract HInstruction or HValue
- #define DECLARE_PREDICATE(type) \
- virtual bool Is##type() const { return false; }
- HYDROGEN_ABSTRACT_INSTRUCTION_LIST(DECLARE_PREDICATE)
- #undef DECLARE_PREDICATE
-
- HValue() : block_(NULL),
- id_(kNoNumber),
- type_(HType::Tagged()),
- use_list_(NULL),
- range_(NULL),
- flags_(0) {}
- virtual ~HValue() {}
-
- HBasicBlock* block() const { return block_; }
- void SetBlock(HBasicBlock* block);
- int LoopWeight() const;
-
- // Note: Never call this method for an unlinked value.
- Isolate* isolate() const;
-
- int id() const { return id_; }
- void set_id(int id) { id_ = id; }
-
- HUseIterator uses() const { return HUseIterator(use_list_); }
-
- virtual bool EmitAtUses() { return false; }
- Representation representation() const { return representation_; }
- void ChangeRepresentation(Representation r) {
- ASSERT(CheckFlag(kFlexibleRepresentation));
- RepresentationChanged(r);
- representation_ = r;
- if (r.IsTagged()) {
- // Tagged is the bottom of the lattice, don't go any further.
- ClearFlag(kFlexibleRepresentation);
- }
- }
- virtual void AssumeRepresentation(Representation r);
-
- virtual bool IsConvertibleToInteger() const { return true; }
-
- HType type() const { return type_; }
- void set_type(HType new_type) {
- ASSERT(new_type.IsSubtypeOf(type_));
- type_ = new_type;
- }
-
- // An operation needs to override this function iff:
- // 1) it can produce an int32 output.
- // 2) the true value of its output can potentially be minus zero.
- // The implementation must set a flag so that it bails out in the case where
- // it would otherwise output what should be a minus zero as an int32 zero.
- // If the operation also exists in a form that takes int32 and outputs int32
- // then the operation should return its input value so that we can propagate
- // back. There are three operations that need to propagate back to more than
- // one input. They are phi and binary div and mul. They always return NULL
- // and expect the caller to take care of things.
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- return NULL;
- }
-
- // There are HInstructions that do not really change a value, they
- // only add pieces of information to it (like bounds checks, map checks,
- // smi checks...).
- // We call these instructions "informative definitions", or "iDef".
- // One of the iDef operands is special because it is the value that is
- // "transferred" to the output, we call it the "redefined operand".
- // If an HValue is an iDef it must override RedefinedOperandIndex() so that
- // it does not return kNoRedefinedOperand;
- static const int kNoRedefinedOperand = -1;
- virtual int RedefinedOperandIndex() { return kNoRedefinedOperand; }
- bool IsInformativeDefinition() {
- return RedefinedOperandIndex() != kNoRedefinedOperand;
- }
- HValue* RedefinedOperand() {
- return IsInformativeDefinition() ? OperandAt(RedefinedOperandIndex())
- : NULL;
- }
-
- // A purely informative definition is an idef that will not emit code and
- // should therefore be removed from the graph in the RestoreActualValues
- // phase (so that live ranges will be shorter).
- virtual bool IsPurelyInformativeDefinition() { return false; }
-
- // This method must always return the original HValue SSA definition
- // (regardless of any iDef of this value).
- HValue* ActualValue() {
- return IsInformativeDefinition() ? RedefinedOperand()->ActualValue()
- : this;
- }
-
- virtual void AddInformativeDefinitions() {}
-
- void UpdateRedefinedUsesWhileSettingUpInformativeDefinitions() {
- UpdateRedefinedUsesInner<TestDominanceUsingProcessedFlag>();
- }
- void UpdateRedefinedUses() {
- UpdateRedefinedUsesInner<Dominates>();
- }
-
- bool IsInteger32Constant();
- int32_t GetInteger32Constant();
-
- bool IsDefinedAfter(HBasicBlock* other) const;
-
- // Operands.
- virtual int OperandCount() = 0;
- virtual HValue* OperandAt(int index) const = 0;
- void SetOperandAt(int index, HValue* value);
-
- void DeleteAndReplaceWith(HValue* other);
- void ReplaceAllUsesWith(HValue* other);
- bool HasNoUses() const { return use_list_ == NULL; }
- bool HasMultipleUses() const {
- return use_list_ != NULL && use_list_->tail() != NULL;
- }
- int UseCount() const;
-
- // Mark this HValue as dead and to be removed from other HValues' use lists.
- void Kill();
-
- int flags() const { return flags_; }
- void SetFlag(Flag f) { flags_ |= (1 << f); }
- void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
- bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
-
- // Returns true if the flag specified is set for all uses, false otherwise.
- bool CheckUsesForFlag(Flag f);
-
- GVNFlagSet gvn_flags() const { return gvn_flags_; }
- void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
- void ClearGVNFlag(GVNFlag f) { gvn_flags_.Remove(f); }
- bool CheckGVNFlag(GVNFlag f) const { return gvn_flags_.Contains(f); }
- void SetAllSideEffects() { gvn_flags_.Add(AllSideEffectsFlagSet()); }
- void ClearAllSideEffects() {
- gvn_flags_.Remove(AllSideEffectsFlagSet());
- }
- bool HasSideEffects() const {
- return gvn_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
- }
- bool HasObservableSideEffects() const {
- return gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
- }
-
- GVNFlagSet DependsOnFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllDependsOnFlagSet());
- return result;
- }
-
- GVNFlagSet SideEffectFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllSideEffectsFlagSet());
- return result;
- }
-
- GVNFlagSet ChangesFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllChangesFlagSet());
- return result;
- }
-
- GVNFlagSet ObservableChangesFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllChangesFlagSet());
- result.Intersect(AllObservableSideEffectsFlagSet());
- return result;
- }
-
- Range* range() const { return range_; }
- bool HasRange() const { return range_ != NULL; }
- void AddNewRange(Range* r, Zone* zone);
- void RemoveLastAddedRange();
- void ComputeInitialRange(Zone* zone);
-
- // Representation helpers.
- virtual Representation observed_input_representation(int index) {
- return Representation::None();
- }
- virtual Representation RequiredInputRepresentation(int index) = 0;
- virtual void InferRepresentation(HInferRepresentation* h_infer);
-
- // This gives the instruction an opportunity to replace itself with an
- // instruction that does the same in some better way. To replace an
- // instruction with a new one, first add the new instruction to the graph,
- // then return it. Return NULL to have the instruction deleted.
- virtual HValue* Canonicalize() { return this; }
-
- bool Equals(HValue* other);
- virtual intptr_t Hashcode();
-
- // Printing support.
- virtual void PrintTo(StringStream* stream) = 0;
- void PrintNameTo(StringStream* stream);
- void PrintTypeTo(StringStream* stream);
- void PrintRangeTo(StringStream* stream);
- void PrintChangesTo(StringStream* stream);
-
- const char* Mnemonic() const;
-
- // Type information helpers.
- bool HasMonomorphicJSObjectType();
-
- // TODO(mstarzinger): For now instructions can override this function to
- // specify statically known types, once HType can convey more information
- // it should be based on the HType.
- virtual Handle<Map> GetMonomorphicJSObjectMap() { return Handle<Map>(); }
-
- // Updated the inferred type of this instruction and returns true if
- // it has changed.
- bool UpdateInferredType();
-
- virtual HType CalculateInferredType();
-
- // This function must be overridden for instructions which have the
- // kTrackSideEffectDominators flag set, to track instructions that are
- // dominating side effects.
- virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
- UNREACHABLE();
- }
-
- bool IsDead() const {
- return HasNoUses() && !HasObservableSideEffects() && IsDeletable();
- }
-
-#ifdef DEBUG
- virtual void Verify() = 0;
-#endif
-
- // This method is recursive but it is guaranteed to terminate because
- // RedefinedOperand() always dominates "this".
- bool IsRelationTrue(NumericRelation relation, HValue* other) {
- if (this == other) {
- return NumericRelation::Eq().Implies(relation);
- }
-
- bool result = IsRelationTrueInternal(relation, other) ||
- other->IsRelationTrueInternal(relation.Reversed(), this);
- if (!result) {
- HValue* redefined = RedefinedOperand();
- if (redefined != NULL) {
- result = redefined->IsRelationTrue(relation, other);
- }
- }
- return result;
- }
-
- protected:
- // This function must be overridden for instructions with flag kUseGVN, to
- // compare the non-Operand parts of the instruction.
- virtual bool DataEquals(HValue* other) {
- UNREACHABLE();
- return false;
- }
-
- virtual Representation RepresentationFromInputs() {
- return representation();
- }
- Representation RepresentationFromUses();
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentation* h_infer,
- const char* reason);
- void AddDependantsToWorklist(HInferRepresentation* h_infer);
-
- virtual void RepresentationChanged(Representation to) { }
-
- virtual Range* InferRange(Zone* zone);
- virtual void DeleteFromGraph() = 0;
- virtual void InternalSetOperandAt(int index, HValue* value) = 0;
- void clear_block() {
- ASSERT(block_ != NULL);
- block_ = NULL;
- }
-
- void set_representation(Representation r) {
- ASSERT(representation_.IsNone() && !r.IsNone());
- representation_ = r;
- }
-
- // Signature of a function testing if a HValue properly dominates another.
- typedef bool (*DominanceTest)(HValue*, HValue*);
-
- // Simple implementation of DominanceTest implemented walking the chain
- // of Hinstructions (used in UpdateRedefinedUsesInner).
- static bool Dominates(HValue* dominator, HValue* dominated);
-
- // A fast implementation of DominanceTest that works only for the
- // "current" instruction in the SetupInformativeDefinitions() phase.
- // During that phase we use a flag to mark processed instructions, and by
- // checking the flag we can quickly test if an instruction comes before or
- // after the "current" one.
- static bool TestDominanceUsingProcessedFlag(HValue* dominator,
- HValue* dominated);
-
- // If we are redefining an operand, update all its dominated uses (the
- // function that checks if a use is dominated is the template argument).
- template<DominanceTest TestDominance>
- void UpdateRedefinedUsesInner() {
- HValue* input = RedefinedOperand();
- if (input != NULL) {
- for (HUseIterator uses = input->uses(); !uses.Done(); uses.Advance()) {
- HValue* use = uses.value();
- if (TestDominance(this, use)) {
- use->SetOperandAt(uses.index(), this);
- }
- }
- }
- }
-
- // Informative definitions can override this method to state any numeric
- // relation they provide on the redefined value.
- virtual bool IsRelationTrueInternal(NumericRelation relation, HValue* other) {
- return false;
- }
-
- static GVNFlagSet AllDependsOnFlagSet() {
- GVNFlagSet result;
- // Create changes mask.
-#define ADD_FLAG(type) result.Add(kDependsOn##type);
- GVN_TRACKED_FLAG_LIST(ADD_FLAG)
- GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
-#undef ADD_FLAG
- return result;
- }
-
- static GVNFlagSet AllChangesFlagSet() {
- GVNFlagSet result;
- // Create changes mask.
-#define ADD_FLAG(type) result.Add(kChanges##type);
- GVN_TRACKED_FLAG_LIST(ADD_FLAG)
- GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
-#undef ADD_FLAG
- return result;
- }
-
- // A flag mask to mark an instruction as having arbitrary side effects.
- static GVNFlagSet AllSideEffectsFlagSet() {
- GVNFlagSet result = AllChangesFlagSet();
- result.Remove(kChangesOsrEntries);
- return result;
- }
-
- // A flag mask of all side effects that can make observable changes in
- // an executing program (i.e. are not safe to repeat, move or remove);
- static GVNFlagSet AllObservableSideEffectsFlagSet() {
- GVNFlagSet result = AllChangesFlagSet();
- result.Remove(kChangesNewSpacePromotion);
- result.Remove(kChangesElementsKind);
- result.Remove(kChangesElementsPointer);
- result.Remove(kChangesMaps);
- return result;
- }
-
- // Remove the matching use from the use list if present. Returns the
- // removed list node or NULL.
- HUseListNode* RemoveUse(HValue* value, int index);
-
- void RegisterUse(int index, HValue* new_value);
-
- HBasicBlock* block_;
-
- // The id of this instruction in the hydrogen graph, assigned when first
- // added to the graph. Reflects creation order.
- int id_;
-
- Representation representation_;
- HType type_;
- HUseListNode* use_list_;
- Range* range_;
- int flags_;
- GVNFlagSet gvn_flags_;
-
- private:
- virtual bool IsDeletable() const { return false; }
-
- DISALLOW_COPY_AND_ASSIGN(HValue);
-};
-
-
-class HInstruction: public HValue {
- public:
- HInstruction* next() const { return next_; }
- HInstruction* previous() const { return previous_; }
-
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream) { }
-
- bool IsLinked() const { return block() != NULL; }
- void Unlink();
- void InsertBefore(HInstruction* next);
- void InsertAfter(HInstruction* previous);
-
- // The position is a write-once variable.
- int position() const { return position_; }
- bool has_position() const { return position_ != RelocInfo::kNoPosition; }
- void set_position(int position) {
- ASSERT(!has_position());
- ASSERT(position != RelocInfo::kNoPosition);
- position_ = position;
- }
-
- bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
-
- virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- virtual bool IsCall() { return false; }
-
- DECLARE_ABSTRACT_INSTRUCTION(Instruction)
-
- protected:
- HInstruction()
- : next_(NULL),
- previous_(NULL),
- position_(RelocInfo::kNoPosition) {
- SetGVNFlag(kDependsOnOsrEntries);
- }
-
- virtual void DeleteFromGraph() { Unlink(); }
-
- private:
- void InitializeAsFirst(HBasicBlock* block) {
- ASSERT(!IsLinked());
- SetBlock(block);
- }
-
- void PrintMnemonicTo(StringStream* stream);
-
- HInstruction* next_;
- HInstruction* previous_;
- int position_;
-
- friend class HBasicBlock;
-};
-
-
-template<int V>
-class HTemplateInstruction : public HInstruction {
- public:
- int OperandCount() { return V; }
- HValue* OperandAt(int i) const { return inputs_[i]; }
-
- protected:
- void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
-
- private:
- EmbeddedContainer<HValue*, V> inputs_;
-};
-
-
-class HControlInstruction: public HInstruction {
- public:
- virtual HBasicBlock* SuccessorAt(int i) = 0;
- virtual int SuccessorCount() = 0;
- virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0;
-
- virtual void PrintDataTo(StringStream* stream);
-
- HBasicBlock* FirstSuccessor() {
- return SuccessorCount() > 0 ? SuccessorAt(0) : NULL;
- }
- HBasicBlock* SecondSuccessor() {
- return SuccessorCount() > 1 ? SuccessorAt(1) : NULL;
- }
-
- DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction)
-};
-
-
-class HSuccessorIterator BASE_EMBEDDED {
- public:
- explicit HSuccessorIterator(HControlInstruction* instr)
- : instr_(instr), current_(0) { }
-
- bool Done() { return current_ >= instr_->SuccessorCount(); }
- HBasicBlock* Current() { return instr_->SuccessorAt(current_); }
- void Advance() { current_++; }
-
- private:
- HControlInstruction* instr_;
- int current_;
-};
-
-
-template<int S, int V>
-class HTemplateControlInstruction: public HControlInstruction {
- public:
- int SuccessorCount() { return S; }
- HBasicBlock* SuccessorAt(int i) { return successors_[i]; }
- void SetSuccessorAt(int i, HBasicBlock* block) { successors_[i] = block; }
-
- int OperandCount() { return V; }
- HValue* OperandAt(int i) const { return inputs_[i]; }
-
-
- protected:
- void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
-
- private:
- EmbeddedContainer<HBasicBlock*, S> successors_;
- EmbeddedContainer<HValue*, V> inputs_;
-};
-
-
-class HBlockEntry: public HTemplateInstruction<0> {
- public:
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(BlockEntry)
-};
-
-
-class HDummyUse: public HTemplateInstruction<1> {
- public:
- explicit HDummyUse(HValue* value) {
- SetOperandAt(0, value);
- // Pretend to be a Smi so that the HChange instructions inserted
- // before any use generate as little code as possible.
- set_representation(Representation::Tagged());
- set_type(HType::Smi());
- }
-
- HValue* value() { return OperandAt(0); }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(DummyUse);
-};
-
-
-class HNumericConstraint : public HTemplateInstruction<2> {
- public:
- static HNumericConstraint* AddToGraph(HValue* constrained_value,
- NumericRelation relation,
- HValue* related_value,
- HInstruction* insertion_point = NULL);
-
- HValue* constrained_value() { return OperandAt(0); }
- HValue* related_value() { return OperandAt(1); }
- NumericRelation relation() { return relation_; }
-
- virtual int RedefinedOperandIndex() { return 0; }
- virtual bool IsPurelyInformativeDefinition() { return true; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return representation();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual bool IsRelationTrueInternal(NumericRelation other_relation,
- HValue* other_related_value) {
- if (related_value() == other_related_value) {
- return relation().Implies(other_relation);
- } else {
- return false;
- }
- }
-
- DECLARE_CONCRETE_INSTRUCTION(NumericConstraint)
-
- private:
- HNumericConstraint(HValue* constrained_value,
- NumericRelation relation,
- HValue* related_value)
- : relation_(relation) {
- SetOperandAt(0, constrained_value);
- SetOperandAt(1, related_value);
- set_representation(constrained_value->representation());
- }
-
- NumericRelation relation_;
-};
-
-
-// We insert soft-deoptimize when we hit code with unknown typefeedback,
-// so that we get a chance of re-optimizing with useful typefeedback.
-// HSoftDeoptimize does not end a basic block as opposed to HDeoptimize.
-class HSoftDeoptimize: public HTemplateInstruction<0> {
- public:
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(SoftDeoptimize)
-};
-
-
-class HDeoptimize: public HControlInstruction {
- public:
- HDeoptimize(int environment_length, Zone* zone)
- : values_(environment_length, zone) { }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- virtual int OperandCount() { return values_.length(); }
- virtual HValue* OperandAt(int index) const { return values_[index]; }
- virtual void PrintDataTo(StringStream* stream);
-
- virtual int SuccessorCount() { return 0; }
- virtual HBasicBlock* SuccessorAt(int i) {
- UNREACHABLE();
- return NULL;
- }
- virtual void SetSuccessorAt(int i, HBasicBlock* block) {
- UNREACHABLE();
- }
-
- void AddEnvironmentValue(HValue* value, Zone* zone) {
- values_.Add(NULL, zone);
- SetOperandAt(values_.length() - 1, value);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
-
- enum UseEnvironment {
- kNoUses,
- kUseAll
- };
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- values_[index] = value;
- }
-
- private:
- ZoneList<HValue*> values_;
-};
-
-
-class HGoto: public HTemplateControlInstruction<1, 0> {
- public:
- explicit HGoto(HBasicBlock* target) {
- SetSuccessorAt(0, target);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(Goto)
-};
-
-
-class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
- public:
- HUnaryControlInstruction(HValue* value,
- HBasicBlock* true_target,
- HBasicBlock* false_target) {
- SetOperandAt(0, value);
- SetSuccessorAt(0, true_target);
- SetSuccessorAt(1, false_target);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* value() { return OperandAt(0); }
-};
-
-
-class HBranch: public HUnaryControlInstruction {
- public:
- HBranch(HValue* value,
- HBasicBlock* true_target,
- HBasicBlock* false_target,
- ToBooleanStub::Types expected_input_types = ToBooleanStub::no_types())
- : HUnaryControlInstruction(value, true_target, false_target),
- expected_input_types_(expected_input_types) {
- ASSERT(true_target != NULL && false_target != NULL);
- }
- explicit HBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
-
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
- virtual Representation observed_input_representation(int index);
-
- ToBooleanStub::Types expected_input_types() const {
- return expected_input_types_;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch)
-
- private:
- ToBooleanStub::Types expected_input_types_;
-};
-
-
-class HCompareMap: public HUnaryControlInstruction {
- public:
- HCompareMap(HValue* value,
- Handle<Map> map,
- HBasicBlock* true_target,
- HBasicBlock* false_target)
- : HUnaryControlInstruction(value, true_target, false_target),
- map_(map) {
- ASSERT(true_target != NULL);
- ASSERT(false_target != NULL);
- ASSERT(!map.is_null());
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Map> map() const { return map_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareMap)
-
- private:
- Handle<Map> map_;
-};
-
-
-class HReturn: public HTemplateControlInstruction<0, 2> {
- public:
- HReturn(HValue* value, HValue* context) {
- SetOperandAt(0, value);
- SetOperandAt(1, context);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* value() { return OperandAt(0); }
- HValue* context() { return OperandAt(1); }
-
- DECLARE_CONCRETE_INSTRUCTION(Return)
-};
-
-
-class HAbnormalExit: public HTemplateControlInstruction<0, 0> {
- public:
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AbnormalExit)
-};
-
-
-class HUnaryOperation: public HTemplateInstruction<1> {
- public:
- explicit HUnaryOperation(HValue* value) {
- SetOperandAt(0, value);
- }
-
- static HUnaryOperation* cast(HValue* value) {
- return reinterpret_cast<HUnaryOperation*>(value);
- }
-
- HValue* value() const { return OperandAt(0); }
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class HThrow: public HTemplateInstruction<2> {
- public:
- HThrow(HValue* context, HValue* value) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- SetAllSideEffects();
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw)
-};
-
-
-class HUseConst: public HUnaryOperation {
- public:
- explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(UseConst)
-};
-
-
-class HForceRepresentation: public HTemplateInstruction<1> {
- public:
- HForceRepresentation(HValue* value, Representation required_representation) {
- SetOperandAt(0, value);
- set_representation(required_representation);
- }
-
- HValue* value() { return OperandAt(0); }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return representation(); // Same as the output representation.
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
-};
-
-
-class HChange: public HUnaryOperation {
- public:
- HChange(HValue* value,
- Representation to,
- bool is_truncating,
- bool deoptimize_on_undefined)
- : HUnaryOperation(value) {
- ASSERT(!value->representation().IsNone() && !to.IsNone());
- ASSERT(!value->representation().Equals(to));
- set_representation(to);
- set_type(HType::TaggedNumber());
- SetFlag(kUseGVN);
- if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
- if (is_truncating) SetFlag(kTruncatingToInt32);
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- virtual HType CalculateInferredType();
- virtual HValue* Canonicalize();
-
- Representation from() const { return value()->representation(); }
- Representation to() const { return representation(); }
- bool deoptimize_on_undefined() const {
- return CheckFlag(kDeoptimizeOnUndefined);
- }
- bool deoptimize_on_minus_zero() const {
- return CheckFlag(kBailoutOnMinusZero);
- }
- virtual Representation RequiredInputRepresentation(int index) {
- return from();
- }
-
- virtual Range* InferRange(Zone* zone);
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(Change)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const {
- return !from().IsTagged() || value()->type().IsSmi();
- }
-};
-
-
-class HClampToUint8: public HUnaryOperation {
- public:
- explicit HClampToUint8(HValue* value)
- : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampToUint8)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-enum RemovableSimulate {
- REMOVABLE_SIMULATE,
- FIXED_SIMULATE
-};
-
-
-class HSimulate: public HInstruction {
- public:
- HSimulate(BailoutId ast_id,
- int pop_count,
- Zone* zone,
- RemovableSimulate removable)
- : ast_id_(ast_id),
- pop_count_(pop_count),
- values_(2, zone),
- assigned_indexes_(2, zone),
- zone_(zone),
- removable_(removable) {}
- virtual ~HSimulate() {}
-
- virtual void PrintDataTo(StringStream* stream);
-
- bool HasAstId() const { return !ast_id_.IsNone(); }
- BailoutId ast_id() const { return ast_id_; }
- void set_ast_id(BailoutId id) {
- ASSERT(!HasAstId());
- ast_id_ = id;
- }
-
- int pop_count() const { return pop_count_; }
- const ZoneList<HValue*>* values() const { return &values_; }
- int GetAssignedIndexAt(int index) const {
- ASSERT(HasAssignedIndexAt(index));
- return assigned_indexes_[index];
- }
- bool HasAssignedIndexAt(int index) const {
- return assigned_indexes_[index] != kNoIndex;
- }
- void AddAssignedValue(int index, HValue* value) {
- AddValue(index, value);
- }
- void AddPushedValue(HValue* value) {
- AddValue(kNoIndex, value);
- }
- virtual int OperandCount() { return values_.length(); }
- virtual HValue* OperandAt(int index) const { return values_[index]; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- void MergeInto(HSimulate* other);
- bool is_candidate_for_removal() { return removable_ == REMOVABLE_SIMULATE; }
-
- DECLARE_CONCRETE_INSTRUCTION(Simulate)
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- values_[index] = value;
- }
-
- private:
- static const int kNoIndex = -1;
- void AddValue(int index, HValue* value) {
- assigned_indexes_.Add(index, zone_);
- // Resize the list of pushed values.
- values_.Add(NULL, zone_);
- // Set the operand through the base method in HValue to make sure that the
- // use lists are correctly updated.
- SetOperandAt(values_.length() - 1, value);
- }
- BailoutId ast_id_;
- int pop_count_;
- ZoneList<HValue*> values_;
- ZoneList<int> assigned_indexes_;
- Zone* zone_;
- RemovableSimulate removable_;
-};
-
-
-class HStackCheck: public HTemplateInstruction<1> {
- public:
- enum Type {
- kFunctionEntry,
- kBackwardsBranch
- };
-
- HStackCheck(HValue* context, Type type) : type_(type) {
- SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- HValue* context() { return OperandAt(0); }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- void Eliminate() {
- // The stack check eliminator might try to eliminate the same stack
- // check instruction multiple times.
- if (IsLinked()) {
- DeleteAndReplaceWith(NULL);
- }
- }
-
- bool is_function_entry() { return type_ == kFunctionEntry; }
- bool is_backwards_branch() { return type_ == kBackwardsBranch; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck)
-
- private:
- Type type_;
-};
-
-
-enum InliningKind {
- NORMAL_RETURN, // Normal function/method call and return.
- DROP_EXTRA_ON_RETURN, // Drop an extra value from the environment on return.
- CONSTRUCT_CALL_RETURN, // Either use allocated receiver or return value.
- GETTER_CALL_RETURN, // Returning from a getter, need to restore context.
- SETTER_CALL_RETURN // Use the RHS of the assignment as the return value.
-};
-
-
-class HEnterInlined: public HTemplateInstruction<0> {
- public:
- HEnterInlined(Handle<JSFunction> closure,
- int arguments_count,
- FunctionLiteral* function,
- InliningKind inlining_kind,
- Variable* arguments_var,
- ZoneList<HValue*>* arguments_values,
- bool undefined_receiver)
- : closure_(closure),
- arguments_count_(arguments_count),
- arguments_pushed_(false),
- function_(function),
- inlining_kind_(inlining_kind),
- arguments_var_(arguments_var),
- arguments_values_(arguments_values),
- undefined_receiver_(undefined_receiver) {
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> closure() const { return closure_; }
- int arguments_count() const { return arguments_count_; }
- bool arguments_pushed() const { return arguments_pushed_; }
- void set_arguments_pushed() { arguments_pushed_ = true; }
- FunctionLiteral* function() const { return function_; }
- InliningKind inlining_kind() const { return inlining_kind_; }
- bool undefined_receiver() const { return undefined_receiver_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- Variable* arguments_var() { return arguments_var_; }
- ZoneList<HValue*>* arguments_values() { return arguments_values_; }
-
- DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
-
- private:
- Handle<JSFunction> closure_;
- int arguments_count_;
- bool arguments_pushed_;
- FunctionLiteral* function_;
- InliningKind inlining_kind_;
- Variable* arguments_var_;
- ZoneList<HValue*>* arguments_values_;
- bool undefined_receiver_;
-};
-
-
-class HLeaveInlined: public HTemplateInstruction<0> {
- public:
- HLeaveInlined() { }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
-};
-
-
-class HPushArgument: public HUnaryOperation {
- public:
- explicit HPushArgument(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- HValue* argument() { return OperandAt(0); }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument)
-};
-
-
-class HThisFunction: public HTemplateInstruction<0> {
- public:
- HThisFunction() {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HContext: public HTemplateInstruction<0> {
- public:
- HContext() {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Context)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HOuterContext: public HUnaryOperation {
- public:
- explicit HOuterContext(HValue* inner) : HUnaryOperation(inner) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HDeclareGlobals: public HUnaryOperation {
- public:
- HDeclareGlobals(HValue* context,
- Handle<FixedArray> pairs,
- int flags)
- : HUnaryOperation(context),
- pairs_(pairs),
- flags_(flags) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- HValue* context() { return OperandAt(0); }
- Handle<FixedArray> pairs() const { return pairs_; }
- int flags() const { return flags_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- private:
- Handle<FixedArray> pairs_;
- int flags_;
-};
-
-
-class HGlobalObject: public HUnaryOperation {
- public:
- explicit HGlobalObject(HValue* context)
- : HUnaryOperation(context), qml_global_(false) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- bool qml_global() { return qml_global_; }
- void set_qml_global(bool v) { qml_global_ = v; }
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HGlobalObject* o = HGlobalObject::cast(other);
- return o->qml_global_ == qml_global_;
- }
-
- private:
- virtual bool IsDeletable() const { return true; }
- bool qml_global_;
-};
-
-
-class HGlobalReceiver: public HUnaryOperation {
- public:
- explicit HGlobalReceiver(HValue* global_object)
- : HUnaryOperation(global_object) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-template <int V>
-class HCall: public HTemplateInstruction<V> {
- public:
- // The argument count includes the receiver.
- explicit HCall<V>(int argument_count) : argument_count_(argument_count) {
- this->set_representation(Representation::Tagged());
- this->SetAllSideEffects();
- }
-
- virtual HType CalculateInferredType() { return HType::Tagged(); }
-
- virtual int argument_count() const { return argument_count_; }
-
- virtual bool IsCall() { return true; }
-
- private:
- int argument_count_;
-};
-
-
-class HUnaryCall: public HCall<1> {
- public:
- HUnaryCall(HValue* value, int argument_count)
- : HCall<1>(argument_count) {
- SetOperandAt(0, value);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* value() { return OperandAt(0); }
-};
-
-
-class HBinaryCall: public HCall<2> {
- public:
- HBinaryCall(HValue* first, HValue* second, int argument_count)
- : HCall<2>(argument_count) {
- SetOperandAt(0, first);
- SetOperandAt(1, second);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- HValue* first() { return OperandAt(0); }
- HValue* second() { return OperandAt(1); }
-};
-
-
-class HInvokeFunction: public HBinaryCall {
- public:
- HInvokeFunction(HValue* context, HValue* function, int argument_count)
- : HBinaryCall(context, function, argument_count) {
- }
-
- HInvokeFunction(HValue* context,
- HValue* function,
- Handle<JSFunction> known_function,
- int argument_count)
- : HBinaryCall(context, function, argument_count),
- known_function_(known_function) {
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- HValue* context() { return first(); }
- HValue* function() { return second(); }
- Handle<JSFunction> known_function() { return known_function_; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
-
- private:
- Handle<JSFunction> known_function_;
-};
-
-
-class HCallConstantFunction: public HCall<0> {
- public:
- HCallConstantFunction(Handle<JSFunction> function, int argument_count)
- : HCall<0>(argument_count), function_(function) { }
-
- Handle<JSFunction> function() const { return function_; }
-
- bool IsApplyFunction() const {
- return function_->code() ==
- Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction)
-
- private:
- Handle<JSFunction> function_;
-};
-
-
-class HCallKeyed: public HBinaryCall {
- public:
- HCallKeyed(HValue* context, HValue* key, int argument_count)
- : HBinaryCall(context, key, argument_count) {
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- HValue* context() { return first(); }
- HValue* key() { return second(); }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed)
-};
-
-
-class HCallNamed: public HUnaryCall {
- public:
- HCallNamed(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name) {
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* context() { return value(); }
- Handle<String> name() const { return name_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNamed)
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- private:
- Handle<String> name_;
-};
-
-
-class HCallFunction: public HBinaryCall {
- public:
- HCallFunction(HValue* context, HValue* function, int argument_count)
- : HBinaryCall(context, function, argument_count) {
- }
-
- HValue* context() { return first(); }
- HValue* function() { return second(); }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction)
-};
-
-
-class HCallGlobal: public HUnaryCall {
- public:
- HCallGlobal(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name), qml_global_(false) {
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* context() { return value(); }
- Handle<String> name() const { return name_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- bool qml_global() { return qml_global_; }
- void set_qml_global(bool v) { qml_global_ = v; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
-
- private:
- Handle<String> name_;
- bool qml_global_;
-};
-
-
-class HCallKnownGlobal: public HCall<0> {
- public:
- HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
- : HCall<0>(argument_count), target_(target) { }
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> target() const { return target_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal)
-
- private:
- Handle<JSFunction> target_;
-};
-
-
-class HCallNew: public HBinaryCall {
- public:
- HCallNew(HValue* context, HValue* constructor, int argument_count)
- : HBinaryCall(context, constructor, argument_count) {
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- HValue* context() { return first(); }
- HValue* constructor() { return second(); }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew)
-};
-
-
-class HCallNewArray: public HCallNew {
- public:
- HCallNewArray(HValue* context, HValue* constructor, int argument_count,
- Handle<JSGlobalPropertyCell> type_cell)
- : HCallNew(context, constructor, argument_count),
- type_cell_(type_cell) {
- }
-
- Handle<JSGlobalPropertyCell> property_cell() const {
- return type_cell_;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
-
- private:
- Handle<JSGlobalPropertyCell> type_cell_;
-};
-
-
-class HCallRuntime: public HCall<1> {
- public:
- HCallRuntime(HValue* context,
- Handle<String> name,
- const Runtime::Function* c_function,
- int argument_count)
- : HCall<1>(argument_count), c_function_(c_function), name_(name) {
- SetOperandAt(0, context);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* context() { return OperandAt(0); }
- const Runtime::Function* function() const { return c_function_; }
- Handle<String> name() const { return name_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime)
-
- private:
- const Runtime::Function* c_function_;
- Handle<String> name_;
-};
-
-
-class HJSArrayLength: public HTemplateInstruction<2> {
- public:
- HJSArrayLength(HValue* value, HValue* typecheck,
- HType type = HType::Tagged()) {
- set_type(type);
- // The length of an array is stored as a tagged value in the array
- // object. It is guaranteed to be 32 bit integer, but it can be
- // represented as either a smi or heap number.
- SetOperandAt(0, value);
- SetOperandAt(1, typecheck != NULL ? typecheck : value);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnArrayLengths);
- SetGVNFlag(kDependsOnMaps);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* value() { return OperandAt(0); }
- HValue* typecheck() {
- ASSERT(HasTypeCheck());
- return OperandAt(1);
- }
- bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)
-
- protected:
- virtual bool DataEquals(HValue* other_raw) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HFixedArrayBaseLength: public HUnaryOperation {
- public:
- explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
- set_type(HType::Smi());
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnArrayLengths);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HMapEnumLength: public HUnaryOperation {
- public:
- explicit HMapEnumLength(HValue* value) : HUnaryOperation(value) {
- set_type(HType::Smi());
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HElementsKind: public HUnaryOperation {
- public:
- explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnElementsKind);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HBitNot: public HUnaryOperation {
- public:
- explicit HBitNot(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetFlag(kTruncatingToInt32);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Integer32();
- }
- virtual Representation observed_input_representation(int index) {
- return Representation::Integer32();
- }
- virtual HType CalculateInferredType();
-
- virtual HValue* Canonicalize();
-
- DECLARE_CONCRETE_INSTRUCTION(BitNot)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HUnaryMathOperation: public HTemplateInstruction<2> {
- public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* value,
- BuiltinFunctionId op);
-
- HValue* context() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual HType CalculateInferredType();
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- virtual Representation RequiredInputRepresentation(int index) {
- if (index == 0) {
- return Representation::Tagged();
- } else {
- switch (op_) {
- case kMathFloor:
- case kMathRound:
- case kMathSqrt:
- case kMathPowHalf:
- case kMathLog:
- case kMathExp:
- case kMathSin:
- case kMathCos:
- case kMathTan:
- return Representation::Double();
- case kMathAbs:
- return representation();
- default:
- UNREACHABLE();
- return Representation::None();
- }
- }
- }
-
- virtual HValue* Canonicalize();
-
- BuiltinFunctionId op() const { return op_; }
- const char* OpName() const;
-
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
- return op_ == b->op();
- }
-
- private:
- HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op)
- : op_(op) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- switch (op) {
- case kMathFloor:
- case kMathRound:
- case kMathCeil:
- set_representation(Representation::Integer32());
- break;
- case kMathAbs:
- // Not setting representation here: it is None intentionally.
- SetFlag(kFlexibleRepresentation);
- SetGVNFlag(kChangesNewSpacePromotion);
- break;
- case kMathSqrt:
- case kMathPowHalf:
- case kMathLog:
- case kMathSin:
- case kMathCos:
- case kMathTan:
- set_representation(Representation::Double());
- SetGVNFlag(kChangesNewSpacePromotion);
- break;
- case kMathExp:
- set_representation(Representation::Double());
- break;
- default:
- UNREACHABLE();
- }
- SetFlag(kUseGVN);
- }
-
- virtual bool IsDeletable() const { return true; }
-
- BuiltinFunctionId op_;
-};
-
-
-class HLoadElements: public HTemplateInstruction<2> {
- public:
- HLoadElements(HValue* value, HValue* typecheck) {
- SetOperandAt(0, value);
- SetOperandAt(1, typecheck != NULL ? typecheck : value);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnElementsPointer);
- }
-
- HValue* value() { return OperandAt(0); }
- HValue* typecheck() {
- ASSERT(HasTypeCheck());
- return OperandAt(1);
- }
- bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HLoadExternalArrayPointer: public HUnaryOperation {
- public:
- explicit HLoadExternalArrayPointer(HValue* value)
- : HUnaryOperation(value) {
- set_representation(Representation::External());
- // The result of this instruction is idempotent as long as its inputs don't
- // change. The external array of a specialized array elements object cannot
- // change once set, so it's no necessary to introduce any additional
- // dependencies on top of the inputs.
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HCheckMaps: public HTemplateInstruction<2> {
- public:
- HCheckMaps(HValue* value, Handle<Map> map, Zone* zone,
- HValue* typecheck = NULL) {
- SetOperandAt(0, value);
- // If callers don't depend on a typecheck, they can pass in NULL. In that
- // case we use a copy of the |value| argument as a dummy value.
- SetOperandAt(1, typecheck != NULL ? typecheck : value);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
- map_set()->Add(map, zone);
- }
- HCheckMaps(HValue* value, SmallMapList* maps, Zone* zone) {
- SetOperandAt(0, value);
- SetOperandAt(1, value);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
- for (int i = 0; i < maps->length(); i++) {
- map_set()->Add(maps->at(i), zone);
- }
- map_set()->Sort();
- }
-
- static HCheckMaps* NewWithTransitions(HValue* object, Handle<Map> map,
- Zone* zone) {
- HCheckMaps* check_map = new(zone) HCheckMaps(object, map, zone);
- SmallMapList* map_set = check_map->map_set();
-
- // Since transitioned elements maps of the initial map don't fail the map
- // check, the CheckMaps instruction doesn't need to depend on ElementsKinds.
- check_map->ClearGVNFlag(kDependsOnElementsKind);
-
- ElementsKind kind = map->elements_kind();
- bool packed = IsFastPackedElementsKind(kind);
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- Map* transitioned_map =
- map->LookupElementsTransitionMap(kind);
- if (transitioned_map) {
- map_set->Add(Handle<Map>(transitioned_map), zone);
- }
- };
- map_set->Sort();
- return check_map;
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator);
- virtual void PrintDataTo(StringStream* stream);
- virtual HType CalculateInferredType();
-
- HValue* value() { return OperandAt(0); }
- SmallMapList* map_set() { return &map_set_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HCheckMaps* b = HCheckMaps::cast(other);
- // Relies on the fact that map_set has been sorted before.
- if (map_set()->length() != b->map_set()->length()) return false;
- for (int i = 0; i < map_set()->length(); i++) {
- if (!map_set()->at(i).is_identical_to(b->map_set()->at(i))) return false;
- }
- return true;
- }
-
- private:
- SmallMapList map_set_;
-};
-
-
-class HCheckFunction: public HUnaryOperation {
- public:
- HCheckFunction(HValue* value, Handle<JSFunction> function)
- : HUnaryOperation(value), target_(function) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- target_in_new_space_ = Isolate::Current()->heap()->InNewSpace(*function);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual void PrintDataTo(StringStream* stream);
- virtual HType CalculateInferredType();
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- Handle<JSFunction> target() const { return target_; }
- bool target_in_new_space() const { return target_in_new_space_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HCheckFunction* b = HCheckFunction::cast(other);
- return target_.is_identical_to(b->target());
- }
-
- private:
- Handle<JSFunction> target_;
- bool target_in_new_space_;
-};
-
-
-class HCheckInstanceType: public HUnaryOperation {
- public:
- static HCheckInstanceType* NewIsSpecObject(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_SPEC_OBJECT);
- }
- static HCheckInstanceType* NewIsJSArray(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_JS_ARRAY);
- }
- static HCheckInstanceType* NewIsString(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_STRING);
- }
- static HCheckInstanceType* NewIsInternalizedString(
- HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_INTERNALIZED_STRING);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual HValue* Canonicalize();
-
- bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
- void GetCheckInterval(InstanceType* first, InstanceType* last);
- void GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag);
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType)
-
- protected:
- // TODO(ager): It could be nice to allow the ommision of instance
- // type checks if we have already performed an instance type check
- // with a larger range.
- virtual bool DataEquals(HValue* other) {
- HCheckInstanceType* b = HCheckInstanceType::cast(other);
- return check_ == b->check_;
- }
-
- private:
- enum Check {
- IS_SPEC_OBJECT,
- IS_JS_ARRAY,
- IS_STRING,
- IS_INTERNALIZED_STRING,
- LAST_INTERVAL_CHECK = IS_JS_ARRAY
- };
-
- const char* GetCheckName();
-
- HCheckInstanceType(HValue* value, Check check)
- : HUnaryOperation(value), check_(check) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- const Check check_;
-};
-
-
-class HCheckNonSmi: public HUnaryOperation {
- public:
- explicit HCheckNonSmi(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual HType CalculateInferredType();
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- virtual HValue* Canonicalize() {
- HType value_type = value()->type();
- if (!value_type.IsUninitialized() &&
- (value_type.IsHeapNumber() ||
- value_type.IsString() ||
- value_type.IsBoolean() ||
- value_type.IsNonPrimitive())) {
- return NULL;
- }
- return this;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HCheckPrototypeMaps: public HTemplateInstruction<0> {
- public:
- HCheckPrototypeMaps(Handle<JSObject> prototype,
- Handle<JSObject> holder,
- Zone* zone) : prototypes_(2, zone), maps_(2, zone) {
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- // Keep a list of all objects on the prototype chain up to the holder
- // and the expected maps.
- while (true) {
- prototypes_.Add(prototype, zone);
- maps_.Add(Handle<Map>(prototype->map()), zone);
- if (prototype.is_identical_to(holder)) break;
- prototype = Handle<JSObject>(JSObject::cast(prototype->GetPrototype()));
- }
- }
-
- ZoneList<Handle<JSObject> >* prototypes() { return &prototypes_; }
-
- ZoneList<Handle<Map> >* maps() { return &maps_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps)
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual intptr_t Hashcode() {
- ASSERT_ALLOCATION_DISABLED;
- // Dereferencing to use the object's raw address for hashing is safe.
- AllowHandleDereference allow_handle_deref(isolate());
- intptr_t hash = 0;
- for (int i = 0; i < prototypes_.length(); i++) {
- hash = 17 * hash + reinterpret_cast<intptr_t>(*prototypes_[i]);
- hash = 17 * hash + reinterpret_cast<intptr_t>(*maps_[i]);
- }
- return hash;
- }
-
- bool CanOmitPrototypeChecks() {
- for (int i = 0; i < maps()->length(); i++) {
- if (!maps()->at(i)->CanOmitPrototypeChecks()) return false;
- }
- return true;
- }
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other);
-#ifdef DEBUG
- if (prototypes_.length() != b->prototypes()->length()) return false;
- for (int i = 0; i < prototypes_.length(); i++) {
- if (!prototypes_[i].is_identical_to(b->prototypes()->at(i))) return false;
- if (!maps_[i].is_identical_to(b->maps()->at(i))) return false;
- }
- return true;
-#else
- return prototypes_.first().is_identical_to(b->prototypes()->first()) &&
- prototypes_.last().is_identical_to(b->prototypes()->last());
-#endif // DEBUG
- }
-
- private:
- ZoneList<Handle<JSObject> > prototypes_;
- ZoneList<Handle<Map> > maps_;
-};
-
-
-class HCheckSmi: public HUnaryOperation {
- public:
- explicit HCheckSmi(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HCheckSmiOrInt32: public HUnaryOperation {
- public:
- explicit HCheckSmiOrInt32(HValue* value) : HUnaryOperation(value) {
- SetFlag(kFlexibleRepresentation);
- SetFlag(kUseGVN);
- }
-
- virtual int RedefinedOperandIndex() { return 0; }
- virtual Representation RequiredInputRepresentation(int index) {
- return representation();
- }
- virtual void InferRepresentation(HInferRepresentation* h_infer);
-
- virtual Representation observed_input_representation(int index) {
- return Representation::Integer32();
- }
-
- virtual HValue* Canonicalize() {
- if (representation().IsTagged() && !type().IsSmi()) {
- return this;
- } else {
- return value();
- }
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmiOrInt32)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HPhi: public HValue {
- public:
- HPhi(int merged_index, Zone* zone)
- : inputs_(2, zone),
- merged_index_(merged_index),
- phi_id_(-1),
- is_live_(false),
- is_convertible_to_integer_(true) {
- for (int i = 0; i < Representation::kNumRepresentations; i++) {
- non_phi_uses_[i] = 0;
- indirect_uses_[i] = 0;
- }
- ASSERT(merged_index >= 0);
- SetFlag(kFlexibleRepresentation);
- }
-
- virtual Representation RepresentationFromInputs();
-
- virtual Range* InferRange(Zone* zone);
- virtual void InferRepresentation(HInferRepresentation* h_infer);
- Representation RepresentationObservedByAllNonPhiUses();
- Representation RepresentationFromUseRequirements();
- virtual Representation RequiredInputRepresentation(int index) {
- return representation();
- }
- virtual HType CalculateInferredType();
- virtual int OperandCount() { return inputs_.length(); }
- virtual HValue* OperandAt(int index) const { return inputs_[index]; }
- HValue* GetRedundantReplacement();
- void AddInput(HValue* value);
- bool HasRealUses();
-
- bool IsReceiver() { return merged_index_ == 0; }
-
- int merged_index() const { return merged_index_; }
-
- virtual void AddInformativeDefinitions();
-
- virtual void PrintTo(StringStream* stream);
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- void InitRealUses(int id);
- void AddNonPhiUsesFrom(HPhi* other);
- void AddIndirectUsesTo(int* use_count);
-
- int tagged_non_phi_uses() const {
- return non_phi_uses_[Representation::kTagged];
- }
- int int32_non_phi_uses() const {
- return non_phi_uses_[Representation::kInteger32];
- }
- int double_non_phi_uses() const {
- return non_phi_uses_[Representation::kDouble];
- }
- int tagged_indirect_uses() const {
- return indirect_uses_[Representation::kTagged];
- }
- int int32_indirect_uses() const {
- return indirect_uses_[Representation::kInteger32];
- }
- int double_indirect_uses() const {
- return indirect_uses_[Representation::kDouble];
- }
- int phi_id() { return phi_id_; }
- bool is_live() { return is_live_; }
- void set_is_live(bool b) { is_live_ = b; }
-
- static HPhi* cast(HValue* value) {
- ASSERT(value->IsPhi());
- return reinterpret_cast<HPhi*>(value);
- }
- virtual Opcode opcode() const { return HValue::kPhi; }
-
- virtual bool IsConvertibleToInteger() const {
- return is_convertible_to_integer_;
- }
-
- void set_is_convertible_to_integer(bool b) {
- is_convertible_to_integer_ = b;
- }
-
- bool AllOperandsConvertibleToInteger() {
- for (int i = 0; i < OperandCount(); ++i) {
- if (!OperandAt(i)->IsConvertibleToInteger()) {
- if (FLAG_trace_representation) {
- HValue* input = OperandAt(i);
- PrintF("#%d %s: Input #%d %s at %d is NCTI\n",
- id(), Mnemonic(), input->id(), input->Mnemonic(), i);
- }
- return false;
- }
- }
- return true;
- }
-
- protected:
- virtual void DeleteFromGraph();
- virtual void InternalSetOperandAt(int index, HValue* value) {
- inputs_[index] = value;
- }
-
- virtual bool IsRelationTrueInternal(NumericRelation relation, HValue* other);
-
- private:
- ZoneList<HValue*> inputs_;
- int merged_index_;
-
- int non_phi_uses_[Representation::kNumRepresentations];
- int indirect_uses_[Representation::kNumRepresentations];
- int phi_id_;
- bool is_live_;
- bool is_convertible_to_integer_;
-};
-
-
-class HInductionVariableAnnotation : public HUnaryOperation {
- public:
- static HInductionVariableAnnotation* AddToGraph(HPhi* phi,
- NumericRelation relation,
- int operand_index);
-
- NumericRelation relation() { return relation_; }
- HValue* induction_base() { return phi_->OperandAt(operand_index_); }
-
- virtual int RedefinedOperandIndex() { return 0; }
- virtual bool IsPurelyInformativeDefinition() { return true; }
- virtual Representation RequiredInputRepresentation(int index) {
- return representation();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual bool IsRelationTrueInternal(NumericRelation other_relation,
- HValue* other_related_value) {
- if (induction_base() == other_related_value) {
- return relation().Implies(other_relation);
- } else {
- return false;
- }
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InductionVariableAnnotation)
-
- private:
- HInductionVariableAnnotation(HPhi* phi,
- NumericRelation relation,
- int operand_index)
- : HUnaryOperation(phi),
- phi_(phi), relation_(relation), operand_index_(operand_index) {
- set_representation(phi->representation());
- }
-
- // We need to store the phi both here and in the instruction operand because
- // the operand can change if a new idef of the phi is added between the phi
- // and this instruction (inserting an idef updates every use).
- HPhi* phi_;
- NumericRelation relation_;
- int operand_index_;
-};
-
-
-class HArgumentsObject: public HTemplateInstruction<0> {
- public:
- HArgumentsObject() {
- set_representation(Representation::Tagged());
- SetFlag(kIsArguments);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HConstant: public HTemplateInstruction<0> {
- public:
- HConstant(Handle<Object> handle, Representation r);
- HConstant(int32_t value, Representation r);
- HConstant(double value, Representation r);
-
- Handle<Object> handle() {
- if (handle_.is_null()) {
- handle_ = FACTORY->NewNumber(double_value_, TENURED);
- }
- ASSERT(has_int32_value_ || !handle_->IsSmi());
- return handle_;
- }
-
- bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
-
- bool ImmortalImmovable() const {
- if (has_int32_value_) {
- return false;
- }
- if (has_double_value_) {
- if (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
- isnan(double_value_)) {
- return true;
- }
- return false;
- }
-
- ASSERT(!handle_.is_null());
- Heap* heap = isolate()->heap();
- // We should have handled minus_zero_value and nan_value in the
- // has_double_value_ clause above.
- // Dereferencing is safe to compare against singletons.
- AllowHandleDereference allow_handle_deref(isolate());
- ASSERT(*handle_ != heap->minus_zero_value());
- ASSERT(*handle_ != heap->nan_value());
- return *handle_ == heap->undefined_value() ||
- *handle_ == heap->null_value() ||
- *handle_ == heap->true_value() ||
- *handle_ == heap->false_value() ||
- *handle_ == heap->the_hole_value() ||
- *handle_ == heap->empty_string();
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- virtual bool IsConvertibleToInteger() const {
- return has_int32_value_;
- }
-
- virtual bool EmitAtUses() { return !representation().IsDouble(); }
- virtual void PrintDataTo(StringStream* stream);
- virtual HType CalculateInferredType();
- bool IsInteger() { return handle()->IsSmi(); }
- HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
- HConstant* CopyToTruncatedInt32(Zone* zone) const;
- bool HasInteger32Value() const { return has_int32_value_; }
- int32_t Integer32Value() const {
- ASSERT(HasInteger32Value());
- return int32_value_;
- }
- bool HasSmiValue() const {
- return HasInteger32Value() && Smi::IsValid(Integer32Value());
- }
- bool HasDoubleValue() const { return has_double_value_; }
- double DoubleValue() const {
- ASSERT(HasDoubleValue());
- return double_value_;
- }
- bool HasNumberValue() const { return has_double_value_; }
- int32_t NumberValueAsInteger32() const {
- ASSERT(HasNumberValue());
- // Irrespective of whether a numeric HConstant can be safely
- // represented as an int32, we store the (in some cases lossy)
- // representation of the number in int32_value_.
- return int32_value_;
- }
- bool HasStringValue() const {
- if (has_double_value_ || has_int32_value_) return false;
- ASSERT(!handle_.is_null());
- return handle_->IsString();
- }
- Handle<String> StringValue() const {
- ASSERT(HasStringValue());
- return Handle<String>::cast(handle_);
- }
-
- bool ToBoolean();
-
- bool IsUint32() {
- return HasInteger32Value() && (Integer32Value() >= 0);
- }
-
- virtual intptr_t Hashcode() {
- ASSERT_ALLOCATION_DISABLED;
- intptr_t hash;
-
- if (has_int32_value_) {
- hash = static_cast<intptr_t>(int32_value_);
- } else if (has_double_value_) {
- hash = static_cast<intptr_t>(BitCast<int64_t>(double_value_));
- } else {
- ASSERT(!handle_.is_null());
- // Dereferencing to use the object's raw address for hashing is safe.
- AllowHandleDereference allow_handle_deref(isolate());
- hash = reinterpret_cast<intptr_t>(*handle_);
- }
-
- return hash;
- }
-
-#ifdef DEBUG
- virtual void Verify() { }
-#endif
-
- DECLARE_CONCRETE_INSTRUCTION(Constant)
-
- protected:
- virtual Range* InferRange(Zone* zone);
-
- virtual bool DataEquals(HValue* other) {
- HConstant* other_constant = HConstant::cast(other);
- if (has_int32_value_) {
- return other_constant->has_int32_value_ &&
- int32_value_ == other_constant->int32_value_;
- } else if (has_double_value_) {
- return other_constant->has_double_value_ &&
- BitCast<int64_t>(double_value_) ==
- BitCast<int64_t>(other_constant->double_value_);
- } else {
- ASSERT(!handle_.is_null());
- return !other_constant->handle_.is_null() &&
- handle_.is_identical_to(other_constant->handle_);
- }
- }
-
- private:
- void Initialize(Representation r);
-
- virtual bool IsDeletable() const { return true; }
-
- // If this is a numerical constant, handle_ either points to to the
- // HeapObject the constant originated from or is null. If the
- // constant is non-numeric, handle_ always points to a valid
- // constant HeapObject.
- Handle<Object> handle_;
-
- // We store the HConstant in the most specific form safely possible.
- // The two flags, has_int32_value_ and has_double_value_ tell us if
- // int32_value_ and double_value_ hold valid, safe representations
- // of the constant. has_int32_value_ implies has_double_value_ but
- // not the converse.
- bool has_int32_value_ : 1;
- bool has_double_value_ : 1;
- int32_t int32_value_;
- double double_value_;
-};
-
-
-class HBinaryOperation: public HTemplateInstruction<3> {
- public:
- HBinaryOperation(HValue* context, HValue* left, HValue* right)
- : observed_output_representation_(Representation::None()) {
- ASSERT(left != NULL && right != NULL);
- SetOperandAt(0, context);
- SetOperandAt(1, left);
- SetOperandAt(2, right);
- observed_input_representation_[0] = Representation::None();
- observed_input_representation_[1] = Representation::None();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* left() { return OperandAt(1); }
- HValue* right() { return OperandAt(2); }
-
- // TODO(kasperl): Move these helpers to the IA-32 Lithium
- // instruction sequence builder.
- HValue* LeastConstantOperand() {
- if (IsCommutative() && left()->IsConstant()) return right();
- return left();
- }
-
- HValue* MostConstantOperand() {
- if (IsCommutative() && left()->IsConstant()) return left();
- return right();
- }
-
- void set_observed_input_representation(Representation left,
- Representation right) {
- observed_input_representation_[0] = left;
- observed_input_representation_[1] = right;
- }
-
- virtual void initialize_output_representation(Representation observed) {
- observed_output_representation_ = observed;
- }
-
- virtual Representation observed_input_representation(int index) {
- if (index == 0) return Representation::Tagged();
- return observed_input_representation_[index - 1];
- }
-
- virtual void InferRepresentation(HInferRepresentation* h_infer);
- virtual Representation RepresentationFromInputs();
- virtual void AssumeRepresentation(Representation r);
-
- virtual bool IsCommutative() const { return false; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
-
- private:
- Representation observed_input_representation_[2];
- Representation observed_output_representation_;
-};
-
-
-class HWrapReceiver: public HTemplateInstruction<2> {
- public:
- HWrapReceiver(HValue* receiver, HValue* function) {
- set_representation(Representation::Tagged());
- SetOperandAt(0, receiver);
- SetOperandAt(1, function);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- HValue* receiver() { return OperandAt(0); }
- HValue* function() { return OperandAt(1); }
-
- virtual HValue* Canonicalize();
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
-};
-
-
-class HApplyArguments: public HTemplateInstruction<4> {
- public:
- HApplyArguments(HValue* function,
- HValue* receiver,
- HValue* length,
- HValue* elements) {
- set_representation(Representation::Tagged());
- SetOperandAt(0, function);
- SetOperandAt(1, receiver);
- SetOperandAt(2, length);
- SetOperandAt(3, elements);
- SetAllSideEffects();
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- // The length is untagged, all other inputs are tagged.
- return (index == 2)
- ? Representation::Integer32()
- : Representation::Tagged();
- }
-
- HValue* function() { return OperandAt(0); }
- HValue* receiver() { return OperandAt(1); }
- HValue* length() { return OperandAt(2); }
- HValue* elements() { return OperandAt(3); }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments)
-};
-
-
-class HArgumentsElements: public HTemplateInstruction<0> {
- public:
- explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
- // The value produced by this instruction is a pointer into the stack
- // that looks as if it was a smi because of alignment.
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- bool from_inlined() const { return from_inlined_; }
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-
- bool from_inlined_;
-};
-
-
-class HArgumentsLength: public HUnaryOperation {
- public:
- explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HAccessArgumentsAt: public HTemplateInstruction<3> {
- public:
- HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetOperandAt(0, arguments);
- SetOperandAt(1, length);
- SetOperandAt(2, index);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- // The arguments elements is considered tagged.
- return index == 0
- ? Representation::Tagged()
- : Representation::Integer32();
- }
-
- HValue* arguments() { return OperandAt(0); }
- HValue* length() { return OperandAt(1); }
- HValue* index() { return OperandAt(2); }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt)
-
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-enum BoundsCheckKeyMode {
- DONT_ALLOW_SMI_KEY,
- ALLOW_SMI_KEY
-};
-
-
-class HBoundsCheck: public HTemplateInstruction<2> {
- public:
- // Normally HBoundsCheck should be created using the
- // HGraphBuilder::AddBoundsCheck() helper, which also guards the index with
- // a HCheckSmiOrInt32 check.
- // However when building stubs, where we know that the arguments are Int32,
- // it makes sense to invoke this constructor directly.
- HBoundsCheck(HValue* index,
- HValue* length,
- BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY,
- Representation r = Representation::None())
- : key_mode_(key_mode), skip_check_(false) {
- SetOperandAt(0, index);
- SetOperandAt(1, length);
- if (r.IsNone()) {
- // In the normal compilation pipeline the representation is flexible
- // (see InferRepresentation).
- SetFlag(kFlexibleRepresentation);
- } else {
- // When compiling stubs we want to set the representation explicitly
- // so the compilation pipeline can skip the HInferRepresentation phase.
- set_representation(r);
- }
- SetFlag(kUseGVN);
- }
-
- bool skip_check() { return skip_check_; }
- void set_skip_check(bool skip_check) { skip_check_ = skip_check; }
-
- virtual Representation RequiredInputRepresentation(int arg_index) {
- return representation();
- }
- virtual Representation observed_input_representation(int index) {
- return Representation::Integer32();
- }
-
- virtual bool IsRelationTrueInternal(NumericRelation relation,
- HValue* related_value);
-
- virtual void PrintDataTo(StringStream* stream);
- virtual void InferRepresentation(HInferRepresentation* h_infer);
-
- HValue* index() { return OperandAt(0); }
- HValue* length() { return OperandAt(1); }
-
- virtual int RedefinedOperandIndex() { return 0; }
- virtual bool IsPurelyInformativeDefinition() { return skip_check(); }
- virtual void AddInformativeDefinitions();
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
- BoundsCheckKeyMode key_mode_;
- bool skip_check_;
-};
-
-
-class HBitwiseBinaryOperation: public HBinaryOperation {
- public:
- HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right) {
- SetFlag(kFlexibleRepresentation);
- SetFlag(kTruncatingToInt32);
- SetAllSideEffects();
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Tagged()
- : representation();
- }
-
- virtual void RepresentationChanged(Representation to) {
- if (!to.IsTagged()) {
- ASSERT(to.IsInteger32());
- ClearAllSideEffects();
- SetFlag(kUseGVN);
- } else {
- SetAllSideEffects();
- ClearFlag(kUseGVN);
- }
- }
-
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentation* h_infer,
- const char* reason) {
- // We only generate either int32 or generic tagged bitwise operations.
- if (new_rep.IsDouble()) new_rep = Representation::Integer32();
- HValue::UpdateRepresentation(new_rep, h_infer, reason);
- }
-
- virtual void initialize_output_representation(Representation observed) {
- if (observed.IsDouble()) observed = Representation::Integer32();
- HBinaryOperation::initialize_output_representation(observed);
- }
-
- virtual HType CalculateInferredType();
-
- DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HMathFloorOfDiv: public HBinaryOperation {
- public:
- HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetFlag(kCanOverflow);
- if (!right->IsConstant()) {
- SetFlag(kCanBeDivByZero);
- }
- }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Integer32();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HArithmeticBinaryOperation: public HBinaryOperation {
- public:
- HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right) {
- SetAllSideEffects();
- SetFlag(kFlexibleRepresentation);
- }
-
- virtual void RepresentationChanged(Representation to) {
- if (to.IsTagged()) {
- SetAllSideEffects();
- ClearFlag(kUseGVN);
- } else {
- ClearAllSideEffects();
- SetFlag(kUseGVN);
- }
- }
-
- virtual HType CalculateInferredType();
- virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Tagged()
- : representation();
- }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HCompareGeneric: public HBinaryOperation {
- public:
- HCompareGeneric(HValue* context,
- HValue* left,
- HValue* right,
- Token::Value token)
- : HBinaryOperation(context, left, right), token_(token) {
- ASSERT(Token::IsCompareOp(token));
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Tagged()
- : representation();
- }
-
- Token::Value token() const { return token_; }
- virtual void PrintDataTo(StringStream* stream);
-
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
-
- private:
- Token::Value token_;
-};
-
-
-class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
- public:
- HCompareIDAndBranch(HValue* left, HValue* right, Token::Value token)
- : token_(token) {
- SetFlag(kFlexibleRepresentation);
- ASSERT(Token::IsCompareOp(token));
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- }
-
- HValue* left() { return OperandAt(0); }
- HValue* right() { return OperandAt(1); }
- Token::Value token() const { return token_; }
-
- void set_observed_input_representation(Representation left,
- Representation right) {
- observed_input_representation_[0] = left;
- observed_input_representation_[1] = right;
- }
-
- virtual void InferRepresentation(HInferRepresentation* h_infer);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return representation();
- }
- virtual Representation observed_input_representation(int index) {
- return observed_input_representation_[index];
- }
- virtual void PrintDataTo(StringStream* stream);
-
- virtual void AddInformativeDefinitions();
-
- DECLARE_CONCRETE_INSTRUCTION(CompareIDAndBranch)
-
- private:
- Representation observed_input_representation_[2];
- Token::Value token_;
-};
-
-
-class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
- public:
- HCompareObjectEqAndBranch(HValue* left, HValue* right) {
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- }
-
- HValue* left() { return OperandAt(0); }
- HValue* right() { return OperandAt(1); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch)
-};
-
-
-class HCompareConstantEqAndBranch: public HUnaryControlInstruction {
- public:
- HCompareConstantEqAndBranch(HValue* left, int right, Token::Value op)
- : HUnaryControlInstruction(left, NULL, NULL), op_(op), right_(right) {
- ASSERT(op == Token::EQ_STRICT);
- }
-
- Token::Value op() const { return op_; }
- HValue* left() { return value(); }
- int right() const { return right_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Integer32();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareConstantEqAndBranch);
-
- private:
- const Token::Value op_;
- const int right_;
-};
-
-
-class HIsNilAndBranch: public HUnaryControlInstruction {
- public:
- HIsNilAndBranch(HValue* value, EqualityKind kind, NilValue nil)
- : HUnaryControlInstruction(value, NULL, NULL), kind_(kind), nil_(nil) { }
-
- EqualityKind kind() const { return kind_; }
- NilValue nil() const { return nil_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual Representation observed_input_representation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)
-
- private:
- EqualityKind kind_;
- NilValue nil_;
-};
-
-
-class HIsObjectAndBranch: public HUnaryControlInstruction {
- public:
- explicit HIsObjectAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
-};
-
-class HIsStringAndBranch: public HUnaryControlInstruction {
- public:
- explicit HIsStringAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
-};
-
-
-class HIsSmiAndBranch: public HUnaryControlInstruction {
- public:
- explicit HIsSmiAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HIsUndetectableAndBranch: public HUnaryControlInstruction {
- public:
- explicit HIsUndetectableAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
-};
-
-
-class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
- public:
- HStringCompareAndBranch(HValue* context,
- HValue* left,
- HValue* right,
- Token::Value token)
- : token_(token) {
- ASSERT(Token::IsCompareOp(token));
- SetOperandAt(0, context);
- SetOperandAt(1, left);
- SetOperandAt(2, right);
- set_representation(Representation::Tagged());
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* left() { return OperandAt(1); }
- HValue* right() { return OperandAt(2); }
- Token::Value token() const { return token_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- Representation GetInputRepresentation() const {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch)
-
- private:
- Token::Value token_;
-};
-
-
-class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
- public:
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch)
-};
-
-
-class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
- public:
- HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
- : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
- HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
- : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
- ASSERT(to == LAST_TYPE); // Others not implemented yet in backend.
- }
-
- InstanceType from() { return from_; }
- InstanceType to() { return to_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
-
- private:
- InstanceType from_;
- InstanceType to_; // Inclusive range, not all combinations work.
-};
-
-
-class HHasCachedArrayIndexAndBranch: public HUnaryControlInstruction {
- public:
- explicit HHasCachedArrayIndexAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch)
-};
-
-
-class HGetCachedArrayIndex: public HUnaryOperation {
- public:
- explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HClassOfTestAndBranch: public HUnaryControlInstruction {
- public:
- HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
- : HUnaryControlInstruction(value, NULL, NULL),
- class_name_(class_name) { }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> class_name() const { return class_name_; }
-
- private:
- Handle<String> class_name_;
-};
-
-
-class HTypeofIsAndBranch: public HUnaryControlInstruction {
- public:
- HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
- : HUnaryControlInstruction(value, NULL, NULL),
- type_literal_(type_literal) { }
-
- Handle<String> type_literal() { return type_literal_; }
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- private:
- Handle<String> type_literal_;
-};
-
-
-class HInstanceOf: public HBinaryOperation {
- public:
- HInstanceOf(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual HType CalculateInferredType();
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
-};
-
-
-class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
- public:
- HInstanceOfKnownGlobal(HValue* context,
- HValue* left,
- Handle<JSFunction> right)
- : function_(right) {
- SetOperandAt(0, context);
- SetOperandAt(1, left);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* left() { return OperandAt(1); }
- Handle<JSFunction> function() { return function_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal)
-
- private:
- Handle<JSFunction> function_;
-};
-
-
-// TODO(mstarzinger): This instruction should be modeled as a load of the map
-// field followed by a load of the instance size field once HLoadNamedField is
-// flexible enough to accommodate byte-field loads.
-class HInstanceSize: public HTemplateInstruction<1> {
- public:
- explicit HInstanceSize(HValue* object) {
- SetOperandAt(0, object);
- set_representation(Representation::Integer32());
- }
-
- HValue* object() { return OperandAt(0); }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize)
-};
-
-
-class HPower: public HTemplateInstruction<2> {
- public:
- static HInstruction* New(Zone* zone, HValue* left, HValue* right);
-
- HValue* left() { return OperandAt(0); }
- HValue* right() const { return OperandAt(1); }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Double()
- : Representation::None();
- }
- virtual Representation observed_input_representation(int index) {
- return RequiredInputRepresentation(index);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Power)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- HPower(HValue* left, HValue* right) {
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- set_representation(Representation::Double());
- SetFlag(kUseGVN);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- virtual bool IsDeletable() const {
- return !right()->representation().IsTagged();
- }
-};
-
-
-class HRandom: public HTemplateInstruction<1> {
- public:
- explicit HRandom(HValue* global_object) {
- SetOperandAt(0, global_object);
- set_representation(Representation::Double());
- }
-
- HValue* global_object() { return OperandAt(0); }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Random)
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HAdd: public HArithmeticBinaryOperation {
- public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
- // Add is only commutative if two integer values are added and not if two
- // tagged values are added (because it might be a String concatenation).
- virtual bool IsCommutative() const {
- return !representation().IsTagged();
- }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- virtual HType CalculateInferredType();
-
- virtual HValue* Canonicalize();
-
- virtual bool IsRelationTrueInternal(NumericRelation relation, HValue* other) {
- HValue* base = NULL;
- int32_t offset = 0;
- if (left()->IsInteger32Constant()) {
- base = right();
- offset = left()->GetInteger32Constant();
- } else if (right()->IsInteger32Constant()) {
- base = left();
- offset = right()->GetInteger32Constant();
- } else {
- return false;
- }
-
- return relation.IsExtendable(offset)
- ? base->IsRelationTrue(relation, other) : false;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Add)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange(Zone* zone);
-
- private:
- HAdd(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanOverflow);
- }
-};
-
-
-class HSub: public HArithmeticBinaryOperation {
- public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- virtual HValue* Canonicalize();
-
- virtual bool IsRelationTrueInternal(NumericRelation relation, HValue* other) {
- if (right()->IsInteger32Constant()) {
- HValue* base = left();
- int32_t offset = right()->GetInteger32Constant();
- return relation.IsExtendable(-offset)
- ? base->IsRelationTrue(relation, other) : false;
- } else {
- return false;
- }
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Sub)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange(Zone* zone);
-
- private:
- HSub(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanOverflow);
- }
-};
-
-
-class HMul: public HArithmeticBinaryOperation {
- public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- // Only commutative if it is certain that not two objects are multiplicated.
- virtual bool IsCommutative() const {
- return !representation().IsTagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Mul)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange(Zone* zone);
-
- private:
- HMul(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanOverflow);
- }
-};
-
-
-class HMod: public HArithmeticBinaryOperation {
- public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
- bool HasPowerOf2Divisor() {
- if (right()->IsConstant() &&
- HConstant::cast(right())->HasInteger32Value()) {
- int32_t value = HConstant::cast(right())->Integer32Value();
- return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
- }
-
- return false;
- }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- DECLARE_CONCRETE_INSTRUCTION(Mod)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange(Zone* zone);
-
- private:
- HMod(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanBeDivByZero);
- }
-};
-
-
-class HDiv: public HArithmeticBinaryOperation {
- public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
- bool HasPowerOf2Divisor() {
- if (right()->IsConstant() &&
- HConstant::cast(right())->HasInteger32Value()) {
- int32_t value = HConstant::cast(right())->Integer32Value();
- return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
- }
-
- return false;
- }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- DECLARE_CONCRETE_INSTRUCTION(Div)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange(Zone* zone);
-
- private:
- HDiv(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanBeDivByZero);
- SetFlag(kCanOverflow);
- }
-};
-
-
-class HMathMinMax: public HArithmeticBinaryOperation {
- public:
- enum Operation { kMathMin, kMathMax };
-
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right,
- Operation op);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return index == 0 ? Representation::Tagged()
- : representation();
- }
-
- virtual Representation observed_input_representation(int index) {
- return RequiredInputRepresentation(index);
- }
-
- virtual void InferRepresentation(HInferRepresentation* h_infer);
-
- virtual Representation RepresentationFromInputs() {
- Representation left_rep = left()->representation();
- Representation right_rep = right()->representation();
- if ((left_rep.IsNone() || left_rep.IsInteger32()) &&
- (right_rep.IsNone() || right_rep.IsInteger32())) {
- return Representation::Integer32();
- }
- return Representation::Double();
- }
-
- virtual bool IsCommutative() const { return true; }
-
- Operation operation() { return operation_; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- return other->IsMathMinMax() &&
- HMathMinMax::cast(other)->operation_ == operation_;
- }
-
- virtual Range* InferRange(Zone* zone);
-
- private:
- HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
- : HArithmeticBinaryOperation(context, left, right),
- operation_(op) { }
-
- Operation operation_;
-};
-
-
-class HBitwise: public HBitwiseBinaryOperation {
- public:
- static HInstruction* New(Zone* zone,
- Token::Value op,
- HValue* context,
- HValue* left,
- HValue* right);
-
- Token::Value op() const { return op_; }
-
- virtual bool IsCommutative() const { return true; }
-
- virtual HValue* Canonicalize();
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(Bitwise)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- return op() == HBitwise::cast(other)->op();
- }
-
- virtual Range* InferRange(Zone* zone);
-
- private:
- HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right), op_(op) {
- ASSERT(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR);
- }
-
- Token::Value op_;
-};
-
-
-class HShl: public HBitwiseBinaryOperation {
- public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
- virtual Range* InferRange(Zone* zone);
-
- DECLARE_CONCRETE_INSTRUCTION(Shl)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- HShl(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
-};
-
-
-class HShr: public HBitwiseBinaryOperation {
- public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
- virtual Range* InferRange(Zone* zone);
-
- DECLARE_CONCRETE_INSTRUCTION(Shr)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- HShr(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
-};
-
-
-class HSar: public HBitwiseBinaryOperation {
- public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
- virtual Range* InferRange(Zone* zone);
-
- DECLARE_CONCRETE_INSTRUCTION(Sar)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- HSar(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
-};
-
-
-class HRor: public HBitwiseBinaryOperation {
- public:
- HRor(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) {
- ChangeRepresentation(Representation::Integer32());
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Ror)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HOsrEntry: public HTemplateInstruction<0> {
- public:
- explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
- SetGVNFlag(kChangesOsrEntries);
- }
-
- BailoutId ast_id() const { return ast_id_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry)
-
- private:
- BailoutId ast_id_;
-};
-
-
-class HParameter: public HTemplateInstruction<0> {
- public:
- enum ParameterKind {
- STACK_PARAMETER,
- REGISTER_PARAMETER
- };
-
- explicit HParameter(unsigned index,
- ParameterKind kind = STACK_PARAMETER)
- : index_(index),
- kind_(kind) {
- set_representation(Representation::Tagged());
- }
-
- unsigned index() const { return index_; }
- ParameterKind kind() const { return kind_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Parameter)
-
- private:
- unsigned index_;
- ParameterKind kind_;
-};
-
-
-class HCallStub: public HUnaryCall {
- public:
- HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
- : HUnaryCall(context, argument_count),
- major_key_(major_key),
- transcendental_type_(TranscendentalCache::kNumberOfCaches) {
- }
-
- CodeStub::Major major_key() { return major_key_; }
-
- HValue* context() { return value(); }
-
- void set_transcendental_type(TranscendentalCache::Type transcendental_type) {
- transcendental_type_ = transcendental_type;
- }
- TranscendentalCache::Type transcendental_type() {
- return transcendental_type_;
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub)
-
- private:
- CodeStub::Major major_key_;
- TranscendentalCache::Type transcendental_type_;
-};
-
-
-class HUnknownOSRValue: public HTemplateInstruction<0> {
- public:
- HUnknownOSRValue()
- : incoming_value_(NULL) {
- set_representation(Representation::Tagged());
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- void set_incoming_value(HPhi* value) {
- incoming_value_ = value;
- }
-
- HPhi* incoming_value() {
- return incoming_value_;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
-
- private:
- HPhi* incoming_value_;
-};
-
-
-class HLoadGlobalCell: public HTemplateInstruction<0> {
- public:
- HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, PropertyDetails details)
- : cell_(cell), details_(details) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnGlobalVars);
- }
-
- Handle<JSGlobalPropertyCell> cell() const { return cell_; }
- bool RequiresHoleCheck() const;
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual intptr_t Hashcode() {
- ASSERT_ALLOCATION_DISABLED;
- // Dereferencing to use the object's raw address for hashing is safe.
- AllowHandleDereference allow_handle_deref(isolate());
- return reinterpret_cast<intptr_t>(*cell_);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
- return cell_.is_identical_to(b->cell());
- }
-
- private:
- virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
-
- Handle<JSGlobalPropertyCell> cell_;
- PropertyDetails details_;
-};
-
-
-class HLoadGlobalGeneric: public HTemplateInstruction<2> {
- public:
- HLoadGlobalGeneric(HValue* context,
- HValue* global_object,
- Handle<Object> name,
- bool for_typeof)
- : name_(name),
- for_typeof_(for_typeof) {
- SetOperandAt(0, context);
- SetOperandAt(1, global_object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* global_object() { return OperandAt(1); }
- Handle<Object> name() const { return name_; }
- bool for_typeof() const { return for_typeof_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
-
- private:
- Handle<Object> name_;
- bool for_typeof_;
-};
-
-
-class HAllocateObject: public HTemplateInstruction<1> {
- public:
- HAllocateObject(HValue* context, Handle<JSFunction> constructor)
- : constructor_(constructor) {
- SetOperandAt(0, context);
- set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- // Maximum instance size for which allocations will be inlined.
- static const int kMaxSize = 64 * kPointerSize;
-
- HValue* context() { return OperandAt(0); }
- Handle<JSFunction> constructor() { return constructor_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual Handle<Map> GetMonomorphicJSObjectMap() {
- ASSERT(constructor()->has_initial_map());
- return Handle<Map>(constructor()->initial_map());
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
-
- private:
- // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
- // virtual bool IsDeletable() const { return true; }
-
- Handle<JSFunction> constructor_;
-};
-
-
-class HAllocate: public HTemplateInstruction<2> {
- public:
- enum Flags {
- CAN_ALLOCATE_IN_NEW_SPACE = 1 << 0,
- CAN_ALLOCATE_IN_OLD_DATA_SPACE = 1 << 1,
- CAN_ALLOCATE_IN_OLD_POINTER_SPACE = 1 << 2,
- ALLOCATE_DOUBLE_ALIGNED = 1 << 3
- };
-
- HAllocate(HValue* context, HValue* size, HType type, Flags flags)
- : type_(type),
- flags_(flags) {
- ASSERT((flags & CAN_ALLOCATE_IN_OLD_DATA_SPACE) == 0); // unimplemented
- ASSERT((flags & CAN_ALLOCATE_IN_OLD_POINTER_SPACE) == 0); // unimplemented
- SetOperandAt(0, context);
- SetOperandAt(1, size);
- set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* size() { return OperandAt(1); }
-
- virtual Representation RequiredInputRepresentation(int index) {
- if (index == 0) {
- return Representation::Tagged();
- } else {
- return Representation::Integer32();
- }
- }
-
- virtual HType CalculateInferredType();
-
- bool CanAllocateInNewSpace() const {
- return (flags_ & CAN_ALLOCATE_IN_NEW_SPACE) != 0;
- }
-
- bool CanAllocateInOldDataSpace() const {
- return (flags_ & CAN_ALLOCATE_IN_OLD_DATA_SPACE) != 0;
- }
-
- bool CanAllocateInOldPointerSpace() const {
- return (flags_ & CAN_ALLOCATE_IN_OLD_POINTER_SPACE) != 0;
- }
-
- bool CanAllocateInOldSpace() const {
- return CanAllocateInOldDataSpace() ||
- CanAllocateInOldPointerSpace();
- }
-
- bool GuaranteedInNewSpace() const {
- return CanAllocateInNewSpace() && !CanAllocateInOldSpace();
- }
-
- bool MustAllocateDoubleAligned() const {
- return (flags_ & ALLOCATE_DOUBLE_ALIGNED) != 0;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate)
-
- private:
- HType type_;
- Flags flags_;
-};
-
-
-inline bool StoringValueNeedsWriteBarrier(HValue* value) {
- return !value->type().IsBoolean()
- && !value->type().IsSmi()
- && !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable());
-}
-
-
-inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
- HValue* new_space_dominator) {
- if (object != new_space_dominator) return true;
- if (object->IsFastLiteral()) return false;
- if (object->IsAllocateObject()) return false;
- if (object->IsAllocate()) {
- return !HAllocate::cast(object)->GuaranteedInNewSpace();
- }
- return true;
-}
-
-
-class HStoreGlobalCell: public HUnaryOperation {
- public:
- HStoreGlobalCell(HValue* value,
- Handle<JSGlobalPropertyCell> cell,
- PropertyDetails details)
- : HUnaryOperation(value),
- cell_(cell),
- details_(details) {
- SetGVNFlag(kChangesGlobalVars);
- }
-
- Handle<JSGlobalPropertyCell> cell() const { return cell_; }
- bool RequiresHoleCheck() {
- return !details_.IsDontDelete() || details_.IsReadOnly();
- }
- bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value());
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell)
-
- private:
- Handle<JSGlobalPropertyCell> cell_;
- PropertyDetails details_;
-};
-
-
-class HStoreGlobalGeneric: public HTemplateInstruction<3> {
- public:
- HStoreGlobalGeneric(HValue* context,
- HValue* global_object,
- Handle<Object> name,
- HValue* value,
- StrictModeFlag strict_mode_flag)
- : name_(name),
- strict_mode_flag_(strict_mode_flag) {
- SetOperandAt(0, context);
- SetOperandAt(1, global_object);
- SetOperandAt(2, value);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* global_object() { return OperandAt(1); }
- Handle<Object> name() const { return name_; }
- HValue* value() { return OperandAt(2); }
- StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric)
-
- private:
- Handle<Object> name_;
- StrictModeFlag strict_mode_flag_;
-};
-
-
-class HLoadContextSlot: public HUnaryOperation {
- public:
- enum Mode {
- // Perform a normal load of the context slot without checking its value.
- kNoCheck,
- // Load and check the value of the context slot. Deoptimize if it's the
- // hole value. This is used for checking for loading of uninitialized
- // harmony bindings where we deoptimize into full-codegen generated code
- // which will subsequently throw a reference error.
- kCheckDeoptimize,
- // Load and check the value of the context slot. Return undefined if it's
- // the hole value. This is used for non-harmony const assignments
- kCheckReturnUndefined
- };
-
- HLoadContextSlot(HValue* context, Variable* var)
- : HUnaryOperation(context), slot_index_(var->index()) {
- ASSERT(var->IsContextSlot());
- switch (var->mode()) {
- case LET:
- case CONST_HARMONY:
- mode_ = kCheckDeoptimize;
- break;
- case CONST:
- mode_ = kCheckReturnUndefined;
- break;
- default:
- mode_ = kNoCheck;
- }
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnContextSlots);
- }
-
- int slot_index() const { return slot_index_; }
- Mode mode() const { return mode_; }
-
- bool DeoptimizesOnHole() {
- return mode_ == kCheckDeoptimize;
- }
-
- bool RequiresHoleCheck() const {
- return mode_ != kNoCheck;
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HLoadContextSlot* b = HLoadContextSlot::cast(other);
- return (slot_index() == b->slot_index());
- }
-
- private:
- virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
-
- int slot_index_;
- Mode mode_;
-};
-
-
-class HStoreContextSlot: public HTemplateInstruction<2> {
- public:
- enum Mode {
- // Perform a normal store to the context slot without checking its previous
- // value.
- kNoCheck,
- // Check the previous value of the context slot and deoptimize if it's the
- // hole value. This is used for checking for assignments to uninitialized
- // harmony bindings where we deoptimize into full-codegen generated code
- // which will subsequently throw a reference error.
- kCheckDeoptimize,
- // Check the previous value and ignore assignment if it isn't a hole value
- kCheckIgnoreAssignment
- };
-
- HStoreContextSlot(HValue* context, int slot_index, Mode mode, HValue* value)
- : slot_index_(slot_index), mode_(mode) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- SetGVNFlag(kChangesContextSlots);
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
- int slot_index() const { return slot_index_; }
- Mode mode() const { return mode_; }
-
- bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value());
- }
-
- bool DeoptimizesOnHole() {
- return mode_ == kCheckDeoptimize;
- }
-
- bool RequiresHoleCheck() {
- return mode_ != kNoCheck;
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot)
-
- private:
- int slot_index_;
- Mode mode_;
-};
-
-
-class HLoadNamedField: public HUnaryOperation {
- public:
- HLoadNamedField(HValue* object, bool is_in_object, int offset)
- : HUnaryOperation(object),
- is_in_object_(is_in_object),
- offset_(offset) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- if (is_in_object) {
- SetGVNFlag(kDependsOnInobjectFields);
- } else {
- SetGVNFlag(kDependsOnBackingStoreFields);
- }
- }
-
- HValue* object() { return OperandAt(0); }
- bool is_in_object() const { return is_in_object_; }
- int offset() const { return offset_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HLoadNamedField* b = HLoadNamedField::cast(other);
- return is_in_object_ == b->is_in_object_ && offset_ == b->offset_;
- }
-
- private:
- virtual bool IsDeletable() const { return true; }
-
- bool is_in_object_;
- int offset_;
-};
-
-
-class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> {
- public:
- HLoadNamedFieldPolymorphic(HValue* context,
- HValue* object,
- SmallMapList* types,
- Handle<String> name,
- Zone* zone);
-
- HValue* context() { return OperandAt(0); }
- HValue* object() { return OperandAt(1); }
- SmallMapList* types() { return &types_; }
- Handle<String> name() { return name_; }
- bool need_generic() { return need_generic_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic)
-
- static const int kMaxLoadPolymorphism = 4;
-
- protected:
- virtual bool DataEquals(HValue* value);
-
- private:
- SmallMapList types_;
- Handle<String> name_;
- bool need_generic_;
-};
-
-
-
-class HLoadNamedGeneric: public HTemplateInstruction<2> {
- public:
- HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
- : name_(name) {
- SetOperandAt(0, context);
- SetOperandAt(1, object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* object() { return OperandAt(1); }
- Handle<Object> name() const { return name_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
-
- private:
- Handle<Object> name_;
-};
-
-
-class HLoadFunctionPrototype: public HUnaryOperation {
- public:
- explicit HLoadFunctionPrototype(HValue* function)
- : HUnaryOperation(function) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnCalls);
- }
-
- HValue* function() { return OperandAt(0); }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-class ArrayInstructionInterface {
- public:
- virtual HValue* GetKey() = 0;
- virtual void SetKey(HValue* key) = 0;
- virtual void SetIndexOffset(uint32_t index_offset) = 0;
- virtual bool IsDehoisted() = 0;
- virtual void SetDehoisted(bool is_dehoisted) = 0;
- virtual ~ArrayInstructionInterface() { };
-
- static Representation KeyedAccessIndexRequirement(Representation r) {
- return r.IsInteger32() ? Representation::Integer32()
- : Representation::Tagged();
- }
-};
-
-
-enum LoadKeyedHoleMode {
- NEVER_RETURN_HOLE,
- ALLOW_RETURN_HOLE
-};
-
-
-class HLoadKeyed
- : public HTemplateInstruction<3>, public ArrayInstructionInterface {
- public:
- HLoadKeyed(HValue* obj,
- HValue* key,
- HValue* dependency,
- ElementsKind elements_kind,
- LoadKeyedHoleMode mode = NEVER_RETURN_HOLE)
- : bit_field_(0) {
- bit_field_ = ElementsKindField::encode(elements_kind) |
- HoleModeField::encode(mode);
-
- SetOperandAt(0, obj);
- SetOperandAt(1, key);
- SetOperandAt(2, dependency != NULL ? dependency : obj);
-
- if (!is_external()) {
- // I can detect the case between storing double (holey and fast) and
- // smi/object by looking at elements_kind_.
- ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
- IsFastDoubleElementsKind(elements_kind));
-
- if (IsFastSmiOrObjectElementsKind(elements_kind)) {
- if (IsFastSmiElementsKind(elements_kind)) {
- set_type(HType::Smi());
- }
-
- set_representation(Representation::Tagged());
- SetGVNFlag(kDependsOnArrayElements);
- } else {
- set_representation(Representation::Double());
- SetGVNFlag(kDependsOnDoubleArrayElements);
- }
- } else {
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- set_representation(Representation::Double());
- } else {
- set_representation(Representation::Integer32());
- }
-
- SetGVNFlag(kDependsOnSpecializedArrayElements);
- // Native code could change the specialized array.
- SetGVNFlag(kDependsOnCalls);
- }
-
- SetFlag(kUseGVN);
- }
-
- bool is_external() const {
- return IsExternalArrayElementsKind(elements_kind());
- }
- HValue* elements() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* dependency() {
- ASSERT(HasDependency());
- return OperandAt(2);
- }
- bool HasDependency() const { return OperandAt(0) != OperandAt(2); }
- uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
- void SetIndexOffset(uint32_t index_offset) {
- bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
- }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return IsDehoistedField::decode(bit_field_); }
- void SetDehoisted(bool is_dehoisted) {
- bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
- }
- ElementsKind elements_kind() const {
- return ElementsKindField::decode(bit_field_);
- }
- LoadKeyedHoleMode hole_mode() const {
- return HoleModeField::decode(bit_field_);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- // kind_fast: tagged[int32] (none)
- // kind_double: tagged[int32] (none)
- // kind_external: external[int32] (none)
- if (index == 0) {
- return is_external() ? Representation::External()
- : Representation::Tagged();
- }
- if (index == 1) {
- return ArrayInstructionInterface::KeyedAccessIndexRequirement(
- OperandAt(1)->representation());
- }
- return Representation::None();
- }
-
- virtual Representation observed_input_representation(int index) {
- return RequiredInputRepresentation(index);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- bool UsesMustHandleHole() const;
- bool RequiresHoleCheck() const;
-
- virtual Range* InferRange(Zone* zone);
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- if (!other->IsLoadKeyed()) return false;
- HLoadKeyed* other_load = HLoadKeyed::cast(other);
-
- if (IsDehoisted() && index_offset() != other_load->index_offset())
- return false;
- return elements_kind() == other_load->elements_kind();
- }
-
- private:
- virtual bool IsDeletable() const {
- return !RequiresHoleCheck();
- }
-
- // Establish some checks around our packed fields
- enum LoadKeyedBits {
- kBitsForElementsKind = 5,
- kBitsForHoleMode = 1,
- kBitsForIndexOffset = 25,
- kBitsForIsDehoisted = 1,
-
- kStartElementsKind = 0,
- kStartHoleMode = kStartElementsKind + kBitsForElementsKind,
- kStartIndexOffset = kStartHoleMode + kBitsForHoleMode,
- kStartIsDehoisted = kStartIndexOffset + kBitsForIndexOffset
- };
-
- STATIC_ASSERT((kBitsForElementsKind + kBitsForIndexOffset +
- kBitsForIsDehoisted) <= sizeof(uint32_t)*8);
- STATIC_ASSERT(kElementsKindCount <= (1 << kBitsForElementsKind));
- class ElementsKindField:
- public BitField<ElementsKind, kStartElementsKind, kBitsForElementsKind>
- {}; // NOLINT
- class HoleModeField:
- public BitField<LoadKeyedHoleMode, kStartHoleMode, kBitsForHoleMode>
- {}; // NOLINT
- class IndexOffsetField:
- public BitField<uint32_t, kStartIndexOffset, kBitsForIndexOffset>
- {}; // NOLINT
- class IsDehoistedField:
- public BitField<bool, kStartIsDehoisted, kBitsForIsDehoisted>
- {}; // NOLINT
- uint32_t bit_field_;
-};
-
-
-class HLoadKeyedGeneric: public HTemplateInstruction<3> {
- public:
- HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) {
- set_representation(Representation::Tagged());
- SetOperandAt(0, obj);
- SetOperandAt(1, key);
- SetOperandAt(2, context);
- SetAllSideEffects();
- }
-
- HValue* object() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* context() { return OperandAt(2); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- // tagged[tagged]
- return Representation::Tagged();
- }
-
- virtual HValue* Canonicalize();
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
-};
-
-
-class HStoreNamedField: public HTemplateInstruction<2> {
- public:
- HStoreNamedField(HValue* obj,
- Handle<String> name,
- HValue* val,
- bool in_object,
- int offset)
- : name_(name),
- is_in_object_(in_object),
- offset_(offset),
- new_space_dominator_(NULL) {
- SetOperandAt(0, obj);
- SetOperandAt(1, val);
- SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnNewSpacePromotion);
- if (is_in_object_) {
- SetGVNFlag(kChangesInobjectFields);
- } else {
- SetGVNFlag(kChangesBackingStoreFields);
- }
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
- ASSERT(side_effect == kChangesNewSpacePromotion);
- new_space_dominator_ = dominator;
- }
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* object() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
-
- Handle<String> name() const { return name_; }
- bool is_in_object() const { return is_in_object_; }
- int offset() const { return offset_; }
- Handle<Map> transition() const { return transition_; }
- void set_transition(Handle<Map> map) { transition_ = map; }
- HValue* new_space_dominator() const { return new_space_dominator_; }
-
- bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value()) &&
- ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
- }
-
- bool NeedsWriteBarrierForMap() {
- return ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
- }
-
- private:
- Handle<String> name_;
- bool is_in_object_;
- int offset_;
- Handle<Map> transition_;
- HValue* new_space_dominator_;
-};
-
-
-class HStoreNamedGeneric: public HTemplateInstruction<3> {
- public:
- HStoreNamedGeneric(HValue* context,
- HValue* object,
- Handle<String> name,
- HValue* value,
- StrictModeFlag strict_mode_flag)
- : name_(name),
- strict_mode_flag_(strict_mode_flag) {
- SetOperandAt(0, object);
- SetOperandAt(1, value);
- SetOperandAt(2, context);
- SetAllSideEffects();
- }
-
- HValue* object() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
- HValue* context() { return OperandAt(2); }
- Handle<String> name() { return name_; }
- StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
-
- private:
- Handle<String> name_;
- StrictModeFlag strict_mode_flag_;
-};
-
-
-class HStoreKeyed
- : public HTemplateInstruction<3>, public ArrayInstructionInterface {
- public:
- HStoreKeyed(HValue* obj, HValue* key, HValue* val,
- ElementsKind elements_kind)
- : elements_kind_(elements_kind),
- index_offset_(0),
- is_dehoisted_(false),
- new_space_dominator_(NULL) {
- SetOperandAt(0, obj);
- SetOperandAt(1, key);
- SetOperandAt(2, val);
-
- if (IsFastObjectElementsKind(elements_kind)) {
- SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnNewSpacePromotion);
- }
- if (is_external()) {
- SetGVNFlag(kChangesSpecializedArrayElements);
- } else if (IsFastDoubleElementsKind(elements_kind)) {
- SetGVNFlag(kChangesDoubleArrayElements);
- SetFlag(kDeoptimizeOnUndefined);
- } else {
- SetGVNFlag(kChangesArrayElements);
- }
-
- // EXTERNAL_{UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating.
- if (elements_kind >= EXTERNAL_BYTE_ELEMENTS &&
- elements_kind <= EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- SetFlag(kTruncatingToInt32);
- }
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- // kind_fast: tagged[int32] = tagged
- // kind_double: tagged[int32] = double
- // kind_external: external[int32] = (double | int32)
- if (index == 0) {
- return is_external() ? Representation::External()
- : Representation::Tagged();
- } else if (index == 1) {
- return ArrayInstructionInterface::KeyedAccessIndexRequirement(
- OperandAt(1)->representation());
- }
-
- ASSERT_EQ(index, 2);
- if (IsDoubleOrFloatElementsKind(elements_kind())) {
- return Representation::Double();
- }
-
- return is_external() ? Representation::Integer32()
- : Representation::Tagged();
- }
-
- bool is_external() const {
- return IsExternalArrayElementsKind(elements_kind());
- }
-
- virtual Representation observed_input_representation(int index) {
- if (index < 2) return RequiredInputRepresentation(index);
- if (IsDoubleOrFloatElementsKind(elements_kind())) {
- return Representation::Double();
- }
- if (is_external()) {
- return Representation::Integer32();
- }
- // For fast object elements kinds, don't assume anything.
- return Representation::None();
- }
-
- HValue* elements() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
- bool value_is_smi() const {
- return IsFastSmiElementsKind(elements_kind_);
- }
- ElementsKind elements_kind() const { return elements_kind_; }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
-
- virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
- ASSERT(side_effect == kChangesNewSpacePromotion);
- new_space_dominator_ = dominator;
- }
-
- HValue* new_space_dominator() const { return new_space_dominator_; }
-
- bool NeedsWriteBarrier() {
- if (value_is_smi()) {
- return false;
- } else {
- return StoringValueNeedsWriteBarrier(value()) &&
- ReceiverObjectNeedsWriteBarrier(elements(), new_space_dominator());
- }
- }
-
- bool NeedsCanonicalization();
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed)
-
- private:
- ElementsKind elements_kind_;
- uint32_t index_offset_;
- bool is_dehoisted_;
- HValue* new_space_dominator_;
-};
-
-
-class HStoreKeyedGeneric: public HTemplateInstruction<4> {
- public:
- HStoreKeyedGeneric(HValue* context,
- HValue* object,
- HValue* key,
- HValue* value,
- StrictModeFlag strict_mode_flag)
- : strict_mode_flag_(strict_mode_flag) {
- SetOperandAt(0, object);
- SetOperandAt(1, key);
- SetOperandAt(2, value);
- SetOperandAt(3, context);
- SetAllSideEffects();
- }
-
- HValue* object() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
- HValue* context() { return OperandAt(3); }
- StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- // tagged[tagged] = tagged
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
-
- private:
- StrictModeFlag strict_mode_flag_;
-};
-
-
-class HTransitionElementsKind: public HTemplateInstruction<2> {
- public:
- HTransitionElementsKind(HValue* context,
- HValue* object,
- Handle<Map> original_map,
- Handle<Map> transitioned_map)
- : original_map_(original_map),
- transitioned_map_(transitioned_map),
- from_kind_(original_map->elements_kind()),
- to_kind_(transitioned_map->elements_kind()) {
- SetOperandAt(0, object);
- SetOperandAt(1, context);
- SetFlag(kUseGVN);
- SetGVNFlag(kChangesElementsKind);
- if (original_map->has_fast_double_elements()) {
- SetGVNFlag(kChangesElementsPointer);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
- if (transitioned_map->has_fast_double_elements()) {
- SetGVNFlag(kChangesElementsPointer);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
- set_representation(Representation::Tagged());
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- HValue* object() { return OperandAt(0); }
- HValue* context() { return OperandAt(1); }
- Handle<Map> original_map() { return original_map_; }
- Handle<Map> transitioned_map() { return transitioned_map_; }
- ElementsKind from_kind() { return from_kind_; }
- ElementsKind to_kind() { return to_kind_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
- return original_map_.is_identical_to(instr->original_map()) &&
- transitioned_map_.is_identical_to(instr->transitioned_map());
- }
-
- private:
- Handle<Map> original_map_;
- Handle<Map> transitioned_map_;
- ElementsKind from_kind_;
- ElementsKind to_kind_;
-};
-
-
-class HStringAdd: public HBinaryOperation {
- public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual HType CalculateInferredType() {
- return HType::String();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
-
- private:
- HStringAdd(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
- // virtual bool IsDeletable() const { return true; }
-};
-
-
-class HStringCharCodeAt: public HTemplateInstruction<3> {
- public:
- HStringCharCodeAt(HValue* context, HValue* string, HValue* index) {
- SetOperandAt(0, context);
- SetOperandAt(1, string);
- SetOperandAt(2, index);
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- // The index is supposed to be Integer32.
- return index == 2
- ? Representation::Integer32()
- : Representation::Tagged();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* string() { return OperandAt(1); }
- HValue* index() { return OperandAt(2); }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange(Zone* zone) {
- return new(zone) Range(0, String::kMaxUtf16CodeUnit);
- }
-
- // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
- // private:
- // virtual bool IsDeletable() const { return true; }
-};
-
-
-class HStringCharFromCode: public HTemplateInstruction<2> {
- public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* char_code);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Tagged()
- : Representation::Integer32();
- }
- virtual HType CalculateInferredType();
-
- HValue* context() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
-
- virtual bool DataEquals(HValue* other) { return true; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
-
- private:
- HStringCharFromCode(HValue* context, HValue* char_code) {
- SetOperandAt(0, context);
- SetOperandAt(1, char_code);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
- // virtual bool IsDeletable() const { return true; }
-};
-
-
-class HStringLength: public HUnaryOperation {
- public:
- static HInstruction* New(Zone* zone, HValue* string);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual HType CalculateInferredType() {
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- return HType::Smi();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange(Zone* zone) {
- return new(zone) Range(0, String::kMaxLength);
- }
-
- private:
- explicit HStringLength(HValue* string) : HUnaryOperation(string) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- }
-
- virtual bool IsDeletable() const { return true; }
-};
-
-
-template <int V>
-class HMaterializedLiteral: public HTemplateInstruction<V> {
- public:
- HMaterializedLiteral<V>(int index, int depth, AllocationSiteMode mode)
- : literal_index_(index), depth_(depth), allocation_site_mode_(mode) {
- this->set_representation(Representation::Tagged());
- }
-
- HMaterializedLiteral<V>(int index, int depth)
- : literal_index_(index), depth_(depth),
- allocation_site_mode_(DONT_TRACK_ALLOCATION_SITE) {
- this->set_representation(Representation::Tagged());
- }
-
- int literal_index() const { return literal_index_; }
- int depth() const { return depth_; }
- AllocationSiteMode allocation_site_mode() const {
- return allocation_site_mode_;
- }
-
- private:
- virtual bool IsDeletable() const { return true; }
-
- int literal_index_;
- int depth_;
- AllocationSiteMode allocation_site_mode_;
-};
-
-
-class HFastLiteral: public HMaterializedLiteral<1> {
- public:
- HFastLiteral(HValue* context,
- Handle<JSObject> boilerplate,
- int total_size,
- int literal_index,
- int depth,
- AllocationSiteMode mode)
- : HMaterializedLiteral<1>(literal_index, depth, mode),
- boilerplate_(boilerplate),
- total_size_(total_size) {
- SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- // Maximum depth and total number of elements and properties for literal
- // graphs to be considered for fast deep-copying.
- static const int kMaxLiteralDepth = 3;
- static const int kMaxLiteralProperties = 8;
-
- HValue* context() { return OperandAt(0); }
- Handle<JSObject> boilerplate() const { return boilerplate_; }
- int total_size() const { return total_size_; }
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual Handle<Map> GetMonomorphicJSObjectMap() {
- return Handle<Map>(boilerplate()->map());
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral)
-
- private:
- Handle<JSObject> boilerplate_;
- int total_size_;
-};
-
-
-class HArrayLiteral: public HMaterializedLiteral<1> {
- public:
- HArrayLiteral(HValue* context,
- Handle<HeapObject> boilerplate_object,
- int length,
- int literal_index,
- int depth,
- AllocationSiteMode mode)
- : HMaterializedLiteral<1>(literal_index, depth, mode),
- length_(length),
- boilerplate_object_(boilerplate_object) {
- SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- HValue* context() { return OperandAt(0); }
- ElementsKind boilerplate_elements_kind() const {
- if (!boilerplate_object_->IsJSObject()) {
- return TERMINAL_FAST_ELEMENTS_KIND;
- }
- return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
- }
- Handle<HeapObject> boilerplate_object() const { return boilerplate_object_; }
- int length() const { return length_; }
- bool IsCopyOnWrite() const;
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
-
- private:
- int length_;
- Handle<HeapObject> boilerplate_object_;
-};
-
-
-class HObjectLiteral: public HMaterializedLiteral<1> {
- public:
- HObjectLiteral(HValue* context,
- Handle<FixedArray> constant_properties,
- bool fast_elements,
- int literal_index,
- int depth,
- bool has_function)
- : HMaterializedLiteral<1>(literal_index, depth),
- constant_properties_(constant_properties),
- fast_elements_(fast_elements),
- has_function_(has_function) {
- SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- HValue* context() { return OperandAt(0); }
- Handle<FixedArray> constant_properties() const {
- return constant_properties_;
- }
- bool fast_elements() const { return fast_elements_; }
- bool has_function() const { return has_function_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral)
-
- private:
- Handle<FixedArray> constant_properties_;
- bool fast_elements_;
- bool has_function_;
-};
-
-
-class HRegExpLiteral: public HMaterializedLiteral<1> {
- public:
- HRegExpLiteral(HValue* context,
- Handle<FixedArray> literals,
- Handle<String> pattern,
- Handle<String> flags,
- int literal_index)
- : HMaterializedLiteral<1>(literal_index, 0),
- literals_(literals),
- pattern_(pattern),
- flags_(flags) {
- SetOperandAt(0, context);
- SetAllSideEffects();
- }
-
- HValue* context() { return OperandAt(0); }
- Handle<FixedArray> literals() { return literals_; }
- Handle<String> pattern() { return pattern_; }
- Handle<String> flags() { return flags_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
-
- private:
- Handle<FixedArray> literals_;
- Handle<String> pattern_;
- Handle<String> flags_;
-};
-
-
-class HFunctionLiteral: public HTemplateInstruction<1> {
- public:
- HFunctionLiteral(HValue* context,
- Handle<SharedFunctionInfo> shared,
- bool pretenure)
- : shared_info_(shared), pretenure_(pretenure) {
- SetOperandAt(0, context);
- set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- HValue* context() { return OperandAt(0); }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
- bool pretenure() const { return pretenure_; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-
- Handle<SharedFunctionInfo> shared_info_;
- bool pretenure_;
-};
-
-
-class HTypeof: public HTemplateInstruction<2> {
- public:
- explicit HTypeof(HValue* context, HValue* value) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- set_representation(Representation::Tagged());
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof)
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HTrapAllocationMemento : public HTemplateInstruction<1> {
- public:
- explicit HTrapAllocationMemento(HValue* obj) {
- SetOperandAt(0, obj);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- HValue* object() { return OperandAt(0); }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento)
-};
-
-
-class HToFastProperties: public HUnaryOperation {
- public:
- explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
- // This instruction is not marked as having side effects, but
- // changes the map of the input operand. Use it only when creating
- // object literals.
- ASSERT(value->IsObjectLiteral() || value->IsFastLiteral());
- set_representation(Representation::Tagged());
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties)
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HValueOf: public HUnaryOperation {
- public:
- explicit HValueOf(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf)
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HDateField: public HUnaryOperation {
- public:
- HDateField(HValue* date, Smi* index)
- : HUnaryOperation(date), index_(index) {
- set_representation(Representation::Tagged());
- }
-
- Smi* index() const { return index_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField)
-
- private:
- Smi* index_;
-};
-
-
-class HSeqStringSetChar: public HTemplateInstruction<3> {
- public:
- HSeqStringSetChar(String::Encoding encoding,
- HValue* string,
- HValue* index,
- HValue* value) : encoding_(encoding) {
- SetOperandAt(0, string);
- SetOperandAt(1, index);
- SetOperandAt(2, value);
- }
-
- String::Encoding encoding() { return encoding_; }
- HValue* string() { return OperandAt(0); }
- HValue* index() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
-class HDeleteProperty: public HBinaryOperation {
- public:
- HDeleteProperty(HValue* context, HValue* obj, HValue* key)
- : HBinaryOperation(context, obj, key) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty)
-
- HValue* object() { return left(); }
- HValue* key() { return right(); }
-};
-
-
-class HIn: public HTemplateInstruction<3> {
- public:
- HIn(HValue* context, HValue* key, HValue* object) {
- SetOperandAt(0, context);
- SetOperandAt(1, key);
- SetOperandAt(2, object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* object() { return OperandAt(2); }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual HType CalculateInferredType() {
- return HType::Boolean();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(In)
-};
-
-
-class HCheckMapValue: public HTemplateInstruction<2> {
- public:
- HCheckMapValue(HValue* value,
- HValue* map) {
- SetOperandAt(0, value);
- SetOperandAt(1, map);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual HType CalculateInferredType() {
- return HType::Tagged();
- }
-
- HValue* value() { return OperandAt(0); }
- HValue* map() { return OperandAt(1); }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- return true;
- }
-};
-
-
-class HForInPrepareMap : public HTemplateInstruction<2> {
- public:
- HForInPrepareMap(HValue* context,
- HValue* object) {
- SetOperandAt(0, context);
- SetOperandAt(1, object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* enumerable() { return OperandAt(1); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual HType CalculateInferredType() {
- return HType::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap);
-};
-
-
-class HForInCacheArray : public HTemplateInstruction<2> {
- public:
- HForInCacheArray(HValue* enumerable,
- HValue* keys,
- int idx) : idx_(idx) {
- SetOperandAt(0, enumerable);
- SetOperandAt(1, keys);
- set_representation(Representation::Tagged());
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- HValue* enumerable() { return OperandAt(0); }
- HValue* map() { return OperandAt(1); }
- int idx() { return idx_; }
-
- HForInCacheArray* index_cache() {
- return index_cache_;
- }
-
- void set_index_cache(HForInCacheArray* index_cache) {
- index_cache_ = index_cache;
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual HType CalculateInferredType() {
- return HType::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray);
-
- private:
- int idx_;
- HForInCacheArray* index_cache_;
-};
-
-
-class HLoadFieldByIndex : public HTemplateInstruction<2> {
- public:
- HLoadFieldByIndex(HValue* object,
- HValue* index) {
- SetOperandAt(0, object);
- SetOperandAt(1, index);
- set_representation(Representation::Tagged());
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- HValue* object() { return OperandAt(0); }
- HValue* index() { return OperandAt(1); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual HType CalculateInferredType() {
- return HType::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex);
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-#undef DECLARE_INSTRUCTION
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} } // namespace v8::internal
-
-#endif // V8_HYDROGEN_INSTRUCTIONS_H_
diff --git a/src/3rdparty/v8/src/hydrogen.cc b/src/3rdparty/v8/src/hydrogen.cc
deleted file mode 100644
index 82ffbb2..0000000
--- a/src/3rdparty/v8/src/hydrogen.cc
+++ /dev/null
@@ -1,10851 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "hydrogen.h"
-
-#include "codegen.h"
-#include "full-codegen.h"
-#include "hashmap.h"
-#include "lithium-allocator.h"
-#include "parser.h"
-#include "scopeinfo.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-codegen-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-codegen-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-codegen-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-codegen-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-HBasicBlock::HBasicBlock(HGraph* graph)
- : block_id_(graph->GetNextBlockID()),
- graph_(graph),
- phis_(4, graph->zone()),
- first_(NULL),
- last_(NULL),
- end_(NULL),
- loop_information_(NULL),
- predecessors_(2, graph->zone()),
- dominator_(NULL),
- dominated_blocks_(4, graph->zone()),
- last_environment_(NULL),
- argument_count_(-1),
- first_instruction_index_(-1),
- last_instruction_index_(-1),
- deleted_phis_(4, graph->zone()),
- parent_loop_header_(NULL),
- is_inline_return_target_(false),
- is_deoptimizing_(false),
- dominates_loop_successors_(false),
- is_osr_entry_(false) { }
-
-
-void HBasicBlock::AttachLoopInformation() {
- ASSERT(!IsLoopHeader());
- loop_information_ = new(zone()) HLoopInformation(this, zone());
-}
-
-
-void HBasicBlock::DetachLoopInformation() {
- ASSERT(IsLoopHeader());
- loop_information_ = NULL;
-}
-
-
-void HBasicBlock::AddPhi(HPhi* phi) {
- ASSERT(!IsStartBlock());
- phis_.Add(phi, zone());
- phi->SetBlock(this);
-}
-
-
-void HBasicBlock::RemovePhi(HPhi* phi) {
- ASSERT(phi->block() == this);
- ASSERT(phis_.Contains(phi));
- ASSERT(phi->HasNoUses() || !phi->is_live());
- phi->Kill();
- phis_.RemoveElement(phi);
- phi->SetBlock(NULL);
-}
-
-
-void HBasicBlock::AddInstruction(HInstruction* instr) {
- ASSERT(!IsStartBlock() || !IsFinished());
- ASSERT(!instr->IsLinked());
- ASSERT(!IsFinished());
- if (first_ == NULL) {
- HBlockEntry* entry = new(zone()) HBlockEntry();
- entry->InitializeAsFirst(this);
- first_ = last_ = entry;
- }
- instr->InsertAfter(last_);
-}
-
-
-HDeoptimize* HBasicBlock::CreateDeoptimize(
- HDeoptimize::UseEnvironment has_uses) {
- ASSERT(HasEnvironment());
- if (has_uses == HDeoptimize::kNoUses)
- return new(zone()) HDeoptimize(0, zone());
-
- HEnvironment* environment = last_environment();
- HDeoptimize* instr = new(zone()) HDeoptimize(environment->length(), zone());
- for (int i = 0; i < environment->length(); i++) {
- HValue* val = environment->values()->at(i);
- instr->AddEnvironmentValue(val, zone());
- }
-
- return instr;
-}
-
-
-HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
- RemovableSimulate removable) {
- ASSERT(HasEnvironment());
- HEnvironment* environment = last_environment();
- ASSERT(ast_id.IsNone() ||
- ast_id == BailoutId::StubEntry() ||
- environment->closure()->shared()->VerifyBailoutId(ast_id));
-
- int push_count = environment->push_count();
- int pop_count = environment->pop_count();
-
- HSimulate* instr =
- new(zone()) HSimulate(ast_id, pop_count, zone(), removable);
- // Order of pushed values: newest (top of stack) first. This allows
- // HSimulate::MergeInto() to easily append additional pushed values
- // that are older (from further down the stack).
- for (int i = 0; i < push_count; ++i) {
- instr->AddPushedValue(environment->ExpressionStackAt(i));
- }
- for (GrowableBitVector::Iterator it(environment->assigned_variables(),
- zone());
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- instr->AddAssignedValue(index, environment->Lookup(index));
- }
- environment->ClearHistory();
- return instr;
-}
-
-
-void HBasicBlock::Finish(HControlInstruction* end) {
- ASSERT(!IsFinished());
- AddInstruction(end);
- end_ = end;
- for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
- it.Current()->RegisterPredecessor(this);
- }
-}
-
-
-void HBasicBlock::Goto(HBasicBlock* block, FunctionState* state) {
- bool drop_extra = state != NULL &&
- state->inlining_kind() == DROP_EXTRA_ON_RETURN;
-
- if (block->IsInlineReturnTarget()) {
- AddInstruction(new(zone()) HLeaveInlined());
- last_environment_ = last_environment()->DiscardInlined(drop_extra);
- }
-
- AddSimulate(BailoutId::None());
- HGoto* instr = new(zone()) HGoto(block);
- Finish(instr);
-}
-
-
-void HBasicBlock::AddLeaveInlined(HValue* return_value,
- FunctionState* state) {
- HBasicBlock* target = state->function_return();
- bool drop_extra = state->inlining_kind() == DROP_EXTRA_ON_RETURN;
-
- ASSERT(target->IsInlineReturnTarget());
- ASSERT(return_value != NULL);
- AddInstruction(new(zone()) HLeaveInlined());
- last_environment_ = last_environment()->DiscardInlined(drop_extra);
- last_environment()->Push(return_value);
- AddSimulate(BailoutId::None());
- HGoto* instr = new(zone()) HGoto(target);
- Finish(instr);
-}
-
-
-void HBasicBlock::SetInitialEnvironment(HEnvironment* env) {
- ASSERT(!HasEnvironment());
- ASSERT(first() == NULL);
- UpdateEnvironment(env);
-}
-
-
-void HBasicBlock::SetJoinId(BailoutId ast_id) {
- int length = predecessors_.length();
- ASSERT(length > 0);
- for (int i = 0; i < length; i++) {
- HBasicBlock* predecessor = predecessors_[i];
- ASSERT(predecessor->end()->IsGoto());
- HSimulate* simulate = HSimulate::cast(predecessor->end()->previous());
- // We only need to verify the ID once.
- ASSERT(i != 0 ||
- (predecessor->last_environment()->closure().is_null() ||
- predecessor->last_environment()->closure()->shared()
- ->VerifyBailoutId(ast_id)));
- simulate->set_ast_id(ast_id);
- }
-}
-
-
-bool HBasicBlock::Dominates(HBasicBlock* other) const {
- HBasicBlock* current = other->dominator();
- while (current != NULL) {
- if (current == this) return true;
- current = current->dominator();
- }
- return false;
-}
-
-
-int HBasicBlock::LoopNestingDepth() const {
- const HBasicBlock* current = this;
- int result = (current->IsLoopHeader()) ? 1 : 0;
- while (current->parent_loop_header() != NULL) {
- current = current->parent_loop_header();
- result++;
- }
- return result;
-}
-
-
-void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
- ASSERT(IsLoopHeader());
-
- SetJoinId(stmt->EntryId());
- if (predecessors()->length() == 1) {
- // This is a degenerated loop.
- DetachLoopInformation();
- return;
- }
-
- // Only the first entry into the loop is from outside the loop. All other
- // entries must be back edges.
- for (int i = 1; i < predecessors()->length(); ++i) {
- loop_information()->RegisterBackEdge(predecessors()->at(i));
- }
-}
-
-
-void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
- if (HasPredecessor()) {
- // Only loop header blocks can have a predecessor added after
- // instructions have been added to the block (they have phis for all
- // values in the environment, these phis may be eliminated later).
- ASSERT(IsLoopHeader() || first_ == NULL);
- HEnvironment* incoming_env = pred->last_environment();
- if (IsLoopHeader()) {
- ASSERT(phis()->length() == incoming_env->length());
- for (int i = 0; i < phis_.length(); ++i) {
- phis_[i]->AddInput(incoming_env->values()->at(i));
- }
- } else {
- last_environment()->AddIncomingEdge(this, pred->last_environment());
- }
- } else if (!HasEnvironment() && !IsFinished()) {
- ASSERT(!IsLoopHeader());
- SetInitialEnvironment(pred->last_environment()->Copy());
- }
-
- predecessors_.Add(pred, zone());
-}
-
-
-void HBasicBlock::AddDominatedBlock(HBasicBlock* block) {
- ASSERT(!dominated_blocks_.Contains(block));
- // Keep the list of dominated blocks sorted such that if there is two
- // succeeding block in this list, the predecessor is before the successor.
- int index = 0;
- while (index < dominated_blocks_.length() &&
- dominated_blocks_[index]->block_id() < block->block_id()) {
- ++index;
- }
- dominated_blocks_.InsertAt(index, block, zone());
-}
-
-
-void HBasicBlock::AssignCommonDominator(HBasicBlock* other) {
- if (dominator_ == NULL) {
- dominator_ = other;
- other->AddDominatedBlock(this);
- } else if (other->dominator() != NULL) {
- HBasicBlock* first = dominator_;
- HBasicBlock* second = other;
-
- while (first != second) {
- if (first->block_id() > second->block_id()) {
- first = first->dominator();
- } else {
- second = second->dominator();
- }
- ASSERT(first != NULL && second != NULL);
- }
-
- if (dominator_ != first) {
- ASSERT(dominator_->dominated_blocks_.Contains(this));
- dominator_->dominated_blocks_.RemoveElement(this);
- dominator_ = first;
- first->AddDominatedBlock(this);
- }
- }
-}
-
-
-void HBasicBlock::AssignLoopSuccessorDominators() {
- // Mark blocks that dominate all subsequent reachable blocks inside their
- // loop. Exploit the fact that blocks are sorted in reverse post order. When
- // the loop is visited in increasing block id order, if the number of
- // non-loop-exiting successor edges at the dominator_candidate block doesn't
- // exceed the number of previously encountered predecessor edges, there is no
- // path from the loop header to any block with higher id that doesn't go
- // through the dominator_candidate block. In this case, the
- // dominator_candidate block is guaranteed to dominate all blocks reachable
- // from it with higher ids.
- HBasicBlock* last = loop_information()->GetLastBackEdge();
- int outstanding_successors = 1; // one edge from the pre-header
- // Header always dominates everything.
- MarkAsLoopSuccessorDominator();
- for (int j = block_id(); j <= last->block_id(); ++j) {
- HBasicBlock* dominator_candidate = graph_->blocks()->at(j);
- for (HPredecessorIterator it(dominator_candidate); !it.Done();
- it.Advance()) {
- HBasicBlock* predecessor = it.Current();
- // Don't count back edges.
- if (predecessor->block_id() < dominator_candidate->block_id()) {
- outstanding_successors--;
- }
- }
-
- // If more successors than predecessors have been seen in the loop up to
- // now, it's not possible to guarantee that the current block dominates
- // all of the blocks with higher IDs. In this case, assume conservatively
- // that those paths through loop that don't go through the current block
- // contain all of the loop's dependencies. Also be careful to record
- // dominator information about the current loop that's being processed,
- // and not nested loops, which will be processed when
- // AssignLoopSuccessorDominators gets called on their header.
- ASSERT(outstanding_successors >= 0);
- HBasicBlock* parent_loop_header = dominator_candidate->parent_loop_header();
- if (outstanding_successors == 0 &&
- (parent_loop_header == this && !dominator_candidate->IsLoopHeader())) {
- dominator_candidate->MarkAsLoopSuccessorDominator();
- }
- HControlInstruction* end = dominator_candidate->end();
- for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
- HBasicBlock* successor = it.Current();
- // Only count successors that remain inside the loop and don't loop back
- // to a loop header.
- if (successor->block_id() > dominator_candidate->block_id() &&
- successor->block_id() <= last->block_id()) {
- // Backwards edges must land on loop headers.
- ASSERT(successor->block_id() > dominator_candidate->block_id() ||
- successor->IsLoopHeader());
- outstanding_successors++;
- }
- }
- }
-}
-
-
-int HBasicBlock::PredecessorIndexOf(HBasicBlock* predecessor) const {
- for (int i = 0; i < predecessors_.length(); ++i) {
- if (predecessors_[i] == predecessor) return i;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-#ifdef DEBUG
-void HBasicBlock::Verify() {
- // Check that every block is finished.
- ASSERT(IsFinished());
- ASSERT(block_id() >= 0);
-
- // Check that the incoming edges are in edge split form.
- if (predecessors_.length() > 1) {
- for (int i = 0; i < predecessors_.length(); ++i) {
- ASSERT(predecessors_[i]->end()->SecondSuccessor() == NULL);
- }
- }
-}
-#endif
-
-
-void HLoopInformation::RegisterBackEdge(HBasicBlock* block) {
- this->back_edges_.Add(block, block->zone());
- AddBlock(block);
-}
-
-
-HBasicBlock* HLoopInformation::GetLastBackEdge() const {
- int max_id = -1;
- HBasicBlock* result = NULL;
- for (int i = 0; i < back_edges_.length(); ++i) {
- HBasicBlock* cur = back_edges_[i];
- if (cur->block_id() > max_id) {
- max_id = cur->block_id();
- result = cur;
- }
- }
- return result;
-}
-
-
-void HLoopInformation::AddBlock(HBasicBlock* block) {
- if (block == loop_header()) return;
- if (block->parent_loop_header() == loop_header()) return;
- if (block->parent_loop_header() != NULL) {
- AddBlock(block->parent_loop_header());
- } else {
- block->set_parent_loop_header(loop_header());
- blocks_.Add(block, block->zone());
- for (int i = 0; i < block->predecessors()->length(); ++i) {
- AddBlock(block->predecessors()->at(i));
- }
- }
-}
-
-
-#ifdef DEBUG
-
-// Checks reachability of the blocks in this graph and stores a bit in
-// the BitVector "reachable()" for every block that can be reached
-// from the start block of the graph. If "dont_visit" is non-null, the given
-// block is treated as if it would not be part of the graph. "visited_count()"
-// returns the number of reachable blocks.
-class ReachabilityAnalyzer BASE_EMBEDDED {
- public:
- ReachabilityAnalyzer(HBasicBlock* entry_block,
- int block_count,
- HBasicBlock* dont_visit)
- : visited_count_(0),
- stack_(16, entry_block->zone()),
- reachable_(block_count, entry_block->zone()),
- dont_visit_(dont_visit) {
- PushBlock(entry_block);
- Analyze();
- }
-
- int visited_count() const { return visited_count_; }
- const BitVector* reachable() const { return &reachable_; }
-
- private:
- void PushBlock(HBasicBlock* block) {
- if (block != NULL && block != dont_visit_ &&
- !reachable_.Contains(block->block_id())) {
- reachable_.Add(block->block_id());
- stack_.Add(block, block->zone());
- visited_count_++;
- }
- }
-
- void Analyze() {
- while (!stack_.is_empty()) {
- HControlInstruction* end = stack_.RemoveLast()->end();
- for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
- PushBlock(it.Current());
- }
- }
- }
-
- int visited_count_;
- ZoneList<HBasicBlock*> stack_;
- BitVector reachable_;
- HBasicBlock* dont_visit_;
-};
-
-
-void HGraph::Verify(bool do_full_verify) const {
- // Allow dereferencing for debug mode verification.
- AllowHandleDereference allow_handle_deref(isolate());
- for (int i = 0; i < blocks_.length(); i++) {
- HBasicBlock* block = blocks_.at(i);
-
- block->Verify();
-
- // Check that every block contains at least one node and that only the last
- // node is a control instruction.
- HInstruction* current = block->first();
- ASSERT(current != NULL && current->IsBlockEntry());
- while (current != NULL) {
- ASSERT((current->next() == NULL) == current->IsControlInstruction());
- ASSERT(current->block() == block);
- current->Verify();
- current = current->next();
- }
-
- // Check that successors are correctly set.
- HBasicBlock* first = block->end()->FirstSuccessor();
- HBasicBlock* second = block->end()->SecondSuccessor();
- ASSERT(second == NULL || first != NULL);
-
- // Check that the predecessor array is correct.
- if (first != NULL) {
- ASSERT(first->predecessors()->Contains(block));
- if (second != NULL) {
- ASSERT(second->predecessors()->Contains(block));
- }
- }
-
- // Check that phis have correct arguments.
- for (int j = 0; j < block->phis()->length(); j++) {
- HPhi* phi = block->phis()->at(j);
- phi->Verify();
- }
-
- // Check that all join blocks have predecessors that end with an
- // unconditional goto and agree on their environment node id.
- if (block->predecessors()->length() >= 2) {
- BailoutId id =
- block->predecessors()->first()->last_environment()->ast_id();
- for (int k = 0; k < block->predecessors()->length(); k++) {
- HBasicBlock* predecessor = block->predecessors()->at(k);
- ASSERT(predecessor->end()->IsGoto());
- ASSERT(predecessor->last_environment()->ast_id() == id);
- }
- }
- }
-
- // Check special property of first block to have no predecessors.
- ASSERT(blocks_.at(0)->predecessors()->is_empty());
-
- if (do_full_verify) {
- // Check that the graph is fully connected.
- ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
- ASSERT(analyzer.visited_count() == blocks_.length());
-
- // Check that entry block dominator is NULL.
- ASSERT(entry_block_->dominator() == NULL);
-
- // Check dominators.
- for (int i = 0; i < blocks_.length(); ++i) {
- HBasicBlock* block = blocks_.at(i);
- if (block->dominator() == NULL) {
- // Only start block may have no dominator assigned to.
- ASSERT(i == 0);
- } else {
- // Assert that block is unreachable if dominator must not be visited.
- ReachabilityAnalyzer dominator_analyzer(entry_block_,
- blocks_.length(),
- block->dominator());
- ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
- }
- }
- }
-}
-
-#endif
-
-
-HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
- Handle<Object> value) {
- if (!pointer->is_set()) {
- HConstant* constant = new(zone()) HConstant(value,
- Representation::Tagged());
- constant->InsertAfter(GetConstantUndefined());
- pointer->set(constant);
- }
- return pointer->get();
-}
-
-
-HConstant* HGraph::GetConstantInt32(SetOncePointer<HConstant>* pointer,
- int32_t value) {
- if (!pointer->is_set()) {
- HConstant* constant =
- new(zone()) HConstant(value, Representation::Integer32());
- constant->InsertAfter(GetConstantUndefined());
- pointer->set(constant);
- }
- return pointer->get();
-}
-
-
-HConstant* HGraph::GetConstant0() {
- return GetConstantInt32(&constant_0_, 0);
-}
-
-
-HConstant* HGraph::GetConstant1() {
- return GetConstantInt32(&constant_1_, 1);
-}
-
-
-HConstant* HGraph::GetConstantMinus1() {
- return GetConstantInt32(&constant_minus1_, -1);
-}
-
-
-HConstant* HGraph::GetConstantTrue() {
- return GetConstant(&constant_true_, isolate()->factory()->true_value());
-}
-
-
-HConstant* HGraph::GetConstantFalse() {
- return GetConstant(&constant_false_, isolate()->factory()->false_value());
-}
-
-
-HConstant* HGraph::GetConstantHole() {
- return GetConstant(&constant_hole_, isolate()->factory()->the_hole_value());
-}
-
-
-HGraphBuilder::CheckBuilder::CheckBuilder(HGraphBuilder* builder, BailoutId id)
- : builder_(builder),
- finished_(false),
- id_(id) {
- HEnvironment* env = builder->environment();
- failure_block_ = builder->CreateBasicBlock(env->Copy());
- merge_block_ = builder->CreateBasicBlock(env->Copy());
-}
-
-
-void HGraphBuilder::CheckBuilder::CheckNotUndefined(HValue* value) {
- HEnvironment* env = builder_->environment();
- HIsNilAndBranch* compare =
- new(zone()) HIsNilAndBranch(value, kStrictEquality, kUndefinedValue);
- HBasicBlock* success_block = builder_->CreateBasicBlock(env->Copy());
- HBasicBlock* failure_block = builder_->CreateBasicBlock(env->Copy());
- compare->SetSuccessorAt(0, failure_block);
- compare->SetSuccessorAt(1, success_block);
- failure_block->Goto(failure_block_);
- builder_->current_block()->Finish(compare);
- builder_->set_current_block(success_block);
-}
-
-
-void HGraphBuilder::CheckBuilder::CheckIntegerEq(HValue* left, HValue* right) {
- HEnvironment* env = builder_->environment();
- HCompareIDAndBranch* compare =
- new(zone()) HCompareIDAndBranch(left, right, Token::EQ);
- compare->AssumeRepresentation(Representation::Integer32());
- HBasicBlock* success_block = builder_->CreateBasicBlock(env->Copy());
- HBasicBlock* failure_block = builder_->CreateBasicBlock(env->Copy());
- compare->SetSuccessorAt(0, success_block);
- compare->SetSuccessorAt(1, failure_block);
- failure_block->Goto(failure_block_);
- builder_->current_block()->Finish(compare);
- builder_->set_current_block(success_block);
-}
-
-
-void HGraphBuilder::CheckBuilder::End() {
- ASSERT(!finished_);
- builder_->current_block()->Goto(merge_block_);
- failure_block_->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
- failure_block_->SetJoinId(id_);
- builder_->set_current_block(merge_block_);
- merge_block_->SetJoinId(id_);
- finished_ = true;
-}
-
-
-HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, BailoutId id)
- : builder_(builder),
- finished_(false),
- id_(id) {
- HEnvironment* env = builder->environment();
- first_true_block_ = builder->CreateBasicBlock(env->Copy());
- last_true_block_ = NULL;
- first_false_block_ = builder->CreateBasicBlock(env->Copy());
-}
-
-
-HInstruction* HGraphBuilder::IfBuilder::BeginTrue(
- HValue* left,
- HValue* right,
- Token::Value token,
- Representation input_representation) {
- HCompareIDAndBranch* compare =
- new(zone()) HCompareIDAndBranch(left, right, token);
- compare->set_observed_input_representation(input_representation,
- input_representation);
- compare->ChangeRepresentation(input_representation);
- compare->SetSuccessorAt(0, first_true_block_);
- compare->SetSuccessorAt(1, first_false_block_);
- builder_->current_block()->Finish(compare);
- builder_->set_current_block(first_true_block_);
- return compare;
-}
-
-
-void HGraphBuilder::IfBuilder::BeginFalse() {
- last_true_block_ = builder_->current_block();
- ASSERT(!last_true_block_->IsFinished());
- builder_->set_current_block(first_false_block_);
-}
-
-
-void HGraphBuilder::IfBuilder::End() {
- ASSERT(!finished_);
- ASSERT(!last_true_block_->IsFinished());
- HBasicBlock* last_false_block = builder_->current_block();
- ASSERT(!last_false_block->IsFinished());
- HEnvironment* merge_env =
- last_true_block_->last_environment()->Copy();
- merge_block_ = builder_->CreateBasicBlock(merge_env);
- last_true_block_->Goto(merge_block_);
- last_false_block->Goto(merge_block_);
- merge_block_->SetJoinId(id_);
- builder_->set_current_block(merge_block_);
- finished_ = true;
-}
-
-
-HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
- HValue* context,
- LoopBuilder::Direction direction,
- BailoutId id)
- : builder_(builder),
- context_(context),
- direction_(direction),
- id_(id),
- finished_(false) {
- header_block_ = builder->CreateLoopHeaderBlock();
- body_block_ = NULL;
- exit_block_ = NULL;
-}
-
-
-HValue* HGraphBuilder::LoopBuilder::BeginBody(
- HValue* initial,
- HValue* terminating,
- Token::Value token,
- Representation input_representation) {
- HEnvironment* env = builder_->environment();
- phi_ = new(zone()) HPhi(env->values()->length(), zone());
- header_block_->AddPhi(phi_);
- phi_->AddInput(initial);
- phi_->ChangeRepresentation(Representation::Integer32());
- env->Push(initial);
- builder_->current_block()->Goto(header_block_);
-
- HEnvironment* body_env = env->Copy();
- HEnvironment* exit_env = env->Copy();
- body_block_ = builder_->CreateBasicBlock(body_env);
- exit_block_ = builder_->CreateBasicBlock(exit_env);
- // Remove the phi from the expression stack
- body_env->Pop();
-
- builder_->set_current_block(header_block_);
- HCompareIDAndBranch* compare =
- new(zone()) HCompareIDAndBranch(phi_, terminating, token);
- compare->set_observed_input_representation(input_representation,
- input_representation);
- compare->ChangeRepresentation(input_representation);
- compare->SetSuccessorAt(0, body_block_);
- compare->SetSuccessorAt(1, exit_block_);
- builder_->current_block()->Finish(compare);
-
- builder_->set_current_block(body_block_);
- if (direction_ == kPreIncrement || direction_ == kPreDecrement) {
- HValue* one = builder_->graph()->GetConstant1();
- if (direction_ == kPreIncrement) {
- increment_ = HAdd::New(zone(), context_, phi_, one);
- } else {
- increment_ = HSub::New(zone(), context_, phi_, one);
- }
- increment_->ClearFlag(HValue::kCanOverflow);
- increment_->ChangeRepresentation(Representation::Integer32());
- builder_->AddInstruction(increment_);
- return increment_;
- } else {
- return phi_;
- }
-}
-
-
-void HGraphBuilder::LoopBuilder::EndBody() {
- ASSERT(!finished_);
-
- if (direction_ == kPostIncrement || direction_ == kPostDecrement) {
- HValue* one = builder_->graph()->GetConstant1();
- if (direction_ == kPostIncrement) {
- increment_ = HAdd::New(zone(), context_, phi_, one);
- } else {
- increment_ = HSub::New(zone(), context_, phi_, one);
- }
- increment_->ClearFlag(HValue::kCanOverflow);
- increment_->ChangeRepresentation(Representation::Integer32());
- builder_->AddInstruction(increment_);
- }
-
- // Push the new increment value on the expression stack to merge into the phi.
- builder_->environment()->Push(increment_);
- builder_->current_block()->Goto(header_block_);
- header_block_->loop_information()->RegisterBackEdge(body_block_);
- header_block_->SetJoinId(id_);
-
- builder_->set_current_block(exit_block_);
- // Pop the phi from the expression stack
- builder_->environment()->Pop();
- finished_ = true;
-}
-
-
-HGraph* HGraphBuilder::CreateGraph() {
- graph_ = new(zone()) HGraph(info_);
- if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info_);
- HPhase phase("H_Block building");
- set_current_block(graph()->entry_block());
- if (!BuildGraph()) return NULL;
- return graph_;
-}
-
-
-HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
- ASSERT(current_block() != NULL);
- current_block()->AddInstruction(instr);
- return instr;
-}
-
-
-void HGraphBuilder::AddSimulate(BailoutId id,
- RemovableSimulate removable) {
- ASSERT(current_block() != NULL);
- current_block()->AddSimulate(id, removable);
-}
-
-
-HBoundsCheck* HGraphBuilder::AddBoundsCheck(HValue* index,
- HValue* length,
- BoundsCheckKeyMode key_mode,
- Representation r) {
- if (!index->type().IsSmi()) {
- index = new(graph()->zone()) HCheckSmiOrInt32(index);
- AddInstruction(HCheckSmiOrInt32::cast(index));
- }
- if (!length->type().IsSmi()) {
- length = new(graph()->zone()) HCheckSmiOrInt32(length);
- AddInstruction(HCheckSmiOrInt32::cast(length));
- }
- HBoundsCheck* result = new(graph()->zone()) HBoundsCheck(
- index, length, key_mode, r);
- AddInstruction(result);
- return result;
-}
-
-
-HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
- HBasicBlock* b = graph()->CreateBasicBlock();
- b->SetInitialEnvironment(env);
- return b;
-}
-
-
-HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
- HBasicBlock* header = graph()->CreateBasicBlock();
- HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
- header->SetInitialEnvironment(entry_env);
- header->AttachLoopInformation();
- return header;
-}
-
-
-HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store) {
- Zone* zone = this->zone();
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- val = AddInstruction(new(zone) HClampToUint8(val));
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- break;
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- return new(zone) HStoreKeyed(external_elements, checked_key,
- val, elements_kind);
- } else {
- ASSERT(val == NULL);
- HLoadKeyed* load =
- new(zone) HLoadKeyed(
- external_elements, checked_key, dependency, elements_kind);
- if (FLAG_opt_safe_uint32_operations &&
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- graph()->RecordUint32Instruction(load);
- }
- return load;
- }
-}
-
-
-HInstruction* HGraphBuilder::BuildFastElementAccess(
- HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* load_dependency,
- ElementsKind elements_kind,
- bool is_store) {
- Zone* zone = this->zone();
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- // Smi-only arrays need a smi check.
- AddInstruction(new(zone) HCheckSmi(val));
- // Fall through.
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return new(zone) HStoreKeyed(elements, checked_key, val, elements_kind);
- default:
- UNREACHABLE();
- return NULL;
- }
- }
- // It's an element load (!is_store).
- return new(zone) HLoadKeyed(elements,
- checked_key,
- load_dependency,
- elements_kind);
-}
-
-
-HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
- HValue* object,
- HValue* key,
- HValue* val,
- HCheckMaps* mapcheck,
- bool is_js_array,
- ElementsKind elements_kind,
- bool is_store,
- Representation checked_index_representation) {
- Zone* zone = this->zone();
- // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
- // on a HElementsTransition instruction. The flag can also be removed if the
- // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
- // ElementsKind transitions. Finally, the dependency can be removed for stores
- // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
- // generated store code.
- if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
- (elements_kind == FAST_ELEMENTS && is_store)) {
- if (mapcheck != NULL) {
- mapcheck->ClearGVNFlag(kDependsOnElementsKind);
- }
- }
- bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
- bool fast_elements = IsFastObjectElementsKind(elements_kind);
- HInstruction* elements =
- AddInstruction(new(zone) HLoadElements(object, mapcheck));
- if (is_store && (fast_elements || fast_smi_only_elements)) {
- HCheckMaps* check_cow_map = new(zone) HCheckMaps(
- elements, graph()->isolate()->factory()->fixed_array_map(), zone);
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
- AddInstruction(check_cow_map);
- }
- HInstruction* length = NULL;
- HInstruction* checked_key = NULL;
- if (IsExternalArrayElementsKind(elements_kind)) {
- length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
- checked_key = AddBoundsCheck(
- key, length, ALLOW_SMI_KEY, checked_index_representation);
- HLoadExternalArrayPointer* external_elements =
- new(zone) HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
- return BuildExternalArrayElementAccess(
- external_elements, checked_key, val, mapcheck,
- elements_kind, is_store);
- }
- ASSERT(fast_smi_only_elements ||
- fast_elements ||
- IsFastDoubleElementsKind(elements_kind));
- if (is_js_array) {
- length = AddInstruction(new(zone) HJSArrayLength(object, mapcheck,
- HType::Smi()));
- } else {
- length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
- }
- checked_key = AddBoundsCheck(
- key, length, ALLOW_SMI_KEY, checked_index_representation);
- return BuildFastElementAccess(elements, checked_key, val, mapcheck,
- elements_kind, is_store);
-}
-
-
-HValue* HGraphBuilder::BuildAllocateElements(HContext* context,
- ElementsKind kind,
- HValue* capacity) {
- Zone* zone = this->zone();
-
- int elements_size = IsFastDoubleElementsKind(kind)
- ? kDoubleSize : kPointerSize;
- HConstant* elements_size_value =
- new(zone) HConstant(elements_size, Representation::Integer32());
- AddInstruction(elements_size_value);
- HValue* mul = AddInstruction(
- HMul::New(zone, context, capacity, elements_size_value));
- mul->ChangeRepresentation(Representation::Integer32());
- mul->ClearFlag(HValue::kCanOverflow);
-
- HConstant* header_size =
- new(zone) HConstant(FixedArray::kHeaderSize, Representation::Integer32());
- AddInstruction(header_size);
- HValue* total_size = AddInstruction(
- HAdd::New(zone, context, mul, header_size));
- total_size->ChangeRepresentation(Representation::Integer32());
- total_size->ClearFlag(HValue::kCanOverflow);
-
- HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
- if (IsFastDoubleElementsKind(kind)) {
- flags = static_cast<HAllocate::Flags>(
- flags | HAllocate::ALLOCATE_DOUBLE_ALIGNED);
- }
-
- HValue* elements =
- AddInstruction(new(zone) HAllocate(context, total_size,
- HType::JSArray(), flags));
- Isolate* isolate = graph()->isolate();
-
- Factory* factory = isolate->factory();
- Handle<Map> map = IsFastDoubleElementsKind(kind)
- ? factory->fixed_double_array_map()
- : factory->fixed_array_map();
- BuildStoreMap(elements, map, BailoutId::StubEntry());
-
- Handle<String> fixed_array_length_field_name =
- isolate->factory()->length_field_string();
- HInstruction* store_length =
- new(zone) HStoreNamedField(elements, fixed_array_length_field_name,
- capacity, true, FixedArray::kLengthOffset);
- AddInstruction(store_length);
- AddSimulate(BailoutId::StubEntry(), FIXED_SIMULATE);
-
- return elements;
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
- HValue* map,
- BailoutId id) {
- Zone* zone = this->zone();
- Isolate* isolate = graph()->isolate();
- Factory* factory = isolate->factory();
- Handle<String> map_field_name = factory->map_field_string();
- HInstruction* store_map =
- new(zone) HStoreNamedField(object, map_field_name, map,
- true, JSObject::kMapOffset);
- store_map->SetGVNFlag(kChangesMaps);
- AddInstruction(store_map);
- AddSimulate(id, FIXED_SIMULATE);
- return store_map;
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
- Handle<Map> map,
- BailoutId id) {
- Zone* zone = this->zone();
- HValue* map_constant =
- AddInstruction(new(zone) HConstant(map, Representation::Tagged()));
- return BuildStoreMap(object, map_constant, id);
-}
-
-
-void HGraphBuilder::BuildCopyElements(HContext* context,
- HValue* from_elements,
- ElementsKind from_elements_kind,
- HValue* to_elements,
- ElementsKind to_elements_kind,
- HValue* length) {
- LoopBuilder builder(this, context, LoopBuilder::kPostIncrement,
- BailoutId::StubEntry());
-
- HValue* key = builder.BeginBody(graph()->GetConstant0(),
- length, Token::LT);
-
- HValue* element =
- AddInstruction(new(zone()) HLoadKeyed(from_elements, key, NULL,
- from_elements_kind,
- ALLOW_RETURN_HOLE));
-
- AddInstruction(new(zone()) HStoreKeyed(to_elements, key, element,
- to_elements_kind));
- AddSimulate(BailoutId::StubEntry(), REMOVABLE_SIMULATE);
-
- builder.EndBody();
-}
-
-
-HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
- TypeFeedbackOracle* oracle)
- : HGraphBuilder(info),
- function_state_(NULL),
- initial_function_state_(this, info, oracle, NORMAL_RETURN),
- ast_context_(NULL),
- break_scope_(NULL),
- inlined_count_(0),
- globals_(10, info->zone()),
- inline_bailout_(false) {
- // This is not initialized in the initializer list because the
- // constructor for the initial state relies on function_state_ == NULL
- // to know it's the initial state.
- function_state_= &initial_function_state_;
- InitializeAstVisitor();
-}
-
-
-HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
- HBasicBlock* second,
- BailoutId join_id) {
- if (first == NULL) {
- return second;
- } else if (second == NULL) {
- return first;
- } else {
- HBasicBlock* join_block = graph()->CreateBasicBlock();
- first->Goto(join_block);
- second->Goto(join_block);
- join_block->SetJoinId(join_id);
- return join_block;
- }
-}
-
-
-HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
- HBasicBlock* exit_block,
- HBasicBlock* continue_block) {
- if (continue_block != NULL) {
- if (exit_block != NULL) exit_block->Goto(continue_block);
- continue_block->SetJoinId(statement->ContinueId());
- return continue_block;
- }
- return exit_block;
-}
-
-
-HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
- HBasicBlock* loop_entry,
- HBasicBlock* body_exit,
- HBasicBlock* loop_successor,
- HBasicBlock* break_block) {
- if (body_exit != NULL) body_exit->Goto(loop_entry);
- loop_entry->PostProcessLoopHeader(statement);
- if (break_block != NULL) {
- if (loop_successor != NULL) loop_successor->Goto(break_block);
- break_block->SetJoinId(statement->ExitId());
- return break_block;
- }
- return loop_successor;
-}
-
-
-void HBasicBlock::FinishExit(HControlInstruction* instruction) {
- Finish(instruction);
- ClearEnvironment();
-}
-
-
-HGraph::HGraph(CompilationInfo* info)
- : isolate_(info->isolate()),
- next_block_id_(0),
- entry_block_(NULL),
- blocks_(8, info->zone()),
- values_(16, info->zone()),
- phi_list_(NULL),
- uint32_instructions_(NULL),
- info_(info),
- zone_(info->zone()),
- is_recursive_(false),
- use_optimistic_licm_(false),
- has_soft_deoptimize_(false),
- type_change_checksum_(0) {
- if (info->IsStub()) {
- HydrogenCodeStub* stub = info->code_stub();
- int param_count =
- stub->GetInterfaceDescriptor(isolate_)->register_param_count_;
- start_environment_ =
- new(zone_) HEnvironment(zone_, param_count);
- } else {
- start_environment_ =
- new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
- }
- start_environment_->set_ast_id(BailoutId::FunctionEntry());
- entry_block_ = CreateBasicBlock();
- entry_block_->SetInitialEnvironment(start_environment_);
-}
-
-
-HBasicBlock* HGraph::CreateBasicBlock() {
- HBasicBlock* result = new(zone()) HBasicBlock(this);
- blocks_.Add(result, zone());
- return result;
-}
-
-
-void HGraph::Canonicalize() {
- if (!FLAG_use_canonicalizing) return;
- HPhase phase("H_Canonicalize", this);
- for (int i = 0; i < blocks()->length(); ++i) {
- HInstruction* instr = blocks()->at(i)->first();
- while (instr != NULL) {
- HValue* value = instr->Canonicalize();
- if (value != instr) instr->DeleteAndReplaceWith(value);
- instr = instr->next();
- }
- }
-}
-
-// Block ordering was implemented with two mutually recursive methods,
-// HGraph::Postorder and HGraph::PostorderLoopBlocks.
-// The recursion could lead to stack overflow so the algorithm has been
-// implemented iteratively.
-// At a high level the algorithm looks like this:
-//
-// Postorder(block, loop_header) : {
-// if (block has already been visited or is of another loop) return;
-// mark block as visited;
-// if (block is a loop header) {
-// VisitLoopMembers(block, loop_header);
-// VisitSuccessorsOfLoopHeader(block);
-// } else {
-// VisitSuccessors(block)
-// }
-// put block in result list;
-// }
-//
-// VisitLoopMembers(block, outer_loop_header) {
-// foreach (block b in block loop members) {
-// VisitSuccessorsOfLoopMember(b, outer_loop_header);
-// if (b is loop header) VisitLoopMembers(b);
-// }
-// }
-//
-// VisitSuccessorsOfLoopMember(block, outer_loop_header) {
-// foreach (block b in block successors) Postorder(b, outer_loop_header)
-// }
-//
-// VisitSuccessorsOfLoopHeader(block) {
-// foreach (block b in block successors) Postorder(b, block)
-// }
-//
-// VisitSuccessors(block, loop_header) {
-// foreach (block b in block successors) Postorder(b, loop_header)
-// }
-//
-// The ordering is started calling Postorder(entry, NULL).
-//
-// Each instance of PostorderProcessor represents the "stack frame" of the
-// recursion, and particularly keeps the state of the loop (iteration) of the
-// "Visit..." function it represents.
-// To recycle memory we keep all the frames in a double linked list but
-// this means that we cannot use constructors to initialize the frames.
-//
-class PostorderProcessor : public ZoneObject {
- public:
- // Back link (towards the stack bottom).
- PostorderProcessor* parent() {return father_; }
- // Forward link (towards the stack top).
- PostorderProcessor* child() {return child_; }
- HBasicBlock* block() { return block_; }
- HLoopInformation* loop() { return loop_; }
- HBasicBlock* loop_header() { return loop_header_; }
-
- static PostorderProcessor* CreateEntryProcessor(Zone* zone,
- HBasicBlock* block,
- BitVector* visited) {
- PostorderProcessor* result = new(zone) PostorderProcessor(NULL);
- return result->SetupSuccessors(zone, block, NULL, visited);
- }
-
- PostorderProcessor* PerformStep(Zone* zone,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order) {
- PostorderProcessor* next =
- PerformNonBacktrackingStep(zone, visited, order);
- if (next != NULL) {
- return next;
- } else {
- return Backtrack(zone, visited, order);
- }
- }
-
- private:
- explicit PostorderProcessor(PostorderProcessor* father)
- : father_(father), child_(NULL), successor_iterator(NULL) { }
-
- // Each enum value states the cycle whose state is kept by this instance.
- enum LoopKind {
- NONE,
- SUCCESSORS,
- SUCCESSORS_OF_LOOP_HEADER,
- LOOP_MEMBERS,
- SUCCESSORS_OF_LOOP_MEMBER
- };
-
- // Each "Setup..." method is like a constructor for a cycle state.
- PostorderProcessor* SetupSuccessors(Zone* zone,
- HBasicBlock* block,
- HBasicBlock* loop_header,
- BitVector* visited) {
- if (block == NULL || visited->Contains(block->block_id()) ||
- block->parent_loop_header() != loop_header) {
- kind_ = NONE;
- block_ = NULL;
- loop_ = NULL;
- loop_header_ = NULL;
- return this;
- } else {
- block_ = block;
- loop_ = NULL;
- visited->Add(block->block_id());
-
- if (block->IsLoopHeader()) {
- kind_ = SUCCESSORS_OF_LOOP_HEADER;
- loop_header_ = block;
- InitializeSuccessors();
- PostorderProcessor* result = Push(zone);
- return result->SetupLoopMembers(zone, block, block->loop_information(),
- loop_header);
- } else {
- ASSERT(block->IsFinished());
- kind_ = SUCCESSORS;
- loop_header_ = loop_header;
- InitializeSuccessors();
- return this;
- }
- }
- }
-
- PostorderProcessor* SetupLoopMembers(Zone* zone,
- HBasicBlock* block,
- HLoopInformation* loop,
- HBasicBlock* loop_header) {
- kind_ = LOOP_MEMBERS;
- block_ = block;
- loop_ = loop;
- loop_header_ = loop_header;
- InitializeLoopMembers();
- return this;
- }
-
- PostorderProcessor* SetupSuccessorsOfLoopMember(
- HBasicBlock* block,
- HLoopInformation* loop,
- HBasicBlock* loop_header) {
- kind_ = SUCCESSORS_OF_LOOP_MEMBER;
- block_ = block;
- loop_ = loop;
- loop_header_ = loop_header;
- InitializeSuccessors();
- return this;
- }
-
- // This method "allocates" a new stack frame.
- PostorderProcessor* Push(Zone* zone) {
- if (child_ == NULL) {
- child_ = new(zone) PostorderProcessor(this);
- }
- return child_;
- }
-
- void ClosePostorder(ZoneList<HBasicBlock*>* order, Zone* zone) {
- ASSERT(block_->end()->FirstSuccessor() == NULL ||
- order->Contains(block_->end()->FirstSuccessor()) ||
- block_->end()->FirstSuccessor()->IsLoopHeader());
- ASSERT(block_->end()->SecondSuccessor() == NULL ||
- order->Contains(block_->end()->SecondSuccessor()) ||
- block_->end()->SecondSuccessor()->IsLoopHeader());
- order->Add(block_, zone);
- }
-
- // This method is the basic block to walk up the stack.
- PostorderProcessor* Pop(Zone* zone,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order) {
- switch (kind_) {
- case SUCCESSORS:
- case SUCCESSORS_OF_LOOP_HEADER:
- ClosePostorder(order, zone);
- return father_;
- case LOOP_MEMBERS:
- return father_;
- case SUCCESSORS_OF_LOOP_MEMBER:
- if (block()->IsLoopHeader() && block() != loop_->loop_header()) {
- // In this case we need to perform a LOOP_MEMBERS cycle so we
- // initialize it and return this instead of father.
- return SetupLoopMembers(zone, block(),
- block()->loop_information(), loop_header_);
- } else {
- return father_;
- }
- case NONE:
- return father_;
- }
- UNREACHABLE();
- return NULL;
- }
-
- // Walks up the stack.
- PostorderProcessor* Backtrack(Zone* zone,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order) {
- PostorderProcessor* parent = Pop(zone, visited, order);
- while (parent != NULL) {
- PostorderProcessor* next =
- parent->PerformNonBacktrackingStep(zone, visited, order);
- if (next != NULL) {
- return next;
- } else {
- parent = parent->Pop(zone, visited, order);
- }
- }
- return NULL;
- }
-
- PostorderProcessor* PerformNonBacktrackingStep(
- Zone* zone,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order) {
- HBasicBlock* next_block;
- switch (kind_) {
- case SUCCESSORS:
- next_block = AdvanceSuccessors();
- if (next_block != NULL) {
- PostorderProcessor* result = Push(zone);
- return result->SetupSuccessors(zone, next_block,
- loop_header_, visited);
- }
- break;
- case SUCCESSORS_OF_LOOP_HEADER:
- next_block = AdvanceSuccessors();
- if (next_block != NULL) {
- PostorderProcessor* result = Push(zone);
- return result->SetupSuccessors(zone, next_block,
- block(), visited);
- }
- break;
- case LOOP_MEMBERS:
- next_block = AdvanceLoopMembers();
- if (next_block != NULL) {
- PostorderProcessor* result = Push(zone);
- return result->SetupSuccessorsOfLoopMember(next_block,
- loop_, loop_header_);
- }
- break;
- case SUCCESSORS_OF_LOOP_MEMBER:
- next_block = AdvanceSuccessors();
- if (next_block != NULL) {
- PostorderProcessor* result = Push(zone);
- return result->SetupSuccessors(zone, next_block,
- loop_header_, visited);
- }
- break;
- case NONE:
- return NULL;
- }
- return NULL;
- }
-
- // The following two methods implement a "foreach b in successors" cycle.
- void InitializeSuccessors() {
- loop_index = 0;
- loop_length = 0;
- successor_iterator = HSuccessorIterator(block_->end());
- }
-
- HBasicBlock* AdvanceSuccessors() {
- if (!successor_iterator.Done()) {
- HBasicBlock* result = successor_iterator.Current();
- successor_iterator.Advance();
- return result;
- }
- return NULL;
- }
-
- // The following two methods implement a "foreach b in loop members" cycle.
- void InitializeLoopMembers() {
- loop_index = 0;
- loop_length = loop_->blocks()->length();
- }
-
- HBasicBlock* AdvanceLoopMembers() {
- if (loop_index < loop_length) {
- HBasicBlock* result = loop_->blocks()->at(loop_index);
- loop_index++;
- return result;
- } else {
- return NULL;
- }
- }
-
- LoopKind kind_;
- PostorderProcessor* father_;
- PostorderProcessor* child_;
- HLoopInformation* loop_;
- HBasicBlock* block_;
- HBasicBlock* loop_header_;
- int loop_index;
- int loop_length;
- HSuccessorIterator successor_iterator;
-};
-
-
-void HGraph::OrderBlocks() {
- HPhase phase("H_Block ordering");
- BitVector visited(blocks_.length(), zone());
-
- ZoneList<HBasicBlock*> reverse_result(8, zone());
- HBasicBlock* start = blocks_[0];
- PostorderProcessor* postorder =
- PostorderProcessor::CreateEntryProcessor(zone(), start, &visited);
- while (postorder != NULL) {
- postorder = postorder->PerformStep(zone(), &visited, &reverse_result);
- }
- blocks_.Rewind(0);
- int index = 0;
- for (int i = reverse_result.length() - 1; i >= 0; --i) {
- HBasicBlock* b = reverse_result[i];
- blocks_.Add(b, zone());
- b->set_block_id(index++);
- }
-}
-
-
-void HGraph::AssignDominators() {
- HPhase phase("H_Assign dominators", this);
- for (int i = 0; i < blocks_.length(); ++i) {
- HBasicBlock* block = blocks_[i];
- if (block->IsLoopHeader()) {
- // Only the first predecessor of a loop header is from outside the loop.
- // All others are back edges, and thus cannot dominate the loop header.
- block->AssignCommonDominator(block->predecessors()->first());
- block->AssignLoopSuccessorDominators();
- } else {
- for (int j = blocks_[i]->predecessors()->length() - 1; j >= 0; --j) {
- blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->at(j));
- }
- }
- }
-}
-
-
-// Mark all blocks that are dominated by an unconditional soft deoptimize to
-// prevent code motion across those blocks.
-void HGraph::PropagateDeoptimizingMark() {
- HPhase phase("H_Propagate deoptimizing mark", this);
- // Skip this phase if there is nothing to be done anyway.
- if (!has_soft_deoptimize()) return;
- MarkAsDeoptimizingRecursively(entry_block());
- NullifyUnreachableInstructions();
-}
-
-
-void HGraph::MarkAsDeoptimizingRecursively(HBasicBlock* block) {
- for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
- HBasicBlock* dominated = block->dominated_blocks()->at(i);
- if (block->IsDeoptimizing()) dominated->MarkAsDeoptimizing();
- MarkAsDeoptimizingRecursively(dominated);
- }
-}
-
-
-void HGraph::NullifyUnreachableInstructions() {
- if (!FLAG_unreachable_code_elimination) return;
- int block_count = blocks_.length();
- for (int i = 0; i < block_count; ++i) {
- HBasicBlock* block = blocks_.at(i);
- bool nullify = false;
- const ZoneList<HBasicBlock*>* predecessors = block->predecessors();
- int predecessors_length = predecessors->length();
- bool all_predecessors_deoptimizing = (predecessors_length > 0);
- for (int j = 0; j < predecessors_length; ++j) {
- if (!predecessors->at(j)->IsDeoptimizing()) {
- all_predecessors_deoptimizing = false;
- break;
- }
- }
- if (all_predecessors_deoptimizing) nullify = true;
- for (HInstruction* instr = block->first(); instr != NULL;
- instr = instr->next()) {
- // Leave the basic structure of the graph intact.
- if (instr->IsBlockEntry()) continue;
- if (instr->IsControlInstruction()) continue;
- if (instr->IsSimulate()) continue;
- if (instr->IsEnterInlined()) continue;
- if (instr->IsLeaveInlined()) continue;
- if (nullify) {
- HInstruction* last_dummy = NULL;
- for (int j = 0; j < instr->OperandCount(); ++j) {
- HValue* operand = instr->OperandAt(j);
- // Insert an HDummyUse for each operand, unless the operand
- // is an HDummyUse itself. If it's even from the same block,
- // remember it as a potential replacement for the instruction.
- if (operand->IsDummyUse()) {
- if (operand->block() == instr->block() &&
- last_dummy == NULL) {
- last_dummy = HInstruction::cast(operand);
- }
- continue;
- }
- if (operand->IsControlInstruction()) {
- // Inserting a dummy use for a value that's not defined anywhere
- // will fail. Some instructions define fake inputs on such
- // values as control flow dependencies.
- continue;
- }
- HDummyUse* dummy = new(zone()) HDummyUse(operand);
- dummy->InsertBefore(instr);
- last_dummy = dummy;
- }
- if (last_dummy == NULL) last_dummy = GetConstant1();
- instr->DeleteAndReplaceWith(last_dummy);
- continue;
- }
- if (instr->IsSoftDeoptimize()) {
- ASSERT(block->IsDeoptimizing());
- nullify = true;
- }
- }
- }
-}
-
-
-// Replace all phis consisting of a single non-loop operand plus any number of
-// loop operands by that single non-loop operand.
-void HGraph::EliminateRedundantPhis() {
- HPhase phase("H_Redundant phi elimination", this);
-
- // We do a simple fixed point iteration without any work list, because
- // machine-generated JavaScript can lead to a very dense Hydrogen graph with
- // an enormous work list and will consequently result in OOM. Experiments
- // showed that this simple algorithm is good enough, and even e.g. tracking
- // the set or range of blocks to consider is not a real improvement.
- bool need_another_iteration;
- ZoneList<HPhi*> redundant_phis(blocks_.length(), zone());
- do {
- need_another_iteration = false;
- for (int i = 0; i < blocks_.length(); ++i) {
- HBasicBlock* block = blocks_[i];
- for (int j = 0; j < block->phis()->length(); j++) {
- HPhi* phi = block->phis()->at(j);
- HValue* replacement = phi->GetRedundantReplacement();
- if (replacement != NULL) {
- // Remember phi to avoid concurrent modification of the block's phis.
- redundant_phis.Add(phi, zone());
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- HValue* value = it.value();
- value->SetOperandAt(it.index(), replacement);
- need_another_iteration |= value->IsPhi();
- }
- }
- }
- for (int i = 0; i < redundant_phis.length(); i++) {
- block->RemovePhi(redundant_phis[i]);
- }
- redundant_phis.Clear();
- }
- } while (need_another_iteration);
-
-#if DEBUG
- // Make sure that we *really* removed all redundant phis.
- for (int i = 0; i < blocks_.length(); ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
- ASSERT(blocks_[i]->phis()->at(j)->GetRedundantReplacement() == NULL);
- }
- }
-#endif
-}
-
-
-void HGraph::EliminateUnreachablePhis() {
- HPhase phase("H_Unreachable phi elimination", this);
-
- // Initialize worklist.
- ZoneList<HPhi*> phi_list(blocks_.length(), zone());
- ZoneList<HPhi*> worklist(blocks_.length(), zone());
- for (int i = 0; i < blocks_.length(); ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- phi_list.Add(phi, zone());
- // We can't eliminate phis in the receiver position in the environment
- // because in case of throwing an error we need this value to
- // construct a stack trace.
- if (phi->HasRealUses() || phi->IsReceiver()) {
- phi->set_is_live(true);
- worklist.Add(phi, zone());
- }
- }
- }
-
- // Iteratively mark live phis.
- while (!worklist.is_empty()) {
- HPhi* phi = worklist.RemoveLast();
- for (int i = 0; i < phi->OperandCount(); i++) {
- HValue* operand = phi->OperandAt(i);
- if (operand->IsPhi() && !HPhi::cast(operand)->is_live()) {
- HPhi::cast(operand)->set_is_live(true);
- worklist.Add(HPhi::cast(operand), zone());
- }
- }
- }
-
- // Remove unreachable phis.
- for (int i = 0; i < phi_list.length(); i++) {
- HPhi* phi = phi_list[i];
- if (!phi->is_live()) {
- HBasicBlock* block = phi->block();
- block->RemovePhi(phi);
- block->RecordDeletedPhi(phi->merged_index());
- }
- }
-}
-
-
-bool HGraph::CheckArgumentsPhiUses() {
- int block_count = blocks_.length();
- for (int i = 0; i < block_count; ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- // We don't support phi uses of arguments for now.
- if (phi->CheckFlag(HValue::kIsArguments)) return false;
- }
- }
- return true;
-}
-
-
-bool HGraph::CheckConstPhiUses() {
- int block_count = blocks_.length();
- for (int i = 0; i < block_count; ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- // Check for the hole value (from an uninitialized const).
- for (int k = 0; k < phi->OperandCount(); k++) {
- if (phi->OperandAt(k) == GetConstantHole()) return false;
- }
- }
- }
- return true;
-}
-
-
-void HGraph::CollectPhis() {
- int block_count = blocks_.length();
- phi_list_ = new(zone()) ZoneList<HPhi*>(block_count, zone());
- for (int i = 0; i < block_count; ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- phi_list_->Add(phi, zone());
- }
- }
-}
-
-
-void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
- BitVector in_worklist(GetMaximumValueID(), zone());
- for (int i = 0; i < worklist->length(); ++i) {
- ASSERT(!in_worklist.Contains(worklist->at(i)->id()));
- in_worklist.Add(worklist->at(i)->id());
- }
-
- while (!worklist->is_empty()) {
- HValue* current = worklist->RemoveLast();
- in_worklist.Remove(current->id());
- if (current->UpdateInferredType()) {
- for (HUseIterator it(current->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (!in_worklist.Contains(use->id())) {
- in_worklist.Add(use->id());
- worklist->Add(use, zone());
- }
- }
- }
- }
-}
-
-
-class HRangeAnalysis BASE_EMBEDDED {
- public:
- explicit HRangeAnalysis(HGraph* graph) :
- graph_(graph), zone_(graph->zone()), changed_ranges_(16, zone_) { }
-
- void Analyze();
-
- private:
- void TraceRange(const char* msg, ...);
- void Analyze(HBasicBlock* block);
- void InferControlFlowRange(HCompareIDAndBranch* test, HBasicBlock* dest);
- void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
- void InferRange(HValue* value);
- void RollBackTo(int index);
- void AddRange(HValue* value, Range* range);
-
- HGraph* graph_;
- Zone* zone_;
- ZoneList<HValue*> changed_ranges_;
-};
-
-
-void HRangeAnalysis::TraceRange(const char* msg, ...) {
- if (FLAG_trace_range) {
- va_list arguments;
- va_start(arguments, msg);
- OS::VPrint(msg, arguments);
- va_end(arguments);
- }
-}
-
-
-void HRangeAnalysis::Analyze() {
- HPhase phase("H_Range analysis", graph_);
- Analyze(graph_->entry_block());
-}
-
-
-void HRangeAnalysis::Analyze(HBasicBlock* block) {
- TraceRange("Analyzing block B%d\n", block->block_id());
-
- int last_changed_range = changed_ranges_.length() - 1;
-
- // Infer range based on control flow.
- if (block->predecessors()->length() == 1) {
- HBasicBlock* pred = block->predecessors()->first();
- if (pred->end()->IsCompareIDAndBranch()) {
- InferControlFlowRange(HCompareIDAndBranch::cast(pred->end()), block);
- }
- }
-
- // Process phi instructions.
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- InferRange(phi);
- }
-
- // Go through all instructions of the current block.
- HInstruction* instr = block->first();
- while (instr != block->end()) {
- InferRange(instr);
- instr = instr->next();
- }
-
- // Continue analysis in all dominated blocks.
- for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
- Analyze(block->dominated_blocks()->at(i));
- }
-
- RollBackTo(last_changed_range);
-}
-
-
-void HRangeAnalysis::InferControlFlowRange(HCompareIDAndBranch* test,
- HBasicBlock* dest) {
- ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
- if (test->representation().IsInteger32()) {
- Token::Value op = test->token();
- if (test->SecondSuccessor() == dest) {
- op = Token::NegateCompareOp(op);
- }
- Token::Value inverted_op = Token::ReverseCompareOp(op);
- UpdateControlFlowRange(op, test->left(), test->right());
- UpdateControlFlowRange(inverted_op, test->right(), test->left());
- }
-}
-
-
-// We know that value [op] other. Use this information to update the range on
-// value.
-void HRangeAnalysis::UpdateControlFlowRange(Token::Value op,
- HValue* value,
- HValue* other) {
- Range temp_range;
- Range* range = other->range() != NULL ? other->range() : &temp_range;
- Range* new_range = NULL;
-
- TraceRange("Control flow range infer %d %s %d\n",
- value->id(),
- Token::Name(op),
- other->id());
-
- if (op == Token::EQ || op == Token::EQ_STRICT) {
- // The same range has to apply for value.
- new_range = range->Copy(zone_);
- } else if (op == Token::LT || op == Token::LTE) {
- new_range = range->CopyClearLower(zone_);
- if (op == Token::LT) {
- new_range->AddConstant(-1);
- }
- } else if (op == Token::GT || op == Token::GTE) {
- new_range = range->CopyClearUpper(zone_);
- if (op == Token::GT) {
- new_range->AddConstant(1);
- }
- }
-
- if (new_range != NULL && !new_range->IsMostGeneric()) {
- AddRange(value, new_range);
- }
-}
-
-
-void HRangeAnalysis::InferRange(HValue* value) {
- ASSERT(!value->HasRange());
- if (!value->representation().IsNone()) {
- value->ComputeInitialRange(zone_);
- Range* range = value->range();
- TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
- value->id(),
- value->Mnemonic(),
- range->lower(),
- range->upper());
- }
-}
-
-
-void HRangeAnalysis::RollBackTo(int index) {
- for (int i = index + 1; i < changed_ranges_.length(); ++i) {
- changed_ranges_[i]->RemoveLastAddedRange();
- }
- changed_ranges_.Rewind(index + 1);
-}
-
-
-void HRangeAnalysis::AddRange(HValue* value, Range* range) {
- Range* original_range = value->range();
- value->AddNewRange(range, zone_);
- changed_ranges_.Add(value, zone_);
- Range* new_range = value->range();
- TraceRange("Updated range of %d set to [%d,%d]\n",
- value->id(),
- new_range->lower(),
- new_range->upper());
- if (original_range != NULL) {
- TraceRange("Original range was [%d,%d]\n",
- original_range->lower(),
- original_range->upper());
- }
- TraceRange("New information was [%d,%d]\n",
- range->lower(),
- range->upper());
-}
-
-
-void TraceGVN(const char* msg, ...) {
- va_list arguments;
- va_start(arguments, msg);
- OS::VPrint(msg, arguments);
- va_end(arguments);
-}
-
-// Wrap TraceGVN in macros to avoid the expense of evaluating its arguments when
-// --trace-gvn is off.
-#define TRACE_GVN_1(msg, a1) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1); \
- }
-
-#define TRACE_GVN_2(msg, a1, a2) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2); \
- }
-
-#define TRACE_GVN_3(msg, a1, a2, a3) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2, a3); \
- }
-
-#define TRACE_GVN_4(msg, a1, a2, a3, a4) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2, a3, a4); \
- }
-
-#define TRACE_GVN_5(msg, a1, a2, a3, a4, a5) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2, a3, a4, a5); \
- }
-
-
-HValueMap::HValueMap(Zone* zone, const HValueMap* other)
- : array_size_(other->array_size_),
- lists_size_(other->lists_size_),
- count_(other->count_),
- present_flags_(other->present_flags_),
- array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
- lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
- free_list_head_(other->free_list_head_) {
- memcpy(array_, other->array_, array_size_ * sizeof(HValueMapListElement));
- memcpy(lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
-}
-
-
-void HValueMap::Kill(GVNFlagSet flags) {
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
- if (!present_flags_.ContainsAnyOf(depends_flags)) return;
- present_flags_.RemoveAll();
- for (int i = 0; i < array_size_; ++i) {
- HValue* value = array_[i].value;
- if (value != NULL) {
- // Clear list of collisions first, so we know if it becomes empty.
- int kept = kNil; // List of kept elements.
- int next;
- for (int current = array_[i].next; current != kNil; current = next) {
- next = lists_[current].next;
- HValue* value = lists_[current].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
- // Drop it.
- count_--;
- lists_[current].next = free_list_head_;
- free_list_head_ = current;
- } else {
- // Keep it.
- lists_[current].next = kept;
- kept = current;
- present_flags_.Add(value->gvn_flags());
- }
- }
- array_[i].next = kept;
-
- // Now possibly drop directly indexed element.
- value = array_[i].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) { // Drop it.
- count_--;
- int head = array_[i].next;
- if (head == kNil) {
- array_[i].value = NULL;
- } else {
- array_[i].value = lists_[head].value;
- array_[i].next = lists_[head].next;
- lists_[head].next = free_list_head_;
- free_list_head_ = head;
- }
- } else {
- present_flags_.Add(value->gvn_flags()); // Keep it.
- }
- }
- }
-}
-
-
-HValue* HValueMap::Lookup(HValue* value) const {
- uint32_t hash = static_cast<uint32_t>(value->Hashcode());
- uint32_t pos = Bound(hash);
- if (array_[pos].value != NULL) {
- if (array_[pos].value->Equals(value)) return array_[pos].value;
- int next = array_[pos].next;
- while (next != kNil) {
- if (lists_[next].value->Equals(value)) return lists_[next].value;
- next = lists_[next].next;
- }
- }
- return NULL;
-}
-
-
-void HValueMap::Resize(int new_size, Zone* zone) {
- ASSERT(new_size > count_);
- // Hashing the values into the new array has no more collisions than in the
- // old hash map, so we can use the existing lists_ array, if we are careful.
-
- // Make sure we have at least one free element.
- if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1, zone);
- }
-
- HValueMapListElement* new_array =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
-
- HValueMapListElement* old_array = array_;
- int old_size = array_size_;
-
- int old_count = count_;
- count_ = 0;
- // Do not modify present_flags_. It is currently correct.
- array_size_ = new_size;
- array_ = new_array;
-
- if (old_array != NULL) {
- // Iterate over all the elements in lists, rehashing them.
- for (int i = 0; i < old_size; ++i) {
- if (old_array[i].value != NULL) {
- int current = old_array[i].next;
- while (current != kNil) {
- Insert(lists_[current].value, zone);
- int next = lists_[current].next;
- lists_[current].next = free_list_head_;
- free_list_head_ = current;
- current = next;
- }
- // Rehash the directly stored value.
- Insert(old_array[i].value, zone);
- }
- }
- }
- USE(old_count);
- ASSERT(count_ == old_count);
-}
-
-
-void HValueMap::ResizeLists(int new_size, Zone* zone) {
- ASSERT(new_size > lists_size_);
-
- HValueMapListElement* new_lists =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
-
- HValueMapListElement* old_lists = lists_;
- int old_size = lists_size_;
-
- lists_size_ = new_size;
- lists_ = new_lists;
-
- if (old_lists != NULL) {
- memcpy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
- }
- for (int i = old_size; i < lists_size_; ++i) {
- lists_[i].next = free_list_head_;
- free_list_head_ = i;
- }
-}
-
-
-void HValueMap::Insert(HValue* value, Zone* zone) {
- ASSERT(value != NULL);
- // Resizing when half of the hashtable is filled up.
- if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
- ASSERT(count_ < array_size_);
- count_++;
- uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
- if (array_[pos].value == NULL) {
- array_[pos].value = value;
- array_[pos].next = kNil;
- } else {
- if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1, zone);
- }
- int new_element_pos = free_list_head_;
- ASSERT(new_element_pos != kNil);
- free_list_head_ = lists_[free_list_head_].next;
- lists_[new_element_pos].value = value;
- lists_[new_element_pos].next = array_[pos].next;
- ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
- array_[pos].next = new_element_pos;
- }
-}
-
-
-HSideEffectMap::HSideEffectMap() : count_(0) {
- memset(data_, 0, kNumberOfTrackedSideEffects * kPointerSize);
-}
-
-
-HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
- *this = *other; // Calls operator=.
-}
-
-
-HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
- if (this != &other) {
- memcpy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
- }
- return *this;
-}
-
-void HSideEffectMap::Kill(GVNFlagSet flags) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
- if (data_[i] != NULL) count_--;
- data_[i] = NULL;
- }
- }
-}
-
-
-void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
- if (data_[i] == NULL) count_++;
- data_[i] = instr;
- }
- }
-}
-
-
-class HStackCheckEliminator BASE_EMBEDDED {
- public:
- explicit HStackCheckEliminator(HGraph* graph) : graph_(graph) { }
-
- void Process();
-
- private:
- HGraph* graph_;
-};
-
-
-void HStackCheckEliminator::Process() {
- // For each loop block walk the dominator tree from the backwards branch to
- // the loop header. If a call instruction is encountered the backwards branch
- // is dominated by a call and the stack check in the backwards branch can be
- // removed.
- for (int i = 0; i < graph_->blocks()->length(); i++) {
- HBasicBlock* block = graph_->blocks()->at(i);
- if (block->IsLoopHeader()) {
- HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
- HBasicBlock* dominator = back_edge;
- while (true) {
- HInstruction* instr = dominator->first();
- while (instr != NULL) {
- if (instr->IsCall()) {
- block->loop_information()->stack_check()->Eliminate();
- break;
- }
- instr = instr->next();
- }
-
- // Done when the loop header is processed.
- if (dominator == block) break;
-
- // Move up the dominator tree.
- dominator = dominator->dominator();
- }
- }
- }
-}
-
-
-// Simple sparse set with O(1) add, contains, and clear.
-class SparseSet {
- public:
- SparseSet(Zone* zone, int capacity)
- : capacity_(capacity),
- length_(0),
- dense_(zone->NewArray<int>(capacity)),
- sparse_(zone->NewArray<int>(capacity)) {
-#ifndef NVALGRIND
- // Initialize the sparse array to make valgrind happy.
- memset(sparse_, 0, sizeof(sparse_[0]) * capacity);
-#endif
- }
-
- bool Contains(int n) const {
- ASSERT(0 <= n && n < capacity_);
- int d = sparse_[n];
- return 0 <= d && d < length_ && dense_[d] == n;
- }
-
- bool Add(int n) {
- if (Contains(n)) return false;
- dense_[length_] = n;
- sparse_[n] = length_;
- ++length_;
- return true;
- }
-
- void Clear() { length_ = 0; }
-
- private:
- int capacity_;
- int length_;
- int* dense_;
- int* sparse_;
-
- DISALLOW_COPY_AND_ASSIGN(SparseSet);
-};
-
-
-class HGlobalValueNumberer BASE_EMBEDDED {
- public:
- explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
- : graph_(graph),
- info_(info),
- removed_side_effects_(false),
- block_side_effects_(graph->blocks()->length(), graph->zone()),
- loop_side_effects_(graph->blocks()->length(), graph->zone()),
- visited_on_paths_(graph->zone(), graph->blocks()->length()) {
-#ifdef DEBUG
- ASSERT(info->isolate()->optimizing_compiler_thread()->IsOptimizerThread() ||
- !info->isolate()->heap()->IsAllocationAllowed());
-#endif
- block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
- graph_->zone());
- loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
- graph_->zone());
- }
-
- // Returns true if values with side effects are removed.
- bool Analyze();
-
- private:
- GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
- HBasicBlock* dominator,
- HBasicBlock* dominated);
- void AnalyzeGraph();
- void ComputeBlockSideEffects();
- void LoopInvariantCodeMotion();
- void ProcessLoopBlock(HBasicBlock* block,
- HBasicBlock* before_loop,
- GVNFlagSet loop_kills,
- GVNFlagSet* accumulated_first_time_depends,
- GVNFlagSet* accumulated_first_time_changes);
- bool AllowCodeMotion();
- bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
-
- HGraph* graph() { return graph_; }
- CompilationInfo* info() { return info_; }
- Zone* zone() const { return graph_->zone(); }
-
- HGraph* graph_;
- CompilationInfo* info_;
- bool removed_side_effects_;
-
- // A map of block IDs to their side effects.
- ZoneList<GVNFlagSet> block_side_effects_;
-
- // A map of loop header block IDs to their loop's side effects.
- ZoneList<GVNFlagSet> loop_side_effects_;
-
- // Used when collecting side effects on paths from dominator to
- // dominated.
- SparseSet visited_on_paths_;
-};
-
-
-bool HGlobalValueNumberer::Analyze() {
- removed_side_effects_ = false;
- ComputeBlockSideEffects();
- if (FLAG_loop_invariant_code_motion) {
- LoopInvariantCodeMotion();
- }
- AnalyzeGraph();
- return removed_side_effects_;
-}
-
-
-void HGlobalValueNumberer::ComputeBlockSideEffects() {
- // The Analyze phase of GVN can be called multiple times. Clear loop side
- // effects before computing them to erase the contents from previous Analyze
- // passes.
- for (int i = 0; i < loop_side_effects_.length(); ++i) {
- loop_side_effects_[i].RemoveAll();
- }
- for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
- // Compute side effects for the block.
- HBasicBlock* block = graph_->blocks()->at(i);
- HInstruction* instr = block->first();
- int id = block->block_id();
- GVNFlagSet side_effects;
- while (instr != NULL) {
- side_effects.Add(instr->ChangesFlags());
- if (instr->IsSoftDeoptimize()) {
- block_side_effects_[id].RemoveAll();
- side_effects.RemoveAll();
- break;
- }
- instr = instr->next();
- }
- block_side_effects_[id].Add(side_effects);
-
- // Loop headers are part of their loop.
- if (block->IsLoopHeader()) {
- loop_side_effects_[id].Add(side_effects);
- }
-
- // Propagate loop side effects upwards.
- if (block->HasParentLoopHeader()) {
- int header_id = block->parent_loop_header()->block_id();
- loop_side_effects_[header_id].Add(block->IsLoopHeader()
- ? loop_side_effects_[id]
- : side_effects);
- }
- }
-}
-
-
-SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
- char underlying_buffer[kLastFlag * 128];
- Vector<char> buffer(underlying_buffer, sizeof(underlying_buffer));
-#if DEBUG
- int offset = 0;
- const char* separator = "";
- const char* comma = ", ";
- buffer[0] = 0;
- uint32_t set_depends_on = 0;
- uint32_t set_changes = 0;
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if ((flags.ToIntegral() & (1 << bit)) != 0) {
- if (bit % 2 == 0) {
- set_changes++;
- } else {
- set_depends_on++;
- }
- }
- }
- bool positive_changes = set_changes < (kLastFlag / 2);
- bool positive_depends_on = set_depends_on < (kLastFlag / 2);
- if (set_changes > 0) {
- if (positive_changes) {
- offset += OS::SNPrintF(buffer + offset, "changes [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "changes all except [");
- }
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_changes) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kChanges##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
- if (set_depends_on > 0) {
- separator = "";
- if (set_changes > 0) {
- offset += OS::SNPrintF(buffer + offset, ", ");
- }
- if (positive_depends_on) {
- offset += OS::SNPrintF(buffer + offset, "depends on [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "depends on all except [");
- }
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_depends_on) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kDependsOn##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
-#else
- OS::SNPrintF(buffer, "0x%08X", flags.ToIntegral());
-#endif
- size_t string_len = strlen(underlying_buffer) + 1;
- ASSERT(string_len <= sizeof(underlying_buffer));
- char* result = new char[strlen(underlying_buffer) + 1];
- memcpy(result, underlying_buffer, string_len);
- return SmartArrayPointer<char>(result);
-}
-
-
-void HGlobalValueNumberer::LoopInvariantCodeMotion() {
- TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
- graph_->use_optimistic_licm() ? "yes" : "no");
- for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
- HBasicBlock* block = graph_->blocks()->at(i);
- if (block->IsLoopHeader()) {
- GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
- TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
- block->block_id(),
- *GetGVNFlagsString(side_effects));
-
- GVNFlagSet accumulated_first_time_depends;
- GVNFlagSet accumulated_first_time_changes;
- HBasicBlock* last = block->loop_information()->GetLastBackEdge();
- for (int j = block->block_id(); j <= last->block_id(); ++j) {
- ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects,
- &accumulated_first_time_depends,
- &accumulated_first_time_changes);
- }
- }
- }
-}
-
-
-void HGlobalValueNumberer::ProcessLoopBlock(
- HBasicBlock* block,
- HBasicBlock* loop_header,
- GVNFlagSet loop_kills,
- GVNFlagSet* first_time_depends,
- GVNFlagSet* first_time_changes) {
- HBasicBlock* pre_header = loop_header->predecessors()->at(0);
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
- TRACE_GVN_2("Loop invariant motion for B%d %s\n",
- block->block_id(),
- *GetGVNFlagsString(depends_flags));
- HInstruction* instr = block->first();
- while (instr != NULL) {
- HInstruction* next = instr->next();
- bool hoisted = false;
- if (instr->CheckFlag(HValue::kUseGVN)) {
- TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
- instr->id(),
- instr->Mnemonic(),
- *GetGVNFlagsString(instr->gvn_flags()),
- *GetGVNFlagsString(loop_kills));
- bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
- if (can_hoist && !graph()->use_optimistic_licm()) {
- can_hoist = block->IsLoopSuccessorDominator();
- }
-
- if (can_hoist) {
- bool inputs_loop_invariant = true;
- for (int i = 0; i < instr->OperandCount(); ++i) {
- if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
- inputs_loop_invariant = false;
- }
- }
-
- if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
- TRACE_GVN_1("Hoisting loop invariant instruction %d\n", instr->id());
- // Move the instruction out of the loop.
- instr->Unlink();
- instr->InsertBefore(pre_header->end());
- if (instr->HasSideEffects()) removed_side_effects_ = true;
- hoisted = true;
- }
- }
- }
- if (!hoisted) {
- // If an instruction is not hoisted, we have to account for its side
- // effects when hoisting later HTransitionElementsKind instructions.
- GVNFlagSet previous_depends = *first_time_depends;
- GVNFlagSet previous_changes = *first_time_changes;
- first_time_depends->Add(instr->DependsOnFlags());
- first_time_changes->Add(instr->ChangesFlags());
- if (!(previous_depends == *first_time_depends)) {
- TRACE_GVN_1("Updated first-time accumulated %s\n",
- *GetGVNFlagsString(*first_time_depends));
- }
- if (!(previous_changes == *first_time_changes)) {
- TRACE_GVN_1("Updated first-time accumulated %s\n",
- *GetGVNFlagsString(*first_time_changes));
- }
- }
- instr = next;
- }
-}
-
-
-bool HGlobalValueNumberer::AllowCodeMotion() {
- return info()->IsStub() || info()->opt_count() + 1 < FLAG_max_opt_count;
-}
-
-
-bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
- HBasicBlock* loop_header) {
- // If we've disabled code motion or we're in a block that unconditionally
- // deoptimizes, don't move any instructions.
- return AllowCodeMotion() && !instr->block()->IsDeoptimizing();
-}
-
-
-GVNFlagSet HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
- HBasicBlock* dominator, HBasicBlock* dominated) {
- GVNFlagSet side_effects;
- for (int i = 0; i < dominated->predecessors()->length(); ++i) {
- HBasicBlock* block = dominated->predecessors()->at(i);
- if (dominator->block_id() < block->block_id() &&
- block->block_id() < dominated->block_id() &&
- visited_on_paths_.Add(block->block_id())) {
- side_effects.Add(block_side_effects_[block->block_id()]);
- if (block->IsLoopHeader()) {
- side_effects.Add(loop_side_effects_[block->block_id()]);
- }
- side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock(
- dominator, block));
- }
- }
- return side_effects;
-}
-
-
-// Each instance of this class is like a "stack frame" for the recursive
-// traversal of the dominator tree done during GVN (the stack is handled
-// as a double linked list).
-// We reuse frames when possible so the list length is limited by the depth
-// of the dominator tree but this forces us to initialize each frame calling
-// an explicit "Initialize" method instead of a using constructor.
-class GvnBasicBlockState: public ZoneObject {
- public:
- static GvnBasicBlockState* CreateEntry(Zone* zone,
- HBasicBlock* entry_block,
- HValueMap* entry_map) {
- return new(zone)
- GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
- }
-
- HBasicBlock* block() { return block_; }
- HValueMap* map() { return map_; }
- HSideEffectMap* dominators() { return &dominators_; }
-
- GvnBasicBlockState* next_in_dominator_tree_traversal(
- Zone* zone,
- HBasicBlock** dominator) {
- // This assignment needs to happen before calling next_dominated() because
- // that call can reuse "this" if we are at the last dominated block.
- *dominator = block();
- GvnBasicBlockState* result = next_dominated(zone);
- if (result == NULL) {
- GvnBasicBlockState* dominator_state = pop();
- if (dominator_state != NULL) {
- // This branch is guaranteed not to return NULL because pop() never
- // returns a state where "is_done() == true".
- *dominator = dominator_state->block();
- result = dominator_state->next_dominated(zone);
- } else {
- // Unnecessary (we are returning NULL) but done for cleanness.
- *dominator = NULL;
- }
- }
- return result;
- }
-
- private:
- void Initialize(HBasicBlock* block,
- HValueMap* map,
- HSideEffectMap* dominators,
- bool copy_map,
- Zone* zone) {
- block_ = block;
- map_ = copy_map ? map->Copy(zone) : map;
- dominated_index_ = -1;
- length_ = block->dominated_blocks()->length();
- if (dominators != NULL) {
- dominators_ = *dominators;
- }
- }
- bool is_done() { return dominated_index_ >= length_; }
-
- GvnBasicBlockState(GvnBasicBlockState* previous,
- HBasicBlock* block,
- HValueMap* map,
- HSideEffectMap* dominators,
- Zone* zone)
- : previous_(previous), next_(NULL) {
- Initialize(block, map, dominators, true, zone);
- }
-
- GvnBasicBlockState* next_dominated(Zone* zone) {
- dominated_index_++;
- if (dominated_index_ == length_ - 1) {
- // No need to copy the map for the last child in the dominator tree.
- Initialize(block_->dominated_blocks()->at(dominated_index_),
- map(),
- dominators(),
- false,
- zone);
- return this;
- } else if (dominated_index_ < length_) {
- return push(zone,
- block_->dominated_blocks()->at(dominated_index_),
- dominators());
- } else {
- return NULL;
- }
- }
-
- GvnBasicBlockState* push(Zone* zone,
- HBasicBlock* block,
- HSideEffectMap* dominators) {
- if (next_ == NULL) {
- next_ =
- new(zone) GvnBasicBlockState(this, block, map(), dominators, zone);
- } else {
- next_->Initialize(block, map(), dominators, true, zone);
- }
- return next_;
- }
- GvnBasicBlockState* pop() {
- GvnBasicBlockState* result = previous_;
- while (result != NULL && result->is_done()) {
- TRACE_GVN_2("Backtracking from block B%d to block b%d\n",
- block()->block_id(),
- previous_->block()->block_id())
- result = result->previous_;
- }
- return result;
- }
-
- GvnBasicBlockState* previous_;
- GvnBasicBlockState* next_;
- HBasicBlock* block_;
- HValueMap* map_;
- HSideEffectMap dominators_;
- int dominated_index_;
- int length_;
-};
-
-// This is a recursive traversal of the dominator tree but it has been turned
-// into a loop to avoid stack overflows.
-// The logical "stack frames" of the recursion are kept in a list of
-// GvnBasicBlockState instances.
-void HGlobalValueNumberer::AnalyzeGraph() {
- HBasicBlock* entry_block = graph_->entry_block();
- HValueMap* entry_map = new(zone()) HValueMap(zone());
- GvnBasicBlockState* current =
- GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
-
- while (current != NULL) {
- HBasicBlock* block = current->block();
- HValueMap* map = current->map();
- HSideEffectMap* dominators = current->dominators();
-
- TRACE_GVN_2("Analyzing block B%d%s\n",
- block->block_id(),
- block->IsLoopHeader() ? " (loop header)" : "");
-
- // If this is a loop header kill everything killed by the loop.
- if (block->IsLoopHeader()) {
- map->Kill(loop_side_effects_[block->block_id()]);
- }
-
- // Go through all instructions of the current block.
- HInstruction* instr = block->first();
- while (instr != NULL) {
- HInstruction* next = instr->next();
- GVNFlagSet flags = instr->ChangesFlags();
- if (!flags.IsEmpty()) {
- // Clear all instructions in the map that are affected by side effects.
- // Store instruction as the dominating one for tracked side effects.
- map->Kill(flags);
- dominators->Store(flags, instr);
- TRACE_GVN_2("Instruction %d %s\n", instr->id(),
- *GetGVNFlagsString(flags));
- }
- if (instr->CheckFlag(HValue::kUseGVN)) {
- ASSERT(!instr->HasObservableSideEffects());
- HValue* other = map->Lookup(instr);
- if (other != NULL) {
- ASSERT(instr->Equals(other) && other->Equals(instr));
- TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
- instr->id(),
- instr->Mnemonic(),
- other->id(),
- other->Mnemonic());
- if (instr->HasSideEffects()) removed_side_effects_ = true;
- instr->DeleteAndReplaceWith(other);
- } else {
- map->Add(instr, zone());
- }
- }
- if (instr->IsLinked() &&
- instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- HValue* other = dominators->at(i);
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
- if (instr->DependsOnFlags().Contains(depends_on_flag) &&
- (other != NULL)) {
- TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
- i,
- instr->id(),
- instr->Mnemonic(),
- other->id(),
- other->Mnemonic());
- instr->SetSideEffectDominator(changes_flag, other);
- }
- }
- }
- instr = next;
- }
-
- HBasicBlock* dominator_block;
- GvnBasicBlockState* next =
- current->next_in_dominator_tree_traversal(zone(), &dominator_block);
-
- if (next != NULL) {
- HBasicBlock* dominated = next->block();
- HValueMap* successor_map = next->map();
- HSideEffectMap* successor_dominators = next->dominators();
-
- // Kill everything killed on any path between this block and the
- // dominated block. We don't have to traverse these paths if the
- // value map and the dominators list is already empty. If the range
- // of block ids (block_id, dominated_id) is empty there are no such
- // paths.
- if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
- dominator_block->block_id() + 1 < dominated->block_id()) {
- visited_on_paths_.Clear();
- GVNFlagSet side_effects_on_all_paths =
- CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
- dominated);
- successor_map->Kill(side_effects_on_all_paths);
- successor_dominators->Kill(side_effects_on_all_paths);
- }
- }
- current = next;
- }
-}
-
-
-void HInferRepresentation::AddToWorklist(HValue* current) {
- if (current->representation().IsTagged()) return;
- if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
- if (in_worklist_.Contains(current->id())) return;
- worklist_.Add(current, zone());
- in_worklist_.Add(current->id());
-}
-
-
-void HInferRepresentation::Analyze() {
- HPhase phase("H_Infer representations", graph_);
-
- // (1) Initialize bit vectors and count real uses. Each phi gets a
- // bit-vector of length <number of phis>.
- const ZoneList<HPhi*>* phi_list = graph_->phi_list();
- int phi_count = phi_list->length();
- ZoneList<BitVector*> connected_phis(phi_count, graph_->zone());
- for (int i = 0; i < phi_count; ++i) {
- phi_list->at(i)->InitRealUses(i);
- BitVector* connected_set = new(zone()) BitVector(phi_count, graph_->zone());
- connected_set->Add(i);
- connected_phis.Add(connected_set, zone());
- }
-
- // (2) Do a fixed point iteration to find the set of connected phis. A
- // phi is connected to another phi if its value is used either directly or
- // indirectly through a transitive closure of the def-use relation.
- bool change = true;
- while (change) {
- change = false;
- // We normally have far more "forward edges" than "backward edges",
- // so we terminate faster when we walk backwards.
- for (int i = phi_count - 1; i >= 0; --i) {
- HPhi* phi = phi_list->at(i);
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (use->IsPhi()) {
- int id = HPhi::cast(use)->phi_id();
- if (connected_phis[i]->UnionIsChanged(*connected_phis[id]))
- change = true;
- }
- }
- }
- }
-
- // (3a) Use the phi reachability information from step 2 to
- // push information about values which can't be converted to integer
- // without deoptimization through the phi use-def chains, avoiding
- // unnecessary deoptimizations later.
- for (int i = 0; i < phi_count; ++i) {
- HPhi* phi = phi_list->at(i);
- bool cti = phi->AllOperandsConvertibleToInteger();
- if (cti) continue;
-
- for (BitVector::Iterator it(connected_phis.at(i));
- !it.Done();
- it.Advance()) {
- HPhi* phi = phi_list->at(it.Current());
- phi->set_is_convertible_to_integer(false);
- }
- }
-
- // (3b) Use the phi reachability information from step 2 to
- // sum up the non-phi use counts of all connected phis.
- for (int i = 0; i < phi_count; ++i) {
- HPhi* phi = phi_list->at(i);
- for (BitVector::Iterator it(connected_phis.at(i));
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- HPhi* it_use = phi_list->at(index);
- if (index != i) phi->AddNonPhiUsesFrom(it_use); // Don't count twice.
- }
- }
-
- // Initialize work list
- for (int i = 0; i < graph_->blocks()->length(); ++i) {
- HBasicBlock* block = graph_->blocks()->at(i);
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); ++j) {
- AddToWorklist(phis->at(j));
- }
-
- HInstruction* current = block->first();
- while (current != NULL) {
- AddToWorklist(current);
- current = current->next();
- }
- }
-
- // Do a fixed point iteration, trying to improve representations
- while (!worklist_.is_empty()) {
- HValue* current = worklist_.RemoveLast();
- in_worklist_.Remove(current->id());
- current->InferRepresentation(this);
- }
-
- // Lastly: any instruction that we don't have representation information
- // for defaults to Tagged.
- for (int i = 0; i < graph_->blocks()->length(); ++i) {
- HBasicBlock* block = graph_->blocks()->at(i);
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); ++j) {
- HPhi* phi = phis->at(j);
- if (phi->representation().IsNone()) {
- phi->ChangeRepresentation(Representation::Tagged());
- }
- }
- for (HInstruction* current = block->first();
- current != NULL; current = current->next()) {
- if (current->representation().IsNone() &&
- current->CheckFlag(HInstruction::kFlexibleRepresentation)) {
- current->ChangeRepresentation(Representation::Tagged());
- }
- }
- }
-}
-
-
-void HGraph::MergeRemovableSimulates() {
- for (int i = 0; i < blocks()->length(); ++i) {
- HBasicBlock* block = blocks()->at(i);
- // Always reset the folding candidate at the start of a block.
- HSimulate* folding_candidate = NULL;
- // Nasty heuristic: Never remove the first simulate in a block. This
- // just so happens to have a beneficial effect on register allocation.
- bool first = true;
- for (HInstruction* current = block->first();
- current != NULL; current = current->next()) {
- if (current->IsLeaveInlined()) {
- // Never fold simulates from inlined environments into simulates
- // in the outer environment.
- // (Before each HEnterInlined, there is a non-foldable HSimulate
- // anyway, so we get the barrier in the other direction for free.)
- if (folding_candidate != NULL) {
- folding_candidate->DeleteAndReplaceWith(NULL);
- }
- folding_candidate = NULL;
- continue;
- }
- // If we have an HSimulate and a candidate, perform the folding.
- if (!current->IsSimulate()) continue;
- if (first) {
- first = false;
- continue;
- }
- HSimulate* current_simulate = HSimulate::cast(current);
- if (folding_candidate != NULL) {
- folding_candidate->MergeInto(current_simulate);
- folding_candidate->DeleteAndReplaceWith(NULL);
- folding_candidate = NULL;
- }
- // Check if the current simulate is a candidate for folding.
- if (current_simulate->previous()->HasObservableSideEffects() &&
- !current_simulate->next()->IsSimulate()) {
- continue;
- }
- if (!current_simulate->is_candidate_for_removal()) {
- continue;
- }
- folding_candidate = current_simulate;
- }
- }
-}
-
-
-void HGraph::InitializeInferredTypes() {
- HPhase phase("H_Inferring types", this);
- InitializeInferredTypes(0, this->blocks_.length() - 1);
-}
-
-
-void HGraph::InitializeInferredTypes(int from_inclusive, int to_inclusive) {
- for (int i = from_inclusive; i <= to_inclusive; ++i) {
- HBasicBlock* block = blocks_[i];
-
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); j++) {
- phis->at(j)->UpdateInferredType();
- }
-
- HInstruction* current = block->first();
- while (current != NULL) {
- current->UpdateInferredType();
- current = current->next();
- }
-
- if (block->IsLoopHeader()) {
- HBasicBlock* last_back_edge =
- block->loop_information()->GetLastBackEdge();
- InitializeInferredTypes(i + 1, last_back_edge->block_id());
- // Skip all blocks already processed by the recursive call.
- i = last_back_edge->block_id();
- // Update phis of the loop header now after the whole loop body is
- // guaranteed to be processed.
- ZoneList<HValue*> worklist(block->phis()->length(), zone());
- for (int j = 0; j < block->phis()->length(); ++j) {
- worklist.Add(block->phis()->at(j), zone());
- }
- InferTypes(&worklist);
- }
- }
-}
-
-
-void HGraph::PropagateMinusZeroChecks(HValue* value, BitVector* visited) {
- HValue* current = value;
- while (current != NULL) {
- if (visited->Contains(current->id())) return;
-
- // For phis, we must propagate the check to all of its inputs.
- if (current->IsPhi()) {
- visited->Add(current->id());
- HPhi* phi = HPhi::cast(current);
- for (int i = 0; i < phi->OperandCount(); ++i) {
- PropagateMinusZeroChecks(phi->OperandAt(i), visited);
- }
- break;
- }
-
- // For multiplication, division, and Math.min/max(), we must propagate
- // to the left and the right side.
- if (current->IsMul()) {
- HMul* mul = HMul::cast(current);
- mul->EnsureAndPropagateNotMinusZero(visited);
- PropagateMinusZeroChecks(mul->left(), visited);
- PropagateMinusZeroChecks(mul->right(), visited);
- } else if (current->IsDiv()) {
- HDiv* div = HDiv::cast(current);
- div->EnsureAndPropagateNotMinusZero(visited);
- PropagateMinusZeroChecks(div->left(), visited);
- PropagateMinusZeroChecks(div->right(), visited);
- } else if (current->IsMathMinMax()) {
- HMathMinMax* minmax = HMathMinMax::cast(current);
- visited->Add(minmax->id());
- PropagateMinusZeroChecks(minmax->left(), visited);
- PropagateMinusZeroChecks(minmax->right(), visited);
- }
-
- current = current->EnsureAndPropagateNotMinusZero(visited);
- }
-}
-
-
-void HGraph::InsertRepresentationChangeForUse(HValue* value,
- HValue* use_value,
- int use_index,
- Representation to) {
- // Insert the representation change right before its use. For phi-uses we
- // insert at the end of the corresponding predecessor.
- HInstruction* next = NULL;
- if (use_value->IsPhi()) {
- next = use_value->block()->predecessors()->at(use_index)->end();
- } else {
- next = HInstruction::cast(use_value);
- }
- // For constants we try to make the representation change at compile
- // time. When a representation change is not possible without loss of
- // information we treat constants like normal instructions and insert the
- // change instructions for them.
- HInstruction* new_value = NULL;
- bool is_truncating = use_value->CheckFlag(HValue::kTruncatingToInt32);
- bool deoptimize_on_undefined =
- use_value->CheckFlag(HValue::kDeoptimizeOnUndefined);
- if (value->IsConstant()) {
- HConstant* constant = HConstant::cast(value);
- // Try to create a new copy of the constant with the new representation.
- new_value = (is_truncating && to.IsInteger32())
- ? constant->CopyToTruncatedInt32(zone())
- : constant->CopyToRepresentation(to, zone());
- }
-
- if (new_value == NULL) {
- new_value = new(zone()) HChange(value, to,
- is_truncating, deoptimize_on_undefined);
- }
-
- new_value->InsertBefore(next);
- use_value->SetOperandAt(use_index, new_value);
-}
-
-
-void HGraph::InsertRepresentationChangesForValue(HValue* value) {
- Representation r = value->representation();
- if (r.IsNone()) return;
- if (value->HasNoUses()) return;
-
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- HValue* use_value = it.value();
- int use_index = it.index();
- Representation req = use_value->RequiredInputRepresentation(use_index);
- if (req.IsNone() || req.Equals(r)) continue;
- InsertRepresentationChangeForUse(value, use_value, use_index, req);
- }
- if (value->HasNoUses()) {
- ASSERT(value->IsConstant());
- value->DeleteAndReplaceWith(NULL);
- }
-
- // The only purpose of a HForceRepresentation is to represent the value
- // after the (possible) HChange instruction. We make it disappear.
- if (value->IsForceRepresentation()) {
- value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value());
- }
-}
-
-
-void HGraph::InsertRepresentationChanges() {
- HPhase phase("H_Representation changes", this);
-
- // Compute truncation flag for phis: Initially assume that all
- // int32-phis allow truncation and iteratively remove the ones that
- // are used in an operation that does not allow a truncating
- // conversion.
- // TODO(fschneider): Replace this with a worklist-based iteration.
- for (int i = 0; i < phi_list()->length(); i++) {
- HPhi* phi = phi_list()->at(i);
- if (phi->representation().IsInteger32()) {
- phi->SetFlag(HValue::kTruncatingToInt32);
- }
- }
- bool change = true;
- while (change) {
- change = false;
- for (int i = 0; i < phi_list()->length(); i++) {
- HPhi* phi = phi_list()->at(i);
- if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- // If a Phi is used as a non-truncating int32 or as a double,
- // clear its "truncating" flag.
- HValue* use = it.value();
- Representation input_representation =
- use->RequiredInputRepresentation(it.index());
- if ((input_representation.IsInteger32() &&
- !use->CheckFlag(HValue::kTruncatingToInt32)) ||
- input_representation.IsDouble()) {
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating because of #%d %s\n",
- phi->id(), it.value()->id(), it.value()->Mnemonic());
- }
- phi->ClearFlag(HValue::kTruncatingToInt32);
- change = true;
- break;
- }
- }
- }
- }
-
- for (int i = 0; i < blocks_.length(); ++i) {
- // Process phi instructions first.
- const ZoneList<HPhi*>* phis = blocks_[i]->phis();
- for (int j = 0; j < phis->length(); j++) {
- InsertRepresentationChangesForValue(phis->at(j));
- }
-
- // Process normal instructions.
- HInstruction* current = blocks_[i]->first();
- while (current != NULL) {
- HInstruction* next = current->next();
- InsertRepresentationChangesForValue(current);
- current = next;
- }
- }
-}
-
-
-void HGraph::RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi) {
- if (phi->CheckFlag(HValue::kDeoptimizeOnUndefined)) return;
- phi->SetFlag(HValue::kDeoptimizeOnUndefined);
- for (int i = 0; i < phi->OperandCount(); ++i) {
- HValue* input = phi->OperandAt(i);
- if (input->IsPhi()) {
- RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi::cast(input));
- }
- }
-}
-
-
-void HGraph::MarkDeoptimizeOnUndefined() {
- HPhase phase("H_MarkDeoptimizeOnUndefined", this);
- // Compute DeoptimizeOnUndefined flag for phis.
- // Any phi that can reach a use with DeoptimizeOnUndefined set must
- // have DeoptimizeOnUndefined set. Currently only HCompareIDAndBranch, with
- // double input representation, has this flag set.
- // The flag is used by HChange tagged->double, which must deoptimize
- // if one of its uses has this flag set.
- for (int i = 0; i < phi_list()->length(); i++) {
- HPhi* phi = phi_list()->at(i);
- if (phi->representation().IsDouble()) {
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- if (it.value()->CheckFlag(HValue::kDeoptimizeOnUndefined)) {
- RecursivelyMarkPhiDeoptimizeOnUndefined(phi);
- break;
- }
- }
- }
- }
-}
-
-
-// Discover instructions that can be marked with kUint32 flag allowing
-// them to produce full range uint32 values.
-class Uint32Analysis BASE_EMBEDDED {
- public:
- explicit Uint32Analysis(Zone* zone) : zone_(zone), phis_(4, zone) { }
-
- void Analyze(HInstruction* current);
-
- void UnmarkUnsafePhis();
-
- private:
- bool IsSafeUint32Use(HValue* val, HValue* use);
- bool Uint32UsesAreSafe(HValue* uint32val);
- bool CheckPhiOperands(HPhi* phi);
- void UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist);
-
- Zone* zone_;
- ZoneList<HPhi*> phis_;
-};
-
-
-bool Uint32Analysis::IsSafeUint32Use(HValue* val, HValue* use) {
- // Operations that operatate on bits are safe.
- if (use->IsBitwise() ||
- use->IsShl() ||
- use->IsSar() ||
- use->IsShr() ||
- use->IsBitNot()) {
- return true;
- } else if (use->IsChange() || use->IsSimulate()) {
- // Conversions and deoptimization have special support for unt32.
- return true;
- } else if (use->IsStoreKeyed()) {
- HStoreKeyed* store = HStoreKeyed::cast(use);
- if (store->is_external()) {
- // Storing a value into an external integer array is a bit level
- // operation.
- if (store->value() == val) {
- // Clamping or a conversion to double should have beed inserted.
- ASSERT(store->elements_kind() != EXTERNAL_PIXEL_ELEMENTS);
- ASSERT(store->elements_kind() != EXTERNAL_FLOAT_ELEMENTS);
- ASSERT(store->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS);
- return true;
- }
- }
- }
-
- return false;
-}
-
-
-// Iterate over all uses and verify that they are uint32 safe: either don't
-// distinguish between int32 and uint32 due to their bitwise nature or
-// have special support for uint32 values.
-// Encountered phis are optimisitically treated as safe uint32 uses,
-// marked with kUint32 flag and collected in the phis_ list. A separate
-// path will be performed later by UnmarkUnsafePhis to clear kUint32 from
-// phis that are not actually uint32-safe (it requries fix point iteration).
-bool Uint32Analysis::Uint32UsesAreSafe(HValue* uint32val) {
- bool collect_phi_uses = false;
- for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
-
- if (use->IsPhi()) {
- if (!use->CheckFlag(HInstruction::kUint32)) {
- // There is a phi use of this value from a phis that is not yet
- // collected in phis_ array. Separate pass is required.
- collect_phi_uses = true;
- }
-
- // Optimistically treat phis as uint32 safe.
- continue;
- }
-
- if (!IsSafeUint32Use(uint32val, use)) {
- return false;
- }
- }
-
- if (collect_phi_uses) {
- for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
-
- // There is a phi use of this value from a phis that is not yet
- // collected in phis_ array. Separate pass is required.
- if (use->IsPhi() && !use->CheckFlag(HInstruction::kUint32)) {
- use->SetFlag(HInstruction::kUint32);
- phis_.Add(HPhi::cast(use), zone_);
- }
- }
- }
-
- return true;
-}
-
-
-// Analyze instruction and mark it with kUint32 if all its uses are uint32
-// safe.
-void Uint32Analysis::Analyze(HInstruction* current) {
- if (Uint32UsesAreSafe(current)) current->SetFlag(HInstruction::kUint32);
-}
-
-
-// Check if all operands to the given phi are marked with kUint32 flag.
-bool Uint32Analysis::CheckPhiOperands(HPhi* phi) {
- if (!phi->CheckFlag(HInstruction::kUint32)) {
- // This phi is not uint32 safe. No need to check operands.
- return false;
- }
-
- for (int j = 0; j < phi->OperandCount(); j++) {
- HValue* operand = phi->OperandAt(j);
- if (!operand->CheckFlag(HInstruction::kUint32)) {
- // Lazyly mark constants that fit into uint32 range with kUint32 flag.
- if (operand->IsConstant() &&
- HConstant::cast(operand)->IsUint32()) {
- operand->SetFlag(HInstruction::kUint32);
- continue;
- }
-
- // This phi is not safe, some operands are not uint32 values.
- return false;
- }
- }
-
- return true;
-}
-
-
-// Remove kUint32 flag from the phi itself and its operands. If any operand
-// was a phi marked with kUint32 place it into a worklist for
-// transitive clearing of kUint32 flag.
-void Uint32Analysis::UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist) {
- phi->ClearFlag(HInstruction::kUint32);
- for (int j = 0; j < phi->OperandCount(); j++) {
- HValue* operand = phi->OperandAt(j);
- if (operand->CheckFlag(HInstruction::kUint32)) {
- operand->ClearFlag(HInstruction::kUint32);
- if (operand->IsPhi()) {
- worklist->Add(HPhi::cast(operand), zone_);
- }
- }
- }
-}
-
-
-void Uint32Analysis::UnmarkUnsafePhis() {
- // No phis were collected. Nothing to do.
- if (phis_.length() == 0) return;
-
- // Worklist used to transitively clear kUint32 from phis that
- // are used as arguments to other phis.
- ZoneList<HPhi*> worklist(phis_.length(), zone_);
-
- // Phi can be used as a uint32 value if and only if
- // all its operands are uint32 values and all its
- // uses are uint32 safe.
-
- // Iterate over collected phis and unmark those that
- // are unsafe. When unmarking phi unmark its operands
- // and add it to the worklist if it is a phi as well.
- // Phis that are still marked as safe are shifted down
- // so that all safe phis form a prefix of the phis_ array.
- int phi_count = 0;
- for (int i = 0; i < phis_.length(); i++) {
- HPhi* phi = phis_[i];
-
- if (CheckPhiOperands(phi) && Uint32UsesAreSafe(phi)) {
- phis_[phi_count++] = phi;
- } else {
- UnmarkPhi(phi, &worklist);
- }
- }
-
- // Now phis array contains only those phis that have safe
- // non-phi uses. Start transitively clearing kUint32 flag
- // from phi operands of discovered non-safe phies until
- // only safe phies are left.
- while (!worklist.is_empty()) {
- while (!worklist.is_empty()) {
- HPhi* phi = worklist.RemoveLast();
- UnmarkPhi(phi, &worklist);
- }
-
- // Check if any operands to safe phies were unmarked
- // turning a safe phi into unsafe. The same value
- // can flow into several phis.
- int new_phi_count = 0;
- for (int i = 0; i < phi_count; i++) {
- HPhi* phi = phis_[i];
-
- if (CheckPhiOperands(phi)) {
- phis_[new_phi_count++] = phi;
- } else {
- UnmarkPhi(phi, &worklist);
- }
- }
- phi_count = new_phi_count;
- }
-}
-
-
-void HGraph::ComputeSafeUint32Operations() {
- if (!FLAG_opt_safe_uint32_operations || uint32_instructions_ == NULL) {
- return;
- }
-
- Uint32Analysis analysis(zone());
- for (int i = 0; i < uint32_instructions_->length(); ++i) {
- HInstruction* current = uint32_instructions_->at(i);
- if (current->IsLinked() && current->representation().IsInteger32()) {
- analysis.Analyze(current);
- }
- }
-
- // Some phis might have been optimistically marked with kUint32 flag.
- // Remove this flag from those phis that are unsafe and propagate
- // this information transitively potentially clearing kUint32 flag
- // from some non-phi operations that are used as operands to unsafe phis.
- analysis.UnmarkUnsafePhis();
-}
-
-
-void HGraph::ComputeMinusZeroChecks() {
- BitVector visited(GetMaximumValueID(), zone());
- for (int i = 0; i < blocks_.length(); ++i) {
- for (HInstruction* current = blocks_[i]->first();
- current != NULL;
- current = current->next()) {
- if (current->IsChange()) {
- HChange* change = HChange::cast(current);
- // Propagate flags for negative zero checks upwards from conversions
- // int32-to-tagged and int32-to-double.
- Representation from = change->value()->representation();
- ASSERT(from.Equals(change->from()));
- if (from.IsInteger32()) {
- ASSERT(change->to().IsTagged() || change->to().IsDouble());
- ASSERT(visited.IsEmpty());
- PropagateMinusZeroChecks(change->value(), &visited);
- visited.Clear();
- }
- }
- }
- }
-}
-
-
-// Implementation of utility class to encapsulate the translation state for
-// a (possibly inlined) function.
-FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
- CompilationInfo* info,
- TypeFeedbackOracle* oracle,
- InliningKind inlining_kind)
- : owner_(owner),
- compilation_info_(info),
- oracle_(oracle),
- call_context_(NULL),
- inlining_kind_(inlining_kind),
- function_return_(NULL),
- test_context_(NULL),
- entry_(NULL),
- arguments_elements_(NULL),
- outer_(owner->function_state()) {
- if (outer_ != NULL) {
- // State for an inline function.
- if (owner->ast_context()->IsTest()) {
- HBasicBlock* if_true = owner->graph()->CreateBasicBlock();
- HBasicBlock* if_false = owner->graph()->CreateBasicBlock();
- if_true->MarkAsInlineReturnTarget();
- if_false->MarkAsInlineReturnTarget();
- TestContext* outer_test_context = TestContext::cast(owner->ast_context());
- Expression* cond = outer_test_context->condition();
- TypeFeedbackOracle* outer_oracle = outer_test_context->oracle();
- // The AstContext constructor pushed on the context stack. This newed
- // instance is the reason that AstContext can't be BASE_EMBEDDED.
- test_context_ =
- new TestContext(owner, cond, outer_oracle, if_true, if_false);
- } else {
- function_return_ = owner->graph()->CreateBasicBlock();
- function_return()->MarkAsInlineReturnTarget();
- }
- // Set this after possibly allocating a new TestContext above.
- call_context_ = owner->ast_context();
- }
-
- // Push on the state stack.
- owner->set_function_state(this);
-}
-
-
-FunctionState::~FunctionState() {
- delete test_context_;
- owner_->set_function_state(outer_);
-}
-
-
-// Implementation of utility classes to represent an expression's context in
-// the AST.
-AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
- : owner_(owner),
- kind_(kind),
- outer_(owner->ast_context()),
- for_typeof_(false) {
- owner->set_ast_context(this); // Push.
-#ifdef DEBUG
- ASSERT(owner->environment()->frame_type() == JS_FUNCTION);
- original_length_ = owner->environment()->length();
-#endif
-}
-
-
-AstContext::~AstContext() {
- owner_->set_ast_context(outer_); // Pop.
-}
-
-
-EffectContext::~EffectContext() {
- ASSERT(owner()->HasStackOverflow() ||
- owner()->current_block() == NULL ||
- (owner()->environment()->length() == original_length_ &&
- owner()->environment()->frame_type() == JS_FUNCTION));
-}
-
-
-ValueContext::~ValueContext() {
- ASSERT(owner()->HasStackOverflow() ||
- owner()->current_block() == NULL ||
- (owner()->environment()->length() == original_length_ + 1 &&
- owner()->environment()->frame_type() == JS_FUNCTION));
-}
-
-
-void EffectContext::ReturnValue(HValue* value) {
- // The value is simply ignored.
-}
-
-
-void ValueContext::ReturnValue(HValue* value) {
- // The value is tracked in the bailout environment, and communicated
- // through the environment as the result of the expression.
- if (!arguments_allowed() && value->CheckFlag(HValue::kIsArguments)) {
- owner()->Bailout("bad value context for arguments value");
- }
- owner()->Push(value);
-}
-
-
-void TestContext::ReturnValue(HValue* value) {
- BuildBranch(value);
-}
-
-
-void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
- ASSERT(!instr->IsControlInstruction());
- owner()->AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
- }
-}
-
-
-void EffectContext::ReturnControl(HControlInstruction* instr,
- BailoutId ast_id) {
- ASSERT(!instr->HasObservableSideEffects());
- HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
- HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
- instr->SetSuccessorAt(0, empty_true);
- instr->SetSuccessorAt(1, empty_false);
- owner()->current_block()->Finish(instr);
- HBasicBlock* join = owner()->CreateJoin(empty_true, empty_false, ast_id);
- owner()->set_current_block(join);
-}
-
-
-void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
- ASSERT(!instr->IsControlInstruction());
- if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
- return owner()->Bailout("bad value context for arguments object value");
- }
- owner()->AddInstruction(instr);
- owner()->Push(instr);
- if (instr->HasObservableSideEffects()) {
- owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
- }
-}
-
-
-void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
- ASSERT(!instr->HasObservableSideEffects());
- if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
- return owner()->Bailout("bad value context for arguments object value");
- }
- HBasicBlock* materialize_false = owner()->graph()->CreateBasicBlock();
- HBasicBlock* materialize_true = owner()->graph()->CreateBasicBlock();
- instr->SetSuccessorAt(0, materialize_true);
- instr->SetSuccessorAt(1, materialize_false);
- owner()->current_block()->Finish(instr);
- owner()->set_current_block(materialize_true);
- owner()->Push(owner()->graph()->GetConstantTrue());
- owner()->set_current_block(materialize_false);
- owner()->Push(owner()->graph()->GetConstantFalse());
- HBasicBlock* join =
- owner()->CreateJoin(materialize_true, materialize_false, ast_id);
- owner()->set_current_block(join);
-}
-
-
-void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
- ASSERT(!instr->IsControlInstruction());
- HOptimizedGraphBuilder* builder = owner();
- builder->AddInstruction(instr);
- // We expect a simulate after every expression with side effects, though
- // this one isn't actually needed (and wouldn't work if it were targeted).
- if (instr->HasObservableSideEffects()) {
- builder->Push(instr);
- builder->AddSimulate(ast_id, REMOVABLE_SIMULATE);
- builder->Pop();
- }
- BuildBranch(instr);
-}
-
-
-void TestContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
- ASSERT(!instr->HasObservableSideEffects());
- HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
- HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
- instr->SetSuccessorAt(0, empty_true);
- instr->SetSuccessorAt(1, empty_false);
- owner()->current_block()->Finish(instr);
- empty_true->Goto(if_true(), owner()->function_state());
- empty_false->Goto(if_false(), owner()->function_state());
- owner()->set_current_block(NULL);
-}
-
-
-void TestContext::BuildBranch(HValue* value) {
- // We expect the graph to be in edge-split form: there is no edge that
- // connects a branch node to a join node. We conservatively ensure that
- // property by always adding an empty block on the outgoing edges of this
- // branch.
- HOptimizedGraphBuilder* builder = owner();
- if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
- builder->Bailout("arguments object value in a test context");
- }
- if (value->IsConstant()) {
- HConstant* constant_value = HConstant::cast(value);
- if (constant_value->ToBoolean()) {
- builder->current_block()->Goto(if_true(), builder->function_state());
- } else {
- builder->current_block()->Goto(if_false(), builder->function_state());
- }
- builder->set_current_block(NULL);
- return;
- }
- HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
- HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
- TypeFeedbackId test_id = condition()->test_id();
- ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id));
- HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
- builder->current_block()->Finish(test);
-
- empty_true->Goto(if_true(), builder->function_state());
- empty_false->Goto(if_false(), builder->function_state());
- builder->set_current_block(NULL);
-}
-
-
-// HOptimizedGraphBuilder infrastructure for bailing out and checking bailouts.
-#define CHECK_BAILOUT(call) \
- do { \
- call; \
- if (HasStackOverflow()) return; \
- } while (false)
-
-
-#define CHECK_ALIVE(call) \
- do { \
- call; \
- if (HasStackOverflow() || current_block() == NULL) return; \
- } while (false)
-
-
-void HOptimizedGraphBuilder::Bailout(const char* reason) {
- info()->set_bailout_reason(reason);
- SetStackOverflow();
-}
-
-
-void HOptimizedGraphBuilder::VisitForEffect(Expression* expr) {
- EffectContext for_effect(this);
- Visit(expr);
-}
-
-
-void HOptimizedGraphBuilder::VisitForValue(Expression* expr,
- ArgumentsAllowedFlag flag) {
- ValueContext for_value(this, flag);
- Visit(expr);
-}
-
-
-void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
- ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
- for_value.set_for_typeof(true);
- Visit(expr);
-}
-
-
-
-void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
- HBasicBlock* true_block,
- HBasicBlock* false_block) {
- TestContext for_test(this, expr, oracle(), true_block, false_block);
- Visit(expr);
-}
-
-
-void HOptimizedGraphBuilder::VisitArgument(Expression* expr) {
- CHECK_ALIVE(VisitForValue(expr));
- Push(AddInstruction(new(zone()) HPushArgument(Pop())));
-}
-
-
-void HOptimizedGraphBuilder::VisitArgumentList(
- ZoneList<Expression*>* arguments) {
- for (int i = 0; i < arguments->length(); i++) {
- CHECK_ALIVE(VisitArgument(arguments->at(i)));
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitExpressions(
- ZoneList<Expression*>* exprs) {
- for (int i = 0; i < exprs->length(); ++i) {
- CHECK_ALIVE(VisitForValue(exprs->at(i)));
- }
-}
-
-
-bool HOptimizedGraphBuilder::BuildGraph() {
- Scope* scope = info()->scope();
- if (scope->HasIllegalRedeclaration()) {
- Bailout("function with illegal redeclaration");
- return false;
- }
- if (scope->calls_eval()) {
- Bailout("function calls eval");
- return false;
- }
- SetUpScope(scope);
-
- // Add an edge to the body entry. This is warty: the graph's start
- // environment will be used by the Lithium translation as the initial
- // environment on graph entry, but it has now been mutated by the
- // Hydrogen translation of the instructions in the start block. This
- // environment uses values which have not been defined yet. These
- // Hydrogen instructions will then be replayed by the Lithium
- // translation, so they cannot have an environment effect. The edge to
- // the body's entry block (along with some special logic for the start
- // block in HInstruction::InsertAfter) seals the start block from
- // getting unwanted instructions inserted.
- //
- // TODO(kmillikin): Fix this. Stop mutating the initial environment.
- // Make the Hydrogen instructions in the initial block into Hydrogen
- // values (but not instructions), present in the initial environment and
- // not replayed by the Lithium translation.
- HEnvironment* initial_env = environment()->CopyWithoutHistory();
- HBasicBlock* body_entry = CreateBasicBlock(initial_env);
- current_block()->Goto(body_entry);
- body_entry->SetJoinId(BailoutId::FunctionEntry());
- set_current_block(body_entry);
-
- // Handle implicit declaration of the function name in named function
- // expressions before other declarations.
- if (scope->is_function_scope() && scope->function() != NULL) {
- VisitVariableDeclaration(scope->function());
- }
- VisitDeclarations(scope->declarations());
- AddSimulate(BailoutId::Declarations());
-
- HValue* context = environment()->LookupContext();
- AddInstruction(
- new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
-
- VisitStatements(info()->function()->body());
- if (HasStackOverflow()) return false;
-
- if (current_block() != NULL) {
- HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined(),
- context);
- current_block()->FinishExit(instr);
- set_current_block(NULL);
- }
-
- // If the checksum of the number of type info changes is the same as the
- // last time this function was compiled, then this recompile is likely not
- // due to missing/inadequate type feedback, but rather too aggressive
- // optimization. Disable optimistic LICM in that case.
- Handle<Code> unoptimized_code(info()->shared_info()->code());
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Handle<TypeFeedbackInfo> type_info(
- TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
- int checksum = type_info->own_type_change_checksum();
- int composite_checksum = graph()->update_type_change_checksum(checksum);
- graph()->set_use_optimistic_licm(
- !type_info->matches_inlined_type_change_checksum(composite_checksum));
- type_info->set_inlined_type_change_checksum(composite_checksum);
-
- return true;
-}
-
-
-void HGraph::GlobalValueNumbering() {
- // Perform common subexpression elimination and loop-invariant code motion.
- if (FLAG_use_gvn) {
- HPhase phase("H_Global value numbering", this);
- HGlobalValueNumberer gvn(this, info());
- bool removed_side_effects = gvn.Analyze();
- // Trigger a second analysis pass to further eliminate duplicate values that
- // could only be discovered by removing side-effect-generating instructions
- // during the first pass.
- if (FLAG_smi_only_arrays && removed_side_effects) {
- removed_side_effects = gvn.Analyze();
- ASSERT(!removed_side_effects);
- }
- }
-}
-
-
-bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
- *bailout_reason = SmartArrayPointer<char>();
- OrderBlocks();
- AssignDominators();
-
- // We need to create a HConstant "zero" now so that GVN will fold every
- // zero-valued constant in the graph together.
- // The constant is needed to make idef-based bounds check work: the pass
- // evaluates relations with "zero" and that zero cannot be created after GVN.
- GetConstant0();
-
-#ifdef DEBUG
- // Do a full verify after building the graph and computing dominators.
- Verify(true);
-#endif
-
- PropagateDeoptimizingMark();
- if (!CheckConstPhiUses()) {
- *bailout_reason = SmartArrayPointer<char>(StrDup(
- "Unsupported phi use of const variable"));
- return false;
- }
- EliminateRedundantPhis();
- if (!CheckArgumentsPhiUses()) {
- *bailout_reason = SmartArrayPointer<char>(StrDup(
- "Unsupported phi use of arguments"));
- return false;
- }
- if (FLAG_eliminate_dead_phis) EliminateUnreachablePhis();
- CollectPhis();
-
- if (has_osr_loop_entry()) {
- const ZoneList<HPhi*>* phis = osr_loop_entry()->phis();
- for (int j = 0; j < phis->length(); j++) {
- HPhi* phi = phis->at(j);
- osr_values()->at(phi->merged_index())->set_incoming_value(phi);
- }
- }
-
- HInferRepresentation rep(this);
- rep.Analyze();
-
- // Remove HSimulate instructions that have turned out not to be needed
- // after all by folding them into the following HSimulate.
- // This must happen after inferring representations.
- MergeRemovableSimulates();
-
- MarkDeoptimizeOnUndefined();
- InsertRepresentationChanges();
-
- InitializeInferredTypes();
-
- // Must be performed before canonicalization to ensure that Canonicalize
- // will not remove semantically meaningful ToInt32 operations e.g. BIT_OR with
- // zero.
- ComputeSafeUint32Operations();
-
- Canonicalize();
-
- GlobalValueNumbering();
-
- if (FLAG_use_range) {
- HRangeAnalysis rangeAnalysis(this);
- rangeAnalysis.Analyze();
- }
- ComputeMinusZeroChecks();
-
- // Eliminate redundant stack checks on backwards branches.
- HStackCheckEliminator sce(this);
- sce.Process();
-
- if (FLAG_idefs) SetupInformativeDefinitions();
- if (FLAG_array_bounds_checks_elimination && !FLAG_idefs) {
- EliminateRedundantBoundsChecks();
- }
- if (FLAG_array_index_dehoisting) DehoistSimpleArrayIndexComputations();
- if (FLAG_dead_code_elimination) DeadCodeElimination();
-
- RestoreActualValues();
-
- return true;
-}
-
-
-void HGraph::SetupInformativeDefinitionsInBlock(HBasicBlock* block) {
- for (int phi_index = 0; phi_index < block->phis()->length(); phi_index++) {
- HPhi* phi = block->phis()->at(phi_index);
- phi->AddInformativeDefinitions();
- phi->SetFlag(HValue::kIDefsProcessingDone);
- // We do not support phis that "redefine just one operand".
- ASSERT(!phi->IsInformativeDefinition());
- }
-
- for (HInstruction* i = block->first(); i != NULL; i = i->next()) {
- i->AddInformativeDefinitions();
- i->SetFlag(HValue::kIDefsProcessingDone);
- i->UpdateRedefinedUsesWhileSettingUpInformativeDefinitions();
- }
-}
-
-
-// This method is recursive, so if its stack frame is large it could
-// cause a stack overflow.
-// To keep the individual stack frames small we do the actual work inside
-// SetupInformativeDefinitionsInBlock();
-void HGraph::SetupInformativeDefinitionsRecursively(HBasicBlock* block) {
- SetupInformativeDefinitionsInBlock(block);
- for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
- SetupInformativeDefinitionsRecursively(block->dominated_blocks()->at(i));
- }
-}
-
-
-void HGraph::SetupInformativeDefinitions() {
- HPhase phase("H_Setup informative definitions", this);
- SetupInformativeDefinitionsRecursively(entry_block());
-}
-
-
-// We try to "factor up" HBoundsCheck instructions towards the root of the
-// dominator tree.
-// For now we handle checks where the index is like "exp + int32value".
-// If in the dominator tree we check "exp + v1" and later (dominated)
-// "exp + v2", if v2 <= v1 we can safely remove the second check, and if
-// v2 > v1 we can use v2 in the 1st check and again remove the second.
-// To do so we keep a dictionary of all checks where the key if the pair
-// "exp, length".
-// The class BoundsCheckKey represents this key.
-class BoundsCheckKey : public ZoneObject {
- public:
- HValue* IndexBase() const { return index_base_; }
- HValue* Length() const { return length_; }
-
- uint32_t Hash() {
- return static_cast<uint32_t>(index_base_->Hashcode() ^ length_->Hashcode());
- }
-
- static BoundsCheckKey* Create(Zone* zone,
- HBoundsCheck* check,
- int32_t* offset) {
- if (!check->index()->representation().IsInteger32()) return NULL;
-
- HValue* index_base = NULL;
- HConstant* constant = NULL;
- bool is_sub = false;
-
- if (check->index()->IsAdd()) {
- HAdd* index = HAdd::cast(check->index());
- if (index->left()->IsConstant()) {
- constant = HConstant::cast(index->left());
- index_base = index->right();
- } else if (index->right()->IsConstant()) {
- constant = HConstant::cast(index->right());
- index_base = index->left();
- }
- } else if (check->index()->IsSub()) {
- HSub* index = HSub::cast(check->index());
- is_sub = true;
- if (index->left()->IsConstant()) {
- constant = HConstant::cast(index->left());
- index_base = index->right();
- } else if (index->right()->IsConstant()) {
- constant = HConstant::cast(index->right());
- index_base = index->left();
- }
- }
-
- if (constant != NULL && constant->HasInteger32Value()) {
- *offset = is_sub ? - constant->Integer32Value()
- : constant->Integer32Value();
- } else {
- *offset = 0;
- index_base = check->index();
- }
-
- return new(zone) BoundsCheckKey(index_base, check->length());
- }
-
- private:
- BoundsCheckKey(HValue* index_base, HValue* length)
- : index_base_(index_base),
- length_(length) { }
-
- HValue* index_base_;
- HValue* length_;
-};
-
-
-// Data about each HBoundsCheck that can be eliminated or moved.
-// It is the "value" in the dictionary indexed by "base-index, length"
-// (the key is BoundsCheckKey).
-// We scan the code with a dominator tree traversal.
-// Traversing the dominator tree we keep a stack (implemented as a singly
-// linked list) of "data" for each basic block that contains a relevant check
-// with the same key (the dictionary holds the head of the list).
-// We also keep all the "data" created for a given basic block in a list, and
-// use it to "clean up" the dictionary when backtracking in the dominator tree
-// traversal.
-// Doing this each dictionary entry always directly points to the check that
-// is dominating the code being examined now.
-// We also track the current "offset" of the index expression and use it to
-// decide if any check is already "covered" (so it can be removed) or not.
// Per-basic-block bookkeeping for one BoundsCheckKey (index base + length)
// during the dominator-tree walk of the bounds-check elimination pass.
// It records the [lower_offset_, upper_offset_] range already guarded by
// checks, and which HBoundsCheck instruction guards each end of the range.
class BoundsCheckBbData: public ZoneObject {
 public:
  BoundsCheckKey* Key() const { return key_; }
  int32_t LowerOffset() const { return lower_offset_; }
  int32_t UpperOffset() const { return upper_offset_; }
  HBasicBlock* BasicBlock() const { return basic_block_; }
  HBoundsCheck* LowerCheck() const { return lower_check_; }
  HBoundsCheck* UpperCheck() const { return upper_check_; }
  // Link to the next data created for the same basic block (used to undo
  // table modifications when backtracking in the dominator tree).
  BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
  // Data that was shadowed by this one in the table (restored on backtrack).
  BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }

  // True if offset lies inside the range already guarded by the existing
  // lower and upper checks.
  bool OffsetIsCovered(int32_t offset) const {
    return offset >= LowerOffset() && offset <= UpperOffset();
  }

  bool HasSingleCheck() { return lower_check_ == upper_check_; }

  // The goal of this method is to modify either upper_offset_ or
  // lower_offset_ so that also new_offset is covered (the covered
  // range grows).
  //
  // The precondition is that new_check follows UpperCheck() and
  // LowerCheck() in the same basic block, and that new_offset is not
  // covered (otherwise we could simply remove new_check).
  //
  // If HasSingleCheck() is true then new_check is added as "second check"
  // (either upper or lower; note that HasSingleCheck() becomes false).
  // Otherwise one of the current checks is modified so that it also covers
  // new_offset, and new_check is removed.
  //
  // If the check cannot be modified because the context is unknown it
  // returns false, otherwise it returns true.
  bool CoverCheck(HBoundsCheck* new_check,
                  int32_t new_offset) {
    ASSERT(new_check->index()->representation().IsInteger32());
    bool keep_new_check = false;

    if (new_offset > upper_offset_) {
      upper_offset_ = new_offset;
      if (HasSingleCheck()) {
        keep_new_check = true;
        upper_check_ = new_check;
      } else {
        // Rewrite the existing upper check's index to IndexBase + new_offset
        // so that it also covers the new offset.
        bool result = BuildOffsetAdd(upper_check_,
                                     &added_upper_index_,
                                     &added_upper_offset_,
                                     Key()->IndexBase(),
                                     new_check->index()->representation(),
                                     new_offset);
        if (!result) return false;
        upper_check_->ReplaceAllUsesWith(upper_check_->index());
        upper_check_->SetOperandAt(0, added_upper_index_);
      }
    } else if (new_offset < lower_offset_) {
      lower_offset_ = new_offset;
      if (HasSingleCheck()) {
        keep_new_check = true;
        lower_check_ = new_check;
      } else {
        // Symmetric to the upper case: widen the existing lower check.
        bool result = BuildOffsetAdd(lower_check_,
                                     &added_lower_index_,
                                     &added_lower_offset_,
                                     Key()->IndexBase(),
                                     new_check->index()->representation(),
                                     new_offset);
        if (!result) return false;
        lower_check_->ReplaceAllUsesWith(lower_check_->index());
        lower_check_->SetOperandAt(0, added_lower_index_);
      }
    } else {
      // Precondition violated: new_offset was already covered.
      ASSERT(false);
    }

    if (!keep_new_check) {
      new_check->DeleteAndReplaceWith(new_check->ActualValue());
    }

    return true;
  }

  // Drops any "index + 0" add instructions this data may have introduced.
  void RemoveZeroOperations() {
    RemoveZeroAdd(&added_lower_index_, &added_lower_offset_);
    RemoveZeroAdd(&added_upper_index_, &added_upper_offset_);
  }

  BoundsCheckBbData(BoundsCheckKey* key,
                    int32_t lower_offset,
                    int32_t upper_offset,
                    HBasicBlock* bb,
                    HBoundsCheck* lower_check,
                    HBoundsCheck* upper_check,
                    BoundsCheckBbData* next_in_bb,
                    BoundsCheckBbData* father_in_dt)
      : key_(key),
        lower_offset_(lower_offset),
        upper_offset_(upper_offset),
        basic_block_(bb),
        lower_check_(lower_check),
        upper_check_(upper_check),
        added_lower_index_(NULL),
        added_lower_offset_(NULL),
        added_upper_index_(NULL),
        added_upper_offset_(NULL),
        next_in_bb_(next_in_bb),
        father_in_dt_(father_in_dt) { }

 private:
  BoundsCheckKey* key_;
  int32_t lower_offset_;
  int32_t upper_offset_;
  HBasicBlock* basic_block_;
  HBoundsCheck* lower_check_;
  HBoundsCheck* upper_check_;
  // Adds (and their constant operands) synthesized by BuildOffsetAdd; kept
  // so they can be updated or removed later.
  HInstruction* added_lower_index_;
  HConstant* added_lower_offset_;
  HInstruction* added_upper_index_;
  HConstant* added_upper_offset_;
  BoundsCheckBbData* next_in_bb_;
  BoundsCheckBbData* father_in_dt_;

  // Given an existing add instruction and a bounds check it tries to
  // find the current context (either of the add or of the check index).
  HValue* IndexContext(HInstruction* add, HBoundsCheck* check) {
    if (add != NULL && add->IsAdd()) {
      return HAdd::cast(add)->context();
    }
    if (check->index()->IsBinaryOperation()) {
      return HBinaryOperation::cast(check->index())->context();
    }
    return NULL;
  }

  // Builds (or updates) an "IndexBase + new_offset" add that feeds check.
  // This function returns false if it cannot build the add because the
  // current context cannot be determined.
  bool BuildOffsetAdd(HBoundsCheck* check,
                      HInstruction** add,
                      HConstant** constant,
                      HValue* original_value,
                      Representation representation,
                      int32_t new_offset) {
    HValue* index_context = IndexContext(*add, check);
    if (index_context == NULL) return false;

    HConstant* new_constant = new(BasicBlock()->zone())
        HConstant(new_offset, Representation::Integer32());
    if (*add == NULL) {
      // First time: create the add in front of the check.
      new_constant->InsertBefore(check);
      (*add) = HAdd::New(
          BasicBlock()->zone(), index_context, original_value, new_constant);
      (*add)->AssumeRepresentation(representation);
      (*add)->InsertBefore(check);
    } else {
      // Subsequent times: just swap in the new constant operand.
      new_constant->InsertBefore(*add);
      (*constant)->DeleteAndReplaceWith(new_constant);
    }
    *constant = new_constant;
    return true;
  }

  // Removes a synthesized add whose constant operand turned out to be zero.
  void RemoveZeroAdd(HInstruction** add, HConstant** constant) {
    if (*add != NULL && (*add)->IsAdd() && (*constant)->Integer32Value() == 0) {
      (*add)->DeleteAndReplaceWith(HAdd::cast(*add)->left());
      (*constant)->DeleteAndReplaceWith(NULL);
    }
  }
};
-
-
-static bool BoundsCheckKeyMatch(void* key1, void* key2) {
- BoundsCheckKey* k1 = static_cast<BoundsCheckKey*>(key1);
- BoundsCheckKey* k2 = static_cast<BoundsCheckKey*>(key2);
- return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length();
-}
-
-
// Hash table mapping a BoundsCheckKey to the BoundsCheckBbData currently
// active for that key during the dominator-tree walk.  Thin typed wrapper
// around ZoneHashMap.
class BoundsCheckTable : private ZoneHashMap {
 public:
  // Returns the address of the table slot for key, creating an empty
  // (NULL-valued) slot if the key was not present.
  BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key, Zone* zone) {
    return reinterpret_cast<BoundsCheckBbData**>(
        &(Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value));
  }

  // Unconditionally (re)binds key to data.
  void Insert(BoundsCheckKey* key, BoundsCheckBbData* data, Zone* zone) {
    Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value = data;
  }

  void Delete(BoundsCheckKey* key) {
    Remove(key, key->Hash());
  }

  explicit BoundsCheckTable(Zone* zone)
      : ZoneHashMap(BoundsCheckKeyMatch, ZoneHashMap::kDefaultHashMapCapacity,
                    ZoneAllocationPolicy(zone)) { }
};
-
-
-// Eliminates checks in bb and recursively in the dominated blocks.
-// Also replace the results of check instructions with the original value, if
-// the result is used. This is safe now, since we don't do code motion after
-// this point. It enables better register allocation since the value produced
-// by check instructions is really a copy of the original value.
void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
                                            BoundsCheckTable* table) {
  // Table entries created while scanning this block; used to undo the table
  // modifications when backtracking out of bb in the dominator tree.
  BoundsCheckBbData* bb_data_list = NULL;

  for (HInstruction* i = bb->first(); i != NULL; i = i->next()) {
    if (!i->IsBoundsCheck()) continue;

    HBoundsCheck* check = HBoundsCheck::cast(i);
    int32_t offset;
    BoundsCheckKey* key =
        BoundsCheckKey::Create(zone(), check, &offset);
    if (key == NULL) continue;
    BoundsCheckBbData** data_p = table->LookupOrInsert(key, zone());
    BoundsCheckBbData* data = *data_p;
    if (data == NULL) {
      // First check seen for this key: start a new covered range
      // [offset, offset].
      bb_data_list = new(zone()) BoundsCheckBbData(key,
                                                   offset,
                                                   offset,
                                                   bb,
                                                   check,
                                                   check,
                                                   bb_data_list,
                                                   NULL);
      *data_p = bb_data_list;
    } else if (data->OffsetIsCovered(offset)) {
      // A dominating check already covers this offset: drop the check.
      check->DeleteAndReplaceWith(check->ActualValue());
    } else if (data->BasicBlock() != bb ||
               !data->CoverCheck(check, offset)) {
      // If the check is in the current BB we try to modify it by calling
      // "CoverCheck", but if also that fails we record the current offsets
      // in a new data instance because from now on they are covered.
      int32_t new_lower_offset = offset < data->LowerOffset()
          ? offset
          : data->LowerOffset();
      int32_t new_upper_offset = offset > data->UpperOffset()
          ? offset
          : data->UpperOffset();
      bb_data_list = new(zone()) BoundsCheckBbData(key,
                                                   new_lower_offset,
                                                   new_upper_offset,
                                                   bb,
                                                   data->LowerCheck(),
                                                   data->UpperCheck(),
                                                   bb_data_list,
                                                   data);
      table->Insert(key, bb_data_list, zone());
    }
  }

  // Recurse into the dominator-tree children.
  for (int i = 0; i < bb->dominated_blocks()->length(); ++i) {
    EliminateRedundantBoundsChecks(bb->dominated_blocks()->at(i), table);
  }

  // Backtrack: restore the table entries this block shadowed, and clean up
  // any zero-offset adds that were synthesized along the way.
  for (BoundsCheckBbData* data = bb_data_list;
       data != NULL;
       data = data->NextInBasicBlock()) {
    data->RemoveZeroOperations();
    if (data->FatherInDominatorTree()) {
      table->Insert(data->Key(), data->FatherInDominatorTree(), zone());
    } else {
      table->Delete(data->Key());
    }
  }
}
-
-
-void HGraph::EliminateRedundantBoundsChecks() {
- HPhase phase("H_Eliminate bounds checks", this);
- BoundsCheckTable checks_table(zone());
- EliminateRedundantBoundsChecks(entry_block(), &checks_table);
-}
-
-
-static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
- HValue* index = array_operation->GetKey()->ActualValue();
- if (!index->representation().IsInteger32()) return;
-
- HConstant* constant;
- HValue* subexpression;
- int32_t sign;
- if (index->IsAdd()) {
- sign = 1;
- HAdd* add = HAdd::cast(index);
- if (add->left()->IsConstant()) {
- subexpression = add->right();
- constant = HConstant::cast(add->left());
- } else if (add->right()->IsConstant()) {
- subexpression = add->left();
- constant = HConstant::cast(add->right());
- } else {
- return;
- }
- } else if (index->IsSub()) {
- sign = -1;
- HSub* sub = HSub::cast(index);
- if (sub->left()->IsConstant()) {
- subexpression = sub->right();
- constant = HConstant::cast(sub->left());
- } else if (sub->right()->IsConstant()) {
- subexpression = sub->left();
- constant = HConstant::cast(sub->right());
- } return;
- } else {
- return;
- }
-
- if (!constant->HasInteger32Value()) return;
- int32_t value = constant->Integer32Value() * sign;
- // We limit offset values to 30 bits because we want to avoid the risk of
- // overflows when the offset is added to the object header size.
- if (value >= 1 << 30 || value < 0) return;
- array_operation->SetKey(subexpression);
- if (index->HasNoUses()) {
- index->DeleteAndReplaceWith(NULL);
- }
- ASSERT(value >= 0);
- array_operation->SetIndexOffset(static_cast<uint32_t>(value));
- array_operation->SetDehoisted(true);
-}
-
-
-void HGraph::DehoistSimpleArrayIndexComputations() {
- HPhase phase("H_Dehoist index computations", this);
- for (int i = 0; i < blocks()->length(); ++i) {
- for (HInstruction* instr = blocks()->at(i)->first();
- instr != NULL;
- instr = instr->next()) {
- ArrayInstructionInterface* array_instruction = NULL;
- if (instr->IsLoadKeyed()) {
- HLoadKeyed* op = HLoadKeyed::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsStoreKeyed()) {
- HStoreKeyed* op = HStoreKeyed::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else {
- continue;
- }
- DehoistArrayIndex(array_instruction);
- }
- }
-}
-
-
-void HGraph::DeadCodeElimination() {
- HPhase phase("H_Dead code elimination", this);
- ZoneList<HInstruction*> worklist(blocks_.length(), zone());
- for (int i = 0; i < blocks()->length(); ++i) {
- for (HInstruction* instr = blocks()->at(i)->first();
- instr != NULL;
- instr = instr->next()) {
- if (instr->IsDead()) worklist.Add(instr, zone());
- }
- }
-
- while (!worklist.is_empty()) {
- HInstruction* instr = worklist.RemoveLast();
- if (FLAG_trace_dead_code_elimination) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- instr->PrintNameTo(&stream);
- stream.Add(" = ");
- instr->PrintTo(&stream);
- PrintF("[removing dead instruction %s]\n", *stream.ToCString());
- }
- instr->DeleteAndReplaceWith(NULL);
- for (int i = 0; i < instr->OperandCount(); ++i) {
- HValue* operand = instr->OperandAt(i);
- if (operand->IsDead()) worklist.Add(HInstruction::cast(operand), zone());
- }
- }
-}
-
-
// Replaces uses of "informative definitions" (instructions that merely
// redefine an existing value, e.g. check results) with the actual value
// they stand for.  Safe here because no code motion happens after this
// point, and it improves register allocation.
void HGraph::RestoreActualValues() {
  HPhase phase("H_Restore actual values", this);

  for (int block_index = 0; block_index < blocks()->length(); block_index++) {
    HBasicBlock* block = blocks()->at(block_index);

#ifdef DEBUG
    // Phis are never informative definitions: each must be its own
    // actual value.
    for (int i = 0; i < block->phis()->length(); i++) {
      HPhi* phi = block->phis()->at(i);
      ASSERT(phi->ActualValue() == phi);
    }
#endif

    for (HInstruction* instruction = block->first();
        instruction != NULL;
        instruction = instruction->next()) {
      if (instruction->ActualValue() != instruction) {
        ASSERT(instruction->IsInformativeDefinition());
        if (instruction->IsPurelyInformativeDefinition()) {
          // No side effects at all: delete the instruction outright.
          instruction->DeleteAndReplaceWith(instruction->RedefinedOperand());
        } else {
          // Keep the instruction (it has an effect) but redirect its uses.
          instruction->ReplaceAllUsesWith(instruction->ActualValue());
        }
      }
    }
  }
}
-
-
// Adds a phi to the current basic block.  Requires an open (non-NULL)
// current block.
void HOptimizedGraphBuilder::AddPhi(HPhi* instr) {
  ASSERT(current_block() != NULL);
  current_block()->AddPhi(instr);
}
-
-
// Pushes instr onto the environment's expression stack and appends it to
// the current block.  The push must happen first so the environment already
// reflects the value when the instruction's simulate is recorded.
void HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) {
  Push(instr);
  AddInstruction(instr);
}
-
-
-void HOptimizedGraphBuilder::AddSoftDeoptimize() {
- if (FLAG_always_opt) return;
- if (current_block()->IsDeoptimizing()) return;
- AddInstruction(new(zone()) HSoftDeoptimize());
- current_block()->MarkAsDeoptimizing();
- graph()->set_has_soft_deoptimize(true);
-}
-
-
-template <class Instruction>
-HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
- int count = call->argument_count();
- ZoneList<HValue*> arguments(count, zone());
- for (int i = 0; i < count; ++i) {
- arguments.Add(Pop(), zone());
- }
-
- while (!arguments.is_empty()) {
- AddInstruction(new(zone()) HPushArgument(arguments.RemoveLast()));
- }
- return call;
-}
-
-
// Initializes the graph's environment for the function being compiled:
// creates the undefined constant and the arguments object, binds the
// receiver and parameters, the context, and initializes locals/specials
// to undefined.  Bails out for context-allocated arguments.
void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
  HConstant* undefined_constant = new(zone()) HConstant(
      isolate()->factory()->undefined_value(), Representation::Tagged());
  AddInstruction(undefined_constant);
  graph()->set_undefined_constant(undefined_constant);

  HArgumentsObject* object = new(zone()) HArgumentsObject;
  AddInstruction(object);
  graph()->SetArgumentsObject(object);

  // Set the initial values of parameters including "this". "This" has
  // parameter index 0.
  ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());

  for (int i = 0; i < environment()->parameter_count(); ++i) {
    HInstruction* parameter = AddInstruction(new(zone()) HParameter(i));
    environment()->Bind(i, parameter);
  }

  // First special is HContext.
  HInstruction* context = AddInstruction(new(zone()) HContext);
  environment()->BindContext(context);

  // Initialize specials and locals to undefined.
  for (int i = environment()->parameter_count() + 1;
       i < environment()->length();
       ++i) {
    environment()->Bind(i, undefined_constant);
  }

  // Handle the arguments and arguments shadow variables specially (they do
  // not have declarations).
  if (scope->arguments() != NULL) {
    if (!scope->arguments()->IsStackAllocated()) {
      return Bailout("context-allocated arguments");
    }

    environment()->Bind(scope->arguments(),
                        graph()->GetArgumentsObject());
  }
}
-
-
-void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
- for (int i = 0; i < statements->length(); i++) {
- CHECK_ALIVE(Visit(statements->at(i)));
- }
-}
-
-
// Builds the graph for a statement block.  Blocks with their own scope
// (let declarations) are not supported and bail out.
void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
  ASSERT(!HasStackOverflow());
  ASSERT(current_block() != NULL);
  ASSERT(current_block()->HasPredecessor());
  if (stmt->scope() != NULL) {
    return Bailout("ScopedBlock");
  }
  BreakAndContinueInfo break_info(stmt);
  { BreakAndContinueScope push(&break_info, this);
    CHECK_BAILOUT(VisitStatements(stmt->statements()));
  }
  // If any statement broke out of the block, join the break edges with the
  // fall-through and continue there.
  HBasicBlock* break_block = break_info.break_block();
  if (break_block != NULL) {
    if (current_block() != NULL) current_block()->Goto(break_block);
    break_block->SetJoinId(stmt->ExitId());
    set_current_block(break_block);
  }
}
-
-
// An expression statement evaluates its expression purely for its side
// effects; the value is discarded.
void HOptimizedGraphBuilder::VisitExpressionStatement(
    ExpressionStatement* stmt) {
  ASSERT(!HasStackOverflow());
  ASSERT(current_block() != NULL);
  ASSERT(current_block()->HasPredecessor());
  VisitForEffect(stmt->expression());
}
-
-
// An empty statement generates no code; only the builder invariants are
// checked.
void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
  ASSERT(!HasStackOverflow());
  ASSERT(current_block() != NULL);
  ASSERT(current_block()->HasPredecessor());
}
-
-
// Builds the graph for an if statement.  Statically-known conditions emit
// only the taken branch; otherwise both arms are built and joined.
void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
  ASSERT(!HasStackOverflow());
  ASSERT(current_block() != NULL);
  ASSERT(current_block()->HasPredecessor());
  if (stmt->condition()->ToBooleanIsTrue()) {
    AddSimulate(stmt->ThenId());
    Visit(stmt->then_statement());
  } else if (stmt->condition()->ToBooleanIsFalse()) {
    AddSimulate(stmt->ElseId());
    Visit(stmt->else_statement());
  } else {
    HBasicBlock* cond_true = graph()->CreateBasicBlock();
    HBasicBlock* cond_false = graph()->CreateBasicBlock();
    CHECK_BAILOUT(VisitForControl(stmt->condition(), cond_true, cond_false));

    // An arm whose block gained no predecessor is unreachable (the
    // condition folded away that edge); represent it as NULL for the join.
    if (cond_true->HasPredecessor()) {
      cond_true->SetJoinId(stmt->ThenId());
      set_current_block(cond_true);
      CHECK_BAILOUT(Visit(stmt->then_statement()));
      cond_true = current_block();
    } else {
      cond_true = NULL;
    }

    if (cond_false->HasPredecessor()) {
      cond_false->SetJoinId(stmt->ElseId());
      set_current_block(cond_false);
      CHECK_BAILOUT(Visit(stmt->else_statement()));
      cond_false = current_block();
    } else {
      cond_false = NULL;
    }

    HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->IfId());
    set_current_block(join);
  }
}
-
-
-HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
- BreakableStatement* stmt,
- BreakType type,
- int* drop_extra) {
- *drop_extra = 0;
- BreakAndContinueScope* current = this;
- while (current != NULL && current->info()->target() != stmt) {
- *drop_extra += current->info()->drop_extra();
- current = current->next();
- }
- ASSERT(current != NULL); // Always found (unless stack is malformed).
-
- if (type == BREAK) {
- *drop_extra += current->info()->drop_extra();
- }
-
- HBasicBlock* block = NULL;
- switch (type) {
- case BREAK:
- block = current->info()->break_block();
- if (block == NULL) {
- block = current->owner()->graph()->CreateBasicBlock();
- current->info()->set_break_block(block);
- }
- break;
-
- case CONTINUE:
- block = current->info()->continue_block();
- if (block == NULL) {
- block = current->owner()->graph()->CreateBasicBlock();
- current->info()->set_continue_block(block);
- }
- break;
- }
-
- return block;
-}
-
-
-void HOptimizedGraphBuilder::VisitContinueStatement(
- ContinueStatement* stmt) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- int drop_extra = 0;
- HBasicBlock* continue_block = break_scope()->Get(stmt->target(),
- CONTINUE,
- &drop_extra);
- Drop(drop_extra);
- current_block()->Goto(continue_block);
- set_current_block(NULL);
-}
-
-
-void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- int drop_extra = 0;
- HBasicBlock* break_block = break_scope()->Get(stmt->target(),
- BREAK,
- &drop_extra);
- Drop(drop_extra);
- current_block()->Goto(break_block);
- set_current_block(NULL);
-}
-
-
// Builds the graph for a return statement.  The shape depends on whether
// we are in the outermost function (a real HReturn) or returning from an
// inlined call, and for inlined calls on the inlining kind (normal call,
// construct call, or setter call).
void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
  ASSERT(!HasStackOverflow());
  ASSERT(current_block() != NULL);
  ASSERT(current_block()->HasPredecessor());
  FunctionState* state = function_state();
  AstContext* context = call_context();
  if (context == NULL) {
    // Not an inlined return, so an actual one.
    CHECK_ALIVE(VisitForValue(stmt->expression()));
    HValue* result = environment()->Pop();
    current_block()->FinishExit(new(zone()) HReturn(
        result,
        environment()->LookupContext()));
  } else if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
    // Return from an inlined construct call. In a test context the return value
    // will always evaluate to true, in a value context the return value needs
    // to be a JSObject.
    if (context->IsTest()) {
      TestContext* test = TestContext::cast(context);
      CHECK_ALIVE(VisitForEffect(stmt->expression()));
      current_block()->Goto(test->if_true(), state);
    } else if (context->IsEffect()) {
      CHECK_ALIVE(VisitForEffect(stmt->expression()));
      current_block()->Goto(function_return(), state);
    } else {
      ASSERT(context->IsValue());
      CHECK_ALIVE(VisitForValue(stmt->expression()));
      // Per the construct-call semantics: if the returned value is a spec
      // object it becomes the result, otherwise the receiver does.
      HValue* return_value = Pop();
      HValue* receiver = environment()->arguments_environment()->Lookup(0);
      HHasInstanceTypeAndBranch* typecheck =
          new(zone()) HHasInstanceTypeAndBranch(return_value,
                                                FIRST_SPEC_OBJECT_TYPE,
                                                LAST_SPEC_OBJECT_TYPE);
      HBasicBlock* if_spec_object = graph()->CreateBasicBlock();
      HBasicBlock* not_spec_object = graph()->CreateBasicBlock();
      typecheck->SetSuccessorAt(0, if_spec_object);
      typecheck->SetSuccessorAt(1, not_spec_object);
      current_block()->Finish(typecheck);
      if_spec_object->AddLeaveInlined(return_value, state);
      if (!FLAG_harmony_symbols) {
        not_spec_object->AddLeaveInlined(receiver, state);
      } else {
        // With harmony symbols, a returned symbol also becomes the result.
        HHasInstanceTypeAndBranch* symbolcheck =
            new(zone()) HHasInstanceTypeAndBranch(return_value, SYMBOL_TYPE);
        HBasicBlock* is_symbol = graph()->CreateBasicBlock();
        HBasicBlock* not_symbol = graph()->CreateBasicBlock();
        symbolcheck->SetSuccessorAt(0, is_symbol);
        symbolcheck->SetSuccessorAt(1, not_symbol);
        not_spec_object->Finish(symbolcheck);
        is_symbol->AddLeaveInlined(return_value, state);
        not_symbol->AddLeaveInlined(receiver, state);
      }
    }
  } else if (state->inlining_kind() == SETTER_CALL_RETURN) {
    // Return from an inlined setter call. The returned value is never used, the
    // value of an assignment is always the value of the RHS of the assignment.
    CHECK_ALIVE(VisitForEffect(stmt->expression()));
    if (context->IsTest()) {
      HValue* rhs = environment()->arguments_environment()->Lookup(1);
      context->ReturnValue(rhs);
    } else if (context->IsEffect()) {
      current_block()->Goto(function_return(), state);
    } else {
      ASSERT(context->IsValue());
      HValue* rhs = environment()->arguments_environment()->Lookup(1);
      current_block()->AddLeaveInlined(rhs, state);
    }
  } else {
    // Return from a normal inlined function. Visit the subexpression in the
    // expression context of the call.
    if (context->IsTest()) {
      TestContext* test = TestContext::cast(context);
      VisitForControl(stmt->expression(), test->if_true(), test->if_false());
    } else if (context->IsEffect()) {
      CHECK_ALIVE(VisitForEffect(stmt->expression()));
      current_block()->Goto(function_return(), state);
    } else {
      ASSERT(context->IsValue());
      CHECK_ALIVE(VisitForValue(stmt->expression()));
      current_block()->AddLeaveInlined(Pop(), state);
    }
  }
  // A return never falls through.
  set_current_block(NULL);
}
-
-
// With statements are not supported by the optimizing compiler; always
// bail out to full codegen.
void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
  ASSERT(!HasStackOverflow());
  ASSERT(current_block() != NULL);
  ASSERT(current_block()->HasPredecessor());
  return Bailout("WithStatement");
}
-
-
// Builds the graph for a switch statement.  Supported only when all labels
// are smi literals or all labels are string literals, with at most
// kCaseClauseLimit clauses; otherwise bails out.  The construction happens
// in three stages: classify labels, build the chain of compare-and-branch
// tests, then emit the clause bodies with fall-through handling.
void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
  ASSERT(!HasStackOverflow());
  ASSERT(current_block() != NULL);
  ASSERT(current_block()->HasPredecessor());
  // We only optimize switch statements with smi-literal smi comparisons,
  // with a bounded number of clauses.
  const int kCaseClauseLimit = 128;
  ZoneList<CaseClause*>* clauses = stmt->cases();
  int clause_count = clauses->length();
  if (clause_count > kCaseClauseLimit) {
    return Bailout("SwitchStatement: too many clauses");
  }

  HValue* context = environment()->LookupContext();

  CHECK_ALIVE(VisitForValue(stmt->tag()));
  AddSimulate(stmt->EntryId());
  HValue* tag_value = Pop();
  HBasicBlock* first_test_block = current_block();

  SwitchType switch_type = UNKNOWN_SWITCH;

  // 1. Extract clause type
  for (int i = 0; i < clause_count; ++i) {
    CaseClause* clause = clauses->at(i);
    if (clause->is_default()) continue;

    if (switch_type == UNKNOWN_SWITCH) {
      if (clause->label()->IsSmiLiteral()) {
        switch_type = SMI_SWITCH;
      } else if (clause->label()->IsStringLiteral()) {
        switch_type = STRING_SWITCH;
      } else {
        return Bailout("SwitchStatement: non-literal switch label");
      }
    } else if ((switch_type == STRING_SWITCH &&
                !clause->label()->IsStringLiteral()) ||
               (switch_type == SMI_SWITCH &&
                !clause->label()->IsSmiLiteral())) {
      return Bailout("SwitchStatement: mixed label types are not supported");
    }
  }

  HUnaryControlInstruction* string_check = NULL;
  HBasicBlock* not_string_block = NULL;

  // Test switch's tag value if all clauses are string literals
  if (switch_type == STRING_SWITCH) {
    string_check = new(zone()) HIsStringAndBranch(tag_value);
    first_test_block = graph()->CreateBasicBlock();
    not_string_block = graph()->CreateBasicBlock();

    string_check->SetSuccessorAt(0, first_test_block);
    string_check->SetSuccessorAt(1, not_string_block);
    current_block()->Finish(string_check);

    set_current_block(first_test_block);
  }

  // 2. Build all the tests, with dangling true branches
  BailoutId default_id = BailoutId::None();
  for (int i = 0; i < clause_count; ++i) {
    CaseClause* clause = clauses->at(i);
    if (clause->is_default()) {
      default_id = clause->EntryId();
      continue;
    }
    if (switch_type == SMI_SWITCH) {
      clause->RecordTypeFeedback(oracle());
    }

    // Generate a compare and branch.
    CHECK_ALIVE(VisitForValue(clause->label()));
    HValue* label_value = Pop();

    HBasicBlock* next_test_block = graph()->CreateBasicBlock();
    HBasicBlock* body_block = graph()->CreateBasicBlock();

    HControlInstruction* compare;

    if (switch_type == SMI_SWITCH) {
      if (!clause->IsSmiCompare()) {
        // Finish with deoptimize and add uses of environment values to
        // account for invisible uses.
        current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
        set_current_block(NULL);
        break;
      }

      HCompareIDAndBranch* compare_ =
          new(zone()) HCompareIDAndBranch(tag_value,
                                          label_value,
                                          Token::EQ_STRICT);
      compare_->set_observed_input_representation(
          Representation::Integer32(), Representation::Integer32());
      compare = compare_;
    } else {
      compare = new(zone()) HStringCompareAndBranch(context, tag_value,
                                                    label_value,
                                                    Token::EQ_STRICT);
    }

    compare->SetSuccessorAt(0, body_block);
    compare->SetSuccessorAt(1, next_test_block);
    current_block()->Finish(compare);

    set_current_block(next_test_block);
  }

  // Save the current block to use for the default or to join with the
  // exit. This block is NULL if we deoptimized.
  HBasicBlock* last_block = current_block();

  if (not_string_block != NULL) {
    // A non-string tag skips all string comparisons and lands on the
    // default clause if there is one, else on the exit.
    BailoutId join_id = !default_id.IsNone() ? default_id : stmt->ExitId();
    last_block = CreateJoin(last_block, not_string_block, join_id);
  }

  // 3. Loop over the clauses and the linked list of tests in lockstep,
  // translating the clause bodies.
  HBasicBlock* curr_test_block = first_test_block;
  HBasicBlock* fall_through_block = NULL;

  BreakAndContinueInfo break_info(stmt);
  { BreakAndContinueScope push(&break_info, this);
    for (int i = 0; i < clause_count; ++i) {
      CaseClause* clause = clauses->at(i);

      // Identify the block where normal (non-fall-through) control flow
      // goes to.
      HBasicBlock* normal_block = NULL;
      if (clause->is_default()) {
        if (last_block != NULL) {
          normal_block = last_block;
          last_block = NULL;  // Cleared to indicate we've handled it.
        }
      } else if (!curr_test_block->end()->IsDeoptimize()) {
        normal_block = curr_test_block->end()->FirstSuccessor();
        curr_test_block = curr_test_block->end()->SecondSuccessor();
      }

      // Identify a block to emit the body into.
      if (normal_block == NULL) {
        if (fall_through_block == NULL) {
          // (a) Unreachable.
          if (clause->is_default()) {
            continue;  // Might still be reachable clause bodies.
          } else {
            break;
          }
        } else {
          // (b) Reachable only as fall through.
          set_current_block(fall_through_block);
        }
      } else if (fall_through_block == NULL) {
        // (c) Reachable only normally.
        set_current_block(normal_block);
      } else {
        // (d) Reachable both ways.
        HBasicBlock* join = CreateJoin(fall_through_block,
                                       normal_block,
                                       clause->EntryId());
        set_current_block(join);
      }

      CHECK_BAILOUT(VisitStatements(clause->statements()));
      fall_through_block = current_block();
    }
  }

  // Create an up-to-3-way join. Use the break block if it exists since
  // it's already a join block.
  HBasicBlock* break_block = break_info.break_block();
  if (break_block == NULL) {
    set_current_block(CreateJoin(fall_through_block,
                                 last_block,
                                 stmt->ExitId()));
  } else {
    if (fall_through_block != NULL) fall_through_block->Goto(break_block);
    if (last_block != NULL) last_block->Goto(break_block);
    break_block->SetJoinId(stmt->ExitId());
    set_current_block(break_block);
  }
}
-
-
// True if this loop is the on-stack-replacement entry point of the
// compilation (i.e. its OSR entry id matches the one requested).
bool HOptimizedGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
  return statement->OsrEntryId() == info()->osr_ast_id();
}
-
-
// If statement is the OSR entry point, builds the OSR entry machinery:
// a branch splitting normal entry from OSR entry, HUnknownOSRValue
// placeholders for every environment slot, and the HOsrEntry instruction.
// Returns true when an OSR entry was emitted, false otherwise.
bool HOptimizedGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
  if (!HasOsrEntryAt(statement)) return false;

  // The branch on constant true keeps both the normal and the OSR entry
  // blocks reachable in the graph.
  HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
  HBasicBlock* osr_entry = graph()->CreateBasicBlock();
  HValue* true_value = graph()->GetConstantTrue();
  HBranch* test = new(zone()) HBranch(true_value, non_osr_entry, osr_entry);
  current_block()->Finish(test);

  HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
  non_osr_entry->Goto(loop_predecessor);

  set_current_block(osr_entry);
  osr_entry->set_osr_entry();
  BailoutId osr_entry_id = statement->OsrEntryId();
  int first_expression_index = environment()->first_expression_index();
  int length = environment()->length();
  ZoneList<HUnknownOSRValue*>* osr_values =
      new(zone()) ZoneList<HUnknownOSRValue*>(length, zone());

  // Parameters, specials and locals get unknown OSR values bound in place.
  for (int i = 0; i < first_expression_index; ++i) {
    HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
    AddInstruction(osr_value);
    environment()->Bind(i, osr_value);
    osr_values->Add(osr_value, zone());
  }

  // Expression-stack entries are replaced wholesale with unknown values.
  if (first_expression_index != length) {
    environment()->Drop(length - first_expression_index);
    for (int i = first_expression_index; i < length; ++i) {
      HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
      AddInstruction(osr_value);
      environment()->Push(osr_value);
      osr_values->Add(osr_value, zone());
    }
  }

  graph()->set_osr_values(osr_values);

  AddSimulate(osr_entry_id);
  AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
  HContext* context = new(zone()) HContext;
  AddInstruction(context);
  environment()->BindContext(context);
  current_block()->Goto(loop_predecessor);
  loop_predecessor->SetJoinId(statement->EntryId());
  set_current_block(loop_predecessor);
  return true;
}
-
-
// Visits a loop body: installs the break/continue scope, emits the
// back-edge stack check (recorded on the loop header), then builds the
// body statements.
void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
                                           HBasicBlock* loop_entry,
                                           BreakAndContinueInfo* break_info) {
  BreakAndContinueScope push(break_info, this);
  // The simulate must precede the stack check so deopts land before it.
  AddSimulate(stmt->StackCheckId());
  HValue* context = environment()->LookupContext();
  HStackCheck* stack_check =
      new(zone()) HStackCheck(context, HStackCheck::kBackwardsBranch);
  AddInstruction(stack_check);
  ASSERT(loop_entry->IsLoopHeader());
  loop_entry->loop_information()->set_stack_check(stack_check);
  CHECK_BAILOUT(Visit(stmt->body()));
}
-
-
-void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- ASSERT(current_block() != NULL);
- bool osr_entry = PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry);
- set_current_block(loop_entry);
- if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
-
- BreakAndContinueInfo break_info(stmt);
- CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
- HBasicBlock* body_exit =
- JoinContinue(stmt, current_block(), break_info.continue_block());
- HBasicBlock* loop_successor = NULL;
- if (body_exit != NULL && !stmt->cond()->ToBooleanIsTrue()) {
- set_current_block(body_exit);
- // The block for a true condition, the actual predecessor block of the
- // back edge.
- body_exit = graph()->CreateBasicBlock();
- loop_successor = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(stmt->cond(), body_exit, loop_successor));
- if (body_exit->HasPredecessor()) {
- body_exit->SetJoinId(stmt->BackEdgeId());
- } else {
- body_exit = NULL;
- }
- if (loop_successor->HasPredecessor()) {
- loop_successor->SetJoinId(stmt->ExitId());
- } else {
- loop_successor = NULL;
- }
- }
- HBasicBlock* loop_exit = CreateLoop(stmt,
- loop_entry,
- body_exit,
- loop_successor,
- break_info.break_block());
- set_current_block(loop_exit);
-}
-
-
-void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- ASSERT(current_block() != NULL);
- bool osr_entry = PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry);
- set_current_block(loop_entry);
- if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
-
-
- // If the condition is constant true, do not generate a branch.
- HBasicBlock* loop_successor = NULL;
- if (!stmt->cond()->ToBooleanIsTrue()) {
- HBasicBlock* body_entry = graph()->CreateBasicBlock();
- loop_successor = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
- if (body_entry->HasPredecessor()) {
- body_entry->SetJoinId(stmt->BodyId());
- set_current_block(body_entry);
- }
- if (loop_successor->HasPredecessor()) {
- loop_successor->SetJoinId(stmt->ExitId());
- } else {
- loop_successor = NULL;
- }
- }
-
- BreakAndContinueInfo break_info(stmt);
- if (current_block() != NULL) {
- CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
- }
- HBasicBlock* body_exit =
- JoinContinue(stmt, current_block(), break_info.continue_block());
- HBasicBlock* loop_exit = CreateLoop(stmt,
- loop_entry,
- body_exit,
- loop_successor,
- break_info.break_block());
- set_current_block(loop_exit);
-}
-
-
-void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- if (stmt->init() != NULL) {
- CHECK_ALIVE(Visit(stmt->init()));
- }
- ASSERT(current_block() != NULL);
- bool osr_entry = PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry);
- set_current_block(loop_entry);
- if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
-
- HBasicBlock* loop_successor = NULL;
- if (stmt->cond() != NULL) {
- HBasicBlock* body_entry = graph()->CreateBasicBlock();
- loop_successor = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
- if (body_entry->HasPredecessor()) {
- body_entry->SetJoinId(stmt->BodyId());
- set_current_block(body_entry);
- }
- if (loop_successor->HasPredecessor()) {
- loop_successor->SetJoinId(stmt->ExitId());
- } else {
- loop_successor = NULL;
- }
- }
-
- BreakAndContinueInfo break_info(stmt);
- if (current_block() != NULL) {
- CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
- }
- HBasicBlock* body_exit =
- JoinContinue(stmt, current_block(), break_info.continue_block());
-
- if (stmt->next() != NULL && body_exit != NULL) {
- set_current_block(body_exit);
- CHECK_BAILOUT(Visit(stmt->next()));
- body_exit = current_block();
- }
-
- HBasicBlock* loop_exit = CreateLoop(stmt,
- loop_entry,
- body_exit,
- loop_successor,
- break_info.break_block());
- set_current_block(loop_exit);
-}
-
-
-void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
-
- if (!FLAG_optimize_for_in) {
- return Bailout("ForInStatement optimization is disabled");
- }
-
- if (!oracle()->IsForInFastCase(stmt)) {
- return Bailout("ForInStatement is not fast case");
- }
-
- if (!stmt->each()->IsVariableProxy() ||
- !stmt->each()->AsVariableProxy()->var()->IsStackLocal()) {
- return Bailout("ForInStatement with non-local each variable");
- }
-
- Variable* each_var = stmt->each()->AsVariableProxy()->var();
-
- CHECK_ALIVE(VisitForValue(stmt->enumerable()));
- HValue* enumerable = Top(); // Leave enumerable at the top.
-
- HInstruction* map = AddInstruction(new(zone()) HForInPrepareMap(
- environment()->LookupContext(), enumerable));
- AddSimulate(stmt->PrepareId());
-
- HInstruction* array = AddInstruction(
- new(zone()) HForInCacheArray(
- enumerable,
- map,
- DescriptorArray::kEnumCacheBridgeCacheIndex));
-
- HInstruction* enum_length = AddInstruction(new(zone()) HMapEnumLength(map));
-
- HInstruction* start_index = AddInstruction(new(zone()) HConstant(
- Handle<Object>(Smi::FromInt(0), isolate()), Representation::Integer32()));
-
- Push(map);
- Push(array);
- Push(enum_length);
- Push(start_index);
-
- HInstruction* index_cache = AddInstruction(
- new(zone()) HForInCacheArray(
- enumerable,
- map,
- DescriptorArray::kEnumCacheBridgeIndicesCacheIndex));
- HForInCacheArray::cast(array)->set_index_cache(
- HForInCacheArray::cast(index_cache));
-
- bool osr_entry = PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry);
- set_current_block(loop_entry);
- if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
-
- HValue* index = environment()->ExpressionStackAt(0);
- HValue* limit = environment()->ExpressionStackAt(1);
-
- // Check that we still have more keys.
- HCompareIDAndBranch* compare_index =
- new(zone()) HCompareIDAndBranch(index, limit, Token::LT);
- compare_index->set_observed_input_representation(
- Representation::Integer32(), Representation::Integer32());
-
- HBasicBlock* loop_body = graph()->CreateBasicBlock();
- HBasicBlock* loop_successor = graph()->CreateBasicBlock();
-
- compare_index->SetSuccessorAt(0, loop_body);
- compare_index->SetSuccessorAt(1, loop_successor);
- current_block()->Finish(compare_index);
-
- set_current_block(loop_successor);
- Drop(5);
-
- set_current_block(loop_body);
-
- HValue* key = AddInstruction(
- new(zone()) HLoadKeyed(
- environment()->ExpressionStackAt(2), // Enum cache.
- environment()->ExpressionStackAt(0), // Iteration index.
- environment()->ExpressionStackAt(0),
- FAST_ELEMENTS));
-
- // Check if the expected map still matches that of the enumerable.
- // If not just deoptimize.
- AddInstruction(new(zone()) HCheckMapValue(
- environment()->ExpressionStackAt(4),
- environment()->ExpressionStackAt(3)));
-
- Bind(each_var, key);
-
- BreakAndContinueInfo break_info(stmt, 5);
- CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
-
- HBasicBlock* body_exit =
- JoinContinue(stmt, current_block(), break_info.continue_block());
-
- if (body_exit != NULL) {
- set_current_block(body_exit);
-
- HValue* current_index = Pop();
- HInstruction* new_index = HAdd::New(zone(),
- environment()->LookupContext(),
- current_index,
- graph()->GetConstant1());
- new_index->AssumeRepresentation(Representation::Integer32());
- PushAndAdd(new_index);
- body_exit = current_block();
- }
-
- HBasicBlock* loop_exit = CreateLoop(stmt,
- loop_entry,
- body_exit,
- loop_successor,
- break_info.break_block());
-
- set_current_block(loop_exit);
-}
-
-
-void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- return Bailout("TryCatchStatement");
-}
-
-
-void HOptimizedGraphBuilder::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- return Bailout("TryFinallyStatement");
-}
-
-
-void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- return Bailout("DebuggerStatement");
-}
-
-
-static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
- Code* unoptimized_code, FunctionLiteral* expr) {
- int start_position = expr->start_position();
- RelocIterator it(unoptimized_code);
- for (;!it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
- Object* obj = rinfo->target_object();
- if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
- if (shared->start_position() == start_position) {
- return Handle<SharedFunctionInfo>(shared);
- }
- }
- }
-
- return Handle<SharedFunctionInfo>();
-}
-
-
-void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- Handle<SharedFunctionInfo> shared_info =
- SearchSharedFunctionInfo(info()->shared_info()->code(),
- expr);
- if (shared_info.is_null()) {
- shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
- }
- // We also have a stack overflow if the recursive compilation did.
- if (HasStackOverflow()) return;
- HValue* context = environment()->LookupContext();
- HFunctionLiteral* instr =
- new(zone()) HFunctionLiteral(context, shared_info, expr->pretenure());
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- return Bailout("SharedFunctionInfoLiteral");
-}
-
-
-void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- HBasicBlock* cond_true = graph()->CreateBasicBlock();
- HBasicBlock* cond_false = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(expr->condition(), cond_true, cond_false));
-
- // Visit the true and false subexpressions in the same AST context as the
- // whole expression.
- if (cond_true->HasPredecessor()) {
- cond_true->SetJoinId(expr->ThenId());
- set_current_block(cond_true);
- CHECK_BAILOUT(Visit(expr->then_expression()));
- cond_true = current_block();
- } else {
- cond_true = NULL;
- }
-
- if (cond_false->HasPredecessor()) {
- cond_false->SetJoinId(expr->ElseId());
- set_current_block(cond_false);
- CHECK_BAILOUT(Visit(expr->else_expression()));
- cond_false = current_block();
- } else {
- cond_false = NULL;
- }
-
- if (!ast_context()->IsTest()) {
- HBasicBlock* join = CreateJoin(cond_true, cond_false, expr->id());
- set_current_block(join);
- if (join != NULL && !ast_context()->IsEffect()) {
- return ast_context()->ReturnValue(Pop());
- }
- }
-}
-
-
-HOptimizedGraphBuilder::GlobalPropertyAccess
- HOptimizedGraphBuilder::LookupGlobalProperty(
- Variable* var, LookupResult* lookup, bool is_store) {
- if (var->is_this() || !info()->has_global_object()) {
- return kUseGeneric;
- }
- Handle<GlobalObject> global(info()->global_object());
- global->Lookup(*var->name(), lookup);
- if (!lookup->IsNormal() ||
- (is_store && lookup->IsReadOnly()) ||
- lookup->holder() != *global) {
- return kUseGeneric;
- }
-
- return kUseCell;
-}
-
-
-HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
- ASSERT(var->IsContextSlot());
- HValue* context = environment()->LookupContext();
- int length = info()->scope()->ContextChainLength(var->scope());
- while (length-- > 0) {
- HInstruction* context_instruction = new(zone()) HOuterContext(context);
- AddInstruction(context_instruction);
- context = context_instruction;
- }
- return context;
-}
-
-
-void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- Variable* variable = expr->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- if (IsLexicalVariableMode(variable->mode())) {
- // TODO(rossberg): should this be an ASSERT?
- return Bailout("reference to global lexical variable");
- }
- // Handle known global constants like 'undefined' specially to avoid a
- // load from a global cell for them.
- Handle<Object> constant_value =
- isolate()->factory()->GlobalConstantFor(variable->name());
- if (!constant_value.is_null()) {
- HConstant* instr =
- new(zone()) HConstant(constant_value, Representation::Tagged());
- return ast_context()->ReturnInstruction(instr, expr->id());
- }
-
- LookupResult lookup(isolate());
- GlobalPropertyAccess type =
- LookupGlobalProperty(variable, &lookup, false);
-
- if (type == kUseCell &&
- info()->global_object()->IsAccessCheckNeeded()) {
- type = kUseGeneric;
- }
-
- if (type == kUseCell) {
- Handle<GlobalObject> global(info()->global_object());
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- HLoadGlobalCell* instr =
- new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
- return ast_context()->ReturnInstruction(instr, expr->id());
- } else {
- HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- if (variable->is_qml_global()) global_object->set_qml_global(true);
- AddInstruction(global_object);
- HLoadGlobalGeneric* instr =
- new(zone()) HLoadGlobalGeneric(context,
- global_object,
- variable->name(),
- ast_context()->is_for_typeof());
- instr->set_position(expr->position());
- return ast_context()->ReturnInstruction(instr, expr->id());
- }
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL: {
- HValue* value = environment()->Lookup(variable);
- if (value == graph()->GetConstantHole()) {
- ASSERT(IsDeclaredVariableMode(variable->mode()) &&
- variable->mode() != VAR);
- return Bailout("reference to uninitialized variable");
- }
- return ast_context()->ReturnValue(value);
- }
-
- case Variable::CONTEXT: {
- HValue* context = BuildContextChainWalk(variable);
- HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, variable);
- return ast_context()->ReturnInstruction(instr, expr->id());
- }
-
- case Variable::LOOKUP:
- return Bailout("reference to a variable which requires dynamic lookup");
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- HConstant* instr =
- new(zone()) HConstant(expr->handle(), Representation::None());
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- Handle<JSFunction> closure = function_state()->compilation_info()->closure();
- Handle<FixedArray> literals(closure->literals());
- HValue* context = environment()->LookupContext();
-
- HRegExpLiteral* instr = new(zone()) HRegExpLiteral(context,
- literals,
- expr->pattern(),
- expr->flags(),
- expr->literal_index());
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-static void LookupInPrototypes(Handle<Map> map,
- Handle<String> name,
- LookupResult* lookup) {
- while (map->prototype()->IsJSObject()) {
- Handle<JSObject> holder(JSObject::cast(map->prototype()));
- if (!holder->HasFastProperties()) break;
- map = Handle<Map>(holder->map());
- map->LookupDescriptor(*holder, *name, lookup);
- if (lookup->IsFound()) return;
- }
- lookup->NotFound();
-}
-
-
-// Tries to find a JavaScript accessor of the given name in the prototype chain
-// starting at the given map. Return true iff there is one, including the
-// corresponding AccessorPair plus its holder (which could be null when the
-// accessor is found directly in the given map).
-static bool LookupAccessorPair(Handle<Map> map,
- Handle<String> name,
- Handle<AccessorPair>* accessors,
- Handle<JSObject>* holder) {
- Isolate* isolate = map->GetIsolate();
- LookupResult lookup(isolate);
-
- // Check for a JavaScript accessor directly in the map.
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsPropertyCallbacks()) {
- Handle<Object> callback(lookup.GetValueFromMap(*map), isolate);
- if (!callback->IsAccessorPair()) return false;
- *accessors = Handle<AccessorPair>::cast(callback);
- *holder = Handle<JSObject>();
- return true;
- }
-
- // Everything else, e.g. a field, can't be an accessor call.
- if (lookup.IsFound()) return false;
-
- // Check for a JavaScript accessor somewhere in the proto chain.
- LookupInPrototypes(map, name, &lookup);
- if (lookup.IsPropertyCallbacks()) {
- Handle<Object> callback(lookup.GetValue(), isolate);
- if (!callback->IsAccessorPair()) return false;
- *accessors = Handle<AccessorPair>::cast(callback);
- *holder = Handle<JSObject>(lookup.holder());
- return true;
- }
-
- // We haven't found a JavaScript accessor anywhere.
- return false;
-}
-
-
-static bool LookupGetter(Handle<Map> map,
- Handle<String> name,
- Handle<JSFunction>* getter,
- Handle<JSObject>* holder) {
- Handle<AccessorPair> accessors;
- if (LookupAccessorPair(map, name, &accessors, holder) &&
- accessors->getter()->IsJSFunction()) {
- *getter = Handle<JSFunction>(JSFunction::cast(accessors->getter()));
- return true;
- }
- return false;
-}
-
-
-static bool LookupSetter(Handle<Map> map,
- Handle<String> name,
- Handle<JSFunction>* setter,
- Handle<JSObject>* holder) {
- Handle<AccessorPair> accessors;
- if (LookupAccessorPair(map, name, &accessors, holder) &&
- accessors->setter()->IsJSFunction()) {
- *setter = Handle<JSFunction>(JSFunction::cast(accessors->setter()));
- return true;
- }
- return false;
-}
-
-
-// Determines whether the given array or object literal boilerplate satisfies
-// all limits to be considered for fast deep-copying and computes the total
-// size of all objects that are part of the graph.
-static bool IsFastLiteral(Handle<JSObject> boilerplate,
- int max_depth,
- int* max_properties,
- int* total_size) {
- ASSERT(max_depth >= 0 && *max_properties >= 0);
- if (max_depth == 0) return false;
-
- Isolate* isolate = boilerplate->GetIsolate();
- Handle<FixedArrayBase> elements(boilerplate->elements());
- if (elements->length() > 0 &&
- elements->map() != isolate->heap()->fixed_cow_array_map()) {
- if (boilerplate->HasFastDoubleElements()) {
- *total_size += FixedDoubleArray::SizeFor(elements->length());
- } else if (boilerplate->HasFastObjectElements()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- int length = elements->length();
- for (int i = 0; i < length; i++) {
- if ((*max_properties)-- == 0) return false;
- Handle<Object> value(fast_elements->get(i), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteral(value_object,
- max_depth - 1,
- max_properties,
- total_size)) {
- return false;
- }
- }
- }
- *total_size += FixedArray::SizeFor(length);
- } else {
- return false;
- }
- }
-
- Handle<FixedArray> properties(boilerplate->properties());
- if (properties->length() > 0) {
- return false;
- } else {
- int nof = boilerplate->map()->inobject_properties();
- for (int i = 0; i < nof; i++) {
- if ((*max_properties)-- == 0) return false;
- Handle<Object> value(boilerplate->InObjectPropertyAt(i), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteral(value_object,
- max_depth - 1,
- max_properties,
- total_size)) {
- return false;
- }
- }
- }
- }
-
- *total_size += boilerplate->map()->instance_size();
- return true;
-}
-
-
-void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- Handle<JSFunction> closure = function_state()->compilation_info()->closure();
- HValue* context = environment()->LookupContext();
- HInstruction* literal;
-
- // Check whether to use fast or slow deep-copying for boilerplate.
- int total_size = 0;
- int max_properties = HFastLiteral::kMaxLiteralProperties;
- Handle<Object> boilerplate(closure->literals()->get(expr->literal_index()),
- isolate());
- if (boilerplate->IsJSObject() &&
- IsFastLiteral(Handle<JSObject>::cast(boilerplate),
- HFastLiteral::kMaxLiteralDepth,
- &max_properties,
- &total_size)) {
- Handle<JSObject> boilerplate_object = Handle<JSObject>::cast(boilerplate);
- literal = new(zone()) HFastLiteral(context,
- boilerplate_object,
- total_size,
- expr->literal_index(),
- expr->depth(),
- DONT_TRACK_ALLOCATION_SITE);
- } else {
- literal = new(zone()) HObjectLiteral(context,
- expr->constant_properties(),
- expr->fast_elements(),
- expr->literal_index(),
- expr->depth(),
- expr->has_function());
- }
-
- // The object is expected in the bailout environment during computation
- // of the property values and is the value of the entire expression.
- PushAndAdd(literal);
-
- expr->CalculateEmitStore(zone());
-
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
-
- switch (property->kind()) {
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsInternalizedString()) {
- if (property->emit_store()) {
- property->RecordTypeFeedback(oracle());
- CHECK_ALIVE(VisitForValue(value));
- HValue* value = Pop();
- Handle<Map> map = property->GetReceiverType();
- Handle<String> name = property->key()->AsPropertyName();
- HInstruction* store;
- if (map.is_null()) {
- // If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildStoreNamedGeneric(literal, name, value));
- } else {
-#if DEBUG
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- ASSERT(!LookupSetter(map, name, &setter, &holder));
-#endif
- CHECK_ALIVE(store = BuildStoreNamedMonomorphic(literal,
- name,
- value,
- map));
- }
- AddInstruction(store);
- if (store->HasObservableSideEffects()) {
- AddSimulate(key->id(), REMOVABLE_SIMULATE);
- }
- } else {
- CHECK_ALIVE(VisitForEffect(value));
- }
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- case ObjectLiteral::Property::SETTER:
- case ObjectLiteral::Property::GETTER:
- return Bailout("Object literal with complex property");
- default: UNREACHABLE();
- }
- }
-
- if (expr->has_function()) {
- // Return the result of the transformation to fast properties
- // instead of the original since this operation changes the map
- // of the object. This makes sure that the original object won't
- // be used by other optimized code before it is transformed
- // (e.g. because of code motion).
- HToFastProperties* result = new(zone()) HToFastProperties(Pop());
- AddInstruction(result);
- return ast_context()->ReturnValue(result);
- } else {
- return ast_context()->ReturnValue(Pop());
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
- HValue* context = environment()->LookupContext();
- HInstruction* literal;
-
- Handle<FixedArray> literals(environment()->closure()->literals());
- Handle<Object> raw_boilerplate(literals->get(expr->literal_index()),
- isolate());
-
- if (raw_boilerplate->IsUndefined()) {
- raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
- isolate(), literals, expr->constant_elements());
- if (raw_boilerplate.is_null()) {
- return Bailout("array boilerplate creation failed");
- }
- literals->set(expr->literal_index(), *raw_boilerplate);
- if (JSObject::cast(*raw_boilerplate)->elements()->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- isolate()->counters()->cow_arrays_created_runtime()->Increment();
- }
- }
-
- Handle<JSObject> boilerplate = Handle<JSObject>::cast(raw_boilerplate);
- ElementsKind boilerplate_elements_kind =
- Handle<JSObject>::cast(boilerplate)->GetElementsKind();
-
- // TODO(mvstanton): This heuristic is only a temporary solution. In the
- // end, we want to quit creating allocation site info after a certain number
- // of GCs for a call site.
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(
- boilerplate_elements_kind);
-
- // Check whether to use fast or slow deep-copying for boilerplate.
- int total_size = 0;
- int max_properties = HFastLiteral::kMaxLiteralProperties;
- if (IsFastLiteral(boilerplate,
- HFastLiteral::kMaxLiteralDepth,
- &max_properties,
- &total_size)) {
- if (mode == TRACK_ALLOCATION_SITE) {
- total_size += AllocationSiteInfo::kSize;
- }
- literal = new(zone()) HFastLiteral(context,
- boilerplate,
- total_size,
- expr->literal_index(),
- expr->depth(),
- mode);
- } else {
- literal = new(zone()) HArrayLiteral(context,
- boilerplate,
- length,
- expr->literal_index(),
- expr->depth(),
- mode);
- }
-
- // The array is expected in the bailout environment during computation
- // of the property values and is the value of the entire expression.
- PushAndAdd(literal);
-
- HLoadElements* elements = NULL;
-
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
-
- CHECK_ALIVE(VisitForValue(subexpr));
- HValue* value = Pop();
- if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
-
- // Pass in literal as dummy depedency, since the receiver always has
- // elements.
- elements = new(zone()) HLoadElements(literal, literal);
- AddInstruction(elements);
-
- HValue* key = AddInstruction(
- new(zone()) HConstant(Handle<Object>(Smi::FromInt(i), isolate()),
- Representation::Integer32()));
-
- switch (boilerplate_elements_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- // Smi-only arrays need a smi check.
- AddInstruction(new(zone()) HCheckSmi(value));
- // Fall through.
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- AddInstruction(new(zone()) HStoreKeyed(
- elements,
- key,
- value,
- boilerplate_elements_kind));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- AddSimulate(expr->GetIdForElement(i));
- }
- return ast_context()->ReturnValue(Pop());
-}
-
-
-// Sets the lookup result and returns true if the load/store can be inlined.
-static bool ComputeLoadStoreField(Handle<Map> type,
- Handle<String> name,
- LookupResult* lookup,
- bool is_store) {
- if (type->has_named_interceptor()) {
- lookup->InterceptorResult(NULL);
- return false;
- }
- // If we directly find a field, the access can be inlined.
- type->LookupDescriptor(NULL, *name, lookup);
- if (lookup->IsField()) return true;
-
- // For a load, we are out of luck if there is no such field.
- if (!is_store) return false;
-
- // 2nd chance: A store into a non-existent field can still be inlined if we
- // have a matching transition and some room left in the object.
- type->LookupTransition(NULL, *name, lookup);
- return lookup->IsTransitionToField(*type) &&
- (type->unused_property_fields() > 0);
-}
-
-
-static int ComputeLoadStoreFieldIndex(Handle<Map> type,
- Handle<String> name,
- LookupResult* lookup) {
- ASSERT(lookup->IsField() || lookup->IsTransitionToField(*type));
- if (lookup->IsField()) {
- return lookup->GetLocalFieldIndexFromMap(*type);
- } else {
- Map* transition = lookup->GetTransitionMapFromMap(*type);
- return transition->PropertyIndexFor(*name) - type->inobject_properties();
- }
-}
-
-
-void HOptimizedGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
- Handle<Map> map) {
- AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
- HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map,
- LookupResult* lookup) {
- ASSERT(lookup->IsFound());
- // If the property does not exist yet, we have to check that it wasn't made
- // readonly or turned into a setter by some meanwhile modifications on the
- // prototype chain.
- if (!lookup->IsProperty() && map->prototype()->IsJSReceiver()) {
- Object* proto = map->prototype();
- // First check that the prototype chain isn't affected already.
- LookupResult proto_result(isolate());
- proto->Lookup(*name, &proto_result);
- if (proto_result.IsProperty()) {
- // If the inherited property could induce readonly-ness, bail out.
- if (proto_result.IsReadOnly() || !proto_result.IsCacheable()) {
- Bailout("improper object on prototype chain for store");
- return NULL;
- }
- // We only need to check up to the preexisting property.
- proto = proto_result.holder();
- } else {
- // Otherwise, find the top prototype.
- while (proto->GetPrototype(isolate())->IsJSObject()) {
- proto = proto->GetPrototype(isolate());
- }
- ASSERT(proto->GetPrototype(isolate())->IsNull());
- }
- ASSERT(proto->IsJSObject());
- AddInstruction(new(zone()) HCheckPrototypeMaps(
- Handle<JSObject>(JSObject::cast(map->prototype())),
- Handle<JSObject>(JSObject::cast(proto)),
- zone()));
- }
-
- int index = ComputeLoadStoreFieldIndex(map, name, lookup);
- bool is_in_object = index < 0;
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- offset += map->instance_size();
- } else {
- offset += FixedArray::kHeaderSize;
- }
- HStoreNamedField* instr =
- new(zone()) HStoreNamedField(object, name, value, is_in_object, offset);
- if (lookup->IsTransitionToField(*map)) {
- Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
- instr->set_transition(transition);
- // TODO(fschneider): Record the new map type of the object in the IR to
- // enable elimination of redundant checks after the transition store.
- instr->SetGVNFlag(kChangesMaps);
- }
- return instr;
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
- HValue* object,
- Handle<String> name,
- HValue* value) {
- HValue* context = environment()->LookupContext();
- return new(zone()) HStoreNamedGeneric(
- context,
- object,
- name,
- value,
- function_strict_mode_flag());
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildCallSetter(
- HValue* object,
- HValue* value,
- Handle<Map> map,
- Handle<JSFunction> setter,
- Handle<JSObject> holder) {
- AddCheckConstantFunction(holder, object, map);
- AddInstruction(new(zone()) HPushArgument(object));
- AddInstruction(new(zone()) HPushArgument(value));
- return new(zone()) HCallConstantFunction(setter, 2);
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
- HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map) {
- // Handle a store to a known field.
- LookupResult lookup(isolate());
- if (ComputeLoadStoreField(map, name, &lookup, true)) {
- AddCheckMapsWithTransitions(object, map);
- return BuildStoreNamedField(object, name, value, map, &lookup);
- }
-
- // No luck, do a generic store.
- return BuildStoreNamedGeneric(object, name, value);
-}
-
-
-void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
- Property* expr,
- HValue* object,
- SmallMapList* types,
- Handle<String> name) {
- int count = 0;
- int previous_field_offset = 0;
- bool previous_field_is_in_object = false;
- bool is_monomorphic_field = true;
- Handle<Map> map;
- LookupResult lookup(isolate());
- for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- map = types->at(i);
- if (ComputeLoadStoreField(map, name, &lookup, false)) {
- int index = ComputeLoadStoreFieldIndex(map, name, &lookup);
- bool is_in_object = index < 0;
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- offset += map->instance_size();
- } else {
- offset += FixedArray::kHeaderSize;
- }
- if (count == 0) {
- previous_field_offset = offset;
- previous_field_is_in_object = is_in_object;
- } else if (is_monomorphic_field) {
- is_monomorphic_field = (offset == previous_field_offset) &&
- (is_in_object == previous_field_is_in_object);
- }
- ++count;
- }
- }
-
- // Use monomorphic load if property lookup results in the same field index
- // for all maps. Requires special map check on the set of all handled maps.
- AddInstruction(new(zone()) HCheckNonSmi(object));
- HInstruction* instr;
- if (count == types->length() && is_monomorphic_field) {
- AddInstruction(new(zone()) HCheckMaps(object, types, zone()));
- instr = BuildLoadNamedField(object, map, &lookup);
- } else {
- HValue* context = environment()->LookupContext();
- instr = new(zone()) HLoadNamedFieldPolymorphic(context,
- object,
- types,
- name,
- zone());
- }
-
- instr->set_position(expr->position());
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
- Assignment* expr,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name) {
- // TODO(ager): We should recognize when the prototype chains for different
- // maps are identical. In that case we can avoid repeatedly generating the
- // same prototype map checks.
- int count = 0;
- HBasicBlock* join = NULL;
- for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
- Handle<Map> map = types->at(i);
- LookupResult lookup(isolate());
- if (ComputeLoadStoreField(map, name, &lookup, true)) {
- if (count == 0) {
- AddInstruction(new(zone()) HCheckNonSmi(object)); // Only needed once.
- join = graph()->CreateBasicBlock();
- }
- ++count;
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare =
- new(zone()) HCompareMap(object, map, if_true, if_false);
- current_block()->Finish(compare);
-
- set_current_block(if_true);
- HInstruction* instr;
- CHECK_ALIVE(instr =
- BuildStoreNamedField(object, name, value, map, &lookup));
- instr->set_position(expr->position());
- // Goto will add the HSimulate for the store.
- AddInstruction(instr);
- if (!ast_context()->IsEffect()) Push(value);
- current_block()->Goto(join);
-
- set_current_block(if_false);
- }
- }
-
- // Finish up. Unconditionally deoptimize if we've handled all the maps we
- // know about and do not want to handle ones we've never seen. Otherwise
- // use a generic IC.
- if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
- } else {
- HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
- instr->set_position(expr->position());
- AddInstruction(instr);
-
- if (join != NULL) {
- if (!ast_context()->IsEffect()) Push(value);
- current_block()->Goto(join);
- } else {
- // The HSimulate for the store should not see the stored value in
- // effect contexts (it is not materialized at expr->id() in the
- // unoptimized code).
- if (instr->HasObservableSideEffects()) {
- if (ast_context()->IsEffect()) {
- AddSimulate(expr->id(), REMOVABLE_SIMULATE);
- } else {
- Push(value);
- AddSimulate(expr->id(), REMOVABLE_SIMULATE);
- Drop(1);
- }
- }
- return ast_context()->ReturnValue(value);
- }
- }
-
- ASSERT(join != NULL);
- join->SetJoinId(expr->id());
- set_current_block(join);
- if (!ast_context()->IsEffect()) return ast_context()->ReturnValue(Pop());
-}
-
-
-void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- expr->RecordTypeFeedback(oracle(), zone());
- CHECK_ALIVE(VisitForValue(prop->obj()));
-
- if (prop->key()->IsPropertyName()) {
- // Named store.
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* value = environment()->ExpressionStackAt(0);
- HValue* object = environment()->ExpressionStackAt(1);
-
- Literal* key = prop->key()->AsLiteral();
- Handle<String> name = Handle<String>::cast(key->handle());
- ASSERT(!name.is_null());
-
- HInstruction* instr = NULL;
- SmallMapList* types = expr->GetReceiverTypes();
- bool monomorphic = expr->IsMonomorphic();
- Handle<Map> map;
- if (monomorphic) {
- map = types->first();
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- if (LookupSetter(map, name, &setter, &holder)) {
- AddCheckConstantFunction(holder, object, map);
- if (FLAG_inline_accessors && TryInlineSetter(setter, expr, value)) {
- return;
- }
- Drop(2);
- AddInstruction(new(zone()) HPushArgument(object));
- AddInstruction(new(zone()) HPushArgument(value));
- instr = new(zone()) HCallConstantFunction(setter, 2);
- } else {
- Drop(2);
- CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
- name,
- value,
- map));
- }
-
- } else if (types != NULL && types->length() > 1) {
- Drop(2);
- return HandlePolymorphicStoreNamedField(expr, object, value, types, name);
- } else {
- Drop(2);
- instr = BuildStoreNamedGeneric(object, name, value);
- }
-
- Push(value);
- instr->set_position(expr->position());
- AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
- return ast_context()->ReturnValue(Pop());
-
- } else {
- // Keyed store.
- CHECK_ALIVE(VisitForValue(prop->key()));
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* value = Pop();
- HValue* key = Pop();
- HValue* object = Pop();
- bool has_side_effects = false;
- HandleKeyedElementAccess(object, key, value, expr, expr->AssignmentId(),
- expr->position(),
- true, // is_store
- &has_side_effects);
- Push(value);
- ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- return ast_context()->ReturnValue(Pop());
- }
-}
-
-
-// Because not every expression has a position and there is not common
-// superclass of Assignment and CountOperation, we cannot just pass the
-// owning expression instead of position and ast_id separately.
-void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
- Variable* var,
- HValue* value,
- int position,
- BailoutId ast_id) {
- LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
- if (type == kUseCell) {
- Handle<GlobalObject> global(info()->global_object());
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- HInstruction* instr =
- new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
- instr->set_position(position);
- AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(ast_id, REMOVABLE_SIMULATE);
- }
- } else {
- HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- if (var->is_qml_global()) global_object->set_qml_global(true);
- AddInstruction(global_object);
- HStoreGlobalGeneric* instr =
- new(zone()) HStoreGlobalGeneric(context,
- global_object,
- var->name(),
- value,
- function_strict_mode_flag());
- instr->set_position(position);
- AddInstruction(instr);
- ASSERT(instr->HasObservableSideEffects());
- AddSimulate(ast_id, REMOVABLE_SIMULATE);
- }
-}
-
-
-void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
- Expression* target = expr->target();
- VariableProxy* proxy = target->AsVariableProxy();
- Property* prop = target->AsProperty();
- ASSERT(proxy == NULL || prop == NULL);
-
- // We have a second position recorded in the FullCodeGenerator to have
- // type feedback for the binary operation.
- BinaryOperation* operation = expr->binary_operation();
-
- if (proxy != NULL) {
- Variable* var = proxy->var();
- if (var->mode() == LET) {
- return Bailout("unsupported let compound assignment");
- }
-
- CHECK_ALIVE(VisitForValue(operation));
-
- switch (var->location()) {
- case Variable::UNALLOCATED:
- HandleGlobalVariableAssignment(var,
- Top(),
- expr->position(),
- expr->AssignmentId());
- break;
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- if (var->mode() == CONST) {
- return Bailout("unsupported const compound assignment");
- }
- Bind(var, Top());
- break;
-
- case Variable::CONTEXT: {
- // Bail out if we try to mutate a parameter value in a function
- // using the arguments object. We do not (yet) correctly handle the
- // arguments property of the function.
- if (info()->scope()->arguments() != NULL) {
- // Parameters will be allocated to context slots. We have no
- // direct way to detect that the variable is a parameter so we do
- // a linear search of the parameter variables.
- int count = info()->scope()->num_parameters();
- for (int i = 0; i < count; ++i) {
- if (var == info()->scope()->parameter(i)) {
- Bailout(
- "assignment to parameter, function uses arguments object");
- }
- }
- }
-
- HStoreContextSlot::Mode mode;
-
- switch (var->mode()) {
- case LET:
- mode = HStoreContextSlot::kCheckDeoptimize;
- break;
- case CONST:
- return ast_context()->ReturnValue(Pop());
- case CONST_HARMONY:
- // This case is checked statically so no need to
- // perform checks here
- UNREACHABLE();
- default:
- mode = HStoreContextSlot::kNoCheck;
- }
-
- HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot* instr =
- new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
- AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
- break;
- }
-
- case Variable::LOOKUP:
- return Bailout("compound assignment to lookup slot");
- }
- return ast_context()->ReturnValue(Pop());
-
- } else if (prop != NULL) {
- prop->RecordTypeFeedback(oracle(), zone());
-
- if (prop->key()->IsPropertyName()) {
- // Named property.
- CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* object = Top();
-
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map;
- HInstruction* load;
- bool monomorphic = prop->IsMonomorphic();
- if (monomorphic) {
- map = prop->GetReceiverTypes()->first();
- // We can't generate code for a monomorphic dict mode load so
- // just pretend it is not monomorphic.
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> getter;
- Handle<JSObject> holder;
- if (LookupGetter(map, name, &getter, &holder)) {
- load = BuildCallGetter(object, map, getter, holder);
- } else {
- load = BuildLoadNamedMonomorphic(object, name, prop, map);
- }
- } else {
- load = BuildLoadNamedGeneric(object, name, prop);
- }
- PushAndAdd(load);
- if (load->HasObservableSideEffects()) {
- AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
- }
-
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* right = Pop();
- HValue* left = Pop();
-
- HInstruction* instr = BuildBinaryOperation(operation, left, right);
- PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(operation->id(), REMOVABLE_SIMULATE);
- }
-
- HInstruction* store;
- if (!monomorphic || map->is_observed()) {
- // If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, instr));
- } else {
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- if (LookupSetter(map, name, &setter, &holder)) {
- store = BuildCallSetter(object, instr, map, setter, holder);
- } else {
- CHECK_ALIVE(store = BuildStoreNamedMonomorphic(object,
- name,
- instr,
- map));
- }
- }
- AddInstruction(store);
- // Drop the simulated receiver and value. Return the value.
- Drop(2);
- Push(instr);
- if (store->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
- return ast_context()->ReturnValue(Pop());
-
- } else {
- // Keyed property.
- CHECK_ALIVE(VisitForValue(prop->obj()));
- CHECK_ALIVE(VisitForValue(prop->key()));
- HValue* obj = environment()->ExpressionStackAt(1);
- HValue* key = environment()->ExpressionStackAt(0);
-
- bool has_side_effects = false;
- HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, prop, prop->LoadId(), RelocInfo::kNoPosition,
- false, // is_store
- &has_side_effects);
- Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
-
-
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* right = Pop();
- HValue* left = Pop();
-
- HInstruction* instr = BuildBinaryOperation(operation, left, right);
- PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(operation->id(), REMOVABLE_SIMULATE);
- }
-
- expr->RecordTypeFeedback(oracle(), zone());
- HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
- RelocInfo::kNoPosition,
- true, // is_store
- &has_side_effects);
-
- // Drop the simulated receiver, key, and value. Return the value.
- Drop(3);
- Push(instr);
- ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- return ast_context()->ReturnValue(Pop());
- }
-
- } else {
- return Bailout("invalid lhs in compound assignment");
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- VariableProxy* proxy = expr->target()->AsVariableProxy();
- Property* prop = expr->target()->AsProperty();
- ASSERT(proxy == NULL || prop == NULL);
-
- if (expr->is_compound()) {
- HandleCompoundAssignment(expr);
- return;
- }
-
- if (prop != NULL) {
- HandlePropertyAssignment(expr);
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
-
- if (var->mode() == CONST) {
- if (expr->op() != Token::INIT_CONST) {
- CHECK_ALIVE(VisitForValue(expr->value()));
- return ast_context()->ReturnValue(Pop());
- }
-
- if (var->IsStackAllocated()) {
- // We insert a use of the old value to detect unsupported uses of const
- // variables (e.g. initialization inside a loop).
- HValue* old_value = environment()->Lookup(var);
- AddInstruction(new(zone()) HUseConst(old_value));
- }
- } else if (var->mode() == CONST_HARMONY) {
- if (expr->op() != Token::INIT_CONST_HARMONY) {
- return Bailout("non-initializer assignment to const");
- }
- }
-
- if (proxy->IsArguments()) return Bailout("assignment to arguments");
-
- // Handle the assignment.
- switch (var->location()) {
- case Variable::UNALLOCATED:
- CHECK_ALIVE(VisitForValue(expr->value()));
- HandleGlobalVariableAssignment(var,
- Top(),
- expr->position(),
- expr->AssignmentId());
- return ast_context()->ReturnValue(Pop());
-
- case Variable::PARAMETER:
- case Variable::LOCAL: {
- // Perform an initialization check for let declared variables
- // or parameters.
- if (var->mode() == LET && expr->op() == Token::ASSIGN) {
- HValue* env_value = environment()->Lookup(var);
- if (env_value == graph()->GetConstantHole()) {
- return Bailout("assignment to let variable before initialization");
- }
- }
- // We do not allow the arguments object to occur in a context where it
- // may escape, but assignments to stack-allocated locals are
- // permitted.
- CHECK_ALIVE(VisitForValue(expr->value(), ARGUMENTS_ALLOWED));
- HValue* value = Pop();
- Bind(var, value);
- return ast_context()->ReturnValue(value);
- }
-
- case Variable::CONTEXT: {
- // Bail out if we try to mutate a parameter value in a function using
- // the arguments object. We do not (yet) correctly handle the
- // arguments property of the function.
- if (info()->scope()->arguments() != NULL) {
- // Parameters will rewrite to context slots. We have no direct way
- // to detect that the variable is a parameter.
- int count = info()->scope()->num_parameters();
- for (int i = 0; i < count; ++i) {
- if (var == info()->scope()->parameter(i)) {
- return Bailout("assignment to parameter in arguments object");
- }
- }
- }
-
- CHECK_ALIVE(VisitForValue(expr->value()));
- HStoreContextSlot::Mode mode;
- if (expr->op() == Token::ASSIGN) {
- switch (var->mode()) {
- case LET:
- mode = HStoreContextSlot::kCheckDeoptimize;
- break;
- case CONST:
- return ast_context()->ReturnValue(Pop());
- case CONST_HARMONY:
- // This case is checked statically so no need to
- // perform checks here
- UNREACHABLE();
- default:
- mode = HStoreContextSlot::kNoCheck;
- }
- } else if (expr->op() == Token::INIT_VAR ||
- expr->op() == Token::INIT_LET ||
- expr->op() == Token::INIT_CONST_HARMONY) {
- mode = HStoreContextSlot::kNoCheck;
- } else {
- ASSERT(expr->op() == Token::INIT_CONST);
-
- mode = HStoreContextSlot::kCheckIgnoreAssignment;
- }
-
- HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot* instr = new(zone()) HStoreContextSlot(
- context, var->index(), mode, Top());
- AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
- return ast_context()->ReturnValue(Pop());
- }
-
- case Variable::LOOKUP:
- return Bailout("assignment to LOOKUP variable");
- }
- } else {
- return Bailout("invalid left-hand side in assignment");
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- // We don't optimize functions with invalid left-hand sides in
- // assignments, count operations, or for-in. Consequently throw can
- // currently only occur in an effect context.
- ASSERT(ast_context()->IsEffect());
- CHECK_ALIVE(VisitForValue(expr->exception()));
-
- HValue* context = environment()->LookupContext();
- HValue* value = environment()->Pop();
- HThrow* instr = new(zone()) HThrow(context, value);
- instr->set_position(expr->position());
- AddInstruction(instr);
- AddSimulate(expr->id());
- current_block()->FinishExit(new(zone()) HAbnormalExit);
- set_current_block(NULL);
-}
-
-
-HLoadNamedField* HOptimizedGraphBuilder::BuildLoadNamedField(
- HValue* object,
- Handle<Map> map,
- LookupResult* lookup) {
- int index = lookup->GetLocalFieldIndexFromMap(*map);
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- int offset = (index * kPointerSize) + map->instance_size();
- return new(zone()) HLoadNamedField(object, true, offset);
- } else {
- // Non-negative property indices are in the properties array.
- int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- return new(zone()) HLoadNamedField(object, false, offset);
- }
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
- HValue* object,
- Handle<String> name,
- Property* expr) {
- if (expr->IsUninitialized()) {
- AddSoftDeoptimize();
- }
- HValue* context = environment()->LookupContext();
- return new(zone()) HLoadNamedGeneric(context, object, name);
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildCallGetter(
- HValue* object,
- Handle<Map> map,
- Handle<JSFunction> getter,
- Handle<JSObject> holder) {
- AddCheckConstantFunction(holder, object, map);
- AddInstruction(new(zone()) HPushArgument(object));
- return new(zone()) HCallConstantFunction(getter, 1);
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
- HValue* object,
- Handle<String> name,
- Property* expr,
- Handle<Map> map) {
- // Handle a load from a known field.
- ASSERT(!map->is_dictionary_map());
- LookupResult lookup(isolate());
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsField()) {
- AddCheckMapsWithTransitions(object, map);
- return BuildLoadNamedField(object, map, &lookup);
- }
-
- // Handle a load of a constant known function.
- if (lookup.IsConstantFunction()) {
- AddCheckMapsWithTransitions(object, map);
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
- return new(zone()) HConstant(function, Representation::Tagged());
- }
-
- // Handle a load from a known field somewhere in the prototype chain.
- LookupInPrototypes(map, name, &lookup);
- if (lookup.IsField()) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()));
- Handle<JSObject> holder(lookup.holder());
- Handle<Map> holder_map(holder->map());
- AddCheckMapsWithTransitions(object, map);
- HInstruction* holder_value = AddInstruction(
- new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
- return BuildLoadNamedField(holder_value, holder_map, &lookup);
- }
-
- // Handle a load of a constant function somewhere in the prototype chain.
- if (lookup.IsConstantFunction()) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()));
- Handle<JSObject> holder(lookup.holder());
- Handle<Map> holder_map(holder->map());
- AddCheckMapsWithTransitions(object, map);
- AddInstruction(new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*holder_map));
- return new(zone()) HConstant(function, Representation::Tagged());
- }
-
- // No luck, do a generic load.
- return BuildLoadNamedGeneric(object, name, expr);
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
- HValue* key) {
- HValue* context = environment()->LookupContext();
- return new(zone()) HLoadKeyedGeneric(context, object, key);
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
- HValue* object,
- HValue* key,
- HValue* val,
- HValue* dependency,
- Handle<Map> map,
- bool is_store) {
- HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map,
- zone(), dependency);
- AddInstruction(mapcheck);
- if (dependency) {
- mapcheck->ClearGVNFlag(kDependsOnElementsKind);
- }
- return BuildUncheckedMonomorphicElementAccess(
- object, key, val,
- mapcheck, map->instance_type() == JS_ARRAY_TYPE,
- map->elements_kind(), is_store);
-}
-
-
-HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
- HValue* object,
- HValue* key,
- HValue* val,
- SmallMapList* maps) {
- // For polymorphic loads of similar elements kinds (i.e. all tagged or all
- // double), always use the "worst case" code without a transition. This is
- // much faster than transitioning the elements to the worst case, trading a
- // HTransitionElements for a HCheckMaps, and avoiding mutation of the array.
- bool has_double_maps = false;
- bool has_smi_or_object_maps = false;
- bool has_js_array_access = false;
- bool has_non_js_array_access = false;
- Handle<Map> most_general_consolidated_map;
- for (int i = 0; i < maps->length(); ++i) {
- Handle<Map> map = maps->at(i);
- // Don't allow mixing of JSArrays with JSObjects.
- if (map->instance_type() == JS_ARRAY_TYPE) {
- if (has_non_js_array_access) return NULL;
- has_js_array_access = true;
- } else if (has_js_array_access) {
- return NULL;
- } else {
- has_non_js_array_access = true;
- }
- // Don't allow mixed, incompatible elements kinds.
- if (map->has_fast_double_elements()) {
- if (has_smi_or_object_maps) return NULL;
- has_double_maps = true;
- } else if (map->has_fast_smi_or_object_elements()) {
- if (has_double_maps) return NULL;
- has_smi_or_object_maps = true;
- } else {
- return NULL;
- }
- // Remember the most general elements kind, the code for its load will
- // properly handle all of the more specific cases.
- if ((i == 0) || IsMoreGeneralElementsKindTransition(
- most_general_consolidated_map->elements_kind(),
- map->elements_kind())) {
- most_general_consolidated_map = map;
- }
- }
- if (!has_double_maps && !has_smi_or_object_maps) return NULL;
-
- HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone());
- AddInstruction(check_maps);
- HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
- object, key, val, check_maps,
- most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
- most_general_consolidated_map->elements_kind(),
- false);
- return instr;
-}
-
-
-HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
- HValue* object,
- HValue* key,
- HValue* val,
- Expression* prop,
- BailoutId ast_id,
- int position,
- bool is_store,
- bool* has_side_effects) {
- *has_side_effects = false;
- AddInstruction(new(zone()) HCheckNonSmi(object));
- SmallMapList* maps = prop->GetReceiverTypes();
- bool todo_external_array = false;
-
- if (!is_store) {
- HInstruction* consolidated_load =
- TryBuildConsolidatedElementLoad(object, key, val, maps);
- if (consolidated_load != NULL) {
- AddInstruction(consolidated_load);
- *has_side_effects |= consolidated_load->HasObservableSideEffects();
- if (position != RelocInfo::kNoPosition) {
- consolidated_load->set_position(position);
- }
- return consolidated_load;
- }
- }
-
- static const int kNumElementTypes = kElementsKindCount;
- bool type_todo[kNumElementTypes];
- for (int i = 0; i < kNumElementTypes; ++i) {
- type_todo[i] = false;
- }
-
- // Elements_kind transition support.
- MapHandleList transition_target(maps->length());
- // Collect possible transition targets.
- MapHandleList possible_transitioned_maps(maps->length());
- for (int i = 0; i < maps->length(); ++i) {
- Handle<Map> map = maps->at(i);
- ElementsKind elements_kind = map->elements_kind();
- if (IsFastElementsKind(elements_kind) &&
- elements_kind != GetInitialFastElementsKind()) {
- possible_transitioned_maps.Add(map);
- }
- }
- // Get transition target for each map (NULL == no transition).
- for (int i = 0; i < maps->length(); ++i) {
- Handle<Map> map = maps->at(i);
- Handle<Map> transitioned_map =
- map->FindTransitionedMap(&possible_transitioned_maps);
- transition_target.Add(transitioned_map);
- }
-
- int num_untransitionable_maps = 0;
- Handle<Map> untransitionable_map;
- HTransitionElementsKind* transition = NULL;
- for (int i = 0; i < maps->length(); ++i) {
- Handle<Map> map = maps->at(i);
- ASSERT(map->IsMap());
- if (!transition_target.at(i).is_null()) {
- ASSERT(Map::IsValidElementsTransition(
- map->elements_kind(),
- transition_target.at(i)->elements_kind()));
- HValue* context = environment()->LookupContext();
- transition = new(zone()) HTransitionElementsKind(
- context, object, map, transition_target.at(i));
- AddInstruction(transition);
- } else {
- type_todo[map->elements_kind()] = true;
- if (IsExternalArrayElementsKind(map->elements_kind())) {
- todo_external_array = true;
- }
- num_untransitionable_maps++;
- untransitionable_map = map;
- }
- }
-
- // If only one map is left after transitioning, handle this case
- // monomorphically.
- if (num_untransitionable_maps == 1) {
- HInstruction* instr = NULL;
- if (untransitionable_map->has_slow_elements_kind()) {
- instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
- : BuildLoadKeyedGeneric(object, key));
- } else {
- instr = AddInstruction(BuildMonomorphicElementAccess(
- object, key, val, transition, untransitionable_map, is_store));
- }
- *has_side_effects |= instr->HasObservableSideEffects();
- if (position != RelocInfo::kNoPosition) instr->set_position(position);
- return is_store ? NULL : instr;
- }
-
- HInstruction* checkspec =
- AddInstruction(HCheckInstanceType::NewIsSpecObject(object, zone()));
- HBasicBlock* join = graph()->CreateBasicBlock();
-
- HInstruction* elements_kind_instr =
- AddInstruction(new(zone()) HElementsKind(object));
- HInstruction* elements =
- AddInstruction(new(zone()) HLoadElements(object, checkspec));
- HLoadExternalArrayPointer* external_elements = NULL;
- HInstruction* checked_key = NULL;
-
- // Generated code assumes that FAST_* and DICTIONARY_ELEMENTS ElementsKinds
- // are handled before external arrays.
- STATIC_ASSERT(FAST_SMI_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
-
- for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
- elements_kind <= LAST_ELEMENTS_KIND;
- elements_kind = ElementsKind(elements_kind + 1)) {
- // After having handled FAST_* and DICTIONARY_ELEMENTS, we need to add some
- // code that's executed for all external array cases.
- STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
- LAST_ELEMENTS_KIND);
- if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
- && todo_external_array) {
- HInstruction* length =
- AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddBoundsCheck(key, length);
- external_elements = new(zone()) HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
- }
- if (type_todo[elements_kind]) {
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareConstantEqAndBranch* elements_kind_branch =
- new(zone()) HCompareConstantEqAndBranch(
- elements_kind_instr, elements_kind, Token::EQ_STRICT);
- elements_kind_branch->SetSuccessorAt(0, if_true);
- elements_kind_branch->SetSuccessorAt(1, if_false);
- current_block()->Finish(elements_kind_branch);
-
- set_current_block(if_true);
- HInstruction* access;
- if (IsFastElementsKind(elements_kind)) {
- if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
- AddInstruction(new(zone()) HCheckMaps(
- elements, isolate()->factory()->fixed_array_map(),
- zone(), elements_kind_branch));
- }
- // TODO(jkummerow): The need for these two blocks could be avoided
- // in one of two ways:
- // (1) Introduce ElementsKinds for JSArrays that are distinct from
- // those for fast objects.
- // (2) Put the common instructions into a third "join" block. This
- // requires additional AST IDs that we can deopt to from inside
- // that join block. They must be added to the Property class (when
- // it's a keyed property) and registered in the full codegen.
- HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
- HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
- HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(object, JS_ARRAY_TYPE);
- typecheck->SetSuccessorAt(0, if_jsarray);
- typecheck->SetSuccessorAt(1, if_fastobject);
- current_block()->Finish(typecheck);
-
- set_current_block(if_jsarray);
- HInstruction* length;
- length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck,
- HType::Smi()));
- checked_key = AddBoundsCheck(key, length, ALLOW_SMI_KEY);
- access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind_branch,
- elements_kind, is_store));
- if (!is_store) {
- Push(access);
- }
-
- *has_side_effects |= access->HasObservableSideEffects();
- if (position != -1) {
- access->set_position(position);
- }
- if_jsarray->Goto(join);
-
- set_current_block(if_fastobject);
- length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddBoundsCheck(key, length, ALLOW_SMI_KEY);
- access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind_branch,
- elements_kind, is_store));
- } else if (elements_kind == DICTIONARY_ELEMENTS) {
- if (is_store) {
- access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
- } else {
- access = AddInstruction(BuildLoadKeyedGeneric(object, key));
- }
- } else { // External array elements.
- access = AddInstruction(BuildExternalArrayElementAccess(
- external_elements, checked_key, val,
- elements_kind_branch, elements_kind, is_store));
- }
- *has_side_effects |= access->HasObservableSideEffects();
- if (position != RelocInfo::kNoPosition) access->set_position(position);
- if (!is_store) {
- Push(access);
- }
- current_block()->Goto(join);
- set_current_block(if_false);
- }
- }
-
- // Deopt if none of the cases matched.
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
- join->SetJoinId(ast_id);
- set_current_block(join);
- return is_store ? NULL : Pop();
-}
-
-
-HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
- HValue* obj,
- HValue* key,
- HValue* val,
- Expression* expr,
- BailoutId ast_id,
- int position,
- bool is_store,
- bool* has_side_effects) {
- ASSERT(!expr->IsPropertyName());
- HInstruction* instr = NULL;
- if (expr->IsMonomorphic()) {
- Handle<Map> map = expr->GetMonomorphicReceiverType();
- if (map->has_slow_elements_kind()) {
- instr = is_store ? BuildStoreKeyedGeneric(obj, key, val)
- : BuildLoadKeyedGeneric(obj, key);
- } else {
- AddInstruction(new(zone()) HCheckNonSmi(obj));
- instr = BuildMonomorphicElementAccess(obj, key, val, NULL, map, is_store);
- }
- } else if (expr->GetReceiverTypes() != NULL &&
- !expr->GetReceiverTypes()->is_empty()) {
- return HandlePolymorphicElementAccess(
- obj, key, val, expr, ast_id, position, is_store, has_side_effects);
- } else {
- if (is_store) {
- instr = BuildStoreKeyedGeneric(obj, key, val);
- } else {
- instr = BuildLoadKeyedGeneric(obj, key);
- }
- }
- if (position != RelocInfo::kNoPosition) instr->set_position(position);
- AddInstruction(instr);
- *has_side_effects = instr->HasObservableSideEffects();
- return instr;
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
- HValue* object,
- HValue* key,
- HValue* value) {
- HValue* context = environment()->LookupContext();
- return new(zone()) HStoreKeyedGeneric(
- context,
- object,
- key,
- value,
- function_strict_mode_flag());
-}
-
-
-void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
- // Outermost function already has arguments on the stack.
- if (function_state()->outer() == NULL) return;
-
- if (function_state()->arguments_pushed()) return;
-
- // Push arguments when entering inlined function.
- HEnterInlined* entry = function_state()->entry();
- entry->set_arguments_pushed();
-
- ZoneList<HValue*>* arguments_values = entry->arguments_values();
-
- HInstruction* insert_after = entry;
- for (int i = 0; i < arguments_values->length(); i++) {
- HValue* argument = arguments_values->at(i);
- HInstruction* push_argument = new(zone()) HPushArgument(argument);
- push_argument->InsertAfter(insert_after);
- insert_after = push_argument;
- }
-
- HArgumentsElements* arguments_elements =
- new(zone()) HArgumentsElements(true);
- arguments_elements->ClearFlag(HValue::kUseGVN);
- arguments_elements->InsertAfter(insert_after);
- function_state()->set_arguments_elements(arguments_elements);
-}
-
-
-bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
- VariableProxy* proxy = expr->obj()->AsVariableProxy();
- if (proxy == NULL) return false;
- if (!proxy->var()->IsStackAllocated()) return false;
- if (!environment()->Lookup(proxy->var())->CheckFlag(HValue::kIsArguments)) {
- return false;
- }
-
- HInstruction* result = NULL;
- if (expr->key()->IsPropertyName()) {
- Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("length"))) return false;
-
- if (function_state()->outer() == NULL) {
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
- result = new(zone()) HArgumentsLength(elements);
- } else {
- // Number of arguments without receiver.
- int argument_count = environment()->
- arguments_environment()->parameter_count() - 1;
- result = new(zone()) HConstant(
- Handle<Object>(Smi::FromInt(argument_count), isolate()),
- Representation::Integer32());
- }
- } else {
- Push(graph()->GetArgumentsObject());
- VisitForValue(expr->key());
- if (HasStackOverflow() || current_block() == NULL) return true;
- HValue* key = Pop();
- Drop(1); // Arguments object.
- if (function_state()->outer() == NULL) {
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
- HInstruction* length = AddInstruction(
- new(zone()) HArgumentsLength(elements));
- HInstruction* checked_key = AddBoundsCheck(key, length);
- result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
- } else {
- EnsureArgumentsArePushedForAccess();
-
- // Number of arguments without receiver.
- HInstruction* elements = function_state()->arguments_elements();
- int argument_count = environment()->
- arguments_environment()->parameter_count() - 1;
- HInstruction* length = AddInstruction(new(zone()) HConstant(
- Handle<Object>(Smi::FromInt(argument_count), isolate()),
- Representation::Integer32()));
- HInstruction* checked_key = AddBoundsCheck(key, length);
- result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
- }
- }
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
-}
-
-
-void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- expr->RecordTypeFeedback(oracle(), zone());
-
- if (TryArgumentsAccess(expr)) return;
-
- CHECK_ALIVE(VisitForValue(expr->obj()));
-
- HInstruction* instr = NULL;
- if (expr->AsProperty()->IsArrayLength()) {
- HValue* array = Pop();
- AddInstruction(new(zone()) HCheckNonSmi(array));
- HInstruction* mapcheck =
- AddInstruction(HCheckInstanceType::NewIsJSArray(array, zone()));
- instr = new(zone()) HJSArrayLength(array, mapcheck);
- } else if (expr->IsStringLength()) {
- HValue* string = Pop();
- AddInstruction(new(zone()) HCheckNonSmi(string));
- AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
- instr = HStringLength::New(zone(), string);
- } else if (expr->IsStringAccess()) {
- CHECK_ALIVE(VisitForValue(expr->key()));
- HValue* index = Pop();
- HValue* string = Pop();
- HValue* context = environment()->LookupContext();
- HInstruction* char_code =
- BuildStringCharCodeAt(context, string, index);
- AddInstruction(char_code);
- instr = HStringCharFromCode::New(zone(), context, char_code);
-
- } else if (expr->IsFunctionPrototype()) {
- HValue* function = Pop();
- AddInstruction(new(zone()) HCheckNonSmi(function));
- instr = new(zone()) HLoadFunctionPrototype(function);
-
- } else if (expr->key()->IsPropertyName()) {
- Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- SmallMapList* types = expr->GetReceiverTypes();
- HValue* object = Top();
-
- Handle<Map> map;
- bool monomorphic = false;
- if (expr->IsMonomorphic()) {
- map = types->first();
- monomorphic = !map->is_dictionary_map();
- } else if (object->HasMonomorphicJSObjectType()) {
- map = object->GetMonomorphicJSObjectMap();
- monomorphic = !map->is_dictionary_map();
- }
- if (monomorphic) {
- Handle<JSFunction> getter;
- Handle<JSObject> holder;
- if (LookupGetter(map, name, &getter, &holder)) {
- AddCheckConstantFunction(holder, Top(), map);
- if (FLAG_inline_accessors && TryInlineGetter(getter, expr)) return;
- AddInstruction(new(zone()) HPushArgument(Pop()));
- instr = new(zone()) HCallConstantFunction(getter, 1);
- } else {
- instr = BuildLoadNamedMonomorphic(Pop(), name, expr, map);
- }
- } else if (types != NULL && types->length() > 1) {
- return HandlePolymorphicLoadNamedField(expr, Pop(), types, name);
- } else {
- instr = BuildLoadNamedGeneric(Pop(), name, expr);
- }
-
- } else {
- CHECK_ALIVE(VisitForValue(expr->key()));
-
- HValue* key = Pop();
- HValue* obj = Pop();
-
- bool has_side_effects = false;
- HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr, expr->id(), expr->position(),
- false, // is_store
- &has_side_effects);
- if (has_side_effects) {
- if (ast_context()->IsEffect()) {
- AddSimulate(expr->id(), REMOVABLE_SIMULATE);
- } else {
- Push(load);
- AddSimulate(expr->id(), REMOVABLE_SIMULATE);
- Drop(1);
- }
- }
- return ast_context()->ReturnValue(load);
- }
- instr->set_position(expr->position());
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
- Handle<Map> receiver_map) {
- if (!holder.is_null()) {
- Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
- AddInstruction(
- new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
- }
-}
-
-
-void HOptimizedGraphBuilder::AddCheckConstantFunction(
- Handle<JSObject> holder,
- HValue* receiver,
- Handle<Map> receiver_map) {
- // Constant functions have the nice property that the map will change if they
- // are overwritten. Therefore it is enough to check the map of the holder and
- // its prototypes.
- AddCheckMapsWithTransitions(receiver, receiver_map);
- AddCheckPrototypeMaps(holder, receiver_map);
-}
-
-
-class FunctionSorter {
- public:
- FunctionSorter() : index_(0), ticks_(0), ast_length_(0), src_length_(0) { }
- FunctionSorter(int index, int ticks, int ast_length, int src_length)
- : index_(index),
- ticks_(ticks),
- ast_length_(ast_length),
- src_length_(src_length) { }
-
- int index() const { return index_; }
- int ticks() const { return ticks_; }
- int ast_length() const { return ast_length_; }
- int src_length() const { return src_length_; }
-
- private:
- int index_;
- int ticks_;
- int ast_length_;
- int src_length_;
-};
-
-
-static int CompareHotness(void const* a, void const* b) {
- FunctionSorter const* function1 = reinterpret_cast<FunctionSorter const*>(a);
- FunctionSorter const* function2 = reinterpret_cast<FunctionSorter const*>(b);
- int diff = function1->ticks() - function2->ticks();
- if (diff != 0) return -diff;
- diff = function1->ast_length() - function2->ast_length();
- if (diff != 0) return diff;
- return function1->src_length() - function2->src_length();
-}
-
-
-void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
- Call* expr,
- HValue* receiver,
- SmallMapList* types,
- Handle<String> name) {
- // TODO(ager): We should recognize when the prototype chains for different
- // maps are identical. In that case we can avoid repeatedly generating the
- // same prototype map checks.
- int argument_count = expr->arguments()->length() + 1; // Includes receiver.
- HBasicBlock* join = NULL;
- FunctionSorter order[kMaxCallPolymorphism];
- int ordered_functions = 0;
- for (int i = 0;
- i < types->length() && ordered_functions < kMaxCallPolymorphism;
- ++i) {
- Handle<Map> map = types->at(i);
- if (expr->ComputeTarget(map, name)) {
- order[ordered_functions++] =
- FunctionSorter(i,
- expr->target()->shared()->profiler_ticks(),
- InliningAstSize(expr->target()),
- expr->target()->shared()->SourceSize());
- }
- }
-
- qsort(reinterpret_cast<void*>(&order[0]),
- ordered_functions,
- sizeof(order[0]),
- &CompareHotness);
-
- for (int fn = 0; fn < ordered_functions; ++fn) {
- int i = order[fn].index();
- Handle<Map> map = types->at(i);
- if (fn == 0) {
- // Only needed once.
- AddInstruction(new(zone()) HCheckNonSmi(receiver));
- join = graph()->CreateBasicBlock();
- }
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare =
- new(zone()) HCompareMap(receiver, map, if_true, if_false);
- current_block()->Finish(compare);
-
- set_current_block(if_true);
- expr->ComputeTarget(map, name);
- AddCheckPrototypeMaps(expr->holder(), map);
- if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
- Handle<JSFunction> caller = info()->closure();
- SmartArrayPointer<char> caller_name =
- caller->shared()->DebugName()->ToCString();
- PrintF("Trying to inline the polymorphic call to %s from %s\n",
- *name->ToCString(),
- *caller_name);
- }
- if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
- // Trying to inline will signal that we should bailout from the
- // entire compilation by setting stack overflow on the visitor.
- if (HasStackOverflow()) return;
- } else {
- HCallConstantFunction* call =
- new(zone()) HCallConstantFunction(expr->target(), argument_count);
- call->set_position(expr->position());
- PreProcessCall(call);
- AddInstruction(call);
- if (!ast_context()->IsEffect()) Push(call);
- }
-
- if (current_block() != NULL) current_block()->Goto(join);
- set_current_block(if_false);
- }
-
- // Finish up. Unconditionally deoptimize if we've handled all the maps we
- // know about and do not want to handle ones we've never seen. Otherwise
- // use a generic IC.
- if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
- } else {
- HValue* context = environment()->LookupContext();
- HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
- call->set_position(expr->position());
- PreProcessCall(call);
-
- if (join != NULL) {
- AddInstruction(call);
- if (!ast_context()->IsEffect()) Push(call);
- current_block()->Goto(join);
- } else {
- return ast_context()->ReturnInstruction(call, expr->id());
- }
- }
-
- // We assume that control flow is always live after an expression. So
- // even without predecessors to the join block, we set it as the exit
- // block and continue by adding instructions there.
- ASSERT(join != NULL);
- if (join->HasPredecessor()) {
- set_current_block(join);
- join->SetJoinId(expr->id());
- if (!ast_context()->IsEffect()) return ast_context()->ReturnValue(Pop());
- } else {
- set_current_block(NULL);
- }
-}
-
-
-void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
- Handle<JSFunction> caller,
- const char* reason) {
- if (FLAG_trace_inlining) {
- SmartArrayPointer<char> target_name =
- target->shared()->DebugName()->ToCString();
- SmartArrayPointer<char> caller_name =
- caller->shared()->DebugName()->ToCString();
- if (reason == NULL) {
- PrintF("Inlined %s called from %s.\n", *target_name, *caller_name);
- } else {
- PrintF("Did not inline %s called from %s (%s).\n",
- *target_name, *caller_name, reason);
- }
- }
-}
-
-
-static const int kNotInlinable = 1000000000;
-
-
-int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
- if (!FLAG_use_inlining) return kNotInlinable;
-
- // Precondition: call is monomorphic and we have found a target with the
- // appropriate arity.
- Handle<JSFunction> caller = info()->closure();
- Handle<SharedFunctionInfo> target_shared(target->shared());
-
- // Do a quick check on source code length to avoid parsing large
- // inlining candidates.
- if (target_shared->SourceSize() >
- Min(FLAG_max_inlined_source_size, kUnlimitedMaxInlinedSourceSize)) {
- TraceInline(target, caller, "target text too big");
- return kNotInlinable;
- }
-
- // Target must be inlineable.
- if (!target->IsInlineable()) {
- TraceInline(target, caller, "target not inlineable");
- return kNotInlinable;
- }
- if (target_shared->dont_inline() || target_shared->dont_optimize()) {
- TraceInline(target, caller, "target contains unsupported syntax [early]");
- return kNotInlinable;
- }
-
- int nodes_added = target_shared->ast_node_count();
- return nodes_added;
-}
-
-
-bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
- Handle<JSFunction> target,
- int arguments_count,
- HValue* implicit_return_value,
- BailoutId ast_id,
- BailoutId return_id,
- InliningKind inlining_kind) {
- int nodes_added = InliningAstSize(target);
- if (nodes_added == kNotInlinable) return false;
-
- Handle<JSFunction> caller = info()->closure();
-
- if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
- TraceInline(target, caller, "target AST is too large [early]");
- return false;
- }
-
-#if !defined(V8_TARGET_ARCH_IA32)
- // Target must be able to use caller's context.
- CompilationInfo* outer_info = info();
- if (target->context() != outer_info->closure()->context() ||
- outer_info->scope()->contains_with() ||
- outer_info->scope()->num_heap_slots() > 0) {
- TraceInline(target, caller, "target requires context change");
- return false;
- }
-#endif
-
-
- // Don't inline deeper than kMaxInliningLevels calls.
- HEnvironment* env = environment();
- int current_level = 1;
- while (env->outer() != NULL) {
- if (current_level == Compiler::kMaxInliningLevels) {
- TraceInline(target, caller, "inline depth limit reached");
- return false;
- }
- if (env->outer()->frame_type() == JS_FUNCTION) {
- current_level++;
- }
- env = env->outer();
- }
-
- // Don't inline recursive functions.
- for (FunctionState* state = function_state();
- state != NULL;
- state = state->outer()) {
- if (*state->compilation_info()->closure() == *target) {
- TraceInline(target, caller, "target is recursive");
- return false;
- }
- }
-
- // We don't want to add more than a certain number of nodes from inlining.
- if (inlined_count_ > Min(FLAG_max_inlined_nodes_cumulative,
- kUnlimitedMaxInlinedNodesCumulative)) {
- TraceInline(target, caller, "cumulative AST node limit reached");
- return false;
- }
-
- // Parse and allocate variables.
- CompilationInfo target_info(target, zone());
- Handle<SharedFunctionInfo> target_shared(target->shared());
- if (!ParserApi::Parse(&target_info, kNoParsingFlags) ||
- !Scope::Analyze(&target_info)) {
- if (target_info.isolate()->has_pending_exception()) {
- // Parse or scope error, never optimize this function.
- SetStackOverflow();
- target_shared->DisableOptimization("parse/scope error");
- }
- TraceInline(target, caller, "parse failure");
- return false;
- }
-
- if (target_info.scope()->num_heap_slots() > 0) {
- TraceInline(target, caller, "target has context-allocated variables");
- return false;
- }
- FunctionLiteral* function = target_info.function();
-
- // The following conditions must be checked again after re-parsing, because
- // earlier the information might not have been complete due to lazy parsing.
- nodes_added = function->ast_node_count();
- if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
- TraceInline(target, caller, "target AST is too large [late]");
- return false;
- }
- AstProperties::Flags* flags(function->flags());
- if (flags->Contains(kDontInline) || flags->Contains(kDontOptimize)) {
- TraceInline(target, caller, "target contains unsupported syntax [late]");
- return false;
- }
-
- // If the function uses the arguments object check that inlining of functions
- // with arguments object is enabled and the arguments-variable is
- // stack allocated.
- if (function->scope()->arguments() != NULL) {
- if (!FLAG_inline_arguments) {
- TraceInline(target, caller, "target uses arguments object");
- return false;
- }
-
- if (!function->scope()->arguments()->IsStackAllocated()) {
- TraceInline(target,
- caller,
- "target uses non-stackallocated arguments object");
- return false;
- }
- }
-
- // All declarations must be inlineable.
- ZoneList<Declaration*>* decls = target_info.scope()->declarations();
- int decl_count = decls->length();
- for (int i = 0; i < decl_count; ++i) {
- if (!decls->at(i)->IsInlineable()) {
- TraceInline(target, caller, "target has non-trivial declaration");
- return false;
- }
- }
-
- // Generate the deoptimization data for the unoptimized version of
- // the target function if we don't already have it.
- if (!target_shared->has_deoptimization_support()) {
- // Note that we compile here using the same AST that we will use for
- // generating the optimized inline code.
- target_info.EnableDeoptimizationSupport();
- if (!FullCodeGenerator::MakeCode(&target_info)) {
- TraceInline(target, caller, "could not generate deoptimization info");
- return false;
- }
- if (target_shared->scope_info() == ScopeInfo::Empty(isolate())) {
- // The scope info might not have been set if a lazily compiled
- // function is inlined before being called for the first time.
- Handle<ScopeInfo> target_scope_info =
- ScopeInfo::Create(target_info.scope(), zone());
- target_shared->set_scope_info(*target_scope_info);
- }
- target_shared->EnableDeoptimizationSupport(*target_info.code());
- Compiler::RecordFunctionCompilation(Logger::FUNCTION_TAG,
- &target_info,
- target_shared);
- }
-
- // ----------------------------------------------------------------
- // After this point, we've made a decision to inline this function (so
- // TryInline should always return true).
-
- // Save the pending call context and type feedback oracle. Set up new ones
- // for the inlined function.
- ASSERT(target_shared->has_deoptimization_support());
- Handle<Code> unoptimized_code(target_shared->code());
- TypeFeedbackOracle target_oracle(
- unoptimized_code,
- Handle<Context>(target->context()->native_context()),
- isolate(),
- zone());
- // The function state is new-allocated because we need to delete it
- // in two different places.
- FunctionState* target_state = new FunctionState(
- this, &target_info, &target_oracle, inlining_kind);
-
- HConstant* undefined = graph()->GetConstantUndefined();
- bool undefined_receiver = HEnvironment::UseUndefinedReceiver(
- target, function, call_kind, inlining_kind);
- HEnvironment* inner_env =
- environment()->CopyForInlining(target,
- arguments_count,
- function,
- undefined,
- function_state()->inlining_kind(),
- undefined_receiver);
-#ifdef V8_TARGET_ARCH_IA32
- // IA32 only, overwrite the caller's context in the deoptimization
- // environment with the correct one.
- //
- // TODO(kmillikin): implement the same inlining on other platforms so we
- // can remove the unsightly ifdefs in this function.
- HConstant* context =
- new(zone()) HConstant(Handle<Context>(target->context()),
- Representation::Tagged());
- AddInstruction(context);
- inner_env->BindContext(context);
-#endif
-
- AddSimulate(return_id);
- current_block()->UpdateEnvironment(inner_env);
-
- ZoneList<HValue*>* arguments_values = NULL;
-
- // If the function uses arguments copy current arguments values
- // to use them for materialization.
- if (function->scope()->arguments() != NULL) {
- HEnvironment* arguments_env = inner_env->arguments_environment();
- int arguments_count = arguments_env->parameter_count();
- arguments_values = new(zone()) ZoneList<HValue*>(arguments_count, zone());
- for (int i = 0; i < arguments_count; i++) {
- arguments_values->Add(arguments_env->Lookup(i), zone());
- }
- }
-
- HEnterInlined* enter_inlined =
- new(zone()) HEnterInlined(target,
- arguments_count,
- function,
- function_state()->inlining_kind(),
- function->scope()->arguments(),
- arguments_values,
- undefined_receiver);
- function_state()->set_entry(enter_inlined);
- AddInstruction(enter_inlined);
-
- // If the function uses arguments object create and bind one.
- if (function->scope()->arguments() != NULL) {
- ASSERT(function->scope()->arguments()->IsStackAllocated());
- inner_env->Bind(function->scope()->arguments(),
- graph()->GetArgumentsObject());
- }
-
-
- VisitDeclarations(target_info.scope()->declarations());
- VisitStatements(function->body());
- if (HasStackOverflow()) {
- // Bail out if the inline function did, as we cannot residualize a call
- // instead.
- TraceInline(target, caller, "inline graph construction failed");
- target_shared->DisableOptimization("inlining bailed out");
- inline_bailout_ = true;
- delete target_state;
- return true;
- }
-
- // Update inlined nodes count.
- inlined_count_ += nodes_added;
-
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Handle<TypeFeedbackInfo> type_info(
- TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
- graph()->update_type_change_checksum(type_info->own_type_change_checksum());
-
- TraceInline(target, caller, NULL);
-
- if (current_block() != NULL) {
- FunctionState* state = function_state();
- if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
- // Falling off the end of an inlined construct call. In a test context the
- // return value will always evaluate to true, in a value context the
- // return value is the newly allocated receiver.
- if (call_context()->IsTest()) {
- current_block()->Goto(inlined_test_context()->if_true(), state);
- } else if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), state);
- } else {
- ASSERT(call_context()->IsValue());
- current_block()->AddLeaveInlined(implicit_return_value, state);
- }
- } else if (state->inlining_kind() == SETTER_CALL_RETURN) {
- // Falling off the end of an inlined setter call. The returned value is
- // never used, the value of an assignment is always the value of the RHS
- // of the assignment.
- if (call_context()->IsTest()) {
- inlined_test_context()->ReturnValue(implicit_return_value);
- } else if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), state);
- } else {
- ASSERT(call_context()->IsValue());
- current_block()->AddLeaveInlined(implicit_return_value, state);
- }
- } else {
- // Falling off the end of a normal inlined function. This basically means
- // returning undefined.
- if (call_context()->IsTest()) {
- current_block()->Goto(inlined_test_context()->if_false(), state);
- } else if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), state);
- } else {
- ASSERT(call_context()->IsValue());
- current_block()->AddLeaveInlined(undefined, state);
- }
- }
- }
-
- // Fix up the function exits.
- if (inlined_test_context() != NULL) {
- HBasicBlock* if_true = inlined_test_context()->if_true();
- HBasicBlock* if_false = inlined_test_context()->if_false();
-
- // Pop the return test context from the expression context stack.
- ASSERT(ast_context() == inlined_test_context());
- ClearInlinedTestContext();
- delete target_state;
-
- // Forward to the real test context.
- if (if_true->HasPredecessor()) {
- if_true->SetJoinId(ast_id);
- HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
- if_true->Goto(true_target, function_state());
- }
- if (if_false->HasPredecessor()) {
- if_false->SetJoinId(ast_id);
- HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
- if_false->Goto(false_target, function_state());
- }
- set_current_block(NULL);
- return true;
-
- } else if (function_return()->HasPredecessor()) {
- function_return()->SetJoinId(ast_id);
- set_current_block(function_return());
- } else {
- set_current_block(NULL);
- }
- delete target_state;
- return true;
-}
-
-
-bool HOptimizedGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
- // The function call we are inlining is a method call if the call
- // is a property call.
- CallKind call_kind = (expr->expression()->AsProperty() == NULL)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
-
- return TryInline(call_kind,
- expr->target(),
- expr->arguments()->length(),
- NULL,
- expr->id(),
- expr->ReturnId(),
- drop_extra ? DROP_EXTRA_ON_RETURN : NORMAL_RETURN);
-}
-
-
-bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
- HValue* implicit_return_value) {
- return TryInline(CALL_AS_FUNCTION,
- expr->target(),
- expr->arguments()->length(),
- implicit_return_value,
- expr->id(),
- expr->ReturnId(),
- CONSTRUCT_CALL_RETURN);
-}
-
-
-bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
- Property* prop) {
- return TryInline(CALL_AS_METHOD,
- getter,
- 0,
- NULL,
- prop->id(),
- prop->LoadId(),
- GETTER_CALL_RETURN);
-}
-
-
-bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
- Assignment* assignment,
- HValue* implicit_return_value) {
- return TryInline(CALL_AS_METHOD,
- setter,
- 1,
- implicit_return_value,
- assignment->id(),
- assignment->AssignmentId(),
- SETTER_CALL_RETURN);
-}
-
-
-bool HOptimizedGraphBuilder::TryInlineApply(Handle<JSFunction> function,
- Call* expr,
- int arguments_count) {
- return TryInline(CALL_AS_METHOD,
- function,
- arguments_count,
- NULL,
- expr->id(),
- expr->ReturnId(),
- NORMAL_RETURN);
-}
-
-
-bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
- bool drop_extra) {
- if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
- BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
- switch (id) {
- case kMathExp:
- if (!FLAG_fast_math) break;
- // Fall through if FLAG_fast_math.
- case kMathRound:
- case kMathFloor:
- case kMathAbs:
- case kMathSqrt:
- case kMathLog:
- case kMathSin:
- case kMathCos:
- case kMathTan:
- if (expr->arguments()->length() == 1) {
- HValue* argument = Pop();
- HValue* context = environment()->LookupContext();
- Drop(1); // Receiver.
- HInstruction* op =
- HUnaryMathOperation::New(zone(), context, argument, id);
- op->set_position(expr->position());
- if (drop_extra) Drop(1); // Optionally drop the function.
- ast_context()->ReturnInstruction(op, expr->id());
- return true;
- }
- break;
- default:
- // Not supported for inlining yet.
- break;
- }
- return false;
-}
-
-
-bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
- Call* expr,
- HValue* receiver,
- Handle<Map> receiver_map,
- CheckType check_type) {
- ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
- // Try to inline calls like Math.* as operations in the calling function.
- if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
- BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
- int argument_count = expr->arguments()->length() + 1; // Plus receiver.
- switch (id) {
- case kStringCharCodeAt:
- case kStringCharAt:
- if (argument_count == 2 && check_type == STRING_CHECK) {
- HValue* index = Pop();
- HValue* string = Pop();
- HValue* context = environment()->LookupContext();
- ASSERT(!expr->holder().is_null());
- AddInstruction(new(zone()) HCheckPrototypeMaps(
- oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK),
- expr->holder(),
- zone()));
- HInstruction* char_code =
- BuildStringCharCodeAt(context, string, index);
- if (id == kStringCharCodeAt) {
- ast_context()->ReturnInstruction(char_code, expr->id());
- return true;
- }
- AddInstruction(char_code);
- HInstruction* result =
- HStringCharFromCode::New(zone(), context, char_code);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
- }
- break;
- case kMathExp:
- if (!FLAG_fast_math) break;
- // Fall through if FLAG_fast_math.
- case kMathRound:
- case kMathFloor:
- case kMathAbs:
- case kMathSqrt:
- case kMathLog:
- case kMathSin:
- case kMathCos:
- case kMathTan:
- if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
- HValue* argument = Pop();
- HValue* context = environment()->LookupContext();
- Drop(1); // Receiver.
- HInstruction* op =
- HUnaryMathOperation::New(zone(), context, argument, id);
- op->set_position(expr->position());
- ast_context()->ReturnInstruction(op, expr->id());
- return true;
- }
- break;
- case kMathPow:
- if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
- HValue* right = Pop();
- HValue* left = Pop();
- Pop(); // Pop receiver.
- HValue* context = environment()->LookupContext();
- HInstruction* result = NULL;
- // Use sqrt() if exponent is 0.5 or -0.5.
- if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
- double exponent = HConstant::cast(right)->DoubleValue();
- if (exponent == 0.5) {
- result =
- HUnaryMathOperation::New(zone(), context, left, kMathPowHalf);
- } else if (exponent == -0.5) {
- HConstant* double_one =
- new(zone()) HConstant(Handle<Object>(Smi::FromInt(1),
- isolate()),
- Representation::Double());
- AddInstruction(double_one);
- HInstruction* sqrt =
- HUnaryMathOperation::New(zone(), context, left, kMathPowHalf);
- AddInstruction(sqrt);
- // MathPowHalf doesn't have side effects so there's no need for
- // an environment simulation here.
- ASSERT(!sqrt->HasObservableSideEffects());
- result = HDiv::New(zone(), context, double_one, sqrt);
- } else if (exponent == 2.0) {
- result = HMul::New(zone(), context, left, left);
- }
- } else if (right->IsConstant() &&
- HConstant::cast(right)->HasInteger32Value() &&
- HConstant::cast(right)->Integer32Value() == 2) {
- result = HMul::New(zone(), context, left, left);
- }
-
- if (result == NULL) {
- result = HPower::New(zone(), left, right);
- }
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
- }
- break;
- case kMathRandom:
- if (argument_count == 1 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
- Drop(1); // Receiver.
- HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
- HRandom* result = new(zone()) HRandom(global_object);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
- }
- break;
- case kMathMax:
- case kMathMin:
- if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
- HValue* right = Pop();
- HValue* left = Pop();
- Drop(1); // Receiver.
- HValue* context = environment()->LookupContext();
- HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
- : HMathMinMax::kMathMax;
- HInstruction* result =
- HMathMinMax::New(zone(), context, left, right, op);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
- }
- break;
- default:
- // Not yet supported for inlining.
- break;
- }
- return false;
-}
-
-
-bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
- Expression* callee = expr->expression();
- Property* prop = callee->AsProperty();
- ASSERT(prop != NULL);
-
- if (!expr->IsMonomorphic() || expr->check_type() != RECEIVER_MAP_CHECK) {
- return false;
- }
- Handle<Map> function_map = expr->GetReceiverTypes()->first();
- if (function_map->instance_type() != JS_FUNCTION_TYPE ||
- !expr->target()->shared()->HasBuiltinFunctionId() ||
- expr->target()->shared()->builtin_function_id() != kFunctionApply) {
- return false;
- }
-
- if (info()->scope()->arguments() == NULL) return false;
-
- ZoneList<Expression*>* args = expr->arguments();
- if (args->length() != 2) return false;
-
- VariableProxy* arg_two = args->at(1)->AsVariableProxy();
- if (arg_two == NULL || !arg_two->var()->IsStackAllocated()) return false;
- HValue* arg_two_value = environment()->Lookup(arg_two->var());
- if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
-
- // Found pattern f.apply(receiver, arguments).
- VisitForValue(prop->obj());
- if (HasStackOverflow() || current_block() == NULL) return true;
- HValue* function = Top();
- AddCheckConstantFunction(expr->holder(), function, function_map);
- Drop(1);
-
- VisitForValue(args->at(0));
- if (HasStackOverflow() || current_block() == NULL) return true;
- HValue* receiver = Pop();
-
- if (function_state()->outer() == NULL) {
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
- HInstruction* length =
- AddInstruction(new(zone()) HArgumentsLength(elements));
- HValue* wrapped_receiver =
- AddInstruction(new(zone()) HWrapReceiver(receiver, function));
- HInstruction* result =
- new(zone()) HApplyArguments(function,
- wrapped_receiver,
- length,
- elements);
- result->set_position(expr->position());
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
- } else {
- // We are inside inlined function and we know exactly what is inside
- // arguments object. But we need to be able to materialize at deopt.
- // TODO(mstarzinger): For now we just ensure arguments are pushed
- // right after HEnterInlined, but we could be smarter about this.
- EnsureArgumentsArePushedForAccess();
- ASSERT_EQ(environment()->arguments_environment()->parameter_count(),
- function_state()->entry()->arguments_values()->length());
- HEnterInlined* entry = function_state()->entry();
- ZoneList<HValue*>* arguments_values = entry->arguments_values();
- int arguments_count = arguments_values->length();
- PushAndAdd(new(zone()) HWrapReceiver(receiver, function));
- for (int i = 1; i < arguments_count; i++) {
- Push(arguments_values->at(i));
- }
-
- Handle<JSFunction> known_function;
- if (function->IsConstant()) {
- HConstant* constant_function = HConstant::cast(function);
- known_function = Handle<JSFunction>::cast(constant_function->handle());
- int args_count = arguments_count - 1; // Excluding receiver.
- if (TryInlineApply(known_function, expr, args_count)) return true;
- }
-
- Drop(arguments_count - 1);
- PushAndAdd(new(zone()) HPushArgument(Pop()));
- for (int i = 1; i < arguments_count; i++) {
- PushAndAdd(new(zone()) HPushArgument(arguments_values->at(i)));
- }
-
- HValue* context = environment()->LookupContext();
- HInvokeFunction* call = new(zone()) HInvokeFunction(
- context,
- function,
- known_function,
- arguments_count);
- Drop(arguments_count);
- call->set_position(expr->position());
- ast_context()->ReturnInstruction(call, expr->id());
- return true;
- }
-}
-
-
-// Checks if all maps in |types| are from the same family, i.e., are elements
-// transitions of each other. Returns either NULL if they are not from the same
-// family, or a Map* indicating the map with the first elements kind of the
-// family that is in the list.
-static Map* CheckSameElementsFamily(SmallMapList* types) {
- if (types->length() <= 1) return NULL;
- // Check if all maps belong to the same transition family.
- Map* kinds[kFastElementsKindCount];
- Map* first_map = *types->first();
- ElementsKind first_kind = first_map->elements_kind();
- if (!IsFastElementsKind(first_kind)) return NULL;
- int first_index = GetSequenceIndexFromFastElementsKind(first_kind);
- int last_index = first_index;
-
- for (int i = 0; i < kFastElementsKindCount; i++) kinds[i] = NULL;
-
- kinds[first_index] = first_map;
-
- for (int i = 1; i < types->length(); ++i) {
- Map* map = *types->at(i);
- ElementsKind elements_kind = map->elements_kind();
- if (!IsFastElementsKind(elements_kind)) return NULL;
- int index = GetSequenceIndexFromFastElementsKind(elements_kind);
- if (index < first_index) {
- first_index = index;
- } else if (index > last_index) {
- last_index = index;
- } else if (kinds[index] != map) {
- return NULL;
- }
- kinds[index] = map;
- }
-
- Map* current = kinds[first_index];
- for (int i = first_index + 1; i <= last_index; i++) {
- Map* next = kinds[i];
- if (next != NULL) {
- ElementsKind current_kind = next->elements_kind();
- if (next != current->LookupElementsTransitionMap(current_kind)) {
- return NULL;
- }
- current = next;
- }
- }
-
- return kinds[first_index];
-}
-
-
-void HOptimizedGraphBuilder::VisitCall(Call* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- Expression* callee = expr->expression();
- int argument_count = expr->arguments()->length() + 1; // Plus receiver.
- HInstruction* call = NULL;
-
- Property* prop = callee->AsProperty();
- if (prop != NULL) {
- if (!prop->key()->IsPropertyName()) {
- // Keyed function call.
- CHECK_ALIVE(VisitArgument(prop->obj()));
-
- CHECK_ALIVE(VisitForValue(prop->key()));
- // Push receiver and key like the non-optimized code generator expects it.
- HValue* key = Pop();
- HValue* receiver = Pop();
- Push(key);
- Push(receiver);
-
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
-
- HValue* context = environment()->LookupContext();
- call = new(zone()) HCallKeyed(context, key, argument_count);
- call->set_position(expr->position());
- Drop(argument_count + 1); // 1 is the key.
- return ast_context()->ReturnInstruction(call, expr->id());
- }
-
- // Named function call.
- expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
-
- if (TryCallApply(expr)) return;
-
- CHECK_ALIVE(VisitForValue(prop->obj()));
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
-
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- SmallMapList* types = expr->GetReceiverTypes();
-
- bool monomorphic = expr->IsMonomorphic();
- Handle<Map> receiver_map;
- if (monomorphic) {
- receiver_map = (types == NULL || types->is_empty())
- ? Handle<Map>::null()
- : types->first();
- } else {
- Map* family_map = CheckSameElementsFamily(types);
- if (family_map != NULL) {
- receiver_map = Handle<Map>(family_map);
- monomorphic = expr->ComputeTarget(receiver_map, name);
- }
- }
-
- HValue* receiver =
- environment()->ExpressionStackAt(expr->arguments()->length());
- if (monomorphic) {
- if (TryInlineBuiltinMethodCall(expr,
- receiver,
- receiver_map,
- expr->check_type())) {
- if (FLAG_trace_inlining) {
- PrintF("Inlining builtin ");
- expr->target()->ShortPrint();
- PrintF("\n");
- }
- return;
- }
-
- if (CallStubCompiler::HasCustomCallGenerator(expr->target()) ||
- expr->check_type() != RECEIVER_MAP_CHECK) {
- // When the target has a custom call IC generator, use the IC,
- // because it is likely to generate better code. Also use the IC
- // when a primitive receiver check is required.
- HValue* context = environment()->LookupContext();
- call = PreProcessCall(
- new(zone()) HCallNamed(context, name, argument_count));
- } else {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
-
- if (TryInlineCall(expr)) return;
- call = PreProcessCall(
- new(zone()) HCallConstantFunction(expr->target(),
- argument_count));
- }
- } else if (types != NULL && types->length() > 1) {
- ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
- HandlePolymorphicCallNamed(expr, receiver, types, name);
- return;
-
- } else {
- HValue* context = environment()->LookupContext();
- call = PreProcessCall(
- new(zone()) HCallNamed(context, name, argument_count));
- }
-
- } else {
- expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
-
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- return Bailout("possible direct call to eval");
- }
-
- if (global_call) {
- Variable* var = proxy->var();
- bool known_global_function = false;
- // If there is a global property cell for the name at compile time and
- // access check is not enabled we assume that the function will not change
- // and generate optimized code for calling the function.
- LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
- if (type == kUseCell &&
- !info()->global_object()->IsAccessCheckNeeded()) {
- Handle<GlobalObject> global(info()->global_object());
- known_global_function = expr->ComputeGlobalTarget(global, &lookup);
- }
- if (known_global_function) {
- // Push the global object instead of the global receiver because
- // code generated by the full code generator expects it.
- HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- PushAndAdd(global_object);
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
-
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Pop();
- AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
-
- // Replace the global object with the global receiver.
- HGlobalReceiver* global_receiver =
- new(zone()) HGlobalReceiver(global_object);
- // Index of the receiver from the top of the expression stack.
- const int receiver_index = argument_count - 1;
- AddInstruction(global_receiver);
- ASSERT(environment()->ExpressionStackAt(receiver_index)->
- IsGlobalObject());
- environment()->SetExpressionStackAt(receiver_index, global_receiver);
-
- if (TryInlineBuiltinFunctionCall(expr, false)) { // Nothing to drop.
- if (FLAG_trace_inlining) {
- PrintF("Inlining builtin ");
- expr->target()->ShortPrint();
- PrintF("\n");
- }
- return;
- }
- if (TryInlineCall(expr)) return;
-
- if (expr->target().is_identical_to(info()->closure())) {
- graph()->MarkRecursive();
- }
-
- call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
- argument_count));
- } else {
- HValue* context = environment()->LookupContext();
- HGlobalObject* receiver = new(zone()) HGlobalObject(context);
- if (var->is_qml_global()) receiver->set_qml_global(true);
- AddInstruction(receiver);
- PushAndAdd(new(zone()) HPushArgument(receiver));
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
-
- call = new(zone()) HCallGlobal(context, var->name(), argument_count);
- if (var->is_qml_global()) {
- static_cast<HCallGlobal*>(call)->set_qml_global(true);
- }
- Drop(argument_count);
- }
-
- } else if (expr->IsMonomorphic()) {
- // The function is on the stack in the unoptimized code during
- // evaluation of the arguments.
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
- HValue* context = environment()->LookupContext();
- HGlobalObject* global = new(zone()) HGlobalObject(context);
- AddInstruction(global);
- HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
- PushAndAdd(receiver);
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
- AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
-
- if (TryInlineBuiltinFunctionCall(expr, true)) { // Drop the function.
- if (FLAG_trace_inlining) {
- PrintF("Inlining builtin ");
- expr->target()->ShortPrint();
- PrintF("\n");
- }
- return;
- }
-
- if (TryInlineCall(expr, true)) { // Drop function from environment.
- return;
- } else {
- call = PreProcessCall(
- new(zone()) HInvokeFunction(context,
- function,
- expr->target(),
- argument_count));
- Drop(1); // The function.
- }
-
- } else {
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
- HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
- HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
- AddInstruction(receiver);
- PushAndAdd(new(zone()) HPushArgument(receiver));
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
-
- call = new(zone()) HCallFunction(context, function, argument_count);
- Drop(argument_count + 1);
- }
- }
-
- call->set_position(expr->position());
- return ast_context()->ReturnInstruction(call, expr->id());
-}
-
-
-// Checks whether allocation using the given constructor can be inlined.
-static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
- return constructor->has_initial_map() &&
- constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
- constructor->initial_map()->instance_size() < HAllocateObject::kMaxSize;
-}
-
-
-void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- expr->RecordTypeFeedback(oracle());
- int argument_count = expr->arguments()->length() + 1; // Plus constructor.
- HValue* context = environment()->LookupContext();
-
- if (FLAG_inline_construct &&
- expr->IsMonomorphic() &&
- IsAllocationInlineable(expr->target())) {
- // The constructor function is on the stack in the unoptimized code
- // during evaluation of the arguments.
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
- Handle<JSFunction> constructor = expr->target();
- HValue* check = AddInstruction(
- new(zone()) HCheckFunction(function, constructor));
-
- // Force completion of inobject slack tracking before generating
- // allocation code to finalize instance size.
- if (constructor->shared()->IsInobjectSlackTrackingInProgress()) {
- constructor->shared()->CompleteInobjectSlackTracking();
- }
-
- // Replace the constructor function with a newly allocated receiver.
- HInstruction* receiver = new(zone()) HAllocateObject(context, constructor);
- // Index of the receiver from the top of the expression stack.
- const int receiver_index = argument_count - 1;
- AddInstruction(receiver);
- ASSERT(environment()->ExpressionStackAt(receiver_index) == function);
- environment()->SetExpressionStackAt(receiver_index, receiver);
-
- if (TryInlineConstruct(expr, receiver)) return;
-
- // TODO(mstarzinger): For now we remove the previous HAllocateObject and
- // add HPushArgument for the arguments in case inlining failed. What we
- // actually should do is emit HInvokeFunction on the constructor instead
- // of using HCallNew as a fallback.
- receiver->DeleteAndReplaceWith(NULL);
- check->DeleteAndReplaceWith(NULL);
- environment()->SetExpressionStackAt(receiver_index, function);
- HInstruction* call = PreProcessCall(
- new(zone()) HCallNew(context, function, argument_count));
- call->set_position(expr->position());
- return ast_context()->ReturnInstruction(call, expr->id());
- } else {
- // The constructor function is both an operand to the instruction and an
- // argument to the construct call.
- CHECK_ALIVE(VisitArgument(expr->expression()));
- HValue* constructor = HPushArgument::cast(Top())->argument();
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- HCallNew* call;
- if (FLAG_optimize_constructed_arrays &&
- !(expr->target().is_null()) &&
- *(expr->target()) == isolate()->global_context()->array_function()) {
- Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId());
- ASSERT(feedback->IsSmi());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(feedback);
- AddInstruction(new(zone()) HCheckFunction(constructor,
- Handle<JSFunction>(isolate()->global_context()->array_function())));
- call = new(zone()) HCallNewArray(context, constructor, argument_count,
- cell);
- } else {
- call = new(zone()) HCallNew(context, constructor, argument_count);
- }
- Drop(argument_count);
- call->set_position(expr->position());
- return ast_context()->ReturnInstruction(call, expr->id());
- }
-}
-
-
-// Support for generating inlined runtime functions.
-
-// Lookup table for generators for runtime calls that are generated inline.
-// Elements of the table are member pointers to functions of
-// HOptimizedGraphBuilder.
-#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
- &HOptimizedGraphBuilder::Generate##Name,
-
-const HOptimizedGraphBuilder::InlineFunctionGenerator
- HOptimizedGraphBuilder::kInlineFunctionGenerators[] = {
- INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
-};
-#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-
-
-void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- if (expr->is_jsruntime()) {
- return Bailout("call to a JavaScript runtime function");
- }
-
- const Runtime::Function* function = expr->function();
- ASSERT(function != NULL);
- if (function->intrinsic_type == Runtime::INLINE) {
- ASSERT(expr->name()->length() > 0);
- ASSERT(expr->name()->Get(0) == '_');
- // Call to an inline function.
- int lookup_index = static_cast<int>(function->function_id) -
- static_cast<int>(Runtime::kFirstInlineFunction);
- ASSERT(lookup_index >= 0);
- ASSERT(static_cast<size_t>(lookup_index) <
- ARRAY_SIZE(kInlineFunctionGenerators));
- InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
-
- // Call the inline code generator using the pointer-to-member.
- (this->*generator)(expr);
- } else {
- ASSERT(function->intrinsic_type == Runtime::RUNTIME);
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
-
- HValue* context = environment()->LookupContext();
- Handle<String> name = expr->name();
- int argument_count = expr->arguments()->length();
- HCallRuntime* call =
- new(zone()) HCallRuntime(context, name, function, argument_count);
- Drop(argument_count);
- return ast_context()->ReturnInstruction(call, expr->id());
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- switch (expr->op()) {
- case Token::DELETE: return VisitDelete(expr);
- case Token::VOID: return VisitVoid(expr);
- case Token::TYPEOF: return VisitTypeof(expr);
- case Token::ADD: return VisitAdd(expr);
- case Token::SUB: return VisitSub(expr);
- case Token::BIT_NOT: return VisitBitNot(expr);
- case Token::NOT: return VisitNot(expr);
- default: UNREACHABLE();
- }
-}
-
-void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
- Property* prop = expr->expression()->AsProperty();
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (prop != NULL) {
- CHECK_ALIVE(VisitForValue(prop->obj()));
- CHECK_ALIVE(VisitForValue(prop->key()));
- HValue* key = Pop();
- HValue* obj = Pop();
- HValue* context = environment()->LookupContext();
- HDeleteProperty* instr = new(zone()) HDeleteProperty(context, obj, key);
- return ast_context()->ReturnInstruction(instr, expr->id());
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
- if (var->IsUnallocated()) {
- Bailout("delete with global variable");
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
- // Result of deleting non-global variables is false. 'this' is not
- // really a variable, though we implement it as one. The
- // subexpression does not have side effects.
- HValue* value = var->is_this()
- ? graph()->GetConstantTrue()
- : graph()->GetConstantFalse();
- return ast_context()->ReturnValue(value);
- } else {
- Bailout("delete with non-global variable");
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // Evaluate the subexpression for side effects.
- CHECK_ALIVE(VisitForEffect(expr->expression()));
- return ast_context()->ReturnValue(graph()->GetConstantTrue());
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) {
- CHECK_ALIVE(VisitForEffect(expr->expression()));
- return ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-
-void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
- CHECK_ALIVE(VisitForTypeOf(expr->expression()));
- HValue* value = Pop();
- HValue* context = environment()->LookupContext();
- HInstruction* instr = new(zone()) HTypeof(context, value);
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::VisitAdd(UnaryOperation* expr) {
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* value = Pop();
- HValue* context = environment()->LookupContext();
- HInstruction* instr =
- HMul::New(zone(), context, value, graph()->GetConstant1());
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* value = Pop();
- HValue* context = environment()->LookupContext();
- HInstruction* instr =
- HMul::New(zone(), context, value, graph()->GetConstantMinus1());
- TypeInfo info = oracle()->UnaryType(expr);
- Representation rep = ToRepresentation(info);
- if (info.IsUninitialized()) {
- AddSoftDeoptimize();
- info = TypeInfo::Unknown();
- }
- if (instr->IsBinaryOperation()) {
- HBinaryOperation::cast(instr)->set_observed_input_representation(rep, rep);
- }
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* value = Pop();
- TypeInfo info = oracle()->UnaryType(expr);
- if (info.IsUninitialized()) {
- AddSoftDeoptimize();
- }
- HInstruction* instr = new(zone()) HBitNot(value);
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
- if (ast_context()->IsTest()) {
- TestContext* context = TestContext::cast(ast_context());
- VisitForControl(expr->expression(),
- context->if_false(),
- context->if_true());
- return;
- }
-
- if (ast_context()->IsEffect()) {
- VisitForEffect(expr->expression());
- return;
- }
-
- ASSERT(ast_context()->IsValue());
- HBasicBlock* materialize_false = graph()->CreateBasicBlock();
- HBasicBlock* materialize_true = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(expr->expression(),
- materialize_false,
- materialize_true));
-
- if (materialize_false->HasPredecessor()) {
- materialize_false->SetJoinId(expr->MaterializeFalseId());
- set_current_block(materialize_false);
- Push(graph()->GetConstantFalse());
- } else {
- materialize_false = NULL;
- }
-
- if (materialize_true->HasPredecessor()) {
- materialize_true->SetJoinId(expr->MaterializeTrueId());
- set_current_block(materialize_true);
- Push(graph()->GetConstantTrue());
- } else {
- materialize_true = NULL;
- }
-
- HBasicBlock* join =
- CreateJoin(materialize_false, materialize_true, expr->id());
- set_current_block(join);
- if (join != NULL) return ast_context()->ReturnValue(Pop());
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildIncrement(
- bool returns_original_input,
- CountOperation* expr) {
- // The input to the count operation is on top of the expression stack.
- TypeInfo info = oracle()->IncrementType(expr);
- Representation rep = ToRepresentation(info);
- if (rep.IsTagged()) {
- rep = Representation::Integer32();
- }
-
- if (returns_original_input) {
- // We need an explicit HValue representing ToNumber(input). The
- // actual HChange instruction we need is (sometimes) added in a later
- // phase, so it is not available now to be used as an input to HAdd and
- // as the return value.
- HInstruction* number_input = new(zone()) HForceRepresentation(Pop(), rep);
- AddInstruction(number_input);
- Push(number_input);
- }
-
- // The addition has no side effects, so we do not need
- // to simulate the expression stack after this instruction.
- // Any later failures deopt to the load of the input or earlier.
- HConstant* delta = (expr->op() == Token::INC)
- ? graph()->GetConstant1()
- : graph()->GetConstantMinus1();
- HValue* context = environment()->LookupContext();
- HInstruction* instr = HAdd::New(zone(), context, Top(), delta);
- // We can't insert a simulate here, because it would break deoptimization,
- // so the HAdd must not have side effects, so we must freeze its
- // representation.
- instr->AssumeRepresentation(rep);
- instr->ClearAllSideEffects();
- AddInstruction(instr);
- return instr;
-}
-
-
-void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- Expression* target = expr->expression();
- VariableProxy* proxy = target->AsVariableProxy();
- Property* prop = target->AsProperty();
- if (proxy == NULL && prop == NULL) {
- return Bailout("invalid lhs in count operation");
- }
-
- // Match the full code generator stack by simulating an extra stack
- // element for postfix operations in a non-effect context. The return
- // value is ToNumber(input).
- bool returns_original_input =
- expr->is_postfix() && !ast_context()->IsEffect();
- HValue* input = NULL; // ToNumber(original_input).
- HValue* after = NULL; // The result after incrementing or decrementing.
-
- if (proxy != NULL) {
- Variable* var = proxy->var();
- if (var->mode() == CONST) {
- return Bailout("unsupported count operation with const");
- }
- // Argument of the count operation is a variable, not a property.
- ASSERT(prop == NULL);
- CHECK_ALIVE(VisitForValue(target));
-
- after = BuildIncrement(returns_original_input, expr);
- input = returns_original_input ? Top() : Pop();
- Push(after);
-
- switch (var->location()) {
- case Variable::UNALLOCATED:
- HandleGlobalVariableAssignment(var,
- after,
- expr->position(),
- expr->AssignmentId());
- break;
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- Bind(var, after);
- break;
-
- case Variable::CONTEXT: {
- // Bail out if we try to mutate a parameter value in a function
- // using the arguments object. We do not (yet) correctly handle the
- // arguments property of the function.
- if (info()->scope()->arguments() != NULL) {
- // Parameters will rewrite to context slots. We have no direct
- // way to detect that the variable is a parameter so we use a
- // linear search of the parameter list.
- int count = info()->scope()->num_parameters();
- for (int i = 0; i < count; ++i) {
- if (var == info()->scope()->parameter(i)) {
- return Bailout("assignment to parameter in arguments object");
- }
- }
- }
-
- HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot::Mode mode = IsLexicalVariableMode(var->mode())
- ? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
- HStoreContextSlot* instr =
- new(zone()) HStoreContextSlot(context, var->index(), mode, after);
- AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
- break;
- }
-
- case Variable::LOOKUP:
- return Bailout("lookup variable in count operation");
- }
-
- } else {
- // Argument of the count operation is a property.
- ASSERT(prop != NULL);
- prop->RecordTypeFeedback(oracle(), zone());
-
- if (prop->key()->IsPropertyName()) {
- // Named property.
- if (returns_original_input) Push(graph()->GetConstantUndefined());
-
- CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* object = Top();
-
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map;
- HInstruction* load;
- bool monomorphic = prop->IsMonomorphic();
- if (monomorphic) {
- map = prop->GetReceiverTypes()->first();
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> getter;
- Handle<JSObject> holder;
- if (LookupGetter(map, name, &getter, &holder)) {
- load = BuildCallGetter(object, map, getter, holder);
- } else {
- load = BuildLoadNamedMonomorphic(object, name, prop, map);
- }
- } else {
- load = BuildLoadNamedGeneric(object, name, prop);
- }
- PushAndAdd(load);
- if (load->HasObservableSideEffects()) {
- AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
- }
-
- after = BuildIncrement(returns_original_input, expr);
- input = Pop();
-
- HInstruction* store;
- if (!monomorphic || map->is_observed()) {
- // If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, after));
- } else {
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- if (LookupSetter(map, name, &setter, &holder)) {
- store = BuildCallSetter(object, after, map, setter, holder);
- } else {
- CHECK_ALIVE(store = BuildStoreNamedMonomorphic(object,
- name,
- after,
- map));
- }
- }
- AddInstruction(store);
-
- // Overwrite the receiver in the bailout environment with the result
- // of the operation, and the placeholder with the original value if
- // necessary.
- environment()->SetExpressionStackAt(0, after);
- if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- if (store->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
-
- } else {
- // Keyed property.
- if (returns_original_input) Push(graph()->GetConstantUndefined());
-
- CHECK_ALIVE(VisitForValue(prop->obj()));
- CHECK_ALIVE(VisitForValue(prop->key()));
- HValue* obj = environment()->ExpressionStackAt(1);
- HValue* key = environment()->ExpressionStackAt(0);
-
- bool has_side_effects = false;
- HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, prop, prop->LoadId(), RelocInfo::kNoPosition,
- false, // is_store
- &has_side_effects);
- Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
-
- after = BuildIncrement(returns_original_input, expr);
- input = Pop();
-
- expr->RecordTypeFeedback(oracle(), zone());
- HandleKeyedElementAccess(obj, key, after, expr, expr->AssignmentId(),
- RelocInfo::kNoPosition,
- true, // is_store
- &has_side_effects);
-
- // Drop the key from the bailout environment. Overwrite the receiver
- // with the result of the operation, and the placeholder with the
- // original value if necessary.
- Drop(1);
- environment()->SetExpressionStackAt(0, after);
- if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
- }
-
- Drop(returns_original_input ? 2 : 1);
- return ast_context()->ReturnValue(expr->is_postfix() ? input : after);
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
- HValue* context,
- HValue* string,
- HValue* index) {
- if (string->IsConstant() && index->IsConstant()) {
- HConstant* c_string = HConstant::cast(string);
- HConstant* c_index = HConstant::cast(index);
- if (c_string->HasStringValue() && c_index->HasNumberValue()) {
- int32_t i = c_index->NumberValueAsInteger32();
- Handle<String> s = c_string->StringValue();
- if (i < 0 || i >= s->length()) {
- return new(zone()) HConstant(OS::nan_value(), Representation::Double());
- }
- return new(zone()) HConstant(s->Get(i), Representation::Integer32());
- }
- }
- AddInstruction(new(zone()) HCheckNonSmi(string));
- AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
- HInstruction* length = HStringLength::New(zone(), string);
- AddInstruction(length);
- HInstruction* checked_index = AddBoundsCheck(index, length);
- return new(zone()) HStringCharCodeAt(context, string, checked_index);
-}
-
-// Checks if the given shift amounts have form: (sa) and (32 - sa).
-static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
- HValue* const32_minus_sa) {
- if (!const32_minus_sa->IsSub()) return false;
- HSub* sub = HSub::cast(const32_minus_sa);
- if (sa != sub->right()) return false;
- HValue* const32 = sub->left();
- if (!const32->IsConstant() ||
- HConstant::cast(const32)->Integer32Value() != 32) {
- return false;
- }
- return (sub->right() == sa);
-}
-
-
-// Checks if the left and the right are shift instructions with the oposite
-// directions that can be replaced by one rotate right instruction or not.
-// Returns the operand and the shift amount for the rotate instruction in the
-// former case.
-bool HOptimizedGraphBuilder::MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount) {
- HShl* shl;
- HShr* shr;
- if (left->IsShl() && right->IsShr()) {
- shl = HShl::cast(left);
- shr = HShr::cast(right);
- } else if (left->IsShr() && right->IsShl()) {
- shl = HShl::cast(right);
- shr = HShr::cast(left);
- } else {
- return false;
- }
- if (shl->left() != shr->left()) return false;
-
- if (!ShiftAmountsAllowReplaceByRotate(shl->right(), shr->right()) &&
- !ShiftAmountsAllowReplaceByRotate(shr->right(), shl->right())) {
- return false;
- }
- *operand= shr->left();
- *shift_amount = shr->right();
- return true;
-}
-
-
-bool CanBeZero(HValue *right) {
- if (right->IsConstant()) {
- HConstant* right_const = HConstant::cast(right);
- if (right_const->HasInteger32Value() &&
- (right_const->Integer32Value() & 0x1f) != 0) {
- return false;
- }
- }
- return true;
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
- BinaryOperation* expr,
- HValue* left,
- HValue* right) {
- HValue* context = environment()->LookupContext();
- TypeInfo left_info, right_info, result_info, combined_info;
- oracle()->BinaryType(expr, &left_info, &right_info, &result_info);
- Representation left_rep = ToRepresentation(left_info);
- Representation right_rep = ToRepresentation(right_info);
- Representation result_rep = ToRepresentation(result_info);
- if (left_info.IsUninitialized()) {
- // Can't have initialized one but not the other.
- ASSERT(right_info.IsUninitialized());
- AddSoftDeoptimize();
- left_info = right_info = TypeInfo::Unknown();
- }
- HInstruction* instr = NULL;
- switch (expr->op()) {
- case Token::ADD:
- if (left_info.IsString() && right_info.IsString()) {
- AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
- AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
- instr = HStringAdd::New(zone(), context, left, right);
- } else {
- instr = HAdd::New(zone(), context, left, right);
- }
- break;
- case Token::SUB:
- instr = HSub::New(zone(), context, left, right);
- break;
- case Token::MUL:
- instr = HMul::New(zone(), context, left, right);
- break;
- case Token::MOD:
- instr = HMod::New(zone(), context, left, right);
- break;
- case Token::DIV:
- instr = HDiv::New(zone(), context, left, right);
- break;
- case Token::BIT_XOR:
- case Token::BIT_AND:
- instr = HBitwise::New(zone(), expr->op(), context, left, right);
- break;
- case Token::BIT_OR: {
- HValue* operand, *shift_amount;
- if (left_info.IsInteger32() && right_info.IsInteger32() &&
- MatchRotateRight(left, right, &operand, &shift_amount)) {
- instr = new(zone()) HRor(context, operand, shift_amount);
- } else {
- instr = HBitwise::New(zone(), expr->op(), context, left, right);
- }
- break;
- }
- case Token::SAR:
- instr = HSar::New(zone(), context, left, right);
- break;
- case Token::SHR:
- instr = HShr::New(zone(), context, left, right);
- if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
- CanBeZero(right)) {
- graph()->RecordUint32Instruction(instr);
- }
- break;
- case Token::SHL:
- instr = HShl::New(zone(), context, left, right);
- break;
- default:
- UNREACHABLE();
- }
-
- if (instr->IsBinaryOperation()) {
- HBinaryOperation* binop = HBinaryOperation::cast(instr);
- binop->set_observed_input_representation(left_rep, right_rep);
- binop->initialize_output_representation(result_rep);
- }
- return instr;
-}
-
-
-// Check for the form (%_ClassOf(foo) === 'BarClass').
-static bool IsClassOfTest(CompareOperation* expr) {
- if (expr->op() != Token::EQ_STRICT) return false;
- CallRuntime* call = expr->left()->AsCallRuntime();
- if (call == NULL) return false;
- Literal* literal = expr->right()->AsLiteral();
- if (literal == NULL) return false;
- if (!literal->handle()->IsString()) return false;
- if (!call->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_ClassOf"))) {
- return false;
- }
- ASSERT(call->arguments()->length() == 1);
- return true;
-}
-
-
-void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- switch (expr->op()) {
- case Token::COMMA:
- return VisitComma(expr);
- case Token::OR:
- case Token::AND:
- return VisitLogicalExpression(expr);
- default:
- return VisitArithmeticExpression(expr);
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitComma(BinaryOperation* expr) {
- CHECK_ALIVE(VisitForEffect(expr->left()));
- // Visit the right subexpression in the same AST context as the entire
- // expression.
- Visit(expr->right());
-}
-
-
-void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
- bool is_logical_and = expr->op() == Token::AND;
- if (ast_context()->IsTest()) {
- TestContext* context = TestContext::cast(ast_context());
- // Translate left subexpression.
- HBasicBlock* eval_right = graph()->CreateBasicBlock();
- if (is_logical_and) {
- CHECK_BAILOUT(VisitForControl(expr->left(),
- eval_right,
- context->if_false()));
- } else {
- CHECK_BAILOUT(VisitForControl(expr->left(),
- context->if_true(),
- eval_right));
- }
-
- // Translate right subexpression by visiting it in the same AST
- // context as the entire expression.
- if (eval_right->HasPredecessor()) {
- eval_right->SetJoinId(expr->RightId());
- set_current_block(eval_right);
- Visit(expr->right());
- }
-
- } else if (ast_context()->IsValue()) {
- CHECK_ALIVE(VisitForValue(expr->left()));
- ASSERT(current_block() != NULL);
- HValue* left_value = Top();
-
- if (left_value->IsConstant()) {
- HConstant* left_constant = HConstant::cast(left_value);
- if ((is_logical_and && left_constant->ToBoolean()) ||
- (!is_logical_and && !left_constant->ToBoolean())) {
- Drop(1); // left_value.
- CHECK_BAILOUT(VisitForValue(expr->right()));
- }
- return ast_context()->ReturnValue(Pop());
- }
-
- // We need an extra block to maintain edge-split form.
- HBasicBlock* empty_block = graph()->CreateBasicBlock();
- HBasicBlock* eval_right = graph()->CreateBasicBlock();
- TypeFeedbackId test_id = expr->left()->test_id();
- ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id));
- HBranch* test = is_logical_and
- ? new(zone()) HBranch(left_value, eval_right, empty_block, expected)
- : new(zone()) HBranch(left_value, empty_block, eval_right, expected);
- current_block()->Finish(test);
-
- set_current_block(eval_right);
- Drop(1); // Value of the left subexpression.
- CHECK_BAILOUT(VisitForValue(expr->right()));
-
- HBasicBlock* join_block =
- CreateJoin(empty_block, current_block(), expr->id());
- set_current_block(join_block);
- return ast_context()->ReturnValue(Pop());
-
- } else {
- ASSERT(ast_context()->IsEffect());
- // In an effect context, we don't need the value of the left subexpression,
- // only its control flow and side effects. We need an extra block to
- // maintain edge-split form.
- HBasicBlock* empty_block = graph()->CreateBasicBlock();
- HBasicBlock* right_block = graph()->CreateBasicBlock();
- if (is_logical_and) {
- CHECK_BAILOUT(VisitForControl(expr->left(), right_block, empty_block));
- } else {
- CHECK_BAILOUT(VisitForControl(expr->left(), empty_block, right_block));
- }
-
- // TODO(kmillikin): Find a way to fix this. It's ugly that there are
- // actually two empty blocks (one here and one inserted by
- // TestContext::BuildBranch, and that they both have an HSimulate though the
- // second one is not a merge node, and that we really have no good AST ID to
- // put on that first HSimulate.
-
- if (empty_block->HasPredecessor()) {
- empty_block->SetJoinId(expr->id());
- } else {
- empty_block = NULL;
- }
-
- if (right_block->HasPredecessor()) {
- right_block->SetJoinId(expr->RightId());
- set_current_block(right_block);
- CHECK_BAILOUT(VisitForEffect(expr->right()));
- right_block = current_block();
- } else {
- right_block = NULL;
- }
-
- HBasicBlock* join_block =
- CreateJoin(empty_block, right_block, expr->id());
- set_current_block(join_block);
- // We did not materialize any value in the predecessor environments,
- // so there is no need to handle it here.
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
- CHECK_ALIVE(VisitForValue(expr->left()));
- CHECK_ALIVE(VisitForValue(expr->right()));
- HValue* right = Pop();
- HValue* left = Pop();
- HInstruction* instr = BuildBinaryOperation(expr, left, right);
- instr->set_position(expr->position());
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-Representation HOptimizedGraphBuilder::ToRepresentation(TypeInfo info) {
- if (info.IsUninitialized()) return Representation::None();
- if (info.IsSmi()) return Representation::Integer32();
- if (info.IsInteger32()) return Representation::Integer32();
- if (info.IsDouble()) return Representation::Double();
- if (info.IsNumber()) return Representation::Double();
- return Representation::Tagged();
-}
-
-
-void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
- HTypeof* typeof_expr,
- Handle<String> check) {
- // Note: The HTypeof itself is removed during canonicalization, if possible.
- HValue* value = typeof_expr->value();
- HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
- instr->set_position(expr->position());
- return ast_context()->ReturnControl(instr, expr->id());
-}
-
-
-static bool MatchLiteralCompareNil(HValue* left,
- Token::Value op,
- HValue* right,
- Handle<Object> nil,
- HValue** expr) {
- if (left->IsConstant() &&
- HConstant::cast(left)->handle().is_identical_to(nil) &&
- Token::IsEqualityOp(op)) {
- *expr = right;
- return true;
- }
- return false;
-}
-
-
-static bool MatchLiteralCompareTypeof(HValue* left,
- Token::Value op,
- HValue* right,
- HTypeof** typeof_expr,
- Handle<String>* check) {
- if (left->IsTypeof() &&
- Token::IsEqualityOp(op) &&
- right->IsConstant() &&
- HConstant::cast(right)->handle()->IsString()) {
- *typeof_expr = HTypeof::cast(left);
- *check = Handle<String>::cast(HConstant::cast(right)->handle());
- return true;
- }
- return false;
-}
-
-
-static bool IsLiteralCompareTypeof(HValue* left,
- Token::Value op,
- HValue* right,
- HTypeof** typeof_expr,
- Handle<String>* check) {
- return MatchLiteralCompareTypeof(left, op, right, typeof_expr, check) ||
- MatchLiteralCompareTypeof(right, op, left, typeof_expr, check);
-}
-
-
-static bool IsLiteralCompareNil(HValue* left,
- Token::Value op,
- HValue* right,
- Handle<Object> nil,
- HValue** expr) {
- return MatchLiteralCompareNil(left, op, right, nil, expr) ||
- MatchLiteralCompareNil(right, op, left, nil, expr);
-}
-
-
-static bool IsLiteralCompareBool(HValue* left,
- Token::Value op,
- HValue* right) {
- return op == Token::EQ_STRICT &&
- ((left->IsConstant() && HConstant::cast(left)->handle()->IsBoolean()) ||
- (right->IsConstant() && HConstant::cast(right)->handle()->IsBoolean()));
-}
-
-
-void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- if (IsClassOfTest(expr)) {
- CallRuntime* call = expr->left()->AsCallRuntime();
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- Literal* literal = expr->right()->AsLiteral();
- Handle<String> rhs = Handle<String>::cast(literal->handle());
- HClassOfTestAndBranch* instr =
- new(zone()) HClassOfTestAndBranch(value, rhs);
- instr->set_position(expr->position());
- return ast_context()->ReturnControl(instr, expr->id());
- }
-
- TypeInfo left_type, right_type, overall_type_info;
- oracle()->CompareType(expr, &left_type, &right_type, &overall_type_info);
- Representation combined_rep = ToRepresentation(overall_type_info);
- Representation left_rep = ToRepresentation(left_type);
- Representation right_rep = ToRepresentation(right_type);
- // Check if this expression was ever executed according to type feedback.
- // Note that for the special typeof/null/undefined cases we get unknown here.
- if (overall_type_info.IsUninitialized()) {
- AddSoftDeoptimize();
- overall_type_info = left_type = right_type = TypeInfo::Unknown();
- }
-
- CHECK_ALIVE(VisitForValue(expr->left()));
- CHECK_ALIVE(VisitForValue(expr->right()));
-
- HValue* context = environment()->LookupContext();
- HValue* right = Pop();
- HValue* left = Pop();
- Token::Value op = expr->op();
-
- HTypeof* typeof_expr = NULL;
- Handle<String> check;
- if (IsLiteralCompareTypeof(left, op, right, &typeof_expr, &check)) {
- return HandleLiteralCompareTypeof(expr, typeof_expr, check);
- }
- HValue* sub_expr = NULL;
- Factory* f = graph()->isolate()->factory();
- if (IsLiteralCompareNil(left, op, right, f->undefined_value(), &sub_expr)) {
- return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
- }
- if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
- return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
- }
- if (IsLiteralCompareBool(left, op, right)) {
- HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
- return ast_context()->ReturnControl(result, expr->id());
- }
-
- if (op == Token::INSTANCEOF) {
- // Check to see if the rhs of the instanceof is a global function not
- // residing in new space. If it is we assume that the function will stay the
- // same.
- Handle<JSFunction> target = Handle<JSFunction>::null();
- VariableProxy* proxy = expr->right()->AsVariableProxy();
- bool global_function = (proxy != NULL) && proxy->var()->IsUnallocated();
- if (global_function &&
- info()->has_global_object() &&
- !info()->global_object()->IsAccessCheckNeeded()) {
- Handle<String> name = proxy->name();
- Handle<GlobalObject> global(info()->global_object());
- LookupResult lookup(isolate());
- global->Lookup(*name, &lookup);
- if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) {
- Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
- // If the function is in new space we assume it's more likely to
- // change and thus prefer the general IC code.
- if (!isolate()->heap()->InNewSpace(*candidate)) {
- target = candidate;
- }
- }
- }
-
- // If the target is not null we have found a known global function that is
- // assumed to stay the same for this instanceof.
- if (target.is_null()) {
- HInstanceOf* result = new(zone()) HInstanceOf(context, left, right);
- result->set_position(expr->position());
- return ast_context()->ReturnInstruction(result, expr->id());
- } else {
- AddInstruction(new(zone()) HCheckFunction(right, target));
- HInstanceOfKnownGlobal* result =
- new(zone()) HInstanceOfKnownGlobal(context, left, target);
- result->set_position(expr->position());
- return ast_context()->ReturnInstruction(result, expr->id());
- }
- } else if (op == Token::IN) {
- HIn* result = new(zone()) HIn(context, left, right);
- result->set_position(expr->position());
- return ast_context()->ReturnInstruction(result, expr->id());
- } else if (overall_type_info.IsNonPrimitive()) {
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT: {
- // Can we get away with map check and not instance type check?
- Handle<Map> map = oracle()->GetCompareMap(expr);
- if (!map.is_null()) {
- AddCheckMapsWithTransitions(left, map);
- AddCheckMapsWithTransitions(right, map);
- HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
- return ast_context()->ReturnControl(result, expr->id());
- } else {
- AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsSpecObject(left, zone()));
- AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsSpecObject(right, zone()));
- HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
- return ast_context()->ReturnControl(result, expr->id());
- }
- }
- default:
- return Bailout("Unsupported non-primitive compare");
- }
- } else if (overall_type_info.IsInternalizedString() &&
- Token::IsEqualityOp(op)) {
- AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsInternalizedString(left, zone()));
- AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsInternalizedString(right, zone()));
- HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
- return ast_context()->ReturnControl(result, expr->id());
- } else {
- if (combined_rep.IsTagged() || combined_rep.IsNone()) {
- HCompareGeneric* result =
- new(zone()) HCompareGeneric(context, left, right, op);
- result->set_observed_input_representation(left_rep, right_rep);
- result->set_position(expr->position());
- return ast_context()->ReturnInstruction(result, expr->id());
- } else {
- HCompareIDAndBranch* result =
- new(zone()) HCompareIDAndBranch(left, right, op);
- result->set_observed_input_representation(left_rep, right_rep);
- result->set_position(expr->position());
- return ast_context()->ReturnControl(result, expr->id());
- }
- }
-}
-
-
-void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
- HValue* value,
- NilValue nil) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- EqualityKind kind =
- expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
- HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil);
- instr->set_position(expr->position());
- return ast_context()->ReturnControl(instr, expr->id());
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
- // If we share optimized code between different closures, the
- // this-function is not a constant, except inside an inlined body.
- if (function_state()->outer() != NULL) {
- return new(zone()) HConstant(
- function_state()->compilation_info()->closure(),
- Representation::Tagged());
- } else {
- return new(zone()) HThisFunction;
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- HInstruction* instr = BuildThisFunction();
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::VisitDeclarations(
- ZoneList<Declaration*>* declarations) {
- ASSERT(globals_.is_empty());
- AstVisitor::VisitDeclarations(declarations);
- if (!globals_.is_empty()) {
- Handle<FixedArray> array =
- isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
- for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
- int flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
- DeclareGlobalsNativeFlag::encode(info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(info()->language_mode());
- HInstruction* result = new(zone()) HDeclareGlobals(
- environment()->LookupContext(), array, flags);
- AddInstruction(result);
- globals_.Clear();
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitVariableDeclaration(
- VariableDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
- Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- globals_.Add(variable->name(), zone());
- globals_.Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(), zone());
- globals_.Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- return;
- case Variable::PARAMETER:
- case Variable::LOCAL:
- if (hole_init) {
- HValue* value = graph()->GetConstantHole();
- environment()->Bind(variable, value);
- }
- break;
- case Variable::CONTEXT:
- if (hole_init) {
- HValue* value = graph()->GetConstantHole();
- HValue* context = environment()->LookupContext();
- HStoreContextSlot* store = new(zone()) HStoreContextSlot(
- context, variable->index(), HStoreContextSlot::kNoCheck, value);
- AddInstruction(store);
- if (store->HasObservableSideEffects()) {
- AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
- }
- }
- break;
- case Variable::LOOKUP:
- return Bailout("unsupported lookup slot in declaration");
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitFunctionDeclaration(
- FunctionDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- globals_.Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), info()->script());
- // Check for stack-overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals_.Add(function, zone());
- globals_.Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- return;
- }
- case Variable::PARAMETER:
- case Variable::LOCAL: {
- CHECK_ALIVE(VisitForValue(declaration->fun()));
- HValue* value = Pop();
- environment()->Bind(variable, value);
- break;
- }
- case Variable::CONTEXT: {
- CHECK_ALIVE(VisitForValue(declaration->fun()));
- HValue* value = Pop();
- HValue* context = environment()->LookupContext();
- HStoreContextSlot* store = new(zone()) HStoreContextSlot(
- context, variable->index(), HStoreContextSlot::kNoCheck, value);
- AddInstruction(store);
- if (store->HasObservableSideEffects()) {
- AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
- }
- break;
- }
- case Variable::LOOKUP:
- return Bailout("unsupported lookup slot in declaration");
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitModuleDeclaration(
- ModuleDeclaration* declaration) {
- UNREACHABLE();
-}
-
-
-void HOptimizedGraphBuilder::VisitImportDeclaration(
- ImportDeclaration* declaration) {
- UNREACHABLE();
-}
-
-
-void HOptimizedGraphBuilder::VisitExportDeclaration(
- ExportDeclaration* declaration) {
- UNREACHABLE();
-}
-
-
-void HOptimizedGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
- UNREACHABLE();
-}
-
-
-void HOptimizedGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
- UNREACHABLE();
-}
-
-
-void HOptimizedGraphBuilder::VisitModulePath(ModulePath* module) {
- UNREACHABLE();
-}
-
-
-void HOptimizedGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
- UNREACHABLE();
-}
-
-
-void HOptimizedGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
- UNREACHABLE();
-}
-
-
-// Generators for inline runtime functions.
-// Support for types.
-void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HIsSmiAndBranch* result = new(zone()) HIsSmiAndBranch(value);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value,
- FIRST_SPEC_OBJECT_TYPE,
- LAST_SPEC_OBJECT_TYPE);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsSymbol(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, SYMBOL_TYPE);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HHasCachedArrayIndexAndBranch* result =
- new(zone()) HHasCachedArrayIndexAndBranch(value);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, JS_ARRAY_TYPE);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, JS_REGEXP_TYPE);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HIsObjectAndBranch* result = new(zone()) HIsObjectAndBranch(value);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
- return Bailout("inlined runtime function: IsNonNegativeSmi");
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HIsUndetectableAndBranch* result =
- new(zone()) HIsUndetectableAndBranch(value);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* call) {
- return Bailout(
- "inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
-}
-
-
-// Support for construct call checks.
-void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 0);
- if (function_state()->outer() != NULL) {
- // We are generating graph for inlined function.
- HValue* value = function_state()->inlining_kind() == CONSTRUCT_CALL_RETURN
- ? graph()->GetConstantTrue()
- : graph()->GetConstantFalse();
- return ast_context()->ReturnValue(value);
- } else {
- return ast_context()->ReturnControl(new(zone()) HIsConstructCallAndBranch,
- call->id());
- }
-}
-
-
-// Support for arguments.length and arguments[?].
-void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
- // Our implementation of arguments (based on this stack frame or an
- // adapter below it) does not work for inlined functions. This runtime
- // function is blacklisted by AstNode::IsInlineable.
- ASSERT(function_state()->outer() == NULL);
- ASSERT(call->arguments()->length() == 0);
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
- HArgumentsLength* result = new(zone()) HArgumentsLength(elements);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
- // Our implementation of arguments (based on this stack frame or an
- // adapter below it) does not work for inlined functions. This runtime
- // function is blacklisted by AstNode::IsInlineable.
- ASSERT(function_state()->outer() == NULL);
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* index = Pop();
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
- HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
- HInstruction* checked_index = AddBoundsCheck(index, length);
- HAccessArgumentsAt* result =
- new(zone()) HAccessArgumentsAt(elements, length, checked_index);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Support for accessing the class and value fields of an object.
-void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) {
- // The special form detected by IsClassOfTest is detected before we get here
- // and does not cause a bailout.
- return Bailout("inlined runtime function: ClassOf");
-}
-
-
-void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HValueOf* result = new(zone()) HValueOf(value);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 2);
- ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle()));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* date = Pop();
- HDateField* result = new(zone()) HDateField(date, index);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
- CallRuntime* call) {
- ASSERT(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
- HValue* value = Pop();
- HValue* index = Pop();
- HValue* string = Pop();
- HSeqStringSetChar* result = new(zone()) HSeqStringSetChar(
- String::ONE_BYTE_ENCODING, string, index, value);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
- CallRuntime* call) {
- ASSERT(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
- HValue* value = Pop();
- HValue* index = Pop();
- HValue* string = Pop();
- HValue* context = environment()->LookupContext();
- HInstruction* char_code = BuildStringCharCodeAt(context, string, index);
- AddInstruction(char_code);
- HSeqStringSetChar* result = new(zone()) HSeqStringSetChar(
- String::TWO_BYTE_ENCODING, string, index, value);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 2);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* value = Pop();
- HValue* object = Pop();
- // Check if object is a not a smi.
- HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(object);
- HBasicBlock* if_smi = graph()->CreateBasicBlock();
- HBasicBlock* if_heap_object = graph()->CreateBasicBlock();
- HBasicBlock* join = graph()->CreateBasicBlock();
- smicheck->SetSuccessorAt(0, if_smi);
- smicheck->SetSuccessorAt(1, if_heap_object);
- current_block()->Finish(smicheck);
- if_smi->Goto(join);
-
- // Check if object is a JSValue.
- set_current_block(if_heap_object);
- HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(object, JS_VALUE_TYPE);
- HBasicBlock* if_js_value = graph()->CreateBasicBlock();
- HBasicBlock* not_js_value = graph()->CreateBasicBlock();
- typecheck->SetSuccessorAt(0, if_js_value);
- typecheck->SetSuccessorAt(1, not_js_value);
- current_block()->Finish(typecheck);
- not_js_value->Goto(join);
-
- // Create in-object property store to kValueOffset.
- set_current_block(if_js_value);
- Handle<String> name = isolate()->factory()->undefined_string();
- AddInstruction(new(zone()) HStoreNamedField(object,
- name,
- value,
- true, // in-object store.
- JSValue::kValueOffset));
- if_js_value->Goto(join);
- join->SetJoinId(call->id());
- set_current_block(join);
- return ast_context()->ReturnValue(value);
-}
-
-
-// Fast support for charCodeAt(n).
-void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 2);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* index = Pop();
- HValue* string = Pop();
- HValue* context = environment()->LookupContext();
- HInstruction* result = BuildStringCharCodeAt(context, string, index);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for string.charAt(n) and string[n].
-void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* char_code = Pop();
- HValue* context = environment()->LookupContext();
- HInstruction* result = HStringCharFromCode::New(zone(), context, char_code);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for string.charAt(n) and string[n].
-void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 2);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* index = Pop();
- HValue* string = Pop();
- HValue* context = environment()->LookupContext();
- HInstruction* char_code = BuildStringCharCodeAt(context, string, index);
- AddInstruction(char_code);
- HInstruction* result = HStringCharFromCode::New(zone(), context, char_code);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for object equality testing.
-void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 2);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* right = Pop();
- HValue* left = Pop();
- HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) {
- // %_Log is ignored in optimized code.
- return ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-
-// Fast support for Math.random().
-void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
- HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
- HRandom* result = new(zone()) HRandom(global_object);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for StringAdd.
-void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
- ASSERT_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result = new(zone()) HCallStub(context, CodeStub::StringAdd, 2);
- Drop(2);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for SubString.
-void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
- ASSERT_EQ(3, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result = new(zone()) HCallStub(context, CodeStub::SubString, 3);
- Drop(3);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for StringCompare.
-void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
- ASSERT_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::StringCompare, 2);
- Drop(2);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Support for direct calls from JavaScript to native RegExp code.
-void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
- ASSERT_EQ(4, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result = new(zone()) HCallStub(context, CodeStub::RegExpExec, 4);
- Drop(4);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Construct a RegExp exec result with two in-object properties.
-void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
- ASSERT_EQ(3, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::RegExpConstructResult, 3);
- Drop(3);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Support for fast native caches.
-void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
- return Bailout("inlined runtime function: GetFromCache");
-}
-
-
-// Fast support for number to string.
-void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::NumberToString, 1);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast call for custom callbacks.
-void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
- // 1 ~ The function to call is not itself an argument to the call.
- int arg_count = call->arguments()->length() - 1;
- ASSERT(arg_count >= 1); // There's always at least a receiver.
-
- for (int i = 0; i < arg_count; ++i) {
- CHECK_ALIVE(VisitArgument(call->arguments()->at(i)));
- }
- CHECK_ALIVE(VisitForValue(call->arguments()->last()));
-
- HValue* function = Pop();
- HValue* context = environment()->LookupContext();
-
- // Branch for function proxies, or other non-functions.
- HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(function, JS_FUNCTION_TYPE);
- HBasicBlock* if_jsfunction = graph()->CreateBasicBlock();
- HBasicBlock* if_nonfunction = graph()->CreateBasicBlock();
- HBasicBlock* join = graph()->CreateBasicBlock();
- typecheck->SetSuccessorAt(0, if_jsfunction);
- typecheck->SetSuccessorAt(1, if_nonfunction);
- current_block()->Finish(typecheck);
-
- set_current_block(if_jsfunction);
- HInstruction* invoke_result = AddInstruction(
- new(zone()) HInvokeFunction(context, function, arg_count));
- Drop(arg_count);
- Push(invoke_result);
- if_jsfunction->Goto(join);
-
- set_current_block(if_nonfunction);
- HInstruction* call_result = AddInstruction(
- new(zone()) HCallFunction(context, function, arg_count));
- Drop(arg_count);
- Push(call_result);
- if_nonfunction->Goto(join);
-
- set_current_block(join);
- join->SetJoinId(call->id());
- return ast_context()->ReturnValue(Pop());
-}
-
-
-// Fast call to math functions.
-void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
- ASSERT_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* right = Pop();
- HValue* left = Pop();
- HInstruction* result = HPower::New(zone(), left, right);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::SIN);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::COS);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::TAN);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::LOG);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
- return Bailout("inlined runtime function: MathSqrt");
-}
-
-
-// Check whether two RegExps are equivalent
-void HOptimizedGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
- return Bailout("inlined runtime function: IsRegExpEquivalent");
-}
-
-
-void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
- ASSERT(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HGetCachedArrayIndex* result = new(zone()) HGetCachedArrayIndex(value);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
- return Bailout("inlined runtime function: FastAsciiArrayJoin");
-}
-
-
-#undef CHECK_BAILOUT
-#undef CHECK_ALIVE
-
-
-HEnvironment::HEnvironment(HEnvironment* outer,
- Scope* scope,
- Handle<JSFunction> closure,
- Zone* zone)
- : closure_(closure),
- values_(0, zone),
- frame_type_(JS_FUNCTION),
- parameter_count_(0),
- specials_count_(1),
- local_count_(0),
- outer_(outer),
- entry_(NULL),
- pop_count_(0),
- push_count_(0),
- ast_id_(BailoutId::None()),
- zone_(zone) {
- Initialize(scope->num_parameters() + 1, scope->num_stack_slots(), 0);
-}
-
-
-HEnvironment::HEnvironment(Zone* zone, int parameter_count)
- : values_(0, zone),
- frame_type_(STUB),
- parameter_count_(parameter_count),
- specials_count_(1),
- local_count_(0),
- outer_(NULL),
- entry_(NULL),
- pop_count_(0),
- push_count_(0),
- ast_id_(BailoutId::None()),
- zone_(zone) {
- Initialize(parameter_count, 0, 0);
-}
-
-
-HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
- : values_(0, zone),
- frame_type_(JS_FUNCTION),
- parameter_count_(0),
- specials_count_(0),
- local_count_(0),
- outer_(NULL),
- entry_(NULL),
- pop_count_(0),
- push_count_(0),
- ast_id_(other->ast_id()),
- zone_(zone) {
- Initialize(other);
-}
-
-
-HEnvironment::HEnvironment(HEnvironment* outer,
- Handle<JSFunction> closure,
- FrameType frame_type,
- int arguments,
- Zone* zone)
- : closure_(closure),
- values_(arguments, zone),
- frame_type_(frame_type),
- parameter_count_(arguments),
- local_count_(0),
- outer_(outer),
- entry_(NULL),
- pop_count_(0),
- push_count_(0),
- ast_id_(BailoutId::None()),
- zone_(zone) {
-}
-
-
-void HEnvironment::Initialize(int parameter_count,
- int local_count,
- int stack_height) {
- parameter_count_ = parameter_count;
- local_count_ = local_count;
-
- // Avoid reallocating the temporaries' backing store on the first Push.
- int total = parameter_count + specials_count_ + local_count + stack_height;
- values_.Initialize(total + 4, zone());
- for (int i = 0; i < total; ++i) values_.Add(NULL, zone());
-}
-
-
-void HEnvironment::Initialize(const HEnvironment* other) {
- closure_ = other->closure();
- values_.AddAll(other->values_, zone());
- assigned_variables_.Union(other->assigned_variables_, zone());
- frame_type_ = other->frame_type_;
- parameter_count_ = other->parameter_count_;
- local_count_ = other->local_count_;
- if (other->outer_ != NULL) outer_ = other->outer_->Copy(); // Deep copy.
- entry_ = other->entry_;
- pop_count_ = other->pop_count_;
- push_count_ = other->push_count_;
- specials_count_ = other->specials_count_;
- ast_id_ = other->ast_id_;
-}
-
-
-void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
- ASSERT(!block->IsLoopHeader());
- ASSERT(values_.length() == other->values_.length());
-
- int length = values_.length();
- for (int i = 0; i < length; ++i) {
- HValue* value = values_[i];
- if (value != NULL && value->IsPhi() && value->block() == block) {
- // There is already a phi for the i'th value.
- HPhi* phi = HPhi::cast(value);
- // Assert index is correct and that we haven't missed an incoming edge.
- ASSERT(phi->merged_index() == i);
- ASSERT(phi->OperandCount() == block->predecessors()->length());
- phi->AddInput(other->values_[i]);
- } else if (values_[i] != other->values_[i]) {
- // There is a fresh value on the incoming edge, a phi is needed.
- ASSERT(values_[i] != NULL && other->values_[i] != NULL);
- HPhi* phi = new(zone()) HPhi(i, zone());
- HValue* old_value = values_[i];
- for (int j = 0; j < block->predecessors()->length(); j++) {
- phi->AddInput(old_value);
- }
- phi->AddInput(other->values_[i]);
- this->values_[i] = phi;
- block->AddPhi(phi);
- }
- }
-}
-
-
-void HEnvironment::Bind(int index, HValue* value) {
- ASSERT(value != NULL);
- assigned_variables_.Add(index, zone());
- values_[index] = value;
-}
-
-
-bool HEnvironment::HasExpressionAt(int index) const {
- return index >= parameter_count_ + specials_count_ + local_count_;
-}
-
-
-bool HEnvironment::ExpressionStackIsEmpty() const {
- ASSERT(length() >= first_expression_index());
- return length() == first_expression_index();
-}
-
-
-void HEnvironment::SetExpressionStackAt(int index_from_top, HValue* value) {
- int count = index_from_top + 1;
- int index = values_.length() - count;
- ASSERT(HasExpressionAt(index));
- // The push count must include at least the element in question or else
- // the new value will not be included in this environment's history.
- if (push_count_ < count) {
- // This is the same effect as popping then re-pushing 'count' elements.
- pop_count_ += (count - push_count_);
- push_count_ = count;
- }
- values_[index] = value;
-}
-
-
-void HEnvironment::Drop(int count) {
- for (int i = 0; i < count; ++i) {
- Pop();
- }
-}
-
-
-HEnvironment* HEnvironment::Copy() const {
- return new(zone()) HEnvironment(this, zone());
-}
-
-
-HEnvironment* HEnvironment::CopyWithoutHistory() const {
- HEnvironment* result = Copy();
- result->ClearHistory();
- return result;
-}
-
-
-HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
- HEnvironment* new_env = Copy();
- for (int i = 0; i < values_.length(); ++i) {
- HPhi* phi = new(zone()) HPhi(i, zone());
- phi->AddInput(values_[i]);
- new_env->values_[i] = phi;
- loop_header->AddPhi(phi);
- }
- new_env->ClearHistory();
- return new_env;
-}
-
-
-HEnvironment* HEnvironment::CreateStubEnvironment(HEnvironment* outer,
- Handle<JSFunction> target,
- FrameType frame_type,
- int arguments) const {
- HEnvironment* new_env =
- new(zone()) HEnvironment(outer, target, frame_type,
- arguments + 1, zone());
- for (int i = 0; i <= arguments; ++i) { // Include receiver.
- new_env->Push(ExpressionStackAt(arguments - i));
- }
- new_env->ClearHistory();
- return new_env;
-}
-
-
-HEnvironment* HEnvironment::CopyForInlining(
- Handle<JSFunction> target,
- int arguments,
- FunctionLiteral* function,
- HConstant* undefined,
- InliningKind inlining_kind,
- bool undefined_receiver) const {
- ASSERT(frame_type() == JS_FUNCTION);
-
- // Outer environment is a copy of this one without the arguments.
- int arity = function->scope()->num_parameters();
-
- HEnvironment* outer = Copy();
- outer->Drop(arguments + 1); // Including receiver.
- outer->ClearHistory();
-
- if (inlining_kind == CONSTRUCT_CALL_RETURN) {
- // Create artificial constructor stub environment. The receiver should
- // actually be the constructor function, but we pass the newly allocated
- // object instead, DoComputeConstructStubFrame() relies on that.
- outer = CreateStubEnvironment(outer, target, JS_CONSTRUCT, arguments);
- } else if (inlining_kind == GETTER_CALL_RETURN) {
- // We need an additional StackFrame::INTERNAL frame for restoring the
- // correct context.
- outer = CreateStubEnvironment(outer, target, JS_GETTER, arguments);
- } else if (inlining_kind == SETTER_CALL_RETURN) {
- // We need an additional StackFrame::INTERNAL frame for temporarily saving
- // the argument of the setter, see StoreStubCompiler::CompileStoreViaSetter.
- outer = CreateStubEnvironment(outer, target, JS_SETTER, arguments);
- }
-
- if (arity != arguments) {
- // Create artificial arguments adaptation environment.
- outer = CreateStubEnvironment(outer, target, ARGUMENTS_ADAPTOR, arguments);
- }
-
- HEnvironment* inner =
- new(zone()) HEnvironment(outer, function->scope(), target, zone());
- // Get the argument values from the original environment.
- for (int i = 0; i <= arity; ++i) { // Include receiver.
- HValue* push = (i <= arguments) ?
- ExpressionStackAt(arguments - i) : undefined;
- inner->SetValueAt(i, push);
- }
- // If the function we are inlining is a strict mode function or a
- // builtin function, pass undefined as the receiver for function
- // calls (instead of the global receiver).
- if (undefined_receiver) {
- inner->SetValueAt(0, undefined);
- }
- inner->SetValueAt(arity + 1, LookupContext());
- for (int i = arity + 2; i < inner->length(); ++i) {
- inner->SetValueAt(i, undefined);
- }
-
- inner->set_ast_id(BailoutId::FunctionEntry());
- return inner;
-}
-
-
-void HEnvironment::PrintTo(StringStream* stream) {
- for (int i = 0; i < length(); i++) {
- if (i == 0) stream->Add("parameters\n");
- if (i == parameter_count()) stream->Add("specials\n");
- if (i == parameter_count() + specials_count()) stream->Add("locals\n");
- if (i == parameter_count() + specials_count() + local_count()) {
- stream->Add("expressions\n");
- }
- HValue* val = values_.at(i);
- stream->Add("%d: ", i);
- if (val != NULL) {
- val->PrintNameTo(stream);
- } else {
- stream->Add("NULL");
- }
- stream->Add("\n");
- }
- PrintF("\n");
-}
-
-
-void HEnvironment::PrintToStd() {
- HeapStringAllocator string_allocator;
- StringStream trace(&string_allocator);
- PrintTo(&trace);
- PrintF("%s", *trace.ToCString());
-}
-
-
-void HTracer::TraceCompilation(CompilationInfo* info) {
- Tag tag(this, "compilation");
- if (info->IsOptimizing()) {
- Handle<String> name = info->function()->debug_name();
- PrintStringProperty("name", *name->ToCString());
- PrintStringProperty("method", *name->ToCString());
- } else {
- CodeStub::Major major_key = info->code_stub()->MajorKey();
- PrintStringProperty("name", CodeStub::MajorName(major_key, false));
- PrintStringProperty("method", "stub");
- }
- PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
-}
-
-
-void HTracer::TraceLithium(const char* name, LChunk* chunk) {
- AllowHandleDereference allow_handle_deref(chunk->graph()->isolate());
- Trace(name, chunk->graph(), chunk);
-}
-
-
-void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
- AllowHandleDereference allow_handle_deref(graph->isolate());
- Trace(name, graph, NULL);
-}
-
-
-void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
- Tag tag(this, "cfg");
- PrintStringProperty("name", name);
- const ZoneList<HBasicBlock*>* blocks = graph->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* current = blocks->at(i);
- Tag block_tag(this, "block");
- PrintBlockProperty("name", current->block_id());
- PrintIntProperty("from_bci", -1);
- PrintIntProperty("to_bci", -1);
-
- if (!current->predecessors()->is_empty()) {
- PrintIndent();
- trace_.Add("predecessors");
- for (int j = 0; j < current->predecessors()->length(); ++j) {
- trace_.Add(" \"B%d\"", current->predecessors()->at(j)->block_id());
- }
- trace_.Add("\n");
- } else {
- PrintEmptyProperty("predecessors");
- }
-
- if (current->end()->SuccessorCount() == 0) {
- PrintEmptyProperty("successors");
- } else {
- PrintIndent();
- trace_.Add("successors");
- for (HSuccessorIterator it(current->end()); !it.Done(); it.Advance()) {
- trace_.Add(" \"B%d\"", it.Current()->block_id());
- }
- trace_.Add("\n");
- }
-
- PrintEmptyProperty("xhandlers");
- const char* flags = current->IsLoopSuccessorDominator()
- ? "dom-loop-succ"
- : "";
- PrintStringProperty("flags", flags);
-
- if (current->dominator() != NULL) {
- PrintBlockProperty("dominator", current->dominator()->block_id());
- }
-
- PrintIntProperty("loop_depth", current->LoopNestingDepth());
-
- if (chunk != NULL) {
- int first_index = current->first_instruction_index();
- int last_index = current->last_instruction_index();
- PrintIntProperty(
- "first_lir_id",
- LifetimePosition::FromInstructionIndex(first_index).Value());
- PrintIntProperty(
- "last_lir_id",
- LifetimePosition::FromInstructionIndex(last_index).Value());
- }
-
- {
- Tag states_tag(this, "states");
- Tag locals_tag(this, "locals");
- int total = current->phis()->length();
- PrintIntProperty("size", current->phis()->length());
- PrintStringProperty("method", "None");
- for (int j = 0; j < total; ++j) {
- HPhi* phi = current->phis()->at(j);
- PrintIndent();
- trace_.Add("%d ", phi->merged_index());
- phi->PrintNameTo(&trace_);
- trace_.Add(" ");
- phi->PrintTo(&trace_);
- trace_.Add("\n");
- }
- }
-
- {
- Tag HIR_tag(this, "HIR");
- HInstruction* instruction = current->first();
- while (instruction != NULL) {
- int bci = 0;
- int uses = instruction->UseCount();
- PrintIndent();
- trace_.Add("%d %d ", bci, uses);
- instruction->PrintNameTo(&trace_);
- trace_.Add(" ");
- instruction->PrintTo(&trace_);
- trace_.Add(" <|@\n");
- instruction = instruction->next();
- }
- }
-
-
- if (chunk != NULL) {
- Tag LIR_tag(this, "LIR");
- int first_index = current->first_instruction_index();
- int last_index = current->last_instruction_index();
- if (first_index != -1 && last_index != -1) {
- const ZoneList<LInstruction*>* instructions = chunk->instructions();
- for (int i = first_index; i <= last_index; ++i) {
- LInstruction* linstr = instructions->at(i);
- if (linstr != NULL) {
- PrintIndent();
- trace_.Add("%d ",
- LifetimePosition::FromInstructionIndex(i).Value());
- linstr->PrintTo(&trace_);
- trace_.Add(" <|@\n");
- }
- }
- }
- }
- }
-}
-
-
-void HTracer::TraceLiveRanges(const char* name, LAllocator* allocator) {
- Tag tag(this, "intervals");
- PrintStringProperty("name", name);
-
- const Vector<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
- for (int i = 0; i < fixed_d->length(); ++i) {
- TraceLiveRange(fixed_d->at(i), "fixed", allocator->zone());
- }
-
- const Vector<LiveRange*>* fixed = allocator->fixed_live_ranges();
- for (int i = 0; i < fixed->length(); ++i) {
- TraceLiveRange(fixed->at(i), "fixed", allocator->zone());
- }
-
- const ZoneList<LiveRange*>* live_ranges = allocator->live_ranges();
- for (int i = 0; i < live_ranges->length(); ++i) {
- TraceLiveRange(live_ranges->at(i), "object", allocator->zone());
- }
-}
-
-
-void HTracer::TraceLiveRange(LiveRange* range, const char* type,
- Zone* zone) {
- if (range != NULL && !range->IsEmpty()) {
- PrintIndent();
- trace_.Add("%d %s", range->id(), type);
- if (range->HasRegisterAssigned()) {
- LOperand* op = range->CreateAssignedOperand(zone);
- int assigned_reg = op->index();
- if (op->IsDoubleRegister()) {
- trace_.Add(" \"%s\"",
- DoubleRegister::AllocationIndexToString(assigned_reg));
- } else {
- ASSERT(op->IsRegister());
- trace_.Add(" \"%s\"", Register::AllocationIndexToString(assigned_reg));
- }
- } else if (range->IsSpilled()) {
- LOperand* op = range->TopLevel()->GetSpillOperand();
- if (op->IsDoubleStackSlot()) {
- trace_.Add(" \"double_stack:%d\"", op->index());
- } else {
- ASSERT(op->IsStackSlot());
- trace_.Add(" \"stack:%d\"", op->index());
- }
- }
- int parent_index = -1;
- if (range->IsChild()) {
- parent_index = range->parent()->id();
- } else {
- parent_index = range->id();
- }
- LOperand* op = range->FirstHint();
- int hint_index = -1;
- if (op != NULL && op->IsUnallocated()) {
- hint_index = LUnallocated::cast(op)->virtual_register();
- }
- trace_.Add(" %d %d", parent_index, hint_index);
- UseInterval* cur_interval = range->first_interval();
- while (cur_interval != NULL && range->Covers(cur_interval->start())) {
- trace_.Add(" [%d, %d[",
- cur_interval->start().Value(),
- cur_interval->end().Value());
- cur_interval = cur_interval->next();
- }
-
- UsePosition* current_pos = range->first_pos();
- while (current_pos != NULL) {
- if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) {
- trace_.Add(" %d M", current_pos->pos().Value());
- }
- current_pos = current_pos->next();
- }
-
- trace_.Add(" \"\"\n");
- }
-}
-
-
-void HTracer::FlushToFile() {
- AppendChars(filename_, *trace_.ToCString(), trace_.length(), false);
- trace_.Reset();
-}
-
-
-void HStatistics::Initialize(CompilationInfo* info) {
- if (info->shared_info().is_null()) return;
- source_size_ += info->shared_info()->SourceSize();
-}
-
-
-void HStatistics::Print() {
- PrintF("Timing results:\n");
- int64_t sum = 0;
- for (int i = 0; i < timing_.length(); ++i) {
- sum += timing_[i];
- }
-
- for (int i = 0; i < names_.length(); ++i) {
- PrintF("%30s", names_[i]);
- double ms = static_cast<double>(timing_[i]) / 1000;
- double percent = static_cast<double>(timing_[i]) * 100 / sum;
- PrintF(" - %8.3f ms / %4.1f %% ", ms, percent);
-
- unsigned size = sizes_[i];
- double size_percent = static_cast<double>(size) * 100 / total_size_;
- PrintF(" %9u bytes / %4.1f %%\n", size, size_percent);
- }
-
- PrintF("----------------------------------------"
- "---------------------------------------\n");
- int64_t total = create_graph_ + optimize_graph_ + generate_code_;
- PrintF("%30s - %8.3f ms / %4.1f %% \n",
- "Create graph",
- static_cast<double>(create_graph_) / 1000,
- static_cast<double>(create_graph_) * 100 / total);
- PrintF("%30s - %8.3f ms / %4.1f %% \n",
- "Optimize graph",
- static_cast<double>(optimize_graph_) / 1000,
- static_cast<double>(optimize_graph_) * 100 / total);
- PrintF("%30s - %8.3f ms / %4.1f %% \n",
- "Generate and install code",
- static_cast<double>(generate_code_) / 1000,
- static_cast<double>(generate_code_) * 100 / total);
- PrintF("----------------------------------------"
- "---------------------------------------\n");
- PrintF("%30s - %8.3f ms (%.1f times slower than full code gen)\n",
- "Total",
- static_cast<double>(total) / 1000,
- static_cast<double>(total) / full_code_gen_);
-
- double source_size_in_kb = static_cast<double>(source_size_) / 1024;
- double normalized_time = source_size_in_kb > 0
- ? (static_cast<double>(total) / 1000) / source_size_in_kb
- : 0;
- double normalized_size_in_kb = source_size_in_kb > 0
- ? total_size_ / 1024 / source_size_in_kb
- : 0;
- PrintF("%30s - %8.3f ms %7.3f kB allocated\n",
- "Average per kB source",
- normalized_time, normalized_size_in_kb);
-}
-
-
-void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
- if (name == HPhase::kFullCodeGen) {
- full_code_gen_ += ticks;
- } else {
- total_size_ += size;
- for (int i = 0; i < names_.length(); ++i) {
- if (strcmp(names_[i], name) == 0) {
- timing_[i] += ticks;
- sizes_[i] += size;
- return;
- }
- }
- names_.Add(name);
- timing_.Add(ticks);
- sizes_.Add(size);
- }
-}
-
-
-const char* const HPhase::kFullCodeGen = "Full code generator";
-
-void HPhase::Begin(const char* name,
- HGraph* graph,
- LChunk* chunk,
- LAllocator* allocator) {
- name_ = name;
- graph_ = graph;
- chunk_ = chunk;
- allocator_ = allocator;
- if (allocator != NULL && chunk_ == NULL) {
- chunk_ = allocator->chunk();
- }
- if (FLAG_hydrogen_stats) start_ = OS::Ticks();
- start_allocation_size_ = Zone::allocation_size_;
-}
-
-
-void HPhase::End() const {
- if (FLAG_hydrogen_stats) {
- int64_t end = OS::Ticks();
- unsigned size = Zone::allocation_size_ - start_allocation_size_;
- HStatistics::Instance()->SaveTiming(name_, end - start_, size);
- }
-
- // Produce trace output if flag is set so that the first letter of the
- // phase name matches the command line parameter FLAG_trace_phase.
- if (FLAG_trace_hydrogen &&
- OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL) {
- if (graph_ != NULL) HTracer::Instance()->TraceHydrogen(name_, graph_);
- if (chunk_ != NULL) HTracer::Instance()->TraceLithium(name_, chunk_);
- if (allocator_ != NULL) {
- HTracer::Instance()->TraceLiveRanges(name_, allocator_);
- }
- }
-
-#ifdef DEBUG
- if (graph_ != NULL) graph_->Verify(false); // No full verify.
- if (allocator_ != NULL) allocator_->Verify();
-#endif
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/hydrogen.h b/src/3rdparty/v8/src/hydrogen.h
deleted file mode 100644
index a9829a0..0000000
--- a/src/3rdparty/v8/src/hydrogen.h
+++ /dev/null
@@ -1,1703 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HYDROGEN_H_
-#define V8_HYDROGEN_H_
-
-#include "v8.h"
-
-#include "allocation.h"
-#include "ast.h"
-#include "compiler.h"
-#include "hydrogen-instructions.h"
-#include "type-info.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class BitVector;
-class FunctionState;
-class HEnvironment;
-class HGraph;
-class HLoopInformation;
-class HTracer;
-class LAllocator;
-class LChunk;
-class LiveRange;
-
-
-class HBasicBlock: public ZoneObject {
- public:
- explicit HBasicBlock(HGraph* graph);
- virtual ~HBasicBlock() { }
-
- // Simple accessors.
- int block_id() const { return block_id_; }
- void set_block_id(int id) { block_id_ = id; }
- HGraph* graph() const { return graph_; }
- const ZoneList<HPhi*>* phis() const { return &phis_; }
- HInstruction* first() const { return first_; }
- HInstruction* last() const { return last_; }
- void set_last(HInstruction* instr) { last_ = instr; }
- HInstruction* GetLastInstruction();
- HControlInstruction* end() const { return end_; }
- HLoopInformation* loop_information() const { return loop_information_; }
- const ZoneList<HBasicBlock*>* predecessors() const { return &predecessors_; }
- bool HasPredecessor() const { return predecessors_.length() > 0; }
- const ZoneList<HBasicBlock*>* dominated_blocks() const {
- return &dominated_blocks_;
- }
- const ZoneList<int>* deleted_phis() const {
- return &deleted_phis_;
- }
- void RecordDeletedPhi(int merge_index) {
- deleted_phis_.Add(merge_index, zone());
- }
- HBasicBlock* dominator() const { return dominator_; }
- HEnvironment* last_environment() const { return last_environment_; }
- int argument_count() const { return argument_count_; }
- void set_argument_count(int count) { argument_count_ = count; }
- int first_instruction_index() const { return first_instruction_index_; }
- void set_first_instruction_index(int index) {
- first_instruction_index_ = index;
- }
- int last_instruction_index() const { return last_instruction_index_; }
- void set_last_instruction_index(int index) {
- last_instruction_index_ = index;
- }
- bool is_osr_entry() { return is_osr_entry_; }
- void set_osr_entry() { is_osr_entry_ = true; }
-
- void AttachLoopInformation();
- void DetachLoopInformation();
- bool IsLoopHeader() const { return loop_information() != NULL; }
- bool IsStartBlock() const { return block_id() == 0; }
- void PostProcessLoopHeader(IterationStatement* stmt);
-
- bool IsFinished() const { return end_ != NULL; }
- void AddPhi(HPhi* phi);
- void RemovePhi(HPhi* phi);
- void AddInstruction(HInstruction* instr);
- bool Dominates(HBasicBlock* other) const;
- int LoopNestingDepth() const;
-
- void SetInitialEnvironment(HEnvironment* env);
- void ClearEnvironment() { last_environment_ = NULL; }
- bool HasEnvironment() const { return last_environment_ != NULL; }
- void UpdateEnvironment(HEnvironment* env) { last_environment_ = env; }
- HBasicBlock* parent_loop_header() const { return parent_loop_header_; }
-
- void set_parent_loop_header(HBasicBlock* block) {
- ASSERT(parent_loop_header_ == NULL);
- parent_loop_header_ = block;
- }
-
- bool HasParentLoopHeader() const { return parent_loop_header_ != NULL; }
-
- void SetJoinId(BailoutId ast_id);
-
- void Finish(HControlInstruction* last);
- void FinishExit(HControlInstruction* instruction);
- void Goto(HBasicBlock* block, FunctionState* state = NULL);
-
- int PredecessorIndexOf(HBasicBlock* predecessor) const;
- void AddSimulate(BailoutId ast_id,
- RemovableSimulate removable = FIXED_SIMULATE) {
- AddInstruction(CreateSimulate(ast_id, removable));
- }
- void AssignCommonDominator(HBasicBlock* other);
- void AssignLoopSuccessorDominators();
-
- void FinishExitWithDeoptimization(HDeoptimize::UseEnvironment has_uses) {
- FinishExit(CreateDeoptimize(has_uses));
- }
-
- // Add the inlined function exit sequence, adding an HLeaveInlined
- // instruction and updating the bailout environment.
- void AddLeaveInlined(HValue* return_value, FunctionState* state);
-
- // If a target block is tagged as an inline function return, all
- // predecessors should contain the inlined exit sequence:
- //
- // LeaveInlined
- // Simulate (caller's environment)
- // Goto (target block)
- bool IsInlineReturnTarget() const { return is_inline_return_target_; }
- void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
-
- bool IsDeoptimizing() const { return is_deoptimizing_; }
- void MarkAsDeoptimizing() { is_deoptimizing_ = true; }
-
- bool IsLoopSuccessorDominator() const {
- return dominates_loop_successors_;
- }
- void MarkAsLoopSuccessorDominator() {
- dominates_loop_successors_ = true;
- }
-
- inline Zone* zone() const;
-
-#ifdef DEBUG
- void Verify();
-#endif
-
- private:
- void RegisterPredecessor(HBasicBlock* pred);
- void AddDominatedBlock(HBasicBlock* block);
-
- HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
- HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
-
- int block_id_;
- HGraph* graph_;
- ZoneList<HPhi*> phis_;
- HInstruction* first_;
- HInstruction* last_;
- HControlInstruction* end_;
- HLoopInformation* loop_information_;
- ZoneList<HBasicBlock*> predecessors_;
- HBasicBlock* dominator_;
- ZoneList<HBasicBlock*> dominated_blocks_;
- HEnvironment* last_environment_;
- // Outgoing parameter count at block exit, set during lithium translation.
- int argument_count_;
- // Instruction indices into the lithium code stream.
- int first_instruction_index_;
- int last_instruction_index_;
- ZoneList<int> deleted_phis_;
- HBasicBlock* parent_loop_header_;
- bool is_inline_return_target_;
- bool is_deoptimizing_;
- bool dominates_loop_successors_;
- bool is_osr_entry_;
-};
-
-
-class HPredecessorIterator BASE_EMBEDDED {
- public:
- explicit HPredecessorIterator(HBasicBlock* block)
- : predecessor_list_(block->predecessors()), current_(0) { }
-
- bool Done() { return current_ >= predecessor_list_->length(); }
- HBasicBlock* Current() { return predecessor_list_->at(current_); }
- void Advance() { current_++; }
-
- private:
- const ZoneList<HBasicBlock*>* predecessor_list_;
- int current_;
-};
-
-
-class HLoopInformation: public ZoneObject {
- public:
- HLoopInformation(HBasicBlock* loop_header, Zone* zone)
- : back_edges_(4, zone),
- loop_header_(loop_header),
- blocks_(8, zone),
- stack_check_(NULL) {
- blocks_.Add(loop_header, zone);
- }
- virtual ~HLoopInformation() {}
-
- const ZoneList<HBasicBlock*>* back_edges() const { return &back_edges_; }
- const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
- HBasicBlock* loop_header() const { return loop_header_; }
- HBasicBlock* GetLastBackEdge() const;
- void RegisterBackEdge(HBasicBlock* block);
-
- HStackCheck* stack_check() const { return stack_check_; }
- void set_stack_check(HStackCheck* stack_check) {
- stack_check_ = stack_check;
- }
-
- private:
- void AddBlock(HBasicBlock* block);
-
- ZoneList<HBasicBlock*> back_edges_;
- HBasicBlock* loop_header_;
- ZoneList<HBasicBlock*> blocks_;
- HStackCheck* stack_check_;
-};
-
-class BoundsCheckTable;
-class HGraph: public ZoneObject {
- public:
- explicit HGraph(CompilationInfo* info);
-
- Isolate* isolate() const { return isolate_; }
- Zone* zone() const { return zone_; }
- CompilationInfo* info() const { return info_; }
-
- const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
- const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
- HBasicBlock* entry_block() const { return entry_block_; }
- HEnvironment* start_environment() const { return start_environment_; }
-
- void InitializeInferredTypes();
- void InsertTypeConversions();
- void MergeRemovableSimulates();
- void InsertRepresentationChanges();
- void MarkDeoptimizeOnUndefined();
- void ComputeMinusZeroChecks();
- void ComputeSafeUint32Operations();
- void GlobalValueNumbering();
- bool ProcessArgumentsObject();
- void EliminateRedundantPhis();
- void EliminateUnreachablePhis();
- void Canonicalize();
- void OrderBlocks();
- void AssignDominators();
- void SetupInformativeDefinitions();
- void EliminateRedundantBoundsChecks();
- void DehoistSimpleArrayIndexComputations();
- void DeadCodeElimination();
- void RestoreActualValues();
- void PropagateDeoptimizingMark();
- void EliminateUnusedInstructions();
-
- // Returns false if there are phi-uses of the arguments-object
- // which are not supported by the optimizing compiler.
- bool CheckArgumentsPhiUses();
-
- // Returns false if there are phi-uses of an uninitialized const
- // which are not supported by the optimizing compiler.
- bool CheckConstPhiUses();
-
- void CollectPhis();
-
- void set_undefined_constant(HConstant* constant) {
- undefined_constant_.set(constant);
- }
- HConstant* GetConstantUndefined() const { return undefined_constant_.get(); }
- HConstant* GetConstant0();
- HConstant* GetConstant1();
- HConstant* GetConstantMinus1();
- HConstant* GetConstantTrue();
- HConstant* GetConstantFalse();
- HConstant* GetConstantHole();
-
- HBasicBlock* CreateBasicBlock();
- HArgumentsObject* GetArgumentsObject() const {
- return arguments_object_.get();
- }
-
- void SetArgumentsObject(HArgumentsObject* object) {
- arguments_object_.set(object);
- }
-
- int GetMaximumValueID() const { return values_.length(); }
- int GetNextBlockID() { return next_block_id_++; }
- int GetNextValueID(HValue* value) {
- values_.Add(value, zone());
- return values_.length() - 1;
- }
- HValue* LookupValue(int id) const {
- if (id >= 0 && id < values_.length()) return values_[id];
- return NULL;
- }
-
- bool Optimize(SmartArrayPointer<char>* bailout_reason);
-
-#ifdef DEBUG
- void Verify(bool do_full_verify) const;
-#endif
-
- bool has_osr_loop_entry() {
- return osr_loop_entry_.is_set();
- }
-
- HBasicBlock* osr_loop_entry() {
- return osr_loop_entry_.get();
- }
-
- void set_osr_loop_entry(HBasicBlock* entry) {
- osr_loop_entry_.set(entry);
- }
-
- ZoneList<HUnknownOSRValue*>* osr_values() {
- return osr_values_.get();
- }
-
- void set_osr_values(ZoneList<HUnknownOSRValue*>* values) {
- osr_values_.set(values);
- }
-
- int update_type_change_checksum(int delta) {
- type_change_checksum_ += delta;
- return type_change_checksum_;
- }
-
- bool use_optimistic_licm() {
- return use_optimistic_licm_;
- }
-
- void set_use_optimistic_licm(bool value) {
- use_optimistic_licm_ = value;
- }
-
- bool has_soft_deoptimize() {
- return has_soft_deoptimize_;
- }
-
- void set_has_soft_deoptimize(bool value) {
- has_soft_deoptimize_ = value;
- }
-
- void MarkRecursive() {
- is_recursive_ = true;
- }
-
- bool is_recursive() const {
- return is_recursive_;
- }
-
- void RecordUint32Instruction(HInstruction* instr) {
- if (uint32_instructions_ == NULL) {
- uint32_instructions_ = new(zone()) ZoneList<HInstruction*>(4, zone());
- }
- uint32_instructions_->Add(instr, zone());
- }
-
- private:
- HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
- Handle<Object> value);
- HConstant* GetConstantInt32(SetOncePointer<HConstant>* pointer,
- int32_t integer_value);
-
- void MarkAsDeoptimizingRecursively(HBasicBlock* block);
- void NullifyUnreachableInstructions();
- void InsertTypeConversions(HInstruction* instr);
- void PropagateMinusZeroChecks(HValue* value, BitVector* visited);
- void RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi);
- void InsertRepresentationChangeForUse(HValue* value,
- HValue* use_value,
- int use_index,
- Representation to);
- void InsertRepresentationChangesForValue(HValue* value);
- void InferTypes(ZoneList<HValue*>* worklist);
- void InitializeInferredTypes(int from_inclusive, int to_inclusive);
- void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
- void SetupInformativeDefinitionsInBlock(HBasicBlock* block);
- void SetupInformativeDefinitionsRecursively(HBasicBlock* block);
- void EliminateRedundantBoundsChecks(HBasicBlock* bb, BoundsCheckTable* table);
-
- Isolate* isolate_;
- int next_block_id_;
- HBasicBlock* entry_block_;
- HEnvironment* start_environment_;
- ZoneList<HBasicBlock*> blocks_;
- ZoneList<HValue*> values_;
- ZoneList<HPhi*>* phi_list_;
- ZoneList<HInstruction*>* uint32_instructions_;
- SetOncePointer<HConstant> undefined_constant_;
- SetOncePointer<HConstant> constant_0_;
- SetOncePointer<HConstant> constant_1_;
- SetOncePointer<HConstant> constant_minus1_;
- SetOncePointer<HConstant> constant_true_;
- SetOncePointer<HConstant> constant_false_;
- SetOncePointer<HConstant> constant_hole_;
- SetOncePointer<HArgumentsObject> arguments_object_;
-
- SetOncePointer<HBasicBlock> osr_loop_entry_;
- SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
-
- CompilationInfo* info_;
- Zone* zone_;
-
- bool is_recursive_;
- bool use_optimistic_licm_;
- bool has_soft_deoptimize_;
- int type_change_checksum_;
-
- DISALLOW_COPY_AND_ASSIGN(HGraph);
-};
-
-
-Zone* HBasicBlock::zone() const { return graph_->zone(); }
-
-
-// Type of stack frame an environment might refer to.
-enum FrameType {
- JS_FUNCTION,
- JS_CONSTRUCT,
- JS_GETTER,
- JS_SETTER,
- ARGUMENTS_ADAPTOR,
- STUB
-};
-
-
-class HEnvironment: public ZoneObject {
- public:
- HEnvironment(HEnvironment* outer,
- Scope* scope,
- Handle<JSFunction> closure,
- Zone* zone);
-
- HEnvironment(Zone* zone, int parameter_count);
-
- HEnvironment* arguments_environment() {
- return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
- }
-
- // Simple accessors.
- Handle<JSFunction> closure() const { return closure_; }
- const ZoneList<HValue*>* values() const { return &values_; }
- const GrowableBitVector* assigned_variables() const {
- return &assigned_variables_;
- }
- FrameType frame_type() const { return frame_type_; }
- int parameter_count() const { return parameter_count_; }
- int specials_count() const { return specials_count_; }
- int local_count() const { return local_count_; }
- HEnvironment* outer() const { return outer_; }
- int pop_count() const { return pop_count_; }
- int push_count() const { return push_count_; }
-
- BailoutId ast_id() const { return ast_id_; }
- void set_ast_id(BailoutId id) { ast_id_ = id; }
-
- HEnterInlined* entry() const { return entry_; }
- void set_entry(HEnterInlined* entry) { entry_ = entry; }
-
- int length() const { return values_.length(); }
- bool is_special_index(int i) const {
- return i >= parameter_count() && i < parameter_count() + specials_count();
- }
-
- int first_expression_index() const {
- return parameter_count() + specials_count() + local_count();
- }
-
- void Bind(Variable* variable, HValue* value) {
- Bind(IndexFor(variable), value);
- }
-
- void Bind(int index, HValue* value);
-
- void BindContext(HValue* value) {
- Bind(parameter_count(), value);
- }
-
- HValue* Lookup(Variable* variable) const {
- return Lookup(IndexFor(variable));
- }
-
- HValue* Lookup(int index) const {
- HValue* result = values_[index];
- ASSERT(result != NULL);
- return result;
- }
-
- HValue* LookupContext() const {
- // Return first special.
- return Lookup(parameter_count());
- }
-
- void Push(HValue* value) {
- ASSERT(value != NULL);
- ++push_count_;
- values_.Add(value, zone());
- }
-
- HValue* Pop() {
- ASSERT(!ExpressionStackIsEmpty());
- if (push_count_ > 0) {
- --push_count_;
- } else {
- ++pop_count_;
- }
- return values_.RemoveLast();
- }
-
- void Drop(int count);
-
- HValue* Top() const { return ExpressionStackAt(0); }
-
- bool ExpressionStackIsEmpty() const;
-
- HValue* ExpressionStackAt(int index_from_top) const {
- int index = length() - index_from_top - 1;
- ASSERT(HasExpressionAt(index));
- return values_[index];
- }
-
- void SetExpressionStackAt(int index_from_top, HValue* value);
-
- HEnvironment* Copy() const;
- HEnvironment* CopyWithoutHistory() const;
- HEnvironment* CopyAsLoopHeader(HBasicBlock* block) const;
-
- // Create an "inlined version" of this environment, where the original
- // environment is the outer environment but the top expression stack
- // elements are moved to an inner environment as parameters.
- HEnvironment* CopyForInlining(Handle<JSFunction> target,
- int arguments,
- FunctionLiteral* function,
- HConstant* undefined,
- InliningKind inlining_kind,
- bool undefined_receiver) const;
-
- static bool UseUndefinedReceiver(Handle<JSFunction> closure,
- FunctionLiteral* function,
- CallKind call_kind,
- InliningKind inlining_kind) {
- return (closure->shared()->native() || !function->is_classic_mode()) &&
- call_kind == CALL_AS_FUNCTION && inlining_kind != CONSTRUCT_CALL_RETURN;
- }
-
- HEnvironment* DiscardInlined(bool drop_extra) {
- HEnvironment* outer = outer_;
- while (outer->frame_type() != JS_FUNCTION) outer = outer->outer_;
- if (drop_extra) outer->Drop(1);
- return outer;
- }
-
- void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
-
- void ClearHistory() {
- pop_count_ = 0;
- push_count_ = 0;
- assigned_variables_.Clear();
- }
-
- void SetValueAt(int index, HValue* value) {
- ASSERT(index < length());
- values_[index] = value;
- }
-
- void PrintTo(StringStream* stream);
- void PrintToStd();
-
- Zone* zone() const { return zone_; }
-
- private:
- HEnvironment(const HEnvironment* other, Zone* zone);
-
- HEnvironment(HEnvironment* outer,
- Handle<JSFunction> closure,
- FrameType frame_type,
- int arguments,
- Zone* zone);
-
- // Create an artificial stub environment (e.g. for argument adaptor or
- // constructor stub).
- HEnvironment* CreateStubEnvironment(HEnvironment* outer,
- Handle<JSFunction> target,
- FrameType frame_type,
- int arguments) const;
-
- // True if index is included in the expression stack part of the environment.
- bool HasExpressionAt(int index) const;
-
- void Initialize(int parameter_count, int local_count, int stack_height);
- void Initialize(const HEnvironment* other);
-
- // Map a variable to an environment index. Parameter indices are shifted
- // by 1 (receiver is parameter index -1 but environment index 0).
- // Stack-allocated local indices are shifted by the number of parameters.
- int IndexFor(Variable* variable) const {
- ASSERT(variable->IsStackAllocated());
- int shift = variable->IsParameter()
- ? 1
- : parameter_count_ + specials_count_;
- return variable->index() + shift;
- }
-
- Handle<JSFunction> closure_;
- // Value array [parameters] [specials] [locals] [temporaries].
- ZoneList<HValue*> values_;
- GrowableBitVector assigned_variables_;
- FrameType frame_type_;
- int parameter_count_;
- int specials_count_;
- int local_count_;
- HEnvironment* outer_;
- HEnterInlined* entry_;
- int pop_count_;
- int push_count_;
- BailoutId ast_id_;
- Zone* zone_;
-};
-
-
-class HInferRepresentation BASE_EMBEDDED {
- public:
- explicit HInferRepresentation(HGraph* graph)
- : graph_(graph),
- worklist_(8, graph->zone()),
- in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
-
- void Analyze();
- void AddToWorklist(HValue* current);
-
- private:
- Zone* zone() const { return graph_->zone(); }
-
- HGraph* graph_;
- ZoneList<HValue*> worklist_;
- BitVector in_worklist_;
-};
-
-
-class HOptimizedGraphBuilder;
-
-enum ArgumentsAllowedFlag {
- ARGUMENTS_NOT_ALLOWED,
- ARGUMENTS_ALLOWED
-};
-
-// This class is not BASE_EMBEDDED because our inlining implementation uses
-// new and delete.
-class AstContext {
- public:
- bool IsEffect() const { return kind_ == Expression::kEffect; }
- bool IsValue() const { return kind_ == Expression::kValue; }
- bool IsTest() const { return kind_ == Expression::kTest; }
-
- // 'Fill' this context with a hydrogen value. The value is assumed to
- // have already been inserted in the instruction stream (or not need to
- // be, e.g., HPhi). Call this function in tail position in the Visit
- // functions for expressions.
- virtual void ReturnValue(HValue* value) = 0;
-
- // Add a hydrogen instruction to the instruction stream (recording an
- // environment simulation if necessary) and then fill this context with
- // the instruction as value.
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id) = 0;
-
- // Finishes the current basic block and materialize a boolean for
- // value context, nothing for effect, generate a branch for test context.
- // Call this function in tail position in the Visit functions for
- // expressions.
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id) = 0;
-
- void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
- bool is_for_typeof() { return for_typeof_; }
-
- protected:
- AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind);
- virtual ~AstContext();
-
- HOptimizedGraphBuilder* owner() const { return owner_; }
-
- inline Zone* zone() const;
-
- // We want to be able to assert, in a context-specific way, that the stack
- // height makes sense when the context is filled.
-#ifdef DEBUG
- int original_length_;
-#endif
-
- private:
- HOptimizedGraphBuilder* owner_;
- Expression::Context kind_;
- AstContext* outer_;
- bool for_typeof_;
-};
-
-
-class EffectContext: public AstContext {
- public:
- explicit EffectContext(HOptimizedGraphBuilder* owner)
- : AstContext(owner, Expression::kEffect) {
- }
- virtual ~EffectContext();
-
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
-};
-
-
-class ValueContext: public AstContext {
- public:
- ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
- : AstContext(owner, Expression::kValue), flag_(flag) {
- }
- virtual ~ValueContext();
-
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
-
- bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
-
- private:
- ArgumentsAllowedFlag flag_;
-};
-
-
-class TestContext: public AstContext {
- public:
- TestContext(HOptimizedGraphBuilder* owner,
- Expression* condition,
- TypeFeedbackOracle* oracle,
- HBasicBlock* if_true,
- HBasicBlock* if_false)
- : AstContext(owner, Expression::kTest),
- condition_(condition),
- oracle_(oracle),
- if_true_(if_true),
- if_false_(if_false) {
- }
-
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
-
- static TestContext* cast(AstContext* context) {
- ASSERT(context->IsTest());
- return reinterpret_cast<TestContext*>(context);
- }
-
- Expression* condition() const { return condition_; }
- TypeFeedbackOracle* oracle() const { return oracle_; }
- HBasicBlock* if_true() const { return if_true_; }
- HBasicBlock* if_false() const { return if_false_; }
-
- private:
- // Build the shared core part of the translation unpacking a value into
- // control flow.
- void BuildBranch(HValue* value);
-
- Expression* condition_;
- TypeFeedbackOracle* oracle_;
- HBasicBlock* if_true_;
- HBasicBlock* if_false_;
-};
-
-
-class FunctionState {
- public:
- FunctionState(HOptimizedGraphBuilder* owner,
- CompilationInfo* info,
- TypeFeedbackOracle* oracle,
- InliningKind inlining_kind);
- ~FunctionState();
-
- CompilationInfo* compilation_info() { return compilation_info_; }
- TypeFeedbackOracle* oracle() { return oracle_; }
- AstContext* call_context() { return call_context_; }
- InliningKind inlining_kind() const { return inlining_kind_; }
- HBasicBlock* function_return() { return function_return_; }
- TestContext* test_context() { return test_context_; }
- void ClearInlinedTestContext() {
- delete test_context_;
- test_context_ = NULL;
- }
-
- FunctionState* outer() { return outer_; }
-
- HEnterInlined* entry() { return entry_; }
- void set_entry(HEnterInlined* entry) { entry_ = entry; }
-
- HArgumentsElements* arguments_elements() { return arguments_elements_; }
- void set_arguments_elements(HArgumentsElements* arguments_elements) {
- arguments_elements_ = arguments_elements;
- }
-
- bool arguments_pushed() { return arguments_elements() != NULL; }
-
- private:
- HOptimizedGraphBuilder* owner_;
-
- CompilationInfo* compilation_info_;
- TypeFeedbackOracle* oracle_;
-
- // During function inlining, expression context of the call being
- // inlined. NULL when not inlining.
- AstContext* call_context_;
-
- // The kind of call which is currently being inlined.
- InliningKind inlining_kind_;
-
- // When inlining in an effect or value context, this is the return block.
- // It is NULL otherwise. When inlining in a test context, there are a
- // pair of return blocks in the context. When not inlining, there is no
- // local return point.
- HBasicBlock* function_return_;
-
- // When inlining a call in a test context, a context containing a pair of
- // return blocks. NULL in all other cases.
- TestContext* test_context_;
-
- // When inlining HEnterInlined instruction corresponding to the function
- // entry.
- HEnterInlined* entry_;
-
- HArgumentsElements* arguments_elements_;
-
- FunctionState* outer_;
-};
-
-
-class HGraphBuilder {
- public:
- explicit HGraphBuilder(CompilationInfo* info)
- : info_(info), graph_(NULL), current_block_(NULL) {}
- virtual ~HGraphBuilder() {}
-
- HBasicBlock* current_block() const { return current_block_; }
- void set_current_block(HBasicBlock* block) { current_block_ = block; }
- HEnvironment* environment() const {
- return current_block()->last_environment();
- }
- Zone* zone() const { return info_->zone(); }
- HGraph* graph() { return graph_; }
-
- HGraph* CreateGraph();
-
- // Adding instructions.
- HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(BailoutId id,
- RemovableSimulate removable = FIXED_SIMULATE);
- HBoundsCheck* AddBoundsCheck(
- HValue* index,
- HValue* length,
- BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY,
- Representation r = Representation::None());
-
- protected:
- virtual bool BuildGraph() = 0;
-
- HBasicBlock* CreateBasicBlock(HEnvironment* env);
- HBasicBlock* CreateLoopHeaderBlock();
-
- // Building common constructs
- HInstruction* BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store);
-
- HInstruction* BuildFastElementAccess(
- HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store);
-
- HInstruction* BuildUncheckedMonomorphicElementAccess(
- HValue* object,
- HValue* key,
- HValue* val,
- HCheckMaps* mapcheck,
- bool is_js_array,
- ElementsKind elements_kind,
- bool is_store,
- Representation checked_index_representation = Representation::None());
-
- HInstruction* BuildStoreMap(HValue* object, HValue* map, BailoutId id);
- HInstruction* BuildStoreMap(HValue* object, Handle<Map> map, BailoutId id);
-
- class CheckBuilder {
- public:
- CheckBuilder(HGraphBuilder* builder, BailoutId id);
- ~CheckBuilder() {
- if (!finished_) End();
- }
-
- void CheckNotUndefined(HValue* value);
- void CheckIntegerEq(HValue* left, HValue* right);
- void End();
-
- private:
- Zone* zone() { return builder_->zone(); }
-
- HGraphBuilder* builder_;
- bool finished_;
- HBasicBlock* failure_block_;
- HBasicBlock* merge_block_;
- BailoutId id_;
- };
-
- class IfBuilder {
- public:
- IfBuilder(HGraphBuilder* builder, BailoutId id);
- ~IfBuilder() {
- if (!finished_) End();
- }
-
- HInstruction* BeginTrue(
- HValue* left,
- HValue* right,
- Token::Value token,
- Representation input_representation = Representation::Integer32());
- void BeginFalse();
- void End();
-
- private:
- Zone* zone() { return builder_->zone(); }
-
- HGraphBuilder* builder_;
- bool finished_;
- HBasicBlock* first_true_block_;
- HBasicBlock* last_true_block_;
- HBasicBlock* first_false_block_;
- HBasicBlock* merge_block_;
- BailoutId id_;
- };
-
- class LoopBuilder {
- public:
- enum Direction {
- kPreIncrement,
- kPostIncrement,
- kPreDecrement,
- kPostDecrement
- };
-
- LoopBuilder(HGraphBuilder* builder,
- HValue* context,
- Direction direction,
- BailoutId id);
- ~LoopBuilder() {
- ASSERT(finished_);
- }
-
- HValue* BeginBody(
- HValue* initial,
- HValue* terminating,
- Token::Value token,
- Representation input_representation = Representation::Integer32());
- void EndBody();
-
- private:
- Zone* zone() { return builder_->zone(); }
-
- HGraphBuilder* builder_;
- HValue* context_;
- HInstruction* increment_;
- HPhi* phi_;
- HBasicBlock* header_block_;
- HBasicBlock* body_block_;
- HBasicBlock* exit_block_;
- Direction direction_;
- BailoutId id_;
- bool finished_;
- };
-
- HValue* BuildAllocateElements(HContext* context,
- ElementsKind kind,
- HValue* capacity);
-
- void BuildCopyElements(HContext* context,
- HValue* from_elements,
- ElementsKind from_elements_kind,
- HValue* to_elements,
- ElementsKind to_elements_kind,
- HValue* length);
-
- private:
- HGraphBuilder();
- CompilationInfo* info_;
- HGraph* graph_;
- HBasicBlock* current_block_;
-};
-
-
-class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
- public:
- enum BreakType { BREAK, CONTINUE };
- enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
-
- // A class encapsulating (lazily-allocated) break and continue blocks for
- // a breakable statement. Separated from BreakAndContinueScope so that it
- // can have a separate lifetime.
- class BreakAndContinueInfo BASE_EMBEDDED {
- public:
- explicit BreakAndContinueInfo(BreakableStatement* target,
- int drop_extra = 0)
- : target_(target),
- break_block_(NULL),
- continue_block_(NULL),
- drop_extra_(drop_extra) {
- }
-
- BreakableStatement* target() { return target_; }
- HBasicBlock* break_block() { return break_block_; }
- void set_break_block(HBasicBlock* block) { break_block_ = block; }
- HBasicBlock* continue_block() { return continue_block_; }
- void set_continue_block(HBasicBlock* block) { continue_block_ = block; }
- int drop_extra() { return drop_extra_; }
-
- private:
- BreakableStatement* target_;
- HBasicBlock* break_block_;
- HBasicBlock* continue_block_;
- int drop_extra_;
- };
-
- // A helper class to maintain a stack of current BreakAndContinueInfo
- // structures mirroring BreakableStatement nesting.
- class BreakAndContinueScope BASE_EMBEDDED {
- public:
- BreakAndContinueScope(BreakAndContinueInfo* info,
- HOptimizedGraphBuilder* owner)
- : info_(info), owner_(owner), next_(owner->break_scope()) {
- owner->set_break_scope(this);
- }
-
- ~BreakAndContinueScope() { owner_->set_break_scope(next_); }
-
- BreakAndContinueInfo* info() { return info_; }
- HOptimizedGraphBuilder* owner() { return owner_; }
- BreakAndContinueScope* next() { return next_; }
-
- // Search the break stack for a break or continue target.
- HBasicBlock* Get(BreakableStatement* stmt, BreakType type, int* drop_extra);
-
- private:
- BreakAndContinueInfo* info_;
- HOptimizedGraphBuilder* owner_;
- BreakAndContinueScope* next_;
- };
-
- HOptimizedGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
-
- virtual bool BuildGraph();
-
- // Simple accessors.
- BreakAndContinueScope* break_scope() const { return break_scope_; }
- void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
-
- bool inline_bailout() { return inline_bailout_; }
-
- void AddSoftDeoptimize();
-
- // Bailout environment manipulation.
- void Push(HValue* value) { environment()->Push(value); }
- HValue* Pop() { return environment()->Pop(); }
-
- void Bailout(const char* reason);
-
- HBasicBlock* CreateJoin(HBasicBlock* first,
- HBasicBlock* second,
- BailoutId join_id);
-
- TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
-
- FunctionState* function_state() const { return function_state_; }
-
- void VisitDeclarations(ZoneList<Declaration*>* declarations);
-
- void* operator new(size_t size, Zone* zone) {
- return zone->New(static_cast<int>(size));
- }
- void operator delete(void* pointer, Zone* zone) { }
- void operator delete(void* pointer) { }
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- // Type of a member function that generates inline code for a native function.
- typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator)
- (CallRuntime* call);
-
- // Forward declarations for inner scope classes.
- class SubgraphScope;
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
- static const int kMaxCallPolymorphism = 4;
- static const int kMaxLoadPolymorphism = 4;
- static const int kMaxStorePolymorphism = 4;
-
- // Even in the 'unlimited' case we have to have some limit in order not to
- // overflow the stack.
- static const int kUnlimitedMaxInlinedSourceSize = 100000;
- static const int kUnlimitedMaxInlinedNodes = 10000;
- static const int kUnlimitedMaxInlinedNodesCumulative = 10000;
-
- // Simple accessors.
- void set_function_state(FunctionState* state) { function_state_ = state; }
-
- AstContext* ast_context() const { return ast_context_; }
- void set_ast_context(AstContext* context) { ast_context_ = context; }
-
- // Accessors forwarded to the function state.
- CompilationInfo* info() const {
- return function_state()->compilation_info();
- }
- AstContext* call_context() const {
- return function_state()->call_context();
- }
- HBasicBlock* function_return() const {
- return function_state()->function_return();
- }
- TestContext* inlined_test_context() const {
- return function_state()->test_context();
- }
- void ClearInlinedTestContext() {
- function_state()->ClearInlinedTestContext();
- }
- StrictModeFlag function_strict_mode_flag() {
- return function_state()->compilation_info()->is_classic_mode()
- ? kNonStrictMode : kStrictMode;
- }
-
- // Generators for inline runtime functions.
-#define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize) \
- void Generate##Name(CallRuntime* call);
-
- INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
-#undef INLINE_FUNCTION_GENERATOR_DECLARATION
-
- void VisitDelete(UnaryOperation* expr);
- void VisitVoid(UnaryOperation* expr);
- void VisitTypeof(UnaryOperation* expr);
- void VisitAdd(UnaryOperation* expr);
- void VisitSub(UnaryOperation* expr);
- void VisitBitNot(UnaryOperation* expr);
- void VisitNot(UnaryOperation* expr);
-
- void VisitComma(BinaryOperation* expr);
- void VisitLogicalExpression(BinaryOperation* expr);
- void VisitArithmeticExpression(BinaryOperation* expr);
-
- bool PreProcessOsrEntry(IterationStatement* statement);
- // True iff. we are compiling for OSR and the statement is the entry.
- bool HasOsrEntryAt(IterationStatement* statement);
- void VisitLoopBody(IterationStatement* stmt,
- HBasicBlock* loop_entry,
- BreakAndContinueInfo* break_info);
-
- // Create a back edge in the flow graph. body_exit is the predecessor
- // block and loop_entry is the successor block. loop_successor is the
- // block where control flow exits the loop normally (e.g., via failure of
- // the condition) and break_block is the block where control flow breaks
- // from the loop. All blocks except loop_entry can be NULL. The return
- // value is the new successor block which is the join of loop_successor
- // and break_block, or NULL.
- HBasicBlock* CreateLoop(IterationStatement* statement,
- HBasicBlock* loop_entry,
- HBasicBlock* body_exit,
- HBasicBlock* loop_successor,
- HBasicBlock* break_block);
-
- HBasicBlock* JoinContinue(IterationStatement* statement,
- HBasicBlock* exit_block,
- HBasicBlock* continue_block);
-
- HValue* Top() const { return environment()->Top(); }
- void Drop(int n) { environment()->Drop(n); }
- void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
-
- // The value of the arguments object is allowed in some but not most value
- // contexts. (It's allowed in all effect contexts and disallowed in all
- // test contexts.)
- void VisitForValue(Expression* expr,
- ArgumentsAllowedFlag flag = ARGUMENTS_NOT_ALLOWED);
- void VisitForTypeOf(Expression* expr);
- void VisitForEffect(Expression* expr);
- void VisitForControl(Expression* expr,
- HBasicBlock* true_block,
- HBasicBlock* false_block);
-
- // Visit an argument subexpression and emit a push to the outgoing arguments.
- void VisitArgument(Expression* expr);
-
- void VisitArgumentList(ZoneList<Expression*>* arguments);
-
- // Visit a list of expressions from left to right, each in a value context.
- void VisitExpressions(ZoneList<Expression*>* exprs);
-
- void AddPhi(HPhi* phi);
-
- void PushAndAdd(HInstruction* instr);
-
- // Remove the arguments from the bailout environment and emit instructions
- // to push them as outgoing parameters.
- template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
-
- static Representation ToRepresentation(TypeInfo info);
-
- void SetUpScope(Scope* scope);
- virtual void VisitStatements(ZoneList<Statement*>* statements);
-
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- // Helpers for flow graph construction.
- enum GlobalPropertyAccess {
- kUseCell,
- kUseGeneric
- };
- GlobalPropertyAccess LookupGlobalProperty(Variable* var,
- LookupResult* lookup,
- bool is_store);
-
- void EnsureArgumentsArePushedForAccess();
- bool TryArgumentsAccess(Property* expr);
-
- // Try to optimize fun.apply(receiver, arguments) pattern.
- bool TryCallApply(Call* expr);
-
- int InliningAstSize(Handle<JSFunction> target);
- bool TryInline(CallKind call_kind,
- Handle<JSFunction> target,
- int arguments_count,
- HValue* implicit_return_value,
- BailoutId ast_id,
- BailoutId return_id,
- InliningKind inlining_kind);
-
- bool TryInlineCall(Call* expr, bool drop_extra = false);
- bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
- bool TryInlineGetter(Handle<JSFunction> getter, Property* prop);
- bool TryInlineSetter(Handle<JSFunction> setter,
- Assignment* assignment,
- HValue* implicit_return_value);
- bool TryInlineApply(Handle<JSFunction> function,
- Call* expr,
- int arguments_count);
- bool TryInlineBuiltinMethodCall(Call* expr,
- HValue* receiver,
- Handle<Map> receiver_map,
- CheckType check_type);
- bool TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra);
-
- // If --trace-inlining, print a line of the inlining trace. Inlining
- // succeeded if the reason string is NULL and failed if there is a
- // non-NULL reason string.
- void TraceInline(Handle<JSFunction> target,
- Handle<JSFunction> caller,
- const char* failure_reason);
-
- void HandleGlobalVariableAssignment(Variable* var,
- HValue* value,
- int position,
- BailoutId ast_id);
-
- void HandlePropertyAssignment(Assignment* expr);
- void HandleCompoundAssignment(Assignment* expr);
- void HandlePolymorphicLoadNamedField(Property* expr,
- HValue* object,
- SmallMapList* types,
- Handle<String> name);
- void HandlePolymorphicStoreNamedField(Assignment* expr,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name);
- void HandlePolymorphicCallNamed(Call* expr,
- HValue* receiver,
- SmallMapList* types,
- Handle<String> name);
- void HandleLiteralCompareTypeof(CompareOperation* expr,
- HTypeof* typeof_expr,
- Handle<String> check);
- void HandleLiteralCompareNil(CompareOperation* expr,
- HValue* value,
- NilValue nil);
-
- HInstruction* BuildStringCharCodeAt(HValue* context,
- HValue* string,
- HValue* index);
- HInstruction* BuildBinaryOperation(BinaryOperation* expr,
- HValue* left,
- HValue* right);
- HInstruction* BuildIncrement(bool returns_original_input,
- CountOperation* expr);
- HInstruction* BuildLoadKeyedGeneric(HValue* object,
- HValue* key);
-
- HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
- HValue* key,
- HValue* val,
- SmallMapList* maps);
-
- HInstruction* BuildMonomorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- HValue* dependency,
- Handle<Map> map,
- bool is_store);
-
- HValue* HandlePolymorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- Expression* prop,
- BailoutId ast_id,
- int position,
- bool is_store,
- bool* has_side_effects);
-
- HValue* HandleKeyedElementAccess(HValue* obj,
- HValue* key,
- HValue* val,
- Expression* expr,
- BailoutId ast_id,
- int position,
- bool is_store,
- bool* has_side_effects);
-
- HLoadNamedField* BuildLoadNamedField(HValue* object,
- Handle<Map> map,
- LookupResult* result);
- HInstruction* BuildLoadNamedGeneric(HValue* object,
- Handle<String> name,
- Property* expr);
- HInstruction* BuildCallGetter(HValue* object,
- Handle<Map> map,
- Handle<JSFunction> getter,
- Handle<JSObject> holder);
- HInstruction* BuildLoadNamedMonomorphic(HValue* object,
- Handle<String> name,
- Property* expr,
- Handle<Map> map);
-
- void AddCheckMapsWithTransitions(HValue* object,
- Handle<Map> map);
-
- HInstruction* BuildStoreNamedField(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map,
- LookupResult* lookup);
- HInstruction* BuildStoreNamedGeneric(HValue* object,
- Handle<String> name,
- HValue* value);
- HInstruction* BuildCallSetter(HValue* object,
- HValue* value,
- Handle<Map> map,
- Handle<JSFunction> setter,
- Handle<JSObject> holder);
- HInstruction* BuildStoreNamedMonomorphic(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map);
- HInstruction* BuildStoreKeyedGeneric(HValue* object,
- HValue* key,
- HValue* value);
-
- HValue* BuildContextChainWalk(Variable* var);
-
- HInstruction* BuildThisFunction();
-
- void AddCheckPrototypeMaps(Handle<JSObject> holder,
- Handle<Map> receiver_map);
-
- void AddCheckConstantFunction(Handle<JSObject> holder,
- HValue* receiver,
- Handle<Map> receiver_map);
-
- bool MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount);
-
- // The translation state of the currently-being-translated function.
- FunctionState* function_state_;
-
- // The base of the function state stack.
- FunctionState initial_function_state_;
-
- // Expression context of the currently visited subexpression. NULL when
- // visiting statements.
- AstContext* ast_context_;
-
- // A stack of breakable statements entered.
- BreakAndContinueScope* break_scope_;
-
- int inlined_count_;
- ZoneList<Handle<Object> > globals_;
-
- bool inline_bailout_;
-
- friend class FunctionState; // Pushes and pops the state stack.
- friend class AstContext; // Pushes and pops the AST context stack.
- friend class KeyedLoadFastElementStub;
-
- DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
-};
-
-
-Zone* AstContext::zone() const { return owner_->zone(); }
-
-
-class HValueMap: public ZoneObject {
- public:
- explicit HValueMap(Zone* zone)
- : array_size_(0),
- lists_size_(0),
- count_(0),
- present_flags_(0),
- array_(NULL),
- lists_(NULL),
- free_list_head_(kNil) {
- ResizeLists(kInitialSize, zone);
- Resize(kInitialSize, zone);
- }
-
- void Kill(GVNFlagSet flags);
-
- void Add(HValue* value, Zone* zone) {
- present_flags_.Add(value->gvn_flags());
- Insert(value, zone);
- }
-
- HValue* Lookup(HValue* value) const;
-
- HValueMap* Copy(Zone* zone) const {
- return new(zone) HValueMap(zone, this);
- }
-
- bool IsEmpty() const { return count_ == 0; }
-
- private:
- // A linked list of HValue* values. Stored in arrays.
- struct HValueMapListElement {
- HValue* value;
- int next; // Index in the array of the next list element.
- };
- static const int kNil = -1; // The end of a linked list
-
- // Must be a power of 2.
- static const int kInitialSize = 16;
-
- HValueMap(Zone* zone, const HValueMap* other);
-
- void Resize(int new_size, Zone* zone);
- void ResizeLists(int new_size, Zone* zone);
- void Insert(HValue* value, Zone* zone);
- uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
-
- int array_size_;
- int lists_size_;
- int count_; // The number of values stored in the HValueMap.
- GVNFlagSet present_flags_; // All flags that are in any value in the
- // HValueMap.
- HValueMapListElement* array_; // Primary store - contains the first value
- // with a given hash. Colliding elements are stored in linked lists.
- HValueMapListElement* lists_; // The linked lists containing hash collisions.
- int free_list_head_; // Unused elements in lists_ are on the free list.
-};
-
-
-class HSideEffectMap BASE_EMBEDDED {
- public:
- HSideEffectMap();
- explicit HSideEffectMap(HSideEffectMap* other);
- HSideEffectMap& operator= (const HSideEffectMap& other);
-
- void Kill(GVNFlagSet flags);
-
- void Store(GVNFlagSet flags, HInstruction* instr);
-
- bool IsEmpty() const { return count_ == 0; }
-
- inline HInstruction* operator[](int i) const {
- ASSERT(0 <= i);
- ASSERT(i < kNumberOfTrackedSideEffects);
- return data_[i];
- }
- inline HInstruction* at(int i) const { return operator[](i); }
-
- private:
- int count_;
- HInstruction* data_[kNumberOfTrackedSideEffects];
-};
-
-
-class HStatistics: public Malloced {
- public:
- void Initialize(CompilationInfo* info);
- void Print();
- void SaveTiming(const char* name, int64_t ticks, unsigned size);
- static HStatistics* Instance() {
- static SetOncePointer<HStatistics> instance;
- if (!instance.is_set()) {
- instance.set(new HStatistics());
- }
- return instance.get();
- }
-
- void IncrementSubtotals(int64_t create_graph,
- int64_t optimize_graph,
- int64_t generate_code) {
- create_graph_ += create_graph;
- optimize_graph_ += optimize_graph;
- generate_code_ += generate_code;
- }
-
- private:
- HStatistics()
- : timing_(5),
- names_(5),
- sizes_(5),
- create_graph_(0),
- optimize_graph_(0),
- generate_code_(0),
- total_size_(0),
- full_code_gen_(0),
- source_size_(0) { }
-
- List<int64_t> timing_;
- List<const char*> names_;
- List<unsigned> sizes_;
- int64_t create_graph_;
- int64_t optimize_graph_;
- int64_t generate_code_;
- unsigned total_size_;
- int64_t full_code_gen_;
- double source_size_;
-};
-
-
-class HPhase BASE_EMBEDDED {
- public:
- static const char* const kFullCodeGen;
-
- explicit HPhase(const char* name) { Begin(name, NULL, NULL, NULL); }
- HPhase(const char* name, HGraph* graph) {
- Begin(name, graph, NULL, NULL);
- }
- HPhase(const char* name, LChunk* chunk) {
- Begin(name, NULL, chunk, NULL);
- }
- HPhase(const char* name, LAllocator* allocator) {
- Begin(name, NULL, NULL, allocator);
- }
-
- ~HPhase() {
- End();
- }
-
- private:
- void Begin(const char* name,
- HGraph* graph,
- LChunk* chunk,
- LAllocator* allocator);
- void End() const;
-
- int64_t start_;
- const char* name_;
- HGraph* graph_;
- LChunk* chunk_;
- LAllocator* allocator_;
- unsigned start_allocation_size_;
-};
-
-
-class HTracer: public Malloced {
- public:
- void TraceCompilation(CompilationInfo* info);
- void TraceHydrogen(const char* name, HGraph* graph);
- void TraceLithium(const char* name, LChunk* chunk);
- void TraceLiveRanges(const char* name, LAllocator* allocator);
-
- static HTracer* Instance() {
- static SetOncePointer<HTracer> instance;
- if (!instance.is_set()) {
- instance.set(new HTracer("hydrogen.cfg"));
- }
- return instance.get();
- }
-
- private:
- class Tag BASE_EMBEDDED {
- public:
- Tag(HTracer* tracer, const char* name) {
- name_ = name;
- tracer_ = tracer;
- tracer->PrintIndent();
- tracer->trace_.Add("begin_%s\n", name);
- tracer->indent_++;
- }
-
- ~Tag() {
- tracer_->indent_--;
- tracer_->PrintIndent();
- tracer_->trace_.Add("end_%s\n", name_);
- ASSERT(tracer_->indent_ >= 0);
- tracer_->FlushToFile();
- }
-
- private:
- HTracer* tracer_;
- const char* name_;
- };
-
- explicit HTracer(const char* filename)
- : filename_(filename), trace_(&string_allocator_), indent_(0) {
- WriteChars(filename, "", 0, false);
- }
-
- void TraceLiveRange(LiveRange* range, const char* type, Zone* zone);
- void Trace(const char* name, HGraph* graph, LChunk* chunk);
- void FlushToFile();
-
- void PrintEmptyProperty(const char* name) {
- PrintIndent();
- trace_.Add("%s\n", name);
- }
-
- void PrintStringProperty(const char* name, const char* value) {
- PrintIndent();
- trace_.Add("%s \"%s\"\n", name, value);
- }
-
- void PrintLongProperty(const char* name, int64_t value) {
- PrintIndent();
- trace_.Add("%s %d000\n", name, static_cast<int>(value / 1000));
- }
-
- void PrintBlockProperty(const char* name, int block_id) {
- PrintIndent();
- trace_.Add("%s \"B%d\"\n", name, block_id);
- }
-
- void PrintIntProperty(const char* name, int value) {
- PrintIndent();
- trace_.Add("%s %d\n", name, value);
- }
-
- void PrintIndent() {
- for (int i = 0; i < indent_; i++) {
- trace_.Add(" ");
- }
- }
-
- const char* filename_;
- HeapStringAllocator string_allocator_;
- StringStream trace_;
- int indent_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_HYDROGEN_H_
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h b/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h
deleted file mode 100644
index 56d88b0..0000000
--- a/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h
+++ /dev/null
@@ -1,503 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-// A light-weight IA32 Assembler.
-
-#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
-#define V8_IA32_ASSEMBLER_IA32_INL_H_
-
-#include "ia32/assembler-ia32.h"
-
-#include "cpu.h"
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-
-static const byte kCallOpcode = 0xE8;
-
-
-// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta) {
- if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- } else if (rmode_ == CODE_AGE_SEQUENCE) {
- if (*pc_ == kCallOpcode) {
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- }
- } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
- // Special handling of js_return when a break point is set (call
- // instruction has been inserted).
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
- // Special handling of a debug break slot when a break point is set (call
- // instruction has been inserted).
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- } else if (IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p += delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- }
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
- || rmode_ == EMBEDDED_OBJECT
- || rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address>(pc_);
-}
-
-
-int RelocInfo::target_address_size() {
- return Assembler::kSpecialTargetSize;
-}
-
-
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- Assembler::set_target_address_at(pc_, target);
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_at(pc_);
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_Handle_at(pc_);
-}
-
-
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return &Memory::Object_at(pc_);
-}
-
-
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Memory::Object_at(pc_) = target;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER &&
- host() != NULL &&
- target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
- }
-}
-
-
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
-}
-
-
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
-}
-
-
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
-}
-
-
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
- Memory::Address_at(pc_) = address;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
- }
-}
-
-
-Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- ASSERT(*pc_ == kCallOpcode);
- return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(*pc_ == kCallOpcode);
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
-}
-
-
-Address RelocInfo::call_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Assembler::target_address_at(pc_ + 1);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Assembler::set_target_address_at(pc_ + 1, target);
- if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 1);
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- return *pc_ == kCallOpcode;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- return !Assembler::IsNop(pc());
-}
-
-
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
- #ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
- visitor->VisitDebugTarget(this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- visitor->VisitRuntimeEntry(this);
- }
-}
-
-
-template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(heap, this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
-
-
-Immediate::Immediate(int x) {
- x_ = x;
- rmode_ = RelocInfo::NONE32;
-}
-
-
-Immediate::Immediate(const ExternalReference& ext) {
- x_ = reinterpret_cast<int32_t>(ext.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Immediate::Immediate(Label* internal_offset) {
- x_ = reinterpret_cast<int32_t>(internal_offset);
- rmode_ = RelocInfo::INTERNAL_REFERENCE;
-}
-
-
-Immediate::Immediate(Handle<Object> handle) {
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- ASSERT(!HEAP->InNewSpace(obj));
- if (obj->IsHeapObject()) {
- x_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // no relocation needed
- x_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE32;
- }
-}
-
-
-Immediate::Immediate(Smi* value) {
- x_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE32;
-}
-
-
-Immediate::Immediate(Address addr) {
- x_ = reinterpret_cast<int32_t>(addr);
- rmode_ = RelocInfo::NONE32;
-}
-
-
-void Assembler::emit(uint32_t x) {
- *reinterpret_cast<uint32_t*>(pc_) = x;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::emit(Handle<Object> handle) {
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- ASSERT(!isolate()->heap()->InNewSpace(obj));
- if (obj->IsHeapObject()) {
- emit(reinterpret_cast<intptr_t>(handle.location()),
- RelocInfo::EMBEDDED_OBJECT);
- } else {
- // no relocation needed
- emit(reinterpret_cast<intptr_t>(obj));
- }
-}
-
-
-void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
- if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
- RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
- } else if (!RelocInfo::IsNone(rmode)) {
- RecordRelocInfo(rmode);
- }
- emit(x);
-}
-
-
-void Assembler::emit(const Immediate& x) {
- if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
- Label* label = reinterpret_cast<Label*>(x.x_);
- emit_code_relative_offset(label);
- return;
- }
- if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
- emit(x.x_);
-}
-
-
-void Assembler::emit_code_relative_offset(Label* label) {
- if (label->is_bound()) {
- int32_t pos;
- pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
- emit(pos);
- } else {
- emit_disp(label, Displacement::CODE_RELATIVE);
- }
-}
-
-
-void Assembler::emit_w(const Immediate& x) {
- ASSERT(RelocInfo::IsNone(x.rmode_));
- uint16_t value = static_cast<uint16_t>(x.x_);
- reinterpret_cast<uint16_t*>(pc_)[0] = value;
- pc_ += sizeof(uint16_t);
-}
-
-
-Address Assembler::target_address_at(Address pc) {
- return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- int32_t* p = reinterpret_cast<int32_t*>(pc);
- *p = target - (pc + sizeof(int32_t));
- CPU::FlushICache(p, sizeof(int32_t));
-}
-
-
-Address Assembler::target_address_from_return_address(Address pc) {
- return pc - kCallTargetAddressOffset;
-}
-
-
-Displacement Assembler::disp_at(Label* L) {
- return Displacement(long_at(L->pos()));
-}
-
-
-void Assembler::disp_at_put(Label* L, Displacement disp) {
- long_at_put(L->pos(), disp.data());
-}
-
-
-void Assembler::emit_disp(Label* L, Displacement::Type type) {
- Displacement disp(L, type);
- L->link_to(pc_offset());
- emit(static_cast<int>(disp.data()));
-}
-
-
-void Assembler::emit_near_disp(Label* L) {
- byte disp = 0x00;
- if (L->is_near_linked()) {
- int offset = L->near_link_pos() - pc_offset();
- ASSERT(is_int8(offset));
- disp = static_cast<byte>(offset & 0xFF);
- }
- L->link_to(pc_offset(), Label::kNear);
- *pc_++ = disp;
-}
-
-
-void Operand::set_modrm(int mod, Register rm) {
- ASSERT((mod & -4) == 0);
- buf_[0] = mod << 6 | rm.code();
- len_ = 1;
-}
-
-
-void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
- ASSERT(len_ == 1);
- ASSERT((scale & -4) == 0);
- // Use SIB with no index register only for base esp.
- ASSERT(!index.is(esp) || base.is(esp));
- buf_[1] = scale << 6 | index.code() << 3 | base.code();
- len_ = 2;
-}
-
-
-void Operand::set_disp8(int8_t disp) {
- ASSERT(len_ == 1 || len_ == 2);
- *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
-}
-
-
-void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
- ASSERT(len_ == 1 || len_ == 2);
- int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int32_t);
- rmode_ = rmode;
-}
-
-Operand::Operand(Register reg) {
- // reg
- set_modrm(3, reg);
-}
-
-
-Operand::Operand(XMMRegister xmm_reg) {
- Register reg = { xmm_reg.code() };
- set_modrm(3, reg);
-}
-
-
-Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
- // [disp/r]
- set_modrm(0, ebp);
- set_dispr(disp, rmode);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_ASSEMBLER_IA32_INL_H_
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32.cc b/src/3rdparty/v8/src/ia32/assembler-ia32.cc
deleted file mode 100644
index 123383c..0000000
--- a/src/3rdparty/v8/src/ia32/assembler-ia32.cc
+++ /dev/null
@@ -1,2696 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "disassembler.h"
-#include "macro-assembler.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Implementation of CpuFeatures
-
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-uint64_t CpuFeatures::supported_ = 0;
-uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
-
-
-ExternalReference ExternalReference::cpu_features() {
- ASSERT(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-
-int IntelDoubleRegister::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(SSE2)) {
- return XMMRegister::kNumAllocatableRegisters;
- } else {
- return X87TopOfStackRegister::kNumAllocatableRegisters;
- }
-}
-
-
-int IntelDoubleRegister::NumRegisters() {
- if (CpuFeatures::IsSupported(SSE2)) {
- return XMMRegister::kNumRegisters;
- } else {
- return X87TopOfStackRegister::kNumRegisters;
- }
-}
-
-
-const char* IntelDoubleRegister::AllocationIndexToString(int index) {
- if (CpuFeatures::IsSupported(SSE2)) {
- return XMMRegister::AllocationIndexToString(index);
- } else {
- return X87TopOfStackRegister::AllocationIndexToString(index);
- }
-}
-
-
-// The Probe method needs executable memory, so it uses Heap::CreateCode.
-// Allocation failure is silent and leads to safe default.
-void CpuFeatures::Probe() {
- ASSERT(!initialized_);
- ASSERT(supported_ == 0);
-#ifdef DEBUG
- initialized_ = true;
-#endif
- if (Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- return; // No features if we might serialize.
- }
-
- const int kBufferSize = 4 * KB;
- VirtualMemory* memory = new VirtualMemory(kBufferSize);
- if (!memory->IsReserved()) {
- delete memory;
- return;
- }
- ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
- if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
- delete memory;
- return;
- }
-
- Assembler assm(NULL, memory->address(), kBufferSize);
- Label cpuid, done;
-#define __ assm.
- // Save old esp, since we are going to modify the stack.
- __ push(ebp);
- __ pushfd();
- __ push(ecx);
- __ push(ebx);
- __ mov(ebp, esp);
-
- // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
- __ pushfd();
- __ pop(eax);
- __ mov(edx, eax);
- __ xor_(eax, 0x200000); // Flip bit 21.
- __ push(eax);
- __ popfd();
- __ pushfd();
- __ pop(eax);
- __ xor_(eax, edx); // Different if CPUID is supported.
- __ j(not_zero, &cpuid);
-
- // CPUID not supported. Clear the supported features in edx:eax.
- __ xor_(eax, eax);
- __ xor_(edx, edx);
- __ jmp(&done);
-
- // Invoke CPUID with 1 in eax to get feature information in
- // ecx:edx. Temporarily enable CPUID support because we know it's
- // safe here.
- __ bind(&cpuid);
- __ mov(eax, 1);
- supported_ = (1 << CPUID);
- { Scope fscope(CPUID);
- __ cpuid();
- }
- supported_ = 0;
-
- // Move the result from ecx:edx to edx:eax and make sure to mark the
- // CPUID feature as supported.
- __ mov(eax, edx);
- __ or_(eax, 1 << CPUID);
- __ mov(edx, ecx);
-
- // Done.
- __ bind(&done);
- __ mov(esp, ebp);
- __ pop(ebx);
- __ pop(ecx);
- __ popfd();
- __ pop(ebp);
- __ ret(0);
-#undef __
-
- typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
- supported_ = probe();
- found_by_runtime_probing_ = supported_;
- uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
- supported_ |= os_guarantees;
- found_by_runtime_probing_ &= ~os_guarantees;
-
- delete memory;
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Displacement
-
-void Displacement::init(Label* L, Type type) {
- ASSERT(!L->is_bound());
- int next = 0;
- if (L->is_linked()) {
- next = L->pos();
- ASSERT(next > 0); // Displacements must be at positions > 0
- }
- // Ensure that we _never_ overflow the next field.
- ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
- data_ = NextField::encode(next) | TypeField::encode(type);
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-
-const int RelocInfo::kApplyMask =
- RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
- 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE;
-
-
-bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on IA32 means that it is a relative address, as used by
- // branch instructions. These are also the ones that need changing when a
- // code object moves.
- return (1 << rmode_) & kApplyMask;
-}
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- for (int i = 0; i < instruction_count; i++) {
- *(pc_ + i) = *(instructions + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Call instruction takes up 5 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 5;
- int code_size = kCallCodeSize + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc_, code_size);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->call(target, RelocInfo::NONE32);
-
- // Check that the size of the code generated is as expected.
- ASSERT_EQ(kCallCodeSize,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- ASSERT_GE(guard_bytes, 0);
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand
-
-Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
- // [base + disp/r]
- if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
- // [base]
- set_modrm(0, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
- } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
- // [base + disp8]
- set_modrm(1, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
- set_disp8(disp);
- } else {
- // [base + disp/r]
- set_modrm(2, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
- set_dispr(disp, rmode);
- }
-}
-
-
-Operand::Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode) {
- ASSERT(!index.is(esp)); // illegal addressing mode
- // [base + index*scale + disp/r]
- if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
- // [base + index*scale]
- set_modrm(0, esp);
- set_sib(scale, index, base);
- } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
- // [base + index*scale + disp8]
- set_modrm(1, esp);
- set_sib(scale, index, base);
- set_disp8(disp);
- } else {
- // [base + index*scale + disp/r]
- set_modrm(2, esp);
- set_sib(scale, index, base);
- set_dispr(disp, rmode);
- }
-}
-
-
-Operand::Operand(Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode) {
- ASSERT(!index.is(esp)); // illegal addressing mode
- // [index*scale + disp/r]
- set_modrm(0, esp);
- set_sib(scale, index, ebp);
- set_dispr(disp, rmode);
-}
-
-
-bool Operand::is_reg(Register reg) const {
- return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
- && ((buf_[0] & 0x07) == reg.code()); // register codes match.
-}
-
-
-bool Operand::is_reg_only() const {
- return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only.
-}
-
-
-Register Operand::reg() const {
- ASSERT(is_reg_only());
- return Register::from_code(buf_[0] & 0x07);
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-// Emit a single byte. Must always be inlined.
-#define EMIT(x) \
- *pc_++ = (x)
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static void InitCoverageLog();
-#endif
-
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
- positions_recorder_(this) {
- // Clear the buffer in debug mode unless it was provided by the
- // caller in which case we can't be sure it's okay to overwrite
- // existing code in it; see CodePatcher::CodePatcher(...).
-#ifdef DEBUG
- if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size_); // int3
- }
-#endif
-
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-
-#ifdef GENERATED_CODE_COVERAGE
- InitCoverageLog();
-#endif
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- // Finalize code (at this point overflow() may be true, but the gap ensures
- // that we are still not overlapping instructions and relocation info).
- ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
- desc->origin = this;
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(IsPowerOf2(m));
- int mask = m - 1;
- int addr = pc_offset();
- Nop((m - (addr & mask)) & mask);
-}
-
-
-bool Assembler::IsNop(Address addr) {
- Address a = addr;
- while (*a == 0x66) a++;
- if (*a == 0x90) return true;
- if (a[0] == 0xf && a[1] == 0x1f) return true;
- return false;
-}
-
-
-void Assembler::Nop(int bytes) {
- EnsureSpace ensure_space(this);
-
- if (!CpuFeatures::IsSupported(SSE2)) {
- // Older CPUs that do not support SSE2 may not support multibyte NOP
- // instructions.
- for (; bytes > 0; bytes--) {
- EMIT(0x90);
- }
- return;
- }
-
- // Multi byte nops from http://support.amd.com/us/Processor_TechDocs/40546.pdf
- while (bytes > 0) {
- switch (bytes) {
- case 2:
- EMIT(0x66);
- case 1:
- EMIT(0x90);
- return;
- case 3:
- EMIT(0xf);
- EMIT(0x1f);
- EMIT(0);
- return;
- case 4:
- EMIT(0xf);
- EMIT(0x1f);
- EMIT(0x40);
- EMIT(0);
- return;
- case 6:
- EMIT(0x66);
- case 5:
- EMIT(0xf);
- EMIT(0x1f);
- EMIT(0x44);
- EMIT(0);
- EMIT(0);
- return;
- case 7:
- EMIT(0xf);
- EMIT(0x1f);
- EMIT(0x80);
- EMIT(0);
- EMIT(0);
- EMIT(0);
- EMIT(0);
- return;
- default:
- case 11:
- EMIT(0x66);
- bytes--;
- case 10:
- EMIT(0x66);
- bytes--;
- case 9:
- EMIT(0x66);
- bytes--;
- case 8:
- EMIT(0xf);
- EMIT(0x1f);
- EMIT(0x84);
- EMIT(0);
- EMIT(0);
- EMIT(0);
- EMIT(0);
- EMIT(0);
- bytes -= 8;
- }
- }
-}
-
-
-void Assembler::CodeTargetAlign() {
- Align(16); // Preferred alignment of jump targets on ia32.
-}
-
-
-void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CPUID));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xA2);
-}
-
-
-void Assembler::pushad() {
- EnsureSpace ensure_space(this);
- EMIT(0x60);
-}
-
-
-void Assembler::popad() {
- EnsureSpace ensure_space(this);
- EMIT(0x61);
-}
-
-
-void Assembler::pushfd() {
- EnsureSpace ensure_space(this);
- EMIT(0x9C);
-}
-
-
-void Assembler::popfd() {
- EnsureSpace ensure_space(this);
- EMIT(0x9D);
-}
-
-
-void Assembler::push(const Immediate& x) {
- EnsureSpace ensure_space(this);
- if (x.is_int8()) {
- EMIT(0x6a);
- EMIT(x.x_);
- } else {
- EMIT(0x68);
- emit(x);
- }
-}
-
-
-void Assembler::push_imm32(int32_t imm32) {
- EnsureSpace ensure_space(this);
- EMIT(0x68);
- emit(imm32);
-}
-
-
-void Assembler::push(Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x50 | src.code());
-}
-
-
-void Assembler::push(const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(esi, src);
-}
-
-
-void Assembler::pop(Register dst) {
- ASSERT(reloc_info_writer.last_pc() != NULL);
- EnsureSpace ensure_space(this);
- EMIT(0x58 | dst.code());
-}
-
-
-void Assembler::pop(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0x8F);
- emit_operand(eax, dst);
-}
-
-
-void Assembler::enter(const Immediate& size) {
- EnsureSpace ensure_space(this);
- EMIT(0xC8);
- emit_w(size);
- EMIT(0);
-}
-
-
-void Assembler::leave() {
- EnsureSpace ensure_space(this);
- EMIT(0xC9);
-}
-
-
-void Assembler::mov_b(Register dst, const Operand& src) {
- CHECK(dst.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x8A);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mov_b(const Operand& dst, int8_t imm8) {
- EnsureSpace ensure_space(this);
- EMIT(0xC6);
- emit_operand(eax, dst);
- EMIT(imm8);
-}
-
-
-void Assembler::mov_b(const Operand& dst, Register src) {
- CHECK(src.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x88);
- emit_operand(src, dst);
-}
-
-
-void Assembler::mov_w(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mov_w(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::mov(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- EMIT(0xB8 | dst.code());
- emit(imm32);
-}
-
-
-void Assembler::mov(Register dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- EMIT(0xB8 | dst.code());
- emit(x);
-}
-
-
-void Assembler::mov(Register dst, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- EMIT(0xB8 | dst.code());
- emit(handle);
-}
-
-
-void Assembler::mov(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mov(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x89);
- EMIT(0xC0 | src.code() << 3 | dst.code());
-}
-
-
-void Assembler::mov(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- EMIT(0xC7);
- emit_operand(eax, dst);
- emit(x);
-}
-
-
-void Assembler::mov(const Operand& dst, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- EMIT(0xC7);
- emit_operand(eax, dst);
- emit(handle);
-}
-
-
-void Assembler::mov(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movsx_b(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xBE);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movsx_w(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xBF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzx_b(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzx_w(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CMOV));
- EnsureSpace ensure_space(this);
- // Opcode: 0f 40 + cc /r.
- EMIT(0x0F);
- EMIT(0x40 + cc);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cld() {
- EnsureSpace ensure_space(this);
- EMIT(0xFC);
-}
-
-
-void Assembler::rep_movs() {
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0xA5);
-}
-
-
-void Assembler::rep_stos() {
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0xAB);
-}
-
-
-void Assembler::stos() {
- EnsureSpace ensure_space(this);
- EMIT(0xAB);
-}
-
-
-void Assembler::xchg(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.is(eax) || dst.is(eax)) { // Single-byte encoding.
- EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
- } else {
- EMIT(0x87);
- EMIT(0xC0 | src.code() << 3 | dst.code());
- }
-}
-
-
-void Assembler::adc(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit_arith(2, Operand(dst), Immediate(imm32));
-}
-
-
-void Assembler::adc(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x13);
- emit_operand(dst, src);
-}
-
-
-void Assembler::add(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x03);
- emit_operand(dst, src);
-}
-
-
-void Assembler::add(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x01);
- emit_operand(src, dst);
-}
-
-
-void Assembler::add(const Operand& dst, const Immediate& x) {
- ASSERT(reloc_info_writer.last_pc() != NULL);
- EnsureSpace ensure_space(this);
- emit_arith(0, dst, x);
-}
-
-
-void Assembler::and_(Register dst, int32_t imm32) {
- and_(dst, Immediate(imm32));
-}
-
-
-void Assembler::and_(Register dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(4, Operand(dst), x);
-}
-
-
-void Assembler::and_(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x23);
- emit_operand(dst, src);
-}
-
-
-void Assembler::and_(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(4, dst, x);
-}
-
-
-void Assembler::and_(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x21);
- emit_operand(src, dst);
-}
-
-
-void Assembler::cmpb(const Operand& op, int8_t imm8) {
- EnsureSpace ensure_space(this);
- if (op.is_reg(eax)) {
- EMIT(0x3C);
- } else {
- EMIT(0x80);
- emit_operand(edi, op); // edi == 7
- }
- EMIT(imm8);
-}
-
-
-void Assembler::cmpb(const Operand& op, Register reg) {
- CHECK(reg.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x38);
- emit_operand(reg, op);
-}
-
-
-void Assembler::cmpb(Register reg, const Operand& op) {
- CHECK(reg.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x3A);
- emit_operand(reg, op);
-}
-
-
-void Assembler::cmpw(const Operand& op, Immediate imm16) {
- ASSERT(imm16.is_int16());
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x81);
- emit_operand(edi, op);
- emit_w(imm16);
-}
-
-
-void Assembler::cmp(Register reg, int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit_arith(7, Operand(reg), Immediate(imm32));
-}
-
-
-void Assembler::cmp(Register reg, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- emit_arith(7, Operand(reg), Immediate(handle));
-}
-
-
-void Assembler::cmp(Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x3B);
- emit_operand(reg, op);
-}
-
-
-void Assembler::cmp(const Operand& op, const Immediate& imm) {
- EnsureSpace ensure_space(this);
- emit_arith(7, op, imm);
-}
-
-
-void Assembler::cmp(const Operand& op, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- emit_arith(7, op, Immediate(handle));
-}
-
-
-void Assembler::cmpb_al(const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x38); // CMP r/m8, r8
- emit_operand(eax, op); // eax has same code as register al.
-}
-
-
-void Assembler::cmpw_ax(const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x39); // CMP r/m16, r16
- emit_operand(eax, op); // eax has same code as register ax.
-}
-
-
-void Assembler::dec_b(Register dst) {
- CHECK(dst.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0xFE);
- EMIT(0xC8 | dst.code());
-}
-
-
-void Assembler::dec_b(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xFE);
- emit_operand(ecx, dst);
-}
-
-
-void Assembler::dec(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0x48 | dst.code());
-}
-
-
-void Assembler::dec(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(ecx, dst);
-}
-
-
-void Assembler::cdq() {
- EnsureSpace ensure_space(this);
- EMIT(0x99);
-}
-
-
-void Assembler::idiv(Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xF8 | src.code());
-}
-
-
-void Assembler::imul(Register reg) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xE8 | reg.code());
-}
-
-
-void Assembler::imul(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imul(Register dst, Register src, int32_t imm32) {
- EnsureSpace ensure_space(this);
- if (is_int8(imm32)) {
- EMIT(0x6B);
- EMIT(0xC0 | dst.code() << 3 | src.code());
- EMIT(imm32);
- } else {
- EMIT(0x69);
- EMIT(0xC0 | dst.code() << 3 | src.code());
- emit(imm32);
- }
-}
-
-
-void Assembler::inc(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0x40 | dst.code());
-}
-
-
-void Assembler::inc(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(eax, dst);
-}
-
-
-void Assembler::lea(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mul(Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xE0 | src.code());
-}
-
-
-void Assembler::neg(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xD8 | dst.code());
-}
-
-
-void Assembler::not_(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xD0 | dst.code());
-}
-
-
-void Assembler::or_(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit_arith(1, Operand(dst), Immediate(imm32));
-}
-
-
-void Assembler::or_(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::or_(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(1, dst, x);
-}
-
-
-void Assembler::or_(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x09);
- emit_operand(src, dst);
-}
-
-
-void Assembler::rcl(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xD0 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xD0 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::rcr(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xD8 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xD8 | dst.code());
- EMIT(imm8);
- }
-}
-
-void Assembler::ror(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xC8 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xC8 | dst.code());
- EMIT(imm8);
- }
-}
-
-void Assembler::ror_cl(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xD3);
- EMIT(0xC8 | dst.code());
-}
-
-
-void Assembler::sar(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xF8 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xF8 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::sar_cl(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xD3);
- EMIT(0xF8 | dst.code());
-}
-
-
-void Assembler::sbb(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x1B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::shld(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xA5);
- emit_operand(dst, src);
-}
-
-
-void Assembler::shl(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xE0 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xE0 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::shl_cl(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xD3);
- EMIT(0xE0 | dst.code());
-}
-
-
-void Assembler::shrd(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xAD);
- emit_operand(dst, src);
-}
-
-
-void Assembler::shr(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xE8 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xE8 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::shr_cl(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xD3);
- EMIT(0xE8 | dst.code());
-}
-
-
-void Assembler::sub(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(5, dst, x);
-}
-
-
-void Assembler::sub(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x2B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::sub(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x29);
- emit_operand(src, dst);
-}
-
-
-void Assembler::test(Register reg, const Immediate& imm) {
- EnsureSpace ensure_space(this);
- // Only use test against byte for registers that have a byte
- // variant: eax, ebx, ecx, and edx.
- if (RelocInfo::IsNone(imm.rmode_) &&
- is_uint8(imm.x_) &&
- reg.is_byte_register()) {
- uint8_t imm8 = imm.x_;
- if (reg.is(eax)) {
- EMIT(0xA8);
- EMIT(imm8);
- } else {
- emit_arith_b(0xF6, 0xC0, reg, imm8);
- }
- } else {
- // This is not using emit_arith because test doesn't support
- // sign-extension of 8-bit operands.
- if (reg.is(eax)) {
- EMIT(0xA9);
- } else {
- EMIT(0xF7);
- EMIT(0xC0 | reg.code());
- }
- emit(imm);
- }
-}
-
-
-void Assembler::test(Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x85);
- emit_operand(reg, op);
-}
-
-
-void Assembler::test_b(Register reg, const Operand& op) {
- CHECK(reg.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x84);
- emit_operand(reg, op);
-}
-
-
-void Assembler::test(const Operand& op, const Immediate& imm) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- emit_operand(eax, op);
- emit(imm);
-}
-
-
-void Assembler::test_b(const Operand& op, uint8_t imm8) {
- if (op.is_reg_only() && !op.reg().is_byte_register()) {
- test(op, Immediate(imm8));
- return;
- }
- EnsureSpace ensure_space(this);
- EMIT(0xF6);
- emit_operand(eax, op);
- EMIT(imm8);
-}
-
-
-void Assembler::xor_(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit_arith(6, Operand(dst), Immediate(imm32));
-}
-
-
-void Assembler::xor_(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x33);
- emit_operand(dst, src);
-}
-
-
-void Assembler::xor_(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x31);
- emit_operand(src, dst);
-}
-
-
-void Assembler::xor_(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(6, dst, x);
-}
-
-
-void Assembler::bt(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xA3);
- emit_operand(src, dst);
-}
-
-
-void Assembler::bts(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xAB);
- emit_operand(src, dst);
-}
-
-
-void Assembler::hlt() {
- EnsureSpace ensure_space(this);
- EMIT(0xF4);
-}
-
-
-void Assembler::int3() {
- EnsureSpace ensure_space(this);
- EMIT(0xCC);
-}
-
-
-void Assembler::nop() {
- EnsureSpace ensure_space(this);
- EMIT(0x90);
-}
-
-
-void Assembler::rdtsc() {
- ASSERT(CpuFeatures::IsEnabled(RDTSC));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x31);
-}
-
-
-void Assembler::ret(int imm16) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint16(imm16));
- if (imm16 == 0) {
- EMIT(0xC3);
- } else {
- EMIT(0xC2);
- EMIT(imm16 & 0xFF);
- EMIT((imm16 >> 8) & 0xFF);
- }
-}
-
-
-// Labels refer to positions in the (to be) generated code.
-// There are bound, linked, and unused labels.
-//
-// Bound labels refer to known positions in the already
-// generated code. pos() is the position the label refers to.
-//
-// Linked labels refer to unknown positions in the code
-// to be generated; pos() is the position of the 32bit
-// Displacement of the last instruction using the label.
-
-
-void Assembler::print(Label* L) {
- if (L->is_unused()) {
- PrintF("unused label\n");
- } else if (L->is_bound()) {
- PrintF("bound label to %d\n", L->pos());
- } else if (L->is_linked()) {
- Label l = *L;
- PrintF("unbound label");
- while (l.is_linked()) {
- Displacement disp = disp_at(&l);
- PrintF("@ %d ", l.pos());
- disp.print();
- PrintF("\n");
- disp.next(&l);
- }
- } else {
- PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
- }
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- EnsureSpace ensure_space(this);
- ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
- while (L->is_linked()) {
- Displacement disp = disp_at(L);
- int fixup_pos = L->pos();
- if (disp.type() == Displacement::CODE_RELATIVE) {
- // Relative to Code* heap object pointer.
- long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
- } else {
- if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
- ASSERT(byte_at(fixup_pos - 1) == 0xE9); // jmp expected
- }
- // Relative address, relative to point after address.
- int imm32 = pos - (fixup_pos + sizeof(int32_t));
- long_at_put(fixup_pos, imm32);
- }
- disp.next(L);
- }
- while (L->is_near_linked()) {
- int fixup_pos = L->near_link_pos();
- int offset_to_next =
- static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
- ASSERT(offset_to_next <= 0);
- // Relative address, relative to point after address.
- int disp = pos - fixup_pos - sizeof(int8_t);
- CHECK(0 <= disp && disp <= 127);
- set_byte_at(fixup_pos, disp);
- if (offset_to_next < 0) {
- L->link_to(fixup_pos + offset_to_next, Label::kNear);
- } else {
- L->UnuseNear();
- }
- }
- L->bind_to(pos);
-}
-
-
-void Assembler::bind(Label* L) {
- EnsureSpace ensure_space(this);
- ASSERT(!L->is_bound()); // label can only be bound once
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::call(Label* L) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- if (L->is_bound()) {
- const int long_size = 5;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- // 1110 1000 #32-bit disp.
- EMIT(0xE8);
- emit(offs - long_size);
- } else {
- // 1110 1000 #32-bit disp.
- EMIT(0xE8);
- emit_disp(L, Displacement::OTHER);
- }
-}
-
-
-void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE8);
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
-}
-
-
-int Assembler::CallSize(const Operand& adr) {
- // Call size is 1 (opcode) + adr.len_ (operand).
- return 1 + adr.len_;
-}
-
-
-void Assembler::call(const Operand& adr) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(edx, adr);
-}
-
-
-int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
- return 1 /* EMIT */ + sizeof(uint32_t) /* emit */;
-}
-
-
-void Assembler::call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE8);
- emit(reinterpret_cast<intptr_t>(code.location()), rmode, ast_id);
-}
-
-
-void Assembler::jmp(Label* L, Label::Distance distance) {
- EnsureSpace ensure_space(this);
- if (L->is_bound()) {
- const int short_size = 2;
- const int long_size = 5;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
- // 1110 1011 #8-bit disp.
- EMIT(0xEB);
- EMIT((offs - short_size) & 0xFF);
- } else {
- // 1110 1001 #32-bit disp.
- EMIT(0xE9);
- emit(offs - long_size);
- }
- } else if (distance == Label::kNear) {
- EMIT(0xEB);
- emit_near_disp(L);
- } else {
- // 1110 1001 #32-bit disp.
- EMIT(0xE9);
- emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
- }
-}
-
-
-void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE9);
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
-}
-
-
-void Assembler::jmp(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(esp, adr);
-}
-
-
-void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE9);
- emit(reinterpret_cast<intptr_t>(code.location()), rmode);
-}
-
-
-void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
- EnsureSpace ensure_space(this);
- ASSERT(0 <= cc && static_cast<int>(cc) < 16);
- if (L->is_bound()) {
- const int short_size = 2;
- const int long_size = 6;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
- // 0111 tttn #8-bit disp
- EMIT(0x70 | cc);
- EMIT((offs - short_size) & 0xFF);
- } else {
- // 0000 1111 1000 tttn #32-bit disp
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit(offs - long_size);
- }
- } else if (distance == Label::kNear) {
- EMIT(0x70 | cc);
- emit_near_disp(L);
- } else {
- // 0000 1111 1000 tttn #32-bit disp
- // Note: could eliminate cond. jumps to this jump if condition
- // is the same however, seems to be rather unlikely case.
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit_disp(L, Displacement::OTHER);
- }
-}
-
-
-void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- ASSERT((0 <= cc) && (static_cast<int>(cc) < 16));
- // 0000 1111 1000 tttn #32-bit disp.
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
-}
-
-
-void Assembler::j(Condition cc, Handle<Code> code) {
- EnsureSpace ensure_space(this);
- // 0000 1111 1000 tttn #32-bit disp
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit(reinterpret_cast<intptr_t>(code.location()), RelocInfo::CODE_TARGET);
-}
-
-
-// FPU instructions.
-
-void Assembler::fld(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD9, 0xC0, i);
-}
-
-
-void Assembler::fstp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xD8, i);
-}
-
-
-void Assembler::fld1() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xE8);
-}
-
-
-void Assembler::fldpi() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xEB);
-}
-
-
-void Assembler::fldz() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xEE);
-}
-
-
-void Assembler::fldln2() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xED);
-}
-
-
-void Assembler::fld_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- emit_operand(eax, adr);
-}
-
-
-void Assembler::fld_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(eax, adr);
-}
-
-
-void Assembler::fstp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- emit_operand(ebx, adr);
-}
-
-
-void Assembler::fstp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(ebx, adr);
-}
-
-
-void Assembler::fst_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(edx, adr);
-}
-
-
-void Assembler::fild_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- emit_operand(eax, adr);
-}
-
-
-void Assembler::fild_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDF);
- emit_operand(ebp, adr);
-}
-
-
-void Assembler::fistp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- emit_operand(ebx, adr);
-}
-
-
-void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- emit_operand(ecx, adr);
-}
-
-
-void Assembler::fisttp_d(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(ecx, adr);
-}
-
-
-void Assembler::fist_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- emit_operand(edx, adr);
-}
-
-
-void Assembler::fistp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDF);
- emit_operand(edi, adr);
-}
-
-
-void Assembler::fabs() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xE1);
-}
-
-
-void Assembler::fchs() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xE0);
-}
-
-
-void Assembler::fcos() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xFF);
-}
-
-
-void Assembler::fsin() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xFE);
-}
-
-
-void Assembler::fptan() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF2);
-}
-
-
-void Assembler::fyl2x() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF1);
-}
-
-
-void Assembler::f2xm1() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF0);
-}
-
-
-void Assembler::fscale() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xFD);
-}
-
-
-void Assembler::fninit() {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- EMIT(0xE3);
-}
-
-
-void Assembler::fadd(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xC0, i);
-}
-
-
-void Assembler::fsub(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xE8, i);
-}
-
-
-void Assembler::fisub_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDA);
- emit_operand(esp, adr);
-}
-
-
-void Assembler::fmul(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xC8, i);
-}
-
-
-void Assembler::fdiv(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xF8, i);
-}
-
-
-void Assembler::faddp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xC0, i);
-}
-
-
-void Assembler::fsubp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xE8, i);
-}
-
-
-void Assembler::fsubrp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xE0, i);
-}
-
-
-void Assembler::fmulp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xC8, i);
-}
-
-
-void Assembler::fdivp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xF8, i);
-}
-
-
-void Assembler::fprem() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF8);
-}
-
-
-void Assembler::fprem1() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF5);
-}
-
-
-void Assembler::fxch(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD9, 0xC8, i);
-}
-
-
-void Assembler::fincstp() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF7);
-}
-
-
-void Assembler::ffree(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xC0, i);
-}
-
-
-void Assembler::ftst() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xE4);
-}
-
-
-void Assembler::fucomp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xE8, i);
-}
-
-
-void Assembler::fucompp() {
- EnsureSpace ensure_space(this);
- EMIT(0xDA);
- EMIT(0xE9);
-}
-
-
-void Assembler::fucomi(int i) {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- EMIT(0xE8 + i);
-}
-
-
-void Assembler::fucomip() {
- EnsureSpace ensure_space(this);
- EMIT(0xDF);
- EMIT(0xE9);
-}
-
-
-void Assembler::fcompp() {
- EnsureSpace ensure_space(this);
- EMIT(0xDE);
- EMIT(0xD9);
-}
-
-
-void Assembler::fnstsw_ax() {
- EnsureSpace ensure_space(this);
- EMIT(0xDF);
- EMIT(0xE0);
-}
-
-
-void Assembler::fwait() {
- EnsureSpace ensure_space(this);
- EMIT(0x9B);
-}
-
-
-void Assembler::frndint() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xFC);
-}
-
-
-void Assembler::fnclex() {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- EMIT(0xE2);
-}
-
-
-void Assembler::sahf() {
- EnsureSpace ensure_space(this);
- EMIT(0x9E);
-}
-
-
-void Assembler::setcc(Condition cc, Register reg) {
- ASSERT(reg.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x90 | cc);
- EMIT(0xC0 | reg.code());
-}
-
-
-void Assembler::cvttss2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x2C);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cvttsd2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x2C);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2si(Register dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x2D);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::addsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x58);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::addsd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x58);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x59);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::mulsd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x59);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::subsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::divsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x57);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::xorps(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x57);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x51);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::andpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x54);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::orpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x56);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x2E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x2E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x3A);
- EMIT(0x0B);
- emit_sse_operand(dst, src);
- // Mask precision exeption.
- EMIT(static_cast<byte>(mode) | 0x8);
-}
-
-void Assembler::movmskpd(Register dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x50);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movmskps(Register dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x50);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x76);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0xC2);
- emit_sse_operand(dst, src);
- EMIT(1); // LT == 1
-}
-
-
-void Assembler::movaps(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x28);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movdqa(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x7F);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x6F);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x7F);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movdqu(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x6F);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x38);
- EMIT(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movntdq(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xE7);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::prefetch(const Operand& src, int level) {
- ASSERT(is_uint2(level));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x18);
- // Emit hint number in Reg position of RegR/M.
- XMMRegister code = XMMRegister::from_code(level);
- emit_sse_operand(code, src);
-}
-
-
-void Assembler::movdbl(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
-void Assembler::movdbl(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
-void Assembler::movsd(const Operand& dst, XMMRegister src ) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2); // double
- EMIT(0x0F);
- EMIT(0x11); // store
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movsd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2); // double
- EMIT(0x0F);
- EMIT(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x10);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movss(const Operand& dst, XMMRegister src ) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3); // float
- EMIT(0x0F);
- EMIT(0x11); // store
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movss(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3); // float
- EMIT(0x0F);
- EMIT(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movss(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x10);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x6E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movd(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x7E);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
- ASSERT(CpuFeatures::IsSupported(SSE4_1));
- ASSERT(is_uint8(imm8));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x3A);
- EMIT(0x17);
- emit_sse_operand(dst, src);
- EMIT(imm8);
-}
-
-
-void Assembler::pand(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xDB);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::pxor(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xEF);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::por(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xEB);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ptest(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x38);
- EMIT(0x17);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::psllq(XMMRegister reg, int8_t shift) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x73);
- emit_sse_operand(esi, reg); // esi == 6
- EMIT(shift);
-}
-
-
-void Assembler::psllq(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xF3);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::psrlq(XMMRegister reg, int8_t shift) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x73);
- emit_sse_operand(edx, reg); // edx == 2
- EMIT(shift);
-}
-
-
-void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xD3);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x70);
- emit_sse_operand(dst, src);
- EMIT(shuffle);
-}
-
-
-void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x3A);
- EMIT(0x16);
- emit_sse_operand(src, dst);
- EMIT(offset);
-}
-
-
-void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x3A);
- EMIT(0x22);
- emit_sse_operand(dst, src);
- EMIT(offset);
-}
-
-
-void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
- Register ireg = { reg.code() };
- emit_operand(ireg, adr);
-}
-
-
-void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
- EMIT(0xC0 | dst.code() << 3 | src.code());
-}
-
-
-void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
- EMIT(0xC0 | dst.code() << 3 | src.code());
-}
-
-
-void Assembler::Print() {
- Disassembler::Decode(isolate(), stdout, buffer_, pc_);
-}
-
-
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg, bool force) {
- if (FLAG_code_comments || force) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-void Assembler::GrowBuffer() {
- ASSERT(overflow());
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else {
- desc.buffer_size = 2*buffer_size_;
- }
- // Some internal data structures overflow for very large buffers,
- // they must ensure that kMaximalBufferSize is not too large.
- if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
- }
-
- // Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
-
- // Clear the buffer in debug mode. Use 'int3' instructions to make
- // sure to get into problems if we ever run uninitialized code.
-#ifdef DEBUG
- memset(desc.buffer, 0xCC, desc.buffer_size);
-#endif
-
- // Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
-
- // Switch buffers.
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::RUNTIME_ENTRY) {
- int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
- *p -= pc_delta; // relocate entry
- } else if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
- if (*p != 0) { // 0 means uninitialized.
- *p += pc_delta;
- }
- }
- }
-
- ASSERT(!overflow());
-}
-
-
-void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
- ASSERT(is_uint8(op1) && is_uint8(op2)); // wrong opcode
- ASSERT(is_uint8(imm8));
- ASSERT((op1 & 0x01) == 0); // should be 8bit operation
- EMIT(op1);
- EMIT(op2 | dst.code());
- EMIT(imm8);
-}
-
-
-void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
- ASSERT((0 <= sel) && (sel <= 7));
- Register ireg = { sel };
- if (x.is_int8()) {
- EMIT(0x83); // using a sign-extended 8-bit immediate.
- emit_operand(ireg, dst);
- EMIT(x.x_ & 0xFF);
- } else if (dst.is_reg(eax)) {
- EMIT((sel << 3) | 0x05); // short form if the destination is eax.
- emit(x);
- } else {
- EMIT(0x81); // using a literal 32-bit immediate.
- emit_operand(ireg, dst);
- emit(x);
- }
-}
-
-
-void Assembler::emit_operand(Register reg, const Operand& adr) {
- const unsigned length = adr.len_;
- ASSERT(length > 0);
-
- // Emit updated ModRM byte containing the given register.
- pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3);
-
- // Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
- pc_ += length;
-
- // Emit relocation information if necessary.
- if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
- pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
- RecordRelocInfo(adr.rmode_);
- pc_ += sizeof(int32_t);
- }
-}
-
-
-void Assembler::emit_farith(int b1, int b2, int i) {
- ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode
- ASSERT(0 <= i && i < 8); // illegal stack offset
- EMIT(b1);
- EMIT(b2 + i);
-}
-
-
-void Assembler::db(uint8_t data) {
- EnsureSpace ensure_space(this);
- EMIT(data);
-}
-
-
-void Assembler::dd(uint32_t data) {
- EnsureSpace ensure_space(this);
- emit(data);
-}
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- ASSERT(!RelocInfo::IsNone(rmode));
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
- }
- RelocInfo rinfo(pc_, rmode, data, NULL);
- reloc_info_writer.Write(&rinfo);
-}
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitCoverageLog() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "aw+");
- }
-}
-
-
-void LogGeneratedCodeCoverage(const char* file_line) {
- const char* return_address = (&file_line)[-1];
- char* push_insn = const_cast<char*>(return_address - 12);
- push_insn[0] = 0xeb; // Relative branch insn.
- push_insn[1] = 13; // Skip over coverage insns.
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", file_line);
- fflush(coverage_log);
- }
-}
-
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32.h b/src/3rdparty/v8/src/ia32/assembler-ia32.h
deleted file mode 100644
index 315bc17..0000000
--- a/src/3rdparty/v8/src/ia32/assembler-ia32.h
+++ /dev/null
@@ -1,1281 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
-
-// A light-weight IA32 Assembler.
-
-#ifndef V8_IA32_ASSEMBLER_IA32_H_
-#define V8_IA32_ASSEMBLER_IA32_H_
-
-#include "isolate.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-//
-struct Register {
- static const int kMaxNumAllocatableRegisters = 6;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
- static const int kNumRegisters = 8;
-
- static inline const char* AllocationIndexToString(int index);
-
- static inline int ToAllocationIndex(Register reg);
-
- static inline Register FromAllocationIndex(int index);
-
- static Register from_code(int code) {
- ASSERT(code >= 0);
- ASSERT(code < kNumRegisters);
- Register r = { code };
- return r;
- }
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- // eax, ebx, ecx and edx are byte registers, the rest are not.
- bool is_byte_register() const { return code_ <= 3; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-const int kRegister_eax_Code = 0;
-const int kRegister_ecx_Code = 1;
-const int kRegister_edx_Code = 2;
-const int kRegister_ebx_Code = 3;
-const int kRegister_esp_Code = 4;
-const int kRegister_ebp_Code = 5;
-const int kRegister_esi_Code = 6;
-const int kRegister_edi_Code = 7;
-const int kRegister_no_reg_Code = -1;
-
-const Register eax = { kRegister_eax_Code };
-const Register ecx = { kRegister_ecx_Code };
-const Register edx = { kRegister_edx_Code };
-const Register ebx = { kRegister_ebx_Code };
-const Register esp = { kRegister_esp_Code };
-const Register ebp = { kRegister_ebp_Code };
-const Register esi = { kRegister_esi_Code };
-const Register edi = { kRegister_edi_Code };
-const Register no_reg = { kRegister_no_reg_Code };
-
-
-inline const char* Register::AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- // This is the mapping of allocation indices to registers.
- const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
- return kNames[index];
-}
-
-
-inline int Register::ToAllocationIndex(Register reg) {
- ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
- return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
-}
-
-
-inline Register Register::FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- return (index >= 4) ? from_code(index + 2) : from_code(index);
-}
-
-
-struct IntelDoubleRegister {
- static const int kMaxNumRegisters = 8;
- static const int kMaxNumAllocatableRegisters = 7;
- static int NumAllocatableRegisters();
- static int NumRegisters();
- static const char* AllocationIndexToString(int index);
-
- static int ToAllocationIndex(IntelDoubleRegister reg) {
- ASSERT(reg.code() != 0);
- return reg.code() - 1;
- }
-
- static IntelDoubleRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- return from_code(index + 1);
- }
-
- static IntelDoubleRegister from_code(int code) {
- IntelDoubleRegister result = { code };
- return result;
- }
-
- bool is_valid() const {
- return 0 <= code_ && code_ < NumRegisters();
- }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
-
- int code_;
-};
-
-
-const IntelDoubleRegister double_register_0 = { 0 };
-const IntelDoubleRegister double_register_1 = { 1 };
-const IntelDoubleRegister double_register_2 = { 2 };
-const IntelDoubleRegister double_register_3 = { 3 };
-const IntelDoubleRegister double_register_4 = { 4 };
-const IntelDoubleRegister double_register_5 = { 5 };
-const IntelDoubleRegister double_register_6 = { 6 };
-const IntelDoubleRegister double_register_7 = { 7 };
-
-
-struct XMMRegister : IntelDoubleRegister {
- static const int kNumAllocatableRegisters = 7;
- static const int kNumRegisters = 8;
-
- static XMMRegister from_code(int code) {
- STATIC_ASSERT(sizeof(XMMRegister) == sizeof(IntelDoubleRegister));
- XMMRegister result;
- result.code_ = code;
- return result;
- }
-
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
-
- static XMMRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- return from_code(index + 1);
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "xmm1",
- "xmm2",
- "xmm3",
- "xmm4",
- "xmm5",
- "xmm6",
- "xmm7"
- };
- return names[index];
- }
-};
-
-
-#define xmm0 (static_cast<const XMMRegister&>(double_register_0))
-#define xmm1 (static_cast<const XMMRegister&>(double_register_1))
-#define xmm2 (static_cast<const XMMRegister&>(double_register_2))
-#define xmm3 (static_cast<const XMMRegister&>(double_register_3))
-#define xmm4 (static_cast<const XMMRegister&>(double_register_4))
-#define xmm5 (static_cast<const XMMRegister&>(double_register_5))
-#define xmm6 (static_cast<const XMMRegister&>(double_register_6))
-#define xmm7 (static_cast<const XMMRegister&>(double_register_7))
-
-
-struct X87TopOfStackRegister : IntelDoubleRegister {
- static const int kNumAllocatableRegisters = 1;
- static const int kNumRegisters = 1;
-
- bool is(X87TopOfStackRegister reg) const {
- return code_ == reg.code_;
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "st0",
- };
- return names[index];
- }
-
- static int ToAllocationIndex(X87TopOfStackRegister reg) {
- ASSERT(reg.code() == 0);
- return 0;
- }
-};
-
-#define x87tos \
- static_cast<const X87TopOfStackRegister&>(double_register_0)
-
-
-typedef IntelDoubleRegister DoubleRegister;
-
-
-enum Condition {
- // any value < 0 is considered no_condition
- no_condition = -1,
-
- overflow = 0,
- no_overflow = 1,
- below = 2,
- above_equal = 3,
- equal = 4,
- not_equal = 5,
- below_equal = 6,
- above = 7,
- negative = 8,
- positive = 9,
- parity_even = 10,
- parity_odd = 11,
- less = 12,
- greater_equal = 13,
- less_equal = 14,
- greater = 15,
-
- // aliases
- carry = below,
- not_carry = above_equal,
- zero = equal,
- not_zero = not_equal,
- sign = negative,
- not_sign = positive
-};
-
-
-// Returns the equivalent of !cc.
-// Negation of the default no_condition (-1) results in a non-default
-// no_condition value (-2). As long as tests for no_condition check
-// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc) {
- return static_cast<Condition>(cc ^ 1);
-}
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case below:
- return above;
- case above:
- return below;
- case above_equal:
- return below_equal;
- case below_equal:
- return above_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- };
-}
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Immediates
-
-class Immediate BASE_EMBEDDED {
- public:
- inline explicit Immediate(int x);
- inline explicit Immediate(const ExternalReference& ext);
- inline explicit Immediate(Handle<Object> handle);
- inline explicit Immediate(Smi* value);
- inline explicit Immediate(Address addr);
-
- static Immediate CodeRelativeOffset(Label* label) {
- return Immediate(label);
- }
-
- bool is_zero() const { return x_ == 0 && RelocInfo::IsNone(rmode_); }
- bool is_int8() const {
- return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_);
- }
- bool is_int16() const {
- return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_);
- }
-
- private:
- inline explicit Immediate(Label* value);
-
- int x_;
- RelocInfo::Mode rmode_;
-
- friend class Assembler;
- friend class MacroAssembler;
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-enum ScaleFactor {
- times_1 = 0,
- times_2 = 1,
- times_4 = 2,
- times_8 = 3,
- times_int_size = times_4,
- times_half_pointer_size = times_2,
- times_pointer_size = times_4,
- times_twice_pointer_size = times_8
-};
-
-
-class Operand BASE_EMBEDDED {
- public:
- // XMM reg
- INLINE(explicit Operand(XMMRegister xmm_reg));
-
- // [disp/r]
- INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
- // disp only must always be relocated
-
- // [base + disp/r]
- explicit Operand(Register base, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
-
- // [base + index*scale + disp/r]
- explicit Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
-
- // [index*scale + disp/r]
- explicit Operand(Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
-
- static Operand StaticVariable(const ExternalReference& ext) {
- return Operand(reinterpret_cast<int32_t>(ext.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- static Operand StaticArray(Register index,
- ScaleFactor scale,
- const ExternalReference& arr) {
- return Operand(index, scale, reinterpret_cast<int32_t>(arr.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
- return Operand(reinterpret_cast<int32_t>(cell.location()),
- RelocInfo::GLOBAL_PROPERTY_CELL);
- }
-
- // Returns true if this Operand is a wrapper for the specified register.
- bool is_reg(Register reg) const;
-
- // Returns true if this Operand is a wrapper for one register.
- bool is_reg_only() const;
-
- // Asserts that this Operand is a wrapper for one register and returns the
- // register.
- Register reg() const;
-
- private:
- // reg
- INLINE(explicit Operand(Register reg));
-
- // Set the ModRM byte without an encoded 'reg' register. The
- // register is encoded later as part of the emit_operand operation.
- inline void set_modrm(int mod, Register rm);
-
- inline void set_sib(ScaleFactor scale, Register index, Register base);
- inline void set_disp8(int8_t disp);
- inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
-
- byte buf_[6];
- // The number of bytes in buf_.
- unsigned int len_;
- // Only valid if len_ > 4.
- RelocInfo::Mode rmode_;
-
- friend class Assembler;
- friend class MacroAssembler;
- friend class LCodeGen;
-};
-
-
-// -----------------------------------------------------------------------------
-// A Displacement describes the 32bit immediate field of an instruction which
-// may be used together with a Label in order to refer to a yet unknown code
-// position. Displacements stored in the instruction stream are used to describe
-// the instruction and to chain a list of instructions using the same Label.
-// A Displacement contains 2 different fields:
-//
-// next field: position of next displacement in the chain (0 = end of list)
-// type field: instruction type
-//
-// A next value of null (0) indicates the end of a chain (note that there can
-// be no displacement at position zero, because there is always at least one
-// instruction byte before the displacement).
-//
-// Displacement _data field layout
-//
-// |31.....2|1......0|
-// [ next | type |
-
-class Displacement BASE_EMBEDDED {
- public:
- enum Type {
- UNCONDITIONAL_JUMP,
- CODE_RELATIVE,
- OTHER
- };
-
- int data() const { return data_; }
- Type type() const { return TypeField::decode(data_); }
- void next(Label* L) const {
- int n = NextField::decode(data_);
- n > 0 ? L->link_to(n) : L->Unuse();
- }
- void link_to(Label* L) { init(L, type()); }
-
- explicit Displacement(int data) { data_ = data; }
-
- Displacement(Label* L, Type type) { init(L, type); }
-
- void print() {
- PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
- NextField::decode(data_));
- }
-
- private:
- int data_;
-
- class TypeField: public BitField<Type, 0, 2> {};
- class NextField: public BitField<int, 2, 32-2> {};
-
- void init(Label* L, Type type);
-};
-
-
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-// Example:
-// if (CpuFeatures::IsSupported(SSE2)) {
-// CpuFeatures::Scope fscope(SSE2);
-// // Generate SSE2 floating point code.
-// } else {
-// // Generate standard x87 floating point code.
-// }
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == SSE2 && !FLAG_enable_sse2) return false;
- if (f == SSE3 && !FLAG_enable_sse3) return false;
- if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
- if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == RDTSC && !FLAG_enable_rdtsc) return false;
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
- }
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- uint64_t enabled = isolate->enabled_cpu_features();
- return (enabled & (static_cast<uint64_t>(1) << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- uint64_t mask = static_cast<uint64_t>(1) << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = isolate_->enabled_cpu_features();
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- uint64_t old_enabled_;
-#else
-
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (static_cast<uint64_t>(1) << f);
- }
- }
-
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
-
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
-
- const uint64_t old_supported_;
- };
-
- private:
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static uint64_t supported_;
- static uint64_t found_by_runtime_probing_;
-
- friend class ExternalReference;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
-class Assembler : public AssemblerBase {
- private:
- // We check before assembling an instruction that there is sufficient
- // space to write an instruction and its relocation information.
- // The relocation writer's position must be kGap bytes above the end of
- // the generated instructions. This leaves enough space for the
- // longest possible ia32 instruction, 15 bytes, and the longest possible
- // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
- // (There is a 15 byte limit on ia32 instruction length that rules out some
- // otherwise valid instructions.)
- // This allows for a single, fast space check per instruction.
- static const int kGap = 32;
-
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- // TODO(vitalyr): the assembler does not need an isolate.
- Assembler(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~Assembler() { }
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Read/Modify the code target in the branch/call instruction at pc.
- inline static Address target_address_at(Address pc);
- inline static void set_target_address_at(Address pc, Address target);
-
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- inline static Address target_address_from_return_address(Address pc);
-
- // This sets the branch destination (which is in the instruction on x86).
- // This is for calls and branches within generated code.
- inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
- // This sets the branch destination (which is in the instruction on x86).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
- static const int kSpecialTargetSize = kPointerSize;
-
- // Distance between the address of the code target in the call instruction
- // and the return address
- static const int kCallTargetAddressOffset = kPointerSize;
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32.
-
- // Distance between start of patched debug break slot and the emitted address
- // to jump to.
- static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
-
- static const int kCallInstructionLength = 5;
- static const int kPatchDebugBreakSlotReturnOffset = kPointerSize;
- static const int kJSReturnSequenceLength = 6;
-
- // The debug break slot must be able to contain a call instruction.
- static const int kDebugBreakSlotLength = kCallInstructionLength;
-
- // One byte opcode for test al, 0xXX.
- static const byte kTestAlByte = 0xA8;
- // One byte opcode for nop.
- static const byte kNopByte = 0x90;
-
- // One byte opcode for a short unconditional jump.
- static const byte kJmpShortOpcode = 0xEB;
- // One byte prefix for a short conditional jump.
- static const byte kJccShortPrefix = 0x70;
- static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
- static const byte kJcShortOpcode = kJccShortPrefix | carry;
- static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
- static const byte kJzShortOpcode = kJccShortPrefix | zero;
-
-
- // ---------------------------------------------------------------------------
- // Code generation
- //
- // - function names correspond one-to-one to ia32 instruction mnemonics
- // - unless specified otherwise, instructions operate on 32bit operands
- // - instructions on 8bit (byte) operands/registers have a trailing '_b'
- // - instructions on 16bit (word) operands/registers have a trailing '_w'
- // - naming conflicts with C++ keywords are resolved via a trailing '_'
-
- // NOTE ON INTERFACE: Currently, the interface is not very consistent
- // in the sense that some operations (e.g. mov()) can be called in more
- // the one way to generate the same instruction: The Register argument
- // can in some cases be replaced with an Operand(Register) argument.
- // This should be cleaned up and made more orthogonal. The questions
- // is: should we always use Operands instead of Registers where an
- // Operand is possible, or should we have a Register (overloaded) form
- // instead? We must be careful to make sure that the selected instruction
- // is obvious from the parameters to avoid hard-to-find code generation
- // bugs.
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m. m must be a power of 2.
- void Align(int m);
- void Nop(int bytes = 1);
- // Aligns code to something that's optimal for a jump target for the platform.
- void CodeTargetAlign();
-
- // Stack
- void pushad();
- void popad();
-
- void pushfd();
- void popfd();
-
- void push(const Immediate& x);
- void push_imm32(int32_t imm32);
- void push(Register src);
- void push(const Operand& src);
-
- void pop(Register dst);
- void pop(const Operand& dst);
-
- void enter(const Immediate& size);
- void leave();
-
- // Moves
- void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
- void mov_b(Register dst, const Operand& src);
- void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
- void mov_b(const Operand& dst, int8_t imm8);
- void mov_b(const Operand& dst, Register src);
-
- void mov_w(Register dst, const Operand& src);
- void mov_w(const Operand& dst, Register src);
-
- void mov(Register dst, int32_t imm32);
- void mov(Register dst, const Immediate& x);
- void mov(Register dst, Handle<Object> handle);
- void mov(Register dst, const Operand& src);
- void mov(Register dst, Register src);
- void mov(const Operand& dst, const Immediate& x);
- void mov(const Operand& dst, Handle<Object> handle);
- void mov(const Operand& dst, Register src);
-
- void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
- void movsx_b(Register dst, const Operand& src);
-
- void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
- void movsx_w(Register dst, const Operand& src);
-
- void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
- void movzx_b(Register dst, const Operand& src);
-
- void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
- void movzx_w(Register dst, const Operand& src);
-
- // Conditional moves
- void cmov(Condition cc, Register dst, Register src) {
- cmov(cc, dst, Operand(src));
- }
- void cmov(Condition cc, Register dst, const Operand& src);
-
- // Flag management.
- void cld();
-
- // Repetitive string instructions.
- void rep_movs();
- void rep_stos();
- void stos();
-
- // Exchange two registers
- void xchg(Register dst, Register src);
-
- // Arithmetics
- void adc(Register dst, int32_t imm32);
- void adc(Register dst, const Operand& src);
-
- void add(Register dst, Register src) { add(dst, Operand(src)); }
- void add(Register dst, const Operand& src);
- void add(const Operand& dst, Register src);
- void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
- void add(const Operand& dst, const Immediate& x);
-
- void and_(Register dst, int32_t imm32);
- void and_(Register dst, const Immediate& x);
- void and_(Register dst, Register src) { and_(dst, Operand(src)); }
- void and_(Register dst, const Operand& src);
- void and_(const Operand& dst, Register src);
- void and_(const Operand& dst, const Immediate& x);
-
- void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
- void cmpb(const Operand& op, int8_t imm8);
- void cmpb(Register reg, const Operand& op);
- void cmpb(const Operand& op, Register reg);
- void cmpb_al(const Operand& op);
- void cmpw_ax(const Operand& op);
- void cmpw(const Operand& op, Immediate imm16);
- void cmp(Register reg, int32_t imm32);
- void cmp(Register reg, Handle<Object> handle);
- void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
- void cmp(Register reg, const Operand& op);
- void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
- void cmp(const Operand& op, const Immediate& imm);
- void cmp(const Operand& op, Handle<Object> handle);
-
- void dec_b(Register dst);
- void dec_b(const Operand& dst);
-
- void dec(Register dst);
- void dec(const Operand& dst);
-
- void cdq();
-
- void idiv(Register src);
-
- // Signed multiply instructions.
- void imul(Register src); // edx:eax = eax * src.
- void imul(Register dst, Register src) { imul(dst, Operand(src)); }
- void imul(Register dst, const Operand& src); // dst = dst * src.
- void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
-
- void inc(Register dst);
- void inc(const Operand& dst);
-
- void lea(Register dst, const Operand& src);
-
- // Unsigned multiply instruction.
- void mul(Register src); // edx:eax = eax * reg.
-
- void neg(Register dst);
-
- void not_(Register dst);
-
- void or_(Register dst, int32_t imm32);
- void or_(Register dst, Register src) { or_(dst, Operand(src)); }
- void or_(Register dst, const Operand& src);
- void or_(const Operand& dst, Register src);
- void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
- void or_(const Operand& dst, const Immediate& x);
-
- void rcl(Register dst, uint8_t imm8);
- void rcr(Register dst, uint8_t imm8);
- void ror(Register dst, uint8_t imm8);
- void ror_cl(Register dst);
-
- void sar(Register dst, uint8_t imm8);
- void sar_cl(Register dst);
-
- void sbb(Register dst, const Operand& src);
-
- void shld(Register dst, Register src) { shld(dst, Operand(src)); }
- void shld(Register dst, const Operand& src);
-
- void shl(Register dst, uint8_t imm8);
- void shl_cl(Register dst);
-
- void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
- void shrd(Register dst, const Operand& src);
-
- void shr(Register dst, uint8_t imm8);
- void shr_cl(Register dst);
-
- void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
- void sub(const Operand& dst, const Immediate& x);
- void sub(Register dst, Register src) { sub(dst, Operand(src)); }
- void sub(Register dst, const Operand& src);
- void sub(const Operand& dst, Register src);
-
- void test(Register reg, const Immediate& imm);
- void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
- void test(Register reg, const Operand& op);
- void test_b(Register reg, const Operand& op);
- void test(const Operand& op, const Immediate& imm);
- void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
- void test_b(const Operand& op, uint8_t imm8);
-
- void xor_(Register dst, int32_t imm32);
- void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
- void xor_(Register dst, const Operand& src);
- void xor_(const Operand& dst, Register src);
- void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
- void xor_(const Operand& dst, const Immediate& x);
-
- // Bit operations.
- void bt(const Operand& dst, Register src);
- void bts(Register dst, Register src) { bts(Operand(dst), src); }
- void bts(const Operand& dst, Register src);
-
- // Miscellaneous
- void hlt();
- void int3();
- void nop();
- void rdtsc();
- void ret(int imm16);
-
- // Label operations & relative jumps (PPUM Appendix D)
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
-
- void bind(Label* L); // binds an unbound label L to the current code position
-
- // Calls
- void call(Label* L);
- void call(byte* entry, RelocInfo::Mode rmode);
- int CallSize(const Operand& adr);
- void call(Register reg) { call(Operand(reg)); }
- void call(const Operand& adr);
- int CallSize(Handle<Code> code, RelocInfo::Mode mode);
- void call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId id = TypeFeedbackId::None());
-
- // Jumps
- // unconditional jump to L
- void jmp(Label* L, Label::Distance distance = Label::kFar);
- void jmp(byte* entry, RelocInfo::Mode rmode);
- void jmp(Register reg) { jmp(Operand(reg)); }
- void jmp(const Operand& adr);
- void jmp(Handle<Code> code, RelocInfo::Mode rmode);
-
- // Conditional jumps
- void j(Condition cc,
- Label* L,
- Label::Distance distance = Label::kFar);
- void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
- void j(Condition cc, Handle<Code> code);
-
- // Floating-point operations
- void fld(int i);
- void fstp(int i);
-
- void fld1();
- void fldz();
- void fldpi();
- void fldln2();
-
- void fld_s(const Operand& adr);
- void fld_d(const Operand& adr);
-
- void fstp_s(const Operand& adr);
- void fstp_d(const Operand& adr);
- void fst_d(const Operand& adr);
-
- void fild_s(const Operand& adr);
- void fild_d(const Operand& adr);
-
- void fist_s(const Operand& adr);
-
- void fistp_s(const Operand& adr);
- void fistp_d(const Operand& adr);
-
- // The fisttp instructions require SSE3.
- void fisttp_s(const Operand& adr);
- void fisttp_d(const Operand& adr);
-
- void fabs();
- void fchs();
- void fcos();
- void fsin();
- void fptan();
- void fyl2x();
- void f2xm1();
- void fscale();
- void fninit();
-
- void fadd(int i);
- void fsub(int i);
- void fmul(int i);
- void fdiv(int i);
-
- void fisub_s(const Operand& adr);
-
- void faddp(int i = 1);
- void fsubp(int i = 1);
- void fsubrp(int i = 1);
- void fmulp(int i = 1);
- void fdivp(int i = 1);
- void fprem();
- void fprem1();
-
- void fxch(int i = 1);
- void fincstp();
- void ffree(int i = 0);
-
- void ftst();
- void fucomp(int i);
- void fucompp();
- void fucomi(int i);
- void fucomip();
- void fcompp();
- void fnstsw_ax();
- void fwait();
- void fnclex();
-
- void frndint();
-
- void sahf();
- void setcc(Condition cc, Register reg);
-
- void cpuid();
-
- // SSE2 instructions
- void cvttss2si(Register dst, const Operand& src);
- void cvttsd2si(Register dst, const Operand& src);
- void cvtsd2si(Register dst, XMMRegister src);
-
- void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
- void cvtsi2sd(XMMRegister dst, const Operand& src);
- void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtsd2ss(XMMRegister dst, XMMRegister src);
-
- void addsd(XMMRegister dst, XMMRegister src);
- void addsd(XMMRegister dst, const Operand& src);
- void subsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, const Operand& src);
- void divsd(XMMRegister dst, XMMRegister src);
- void xorpd(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, XMMRegister src);
- void sqrtsd(XMMRegister dst, XMMRegister src);
-
- void andpd(XMMRegister dst, XMMRegister src);
- void orpd(XMMRegister dst, XMMRegister src);
-
- void ucomisd(XMMRegister dst, XMMRegister src);
- void ucomisd(XMMRegister dst, const Operand& src);
-
- enum RoundingMode {
- kRoundToNearest = 0x0,
- kRoundDown = 0x1,
- kRoundUp = 0x2,
- kRoundToZero = 0x3
- };
-
- void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
-
- void movmskpd(Register dst, XMMRegister src);
- void movmskps(Register dst, XMMRegister src);
-
- void cmpltsd(XMMRegister dst, XMMRegister src);
- void pcmpeqd(XMMRegister dst, XMMRegister src);
-
- void movaps(XMMRegister dst, XMMRegister src);
-
- void movdqa(XMMRegister dst, const Operand& src);
- void movdqa(const Operand& dst, XMMRegister src);
- void movdqu(XMMRegister dst, const Operand& src);
- void movdqu(const Operand& dst, XMMRegister src);
-
- // Use either movsd or movlpd.
- void movdbl(XMMRegister dst, const Operand& src);
- void movdbl(const Operand& dst, XMMRegister src);
-
- void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
- void movd(XMMRegister dst, const Operand& src);
- void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
- void movd(const Operand& dst, XMMRegister src);
- void movsd(XMMRegister dst, XMMRegister src);
-
- void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& dst, XMMRegister src);
- void movss(XMMRegister dst, XMMRegister src);
- void extractps(Register dst, XMMRegister src, byte imm8);
-
- void pand(XMMRegister dst, XMMRegister src);
- void pxor(XMMRegister dst, XMMRegister src);
- void por(XMMRegister dst, XMMRegister src);
- void ptest(XMMRegister dst, XMMRegister src);
-
- void psllq(XMMRegister reg, int8_t shift);
- void psllq(XMMRegister dst, XMMRegister src);
- void psrlq(XMMRegister reg, int8_t shift);
- void psrlq(XMMRegister dst, XMMRegister src);
- void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
- void pextrd(Register dst, XMMRegister src, int8_t offset) {
- pextrd(Operand(dst), src, offset);
- }
- void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
- void pinsrd(XMMRegister dst, Register src, int8_t offset) {
- pinsrd(dst, Operand(src), offset);
- }
- void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
-
- // Parallel XMM operations.
- void movntdqa(XMMRegister dst, const Operand& src);
- void movntdq(const Operand& dst, XMMRegister src);
- // Prefetch src position into cache level.
- // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
- // non-temporal
- void prefetch(const Operand& src, int level);
- // TODO(lrn): Need SFENCE for movnt?
-
- // Debugging
- void Print();
-
- // Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* label) {
- return pc_offset() - label->pos();
- }
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot();
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable, or provide "force = true" flag to always
- // write a comment.
- void RecordComment(const char* msg, bool force = false);
-
- // Writes a single byte or word of data in the code stream. Used for
- // inline tables, e.g., jump-tables.
- void db(uint8_t data);
- void dd(uint32_t data);
-
- // Check if there is less than kGap bytes available in the buffer.
- // If this is the case, we need to grow the buffer before emitting
- // an instruction or relocation information.
- inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
-
- // Get the number of bytes available in the buffer.
- inline int available_space() const { return reloc_info_writer.pos() - pc_; }
-
- static bool IsNop(Address addr);
-
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
-
- int relocation_writer_size() {
- return (buffer_ + buffer_size_) - reloc_info_writer.pos();
- }
-
- // Avoid overflows for displacements etc.
- static const int kMaximalBufferSize = 512*MB;
-
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
-
- protected:
- void movsd(XMMRegister dst, const Operand& src);
- void movsd(const Operand& dst, XMMRegister src);
-
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
- void emit_sse_operand(XMMRegister dst, XMMRegister src);
- void emit_sse_operand(Register dst, XMMRegister src);
-
- byte* addr_at(int pos) { return buffer_ + pos; }
-
-
- private:
- uint32_t long_at(int pos) {
- return *reinterpret_cast<uint32_t*>(addr_at(pos));
- }
- void long_at_put(int pos, uint32_t x) {
- *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
- }
-
- // code emission
- void GrowBuffer();
- inline void emit(uint32_t x);
- inline void emit(Handle<Object> handle);
- inline void emit(uint32_t x,
- RelocInfo::Mode rmode,
- TypeFeedbackId id = TypeFeedbackId::None());
- inline void emit(const Immediate& x);
- inline void emit_w(const Immediate& x);
-
- // Emit the code-object-relative offset of the label's position
- inline void emit_code_relative_offset(Label* label);
-
- // instruction generation
- void emit_arith_b(int op1, int op2, Register dst, int imm8);
-
- // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
- // with a given destination expression and an immediate operand. It attempts
- // to use the shortest encoding possible.
- // sel specifies the /n in the modrm byte (see the Intel PRM).
- void emit_arith(int sel, Operand dst, const Immediate& x);
-
- void emit_operand(Register reg, const Operand& adr);
-
- void emit_farith(int b1, int b2, int i);
-
- // labels
- void print(Label* L);
- void bind_to(Label* L, int pos);
-
- // displacements
- inline Displacement disp_at(Label* L);
- inline void disp_at_put(Label* L, Displacement disp);
- inline void emit_disp(Label* L, Displacement::Type type);
- inline void emit_near_disp(Label* L);
-
- // record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
- friend class CodePatcher;
- friend class EnsureSpace;
-
- // code generation
- RelocInfoWriter reloc_info_writer;
-
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
-};
-
-
-// Helper class that ensures that there is enough space for generating
-// instructions and relocation information. The constructor makes
-// sure that there is enough space and (in debug mode) the destructor
-// checks that we did not generate too much.
-class EnsureSpace BASE_EMBEDDED {
- public:
- explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->overflow()) assembler_->GrowBuffer();
-#ifdef DEBUG
- space_before_ = assembler_->available_space();
-#endif
- }
-
-#ifdef DEBUG
- ~EnsureSpace() {
- int bytes_generated = space_before_ - assembler_->available_space();
- ASSERT(bytes_generated < assembler_->kGap);
- }
-#endif
-
- private:
- Assembler* assembler_;
-#ifdef DEBUG
- int space_before_;
-#endif
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_ASSEMBLER_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/builtins-ia32.cc b/src/3rdparty/v8/src/ia32/builtins-ia32.cc
deleted file mode 100644
index e3b2b7b..0000000
--- a/src/3rdparty/v8/src/ia32/builtins-ia32.cc
+++ /dev/null
@@ -1,1869 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments excluding receiver
- // -- edi : called function (only guaranteed when
- // extra_args requires it)
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -- ...
- // -- esp[4 * argc] : first argument (argc == eax)
- // -- esp[4 * (argc +1)] : receiver
- // -----------------------------------
-
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- Register scratch = ebx;
- __ pop(scratch); // Save return address.
- __ push(edi);
- __ push(scratch); // Restore return address.
- } else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- }
-
- // JumpToExternalReference expects eax to contain the number of arguments
- // including the receiver and the extra arguments.
- __ add(eax, Immediate(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
-
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
-}
-
-
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
-
- // Tear down internal frame.
- }
-
- GenerateTailCallToSharedCode(masm);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
- // ----------- S t a t e -------------
- // -- eax: number of arguments
- // -- edi: constructor function
- // -----------------------------------
-
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
-
- // Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
-
- // Store a smi-tagged arguments count on the stack.
- __ SmiTag(eax);
- __ push(eax);
-
- // Push the function to invoke on the stack.
- __ push(edi);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
- __ j(not_equal, &rt_call);
-#endif
-
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // edi: constructor
- __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- __ JumpIfSmi(eax, &rt_call);
- // edi: constructor
- // eax: initial map (if proven valid below)
- __ CmpObjectType(eax, MAP_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // edi: constructor
- // eax: initial map
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ dec_b(FieldOperand(ecx,
- SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
-
- __ push(eax);
- __ push(edi);
-
- __ push(edi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(edi);
- __ pop(eax);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // edi: constructor
- // eax: initial map
- __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ shl(edi, kPointerSizeLog2);
- __ AllocateInNewSpace(
- edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ mov(Operand(ebx, JSObject::kMapOffset), eax);
- Factory* factory = masm->isolate()->factory();
- __ mov(ecx, factory->empty_fixed_array());
- __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
- __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
- // Set extra fields in the newly allocated object.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
- __ mov(edx, factory->undefined_value());
- if (count_constructions) {
- __ movzx_b(esi,
- FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
- __ lea(esi,
- Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
- // esi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(esi, edi);
- __ Assert(less_equal,
- "Unexpected number of pre-allocated property fields.");
- }
- __ InitializeFieldsWithFiller(ecx, esi, edx);
- __ mov(edx, factory->one_pointer_filler_map());
- }
- __ InitializeFieldsWithFiller(ecx, edi, edx);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ or_(ebx, Immediate(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- // Calculate the total number of properties described by the map.
- __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
- __ movzx_b(ecx,
- FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
- __ add(edx, ecx);
- // Calculate unused properties past the end of the in-object properties.
- __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
- __ sub(edx, ecx);
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // ebx: JSObject
- // edi: start of next object (will be start of FixedArray)
- // edx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- edx,
- REGISTER_VALUE_IS_INT32,
- edi,
- ecx,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // ebx: JSObject
- // edi: FixedArray
- // edx: number of elements
- // ecx: start of next object
- __ mov(eax, factory->fixed_array_map());
- __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
- __ SmiTag(edx);
- __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
-
- // Initialize the fields to undefined.
- // ebx: JSObject
- // edi: FixedArray
- // ecx: start of next object
- { Label loop, entry;
- __ mov(edx, factory->undefined_value());
- __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(Operand(eax, 0), edx);
- __ add(eax, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(eax, ecx);
- __ j(below, &loop);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // ebx: JSObject
- // edi: FixedArray
- __ or_(edi, Immediate(kHeapObjectTag)); // add the heap tag
- __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
-
-
- // Continue with JSObject being successfully allocated
- // ebx: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // ebx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(ebx);
- }
-
- // Allocate the new receiver object using the runtime call.
- __ bind(&rt_call);
- // Must restore edi (constructor) before calling runtime.
- __ mov(edi, Operand(esp, 0));
- // edi: function (constructor)
- __ push(edi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(ebx, eax); // store result in ebx
-
- // New object allocated.
- // ebx: newly allocated object
- __ bind(&allocated);
- // Retrieve the function from the stack.
- __ pop(edi);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ mov(eax, Operand(esp, 0));
- __ SmiUntag(eax);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(ebx);
- __ push(ebx);
-
- // Set up pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(ecx, eax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(ebx, ecx, times_4, 0));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Call the function.
- if (is_api_function) {
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
-
- // Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &exit);
-
- // Symbols are "objects".
- __ CmpInstanceType(ecx, SYMBOL_TYPE);
- __ j(equal, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0));
-
- // Restore the arguments count and leave the construct frame.
- __ bind(&exit);
- __ mov(ebx, Operand(esp, kPointerSize)); // Get arguments count.
-
- // Leave construct frame.
- }
-
- // Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
- __ push(ecx);
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
- __ ret(0);
-}
-
-
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
-}
-
-
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- // Clear the context before we push it when entering the internal frame.
- __ Set(esi, Immediate(0));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load the previous frame pointer (ebx) to access C arguments
- __ mov(ebx, Operand(ebp, 0));
-
- // Get the function from the frame and setup the context.
- __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
- __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
-
- // Push the function and the receiver onto the stack.
- __ push(ecx);
- __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
-
- // Load the number of arguments and setup pointer to the arguments.
- __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
- __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
-
- // Copy arguments to the stack in a loop.
- Label loop, entry;
- __ Set(ecx, Immediate(0));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
- __ push(Operand(edx, 0)); // dereference handle
- __ inc(ecx);
- __ bind(&entry);
- __ cmp(ecx, eax);
- __ j(not_equal, &loop);
-
- // Get the function from the stack and call it.
- // kPointerSize for the receiver.
- __ mov(edi, Operand(esp, eax, times_4, kPointerSize));
-
- // Invoke the code.
- if (is_construct) {
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
-
- // Exit the internal frame. Notice that this also removes the empty.
- // context and the function left on the stack by the code
- // invocation.
- }
- __ ret(kPointerSize); // Remove receiver.
-}
-
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
-}
-
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
-}
-
-
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
-}
-
-
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
-}
-
-
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- // Re-execute the code that was patched back to the young age when
- // the stub returns.
- __ sub(Operand(esp, 0), Immediate(5));
- __ pushad();
- __ mov(eax, Operand(esp, 8 * kPointerSize));
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
- }
- __ popad();
- __ ret(0);
-}
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
-
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
- __ popad();
- // Tear down internal frame.
- }
-
- __ pop(MemOperand(esp, 0)); // Ignore state offset
- __ ret(0); // Return to IC Miss stub, continuation still on stack.
-}
-
-
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass deoptimization type to the runtime system.
- __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
-
- // Tear down internal frame.
- }
-
- // Get the full codegen state from the stack and untag it.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ SmiUntag(ecx);
-
- // Switch on the state.
- Label not_no_registers, not_tos_eax;
- __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
- __ j(not_equal, &not_no_registers, Label::kNear);
- __ ret(1 * kPointerSize); // Remove state.
-
- __ bind(&not_no_registers);
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- __ cmp(ecx, FullCodeGenerator::TOS_REG);
- __ j(not_equal, &not_tos_eax, Label::kNear);
- __ ret(2 * kPointerSize); // Remove state, eax.
-
- __ bind(&not_tos_eax);
- __ Abort("no cases left");
-}
-
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // TODO(kasperl): Do we need to save/restore the XMM registers too?
-
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ popad();
- __ ret(0);
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- Factory* factory = masm->isolate()->factory();
-
- // 1. Make sure we have at least one argument.
- { Label done;
- __ test(eax, eax);
- __ j(not_zero, &done);
- __ pop(ebx);
- __ push(Immediate(factory->undefined_value()));
- __ push(ebx);
- __ inc(eax);
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- Label slow, non_function;
- // 1 ~ return address.
- __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
- __ JumpIfSmi(edi, &non_function);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
-
-
- // 3a. Patch the first argument if necessary when calling a function.
- Label shift_arguments;
- __ Set(edx, Immediate(0)); // indicate regular JS_FUNCTION
- { Label convert_to_object, use_global_receiver, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &shift_arguments);
-
- // Do not transform the receiver for natives (shared already in ebx).
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &shift_arguments);
-
- // Compute the receiver in non-strict mode.
- __ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
-
- // Call ToObject on the receiver if it is not an object, or use the
- // global object if it is null or undefined.
- __ JumpIfSmi(ebx, &convert_to_object);
- __ cmp(ebx, factory->null_value());
- __ j(equal, &use_global_receiver);
- __ cmp(ebx, factory->undefined_value());
- __ j(equal, &use_global_receiver);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &shift_arguments);
-
- __ bind(&convert_to_object);
-
- { // In order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(eax);
- __ push(eax);
-
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(ebx, eax);
- __ Set(edx, Immediate(0)); // restore
-
- __ pop(eax);
- __ SmiUntag(eax);
- }
-
- // Restore the function to edi.
- __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
- __ jmp(&patch_receiver);
-
- // Use the global receiver object from the called function as the
- // receiver.
- __ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(esi, kGlobalIndex));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
- __ mov(ebx, FieldOperand(ebx, kGlobalIndex));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
-
- __ bind(&patch_receiver);
- __ mov(Operand(esp, eax, times_4, 0), ebx);
-
- __ jmp(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ Set(edx, Immediate(1)); // indicate function proxy
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(equal, &shift_arguments);
- __ bind(&non_function);
- __ Set(edx, Immediate(2)); // indicate non-function
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- __ mov(Operand(esp, eax, times_4, 0), edi);
-
- // 4. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- __ bind(&shift_arguments);
- { Label loop;
- __ mov(ecx, eax);
- __ bind(&loop);
- __ mov(ebx, Operand(esp, ecx, times_4, 0));
- __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
- __ dec(ecx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(ebx); // Discard copy of return address.
- __ dec(eax); // One fewer argument (first argument is new receiver).
- }
-
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- { Label function, non_proxy;
- __ test(edx, edx);
- __ j(zero, &function);
- __ Set(ebx, Immediate(0));
- __ cmp(edx, Immediate(1));
- __ j(not_equal, &non_proxy);
-
- __ pop(edx); // return address
- __ push(edi); // re-add proxy object as additional argument
- __ push(edx);
- __ inc(eax);
- __ SetCallKind(ecx, CALL_AS_FUNCTION);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
- __ SmiUntag(ebx);
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ cmp(eax, ebx);
- __ j(not_equal,
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
-
- ParameterCount expected(0);
- __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
-
- __ push(Operand(ebp, kFunctionOffset)); // push this
- __ push(Operand(ebp, kArgumentsOffset)); // push arguments
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edi, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ mov(ecx, esp);
- __ sub(ecx, edi);
- // Make edx the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edx, eax);
- __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, edx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ push(Operand(ebp, 4 * kPointerSize)); // push this
- __ push(eax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
-
- // Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(eax); // limit
- __ push(Immediate(0)); // index
-
- // Get the receiver.
- __ mov(ebx, Operand(ebp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &push_receiver);
-
- Factory* factory = masm->isolate()->factory();
-
- // Do not transform the receiver for natives (shared already in ecx).
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- // Call ToObject on the receiver if it is not an object, or use the
- // global object if it is null or undefined.
- __ JumpIfSmi(ebx, &call_to_object);
- __ cmp(ebx, factory->null_value());
- __ j(equal, &use_global_receiver);
- __ cmp(ebx, factory->undefined_value());
- __ j(equal, &use_global_receiver);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &push_receiver);
-
- __ bind(&call_to_object);
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(ebx, eax);
- __ jmp(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(esi, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
- __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(ebx);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ mov(ecx, Operand(ebp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ push(eax);
-
- // Update the index on the stack and in register eax.
- __ mov(ecx, Operand(ebp, kIndexOffset));
- __ add(ecx, Immediate(1 << kSmiTagSize));
- __ mov(Operand(ebp, kIndexOffset), ecx);
-
- __ bind(&entry);
- __ cmp(ecx, Operand(ebp, kLimitOffset));
- __ j(not_equal, &loop);
-
- // Invoke the function.
- Label call_proxy;
- __ mov(eax, ecx);
- ParameterCount actual(eax);
- __ SmiUntag(eax);
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &call_proxy);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-
- frame_scope.GenerateLeaveFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
-
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(edi); // add function proxy as last argument
- __ inc(eax);
- __ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- // Leave internal frame.
- }
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
-}
-
-
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
-
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ mov(FieldOperand(result, JSObject::kMapOffset), scratch1);
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(result, JSArray::kPropertiesOffset),
- factory->empty_fixed_array());
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
-
- // If no storage is requested for the elements array just set the empty
- // fixed array.
- if (initial_capacity == 0) {
- __ mov(FieldOperand(result, JSArray::kElementsOffset),
- factory->empty_fixed_array());
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ lea(scratch1, Operand(result, JSArray::kSize));
- __ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array
- // scratch2: start of next object
- __ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
- factory->fixed_array_map());
- __ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(initial_capacity)));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
- static const int kLoopUnfoldLimit = 4;
- if (initial_capacity <= kLoopUnfoldLimit) {
- // Use a scratch register here to have only one reloc info when unfolding
- // the loop.
- __ mov(scratch3, factory->the_hole_value());
- for (int i = 0; i < initial_capacity; i++) {
- __ mov(FieldOperand(scratch1,
- FixedArray::kHeaderSize + i * kPointerSize),
- scratch3);
- }
- } else {
- Label loop, entry;
- __ mov(scratch2, Immediate(initial_capacity));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(FieldOperand(scratch1,
- scratch2,
- times_pointer_size,
- FixedArray::kHeaderSize),
- factory->the_hole_value());
- __ bind(&entry);
- __ dec(scratch2);
- __ j(not_sign, &loop);
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end (see
-// below for when that is not the case). If the parameter fill_with_holes is
-// true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array,
- Register elements_array_end,
- Register scratch,
- bool fill_with_hole,
- Label* gc_required) {
- ASSERT(scratch.is(edi)); // rep stos destination
- ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
- ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
-
- __ LoadInitialArrayMap(array_function, scratch,
- elements_array, fill_with_hole);
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested elements.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- array_size,
- REGISTER_VALUE_IS_SMI,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array: initial map
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
- Factory* factory = masm->isolate()->factory();
- __ mov(elements_array, factory->empty_fixed_array());
- __ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ lea(elements_array, Operand(result, JSArray::kSize));
- __ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
- // Initialize the fixed array. FixedArray length is stored as a smi.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
- factory->fixed_array_map());
- // For non-empty JSArrays the length of the FixedArray and the JSArray is the
- // same.
- __ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array: elements array
- if (fill_with_hole) {
- __ SmiUntag(array_size);
- __ lea(edi, Operand(elements_array,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ mov(eax, factory->the_hole_value());
- __ cld();
- // Do not use rep stos when filling less than kRepStosThreshold
- // words.
- const int kRepStosThreshold = 16;
- Label loop, entry, done;
- __ cmp(ecx, kRepStosThreshold);
- __ j(below, &loop); // Note: ecx > 0.
- __ rep_stos();
- __ jmp(&done);
- __ bind(&loop);
- __ stos();
- __ bind(&entry);
- __ cmp(edi, elements_array_end);
- __ j(below, &loop);
- __ bind(&done);
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// edi: constructor (built-in Array function)
-// eax: argc
-// esp[0]: return address
-// esp[4]: last argument
-// This function is used for both construct and normal calls of Array. Whether
-// it is a construct call or not is indicated by the construct_call parameter.
-// The only difference between handling a construct call and a normal call is
-// that for a construct call the constructor function in edi needs to be
-// preserved for entering the generic code. In both cases argc in eax needs to
-// be preserved.
-static void ArrayNativeCode(MacroAssembler* masm,
- bool construct_call,
- Label* call_generic_code) {
- Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
- empty_array, not_empty_array, finish, cant_transition_map, not_double;
-
- // Push the constructor and argc. No need to tag argc as a smi, as there will
- // be no garbage collection with this on the stack.
- int push_count = 0;
- if (construct_call) {
- push_count++;
- __ push(edi);
- }
- push_count++;
- __ push(eax);
-
- // Check for array construction with zero arguments.
- __ test(eax, eax);
- __ j(not_zero, &argc_one_or_more);
-
- __ bind(&empty_array);
- // Handle construction of an empty array.
- AllocateEmptyJSArray(masm,
- edi,
- eax,
- ebx,
- ecx,
- edi,
- &prepare_generic_code_call);
- __ IncrementCounter(masm->isolate()->counters()->array_function_native(), 1);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ ret(kPointerSize);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmp(eax, 1);
- __ j(not_equal, &argc_two_or_more);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
- __ test(ecx, ecx);
- __ j(not_zero, &not_empty_array);
-
- // The single argument passed is zero, so we jump to the code above used to
- // handle the case of no arguments passed. To adapt the stack for that we move
- // the return address and the pushed constructor (if pushed) one stack slot up
- // thereby removing the passed argument. Argc is also on the stack - at the
- // bottom - and it needs to be changed from 1 to 0 to have the call into the
- // runtime system work in case a GC is required.
- for (int i = push_count; i > 0; i--) {
- __ mov(eax, Operand(esp, i * kPointerSize));
- __ mov(Operand(esp, (i + 1) * kPointerSize), eax);
- }
- __ Drop(2); // Drop two stack slots.
- __ push(Immediate(0)); // Treat this as a call with argc of zero.
- __ jmp(&empty_array);
-
- __ bind(&not_empty_array);
- __ test(ecx, Immediate(kIntptrSignBit | kSmiTagMask));
- __ j(not_zero, &prepare_generic_code_call);
-
- // Handle construction of an empty array of a certain size. Get the size from
- // the stack and bail out if size is to large to actually allocate an elements
- // array.
- __ cmp(ecx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
- __ j(greater_equal, &prepare_generic_code_call);
-
- // edx: array_size (smi)
- // edi: constructor
- // esp[0]: argc (cannot be 0 here)
- // esp[4]: constructor (only if construct_call)
- // esp[8]: return address
- // esp[C]: argument
- AllocateJSArray(masm,
- edi,
- ecx,
- ebx,
- eax,
- edx,
- edi,
- true,
- &prepare_generic_code_call);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->array_function_native(), 1);
- __ mov(eax, ebx);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ ret(2 * kPointerSize);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiTag(eax); // Convet argc to a smi.
- // eax: array_size (smi)
- // edi: constructor
- // esp[0] : argc
- // esp[4]: constructor (only if construct_call)
- // esp[8] : return address
- // esp[C] : last argument
- AllocateJSArray(masm,
- edi,
- eax,
- ebx,
- ecx,
- edx,
- edi,
- false,
- &prepare_generic_code_call);
- __ IncrementCounter(counters->array_function_native(), 1);
- __ push(ebx);
- __ mov(ebx, Operand(esp, kPointerSize));
- // ebx: argc
- // edx: elements_array_end (untagged)
- // esp[0]: JSArray
- // esp[4]: argc
- // esp[8]: constructor (only if construct_call)
- // esp[12]: return address
- // esp[16]: last argument
-
- // Location of the last argument
- int last_arg_offset = (construct_call ? 4 : 3) * kPointerSize;
- __ lea(edi, Operand(esp, last_arg_offset));
-
- // Location of the first array element (Parameter fill_with_holes to
- // AllocateJSArray is false, so the FixedArray is returned in ecx).
- __ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
-
- Label has_non_smi_element;
-
- // ebx: argc
- // edx: location of the first array element
- // edi: location of the last argument
- // esp[0]: JSArray
- // esp[4]: argc
- // esp[8]: constructor (only if construct_call)
- // esp[12]: return address
- // esp[16]: last argument
- Label loop, entry;
- __ mov(ecx, ebx);
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(eax, &has_non_smi_element);
- }
- __ mov(Operand(edx, 0), eax);
- __ add(edx, Immediate(kPointerSize));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Remove caller arguments from the stack and return.
- // ebx: argc
- // esp[0]: JSArray
- // esp[4]: argc
- // esp[8]: constructor (only if construct_call)
- // esp[12]: return address
- // esp[16]: last argument
- __ bind(&finish);
- __ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
- __ pop(eax);
- __ pop(ebx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size,
- last_arg_offset - kPointerSize));
- __ jmp(ecx);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(eax,
- masm->isolate()->factory()->heap_number_map(),
- &not_double,
- DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- // Throw away the array that's only been partially constructed.
- __ pop(eax);
- __ UndoAllocationInNewSpace(eax);
- __ jmp(&prepare_generic_code_call);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- __ mov(ebx, Operand(esp, 0));
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- edi,
- eax,
- &cant_transition_map);
- __ mov(FieldOperand(ebx, HeapObject::kMapOffset), edi);
- __ RecordWriteField(ebx, HeapObject::kMapOffset, edi, eax,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Prepare to re-enter the loop
- __ lea(edi, Operand(esp, last_arg_offset));
-
- // Finish the array initialization loop.
- Label loop2;
- __ bind(&loop2);
- __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
- __ mov(Operand(edx, 0), eax);
- __ add(edx, Immediate(kPointerSize));
- __ dec(ecx);
- __ j(greater_equal, &loop2);
- __ jmp(&finish);
-
- // Restore argc and constructor before running the generic code.
- __ bind(&prepare_generic_code_call);
- __ pop(eax);
- if (construct_call) {
- __ pop(edi);
- }
- __ jmp(call_generic_code);
-}
-
-
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the InternalArray function.
- __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin InternalArray function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for InternalArray function");
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for InternalArray function");
- }
-
- // Run the native code for the InternalArray function called as a normal
- // function.
- ArrayNativeCode(masm, false, &generic_array_code);
-
- // Jump to the generic internal array code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ jmp(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the Array function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for Array function");
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, false, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ jmp(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- ebx : type info cell
- // -- edi : constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for Array function");
- __ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for Array function");
-
- if (FLAG_optimize_constructed_arrays) {
- // We should either have undefined in ebx or a valid jsglobalpropertycell
- Label okay_here;
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- Handle<Map> global_property_cell_map(
- masm->isolate()->heap()->global_property_cell_map());
- __ cmp(ebx, Immediate(undefined_sentinel));
- __ j(equal, &okay_here);
- __ cmp(FieldOperand(ebx, 0), Immediate(global_property_cell_map));
- __ Assert(equal, "Expected property cell in register ebx");
- __ bind(&okay_here);
- }
- }
-
- if (FLAG_optimize_constructed_arrays) {
- Label not_zero_case, not_one_case;
- __ test(eax, eax);
- __ j(not_zero, &not_zero_case);
- ArrayNoArgumentConstructorStub no_argument_stub;
- __ TailCallStub(&no_argument_stub);
-
- __ bind(&not_zero_case);
- __ cmp(eax, 1);
- __ j(greater, &not_one_case);
- ArraySingleArgumentConstructorStub single_argument_stub;
- __ TailCallStub(&single_argument_stub);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub n_argument_stub;
- __ TailCallStub(&n_argument_stub);
- } else {
- Label generic_constructor;
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, true, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1);
-
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ Assert(equal, "Unexpected String function");
- }
-
- // Load the first argument into eax and get rid of the rest
- // (including the receiver).
- Label no_arguments;
- __ test(eax, eax);
- __ j(zero, &no_arguments);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- __ pop(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ push(ecx);
- __ mov(eax, ebx);
-
- // Lookup the argument in the number to string cache.
- Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- eax, // Input.
- ebx, // Result.
- ecx, // Scratch 1.
- edx, // Scratch 2.
- false, // Input is known to be smi?
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1);
- __ bind(&argument_is_string);
- // ----------- S t a t e -------------
- // -- ebx : argument converted to string
- // -- edi : constructor function
- // -- esp[0] : return address
- // -----------------------------------
-
- // Allocate a JSValue and put the tagged pointer into eax.
- Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- eax, // Result.
- ecx, // New allocation top (we ignore it).
- no_reg,
- &gc_required,
- TAG_OBJECT);
-
- // Set the map.
- __ LoadGlobalFunctionInitialMap(edi, ecx);
- if (FLAG_debug_code) {
- __ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
- JSValue::kSize >> kPointerSizeLog2);
- __ Assert(equal, "Unexpected string wrapper instance size");
- __ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
- __ Assert(equal, "Unexpected unused properties of string wrapper");
- }
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
-
- // Set properties and elements.
- Factory* factory = masm->isolate()->factory();
- __ Set(ecx, Immediate(factory->empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
-
- // Set the value.
- __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
-
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-
- // We're done. Return.
- __ ret(0);
-
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &convert_argument);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
- __ j(NegateCondition(is_string), &convert_argument);
- __ mov(ebx, eax);
- __ IncrementCounter(counters->string_ctor_string_value(), 1);
- __ jmp(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into ebx.
- __ bind(&convert_argument);
- __ IncrementCounter(counters->string_ctor_conversions(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edi); // Preserve the function.
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(edi);
- }
- __ mov(ebx, eax);
- __ jmp(&argument_is_string);
-
- // Load the empty string into ebx, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ Set(ebx, Immediate(factory->empty_string()));
- __ pop(ecx);
- __ lea(esp, Operand(esp, kPointerSize));
- __ push(ecx);
- __ jmp(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(ebx);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- }
- __ ret(0);
-}
-
-
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(ebp);
- __ mov(ebp, esp);
-
- // Store the arguments adaptor context sentinel.
- __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Push the function on the stack.
- __ push(edi);
-
- // Preserve the number of arguments on the stack. Must preserve eax,
- // ebx and ecx because these registers are used when copying the
- // arguments and the receiver.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(edi, Operand(eax, eax, times_1, kSmiTag));
- __ push(edi);
-}
-
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // Retrieve the number of arguments from the stack.
- __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Leave the frame.
- __ leave();
-
- // Remove caller arguments from the stack.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
- __ push(ecx);
-}
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : actual number of arguments
- // -- ebx : expected number of arguments
- // -- ecx : call kind information
- // -- edx : code entry to call
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments;
- __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
-
- Label enough, too_few;
- __ cmp(eax, ebx);
- __ j(less, &too_few);
- __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
- __ j(equal, &dont_adapt_arguments);
-
- { // Enough parameters: Actual >= expected.
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all expected arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(eax, Operand(ebp, eax, times_4, offset));
- __ mov(edi, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ inc(edi);
- __ push(Operand(eax, 0));
- __ sub(eax, Immediate(kPointerSize));
- __ cmp(edi, ebx);
- __ j(less, &copy);
- __ jmp(&invoke);
- }
-
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(edi, Operand(ebp, eax, times_4, offset));
- // ebx = expected - actual.
- __ sub(ebx, eax);
- // eax = -actual - 1
- __ neg(eax);
- __ sub(eax, Immediate(1));
-
- Label copy;
- __ bind(&copy);
- __ inc(eax);
- __ push(Operand(edi, 0));
- __ sub(edi, Immediate(kPointerSize));
- __ test(eax, eax);
- __ j(not_zero, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ bind(&fill);
- __ inc(eax);
- __ push(Immediate(masm->isolate()->factory()->undefined_value()));
- __ cmp(eax, ebx);
- __ j(less, &fill);
- }
-
- // Call the entry point.
- __ bind(&invoke);
- // Restore function pointer.
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ call(edx);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
- // Leave frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ ret(0);
-
- // -------------------------------------------
- // Dont adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ jmp(edx);
-}
-
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- CpuFeatures::TryForceFeatureScope scope(SSE2);
- if (!CpuFeatures::IsSupported(SSE2) && FLAG_debug_code) {
- __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
- return;
- }
-
- // Get the loop depth of the stack guard check. This is recorded in
- // a test(eax, depth) instruction right after the call.
- Label stack_check;
- __ mov(ebx, Operand(esp, 0)); // return address
- if (FLAG_debug_code) {
- __ cmpb(Operand(ebx, 0), Assembler::kTestAlByte);
- __ Assert(equal, "test eax instruction not found after loop stack check");
- }
- __ movzx_b(ebx, Operand(ebx, 1)); // depth
-
- // Get the loop nesting level at which we allow OSR from the
- // unoptimized code and check if we want to do OSR yet. If not we
- // should perform a stack guard check so we can get interrupts while
- // waiting for on-stack replacement.
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
- __ cmpb(ebx, FieldOperand(ecx, Code::kAllowOSRAtLoopNestingLevelOffset));
- __ j(greater, &stack_check);
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- }
-
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
- Label skip;
- __ cmp(eax, Immediate(Smi::FromInt(-1)));
- __ j(not_equal, &skip, Label::kNear);
- __ ret(0);
-
- // Insert a stack guard check so that if we decide not to perform
- // on-stack replacement right away, the function calling this stub can
- // still be interrupted.
- __ bind(&stack_check);
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ TailCallStub(&stub);
- if (FLAG_debug_code) {
- __ Abort("Unreachable code: returned from tail call.");
- }
- __ bind(&ok);
- __ ret(0);
-
- __ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiUntag(eax);
- __ push(eax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
-}
-
-
-#undef __
-}
-} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc b/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
deleted file mode 100644
index 44df82a..0000000
--- a/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
+++ /dev/null
@@ -1,7936 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "isolate.h"
-#include "jsregexp.h"
-#include "regexp-macro-assembler.h"
-#include "runtime.h"
-#include "stub-cache.h"
-#include "codegen.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { eax, ebx, ecx, edx };
- descriptor->register_param_count_ = 4;
- descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { edx, ecx };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { eax, ebx };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-}
-
-
-static void InitializeArrayConstructorDescriptor(Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // register state
- // edi -- constructor function
- // ebx -- type info cell with elements kind
- // eax -- number of arguments to the constructor function
- static Register registers[] = { edi, ebx };
- descriptor->register_param_count_ = 2;
- // stack param count needs (constructor pointer, and single argument)
- descriptor->stack_parameter_count_ = &eax;
- descriptor->register_params_ = registers;
- descriptor->extra_expression_stack_count_ = 1;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ArrayConstructor_StubFailure);
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
- __ ret(0);
-
- __ bind(&check_heap_number);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ebx, Immediate(factory->heap_number_map()));
- __ j(not_equal, &call_builtin, Label::kNear);
- __ ret(0);
-
- __ bind(&call_builtin);
- __ pop(ecx); // Pop return address.
- __ push(eax);
- __ push(ecx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in esi.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1);
-
- // Get the function info from the stack.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
-
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
- __ mov(ebx, Operand(ecx, Context::SlotOffset(map_index)));
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ebx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- Factory* factory = masm->isolate()->factory();
- __ mov(ebx, Immediate(factory->empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(factory->the_hole_value()));
- __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
- __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
- __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ test(ebx, ebx);
- __ j(not_zero, &check_optimized, Label::kNear);
- }
- __ bind(&install_unoptimized);
- __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
- Immediate(factory->undefined_value()));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
-
- // ecx holds native context, ebx points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // Map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into edx.
- __ mov(edx, FieldOperand(ebx, FixedArray::kHeaderSize + kPointerSize));
- __ cmp(ecx, FieldOperand(ebx, FixedArray::kHeaderSize));
- __ j(equal, &install_optimized);
-
- // Iterate through the rest of map backwards. edx holds an index as a Smi.
- Label loop;
- Label restore;
- __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double check first entry.
- __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ j(equal, &restore);
- __ sub(edx, Immediate(Smi::FromInt(
- SharedFunctionInfo::kEntryLength))); // Skip an entry.
- __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
- __ j(not_equal, &loop, Label::kNear);
- // Hit: fetch the optimized code.
- __ mov(edx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 1));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
-
- // TODO(fschneider): Idea: store proper code pointers in the optimized code
- // map and either unmangle them on marking or do nothing as the whole map is
- // discarded on major GC anyway.
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
-
- // Now link a function into a list of optimized functions.
- __ mov(edx, ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), edx);
- // No need for write barrier as JSFunction (eax) is in the new space.
-
- __ mov(ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST), eax);
- // Store JSFunction (eax) into edx before issuing write barrier as
- // it clobbers all the registers passed.
- __ mov(edx, eax);
- __ RecordWriteContextSlot(
- ecx,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- edx,
- ebx,
- kDontSaveFPRegs);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&restore);
- // Restore SharedFunctionInfo into edx.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ jmp(&install_unoptimized);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(ecx); // Temporarily remove return address.
- __ pop(edx);
- __ push(esi);
- __ push(edx);
- __ push(Immediate(factory->false_value()));
- __ push(ecx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Set up the object header.
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- factory->function_context_map());
- __ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
-
- // Set up the fixed slots.
- __ Set(ebx, Immediate(0)); // Set to NULL.
- __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
- __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
- __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
-
- // Copy the global object from the previous context.
- __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), ebx);
-
- // Copy the qml global object from the previous context.
- __ mov(ebx,
- Operand(esi, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)),
- ebx);
-
-
- // Initialize the rest of the slots to undefined.
- __ mov(ebx, factory->undefined_value());
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
- }
-
- // Return and remove the on-stack parameter.
- __ mov(esi, eax);
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + (1 * kPointerSize)]: function
- // [esp + (2 * kPointerSize)]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function or sentinel from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Get the serialized scope info from the stack.
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
-
- // Set up the object header.
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- factory->block_context_map());
- __ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
- if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ cmp(ecx, 0);
- __ Assert(equal, message);
- }
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
- __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots.
- __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
- __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
- __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
-
- // Copy the global object from the previous context.
- __ mov(ebx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
- __ mov(ContextOperand(eax, Context::GLOBAL_OBJECT_INDEX), ebx);
-
- // Copy the qml global object from the previous context.
- __ mov(ebx, ContextOperand(esi, Context::QML_GLOBAL_OBJECT_INDEX));
- __ mov(ContextOperand(eax, Context::QML_GLOBAL_OBJECT_INDEX), ebx);
-
- // Initialize the rest of the slots to the hole value.
- if (slots_ == 1) {
- __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
- factory->the_hole_value());
- } else {
- __ mov(ebx, factory->the_hole_value());
- for (int i = 0; i < slots_; i++) {
- __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);
- }
- }
-
- // Return and remove the on-stack parameters.
- __ mov(esi, eax);
- __ ret(2 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- AllocationSiteMode allocation_site_mode,
- Label* fail) {
- // Registers on entry:
- //
- // ecx: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- int size = JSArray::kSize;
- int allocation_info_start = size;
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- size += AllocationSiteInfo::kSize;
- }
- size += elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- AllocationFlags flags = TAG_OBJECT;
- if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
- flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
- }
- __ AllocateInNewSpace(size, eax, ebx, edx, fail, flags);
-
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ mov(FieldOperand(eax, allocation_info_start),
- Immediate(Handle<Map>(masm->isolate()->heap()->
- allocation_site_info_map())));
- __ mov(FieldOperand(eax, allocation_info_start + kPointerSize), ecx);
- }
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ lea(edx, Operand(eax, JSArray::kSize + AllocationSiteInfo::kSize));
- } else {
- __ lea(edx, Operand(eax, JSArray::kSize));
- }
- __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
-
- // Copy the elements array.
- if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
- } else {
- ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
- int i;
- for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
- while (i < elements_size) {
- __ fld_d(FieldOperand(ecx, i));
- __ fstp_d(FieldOperand(edx, i));
- i += kDoubleSize;
- }
- ASSERT(i == elements_size);
- }
- }
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + kPointerSize]: constant elements.
- // [esp + (2 * kPointerSize)]: literal index.
- // [esp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into ecx and check if we need to create a
- // boilerplate.
- __ mov(ecx, Operand(esp, 3 * kPointerSize));
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kPointerSize == 4);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ecx, factory->undefined_value());
- Label slow_case;
- __ j(equal, &slow_case);
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- // ecx is boilerplate object.
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ CheckMap(ebx, factory->fixed_cow_array_map(),
- &check_fast_elements, DONT_DO_SMI_CHECK);
- GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&check_fast_elements);
- __ CheckMap(ebx, factory->fixed_array_map(),
- &double_elements, DONT_DO_SMI_CHECK);
- GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Handle<Map> expected_map;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map = factory->fixed_array_map();
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map = factory->fixed_double_array_map();
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map = factory->fixed_cow_array_map();
- }
- __ push(ecx);
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
- __ Assert(equal, message);
- __ pop(ecx);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode,
- allocation_site_mode_,
- &slow_case);
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// The stub expects its argument on the stack and returns its result in tos_:
-// zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- Label patch;
- Factory* factory = masm->isolate()->factory();
- const Register argument = eax;
- const Register map = edx;
-
- if (!types_.IsEmpty()) {
- __ mov(argument, Operand(esp, 1 * kPointerSize));
- }
-
- // undefined -> false
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- Label not_smi;
- __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ mov(tos_, argument);
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_smi);
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(argument, &patch, Label::kNear);
- }
-
- if (types_.NeedsMap()) {
- __ mov(map, FieldOperand(argument, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- // Undetectable -> false.
- Label not_undetectable;
- __ j(zero, &not_undetectable, Label::kNear);
- __ Set(tos_, Immediate(0));
- __ ret(1 * kPointerSize);
- __ bind(&not_undetectable);
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // spec object -> true.
- Label not_js_object;
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, &not_js_object, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, Immediate(1));
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_js_object);
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ mov(tos_, FieldOperand(argument, String::kLengthOffset));
- __ ret(1 * kPointerSize); // the string length is OK as the return value
- __ bind(&not_string);
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number, false_result;
- __ cmp(map, factory->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ fldz();
- __ fld_d(FieldOperand(argument, HeapNumber::kValueOffset));
- __ FCmp();
- __ j(zero, &false_result, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, Immediate(1));
- }
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ Set(tos_, Immediate(0));
- __ ret(1 * kPointerSize);
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ pushad();
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(Operand(esp, i * kDoubleSize), reg);
- }
- }
- const int argument_count = 1;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, ecx);
- __ mov(Operand(esp, 0 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- argument_count);
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(reg, Operand(esp, i * kDoubleSize));
- }
- __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- }
- __ popad();
- __ ret(0);
-}
-
-
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- const Register argument = eax;
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- Label different_value;
- __ CompareRoot(argument, value);
- __ j(not_equal, &different_value, Label::kNear);
- if (!result) {
- // If we have to return zero, there is no way around clearing tos_.
- __ Set(tos_, Immediate(0));
- } else if (!tos_.is(argument)) {
- // If we have to return non-zero, we can re-use the argument if it is the
- // same register as the result, because we never see Smi-zero here.
- __ Set(tos_, Immediate(1));
- }
- __ ret(1 * kPointerSize);
- __ bind(&different_value);
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Get return address, operand is now on top of stack.
- __ push(Immediate(Smi::FromInt(tos_.code())));
- __ push(Immediate(Smi::FromInt(types_.ToByte())));
- __ push(ecx); // Push return address.
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- enum ArgLocation {
- ARGS_ON_STACK,
- ARGS_IN_REGISTERS
- };
-
- // Code pattern for loading a floating point value. Input value must
- // be either a smi or a heap number object (fp value). Requirements:
- // operand in register number. Returns operand as floating point number
- // on FPU stack.
- static void LoadFloatOperand(MacroAssembler* masm, Register number);
-
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
- // Returns operands as floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location = ARGS_ON_STACK);
-
- // Similar to LoadFloatOperand but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
-
- // Test if operands are smi or number objects (fp). Requirements:
- // operand_1 in eax, operand_2 in edx; falls through on float
- // operands, jumps to the non_float label otherwise.
- static void CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch);
-
- // Takes the operands in edx and eax and loads them as integers in eax
- // and ecx.
- static void LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* operand_conversion_failure);
-
- // Assumes that operands are smis or heap numbers and loads them
- // into xmm0 and xmm1. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm);
-
- // Test if operands are numbers (smi or HeapNumber objects), and load
- // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
- // either operand is not a number. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-
- // Similar to LoadSSE2Operands but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
-
- // Checks that the two floating point numbers loaded into xmm0 and xmm1
- // have int32 values.
- static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
- Label* non_int32,
- Register scratch);
-
- // Checks that |operand| has an int32 value. If |int32_result| is different
- // from |scratch|, it will contain that int32 value.
- static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch);
-};
-
-
-// Get the integer part of a heap number. Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
-// trashed registers.
-static void IntegerConvert(MacroAssembler* masm,
- Register source,
- bool use_sse3,
- Label* conversion_failure) {
- ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
- Label done, right_exponent, normal_exponent;
- Register scratch = ebx;
- Register scratch2 = edi;
- // Get exponent word.
- __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, ecx);
- // If the exponent is above 83, the number contains no significant
- // bits in the range 0..2^31, so the result is zero.
- static const uint32_t kResultIsZeroExponent = 83;
- __ cmp(scratch2, Immediate(kResultIsZeroExponent));
- __ j(above, &done);
- if (use_sse3) {
- CpuFeatures::Scope scope(SSE3);
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent = 63;
- __ cmp(scratch2, Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
- // Reserve space for 64 bit answer.
- __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint.
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
- } else {
- // Check whether the exponent matches a 32 bit signed int that cannot be
- // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
- // exponent is 30 (biased). This is the exponent that we are fastest at and
- // also the highest exponent we can handle here.
- const uint32_t non_smi_exponent = 30;
- __ cmp(scratch2, Immediate(non_smi_exponent));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- __ j(equal, &right_exponent, Label::kNear);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ j(less, &normal_exponent, Label::kNear);
-
- {
- // Handle a big exponent. The only reason we have this code is that the
- // >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent = 31;
- __ cmp(scratch2, Immediate(big_non_smi_exponent));
- __ j(not_equal, conversion_failure);
- // We have the big exponent, typically from >>>. This means the number is
- // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch2, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to use the full unsigned range so we subtract 1 bit from the
- // shift distance.
- const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch2, big_shift_distance);
- // Get the second half of the double.
- __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 21 bits to get the most significant 11 bits or the low
- // mantissa word.
- __ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, scratch2);
- // We have the answer in ecx, but we may need to negate it.
- __ test(scratch, scratch);
- __ j(positive, &done, Label::kNear);
- __ neg(ecx);
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&normal_exponent);
- // Exponent word in scratch, exponent in scratch2. Zero in ecx.
- // We know that 0 <= exponent < 30.
- __ mov(ecx, Immediate(30));
- __ sub(ecx, scratch2);
-
- __ bind(&right_exponent);
- // Here ecx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We have kExponentShift + 1 significant bits int he low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, shift_distance);
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits or the low
- // mantissa word.
- __ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, scratch);
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to ecx and
- // we may need to fix the sign.
- Label negative;
- __ xor_(ecx, ecx);
- __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative, Label::kNear);
- __ mov(ecx, scratch2);
- __ jmp(&done, Label::kNear);
- __ bind(&negative);
- __ sub(ecx, scratch2);
- }
- __ bind(&done);
-}
-
-
-// Uses SSE2 to convert the heap number in |source| to an integer. Jumps to
-// |conversion_failure| if the heap number did not contain an int32 value.
-// Result is in ecx. Trashes ebx, xmm0, and xmm1.
-static void ConvertHeapNumberToInt32(MacroAssembler* masm,
- Register source,
- Label* conversion_failure) {
- __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset));
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, conversion_failure, xmm0, ecx, ebx, xmm1);
-}
-
-
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
-
- __ push(eax); // the operand
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(mode_)));
- __ push(Immediate(Smi::FromInt(operand_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
- Label::kNear, Label::kNear, Label::kNear);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&non_smi);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* undo,
- Label* slow,
- Label::Distance non_smi_near,
- Label::Distance undo_near,
- Label::Distance slow_near) {
- // Check whether the value is a smi.
- __ JumpIfNotSmi(eax, non_smi, non_smi_near);
-
- // We can't handle -0 with smis, so use a type transition for that case.
- __ test(eax, eax);
- __ j(zero, slow, slow_near);
-
- // Try optimistic subtraction '0 - value', saving operand in eax for undo.
- __ mov(edx, eax);
- __ Set(eax, Immediate(0));
- __ sub(eax, edx);
- __ j(overflow, undo, undo_near);
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(
- MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near) {
- // Check whether the value is a smi.
- __ JumpIfNotSmi(eax, non_smi, non_smi_near);
-
- // Flip bits and revert inverted smi-tag.
- __ not_(eax);
- __ and_(eax, ~kSmiTagMask);
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
- __ mov(eax, edx);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(
- MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, slow);
-
- if (mode_ == UNARY_OVERWRITE) {
- __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
- Immediate(HeapNumber::kSignMask)); // Flip sign.
- } else {
- __ mov(edx, eax);
- // edx: operand
-
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated, Label::kNear);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ pop(edx);
- }
-
- __ bind(&heapnumber_allocated);
- // eax: allocated 'empty' number
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- }
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
- Label* slow) {
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, slow);
-
- // Convert the heap number in eax to an untagged integer in ecx.
- IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ not_(ecx);
- __ cmp(ecx, 0xc0000000);
- __ j(sign, &try_float, Label::kNear);
-
- // Tag the result as a smi and we're done.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(eax, Operand(ecx, times_2, kSmiTag));
- __ ret(0);
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (mode_ == UNARY_NO_OVERWRITE) {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ mov(ebx, eax);
- __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the original HeapNumber on the stack. The integer value can't
- // be stored since it's untagged and not in the smi range (so we can't
- // smi-tag it). We'll recalculate the value after the GC instead.
- __ push(ebx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- // New HeapNumber is in eax.
- __ pop(edx);
- }
- // IntegerConvert uses ebx and edi as scratch registers.
- // This conversion won't go slow-case.
- IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
- __ not_(ecx);
-
- __ bind(&heapnumber_allocated);
- }
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ecx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(0);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the corresponding JavaScript builtin.
- __ pop(ecx); // pop return address.
- __ push(eax);
- __ push(ecx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- __ push(edx);
- __ push(eax);
- // Left and right arguments are now on top.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-// Prepare for a type transition runtime call when the args are already on
-// the stack, under the return address.
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- // Left and right arguments are already on top of the stack.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op == Token::DIV || op == Token::MOD) {
- left = eax;
- right = ebx;
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
-
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, left);
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left);
- combined = right;
- break;
-
- default:
- break;
- }
-
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ JumpIfNotSmi(combined, &not_smis);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, left); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, left); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- // Smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, left); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis);
- break;
-
- case Token::SUB:
- __ sub(left, right);
- __ j(overflow, &use_fp_on_smis);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, left); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &use_fp_on_smis);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, edx);
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &not_smis);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- __ ret(0);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- __ ret(2 * kPointerSize);
- break;
- default:
- UNREACHABLE();
- }
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
- __ bind(&use_fp_on_smis);
- switch (op) {
- // Undo the effects of some operations, and some register moves.
- case Token::SHL:
- // The arguments are saved on the stack, and only used from there.
- break;
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division. They should be in eax, ebx for jump to not_smi.
- __ mov(eax, edi);
- break;
- default:
- // No other operators jump to use_fp_on_smis.
- break;
- }
- __ jmp(&not_smis);
- } else {
- ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
- switch (op) {
- case Token::SHL:
- case Token::SHR: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- // It's OK to overwrite the arguments on the stack because we
- // are about to return.
- if (op == Token::SHR) {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
- __ fild_d(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- } else {
- ASSERT_EQ(Token::SHL, op);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, left);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- }
- __ ret(2 * kPointerSize);
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- __ ret(0);
- break;
- }
-
- default:
- break;
- }
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label call_runtime;
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
- } else {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- }
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateTypeTransition(masm);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
-// Input:
-// edx: left operand (tagged)
-// eax: right operand (tagged)
-// Output:
-// eax: result (tagged)
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- Label not_floats;
- Label not_int32;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_int32);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_int32);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
- if (op_ == Token::MOD) {
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Check result type if it is currently Int32.
- if (result_type_ <= BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm0, ecx, ecx, xmm2);
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- }
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- if (op_ == Token::MOD) {
- // The operands are now on the FPU stack, but we don't need them.
- __ fstp(0);
- __ fstp(0);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label not_int32;
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR hits a hard case, use the runtime system to
- // get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateRegisterArgsPush(masm);
- break;
- case Token::MOD:
- return; // Handled above.
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- break;
- default:
- UNREACHABLE();
- }
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- Factory* factory = masm->isolate()->factory();
-
- // Convert odd ball arguments to numbers.
- Label check, done;
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(edx, edx);
- } else {
- __ mov(edx, Immediate(factory->nan_value()));
- }
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(eax, eax);
- } else {
- __ mov(eax, Immediate(factory->nan_value()));
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime;
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
-
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_floats);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_floats);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- if (left_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm0, ecx, ecx, xmm2);
- }
- if (right_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm1, ecx, ecx, xmm2);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
-
- __ bind(&not_floats);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label non_smi_result;
- // We do not check the input arguments here, as any value is
- // unconditionally truncated to an int32 anyway. To get the
- // right optimized code, int32 type feedback is just right.
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR or MOD hit a hard case,
- // use the runtime system to get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- GenerateRegisterArgsPush(masm);
- break;
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- break;
- default:
- UNREACHABLE();
- }
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3,
- BinaryOpIC::GENERIC,
- BinaryOpIC::GENERIC,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop the arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If all else fails, use the runtime system to get the correct
- // result.
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- GenerateAddStrings(masm);
- // Fall through.
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateRegisterArgsPush(masm);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- break;
- default:
- UNREACHABLE();
- }
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &left_not_string, Label::kNear);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
- Label skip_allocation;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, ebx);
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, edx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(ecx);
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // TAGGED case:
- // Input:
- // esp[4]: tagged number input argument (should be number).
- // esp[0]: return address.
- // Output:
- // eax: tagged double result.
- // UNTAGGED case:
- // Input::
- // esp[0]: return address.
- // xmm1: untagged double input argument
- // Output:
- // xmm1: untagged double result.
-
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label skip_cache;
- const bool tagged = (argument_type_ == TAGGED);
- if (tagged) {
- // Test that eax is a number.
- Label input_not_smi;
- Label loaded;
- __ mov(eax, Operand(esp, kPointerSize));
- __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the low and high words of the double into ebx, edx.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ sar(eax, 1);
- __ sub(esp, Immediate(2 * kPointerSize));
- __ mov(Operand(esp, 0), eax);
- __ fild_s(Operand(esp, 0));
- __ fst_d(Operand(esp, 0));
- __ pop(edx);
- __ pop(ebx);
- __ jmp(&loaded, Label::kNear);
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ebx, Immediate(factory->heap_number_map()));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // low and high words into ebx, edx.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
-
- __ bind(&loaded);
- } else { // UNTAGGED.
- CpuFeatures::Scope scope(SSE2);
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope sse4_scope(SSE4_1);
- __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
- } else {
- __ pshufd(xmm0, xmm1, 0x1);
- __ movd(edx, xmm0);
- }
- __ movd(ebx, xmm1);
- }
-
- // ST[0] or xmm1 == double value
- // ebx = low 32 bits of double value
- // edx = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ mov(ecx, ebx);
- __ xor_(ecx, edx);
- __ mov(eax, ecx);
- __ sar(eax, 16);
- __ xor_(ecx, eax);
- __ mov(eax, ecx);
- __ sar(eax, 8);
- __ xor_(ecx, eax);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ and_(ecx,
- Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // ST[0] or xmm1 == double value.
- // ebx = low 32 bits of double value.
- // edx = high 32 bits of double value.
- // ecx = TranscendentalCache::hash(double value).
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(masm->isolate());
- __ mov(eax, Immediate(cache_array));
- int cache_array_index =
- type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
- __ mov(eax, Operand(eax, cache_array_index));
- // Eax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ test(eax, eax);
- __ j(zero, &runtime_call_clear_stack);
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
- // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
- __ lea(ecx, Operand(ecx, ecx, times_2, 0));
- __ lea(ecx, Operand(eax, ecx, times_4, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmp(ebx, Operand(ecx, 0));
- __ j(not_equal, &cache_miss, Label::kNear);
- __ cmp(edx, Operand(ecx, kIntSize));
- __ j(not_equal, &cache_miss, Label::kNear);
- // Cache hit!
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->transcendental_cache_hit(), 1);
- __ mov(eax, Operand(ecx, 2 * kIntSize));
- if (tagged) {
- __ fstp(0);
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- CpuFeatures::Scope scope(SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
- }
-
- __ bind(&cache_miss);
- __ IncrementCounter(counters->transcendental_cache_miss(), 1);
- // Update cache with new value.
- // We are short on registers, so use no_reg as scratch.
- // This gives slightly larger code.
- if (tagged) {
- __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
- } else { // UNTAGGED.
- CpuFeatures::Scope scope(SSE2);
- __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
- __ fld_d(Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- }
- GenerateOperation(masm, type_);
- __ mov(Operand(ecx, 0), ebx);
- __ mov(Operand(ecx, kIntSize), edx);
- __ mov(Operand(ecx, 2 * kIntSize), eax);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- if (tagged) {
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- CpuFeatures::Scope scope(SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
-
- // Skip cache and return answer directly, only in untagged case.
- __ bind(&skip_cache);
- __ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
- __ fld_d(Operand(esp, 0));
- GenerateOperation(masm, type_);
- __ fstp_d(Operand(esp, 0));
- __ movdbl(xmm1, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- // We return the value in xmm1 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Allocate an unused object bigger than a HeapNumber.
- __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-
- // Call runtime, doing whatever allocation and cleanup is necessary.
- if (tagged) {
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- ExternalReference runtime =
- ExternalReference(RuntimeFunction(), masm->isolate());
- __ TailCallExternalReference(runtime, 1, 1);
- } else { // UNTAGGED.
- CpuFeatures::Scope scope(SSE2);
- __ bind(&runtime_call_clear_stack);
- __ bind(&runtime_call);
- __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(eax);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
- }
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(
- MacroAssembler* masm, TranscendentalCache::Type type) {
- // Only free register is edi.
- // Input value is on FP stack, and also in ebx/edx.
- // Input value is possibly in xmm1.
- // Address of result (a newly allocated HeapNumber) may be in eax.
- if (type == TranscendentalCache::SIN ||
- type == TranscendentalCache::COS ||
- type == TranscendentalCache::TAN) {
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range, done;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ mov(edi, edx);
- __ and_(edi, Immediate(0x7ff00000)); // Exponent only.
- int supported_exponent_limit =
- (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
- __ cmp(edi, Immediate(supported_exponent_limit));
- __ j(below, &in_range, Label::kNear);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmp(edi, Immediate(0x7ff00000));
- Label non_nan_result;
- __ j(not_equal, &non_nan_result, Label::kNear);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- // NaN is represented by 0x7ff8000000000000.
- __ push(Immediate(0x7ff80000));
- __ push(Immediate(0));
- __ fld_d(Operand(esp, 0));
- __ add(esp, Immediate(2 * kPointerSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&non_nan_result);
-
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ mov(edi, eax); // Save eax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ test(eax, Immediate(5));
- __ j(zero, &no_exceptions, Label::kNear);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ test(eax, Immediate(0x400 /* C2 */));
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- __ fstp(0);
- __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
-
- // FPU Stack: input % 2*pi
- __ bind(&in_range);
- switch (type) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- case TranscendentalCache::TAN:
- // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
- // FP register stack.
- __ fptan();
- __ fstp(0); // Pop FP register stack.
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
- } else {
- ASSERT(type == TranscendentalCache::LOG);
- __ fldln2();
- __ fxch();
- __ fyl2x();
- }
-}
-
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-// Warning: can clobber inputs even when it jumps to |conversion_failure|!
-void FloatingPointHelper::LoadUnknownsAsIntegers(
- MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- // Test if arg1 is a Smi.
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, conversion_failure);
- } else {
- __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
- }
-
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- Factory* factory = masm->isolate()->factory();
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(edx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg1);
-
- // Get the untagged integer version of the edx heap number in ecx.
- if (left_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- ConvertHeapNumberToInt32(masm, edx, conversion_failure);
- } else {
- IntegerConvert(masm, edx, use_sse3, conversion_failure);
- }
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
-
- // Test if arg2 is a Smi.
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, conversion_failure);
- } else {
- __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
- }
-
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(ecx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the eax heap number in ecx.
-
- if (right_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- ConvertHeapNumberToInt32(masm, eax, conversion_failure);
- } else {
- IntegerConvert(masm, eax, use_sse3, conversion_failure);
- }
-
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register number) {
- Label load_smi, done;
-
- __ JumpIfSmi(number, &load_smi, Label::kNear);
- __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi);
- __ SmiUntag(number);
- __ push(number);
- __ fild_s(Operand(esp, 0));
- __ pop(number);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
- Label load_smi_edx, load_eax, load_smi_eax, done;
- // Load operand in edx into xmm0.
- __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
- __ bind(&load_eax);
- // Load operand in eax into xmm1.
- __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
-
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
- // Load operand in edx into xmm0, or branch to not_numbers.
- __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
- Factory* factory = masm->isolate()->factory();
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
- __ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ bind(&load_eax);
- // Load operand in eax into xmm1, or branch to not_numbers.
- __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
- __ j(equal, &load_float_eax, Label::kNear);
- __ jmp(not_numbers); // Argument in eax is not a number.
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
- __ jmp(&done, Label::kNear);
- __ bind(&load_float_eax);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, scratch);
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, scratch);
-}
-
-
-void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
- Label* non_int32,
- Register scratch) {
- CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, scratch, xmm2);
- CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, scratch, xmm2);
-}
-
-
-void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch) {
- __ cvttsd2si(int32_result, Operand(operand));
- __ cvtsi2sd(xmm_scratch, int32_result);
- __ pcmpeqd(xmm_scratch, operand);
- __ movmskps(scratch, xmm_scratch);
- // Two least significant bits should be both set.
- __ not_(scratch);
- __ test(scratch, Immediate(3));
- __ j(not_zero, non_int32);
-}
-
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, edx);
- } else {
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, eax);
- } else {
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi_1);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ mov(Operand(esp, 0), scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-}
-
-
-void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch) {
- Label test_other, done;
- // Test if both operands are floats or smi -> scratch=k_is_float;
- // Otherwise scratch = k_not_float.
- __ JumpIfSmi(edx, &test_other, Label::kNear);
- __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(scratch, factory->heap_number_map());
- __ j(not_equal, non_float); // argument in edx is not a number -> NaN
-
- __ bind(&test_other);
- __ JumpIfSmi(eax, &done, Label::kNear);
- __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(scratch, factory->heap_number_map());
- __ j(not_equal, non_float); // argument in eax is not a number -> NaN
-
- // Fall-through: Both operands are numbers.
- __ bind(&done);
-}
-
-
-void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatures::Scope use_sse2(SSE2);
- Factory* factory = masm->isolate()->factory();
- const Register exponent = eax;
- const Register base = edx;
- const Register scratch = ecx;
- const XMMRegister double_result = xmm3;
- const XMMRegister double_base = xmm2;
- const XMMRegister double_exponent = xmm1;
- const XMMRegister double_scratch = xmm4;
-
- Label call_runtime, done, exponent_not_smi, int_exponent;
-
- // Save 1 in double_result - we need this several times later on.
- __ mov(scratch, Immediate(1));
- __ cvtsi2sd(double_result, scratch);
-
- if (exponent_type_ == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack.
- __ mov(base, Operand(esp, 2 * kPointerSize));
- __ mov(exponent, Operand(esp, 1 * kPointerSize));
-
- __ JumpIfSmi(base, &base_is_smi, Label::kNear);
- __ cmp(FieldOperand(base, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
-
- __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
- __ jmp(&unpack_exponent, Label::kNear);
-
- __ bind(&base_is_smi);
- __ SmiUntag(base);
- __ cvtsi2sd(double_base, base);
-
- __ bind(&unpack_exponent);
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
- __ movdbl(double_exponent,
- FieldOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ movdbl(double_exponent,
- FieldOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type_ != INTEGER) {
- Label fast_power;
- // Detect integer exponents stored as double.
- __ cvttsd2si(exponent, Operand(double_exponent));
- // Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cmp(exponent, Immediate(0x80000000u));
- __ j(equal, &call_runtime);
- __ cvtsi2sd(double_scratch, exponent);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_exponent, double_scratch);
- __ j(equal, &int_exponent);
-
- if (exponent_type_ == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label continue_sqrt, continue_rsqrt, not_plus_half;
- // Test for 0.5.
- // Load double_scratch with 0.5.
- __ mov(scratch, Immediate(0x3F000000u));
- __ movd(double_scratch, scratch);
- __ cvtss2sd(double_scratch, double_scratch);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &not_plus_half, Label::kNear);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- // According to IEEE-754, single-precision -Infinity has the highest
- // 9 bits set and the lowest 23 bits cleared.
- __ mov(scratch, 0xFF800000u);
- __ movd(double_scratch, scratch);
- __ cvtss2sd(double_scratch, double_scratch);
- __ ucomisd(double_base, double_scratch);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_sqrt, Label::kNear);
- __ j(carry, &continue_sqrt, Label::kNear);
-
- // Set result to Infinity in the special case.
- __ xorps(double_result, double_result);
- __ subsd(double_result, double_scratch);
- __ jmp(&done);
-
- __ bind(&continue_sqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_scratch, double_scratch);
- __ addsd(double_scratch, double_base); // Convert -0 to +0.
- __ sqrtsd(double_result, double_scratch);
- __ jmp(&done);
-
- // Test for -0.5.
- __ bind(&not_plus_half);
- // Load double_exponent with -0.5 by substracting 1.
- __ subsd(double_scratch, double_result);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &fast_power, Label::kNear);
-
- // Calculates reciprocal of square root of base. Check for the special
- // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- // According to IEEE-754, single-precision -Infinity has the highest
- // 9 bits set and the lowest 23 bits cleared.
- __ mov(scratch, 0xFF800000u);
- __ movd(double_scratch, scratch);
- __ cvtss2sd(double_scratch, double_scratch);
- __ ucomisd(double_base, double_scratch);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_rsqrt, Label::kNear);
- __ j(carry, &continue_rsqrt, Label::kNear);
-
- // Set result to 0 in the special case.
- __ xorps(double_result, double_result);
- __ jmp(&done);
-
- __ bind(&continue_rsqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_exponent, double_exponent);
- __ addsd(double_exponent, double_base); // Convert -0 to +0.
- __ sqrtsd(double_exponent, double_exponent);
- __ divsd(double_result, double_exponent);
- __ jmp(&done);
- }
-
- // Using FPU instructions to calculate power.
- Label fast_power_failed;
- __ bind(&fast_power);
- __ fnclex(); // Clear flags to catch exceptions later.
- // Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), double_exponent);
- __ fld_d(Operand(esp, 0)); // E
- __ movdbl(Operand(esp, 0), double_base);
- __ fld_d(Operand(esp, 0)); // B, E
-
- // Exponent is in st(1) and base is in st(0)
- // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
- // FYL2X calculates st(1) * log2(st(0))
- __ fyl2x(); // X
- __ fld(0); // X, X
- __ frndint(); // rnd(X), X
- __ fsub(1); // rnd(X), X-rnd(X)
- __ fxch(1); // X - rnd(X), rnd(X)
- // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
- __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
- __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 2^(X-rnd(X)), rnd(X)
- // FSCALE calculates st(0) * 2^st(1)
- __ fscale(); // 2^X, rnd(X)
- __ fstp(1); // 2^X
- // Bail out to runtime in case of exceptions in the status word.
- __ fnstsw_ax();
- __ test_b(eax, 0x5F); // We check for all but precision exception.
- __ j(not_zero, &fast_power_failed, Label::kNear);
- __ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- __ jmp(&done);
-
- __ bind(&fast_power_failed);
- __ fninit();
- __ add(esp, Immediate(kDoubleSize));
- __ jmp(&call_runtime);
- }
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
- const XMMRegister double_scratch2 = double_exponent;
- __ mov(scratch, exponent); // Back up exponent.
- __ movsd(double_scratch, double_base); // Back up base.
- __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
-
- // Get absolute value of exponent.
- Label no_neg, while_true, while_false;
- __ test(scratch, scratch);
- __ j(positive, &no_neg, Label::kNear);
- __ neg(scratch);
- __ bind(&no_neg);
-
- __ j(zero, &while_false, Label::kNear);
- __ shr(scratch, 1);
- // Above condition means CF==0 && ZF==0. This means that the
- // bit that has been shifted out is 0 and the result is not 0.
- __ j(above, &while_true, Label::kNear);
- __ movsd(double_result, double_scratch);
- __ j(zero, &while_false, Label::kNear);
-
- __ bind(&while_true);
- __ shr(scratch, 1);
- __ mulsd(double_scratch, double_scratch);
- __ j(above, &while_true, Label::kNear);
- __ mulsd(double_result, double_scratch);
- __ j(not_zero, &while_true);
-
- __ bind(&while_false);
- // scratch has the original value of the exponent - if the exponent is
- // negative, return 1/result.
- __ test(exponent, exponent);
- __ j(positive, &done);
- __ divsd(double_scratch2, double_result);
- __ movsd(double_result, double_scratch2);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ xorps(double_scratch2, double_scratch2);
- __ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
- // double_exponent aliased as double_scratch2 has already been overwritten
- // and may not have contained the exponent value in the first place when the
- // exponent is a smi. We reset it with exponent value before bailing out.
- __ j(not_equal, &done);
- __ cvtsi2sd(double_exponent, exponent);
-
- // Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
- if (exponent_type_ == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in exponent.
- __ bind(&done);
- __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
- __ IncrementCounter(counters->math_pow(), 1);
- __ ret(2 * kPointerSize);
- } else {
- __ bind(&call_runtime);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(4, scratch);
- __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
- __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()), 4);
- }
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
-
- __ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1);
- __ ret(0);
- }
-}
-
-
-void ArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- if (kind() == Code::KEYED_LOAD_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
- __ j(not_equal, &miss);
- }
-
- StubCompiler::GenerateLoadArrayLength(masm, edx, eax, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- if (kind() == Code::KEYED_LOAD_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->prototype_string()));
- __ j(not_equal, &miss);
- }
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StringLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- if (kind() == Code::KEYED_LOAD_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
- __ j(not_equal, &miss);
- }
-
- StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
- support_wrapper_);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = edx;
- Register value = eax;
- Register scratch = ebx;
-
- if (kind() == Code::KEYED_LOAD_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
- __ j(not_equal, &miss);
- }
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
- __ push(receiver);
- __ push(value);
- __ push(scratch); // return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::GenerateStoreMiss(masm, kind());
-}
-
-
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, eax, reg_, inobject_, index_);
- __ ret(0);
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in edx and the parameter count is in eax.
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(edx, &slow, Label::kNear);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor, Label::kNear);
-
- // Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
- // check for free.
- __ cmp(edx, eax);
- __ j(above_equal, &slow, Label::kNear);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebp, eax, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(edx, ecx);
- __ j(above_equal, &slow, Label::kNear);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebx, ecx, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(ebx); // Return address.
- __ push(edx);
- __ push(ebx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[4] : number of parameters
- // esp[8] : receiver displacement
- // esp[12] : function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &runtime, Label::kNear);
-
- // Patch the arguments.length and the parameters pointer.
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[4] : number of parameters (tagged)
- // esp[8] : receiver displacement
- // esp[12] : function
-
- // ebx = parameter count (tagged)
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
-
- // Check if the calling frame is an arguments adaptor frame.
- // TODO(rossberg): Factor out some of the bits that are shared with the other
- // Generate* functions.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame, Label::kNear);
-
- // No adaptor, parameter count = argument count.
- __ mov(ecx, ebx);
- __ jmp(&try_allocate, Label::kNear);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
-
- // ebx = parameter count (tagged)
- // ecx = argument count (tagged)
- // esp[4] = parameter count (tagged)
- // esp[8] = address of receiver argument
- // Compute the mapped parameter count = min(ebx, ecx) in ebx.
- __ cmp(ebx, ecx);
- __ j(less_equal, &try_allocate, Label::kNear);
- __ mov(ebx, ecx);
-
- __ bind(&try_allocate);
-
- // Save mapped parameter count.
- __ push(ebx);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- Label no_parameter_map;
- __ test(ebx, ebx);
- __ j(zero, &no_parameter_map, Label::kNear);
- __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
- __ bind(&no_parameter_map);
-
- // 2. Backing store.
- __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
-
- // eax = address of new object(s) (tagged)
- // ecx = argument count (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[8] = parameter count (tagged)
- // esp[12] = address of receiver argument
- // Get the arguments boilerplate from the current native context into edi.
- Label has_mapped_parameters, copy;
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
- __ mov(ebx, Operand(esp, 0 * kPointerSize));
- __ test(ebx, ebx);
- __ j(not_zero, &has_mapped_parameters, Label::kNear);
- __ mov(edi, Operand(edi,
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
- __ jmp(&copy, Label::kNear);
-
- __ bind(&has_mapped_parameters);
- __ mov(edi, Operand(edi,
- Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
- __ bind(&copy);
-
- // eax = address of new object (tagged)
- // ebx = mapped parameter count (tagged)
- // ecx = argument count (tagged)
- // edi = address of boilerplate object (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[8] = parameter count (tagged)
- // esp[12] = address of receiver argument
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ mov(edx, FieldOperand(edi, i));
- __ mov(FieldOperand(eax, i), edx);
- }
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- edx);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- ecx);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, edi will point there, otherwise to the
- // backing store.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
-
- // eax = address of new object (tagged)
- // ebx = mapped parameter count (tagged)
- // ecx = argument count (tagged)
- // edi = address of parameter map or backing store (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[8] = parameter count (tagged)
- // esp[12] = address of receiver argument
- // Free a register.
- __ push(eax);
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ test(ebx, ebx);
- __ j(zero, &skip_parameter_map);
-
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(FACTORY->non_strict_arguments_elements_map()));
- __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
- __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameter thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ push(ecx);
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ add(ebx, Operand(esp, 4 * kPointerSize));
- __ sub(ebx, eax);
- __ mov(ecx, FACTORY->the_hole_value());
- __ mov(edx, edi);
- __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
- // eax = loop variable (tagged)
- // ebx = mapping index (tagged)
- // ecx = the hole value
- // edx = address of parameter map (tagged)
- // edi = address of backing store (tagged)
- // esp[0] = argument count (tagged)
- // esp[4] = address of new object (tagged)
- // esp[8] = mapped parameter count (tagged)
- // esp[16] = parameter count (tagged)
- // esp[20] = address of receiver argument
- __ jmp(&parameters_test, Label::kNear);
-
- __ bind(&parameters_loop);
- __ sub(eax, Immediate(Smi::FromInt(1)));
- __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
- __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
- __ add(ebx, Immediate(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ test(eax, eax);
- __ j(not_zero, &parameters_loop, Label::kNear);
- __ pop(ecx);
-
- __ bind(&skip_parameter_map);
-
- // ecx = argument count (tagged)
- // edi = address of backing store (tagged)
- // esp[0] = address of new object (tagged)
- // esp[4] = mapped parameter count (tagged)
- // esp[12] = parameter count (tagged)
- // esp[16] = address of receiver argument
- // Copy arguments header and remaining slots (if there are any).
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
- Label arguments_loop, arguments_test;
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
- __ sub(edx, ebx);
- __ jmp(&arguments_test, Label::kNear);
-
- __ bind(&arguments_loop);
- __ sub(edx, Immediate(kPointerSize));
- __ mov(eax, Operand(edx, 0));
- __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
- __ add(ebx, Immediate(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ cmp(ebx, ecx);
- __ j(less, &arguments_loop, Label::kNear);
-
- // Restore.
- __ pop(eax); // Address of arguments object.
- __ pop(ebx); // Parameter count.
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ pop(eax); // Remove saved parameter count.
- __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[4] : number of parameters
- // esp[8] : receiver displacement
- // esp[12] : function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame, Label::kNear);
-
- // Get the length from the frame.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ jmp(&try_allocate, Label::kNear);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ test(ecx, ecx);
- __ j(zero, &add_arguments_object, Label::kNear);
- __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current native context.
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
- const int offset =
- Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
- __ mov(edi, Operand(edi, offset));
-
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ mov(ebx, FieldOperand(edi, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- ecx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ test(ecx, ecx);
- __ j(zero, &done, Label::kNear);
-
- // Get the parameters pointer from the stack.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
-
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
- // Untag the length for the loop below.
- __ SmiUntag(ecx);
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
- __ add(edi, Immediate(kPointerSize));
- __ sub(edx, Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time or if regexp entry in generated code is turned off runtime switch or
- // at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: last_match_info (expected JSArray)
- // esp[8]: previous index
- // esp[12]: subject string
- // esp[16]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime;
- Factory* factory = masm->isolate()->factory();
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(
- masm->isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
- __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(ebx, ebx);
- __ j(zero, &runtime);
-
- // Check that the first argument is a JSRegExp object.
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
- __ j(not_equal, &runtime);
-
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ test(ecx, Immediate(kSmiTagMask));
- __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // ecx: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
- __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
- __ j(not_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // Check that the number of captures fit in the static offsets vector buffer.
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Check (number_of_captures + 1) * 2 <= offsets vector size
- // Or number_of_captures * 2 <= offsets vector size - 2
- // Multiplying by 2 comes for free since edx is smi-tagged.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
- __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
- __ j(above, &runtime);
-
- // Reset offset for possibly sliced string.
- __ Set(edi, Immediate(0));
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ JumpIfSmi(eax, &runtime);
- __ mov(edx, eax); // Make a copy of the original subject string.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
- // eax: subject string
- // edx: subject string
- // ebx: subject string instance type
- // ecx: RegExp data (FixedArray)
- // Handle subject string according to its encoding and representation:
- // (1) Sequential two byte? If yes, go to (9).
- // (2) Sequential one byte? If yes, go to (6).
- // (3) Anything but sequential or cons? If yes, go to (7).
- // (4) Cons string. If the string is flat, replace subject with first string.
- // Otherwise bailout.
- // (5a) Is subject sequential two byte? If yes, go to (9).
- // (5b) Is subject external? If yes, go to (8).
- // (6) One byte sequential. Load regexp code for one byte.
- // (E) Carry on.
- /// [...]
-
- // Deferred code at the end of the stub:
- // (7) Not a long external string? If yes, go to (10).
- // (8) External string. Make it, offset-wise, look like a sequential string.
- // (8a) Is the external string one byte? If yes, go to (6).
- // (9) Two byte sequential. Load regexp code for one byte. Go to (E).
- // (10) Short external string or not a string? If yes, bail out to runtime.
- // (11) Sliced string. Replace subject with parent. Go to (5a).
-
- Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
- external_string /* 8 */, check_underlying /* 5a */,
- not_seq_nor_cons /* 7 */, check_code /* E */,
- not_long_external /* 10 */;
-
- // (1) Sequential two byte? If yes, go to (9).
- __ and_(ebx, kIsNotStringMask |
- kStringRepresentationMask |
- kStringEncodingMask |
- kShortExternalStringMask);
- STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string); // Go to (9).
-
- // (2) Sequential one byte? If yes, go to (6).
- // Any other sequential string must be one byte.
- __ and_(ebx, Immediate(kIsNotStringMask |
- kStringRepresentationMask |
- kShortExternalStringMask));
- __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6).
-
- // (3) Anything but sequential or cons? If yes, go to (7).
- // We check whether the subject string is a cons, since sequential strings
- // have already been covered.
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
- STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ cmp(ebx, Immediate(kExternalStringTag));
- __ j(greater_equal, &not_seq_nor_cons); // Go to (7).
-
- // (4) Cons string. Check that it's flat.
- // Replace subject with first string and reload instance type.
- __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
- __ j(not_equal, &runtime);
- __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
- __ bind(&check_underlying);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ mov(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
- // (5a) Is subject sequential two byte? If yes, go to (9).
- __ test_b(ebx, kStringRepresentationMask | kStringEncodingMask);
- STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string); // Go to (9).
- // (5b) Is subject external? If yes, go to (8).
- __ test_b(ebx, kStringRepresentationMask);
- // The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
- __ j(not_zero, &external_string); // Go to (8).
-
- // eax: sequential subject string (or look-alike, external string)
- // edx: original subject string
- // ecx: RegExp data (FixedArray)
- // (6) One byte sequential. Load regexp code for one byte.
- __ bind(&seq_one_byte_string);
- // Load previous index and check range before edx is overwritten. We have
- // to use edx instead of eax here because it might have been only made to
- // look like a sequential string when it actually is an external string.
- __ mov(ebx, Operand(esp, kPreviousIndexOffset));
- __ JumpIfNotSmi(ebx, &runtime);
- __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
- __ j(above_equal, &runtime);
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(ecx, Immediate(1)); // Type is one byte.
-
- // (E) Carry on. String handling is done.
- __ bind(&check_code);
- // edx: irregexp code
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // a smi (code flushing support).
- __ JumpIfSmi(edx, &runtime);
-
- // eax: subject string
- // ebx: previous index (smi)
- // edx: code
- // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
- // All checks done. Now push arguments for native regexp code.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->regexp_entry_native(), 1);
-
- // Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 9;
- __ EnterApiExitFrame(kRegExpExecuteArguments);
-
- // Argument 9: Pass current isolate address.
- __ mov(Operand(esp, 8 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
- __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ mov(Operand(esp, 6 * kPointerSize), esi);
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
-
- // Argument 5: static offsets vector buffer.
- __ mov(Operand(esp, 4 * kPointerSize),
- Immediate(ExternalReference::address_of_static_offsets_vector(
- masm->isolate())));
-
- // Argument 2: Previous index.
- __ SmiUntag(ebx);
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
-
- // Argument 1: Original subject string.
- // The original subject is in the previous stack frame. Therefore we have to
- // use ebp, which points exactly to one pointer size below the previous esp.
- // (Because creating a new stack frame pushes the previous ebp onto the stack
- // and thereby moves up esp by one kPointerSize.)
- __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), esi);
-
- // esi: original subject string
- // eax: underlying subject string
- // ebx: previous index
- // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
- // edx: code
- // Argument 4: End of string data
- // Argument 3: Start of string data
- // Prepare start and end index of the input.
- // Load the length from the original sliced string if that is the case.
- __ mov(esi, FieldOperand(esi, String::kLengthOffset));
- __ add(esi, edi); // Calculate input end wrt offset.
- __ SmiUntag(edi);
- __ add(ebx, edi); // Calculate input start wrt offset.
-
- // ebx: start index of the input string
- // esi: end index of the input string
- Label setup_two_byte, setup_rest;
- __ test(ecx, ecx);
- __ j(zero, &setup_two_byte, Label::kNear);
- __ SmiUntag(esi);
- __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize));
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize));
- __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
- __ jmp(&setup_rest, Label::kNear);
-
- __ bind(&setup_two_byte);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1); // esi is smi (powered by 2).
- __ lea(ecx, FieldOperand(eax, esi, times_1, SeqTwoByteString::kHeaderSize));
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
- __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
-
- __ bind(&setup_rest);
-
- // Locate the code entry and call it.
- __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(edx);
-
- // Drop arguments and come back to JS mode.
- __ LeaveApiExitFrame();
-
- // Check the result.
- Label success;
- __ cmp(eax, 1);
- // We expect exactly one result since we force the called regexp to behave
- // as non-global.
- __ j(equal, &success);
- Label failure;
- __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
- __ j(equal, &failure);
- __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
- // If not exception it can only be retry. Handle that in the runtime system.
- __ j(not_equal, &runtime);
- // Result must now be exception. If there is no pending exception already a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // haven't created the exception yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- masm->isolate());
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
- __ mov(eax, Operand::StaticVariable(pending_exception));
- __ cmp(edx, eax);
- __ j(equal, &runtime);
- // For exception, throw the exception again.
-
- // Clear the pending exception variable.
- __ mov(Operand::StaticVariable(pending_exception), edx);
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ cmp(eax, factory->termination_exception());
- Label throw_termination_exception;
- __ j(equal, &throw_termination_exception, Label::kNear);
-
- // Handle normal exception by following handler chain.
- __ Throw(eax);
-
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(eax);
-
- __ bind(&failure);
- // For failure to match, return null.
- __ mov(eax, factory->null_value());
- __ ret(4 * kPointerSize);
-
- // Load RegExp data.
- __ bind(&success);
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(edx, Immediate(2)); // edx was a smi.
-
- // edx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- // Check that the fourth object is a JSArray object.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
- __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(eax, factory->fixed_array_map());
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ SmiUntag(eax);
- __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmp(edx, eax);
- __ j(greater, &runtime);
-
- // ebx: last_match_info backing store (FixedArray)
- // edx: number of capture registers
- // Store the capture count.
- __ SmiTag(edx); // Number of capture registers to smi.
- __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
- __ SmiUntag(edx); // Number of capture registers back from smi.
- // Store last subject and last input.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(ecx, eax);
- __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
- __ RecordWriteField(ebx,
- RegExpImpl::kLastSubjectOffset,
- eax,
- edi,
- kDontSaveFPRegs);
- __ mov(eax, ecx);
- __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
- __ RecordWriteField(ebx,
- RegExpImpl::kLastInputOffset,
- eax,
- edi,
- kDontSaveFPRegs);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(masm->isolate());
- __ mov(ecx, Immediate(address_of_static_offsets_vector));
-
- // ebx: last_match_info backing store (FixedArray)
- // ecx: offsets vector
- // edx: number of capture registers
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
- __ bind(&next_capture);
- __ sub(edx, Immediate(1));
- __ j(negative, &done, Label::kNear);
- // Read the value from the static offsets vector buffer.
- __ mov(edi, Operand(ecx, edx, times_int_size, 0));
- __ SmiTag(edi);
- // Store the smi value in the last match info.
- __ mov(FieldOperand(ebx,
- edx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- edi);
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ ret(4 * kPointerSize);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-
- // Deferred code for string handling.
- // (7) Not a long external string? If yes, go to (10).
- __ bind(&not_seq_nor_cons);
- // Compare flags are still set from (3).
- __ j(greater, &not_long_external, Label::kNear); // Go to (10).
-
- // (8) External string. Short external strings have been ruled out.
- __ bind(&external_string);
- // Reload instance type.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ test_b(ebx, kIsIndirectStringMask);
- __ Assert(zero, "external string expected, but not found");
- }
- __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kTwoByteStringTag == 0);
- // (8a) Is the external string one byte? If yes, go to (6).
- __ test_b(ebx, kStringEncodingMask);
- __ j(not_zero, &seq_one_byte_string); // Goto (6).
-
- // eax: sequential subject string (or look-alike, external string)
- // edx: original subject string
- // ecx: RegExp data (FixedArray)
- // (9) Two byte sequential. Load regexp code for one byte. Go to (E).
- __ bind(&seq_two_byte_string);
- // Load previous index and check range before edx is overwritten. We have
- // to use edx instead of eax here because it might have been only made to
- // look like a sequential string when it actually is an external string.
- __ mov(ebx, Operand(esp, kPreviousIndexOffset));
- __ JumpIfNotSmi(ebx, &runtime);
- __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
- __ j(above_equal, &runtime);
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
- __ Set(ecx, Immediate(0)); // Type is two byte.
- __ jmp(&check_code); // Go to (E).
-
- // (10) Not a string or a short external string? If yes, bail out to runtime.
- __ bind(&not_long_external);
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
- __ j(not_zero, &runtime);
-
- // (11) Sliced string. Replace subject with parent. Go to (5a).
- // Load offset into edi and replace subject string with parent.
- __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
- __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
- __ jmp(&check_underlying); // Go to (5a).
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- __ mov(ebx, Operand(esp, kPointerSize * 3));
- __ JumpIfNotSmi(ebx, &slowcase);
- __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
- __ j(above, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in ebx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- ebx, // In: Number of elements as a smi
- REGISTER_VALUE_IS_SMI,
- eax, // Out: Start of allocation (tagged).
- ecx, // Out: End of allocation.
- edx, // Scratch register
- &slowcase,
- TAG_OBJECT);
- // eax: Start of allocated area, object-tagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ mov(edx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
- Factory* factory = masm->isolate()->factory();
- __ mov(ecx, Immediate(factory->empty_fixed_array()));
- __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
- __ mov(edx, FieldOperand(edx, GlobalObject::kNativeContextOffset));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
- __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
-
- // Set input, index and length fields from arguments.
- __ mov(ecx, Operand(esp, kPointerSize * 1));
- __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
- __ mov(ecx, Operand(esp, kPointerSize * 2));
- __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
- __ mov(ecx, Operand(esp, kPointerSize * 3));
- __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
-
- // Fill out the elements FixedArray.
- // eax: JSArray.
- // ebx: FixedArray.
- // ecx: Number of elements in array, as smi.
-
- // Set map.
- __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(factory->fixed_array_map()));
- // Set length.
- __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
- // Fill contents of fixed-array with undefined.
- __ SmiUntag(ecx);
- __ mov(edx, Immediate(factory->undefined_value()));
- __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
- // Fill fixed array elements with undefined.
- // eax: JSArray.
- // ecx: Number of elements to fill.
- // ebx: Start of elements in FixedArray.
- // edx: undefined.
- Label loop;
- __ test(ecx, ecx);
- __ bind(&loop);
- __ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero.
- __ sub(ecx, Immediate(1));
- __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
- __ jmp(&loop);
-
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
- __ mov(number_string_cache,
- Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label smi_hash_calculated;
- Label load_result_from_cache;
- if (object_is_smi) {
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- } else {
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(object, &not_smi, Label::kNear);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated, Label::kNear);
- __ bind(&not_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache, Label::kNear);
- }
-
- __ bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ mov(ebx, Operand(esp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
-}
-
-
-static void CheckInputType(MacroAssembler* masm,
- Register input,
- CompareIC::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareIC::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ cmp(FieldOperand(input, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(not_equal, fail);
- }
- // We could be strict about internalized/non-internalized here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
-
-static void BranchIfNotInternalizedString(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, kIsInternalizedMask | kIsNotStringMask);
- __ cmp(scratch, kInternalizedTag | kStringTag);
- __ j(not_equal, label);
-}
-
-
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
- Label check_unequal_objects;
- Condition cc = GetCondition();
-
- Label miss;
- CheckInputType(masm, edx, left_, &miss);
- CheckInputType(masm, eax, right_, &miss);
-
- // Compare two smis.
- Label non_smi, smi_done;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
- __ sub(edx, eax); // Return on the result of the subtraction.
- __ j(no_overflow, &smi_done, Label::kNear);
- __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
- __ bind(&smi_done);
- __ mov(eax, edx);
- __ ret(0);
- __ bind(&non_smi);
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- {
- Label not_user_equal, user_equal;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &not_user_equal);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &not_user_equal);
-
- __ CmpObjectType(eax, JS_OBJECT_TYPE, ebx);
- __ j(not_equal, &not_user_equal);
-
- __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &not_user_equal);
-
- __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &user_equal);
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &user_equal);
-
- __ jmp(&not_user_equal);
-
- __ bind(&user_equal);
-
- __ pop(ebx); // Return address.
- __ push(eax);
- __ push(edx);
- __ push(ebx);
- __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
- __ bind(&not_user_equal);
- }
-
- // Identical objects can be compared fast, but there are some tricky cases
- // for NaN and undefined.
- {
- Label not_identical;
- __ cmp(eax, edx);
- __ j(not_equal, &not_identical);
-
- if (cc != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- Label check_for_nan;
- __ cmp(edx, masm->isolate()->factory()->undefined_value());
- __ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- __ ret(0);
- __ bind(&check_for_nan);
- }
-
- // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
- // so we do the second best thing - test it ourselves.
- Label heap_number;
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(equal, &heap_number, Label::kNear);
- if (cc != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &not_identical);
- }
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if
- // it's not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ Set(eax, Immediate(0));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
- // bits.
- __ add(edx, edx);
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- if (cc == equal) {
- STATIC_ASSERT(EQUAL != 1);
- __ setcc(above_equal, eax);
- __ ret(0);
- } else {
- Label nan;
- __ j(above_equal, &nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- __ bind(&nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- __ ret(0);
- }
-
- __ bind(&not_identical);
- }
-
- // Strict equality can quickly decide whether objects are equal.
- // Non-strict object equality is slower, so it is handled later in the stub.
- if (cc == equal && strict()) {
- Label slow; // Fallthrough label.
- Label not_smis;
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, eax);
- __ test(ecx, edx);
- __ j(not_zero, &not_smis, Label::kNear);
- // One operand is a smi.
-
- // Check whether the non-smi is a heap number.
- STATIC_ASSERT(kSmiTagMask == 1);
- // ecx still holds eax & kSmiTag, which is either zero or one.
- __ sub(ecx, Immediate(0x01));
- __ mov(ebx, edx);
- __ xor_(ebx, eax);
- __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, eax);
- // if eax was smi, ebx is now edx, else eax.
-
- // Check if the non-smi operand is a heap number.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- // If heap number, handle it in the slow case.
- __ j(equal, &slow, Label::kNear);
- // Return non-equal (ebx is not zero)
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&not_smis);
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different
- // There is no test for undetectability in strict equality.
-
- // Get the type of the first operand.
- // If the first object is a JS object, we have done pointer comparison.
- Label first_non_object;
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &first_non_object, Label::kNear);
-
- // Return non-zero (eax is not zero)
- Label return_not_equal;
- STATIC_ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- __ bind(&slow);
- }
-
- // Generate the number comparison code.
- Label non_number_comparison;
- Label unordered;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- CpuFeatures::Scope use_cmov(CMOV);
-
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
- } else {
- FloatingPointHelper::CheckFloatOperands(
- masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperand(masm, eax);
- FloatingPointHelper::LoadFloatOperand(masm, edx);
- __ FCmp();
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
-
- Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, Label::kNear);
- __ j(above, &above_label, Label::kNear);
-
- __ Set(eax, Immediate(0));
- __ ret(0);
-
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
-
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(0);
- }
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc != not_equal);
- if (cc == less || cc == less_equal) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- }
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
-
- // Fast negative check for internalized-to-internalized equality.
- Label check_for_strings;
- if (cc == equal) {
- BranchIfNotInternalizedString(masm, &check_for_strings, eax, ecx);
- BranchIfNotInternalizedString(masm, &check_for_strings, edx, ecx);
-
- // We've already checked for object identity, so if both operands
- // are internalized they aren't equal. Register eax already holds a
- // non-zero value, which indicates not equal, so just return.
- __ ret(0);
- }
-
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
- &check_unequal_objects);
-
- // Inline comparison of ASCII strings.
- if (cc == equal) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- edx,
- eax,
- ecx,
- ebx);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- edx,
- eax,
- ecx,
- ebx,
- edi);
- }
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
-#endif
-
- __ bind(&check_unequal_objects);
- if (cc == equal && !strict()) {
- // Non-strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- Label not_both_objects;
- Label return_unequal;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(ecx, Operand(eax, edx, times_1, 0));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects, Label::kNear);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &not_both_objects, Label::kNear);
- __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
- __ j(below, &not_both_objects, Label::kNear);
- // We do not bail out after this point. Both are JSObjects, and
- // they are equal if and only if both are undetectable.
- // The and of the undetectable flags is 1 if and only if they are equal.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal, Label::kNear);
- __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal, Label::kNear);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(eax, Immediate(EQUAL));
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax,
- // or return equal if we fell through to here.
- __ ret(0); // rax, rdx were pushed
- __ bind(&not_both_objects);
- }
-
- // Push arguments below the return address.
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc == equal) {
- builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- builtin = Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- }
-
- // Restore return address on the stack.
- __ push(ecx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // ebx : cache cell for call target
- // edi : the function to call
- ASSERT(!FLAG_optimize_constructed_arrays);
- Isolate* isolate = masm->isolate();
- Label initialize, done;
-
- // Load the cache state into ecx.
- __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmp(ecx, edi);
- __ j(equal, &done, Label::kNear);
- __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ j(equal, &done, Label::kNear);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
- __ j(equal, &initialize, Label::kNear);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ jmp(&done, Label::kNear);
-
- // An uninitialized cache is patched with the function.
- __ bind(&initialize);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
-
-
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // ebx : cache cell for call target
- // edi : the function to call
- ASSERT(FLAG_optimize_constructed_arrays);
- Isolate* isolate = masm->isolate();
- Label initialize, done, miss, megamorphic, not_array_function;
-
- // Load the cache state into ecx.
- __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmp(ecx, edi);
- __ j(equal, &done);
- __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ j(equal, &done);
-
- // Special handling of the Array() function, which caches not only the
- // monomorphic Array function but the initial ElementsKind with special
- // sentinels
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(isolate,
- LAST_FAST_ELEMENTS_KIND);
- __ cmp(ecx, Immediate(terminal_kind_sentinel));
- __ j(above, &miss);
- // Load the global or builtins object from the current context
- __ LoadGlobalContext(ecx);
- // Make sure the function is the Array() function
- __ cmp(edi, Operand(ecx,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
- __ j(not_equal, &megamorphic);
- __ jmp(&done);
-
- __ bind(&miss);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
- __ j(equal, &initialize);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ bind(&megamorphic);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ jmp(&done, Label::kNear);
-
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
- __ bind(&initialize);
- __ LoadGlobalContext(ecx);
- // Make sure the function is the Array() function
- __ cmp(edi, Operand(ecx,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor, install a sentinel value in
- // the constructor's type info cell that will track the initial ElementsKind
- // that should be used for the array when its constructed.
- Handle<Object> initial_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(isolate,
- GetInitialFastElementsKind());
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(initial_kind_sentinel));
- __ jmp(&done);
-
- __ bind(&not_array_function);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // ebx : cache cell for call target
- // edi : the function to call
- Isolate* isolate = masm->isolate();
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label receiver_ok;
- // Get the receiver from the stack.
- // +1 ~ return address
- __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
- // Call as function is indicated with the hole.
- __ cmp(eax, isolate->factory()->the_hole_value());
- __ j(not_equal, &receiver_ok, Label::kNear);
- // Patch the receiver on the stack with the global receiver object.
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ecx);
- __ bind(&receiver_ok);
- }
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &non_function);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
-
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ cmp(eax, isolate->factory()->the_hole_value());
- __ j(equal, &call_as_function);
- __ InvokeFunction(edi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(edi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- }
- // Check for function proxy.
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function);
- __ pop(ecx);
- __ push(edi); // put proxy as additional argument under return address
- __ push(ecx);
- __ Set(eax, Immediate(argc_ + 1));
- __ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_FUNCTION);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(&non_function);
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
- __ Set(eax, Immediate(argc_));
- __ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-void CallConstructStub::Generate(MacroAssembler* masm) {
- // eax : number of arguments
- // ebx : cache cell for call target
- // edi : constructor function
- Label slow, non_function_call;
-
- // Check that function is not a smi.
- __ JumpIfSmi(edi, &non_function_call);
- // Check that function is a JSFunction.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Jump to the function-specific construct stub.
- Register jmp_reg = FLAG_optimize_constructed_arrays ? ecx : ebx;
- __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(jmp_reg, FieldOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
- __ jmp(jmp_reg);
-
- // edi: called object
- // eax: number of arguments
- // ecx: object map
- Label do_call;
- __ bind(&slow);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function_call);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing eax).
- __ Set(ebx, Immediate(0));
- Handle<Code> arguments_adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-bool CEntryStub::NeedsImmovableCode() {
- return false;
-}
-
-
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- // It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
-}
-
-
-void CodeStub::GenerateFPStubs(Isolate* isolate) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CEntryStub save_doubles(1, kSaveFPRegs);
- // Stubs might already be in the snapshot, detect that and don't regenerate,
- // which would lead to code stub initialization state being messed up.
- Code* save_doubles_code;
- if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
- save_doubles_code = *(save_doubles.GetCode(isolate));
- }
- save_doubles_code->set_is_pregenerated(true);
- isolate->set_fp_stubs_generated(true);
- }
-}
-
-
-void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode(isolate);
- code->set_is_pregenerated(true);
-}
-
-
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- __ mov(scratch, value);
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, 0xf);
- __ cmp(scratch, 0xf);
- __ j(equal, oom_label);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope) {
- // eax: result parameter for PerformGC, if any
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: pointer to the first argument (C callee-saved)
-
- // Result returned in eax, or eax+edx if result_size_ is 2.
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack alignment is known to be correct. This function takes one argument
- // which is passed on the stack, and we know that the stack has been
- // prepared to pass at least one argument.
- __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
- __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
- if (always_allocate_scope) {
- __ inc(Operand::StaticVariable(scope_depth));
- }
-
- // Call C function.
- __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
- __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- __ call(ebx);
- // Result is in eax or edx:eax - do not destroy these registers!
-
- if (always_allocate_scope) {
- __ dec(Operand::StaticVariable(scope_depth));
- }
-
- // Make sure we're not trying to return 'the hole' from the runtime
- // call as this may lead to crashes in the IC code later.
- if (FLAG_debug_code) {
- Label okay;
- __ cmp(eax, masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, &okay, Label::kNear);
- __ int3();
- __ bind(&okay);
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- __ lea(ecx, Operand(eax, 1));
- // Lower 2 bits of ecx are 0 iff eax has failure tag.
- __ test(ecx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned);
-
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, masm->isolate());
-
- // Check that there is no pending exception, otherwise we
- // should have returned some failure value.
- if (FLAG_debug_code) {
- __ push(edx);
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
- Label okay;
- __ cmp(edx, Operand::StaticVariable(pending_exception_address));
- // Cannot use check here as it attempts to generate call into runtime.
- __ j(equal, &okay, Label::kNear);
- __ int3();
- __ bind(&okay);
- __ pop(edx);
- }
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
- __ ret(0);
-
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC continue at retry label
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, Label::kNear);
-
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- __ mov(eax, Operand::StaticVariable(pending_exception_address));
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
- __ mov(Operand::StaticVariable(pending_exception_address), edx);
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ cmp(eax, masm->isolate()->factory()->termination_exception());
- __ j(equal, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // eax: number of arguments including receiver
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // esi: current context (C callee-saved)
- // edi: JS function of the caller (C callee-saved)
-
- // NOTE: Invocations of builtins may return failure objects instead
- // of a proper result. The builtin entry handles this by performing
- // a garbage collection and retrying the builtin (twice).
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
-
- // eax: result parameter for PerformGC, if any (setup below)
- // ebx: pointer to builtin function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: argv pointer (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ mov(Operand::StaticVariable(external_caught), Immediate(false));
-
- // Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- Label already_have_failure;
- JumpIfOOM(masm, eax, ecx, &already_have_failure);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException(0x1)));
- __ bind(&already_have_failure);
- __ mov(Operand::StaticVariable(pending_exception), eax);
- // Fall through to the next label.
-
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(eax);
-
- __ bind(&throw_normal_exception);
- __ Throw(eax);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, handler_entry, exit;
- Label not_outermost_js, not_outermost_js_2;
-
- // Set up frame.
- __ push(ebp);
- __ mov(ebp, esp);
-
- // Push marker in two places.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ push(Immediate(Smi::FromInt(marker))); // context slot
- __ push(Immediate(Smi::FromInt(marker))); // function slot
- // Save callee-saved registers (C calling conventions).
- __ push(edi);
- __ push(esi);
- __ push(ebx);
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, masm->isolate());
- __ push(Operand::StaticVariable(c_entry_fp));
-
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
- masm->isolate());
- __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ j(not_equal, &not_outermost_js, Label::kNear);
- __ mov(Operand::StaticVariable(js_entry_sp), ebp);
- __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- __ jmp(&invoke, Label::kNear);
- __ bind(&not_outermost_js);
- __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- masm->isolate());
- __ mov(Operand::StaticVariable(pending_exception), eax);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
- __ bind(&invoke);
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
-
- // Clear any pending exceptions.
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
- __ mov(Operand::StaticVariable(pending_exception), edx);
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through JS entry trampoline builtin and
- // pop the faked function when we return. Notice that we cannot store a
- // reference to the trampoline code directly in this stub, because the
- // builtin stubs may not have been generated yet.
- if (is_construct) {
- ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- masm->isolate());
- __ mov(edx, Immediate(construct_entry));
- } else {
- ExternalReference entry(Builtins::kJSEntryTrampoline,
- masm->isolate());
- __ mov(edx, Immediate(entry));
- }
- __ mov(edx, Operand(edx, 0)); // deref address
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ call(edx);
-
- // Unlink this frame from the handler chain.
- __ PopTryHandler();
-
- __ bind(&exit);
- // Check if the current stack frame is marked as the outermost JS frame.
- __ pop(ebx);
- __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- __ j(not_equal, &not_outermost_js_2);
- __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ bind(&not_outermost_js_2);
-
- // Restore the top frame descriptor from the stack.
- __ pop(Operand::StaticVariable(ExternalReference(
- Isolate::kCEntryFPAddress,
- masm->isolate())));
-
- // Restore callee-saved registers (C calling conventions).
- __ pop(ebx);
- __ pop(esi);
- __ pop(edi);
- __ add(esp, Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(ebp);
- __ ret(0);
-}
-
-
-// Generate stub code for instanceof.
-// This code can patch a call site inlined cache of the instance of check,
-// which looks like this.
-//
-// 81 ff XX XX XX XX cmp edi, <the hole, patched to a map>
-// 75 0a jne <some near label>
-// b8 XX XX XX XX mov eax, <the hole, patched to either true or false>
-//
-// If call site patching is requested the stack will have the delta from the
-// return address to the cmp instruction just below the return address. This
-// also means that call site patching can only take place with arguments in
-// registers. TOS looks like this when call site patching is requested
-//
-// esp[0] : return address
-// esp[4] : delta from return address to cmp instruction
-//
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub.
- Register object = eax; // Object (lhs).
- Register map = ebx; // Map of the object.
- Register function = edx; // Function (rhs).
- Register prototype = edi; // Prototype of the function.
- Register scratch = ecx;
-
- // Constants describing the call site code to patch.
- static const int kDeltaToCmpImmediate = 2;
- static const int kDeltaToMov = 8;
- static const int kDeltaToMovImmediate = 9;
- static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
- static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
- static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
-
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
-
- ASSERT_EQ(object.code(), InstanceofStub::left().code());
- ASSERT_EQ(function.code(), InstanceofStub::right().code());
-
- // Get the object and function - they are always both needed.
- Label slow, not_js_object;
- if (!HasArgsInRegisters()) {
- __ mov(object, Operand(esp, 2 * kPointerSize));
- __ mov(function, Operand(esp, 1 * kPointerSize));
- }
-
- // Check that the left hand is a JS object.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ cmp(function, Operand::StaticArray(scratch,
- times_pointer_size,
- roots_array_start));
- __ j(not_equal, &miss, Label::kNear);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ cmp(map, Operand::StaticArray(
- scratch, times_pointer_size, roots_array_start));
- __ j(not_equal, &miss, Label::kNear);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(eax, Operand::StaticArray(
- scratch, times_pointer_size, roots_array_start));
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
- __ bind(&miss);
- }
-
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
-
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
- map);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
- function);
- } else {
- // The constants for the code patching are based on no push instructions
- // at the call site.
- ASSERT(HasArgsInRegisters());
- // Get return address and delta to inlined map check.
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
- __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
- __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
- __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
- }
- __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
- __ mov(Operand(scratch, 0), map);
- }
-
- // Loop through the prototype chain of the object looking for the function
- // prototype.
- __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
- Label loop, is_instance, is_not_instance;
- __ bind(&loop);
- __ cmp(scratch, prototype);
- __ j(equal, &is_instance, Label::kNear);
- Factory* factory = masm->isolate()->factory();
- __ cmp(scratch, Immediate(factory->null_value()));
- __ j(equal, &is_not_instance, Label::kNear);
- __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ Set(eax, Immediate(0));
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(scratch,
- times_pointer_size, roots_array_start), eax);
- } else {
- // Get return address and delta to inlined map check.
- __ mov(eax, factory->true_value());
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
- }
- __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
- if (!ReturnTrueFalseObject()) {
- __ Set(eax, Immediate(0));
- }
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(
- scratch, times_pointer_size, roots_array_start), eax);
- } else {
- // Get return address and delta to inlined map check.
- __ mov(eax, factory->false_value());
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
- }
- __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
- if (!ReturnTrueFalseObject()) {
- __ Set(eax, Immediate(Smi::FromInt(1)));
- }
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before null, smi and string value checks, check that the rhs is a function
- // as for a non-function rhs an exception needs to be thrown.
- __ JumpIfSmi(function, &slow, Label::kNear);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
- __ j(not_equal, &slow, Label::kNear);
-
- // Null is not instance of anything.
- __ cmp(object, factory->null_value());
- __ j(not_equal, &object_not_null, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&object_not_null);
- // Smi values is not instance of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&object_not_null_or_smi);
- // String values is not instance of anything.
- Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
- __ j(NegateCondition(is_string), &slow, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- // Tail call the builtin which returns 0 or 1.
- if (HasArgsInRegisters()) {
- // Push arguments below return address.
- __ pop(scratch);
- __ push(object);
- __ push(function);
- __ push(scratch);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- // Call the builtin and convert 0/1 to true/false.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(object);
- __ push(function);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- Label true_value, done;
- __ test(eax, eax);
- __ j(zero, &true_value, Label::kNear);
- __ mov(eax, factory->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(eax, factory->true_value());
- __ bind(&done);
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
- }
-}
-
-
-Register InstanceofStub::left() { return eax; }
-
-
-Register InstanceofStub::right() { return edx; }
-
-
-// -------------------------------------------------------------------------
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- // If the receiver is a smi trigger the non-string case.
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ test(result_, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(index_, &index_not_smi_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
- __ j(above_equal, index_out_of_range_);
-
- __ SmiUntag(index_);
-
- Factory* factory = masm->isolate()->factory();
- StringCharLoadGenerator::Generate(
- masm, factory, object_, index_, result_, &call_runtime_);
-
- __ SmiTag(result_);
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- masm->isolate()->factory()->heap_number_map(),
- index_not_number_,
- DONT_DO_SMI_CHECK);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- if (!index_.is(eax)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ mov(index_, eax);
- }
- __ pop(object_);
- // Reload the instance type.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(index_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code of getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ SmiTag(index_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
- __ test(code_,
- Immediate(kSmiTagMask |
- ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
- __ j(not_zero, &slow_case_);
-
- Factory* factory = masm->isolate()->factory();
- __ Set(result_, Immediate(factory->single_character_string_cache()));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiShiftSize == 0);
- // At this point code register contains smi tagged ASCII char code.
- __ mov(result_, FieldOperand(result_,
- code_, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result_, factory->undefined_value());
- __ j(equal, &slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- // Load the two arguments.
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfSmi(eax, &call_runtime);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &call_runtime);
-
- // First argument is a a string, test second.
- __ JumpIfSmi(edx, &call_runtime);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
- }
-
- // Both arguments are strings.
- // eax: first string
- // edx: second string
- // Check if either of the strings are empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ecx, ecx);
- __ j(not_zero, &second_not_zero_length, Label::kNear);
- // Second string is empty, result is first string which is already in eax.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ebx, ebx);
- __ j(not_zero, &both_not_zero_length, Label::kNear);
- // First string is empty, result is second string which is in edx.
- __ mov(eax, edx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // eax: first string
- // ebx: length of first string as a smi
- // ecx: length of second string as a smi
- // edx: second string
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
- __ add(ebx, ecx);
- STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
- // Handle exceptionally long strings in the runtime system.
- __ j(overflow, &call_runtime);
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return an internalized string here.
- __ cmp(ebx, Immediate(Smi::FromInt(2)));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
-
- // Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string, make_two_character_string_no_reload;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, ebx, ecx, eax, edx, edi,
- &make_two_character_string_no_reload, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Allocate a two character string.
- __ bind(&make_two_character_string);
- // Reload the arguments.
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
- // Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
- __ bind(&make_two_character_string_no_reload);
- __ IncrementCounter(counters->string_add_make_two_char(), 1);
- __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
- // Pack both characters in ebx.
- __ shl(ecx, kBitsPerByte);
- __ or_(ebx, ecx);
- // Set the characters in the new string.
- __ mov_w(FieldOperand(eax, SeqOneByteString::kHeaderSize), ebx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
- __ j(below, &string_add_flat_result);
-
- // If result is not supposed to be flat allocate a cons string object. If both
- // strings are ASCII the result is an ASCII cons string.
- Label non_ascii, allocated, ascii_data;
- __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ and_(ecx, edi);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(ecx, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an ASCII cons string.
- __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ AssertSmi(ebx);
- __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
- __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
- __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
- __ mov(eax, ecx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
- // ecx: first instance type AND second instance type.
- // edi: second instance type.
- __ test(ecx, Immediate(kAsciiDataHintMask));
- __ j(not_zero, &ascii_data);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ xor_(edi, ecx);
- STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(edi, kOneByteStringTag | kAsciiDataHintTag);
- __ cmp(edi, kOneByteStringTag | kAsciiDataHintTag);
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- Label first_prepared, second_prepared;
- Label first_is_sequential, second_is_sequential;
- __ bind(&string_add_flat_result);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- // ecx: instance type of first string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(ecx, kStringRepresentationMask);
- __ j(zero, &first_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(ecx, kShortExternalStringMask);
- __ j(not_zero, &call_runtime);
- __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ jmp(&first_prepared, Label::kNear);
- __ bind(&first_is_sequential);
- __ add(eax, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ bind(&first_prepared);
-
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- // Check whether both strings have same encoding.
- // edi: instance type of second string
- __ xor_(ecx, edi);
- __ test_b(ecx, kStringEncodingMask);
- __ j(not_zero, &call_runtime);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(edi, kStringRepresentationMask);
- __ j(zero, &second_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(edi, kShortExternalStringMask);
- __ j(not_zero, &call_runtime);
- __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ jmp(&second_prepared, Label::kNear);
- __ bind(&second_is_sequential);
- __ add(edx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ bind(&second_prepared);
-
- // Push the addresses of both strings' first characters onto the stack.
- __ push(edx);
- __ push(eax);
-
- Label non_ascii_string_add_flat_result, call_runtime_drop_two;
- // edi: instance type of second string
- // First string and second string have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ test_b(edi, kStringEncodingMask);
- __ j(zero, &non_ascii_string_add_flat_result);
-
- // Both strings are ASCII strings.
- // ebx: length of resulting flat string as a smi
- __ SmiUntag(ebx);
- __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(ecx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // Load first argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- // Load second argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Handle creating a flat two byte result.
- // eax: first string - known to be two byte
- // ebx: length of resulting flat string as a smi
- // edx: second string
- __ bind(&non_ascii_string_add_flat_result);
- // Both strings are two byte strings.
- __ SmiUntag(ebx);
- __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load second argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- // Load second argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Recover stack pointer before jumping to runtime.
- __ bind(&call_runtime_drop_two);
- __ Drop(2);
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
- __ j(below, &done);
-
- // Check the number to string cache.
- Label not_cached;
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- false,
- &not_cached);
- __ mov(arg, scratch1);
- __ mov(Operand(esp, stack_offset), arg);
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
- __ j(not_equal, slow);
- __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(zero, slow);
- __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
- __ mov(Operand(esp, stack_offset), arg);
-
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(src, Immediate(1));
- __ add(dest, Immediate(1));
- } else {
- __ mov_w(scratch, Operand(src, 0));
- __ mov_w(Operand(dest, 0), scratch);
- __ add(src, Immediate(2));
- __ add(dest, Immediate(2));
- }
- __ sub(count, Immediate(1));
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- // Copy characters using rep movs of doublewords.
- // The destination is aligned on a 4 byte boundary because we are
- // copying to the beginning of a newly allocated string.
- ASSERT(dest.is(edi)); // rep movs destination
- ASSERT(src.is(esi)); // rep movs source
- ASSERT(count.is(ecx)); // rep movs count
- ASSERT(!scratch.is(dest));
- ASSERT(!scratch.is(src));
- ASSERT(!scratch.is(count));
-
- // Nothing to do for zero characters.
- Label done;
- __ test(count, count);
- __ j(zero, &done);
-
- // Make count the number of bytes to copy.
- if (!ascii) {
- __ shl(count, 1);
- }
-
- // Don't enter the rep movs if there are less than 4 bytes to copy.
- Label last_bytes;
- __ test(count, Immediate(~3));
- __ j(zero, &last_bytes, Label::kNear);
-
- // Copy from edi to esi using rep movs instruction.
- __ mov(scratch, count);
- __ sar(count, 2); // Number of doublewords to copy.
- __ cld();
- __ rep_movs();
-
- // Find number of bytes left.
- __ mov(count, scratch);
- __ and_(count, 3);
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ test(count, count);
- __ j(zero, &done);
-
- // Copy remaining characters.
- Label loop;
- __ bind(&loop);
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(src, Immediate(1));
- __ add(dest, Immediate(1));
- __ sub(count, Immediate(1));
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_probed,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits as such strings has a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ mov(scratch, c1);
- __ sub(scratch, Immediate(static_cast<int>('0')));
- __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index, Label::kNear);
- __ mov(scratch, c2);
- __ sub(scratch, Immediate(static_cast<int>('0')));
- __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_probed);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, kBitsPerByte);
- __ or_(chars, c2);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the string table.
- Register string_table = c2;
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ mov(scratch, Immediate(Heap::kStringTableRootIndex));
- __ mov(string_table,
- Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ mov(mask, FieldOperand(string_table, StringTable::kCapacityOffset));
- __ SmiUntag(mask);
- __ sub(mask, Immediate(1));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // string_table: string table
- // mask: capacity mask
- // scratch: -
-
- // Perform a number of probes in the string table.
- static const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes], next_probe_pop_mask[kProbes];
- Register candidate = scratch; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- __ mov(scratch, hash);
- if (i > 0) {
- __ add(scratch, Immediate(StringTable::GetProbeOffset(i)));
- }
- __ and_(scratch, mask);
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ mov(candidate,
- FieldOperand(string_table,
- scratch,
- times_pointer_size,
- StringTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- Factory* factory = masm->isolate()->factory();
- __ cmp(candidate, factory->undefined_value());
- __ j(equal, not_found);
- __ cmp(candidate, factory->the_hole_value());
- __ j(equal, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ cmp(FieldOperand(candidate, String::kLengthOffset),
- Immediate(Smi::FromInt(2)));
- __ j(not_equal, &next_probe[i]);
-
- // As we are out of registers save the mask on the stack and use that
- // register as a temporary.
- __ push(mask);
- Register temp = mask;
-
- // Check that the candidate is a non-external ASCII string.
- __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe_pop_mask[i]);
-
- // Check if the two characters match.
- __ mov(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
- __ and_(temp, 0x0000ffff);
- __ cmp(chars, temp);
- __ j(equal, &found_in_string_table);
- __ bind(&next_probe_pop_mask[i]);
- __ pop(mask);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- __ pop(mask); // Pop saved mask from the stack.
- if (!result.is(eax)) {
- __ mov(eax, result);
- }
-}
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = (seed + character) + ((seed + character) << 10);
- if (Serializer::enabled()) {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
- __ mov(scratch, Operand::StaticArray(scratch,
- times_pointer_size,
- roots_array_start));
- __ SmiUntag(scratch);
- __ add(scratch, character);
- __ mov(hash, scratch);
- __ shl(scratch, 10);
- __ add(hash, scratch);
- } else {
- int32_t seed = masm->isolate()->heap()->HashSeed();
- __ lea(scratch, Operand(character, seed));
- __ shl(scratch, 10);
- __ lea(hash, Operand(scratch, character, times_1, seed));
- }
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ shr(scratch, 6);
- __ xor_(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ add(hash, character);
- // hash += hash << 10;
- __ mov(scratch, hash);
- __ shl(scratch, 10);
- __ add(hash, scratch);
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ shr(scratch, 6);
- __ xor_(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ mov(scratch, hash);
- __ shl(scratch, 3);
- __ add(hash, scratch);
- // hash ^= hash >> 11;
- __ mov(scratch, hash);
- __ shr(scratch, 11);
- __ xor_(hash, scratch);
- // hash += hash << 15;
- __ mov(scratch, hash);
- __ shl(scratch, 15);
- __ add(hash, scratch);
-
- __ and_(hash, String::kHashBitMask);
-
- // if (hash == 0) hash = 27;
- Label hash_not_zero;
- __ j(not_zero, &hash_not_zero, Label::kNear);
- __ mov(hash, Immediate(StringHasher::kZeroHash));
- __ bind(&hash_not_zero);
-}
-
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: to
- // esp[8]: from
- // esp[12]: string
-
- // Make sure first argument is a string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
-
- // eax: string
- // ebx: instance type
-
- // Calculate length of sub string using the smi values.
- __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
- __ JumpIfNotSmi(ecx, &runtime);
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
- __ JumpIfNotSmi(edx, &runtime);
- __ sub(ecx, edx);
- __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
- Label not_original_string;
- // Shorter than original string's length: an actual substring.
- __ j(below, &not_original_string, Label::kNear);
- // Longer than original string's length or negative: unsafe arguments.
- __ j(above, &runtime);
- // Return original string.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
- __ bind(&not_original_string);
-
- Label single_char;
- __ cmp(ecx, Immediate(Smi::FromInt(1)));
- __ j(equal, &single_char);
-
- // eax: string
- // ebx: instance type
- // ecx: sub string length (smi)
- // edx: from index (smi)
- // Deal with different string types: update the index if necessary
- // and put the underlying string into edi.
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ test(ebx, Immediate(kIsIndirectStringMask));
- __ j(zero, &seq_or_external_string, Label::kNear);
-
- Factory* factory = masm->isolate()->factory();
- __ test(ebx, Immediate(kSlicedNotConsMask));
- __ j(not_zero, &sliced_string, Label::kNear);
- // Cons string. Check whether it is flat, then fetch first part.
- // Flat cons strings have an empty second part.
- __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
- factory->empty_string());
- __ j(not_equal, &runtime);
- __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
- // Update instance type.
- __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and adjust start index by offset.
- __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
- __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
- // Update instance type.
- __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the expected register.
- __ mov(edi, eax);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // edi: underlying subject string
- // ebx: instance type of underlying subject string
- // edx: adjusted start index (smi)
- // ecx: length (smi)
- __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
- // Short slice. Copy instead of slicing.
- __ j(less, &copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(ebx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_slice, Label::kNear);
- __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
- __ jmp(&set_slice_header, Label::kNear);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
- __ bind(&set_slice_header);
- __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
- __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
- __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&copy_routine);
- }
-
- // edi: underlying subject string
- // ebx: instance type of underlying subject string
- // edx: adjusted start index (smi)
- // ecx: length (smi)
- // The subject string can only be external or sequential string of either
- // encoding at this point.
- Label two_byte_sequential, runtime_drop_two, sequential_string;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(ebx, kExternalStringTag);
- __ j(zero, &sequential_string);
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ test_b(ebx, kShortExternalStringMask);
- __ j(not_zero, &runtime);
- __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&sequential_string);
- // Stash away (adjusted) index and (underlying) string.
- __ push(edx);
- __ push(edi);
- __ SmiUntag(ecx);
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ test_b(ebx, kStringEncodingMask);
- __ j(zero, &two_byte_sequential);
-
- // Sequential ASCII string. Allocate the result.
- __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
-
- // eax: result string
- // ecx: result string length
- __ mov(edx, esi); // esi used by following code.
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ pop(esi);
- __ pop(ebx);
- __ SmiUntag(ebx);
- __ lea(esi, FieldOperand(esi, ebx, times_1, SeqOneByteString::kHeaderSize));
-
- // eax: result string
- // ecx: result length
- // edx: original value of esi
- // edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
- __ mov(esi, edx); // Restore esi.
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&two_byte_sequential);
- // Sequential two-byte string. Allocate the result.
- __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
-
- // eax: result string
- // ecx: result string length
- __ mov(edx, esi); // esi used by following code.
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(edi,
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ pop(esi);
- __ pop(ebx);
- // As from is a smi it is 2 times the value which matches the size of a two
- // byte character.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
-
- // eax: result string
- // ecx: result length
- // edx: original value of esi
- // edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
- __ mov(esi, edx); // Restore esi.
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- // Drop pushed values on the stack before tail call.
- __ bind(&runtime_drop_two);
- __ Drop(2);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-
- __ bind(&single_char);
- // eax: string
- // ebx: instance type
- // ecx: sub string length (smi)
- // edx: from index (smi)
- StringCharAtGenerator generator(
- eax, edx, ecx, eax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- __ ret(3 * kPointerSize);
- generator.SkipSlow(masm, &runtime);
-}
-
-
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2) {
- Register length = scratch1;
-
- // Compare lengths.
- Label strings_not_equal, check_zero_length;
- __ mov(length, FieldOperand(left, String::kLengthOffset));
- __ cmp(length, FieldOperand(right, String::kLengthOffset));
- __ j(equal, &check_zero_length, Label::kNear);
- __ bind(&strings_not_equal);
- __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
- __ ret(0);
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ test(length, length);
- __ j(not_zero, &compare_chars, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- // Compare characters.
- __ bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
- &strings_not_equal, Label::kNear);
-
- // Characters are equal.
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_compare_native(), 1);
-
- // Find minimum length.
- Label left_shorter;
- __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ mov(scratch3, scratch1);
- __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
-
- Register length_delta = scratch3;
-
- __ j(less_equal, &left_shorter, Label::kNear);
- // Right string is shorter. Change scratch1 to be length of right string.
- __ sub(scratch1, length_delta);
- __ bind(&left_shorter);
-
- Register min_length = scratch1;
-
- // If either length is zero, just compare lengths.
- Label compare_lengths;
- __ test(min_length, min_length);
- __ j(zero, &compare_lengths, Label::kNear);
-
- // Compare characters.
- Label result_not_equal;
- GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal, Label::kNear);
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- __ test(length_delta, length_delta);
-#ifndef ENABLE_LATIN_1
- __ j(not_zero, &result_not_equal, Label::kNear);
-#else
- Label length_not_equal;
- __ j(not_zero, &length_not_equal, Label::kNear);
-#endif
-
- // Result is EQUAL.
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- Label result_greater;
-#ifdef ENABLE_LATIN_1
- Label result_less;
- __ bind(&length_not_equal);
- __ j(greater, &result_greater, Label::kNear);
- __ jmp(&result_less, Label::kNear);
-#endif
- __ bind(&result_not_equal);
-#ifndef ENABLE_LATIN_1
- __ j(greater, &result_greater, Label::kNear);
-#else
- __ j(above, &result_greater, Label::kNear);
- __ bind(&result_less);
-#endif
-
- // Result is LESS.
- __ Set(eax, Immediate(Smi::FromInt(LESS)));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Set(eax, Immediate(Smi::FromInt(GREATER)));
- __ ret(0);
-}
-
-
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
- Label::Distance chars_not_equal_near) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ lea(left,
- FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
- __ lea(right,
- FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ neg(length);
- Register index = length; // index = -length;
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ mov_b(scratch, Operand(left, index, times_1, 0));
- __ cmpb(scratch, Operand(right, index, times_1, 0));
- __ j(not_equal, chars_not_equal, chars_not_equal_near);
- __ inc(index);
- __ j(not_zero, &loop);
-}
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: right string
- // esp[8]: left string
-
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
- __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
-
- Label not_same;
- __ cmp(edx, eax);
- __ j(not_equal, &not_same, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_same);
-
- // Check that both objects are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
-
- // Compare flat ASCII strings.
- // Drop arguments from the stack.
- __ pop(ecx);
- __ add(esp, Immediate(2 * kPointerSize));
- __ push(ecx);
- GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
- Label miss;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ JumpIfNotSmi(ecx, &miss, Label::kNear);
-
- if (GetCondition() == equal) {
- // For equality we do not care about the sign of the result.
- __ sub(eax, edx);
- } else {
- Label done;
- __ sub(edx, eax);
- __ j(no_overflow, &done, Label::kNear);
- // Correct sign of result in case of overflow.
- __ not_(edx);
- __ bind(&done);
- __ mov(eax, edx);
- }
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::NUMBER);
-
- Label generic_stub;
- Label unordered, maybe_undefined1, maybe_undefined2;
- Label miss;
-
- if (left_ == CompareIC::SMI) {
- __ JumpIfNotSmi(edx, &miss);
- }
- if (right_ == CompareIC::SMI) {
- __ JumpIfNotSmi(eax, &miss);
- }
-
- // Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or SSE2 or CMOV is unsupported.
- if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
- CpuFeatures::Scope scope1(SSE2);
- CpuFeatures::Scope scope2(CMOV);
-
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(eax, &right_smi, Label::kNear);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&left, Label::kNear);
- __ bind(&right_smi);
- __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
- __ SmiUntag(ecx);
- __ cvtsi2sd(xmm1, ecx);
-
- __ bind(&left);
- __ JumpIfSmi(edx, &left_smi, Label::kNear);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ jmp(&done);
- __ bind(&left_smi);
- __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
- __ SmiUntag(ecx);
- __ cvtsi2sd(xmm0, ecx);
-
- __ bind(&done);
- // Compare operands.
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
-
- // Return a result of -1, 0, or 1, based on EFLAGS.
- // Performing mov, because xor would destroy the flag register.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
- } else {
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
-
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- }
-
- __ bind(&unordered);
- __ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
- __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
-
- __ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
- __ j(not_equal, &miss);
- __ JumpIfSmi(edx, &unordered);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- __ jmp(&unordered);
- }
-
- __ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ cmp(edx, Immediate(masm->isolate()->factory()->undefined_value()));
- __ j(equal, &unordered);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
- ASSERT(GetCondition() == equal);
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
- Register tmp1 = ecx;
- Register tmp2 = ebx;
-
- // Check that both operands are heap objects.
- Label miss;
- __ mov(tmp1, left);
- STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, right);
- __ JumpIfSmi(tmp1, &miss, Label::kNear);
-
- // Check that both operands are internalized strings.
- __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsInternalizedMask));
- __ j(zero, &miss, Label::kNear);
-
- // Internalized strings are compared by identity.
- Label done;
- __ cmp(left, right);
- // Make sure eax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(eax));
- __ j(not_equal, &done, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ bind(&done);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::UNIQUE_NAME);
- ASSERT(GetCondition() == equal);
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
- Register tmp1 = ecx;
- Register tmp2 = ebx;
-
- // Check that both operands are heap objects.
- Label miss;
- __ mov(tmp1, left);
- STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, right);
- __ JumpIfSmi(tmp1, &miss, Label::kNear);
-
- // Check that both operands are unique names. This leaves the instance
- // types loaded in tmp1 and tmp2.
- STATIC_ASSERT(kInternalizedTag != 0);
- __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
-
- Label succeed1;
- __ test(tmp1, Immediate(kIsInternalizedMask));
- __ j(not_zero, &succeed1);
- __ cmpb(tmp1, static_cast<uint8_t>(SYMBOL_TYPE));
- __ j(not_equal, &miss);
- __ bind(&succeed1);
-
- Label succeed2;
- __ test(tmp2, Immediate(kIsInternalizedMask));
- __ j(not_zero, &succeed2);
- __ cmpb(tmp2, static_cast<uint8_t>(SYMBOL_TYPE));
- __ j(not_equal, &miss);
- __ bind(&succeed2);
-
- // Unique names are compared by identity.
- Label done;
- __ cmp(left, right);
- // Make sure eax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(eax));
- __ j(not_equal, &done, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ bind(&done);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
- Label miss;
-
- bool equality = Token::IsEqualityOp(op_);
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
- Register tmp1 = ecx;
- Register tmp2 = ebx;
- Register tmp3 = edi;
-
- // Check that both operands are heap objects.
- __ mov(tmp1, left);
- STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, right);
- __ JumpIfSmi(tmp1, &miss);
-
- // Check that both operands are strings. This leaves the instance
- // types loaded in tmp1 and tmp2.
- __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- __ mov(tmp3, tmp1);
- STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, tmp2);
- __ test(tmp3, Immediate(kIsNotStringMask));
- __ j(not_zero, &miss);
-
- // Fast check for identical strings.
- Label not_same;
- __ cmp(left, right);
- __ j(not_equal, &not_same, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- // Handle not identical strings.
- __ bind(&not_same);
-
- // Check that both strings are internalized. If they are, we're done
- // because we already know they are not identical. But in the case of
- // non-equality compare, we still need to determine the order.
- if (equality) {
- Label do_compare;
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsInternalizedMask));
- __ j(zero, &do_compare, Label::kNear);
- // Make sure eax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(eax));
- __ ret(0);
- __ bind(&do_compare);
- }
-
- // Check that both strings are sequential ASCII.
- Label runtime;
- __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
-
- // Compare flat ASCII strings. Returns when done.
- if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3);
- }
-
- // Handle more complex cases in runtime.
- __ bind(&runtime);
- __ pop(tmp1); // Return address.
- __ push(left);
- __ push(right);
- __ push(tmp1);
- if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
- } else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
- Label miss;
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &miss, Label::kNear);
-
- __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &miss, Label::kNear);
- __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &miss, Label::kNear);
-
- ASSERT(GetCondition() == equal);
- __ sub(eax, edx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
- Label miss;
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &miss, Label::kNear);
-
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ecx, known_map_);
- __ j(not_equal, &miss, Label::kNear);
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &miss, Label::kNear);
- __ cmp(ebx, known_map_);
- __ j(not_equal, &miss, Label::kNear);
- __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &miss, Label::kNear);
-
- __ sub(eax, edx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- {
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
- masm->isolate());
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx); // Preserve edx and eax.
- __ push(eax);
- __ push(edx); // And also use them as the arguments.
- __ push(eax);
- __ push(Immediate(Smi::FromInt(op_)));
- __ CallExternalReference(miss, 3);
- // Compute the entry point of the rewritten stub.
- __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
- __ pop(eax);
- __ pop(edx);
- }
-
- // Do a tail call to the rewritten stub.
- __ jmp(edi);
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be an internalized string and receiver must be a heap object.
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<String> name,
- Register r0) {
- ASSERT(name->IsInternalizedString());
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r0;
- // Capacity is smi 2^n.
- __ mov(index, FieldOperand(properties, kCapacityOffset));
- __ dec(index);
- __ and_(index,
- Immediate(Smi::FromInt(name->Hash() +
- StringDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
- Register entity_name = r0;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
- __ j(equal, done);
-
- // Stop if found the property.
- __ cmp(entity_name, Handle<String>(name));
- __ j(equal, miss);
-
- Label the_hole;
- // Check for the hole and skip.
- __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
- __ j(equal, &the_hole, Label::kNear);
-
- // Check if the entry name is not an internalized string.
- __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- kIsInternalizedMask);
- __ j(zero, miss);
- __ bind(&the_hole);
- }
-
- StringDictionaryLookupStub stub(properties,
- r0,
- r0,
- StringDictionaryLookupStub::NEGATIVE_LOOKUP);
- __ push(Immediate(Handle<Object>(name)));
- __ push(Immediate(name->Hash()));
- __ CallStub(&stub);
- __ test(r0, r0);
- __ j(not_zero, miss);
- __ jmp(done);
-}
-
-
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r0|. Jump to the |miss| label
-// otherwise.
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
- ASSERT(!elements.is(r0));
- ASSERT(!elements.is(r1));
- ASSERT(!name.is(r0));
- ASSERT(!name.is(r1));
-
- __ AssertString(name);
-
- __ mov(r1, FieldOperand(elements, kCapacityOffset));
- __ shr(r1, kSmiTagSize); // convert smi to int
- __ dec(r1);
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
- __ shr(r0, String::kHashShift);
- if (i > 0) {
- __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
- }
- __ and_(r0, r1);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
-
- // Check if the key is identical to the name.
- __ cmp(name, Operand(elements,
- r0,
- times_4,
- kElementsStartOffset - kHeapObjectTag));
- __ j(equal, done);
- }
-
- StringDictionaryLookupStub stub(elements,
- r1,
- r0,
- POSITIVE_LOOKUP);
- __ push(name);
- __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
- __ shr(r0, String::kHashShift);
- __ push(r0);
- __ CallStub(&stub);
-
- __ test(r1, r1);
- __ j(zero, miss);
- __ jmp(done);
-}
-
-
-void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Stack frame on entry:
- // esp[0 * kPointerSize]: return address.
- // esp[1 * kPointerSize]: key's hash.
- // esp[2 * kPointerSize]: key.
- // Registers:
- // dictionary_: StringDictionary to probe.
- // result_: used as scratch.
- // index_: will hold an index of entry if lookup is successful.
- // might alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non zero otherwise.
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- Register scratch = result_;
-
- __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
- __ dec(scratch);
- __ SmiUntag(scratch);
- __ push(scratch);
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- if (i > 0) {
- __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
- }
- __ and_(scratch, Operand(esp, 0));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
-
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- __ mov(scratch, Operand(dictionary_,
- index_,
- times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ cmp(scratch, masm->isolate()->factory()->undefined_value());
- __ j(equal, &not_in_dictionary);
-
- // Stop if found the property.
- __ cmp(scratch, Operand(esp, 3 * kPointerSize));
- __ j(equal, &in_dictionary);
-
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // If we hit a key that is not an internalized string during negative
- // lookup we have to bailout as this key might be equal to the
- // key we are looking for.
-
- // Check if the entry name is not an internalized string.
- __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
- kIsInternalizedMask);
- __ j(zero, &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
- __ mov(result_, Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
- }
-
- __ bind(&in_dictionary);
- __ mov(result_, Immediate(1));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_in_dictionary);
- __ mov(result_, Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-}
-
-
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
- { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal and CallFunctionStub.
- { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField and
- // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
- // GenerateStoreField calls the stub with two different permutations of
- // registers. This is the second.
- { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
- // StoreIC::GenerateNormal via GenerateDictionaryStore
- { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
- // KeyedStoreIC::GenerateGeneric.
- { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET},
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
- { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
- { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
- { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
- // StoreArrayLiteralElementStub::Generate
- { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub
- { REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated() {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
-
- CpuFeatures::TryForceFeatureScope scope(SSE2);
- if (CpuFeatures::IsSupported(SSE2)) {
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
- }
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return CpuFeatures::IsSupported(SSE2);
-}
-
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two instructions are generated with labels so as to get the
- // offset fixed up correctly by the bind(Label*) call. We patch it back and
- // forth between a compare instructions (a nop in this position) and the
- // real branch when we start and stop incremental heap marking.
- __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
- __ jmp(&skip_to_incremental_compacting, Label::kFar);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- masm->set_byte_at(0, kTwoByteNopInstruction);
- masm->set_byte_at(2, kFiveByteNopInstruction);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- not_zero,
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
- mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm,
- kReturnOnNoNeedToInformIncrementalMarker,
- mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ ret(0);
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
- __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label object_is_black, need_incremental, need_incremental_pop_object;
-
- __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
- __ and_(regs_.scratch0(), regs_.object());
- __ mov(regs_.scratch1(),
- Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ sub(regs_.scratch1(), Immediate(1));
- __ mov(Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset),
- regs_.scratch1());
- __ j(negative, &need_incremental);
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(),
- regs_.scratch0(),
- regs_.scratch1(),
- &object_is_black,
- Label::kNear);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&object_is_black);
-
- // Get the value from the slot.
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- zero,
- &ensure_not_white,
- Label::kNear);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- not_zero,
- &ensure_not_white,
- Label::kNear);
-
- __ jmp(&need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need an extra register for this, so we push the object register
- // temporarily.
- __ push(regs_.object());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object,
- Label::kNear);
- __ pop(regs_.object());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&need_incremental_pop_object);
- __ pop(regs_.object());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : element value to store
- // -- ebx : array literal
- // -- edi : map of array literal
- // -- ecx : element index as smi
- // -- edx : array literal index in function
- // -- esp[0] : return address
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label slow_elements_from_double;
- Label fast_elements;
-
- __ CheckFastElements(edi, &double_elements);
-
- // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
- __ JumpIfSmi(eax, &smi_element);
- __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
-
- __ bind(&slow_elements);
- __ pop(edi); // Pop return address and remember to put back later for tail
- // call.
- __ push(ebx);
- __ push(ecx);
- __ push(eax);
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(edx);
- __ push(edi); // Return return address so that tail call returns to right
- // place.
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- __ bind(&slow_elements_from_double);
- __ pop(edx);
- __ jmp(&slow_elements);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
- FixedArrayBase::kHeaderSize));
- __ mov(Operand(ecx, 0), eax);
- // Update the write barrier for the array store.
- __ RecordWrite(ebx, ecx, eax,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
- FixedArrayBase::kHeaderSize), eax);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
-
- __ push(edx);
- __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(eax,
- edx,
- ecx,
- edi,
- xmm0,
- &slow_elements_from_double,
- false);
- __ pop(edx);
- __ ret(0);
-}
-
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- ASSERT(!Serializer::enabled());
- bool save_fp_regs = CpuFeatures::IsSupported(SSE2);
- CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
- __ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ mov(ebx, MemOperand(ebp, parameter_count_offset));
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ pop(ecx);
- __ lea(esp, MemOperand(esp, ebx, times_pointer_size,
- extra_expression_stack_count_ * kPointerSize));
- __ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
-}
-
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
- ProfileEntryHookStub stub;
- masm->CallStub(&stub);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // Ecx is the only volatile register we must save.
- __ push(ecx);
-
- // Calculate and push the original stack pointer.
- __ lea(eax, Operand(esp, kPointerSize));
- __ push(eax);
-
- // Calculate and push the function address.
- __ mov(eax, Operand(eax, 0));
- __ sub(eax, Immediate(Assembler::kCallInstructionLength));
- __ push(eax);
-
- // Call the entry hook.
- int32_t hook_location = reinterpret_cast<int32_t>(&entry_hook_);
- __ call(Operand(hook_location, RelocInfo::NONE32));
- __ add(esp, Immediate(2 * kPointerSize));
-
- // Restore ecx.
- __ pop(ecx);
- __ ret(0);
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/code-stubs-ia32.h b/src/3rdparty/v8/src/ia32/code-stubs-ia32.h
deleted file mode 100644
index e6bb38a..0000000
--- a/src/3rdparty/v8/src/ia32/code-stubs-ia32.h
+++ /dev/null
@@ -1,646 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_CODE_STUBS_IA32_H_
-#define V8_IA32_CODE_STUBS_IA32_H_
-
-#include "macro-assembler.h"
-#include "code-stubs.h"
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) {}
- void Generate(MacroAssembler* masm);
- static void GenerateOperation(MacroAssembler* masm,
- TranscendentalCache::Type type);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- virtual bool IsPregenerated() { return true; }
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
-class UnaryOpStub: public PlatformCodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* undo,
- Label* slow,
- Label::Distance non_smi_near = Label::kFar,
- Label::Distance undo_near = Label::kFar,
- Label::Distance slow_near = Label::kFar);
- void GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near = Label::kFar);
- void GenerateSmiCodeUndo(MacroAssembler* masm);
-
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateNumberStubSub(MacroAssembler* masm);
- void GenerateNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying characters using the rep movs instruction.
- // Copies ecx characters from esi to edi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be edi.
- Register src, // Must be esi.
- Register count, // Must be ecx.
- Register scratch, // Neither of above.
- bool ascii);
-
- // Probe the string table for a two character string. If the string
- // requires non-standard hashing a jump to the label not_probed is
- // performed and registers c1 and c2 are preserved. In all other
- // cases they are clobbered. If the string is not found by probing a
- // jump to the label not_found is performed. This jump does not
- // guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in
- // register eax.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_probed,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow);
-
- const StringAddFlags flags_;
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- StringCompareStub() { }
-
- // Compares two flat ASCII strings and returns result in eax.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- // Compares two flat ASCII strings for equality and returns result
- // in eax.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2);
-
- private:
- virtual Major MajorKey() { return StringCompare; }
- virtual int MinorKey() { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
- Label::Distance chars_not_equal_near = Label::kFar);
-};
-
-
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- StringDictionaryLookupStub(Register dictionary,
- Register result,
- Register index,
- LookupMode mode)
- : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<String> name,
- Register r0);
-
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- Major MajorKey() { return StringDictionaryLookup; }
-
- int MinorKey() {
- return DictionaryBits::encode(dictionary_.code()) |
- ResultBits::encode(result_.code()) |
- IndexBits::encode(index_.code()) |
- LookupModeBits::encode(mode_);
- }
-
- class DictionaryBits: public BitField<int, 0, 3> {};
- class ResultBits: public BitField<int, 3, 3> {};
- class IndexBits: public BitField<int, 6, 3> {};
- class LookupModeBits: public BitField<LookupMode, 9, 1> {};
-
- Register dictionary_;
- Register result_;
- Register index_;
- LookupMode mode_;
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- RecordWriteStub(Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- }
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
- static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
-
- static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
- static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
-
- static Mode GetMode(Code* stub) {
- byte first_instruction = stub->instruction_start()[0];
- byte second_instruction = stub->instruction_start()[2];
-
- if (first_instruction == kTwoByteJumpInstruction) {
- return INCREMENTAL;
- }
-
- ASSERT(first_instruction == kTwoByteNopInstruction);
-
- if (second_instruction == kFiveByteJumpInstruction) {
- return INCREMENTAL_COMPACTION;
- }
-
- ASSERT(second_instruction == kFiveByteNopInstruction);
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- switch (mode) {
- case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteNopInstruction;
- break;
- case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteJumpInstruction;
- break;
- case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteJumpInstruction;
- break;
- }
- ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 7);
- }
-
- private:
- // This is a helper class for freeing up 3 scratch registers, where the third
- // is always ecx (needed for shift operations). The input is two registers
- // that must be preserved and one scratch register provided by the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
- : object_orig_(object),
- address_orig_(address),
- scratch0_orig_(scratch0),
- object_(object),
- address_(address),
- scratch0_(scratch0) {
- ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
- if (scratch0.is(ecx)) {
- scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
- }
- if (object.is(ecx)) {
- object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
- }
- if (address.is(ecx)) {
- address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
- }
- ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
- }
-
- void Save(MacroAssembler* masm) {
- ASSERT(!address_orig_.is(object_));
- ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
- ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
- ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
- ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
- // We don't have to save scratch0_orig_ because it was given to us as
- // a scratch register. But if we had to switch to a different reg then
- // we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
- if (!ecx.is(scratch0_orig_) &&
- !ecx.is(object_orig_) &&
- !ecx.is(address_orig_)) {
- masm->push(ecx);
- }
- masm->push(scratch1_);
- if (!address_.is(address_orig_)) {
- masm->push(address_);
- masm->mov(address_, address_orig_);
- }
- if (!object_.is(object_orig_)) {
- masm->push(object_);
- masm->mov(object_, object_orig_);
- }
- }
-
- void Restore(MacroAssembler* masm) {
- // These will have been preserved the entire time, so we just need to move
- // them back. Only in one case is the orig_ reg different from the plain
- // one, since only one of them can alias with ecx.
- if (!object_.is(object_orig_)) {
- masm->mov(object_orig_, object_);
- masm->pop(object_);
- }
- if (!address_.is(address_orig_)) {
- masm->mov(address_orig_, address_);
- masm->pop(address_);
- }
- masm->pop(scratch1_);
- if (!ecx.is(scratch0_orig_) &&
- !ecx.is(object_orig_) &&
- !ecx.is(address_orig_)) {
- masm->pop(ecx);
- }
- if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The caller saved
- // registers are eax, ecx and edx. The three scratch registers (incl. ecx)
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
- if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- masm->sub(esp,
- Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
- // Save all XMM registers except XMM0.
- for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
- XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
- }
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- // Restore all XMM registers except XMM0.
- for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
- XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
- }
- masm->add(esp,
- Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
- }
- if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
- if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_orig_;
- Register address_orig_;
- Register scratch0_orig_;
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
- // Third scratch register is always ecx.
-
- Register GetRegThatIsNotEcxOr(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(ecx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- }
-;
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
- }
-
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
-
- class ObjectBits: public BitField<int, 0, 3> {};
- class ValueBits: public BitField<int, 3, 3> {};
- class AddressBits: public BitField<int, 6, 3> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
-
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
- RegisterAllocation regs_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/codegen-ia32.cc b/src/3rdparty/v8/src/ia32/codegen-ia32.cc
deleted file mode 100644
index 5368811..0000000
--- a/src/3rdparty/v8/src/ia32/codegen-ia32.cc
+++ /dev/null
@@ -1,967 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen.h"
-#include "heap.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
- masm->set_has_frame(true);
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
- masm->set_has_frame(false);
-}
-
-
-#define __ masm.
-
-
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) {
- // Fallback to library function if function cannot be created.
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- }
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // esp[1 * kPointerSize]: raw double input
- // esp[0 * kPointerSize]: return address
- // Move double input into registers.
-
- __ push(ebx);
- __ push(edx);
- __ push(edi);
- __ fld_d(Operand(esp, 4 * kPointerSize));
- __ mov(ebx, Operand(esp, 4 * kPointerSize));
- __ mov(edx, Operand(esp, 5 * kPointerSize));
- TranscendentalCacheStub::GenerateOperation(&masm, type);
- // The return value is expected to be on ST(0) of the FPU stack.
- __ pop(edi);
- __ pop(edx);
- __ pop(ebx);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
-UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(SSE2)) return &exp;
- if (!FLAG_fast_math) return &exp;
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // esp[1 * kPointerSize]: raw double input
- // esp[0 * kPointerSize]: return address
- {
- CpuFeatures::Scope use_sse2(SSE2);
- XMMRegister input = xmm1;
- XMMRegister result = xmm2;
- __ movdbl(input, Operand(esp, 1 * kPointerSize));
- __ push(eax);
- __ push(ebx);
-
- MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);
-
- __ pop(ebx);
- __ pop(eax);
- __ movdbl(Operand(esp, 1 * kPointerSize), result);
- __ fld_d(Operand(esp, 1 * kPointerSize));
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
-UnaryMathFunction CreateSqrtFunction() {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- // If SSE2 is not available, we can use libc's implementation to ensure
- // consistency since code by fullcodegen's calls into runtime in that case.
- if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // esp[1 * kPointerSize]: raw double input
- // esp[0 * kPointerSize]: return address
- // Move double input into registers.
- {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
- __ sqrtsd(xmm0, xmm0);
- __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
- // Load result into floating point register as return value.
- __ fld_d(Operand(esp, 1 * kPointerSize));
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
-static void MemCopyWrapper(void* dest, const void* src, size_t size) {
- memcpy(dest, src, size);
-}
-
-
-OS::MemCopyFunction CreateMemCopyFunction() {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) return &MemCopyWrapper;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-
- // Generated code is put into a fixed, unmovable, buffer, and not into
- // the V8 heap. We can't, and don't, refer to any relocatable addresses
- // (e.g. the JavaScript nan-object).
-
- // 32-bit C declaration function calls pass arguments on stack.
-
- // Stack layout:
- // esp[12]: Third argument, size.
- // esp[8]: Second argument, source pointer.
- // esp[4]: First argument, destination pointer.
- // esp[0]: return address
-
- const int kDestinationOffset = 1 * kPointerSize;
- const int kSourceOffset = 2 * kPointerSize;
- const int kSizeOffset = 3 * kPointerSize;
-
- int stack_offset = 0; // Update if we change the stack height.
-
- if (FLAG_debug_code) {
- __ cmp(Operand(esp, kSizeOffset + stack_offset),
- Immediate(OS::kMinComplexMemCopy));
- Label ok;
- __ j(greater_equal, &ok);
- __ int3();
- __ bind(&ok);
- }
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope enable(SSE2);
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
-
- __ movdqu(xmm0, Operand(src, 0));
- __ movdqu(Operand(dst, 0), xmm0);
- __ mov(edx, dst);
- __ and_(edx, 0xF);
- __ neg(edx);
- __ add(edx, Immediate(16));
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
-
- // edi is now aligned. Check if esi is also aligned.
- Label unaligned_source;
- __ test(src, Immediate(0x0F));
- __ j(not_zero, &unaligned_source);
- {
- // Copy loop for aligned source and destination.
- __ mov(edx, count);
- Register loop_count = ecx;
- Register count = edx;
- __ shr(loop_count, 5);
- {
- // Main copy loop.
- Label loop;
- __ bind(&loop);
- __ prefetch(Operand(src, 0x20), 1);
- __ movdqa(xmm0, Operand(src, 0x00));
- __ movdqa(xmm1, Operand(src, 0x10));
- __ add(src, Immediate(0x20));
-
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- __ add(dst, Immediate(0x20));
-
- __ dec(loop_count);
- __ j(not_zero, &loop);
- }
-
- // At most 31 bytes to copy.
- Label move_less_16;
- __ test(count, Immediate(0x10));
- __ j(zero, &move_less_16);
- __ movdqa(xmm0, Operand(src, 0));
- __ add(src, Immediate(0x10));
- __ movdqa(Operand(dst, 0), xmm0);
- __ add(dst, Immediate(0x10));
- __ bind(&move_less_16);
-
- // At most 15 bytes to copy. Copy 16 bytes at end of string.
- __ and_(count, 0xF);
- __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
-
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
- }
- __ Align(16);
- {
- // Copy loop for unaligned source and aligned destination.
- // If source is not aligned, we can't read it as efficiently.
- __ bind(&unaligned_source);
- __ mov(edx, ecx);
- Register loop_count = ecx;
- Register count = edx;
- __ shr(loop_count, 5);
- {
- // Main copy loop
- Label loop;
- __ bind(&loop);
- __ prefetch(Operand(src, 0x20), 1);
- __ movdqu(xmm0, Operand(src, 0x00));
- __ movdqu(xmm1, Operand(src, 0x10));
- __ add(src, Immediate(0x20));
-
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- __ add(dst, Immediate(0x20));
-
- __ dec(loop_count);
- __ j(not_zero, &loop);
- }
-
- // At most 31 bytes to copy.
- Label move_less_16;
- __ test(count, Immediate(0x10));
- __ j(zero, &move_less_16);
- __ movdqu(xmm0, Operand(src, 0));
- __ add(src, Immediate(0x10));
- __ movdqa(Operand(dst, 0), xmm0);
- __ add(dst, Immediate(0x10));
- __ bind(&move_less_16);
-
- // At most 15 bytes to copy. Copy 16 bytes at end of string.
- __ and_(count, 0x0F);
- __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
-
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
- }
-
- } else {
- // SSE2 not supported. Unlikely to happen in practice.
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- __ cld();
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
- // Copy the first word.
- __ mov(eax, Operand(src, 0));
- __ mov(Operand(dst, 0), eax);
-
- // Increment src,dstso that dst is aligned.
- __ mov(edx, dst);
- __ and_(edx, 0x03);
- __ neg(edx);
- __ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
- // edi is now aligned, ecx holds number of remaning bytes to copy.
-
- __ mov(edx, count);
- count = edx;
- __ shr(ecx, 2); // Make word count instead of byte count.
- __ rep_movs();
-
- // At most 3 bytes left to copy. Copy 4 bytes at end of string.
- __ and_(count, 3);
- __ mov(eax, Operand(src, count, times_1, -4));
- __ mov(Operand(dst, count, times_1, -4), eax);
-
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
-}
-
-#undef __
-
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm, AllocationSiteMode mode,
- Label* allocation_site_info_found) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ebx : target map
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_site_info_found != NULL);
- __ TestJSArrayForAllocationSiteInfo(edx, edi);
- __ j(equal, allocation_site_info_found);
- }
-
- // Set transitioned map.
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ebx : target map
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label loop, entry, convert_hole, gc_required, only_change_map;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(edx, edi);
- __ j(equal, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(equal, &only_change_map);
-
- __ push(eax);
- __ push(ebx);
-
- __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
-
- // Allocate new FixedDoubleArray.
- // edx: receiver
- // edi: length of source FixedArray (smi-tagged)
- AllocationFlags flags =
- static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
- __ AllocateInNewSpace(FixedDoubleArray::kHeaderSize, times_8,
- edi, REGISTER_VALUE_IS_SMI,
- eax, ebx, no_reg, &gc_required, flags);
-
- // eax: destination FixedDoubleArray
- // edi: number of elements
- // edx: receiver
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_double_array_map()));
- __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
- __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
- __ mov(ebx, eax);
- __ RecordWriteField(edx,
- JSObject::kElementsOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
-
- // Prepare for conversion loop.
- ExternalReference canonical_the_hole_nan_reference =
- ExternalReference::address_of_the_hole_nan();
- XMMRegister the_hole_nan = xmm1;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(the_hole_nan,
- Operand::StaticVariable(canonical_the_hole_nan_reference));
- }
- __ jmp(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- // Restore registers before jumping into runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ pop(ebx);
- __ pop(eax);
- __ jmp(fail);
-
- // Convert and copy elements
- // esi: source FixedArray
- __ bind(&loop);
- __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
- // ebx: current element from source
- // edi: index of current element
- __ JumpIfNotSmi(ebx, &convert_hole);
-
- // Normal smi, convert it to double and store.
- __ SmiUntag(ebx);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
- xmm0);
- } else {
- __ push(ebx);
- __ fild_s(Operand(esp, 0));
- __ pop(ebx);
- __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
- }
- __ jmp(&entry);
-
- // Found hole, store hole_nan_as_double instead.
- __ bind(&convert_hole);
-
- if (FLAG_debug_code) {
- __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
- __ Assert(equal, "object found in smi-only array");
- }
-
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
- the_hole_nan);
- } else {
- __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
- __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
- }
-
- __ bind(&entry);
- __ sub(edi, Immediate(Smi::FromInt(1)));
- __ j(not_sign, &loop);
-
- __ pop(ebx);
- __ pop(eax);
-
- // Restore esi.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- __ bind(&only_change_map);
- // eax: value
- // ebx: target map
- // Set transitioned map.
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ebx : target map
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label loop, entry, convert_hole, gc_required, only_change_map, success;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(edx, edi);
- __ j(equal, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(equal, &only_change_map);
-
- __ push(eax);
- __ push(edx);
- __ push(ebx);
-
- __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
-
- // Allocate new FixedArray.
- // ebx: length of source FixedDoubleArray (smi-tagged)
- __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
- __ AllocateInNewSpace(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
-
- // eax: destination FixedArray
- // ebx: number of elements
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-
- __ jmp(&entry);
-
- // ebx: target map
- // edx: receiver
- // Set transitioned map.
- __ bind(&only_change_map);
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&success);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ pop(ebx);
- __ pop(edx);
- __ pop(eax);
- __ jmp(fail);
-
- // Box doubles into heap numbers.
- // edi: source FixedDoubleArray
- // eax: destination FixedArray
- __ bind(&loop);
- // ebx: index of current element (smi-tagged)
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
- __ j(equal, &convert_hole);
-
- // Non-hole double, copy value into a heap number.
- __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
- // edx: new heap number
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ movdbl(xmm0,
- FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
- __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
- __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
- }
- __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
- __ mov(esi, ebx);
- __ RecordWriteArray(eax,
- edx,
- esi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&entry, Label::kNear);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
- masm->isolate()->factory()->the_hole_value());
-
- __ bind(&entry);
- __ sub(ebx, Immediate(Smi::FromInt(1)));
- __ j(not_sign, &loop);
-
- __ pop(ebx);
- __ pop(edx);
- // ebx: target map
- // edx: receiver
- // Set transitioned map.
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
- __ RecordWriteField(edx,
- JSObject::kElementsOffset,
- eax,
- edi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Restore registers.
- __ pop(eax);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- __ bind(&success);
-}
-
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Factory* factory,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- // Fetch the instance type of the receiver into result register.
- __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ test(result, Immediate(kIsIndirectStringMask));
- __ j(zero, &check_sequential, Label::kNear);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ test(result, Immediate(kSlicedNotConsMask));
- __ j(zero, &cons_string, Label::kNear);
-
- // Handle slices.
- Label indirect_string_loaded;
- __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ SmiUntag(result);
- __ add(index, result);
- __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded, Label::kNear);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ cmp(FieldOperand(string, ConsString::kSecondOffset),
- Immediate(factory->empty_string()));
- __ j(not_equal, call_runtime);
- __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label seq_string;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result, Immediate(kStringRepresentationMask));
- __ j(zero, &seq_string, Label::kNear);
-
- // Handle external strings.
- Label ascii_external, done;
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ test(result, Immediate(kIsIndirectStringMask));
- __ Assert(zero, "external string expected, but not found");
- }
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ test_b(result, kShortExternalStringMask);
- __ j(not_zero, call_runtime);
- // Check encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ test_b(result, kStringEncodingMask);
- __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
- __ j(not_equal, &ascii_external, Label::kNear);
- // Two-byte string.
- __ movzx_w(result, Operand(result, index, times_2, 0));
- __ jmp(&done, Label::kNear);
- __ bind(&ascii_external);
- // Ascii string.
- __ movzx_b(result, Operand(result, index, times_1, 0));
- __ jmp(&done, Label::kNear);
-
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii;
- __ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii, Label::kNear);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- __ movzx_w(result, FieldOperand(string,
- index,
- times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- // Ascii string.
- // Load the byte into the result register.
- __ bind(&ascii);
- __ movzx_b(result, FieldOperand(string,
- index,
- times_1,
- SeqOneByteString::kHeaderSize));
- __ bind(&done);
-}
-
-
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ test(index, Immediate(kSmiTagMask));
- __ Check(zero, "Non-smi index");
- __ test(value, Immediate(kSmiTagMask));
- __ Check(zero, "Non-smi value");
-
- __ cmp(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, "Index is too large");
-
- __ cmp(index, Immediate(Smi::FromInt(0)));
- __ Check(greater_equal, "Index is negative");
-
- __ push(value);
- __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, "Unexpected string type");
- __ pop(value);
- }
-
- __ SmiUntag(value);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ SmiUntag(index);
- __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
- } else {
- // No need to untag a smi for two-byte addressing.
- __ mov_w(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
- }
-}
-
-
-static Operand ExpConstant(int index) {
- return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2) {
- ASSERT(!input.is(double_scratch));
- ASSERT(!input.is(result));
- ASSERT(!result.is(double_scratch));
- ASSERT(!temp1.is(temp2));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
- Label done;
-
- __ movdbl(double_scratch, ExpConstant(0));
- __ xorpd(result, result);
- __ ucomisd(double_scratch, input);
- __ j(above_equal, &done);
- __ ucomisd(input, ExpConstant(1));
- __ movdbl(result, ExpConstant(2));
- __ j(above_equal, &done);
- __ movdbl(double_scratch, ExpConstant(3));
- __ movdbl(result, ExpConstant(4));
- __ mulsd(double_scratch, input);
- __ addsd(double_scratch, result);
- __ movd(temp2, double_scratch);
- __ subsd(double_scratch, result);
- __ movdbl(result, ExpConstant(6));
- __ mulsd(double_scratch, ExpConstant(5));
- __ subsd(double_scratch, input);
- __ subsd(result, double_scratch);
- __ movsd(input, double_scratch);
- __ mulsd(input, double_scratch);
- __ mulsd(result, input);
- __ mov(temp1, temp2);
- __ mulsd(result, ExpConstant(7));
- __ subsd(result, double_scratch);
- __ add(temp1, Immediate(0x1ff800));
- __ addsd(result, ExpConstant(8));
- __ and_(temp2, Immediate(0x7ff));
- __ shr(temp1, 11);
- __ shl(temp1, 20);
- __ movd(input, temp1);
- __ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
- __ movdbl(double_scratch, Operand::StaticArray(
- temp2, times_8, ExternalReference::math_exp_log_table()));
- __ por(input, double_scratch);
- __ mulsd(result, input);
- __ bind(&done);
-}
-
-#undef __
-
-static const int kNoCodeAgeSequenceLength = 5;
-
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- static bool initialized = false;
- static byte sequence[kNoCodeAgeSequenceLength];
- *length = kNoCodeAgeSequenceLength;
- if (!initialized) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found both in
- // FUNCTION and OPTIMIZED_FUNCTION code:
- CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->push(ebp);
- patcher.masm()->mov(ebp, esp);
- patcher.masm()->push(esi);
- patcher.masm()->push(edi);
- initialized = true;
- }
- return sequence;
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = (!memcmp(sequence, young_sequence, young_length));
- ASSERT(result || *sequence == kCallOpcode);
- return result;
-}
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAge;
- *parity = NO_MARKING_PARITY;
- } else {
- sequence++; // Skip the kCallOpcode byte
- Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
- Assembler::kCallTargetAddressOffset;
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
- memcpy(sequence, young_sequence, young_length);
- CPU::FlushICache(sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(age, parity);
- CodePatcher patcher(sequence, young_length);
- patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/codegen-ia32.h b/src/3rdparty/v8/src/ia32/codegen-ia32.h
deleted file mode 100644
index 5137274..0000000
--- a/src/3rdparty/v8/src/ia32/codegen-ia32.h
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_CODEGEN_IA32_H_
-#define V8_IA32_CODEGEN_IA32_H_
-
-#include "ast.h"
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations
-class CompilationInfo;
-
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator {
- public:
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Expression* type);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
-
- static Operand FixedArrayElementOperand(Register array,
- Register index_as_smi,
- int additional_offset = 0) {
- int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
- return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Factory* factory,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/cpu-ia32.cc b/src/3rdparty/v8/src/ia32/cpu-ia32.cc
deleted file mode 100644
index 9eabb2a..0000000
--- a/src/3rdparty/v8/src/ia32/cpu-ia32.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// CPU specific code for ia32 independent of OS goes here.
-
-#ifdef __GNUC__
-#include "third_party/valgrind/valgrind.h"
-#endif
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "cpu.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return CpuFeatures::IsSupported(SSE2);
-}
-
-
-void CPU::FlushICache(void* start, size_t size) {
- // No need to flush the instruction cache on Intel. On Intel instruction
- // cache flushing is only necessary when multiple cores running the same
- // code simultaneously. V8 (and JavaScript) is single threaded and when code
- // is patched on an intel CPU the core performing the patching will have its
- // own instruction cache updated automatically.
-
- // If flushing of the instruction cache becomes necessary Windows has the
- // API function FlushInstructionCache.
-
- // By default, valgrind only checks the stack for writes that might need to
- // invalidate already cached translated code. This leads to random
- // instability when code patches or moves are sometimes unnoticed. One
- // solution is to run valgrind with --smc-check=all, but this comes at a big
- // performance cost. We can notify valgrind to invalidate its cache.
-#ifdef VALGRIND_DISCARD_TRANSLATIONS
- unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
- USE(res);
-#endif
-}
-
-
-void CPU::DebugBreak() {
-#ifdef _MSC_VER
- // To avoid Visual Studio runtime support the following code can be used
- // instead
- // __asm { int 3 }
- __debugbreak();
-#else
- asm("int $3");
-#endif
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/debug-ia32.cc b/src/3rdparty/v8/src/ia32/debug-ia32.cc
deleted file mode 100644
index d153e18..0000000
--- a/src/3rdparty/v8/src/ia32/debug-ia32.cc
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen.h"
-#include "debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-// Patch the JS frame exit code with a debug break call. See
-// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-ia32.cc
-// for the precise return instructions sequence.
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- ASSERT(Assembler::kJSReturnSequenceLength >=
- Assembler::kCallInstructionLength);
- Isolate* isolate = Isolate::Current();
- rinfo()->PatchCodeWithCall(isolate->debug()->debug_break_return()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
-}
-
-
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceLength);
-}
-
-
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- Isolate* isolate = Isolate::Current();
- rinfo()->PatchCodeWithCall(
- isolate->debug()->debug_break_slot()->entry(),
- Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
-}
-
-// All debug break stubs support padding for LiveEdit.
-const bool Debug::FramePaddingLayout::kIsSupported = true;
-
-
-#define __ ACCESS_MASM(masm)
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs,
- bool convert_call_to_jmp) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
- __ push(Immediate(Smi::FromInt(
- Debug::FramePaddingLayout::kPaddingValue)));
- }
- __ push(Immediate(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)));
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
- }
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ test(reg, Immediate(0xc0000000));
- __ Assert(zero, "Unable to encode value as smi");
- }
- __ SmiTag(reg);
- __ push(reg);
- }
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ Set(eax, Immediate(0)); // No arguments.
- __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Automatically find register that could be used after register restore.
- // We need one register for padding skip instructions.
- Register unused_reg = { -1 };
-
- // Restore the register values containing object pointers from the
- // expression stack.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Set(reg, Immediate(kDebugZapValue));
- }
- bool taken = reg.code() == esi.code();
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
- taken = true;
- }
- if ((non_object_regs & (1 << r)) != 0) {
- __ pop(reg);
- __ SmiUntag(reg);
- taken = true;
- }
- if (!taken) {
- unused_reg = reg;
- }
- }
-
- ASSERT(unused_reg.code() != -1);
-
- // Read current padding counter and skip corresponding number of words.
- __ pop(unused_reg);
- // We divide stored value by 2 (untagging) and multiply it by word's size.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
-
- // Get rid of the internal frame.
- }
-
- // If this call did not replace a call but patched other code then there will
- // be an unwanted return address left on the stack. Here we get rid of that.
- if (convert_call_to_jmp) {
- __ add(esp, Immediate(kPointerSize));
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
- __ jmp(Operand::StaticVariable(after_break_target));
-}
-
-
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for IC load call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
-}
-
-
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for IC store call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
-}
-
-
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC load call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
-}
-
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC load call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC call call (from ic-ia32.cc)
- // ----------- S t a t e -------------
- // -- ecx: name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, ecx.bit(), 0, false);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // Register state just before return from JS function (from codegen-ia32.cc).
- // ----------- S t a t e -------------
- // -- eax: return value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
-}
-
-
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-ia32.cc).
- // ----------- S t a t e -------------
- // -- edi: function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-ia32.cc).
- // ----------- S t a t e -------------
- // -- ebx: cache cell for call target
- // -- edi: function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-ia32.cc).
- // eax is the actual number of arguments not encoded as a smi see comment
- // above IC call.
- // ----------- S t a t e -------------
- // -- eax: number of arguments (not smi)
- // -- edi: constructor function
- // -----------------------------------
- // The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
-}
-
-
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-ia32.cc).
- // eax is the actual number of arguments not encoded as a smi see comment
- // above IC call.
- // ----------- S t a t e -------------
- // -- eax: number of arguments (not smi)
- // -- ebx: cache cell for call target
- // -- edi: constructor function
- // -----------------------------------
- // The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), eax.bit(), false);
-}
-
-
-void Debug::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction.
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- __ Nop(Assembler::kDebugBreakSlotLength);
- ASSERT_EQ(Assembler::kDebugBreakSlotLength,
- masm->SizeOfCodeGeneratedSince(&check_codesize));
-}
-
-
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0, true);
-}
-
-
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
- masm->isolate());
- __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
-
- // We do not know our frame height, but set esp based on ebp.
- __ lea(esp, Operand(ebp, -1 * kPointerSize));
-
- __ pop(edi); // Function.
- __ pop(ebp);
-
- // Load context from the function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Get function code.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
-
- // Re-run JSFunction, edi is function, esi is context.
- __ jmp(edx);
-}
-
-const bool Debug::kFrameDropperSupported = true;
-
-#undef __
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc b/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc
deleted file mode 100644
index e27ea4c..0000000
--- a/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc
+++ /dev/null
@@ -1,1184 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-const int Deoptimizer::table_entry_size_ = 10;
-
-
-int Deoptimizer::patch_size() {
- return Assembler::kCallInstructionLength;
-}
-
-
-void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
- Isolate* isolate = code->GetIsolate();
- HandleScope scope(isolate);
-
- // Compute the size of relocation information needed for the code
- // patching in Deoptimizer::DeoptimizeFunction.
- int min_reloc_size = 0;
- int prev_pc_offset = 0;
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- int pc_offset = deopt_data->Pc(i)->value();
- if (pc_offset == -1) continue;
- ASSERT_GE(pc_offset, prev_pc_offset);
- int pc_delta = pc_offset - prev_pc_offset;
- // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
- // if encodable with small pc delta encoding and up to 6 bytes
- // otherwise.
- if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
- min_reloc_size += 2;
- } else {
- min_reloc_size += 6;
- }
- prev_pc_offset = pc_offset;
- }
-
- // If the relocation information is not big enough we create a new
- // relocation info object that is padded with comments to make it
- // big enough for lazy doptimization.
- int reloc_length = code->relocation_info()->length();
- if (min_reloc_size > reloc_length) {
- int comment_reloc_size = RelocInfo::kMinRelocCommentSize;
- // Padding needed.
- int min_padding = min_reloc_size - reloc_length;
- // Number of comments needed to take up at least that much space.
- int additional_comments =
- (min_padding + comment_reloc_size - 1) / comment_reloc_size;
- // Actual padding size.
- int padding = additional_comments * comment_reloc_size;
- // Allocate new relocation info and copy old relocation to the end
- // of the new relocation info array because relocation info is
- // written and read backwards.
- Factory* factory = isolate->factory();
- Handle<ByteArray> new_reloc =
- factory->NewByteArray(reloc_length + padding, TENURED);
- memcpy(new_reloc->GetDataStartAddress() + padding,
- code->relocation_info()->GetDataStartAddress(),
- reloc_length);
- // Create a relocation writer to write the comments in the padding
- // space. Use position 0 for everything to ensure short encoding.
- RelocInfoWriter reloc_info_writer(
- new_reloc->GetDataStartAddress() + padding, 0);
- intptr_t comment_string
- = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
- RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
- for (int i = 0; i < additional_comments; ++i) {
-#ifdef DEBUG
- byte* pos_before = reloc_info_writer.pos();
-#endif
- reloc_info_writer.Write(&rinfo);
- ASSERT(RelocInfo::kMinRelocCommentSize ==
- pos_before - reloc_info_writer.pos());
- }
- // Replace relocation information on the code object.
- code->set_relocation_info(*new_reloc);
- }
-}
-
-
-void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction* function) {
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
-
- ASSERT(function->IsOptimized());
- ASSERT(function->FunctionsInFunctionListShareSameCode());
-
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
- // Get the optimized code.
- Code* code = function->code();
- Address code_start_address = code->instruction_start();
-
- // We will overwrite the code's relocation info in-place. Relocation info
- // is written backward. The relocation info is the payload of a byte
- // array. Later on we will slide this to the start of the byte array and
- // create a filler object in the remaining space.
- ByteArray* reloc_info = code->relocation_info();
- Address reloc_end_address = reloc_info->address() + reloc_info->Size();
- RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
-
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
-
- // Since the call is a relative encoding, write new
- // reloc info. We do not need any of the existing reloc info because the
- // existing code will not be used again (we zap it in debug builds).
- //
- // Emit call to lazy deoptimization at all lazy deopt points.
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
-#ifdef DEBUG
- Address prev_call_address = NULL;
-#endif
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- if (deopt_data->Pc(i)->value() == -1) continue;
- // Patch lazy deoptimization entry.
- Address call_address = code_start_address + deopt_data->Pc(i)->value();
- CodePatcher patcher(call_address, patch_size());
- Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
- patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
- // We use RUNTIME_ENTRY for deoptimization bailouts.
- RelocInfo rinfo(call_address + 1, // 1 after the call opcode.
- RelocInfo::RUNTIME_ENTRY,
- reinterpret_cast<intptr_t>(deopt_entry),
- NULL);
- reloc_info_writer.Write(&rinfo);
- ASSERT_GE(reloc_info_writer.pos(),
- reloc_info->address() + ByteArray::kHeaderSize);
- ASSERT(prev_call_address == NULL ||
- call_address >= prev_call_address + patch_size());
- ASSERT(call_address + patch_size() <= code->instruction_end());
-#ifdef DEBUG
- prev_call_address = call_address;
-#endif
- }
-
- // Move the relocation info to the beginning of the byte array.
- int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
- memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
-
- // The relocation info is in place, update the size.
- reloc_info->set_length(new_reloc_size);
-
- // Handle the junk part after the new relocation info. We will create
- // a non-live object in the extra space at the end of the former reloc info.
- Address junk_address = reloc_info->address() + reloc_info->Size();
- ASSERT(junk_address <= reloc_end_address);
- isolate->heap()->CreateFillerObjectAt(junk_address,
- reloc_end_address - junk_address);
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
- }
-}
-
-
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x13;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(check_code->entry(),
- Assembler::target_address_at(call_target_address));
- // The back edge bookkeeping code matches the pattern:
- //
- // sub <profiling_counter>, <delta>
- // jns ok
- // call <stack guard>
- // test eax, <loop nesting depth>
- // ok: ...
- //
- // We will patch away the branch so the code is:
- //
- // sub <profiling_counter>, <delta> ;; Not changed
- // nop
- // nop
- // call <on-stack replacment>
- // test eax, <loop nesting depth>
- // ok:
-
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(replacement_code->entry(),
- Assembler::target_address_at(call_target_address));
-
- // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
- // restore the conditional branch.
- ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- Assembler::set_target_address_at(call_target_address,
- check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, check_code);
-}
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
- // TODO(kasperl): This should not be the bailout_id_. It should be
- // the ast id. Confusing.
- ASSERT(bailout_id_ == ast_id);
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Next(); // Drop JS frames count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d, ebp:esp=0x%08x:0x%08x]\n",
- ast_id,
- input_frame_size,
- output_frame_size,
- input_->GetRegister(ebp.code()),
- input_->GetRegister(esp.code()));
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // All OSR stack frames are dynamically aligned to an 8-byte boundary.
- int frame_pointer = input_->GetRegister(ebp.code());
- if ((frame_pointer & kPointerSize) != 0) {
- frame_pointer -= kPointerSize;
- has_alignment_padding_ = 1;
- }
-
- int32_t alignment_state = (has_alignment_padding_ == 1) ?
- kAlignmentPaddingPushed :
- kNoAlignmentPadding;
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08x ; (alignment state)\n",
- output_offset,
- alignment_state);
- }
- output_[0]->SetFrameSlot(output_offset, alignment_state);
- output_offset -= kPointerSize;
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(ebp.code(), frame_pointer);
- output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation =
- function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
-void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
- int frame_index) {
- //
- // FROM TO
- // | .... | | .... |
- // +-------------------------+ +-------------------------+
- // | JSFunction continuation | | JSFunction continuation |
- // +-------------------------+ +-------------------------+
- // | | saved frame (ebp) | | saved frame (ebp) |
- // | +=========================+<-ebp +=========================+<-ebp
- // | | JSFunction context | | JSFunction context |
- // v +-------------------------+ +-------------------------|
- // | COMPILED_STUB marker | | STUB_FAILURE marker |
- // +-------------------------+ +-------------------------+
- // | | | caller args.arguments_ |
- // | ... | +-------------------------+
- // | | | caller args.length_ |
- // |-------------------------|<-esp +-------------------------+
- // | caller args pointer |
- // +-------------------------+
- // | caller stack param 1 |
- // parameters in registers +-------------------------+
- // and spilled to stack | .... |
- // +-------------------------+
- // | caller stack param n |
- // +-------------------------+<-esp
- // eax = number of parameters
- // ebx = failure handler address
- // ebp = saved frame
- // esi = JSFunction context
- //
-
- ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
- int major_key = compiled_code_->major_key();
- CodeStubInterfaceDescriptor* descriptor =
- isolate_->code_stub_interface_descriptor(major_key);
-
- // The output frame must have room for all pushed register parameters
- // and the standard stack frame slots. Include space for an argument
- // object to the callee and optionally the space to pass the argument
- // object to the stub failure handler.
- int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
- sizeof(Arguments) + kPointerSize;
- int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
- int input_frame_size = input_->GetFrameSize();
- int output_frame_size = height_in_bytes + fixed_frame_size;
- if (trace_) {
- PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
- CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
- height_in_bytes);
- }
-
- // The stub failure trampoline is a single frame.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, NULL);
- output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ASSERT(frame_index == 0);
- output_[frame_index] = output_frame;
-
- // The top address for the output frame can be computed from the input
- // frame pointer and the output frame's height. Subtract space for the
- // context and function slots.
- intptr_t top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
- height_in_bytes;
- output_frame->SetTop(top_address);
-
- // Read caller's PC (JSFunction continuation) from the input frame.
- intptr_t input_frame_offset = input_frame_size - kPointerSize;
- intptr_t output_frame_offset = output_frame_size - kPointerSize;
- intptr_t value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Read caller's FP from the input frame, and set this frame's FP.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- intptr_t frame_ptr = input_->GetRegister(ebp.code());
- output_frame->SetRegister(ebp.code(), frame_ptr);
- output_frame->SetFp(frame_ptr);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // The context can be gotten from the input frame.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetRegister(esi.code(), value);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_frame_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (stub fail sentinel)\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- int caller_arg_count = 0;
- if (descriptor->stack_parameter_count_ != NULL) {
- caller_arg_count =
- input_->GetRegister(descriptor->stack_parameter_count_->code());
- }
-
- // Build the Arguments object for the caller's parameters and a pointer to it.
- output_frame_offset -= kPointerSize;
- value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
- (caller_arg_count - 1) * kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.arguments\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = caller_arg_count;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.length\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = frame_ptr - (output_frame_size - output_frame_offset) -
- StandardFrameConstants::kMarkerOffset + kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args*\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Copy the register parameters to the failure frame.
- for (int i = 0; i < descriptor->register_param_count_; ++i) {
- output_frame_offset -= kPointerSize;
- DoTranslateCommand(iterator, 0, output_frame_offset);
- }
-
- ASSERT(0 == output_frame_offset);
-
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-
- intptr_t handler =
- reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
- output_frame->SetRegister(eax.code(), params);
- output_frame->SetRegister(ebx.code(), handler);
-
- // Compute this frame's PC, state, and continuation.
- Code* trampoline = NULL;
- int extra = descriptor->extra_expression_stack_count_;
- StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
- ASSERT(trampoline != NULL);
- output_frame->SetPc(reinterpret_cast<intptr_t>(
- trampoline->instruction_start()));
- output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
- Code* notify_failure =
- isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(notify_failure->entry()));
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = 7 * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
- // Construct stub can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The output frame reflects a JSConstructStubGeneric frame.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
-
- ASSERT(0 == output_offset);
-
- uint32_t pc = reinterpret_cast<uint32_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
-
- unsigned alignment_state_offset =
- input_offset - parameter_count * kPointerSize -
- StandardFrameConstants::kFixedFrameSize -
- kPointerSize;
- ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
- JavaScriptFrameConstants::kLocal0Offset);
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
- has_alignment_padding_ =
- (alignment_state == kAlignmentPaddingPushed) ? 1 : 0;
- // 2 = context and function in the frame.
- // If the optimized frame had alignment padding, adjust the frame pointer
- // to point to the new position of the old frame pointer after padding
- // is removed. Subtract 2 * kPointerSize for the context and function slots.
- top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
- height_in_bytes + has_alignment_padding_ * kPointerSize;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost ||
- (input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize) ==
- fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
- ASSERT(!is_bottommost || !has_alignment_padding_ ||
- (fp_value & kPointerSize) != 0);
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<uint32_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(esi.code(), value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
-}
-
-
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers ebp and esp are set to the correct values though.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
- }
-}
-
-
-#define __ masm()->
-
-void Deoptimizer::EntryGenerator::Generate() {
- GeneratePrologue();
-
- Isolate* isolate = masm()->isolate();
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kNumAllocatableRegisters;
- __ sub(esp, Immediate(kDoubleRegsSize));
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movdbl(Operand(esp, offset), xmm_reg);
- }
- }
-
- __ pushad();
-
- const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
- kDoubleRegsSize;
-
- // Get the bailout id from the stack.
- __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
-
- // Get the address of the location in the code object if possible
- // and compute the fp-to-sp delta in register edx.
- if (type() == EAGER) {
- __ Set(ecx, Immediate(0));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- } else {
- __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
- }
- __ sub(edx, ebp);
- __ neg(edx);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, eax);
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type.
- __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
- __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
- __ mov(Operand(esp, 5 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
- }
-
- // Preserve deoptimizer object in register eax and get the input
- // frame descriptor pointer.
- __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
-
- // Fill in the input registers.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(ebx, offset));
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- // Fill in the double input registers.
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
- __ movdbl(xmm0, Operand(esp, src_offset));
- __ movdbl(Operand(ebx, dst_offset), xmm0);
- }
- }
-
- // Clear FPU all exceptions.
- // TODO(ulan): Find out why the TOP register is not zero here in some cases,
- // and check that the generated code never deoptimizes with unbalanced stack.
- __ fnclex();
-
- // Remove the bailout id and the double registers from the stack.
- if (type() == EAGER) {
- __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
- } else {
- __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
- }
-
- // Compute a pointer to the unwinding limit in register ecx; that is
- // the first stack slot not part of the input frame.
- __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
- __ add(ecx, esp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
- Label pop_loop_header;
- __ jmp(&pop_loop_header);
- Label pop_loop;
- __ bind(&pop_loop);
- __ pop(Operand(edx, 0));
- __ add(edx, Immediate(sizeof(uint32_t)));
- __ bind(&pop_loop_header);
- __ cmp(ecx, esp);
- __ j(not_equal, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(eax);
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0 * kPointerSize), eax);
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
- }
- __ pop(eax);
-
- if (type() != OSR) {
- // If frame was dynamically aligned, pop padding.
- Label no_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_padding);
- __ pop(ecx);
- if (FLAG_debug_code) {
- __ cmp(ecx, Immediate(kAlignmentZapValue));
- __ Assert(equal, "alignment marker expected");
- }
- __ bind(&no_padding);
- } else {
- // If frame needs dynamic alignment push padding.
- Label no_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_padding);
- __ push(Immediate(kAlignmentZapValue));
- __ bind(&no_padding);
- }
-
- // Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
- // Outer loop state: eax = current FrameDescription**, edx = one past the
- // last FrameDescription**.
- __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
- __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
- __ lea(edx, Operand(eax, edx, times_4, 0));
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
- __ mov(ebx, Operand(eax, 0));
- __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ sub(ecx, Immediate(sizeof(uint32_t)));
- __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
- __ bind(&inner_loop_header);
- __ test(ecx, ecx);
- __ j(not_zero, &inner_push_loop);
- __ add(eax, Immediate(kPointerSize));
- __ bind(&outer_loop_header);
- __ cmp(eax, edx);
- __ j(below, &outer_push_loop);
-
- // In case of OSR or a failed STUB, we have to restore the XMM registers.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movdbl(xmm_reg, Operand(ebx, src_offset));
- }
- }
-
- // Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ push(Operand(ebx, FrameDescription::state_offset()));
- }
- __ push(Operand(ebx, FrameDescription::pc_offset()));
- __ push(Operand(ebx, FrameDescription::continuation_offset()));
-
-
- // Push the registers from the last output frame.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(ebx, offset));
- }
-
- // Restore the registers from the stack.
- __ popad();
-
- // Return to the continuation point.
- __ ret(0);
-}
-
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ push_imm32(i);
- __ jmp(&done);
- ASSERT(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
-}
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/disasm-ia32.cc b/src/3rdparty/v8/src/ia32/disasm-ia32.cc
deleted file mode 100644
index 1193f2a..0000000
--- a/src/3rdparty/v8/src/ia32/disasm-ia32.cc
+++ /dev/null
@@ -1,1728 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdarg.h>
-
-#include "v8.h"
-
-#undef CONST
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "disasm.h"
-
-namespace disasm {
-
-enum OperandOrder {
- UNSET_OP_ORDER = 0,
- REG_OPER_OP_ORDER,
- OPER_REG_OP_ORDER
-};
-
-
-//------------------------------------------------------------------
-// Tables
-//------------------------------------------------------------------
-struct ByteMnemonic {
- int b; // -1 terminates, otherwise must be in range (0..255)
- const char* mnem;
- OperandOrder op_order_;
-};
-
-
-static const ByteMnemonic two_operands_instr[] = {
- {0x01, "add", OPER_REG_OP_ORDER},
- {0x03, "add", REG_OPER_OP_ORDER},
- {0x09, "or", OPER_REG_OP_ORDER},
- {0x0B, "or", REG_OPER_OP_ORDER},
- {0x1B, "sbb", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER},
- {0x23, "and", REG_OPER_OP_ORDER},
- {0x29, "sub", OPER_REG_OP_ORDER},
- {0x2A, "subb", REG_OPER_OP_ORDER},
- {0x2B, "sub", REG_OPER_OP_ORDER},
- {0x31, "xor", OPER_REG_OP_ORDER},
- {0x33, "xor", REG_OPER_OP_ORDER},
- {0x38, "cmpb", OPER_REG_OP_ORDER},
- {0x3A, "cmpb", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER},
- {0x84, "test_b", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER},
- {0x87, "xchg", REG_OPER_OP_ORDER},
- {0x8A, "mov_b", REG_OPER_OP_ORDER},
- {0x8B, "mov", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static const ByteMnemonic zero_operands_instr[] = {
- {0xC3, "ret", UNSET_OP_ORDER},
- {0xC9, "leave", UNSET_OP_ORDER},
- {0x90, "nop", UNSET_OP_ORDER},
- {0xF4, "hlt", UNSET_OP_ORDER},
- {0xCC, "int3", UNSET_OP_ORDER},
- {0x60, "pushad", UNSET_OP_ORDER},
- {0x61, "popad", UNSET_OP_ORDER},
- {0x9C, "pushfd", UNSET_OP_ORDER},
- {0x9D, "popfd", UNSET_OP_ORDER},
- {0x9E, "sahf", UNSET_OP_ORDER},
- {0x99, "cdq", UNSET_OP_ORDER},
- {0x9B, "fwait", UNSET_OP_ORDER},
- {0xFC, "cld", UNSET_OP_ORDER},
- {0xAB, "stos", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static const ByteMnemonic call_jump_instr[] = {
- {0xE8, "call", UNSET_OP_ORDER},
- {0xE9, "jmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static const ByteMnemonic short_immediate_instr[] = {
- {0x05, "add", UNSET_OP_ORDER},
- {0x0D, "or", UNSET_OP_ORDER},
- {0x15, "adc", UNSET_OP_ORDER},
- {0x25, "and", UNSET_OP_ORDER},
- {0x2D, "sub", UNSET_OP_ORDER},
- {0x35, "xor", UNSET_OP_ORDER},
- {0x3D, "cmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-// Generally we don't want to generate these because they are subject to partial
-// register stalls. They are included for completeness and because the cmp
-// variant is used by the RecordWrite stub. Because it does not update the
-// register it is not subject to partial register stalls.
-static ByteMnemonic byte_immediate_instr[] = {
- {0x0c, "or", UNSET_OP_ORDER},
- {0x24, "and", UNSET_OP_ORDER},
- {0x34, "xor", UNSET_OP_ORDER},
- {0x3c, "cmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static const char* const jump_conditional_mnem[] = {
- /*0*/ "jo", "jno", "jc", "jnc",
- /*4*/ "jz", "jnz", "jna", "ja",
- /*8*/ "js", "jns", "jpe", "jpo",
- /*12*/ "jl", "jnl", "jng", "jg"
-};
-
-
-static const char* const set_conditional_mnem[] = {
- /*0*/ "seto", "setno", "setc", "setnc",
- /*4*/ "setz", "setnz", "setna", "seta",
- /*8*/ "sets", "setns", "setpe", "setpo",
- /*12*/ "setl", "setnl", "setng", "setg"
-};
-
-
-static const char* const conditional_move_mnem[] = {
- /*0*/ "cmovo", "cmovno", "cmovc", "cmovnc",
- /*4*/ "cmovz", "cmovnz", "cmovna", "cmova",
- /*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
- /*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"
-};
-
-
-enum InstructionType {
- NO_INSTR,
- ZERO_OPERANDS_INSTR,
- TWO_OPERANDS_INSTR,
- JUMP_CONDITIONAL_SHORT_INSTR,
- REGISTER_INSTR,
- MOVE_REG_INSTR,
- CALL_JUMP_INSTR,
- SHORT_IMMEDIATE_INSTR,
- BYTE_IMMEDIATE_INSTR
-};
-
-
-struct InstructionDesc {
- const char* mnem;
- InstructionType type;
- OperandOrder op_order_;
-};
-
-
-class InstructionTable {
- public:
- InstructionTable();
- const InstructionDesc& Get(byte x) const { return instructions_[x]; }
- static InstructionTable* get_instance() {
- static InstructionTable table;
- return &table;
- }
-
- private:
- InstructionDesc instructions_[256];
- void Clear();
- void Init();
- void CopyTable(const ByteMnemonic bm[], InstructionType type);
- void SetTableRange(InstructionType type,
- byte start,
- byte end,
- const char* mnem);
- void AddJumpConditionalShort();
-};
-
-
-InstructionTable::InstructionTable() {
- Clear();
- Init();
-}
-
-
-void InstructionTable::Clear() {
- for (int i = 0; i < 256; i++) {
- instructions_[i].mnem = "";
- instructions_[i].type = NO_INSTR;
- instructions_[i].op_order_ = UNSET_OP_ORDER;
- }
-}
-
-
-void InstructionTable::Init() {
- CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
- CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
- CopyTable(call_jump_instr, CALL_JUMP_INSTR);
- CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
- CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
- AddJumpConditionalShort();
- SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
- SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
- SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push");
- SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop");
- SetTableRange(REGISTER_INSTR, 0x91, 0x97, "xchg eax,"); // 0x90 is nop.
- SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
-}
-
-
-void InstructionTable::CopyTable(const ByteMnemonic bm[],
- InstructionType type) {
- for (int i = 0; bm[i].b >= 0; i++) {
- InstructionDesc* id = &instructions_[bm[i].b];
- id->mnem = bm[i].mnem;
- id->op_order_ = bm[i].op_order_;
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
- id->type = type;
- }
-}
-
-
-void InstructionTable::SetTableRange(InstructionType type,
- byte start,
- byte end,
- const char* mnem) {
- for (byte b = start; b <= end; b++) {
- InstructionDesc* id = &instructions_[b];
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
- id->mnem = mnem;
- id->type = type;
- }
-}
-
-
-void InstructionTable::AddJumpConditionalShort() {
- for (byte b = 0x70; b <= 0x7F; b++) {
- InstructionDesc* id = &instructions_[b];
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
- id->mnem = jump_conditional_mnem[b & 0x0F];
- id->type = JUMP_CONDITIONAL_SHORT_INSTR;
- }
-}
-
-
-// The IA32 disassembler implementation.
-class DisassemblerIA32 {
- public:
- DisassemblerIA32(const NameConverter& converter,
- bool abort_on_unimplemented = true)
- : converter_(converter),
- instruction_table_(InstructionTable::get_instance()),
- tmp_buffer_pos_(0),
- abort_on_unimplemented_(abort_on_unimplemented) {
- tmp_buffer_[0] = '\0';
- }
-
- virtual ~DisassemblerIA32() {}
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
-
- private:
- const NameConverter& converter_;
- InstructionTable* instruction_table_;
- v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
- unsigned int tmp_buffer_pos_;
- bool abort_on_unimplemented_;
-
- enum {
- eax = 0,
- ecx = 1,
- edx = 2,
- ebx = 3,
- esp = 4,
- ebp = 5,
- esi = 6,
- edi = 7
- };
-
-
- enum ShiftOpcodeExtension {
- kROL = 0,
- kROR = 1,
- kRCL = 2,
- kRCR = 3,
- kSHL = 4,
- KSHR = 5,
- kSAR = 7
- };
-
-
- const char* NameOfCPURegister(int reg) const {
- return converter_.NameOfCPURegister(reg);
- }
-
-
- const char* NameOfByteCPURegister(int reg) const {
- return converter_.NameOfByteCPURegister(reg);
- }
-
-
- const char* NameOfXMMRegister(int reg) const {
- return converter_.NameOfXMMRegister(reg);
- }
-
-
- const char* NameOfAddress(byte* addr) const {
- return converter_.NameOfAddress(addr);
- }
-
-
- // Disassembler helper functions.
- static void get_modrm(byte data, int* mod, int* regop, int* rm) {
- *mod = (data >> 6) & 3;
- *regop = (data & 0x38) >> 3;
- *rm = data & 7;
- }
-
-
- static void get_sib(byte data, int* scale, int* index, int* base) {
- *scale = (data >> 6) & 3;
- *index = (data >> 3) & 7;
- *base = data & 7;
- }
-
- typedef const char* (DisassemblerIA32::*RegisterNameMapping)(int reg) const;
-
- int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
- int PrintRightOperand(byte* modrmp);
- int PrintRightByteOperand(byte* modrmp);
- int PrintRightXMMOperand(byte* modrmp);
- int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
- int PrintImmediateOp(byte* data);
- int F7Instruction(byte* data);
- int D1D3C1Instruction(byte* data);
- int JumpShort(byte* data);
- int JumpConditional(byte* data, const char* comment);
- int JumpConditionalShort(byte* data, const char* comment);
- int SetCC(byte* data);
- int CMov(byte* data);
- int FPUInstruction(byte* data);
- int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
- int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
- void AppendToBuffer(const char* format, ...);
-
-
- void UnimplementedInstruction() {
- if (abort_on_unimplemented_) {
- UNIMPLEMENTED();
- } else {
- AppendToBuffer("'Unimplemented Instruction'");
- }
- }
-};
-
-
-void DisassemblerIA32::AppendToBuffer(const char* format, ...) {
- v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
- va_list args;
- va_start(args, format);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
- va_end(args);
- tmp_buffer_pos_ += result;
-}
-
-int DisassemblerIA32::PrintRightOperandHelper(
- byte* modrmp,
- RegisterNameMapping direct_register_name) {
- int mod, regop, rm;
- get_modrm(*modrmp, &mod, &regop, &rm);
- RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
- &DisassemblerIA32::NameOfCPURegister;
- switch (mod) {
- case 0:
- if (rm == ebp) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp+1);
- AppendToBuffer("[0x%x]", disp);
- return 5;
- } else if (rm == esp) {
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- if (index == esp && base == esp && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s]", (this->*register_name)(rm));
- return 2;
- } else if (base == ebp) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
- (this->*register_name)(index),
- 1 << scale,
- disp);
- return 6;
- } else if (index != esp && base != ebp) {
- // [base+index*scale]
- AppendToBuffer("[%s+%s*%d]",
- (this->*register_name)(base),
- (this->*register_name)(index),
- 1 << scale);
- return 2;
- } else {
- UnimplementedInstruction();
- return 1;
- }
- } else {
- AppendToBuffer("[%s]", (this->*register_name)(rm));
- return 1;
- }
- break;
- case 1: // fall through
- case 2:
- if (rm == esp) {
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- int disp =
- mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) : *(modrmp + 2);
- if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
- } else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
- (this->*register_name)(base),
- (this->*register_name)(index),
- 1 << scale,
- disp);
- }
- return mod == 2 ? 6 : 3;
- } else {
- // No sib.
- int disp =
- mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) : *(modrmp + 1);
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
- return mod == 2 ? 5 : 2;
- }
- break;
- case 3:
- AppendToBuffer("%s", (this->*register_name)(rm));
- return 1;
- default:
- UnimplementedInstruction();
- return 1;
- }
- UNREACHABLE();
-}
-
-
-int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp, &DisassemblerIA32::NameOfCPURegister);
-}
-
-
-int DisassemblerIA32::PrintRightByteOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerIA32::NameOfByteCPURegister);
-}
-
-
-int DisassemblerIA32::PrintRightXMMOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerIA32::NameOfXMMRegister);
-}
-
-
-// Returns number of bytes used including the current *data.
-// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
-int DisassemblerIA32::PrintOperands(const char* mnem,
- OperandOrder op_order,
- byte* data) {
- byte modrm = *data;
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- int advance = 0;
- switch (op_order) {
- case REG_OPER_OP_ORDER: {
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
- advance = PrintRightOperand(data);
- break;
- }
- case OPER_REG_OP_ORDER: {
- AppendToBuffer("%s ", mnem);
- advance = PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return advance;
-}
-
-
-// Returns number of bytes used by machine instruction, including *data byte.
-// Writes immediate instructions to 'tmp_buffer_'.
-int DisassemblerIA32::PrintImmediateOp(byte* data) {
- bool sign_extension_bit = (*data & 0x02) != 0;
- byte modrm = *(data+1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- const char* mnem = "Imm???";
- switch (regop) {
- case 0: mnem = "add"; break;
- case 1: mnem = "or"; break;
- case 2: mnem = "adc"; break;
- case 4: mnem = "and"; break;
- case 5: mnem = "sub"; break;
- case 6: mnem = "xor"; break;
- case 7: mnem = "cmp"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data+1);
- if (sign_extension_bit) {
- AppendToBuffer(",0x%x", *(data + 1 + count));
- return 1 + count + 1 /*int8*/;
- } else {
- AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
- return 1 + count + 4 /*int32_t*/;
- }
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::F7Instruction(byte* data) {
- ASSERT_EQ(0xF7, *data);
- byte modrm = *(data+1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- if (mod == 3 && regop != 0) {
- const char* mnem = NULL;
- switch (regop) {
- case 2: mnem = "not"; break;
- case 3: mnem = "neg"; break;
- case 4: mnem = "mul"; break;
- case 5: mnem = "imul"; break;
- case 7: mnem = "idiv"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s %s", mnem, NameOfCPURegister(rm));
- return 2;
- } else if (mod == 3 && regop == eax) {
- int32_t imm = *reinterpret_cast<int32_t*>(data+2);
- AppendToBuffer("test %s,0x%x", NameOfCPURegister(rm), imm);
- return 6;
- } else if (regop == eax) {
- AppendToBuffer("test ");
- int count = PrintRightOperand(data+1);
- int32_t imm = *reinterpret_cast<int32_t*>(data+1+count);
- AppendToBuffer(",0x%x", imm);
- return 1+count+4 /*int32_t*/;
- } else {
- UnimplementedInstruction();
- return 2;
- }
-}
-
-int DisassemblerIA32::D1D3C1Instruction(byte* data) {
- byte op = *data;
- ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
- byte modrm = *(data+1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- int imm8 = -1;
- int num_bytes = 2;
- if (mod == 3) {
- const char* mnem = NULL;
- switch (regop) {
- case kROL: mnem = "rol"; break;
- case kROR: mnem = "ror"; break;
- case kRCL: mnem = "rcl"; break;
- case kRCR: mnem = "rcr"; break;
- case kSHL: mnem = "shl"; break;
- case KSHR: mnem = "shr"; break;
- case kSAR: mnem = "sar"; break;
- default: UnimplementedInstruction();
- }
- if (op == 0xD1) {
- imm8 = 1;
- } else if (op == 0xC1) {
- imm8 = *(data+2);
- num_bytes = 3;
- } else if (op == 0xD3) {
- // Shift/rotate by cl.
- }
- ASSERT_NE(NULL, mnem);
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
- if (imm8 > 0) {
- AppendToBuffer("%d", imm8);
- } else {
- AppendToBuffer("cl");
- }
- } else {
- UnimplementedInstruction();
- }
- return num_bytes;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::JumpShort(byte* data) {
- ASSERT_EQ(0xEB, *data);
- byte b = *(data+1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- AppendToBuffer("jmp %s", NameOfAddress(dest));
- return 2;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data+1) & 0x0F;
- byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
- const char* mnem = jump_conditional_mnem[cond];
- AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
- if (comment != NULL) {
- AppendToBuffer(", %s", comment);
- }
- return 6; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
- byte cond = *data & 0x0F;
- byte b = *(data+1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- const char* mnem = jump_conditional_mnem[cond];
- AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
- if (comment != NULL) {
- AppendToBuffer(", %s", comment);
- }
- return 2;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::SetCC(byte* data) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data+1) & 0x0F;
- const char* mnem = set_conditional_mnem[cond];
- AppendToBuffer("%s ", mnem);
- PrintRightByteOperand(data+2);
- return 3; // Includes 0x0F.
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::CMov(byte* data) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data + 1) & 0x0F;
- const char* mnem = conditional_move_mnem[cond];
- int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
- return 2 + op_size; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::FPUInstruction(byte* data) {
- byte escape_opcode = *data;
- ASSERT_EQ(0xD8, escape_opcode & 0xF8);
- byte modrm_byte = *(data+1);
-
- if (modrm_byte >= 0xC0) {
- return RegisterFPUInstruction(escape_opcode, modrm_byte);
- } else {
- return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
- }
-}
-
-int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
- int modrm_byte,
- byte* modrm_start) {
- const char* mnem = "?";
- int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
- switch (escape_opcode) {
- case 0xD9: switch (regop) {
- case 0: mnem = "fld_s"; break;
- case 3: mnem = "fstp_s"; break;
- case 7: mnem = "fstcw"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDB: switch (regop) {
- case 0: mnem = "fild_s"; break;
- case 1: mnem = "fisttp_s"; break;
- case 2: mnem = "fist_s"; break;
- case 3: mnem = "fistp_s"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDD: switch (regop) {
- case 0: mnem = "fld_d"; break;
- case 1: mnem = "fisttp_d"; break;
- case 2: mnem = "fst_d"; break;
- case 3: mnem = "fstp_d"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDF: switch (regop) {
- case 5: mnem = "fild_d"; break;
- case 7: mnem = "fistp_d"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(modrm_start);
- return count + 1;
-}
-
-int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
- byte modrm_byte) {
- bool has_register = false; // Is the FPU register encoded in modrm_byte?
- const char* mnem = "?";
-
- switch (escape_opcode) {
- case 0xD8:
- UnimplementedInstruction();
- break;
-
- case 0xD9:
- switch (modrm_byte & 0xF8) {
- case 0xC0:
- mnem = "fld";
- has_register = true;
- break;
- case 0xC8:
- mnem = "fxch";
- has_register = true;
- break;
- default:
- switch (modrm_byte) {
- case 0xE0: mnem = "fchs"; break;
- case 0xE1: mnem = "fabs"; break;
- case 0xE4: mnem = "ftst"; break;
- case 0xE8: mnem = "fld1"; break;
- case 0xEB: mnem = "fldpi"; break;
- case 0xED: mnem = "fldln2"; break;
- case 0xEE: mnem = "fldz"; break;
- case 0xF0: mnem = "f2xm1"; break;
- case 0xF1: mnem = "fyl2x"; break;
- case 0xF5: mnem = "fprem1"; break;
- case 0xF7: mnem = "fincstp"; break;
- case 0xF8: mnem = "fprem"; break;
- case 0xFC: mnem = "frndint"; break;
- case 0xFD: mnem = "fscale"; break;
- case 0xFE: mnem = "fsin"; break;
- case 0xFF: mnem = "fcos"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDA:
- if (modrm_byte == 0xE9) {
- mnem = "fucompp";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDB:
- if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomi";
- has_register = true;
- } else if (modrm_byte == 0xE2) {
- mnem = "fclex";
- } else if (modrm_byte == 0xE3) {
- mnem = "fninit";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDC:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "fadd"; break;
- case 0xE8: mnem = "fsub"; break;
- case 0xC8: mnem = "fmul"; break;
- case 0xF8: mnem = "fdiv"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDD:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "ffree"; break;
- case 0xD8: mnem = "fstp"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDE:
- if (modrm_byte == 0xD9) {
- mnem = "fcompp";
- } else {
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "faddp"; break;
- case 0xE8: mnem = "fsubp"; break;
- case 0xC8: mnem = "fmulp"; break;
- case 0xF8: mnem = "fdivp"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDF:
- if (modrm_byte == 0xE0) {
- mnem = "fnstsw_ax";
- } else if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomip";
- has_register = true;
- }
- break;
-
- default: UnimplementedInstruction();
- }
-
- if (has_register) {
- AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
- } else {
- AppendToBuffer("%s", mnem);
- }
- return 2;
-}
-
-
-// Mnemonics for instructions 0xF0 byte.
-// Returns NULL if the instruction is not handled here.
-static const char* F0Mnem(byte f0byte) {
- switch (f0byte) {
- case 0x18: return "prefetch";
- case 0xA2: return "cpuid";
- case 0x31: return "rdtsc";
- case 0xBE: return "movsx_b";
- case 0xBF: return "movsx_w";
- case 0xB6: return "movzx_b";
- case 0xB7: return "movzx_w";
- case 0xAF: return "imul";
- case 0xA5: return "shld";
- case 0xAD: return "shrd";
- case 0xAC: return "shrd"; // 3-operand version.
- case 0xAB: return "bts";
- default: return NULL;
- }
-}
-
-
-// Disassembled instruction '*instr' and writes it into 'out_buffer'.
-int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
- byte* instr) {
- tmp_buffer_pos_ = 0; // starting to write as position 0
- byte* data = instr;
- // Check for hints.
- const char* branch_hint = NULL;
- // We use these two prefixes only with branch prediction
- if (*data == 0x3E /*ds*/) {
- branch_hint = "predicted taken";
- data++;
- } else if (*data == 0x2E /*cs*/) {
- branch_hint = "predicted not taken";
- data++;
- }
- bool processed = true; // Will be set to false if the current instruction
- // is not in 'instructions' table.
- const InstructionDesc& idesc = instruction_table_->Get(*data);
- switch (idesc.type) {
- case ZERO_OPERANDS_INSTR:
- AppendToBuffer(idesc.mnem);
- data++;
- break;
-
- case TWO_OPERANDS_INSTR:
- data++;
- data += PrintOperands(idesc.mnem, idesc.op_order_, data);
- break;
-
- case JUMP_CONDITIONAL_SHORT_INSTR:
- data += JumpConditionalShort(data, branch_hint);
- break;
-
- case REGISTER_INSTR:
- AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07));
- data++;
- break;
-
- case MOVE_REG_INSTR: {
- byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
- AppendToBuffer("mov %s,%s",
- NameOfCPURegister(*data & 0x07),
- NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case CALL_JUMP_INSTR: {
- byte* addr = data + *reinterpret_cast<int32_t*>(data+1) + 5;
- AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case SHORT_IMMEDIATE_INSTR: {
- byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
- AppendToBuffer("%s eax, %s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case BYTE_IMMEDIATE_INSTR: {
- AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
- data += 2;
- break;
- }
-
- case NO_INSTR:
- processed = false;
- break;
-
- default:
- UNIMPLEMENTED(); // This type is not implemented.
- }
- //----------------------------
- if (!processed) {
- switch (*data) {
- case 0xC2:
- AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data+1));
- data += 3;
- break;
-
- case 0x69: // fall through
- case 0x6B:
- { int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- int32_t imm =
- *data == 0x6B ? *(data+2) : *reinterpret_cast<int32_t*>(data+2);
- AppendToBuffer("imul %s,%s,0x%x",
- NameOfCPURegister(regop),
- NameOfCPURegister(rm),
- imm);
- data += 2 + (*data == 0x6B ? 1 : 4);
- }
- break;
-
- case 0xF6:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == eax) {
- AppendToBuffer("test_b ");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- } else {
- UnimplementedInstruction();
- }
- }
- break;
-
- case 0x81: // fall through
- case 0x83: // 0x81 with sign extension bit set
- data += PrintImmediateOp(data);
- break;
-
- case 0x0F:
- { byte f0byte = data[1];
- const char* f0mnem = F0Mnem(f0byte);
- if (f0byte == 0x18) {
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* suffix[] = {"nta", "1", "2", "3"};
- AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
- data += PrintRightOperand(data);
- } else if (f0byte == 0x1F && data[2] == 0) {
- AppendToBuffer("nop"); // 3 byte nop.
- data += 3;
- } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
- AppendToBuffer("nop"); // 4 byte nop.
- data += 4;
- } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
- data[4] == 0) {
- AppendToBuffer("nop"); // 5 byte nop.
- data += 5;
- } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
- data[4] == 0 && data[5] == 0 && data[6] == 0) {
- AppendToBuffer("nop"); // 7 byte nop.
- data += 7;
- } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
- data[4] == 0 && data[5] == 0 && data[6] == 0 &&
- data[7] == 0) {
- AppendToBuffer("nop"); // 8 byte nop.
- data += 8;
- } else if (f0byte == 0xA2 || f0byte == 0x31) {
- AppendToBuffer("%s", f0mnem);
- data += 2;
- } else if (f0byte == 0x28) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movaps %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (f0byte == 0x57) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("xorps %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (f0byte == 0x50) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movmskps %s,%s",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if ((f0byte & 0xF0) == 0x80) {
- data += JumpConditional(data, branch_hint);
- } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
- f0byte == 0xB7 || f0byte == 0xAF) {
- data += 2;
- data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
- } else if ((f0byte & 0xF0) == 0x90) {
- data += SetCC(data);
- } else if ((f0byte & 0xF0) == 0x40) {
- data += CMov(data);
- } else {
- data += 2;
- if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
- // shrd, shld, bts
- AppendToBuffer("%s ", f0mnem);
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
- if (f0byte == 0xAB) {
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else {
- AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
- }
- } else {
- UnimplementedInstruction();
- }
- }
- }
- break;
-
- case 0x8F:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == eax) {
- AppendToBuffer("pop ");
- data += PrintRightOperand(data);
- }
- }
- break;
-
- case 0xFF:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
- switch (regop) {
- case esi: mnem = "push"; break;
- case eax: mnem = "inc"; break;
- case ecx: mnem = "dec"; break;
- case edx: mnem = "call"; break;
- case esp: mnem = "jmp"; break;
- default: mnem = "???";
- }
- AppendToBuffer("%s ", mnem);
- data += PrintRightOperand(data);
- }
- break;
-
- case 0xC7: // imm32, fall through
- case 0xC6: // imm8
- { bool is_byte = *data == 0xC6;
- data++;
- if (is_byte) {
- AppendToBuffer("%s ", "mov_b");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- } else {
- AppendToBuffer("%s ", "mov");
- data += PrintRightOperand(data);
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += 4;
- }
- }
- break;
-
- case 0x80:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
- switch (regop) {
- case 5: mnem = "subb"; break;
- case 7: mnem = "cmpb"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- }
- break;
-
- case 0x88: // 8bit, fall through
- case 0x89: // 32bit
- { bool is_byte = *data == 0x88;
- int mod, regop, rm;
- data++;
- get_modrm(*data, &mod, &regop, &rm);
- if (is_byte) {
- AppendToBuffer("%s ", "mov_b");
- data += PrintRightByteOperand(data);
- AppendToBuffer(",%s", NameOfByteCPURegister(regop));
- } else {
- AppendToBuffer("%s ", "mov");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- }
- }
- break;
-
- case 0x66: // prefix
- while (*data == 0x66) data++;
- if (*data == 0xf && data[1] == 0x1f) {
- AppendToBuffer("nop"); // 0x66 prefix
- } else if (*data == 0x90) {
- AppendToBuffer("nop"); // 0x66 prefix
- } else if (*data == 0x8B) {
- data++;
- data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
- } else if (*data == 0x89) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("mov_w ");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else if (*data == 0x0F) {
- data++;
- if (*data == 0x38) {
- data++;
- if (*data == 0x17) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("ptest %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x2A) {
- // movntdqa
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else {
- UnimplementedInstruction();
- }
- } else if (*data == 0x3A) {
- data++;
- if (*data == 0x0B) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("roundsd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x16) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pextrd %s,%s,%d",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x17) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("extractps %s,%s,%d",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x22) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pinsrd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfCPURegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else {
- UnimplementedInstruction();
- }
- } else if (*data == 0x2E || *data == 0x2F) {
- const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd";
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (mod == 0x3) {
- AppendToBuffer("%s %s,%s", mnem,
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- }
- } else if (*data == 0x50) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movmskpd %s,%s",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x54) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("andpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x56) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("orpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x57) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("xorpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x6E) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movd %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else if (*data == 0x6F) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (*data == 0x70) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pshufd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x76) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("pcmpeqd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x90) {
- data++;
- AppendToBuffer("nop"); // 2 byte nop.
- } else if (*data == 0xF3) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("psllq %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x73) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- ASSERT(regop == esi || regop == edx);
- AppendToBuffer("%s %s,%d",
- (regop == esi) ? "psllq" : "psrlq",
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0xD3) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("psrlq %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x7F) {
- AppendToBuffer("movdqa ");
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (*data == 0x7E) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movd ");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (*data == 0xDB) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("pand %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0xE7) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (mod == 3) {
- AppendToBuffer("movntdq ");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- UnimplementedInstruction();
- }
- } else if (*data == 0xEF) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("pxor %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0xEB) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("por %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else {
- UnimplementedInstruction();
- }
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xFE:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == ecx) {
- AppendToBuffer("dec_b ");
- data += PrintRightOperand(data);
- } else {
- UnimplementedInstruction();
- }
- }
- break;
-
- case 0x68:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data+1));
- data += 5;
- break;
-
- case 0x6A:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
- data += 2;
- break;
-
- case 0xA8:
- AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data+1));
- data += 2;
- break;
-
- case 0xA9:
- AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
- data += 5;
- break;
-
- case 0xD1: // fall through
- case 0xD3: // fall through
- case 0xC1:
- data += D1D3C1Instruction(data);
- break;
-
- case 0xD9: // fall through
- case 0xDA: // fall through
- case 0xDB: // fall through
- case 0xDC: // fall through
- case 0xDD: // fall through
- case 0xDE: // fall through
- case 0xDF:
- data += FPUInstruction(data);
- break;
-
- case 0xEB:
- data += JumpShort(data);
- break;
-
- case 0xF2:
- if (*(data+1) == 0x0F) {
- byte b2 = *(data+2);
- if (b2 == 0x11) {
- AppendToBuffer("movsd ");
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (b2 == 0x10) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x5A) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvtsd2ss %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else {
- const char* mnem = "?";
- switch (b2) {
- case 0x2A: mnem = "cvtsi2sd"; break;
- case 0x2C: mnem = "cvttsd2si"; break;
- case 0x2D: mnem = "cvtsd2si"; break;
- case 0x51: mnem = "sqrtsd"; break;
- case 0x58: mnem = "addsd"; break;
- case 0x59: mnem = "mulsd"; break;
- case 0x5C: mnem = "subsd"; break;
- case 0x5E: mnem = "divsd"; break;
- }
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (b2 == 0x2A) {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else if (b2 == 0x2C || b2 == 0x2D) {
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0xC2) {
- // Intel manual 2A, Table 3-18.
- const char* const pseudo_op[] = {
- "cmpeqsd",
- "cmpltsd",
- "cmplesd",
- "cmpunordsd",
- "cmpneqsd",
- "cmpnltsd",
- "cmpnlesd",
- "cmpordsd"
- };
- AppendToBuffer("%s %s,%s",
- pseudo_op[data[1]],
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data += 2;
- } else {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- }
- }
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xF3:
- if (*(data+1) == 0x0F) {
- byte b2 = *(data+2);
- if (b2 == 0x11) {
- AppendToBuffer("movss ");
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (b2 == 0x10) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movss %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x2C) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvttss2si %s,", NameOfCPURegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x5A) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x6F) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x7F) {
- AppendToBuffer("movdqu ");
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- UnimplementedInstruction();
- }
- } else if (*(data+1) == 0xA5) {
- data += 2;
- AppendToBuffer("rep_movs");
- } else if (*(data+1) == 0xAB) {
- data += 2;
- AppendToBuffer("rep_stos");
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xF7:
- data += F7Instruction(data);
- break;
-
- default:
- UnimplementedInstruction();
- }
- }
-
- if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
- tmp_buffer_[tmp_buffer_pos_] = '\0';
- }
-
- int instr_len = data - instr;
- if (instr_len == 0) {
- printf("%02x", *data);
- }
- ASSERT(instr_len > 0); // Ensure progress.
-
- int outp = 0;
- // Instruction bytes.
- for (byte* bp = instr; bp < data; bp++) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp,
- "%02x",
- *bp);
- }
- for (int i = 6 - instr_len; i >= 0; i--) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp,
- " ");
- }
-
- outp += v8::internal::OS::SNPrintF(out_buffer + outp,
- " %s",
- tmp_buffer_.start());
- return instr_len;
-} // NOLINT (function is too long)
-
-
-//------------------------------------------------------------------------------
-
-
-static const char* cpu_regs[8] = {
- "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
-};
-
-
-static const char* byte_cpu_regs[8] = {
- "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
-};
-
-
-static const char* xmm_regs[8] = {
- "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
-};
-
-
-const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfConstant(byte* addr) const {
- return NameOfAddress(addr);
-}
-
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
- if (0 <= reg && reg < 8) return cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
- if (0 <= reg && reg < 8) return byte_cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
- if (0 <= reg && reg < 8) return xmm_regs[reg];
- return "noxmmreg";
-}
-
-
-const char* NameConverter::NameInCode(byte* addr) const {
- // IA32 does not embed debug strings at the moment.
- UNREACHABLE();
- return "";
-}
-
-
-//------------------------------------------------------------------------------
-
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte* instruction) {
- DisassemblerIA32 d(converter_, false /*do not crash if unimplemented*/);
- return d.InstructionDecode(buffer, instruction);
-}
-
-
-// The IA-32 assembler does not currently use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
-
-
-/*static*/ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
- NameConverter converter;
- Disassembler d(converter);
- for (byte* pc = begin; pc < end;) {
- v8::internal::EmbeddedVector<char, 128> buffer;
- buffer[0] = '\0';
- byte* prev_pc = pc;
- pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p", prev_pc);
- fprintf(f, " ");
-
- for (byte* bp = prev_pc; bp < pc; bp++) {
- fprintf(f, "%02x", *bp);
- }
- for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
- fprintf(f, " ");
- }
- fprintf(f, " %s\n", buffer.start());
- }
-}
-
-
-} // namespace disasm
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/frames-ia32.cc b/src/3rdparty/v8/src/ia32/frames-ia32.cc
deleted file mode 100644
index dd44f0e..0000000
--- a/src/3rdparty/v8/src/ia32/frames-ia32.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/frames-ia32.h b/src/3rdparty/v8/src/ia32/frames-ia32.h
deleted file mode 100644
index 5bd102a..0000000
--- a/src/3rdparty/v8/src/ia32/frames-ia32.h
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_FRAMES_IA32_H_
-#define V8_IA32_FRAMES_IA32_H_
-
-namespace v8 {
-namespace internal {
-
-
-// Register lists
-// Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 8;
-
-
-// Caller-saved registers
-const RegList kJSCallerSaved =
- 1 << 0 | // eax
- 1 << 1 | // ecx
- 1 << 2 | // edx
- 1 << 3 | // ebx - used as a caller-saved register in JavaScript code
- 1 << 7; // edi - callee function
-
-const int kNumJSCallerSaved = 5;
-
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
-
-// Number of registers for which space is reserved in safepoints.
-const int kNumSafepointRegisters = 8;
-
-const int kNoAlignmentPadding = 0;
-const int kAlignmentPaddingPushed = 2;
-const int kAlignmentZapValue = 0x12345678; // Not heap object tagged.
-
-// ----------------------------------------------------
-
-
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
-class EntryFrameConstants : public AllStatic {
- public:
- static const int kCallerFPOffset = -6 * kPointerSize;
-
- static const int kFunctionArgOffset = +3 * kPointerSize;
- static const int kReceiverArgOffset = +4 * kPointerSize;
- static const int kArgcOffset = +5 * kPointerSize;
- static const int kArgvOffset = +6 * kPointerSize;
-};
-
-
-class ExitFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
-
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
-
- // FP-relative displacement of the caller's SP. It points just
- // below the saved PC.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
-};
-
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
-
- // Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
-
- static const int kDynamicAlignmentStateOffset = kLocal0Offset;
-};
-
-
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_FRAMES_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc b/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
deleted file mode 100644
index 733d977..0000000
--- a/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
+++ /dev/null
@@ -1,4595 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- ASSERT(patch_site_.is_bound() == info_emitted_);
- }
-
- void EmitJumpIfNotSmi(Register reg,
- Label* target,
- Label::Distance distance = Label::kFar) {
- __ test(reg, Immediate(kSmiTagMask));
- EmitJump(not_carry, target, distance); // Always taken before patched.
- }
-
- void EmitJumpIfSmi(Register reg,
- Label* target,
- Label::Distance distance = Label::kFar) {
- __ test(reg, Immediate(kSmiTagMask));
- EmitJump(carry, target, distance); // Never taken before patched.
- }
-
- void EmitPatchInfo() {
- if (patch_site_.is_bound()) {
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
- __ test(eax, Immediate(delta_to_patch_site));
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- } else {
- __ nop(); // Signals no inlined code.
- }
- }
-
- private:
- // jc will be patched with jz, jnc will become jnz.
- void EmitJump(Condition cc, Label* target, Label::Distance distance) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- ASSERT(cc == carry || cc == not_carry);
- __ bind(&patch_site_);
- __ j(cc, target, distance);
- }
-
- MacroAssembler* masm_;
- Label patch_site_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
-
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right, with the
-// return address on top of them. The actual argument count matches the
-// formal parameter count expected by the function.
-//
-// The live registers are:
-// o edi: the JS function object being called (i.e. ourselves)
-// o esi: our context
-// o ebp: our caller's frame pointer
-// o esp: stack pointer (pointing to return address)
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-ia32.h for its layout.
-void FullCodeGenerator::Generate() {
- CompilationInfo* info = info_;
- handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
- Comment cmnt(masm_, "[ function compiled by full code generator");
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). ecx is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
- Label ok;
- __ test(ecx, ecx);
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
- __ mov(ecx, Operand(esp, receiver_offset));
- __ JumpIfSmi(ecx, &ok);
- __ CmpObjectType(ecx, JS_GLOBAL_PROXY_TYPE, ecx);
- __ j(not_equal, &ok, Label::kNear);
- __ mov(Operand(esp, receiver_offset),
- Immediate(isolate()->factory()->undefined_value()));
- __ bind(&ok);
- }
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- info->set_prologue_offset(masm_->pc_offset());
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = info->scope()->num_stack_slots();
- if (locals_count == 1) {
- __ push(Immediate(isolate()->factory()->undefined_value()));
- } else if (locals_count > 1) {
- __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
- for (int i = 0; i < locals_count; i++) {
- __ push(eax);
- }
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment cmnt(masm_, "[ Allocate context");
- // Argument to NewContext is the function, which is still in edi.
- __ push(edi);
- if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
- __ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- function_in_register = false;
- // Context is returned in both eax and esi. It replaces the context
- // passed to us. It's saved in the stack and kept live in esi.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
-
- // Copy parameters into context if necessary.
- int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ mov(eax, Operand(ebp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(var->index());
- __ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers eax and ebx.
- __ RecordWriteContextSlot(esi,
- context_offset,
- eax,
- ebx,
- kDontSaveFPRegs);
- }
- }
- }
-
- Variable* arguments = scope()->arguments();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(edi);
- } else {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ lea(edx,
- Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(edx);
- __ push(Immediate(Smi::FromInt(num_parameters)));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
- }
- ArgumentsAccessStub stub(type);
- __ CallStub(&stub);
-
- SetVar(arguments, eax, ebx, edx);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ Declarations");
- scope()->VisitIllegalRedeclaration(this);
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
- ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
- VisitVariableDeclaration(function);
- }
- VisitDeclarations(scope()->declarations());
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
- }
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ mov(eax, isolate()->factory()->undefined_value());
- EmitReturnSequence();
- }
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- __ Set(eax, Immediate(Smi::FromInt(0)));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ mov(ebx, Immediate(profiling_counter_));
- __ sub(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(Smi::FromInt(delta)));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
- int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing: if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- __ mov(ebx, Immediate(profiling_counter_));
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(Smi::FromInt(reset_value)));
-}
-
-
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Back edge bookkeeping");
- Label ok;
-
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
-
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
-
- // Loop stack checks can be patched to perform on-stack replacement. In
- // order to decide whether or not to perform OSR we embed the loop depth
- // in a test instruction after the call so we can extract it from the OSR
- // builtin.
- ASSERT(loop_depth() > 0);
- __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
-
- EmitProfilingCounterReset();
-
- __ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- } else {
- // Common return label
- __ bind(&return_label_);
- if (FLAG_trace) {
- __ push(eax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ j(positive, &ok, Label::kNear);
- __ push(eax);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- InterruptStub stub;
- __ CallStub(&stub);
- }
- __ pop(eax);
- EmitProfilingCounterReset();
- __ bind(&ok);
- }
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- SetSourcePosition(function()->end_position() - 1);
- __ RecordJSReturn();
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ mov(esp, ebp);
- __ pop(ebp);
-
- int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
- __ Ret(arguments_bytes, ecx);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- MemOperand operand = codegen()->VarOperand(var, result_register());
- // Memory operands can be pushed directly.
- __ push(operand);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on IA32.
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on IA32.
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on IA32.
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on IA32.
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- if (lit->IsSmi()) {
- __ SafeSet(result_register(), Immediate(lit));
- } else {
- __ Set(result_register(), Immediate(lit));
- }
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- if (lit->IsSmi()) {
- __ SafePush(Immediate(lit));
- } else {
- __ push(Immediate(lit));
- }
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ mov(result_register(), lit);
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- if (count > 1) __ Drop(count - 1);
- __ mov(Operand(esp, 0), reg);
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == materialize_false);
- __ bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ mov(result_register(), isolate()->factory()->true_value());
- __ jmp(&done, Label::kNear);
- __ bind(materialize_false);
- __ mov(result_register(), isolate()->factory()->false_value());
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ push(Immediate(isolate()->factory()->true_value()));
- __ jmp(&done, Label::kNear);
- __ bind(materialize_false);
- __ push(Immediate(isolate()->factory()->false_value()));
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == true_label_);
- ASSERT(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Handle<Object> value = flag
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value();
- __ mov(result_register(), value);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Handle<Object> value = flag
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value();
- __ push(Immediate(value));
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- }
-}
-
-
-void FullCodeGenerator::DoTest(Expression* condition,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- ToBooleanStub stub(result_register());
- __ push(result_register());
- __ CallStub(&stub, condition->test_id());
- __ test(result_register(), result_register());
- // The stub returns nonzero for true.
- Split(not_zero, if_true, if_false, fall_through);
-}
-
-
-void FullCodeGenerator::Split(Condition cc,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ j(cc, if_true);
- } else if (if_true == fall_through) {
- __ j(NegateCondition(cc), if_false);
- } else {
- __ j(cc, if_true);
- __ jmp(if_false);
- }
-}
-
-
-MemOperand FullCodeGenerator::StackOperand(Variable* var) {
- ASSERT(var->IsStackAllocated());
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -var->index() * kPointerSize;
- // Adjust by a (parameter or local) base offset.
- if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
- } else {
- offset += JavaScriptFrameConstants::kLocal0Offset;
- }
- return Operand(ebp, offset);
-}
-
-
-MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- if (var->IsContextSlot()) {
- int context_chain_length = scope()->ContextChainLength(var->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
- } else {
- return StackOperand(var);
- }
-}
-
-
-void FullCodeGenerator::GetVar(Register dest, Variable* var) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- MemOperand location = VarOperand(var, dest);
- __ mov(dest, location);
-}
-
-
-void FullCodeGenerator::SetVar(Variable* var,
- Register src,
- Register scratch0,
- Register scratch1) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- ASSERT(!scratch0.is(src));
- ASSERT(!scratch0.is(scratch1));
- ASSERT(!scratch1.is(src));
- MemOperand location = VarOperand(var, scratch0);
- __ mov(location, src);
-
- // Emit the write barrier code if the location is in the heap.
- if (var->IsContextSlot()) {
- int offset = Context::SlotOffset(var->index());
- ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
- __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
-
- Label skip;
- if (should_normalize) __ jmp(&skip, Label::kNear);
- PrepareForBailout(expr, TOS_REG);
- if (should_normalize) {
- __ cmp(eax, isolate()->factory()->true_value());
- Split(equal, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
-void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current context.
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
- // Check that we're not inside a with or catch context.
- __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
- __ cmp(ebx, isolate()->factory()->with_context_map());
- __ Check(not_equal, "Declaration in with context.");
- __ cmp(ebx, isolate()->factory()->catch_context_map());
- __ Check(not_equal, "Declaration in catch context.");
- }
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(
- VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
- VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
- Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(), zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- break;
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ mov(StackOperand(variable),
- Immediate(isolate()->factory()->the_hole_value()));
- }
- break;
-
- case Variable::CONTEXT:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ mov(ContextOperand(esi, variable->index()),
- Immediate(isolate()->factory()->the_hole_value()));
- // No write barrier since the hole value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- }
- break;
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ push(esi);
- __ push(Immediate(variable->name()));
- // VariableDeclaration nodes are always introduced in one of four modes.
- ASSERT(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ push(Immediate(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ push(Immediate(isolate()->factory()->the_hole_value()));
- } else {
- __ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitFunctionDeclaration(
- FunctionDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script());
- // Check for stack-overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals_->Add(function, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- VisitForAccumulatorValue(declaration->fun());
- __ mov(StackOperand(variable), result_register());
- break;
- }
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- VisitForAccumulatorValue(declaration->fun());
- __ mov(ContextOperand(esi, variable->index()), result_register());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(esi,
- Context::SlotOffset(variable->index()),
- result_register(),
- ecx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- break;
- }
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(esi);
- __ push(Immediate(variable->name()));
- __ push(Immediate(Smi::FromInt(NONE)));
- VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
-
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
-
- // Load instance object.
- __ LoadContext(eax, scope_->ContextChainLength(scope_->GlobalScope()));
- __ mov(eax, ContextOperand(eax, variable->interface()->Index()));
- __ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX));
-
- // Assign it.
- __ mov(ContextOperand(esi, variable->index()), eax);
- // We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(esi,
- Context::SlotOffset(variable->index()),
- eax,
- ecx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
-
- // Traverse into body.
- Visit(declaration->module());
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- __ push(esi); // The context is the first argument.
- __ Push(pairs);
- __ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- Comment cmnt(masm_, "[ SwitchStatement");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- // Keep the switch value on the stack until a case matches.
- VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- CaseClause* default_clause = NULL; // Can occur anywhere in the list.
-
- Label next_test; // Recycled for each test.
- // Compile all the tests with branches to their bodies.
- for (int i = 0; i < clauses->length(); i++) {
- CaseClause* clause = clauses->at(i);
- clause->body_target()->Unuse();
-
- // The default is not a test, but remember it as final fall through.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- __ bind(&next_test);
- next_test.Unuse();
-
- // Compile the label expression.
- VisitForAccumulatorValue(clause->label());
-
- // Perform the comparison as if via '==='.
- __ mov(edx, Operand(esp, 0)); // Switch value.
- bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
-
- __ cmp(edx, eax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target());
- __ bind(&slow_case);
- }
-
- // Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
- patch_site.EmitPatchInfo();
- __ test(eax, eax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target());
- }
-
- // Discard the test value and jump to the default if present, otherwise to
- // the end of the statement.
- __ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
- if (default_clause == NULL) {
- __ jmp(nested_statement.break_label());
- } else {
- __ jmp(default_clause->body_target());
- }
-
- // Compile all the case bodies.
- for (int i = 0; i < clauses->length(); i++) {
- Comment cmnt(masm_, "[ Case body");
- CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
- VisitStatements(clause->statements());
- }
-
- __ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- Comment cmnt(masm_, "[ ForInStatement");
- SetStatementPosition(stmt);
-
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
- VisitForAccumulatorValue(stmt->enumerable());
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, &exit);
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, &exit);
-
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
- // Convert the object to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(eax, &convert, Label::kNear);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &done_convert, Label::kNear);
- __ bind(&convert);
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ bind(&done_convert);
- __ push(eax);
-
- // Check for proxies.
- Label call_runtime, use_cache, fixed_array;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- __ j(below_equal, &call_runtime);
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- __ CheckEnumCache(&call_runtime);
-
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- __ jmp(&use_cache, Label::kNear);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(eax);
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->meta_map());
- __ j(not_equal, &fixed_array);
-
-
- // We got a map in register eax. Get the enumeration cache from it.
- Label no_descriptors;
- __ bind(&use_cache);
-
- __ EnumLength(edx, eax);
- __ cmp(edx, Immediate(Smi::FromInt(0)));
- __ j(equal, &no_descriptors);
-
- __ LoadInstanceDescriptors(eax, ecx);
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheOffset));
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- // Set up the four remaining stack slots.
- __ push(eax); // Map.
- __ push(ecx); // Enumeration cache.
- __ push(edx); // Number of valid entries for the map in the enum cache.
- __ push(Immediate(Smi::FromInt(0))); // Initial index.
- __ jmp(&loop);
-
- __ bind(&no_descriptors);
- __ add(esp, Immediate(kPointerSize));
- __ jmp(&exit);
-
- // We got a fixed array in register eax. Iterate through that.
- Label non_proxy;
- __ bind(&fixed_array);
-
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(ebx, cell);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
-
- __ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
- __ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
- __ j(above, &non_proxy);
- __ mov(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
- __ push(ebx); // Smi
- __ push(eax); // Array
- __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- __ push(eax); // Fixed array length (as smi).
- __ push(Immediate(Smi::FromInt(0))); // Initial index.
-
- // Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- __ bind(&loop);
- __ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index.
- __ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length.
- __ j(above_equal, loop_statement.break_label());
-
- // Get the current entry of the array into register ebx.
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
-
- // Get the expected map from the stack or a smi in the
- // permanent slow case into register edx.
- __ mov(edx, Operand(esp, 3 * kPointerSize));
-
- // Check if the expected map still matches that of the enumerable.
- // If not, we may have to filter the key.
- Label update_each;
- __ mov(ecx, Operand(esp, 4 * kPointerSize));
- __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ j(equal, &update_each, Label::kNear);
-
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- ASSERT(Smi::FromInt(0) == 0);
- __ test(edx, edx);
- __ j(zero, &update_each);
-
- // Convert the entry to a string or null if it isn't a property
- // anymore. If the property has been removed while iterating, we
- // just skip it.
- __ push(ecx); // Enumerable.
- __ push(ebx); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ test(eax, eax);
- __ j(equal, loop_statement.continue_label());
- __ mov(ebx, eax);
-
- // Update the 'each' property or variable from the possibly filtered
- // entry in register ebx.
- __ bind(&update_each);
- __ mov(result_register(), ebx);
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitAssignment(stmt->each());
- }
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Generate code for going to the next element by incrementing the
- // index (smi) stored on top of the stack.
- __ bind(loop_statement.continue_label());
- __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
-
- EmitBackEdgeBookkeeping(stmt, &loop);
- __ jmp(&loop);
-
- // Remove the pointers stored on the stack.
- __ bind(loop_statement.break_label());
- __ add(esp, Immediate(5 * kPointerSize));
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(&exit);
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
- __ push(Immediate(info));
- __ CallStub(&stub);
- } else {
- __ push(esi);
- __ push(Immediate(info));
- __ push(Immediate(pretenure
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value()));
- __ CallRuntime(Runtime::kNewClosure, 3);
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
- TypeofState typeof_state,
- Label* slow) {
- Register context = esi;
- Register temp = edx;
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
- }
- // Load next context in chain.
- __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering esi.
- context = temp;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s != NULL && s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(temp)) {
- __ mov(temp, context);
- }
- __ bind(&next);
- // Terminate at native context.
- __ cmp(FieldOperand(temp, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->native_context_map()));
- __ j(equal, &fast, Label::kNear);
- // Check that extension is NULL.
- __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
- // Load next context in chain.
- __ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
- __ jmp(&next);
- __ bind(&fast);
- }
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- __ mov(edx, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(ecx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- CallIC(ic, mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- ASSERT(var->IsContextSlot());
- Register context = esi;
- Register temp = ebx;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
- }
- __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering esi.
- context = temp;
- }
- }
- // Check that last extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return an esi-based operand (the write barrier cannot be allowed to
- // destroy the esi register).
- return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
- __ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
- __ cmp(eax, isolate()->factory()->the_hole_value());
- __ j(not_equal, done);
- if (local->mode() == CONST) {
- __ mov(eax, isolate()->factory()->undefined_value());
- } else { // LET || CONST_HARMONY
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- }
- }
- __ jmp(done);
- }
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
- // Record position before possible IC call.
- SetSourcePosition(proxy->position());
- Variable* var = proxy->var();
-
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
- switch (var->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in ecx and the global
- // object in eax.
- __ mov(edx, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(ecx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- context()->Plug(eax);
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP.
- // always holds.
- ASSERT(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else {
- // Check that we always have valid source position.
- ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
- ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- Label done;
- GetVar(eax, var);
- __ cmp(eax, isolate()->factory()->the_hole_value());
- __ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else {
- // Uninitalized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
- __ mov(eax, isolate()->factory()->undefined_value());
- }
- __ bind(&done);
- context()->Plug(eax);
- break;
- }
- }
- context()->Plug(var);
- break;
- }
-
- case Variable::LOOKUP: {
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
- __ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ bind(&done);
- context()->Plug(eax);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // edi = JS function.
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, isolate()->factory()->undefined_value());
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->pattern()));
- __ push(Immediate(expr->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
- if (expression == NULL) {
- __ push(Immediate(isolate()->factory()->null_value()));
- } else {
- VisitForStackValue(expression);
- }
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
- int flags = expr->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= expr->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- } else {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
- __ mov(ecx, Immediate(constant_properties));
- __ mov(edx, Immediate(Smi::FromInt(flags)));
- FastCloneShallowObjectStub stub(properties_count);
- __ CallStub(&stub);
- }
-
- // If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in eax.
- bool result_saved = false;
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
- AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(eax); // Save result on the stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsInternalizedString()) {
- if (property->emit_store()) {
- VisitForAccumulatorValue(value);
- __ mov(ecx, Immediate(key->handle()));
- __ mov(edx, Operand(esp, 0));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
- } else {
- VisitForEffect(value);
- }
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- __ push(Operand(esp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- VisitForStackValue(value);
- if (property->emit_store()) {
- __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
- __ CallRuntime(Runtime::kSetProperty, 4);
- } else {
- __ Drop(3);
- }
- break;
- case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
- break;
- case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
- break;
- }
- }
-
- // Emit code to define accessors, using only a single call to the runtime for
- // each pair of corresponding getters and setters.
- for (AccessorTable::Iterator it = accessor_table.begin();
- it != accessor_table.end();
- ++it) {
- __ push(Operand(esp, 0)); // Duplicate receiver.
- VisitForStackValue(it->first);
- EmitAccessor(it->second->getter);
- EmitAccessor(it->second->setter);
- __ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
- }
-
- if (expr->has_function()) {
- ASSERT(result_saved);
- __ push(Operand(esp, 0));
- __ CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(eax);
- }
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
- Handle<FixedArray> constant_elements = expr->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_constant_fast_elements =
- IsFastObjectElementsKind(constant_elements_kind);
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(constant_elements->get(1)));
-
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_elements));
- Heap* heap = isolate()->heap();
- if (has_constant_fast_elements &&
- constant_elements_values->map() == heap->fixed_cow_array_map()) {
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
- length);
- __ CallStub(&stub);
- } else if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
-
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- if (has_constant_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
-
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- __ CallStub(&stub);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(eax);
- result_saved = true;
- }
- VisitForAccumulatorValue(subexpr);
-
- if (IsFastObjectElementsKind(constant_elements_kind)) {
- // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
- // cannot transition and don't need to call the runtime stub.
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- // Store the subexpression value in the array's elements.
- __ mov(FieldOperand(ebx, offset), result_register());
- // Update the write barrier for the array store.
- __ RecordWriteField(ebx, offset, result_register(), ecx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- } else {
- // Store the subexpression value in the array's elements.
- __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
- __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
- __ mov(ecx, Immediate(Smi::FromInt(i)));
- __ mov(edx, Immediate(Smi::FromInt(expr->literal_index())));
- StoreArrayLiteralElementStub stub;
- __ CallStub(&stub);
- }
-
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(eax);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* property = expr->target()->AsProperty();
- if (property != NULL) {
- assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in edx.
- VisitForStackValue(property->obj());
- __ mov(edx, Operand(esp, 0));
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case KEYED_PROPERTY: {
- if (expr->is_compound()) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- __ mov(edx, Operand(esp, kPointerSize)); // Object.
- __ mov(ecx, Operand(esp, 0)); // Key.
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- break;
- }
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- AccumulatorValueContext result_context(this);
- { AccumulatorValueContext left_operand_context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- __ push(eax); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
- SetSourcePosition(expr->position() + 1);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- mode,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- ASSERT(!key->handle()->IsSmi());
- __ mov(ecx, Immediate(key->handle()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left,
- Expression* right) {
- // Do combined smi check of the operands. Left operand is on the
- // stack. Right operand is in eax.
- Label smi_case, done, stub_call;
- __ pop(edx);
- __ mov(ecx, eax);
- __ or_(eax, edx);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
-
- __ bind(&stub_call);
- __ mov(eax, ecx);
- BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done, Label::kNear);
-
- // Smi case.
- __ bind(&smi_case);
- __ mov(eax, edx); // Copy left operand in case of a stub call.
-
- switch (op) {
- case Token::SAR:
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- __ sar_cl(eax); // No checks of result necessary
- __ SmiTag(eax);
- break;
- case Token::SHL: {
- Label result_ok;
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- __ shl_cl(eax);
- // Check that the *signed* result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(positive, &result_ok);
- __ SmiTag(ecx);
- __ jmp(&stub_call);
- __ bind(&result_ok);
- __ SmiTag(eax);
- break;
- }
- case Token::SHR: {
- Label result_ok;
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- __ shr_cl(eax);
- __ test(eax, Immediate(0xc0000000));
- __ j(zero, &result_ok);
- __ SmiTag(ecx);
- __ jmp(&stub_call);
- __ bind(&result_ok);
- __ SmiTag(eax);
- break;
- }
- case Token::ADD:
- __ add(eax, ecx);
- __ j(overflow, &stub_call);
- break;
- case Token::SUB:
- __ sub(eax, ecx);
- __ j(overflow, &stub_call);
- break;
- case Token::MUL: {
- __ SmiUntag(eax);
- __ imul(eax, ecx);
- __ j(overflow, &stub_call);
- __ test(eax, eax);
- __ j(not_zero, &done, Label::kNear);
- __ mov(ebx, edx);
- __ or_(ebx, ecx);
- __ j(negative, &stub_call);
- break;
- }
- case Token::BIT_OR:
- __ or_(eax, ecx);
- break;
- case Token::BIT_AND:
- __ and_(eax, ecx);
- break;
- case Token::BIT_XOR:
- __ xor_(eax, ecx);
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
- __ pop(edx);
- BinaryOpStub stub(op, mode);
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
- break;
- }
- case NAMED_PROPERTY: {
- __ push(eax); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- __ mov(edx, eax);
- __ pop(eax); // Restore value.
- __ mov(ecx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- case KEYED_PROPERTY: {
- __ push(eax); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(ecx, eax);
- __ pop(edx); // Receiver.
- __ pop(eax); // Restore value.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- if (var->IsUnallocated()) {
- // Global var, const, or let.
- __ mov(ecx, var->name());
- __ mov(edx, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
-
- } else if (op == Token::INIT_CONST) {
- // Const initializers need a write barrier.
- ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ mov(edx, StackOperand(var));
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip);
- __ mov(StackOperand(var), eax);
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(eax);
- __ push(esi);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- }
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
- // Non-initializing assignment to let variable needs a write barrier.
- if (var->IsLookupSlot()) {
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, ecx);
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &assign, Label::kNear);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&assign);
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
- }
-
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
- // Assignment to var or initializing assignment to let/const
- // in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
- MemOperand location = VarOperand(var, ecx);
- if (generate_debug_code_ && op == Token::INIT_LET) {
- // Check for an uninitialized let binding.
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ Check(equal, "Let binding re-initialization.");
- }
- // Perform the assignment.
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- }
- // Non-initializing assignments to consts are ignored.
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- // eax : value
- // esp[0] : receiver
-
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ mov(ecx, prop->key()->AsLiteral()->handle());
- __ pop(edx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
- // eax : value
- // esp[0] : key
- // esp[kPointerSize] : receiver
-
- __ pop(ecx); // Key.
- __ pop(edx);
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- __ mov(edx, result_register());
- EmitNamedPropertyLoad(expr);
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(eax);
- } else {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ pop(edx); // Object.
- __ mov(ecx, result_register()); // Key.
- EmitKeyedPropertyLoad(expr);
- context()->Plug(eax);
- }
-}
-
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- __ call(code, rmode, ast_id);
-}
-
-
-
-
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- __ Set(ecx, Immediate(name));
- }
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(ecx);
- __ push(eax);
- __ push(ecx);
-
- // Load the arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax); // Drop the key still on the stack.
-}
-
-
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
-
- // Record call targets in unoptimized code.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ mov(ebx, cell);
-
- CallFunctionStub stub(arg_count, flags);
- __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
-
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ push(Operand(esp, arg_count * kPointerSize));
- } else {
- __ push(Immediate(isolate()->factory()->undefined_value()));
- }
-
- // Push the receiver of the enclosing function.
- __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
- // Push the language mode.
- __ push(Immediate(Smi::FromInt(language_mode())));
-
- // Push the start position of the scope the calls resides in.
- __ push(Immediate(Smi::FromInt(scope()->start_position())));
-
- // Push the qml mode flag
- __ push(Immediate(Smi::FromInt(is_qml_mode())));
-
- // Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
-
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- // Reserved receiver slot.
- __ push(Immediate(isolate()->factory()->undefined_value()));
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
-
- // The runtime call returns a pair of values in eax (function) and
- // edx (receiver). Touch up the stack with the right values.
- __ mov(Operand(esp, (arg_count + 0) * kPointerSize), edx);
- __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
- __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
-
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Push global object as receiver for the call IC.
- __ push(proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
-
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- // Call to a lookup slot (dynamically introduced variable).
- Label slow, done;
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
- }
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in eax) and
- // the object holding it (returned in edx).
- __ push(context_register());
- __ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ push(eax); // Function.
- __ push(edx); // Receiver.
-
- // If fast case code has been generated, emit code to push the function
- // and receiver and have the slow path jump around this code.
- if (done.is_linked()) {
- Label call;
- __ jmp(&call, Label::kNear);
- __ bind(&done);
- // Push function.
- __ push(eax);
- // The receiver is implicitly the global receiver. Indicate this by
- // passing the hole to the call function stub.
- __ push(Immediate(isolate()->factory()->the_hole_value()));
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found by
- // LoadContextSlot. That object could be the hole if the receiver is
- // implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
-
- } else if (property != NULL) {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
- RelocInfo::CODE_TARGET);
- } else {
- EmitKeyedCallWithIC(expr, property->key());
- }
-
- } else {
- // Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- }
- // Load global receiver object.
- __ mov(ebx, GlobalObjectOperand());
- __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
- // Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
- }
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- ASSERT(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function and argument count into edi and eax.
- __ Set(eax, Immediate(arg_count));
- __ mov(edi, Operand(esp, arg_count * kPointerSize));
-
- // Record call targets in unoptimized code.
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ mov(ebx, cell);
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ test(eax, Immediate(kSmiTagMask));
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ test(eax, Immediate(kSmiTagMask | 0x80000000));
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, if_true);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ movzx_b(ecx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ecx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(below, if_false);
- __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(below_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
- // TODO(rossberg): incorporate symbols.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(above_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(not_zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(eax);
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(not_zero, if_true);
-
- // Check for fast case object. Return false for slow case objects.
- __ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ cmp(ecx, FACTORY->hash_table_map());
- __ j(equal, if_false);
-
- // Look for valueOf string in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(ecx, ebx);
- __ cmp(ecx, 0);
- __ j(equal, &done);
-
- __ LoadInstanceDescriptors(ebx, ebx);
- // ebx: descriptor array.
- // ecx: valid entries in the descriptor array.
- // Calculate the end of the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ imul(ecx, ecx, DescriptorArray::kDescriptorSize);
- __ lea(ecx, Operand(ebx, ecx, times_2, DescriptorArray::kFirstOffset));
- // Calculate location of the first key name.
- __ add(ebx, Immediate(DescriptorArray::kFirstOffset));
- // Loop through all the keys in the descriptor array. If one of these is the
- // internalized string "valueOf" the result is false.
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, FieldOperand(ebx, 0));
- __ cmp(edx, FACTORY->value_of_string());
- __ j(equal, if_false);
- __ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ cmp(ebx, ecx);
- __ j(not_equal, &loop);
-
- __ bind(&done);
-
- // Reload map as register ebx was used as temporary above.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-
- // If a valueOf property is not found on the object check that its
- // prototype is the un-modified String prototype. If not result is false.
- __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ JumpIfSmi(ecx, if_false);
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edx,
- FieldOperand(edx, GlobalObject::kNativeContextOffset));
- __ cmp(ecx,
- ContextOperand(edx,
- Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, if_false);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(ebx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ jmp(if_true);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, SYMBOL_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker);
- __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(ebx);
- __ cmp(eax, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in eax.
- VisitForAccumulatorValue(args->at(0));
- __ mov(edx, eax);
- __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label exit;
- // Get the number of formal parameters.
- __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- __ AssertSmi(eax);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is a smi, we return null.
- __ JumpIfSmi(eax, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
- // Map is now in eax.
- __ j(below, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ j(equal, &function);
-
- __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ j(equal, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
- // Check if the constructor in the map is a JS function.
- __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &non_function_constructor);
-
- // eax now contains the constructor function. Grab the
- // instance class name from there.
- __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset));
- __ jmp(&done);
-
- // Functions have class 'Function'.
- __ bind(&function);
- __ mov(eax, isolate()->factory()->function_class_string());
- __ jmp(&done);
-
- // Objects with a non-function constructor have class 'Object'.
- __ bind(&non_function_constructor);
- __ mov(eax, isolate()->factory()->Object_string());
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ mov(eax, isolate()->factory()->null_value());
-
- // All done.
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
- // Finally, we're expected to leave a value on the top of the stack.
- __ mov(eax, isolate()->factory()->undefined_value());
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(edi, eax);
-
- __ bind(&heapnumber_allocated);
-
- __ PrepareCallCFunction(1, ebx);
- __ mov(eax, ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // Convert 32 random bits in eax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- // This is implemented on both SSE2 and FPU.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, ebx);
- __ movd(xmm0, eax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorps(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
- } else {
- // 0x4130000000000000 is 1.0 x 2^20 as a double.
- __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
- Immediate(0x41300000));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ fsubp(1);
- __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
- }
- __ mov(eax, edi);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(eax, &done, Label::kNear);
- // If the object is not a value type, return the object.
- __ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
- __ j(not_equal, &done, Label::kNear);
- __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label runtime, done, not_date_object;
- Register object = eax;
- Register result = eax;
- Register scratch = ecx;
-
- __ JumpIfSmi(object, &not_date_object);
- __ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ j(not_equal, &not_date_object);
-
- if (index->value() == 0) {
- __ mov(result, FieldOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand::StaticVariable(stamp));
- __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ mov(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(Operand(esp, 0), object);
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ jmp(&done);
- }
-
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(ecx);
- __ pop(ebx);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(ecx);
- __ pop(ebx);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- if (CpuFeatures::IsSupported(SSE2)) {
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kMath_pow, 2);
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(ebx); // eax = value. ebx = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(ebx, &done, Label::kNear);
-
- // If the object is not a value type, return the value.
- __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
- __ j(not_equal, &done, Label::kNear);
-
- // Store the value.
- __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
-
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ mov(edx, eax);
- __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
-
- NumberToStringStub stub;
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(eax, ebx);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(ebx);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = ebx;
- Register index = eax;
- Register result = edx;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ Set(result, Immediate(isolate()->factory()->nan_value()));
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ Set(result, Immediate(isolate()->factory()->undefined_value()));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = ebx;
- Register index = eax;
- Register scratch = edx;
- Register result = eax;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ Set(result, Immediate(isolate()->factory()->empty_string()));
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Set(result, Immediate(Smi::FromInt(0)));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; ++i) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &runtime);
-
- // InvokeFunction requires the function in edi. Move it in there.
- __ mov(edi, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(edi, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(eax);
- __ CallRuntime(Runtime::kCall, args->length());
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpConstructResultStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- __ mov(eax, isolate()->factory()->undefined_value());
- context()->Plug(eax);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = eax;
- Register cache = ebx;
- Register tmp = ecx;
- __ mov(cache, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
- __ mov(cache,
- FieldOperand(cache, GlobalObject::kNativeContextOffset));
- __ mov(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ mov(cache,
- FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- Label done, not_found;
- // tmp now holds finger offset as a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
- __ cmp(key, CodeGenerator::FixedArrayElementOperand(cache, tmp));
- __ j(not_equal, &not_found);
-
- __ mov(eax, CodeGenerator::FixedArrayElementOperand(cache, tmp, 1));
- __ jmp(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ push(cache);
- __ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = eax;
- Register left = ebx;
- Register tmp = ecx;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmp(left, right);
- __ j(equal, &ok);
- // Fail if either is a non-HeapObject.
- __ mov(tmp, left);
- __ and_(tmp, right);
- __ JumpIfSmi(tmp, &fail);
- __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ CmpInstanceType(tmp, JS_REGEXP_TYPE);
- __ j(not_equal, &fail);
- __ cmp(tmp, FieldOperand(right, HeapObject::kMapOffset));
- __ j(not_equal, &fail);
- __ mov(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmp(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- __ j(equal, &ok);
- __ bind(&fail);
- __ mov(eax, Immediate(isolate()->factory()->false_value()));
- __ jmp(&done);
- __ bind(&ok);
- __ mov(eax, Immediate(isolate()->factory()->true_value()));
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(eax);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ test(FieldOperand(eax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(eax);
-
- __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
- __ IndexFromHash(eax, eax);
-
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- // We will leave the separator on the stack until the end of the function.
- VisitForStackValue(args->at(1));
- // Load this to eax (= array)
- VisitForAccumulatorValue(args->at(0));
- // All aliases of the same register have disjoint lifetimes.
- Register array = eax;
- Register elements = no_reg; // Will be eax.
-
- Register index = edx;
-
- Register string_length = ecx;
-
- Register string = esi;
-
- Register scratch = ebx;
-
- Register array_length = edi;
- Register result_pos = no_reg; // Will be edi.
-
- // Separator operand is already pushed.
- Operand separator_operand = Operand(esp, 2 * kPointerSize);
- Operand result_operand = Operand(esp, 1 * kPointerSize);
- Operand array_length_operand = Operand(esp, 0);
- __ sub(esp, Immediate(2 * kPointerSize));
- __ cld();
- // Check that the array is a JSArray
- __ JumpIfSmi(array, &bailout);
- __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch, &bailout);
-
- // If the array has length zero, return the empty string.
- __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length);
- __ j(not_zero, &non_trivial_array);
- __ mov(result_operand, isolate()->factory()->empty_string());
- __ jmp(&done);
-
- // Save the array length.
- __ bind(&non_trivial_array);
- __ mov(array_length_operand, array_length);
-
- // Save the FixedArray containing array's elements.
- // End of array's live range.
- elements = array;
- __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
- array = no_reg;
-
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ Set(index, Immediate(0));
- __ Set(string_length, Immediate(0));
- // Loop condition: while (index < length).
- // Live loop registers: index, array_length, string,
- // scratch, string_length, elements.
- if (generate_debug_code_) {
- __ cmp(index, array_length);
- __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
- }
- __ bind(&loop);
- __ mov(string, FieldOperand(elements,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(string, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
- __ add(string_length,
- FieldOperand(string, SeqOneByteString::kLengthOffset));
- __ j(overflow, &bailout);
- __ add(index, Immediate(1));
- __ cmp(index, array_length);
- __ j(less, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmp(array_length, 1);
- __ j(not_equal, &not_size_one_array);
- __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
- __ mov(result_operand, scratch);
- __ jmp(&done);
-
- __ bind(&not_size_one_array);
-
- // End of array_length live range.
- result_pos = array_length;
- array_length = no_reg;
-
- // Live registers:
- // string_length: Sum of string lengths, as a smi.
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat ASCII string.
- __ mov(string, separator_operand);
- __ JumpIfSmi(string, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, ASCII_STRING_TYPE);
- __ j(not_equal, &bailout);
-
- // Add (separator length times array_length) - separator length
- // to string_length.
- __ mov(scratch, separator_operand);
- __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset));
- __ sub(string_length, scratch); // May be negative, temporarily.
- __ imul(scratch, array_length_operand);
- __ j(overflow, &bailout);
- __ add(string_length, scratch);
- __ j(overflow, &bailout);
-
- __ shr(string_length, 1);
- // Live registers and stack values:
- // string_length
- // elements
- __ AllocateAsciiString(result_pos, string_length, scratch,
- index, string, &bailout);
- __ mov(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
-
-
- __ mov(string, separator_operand);
- __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ j(equal, &one_char_separator);
- __ j(greater, &long_separator);
-
-
- // Empty separator case
- __ mov(index, Immediate(0));
- __ jmp(&loop_1_condition);
- // Loop condition: while (index < length).
- __ bind(&loop_1);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // elements: the FixedArray of strings we are joining.
-
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
- __ bind(&loop_1_condition);
- __ cmp(index, array_length_operand);
- __ j(less, &loop_1); // End while (index < length).
- __ jmp(&done);
-
-
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its ASCII character value.
- __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ mov_b(separator_operand, scratch);
-
- __ Set(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_2_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_2);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator character to the result.
- __ mov_b(scratch, separator_operand);
- __ mov_b(Operand(result_pos, 0), scratch);
- __ inc(result_pos);
-
- __ bind(&loop_2_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_2); // End while (index < length).
- __ jmp(&done);
-
-
- // Long separator case (separator is more than one character).
- __ bind(&long_separator);
-
- __ Set(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_3_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_3);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator to the result.
- __ mov(string, separator_operand);
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
-
- __ bind(&loop_3_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_3); // End while (index < length).
- __ jmp(&done);
-
-
- __ bind(&bailout);
- __ mov(result_operand, isolate()->factory()->undefined_value());
- __ bind(&done);
- __ mov(eax, result_operand);
- // Drop temp values from the stack, and restore context register.
- __ add(esp, Immediate(3 * kPointerSize));
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ mov(eax, GlobalObjectOperand());
- __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function via a call IC.
- __ Set(ecx, Immediate(expr->name()));
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- } else {
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* property = expr->expression()->AsProperty();
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
-
- if (property != NULL) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ push(Immediate(Smi::FromInt(strict_mode_flag)));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(eax);
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
- if (var->IsUnallocated()) {
- __ push(var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(kNonStrictMode)));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(eax);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
- // Result of deleting non-global variables is false. 'this' is
- // not really a variable, though we implement it as one. The
- // subexpression does not have side effects.
- context()->Plug(var->is_this());
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(eax);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
- break;
- }
-
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(isolate()->factory()->undefined_value());
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else if (context()->IsTest()) {
- const TestContext* test = TestContext::cast(context());
- // The labels are swapped for the recursive call.
- VisitForControl(expr->expression(),
- test->false_label(),
- test->true_label(),
- test->fall_through());
- context()->Plug(test->true_label(), test->false_label());
- } else {
- // We handle value contexts explicitly rather than simply visiting
- // for control and plugging the control flow into the context,
- // because we need to prepare a pair of extra administrative AST ids
- // for the optimizing compiler.
- ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
- Label materialize_true, materialize_false, done;
- VisitForControl(expr->expression(),
- &materialize_false,
- &materialize_true,
- &materialize_true);
- __ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
- if (context()->IsAccumulatorValue()) {
- __ mov(eax, isolate()->factory()->true_value());
- } else {
- __ Push(isolate()->factory()->true_value());
- }
- __ jmp(&done, Label::kNear);
- __ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
- if (context()->IsAccumulatorValue()) {
- __ mov(eax, isolate()->factory()->false_value());
- } else {
- __ Push(isolate()->factory()->false_value());
- }
- __ bind(&done);
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- { StackValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ CallRuntime(Runtime::kTypeof, 1);
- context()->Plug(eax);
- break;
- }
-
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ JumpIfSmi(result_register(), &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
- // UnaryOpStub expects the argument to be in the
- // accumulator register eax.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
-
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
- // Expression can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- __ push(Immediate(Smi::FromInt(0)));
- }
- if (assign_type == NAMED_PROPERTY) {
- // Put the object both on the stack and in edx.
- VisitForAccumulatorValue(prop->obj());
- __ push(eax);
- __ mov(edx, eax);
- EmitNamedPropertyLoad(prop);
- } else {
- VisitForStackValue(prop->obj());
- VisitForStackValue(prop->key());
- __ mov(edx, Operand(esp, kPointerSize)); // Object.
- __ mov(ecx, Operand(esp, 0)); // Key.
- EmitKeyedPropertyLoad(prop);
- }
- }
-
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load my have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
- } else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
- }
-
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
- if (ShouldInlineSmiCase(expr->op())) {
- __ JumpIfSmi(eax, &no_conversion, Label::kNear);
- }
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(eax);
- break;
- case NAMED_PROPERTY:
- __ mov(Operand(esp, kPointerSize), eax);
- break;
- case KEYED_PROPERTY:
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- break;
- }
- }
- }
-
- // Inline smi case if we are in a loop.
- Label done, stub_call;
- JumpPatchSite patch_site(masm_);
-
- if (ShouldInlineSmiCase(expr->op())) {
- if (expr->op() == Token::INC) {
- __ add(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- }
- __ j(overflow, &stub_call, Label::kNear);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(eax, &done, Label::kNear);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ add(eax, Immediate(Smi::FromInt(1)));
- }
- }
-
- // Record position before stub call.
- SetSourcePosition(expr->position());
-
- // Call stub for +1/-1.
- __ mov(edx, eax);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
-
- // Store the value returned in eax.
- switch (assign_type) {
- case VARIABLE:
- if (expr->is_postfix()) {
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(eax);
- }
- // For all contexts except EffectContext We have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- // Perform the assignment as if via '='.
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
- }
- break;
- case NAMED_PROPERTY: {
- __ mov(ecx, prop->key()->AsLiteral()->handle());
- __ pop(edx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(eax);
- }
- break;
- }
- case KEYED_PROPERTY: {
- __ pop(ecx);
- __ pop(edx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- // Result is on the stack
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(eax);
- }
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- VariableProxy* proxy = expr->AsVariableProxy();
- ASSERT(!context()->IsEffect());
- ASSERT(!context()->IsTest());
-
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
- __ mov(edx, proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(ecx, Immediate(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallIC(ic);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(eax);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ push(esi);
- __ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(eax);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Expression* sub_expr,
- Handle<String> check) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(sub_expr);
- }
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
- if (check->Equals(isolate()->heap()->number_string())) {
- __ JumpIfSmi(eax, if_true);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
- __ j(above_equal, if_false);
- // Check for undetectable objects => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
- __ cmp(eax, isolate()->factory()->true_value());
- __ j(equal, if_true);
- __ cmp(eax, isolate()->factory()->false_value());
- Split(equal, if_true, if_false, fall_through);
- } else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
- __ cmp(eax, isolate()->factory()->null_value());
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, if_true);
- __ JumpIfSmi(eax, if_false);
- // Check for undetectable objects => true.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(ecx, Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_string())) {
- __ JumpIfSmi(eax, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
- __ j(equal, if_true);
- __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
- __ JumpIfSmi(eax, if_false);
- if (!FLAG_harmony_typeof) {
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, if_true);
- }
- if (FLAG_harmony_symbols) {
- __ CmpObjectType(eax, SYMBOL_TYPE, edx);
- __ j(equal, if_true);
- }
- __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
- __ j(below, if_false);
- __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, if_false);
- // Check for undetectable objects => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- Split(zero, if_true, if_false, fall_through);
- } else {
- if (if_false != fall_through) __ jmp(if_false);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
-
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr)) return;
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Token::Value op = expr->op();
- VisitForStackValue(expr->left());
- switch (op) {
- case Token::IN:
- VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ cmp(eax, isolate()->factory()->true_value());
- Split(equal, if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ test(eax, eax);
- // The stub returns 0 for true.
- Split(zero, if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- Condition cc = CompareIC::ComputeCondition(op);
- __ pop(edx);
-
- bool inline_smi_code = ShouldInlineSmiCase(op);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
- __ cmp(edx, eax);
- Split(cc, if_true, if_false, NULL);
- __ bind(&slow_case);
- }
-
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
- patch_site.EmitPatchInfo();
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ test(eax, eax);
- Split(cc, if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Handle<Object> nil_value = nil == kNullValue ?
- isolate()->factory()->null_value() :
- isolate()->factory()->undefined_value();
- __ cmp(eax, nil_value);
- if (expr->op() == Token::EQ_STRICT) {
- Split(equal, if_true, if_false, fall_through);
- } else {
- Handle<Object> other_nil_value = nil == kNullValue ?
- isolate()->factory()->undefined_value() :
- isolate()->factory()->null_value();
- __ j(equal, if_true);
- __ cmp(eax, other_nil_value);
- __ j(equal, if_true);
- __ JumpIfSmi(eax, if_false);
- // It can be an undetectable object.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(edx, Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(eax);
-}
-
-
-Register FullCodeGenerator::result_register() {
- return eax;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return esi;
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ mov(Operand(ebp, frame_offset), value);
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ mov(dst, ContextOperand(esi, context_index));
-}
-
-
-void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_global_scope() ||
- declaration_scope->is_module_scope()) {
- // Contexts nested in the native context have a canonical empty function
- // as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ push(Immediate(Smi::FromInt(0)));
- } else if (declaration_scope->is_eval_scope()) {
- // Contexts nested inside eval code have the same closure as the context
- // calling eval, not the anonymous closure containing the eval code.
- // Fetch it from the context.
- __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
- } else {
- ASSERT(declaration_scope->is_function_scope());
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- // Cook return address on top of stack (smi encoded Code* delta)
- ASSERT(!result_register().is(edx));
- __ pop(edx);
- __ sub(edx, Immediate(masm_->CodeObject()));
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiTag(edx);
- __ push(edx);
-
- // Store result register while executing finally block.
- __ push(result_register());
-
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(edx, Operand::StaticVariable(pending_message_obj));
- __ push(edx);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(edx, Operand::StaticVariable(has_pending_message));
- __ SmiTag(edx);
- __ push(edx);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(edx, Operand::StaticVariable(pending_message_script));
- __ push(edx);
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(edx));
- // Restore pending message from stack.
- __ pop(edx);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(Operand::StaticVariable(pending_message_script), edx);
-
- __ pop(edx);
- __ SmiUntag(edx);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(Operand::StaticVariable(has_pending_message), edx);
-
- __ pop(edx);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(Operand::StaticVariable(pending_message_obj), edx);
-
- // Restore result register from stack.
- __ pop(result_register());
-
- // Uncook return address.
- __ pop(edx);
- __ SmiUntag(edx);
- __ add(edx, Immediate(masm_->CodeObject()));
- __ jmp(edx);
-}
-
-
-#undef __
-
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ mov(esi, Operand(esp, StackHandlerConstants::kContextOffset));
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
- }
- __ PopTryHandler();
- __ call(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/ic-ia32.cc b/src/3rdparty/v8/src/ia32/ic-ia32.cc
deleted file mode 100644
index 428d830..0000000
--- a/src/3rdparty/v8/src/ia32/ic-ia32.cc
+++ /dev/null
@@ -1,1675 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, global_object);
- __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
- __ j(equal, global_object);
- __ cmp(type, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register r0,
- Register r1,
- Label* miss) {
- // Register usage:
- // receiver: holds the receiver on entry and is unchanged.
- // r0: used to hold receiver instance type.
- // Holds the property dictionary on fall through.
- // r1: used to hold receivers map.
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the receiver is a valid JS object.
- __ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
- __ cmp(r0, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, miss);
-
- // If this assert fails, we have to check upper bound too.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
- GenerateGlobalInstanceTypeCheck(masm, r0, miss);
-
- // Check for non-global object that requires access check.
- __ test_b(FieldOperand(r1, Map::kBitFieldOffset),
- (1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasNamedInterceptor));
- __ j(not_zero, miss);
-
- __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ CheckMap(r0, FACTORY->hash_table_map(), miss, DONT_DO_SMI_CHECK);
-}
-
-
-// Helper function used to load a property from a dictionary backing
-// storage. This function may fail to load a property even though it is
-// in the dictionary, so code at miss_label must always call a backup
-// property load that is complete. This function is safe to call if
-// name is not internalized, and will jump to the miss_label in that
-// case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register r0,
- Register r1,
- Register result) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is unchanged.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - used for the index into the property dictionary
- //
- // r1 - used to hold the capacity of the property dictionary.
- //
- // result - holds the result on exit.
-
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
-
- // If probing finds an entry in the dictionary, r0 contains the
- // index into the dictionary. Check that the value is a normal
- // property.
- __ bind(&done);
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
- Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
- __ j(not_zero, miss_label);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property eventhough it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not internalized, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register value,
- Register r0,
- Register r1) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is clobbered.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // value - holds the value to store and is unchanged.
- //
- // r0 - used for index into the property dictionary and is clobbered.
- //
- // r1 - used to hold the capacity of the property dictionary and is clobbered.
- Label done;
-
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
-
- // If probing finds an entry in the dictionary, r0 contains the
- // index into the dictionary. Check that the value is a normal
- // property that is not read only.
- __ bind(&done);
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
- Immediate(kTypeAndReadOnlyMask));
- __ j(not_zero, miss_label);
-
- // Store the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
- __ mov(Operand(r0, 0), value);
-
- // Update write barrier. Make sure not to clobber the value.
- __ mov(r1, value);
- __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- int interceptor_bit,
- Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
-
- // Get the map of the receiver.
- __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Check bit field.
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
- __ j(not_zero, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing
- // into string objects works as intended.
- ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-
- __ CmpInstanceType(map, JS_OBJECT_TYPE);
- __ j(below, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register scratch,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // key - holds the key and is unchanged (must be a smi).
- // Scratch registers:
- // scratch - used to hold elements of the receiver and the loaded value.
- // result - holds the result on exit if the load succeeds and
- // we fall through.
-
- __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ CheckMap(scratch,
- FACTORY->fixed_array_map(),
- not_fast_array,
- DONT_DO_SMI_CHECK);
- } else {
- __ AssertFastElements(scratch);
- }
- // Check that the key (index) is within bounds.
- __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
- __ j(above_equal, out_of_range);
- // Fast case: Do the load.
- STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
- __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(scratch, Immediate(FACTORY->the_hole_value()));
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, out_of_range);
- if (!result.is(scratch)) {
- __ mov(result, scratch);
- }
-}
-
-
-// Checks whether a key is an array index string or an internalized string.
-// Falls through if the key is an internalized string.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_internalized) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
- __ j(above_equal, not_internalized);
-
- // Is the string an array index, with cached numeric value?
- __ mov(hash, FieldOperand(key, String::kHashFieldOffset));
- __ test(hash, Immediate(String::kContainsCachedArrayIndexMask));
- __ j(zero, index_string);
-
- // Is the string internalized?
- STATIC_ASSERT(kInternalizedTag != 0);
- __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsInternalizedMask);
- __ j(zero, not_internalized);
-}
-
-
-static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
- Factory* factory = masm->isolate()->factory();
-
- // Check that the receiver is a JSObject. Because of the elements
- // map check later, we do not need to check for interceptors or
- // whether it requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
- __ j(below, slow_case);
-
- // Check that the key is a positive smi.
- __ test(key, Immediate(0x80000001));
- __ j(not_zero, slow_case);
-
- // Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
- __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
- __ sub(scratch2, Immediate(Smi::FromInt(2)));
- __ cmp(key, scratch2);
- __ j(above_equal, unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
- __ mov(scratch2, FieldOperand(scratch1,
- key,
- times_half_pointer_size,
- kHeaderSize));
- __ cmp(scratch2, factory->the_hole_value());
- __ j(equal, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- const int kContextOffset = FixedArray::kHeaderSize;
- __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
- return FieldOperand(scratch1,
- scratch2,
- times_half_pointer_size,
- Context::kHeaderSize);
-}
-
-
-static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmp(key, scratch);
- __ j(greater_equal, slow_case);
- return FieldOperand(backing_store,
- key,
- times_half_pointer_size,
- FixedArray::kHeaderSize);
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(ecx, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(eax, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, edx, ecx, eax, eax, NULL, &slow);
- Isolate* isolate = masm->isolate();
- Counters* counters = isolate->counters();
- __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // edx: receiver
- // ebx: untagged index
- // ecx: key
- // eax: elements
- __ CheckMap(eax,
- isolate->factory()->hash_table_map(),
- &slow,
- DONT_DO_SMI_CHECK);
- Label slow_pop_receiver;
- // Push receiver on the stack to free up a register for the dictionary
- // probing.
- __ push(edx);
- __ LoadFromNumberDictionary(&slow_pop_receiver, eax, ecx, ebx, edx, edi, eax);
- // Pop receiver before returning.
- __ pop(edx);
- __ ret(0);
-
- __ bind(&slow_pop_receiver);
- // Pop the receiver from the stack and jump to runtime.
- __ pop(edx);
-
- __ bind(&slow);
- // Slow case: jump to runtime.
- // edx: receiver
- // ecx: key
- __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
- __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(isolate->factory()->hash_table_map()));
- __ j(equal, &probe_dictionary);
-
- // The receiver's map is still in eax, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- if (FLAG_debug_code) {
- __ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
- __ Check(equal, "Map is no longer in eax.");
- }
- __ mov(ebx, eax); // Keep the map around for later.
- __ shr(eax, KeyedLookupCache::kMapHashShift);
- __ mov(edi, FieldOperand(ecx, String::kHashFieldOffset));
- __ shr(edi, String::kHashShift);
- __ xor_(eax, edi);
- __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
-
- // Load the key (consisting of map and internalized string) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- __ mov(edi, eax);
- __ shl(edi, kPointerSizeLog2 + 1);
- if (i != 0) {
- __ add(edi, Immediate(kPointerSize * i * 2));
- }
- __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &try_next_entry);
- __ add(edi, Immediate(kPointerSize));
- __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(equal, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- __ lea(edi, Operand(eax, 1));
- __ shl(edi, kPointerSizeLog2 + 1);
- __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
- __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &slow);
- __ add(edi, Immediate(kPointerSize));
- __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &slow);
-
- // Get field offset.
- // edx : receiver
- // ebx : receiver's map
- // ecx : key
- // eax : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- if (i != 0) {
- __ add(eax, Immediate(i));
- }
- __ mov(edi,
- Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
- __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ sub(edi, eax);
- __ j(above_equal, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ add(eax, edi);
- __ mov(eax, FieldOperand(edx, eax, times_pointer_size, 0));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Load property array property.
- __ bind(&property_array_property);
- __ mov(eax, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ mov(eax, FieldOperand(eax, edi, times_pointer_size,
- FixedArray::kHeaderSize));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
-
- __ mov(eax, FieldOperand(edx, JSObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
-
- GenerateDictionaryLoad(masm, &slow, ebx, ecx, eax, edi, eax);
- __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_string);
- __ IndexFromHash(ebx, ecx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key (index)
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Register receiver = edx;
- Register index = ecx;
- Register scratch = ebx;
- Register result = eax;
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ ret(0);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ test(ecx, Immediate(kSmiTagMask | kSmiSignMask));
- __ j(not_zero, &slow);
-
- // Get the map of the receiver.
- __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ movzx_b(eax, FieldOperand(eax, Map::kBitFieldOffset));
- __ and_(eax, Immediate(kSlowCaseBitFieldMask));
- __ cmp(eax, Immediate(1 << Map::kHasIndexedInterceptor));
- __ j(not_zero, &slow);
-
- // Everything is fine, call runtime.
- __ pop(eax);
- __ push(edx); // receiver
- __ push(ecx); // key
- __ push(eax); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
- masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, notin;
- Factory* factory = masm->isolate()->factory();
- Operand mapped_location =
- GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
- __ mov(eax, mapped_location);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
- __ cmp(unmapped_location, factory->the_hole_value());
- __ j(equal, &slow);
- __ mov(eax, unmapped_location);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, notin;
- Operand mapped_location =
- GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, edi, &notin, &slow);
- __ mov(mapped_location, eax);
- __ lea(ecx, mapped_location);
- __ mov(edx, eax);
- __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, ecx, ebx, edi, &slow);
- __ mov(unmapped_location, eax);
- __ lea(edi, unmapped_location);
- __ mov(edx, eax);
- __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
- // eax: value
- // ecx: key (a smi)
- // edx: receiver
- // ebx: FixedArray receiver->elements
- // edi: receiver map
- // Fast case: Do the store, could either Object or double.
- __ bind(fast_object);
- if (check_map == kCheckMap) {
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
- __ j(not_equal, fast_double);
- }
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(eax, &non_smi_value);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
- __ ret(0);
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(edi, &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
- // Update write barrier for the elements array address.
- __ mov(edx, eax); // Preserve the value which is returned.
- __ RecordWriteArray(
- ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ ret(0);
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
- __ j(not_equal, slow);
- // If the value is a number, store it as a double in the FastDoubleElements
- // array.
- }
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(eax, ebx, ecx, edi, xmm0,
- &transition_double_elements, false);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- __ ret(0);
-
- __ bind(&transition_smi_elements);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-
- // Transition the array appropriately depending on the value type.
- __ CheckMap(eax,
- masm->isolate()->factory()->heap_number_map(),
- &non_double_value,
- DONT_DO_SMI_CHECK);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
- // and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- ebx,
- edi,
- slow);
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- slow);
- mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
- slow);
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
- // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- slow);
- mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(edx, &slow);
- // Get the map from the receiver.
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
- __ j(not_zero, &slow);
- // Check that the key is a smi.
- __ JumpIfNotSmi(ecx, &slow);
- __ CmpInstanceType(edi, JS_ARRAY_TYPE);
- __ j(equal, &array);
- // Check that the object is some kind of JSObject.
- __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow);
-
- // Object case: Check key against length in the elements array.
- // eax: value
- // edx: JSObject
- // ecx: key (a smi)
- // edi: receiver map
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ j(below, &fast_object);
-
- // Slow case: call runtime.
- __ bind(&slow);
- GenerateRuntimeSetProperty(masm, strict_mode);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // eax: value
- // edx: receiver, a JSArray
- // ecx: key, a smi.
- // ebx: receiver->elements, a FixedArray
- // edi: receiver map
- // flags: compare (ecx, edx.length())
- // do not leave holes in the array:
- __ j(not_equal, &slow);
- __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
- __ j(not_equal, &check_if_double_array);
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
- __ j(not_equal, &slow);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is the length is always a smi.
- __ bind(&array);
- // eax: value
- // edx: receiver, a JSArray
- // ecx: key, a smi.
- // edi: receiver map
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-
- // Check the key against the length in the array and fall through to the
- // common store code.
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &extra);
-
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength);
-}
-
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- Isolate* isolate = masm->isolate();
- isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, eax);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(edx, &number);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
- __ j(not_equal, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, edx);
- __ jmp(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ CmpInstanceType(ebx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, edx);
- __ jmp(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ cmp(edx, isolate->factory()->true_value());
- __ j(equal, &boolean);
- __ cmp(edx, isolate->factory()->false_value());
- __ j(not_equal, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edi : function
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Check that the result is not a smi.
- __ JumpIfSmi(edi, miss);
-
- // Check that the value is a JavaScript function, fetching its map into eax.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
- __ j(not_equal, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-// The generated code falls through if the call should be handled by runtime.
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
-
- // eax: elements
- // Search the dictionary placing the result in edi.
- GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, edi);
- GenerateFunctionTailCall(masm, argc, &miss);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(counters->call_miss(), 1);
- } else {
- __ IncrementCounter(counters->keyed_call_miss(), 1);
- }
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ push(edx);
- __ push(ecx);
-
- // Call the entry.
- CEntryStub stub(1);
- __ mov(eax, Immediate(2));
- __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
- __ CallStub(&stub);
-
- // Move result to edi and exit the internal frame.
- __ mov(edi, eax);
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
- __ JumpIfSmi(edx, &invoke, Label::kNear);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, &global, Label::kNear);
- __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke, Label::kNear);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(edi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC,
- extra_state);
-
- GenerateMiss(masm, argc, extra_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(ecx, &check_string);
-
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, edx, ecx, eax, edi, &check_number_dictionary, &slow_load);
- Isolate* isolate = masm->isolate();
- Counters* counters = isolate->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
-
- __ bind(&do_call);
- // receiver in edx is not used after this point.
- // ecx: key
- // edi: function
- GenerateFunctionTailCall(masm, argc, &slow_call);
-
- __ bind(&check_number_dictionary);
- // eax: elements
- // ecx: smi key
- // Check whether the elements is a number dictionary.
- __ CheckMap(eax,
- isolate->factory()->hash_table_map(),
- &slow_load,
- DONT_DO_SMI_CHECK);
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- // ebx: untagged index
- // Receiver in edx will be clobbered, need to reload it on miss.
- __ LoadFromNumberDictionary(
- &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&slow_reload_receiver);
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(ecx); // save the key
- __ push(edx); // pass the receiver
- __ push(ecx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(ecx); // restore the key
- // Leave the internal frame.
- }
-
- __ mov(edi, eax);
- __ jmp(&do_call);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow_call);
-
- // The key is known to be an internalized string.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ CheckMap(ebx,
- isolate->factory()->hash_table_map(),
- &lookup_monomorphic_cache,
- DONT_DO_SMI_CHECK);
-
- GenerateDictionaryLoad(masm, &slow_load, ebx, ecx, eax, edi, edi);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
- CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor an internalized string,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
- GenerateMiss(masm, argc);
-
- __ bind(&index_string);
- __ IndexFromHash(ebx, ecx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label slow, notin;
- Factory* factory = masm->isolate()->factory();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- Operand mapped_location =
- GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
- __ mov(edi, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
- __ cmp(unmapped_location, factory->the_hole_value());
- __ j(equal, &slow);
- __ mov(edi, unmapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Check if the name is a string.
- Label miss;
- __ JumpIfSmi(ecx, &miss);
- Condition cond = masm->IsObjectStringType(ecx, eax, eax);
- __ j(NegateCondition(cond), &miss);
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::LOAD_IC, MONOMORPHIC, Code::HANDLER_FRAGMENT);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, edx, ecx, ebx, eax);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
-
- // eax: elements
- // Search the dictionary placing the result in eax.
- GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, eax);
- __ ret(0);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
-
- __ pop(ebx);
- __ push(edx); // receiver
- __ push(ecx); // name
- __ push(ebx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
-
- __ pop(ebx);
- __ push(edx); // receiver
- __ push(ecx); // name
- __ push(ebx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx); // receiver
- __ push(ecx); // name
- __ push(ebx); // return address
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
- no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(ebx);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Label miss, restore_miss;
-
- GenerateStringDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
-
- // A lot of registers are needed for storing to slow case
- // objects. Push and restore receiver but rely on
- // GenerateDictionaryStore preserving the value and name.
- __ push(edx);
- GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
- __ Drop(1);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1);
- __ ret(0);
-
- __ bind(&restore_miss);
- __ pop(edx);
-
- __ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
- __ push(Immediate(Smi::FromInt(strict_mode)));
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
- __ push(Immediate(Smi::FromInt(strict_mode))); // Strict mode.
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(ebx);
-
- // Do tail-call to runtime routine.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ebx : target map
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- // Must return the modified receiver in eax.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
- __ mov(eax, edx);
- __ Ret();
- __ bind(&fail);
- }
-
- __ pop(ebx);
- __ push(edx);
- __ push(ebx); // return address
- // Leaving the code managed by the register allocator and return to the
- // convention of using esi as context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ebx : target map
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- // Must return the modified receiver in eax.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
- __ mov(eax, edx);
- __ Ret();
- __ bind(&fail);
- }
-
- __ pop(ebx);
- __ push(edx);
- __ push(ebx); // return address
- // Leaving the code managed by the register allocator and return to the
- // convention of using esi as context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- return *test_instruction_address == Assembler::kTestAlByte;
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestAlByte) {
- ASSERT(*test_instruction_address == Assembler::kNopByte);
- return;
- }
-
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction and the
- // condition code uses at the patched jump.
- int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- address, test_instruction_address, delta);
- }
-
- // Patch with a short conditional jump. Enabling means switching from a short
- // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
- // reverse operation of that.
- Address jmp_address = test_instruction_address - delta;
- ASSERT((check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode)
- : (*jmp_address == Assembler::kJnzShortOpcode ||
- *jmp_address == Assembler::kJzShortOpcode));
- Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
- : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
- *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
deleted file mode 100644
index 8ef3bdf..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
+++ /dev/null
@@ -1,6266 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "ia32/lithium-codegen-ia32.h"
-#include "ic.h"
-#include "code-stubs.h"
-#include "deoptimizer.h"
-#include "stub-cache.h"
-#include "codegen.h"
-
-namespace v8 {
-namespace internal {
-
-
-// When invoking builtins, we need to record the safepoint in the middle of
-// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) {}
- virtual ~SafepointGenerator() { }
-
- virtual void BeforeCall(int call_size) const {}
-
- virtual void AfterCall() const {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
- ASSERT(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- support_aligned_spilled_doubles_ = info()->IsOptimizing();
-
- dynamic_frame_alignment_ = info()->IsOptimizing() &&
- ((chunk()->num_double_slots() > 2 &&
- !chunk()->graph()->is_recursive()) ||
- !info()->osr_ast_id().IsNone());
-
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateJumpTable() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
- code->set_stack_slots(GetStackSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
- PopulateDeoptimizationData(code);
- if (!info()->IsStub()) {
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
- }
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
-}
-
-
-void LCodeGen::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). ecx is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ test(ecx, Operand(ecx));
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ mov(Operand(esp, receiver_offset),
- Immediate(isolate()->factory()->undefined_value()));
- __ bind(&ok);
- }
-
- if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
- // Move state of dynamic frame alignment into edx.
- __ mov(edx, Immediate(kNoAlignmentPadding));
-
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp + 4 to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(not_zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ bind(&do_not_pad);
- }
- }
-
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- ASSERT(!frame_is_built_);
- frame_is_built_ = true;
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- if (info()->IsStub()) {
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- } else {
- __ push(edi); // Callee's JS function.
- }
- }
-
- if (info()->IsOptimizing() &&
- dynamic_frame_alignment_ &&
- FLAG_debug_code) {
- __ test(esp, Immediate(kPointerSize));
- __ Assert(zero, "frame is expected to be aligned");
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- ASSERT(slots != 0 || !info()->IsOptimizing());
- if (slots > 0) {
- if (slots == 1) {
- if (dynamic_frame_alignment_) {
- __ push(edx);
- } else {
- __ push(Immediate(kNoAlignmentPadding));
- }
- } else {
- if (FLAG_debug_code) {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
- __ push(eax);
- __ mov(Operand(eax), Immediate(slots));
- Label loop;
- __ bind(&loop);
- __ mov(MemOperand(esp, eax, times_4, 0),
- Immediate(kSlotsZapValue));
- __ dec(eax);
- __ j(not_zero, &loop);
- __ pop(eax);
- } else {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- // On windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write to each page in turn (the value is irrelevant).
- const int kPageSize = 4 * KB;
- for (int offset = slots * kPointerSize - kPageSize;
- offset > 0;
- offset -= kPageSize) {
- __ mov(Operand(esp, offset), eax);
- }
-#endif
- }
-
- if (support_aligned_spilled_doubles_) {
- Comment(";;; Store dynamic frame alignment tag for spilled doubles");
- // Store dynamic frame alignment state in the first local.
- int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
- if (dynamic_frame_alignment_) {
- __ mov(Operand(ebp, offset), edx);
- } else {
- __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
- }
- }
- }
-
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- Comment(";;; Save clobbered callee double registers");
- CpuFeatures::Scope scope(SSE2);
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ movdbl(MemOperand(esp, count * kDoubleSize),
- XMMRegister::FromAllocationIndex(save_iterator.Current()));
- save_iterator.Advance();
- count++;
- }
- }
- }
-
- // Possibly allocate a local context.
- int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope() != NULL && scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment(";;; Allocate local context");
- // Argument to NewContext is the function, which is still in edi.
- __ push(edi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both eax and esi. It replaces the context
- // passed to us. It's saved in the stack and kept live in esi.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
-
- // Copy parameters into context if necessary.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ mov(eax, Operand(ebp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(var->index());
- __ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers eax and ebx.
- __ RecordWriteContextSlot(esi,
- context_offset,
- eax,
- ebx,
- kDontSaveFPRegs);
- }
- }
- Comment(";;; End allocate local context");
- }
-
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- // We have not executed any compiled code yet, so esi still holds the
- // incoming context.
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
-
- if (emit_instructions) {
- if (FLAG_code_comments) {
- HValue* hydrogen = instr->hydrogen_value();
- if (hydrogen != NULL) {
- if (hydrogen->IsChange()) {
- HValue* changed_value = HChange::cast(hydrogen)->value();
- int use_id = 0;
- const char* use_mnemo = "dead";
- if (hydrogen->UseCount() >= 1) {
- HValue* use_value = hydrogen->uses().value();
- use_id = use_value->id();
- use_mnemo = use_value->Mnemonic();
- }
- Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
- current_instruction_, instr->Mnemonic(),
- changed_value->id(), changed_value->Mnemonic(),
- use_id, use_mnemo);
- } else {
- Comment(";;; @%d: %s. <#%d>", current_instruction_,
- instr->Mnemonic(), hydrogen->id());
- }
- } else {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- }
- }
- instr->CompileToNative(this);
- }
- }
- EnsureSpaceForLazyDeopt();
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateJumpTable() {
- Label needs_frame_not_call;
- Label needs_frame_is_call;
- for (int i = 0; i < jump_table_.length(); i++) {
- __ bind(&jump_table_[i].label);
- Address entry = jump_table_[i].address;
- bool is_lazy_deopt = jump_table_[i].is_lazy_deopt;
- Deoptimizer::BailoutType type =
- is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- int id = Deoptimizer::GetDeoptimizationId(entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- if (jump_table_[i].needs_frame) {
- __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
- if (is_lazy_deopt) {
- if (needs_frame_is_call.is_bound()) {
- __ jmp(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- // Push a PC inside the function so that the deopt code can find where
- // the deopt comes from. It doesn't have to be the precise return
- // address of a "calling" LAZY deopt, it only has to be somewhere
- // inside the code body.
- Label push_approx_pc;
- __ call(&push_approx_pc);
- __ bind(&push_approx_pc);
- // Push the continuation which was stashed were the ebp should
- // be. Replace it with the saved ebp.
- __ push(MemOperand(esp, 3 * kPointerSize));
- __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
- __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
- __ ret(0); // Call the continuation without clobbering registers.
- }
- } else {
- if (needs_frame_not_call.is_bound()) {
- __ jmp(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- // Push the continuation which was stashed were the ebp should
- // be. Replace it with the saved ebp.
- __ push(MemOperand(esp, 2 * kPointerSize));
- __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
- __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
- __ ret(0); // Call the continuation without clobbering registers.
- }
- }
- } else {
- if (is_lazy_deopt) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
- }
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred build frame",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(!frame_is_built_);
- ASSERT(info()->IsStub());
- frame_is_built_ = true;
- // Build the frame in such a way that esi isn't trashed.
- __ push(ebp); // Caller's frame pointer.
- __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- __ lea(ebp, Operand(esp, 2 * kPointerSize));
- }
- Comment(";;; Deferred code @%d: %s.",
- code->instruction_index(),
- code->instr()->Mnemonic());
- code->Generate();
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred destroy frame",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(frame_is_built_);
- frame_is_built_ = false;
- __ mov(esp, ebp);
- __ pop(ebp);
- }
- __ jmp(code->exit());
- }
- }
-
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
- if (!info()->IsStub()) {
- // For lazy deoptimization we need space to patch a call after every call.
- // Ensure there is always space for such patching, even if the code ends
- // in a call.
- int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
- while (masm()->pc_offset() < target_offset) {
- masm()->nop();
- }
- }
- safepoints_.Emit(masm(), GetStackSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(int index) const {
- return XMMRegister::FromAllocationIndex(index);
-}
-
-
-bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
- return op->IsDoubleRegister();
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- ASSERT(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return constant->handle();
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsInteger32();
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) const {
- if (op->IsRegister()) return Operand(ToRegister(op));
- if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return Operand(ebp, StackSlotOffset(op->index()));
-}
-
-
-Operand LCodeGen::HighOperand(LOperand* op) {
- ASSERT(op->IsDoubleStackSlot());
- return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* pushed_arguments_index,
- int* pushed_arguments_count) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
-
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *pushed_arguments_index = -environment->parameter_count();
- *pushed_arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- pushed_arguments_index,
- pushed_arguments_count);
- bool has_closure_id = !info()->closure().is_null() &&
- *info()->closure() != *environment->closure();
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- ASSERT(translation_size == 1);
- ASSERT(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- ASSERT(translation_size == 2);
- ASSERT(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- default:
- UNREACHABLE();
- }
-
- // Inlined frames which push their arguments cause the index to be
- // bumped and another stack area to be used for materialization,
- // otherwise actual argument values are unknown for inlined frames.
- bool arguments_known = true;
- int arguments_index = *pushed_arguments_index;
- int arguments_count = *pushed_arguments_count;
- if (environment->entry() != NULL) {
- arguments_known = environment->entry()->arguments_pushed();
- arguments_index = arguments_index < 0
- ? GetStackSlotCount() : arguments_index + arguments_count;
- arguments_count = environment->entry()->arguments_count() + 1;
- if (environment->entry()->arguments_pushed()) {
- *pushed_arguments_index = arguments_index;
- *pushed_arguments_count = arguments_count;
- }
- }
-
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- arguments_known,
- arguments_index,
- arguments_count);
- }
- }
-
- AddToTranslation(translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- }
-}
-
-
-void LCodeGen::AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(
- arguments_known, arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
- if (is_tagged) {
- translation->StoreStackSlot(op->index());
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
- } else {
- translation->StoreInt32StackSlot(op->index());
- }
- } else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- XMMRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode) {
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- if (code->kind() == Code::BINARY_OP_IC ||
- code->kind() == Code::COMPARE_IC) {
- __ nop();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* fun,
- int argc,
- LInstruction* instr) {
- ASSERT(instr != NULL);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- __ CallRuntime(fun, argc);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-
- ASSERT(info()->is_calling());
-}
-
-
-void LCodeGen::LoadContextFromDeferred(LOperand* context) {
- if (context->IsRegister()) {
- if (!ToRegister(context).is(esi)) {
- __ mov(esi, ToRegister(context));
- }
- } else if (context->IsStackSlot()) {
- __ mov(esi, ToOperand(context));
- } else if (context->IsConstantOperand()) {
- HConstant* constant =
- chunk_->LookupConstant(LConstantOperand::cast(context));
- __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
- } else {
- UNREACHABLE();
- }
-}
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context) {
- LoadContextFromDeferred(context);
-
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-
- ASSERT(info()->is_calling());
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(
- LEnvironment* environment, Safepoint::DeoptMode mode) {
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- ASSERT(info()->IsOptimizing() || info()->IsStub());
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort("bailout was not prepared");
- return;
- }
-
- if (FLAG_deopt_every_n_times != 0) {
- Handle<SharedFunctionInfo> shared(info_->shared_info());
- Label no_deopt;
- __ pushfd();
- __ push(eax);
- __ push(ebx);
- __ mov(ebx, shared);
- __ mov(eax,
- FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset));
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
- __ j(not_zero, &no_deopt, Label::kNear);
- if (FLAG_trap_on_deopt) __ int3();
- __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
- eax);
- __ pop(ebx);
- __ pop(eax);
- __ popfd();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-
- __ bind(&no_deopt);
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
- eax);
- __ pop(ebx);
- __ pop(eax);
- __ popfd();
- }
-
- if (FLAG_trap_on_deopt) {
- Label done;
- if (cc != no_condition) {
- __ j(NegateCondition(cc), &done, Label::kNear);
- }
- __ int3();
- __ bind(&done);
- }
-
- ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
- if (cc == no_condition && frame_is_built_) {
- if (needs_lazy_deopt) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
- } else {
- // We often have several deopts to the same entry, reuse the last
- // jump entry if this is the case.
- if (jump_table_.is_empty() ||
- jump_table_.last().address != entry ||
- jump_table_.last().needs_frame != !frame_is_built_ ||
- jump_table_.last().is_lazy_deopt != needs_lazy_deopt) {
- JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
- jump_table_.Add(table_entry, zone());
- }
- if (cc == no_condition) {
- __ jmp(&jump_table_.last().label);
- } else {
- __ j(cc, &jump_table_.last().label);
- }
- }
-}
-
-
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
- maps.Add(map, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
-}
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
-
- Handle<ByteArray> translations = translations_.CreateByteArray();
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
- }
-
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
- LInstruction* instr, SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- ASSERT(kind == expected_safepoint_kind_);
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint =
- safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
- RecordSafepoint(&empty_pointers, mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
-}
-
-
-void LCodeGen::RecordPosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
-
- if (divisor < 0) divisor = -divisor;
-
- Label positive_dividend, done;
- __ test(dividend, Operand(dividend));
- __ j(not_sign, &positive_dividend, Label::kNear);
- __ neg(dividend);
- __ and_(dividend, divisor - 1);
- __ neg(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ j(not_zero, &done, Label::kNear);
- DeoptimizeIf(no_condition, instr->environment());
- } else {
- __ jmp(&done, Label::kNear);
- }
- __ bind(&positive_dividend);
- __ and_(dividend, divisor - 1);
- __ bind(&done);
- } else {
- Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
- Register left_reg = ToRegister(instr->left());
- Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
-
- ASSERT(left_reg.is(eax));
- ASSERT(result_reg.is(edx));
- ASSERT(!right_reg.is(eax));
- ASSERT(!right_reg.is(edx));
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr->environment());
- }
-
- __ test(left_reg, Operand(left_reg));
- __ j(zero, &remainder_eq_dividend, Label::kNear);
- __ j(sign, &slow, Label::kNear);
-
- __ test(right_reg, Operand(right_reg));
- __ j(not_sign, &both_positive, Label::kNear);
- // The sign of the divisor doesn't matter.
- __ neg(right_reg);
-
- __ bind(&both_positive);
- // If the dividend is smaller than the nonnegative
- // divisor, the dividend is the result.
- __ cmp(left_reg, Operand(right_reg));
- __ j(less, &remainder_eq_dividend, Label::kNear);
-
- // Check if the divisor is a PowerOfTwo integer.
- Register scratch = ToRegister(instr->temp());
- __ mov(scratch, right_reg);
- __ sub(Operand(scratch), Immediate(1));
- __ test(scratch, Operand(right_reg));
- __ j(not_zero, &do_subtraction, Label::kNear);
- __ and_(left_reg, Operand(scratch));
- __ jmp(&remainder_eq_dividend, Label::kNear);
-
- __ bind(&do_subtraction);
- const int kUnfolds = 3;
- // Try a few subtractions of the dividend.
- __ mov(scratch, left_reg);
- for (int i = 0; i < kUnfolds; i++) {
- // Reduce the dividend by the divisor.
- __ sub(left_reg, Operand(right_reg));
- // Check if the dividend is less than the divisor.
- __ cmp(left_reg, Operand(right_reg));
- __ j(less, &remainder_eq_dividend, Label::kNear);
- }
- __ mov(left_reg, scratch);
-
- // Slow case, using idiv instruction.
- __ bind(&slow);
-
- // Check for (kMinInt % -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left_reg, kMinInt);
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmp(right_reg, -1);
- DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // Sign extend to edx.
- __ cdq();
-
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- Label done;
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &positive_left, Label::kNear);
- __ idiv(right_reg);
-
- // Test the remainder for 0, because then the result would be -0.
- __ test(result_reg, Operand(result_reg));
- __ j(not_zero, &done, Label::kNear);
-
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&positive_left);
- __ idiv(right_reg);
- __ bind(&done);
- } else {
- __ idiv(right_reg);
- }
- __ jmp(&done, Label::kNear);
-
- __ bind(&remainder_eq_dividend);
- __ mov(result_reg, left_reg);
-
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(dividend, Operand(dividend));
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
-
- if (test_value != 0) {
- // Deoptimize if remainder is not 0.
- __ test(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sar(dividend, power);
- }
-
- if (divisor < 0) __ neg(dividend);
-
- return;
- }
-
- LOperand* right = instr->right();
- ASSERT(ToRegister(instr->result()).is(eax));
- ASSERT(ToRegister(instr->left()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(edx));
-
- Register left_reg = eax;
-
- // Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(right_reg, ToOperand(right));
- DeoptimizeIf(zero, instr->environment());
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ test(left_reg, Operand(left_reg));
- __ j(not_zero, &left_not_zero, Label::kNear);
- __ test(right_reg, ToOperand(right));
- DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left_reg, kMinInt);
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmp(right_reg, -1);
- DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // Sign extend to edx.
- __ cdq();
- __ idiv(right_reg);
-
- if (!instr->is_flooring()) {
- // Deoptimize if remainder is not 0.
- __ test(edx, Operand(edx));
- DeoptimizeIf(not_zero, instr->environment());
- } else {
- Label done;
- __ test(edx, edx);
- __ j(zero, &done, Label::kNear);
- __ xor_(edx, right_reg);
- __ sar(edx, 31);
- __ add(eax, edx);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- ASSERT(instr->right()->IsConstantOperand());
-
- Register dividend = ToRegister(instr->left());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- Register result = ToRegister(instr->result());
-
- switch (divisor) {
- case 0:
- DeoptimizeIf(no_condition, instr->environment());
- return;
-
- case 1:
- __ Move(result, dividend);
- return;
-
- case -1:
- __ Move(result, dividend);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
- return;
- }
-
- uint32_t divisor_abs = abs(divisor);
- if (IsPowerOf2(divisor_abs)) {
- int32_t power = WhichPowerOf2(divisor_abs);
- if (divisor < 0) {
- // Input[dividend] is clobbered.
- // The sequence is tedious because neg(dividend) might overflow.
- __ mov(result, dividend);
- __ sar(dividend, 31);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ shl(dividend, 32 - power);
- __ sar(result, power);
- __ not_(dividend);
- // Clear result.sign if dividend.sign is set.
- __ and_(result, dividend);
- } else {
- __ Move(result, dividend);
- __ sar(result, power);
- }
- } else {
- ASSERT(ToRegister(instr->left()).is(eax));
- ASSERT(ToRegister(instr->result()).is(edx));
- Register scratch = ToRegister(instr->temp());
-
- // Find b which: 2^b < divisor_abs < 2^(b+1).
- unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
- unsigned shift = 32 + b; // Precision +1bit (effectively).
- double multiplier_f =
- static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
- int64_t multiplier;
- if (multiplier_f - floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(floor(multiplier_f));
- } else {
- multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
- }
- // The multiplier is a uint32.
- ASSERT(multiplier > 0 &&
- multiplier < (static_cast<int64_t>(1) << 32));
- __ mov(scratch, dividend);
- if (divisor < 0 &&
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- __ mov(edx, static_cast<int32_t>(multiplier));
- __ imul(edx);
- if (static_cast<int32_t>(multiplier) < 0) {
- __ add(edx, scratch);
- }
- Register reg_lo = eax;
- Register reg_byte_scratch = scratch;
- if (!reg_byte_scratch.is_byte_register()) {
- __ xchg(reg_lo, reg_byte_scratch);
- reg_lo = scratch;
- reg_byte_scratch = eax;
- }
- if (divisor < 0) {
- __ xor_(reg_byte_scratch, reg_byte_scratch);
- __ cmp(reg_lo, 0x40000000);
- __ setcc(above, reg_byte_scratch);
- __ neg(edx);
- __ sub(edx, reg_byte_scratch);
- } else {
- __ xor_(reg_byte_scratch, reg_byte_scratch);
- __ cmp(reg_lo, 0xC0000000);
- __ setcc(above_equal, reg_byte_scratch);
- __ add(edx, reg_byte_scratch);
- }
- __ sar(edx, shift - 32);
- }
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->left());
- LOperand* right = instr->right();
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ mov(ToRegister(instr->temp()), left);
- }
-
- if (right->IsConstantOperand()) {
- // Try strength reductions on the multiplication.
- // All replacement instructions are at most as long as the imul
- // and have better latency.
- int constant = ToInteger32(LConstantOperand::cast(right));
- if (constant == -1) {
- __ neg(left);
- } else if (constant == 0) {
- __ xor_(left, Operand(left));
- } else if (constant == 2) {
- __ add(left, Operand(left));
- } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- // If we know that the multiplication can't overflow, it's safe to
- // use instructions that don't set the overflow flag for the
- // multiplication.
- switch (constant) {
- case 1:
- // Do nothing.
- break;
- case 3:
- __ lea(left, Operand(left, left, times_2, 0));
- break;
- case 4:
- __ shl(left, 2);
- break;
- case 5:
- __ lea(left, Operand(left, left, times_4, 0));
- break;
- case 8:
- __ shl(left, 3);
- break;
- case 9:
- __ lea(left, Operand(left, left, times_8, 0));
- break;
- case 16:
- __ shl(left, 4);
- break;
- default:
- __ imul(left, left, constant);
- break;
- }
- } else {
- __ imul(left, left, constant);
- }
- } else {
- __ imul(left, ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- __ test(left, Operand(left));
- __ j(not_zero, &done, Label::kNear);
- if (right->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr->environment());
- } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
- __ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr->environment());
- }
- } else {
- // Test the non-zero operand for negative sign.
- __ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr->environment());
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
-
- if (right->IsConstantOperand()) {
- int right_operand = ToInteger32(LConstantOperand::cast(right));
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(ToRegister(left), right_operand);
- break;
- case Token::BIT_OR:
- __ or_(ToRegister(left), right_operand);
- break;
- case Token::BIT_XOR:
- __ xor_(ToRegister(left), right_operand);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_OR:
- __ or_(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToOperand(right));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- if (right->IsRegister()) {
- ASSERT(ToRegister(right).is(ecx));
-
- switch (instr->op()) {
- case Token::ROR:
- __ ror_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- break;
- case Token::SAR:
- __ sar_cl(ToRegister(left));
- break;
- case Token::SHR:
- __ shr_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- break;
- case Token::SHL:
- __ shl_cl(ToRegister(left));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- int value = ToInteger32(LConstantOperand::cast(right));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- } else {
- __ ror(ToRegister(left), shift_count);
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ sar(ToRegister(left), shift_count);
- }
- break;
- case Token::SHR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- } else {
- __ shr(ToRegister(left), shift_count);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- __ shl(ToRegister(left), shift_count);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ sub(ToOperand(left), ToInteger32Immediate(right));
- } else {
- __ sub(ToRegister(left), ToOperand(right));
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
- __ Set(ToRegister(instr->result()), Immediate(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
- XMMRegister res = ToDoubleRegister(instr->result());
- double v = instr->value();
- // Use xor to produce +0.0 in a fast and compact way, but avoid to
- // do so if the constant is -0.0.
- if (BitCast<uint64_t, double>(v) == 0) {
- __ xorps(res, res);
- } else {
- Register temp = ToRegister(instr->temp());
- uint64_t int_val = BitCast<uint64_t, double>(v);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope scope1(SSE2);
- CpuFeatures::Scope scope2(SSE4_1);
- if (lower != 0) {
- __ Set(temp, Immediate(lower));
- __ movd(res, Operand(temp));
- __ Set(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
- } else {
- __ xorps(res, res);
- __ Set(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
- }
- } else {
- CpuFeatures::Scope scope(SSE2);
- __ Set(temp, Immediate(upper));
- __ movd(res, Operand(temp));
- __ psllq(res, 32);
- if (lower != 0) {
- __ Set(temp, Immediate(lower));
- __ movd(xmm0, Operand(temp));
- __ por(res, xmm0);
- }
- }
- }
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Register reg = ToRegister(instr->result());
- Handle<Object> handle = instr->value();
- if (handle->IsHeapObject()) {
- __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
- } else {
- __ Set(reg, Immediate(handle));
- }
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoFixedArrayBaseLength(
- LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ mov(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
-}
-
-
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(result, FieldOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(result, Map::kElementsKindMask);
- __ shr(result, Map::kElementsKindShift);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temp());
- ASSERT(input.is(result));
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(input, &done, Label::kNear);
-
- // If the object is not a value type, return the object.
- __ CmpObjectType(input, JS_VALUE_TYPE, map);
- __ j(not_equal, &done, Label::kNear);
- __ mov(result, FieldOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- Label runtime, done;
- ASSERT(object.is(result));
- ASSERT(object.is(eax));
-
- __ test(object, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
- __ CmpObjectType(object, JS_DATE_TYPE, scratch);
- DeoptimizeIf(not_equal, instr->environment());
-
- if (index->value() == 0) {
- __ mov(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand::StaticVariable(stamp));
- __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ mov(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(Operand(esp, 0), object);
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
-}
-
-
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->Equals(instr->result()));
- __ not_(ToRegister(input));
-}
-
-
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToOperand(instr->value()));
- ASSERT(ToRegister(instr->context()).is(esi));
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- Comment("Unreachable code.");
- __ int3();
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ add(ToOperand(left), ToInteger32Immediate(right));
- } else {
- __ add(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- CpuFeatures::Scope scope(SSE2);
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsInteger32()) {
- Label return_left;
- Condition condition = (operation == HMathMinMax::kMathMin)
- ? less_equal
- : greater_equal;
- if (right->IsConstantOperand()) {
- Operand left_op = ToOperand(left);
- Immediate right_imm = ToInteger32Immediate(right);
- __ cmp(left_op, right_imm);
- __ j(condition, &return_left, Label::kNear);
- __ mov(left_op, right_imm);
- } else {
- Register left_reg = ToRegister(left);
- Operand right_op = ToOperand(right);
- __ cmp(left_reg, right_op);
- __ j(condition, &return_left, Label::kNear);
- __ mov(left_reg, right_op);
- }
- __ bind(&return_left);
- } else {
- ASSERT(instr->hydrogen()->representation().IsDouble());
- Label check_nan_left, check_zero, return_left, return_right;
- Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
- XMMRegister left_reg = ToDoubleRegister(left);
- XMMRegister right_reg = ToDoubleRegister(right);
- __ ucomisd(left_reg, right_reg);
- __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
-
- __ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(left_reg, xmm_scratch);
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
- // At this point, both left and right are either 0 or -0.
- if (operation == HMathMinMax::kMathMin) {
- __ orpd(left_reg, right_reg);
- } else {
- // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
- __ addsd(left_reg, right_reg);
- }
- __ jmp(&return_left, Label::kNear);
-
- __ bind(&check_nan_left);
- __ ucomisd(left_reg, left_reg); // NaN check.
- __ j(parity_even, &return_left, Label::kNear); // left == NaN.
- __ bind(&return_right);
- __ movsd(left_reg, right_reg);
-
- __ bind(&return_left);
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister left = ToDoubleRegister(instr->left());
- XMMRegister right = ToDoubleRegister(instr->right());
- XMMRegister result = ToDoubleRegister(instr->result());
- // Modulo uses a fixed result register.
- ASSERT(instr->op() == Token::MOD || left.is(result));
- switch (instr->op()) {
- case Token::ADD:
- __ addsd(left, right);
- break;
- case Token::SUB:
- __ subsd(left, right);
- break;
- case Token::MUL:
- __ mulsd(left, right);
- break;
- case Token::DIV:
- __ divsd(left, right);
- break;
- case Token::MOD: {
- // Pass two doubles as arguments on the stack.
- __ PrepareCallCFunction(4, eax);
- __ movdbl(Operand(esp, 0 * kDoubleSize), left);
- __ movdbl(Operand(esp, 1 * kDoubleSize), right);
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
- 4);
-
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movdbl(result, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->left()).is(edx));
- ASSERT(ToRegister(instr->right()).is(eax));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ nop(); // Signals no inlined code.
-}
-
-
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
-
-
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
-
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- __ jmp(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- CpuFeatures::Scope scope(SSE2);
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
- Register reg = ToRegister(instr->value());
- __ test(reg, Operand(reg));
- EmitBranch(true_block, false_block, not_zero);
- } else if (r.IsDouble()) {
- XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
- EmitBranch(true_block, false_block, not_equal);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- __ cmp(reg, factory()->true_value());
- EmitBranch(true_block, false_block, equal);
- } else if (type.IsSmi()) {
- __ test(reg, Operand(reg));
- EmitBranch(true_block, false_block, not_equal);
- } else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
-
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
- // undefined -> false.
- __ cmp(reg, factory()->undefined_value());
- __ j(equal, false_label);
- }
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
- // true -> true.
- __ cmp(reg, factory()->true_value());
- __ j(equal, true_label);
- // false -> false.
- __ cmp(reg, factory()->false_value());
- __ j(equal, false_label);
- }
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
- // 'null' -> false.
- __ cmp(reg, factory()->null_value());
- __ j(equal, false_label);
- }
-
- if (expected.Contains(ToBooleanStub::SMI)) {
- // Smis: 0 -> false, all other -> true.
- __ test(reg, Operand(reg));
- __ j(equal, false_label);
- __ JumpIfSmi(reg, true_label);
- } else if (expected.NeedsMap()) {
- // If we need a map later and have a Smi -> deopt.
- __ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
- }
-
- Register map = no_reg; // Keep the compiler happy.
- if (expected.NeedsMap()) {
- map = ToRegister(instr->temp());
- ASSERT(!map.is(reg));
- __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
-
- if (expected.CanBeUndetectable()) {
- // Undetectable -> false.
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(not_zero, false_label);
- }
- }
-
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
- // spec object -> true.
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(above_equal, true_label);
- }
-
- if (expected.Contains(ToBooleanStub::STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- __ j(not_zero, true_label);
- __ jmp(false_label);
- __ bind(&not_string);
- }
-
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
- __ j(zero, false_label);
- __ jmp(true_label);
- __ bind(&not_heap_number);
- }
-
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(no_condition, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = no_condition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = equal;
- break;
- case Token::LT:
- cond = is_unsigned ? below : less;
- break;
- case Token::GT:
- cond = is_unsigned ? above : greater;
- break;
- case Token::LTE:
- cond = is_unsigned ? below_equal : less_equal;
- break;
- case Token::GTE:
- cond = is_unsigned ? above_equal : greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- CpuFeatures::Scope scope(SSE2);
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
- } else {
- if (right->IsConstantOperand()) {
- __ cmp(ToRegister(left), ToInteger32Immediate(right));
- } else if (left->IsConstantOperand()) {
- __ cmp(ToOperand(right), ToInteger32Immediate(left));
- // We transposed the operands. Reverse the condition.
- cc = ReverseCondition(cc);
- } else {
- __ cmp(ToRegister(left), ToOperand(right));
- }
- }
- EmitBranch(true_block, false_block, cc);
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Operand right = ToOperand(instr->right());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmp(left, Operand(right));
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ cmp(left, instr->hydrogen()->right());
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
- return;
- }
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Handle<Object> nil_value = instr->nil() == kNullValue ?
- factory()->null_value() :
- factory()->undefined_value();
- __ cmp(reg, nil_value);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, equal);
- } else {
- Handle<Object> other_nil_value = instr->nil() == kNullValue ?
- factory()->undefined_value() :
- factory()->null_value();
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ j(equal, true_label);
- __ cmp(reg, other_nil_value);
- __ j(equal, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->temp());
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ test(scratch, Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
- }
-}
-
-
-Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object) {
- __ JumpIfSmi(input, is_not_object);
-
- __ cmp(input, isolate()->factory()->null_value());
- __ j(equal, is_object);
-
- __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(not_zero, is_not_object);
-
- __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(below, is_not_object);
- __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- return below_equal;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
-
- Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
-
- return cond;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond = EmitIsString(reg, temp, false_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Operand input = ToOperand(instr->value());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ test(input, Immediate(kSmiTagMask));
- EmitBranch(true_block, false_block, zero);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- EmitBranch(true_block, false_block, not_zero);
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- Condition condition = ComputeCompareCondition(op);
- __ test(eax, Operand(eax));
-
- EmitBranch(true_block, false_block, condition);
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- ASSERT(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return equal;
- if (to == LAST_TYPE) return above_equal;
- if (from == FIRST_TYPE) return below_equal;
- UNREACHABLE();
- return equal;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
-
- __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
-}
-
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ mov(result, FieldOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ test(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, equal);
-}
-
-
-// Branches to a label or falls through with the answer in the z flag. Trashes
-// the temp registers, but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true,
- Label* is_false,
- Handle<String>class_name,
- Register input,
- Register temp,
- Register temp2) {
- ASSERT(!input.is(temp));
- ASSERT(!input.is(temp2));
- ASSERT(!temp.is(temp2));
- __ JumpIfSmi(input, is_false);
-
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- __ j(equal, is_true);
- __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
- __ j(equal, is_true);
- } else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(above, is_false);
- }
-
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
- // Check if the constructor in the map is a function.
- __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
- // Objects with a non-function constructor have class 'Object'.
- __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
- __ j(not_equal, is_true);
- } else {
- __ j(not_equal, is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ mov(temp, FieldOperand(temp,
- SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- __ cmp(temp, class_name);
- // End with the answer in the z flag.
-}
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
-
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- // Object and function are in fixed registers defined by the stub.
- ASSERT(ToRegister(instr->context()).is(esi));
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-
- Label true_value, done;
- __ test(eax, Operand(eax));
- __ j(zero, &true_value, Label::kNear);
- __ mov(ToRegister(instr->result()), factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(ToRegister(instr->result()), factory()->true_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
- }
- virtual LInstruction* instr() { return instr_; }
- Label* map_check() { return &map_check_; }
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- // A Smi is not an instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- Register map = ToRegister(instr->temp());
- __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
- __ bind(deferred->map_check()); // Label for calculating code patching.
- Handle<JSGlobalPropertyCell> cache_cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ cmp(map, Operand::Cell(cache_cell)); // Patched to cached map.
- __ j(not_equal, &cache_miss, Label::kNear);
- __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
- __ jmp(&done);
-
- // The inlined call site cache did not match. Check for null and string
- // before calling the deferred code.
- __ bind(&cache_miss);
- // Null is not an instance of anything.
- __ cmp(object, factory()->null_value());
- __ j(equal, &false_result);
-
- // String values are not instances of anything.
- Condition is_string = masm_->IsObjectStringType(object, temp, temp);
- __ j(is_string, &false_result);
-
- // Go to the deferred code.
- __ jmp(deferred->entry());
-
- __ bind(&false_result);
- __ mov(ToRegister(instr->result()), factory()->false_value());
-
- // Here result has either true or false. Deferred code also produces true or
- // false object.
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- PushSafepointRegistersScope scope(this);
-
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(flags);
-
- // Get the temp register reserved by the instruction. This needs to be a
- // register which is pushed last by PushSafepointRegisters as top of the
- // stack is used to pass the offset to the location of the map check to
- // the stub.
- Register temp = ToRegister(instr->temp());
- ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
- __ LoadHeapObject(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 13;
- int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
- __ mov(temp, Immediate(delta));
- __ StoreToSafepointRegisterSlot(temp, temp);
- CallCodeGeneric(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- // Get the deoptimization index of the LLazyBailout-environment that
- // corresponds to this instruction.
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-
- // Put the result value into the eax slot and restore all registers.
- __ StoreToSafepointRegisterSlot(eax, eax);
-}
-
-
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceSizeOffset));
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- Condition condition = ComputeCompareCondition(op);
- Label true_value, done;
- __ test(eax, Operand(eax));
- __ j(condition, &true_value, Label::kNear);
- __ mov(ToRegister(instr->result()), factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(ToRegister(instr->result()), factory()->true_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Preserve the return value on the stack and rely on the runtime call
- // to return the value in the same register. We're leaving the code
- // managed by the register allocator and tearing down the frame, it's
- // safe to write to the context register.
- __ push(eax);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- ASSERT(NeedsEagerFrame());
- CpuFeatures::Scope scope(SSE2);
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(esp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
- }
- if (dynamic_frame_alignment_) {
- // Fetch the state of the dynamic frame alignment.
- __ mov(edx, Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
- }
- if (NeedsEagerFrame()) {
- __ mov(esp, ebp);
- __ pop(ebp);
- }
- if (dynamic_frame_alignment_) {
- Label no_padding;
- __ cmp(edx, Immediate(kNoAlignmentPadding));
- __ j(equal, &no_padding);
- if (FLAG_debug_code) {
- __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
- Immediate(kAlignmentZapValue));
- __ Assert(equal, "expected alignment marker");
- }
- __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
- __ bind(&no_padding);
- }
- if (info()->IsStub()) {
- __ Ret();
- } else {
- __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
- }
-}
-
-
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->global_object()).is(edx));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- __ mov(ecx, instr->name());
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
- RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted. We deoptimize in that case.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(Operand::Cell(cell_handle), factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
- }
-
- // Store the value.
- __ mov(Operand::Cell(cell_handle), value);
- // Cells are always rescanned, so no write barrier here.
-}
-
-
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->global_object()).is(edx));
- ASSERT(ToRegister(instr->value()).is(eax));
-
- __ mov(ecx, instr->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result, ContextOperand(context, instr->slot_index()));
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(result, factory()->the_hole_value());
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- Label is_not_hole;
- __ j(not_equal, &is_not_hole, Label::kNear);
- __ mov(result, factory()->undefined_value());
- __ bind(&is_not_hole);
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
-
- Label skip_assignment;
-
- Operand target = ContextOperand(context, instr->slot_index());
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(target, factory()->the_hole_value());
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ j(not_equal, &skip_assignment, Label::kNear);
- }
- }
-
- __ mov(target, value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Register temp = ToRegister(instr->temp());
- int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWriteContextSlot(context,
- offset,
- value,
- temp,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
- } else {
- __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ mov(result, FieldOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Handle<Map>(current->map()));
- DeoptimizeIf(not_equal, env);
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
- }
- __ mov(result, factory()->undefined_value());
- }
-}
-
-
-void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
- ASSERT(!operand->IsDoubleRegister());
- if (operand->IsConstantOperand()) {
- Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
- if (object->IsSmi()) {
- __ Push(Handle<Smi>::cast(object));
- } else {
- __ PushHeapObject(Handle<HeapObject>::cast(object));
- }
- } else if (operand->IsRegister()) {
- __ push(ToRegister(operand));
- } else {
- __ push(ToOperand(operand));
- }
-}
-
-
-// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
-// prototype chain, which causes unbounded code generation.
-static bool CompactEmit(SmallMapList* list,
- Handle<String> name,
- int i,
- Isolate* isolate) {
- Handle<Map> map = list->at(i);
- // If the map has ElementsKind transitions, we will generate map checks
- // for each kind in __ CompareMap(..., ALLOW_ELEMENTS_TRANSITION_MAPS).
- if (map->HasElementsTransition()) return false;
- LookupResult lookup(isolate);
- map->LookupDescriptor(NULL, *name, &lookup);
- return lookup.IsField() || lookup.IsConstantFunction();
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- bool all_are_compact = true;
- for (int i = 0; i < map_count; ++i) {
- if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
- all_are_compact = false;
- break;
- }
- }
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
- if (last && !need_generic) {
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- bool compact = all_are_compact ? true :
- CompactEmit(instr->hydrogen()->types(), name, i, isolate());
- __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
- __ bind(&next);
- }
- }
- if (need_generic) {
- __ mov(ecx, name);
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->object()).is(edx));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- __ mov(ecx, instr->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register function = ToRegister(instr->function());
- Register temp = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
-
- // Check that the function really is a function.
- __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
- DeoptimizeIf(not_equal, instr->environment());
-
- // Check whether the function has an instance prototype.
- Label non_instance;
- __ test_b(FieldOperand(result, Map::kBitFieldOffset),
- 1 << Map::kHasNonInstancePrototype);
- __ j(not_zero, &non_instance, Label::kNear);
-
- // Get the prototype or initial map from the function.
- __ mov(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr->environment());
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CmpObjectType(result, MAP_TYPE, temp);
- __ j(not_equal, &done, Label::kNear);
-
- // Get the prototype from the initial map.
- __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
- __ jmp(&done, Label::kNear);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in the function's map.
- __ bind(&non_instance);
- __ mov(result, FieldOperand(result, Map::kConstructorOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, ok, fail;
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(equal, &done, Label::kNear);
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(factory()->fixed_cow_array_map()));
- __ j(equal, &done, Label::kNear);
- Register temp((result.is(eax)) ? ebx : eax);
- __ push(temp);
- __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
- __ and_(temp, Map::kElementsKindMask);
- __ shr(temp, Map::kElementsKindShift);
- __ cmp(temp, GetInitialFastElementsKind());
- __ j(less, &fail, Label::kNear);
- __ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
- __ j(less_equal, &ok, Label::kNear);
- __ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- __ j(less, &fail, Label::kNear);
- __ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- __ j(less_equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&ok);
- __ pop(temp);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ mov(result, FieldOperand(input,
- ExternalArray::kExternalPointerOffset));
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Operand index = ToOperand(instr->index());
- Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- __ sub(length, index);
- __ mov(result, Operand(arguments, length, times_4, kPointerSize));
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand() &&
- ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
- elements_kind)) {
- __ SmiUntag(ToRegister(key));
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- elements_kind,
- 0,
- instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
- } else {
- __ fld_s(operand);
- HandleX87FPReturnValue(instr);
- }
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- __ movdbl(ToDoubleRegister(instr->result()), operand);
- } else {
- __ fld_d(operand);
- HandleX87FPReturnValue(instr);
- }
- } else {
- Register result(ToRegister(instr->result()));
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsx_b(result, operand);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movzx_b(result, operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsx_w(result, operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzx_w(result, operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ mov(result, operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(result, operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ test(result, Operand(result));
- DeoptimizeIf(negative, instr->environment());
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
- if (IsX87TopOfStack(instr->result())) {
- // Return value is already on stack. If the value has no uses, then
- // pop it off the FP stack. Otherwise, make sure that there are enough
- // copies of the value on the stack to feed all of the usages, e.g.
- // when the following instruction uses the return value in multiple
- // inputs.
- int count = instr->hydrogen_value()->UseCount();
- if (count == 0) {
- __ fstp(0);
- } else {
- count--;
- ASSERT(count <= 7);
- while (count-- > 0) {
- __ fld(0);
- }
- }
- } else {
- __ fstp_d(ToOperand(instr->result()));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(), instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
- __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
- }
-
- Operand double_load_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister result = ToDoubleRegister(instr->result());
- __ movdbl(result, double_load_operand);
- } else {
- __ fld_d(double_load_operand);
- HandleX87FPReturnValue(instr);
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register result = ToRegister(instr->result());
-
- // Load the result.
- __ mov(result,
- BuildFastArrayOperand(instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr->environment());
- } else {
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
-Operand LCodeGen::BuildFastArrayOperand(
- LOperand* elements_pointer,
- LOperand* key,
- Representation key_representation,
- ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index) {
- Register elements_pointer_reg = ToRegister(elements_pointer);
- int shift_size = ElementsKindToShiftSize(elements_kind);
- if (key->IsConstantOperand()) {
- int constant_value = ToInteger32(LConstantOperand::cast(key));
- if (constant_value & 0xF0000000) {
- Abort("array index constant value too big");
- }
- return Operand(elements_pointer_reg,
- ((constant_value + additional_index) << shift_size)
- + offset);
- } else {
- // Take the tag bit into account while computing the shift size.
- if (key_representation.IsTagged() && (shift_size >= 1)) {
- shift_size -= kSmiTagSize;
- }
- ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(elements_pointer_reg,
- ToRegister(key),
- scale_factor,
- offset + (additional_index << shift_size));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->object()).is(edx));
- ASSERT(ToRegister(instr->key()).is(ecx));
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(esp, -2 * kPointerSize));
- } else {
- // Check for arguments adapter frame.
- Label done, adapted;
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(result),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted, Label::kNear);
-
- // No arguments adaptor frame.
- __ mov(result, Operand(ebp));
- __ jmp(&done, Label::kNear);
-
- // Arguments adaptor frame present.
- __ bind(&adapted);
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Operand elem = ToOperand(instr->elements());
- Register result = ToRegister(instr->result());
-
- Label done;
-
- // If no arguments adaptor frame the number of arguments is fixed.
- __ cmp(ebp, elem);
- __ mov(result, Immediate(scope()->num_parameters()));
- __ j(equal, &done, Label::kNear);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register scratch = ToRegister(instr->temp());
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
-
- // Do not transform the receiver to object for strict mode
- // functions.
- __ mov(scratch,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &receiver_ok); // A near jump is not sufficient here!
-
- // Do not transform the receiver to object for builtins.
- __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &receiver_ok);
-
- // Normal function. Replace undefined or null with global receiver.
- __ cmp(receiver, factory()->null_value());
- __ j(equal, &global_object, Label::kNear);
- __ cmp(receiver, factory()->undefined_value());
- __ j(equal, &global_object, Label::kNear);
-
- // The receiver should be a JS object.
- __ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr->environment());
- __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
- DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok, Label::kNear);
-
- __ bind(&global_object);
- // TODO(kmillikin): We have a hydrogen value for the global object. See
- // if it's better to use it than to explicitly fetch it from the context
- // here.
- __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
- __ mov(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
- __ bind(&receiver_ok);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- ASSERT(receiver.is(eax)); // Used for parameter count.
- ASSERT(function.is(edi)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(eax));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr->environment());
-
- __ push(receiver);
- __ mov(receiver, length);
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ test(length, Operand(length));
- __ j(zero, &invoke, Label::kNear);
- __ bind(&loop);
- __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
- __ dec(length);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(eax);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- EmitPushTaggedOperand(argument);
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- Register result = ToRegister(instr->result());
- if (info()->IsOptimizing()) {
- __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
- } else {
- // If there is no frame, the context must be in esi.
- ASSERT(result.is(esi));
- }
-}
-
-
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result,
- Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- __ push(esi); // The context is the first argument.
- __ push(Immediate(instr->hydrogen()->pairs()));
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result,
- Operand(context,
- Context::SlotOffset(instr->qml_global()
- ? Context::QML_GLOBAL_OBJECT_INDEX
- : Context::GLOBAL_OBJECT_INDEX)));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr,
- CallKind call_kind,
- EDIState edi_state) {
- bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
- function->shared()->formal_parameter_count() == arity;
-
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- if (can_invoke_directly) {
- if (edi_state == EDI_UNINITIALIZED) {
- __ LoadHeapObject(edi, function);
- }
-
- // Change context.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Set eax to arguments count if adaption is not needed. Assumes that eax
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ mov(eax, arity);
- }
-
- // Invoke function directly.
- __ SetCallKind(ecx, call_kind);
- if (*function == *info()->closure()) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
- }
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- } else {
- // We need to adapt arguments.
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
- __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
- }
-}
-
-
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(eax));
- CallKnownFunction(instr->function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- EDI_UNINITIALIZED);
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->value());
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr->environment());
-
- Label done;
- Register tmp = input_reg.is(eax) ? ecx : eax;
- Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- Label negative;
- __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it. We do not need to patch the stack since |input| and
- // |result| are the same register and |input| will be restored
- // unchanged by popping safepoint registers.
- __ test(tmp, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative);
- __ jmp(&done);
-
- __ bind(&negative);
-
- Label allocated, slow;
- __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
- __ jmp(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
- instr, instr->context());
-
- // Set the pointer to the new heap number in tmp.
- if (!tmp.is(eax)) __ mov(tmp, eax);
-
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
-
- __ bind(&allocated);
- __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ and_(tmp2, ~HeapNumber::kSignMask);
- __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
- __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
- __ StoreToSafepointRegisterSlot(input_reg, tmp);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->value());
- __ test(input_reg, Operand(input_reg));
- Label is_positive;
- __ j(not_sign, &is_positive);
- __ neg(input_reg);
- __ test(input_reg, Operand(input_reg));
- DeoptimizeIf(negative, instr->environment());
- __ bind(&is_positive);
-}
-
-
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LUnaryMathOperation* instr_;
- };
-
- ASSERT(instr->value()->Equals(instr->result()));
- Representation r = instr->hydrogen()->value()->representation();
-
- CpuFeatures::Scope scope(SSE2);
- if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ xorps(scratch, scratch);
- __ subsd(scratch, input_reg);
- __ pand(input_reg, scratch);
- } else if (r.IsInteger32()) {
- EmitIntegerMathAbs(instr);
- } else { // Tagged case.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input_reg = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input_reg, deferred->entry());
- EmitIntegerMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister xmm_scratch = xmm0;
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
-
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope scope(SSE4_1);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Deoptimize on negative zero.
- Label non_zero;
- __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- __ j(not_equal, &non_zero, Label::kNear);
- __ movmskpd(output_reg, input_reg);
- __ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&non_zero);
- }
- __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
- __ cvttsd2si(output_reg, Operand(xmm_scratch));
- // Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- DeoptimizeIf(equal, instr->environment());
- } else {
- Label negative_sign, done;
- // Deoptimize on unordered.
- __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr->environment());
- __ j(below, &negative_sign, Label::kNear);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Check for negative zero.
- Label positive_sign;
- __ j(above, &positive_sign, Label::kNear);
- __ movmskpd(output_reg, input_reg);
- __ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ Set(output_reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- __ bind(&positive_sign);
- }
-
- // Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, Operand(input_reg));
- // Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- DeoptimizeIf(equal, instr->environment());
- __ jmp(&done, Label::kNear);
-
- // Non-zero negative reaches here.
- __ bind(&negative_sign);
- // Truncate, then compare and compensate.
- __ cvttsd2si(output_reg, Operand(input_reg));
- __ cvtsi2sd(xmm_scratch, output_reg);
- __ ucomisd(input_reg, xmm_scratch);
- __ j(equal, &done, Label::kNear);
- __ sub(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
-
- __ bind(&done);
- }
-}
-
-void LCodeGen::DoMathRound(LMathRound* instr) {
- CpuFeatures::Scope scope(SSE2);
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = xmm0;
- XMMRegister input_temp = ToDoubleRegister(instr->temp());
- ExternalReference one_half = ExternalReference::address_of_one_half();
- ExternalReference minus_one_half =
- ExternalReference::address_of_minus_one_half();
-
- Label done, round_to_zero, below_one_half, do_not_compensate;
- __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
- __ ucomisd(xmm_scratch, input_reg);
- __ j(above, &below_one_half);
-
- // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
- __ addsd(xmm_scratch, input_reg);
- __ cvttsd2si(output_reg, Operand(xmm_scratch));
- // Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
- __ jmp(&done);
-
- __ bind(&below_one_half);
- __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half));
- __ ucomisd(xmm_scratch, input_reg);
- __ j(below_equal, &round_to_zero);
-
- // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
- // compare and compensate.
- __ movsd(input_temp, input_reg); // Do not alter input_reg.
- __ subsd(input_temp, xmm_scratch);
- __ cvttsd2si(output_reg, Operand(input_temp));
- // Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmp(output_reg, 0x80000000u);
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
-
- __ cvtsi2sd(xmm_scratch, output_reg);
- __ ucomisd(xmm_scratch, input_temp);
- __ j(equal, &done);
- __ sub(output_reg, Immediate(1));
- // No overflow because we already ruled out minint.
- __ jmp(&done);
-
- __ bind(&round_to_zero);
- // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
- // we can ignore the difference between a result of -0 and +0.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // If the sign is positive, we return +0.
- __ movmskpd(output_reg, input_reg);
- __ test(output_reg, Immediate(1));
- __ RecordComment("Minus zero");
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ Set(output_reg, Immediate(0));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ sqrtsd(input_reg, input_reg);
-}
-
-
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister xmm_scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- Register scratch = ToRegister(instr->temp());
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
-
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label done, sqrt;
- // Check base for -Infinity. According to IEEE-754, single-precision
- // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
- __ mov(scratch, 0xFF800000);
- __ movd(xmm_scratch, scratch);
- __ cvtss2sd(xmm_scratch, xmm_scratch);
- __ ucomisd(input_reg, xmm_scratch);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &sqrt, Label::kNear);
- __ j(carry, &sqrt, Label::kNear);
- // If input is -Infinity, return Infinity.
- __ xorps(input_reg, input_reg);
- __ subsd(input_reg, xmm_scratch);
- __ jmp(&done, Label::kNear);
-
- // Square root.
- __ bind(&sqrt);
- __ xorps(xmm_scratch, xmm_scratch);
- __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
- __ sqrtsd(input_reg, input_reg);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
- ASSERT(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(xmm1));
- ASSERT(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(eax));
- ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
- ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
-
- if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(eax, &no_deopt);
- __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-
-void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
- CpuFeatures::Scope scope(SSE2);
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- ASSERT(ToRegister(instr->global_object()).is(eax));
- // Assert that the register size is indeed the size of each seed.
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == kSeedSize);
-
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
- // ebx: FixedArray of the native context's random seeds
-
- // Load state[0].
- __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
- // If state[0] == 0, call runtime to initialize seeds.
- __ test(ecx, ecx);
- __ j(zero, deferred->entry());
- // Load state[1].
- __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
- // ecx: state[0]
- // eax: state[1]
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ movzx_w(edx, ecx);
- __ imul(edx, edx, 18273);
- __ shr(ecx, 16);
- __ add(ecx, edx);
- // Save state[0].
- __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzx_w(edx, eax);
- __ imul(edx, edx, 36969);
- __ shr(eax, 16);
- __ add(eax, edx);
- // Save state[1].
- __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ shl(ecx, 14);
- __ and_(eax, Immediate(0x3FFFF));
- __ add(eax, ecx);
-
- __ bind(deferred->exit());
- // Convert 32 random bits in eax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm2, ebx);
- __ movd(xmm1, eax);
- __ cvtss2sd(xmm2, xmm2);
- __ xorps(xmm1, xmm2);
- __ subsd(xmm1, xmm2);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in eax.
-}
-
-
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(SSE2);
- ASSERT(instr->value()->Equals(instr->result()));
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- Label positive, done, zero;
- __ xorps(xmm0, xmm0);
- __ ucomisd(input_reg, xmm0);
- __ j(above, &positive, Label::kNear);
- __ j(equal, &zero, Label::kNear);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(input_reg, Operand::StaticVariable(nan));
- __ jmp(&done, Label::kNear);
- __ bind(&zero);
- __ push(Immediate(0xFFF00000));
- __ push(Immediate(0));
- __ movdbl(input_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- __ jmp(&done, Label::kNear);
- __ bind(&positive);
- __ fldln2();
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
- __ fld_d(Operand(esp, 0));
- __ fyl2x();
- __ fstp_d(Operand(esp, 0));
- __ movdbl(input_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister input = ToDoubleRegister(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
-}
-
-
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->function()).is(edi));
- ASSERT(instr->HasPointerMap());
-
- if (instr->known_function().is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- } else {
- CallKnownFunction(instr->known_function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- EDI_CONTAINS_TARGET);
- }
-}
-
-
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->key()).is(ecx));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(ecx, instr->name());
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->function()).is(edi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(ecx, instr->name());
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(eax));
- CallKnownFunction(instr->target(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- EDI_UNINITIALIZED);
-}
-
-
-void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->constructor()).is(edi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- if (FLAG_optimize_constructed_arrays) {
- // No cell in ebx for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
- isolate());
- __ mov(ebx, Immediate(undefined_value));
- }
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- __ Set(eax, Immediate(instr->arity()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->constructor()).is(edi));
- ASSERT(ToRegister(instr->result()).is(eax));
- ASSERT(FLAG_optimize_constructed_arrays);
-
- __ mov(ebx, instr->hydrogen()->property_cell());
- Handle<Code> array_construct_code =
- isolate()->builtins()->ArrayConstructCode();
- __ Set(eax, Immediate(instr->arity()));
- CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
- int offset = instr->offset();
-
- if (!instr->transition().is_null()) {
- if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
- __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
- } else {
- Register temp = ToRegister(instr->temp());
- Register temp_map = ToRegister(instr->temp_map());
- __ mov(temp_map, instr->transition());
- __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
- // Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- temp_map,
- temp,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
- }
-
- // Do the store.
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
- __ mov(FieldOperand(object, offset), value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- Register temp = ToRegister(instr->temp());
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(object,
- offset,
- value,
- temp,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
- } else {
- Register temp = ToRegister(instr->temp());
- __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(temp, offset), value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWriteField(temp,
- offset,
- value,
- object,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
- }
-}
-
-
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->object()).is(edx));
- ASSERT(ToRegister(instr->value()).is(eax));
-
- __ mov(ecx, instr->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check()) return;
-
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
- __ cmp(ToOperand(instr->length()),
- Immediate(Smi::FromInt(constant_index)));
- } else {
- __ cmp(ToOperand(instr->length()), Immediate(constant_index));
- }
- DeoptimizeIf(below_equal, instr->environment());
- } else {
- __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
- DeoptimizeIf(above_equal, instr->environment());
- }
-}
-
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand() &&
- ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
- elements_kind)) {
- __ SmiUntag(ToRegister(key));
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- elements_kind,
- 0,
- instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- CpuFeatures::Scope scope(SSE2);
- __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
- __ movss(operand, xmm0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- CpuFeatures::Scope scope(SSE2);
- __ movdbl(operand, ToDoubleRegister(instr->value()));
- } else {
- Register value = ToRegister(instr->value());
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- __ mov_b(operand, value);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ mov_w(operand, value);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(operand, value);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister value = ToDoubleRegister(instr->value());
-
- if (instr->NeedsCanonicalization()) {
- Label have_value;
-
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
-
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
- __ bind(&have_value);
- }
-
- Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- __ movdbl(double_store_operand, value);
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
-
- Operand operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- __ mov(operand, value);
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- ASSERT(!instr->key()->IsConstantOperand());
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ lea(key, operand);
- __ RecordWrite(elements,
- key,
- value,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- // By cases...external, fast-double, fast
- if (instr->is_external()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->object()).is(edx));
- ASSERT(ToRegister(instr->key()).is(ecx));
- ASSERT(ToRegister(instr->value()).is(eax));
-
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationSiteInfo(object, temp);
- DeoptimizeIf(equal, instr->environment());
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- bool is_simple_map_transition =
- IsSimpleMapChangeTransition(from_kind, to_kind);
- Label::Distance branch_distance =
- is_simple_map_transition ? Label::kNear : Label::kFar;
- __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
- __ j(not_equal, &not_applicable, branch_distance);
- if (is_simple_map_transition) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- Handle<Map> map = instr->hydrogen()->transitioned_map();
- __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
- Immediate(map));
- // Write barrier.
- ASSERT_NE(instr->temp(), NULL);
- __ RecordWriteForMap(object_reg, to_map, new_map_reg,
- ToRegister(instr->temp()),
- kDontSaveFPRegs);
- } else if (FLAG_compiled_transitions) {
- PushSafepointRegistersScope scope(this);
- if (!object_reg.is(eax)) {
- __ push(object_reg);
- }
- LoadContextFromDeferred(instr->context());
- if (!object_reg.is(eax)) {
- __ pop(eax);
- }
- __ mov(ebx, to_map);
- TransitionElementsKindStub stub(from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ mov(new_map_reg, to_map);
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(edx));
- ASSERT(new_map_reg.is(ebx));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
- RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ mov(new_map_reg, to_map);
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(edx));
- ASSERT(new_map_reg.is(ebx));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
- RelocInfo::CODE_TARGET, instr);
- } else {
- UNREACHABLE();
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
-
- StringCharLoadGenerator::Generate(masm(),
- factory(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ push(Immediate(Smi::FromInt(const_index)));
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
- instr, instr->context());
- __ AssertSmi(eax);
- __ SmiUntag(eax);
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- ASSERT(!char_code.is(result));
-
- __ cmp(char_code, String::kMaxOneByteCharCode);
- __ j(above, deferred->entry());
- __ Set(result, Immediate(factory()->single_character_string_cache()));
- __ mov(result, FieldOperand(result,
- char_code, times_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result, factory()->undefined_value());
- __ j(equal, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- __ SmiTag(char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(string, String::kLengthOffset));
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- EmitPushTaggedOperand(instr->left());
- EmitPushTaggedOperand(instr->right());
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
- __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- CpuFeatures::Scope scope(SSE2);
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- LOperand* temp = instr->temp();
-
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagI* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
- __ SmiTag(reg);
- __ j(overflow, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagU* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ cmp(reg, Immediate(Smi::kMaxValue));
- __ j(above, deferred->entry());
- __ SmiTag(reg);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
- Register reg = ToRegister(value);
- Register tmp = reg.is(eax) ? ecx : eax;
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- Label done;
-
- if (signedness == SIGNED_INT32) {
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- __ SmiUntag(reg);
- __ xor_(reg, 0x80000000);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope feature_scope(SSE2);
- __ cvtsi2sd(xmm0, Operand(reg));
- } else {
- __ push(reg);
- __ fild_s(Operand(esp, 0));
- __ pop(reg);
- }
- } else {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope feature_scope(SSE2);
- __ LoadUint32(xmm0, reg, xmm1);
- } else {
- // There's no fild variant for unsigned values, so zero-extend to a 64-bit
- // int manually.
- __ push(Immediate(0));
- __ push(reg);
- __ fild_d(Operand(esp, 0));
- __ pop(reg);
- __ pop(reg);
- }
- }
-
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
- __ jmp(&done, Label::kNear);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(reg, Immediate(0));
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- if (!reg.is(eax)) __ mov(reg, eax);
-
- // Done. Put the value in xmm0 into the value of the allocated heap
- // number.
- __ bind(&done);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope feature_scope(SSE2);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
- } else {
- __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
- }
- __ StoreToSafepointRegisterSlot(reg, reg);
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagD* instr_;
- };
-
- Register reg = ToRegister(instr->result());
-
- bool convert_hole = false;
- HValue* change_input = instr->hydrogen()->value();
- if (change_input->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(change_input);
- convert_hole = load->UsesMustHandleHole();
- }
-
- Label no_special_nan_handling;
- Label done;
- if (convert_hole) {
- bool use_sse2 = CpuFeatures::IsSupported(SSE2);
- if (use_sse2) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ ucomisd(input_reg, input_reg);
- } else {
- if (!IsX87TopOfStack(instr->value())) {
- __ fld_d(ToOperand(instr->value()));
- }
- __ fld(0);
- __ fld(0);
- __ FCmp();
- }
-
- __ j(parity_odd, &no_special_nan_handling);
- __ sub(esp, Immediate(kDoubleSize));
- if (use_sse2) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movdbl(MemOperand(esp, 0), input_reg);
- } else {
- __ fld(0);
- __ fstp_d(MemOperand(esp, 0));
- }
- __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
- Immediate(kHoleNanUpper32));
- Label canonicalize;
- __ j(not_equal, &canonicalize);
- __ add(esp, Immediate(kDoubleSize));
- __ mov(reg, factory()->the_hole_value());
- __ jmp(&done);
- __ bind(&canonicalize);
- __ add(esp, Immediate(kDoubleSize));
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- if (use_sse2) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movdbl(input_reg, Operand::StaticVariable(nan));
- } else {
- __ fstp(0);
- __ fld_d(Operand::StaticVariable(nan));
- }
- }
-
- __ bind(&no_special_nan_handling);
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- Register tmp = ToRegister(instr->temp());
- __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
- } else {
- if (!IsX87TopOfStack(instr->value())) {
- __ fld_d(ToOperand(instr->value()));
- }
- __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ Set(reg, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(reg, eax);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(input));
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- if (instr->needs_check()) {
- __ test(ToRegister(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
- } else {
- __ AssertSmi(ToRegister(input));
- }
- __ SmiUntag(ToRegister(input));
-}
-
-
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- Register temp_reg,
- XMMRegister result_reg,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode) {
- Label load_smi, done;
-
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
-
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- if (deoptimize_on_undefined) {
- DeoptimizeIf(not_equal, env);
- } else {
- Label heap_number;
- __ j(equal, &heap_number, Label::kNear);
-
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, env);
-
- // Convert undefined to NaN.
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(result_reg, Operand::StaticVariable(nan));
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
- }
- // Heap number to XMM conversion.
- __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(result_reg, xmm_scratch);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(temp_reg, result_reg);
- __ test_b(temp_reg, 1);
- DeoptimizeIf(not_zero, env);
- }
- __ jmp(&done, Label::kNear);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
- __ test(input_reg, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, env);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(zero, &load_smi);
- ExternalReference hole_nan_reference =
- ExternalReference::address_of_the_hole_nan();
- __ movdbl(result_reg, Operand::StaticVariable(hole_nan_reference));
- __ jmp(&done, Label::kNear);
- } else {
- ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
- }
-
- // Smi to XMM conversion
- __ bind(&load_smi);
- __ SmiUntag(input_reg); // Untag smi before converting to float.
- __ cvtsi2sd(result_reg, Operand(input_reg));
- __ SmiTag(input_reg); // Retag smi.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Label done, heap_number;
- Register input_reg = ToRegister(instr->value());
-
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
-
- if (instr->truncating()) {
- __ j(equal, &heap_number, Label::kNear);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ cmp(input_reg, factory()->undefined_value());
- __ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(not_equal, instr->environment());
- __ mov(input_reg, 0);
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
- if (CpuFeatures::IsSupported(SSE3)) {
- CpuFeatures::Scope scope(SSE3);
- Label convert;
- // Use more powerful conversion when sse3 is available.
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- // Get exponent alone and check for too-big exponent.
- __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ and_(input_reg, HeapNumber::kExponentMask);
- const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
- __ j(less, &convert, Label::kNear);
- // Pop FPU stack before deoptimizing.
- __ fstp(0);
- __ RecordComment("Deferred TaggedToI: exponent too big");
- DeoptimizeIf(no_condition, instr->environment());
-
- // Reserve space for 64 bit answer.
- __ bind(&convert);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
- __ add(Operand(esp), Immediate(kDoubleSize));
- } else {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, Operand(xmm0));
- __ cmp(input_reg, 0x80000000u);
- __ j(not_equal, &done);
- // Check if the input was 0x8000000 (kMinInt).
- // If no, then we got an overflow and we deoptimize.
- ExternalReference min_int = ExternalReference::address_of_min_int();
- __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
- __ ucomisd(xmm_temp, xmm0);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- }
- } else if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- // Deoptimize if we don't have a heap number.
- __ RecordComment("Deferred TaggedToI: not a heap number");
- DeoptimizeIf(not_equal, instr->environment());
-
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, Operand(xmm0));
- __ cvtsi2sd(xmm_temp, Operand(input_reg));
- __ ucomisd(xmm0, xmm_temp);
- __ RecordComment("Deferred TaggedToI: lost precision");
- DeoptimizeIf(not_equal, instr->environment());
- __ RecordComment("Deferred TaggedToI: NaN");
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(input_reg, Operand(input_reg));
- __ j(not_zero, &done);
- __ movmskpd(input_reg, xmm0);
- __ and_(input_reg, 1);
- __ RecordComment("Deferred TaggedToI: minus zero");
- DeoptimizeIf(not_zero, instr->environment());
- }
- } else {
- UNREACHABLE();
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
-
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
-
- // Smi check.
- __ JumpIfNotSmi(input_reg, deferred->entry());
-
- // Smi to int32 conversion
- __ SmiUntag(input_reg); // Untag smi.
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* temp = instr->temp();
- ASSERT(temp == NULL || temp->IsRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsDoubleRegister());
-
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- Register input_reg = ToRegister(input);
- XMMRegister result_reg = ToDoubleRegister(result);
-
- bool deoptimize_on_minus_zero =
- instr->hydrogen()->deoptimize_on_minus_zero();
- Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
-
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
- HValue* value = instr->hydrogen()->value();
- if (value->type().IsSmi()) {
- if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
- }
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
- }
- }
- }
-
- EmitNumberUntagD(input_reg,
- temp_reg,
- result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
- deoptimize_on_minus_zero,
- instr->environment(),
- mode);
- } else {
- UNIMPLEMENTED();
- }
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsDoubleRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsRegister());
- CpuFeatures::Scope scope(SSE2);
-
- XMMRegister input_reg = ToDoubleRegister(input);
- Register result_reg = ToRegister(result);
-
- if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- __ cvttsd2si(result_reg, Operand(input_reg));
- __ cmp(result_reg, 0x80000000u);
- if (CpuFeatures::IsSupported(SSE3)) {
- // This will deoptimize if the exponent of the input in out of range.
- CpuFeatures::Scope scope(SSE3);
- Label convert, done;
- __ j(not_equal, &done, Label::kNear);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
- // Get exponent alone and check for too-big exponent.
- __ mov(result_reg, Operand(esp, sizeof(int32_t)));
- __ and_(result_reg, HeapNumber::kExponentMask);
- const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
- __ j(less, &convert, Label::kNear);
- __ add(Operand(esp), Immediate(kDoubleSize));
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&convert);
- // Do conversion, which cannot fail because we checked the exponent.
- __ fld_d(Operand(esp, 0));
- __ fisttp_d(Operand(esp, 0));
- __ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
- __ add(Operand(esp), Immediate(kDoubleSize));
- __ bind(&done);
- } else {
- Label done;
- Register temp_reg = ToRegister(instr->temp());
- XMMRegister xmm_scratch = xmm0;
-
- // If cvttsd2si succeeded, we're done. Otherwise, we attempt
- // manual conversion.
- __ j(not_equal, &done, Label::kNear);
-
- // Get high 32 bits of the input in result_reg and temp_reg.
- __ pshufd(xmm_scratch, input_reg, 1);
- __ movd(Operand(temp_reg), xmm_scratch);
- __ mov(result_reg, temp_reg);
-
- // Prepare negation mask in temp_reg.
- __ sar(temp_reg, kBitsPerInt - 1);
-
- // Extract the exponent from result_reg and subtract adjusted
- // bias from it. The adjustment is selected in a way such that
- // when the difference is zero, the answer is in the low 32 bits
- // of the input, otherwise a shift has to be performed.
- __ shr(result_reg, HeapNumber::kExponentShift);
- __ and_(result_reg,
- HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
- __ sub(Operand(result_reg),
- Immediate(HeapNumber::kExponentBias +
- HeapNumber::kExponentBits +
- HeapNumber::kMantissaBits));
- // Don't handle big (> kMantissaBits + kExponentBits == 63) or
- // special exponents.
- DeoptimizeIf(greater, instr->environment());
-
- // Zero out the sign and the exponent in the input (by shifting
- // it to the left) and restore the implicit mantissa bit,
- // i.e. convert the input to unsigned int64 shifted left by
- // kExponentBits.
- ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
- // Minus zero has the most significant bit set and the other
- // bits cleared.
- __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
- __ psllq(input_reg, HeapNumber::kExponentBits);
- __ por(input_reg, xmm_scratch);
-
- // Get the amount to shift the input right in xmm_scratch.
- __ neg(result_reg);
- __ movd(xmm_scratch, Operand(result_reg));
-
- // Shift the input right and extract low 32 bits.
- __ psrlq(input_reg, xmm_scratch);
- __ movd(Operand(result_reg), input_reg);
-
- // Use the prepared mask in temp_reg to negate the result if necessary.
- __ xor_(result_reg, Operand(temp_reg));
- __ sub(result_reg, Operand(temp_reg));
- __ bind(&done);
- }
- } else {
- Label done;
- __ cvttsd2si(result_reg, Operand(input_reg));
- __ cvtsi2sd(xmm0, Operand(result_reg));
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ test(result_reg, Operand(result_reg));
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ and_(result_reg, 1);
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- __ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- __ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(first));
-
- // If there is only one type in the interval check for equality.
- if (first == last) {
- DeoptimizeIf(not_equal, instr->environment());
- } else {
- DeoptimizeIf(below, instr->environment());
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(last));
- DeoptimizeIf(above, instr->environment());
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (IsPowerOf2(mask)) {
- ASSERT(tag == 0 || IsPowerOf2(tag));
- __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
- } else {
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ and_(temp, mask);
- __ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (instr->hydrogen()->target_in_new_space()) {
- Register reg = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
- __ cmp(reg, Operand::Cell(cell));
- } else {
- Operand operand = ToOperand(instr->value());
- __ cmp(operand, target);
- }
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMapCommon(Register reg,
- Handle<Map> map,
- CompareMapMode mode,
- LInstruction* instr) {
- Label success;
- __ CompareMap(reg, map, &success, mode);
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&success);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- Register reg = ToRegister(input);
-
- Label success;
- SmallMapList* map_set = instr->hydrogen()->map_set();
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
- __ j(equal, &success);
- }
- Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- ASSERT(instr->unclamped()->Equals(instr->result()));
- Register value_reg = ToRegister(instr->result());
- __ ClampUint8(value_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- CpuFeatures::Scope scope(SSE2);
-
- ASSERT(instr->unclamped()->Equals(instr->result()));
- Register input_reg = ToRegister(instr->unclamped());
- Label is_smi, done, heap_number;
-
- __ JumpIfSmi(input_reg, &is_smi);
-
- // Check for heap number
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
- __ mov(input_reg, 0);
- __ jmp(&done, Label::kNear);
-
- // Heap number
- __ bind(&heap_number);
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
- __ jmp(&done, Label::kNear);
-
- // smi
- __ bind(&is_smi);
- __ SmiUntag(input_reg);
- __ ClampUint8(input_reg);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
- Register reg = ToRegister(instr->temp());
-
- ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
- ZoneList<Handle<Map> >* maps = instr->maps();
-
- ASSERT(prototypes->length() == maps->length());
-
- // TODO(ulan): Move this check to hydrogen and split HCheckPrototypeMaps
- // into two instruction: one that checks the prototypes and another that
- // loads the holder (HConstant). Find a way to do it without breaking
- // parallel recompilation.
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- __ LoadHeapObject(reg, prototypes->at(prototypes->length() - 1));
- } else {
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(reg, prototypes->at(i));
- DoCheckMapCommon(reg, maps->at(i), ALLOW_ELEMENT_TRANSITION_MAPS, instr);
- }
- }
-}
-
-
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- // Allocate memory for the object. The initial map might change when
- // the constructor's prototype changes, but instance size and property
- // counts remain unchanged (if slack tracking finished).
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- no_reg,
- scratch,
- deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(scratch, constructor);
- __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(map);
- __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
- instance_size >> kPointerSizeLog2);
- __ Assert(equal, "Unexpected instance size");
- __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
- initial_map->pre_allocated_property_fields());
- __ Assert(equal, "Unexpected pre-allocated property fields count");
- __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
- initial_map->unused_property_fields());
- __ Assert(equal, "Unexpected unused property fields count");
- __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
- initial_map->inobject_properties());
- __ Assert(equal, "Unexpected in-object property fields count");
- }
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ mov(FieldOperand(result, JSObject::kMapOffset), map);
- __ mov(scratch, factory()->empty_fixed_array());
- __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
- __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
- if (initial_map->inobject_properties() != 0) {
- __ mov(scratch, factory()->undefined_value());
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ mov(FieldOperand(result, property_offset), scratch);
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- __ push(Immediate(Smi::FromInt(instance_size)));
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr, instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
-
- // Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
- } else {
- Register size = ToRegister(instr->size());
- __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
- }
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
- Register result = ToRegister(instr->result());
-
- __ SmiTag(size);
- PushSafepointRegistersScope scope(this);
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- if (!size.is(result)) {
- __ StoreToSafepointRegisterSlot(result, size);
- }
- __ push(size);
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr, instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
- AllocationSiteMode allocation_site_mode =
- instr->hydrogen()->allocation_site_mode();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // Load the map's "bit field 2". We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(ebx, Map::kElementsKindMask);
- __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Set up the parameters to the stub/runtime call.
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ push(Immediate(isolate()->factory()->empty_fixed_array()));
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode) {
- ASSERT(!source.is(ecx));
- ASSERT(!result.is(ecx));
-
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- object->map()->CanTrackAllocationSite();
-
- if (FLAG_debug_code) {
- __ LoadHeapObject(ecx, object);
- __ cmp(source, ecx);
- __ Assert(equal, "Unexpected object literal boilerplate");
- __ mov(ecx, FieldOperand(source, HeapObject::kMapOffset));
- __ cmp(ecx, Handle<Map>(object->map()));
- __ Assert(equal, "Unexpected boilerplate map");
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- __ and_(ecx, Map::kElementsKindMask);
- __ cmp(ecx, object->GetElementsKind() << Map::kElementsKindShift);
- __ Assert(equal, "Unexpected boilerplate elements kind");
- }
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_size = has_elements ? elements->Size() : 0;
- int elements_offset = *offset + object_size;
- if (create_allocation_site_info) {
- elements_offset += AllocationSiteInfo::kSize;
- *offset += AllocationSiteInfo::kSize;
- }
-
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ lea(ecx, Operand(result, elements_offset));
- } else {
- __ mov(ecx, FieldOperand(source, i));
- }
- __ mov(FieldOperand(result, object_offset + i), ecx);
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
- isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(ecx, Operand(result, *offset));
- __ mov(FieldOperand(result, total_offset), ecx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
- __ mov(FieldOperand(result, total_offset), ecx);
- } else {
- __ mov(FieldOperand(result, total_offset), Immediate(value));
- }
- }
-
- // Build Allocation Site Info if desired
- if (create_allocation_site_info) {
- __ mov(FieldOperand(result, object_size),
- Immediate(Handle<Map>(isolate()->heap()->
- allocation_site_info_map())));
- __ mov(FieldOperand(result, object_size + kPointerSize), source);
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ mov(ecx, FieldOperand(source, i));
- __ mov(FieldOperand(result, elements_offset + i), ecx);
- }
-
- // Copy elements backing store content.
- int elements_length = elements->length();
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
- int32_t value_high = static_cast<int32_t>(value >> 32);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(FieldOperand(result, total_offset), Immediate(value_low));
- __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i), isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(ecx, Operand(result, *offset));
- __ mov(FieldOperand(result, total_offset), ecx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
- __ mov(FieldOperand(result, total_offset), ecx);
- } else {
- __ mov(FieldOperand(result, total_offset), Immediate(value));
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the literal boilerplate ElementsKind is of a type different than
- // the expected one. The check isn't necessary if the boilerplate has already
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
- // Load the map's "bit field 2". We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(ecx, Map::kElementsKindMask);
- __ cmp(ecx, boilerplate_elements_kind << Map::kElementsKindShift);
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset,
- instr->hydrogen()->allocation_site_mode());
- ASSERT_EQ(size, offset);
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= instr->hydrogen()->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
-
- // Set up the parameters to the stub/runtime call and pick the right
- // runtime function or stub to call.
- int properties_count = constant_properties->length() / 2;
- if (instr->hydrogen()->depth() > 1) {
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- } else {
- __ LoadHeapObject(eax, literals);
- __ mov(ebx, Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(ecx, Immediate(constant_properties));
- __ mov(edx, Immediate(Smi::FromInt(flags)));
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->value()).is(eax));
- __ push(eax);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- Label materialized;
- // Registers will be used as follows:
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
- // esi = context.
- int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(ecx, instr->hydrogen()->literals());
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, factory()->undefined_value());
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(instr->hydrogen()->pattern()));
- __ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
-}
-
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
- __ push(Immediate(shared_info));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- __ push(esi);
- __ push(Immediate(shared_info));
- __ push(Immediate(pretenure
- ? factory()->true_value()
- : factory()->false_value()));
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- LOperand* input = instr->value();
- EmitPushTaggedOperand(input);
- CallRuntime(Runtime::kTypeof, 1, instr);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition final_branch_condition =
- EmitTypeofIs(true_label, false_label, input, instr->type_literal());
- if (final_branch_condition != no_condition) {
- EmitBranch(true_block, false_block, final_branch_condition);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
- Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_string())) {
- __ JumpIfSmi(input, true_label);
- __ cmp(FieldOperand(input, HeapObject::kMapOffset),
- factory()->heap_number_map());
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->string_string())) {
- __ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label);
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- final_branch_condition = zero;
-
- } else if (type_name->Equals(heap()->boolean_string())) {
- __ cmp(input, factory()->true_value());
- __ j(equal, true_label);
- __ cmp(input, factory()->false_value());
- final_branch_condition = equal;
-
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
- __ cmp(input, factory()->null_value());
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->undefined_string())) {
- __ cmp(input, factory()->undefined_value());
- __ j(equal, true_label);
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- final_branch_condition = not_zero;
-
- } else if (type_name->Equals(heap()->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
- __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->object_string())) {
- __ JumpIfSmi(input, false_label);
- if (!FLAG_harmony_typeof) {
- __ cmp(input, factory()->null_value());
- __ j(equal, true_label);
- }
- if (FLAG_harmony_symbols) {
- __ CmpObjectType(input, SYMBOL_TYPE, input);
- __ j(equal, true_label);
- __ CmpInstanceType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- } else {
- __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
- }
- __ j(below, false_label);
- __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, false_label);
- // Check for undetectable objects => false.
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- final_branch_condition = zero;
-
- } else {
- __ jmp(false_label);
- }
- return final_branch_condition;
-}
-
-
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- EmitIsConstructCall(temp);
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker, Label::kNear);
- __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt() {
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- __ Nop(padding_size);
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(no_condition, instr->environment());
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- __ push(ToOperand(obj));
- EmitPushTaggedOperand(key);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- // Create safepoint generator that will also ensure enough space in the
- // reloc info for patching in deoptimization (since this is invoking a
- // builtin)
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ push(Immediate(Smi::FromInt(strict_mode_flag())));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStackCheck* instr_;
- };
-
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &done, Label::kNear);
-
- ASSERT(instr->context()->IsRegister());
- ASSERT(ToRegister(instr->context()).is(esi));
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- EnsureSpaceForLazyDeopt();
- __ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- } else {
- ASSERT(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(below, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoIn(LIn* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- EmitPushTaggedOperand(key);
- EmitPushTaggedOperand(obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- __ cmp(eax, isolate()->factory()->undefined_value());
- DeoptimizeIf(equal, instr->environment());
-
- __ cmp(eax, isolate()->factory()->null_value());
- DeoptimizeIf(equal, instr->environment());
-
- __ test(eax, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
-
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- DeoptimizeIf(below_equal, instr->environment());
-
- Label use_cache, call_runtime;
- __ CheckEnumCache(&call_runtime);
-
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- __ jmp(&use_cache, Label::kNear);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(eax);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
-
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->meta_map());
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ cmp(result, Immediate(Smi::FromInt(0)));
- __ j(not_equal, &load_cache);
- __ mov(result, isolate()->factory()->empty_fixed_array());
- __ jmp(&done);
-
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ mov(result,
- FieldOperand(result, DescriptorArray::kEnumCacheOffset));
- __ mov(result,
- FieldOperand(result, FixedArray::SizeFor(instr->idx())));
- __ bind(&done);
- __ test(result, result);
- DeoptimizeIf(equal, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- __ cmp(ToRegister(instr->map()),
- FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
-
- Label out_of_object, done;
- __ cmp(index, Immediate(0));
- __ j(less, &out_of_object);
- __ mov(object, FieldOperand(object,
- index,
- times_half_pointer_size,
- JSObject::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&out_of_object);
- __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
- __ neg(index);
- // Index is now equal to out of object property index plus 1.
- __ mov(object, FieldOperand(object,
- index,
- times_half_pointer_size,
- FixedArray::kHeaderSize - kPointerSize));
- __ bind(&done);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h
deleted file mode 100644
index ab6779a..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h
+++ /dev/null
@@ -1,475 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
-#define V8_IA32_LITHIUM_CODEGEN_IA32_H_
-
-#include "ia32/lithium-ia32.h"
-
-#include "checks.h"
-#include "deoptimizer.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "ia32/lithium-gap-resolver-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class LGapNode;
-class SafepointGenerator;
-
-class LCodeGen BASE_EMBEDDED {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
- deoptimizations_(4, info->zone()),
- jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
- inlined_function_count_(0),
- scope_(info->scope()),
- status_(UNUSED),
- translations_(info->zone()),
- deferred_(8, info->zone()),
- dynamic_frame_alignment_(false),
- support_aligned_spilled_doubles_(false),
- osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
- bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- // Support for converting LOperands to assembler types.
- Operand ToOperand(LOperand* op) const;
- Register ToRegister(LOperand* op) const;
- XMMRegister ToDoubleRegister(LOperand* op) const;
- bool IsX87TopOfStack(LOperand* op) const;
-
- bool IsInteger32(LConstantOperand* op) const;
- Immediate ToInteger32Immediate(LOperand* op) const {
- return Immediate(ToInteger32(LConstantOperand::cast(op)));
- }
-
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // A utility for instructions that return floating point values on X87.
- void HandleX87FPReturnValue(LInstruction* instr);
-
- // The operand denoting the second word (the one with a higher address) of
- // a double stack slot.
- Operand HighOperand(LOperand* op);
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
-
- void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LInstruction* instr);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
-
- void EnsureRelocSpaceForDeoptimization();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
-
- LPlatformChunk* chunk() const { return chunk_; }
- Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
-
- int GetNextEmittedBlock(int block);
-
- void EmitClassOfTest(Label* if_true,
- Label* if_false,
- Handle<String> class_name,
- Register input,
- Register temporary,
- Register temporary2);
-
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return info()->num_parameters(); }
-
- void Abort(const char* reason);
- void Comment(const char* format, ...);
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- // Code generation passes. Returns true if code generation should
- // continue.
- bool GeneratePrologue();
- bool GenerateBody();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
-
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void CallRuntime(const Runtime::Function* fun,
- int argc,
- LInstruction* instr);
-
- void CallRuntime(Runtime::FunctionId id,
- int argc,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, argc, instr);
- }
-
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context);
-
- void LoadContextFromDeferred(LOperand* context);
-
- enum EDIState {
- EDI_UNINITIALIZED,
- EDI_CONTAINS_TARGET
- };
-
- // Generate a direct call to a known function. Expects the function
- // to be in edi.
- void CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr,
- CallKind call_kind,
- EDIState edi_state);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
-
- void AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
- void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
- Register ToRegister(int index) const;
- XMMRegister ToDoubleRegister(int index) const;
- int ToInteger32(LConstantOperand* op) const;
-
- double ToDouble(LConstantOperand* op) const;
- Operand BuildFastArrayOperand(LOperand* elements_pointer,
- LOperand* key,
- Representation key_representation,
- ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index = 0);
-
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
-
- // Support for recording safepoint and position information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordPosition(int position);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
- void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitNumberUntagD(
- Register input,
- Register temp,
- XMMRegister result,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name);
-
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string);
-
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp);
-
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
-
- void EnsureSpaceForLazyDeopt();
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- // Emits code for pushing either a tagged constant, a (non-double)
- // register, or a stack slot operand.
- void EmitPushTaggedOperand(LOperand* operand);
-
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- struct JumpTableEntry {
- inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
- : label(),
- address(entry),
- needs_frame(frame),
- is_lazy_deopt(is_lazy) { }
- Label label;
- Address address;
- bool needs_frame;
- bool is_lazy_deopt;
- };
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
- ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
- int inlined_function_count_;
- Scope* const scope_;
- Status status_;
- TranslationBuffer translations_;
- ZoneList<LDeferredCode*> deferred_;
- bool dynamic_frame_alignment_;
- bool support_aligned_spilled_doubles_;
- int osr_pc_offset_;
- int last_lazy_deopt_pc_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiler from a set of parallel moves to a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope BASE_EMBEDDED {
- public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen)
- : codegen_(codegen) {
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->masm_->PushSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
- ASSERT(codegen_->info()->is_calling());
- }
-
- ~PushSafepointRegistersScope() {
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- codegen_->masm_->PopSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() { }
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_LITHIUM_CODEGEN_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc
deleted file mode 100644
index 6fee7fe..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ /dev/null
@@ -1,494 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "ia32/lithium-gap-resolver-ia32.h"
-#include "ia32/lithium-codegen-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner),
- moves_(32, owner->zone()),
- source_uses_(),
- destination_uses_(),
- spilled_register_(-1) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(HasBeenReset());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- PerformMove(i);
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- ASSERT(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- Finish();
- ASSERT(HasBeenReset());
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) AddMove(move);
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph. We use operand swaps to resolve cycles,
- // which means that a call to PerformMove could change any source operand
- // in the move graph.
-
- ASSERT(!moves_[index].IsPending());
- ASSERT(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved on the side.
- ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- // Though PerformMove can change any source operand in the move graph,
- // this call cannot create a blocking move via a swap (this loop does
- // not miss any). Assume there is a non-blocking move with source A
- // and this move is blocked on source B and there is a swap of A and
- // B. Then A and B must be involved in the same cycle (or they would
- // not be swapped). Since this move's destination is B and there is
- // only a single incoming edge to an operand, this move must also be
- // involved in the same cycle. In that case, the blocking move will
- // be created but will be "pending" when we return from PerformMove.
- PerformMove(i);
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // This move's source may have changed due to swaps to resolve cycles and
- // so it may now be the last move in the cycle. If so remove it.
- if (moves_[index].source()->Equals(destination)) {
- RemoveMove(index);
- return;
- }
-
- // The move may be blocked on a (at most one) pending move, in which case
- // we have a cycle. Search for such a blocking move and perform a swap to
- // resolve it.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
- EmitSwap(index);
- return;
- }
- }
-
- // This move is not blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::AddMove(LMoveOperands move) {
- LOperand* source = move.source();
- if (source->IsRegister()) ++source_uses_[source->index()];
-
- LOperand* destination = move.destination();
- if (destination->IsRegister()) ++destination_uses_[destination->index()];
-
- moves_.Add(move, cgen_->zone());
-}
-
-
-void LGapResolver::RemoveMove(int index) {
- LOperand* source = moves_[index].source();
- if (source->IsRegister()) {
- --source_uses_[source->index()];
- ASSERT(source_uses_[source->index()] >= 0);
- }
-
- LOperand* destination = moves_[index].destination();
- if (destination->IsRegister()) {
- --destination_uses_[destination->index()];
- ASSERT(destination_uses_[destination->index()] >= 0);
- }
-
- moves_[index].Eliminate();
-}
-
-
-int LGapResolver::CountSourceUses(LOperand* operand) {
- int count = 0;
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
- ++count;
- }
- }
- return count;
-}
-
-
-Register LGapResolver::GetFreeRegisterNot(Register reg) {
- int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
- return Register::FromAllocationIndex(i);
- }
- }
- return no_reg;
-}
-
-
-bool LGapResolver::HasBeenReset() {
- if (!moves_.is_empty()) return false;
- if (spilled_register_ >= 0) return false;
-
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] != 0) return false;
- if (destination_uses_[i] != 0) return false;
- }
- return true;
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::Finish() {
- if (spilled_register_ >= 0) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
- spilled_register_ = -1;
- }
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::EnsureRestored(LOperand* operand) {
- if (operand->IsRegister() && operand->index() == spilled_register_) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
- spilled_register_ = -1;
- }
-}
-
-
-Register LGapResolver::EnsureTempRegister() {
- // 1. We may have already spilled to create a temp register.
- if (spilled_register_ >= 0) {
- return Register::FromAllocationIndex(spilled_register_);
- }
-
- // 2. We may have a free register that we can use without spilling.
- Register free = GetFreeRegisterNot(no_reg);
- if (!free.is(no_reg)) return free;
-
- // 3. Prefer to spill a register that is not used in any remaining move
- // because it will not need to be restored until the end.
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
- Register scratch = Register::FromAllocationIndex(i);
- __ push(scratch);
- spilled_register_ = i;
- return scratch;
- }
- }
-
- // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
- Register scratch = Register::FromAllocationIndex(0);
- __ push(scratch);
- spilled_register_ = 0;
- return scratch;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
- EnsureRestored(source);
- EnsureRestored(destination);
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- ASSERT(destination->IsRegister() || destination->IsStackSlot());
- Register src = cgen_->ToRegister(source);
- Operand dst = cgen_->ToOperand(destination);
- __ mov(dst, src);
-
- } else if (source->IsStackSlot()) {
- ASSERT(destination->IsRegister() || destination->IsStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- __ mov(dst, src);
- } else {
- // Spill on demand to use a temporary register for memory-to-memory
- // moves.
- Register tmp = EnsureTempRegister();
- Operand dst = cgen_->ToOperand(destination);
- __ mov(tmp, src);
- __ mov(dst, tmp);
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
- } else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
- }
- } else {
- ASSERT(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
- } else {
- Register tmp = EnsureTempRegister();
- __ LoadObject(tmp, cgen_->ToHandle(constant_source));
- __ mov(dst, tmp);
- }
- }
-
- } else if (source->IsDoubleRegister()) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movaps(dst, src);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movdbl(dst, src);
- }
- } else if (source->IsDoubleStackSlot()) {
- CpuFeatures::Scope scope(SSE2);
- ASSERT(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movdbl(dst, src);
- } else {
- // We rely on having xmm0 available as a fixed scratch register.
- Operand dst = cgen_->ToOperand(destination);
- __ movdbl(xmm0, src);
- __ movdbl(dst, xmm0);
- }
- } else {
- UNREACHABLE();
- }
-
- RemoveMove(index);
-}
-
-
-void LGapResolver::EmitSwap(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
- EnsureRestored(source);
- EnsureRestored(destination);
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Register-register.
- Register src = cgen_->ToRegister(source);
- Register dst = cgen_->ToRegister(destination);
- __ xchg(dst, src);
-
- } else if ((source->IsRegister() && destination->IsStackSlot()) ||
- (source->IsStackSlot() && destination->IsRegister())) {
- // Register-memory. Use a free register as a temp if possible. Do not
- // spill on demand because the simple spill implementation cannot avoid
- // spilling src at this point.
- Register tmp = GetFreeRegisterNot(no_reg);
- Register reg =
- cgen_->ToRegister(source->IsRegister() ? source : destination);
- Operand mem =
- cgen_->ToOperand(source->IsRegister() ? destination : source);
- if (tmp.is(no_reg)) {
- __ xor_(reg, mem);
- __ xor_(mem, reg);
- __ xor_(reg, mem);
- } else {
- __ mov(tmp, mem);
- __ mov(mem, reg);
- __ mov(reg, tmp);
- }
-
- } else if (source->IsStackSlot() && destination->IsStackSlot()) {
- // Memory-memory. Spill on demand to use a temporary. If there is a
- // free register after that, use it as a second temporary.
- Register tmp0 = EnsureTempRegister();
- Register tmp1 = GetFreeRegisterNot(tmp0);
- Operand src = cgen_->ToOperand(source);
- Operand dst = cgen_->ToOperand(destination);
- if (tmp1.is(no_reg)) {
- // Only one temp register available to us.
- __ mov(tmp0, dst);
- __ xor_(tmp0, src);
- __ xor_(src, tmp0);
- __ xor_(tmp0, src);
- __ mov(dst, tmp0);
- } else {
- __ mov(tmp0, dst);
- __ mov(tmp1, src);
- __ mov(dst, tmp1);
- __ mov(src, tmp0);
- }
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
- CpuFeatures::Scope scope(SSE2);
- // XMM register-register swap. We rely on having xmm0
- // available as a fixed scratch register.
- XMMRegister src = cgen_->ToDoubleRegister(source);
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movaps(xmm0, src);
- __ movaps(src, dst);
- __ movaps(dst, xmm0);
-
- } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
- CpuFeatures::Scope scope(SSE2);
- // XMM register-memory swap. We rely on having xmm0
- // available as a fixed scratch register.
- ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
- XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
- ? source
- : destination);
- Operand other =
- cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
- __ movdbl(xmm0, other);
- __ movdbl(other, reg);
- __ movdbl(reg, Operand(xmm0));
-
- } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
- CpuFeatures::Scope scope(SSE2);
- // Double-width memory-to-memory. Spill on demand to use a general
- // purpose temporary register and also rely on having xmm0 available as
- // a fixed scratch register.
- Register tmp = EnsureTempRegister();
- Operand src0 = cgen_->ToOperand(source);
- Operand src1 = cgen_->HighOperand(source);
- Operand dst0 = cgen_->ToOperand(destination);
- Operand dst1 = cgen_->HighOperand(destination);
- __ movdbl(xmm0, dst0); // Save destination in xmm0.
- __ mov(tmp, src0); // Then use tmp to copy source to destination.
- __ mov(dst0, tmp);
- __ mov(tmp, src1);
- __ mov(dst1, tmp);
- __ movdbl(src0, xmm0);
-
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
-
- // The swap of source and destination has executed a move from source to
- // destination.
- RemoveMove(index);
-
- // Any unperformed (including pending) move with a source of either
- // this move's source or destination needs to have their source
- // changed to reflect the state of affairs after the swap.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(source)) {
- moves_[i].set_source(destination);
- } else if (other_move.Blocks(destination)) {
- moves_[i].set_source(source);
- }
- }
-
- // In addition to swapping the actual uses as sources, we need to update
- // the use counts.
- if (source->IsRegister() && destination->IsRegister()) {
- int temp = source_uses_[source->index()];
- source_uses_[source->index()] = source_uses_[destination->index()];
- source_uses_[destination->index()] = temp;
- } else if (source->IsRegister()) {
- // We don't have use counts for non-register operands like destination.
- // Compute those counts now.
- source_uses_[source->index()] = CountSourceUses(source);
- } else if (destination->IsRegister()) {
- source_uses_[destination->index()] = CountSourceUses(destination);
- }
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h b/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h
deleted file mode 100644
index 3a58f58..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-#define V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-
-#include "v8.h"
-
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // Emit any code necessary at the end of a gap move.
- void Finish();
-
- // Add or delete a move from the move graph without emitting any code.
- // Used to build up the graph and remove trivial moves.
- void AddMove(LMoveOperands move);
- void RemoveMove(int index);
-
- // Report the count of uses of operand as a source in a not-yet-performed
- // move. Used to rebuild use counts.
- int CountSourceUses(LOperand* operand);
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Execute a move by emitting a swap of two operands. The move from
- // source to destination is removed from the move graph.
- void EmitSwap(int index);
-
- // Ensure that the given operand is not spilled.
- void EnsureRestored(LOperand* operand);
-
- // Return a register that can be used as a temp register, spilling
- // something if necessary.
- Register EnsureTempRegister();
-
- // Return a known free register different from the given one (which could
- // be no_reg---returning any free register), or no_reg if there is no such
- // register.
- Register GetFreeRegisterNot(Register reg);
-
- // Verify that the state is the initial one, ready to resolve a single
- // parallel move.
- bool HasBeenReset();
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- // Source and destination use counts for the general purpose registers.
- int source_uses_[Register::kMaxNumAllocatableRegisters];
- int destination_uses_[Register::kMaxNumAllocatableRegisters];
-
- // If we had to spill on demand, the currently spilled register's
- // allocation index.
- int spilled_register_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/lithium-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-ia32.cc
deleted file mode 100644
index 910219d..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-ia32.cc
+++ /dev/null
@@ -1,2604 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "lithium-allocator-inl.h"
-#include "ia32/lithium-ia32.h"
-#include "ia32/lithium-codegen-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Inputs operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- ASSERT(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "sal-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d",
- *hydrogen()->class_name(),
- true_block_id(),
- false_block_id());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
- true_block_id(), false_block_id());
-}
-
-
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
-}
-
-
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
-void LMathPowHalf::PrintDataTo(StringStream* stream) {
- stream->Add("/pow_half ");
- value()->PrintTo(stream);
-}
-
-
-void LMathRound::PrintDataTo(StringStream* stream) {
- stream->Add("/round ");
- value()->PrintTo(stream);
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[ecx] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ASSERT(hydrogen()->property_cell()->value()->IsSmi());
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(hydrogen()->property_cell()->value())->value());
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
-
- stream->Add(" length ");
- length()->PrintTo(stream);
-
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
- // Skip a slot if for a double-width slot.
- if (is_double) {
- spill_slot_count_++;
- spill_slot_count_ |= 1;
- num_double_slots_++;
- }
- return spill_slot_count_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
- } else {
- stream->Add("] <- ");
- }
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- ASSERT(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
-
- // Reserve the first spill slot for the state of dynamic alignment.
- if (info()->IsOptimizing()) {
- int alignment_state_index = chunk_->GetNextSpillIndex(false);
- ASSERT_EQ(alignment_state_index, 0);
- USE(alignment_state_index);
- }
-
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-void LChunkBuilder::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- XMMRegister::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- X87TopOfStackRegister::ToAllocationIndex(reg));
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr,
- int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineX87TOS(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, ToUnallocated(x87tos));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- if (hinstr->HasObservableSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_.IsNone());
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = sim->ast_id();
- }
-
- // If instruction does not have side-effects lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach environment to this call even if
- // call sequence can not deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- operand->set_virtual_register(allocator_->GetVirtualRegister());
- if (!allocator_->AllocationOk()) {
- Abort("Not enough virtual registers (temps).");
- }
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseFixed(right_value, ecx);
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
- }
- }
-
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
- HValue* left = instr->left();
- HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left_operand = UseFixed(left, edx);
- LOperand* right_operand = UseFixed(right, eax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, context, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- ASSERT(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- ASSERT(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- ASSERT(last_environment != NULL);
- // Only copy the environment, if it is later used again.
- if (pred->end()->SecondSuccessor() == NULL) {
- ASSERT(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- ASSERT(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment, it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
-
- if (instr != NULL) {
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- instr->set_hydrogen_value(current);
- chunk_->AddInstruction(instr, current_block_);
- }
- current_instruction_ = old_current;
-}
-
-
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
- LEnvironment* result =
- new(zone()) LEnvironment(hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
- }
-
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- ASSERT(value->IsConstant());
- ASSERT(!value->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
-
- // Untagged integers or doubles, smis and booleans don't require a
- // deoptimization environment nor a temp register.
- Representation rep = value->representation();
- HType type = value->type();
- if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
- return new(zone()) LBranch(UseRegister(value), NULL);
- }
-
- ToBooleanStub::Types expected = instr->expected_input_types();
- // We need a temporary register when we have to access the map *or* we have
- // no type info yet, in which case we handle all cases (including the ones
- // involving maps).
- bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
- LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp));
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpMapAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
- LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
- LOperand* context = UseFixed(instr->context(), esi);
- LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(
- UseFixed(instr->context(), esi),
- UseFixed(instr->left(), InstanceofStub::left()),
- FixedTemp(edi));
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegister(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
- LOperand* temp = TempRegister();
- LWrapReceiver* result =
- new(zone()) LWrapReceiver(receiver, function, temp);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), edi);
- LOperand* receiver = UseFixed(instr->receiver(), eax);
- LOperand* length = UseFixed(instr->length(), ebx);
- LOperand* elements = UseFixed(instr->elements(), ecx);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
- LOperand* argument = UseAny(instr->argument());
- return new(zone()) LPushArgument(argument);
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses()
- ? NULL
- : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- if (instr->HasNoUses()) return NULL;
-
- if (info()->IsStub()) {
- return DefineFixed(new(zone()) LContext, esi);
- }
-
- return DefineAsRegister(new(zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context,
- instr->qml_global()));
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
-}
-
-
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* context = UseAny(instr->context()); // Not actually used.
- LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
- input);
- return DefineSameAsFirst(result);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* value = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
- return DefineAsRegister(result);
- } else if (op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
- input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LOperand* context = UseAny(instr->context()); // Deferred use by MathAbs.
- if (op == kMathPowHalf) {
- LOperand* temp = TempRegister();
- LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
- return DefineSameAsFirst(result);
- } else if (op == kMathRound) {
- LOperand* temp = FixedTemp(xmm4);
- LMathRound* result = new(zone()) LMathRound(context, input, temp);
- return AssignEnvironment(DefineAsRegister(result));
- }
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
- input);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathSqrt:
- return DefineSameAsFirst(result);
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* key = UseFixed(instr->key(), ecx);
- argument_count_ -= instr->argument_count();
- LCallKeyed* result = new(zone()) LCallKeyed(context, key);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
- LCallNamed* result = new(zone()) LCallNamed(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
- LCallGlobal* result = new(zone()) LCallGlobal(context, instr->qml_global());
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- ASSERT(FLAG_optimize_constructed_arrays);
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
- LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
- LCallFunction* result = new(zone()) LCallFunction(context, function);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new(zone()) LBitI(left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(instr->op(), context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* input = UseRegisterAtStart(instr->value());
- LBitNotI* result = new(zone()) LBitNotI(input);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
- }
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, eax));
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
- }
- return NULL;
-}
-
-
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
- }
- // A value with an integer representation does not need to be transformed.
- if (divisor->representation().IsInteger32()) {
- return divisor;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (divisor->IsChange() &&
- HChange::cast(divisor)->from().IsInteger32()) {
- return HChange::cast(divisor)->value();
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- if (!right->IsConstant()) {
- ASSERT(right->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(flooring_div, eax));
- }
-
- ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
- LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
- int32_t divisor_si = HConstant::cast(right)->Integer32Value();
- if (divisor_si == 0) {
- LOperand* dividend = UseRegister(instr->left());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
- } else if (IsPowerOf2(abs(divisor_si))) {
- // use dividend as temp if divisor < 0 && divisor != -1
- LOperand* dividend = divisor_si < -1 ? UseTempRegister(instr->left()) :
- UseRegisterAtStart(instr->left());
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
- } else {
- // needs edx:eax, plus a temp
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* temp = TempRegister();
- LInstruction* result = DefineFixed(
- new(zone()) LMathFloorOfDiv(dividend, divisor, temp), edx);
- return divisor_si < 0 ? AssignEnvironment(result) : result;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LInstruction* result;
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LModI* mod =
- new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
- result = DefineSameAsFirst(mod);
- } else {
- // The temporary operand is necessary to ensure that right is
- // not allocated into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* value = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new(zone()) LModI(value, divisor, temp);
- result = DefineFixed(mod, edx);
- }
-
- return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero))
- ? AssignEnvironment(result)
- : result;
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
- } else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- temp = TempRegister();
- }
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- AssignEnvironment(mul);
- }
- return DefineSameAsFirst(mul);
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new(zone()) LSubI(left, right);
- LInstruction* result = DefineSameAsFirst(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
- } else {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
- return DefineSameAsFirst(minmax);
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), xmm1) :
- UseFixed(instr->right(), eax);
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), eax);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LCmpT* result = new(zone()) LCmpT(context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
- } else {
- ASSERT(r.IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- LOperand* left;
- LOperand* right;
- if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
- left = UseRegisterOrConstantAtStart(instr->left());
- right = UseRegisterOrConstantAtStart(instr->right());
- } else {
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return new(zone()) LCmpIDAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- return new(zone()) LCmpConstantEqAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- // We only need a temp register for non-strict compare.
- LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- ASSERT(instr ->value()->representation().IsTagged());
- return new(zone()) LIsUndetectableAndBranch(
- UseRegisterAtStart(instr->value()), TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
-
- LStringCompareAndBranch* result = new(zone())
- LStringCompareAndBranch(context, left, right);
-
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LHasInstanceTypeAndBranch(
- UseRegisterAtStart(instr->value()),
- TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
- TempRegister(),
- TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* date = UseFixed(instr->value(), eax);
- LDateField* result =
- new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- ASSERT(ecx.is_byte_register());
- LOperand* value = UseFixed(instr->value(), ecx);
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
- HInductionVariableAnnotation* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- return AssignEnvironment(new(zone()) LBoundsCheck(
- UseRegisterOrConstantAtStart(instr->index()),
- UseAtStart(instr->length())));
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* value = UseFixed(instr->value(), eax);
- return MarkAsCall(new(zone()) LThrow(context, value), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- // Only mark conversions that might need to allocate as calling rather than
- // all changes. This makes simple, non-allocating conversion not have to force
- // building a stack frame.
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
- // Temp register only necessary for minus zero check.
- LOperand* temp = instr->deoptimize_on_minus_zero()
- ? TempRegister()
- : NULL;
- LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
- return AssignEnvironment(DefineAsRegister(res));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- if (instr->value()->type().IsSmi()) {
- return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
- } else {
- bool truncating = instr->CanTruncateToInt32();
- LOperand* xmm_temp =
- (truncating && CpuFeatures::IsSupported(SSE3))
- ? NULL
- : FixedTemp(xmm1);
- LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = CpuFeatures::IsSupported(SSE2)
- ? UseRegisterAtStart(instr->value())
- : UseAtStart(instr->value());
- LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
-
- // Make sure that temp and result_temp are different registers.
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
- return AssignPointerMap(Define(result, result_temp));
- } else {
- ASSERT(to.IsInteger32());
- bool truncating = instr->CanTruncateToInt32();
- bool needs_temp = truncating && !CpuFeatures::IsSupported(SSE3);
- LOperand* value = needs_temp ?
- UseTempRegister(instr->value()) : UseRegister(instr->value());
- LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegister(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new(zone()) LSmiTag(value));
- } else if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- } else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- }
- } else {
- ASSERT(to.IsDouble());
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp));
- } else {
- return DefineAsRegister(
- new(zone()) LInteger32ToDouble(Use(instr->value())));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- return AssignEnvironment(Define(result, temp));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
- LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- // If the target is in new space, we'll emit a global cell compare and so
- // want the value in a register. If the target gets promoted before we
- // emit code, we will still get the register but will do an immediate
- // compare instead of the cell compare. This is safe.
- LOperand* value = instr->target_in_new_space()
- ? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LCheckMaps* result = new(zone()) LCheckMaps(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- if (input_rep.IsDouble()) {
- LOperand* reg = UseRegister(value);
- return DefineFixed(new(zone()) LClampDToUint8(reg), eax);
- } else if (input_rep.IsInteger32()) {
- LOperand* reg = UseFixed(value, eax);
- return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
- } else {
- ASSERT(input_rep.IsTagged());
- LOperand* reg = UseFixed(value, eax);
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve xmm1 explicitly.
- LOperand* temp = FixedTemp(xmm1);
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
- return AssignEnvironment(DefineFixed(result, eax));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub()
- ? UseFixed(instr->context(), esi)
- : NULL;
- return new(zone()) LReturn(UseFixed(instr->value(), eax), context);
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- double value = instr->DoubleValue();
- LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
- ? TempRegister()
- : NULL;
- return DefineAsRegister(new(zone()) LConstantD(temp));
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* global_object = UseFixed(instr->global_object(), edx);
- LLoadGlobalGeneric* result =
- new(zone()) LLoadGlobalGeneric(context, global_object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LStoreGlobalCell* result =
- new(zone()) LStoreGlobalCell(UseRegister(instr->value()));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* global_object = UseFixed(instr->global_object(), edx);
- LOperand* value = UseFixed(instr->value(), eax);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(context, global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* value;
- LOperand* temp;
- LOperand* context = UseRegister(instr->context());
- if (instr->NeedsWriteBarrier()) {
- value = UseTempRegister(instr->value());
- temp = TempRegister();
- } else {
- value = UseRegister(instr->value());
- temp = NULL;
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- ASSERT(instr->representation().IsTagged());
- LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* obj = UseFixed(instr->object(), edx);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(context, obj);
- return MarkAsCall(DefineFixed(result, eax), instr);
- } else {
- LOperand* context = UseAny(instr->context()); // Not actually used.
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(context, obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), edx);
- LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
- TempRegister())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = ExternalArrayOpRequiresTemp(
- instr->key()->representation(), elements_kind);
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
-
- if (!instr->is_external()) {
- LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
- } else {
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
- }
-
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), edx);
- LOperand* key = UseFixed(instr->key(), ecx);
-
- LLoadKeyedGeneric* result =
- new(zone()) LLoadKeyedGeneric(context, object, key);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_external()) {
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- if (instr->value()->representation().IsDouble()) {
- LOperand* object = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-
- return new(zone()) LStoreKeyed(object, key, val);
- } else {
- ASSERT(instr->value()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
-
- LOperand* obj = UseRegister(instr->elements());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(obj, key, val);
- }
- }
-
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
-
- LOperand* external_pointer = UseRegister(instr->elements());
- // Determine if we need a byte register in this case for the value.
- bool val_is_fixed_register =
- elements_kind == EXTERNAL_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_PIXEL_ELEMENTS;
-
- LOperand* val = val_is_fixed_register
- ? UseFixed(instr->value(), eax)
- : UseRegister(instr->value());
- bool clobbers_key = ExternalArrayOpRequiresTemp(
- instr->key()->representation(), elements_kind);
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(external_pointer,
- key,
- val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), edx);
- LOperand* key = UseFixed(instr->key(), ecx);
- LOperand* value = UseFixed(instr->value(), eax);
-
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsTagged());
-
- LStoreKeyedGeneric* result =
- new(zone()) LStoreKeyedGeneric(context, object, key, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* object = UseRegister(instr->object());
- LOperand* new_map_reg = TempRegister();
- LOperand* temp_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL,
- new_map_reg, temp_reg);
- return result;
- } else if (FLAG_compiled_transitions) {
- LOperand* context = UseRegister(instr->context());
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
- return AssignPointerMap(result);
- } else {
- LOperand* object = UseFixed(instr->object(), eax);
- LOperand* fixed_object_reg = FixedTemp(edx);
- LOperand* new_map_reg = FixedTemp(ebx);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object,
- NULL,
- new_map_reg,
- fixed_object_reg);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
- instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = instr->is_in_object()
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else {
- obj = needs_write_barrier_for_map
- ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
-
- // We only need a scratch register if we have a write barrier or we
- // have a store into the properties array (not in-object-property).
- LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
- needs_write_barrier_for_map) ? TempRegister() : NULL;
-
- // We need a temporary register for write barrier of the map field.
- LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
-
- return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), edx);
- LOperand* value = UseFixed(instr->value(), eax);
-
- LStoreNamedGeneric* result =
- new(zone()) LStoreNamedGeneric(context, object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
- return MarkAsCall(DefineFixed(string_add, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result =
- new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result =
- new(zone()) LStringCharFromCode(context, char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* temp = TempRegister();
- LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* size = UseTempRegister(instr->size());
- LOperand* temp = TempRegister();
- LAllocate* result = new(zone()) LAllocate(context, size, temp);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LFastLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LObjectLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseAtStart(instr->object());
- LOperand* key = UseOrConstantAtStart(instr->key());
- LDeleteProperty* result = new(zone()) LDeleteProperty(context, object, key);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- ASSERT(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- ASSERT(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
- Register reg = descriptor->register_params_[instr->index()];
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
- LCallStub* result = new(zone()) LCallStub(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = Use(instr->index());
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), eax);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* value = UseAtStart(instr->value());
- LTypeof* result = new(zone()) LTypeof(context, value);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
-
- // If there is an instruction pending deoptimization environment create a
- // lazy bailout instruction to capture the environment.
- if (!pending_deoptimization_ast_id_.IsNone()) {
- ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
- LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
- LInstruction* result = AssignEnvironment(lazy_bailout);
- // Store the lazy deopt environment with the instruction if needed. Right
- // now it is only used for LInstanceOfKnownGlobal.
- instruction_pending_deoptimization_environment_->
- SetDeferredLazyDeoptimizationEnvironment(result->environment());
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = BailoutId::None();
- return result;
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- info()->MarkAsDeferredCalling();
- if (instr->is_function_entry()) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(new(zone()) LStackCheck(context), instr);
- } else {
- ASSERT(instr->is_backwards_branch());
- LOperand* context = UseAny(instr->context());
- return AssignEnvironment(
- AssignPointerMap(new(zone()) LStackCheck(context)));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
- }
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
- }
-
- HEnvironment* outer = current_block_->last_environment()->
- DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* key = UseOrConstantAtStart(instr->key());
- LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new(zone()) LIn(context, key, object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->enumerable(), eax);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseTempRegister(instr->index());
- return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/lithium-ia32.h b/src/3rdparty/v8/src/ia32/lithium-ia32.h
deleted file mode 100644
index e6fd655..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-ia32.h
+++ /dev/null
@@ -1,2849 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_LITHIUM_IA32_H_
-#define V8_IA32_LITHIUM_IA32_H_
-
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(Allocate) \
- V(AllocateObject) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(ArrayLiteral) \
- V(BitI) \
- V(BitNotI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallConstantFunction) \
- V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
- V(CallNew) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CallStub) \
- V(CheckFunction) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CmpIDAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(CmpConstantEqAndBranch) \
- V(ConstantD) \
- V(ConstantI) \
- V(ConstantT) \
- V(Context) \
- V(DeclareGlobals) \
- V(DeleteProperty) \
- V(Deoptimize) \
- V(DivI) \
- V(DoubleToI) \
- V(DummyUse) \
- V(ElementsKind) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
- V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(In) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(Uint32ToDouble) \
- V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
- V(IsObjectAndBranch) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(JSArrayLength) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadElements) \
- V(LoadExternalArrayPointer) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyed) \
- V(LoadKeyedGeneric) \
- V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
- V(LoadNamedGeneric) \
- V(MapEnumLength) \
- V(MathExp) \
- V(MathFloorOfDiv) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRound) \
- V(ModI) \
- V(MulI) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(ObjectLiteral) \
- V(OsrEntry) \
- V(OuterContext) \
- V(Parameter) \
- V(Power) \
- V(Random) \
- V(PushArgument) \
- V(RegExpLiteral) \
- V(Return) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
- V(StoreKeyed) \
- V(StoreKeyedGeneric) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(StringLength) \
- V(SubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(Throw) \
- V(ToFastProperties) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop)
-
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction: public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false) { }
- virtual ~LInstruction() { }
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
- void MarkAsCall() { is_call_ = true; }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return is_call_; }
- bool ClobbersRegisters() const { return is_call_; }
- virtual bool ClobbersDoubleRegisters() const {
- return is_call_ || !CpuFeatures::IsSupported(SSE2);
- }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- private:
- // Iterator support.
- friend class InputIterator;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- bool is_call_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
-
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
-};
-
-
-class LGap: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block) : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
- static LGap* cast(LInstruction* instr) {
- ASSERT(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap: public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool ClobbersDoubleRegisters() const { return false; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
-
- int block_id() const { return block_id_; }
-
- private:
- int block_id_;
-};
-
-
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-};
-
-
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-};
-
-
-class LLabel: public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- virtual void PrintDataTo(StringStream* stream);
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LCallStub: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallStub(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
-};
-
-
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
- virtual bool IsControl() const { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-};
-
-
-class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
- public:
- LWrapReceiver(LOperand* receiver,
- LOperand* function,
- LOperand* temp) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- temps_[0] = temp;
- }
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-};
-
-
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-};
-
-
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LModI: public LTemplateInstruction<1, 2, 1> {
- public:
- LModI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivI: public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-};
-
-
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
- public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulI: public LTemplateInstruction<1, 2, 1> {
- public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> {
- public:
- LUnaryMathOperation(LOperand* context, LOperand* value) {
- inputs_[1] = context;
- inputs_[0] = value;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
-};
-
-
-class LMathExp: public LTemplateInstruction<1, 1, 2> {
- public:
- LMathExp(LOperand* value,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- ExternalReference::InitializeMathExpData();
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
- public:
- LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[1] = context;
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LMathRound: public LTemplateInstruction<1, 2, 1> {
- public:
- LMathRound(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[1] = context;
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
-};
-
-
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
- }
-
- LOperand* left() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
-class LIsNilAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsNilAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
-
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStringCompareAndBranch: public LControlInstruction<3, 0> {
- public:
- LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
- public:
- LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
- "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpT: public LTemplateInstruction<1, 3, 0> {
- public:
- LCmpT(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
- public:
- LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
- public:
- LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
- lazy_deopt_env_ = env;
- }
-
- private:
- LEnvironment* lazy_deopt_env_;
-};
-
-
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI: public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-
- Token::Value op() const { return hydrogen()->op(); }
-};
-
-
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- Token::Value op() const { return op_; }
- bool can_deopt() const { return can_deopt_; }
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI: public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantD: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LConstantD(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value() const { return hydrogen()->handle(); }
-};
-
-
-class LBranch: public LControlInstruction<1, 1> {
- public:
- LBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCmpMapAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
-};
-
-
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
-class LDateField: public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index)
- : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- Smi* index() const { return index_; }
-
- private:
- Smi* index_;
-};
-
-
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
- public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
- inputs_[0] = string;
- inputs_[1] = index;
- inputs_[2] = value;
- }
-
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
-class LThrow: public LTemplateInstruction<0, 2, 0> {
- public:
- LThrow(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower: public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LRandom: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRandom(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return op_; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
- public:
- LArithmeticT(Token::Value op,
- LOperand* context,
- LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- Token::Value op() const { return op_; }
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn: public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LReturn(LOperand* value, LOperand* context) {
- inputs_[0] = value;
- inputs_[1] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadNamedFieldPolymorphic(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadNamedGeneric(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
- inputs_[0] = function;
- temps_[0] = temp;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- bool is_external() const {
- return hydrogen()->is_external();
- }
-
- virtual bool ClobbersDoubleRegisters() const {
- return !CpuFeatures::IsSupported(SSE2) &&
- !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
- bool key_is_smi() {
- return hydrogen()->key()->representation().IsTagged();
- }
-};
-
-
-inline static bool ExternalArrayOpRequiresTemp(
- Representation key_representation,
- ElementsKind elements_kind) {
- // Operations that require the key to be divided by two to be converted into
- // an index cannot fold the scale operation into a load and need an extra
- // temp register to do the work.
- return key_representation.IsTagged() &&
- (elements_kind == EXTERNAL_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_PIXEL_ELEMENTS);
-}
-
-
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
- public:
- LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = obj;
- inputs_[2] = key;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-};
-
-
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
-};
-
-
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreGlobalCell(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
-class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LDeclareGlobals(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalObject(LOperand* context, bool qml_global) {
- inputs_[0] = context;
- qml_global_ = qml_global;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
- bool qml_global() { return qml_global_; }
-
- private:
- bool qml_global_;
-};
-
-
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
-
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> function() { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
- public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
-};
-
-
-class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
- public:
- LCallKeyed(LOperand* context, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = key;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNamed: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNamed(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction: public LTemplateInstruction<1, 2, 0> {
- public:
- explicit LCallFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallGlobal(LOperand* context, bool qml_global)
- : qml_global_(qml_global) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
-};
-
-
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> target() const { return hydrogen()->target(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew: public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray: public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNewArray(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallRuntime(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
-};
-
-
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagU(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
- public:
- LDoubleToI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
- public:
- LTaggedToI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
-};
-
-
-class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LNumberUntagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change);
-};
-
-
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- bool needs_check() const { return needs_check_; }
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
- public:
- LStoreNamedField(LOperand* obj,
- LOperand* val,
- LOperand* temp,
- LOperand* temp_map) {
- inputs_[0] = obj;
- inputs_[1] = val;
- temps_[0] = temp;
- temps_[1] = temp_map;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp_map() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
-};
-
-
-class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- bool is_external() const { return hydrogen()->is_external(); }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
-};
-
-
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
- public:
- LStoreKeyedGeneric(LOperand* context,
- LOperand* object,
- LOperand* key,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- inputs_[3] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LTransitionElementsKind: public LTemplateInstruction<0, 2, 2> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* context,
- LOperand* new_map_temp,
- LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = context;
- temps_[0] = new_map_temp;
- temps_[1] = temp;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
- LOperand* temp() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
- public:
- LTrapAllocationMemento(LOperand* object,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
- "trap-allocation-memento")
-};
-
-
-class LStringAdd: public LTemplateInstruction<1, 3, 0> {
- public:
- LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt: public LTemplateInstruction<1, 3, 0> {
- public:
- LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharFromCode(LOperand* context, LOperand* char_code) {
- inputs_[0] = context;
- inputs_[1] = char_code;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* char_code() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
- }
-
- LOperand* string() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckFunction(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
-};
-
-
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
- public:
- LCheckInstanceType(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMaps(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LCheckPrototypeMaps(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- ZoneList<Handle<JSObject> >* prototypes() const {
- return hydrogen()->prototypes();
- }
- ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampDToUint8(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-};
-
-
-class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
- public:
- LAllocateObject(LOperand* context, LOperand* temp) {
- inputs_[0] = context;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
-class LAllocate: public LTemplateInstruction<1, 2, 1> {
- public:
- LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = size;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* size() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFastLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArrayLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LObjectLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
-class LRegExpLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
-class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFunctionLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
-};
-
-
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
-class LTypeof: public LTemplateInstruction<1, 2, 0> {
- public:
- LTypeof(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LDeleteProperty: public LTemplateInstruction<1, 3, 0> {
- public:
- LDeleteProperty(LOperand* context, LOperand* obj, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = obj;
- inputs_[2] = key;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry();
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
- LOperand* double_register_spills_[
- DoubleRegister::kMaxNumAllocatableRegisters];
-};
-
-
-class LStackCheck: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStackCheck(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LIn: public LTemplateInstruction<1, 3, 0> {
- public:
- LIn(LOperand* context, LOperand* key, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = key;
- inputs_[2] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* object() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
-class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
- public:
- LForInPrepareMap(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk: public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph),
- num_double_slots_(0) { }
-
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
-
- int num_double_slots() const { return num_double_slots_; }
-
- private:
- int num_double_slots_;
-};
-
-
-class LChunkBuilder BASE_EMBEDDED {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
- info_(info),
- graph_(graph),
- zone_(graph->zone()),
- status_(UNUSED),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
-
- private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(const char* reason);
-
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(XMMRegister reg);
- LUnallocated* ToUnallocated(X87TopOfStackRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- XMMRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // Operand created by UseRegister is guaranteed to be live until the end of
- // instruction. This means that register allocator will not reuse it's
- // register for any other operand inside instruction.
- // Operand created by UseRegisterAtStart is guaranteed to be live only at
- // instruction start. Register allocator is free to assign the same register
- // to some other operand used inside instruction (i.e. temporary or
- // output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg);
- template<int I, int T>
- LInstruction* DefineX87TOS(LTemplateInstruction<1, I, T>* instr);
- // Assigns an environment to an instruction. An instruction which can
- // deoptimize must have an environment.
- LInstruction* AssignEnvironment(LInstruction* instr);
- // Assigns a pointer map to an instruction. An instruction which can
- // trigger a GC or a lazy deoptimization must have a pointer map.
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // Marks a call for the register allocator. Assigns a pointer map to
- // support GC and lazy deoptimization. Assigns an environment to support
- // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
-
- void VisitInstruction(HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
-
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Zone* zone_;
- Status status_;
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- int argument_count_;
- LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_LITHIUM_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
deleted file mode 100644
index 587699f..0000000
--- a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
+++ /dev/null
@@ -1,3101 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "debug.h"
-#include "runtime.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// MacroAssembler implementation.
-
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
- : Assembler(arg_isolate, buffer, size),
- generating_stub_(false),
- allow_stub_calls_(true),
- has_frame_(false) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
- }
-}
-
-
-void MacroAssembler::InNewSpace(
- Register object,
- Register scratch,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- ASSERT(cc == equal || cc == not_equal);
- if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
- } else {
- mov(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
- }
- // Check that we can use a test_b.
- ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
- ASSERT(MemoryChunk::IN_TO_SPACE < 8);
- int mask = (1 << MemoryChunk::IN_FROM_SPACE)
- | (1 << MemoryChunk::IN_TO_SPACE);
- // If non-zero, the page belongs to new-space.
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
- static_cast<uint8_t>(mask));
- j(cc, condition_met, condition_met_distance);
-}
-
-
-void MacroAssembler::RememberedSetHelper(
- Register object, // Only used for debug checks.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- MacroAssembler::RememberedSetFinalAction and_then) {
- Label done;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- mov(scratch, Operand::StaticVariable(store_buffer));
- // Store pointer to buffer.
- mov(Operand(scratch, 0), addr);
- // Increment buffer top.
- add(scratch, Immediate(kPointerSize));
- // Write back new top of buffer.
- mov(Operand::StaticVariable(store_buffer), scratch);
- // Call stub on end of buffer.
- // Check for end of buffer.
- test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
- if (and_then == kReturnAtEnd) {
- Label buffer_overflowed;
- j(not_equal, &buffer_overflowed, Label::kNear);
- ret(0);
- bind(&buffer_overflowed);
- } else {
- ASSERT(and_then == kFallThroughAtEnd);
- j(equal, &done, Label::kNear);
- }
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(save_fp);
- CallStub(&store_buffer_overflow);
- if (and_then == kReturnAtEnd) {
- ret(0);
- } else {
- ASSERT(and_then == kFallThroughAtEnd);
- bind(&done);
- }
-}
-
-
-void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
- XMMRegister scratch_reg,
- Register result_reg) {
- Label done;
- Label conv_failure;
- pxor(scratch_reg, scratch_reg);
- cvtsd2si(result_reg, input_reg);
- test(result_reg, Immediate(0xFFFFFF00));
- j(zero, &done, Label::kNear);
- cmp(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
- mov(result_reg, Immediate(0));
- setcc(above, result_reg);
- sub(result_reg, Immediate(1));
- and_(result_reg, Immediate(255));
- jmp(&done, Label::kNear);
- bind(&conv_failure);
- Set(result_reg, Immediate(0));
- ucomisd(input_reg, scratch_reg);
- j(below, &done, Label::kNear);
- Set(result_reg, Immediate(255));
- bind(&done);
-}
-
-
-void MacroAssembler::ClampUint8(Register reg) {
- Label done;
- test(reg, Immediate(0xFFFFFF00));
- j(zero, &done, Label::kNear);
- setcc(negative, reg); // 1 if negative, 0 if positive.
- dec_b(reg); // 0 if negative, 255 if positive.
- bind(&done);
-}
-
-
-static double kUint32Bias =
- static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-
-
-void MacroAssembler::LoadUint32(XMMRegister dst,
- Register src,
- XMMRegister scratch) {
- Label done;
- cmp(src, Immediate(0));
- movdbl(scratch,
- Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
- cvtsi2sd(dst, src);
- j(not_sign, &done, Label::kNear);
- addsd(dst, scratch);
- bind(&done);
-}
-
-
-void MacroAssembler::RecordWriteArray(Register object,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
- test(value, Immediate(kSmiTagMask));
- j(zero, &done);
- }
-
- // Array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
- // into an array of words.
- Register dst = index;
- lea(dst, Operand(object, index, times_half_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
-
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(index, Immediate(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done, Label::kNear);
- }
-
- // Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
-
- lea(dst, FieldOperand(object, offset));
- if (emit_debug_code()) {
- Label ok;
- test_b(dst, (1 << kPointerSizeLog2) - 1);
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-void MacroAssembler::RecordWriteForMap(
- Register object,
- Handle<Map> map,
- Register scratch1,
- Register scratch2,
- SaveFPRegsMode save_fp) {
- Label done;
-
- Register address = scratch1;
- Register value = scratch2;
- if (emit_debug_code()) {
- Label ok;
- lea(address, FieldOperand(object, HeapObject::kMapOffset));
- test_b(address, (1 << kPointerSizeLog2) - 1);
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- ASSERT(!object.is(value));
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
- AssertNotSmi(object);
-
- if (!FLAG_incremental_marking) {
- return;
- }
-
- // A single check of the map's pages interesting flag suffices, since it is
- // only set during incremental collection, and then it's also guaranteed that
- // the from object's page's interesting flag is also set. This optimization
- // relies on the fact that maps can never be in new space.
- ASSERT(!isolate()->heap()->InNewSpace(*map));
- CheckPageFlagForMap(map,
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
-
- // Delay the initialization of |address| and |value| for the stub until it's
- // known that the will be needed. Up until this point their values are not
- // needed since they are embedded in the operands of instructions that need
- // them.
- lea(address, FieldOperand(object, HeapObject::kMapOffset));
- mov(value, Immediate(map));
- RecordWriteStub stub(object, value, address, OMIT_REMEMBERED_SET, save_fp);
- CallStub(&stub);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
- mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- ASSERT(!object.is(value));
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
- AssertNotSmi(object);
-
- if (remembered_set_action == OMIT_REMEMBERED_SET &&
- !FLAG_incremental_marking) {
- return;
- }
-
- if (emit_debug_code()) {
- Label ok;
- cmp(value, Operand(address, 0));
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis and stores into young gen.
- Label done;
-
- if (smi_check == INLINE_SMI_CHECK) {
- // Skip barrier if writing a smi.
- JumpIfSmi(value, &done, Label::kNear);
- }
-
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
-
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
- CallStub(&stub);
-
- bind(&done);
-
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(address, Immediate(BitCast<int32_t>(kZapValue)));
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::DebugBreak() {
- Set(eax, Immediate(0));
- mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
- CEntryStub ces(1);
- call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
-}
-#endif
-
-
-void MacroAssembler::Set(Register dst, const Immediate& x) {
- if (x.is_zero()) {
- xor_(dst, dst); // Shorter than mov.
- } else {
- mov(dst, x);
- }
-}
-
-
-void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
- mov(dst, x);
-}
-
-
-bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
- static const int kMaxImmediateBits = 17;
- if (!RelocInfo::IsNone(x.rmode_)) return false;
- return !is_intn(x.x_, kMaxImmediateBits);
-}
-
-
-void MacroAssembler::SafeSet(Register dst, const Immediate& x) {
- if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
- Set(dst, Immediate(x.x_ ^ jit_cookie()));
- xor_(dst, jit_cookie());
- } else {
- Set(dst, x);
- }
-}
-
-
-void MacroAssembler::SafePush(const Immediate& x) {
- if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
- push(Immediate(x.x_ ^ jit_cookie()));
- xor_(Operand(esp, 0), Immediate(jit_cookie()));
- } else {
- push(x);
- }
-}
-
-
-void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- // see ROOT_ACCESSOR macro in factory.h
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
-}
-
-
-void MacroAssembler::CompareRoot(const Operand& with,
- Heap::RootListIndex index) {
- // see ROOT_ACCESSOR macro in factory.h
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
-}
-
-
-void MacroAssembler::CmpObjectType(Register heap_object,
- InstanceType type,
- Register map) {
- mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- CmpInstanceType(map, type);
-}
-
-
-void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
- cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
- static_cast<int8_t>(type));
-}
-
-
-void MacroAssembler::CheckFastElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleyElementValue);
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleySmiElementValue);
- j(below_equal, fail, distance);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleyElementValue);
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleySmiElementValue);
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
- Register maybe_number,
- Register elements,
- Register key,
- Register scratch1,
- XMMRegister scratch2,
- Label* fail,
- bool specialize_for_processor,
- int elements_offset) {
- Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
- JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
- CheckMap(maybe_number,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
- // Double value, canonicalize NaN.
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
- cmp(FieldOperand(maybe_number, offset),
- Immediate(kNaNOrInfinityLowerBoundUpper32));
- j(greater_equal, &maybe_nan, Label::kNear);
-
- bind(&not_nan);
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
- if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatures::Scope use_sse2(SSE2);
- movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- movdbl(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset),
- scratch2);
- } else {
- fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- fstp_d(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset));
- }
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- j(greater, &is_nan, Label::kNear);
- cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
- j(zero, &not_nan);
- bind(&is_nan);
- if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatures::Scope use_sse2(SSE2);
- movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
- } else {
- fld_d(Operand::StaticVariable(canonical_nan_reference));
- }
- jmp(&have_double_value, Label::kNear);
-
- bind(&smi_value);
- // Value is a smi. Convert to a double and store.
- // Preserve original value.
- mov(scratch1, maybe_number);
- SmiUntag(scratch1);
- if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatures::Scope fscope(SSE2);
- cvtsi2sd(scratch2, scratch1);
- movdbl(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset),
- scratch2);
- } else {
- push(scratch1);
- fild_s(Operand(esp, 0));
- pop(scratch1);
- fstp_d(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset));
- }
- bind(&done);
-}
-
-
-void MacroAssembler::CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
- cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- j(equal, early_success, Label::kNear);
- cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(current_map));
- }
- }
- }
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- Label success;
- CompareMap(obj, map, &success, mode);
- j(not_equal, fail);
- bind(&success);
-}
-
-
-void MacroAssembler::DispatchMap(Register obj,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type) {
- Label fail;
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, &fail);
- }
- cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
- j(equal, success);
-
- bind(&fail);
-}
-
-
-Condition MacroAssembler::IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type) {
- mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- test(instance_type, Immediate(kIsNotStringMask));
- return zero;
-}
-
-
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
- sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- cmp(scratch,
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- j(above, fail);
-}
-
-
-void MacroAssembler::FCmp() {
- if (CpuFeatures::IsSupported(CMOV)) {
- fucomip();
- fstp(0);
- } else {
- fucompp();
- push(eax);
- fnstsw_ax();
- sahf();
- pop(eax);
- }
-}
-
-
-void MacroAssembler::AssertNumber(Register object) {
- if (emit_debug_code()) {
- Label ok;
- JumpIfSmi(object, &ok);
- cmp(FieldOperand(object, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Check(equal, "Operand not a number");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
- test(object, Immediate(kSmiTagMask));
- Check(equal, "Operand is not a smi");
- }
-}
-
-
-void MacroAssembler::AssertString(Register object) {
- if (emit_debug_code()) {
- test(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi and not a string");
- push(object);
- mov(object, FieldOperand(object, HeapObject::kMapOffset));
- CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
- Check(below, "Operand is not a string");
- }
-}
-
-
-void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
- test(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi");
- }
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(ebp);
- mov(ebp, esp);
- push(esi);
- push(Immediate(Smi::FromInt(type)));
- push(Immediate(CodeObject()));
- if (emit_debug_code()) {
- cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
- Check(not_equal, "code object not properly patched");
- }
-}
-
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- if (emit_debug_code()) {
- cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(type)));
- Check(equal, "stack frame types must match");
- }
- leave();
-}
-
-
-void MacroAssembler::EnterExitFramePrologue() {
- // Set up the frame structure on the stack.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
- push(ebp);
- mov(ebp, esp);
-
- // Reserve room for entry stack pointer and push the code object.
- ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // Saved entry sp, patched before call.
- push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
-
- // Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
- isolate());
- ExternalReference context_address(Isolate::kContextAddress,
- isolate());
- mov(Operand::StaticVariable(c_entry_fp_address), ebp);
- mov(Operand::StaticVariable(context_address), esi);
-}
-
-
-void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
- // Optionally save all XMM registers.
- if (save_doubles) {
- CpuFeatures::Scope scope(SSE2);
- int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
- sub(esp, Immediate(space));
- const int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
- }
- } else {
- sub(esp, Immediate(argc * kPointerSize));
- }
-
- // Get the required frame alignment for the OS.
- const int kFrameAlignment = OS::ActivationFrameAlignment();
- if (kFrameAlignment > 0) {
- ASSERT(IsPowerOf2(kFrameAlignment));
- and_(esp, -kFrameAlignment);
- }
-
- // Patch the saved entry sp.
- mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
-}
-
-
-void MacroAssembler::EnterExitFrame(bool save_doubles) {
- EnterExitFramePrologue();
-
- // Set up argc and argv in callee-saved registers.
- int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- mov(edi, eax);
- lea(esi, Operand(ebp, eax, times_4, offset));
-
- // Reserve space for argc, argv and isolate.
- EnterExitFrameEpilogue(3, save_doubles);
-}
-
-
-void MacroAssembler::EnterApiExitFrame(int argc) {
- EnterExitFramePrologue();
- EnterExitFrameEpilogue(argc, false);
-}
-
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
- // Optionally restore all XMM registers.
- if (save_doubles) {
- CpuFeatures::Scope scope(SSE2);
- const int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
- }
- }
-
- // Get the return address from the stack and restore the frame pointer.
- mov(ecx, Operand(ebp, 1 * kPointerSize));
- mov(ebp, Operand(ebp, 0 * kPointerSize));
-
- // Pop the arguments and the receiver from the caller stack.
- lea(esp, Operand(esi, 1 * kPointerSize));
-
- // Push the return address to get ready to return.
- push(ecx);
-
- LeaveExitFrameEpilogue();
-}
-
-void MacroAssembler::LeaveExitFrameEpilogue() {
- // Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Isolate::kContextAddress, isolate());
- mov(esi, Operand::StaticVariable(context_address));
-#ifdef DEBUG
- mov(Operand::StaticVariable(context_address), Immediate(0));
-#endif
-
- // Clear the top frame.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
- isolate());
- mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
-}
-
-
-void MacroAssembler::LeaveApiExitFrame() {
- mov(esp, ebp);
- pop(ebp);
-
- LeaveExitFrameEpilogue();
-}
-
-
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // We will build up the handler from the bottom by pushing on the stack.
- // First push the frame pointer and context.
- if (kind == StackHandler::JS_ENTRY) {
- // The frame pointer does not point to a JS frame so we save NULL for
- // ebp. We expect the code throwing an exception to check ebp before
- // dereferencing it to restore the context.
- push(Immediate(0)); // NULL frame pointer.
- push(Immediate(Smi::FromInt(0))); // No context.
- } else {
- push(ebp);
- push(esi);
- }
- // Push the state and the code object.
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
- push(Immediate(state));
- Push(CodeObject());
-
- // Link the current handler as the next handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- push(Operand::StaticVariable(handler_address));
- // Set this new handler as the current one.
- mov(Operand::StaticVariable(handler_address), esp);
-}
-
-
-void MacroAssembler::PopTryHandler() {
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- pop(Operand::StaticVariable(handler_address));
- add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
-}
-
-
-void MacroAssembler::JumpToHandlerEntry() {
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- // eax = exception, edi = code object, edx = state.
- mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
- shr(edx, StackHandler::kKindWidth);
- mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
- SmiUntag(edx);
- lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
- jmp(edi);
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in eax.
- if (!value.is(eax)) {
- mov(eax, value);
- }
- // Drop the stack pointer to the top of the top handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- mov(esp, Operand::StaticVariable(handler_address));
- // Restore the next handler.
- pop(Operand::StaticVariable(handler_address));
-
- // Remove the code object and state, compute the handler address in edi.
- pop(edi); // Code object.
- pop(edx); // Index and state.
-
- // Restore the context and frame pointer.
- pop(esi); // Context.
- pop(ebp); // Frame pointer.
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
- // ebp or esi.
- Label skip;
- test(esi, esi);
- j(zero, &skip, Label::kNear);
- mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
- bind(&skip);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in eax.
- if (!value.is(eax)) {
- mov(eax, value);
- }
- // Drop the stack pointer to the top of the top stack handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- mov(esp, Operand::StaticVariable(handler_address));
-
- // Unwind the handlers until the top ENTRY handler is found.
- Label fetch_next, check_kind;
- jmp(&check_kind, Label::kNear);
- bind(&fetch_next);
- mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
-
- bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- test(Operand(esp, StackHandlerConstants::kStateOffset),
- Immediate(StackHandler::KindField::kMask));
- j(not_zero, &fetch_next);
-
- // Set the top handler address to next handler past the top ENTRY handler.
- pop(Operand::StaticVariable(handler_address));
-
- // Remove the code object and state, compute the handler address in edi.
- pop(edi); // Code object.
- pop(edx); // Index and state.
-
- // Clear the context pointer and frame pointer (0 was saved in the handler).
- pop(esi);
- pop(ebp);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
-
- ASSERT(!holder_reg.is(scratch));
-
- // Load current lexical context from the stack frame.
- mov(scratch, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- // When generating debug code, make sure the lexical context is set.
- if (emit_debug_code()) {
- cmp(scratch, Immediate(0));
- Check(not_equal, "we should not have an empty lexical context");
- }
- // Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- mov(scratch, FieldOperand(scratch, offset));
- mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- push(scratch);
- // Read the first word and compare to native_context_map.
- mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- cmp(scratch, isolate()->factory()->native_context_map());
- Check(equal, "JSGlobalObject::native_context should be a native context.");
- pop(scratch);
- }
-
- // Check if both contexts are the same.
- cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- j(equal, &same_contexts);
-
- // Compare security tokens, save holder_reg on the stack so we can use it
- // as a temporary register.
- //
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
- push(holder_reg);
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- mov(holder_reg,
- FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- cmp(holder_reg, isolate()->factory()->null_value());
- Check(not_equal, "JSGlobalProxy::context() should not be null.");
-
- push(holder_reg);
- // Read the first word and compare to native_context_map(),
- mov(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- cmp(holder_reg, isolate()->factory()->native_context_map());
- Check(equal, "JSGlobalObject::native_context should be a native context.");
- pop(holder_reg);
- }
-
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
- mov(scratch, FieldOperand(scratch, token_offset));
- cmp(scratch, FieldOperand(holder_reg, token_offset));
- pop(holder_reg);
- j(not_equal, miss);
-
- bind(&same_contexts);
-}
-
-
-// Compute the hash code from the untagged key. This must be kept in sync
-// with ComputeIntegerHash in utils.h.
-//
-// Note: r0 will contain hash code
-void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
- // Xor original key with a seed.
- if (Serializer::enabled()) {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(scratch, Immediate(Heap::kHashSeedRootIndex));
- mov(scratch,
- Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
- SmiUntag(scratch);
- xor_(r0, scratch);
- } else {
- int32_t seed = isolate()->heap()->HashSeed();
- xor_(r0, Immediate(seed));
- }
-
- // hash = ~hash + (hash << 15);
- mov(scratch, r0);
- not_(r0);
- shl(scratch, 15);
- add(r0, scratch);
- // hash = hash ^ (hash >> 12);
- mov(scratch, r0);
- shr(scratch, 12);
- xor_(r0, scratch);
- // hash = hash + (hash << 2);
- lea(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- mov(scratch, r0);
- shr(scratch, 4);
- xor_(r0, scratch);
- // hash = hash * 2057;
- imul(r0, r0, 2057);
- // hash = hash ^ (hash >> 16);
- mov(scratch, r0);
- shr(scratch, 16);
- xor_(r0, scratch);
-}
-
-
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver and is unchanged.
- //
- // key - holds the smi key on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeds and we fall through.
-
- Label done;
-
- GetNumberHash(r0, r1);
-
- // Compute capacity mask.
- mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
- shr(r1, kSmiTagSize); // convert smi to int
- dec(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- mov(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(r2, r1);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
- lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- cmp(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- SeededNumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
- j(equal, &done);
- } else {
- j(not_equal, miss);
- }
- }
-
- bind(&done);
- // Check that the value is a normal propety.
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
- test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
- j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
-void MacroAssembler::LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Just return if allocation top is already known.
- if ((flags & RESULT_CONTAINS_TOP) != 0) {
- // No use of scratch if allocation top is provided.
- ASSERT(scratch.is(no_reg));
-#ifdef DEBUG
- // Assert that result actually contains top on entry.
- cmp(result, Operand::StaticVariable(new_space_allocation_top));
- Check(equal, "Unexpected allocation top");
-#endif
- return;
- }
-
- // Move address of new object to result. Use scratch register if available.
- if (scratch.is(no_reg)) {
- mov(result, Operand::StaticVariable(new_space_allocation_top));
- } else {
- mov(scratch, Immediate(new_space_allocation_top));
- mov(result, Operand(scratch, 0));
- }
-}
-
-
-void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch) {
- if (emit_debug_code()) {
- test(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, "Unaligned allocation in new space");
- }
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Update new top. Use scratch if available.
- if (scratch.is(no_reg)) {
- mov(Operand::StaticVariable(new_space_allocation_top), result_end);
- } else {
- mov(Operand(scratch, 0), result_end);
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Immediate(0x7091));
- if (result_end.is_valid()) {
- mov(result_end, Immediate(0x7191));
- }
- if (scratch.is_valid()) {
- mov(scratch, Immediate(0x7291));
- }
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- Register top_reg = result_end.is_valid() ? result_end : result;
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- if (!top_reg.is(result)) {
- mov(top_reg, result);
- }
- add(top_reg, Immediate(object_size));
- j(carry, gc_required);
- cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch);
-
- // Tag result if requested.
- bool tag_result = (flags & TAG_OBJECT) != 0;
- if (top_reg.is(result)) {
- if (tag_result) {
- sub(result, Immediate(object_size - kHeapObjectTag));
- } else {
- sub(result, Immediate(object_size));
- }
- } else if (tag_result) {
- ASSERT(kHeapObjectTag == 1);
- inc(result);
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(
- int header_size,
- ScaleFactor element_size,
- Register element_count,
- RegisterValueType element_count_type,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- ASSERT((flags & SIZE_IN_WORDS) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Immediate(0x7091));
- mov(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- mov(scratch, Immediate(0x7291));
- }
- // Register element_count is not modified by the function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- // We assume that element_count*element_size + header_size does not
- // overflow.
- if (element_count_type == REGISTER_VALUE_IS_SMI) {
- STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
- STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
- STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
- ASSERT(element_size >= times_2);
- ASSERT(kSmiTagSize == 1);
- element_size = static_cast<ScaleFactor>(element_size - 1);
- } else {
- ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
- }
- lea(result_end, Operand(element_count, element_size, header_size));
- add(result_end, result);
- j(carry, gc_required);
- cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
- j(above, gc_required);
-
- if ((flags & TAG_OBJECT) != 0) {
- ASSERT(kHeapObjectTag == 1);
- inc(result);
- }
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-}
-
-
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Immediate(0x7091));
- mov(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- mov(scratch, Immediate(0x7291));
- }
- // object_size is left unchanged by this function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- if (!object_size.is(result_end)) {
- mov(result_end, object_size);
- }
- add(result_end, result);
- j(carry, gc_required);
- cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
- j(above, gc_required);
-
- // Tag result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- ASSERT(kHeapObjectTag == 1);
- inc(result);
- }
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-}
-
-
-void MacroAssembler::UndoAllocationInNewSpace(Register object) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
-#ifdef DEBUG
- cmp(object, Operand::StaticVariable(new_space_allocation_top));
- Check(below, "Undo allocation of non allocated memory");
-#endif
- mov(Operand::StaticVariable(new_space_allocation_top), object);
-}
-
-
-void MacroAssembler::AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->heap_number_map()));
-}
-
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- ASSERT(kShortSize == 2);
- // scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
-
- // Allocate two byte string in new space.
- AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- REGISTER_VALUE_IS_INT32,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->string_map()));
- mov(scratch1, length);
- SmiTag(scratch1);
- mov(FieldOperand(result, String::kLengthOffset), scratch1);
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- mov(scratch1, length);
- ASSERT(kCharSize == 1);
- add(scratch1, Immediate(kObjectAlignmentMask));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
-
- // Allocate ASCII string in new space.
- AllocateInNewSpace(SeqOneByteString::kHeaderSize,
- times_1,
- scratch1,
- REGISTER_VALUE_IS_INT32,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->ascii_string_map()));
- mov(scratch1, length);
- SmiTag(scratch1);
- mov(FieldOperand(result, String::kLengthOffset), scratch1);
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- int length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- ASSERT(length > 0);
-
- // Allocate ASCII string in new space.
- AllocateInNewSpace(SeqOneByteString::SizeFor(length),
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->ascii_string_map()));
- mov(FieldOperand(result, String::kLengthOffset),
- Immediate(Smi::FromInt(length)));
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->cons_string_map()));
-}
-
-
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->cons_ascii_string_map()));
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->sliced_string_map()));
-}
-
-
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->sliced_ascii_string_map()));
-}
-
-
-// Copy memory, byte-by-byte, from source to destination. Not optimized for
-// long or aligned copies. The contents of scratch and length are destroyed.
-// Source and destination are incremented by length.
-// Many variants of movsb, loop unrolling, word moves, and indexed operands
-// have been tried here already, and this is fastest.
-// A simpler loop is faster on small copies, but 30% slower on large ones.
-// The cld() instruction must have been emitted, to set the direction flag(),
-// before calling this function.
-void MacroAssembler::CopyBytes(Register source,
- Register destination,
- Register length,
- Register scratch) {
- Label loop, done, short_string, short_loop;
- // Experimentation shows that the short string loop is faster if length < 10.
- cmp(length, Immediate(10));
- j(less_equal, &short_string);
-
- ASSERT(source.is(esi));
- ASSERT(destination.is(edi));
- ASSERT(length.is(ecx));
-
- // Because source is 4-byte aligned in our uses of this function,
- // we keep source aligned for the rep_movs call by copying the odd bytes
- // at the end of the ranges.
- mov(scratch, Operand(source, length, times_1, -4));
- mov(Operand(destination, length, times_1, -4), scratch);
- mov(scratch, ecx);
- shr(ecx, 2);
- rep_movs();
- and_(scratch, Immediate(0x3));
- add(destination, scratch);
- jmp(&done);
-
- bind(&short_string);
- test(length, length);
- j(zero, &done);
-
- bind(&short_loop);
- mov_b(scratch, Operand(source, 0));
- mov_b(Operand(destination, 0), scratch);
- inc(source);
- inc(destination);
- dec(length);
- j(not_zero, &short_loop);
-
- bind(&done);
-}
-
-
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler) {
- Label loop, entry;
- jmp(&entry);
- bind(&loop);
- mov(Operand(start_offset, 0), filler);
- add(start_offset, Immediate(kPointerSize));
- bind(&entry);
- cmp(start_offset, end_offset);
- j(less, &loop);
-}
-
-
-void MacroAssembler::BooleanBitTest(Register object,
- int field_offset,
- int bit_index) {
- bit_index += kSmiTagSize + kSmiShiftSize;
- ASSERT(IsPowerOf2(kBitsPerByte));
- int byte_index = bit_index / kBitsPerByte;
- int byte_bit_index = bit_index & (kBitsPerByte - 1);
- test_b(FieldOperand(object, field_offset + byte_index),
- static_cast<byte>(1 << byte_bit_index));
-}
-
-
-
-void MacroAssembler::NegativeZeroTest(Register result,
- Register op,
- Label* then_label) {
- Label ok;
- test(result, result);
- j(not_zero, &ok);
- test(op, op);
- j(sign, then_label);
- bind(&ok);
-}
-
-
-void MacroAssembler::NegativeZeroTest(Register result,
- Register op1,
- Register op2,
- Register scratch,
- Label* then_label) {
- Label ok;
- test(result, result);
- j(not_zero, &ok);
- mov(scratch, op1);
- or_(scratch, op2);
- j(sign, then_label);
- bind(&ok);
-}
-
-
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function.
- CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss);
-
- if (miss_on_bound_function) {
- // If a bound function, go to miss label.
- mov(scratch,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
- SharedFunctionInfo::kBoundFunction);
- j(not_zero, miss);
- }
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
- test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance);
-
- // Get the prototype or initial map from the function.
- mov(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- cmp(result, Immediate(isolate()->factory()->the_hole_value()));
- j(equal, miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CmpObjectType(result, MAP_TYPE, scratch);
- j(not_equal, &done);
-
- // Get the prototype from the initial map.
- mov(result, FieldOperand(result, Map::kPrototypeOffset));
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- mov(result, FieldOperand(result, Map::kConstructorOffset));
-
- // All done.
- bind(&done);
-}
-
-
-void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
- call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::StubReturn(int argc) {
- ASSERT(argc >= 1 && generating_stub());
- ret((argc - 1) * kPointerSize);
-}
-
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- add(esp, Immediate(num_arguments * kPointerSize));
- }
- mov(eax, Immediate(isolate()->factory()->undefined_value()));
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it does not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- and_(hash, String::kArrayIndexValueMask);
- STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0);
- if (String::kHashShift > kSmiTagSize) {
- shr(hash, String::kHashShift - kSmiTagSize);
- }
- if (!index.is(hash)) {
- mov(index, hash);
- }
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(eax, Immediate(function->nargs));
- mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
- : kDontSaveFPRegs);
- CallStub(&ces);
-}
-
-
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
- // If the expected number of arguments of the runtime function is
- // constant, we check that the actual number of arguments match the
- // expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1);
- CallStub(&ces);
-}
-
-
-void MacroAssembler::CallExternalReference(ExternalReference ref,
- int num_arguments) {
- mov(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ref));
-
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(eax, Immediate(num_arguments));
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
-// If true, a Handle<T> returned by value from a function with cdecl calling
-// convention will be returned directly as a value of location_ field in a
-// register eax.
-// If false, it is returned as a pointer to a preallocated by caller memory
-// region. Pointer to this region should be passed to a function as an
-// implicit first argument.
-#if defined(USING_BSD_ABI) || defined(__MINGW32__) || defined(__CYGWIN__)
-static const bool kReturnHandlesDirectly = true;
-#else
-static const bool kReturnHandlesDirectly = false;
-#endif
-
-
-Operand ApiParameterOperand(int index) {
- return Operand(
- esp, (index + (kReturnHandlesDirectly ? 0 : 1)) * kPointerSize);
-}
-
-
-void MacroAssembler::PrepareCallApiFunction(int argc) {
- if (kReturnHandlesDirectly) {
- EnterApiExitFrame(argc);
- // When handles are returned directly we don't have to allocate extra
- // space for and pass an out parameter.
- if (emit_debug_code()) {
- mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
- }
- } else {
- // We allocate two additional slots: return value and pointer to it.
- EnterApiExitFrame(argc + 2);
-
- // The argument slots are filled as follows:
- //
- // n + 1: output slot
- // n: arg n
- // ...
- // 1: arg1
- // 0: pointer to the output slot
-
- lea(esi, Operand(esp, (argc + 1) * kPointerSize));
- mov(Operand(esp, 0 * kPointerSize), esi);
- if (emit_debug_code()) {
- mov(Operand(esi, 0), Immediate(0));
- }
- }
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- int stack_space) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- ExternalReference limit_address =
- ExternalReference::handle_scope_limit_address(isolate());
- ExternalReference level_address =
- ExternalReference::handle_scope_level_address(isolate());
-
- // Allocate HandleScope in callee-save registers.
- mov(ebx, Operand::StaticVariable(next_address));
- mov(edi, Operand::StaticVariable(limit_address));
- add(Operand::StaticVariable(level_address), Immediate(1));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, eax);
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
- // Call the api function.
- call(function_address, RelocInfo::RUNTIME_ENTRY);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, eax);
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
- if (!kReturnHandlesDirectly) {
- // PrepareCallApiFunction saved pointer to the output slot into
- // callee-save register esi.
- mov(eax, Operand(esi, 0));
- }
-
- Label empty_handle;
- Label prologue;
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
-
- // Check if the result handle holds 0.
- test(eax, eax);
- j(zero, &empty_handle);
- // It was non-zero. Dereference to get the result value.
- mov(eax, Operand(eax, 0));
- bind(&prologue);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- mov(Operand::StaticVariable(next_address), ebx);
- sub(Operand::StaticVariable(level_address), Immediate(1));
- Assert(above_equal, "Invalid HandleScope level");
- cmp(edi, Operand::StaticVariable(limit_address));
- j(not_equal, &delete_allocated_handles);
- bind(&leave_exit_frame);
-
- // Check if the function scheduled an exception.
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address(isolate());
- cmp(Operand::StaticVariable(scheduled_exception_address),
- Immediate(isolate()->factory()->the_hole_value()));
- j(not_equal, &promote_scheduled_exception);
-
-#if ENABLE_EXTRA_CHECKS
- // Check if the function returned a valid JavaScript value.
- Label ok;
- Register return_value = eax;
- Register map = ecx;
-
- JumpIfSmi(return_value, &ok, Label::kNear);
- mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
-
- CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- j(below, &ok, Label::kNear);
-
- CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- j(above_equal, &ok, Label::kNear);
-
- cmp(map, isolate()->factory()->heap_number_map());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->undefined_value());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->true_value());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->false_value());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->null_value());
- j(equal, &ok, Label::kNear);
-
- Abort("API call returned invalid object");
-
- bind(&ok);
-#endif
-
- LeaveApiExitFrame();
- ret(stack_space * kPointerSize);
-
- bind(&empty_handle);
- // It was zero; the result is undefined.
- mov(eax, isolate()->factory()->undefined_value());
- jmp(&prologue);
-
- bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
-
- // HandleScope limit has changed. Delete allocated extensions.
- ExternalReference delete_extensions =
- ExternalReference::delete_handle_scope_extensions(isolate());
- bind(&delete_allocated_handles);
- mov(Operand::StaticVariable(limit_address), edi);
- mov(edi, eax);
- mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
- mov(eax, Immediate(delete_extensions));
- call(eax);
- mov(eax, edi);
- jmp(&leave_exit_frame);
-}
-
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
- // Set the entry point and jump to the C entry runtime stub.
- mov(ebx, Immediate(ext));
- CEntryStub ces(1);
- jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be ecx to
- // follow the calling convention which requires the call type to be
- // in ecx.
- ASSERT(dst.is(ecx));
- if (call_kind == CALL_AS_FUNCTION) {
- // Set to some non-zero smi by updating the least significant
- // byte.
- mov_b(dst, 1 << kSmiTagSize);
- } else {
- // Set to smi zero by clearing the register.
- xor_(dst, dst);
- }
-}
-
-
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- Label::Distance done_near,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- bool definitely_matches = false;
- *definitely_mismatches = false;
- Label invoke;
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- mov(eax, actual.immediate());
- const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- if (expected.immediate() == sentinel) {
- // Don't worry about adapting arguments for builtins that
- // don't want that done. Skip adaption code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- *definitely_mismatches = true;
- mov(ebx, expected.immediate());
- }
- }
- } else {
- if (actual.is_immediate()) {
- // Expected is in register, actual is immediate. This is the
- // case when we invoke function values without going through the
- // IC mechanism.
- cmp(expected.reg(), actual.immediate());
- j(equal, &invoke);
- ASSERT(expected.reg().is(ebx));
- mov(eax, actual.immediate());
- } else if (!expected.reg().is(actual.reg())) {
- // Both expected and actual are in (different) registers. This
- // is the case when we invoke functions using call and apply.
- cmp(expected.reg(), actual.reg());
- j(equal, &invoke);
- ASSERT(actual.reg().is(eax));
- ASSERT(expected.reg().is(ebx));
- }
- }
-
- if (!definitely_matches) {
- Handle<Code> adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (!code_constant.is_null()) {
- mov(edx, Immediate(code_constant));
- add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_operand.is_reg(edx)) {
- mov(edx, code_operand);
- }
-
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
- SetCallKind(ecx, call_kind);
- call(adaptor, RelocInfo::CODE_TARGET);
- call_wrapper.AfterCall();
- if (!*definitely_mismatches) {
- jmp(done, done_near);
- }
- } else {
- SetCallKind(ecx, call_kind);
- jmp(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&invoke);
- }
-}
-
-
-void MacroAssembler::InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag, Label::kNear,
- call_wrapper, call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(ecx, call_kind);
- call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
- jmp(code);
- }
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- Operand dummy(eax, 0);
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, dummy, &done, &definitely_mismatches,
- flag, Label::kNear, call_wrapper, call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code, rmode));
- SetCallKind(ecx, call_kind);
- call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
- jmp(code, rmode);
- }
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeFunction(Register fun,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- ASSERT(fun.is(edi));
- mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- SmiUntag(ebx);
-
- ParameterCount expected(ebx);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Get the function and setup the context.
- LoadHeapObject(edi, function);
- mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- ParameterCount expected(function->shared()->formal_parameter_count());
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Rely on the assertion to check that the number of provided
- // arguments match the expected number of arguments. Fake a
- // parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- GetBuiltinFunction(edi, id);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, expected, flag, call_wrapper, CALL_AS_METHOD);
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
- // Load the JavaScript builtin function from the builtins object.
- mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- mov(target, FieldOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(edi));
- // Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(edi, id);
- // Load the code entry point from the function into the target register.
- mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
-}
-
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- for (int i = 1; i < context_chain_length; i++) {
- mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- }
- } else {
- // Slot is in the current function context. Move it into the
- // destination register in case we store into it (the write barrier
- // cannot be allowed to destroy the context in esi).
- mov(dst, esi);
- }
-
- // We should not have found a with context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (emit_debug_code()) {
- cmp(FieldOperand(dst, HeapObject::kMapOffset),
- isolate()->factory()->with_context_map());
- Check(not_equal, "Variable resolved to with context.");
- }
-}
-
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- // Load the global or builtins object from the current context.
- mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check that the function's map is the same as the expected cached map.
- mov(scratch, Operand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmp(map_in_out, FieldOperand(scratch, offset));
- j(not_equal, no_map_match);
-
- // Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- mov(map_in_out, FieldOperand(scratch, offset));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- mov(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::LoadGlobalContext(Register global_context) {
- // Load the global or builtins object from the current context.
- mov(global_context,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- mov(global_context,
- FieldOperand(global_context, GlobalObject::kNativeContextOffset));
-}
-
-
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- mov(function,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- mov(function,
- FieldOperand(function, GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- mov(function, Operand(function, Context::SlotOffset(index)));
-}
-
-
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map) {
- // Load the initial map. The global functions all have initial maps.
- mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
- jmp(&ok);
- bind(&fail);
- Abort("Global functions must have initial map");
- bind(&ok);
- }
-}
-
-
-// Store the value in register src in the safepoint register stack
-// slot for register dst.
-void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
- mov(SafepointRegisterSlot(dst), src);
-}
-
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
- mov(SafepointRegisterSlot(dst), src);
-}
-
-
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- mov(dst, SafepointRegisterSlot(src));
-}
-
-
-Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
- return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
-}
-
-
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the lowest encoding,
- // which means that lowest encodings are furthest away from
- // the stack pointer.
- ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
- return kNumSafepointRegisters - reg_code - 1;
-}
-
-
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- mov(result, Operand::Cell(cell));
- } else {
- mov(result, object);
- }
-}
-
-
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- push(Operand::Cell(cell));
- } else {
- Push(object);
- }
-}
-
-
-void MacroAssembler::Ret() {
- ret(0);
-}
-
-
-void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
- if (is_uint16(bytes_dropped)) {
- ret(bytes_dropped);
- } else {
- pop(scratch);
- add(esp, Immediate(bytes_dropped));
- push(scratch);
- ret(0);
- }
-}
-
-
-void MacroAssembler::Drop(int stack_elements) {
- if (stack_elements > 0) {
- add(esp, Immediate(stack_elements * kPointerSize));
- }
-}
-
-
-void MacroAssembler::Move(Register dst, Register src) {
- if (!dst.is(src)) {
- mov(dst, src);
- }
-}
-
-
-void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
- }
-}
-
-
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand operand = Operand::StaticVariable(ExternalReference(counter));
- if (value == 1) {
- inc(operand);
- } else {
- add(operand, Immediate(value));
- }
- }
-}
-
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand operand = Operand::StaticVariable(ExternalReference(counter));
- if (value == 1) {
- dec(operand);
- } else {
- sub(operand, Immediate(value));
- }
- }
-}
-
-
-void MacroAssembler::IncrementCounter(Condition cc,
- StatsCounter* counter,
- int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Label skip;
- j(NegateCondition(cc), &skip);
- pushfd();
- IncrementCounter(counter, value);
- popfd();
- bind(&skip);
- }
-}
-
-
-void MacroAssembler::DecrementCounter(Condition cc,
- StatsCounter* counter,
- int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Label skip;
- j(NegateCondition(cc), &skip);
- pushfd();
- DecrementCounter(counter, value);
- popfd();
- bind(&skip);
- }
-}
-
-
-void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (emit_debug_code()) Check(cc, msg);
-}
-
-
-void MacroAssembler::AssertFastElements(Register elements) {
- if (emit_debug_code()) {
- Factory* factory = isolate()->factory();
- Label ok;
- cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(factory->fixed_array_map()));
- j(equal, &ok);
- cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(factory->fixed_double_array_map()));
- j(equal, &ok);
- cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(factory->fixed_cow_array_map()));
- j(equal, &ok);
- Abort("JSObject with fast elements map has slow elements");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::Check(Condition cc, const char* msg) {
- Label L;
- j(cc, &L);
- Abort(msg);
- // will not return here
- bind(&L);
-}
-
-
-void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = OS::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
- Label alignment_as_expected;
- test(esp, Immediate(frame_alignment_mask));
- j(zero, &alignment_as_expected);
- // Abort if stack is not aligned.
- int3();
- bind(&alignment_as_expected);
- }
-}
-
-
-void MacroAssembler::Abort(const char* msg) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
-#ifdef DEBUG
- if (msg != NULL) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
-#endif
-
- push(eax);
- push(Immediate(p0));
- push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
- // Disable stub call restrictions to always allow calls to abort.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
- } else {
- CallRuntime(Runtime::kAbort, 2);
- }
- // will not return here
- int3();
-}
-
-
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
-}
-
-
-void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- mov(dst, FieldOperand(map, Map::kBitField3Offset));
- DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
-}
-
-
-void MacroAssembler::LoadPowerOf2(XMMRegister dst,
- Register scratch,
- int power) {
- ASSERT(is_uintn(power + HeapNumber::kExponentBias,
- HeapNumber::kExponentBits));
- mov(scratch, Immediate(power + HeapNumber::kExponentBias));
- movd(dst, scratch);
- psllq(dst, HeapNumber::kMantissaBits);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- Label* failure) {
- if (!scratch.is(instance_type)) {
- mov(scratch, instance_type);
- }
- and_(scratch,
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
- j(not_equal, failure);
-}
-
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Check that both objects are not smis.
- STATIC_ASSERT(kSmiTag == 0);
- mov(scratch1, object1);
- and_(scratch1, object2);
- JumpIfSmi(scratch1, failure);
-
- // Load instance type for both strings.
- mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
- mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
- movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat ASCII strings.
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- // Interleave bits from both instance types and compare them in one check.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- and_(scratch1, kFlatAsciiStringMask);
- and_(scratch2, kFlatAsciiStringMask);
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
- j(not_equal, failure);
-}
-
-
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
- int frame_alignment = OS::ActivationFrameAlignment();
- if (frame_alignment != 0) {
- // Make stack end at alignment and make room for num_arguments words
- // and the original value of esp.
- mov(scratch, esp);
- sub(esp, Immediate((num_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frame_alignment));
- and_(esp, -frame_alignment);
- mov(Operand(esp, num_arguments * kPointerSize), scratch);
- } else {
- sub(esp, Immediate(num_arguments * kPointerSize));
- }
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- // Trashing eax is ok as it will be the return value.
- mov(eax, Immediate(function));
- CallCFunction(eax, num_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
- int num_arguments) {
- ASSERT(has_frame());
- // Check stack alignment.
- if (emit_debug_code()) {
- CheckStackAlignment();
- }
-
- call(function);
- if (OS::ActivationFrameAlignment() != 0) {
- mov(esp, Operand(esp, num_arguments * kPointerSize));
- } else {
- add(esp, Immediate(num_arguments * kPointerSize));
- }
-}
-
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
-}
-
-
-CodePatcher::CodePatcher(byte* address, int size)
- : address_(address),
- size_(size),
- masm_(NULL, address, size + Assembler::kGap) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- CPU::FlushICache(address_, size_);
-
- // Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- ASSERT(cc == zero || cc == not_zero);
- if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
- } else {
- mov(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
- }
- if (mask < (1 << kBitsPerByte)) {
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
- static_cast<uint8_t>(mask));
- } else {
- test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
- }
- j(cc, condition_met, condition_met_distance);
-}
-
-
-void MacroAssembler::CheckPageFlagForMap(
- Handle<Map> map,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- ASSERT(cc == zero || cc == not_zero);
- Page* page = Page::FromAddress(map->address());
- ExternalReference reference(ExternalReference::page_flags(page));
- // The inlined static address check of the page's flags relies
- // on maps never being compacted.
- ASSERT(!isolate()->heap()->mark_compact_collector()->
- IsOnEvacuationCandidate(*map));
- if (mask < (1 << kBitsPerByte)) {
- test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
- } else {
- test(Operand::StaticVariable(reference), Immediate(mask));
- }
- j(cc, condition_met, condition_met_distance);
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_near) {
- HasColor(object, scratch0, scratch1,
- on_black, on_black_near,
- 1, 0); // kBlackBitPattern.
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* has_color,
- Label::Distance has_color_distance,
- int first_bit,
- int second_bit) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
- add(mask_scratch, mask_scratch); // Shift left 1 by adding.
- j(zero, &word_boundary, Label::kNear);
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
- jmp(&other_color, Label::kNear);
-
- bind(&word_boundary);
- test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
-
- j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
- bind(&other_color);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
- mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- and_(bitmap_reg, addr_reg);
- mov(ecx, addr_reg);
- int shift =
- Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
- shr(ecx, shift);
- and_(ecx,
- (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
-
- add(bitmap_reg, ecx);
- mov(ecx, addr_reg);
- shr(ecx, kPointerSizeLog2);
- and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
- mov(mask_reg, Immediate(1));
- shl_cl(mask_reg);
-}
-
-
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* value_is_white_and_not_data,
- Label::Distance distance) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- Label done;
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(not_zero, &done, Label::kNear);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- push(mask_scratch);
- // shl. May overflow making the check conservative.
- add(mask_scratch, mask_scratch);
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- pop(mask_scratch);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = ecx; // Holds map while checking type.
- Register length = ecx; // Holds length of object after checking type.
- Label not_heap_number;
- Label is_data_object;
-
- // Check for heap-number
- mov(map, FieldOperand(value, HeapObject::kMapOffset));
- cmp(map, FACTORY->heap_number_map());
- j(not_equal, &not_heap_number, Label::kNear);
- mov(length, Immediate(HeapNumber::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_heap_number);
- // Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = ecx;
- movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
- j(not_zero, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- Label not_external;
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- test_b(instance_type, kExternalStringTag);
- j(zero, &not_external, Label::kNear);
- mov(length, Immediate(ExternalString::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_external);
- // Sequential string, either ASCII or UC16.
- ASSERT(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- add(length, Immediate(0x04));
- // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
- // by 2. If we multiply the string length as smi by this, it still
- // won't overflow a 32-bit value.
- ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
- ASSERT(SeqOneByteString::kMaxSize <=
- static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, 2 + kSmiTagSize + kSmiShiftSize);
- add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
- add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
- length);
- if (emit_debug_code()) {
- mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
- Check(less_equal, "Live Bytes Count overflow chunk size");
- }
-
- bind(&done);
-}
-
-
-void MacroAssembler::EnumLength(Register dst, Register map) {
- STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- mov(dst, FieldOperand(map, Map::kBitField3Offset));
- and_(dst, Immediate(Smi::FromInt(Map::EnumLengthBits::kMask)));
-}
-
-
-void MacroAssembler::CheckEnumCache(Label* call_runtime) {
- Label next, start;
- mov(ecx, eax);
-
- // Check if the enum length field is properly initialized, indicating that
- // there is an enum cache.
- mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-
- EnumLength(edx, ebx);
- cmp(edx, Immediate(Smi::FromInt(Map::kInvalidEnumCache)));
- j(equal, call_runtime);
-
- jmp(&start);
-
- bind(&next);
- mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-
- // For all objects but the receiver, check that the cache is empty.
- EnumLength(edx, ebx);
- cmp(edx, Immediate(Smi::FromInt(0)));
- j(not_equal, call_runtime);
-
- bind(&start);
-
- // Check that there are no elements. Register rcx contains the current JS
- // object we've reached through the prototype chain.
- mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
- cmp(ecx, isolate()->factory()->empty_fixed_array());
- j(not_equal, call_runtime);
-
- mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- cmp(ecx, isolate()->factory()->null_value());
- j(not_equal, &next);
-}
-
-
-void MacroAssembler::TestJSArrayForAllocationSiteInfo(
- Register receiver_reg,
- Register scratch_reg) {
- Label no_info_available;
-
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- lea(scratch_reg, Operand(receiver_reg,
- JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
- cmp(scratch_reg, Immediate(new_space_start));
- j(less, &no_info_available);
- cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
- j(greater, &no_info_available);
- cmp(MemOperand(scratch_reg, -AllocationSiteInfo::kSize),
- Immediate(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
- bind(&no_info_available);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
deleted file mode 100644
index 3a6e17b..0000000
--- a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
+++ /dev/null
@@ -1,1018 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
-#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
-
-#include "assembler.h"
-#include "frames.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-// Convenience for platform-independent signatures. We do not normally
-// distinguish memory operands from other operands on ia32.
-typedef Operand MemOperand;
-
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-
-
-enum RegisterValueType {
- REGISTER_VALUE_IS_SMI,
- REGISTER_VALUE_IS_INT32
-};
-
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
-
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
- public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
-
- // ---------------------------------------------------------------------------
- // GC Support
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- void CheckPageFlagForMap(
- Handle<Map> map,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, zero, branch, distance);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, not_zero, branch, distance);
- }
-
- // Check if an object has a given incremental marking color. Also uses ecx!
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- Label::Distance has_color_distance,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_distance = Label::kFar);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Label* object_is_white_and_not_data,
- Label::Distance distance);
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
- void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // As above, but the offset has the tag presubtracted. For use with
- // Operand(reg, off).
- void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- save_fp,
- remembered_set_action,
- smi_check);
- }
-
- // Notify the garbage collector that we wrote a pointer into a fixed array.
- // |array| is the array being stored into, |value| is the
- // object being stored. |index| is the array index represented as a
- // Smi. All registers are clobbered by the operation RecordWriteArray
- // filters out smis so it does not update the write barrier if the
- // value is a smi.
- void RecordWriteArray(
- Register array,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // For page containing |object| mark region covering |address|
- // dirty. |object| is the object being stored into, |value| is the
- // object being stored. The address and value registers are clobbered by the
- // operation. RecordWrite filters out smis so it does not update the
- // write barrier if the value is a smi.
- void RecordWrite(
- Register object,
- Register address,
- Register value,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // For page containing |object| mark the region covering the object's map
- // dirty. |object| is the object being stored into, |map| is the Map object
- // that was stored.
- void RecordWriteForMap(
- Register object,
- Handle<Map> map,
- Register scratch1,
- Register scratch2,
- SaveFPRegsMode save_fp);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
-#endif
-
- // Enter specific kind of exit frame. Expects the number of
- // arguments in register eax and sets up the number of arguments in
- // register edi and the pointer to the first argument in register
- // esi.
- void EnterExitFrame(bool save_doubles);
-
- void EnterApiExitFrame(int argc);
-
- // Leave the current exit frame. Expects the return value in
- // register eax:edx (untouched) and the pointer to the first
- // argument in register esi.
- void LeaveExitFrame(bool save_doubles);
-
- // Leave the current exit frame. Expects the return value in
- // register eax (untouched).
- void LeaveApiExitFrame();
-
- // Find the function context up the context chain.
- void LoadContext(Register dst, int context_chain_length);
-
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
- void LoadGlobalContext(Register global_context);
-
- // Load the global function with the given index.
- void LoadGlobalFunction(int index, Register function);
-
- // Load the initial map from the global function. The registers
- // function and map can be the same.
- void LoadGlobalFunctionInitialMap(Register function, Register map);
-
- // Push and pop the registers that can hold pointers.
- void PushSafepointRegisters() { pushad(); }
- void PopSafepointRegisters() { popad(); }
- // Store the value in register/immediate src in the safepoint
- // register stack slot for register dst.
- void StoreToSafepointRegisterSlot(Register dst, Register src);
- void StoreToSafepointRegisterSlot(Register dst, Immediate src);
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
-
- void LoadHeapObject(Register result, Handle<HeapObject> object);
- void PushHeapObject(Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Set(result, Immediate(object));
- }
- }
-
- // ---------------------------------------------------------------------------
- // JavaScript invokes
-
- // Set up call kind marking in ecx. The method takes ecx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
- }
-
- void InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
- // Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
-
- // Expression support
- void Set(Register dst, const Immediate& x);
- void Set(const Operand& dst, const Immediate& x);
-
- // Support for constant splitting.
- bool IsUnsafeImmediate(const Immediate& x);
- void SafeSet(Register dst, const Immediate& x);
- void SafePush(const Immediate& x);
-
- // Compare against a known root, e.g. undefined, null, true, ...
- void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
-
- // Compare object type for heap object.
- // Incoming register is heap_object and outgoing register is map.
- void CmpObjectType(Register heap_object, InstanceType type, Register map);
-
- // Compare instance type for map.
- void CmpInstanceType(Register map, InstanceType type);
-
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements, otherwise jump to fail.
- void StoreNumberToDoubleElements(Register maybe_number,
- Register elements,
- Register key,
- Register scratch1,
- XMMRegister scratch2,
- Label* fail,
- bool specialize_for_processor,
- int offset = 0);
-
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
- // result of map compare. If multiple map compares are required, the compare
- // sequences branches to early_success.
- void CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // Check if the map of an object is equal to a specified map and branch to a
- // specified target if equal. Skip the smi check if not required (object is
- // known to be a heap object)
- void DispatchMap(Register obj,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type);
-
- // Check if the object in register heap_object is a string. Afterwards the
- // register map contains the object map and the register instance_type
- // contains the instance_type. The registers map and instance_type can be the
- // same in which case it contains the instance type afterwards. Either of the
- // registers map and instance_type can be the same as heap_object.
- Condition IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type);
-
- // Check if a heap object's type is in the JSObject range, not including
- // JSFunction. The object's map will be loaded in the map register.
- // Any or all of the three registers may be the same.
- // The contents of the scratch register will always be overwritten.
- void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- // The contents of the scratch register will be overwritten.
- void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
-
- // FCmp is similar to integer cmp, but requires unsigned
- // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
- void FCmp();
-
- void ClampUint8(Register reg);
-
- void ClampDoubleToUint8(XMMRegister input_reg,
- XMMRegister scratch_reg,
- Register result_reg);
-
-
- // Smi tagging support.
- void SmiTag(Register reg) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- add(reg, reg);
- }
- void SmiUntag(Register reg) {
- sar(reg, kSmiTagSize);
- }
-
- // Modifies the register even if it does not contain a Smi!
- void SmiUntag(Register reg, Label* is_smi) {
- STATIC_ASSERT(kSmiTagSize == 1);
- sar(reg, kSmiTagSize);
- STATIC_ASSERT(kSmiTag == 0);
- j(not_carry, is_smi);
- }
-
- void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
-
- // Jump the register contains a smi.
- inline void JumpIfSmi(Register value,
- Label* smi_label,
- Label::Distance distance = Label::kFar) {
- test(value, Immediate(kSmiTagMask));
- j(zero, smi_label, distance);
- }
- // Jump if the operand is a smi.
- inline void JumpIfSmi(Operand value,
- Label* smi_label,
- Label::Distance distance = Label::kFar) {
- test(value, Immediate(kSmiTagMask));
- j(zero, smi_label, distance);
- }
- // Jump if register contain a non-smi.
- inline void JumpIfNotSmi(Register value,
- Label* not_smi_label,
- Label::Distance distance = Label::kFar) {
- test(value, Immediate(kSmiTagMask));
- j(not_zero, not_smi_label, distance);
- }
-
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void EnumLength(Register dst, Register map);
- void NumberOfOwnDescriptors(Register dst, Register map);
-
- template<typename Field>
- void DecodeField(Register reg) {
- static const int shift = Field::kShift;
- static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
- sar(reg, shift);
- and_(reg, Immediate(mask));
- }
- void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
-
- // Abort execution if argument is not a number, enabled via --debug-code.
- void AssertNumber(Register object);
-
- // Abort execution if argument is not a smi, enabled via --debug-code.
- void AssertSmi(Register object);
-
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
-
- // Abort execution if argument is not a string, enabled via --debug-code.
- void AssertString(Register object);
-
- // ---------------------------------------------------------------------------
- // Exception handling
-
- // Push a new try handler and link it into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
-
- // Unlink the stack handler on top of the stack from the try handler chain.
- void PopTryHandler();
-
- // Throw to the top handler in the try hander chain.
- void Throw(Register value);
-
- // Throw past all JS frames to the top JS entry frame.
- void ThrowUncatchable(Register value);
-
- // ---------------------------------------------------------------------------
- // Inline caching support
-
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, but the scratch register is clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
- void GetNumberHash(Register r0, Register scratch);
-
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result);
-
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space. If the new space is exhausted control
- // continues at the gc_required label. The allocated object is returned in
- // result and end of the new object is returned in result_end. The register
- // scratch can be passed as no_reg in which case an additional object
- // reference will be added to the reloc info. The returned pointers in result
- // and result_end have not yet been tagged as heap objects. If
- // result_contains_top_on_entry is true the content of result is known to be
- // the allocation top on entry (could be result_end from a previous call to
- // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
- // should be no_reg as it is never used.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- RegisterValueType element_count_type,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. Make sure that no pointers are left to the
- // object(s) no longer allocated as they would be invalid when allocation is
- // un-done.
- void UndoAllocationInNewSpace(Register object);
-
- // Allocate a heap number in new space with undefined value. The
- // register scratch2 can be passed as no_reg; the others must be
- // valid registers. Returns tagged pointer in result register, or
- // jumps to gc_required if new space is full.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocate a sequential string. All the header fields of the string object
- // are initialized.
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- int length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocate a raw cons string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocate a raw sliced string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Copy memory, byte-by-byte, from source to destination. Not optimized for
- // long or aligned copies.
- // The contents of index and scratch are destroyed.
- void CopyBytes(Register source,
- Register destination,
- Register length,
- Register scratch);
-
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
-
- // ---------------------------------------------------------------------------
- // Support functions.
-
- // Check a boolean-bit of a Smi field.
- void BooleanBitTest(Register object, int field_offset, int bit_index);
-
- // Check if result is zero and op is negative.
- void NegativeZeroTest(Register result, Register op, Label* then_label);
-
- // Check if result is zero and any of op1 and op2 are negative.
- // Register scratch is destroyed, and it must be different from op2.
- void NegativeZeroTest(Register result, Register op1, Register op2,
- Register scratch, Label* then_label);
-
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function = false);
-
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
- // ---------------------------------------------------------------------------
- // Runtime calls
-
- // Call a code stub. Generate the code if necessary.
- void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // Tail call a code stub (jump). Generate the code if necessary.
- void TailCallStub(CodeStub* stub);
-
- // Return from a code stub after popping its arguments.
- void StubReturn(int argc);
-
- // Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
-
- // Convenience function: call an external reference.
- void CallExternalReference(ExternalReference ref, int num_arguments);
-
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, arguments must be stored in esp[0], esp[4],
- // etc., not pushed. The argument count assumes all arguments are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_arguments, Register scratch);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
-
- // Prepares stack to put arguments (aligns and so on). Reserves
- // space for return value if needed (assumes the return value is a handle).
- // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
- // etc. Saves context (esi). If space was reserved for return value then
- // stores the pointer to the reserved slot into esi.
- void PrepareCallApiFunction(int argc);
-
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Clobbers ebx, edi and
- // caller-save registers. Restores context. On return removes
- // stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Address function_address, int stack_space);
-
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& ext);
-
- // ---------------------------------------------------------------------------
- // Utilities
-
- void Ret();
-
- // Return and drop arguments from stack, where the number of arguments
- // may be bigger than 2^16 - 1. Requires a scratch register.
- void Ret(int bytes_dropped, Register scratch);
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the esp register.
- void Drop(int element_count);
-
- void Call(Label* target) { call(target); }
-
- // Emit call to the code we are currently generating.
- void CallSelf() {
- Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
- call(self, RelocInfo::CODE_TARGET);
- }
-
- // Move if the registers are not identical.
- void Move(Register target, Register source);
-
- // Push a handle value.
- void Push(Handle<Object> handle) { push(Immediate(handle)); }
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
-
- Handle<Object> CodeObject() {
- ASSERT(!code_object_.is_null());
- return code_object_;
- }
-
-
- // ---------------------------------------------------------------------------
- // StatsCounter support
-
- void SetCounter(StatsCounter* counter, int value);
- void IncrementCounter(StatsCounter* counter, int value);
- void DecrementCounter(StatsCounter* counter, int value);
- void IncrementCounter(Condition cc, StatsCounter* counter, int value);
- void DecrementCounter(Condition cc, StatsCounter* counter, int value);
-
-
- // ---------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, const char* msg);
-
- void AssertFastElements(Register elements);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg);
-
- // Print a message to stdout and abort execution.
- void Abort(const char* msg);
-
- // Check that the stack is aligned.
- void CheckStackAlignment();
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
- // ---------------------------------------------------------------------------
- // String utilities.
-
- // Check whether the instance type represents a flat ASCII string. Jump to the
- // label if not. If the instance type can be scratched specify same register
- // for both instance type and scratch.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
- Register scratch,
- Label* on_not_flat_ascii_string);
-
- // Checks if both objects are sequential ASCII strings, and jumps to label
- // if either is not.
- void JumpIfNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* on_not_flat_ascii_strings);
-
- static int SafepointRegisterStackIndex(Register reg) {
- return SafepointRegisterStackIndex(reg.code());
- }
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
- // Expects object in eax and returns map with validated enum cache
- // in eax. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Label* call_runtime);
-
- // AllocationSiteInfo support. Arrays may have an associated
- // AllocationSiteInfo object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver_reg should point to the array object.
- // scratch_reg gets clobbered.
- // If allocation info is present, conditional code is set to equal
- void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
- Register scratch_reg);
-
- private:
- bool generating_stub_;
- bool allow_stub_calls_;
- bool has_frame_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
- // Helper functions for generating invokes.
- void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- Label::Distance done_distance,
- const CallWrapper& call_wrapper = NullCallWrapper(),
- CallKind call_kind = CALL_AS_METHOD);
-
- void EnterExitFramePrologue();
- void EnterExitFrameEpilogue(int argc, bool save_doubles);
-
- void LeaveExitFrameEpilogue();
-
- // Allocation support helpers.
- void LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags);
- void UpdateAllocationTopHelper(Register result_end, Register scratch);
-
- // Helper for PopHandleScope. Allowed to perform a GC and returns
- // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
- // possibly returns a failure object indicating an allocation failure.
- MUST_USE_RESULT MaybeObject* PopHandleScopeHelper(Register saved,
- Register scratch,
- bool gc_allowed);
-
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Uses ecx as scratch and leaves addr_reg
- // unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
-
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
- // Compute memory operands for safepoint stack slots.
- Operand SafepointRegisterSlot(Register reg);
- static int SafepointRegisterStackIndex(int reg_code);
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class StandardFrame;
-};
-
-
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. Is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion.
-class CodePatcher {
- public:
- CodePatcher(byte* address, int size);
- virtual ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
-
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-// Generate an Operand for loading a field from an object.
-inline Operand FieldOperand(Register object, int offset) {
- return Operand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate an Operand for loading an indexed field from an object.
-inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
- int offset) {
- return Operand(object, index, scale, offset - kHeapObjectTag);
-}
-
-
-inline Operand ContextOperand(Register context, int index) {
- return Operand(context, Context::SlotOffset(index));
-}
-
-
-inline Operand GlobalObjectOperand() {
- return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
-}
-
-static inline Operand QmlGlobalObjectOperand() {
- return ContextOperand(esi, Context::QML_GLOBAL_OBJECT_INDEX);
-}
-
-// Generates an Operand for saving parameters after PrepareCallApiFunction.
-Operand ApiParameterOperand(int index);
-
-
-#ifdef GENERATED_CODE_COVERAGE
-extern void LogGeneratedCodeCoverage(const char* file_line);
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) { \
- byte* ia32_coverage_function = \
- reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
- masm->pushfd(); \
- masm->pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
- masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY); \
- masm->pop(eax); \
- masm->popad(); \
- masm->popfd(); \
- } \
- masm->
-#else
-#define ACCESS_MASM(masm) masm->
-#endif
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_MACRO_ASSEMBLER_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc b/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc
deleted file mode 100644
index 49c75e1..0000000
--- a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ /dev/null
@@ -1,1420 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "unicode.h"
-#include "log.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "ia32/regexp-macro-assembler-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-/*
- * This assembler uses the following register assignment convention
- * - edx : Current character. Must be loaded using LoadCurrentCharacter
- * before using any of the dispatch methods. Temporarily stores the
- * index of capture start after a matching pass for a global regexp.
- * - edi : Current position in input, as negative offset from end of string.
- * Please notice that this is the byte offset, not the character offset!
- * - esi : end of input (points to byte after last character in input).
- * - ebp : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - esp : Points to tip of C stack.
- * - ecx : Points to tip of backtrack stack
- *
- * The registers eax and ebx are free to use for computations.
- *
- * Each call to a public method should retain this convention.
- * The stack will have the following structure:
- * - Isolate* isolate (address of the current isolate)
- * - direct_call (if 1, direct call from JavaScript code, if 0
- * call through the runtime system)
- * - stack_area_base (high end of the memory area to use as
- * backtracking stack)
- * - capture array size (may fit multiple sets of matches)
- * - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (address of end of string)
- * - start of input (address of first character in string)
- * - start index (character index of start)
- * - String* input_string (location of a handle containing the string)
- * --- frame alignment (if applicable) ---
- * - return address
- * ebp-> - old ebp
- * - backup of caller esi
- * - backup of caller edi
- * - backup of caller ebx
- * - success counter (only for global regexps to count matches).
- * - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
- * - register 0 ebp[-4] (only positions must be stored in the first
- * - register 1 ebp[-8] num_saved_registers_ registers)
- * - ...
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers starts out as garbage.
- *
- * The data up to the return address must be placed there by the calling
- * code, by calling the code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
- * int start_index,
- * Address start,
- * Address end,
- * int* capture_output_array,
- * bool at_start,
- * byte* stack_area_base,
- * bool direct_call)
- */
-
-#define __ ACCESS_MASM(masm_)
-
-RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- ASSERT_EQ(0, registers_to_save % 2);
- __ jmp(&entry_label_); // We'll write the entry code later.
- __ bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerIA32::~RegExpMacroAssemblerIA32() {
- delete masm_;
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerIA32::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ add(edi, Immediate(by * char_size()));
- }
-}
-
-
-void RegExpMacroAssemblerIA32::AdvanceRegister(int reg, int by) {
- ASSERT(reg >= 0);
- ASSERT(reg < num_registers_);
- if (by != 0) {
- __ add(register_location(reg), Immediate(by));
- }
-}
-
-
-void RegExpMacroAssemblerIA32::Backtrack() {
- CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
- Pop(ebx);
- __ add(ebx, Immediate(masm_->CodeObject()));
- __ jmp(ebx);
-}
-
-
-void RegExpMacroAssemblerIA32::Bind(Label* label) {
- __ bind(label);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacter(uint32_t c, Label* on_equal) {
- __ cmp(current_character(), c);
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterGT(uc16 limit, Label* on_greater) {
- __ cmp(current_character(), limit);
- BranchOrBacktrack(greater, on_greater);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, &not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
- BranchOrBacktrack(equal, on_at_start);
- __ bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
- BranchOrBacktrack(not_equal, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterLT(uc16 limit, Label* on_less) {
- __ cmp(current_character(), limit);
- BranchOrBacktrack(less, on_less);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
-#ifdef DEBUG
- // If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ASCII character.
- if (mode_ == ASCII) {
- ASSERT(String::IsOneByte(str.start(), str.length()));
- }
-#endif
- int byte_length = str.length() * char_size();
- int byte_offset = cp_offset * char_size();
- if (check_end_of_string) {
- // Check that there are at least str.length() characters left in the input.
- __ cmp(edi, Immediate(-(byte_offset + byte_length)));
- BranchOrBacktrack(greater, on_failure);
- }
-
- if (on_failure == NULL) {
- // Instead of inlining a backtrack, (re)use the global backtrack target.
- on_failure = &backtrack_label_;
- }
-
- // Do one character test first to minimize loading for the case that
- // we don't match at all (loading more than one character introduces that
- // chance of reading unaligned and reading across cache boundaries).
- // If the first character matches, expect a larger chance of matching the
- // string, and start loading more characters at a time.
- if (mode_ == ASCII) {
- __ cmpb(Operand(esi, edi, times_1, byte_offset),
- static_cast<int8_t>(str[0]));
- } else {
- // Don't use 16-bit immediate. The size changing prefix throws off
- // pre-decoding.
- __ movzx_w(eax,
- Operand(esi, edi, times_1, byte_offset));
- __ cmp(eax, static_cast<int32_t>(str[0]));
- }
- BranchOrBacktrack(not_equal, on_failure);
-
- __ lea(ebx, Operand(esi, edi, times_1, 0));
- for (int i = 1, n = str.length(); i < n;) {
- if (mode_ == ASCII) {
- if (i <= n - 4) {
- int combined_chars =
- (static_cast<uint32_t>(str[i + 0]) << 0) |
- (static_cast<uint32_t>(str[i + 1]) << 8) |
- (static_cast<uint32_t>(str[i + 2]) << 16) |
- (static_cast<uint32_t>(str[i + 3]) << 24);
- __ cmp(Operand(ebx, byte_offset + i), Immediate(combined_chars));
- i += 4;
- } else {
- __ cmpb(Operand(ebx, byte_offset + i),
- static_cast<int8_t>(str[i]));
- i += 1;
- }
- } else {
- ASSERT(mode_ == UC16);
- if (i <= n - 2) {
- __ cmp(Operand(ebx, byte_offset + i * sizeof(uc16)),
- Immediate(*reinterpret_cast<const int*>(&str[i])));
- i += 2;
- } else {
- // Avoid a 16-bit immediate operation. It uses the length-changing
- // 0x66 prefix which causes pre-decoder misprediction and pipeline
- // stalls. See
- // "Intel(R) 64 and IA-32 Architectures Optimization Reference Manual"
- // (248966.pdf) section 3.4.2.3 "Length-Changing Prefixes (LCP)"
- __ movzx_w(eax,
- Operand(ebx, byte_offset + i * sizeof(uc16)));
- __ cmp(eax, static_cast<int32_t>(str[i]));
- i += 1;
- }
- }
- BranchOrBacktrack(not_equal, on_failure);
- }
-}
-
-
-void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
- Label fallthrough;
- __ cmp(edi, Operand(backtrack_stackpointer(), 0));
- __ j(not_equal, &fallthrough);
- __ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop.
- BranchOrBacktrack(no_condition, on_equal);
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- __ mov(edx, register_location(start_reg)); // Index of start of capture
- __ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
- __ sub(ebx, edx); // Length of capture.
-
- // The length of a capture should not be negative. This can only happen
- // if the end of the capture is unrecorded, or at a point earlier than
- // the start of the capture.
- BranchOrBacktrack(less, on_no_match);
-
- // If length is zero, either the capture is empty or it is completely
- // uncaptured. In either case succeed immediately.
- __ j(equal, &fallthrough);
-
- // Check that there are sufficient characters left in the input.
- __ mov(eax, edi);
- __ add(eax, ebx);
- BranchOrBacktrack(greater, on_no_match);
-
- if (mode_ == ASCII) {
- Label success;
- Label fail;
- Label loop_increment;
- // Save register contents to make the registers available below.
- __ push(edi);
- __ push(backtrack_stackpointer());
- // After this, the eax, ecx, and edi registers are available.
-
- __ add(edx, esi); // Start of capture
- __ add(edi, esi); // Start of text to match against capture.
- __ add(ebx, edi); // End of text to match against capture.
-
- Label loop;
- __ bind(&loop);
- __ movzx_b(eax, Operand(edi, 0));
- __ cmpb_al(Operand(edx, 0));
- __ j(equal, &loop_increment);
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- __ or_(eax, 0x20); // Convert match character to lower-case.
- __ lea(ecx, Operand(eax, -'a'));
- __ cmp(ecx, static_cast<int32_t>('z' - 'a')); // Is eax a lowercase letter?
-#ifndef ENABLE_LATIN_1
- __ j(above, &fail); // Weren't letters anyway.
-#else
- Label convert_capture;
- __ j(below_equal, &convert_capture); // In range 'a'-'z'.
- // Latin-1: Check for values in range [224,254] but not 247.
- __ sub(ecx, Immediate(224 - 'a'));
- __ cmp(ecx, Immediate(254 - 224));
- __ j(above, &fail); // Weren't Latin-1 letters.
- __ cmp(ecx, Immediate(247 - 224)); // Check for 247.
- __ j(equal, &fail);
- __ bind(&convert_capture);
-#endif
- // Also convert capture character.
- __ movzx_b(ecx, Operand(edx, 0));
- __ or_(ecx, 0x20);
-
- __ cmp(eax, ecx);
- __ j(not_equal, &fail);
-
- __ bind(&loop_increment);
- // Increment pointers into match and capture strings.
- __ add(edx, Immediate(1));
- __ add(edi, Immediate(1));
- // Compare to end of match, and loop if not done.
- __ cmp(edi, ebx);
- __ j(below, &loop);
- __ jmp(&success);
-
- __ bind(&fail);
- // Restore original values before failing.
- __ pop(backtrack_stackpointer());
- __ pop(edi);
- BranchOrBacktrack(no_condition, on_no_match);
-
- __ bind(&success);
- // Restore original value before continuing.
- __ pop(backtrack_stackpointer());
- // Drop original value of character position.
- __ add(esp, Immediate(kPointerSize));
- // Compute new value of character position after the matched part.
- __ sub(edi, esi);
- } else {
- ASSERT(mode_ == UC16);
- // Save registers before calling C function.
- __ push(esi);
- __ push(edi);
- __ push(backtrack_stackpointer());
- __ push(ebx);
-
- static const int argument_count = 4;
- __ PrepareCallCFunction(argument_count, ecx);
- // Put arguments into allocated stack area, last argument highest on stack.
- // Parameters are
- // Address byte_offset1 - Address captured substring's start.
- // Address byte_offset2 - Address of current character position.
- // size_t byte_length - length of capture in bytes(!)
- // Isolate* isolate
-
- // Set isolate.
- __ mov(Operand(esp, 3 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- // Set byte_length.
- __ mov(Operand(esp, 2 * kPointerSize), ebx);
- // Set byte_offset2.
- // Found by adding negative string-end offset of current position (edi)
- // to end of string.
- __ add(edi, esi);
- __ mov(Operand(esp, 1 * kPointerSize), edi);
- // Set byte_offset1.
- // Start of capture, where edx already holds string-end negative offset.
- __ add(edx, esi);
- __ mov(Operand(esp, 0 * kPointerSize), edx);
-
- {
- AllowExternalCallThatCantCauseGC scope(masm_);
- ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(compare, argument_count);
- }
- // Pop original values before reacting on result value.
- __ pop(ebx);
- __ pop(backtrack_stackpointer());
- __ pop(edi);
- __ pop(esi);
-
- // Check if function returned non-zero for success or zero for failure.
- __ or_(eax, eax);
- BranchOrBacktrack(zero, on_no_match);
- // On success, increment position by length of capture.
- __ add(edi, ebx);
- }
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- Label success;
- Label fail;
-
- // Find length of back-referenced capture.
- __ mov(edx, register_location(start_reg));
- __ mov(eax, register_location(start_reg + 1));
- __ sub(eax, edx); // Length to check.
- // Fail on partial or illegal capture (start of capture after end of capture).
- BranchOrBacktrack(less, on_no_match);
- // Succeed on empty capture (including no capture)
- __ j(equal, &fallthrough);
-
- // Check that there are sufficient characters left in the input.
- __ mov(ebx, edi);
- __ add(ebx, eax);
- BranchOrBacktrack(greater, on_no_match);
-
- // Save register to make it available below.
- __ push(backtrack_stackpointer());
-
- // Compute pointers to match string and capture string
- __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
- __ add(edx, esi); // Start of capture.
- __ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match
-
- Label loop;
- __ bind(&loop);
- if (mode_ == ASCII) {
- __ movzx_b(eax, Operand(edx, 0));
- __ cmpb_al(Operand(ebx, 0));
- } else {
- ASSERT(mode_ == UC16);
- __ movzx_w(eax, Operand(edx, 0));
- __ cmpw_ax(Operand(ebx, 0));
- }
- __ j(not_equal, &fail);
- // Increment pointers into capture and match string.
- __ add(edx, Immediate(char_size()));
- __ add(ebx, Immediate(char_size()));
- // Check if we have reached end of match area.
- __ cmp(ebx, ecx);
- __ j(below, &loop);
- __ jmp(&success);
-
- __ bind(&fail);
- // Restore backtrack stackpointer.
- __ pop(backtrack_stackpointer());
- BranchOrBacktrack(no_condition, on_no_match);
-
- __ bind(&success);
- // Move current character position to position after match.
- __ mov(edi, ecx);
- __ sub(edi, esi);
- // Restore backtrack stackpointer.
- __ pop(backtrack_stackpointer());
-
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotCharacter(uint32_t c,
- Label* on_not_equal) {
- __ cmp(current_character(), c);
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- if (c == 0) {
- __ test(current_character(), Immediate(mask));
- } else {
- __ mov(eax, mask);
- __ and_(eax, current_character());
- __ cmp(eax, c);
- }
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal) {
- if (c == 0) {
- __ test(current_character(), Immediate(mask));
- } else {
- __ mov(eax, mask);
- __ and_(eax, current_character());
- __ cmp(eax, c);
- }
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- ASSERT(minus < String::kMaxUtf16CodeUnit);
- __ lea(eax, Operand(current_character(), -minus));
- if (c == 0) {
- __ test(eax, Immediate(mask));
- } else {
- __ and_(eax, mask);
- __ cmp(eax, c);
- }
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- __ lea(eax, Operand(current_character(), -from));
- __ cmp(eax, to - from);
- BranchOrBacktrack(below_equal, on_in_range);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- __ lea(eax, Operand(current_character(), -from));
- __ cmp(eax, to - from);
- BranchOrBacktrack(above, on_not_in_range);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckBitInTable(
- Handle<ByteArray> table,
- Label* on_bit_set) {
- __ mov(eax, Immediate(table));
- Register index = current_character();
- if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
- __ mov(ebx, kTableSize - 1);
- __ and_(ebx, current_character());
- index = ebx;
- }
- __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0);
- BranchOrBacktrack(not_equal, on_bit_set);
-}
-
-
-bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check
- switch (type) {
- case 's':
- // Match space-characters
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- Label success;
- __ cmp(current_character(), ' ');
- __ j(equal, &success);
- // Check range 0x09..0x0d
- __ lea(eax, Operand(current_character(), -'\t'));
- __ cmp(eax, '\r' - '\t');
- BranchOrBacktrack(above, on_no_match);
- __ bind(&success);
- return true;
- }
- return false;
- case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- __ cmp(current_character(), ' ');
- BranchOrBacktrack(equal, on_no_match);
- __ lea(eax, Operand(current_character(), -'\t'));
- __ cmp(eax, '\r' - '\t');
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- }
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9')
- __ lea(eax, Operand(current_character(), -'0'));
- __ cmp(eax, '9' - '0');
- BranchOrBacktrack(above, on_no_match);
- return true;
- case 'D':
- // Match non ASCII-digits
- __ lea(eax, Operand(current_character(), -'0'));
- __ cmp(eax, '9' - '0');
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ mov(eax, current_character());
- __ xor_(eax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(eax, Immediate(0x0b));
- __ cmp(eax, 0x0c - 0x0b);
- BranchOrBacktrack(below_equal, on_no_match);
- if (mode_ == UC16) {
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(eax, Immediate(0x2028 - 0x0b));
- __ cmp(eax, 0x2029 - 0x2028);
- BranchOrBacktrack(below_equal, on_no_match);
- }
- return true;
- }
- case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(current_character(), Immediate('z'));
- BranchOrBacktrack(above, on_no_match);
- }
- ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
- __ test_b(current_character(),
- Operand::StaticArray(current_character(), times_1, word_map));
- BranchOrBacktrack(zero, on_no_match);
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(current_character(), Immediate('z'));
- __ j(above, &done);
- }
- ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
- __ test_b(current_character(),
- Operand::StaticArray(current_character(), times_1, word_map));
- BranchOrBacktrack(not_zero, on_no_match);
- if (mode_ != ASCII) {
- __ bind(&done);
- }
- return true;
- }
- // Non-standard classes (with no syntactic shorthand) used internally.
- case '*':
- // Match any character.
- return true;
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
- // The opposite of '.'.
- __ mov(eax, current_character());
- __ xor_(eax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(eax, Immediate(0x0b));
- __ cmp(eax, 0x0c - 0x0b);
- if (mode_ == ASCII) {
- BranchOrBacktrack(above, on_no_match);
- } else {
- Label done;
- BranchOrBacktrack(below_equal, &done);
- ASSERT_EQ(UC16, mode_);
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(eax, Immediate(0x2028 - 0x0b));
- __ cmp(eax, 1);
- BranchOrBacktrack(above, on_no_match);
- __ bind(&done);
- }
- return true;
- }
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
-
-
-void RegExpMacroAssemblerIA32::Fail() {
- STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
- if (!global()) {
- __ Set(eax, Immediate(FAILURE));
- }
- __ jmp(&exit_label_);
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
- Label return_eax;
- // Finalize code - write the entry point code now we know how many
- // registers we need.
-
- // Entry code:
- __ bind(&entry_label_);
-
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
-
- // Actually emit code to start a new stack frame.
- __ push(ebp);
- __ mov(ebp, esp);
- // Save callee-save registers. Order here should correspond to order of
- // kBackup_ebx etc.
- __ push(esi);
- __ push(edi);
- __ push(ebx); // Callee-save on MacOS.
- __ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
-
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ mov(ecx, esp);
- __ sub(ecx, Operand::StaticVariable(stack_limit));
- // Handle it if the stack pointer is already below the stack limit.
- __ j(below_equal, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmp(ecx, num_registers_ * kPointerSize);
- __ j(above_equal, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(eax, EXCEPTION);
- __ jmp(&return_eax);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(ebx);
- __ or_(eax, eax);
- // If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &return_eax);
-
- __ bind(&stack_ok);
- // Load start index for later use.
- __ mov(ebx, Operand(ebp, kStartIndex));
-
- // Allocate space on stack for registers.
- __ sub(esp, Immediate(num_registers_ * kPointerSize));
- // Load string length.
- __ mov(esi, Operand(ebp, kInputEnd));
- // Load input position.
- __ mov(edi, Operand(ebp, kInputStart));
- // Set up edi to be negative offset from string end.
- __ sub(edi, esi);
-
- // Set eax to address of char before start of the string.
- // (effectively string position -1).
- __ neg(ebx);
- if (mode_ == UC16) {
- __ lea(eax, Operand(edi, ebx, times_2, -char_size()));
- } else {
- __ lea(eax, Operand(edi, ebx, times_1, -char_size()));
- }
- // Store this value in a local variable, for use when clearing
- // position registers.
- __ mov(Operand(ebp, kInputStartMinusOne), eax);
-
-#ifdef WIN32
- // Ensure that we write to each stack page, in order. Skipping a page
- // on Windows can cause segmentation faults. Assuming page size is 4k.
- const int kPageSize = 4096;
- const int kRegistersPerPage = kPageSize / kPointerSize;
- for (int i = num_saved_registers_ + kRegistersPerPage - 1;
- i < num_registers_;
- i += kRegistersPerPage) {
- __ mov(register_location(i), eax); // One write every page.
- }
-#endif // WIN32
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- __ j(not_equal, &load_char_start_regexp, Label::kNear);
- __ mov(current_character(), '\n');
- __ jmp(&start_regexp, Label::kNear);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
-
- // Initialize on-stack registers.
- if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
- // Fill saved registers with initial value = start offset - 1
- // Fill in stack push order, to avoid accessing across an unwritten
- // page (a problem on Windows).
- if (num_saved_registers_ > 8) {
- __ mov(ecx, kRegisterZero);
- Label init_loop;
- __ bind(&init_loop);
- __ mov(Operand(ebp, ecx, times_1, 0), eax);
- __ sub(ecx, Immediate(kPointerSize));
- __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
- __ j(greater, &init_loop);
- } else { // Unroll the loop.
- for (int i = 0; i < num_saved_registers_; i++) {
- __ mov(register_location(i), eax);
- }
- }
- }
-
- // Initialize backtrack stack pointer.
- __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
-
- __ jmp(&start_label_);
-
- // Exit code:
- if (success_label_.is_linked()) {
- // Save captures when successful.
- __ bind(&success_label_);
- if (num_saved_registers_ > 0) {
- // copy captures to output
- __ mov(ebx, Operand(ebp, kRegisterOutput));
- __ mov(ecx, Operand(ebp, kInputEnd));
- __ mov(edx, Operand(ebp, kStartIndex));
- __ sub(ecx, Operand(ebp, kInputStart));
- if (mode_ == UC16) {
- __ lea(ecx, Operand(ecx, edx, times_2, 0));
- } else {
- __ add(ecx, edx);
- }
- for (int i = 0; i < num_saved_registers_; i++) {
- __ mov(eax, register_location(i));
- if (i == 0 && global_with_zero_length_check()) {
- // Keep capture start in edx for the zero-length check later.
- __ mov(edx, eax);
- }
- // Convert to index from start of string, not end.
- __ add(eax, ecx);
- if (mode_ == UC16) {
- __ sar(eax, 1); // Convert byte index to character index.
- }
- __ mov(Operand(ebx, i * kPointerSize), eax);
- }
- }
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- // Increment success counter.
- __ inc(Operand(ebp, kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ mov(ecx, Operand(ebp, kNumOutputRegisters));
- __ sub(ecx, Immediate(num_saved_registers_));
- // Check whether we have enough room for another set of capture results.
- __ cmp(ecx, Immediate(num_saved_registers_));
- __ j(less, &exit_label_);
-
- __ mov(Operand(ebp, kNumOutputRegisters), ecx);
- // Advance the location for output.
- __ add(Operand(ebp, kRegisterOutput),
- Immediate(num_saved_registers_ * kPointerSize));
-
- // Prepare eax to initialize registers with its value in the next run.
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
-
- if (global_with_zero_length_check()) {
- // Special case for zero-length matches.
- // edx: capture start index
- __ cmp(edi, edx);
- // Not a zero-length match, restart.
- __ j(not_equal, &load_char_start_regexp);
- // edi (offset from the end) is zero if we already reached the end.
- __ test(edi, edi);
- __ j(zero, &exit_label_, Label::kNear);
- // Advance current position after a zero-length match.
- if (mode_ == UC16) {
- __ add(edi, Immediate(2));
- } else {
- __ inc(edi);
- }
- }
-
- __ jmp(&load_char_start_regexp);
- } else {
- __ mov(eax, Immediate(SUCCESS));
- }
- }
-
- __ bind(&exit_label_);
- if (global()) {
- // Return the number of successful captures.
- __ mov(eax, Operand(ebp, kSuccessfulCaptures));
- }
-
- __ bind(&return_eax);
- // Skip esp past regexp registers.
- __ lea(esp, Operand(ebp, kBackup_ebx));
- // Restore callee-save registers.
- __ pop(ebx);
- __ pop(edi);
- __ pop(esi);
- // Exit function frame, restore previous one.
- __ pop(ebp);
- __ ret(0);
-
- // Backtrack code (branch target for conditional backtracks).
- if (backtrack_label_.is_linked()) {
- __ bind(&backtrack_label_);
- Backtrack();
- }
-
- Label exit_with_exception;
-
- // Preempt-code
- if (check_preempt_label_.is_linked()) {
- SafeCallTarget(&check_preempt_label_);
-
- __ push(backtrack_stackpointer());
- __ push(edi);
-
- CallCheckStackGuardState(ebx);
- __ or_(eax, eax);
- // If returning non-zero, we should end execution with the given
- // result as return value.
- __ j(not_zero, &return_eax);
-
- __ pop(edi);
- __ pop(backtrack_stackpointer());
- // String might have moved: Reload esi from frame.
- __ mov(esi, Operand(ebp, kInputEnd));
- SafeReturn();
- }
-
- // Backtrack stack overflow code.
- if (stack_overflow_label_.is_linked()) {
- SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
-
- Label grow_failed;
- // Save registers before calling C function
- __ push(esi);
- __ push(edi);
-
- // Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, ebx);
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- __ lea(eax, Operand(ebp, kStackHighEnd));
- __ mov(Operand(esp, 1 * kPointerSize), eax);
- __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return NULL, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
- __ or_(eax, eax);
- __ j(equal, &exit_with_exception);
- // Otherwise use return value as new stack pointer.
- __ mov(backtrack_stackpointer(), eax);
- // Restore saved registers and continue.
- __ pop(edi);
- __ pop(esi);
- SafeReturn();
- }
-
- if (exit_with_exception.is_linked()) {
- // If any of the code above needed to exit with an exception.
- __ bind(&exit_with_exception);
- // Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ mov(eax, EXCEPTION);
- __ jmp(&return_eax);
- }
-
- CodeDesc code_desc;
- masm_->GetCode(&code_desc);
- Handle<Code> code =
- masm_->isolate()->factory()->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
- return Handle<HeapObject>::cast(code);
-}
-
-
-void RegExpMacroAssemblerIA32::GoTo(Label* to) {
- BranchOrBacktrack(no_condition, to);
-}
-
-
-void RegExpMacroAssemblerIA32::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- __ cmp(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(greater_equal, if_ge);
-}
-
-
-void RegExpMacroAssemblerIA32::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- __ cmp(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(less, if_lt);
-}
-
-
-void RegExpMacroAssemblerIA32::IfRegisterEqPos(int reg,
- Label* if_eq) {
- __ cmp(edi, register_location(reg));
- BranchOrBacktrack(equal, if_eq);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerIA32::Implementation() {
- return kIA32Implementation;
-}
-
-
-void RegExpMacroAssemblerIA32::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
- ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerIA32::PopCurrentPosition() {
- Pop(edi);
-}
-
-
-void RegExpMacroAssemblerIA32::PopRegister(int register_index) {
- Pop(eax);
- __ mov(register_location(register_index), eax);
-}
-
-
-void RegExpMacroAssemblerIA32::PushBacktrack(Label* label) {
- Push(Immediate::CodeRelativeOffset(label));
- CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerIA32::PushCurrentPosition() {
- Push(edi);
-}
-
-
-void RegExpMacroAssemblerIA32::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- __ mov(eax, register_location(register_index));
- Push(eax);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerIA32::ReadCurrentPositionFromRegister(int reg) {
- __ mov(edi, register_location(reg));
-}
-
-
-void RegExpMacroAssemblerIA32::ReadStackPointerFromRegister(int reg) {
- __ mov(backtrack_stackpointer(), register_location(reg));
- __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
-}
-
-void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by) {
- Label after_position;
- __ cmp(edi, -by * char_size());
- __ j(greater_equal, &after_position, Label::kNear);
- __ mov(edi, -by * char_size());
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&after_position);
-}
-
-void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
- ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ mov(register_location(register_index), Immediate(to));
-}
-
-
-bool RegExpMacroAssemblerIA32::Succeed() {
- __ jmp(&success_label_);
- return global();
-}
-
-
-void RegExpMacroAssemblerIA32::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- if (cp_offset == 0) {
- __ mov(register_location(reg), edi);
- } else {
- __ lea(eax, Operand(edi, cp_offset * char_size()));
- __ mov(register_location(reg), eax);
- }
-}
-
-
-void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
- for (int reg = reg_from; reg <= reg_to; reg++) {
- __ mov(register_location(reg), eax);
- }
-}
-
-
-void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(int reg) {
- __ mov(eax, backtrack_stackpointer());
- __ sub(eax, Operand(ebp, kStackHighEnd));
- __ mov(register_location(reg), eax);
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
- // RegExp code frame pointer.
- __ mov(Operand(esp, 2 * kPointerSize), ebp);
- // Code* of self.
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(masm_->CodeObject()));
- // Next address on the stack (will be address of return address).
- __ lea(eax, Operand(esp, -kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), eax);
- ExternalReference check_stack_guard =
- ExternalReference::re_check_stack_guard_state(masm_->isolate());
- __ CallCFunction(check_stack_guard, num_arguments);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
-
-
-int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame) {
- Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
- if (isolate->stack_guard()->IsStackOverflow()) {
- isolate->StackOverflow();
- return EXCEPTION;
- }
-
- // If not real stack overflow the stack guard was used to interrupt
- // execution for another purpose.
-
- // If this is a direct call from JavaScript retry the RegExp forcing the call
- // through the runtime system. Currently the direct call cannot handle a GC.
- if (frame_entry<int>(re_frame, kDirectCall) == 1) {
- return RETRY;
- }
-
- // Prepare for possible GC.
- HandleScope handles(isolate);
- Handle<Code> code_handle(re_code);
-
- Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
-
- // Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
-
- ASSERT(re_code->instruction_start() <= *return_address);
- ASSERT(*return_address <=
- re_code->instruction_start() + re_code->instruction_size());
-
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
-
- if (*code_handle != re_code) { // Return address no longer valid
- int delta = code_handle->address() - re_code->address();
- // Overwrite the return address on the stack.
- *return_address += delta;
- }
-
- if (result->IsException()) {
- return EXCEPTION;
- }
-
- Handle<String> subject_tmp = subject;
- int slice_offset = 0;
-
- // Extract the underlying string and the slice offset.
- if (StringShape(*subject_tmp).IsCons()) {
- subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
- } else if (StringShape(*subject_tmp).IsSliced()) {
- SlicedString* slice = SlicedString::cast(*subject_tmp);
- subject_tmp = Handle<String>(slice->parent());
- slice_offset = slice->offset();
- }
-
- // String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
- // code cannot be used, and we need to restart regexp matching from
- // scratch (including, potentially, compiling a new version of the code).
- return RETRY;
- }
-
- // Otherwise, the content of the string might have moved. It must still
- // be a sequential or external string with the same content.
- // Update the start and end pointers in the stack frame to the current
- // location (whether it has actually moved or not).
- ASSERT(StringShape(*subject_tmp).IsSequential() ||
- StringShape(*subject_tmp).IsExternal());
-
- // The original start address of the characters to match.
- const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
-
- // Find the current start address of the same character at the current string
- // position.
- int start_index = frame_entry<int>(re_frame, kStartIndex);
- const byte* new_address = StringCharacterPosition(*subject_tmp,
- start_index + slice_offset);
-
- if (start_address != new_address) {
- // If there is a difference, update the object pointer and start and end
- // addresses in the RegExp stack frame to match the new value.
- const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
- int byte_length = static_cast<int>(end_address - start_address);
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- frame_entry<const byte*>(re_frame, kInputStart) = new_address;
- frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
- } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
- // Subject string might have been a ConsString that underwent
- // short-circuiting during GC. That will not change start_address but
- // will change pointer inside the subject handle.
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- }
-
- return 0;
-}
-
-
-Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
- ASSERT(register_index < (1<<30));
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- return Operand(ebp, kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- __ cmp(edi, -cp_offset * char_size());
- BranchOrBacktrack(greater_equal, on_outside_input);
-}
-
-
-void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
- Label* to) {
- if (condition < 0) { // No condition
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ jmp(to);
- return;
- }
- if (to == NULL) {
- __ j(condition, &backtrack_label_);
- return;
- }
- __ j(condition, to);
-}
-
-
-void RegExpMacroAssemblerIA32::SafeCall(Label* to) {
- Label return_to;
- __ push(Immediate::CodeRelativeOffset(&return_to));
- __ jmp(to);
- __ bind(&return_to);
-}
-
-
-void RegExpMacroAssemblerIA32::SafeReturn() {
- __ pop(ebx);
- __ add(ebx, Immediate(masm_->CodeObject()));
- __ jmp(ebx);
-}
-
-
-void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
- __ bind(name);
-}
-
-
-void RegExpMacroAssemblerIA32::Push(Register source) {
- ASSERT(!source.is(backtrack_stackpointer()));
- // Notice: This updates flags, unlike normal Push.
- __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
- __ mov(Operand(backtrack_stackpointer(), 0), source);
-}
-
-
-void RegExpMacroAssemblerIA32::Push(Immediate value) {
- // Notice: This updates flags, unlike normal Push.
- __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
- __ mov(Operand(backtrack_stackpointer(), 0), value);
-}
-
-
-void RegExpMacroAssemblerIA32::Pop(Register target) {
- ASSERT(!target.is(backtrack_stackpointer()));
- __ mov(target, Operand(backtrack_stackpointer(), 0));
- // Notice: This updates flags, unlike normal Pop.
- __ add(backtrack_stackpointer(), Immediate(kPointerSize));
-}
-
-
-void RegExpMacroAssemblerIA32::CheckPreemption() {
- // Check for preemption.
- Label no_preempt;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above, &no_preempt);
-
- SafeCall(&check_preempt_label_);
-
- __ bind(&no_preempt);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckStackLimit() {
- Label no_stack_overflow;
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
- __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
- __ j(above, &no_stack_overflow);
-
- SafeCall(&stack_overflow_label_);
-
- __ bind(&no_stack_overflow);
-}
-
-
-void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- if (mode_ == ASCII) {
- if (characters == 4) {
- __ mov(current_character(), Operand(esi, edi, times_1, cp_offset));
- } else if (characters == 2) {
- __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset));
- } else {
- ASSERT(characters == 1);
- __ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset));
- }
- } else {
- ASSERT(mode_ == UC16);
- if (characters == 2) {
- __ mov(current_character(),
- Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
- } else {
- ASSERT(characters == 1);
- __ movzx_w(current_character(),
- Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
- }
- }
-}
-
-
-#undef __
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h b/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h
deleted file mode 100644
index 7aea385..0000000
--- a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
-#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
-
-#include "ia32/assembler-ia32.h"
-#include "ia32/assembler-ia32-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerIA32(Mode mode, int registers_to_save, Zone* zone);
- virtual ~RegExpMacroAssemblerIA32();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(uint32_t c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
-
- private:
- // Offsets from ebp of function parameters and stored registers.
- static const int kFramePointer = 0;
- // Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kPointerSize;
- static const int kFrameAlign = kReturn_eip + kPointerSize;
- // Parameters.
- static const int kInputString = kFrameAlign;
- static const int kStartIndex = kInputString + kPointerSize;
- static const int kInputStart = kStartIndex + kPointerSize;
- static const int kInputEnd = kInputStart + kPointerSize;
- static const int kRegisterOutput = kInputEnd + kPointerSize;
- // For the case of global regular expression, we have room to store at least
- // one set of capture results. For the case of non-global regexp, we ignore
- // this value.
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
- // Below the frame pointer - local stack variables.
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kBackup_esi = kFramePointer - kPointerSize;
- static const int kBackup_edi = kBackup_esi - kPointerSize;
- static const int kBackup_ebx = kBackup_edi - kPointerSize;
- static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState(Register scratch);
-
- // The ebp-relative location of a regexp register.
- Operand register_location(int register_index);
-
- // The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return edx; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- inline Register backtrack_stackpointer() { return ecx; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to);
-
- // Call and return internally in the generated code in a way that
- // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
- inline void SafeCall(Label* to);
- inline void SafeReturn();
- inline void SafeCallTarget(Label* name);
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer (ecx) by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pushes a value on the backtrack stack. Decrements the stack pointer (ecx)
- // by a word size and stores the value there.
- inline void Push(Immediate value);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // (ecx) and increments it by a word size.
- inline void Pop(Register target);
-
- MacroAssembler* masm_;
-
- // Which mode to generate code for (ASCII or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/simulator-ia32.cc b/src/3rdparty/v8/src/ia32/simulator-ia32.cc
deleted file mode 100644
index ab81693..0000000
--- a/src/3rdparty/v8/src/ia32/simulator-ia32.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Since there is no simulator for the ia32 architecture this file is empty.
-
diff --git a/src/3rdparty/v8/src/ia32/simulator-ia32.h b/src/3rdparty/v8/src/ia32/simulator-ia32.h
deleted file mode 100644
index 478d4ce..0000000
--- a/src/3rdparty/v8/src/ia32/simulator-ia32.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_SIMULATOR_IA32_H_
-#define V8_IA32_SIMULATOR_IA32_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// Since there is no simulator for the ia32 architecture the only thing we can
-// do is to call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-
-typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address should
-// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- (reinterpret_cast<TryCatch*>(try_catch_address))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on ia32 uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch() { }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_SIMULATOR_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc b/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
deleted file mode 100644
index 34ce36d..0000000
--- a/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
+++ /dev/null
@@ -1,3833 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register name,
- Register receiver,
- // Number of the cache entry pointer-size scaled.
- Register offset,
- Register extra) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- Label miss;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
-
- if (extra.is_valid()) {
- // Get the code entry from the cache.
- __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
-
- __ bind(&miss);
- } else {
- // Save the offset on the stack.
- __ push(offset);
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Restore offset register.
- __ mov(offset, Operand(esp, 0));
-
- // Get the code entry from the cache.
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Restore offset and re-load code entry from cache.
- __ pop(offset);
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
- // Jump to the first instruction in the code stub.
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
-
- // Pop at miss.
- __ bind(&miss);
- __ pop(offset);
- }
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be an internalized string and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<String> name,
- Register r0,
- Register r1) {
- ASSERT(name->IsInternalizedString());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
- kInterceptorOrAccessCheckNeededMask);
- __ j(not_zero, miss_label);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, miss_label);
-
- // Load properties array.
- Register properties = r0;
- __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->hash_table_map()));
- __ j(not_equal, miss_label);
-
- Label done;
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- properties,
- name,
- r1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Label miss;
-
- // Assert that code is valid. The multiplying code relies on the entry size
- // being 12.
- ASSERT(sizeof(Entry) == 12);
-
- // Assert the flags do not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Assert that there are no register conflicts.
- ASSERT(!scratch.is(receiver));
- ASSERT(!scratch.is(name));
- ASSERT(!extra.is(receiver));
- ASSERT(!extra.is(name));
- ASSERT(!extra.is(scratch));
-
- // Assert scratch and extra registers are valid, and extra2/3 are unused.
- ASSERT(!scratch.is(no_reg));
- ASSERT(extra2.is(no_reg));
- ASSERT(extra3.is(no_reg));
-
- Register offset = scratch;
- scratch = no_reg;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ mov(offset, FieldOperand(name, String::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
- // We mask out the last two bits because they are not part of the hash and
- // they are always 01 for maps. Also in the two 'and' instructions below.
- __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
- // ProbeTable expects the offset to be pointer scaled, which it is, because
- // the heap object tag size is 2 and the pointer size log 2 is also 2.
- ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
-
- // Probe the primary table.
- ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
-
- // Primary miss: Compute hash for secondary probe.
- __ mov(offset, FieldOperand(name, String::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
- __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
- __ sub(offset, name);
- __ add(offset, Immediate(flags));
- __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
-
- // Probe the secondary table.
- ProbeTable(
- isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- __ LoadGlobalFunction(index, prototype);
- __ LoadGlobalFunctionInitialMap(prototype, prototype);
- // Load the prototype from the initial map.
- __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss) {
- // Check we're still in the same context.
- __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
- masm->isolate()->global_object());
- __ j(not_equal, miss);
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(masm->isolate()->native_context()->get(index)));
- // Load its initial map. The global functions all have initial maps.
- __ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
- // Load the prototype from the initial map.
- __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss_label);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, miss_label);
-
- // Load length directly from the JS array.
- __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
- __ ret(0);
-}
-
-
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ test(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss,
- bool support_wrappers) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
-
- // Load length from the string and convert to a smi.
- __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(eax, scratch1);
- __ ret(0);
-}
-
-
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- Handle<JSObject> holder,
- PropertyIndex index) {
- DoGenerateFastPropertyLoad(
- masm, dst, src, index.is_inobject(holder), index.translate(holder));
-}
-
-
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
- int offset = index * kPointerSize;
- if (!inobject) {
- // Calculate the offset into the properties array.
- offset = offset + FixedArray::kHeaderSize;
- __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- src = dst;
- }
- __ mov(dst, FieldOperand(src, offset));
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Immediate(interceptor));
- __ push(scratch);
- __ push(receiver);
- __ push(holder);
- __ push(FieldOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate()),
- 6);
-}
-
-
-// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = 4;
-
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : last argument in the internal frame of the caller
- // -----------------------------------
- __ pop(scratch);
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(Immediate(Smi::FromInt(0)));
- }
- __ push(scratch);
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address.
- // -- esp[4] : last fast api call extra argument.
- // -- ...
- // -- esp[kFastApiCallArguments * 4] : first fast api call extra argument.
- // -- esp[kFastApiCallArguments * 4 + 4] : last argument in the internal
- // frame.
- // -----------------------------------
- __ pop(scratch);
- __ add(esp, Immediate(kPointerSize * kFastApiCallArguments));
- __ push(scratch);
-}
-
-
-// Generates call to API function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- esp[8] : api function
- // (first fast api call extra argument)
- // -- esp[12] : api call data
- // -- esp[16] : isolate
- // -- esp[20] : last argument
- // -- ...
- // -- esp[(argc + 4) * 4] : first argument
- // -- esp[(argc + 5) * 4] : receiver
- // -----------------------------------
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(edi, function);
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Pass the additional arguments.
- __ mov(Operand(esp, 2 * kPointerSize), edi);
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ mov(ecx, api_call_info);
- __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
- __ mov(Operand(esp, 3 * kPointerSize), ebx);
- } else {
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
- }
- __ mov(Operand(esp, 4 * kPointerSize),
- Immediate(reinterpret_cast<int>(masm->isolate())));
-
- // Prepare arguments.
- __ lea(eax, Operand(esp, 4 * kPointerSize));
-
- const int kApiArgc = 1; // API function gets reference to the v8::Arguments.
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
-
- __ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_.
- __ add(eax, Immediate(argc * kPointerSize));
- __ mov(ApiParameterOperand(2), eax); // v8::Arguments::values_.
- __ Set(ApiParameterOperand(3), Immediate(argc)); // v8::Arguments::length_.
- // v8::Arguments::is_construct_call_.
- __ Set(ApiParameterOperand(4), Immediate(0));
-
- // v8::InvocationCallback's argument.
- __ lea(eax, ApiParameterOperand(1));
- __ mov(ApiParameterOperand(0), eax);
-
- // Function address is a foreign pointer outside V8's heap.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ CallApiFunctionAndReturn(function_address,
- argc + kFastApiCallArguments + 1);
-}
-
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- Code::ExtraICState extra_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_state_(extra_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
-
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->call_const_interceptor(), 1);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
- } else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm, scratch1);
- __ jmp(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm, scratch1);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- 6);
-
- // Restore the name_ register.
- __ pop(name_);
-
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- // Leave the internal frame.
- }
-
- __ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel());
- __ j(not_equal, interceptor_succeeded);
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- Code::ExtraICState extra_state_;
-};
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Handle<Code> code = (kind == Code::LOAD_IC)
- ? masm->isolate()->builtins()->LoadIC_Miss()
- : masm->isolate()->builtins()->KeyedLoadIC_Miss();
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateStoreMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
- Handle<Code> code = (kind == Code::STORE_IC)
- ? masm->isolate()->builtins()->StoreIC_Miss()
- : masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
- Handle<Code> code =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name,
- Register receiver_reg,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- LookupResult lookup(masm->isolate());
- object->Lookup(*name, &lookup);
- if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
- // In sloppy mode, we could just return the value and be done. However, we
- // might be in strict mode, where we have to throw. Since we cannot tell,
- // go into slow case unconditionally.
- __ jmp(miss_label);
- return;
- }
-
- // Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, mode);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
- // Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- if (lookup.IsFound()) {
- holder = lookup.holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- // We need an extra register, push
- __ push(name_reg);
- Label miss_pop, done_check;
- CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, &miss_pop);
- __ jmp(&done_check);
- __ bind(&miss_pop);
- __ pop(name_reg);
- __ jmp(miss_label);
- __ bind(&done_check);
- __ pop(name_reg);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ pop(scratch1); // Return address.
- __ push(receiver_reg);
- __ push(Immediate(transition));
- __ push(eax);
- __ push(scratch1);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- masm->isolate()),
- 3,
- 1);
- return;
- }
-
- if (!transition.is_null()) {
- // Update the map of the object.
- __ mov(scratch1, Immediate(transition));
- __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
-
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- name_reg,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
-
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ mov(FieldOperand(receiver_reg, offset), eax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, eax);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kDontSaveFPRegs);
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array (optimistically).
- __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(scratch1, offset), eax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, eax);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kDontSaveFPRegs);
- }
-
- // Return the value (register eax).
- __ ret(0);
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
- if (Serializer::enabled()) {
- __ mov(scratch, Immediate(cell));
- __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- Immediate(the_hole));
- } else {
- __ cmp(Operand::Cell(cell), Immediate(the_hole));
- }
- __ j(not_equal, miss);
-}
-
-
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void StubCompiler::GenerateTailCall(Handle<Code> code) {
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<String> name,
- int save_at_depth,
- Label* miss,
- PrototypeCheckType check) {
- Handle<JSObject> first = object;
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- Handle<JSObject> current = object;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
- }
-
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current.is_identical_to(holder)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsInternalizedString()) {
- name = factory()->InternalizeString(name);
- }
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
- StringDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- bool in_new_space = heap()->InNewSpace(*prototype);
- Handle<Map> current_map(current->map());
- if (in_new_space) {
- // Save the map in scratch1 for later.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
- }
-
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (in_new_space) {
- // The prototype is in new space; we cannot store a reference to it
- // in the code. Load it from the map.
- __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // The prototype is in old space; load it directly.
- __ mov(reg, prototype);
- }
- }
-
- if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
- ASSERT(current.is_identical_to(holder));
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- // Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()),
- miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
- }
-
- // Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
- Label* miss) {
- if (!miss->is_unused()) {
- __ jmp(success);
- __ bind(miss);
- GenerateLoadMiss(masm(), kind());
- }
-}
-
-
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* success,
- Handle<ExecutableAccessorInfo> callback) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
-
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- ASSERT(!reg.is(scratch2()));
- ASSERT(!reg.is(scratch3()));
- Register dictionary = scratch1();
- bool must_preserve_dictionary_reg = reg.is(dictionary);
-
- // Load the properties dictionary.
- if (must_preserve_dictionary_reg) {
- __ push(dictionary);
- }
- __ mov(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done, pop_and_miss;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- &pop_and_miss,
- &probe_done,
- dictionary,
- this->name(),
- scratch2(),
- scratch3());
- __ bind(&pop_and_miss);
- if (must_preserve_dictionary_reg) {
- __ pop(dictionary);
- }
- __ jmp(&miss);
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch2 contains the
- // index into the dictionary. Check that the value is the callback.
- Register index = scratch2();
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ mov(scratch3(),
- Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag));
- if (must_preserve_dictionary_reg) {
- __ pop(dictionary);
- }
- __ cmp(scratch3(), callback);
- __ j(not_equal, &miss);
- }
-
- HandlerFrontendFooter(success, &miss);
- return reg;
-}
-
-
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Label* success,
- Handle<GlobalObject> global) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (!global.is_null()) {
- GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
- }
-
- if (!last->HasFastProperties()) {
- __ mov(scratch2(), FieldOperand(reg, HeapObject::kMapOffset));
- __ mov(scratch2(), FieldOperand(scratch2(), Map::kPrototypeOffset));
- __ cmp(scratch2(), isolate()->factory()->null_value());
- __ j(not_equal, &miss);
- }
-
- HandlerFrontendFooter(success, &miss);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex index) {
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
- __ ret(0);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadCallback(
- Register reg,
- Handle<ExecutableAccessorInfo> callback) {
- // Insert additional parameters into the stack frame above return address.
- ASSERT(!scratch3().is(reg));
- __ pop(scratch3()); // Get return address to place it below.
-
- __ push(receiver()); // receiver
- __ mov(scratch2(), esp);
- ASSERT(!scratch2().is(reg));
- __ push(reg); // holder
- // Push data from ExecutableAccessorInfo.
- if (isolate()->heap()->InNewSpace(callback->data())) {
- __ mov(scratch1(), Immediate(callback));
- __ push(FieldOperand(scratch1(), ExecutableAccessorInfo::kDataOffset));
- } else {
- __ push(Immediate(Handle<Object>(callback->data(), isolate())));
- }
- __ push(Immediate(reinterpret_cast<int>(isolate())));
-
- // Save a pointer to where we pushed the arguments pointer. This will be
- // passed as the const ExecutableAccessorInfo& to the C++ callback.
- __ push(scratch2());
-
- __ push(name()); // name
- __ mov(ebx, esp); // esp points to reference to name (handler).
-
- __ push(scratch3()); // Restore return address.
-
- // 4 elements array for v8::Arguments::values_, handler for name and pointer
- // to the values (it considered as smi in GC).
- const int kStackSpace = 6;
- const int kApiArgc = 2;
-
- __ PrepareCallApiFunction(kApiArgc);
- __ mov(ApiParameterOperand(0), ebx); // name.
- __ add(ebx, Immediate(kPointerSize));
- __ mov(ApiParameterOperand(1), ebx); // arguments pointer.
-
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- __ CallApiFunctionAndReturn(getter_address, kStackSpace);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
- // Return the constant value.
- __ LoadHeapObject(eax, value);
- __ ret(0);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
- Register holder_reg,
- Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name) {
- ASSERT(interceptor_holder->HasNamedInterceptor());
- ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- ExecutableAccessorInfo* callback =
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
- compile_followup_inline = callback->getter() != NULL &&
- callback->IsCompatibleReceiver(*object);
- }
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code,
- // the FIELD case might cause a miss during the prototype check.
- bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perfrom_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
- if (must_preserve_receiver_reg) {
- __ push(receiver());
- }
- __ push(holder_reg);
- __ push(this->name());
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ cmp(eax, factory()->no_interceptor_result_sentinel());
- __ j(equal, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ ret(0);
-
- // Clobber registers when generating debug-code to provoke errors.
- __ bind(&interceptor_failed);
- if (FLAG_debug_code) {
- __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
- }
-
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
-
- // Leave the internal frame.
- }
-
- GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- __ pop(scratch2()); // save old return address
- PushInterceptorArguments(masm(), receiver(), holder_reg,
- this->name(), interceptor_holder);
- __ push(scratch2()); // restore old return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
- isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
-}
-
-
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(edx, miss);
- CheckPrototypes(object, edx, holder, ebx, eax, edi, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- if (Serializer::enabled()) {
- __ mov(edi, Immediate(cell));
- __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
- } else {
- __ mov(edi, Operand::Cell(cell));
- }
-
- // Check that the cell contains the same function.
- if (isolate()->heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(edi, miss);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
- Immediate(Handle<SharedFunctionInfo>(function->shared())));
- } else {
- __ cmp(edi, Immediate(function));
- }
- __ j(not_equal, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
- name, &miss);
-
- GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
-
- // Check that the function really is a function.
- __ JumpIfSmi(edi, &miss);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
-
- if (argc == 0) {
- // Noop, return the length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- __ ret((argc + 1) * kPointerSize);
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- // Get the elements array of the object.
- __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &check_double);
-
- // Get the array's length into eax and calculate new length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(eax, Immediate(Smi::FromInt(argc)));
-
- // Get the elements' length into ecx.
- __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(eax, ecx);
- __ j(greater, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ mov(ecx, Operand(esp, argc * kPointerSize));
- __ JumpIfNotSmi(ecx, &with_write_barrier);
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- // Store the value.
- __ mov(FieldOperand(edi,
- eax,
- times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize),
- ecx);
-
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&check_double);
-
-
- // Check that the elements are in double mode.
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(factory()->fixed_double_array_map()));
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into eax and calculate new length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(eax, Immediate(Smi::FromInt(argc)));
-
- // Get the elements' length into ecx.
- __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(eax, ecx);
- __ j(greater, &call_builtin);
-
- __ mov(ecx, Operand(esp, argc * kPointerSize));
- __ StoreNumberToDoubleElements(
- ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize);
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&with_write_barrier);
-
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(ebx, &call_builtin);
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(factory()->heap_number_map()));
- __ j(equal, &call_builtin);
- // edi: elements array
- // edx: receiver
- // ebx: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- &try_holey_map);
-
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- // Restore edi.
- __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- ebx,
- edi,
- &call_builtin);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- // Restore edi.
- __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(ebx, &call_builtin);
- }
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- // Store the value.
- __ lea(edx, FieldOperand(edi,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ mov(Operand(edx, 0), ecx);
-
- __ RecordWrite(edi, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&attempt_to_grow_elements);
- if (!FLAG_inline_new) {
- __ jmp(&call_builtin);
- }
-
- __ mov(ebx, Operand(esp, argc * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(ebx, &no_fast_elements_check);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(ecx, &call_builtin, Label::kFar);
- __ bind(&no_fast_elements_check);
-
- // We could be lucky and the elements array could be at the top of
- // new-space. In this case we can just grow it in place by moving the
- // allocation pointer up.
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
- // Load top.
- __ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
-
- // Check if it's the end of elements.
- __ lea(edx, FieldOperand(edi,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmp(edx, ecx);
- __ j(not_equal, &call_builtin);
- __ add(ecx, Immediate(kAllocationDelta * kPointerSize));
- __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
- __ j(above, &call_builtin);
-
- // We fit and could grow elements.
- __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
-
- // Push the argument...
- __ mov(Operand(edx, 0), ebx);
- // ... and fill the rest with holes.
- for (int i = 1; i < kAllocationDelta; i++) {
- __ mov(Operand(edx, i * kPointerSize),
- Immediate(factory()->the_hole_value()));
- }
-
- // We know the elements array is in new space so we don't need the
- // remembered set, but we just pushed a value onto it so we may have to
- // tell the incremental marker to rescan the object that we just grew. We
- // don't need to worry about the holes because they are in old space and
- // already marked black.
- __ RecordWrite(edi, edx, ebx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
-
- // Restore receiver to edx as finish sequence assumes it's here.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Increment element's and array's sizes.
- __ add(FieldOperand(edi, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(kAllocationDelta)));
-
- // NOTE: This only happen in new-space, where we don't
- // care about the black-byte-count on pages. Otherwise we should
- // update that too if the object is black.
-
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate()),
- argc + 1,
- 1);
- }
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- Label miss, return_undefined, call_builtin;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
-
- // Get the elements array of the object.
- __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into ecx and calculate new length.
- __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
- __ sub(ecx, Immediate(Smi::FromInt(1)));
- __ j(negative, &return_undefined);
-
- // Get the last element.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(eax, FieldOperand(ebx,
- ecx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(eax, Immediate(factory()->the_hole_value()));
- __ j(equal, &call_builtin);
-
- // Set the array's length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), ecx);
-
- // Fill with the hole.
- __ mov(FieldOperand(ebx,
- ecx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- Immediate(factory()->the_hole_value()));
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&return_undefined);
- __ mov(eax, Immediate(factory()->undefined_value()));
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()),
- argc + 1,
- 1);
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
-
- Register receiver = ebx;
- Register index = edi;
- Register result = eax;
- __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
- } else {
- __ Set(index, Immediate(factory()->undefined_value()));
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ Set(eax, Immediate(factory()->nan_value()));
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in ecx.
- __ Set(ecx, Immediate(name));
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
-
- Register receiver = eax;
- Register index = edi;
- Register scratch = edx;
- Register result = eax;
- __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
- } else {
- __ Set(index, Immediate(factory()->undefined_value()));
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ Set(eax, Immediate(factory()->empty_string()));
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in ecx.
- __ Set(ecx, Immediate(name));
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = ebx;
- __ mov(code, Operand(esp, 1 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ and_(code, Immediate(Smi::FromInt(0xffff)));
-
- StringCharFromCodeGenerator generator(code, eax);
- generator.GenerateFast(masm());
- __ ret(2 * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- if (!CpuFeatures::IsSupported(SSE2)) {
- return Handle<Code>::null();
- }
-
- CpuFeatures::Scope use_sse2(SSE2);
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into eax.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Check if the argument is a smi.
- Label smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &smi);
-
- // Check if the argument is a heap number and load its value into xmm0.
- Label slow;
- __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
-
- // Check if the argument is strictly positive. Note this also
- // discards NaN.
- __ xorpd(xmm1, xmm1);
- __ ucomisd(xmm0, xmm1);
- __ j(below_equal, &slow);
-
- // Do a truncating conversion.
- __ cvttsd2si(eax, Operand(xmm0));
-
- // Check if the result fits into a smi. Note this also checks for
- // 0x80000000 which signals a failed conversion.
- Label wont_fit_into_smi;
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &wont_fit_into_smi);
-
- // Smi tag and return.
- __ SmiTag(eax);
- __ bind(&smi);
- __ ret(2 * kPointerSize);
-
- // Check if the argument is < 2^kMantissaBits.
- Label already_round;
- __ bind(&wont_fit_into_smi);
- __ LoadPowerOf2(xmm1, ebx, HeapNumber::kMantissaBits);
- __ ucomisd(xmm0, xmm1);
- __ j(above_equal, &already_round);
-
- // Save a copy of the argument.
- __ movaps(xmm2, xmm0);
-
- // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
- __ addsd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
-
- // Compare the argument and the tentative result to get the right mask:
- // if xmm2 < xmm0:
- // xmm2 = 1...1
- // else:
- // xmm2 = 0...0
- __ cmpltsd(xmm2, xmm0);
-
- // Subtract 1 if the argument was less than the tentative result.
- __ LoadPowerOf2(xmm1, ebx, 0);
- __ andpd(xmm1, xmm2);
- __ subsd(xmm0, xmm1);
-
- // Return a new heap number.
- __ AllocateHeapNumber(eax, ebx, edx, &slow);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(2 * kPointerSize);
-
- // Return the argument (when it's an already round heap number).
- __ bind(&already_round);
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ ret(2 * kPointerSize);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into eax.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(eax, &not_smi);
-
- // Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
- // otherwise.
- __ mov(ebx, eax);
- __ sar(ebx, kBitsPerInt - 1);
-
- // Do bitwise not or do nothing depending on ebx.
- __ xor_(eax, ebx);
-
- // Add 1 or do nothing depending on ebx.
- __ sub(eax, ebx);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ j(negative, &slow);
-
- // Smi case done.
- __ ret(2 * kPointerSize);
-
- // Check if the argument is a heap number and load its exponent and
- // sign into ebx.
- __ bind(&not_smi);
- __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ test(ebx, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative_sign);
- __ ret(2 * kPointerSize);
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ and_(ebx, ~HeapNumber::kSignMask);
- __ mov(ecx, FieldOperand(eax, HeapNumber::kMantissaOffset));
- __ AllocateHeapNumber(eax, edi, edx, &slow);
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ebx);
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- __ ret(2 * kPointerSize);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
-
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss_before_stack_reserved);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_const(), 1);
- __ IncrementCounter(counters->call_const_fast_api(), 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before calling any runtime function.
- __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, depth, &miss);
-
- // Move the return address on top of the stack.
- __ mov(eax, Operand(esp, 4 * kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), eax);
-
- // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
- // duplicate of return address and will be overwritten.
- GenerateFastApiCall(masm(), optimization, argc);
-
- __ bind(&miss);
- __ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
-
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(edx, &miss);
- }
-
- // Make sure that it's okay not to patch the on stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(isolate()->counters()->call_const(), 1);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax,
- edi, name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
- break;
-
- case STRING_CHECK:
- // Check that the object is a string.
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
- __ j(above_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
- break;
-
- case SYMBOL_CHECK:
- // Check that the object is a symbol.
- __ CmpObjectType(edx, SYMBOL_TYPE, eax);
- __ j(not_equal, &miss);
- break;
-
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(edx, &fast);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
- break;
- }
- case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ cmp(edx, factory()->true_value());
- __ j(equal, &fast);
- __ cmp(edx, factory()->false_value());
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
- break;
- }
- }
-
- __ jmp(success);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-}
-
-
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Handle<JSFunction> function) {
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label success;
-
- CompileHandlerFrontend(object, holder, name, check, &success);
- __ bind(&success);
- CompileHandlerBackend(function);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state_);
- compiler.Compile(masm(), object, holder, name, &lookup, edx, ebx, edi, eax,
- &miss);
-
- // Restore receiver.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the function really is a function.
- __ JumpIfSmi(eax, &miss);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Invoke the function.
- __ mov(edi, eax);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle load cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Set up the context (function already in edi).
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Generate store field code. Trashes the name register.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- edx, ecx, ebx, edi,
- &miss);
- // Handle store cache miss.
- __ bind(&miss);
- __ mov(ecx, Immediate(name)); // restore name
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
- // Check that the maps haven't changed, preserving the value register.
- __ push(eax);
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(receiver, edx, holder, ebx, eax, edi, name, &miss);
- __ pop(eax); // restore value
-
- // Stub never generated for non-global objects that require access checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-
- __ pop(ebx); // remove the return address
- __ push(edx); // receiver
- __ push(Immediate(callback)); // callback info
- __ push(ecx); // name
- __ push(eax); // value
- __ push(ebx); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ pop(eax);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(eax);
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- __ push(edx);
- __ push(eax);
- ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(eax);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed, preserving the name register.
- __ push(ecx);
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(receiver, edx, holder, ebx, ecx, edi, name, &miss);
- __ pop(ecx);
-
- GenerateStoreViaSetter(masm(), setter);
-
- __ bind(&miss);
- __ pop(ecx);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> receiver,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(edx, Handle<Map>(receiver->map()),
- &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(edx, ebx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-
- __ pop(ebx); // remove the return address
- __ push(edx); // receiver
- __ push(ecx); // name
- __ push(eax); // value
- __ push(Immediate(Smi::FromInt(strict_mode_)));
- __ push(ebx); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- __ j(not_equal, &miss);
-
- // Compute the cell operand to use.
- __ mov(ebx, Immediate(cell));
- Operand cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ cmp(cell_operand, factory()->the_hole_value());
- __ j(equal, &miss);
-
- // Store the value in the cell.
- __ mov(cell_operand, eax);
- // No write barrier here, because cells are always rescanned.
-
- // Return the value (register eax).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1);
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- // Generate store field code. Trashes the name register.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- edx, ecx, ebx, edi,
- &miss);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_store_field(), 1);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub =
- KeyedStoreElementStub(is_jsarray,
- elements_kind,
- grow_mode_).GetCode(isolate());
-
- __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
- __ JumpIfSmi(edx, &miss, Label::kNear);
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- // ebx: receiver->map().
- for (int i = 0; i < receiver_maps->length(); ++i) {
- __ cmp(edi, receiver_maps->at(i));
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i));
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- __ mov(ebx, Immediate(transitioned_maps->at(i)));
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
- __ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Handle<GlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
-
- __ bind(&success);
- // Return undefined if maps of the full prototype chain are still the
- // same and no global property with this name contains a value.
- __ mov(eax, isolate()->factory()->undefined_value());
- __ ret(0);
-
- // Return the generated code.
- return GetCode(Code::HANDLER_FRAGMENT, Code::NONEXISTENT, name);
-}
-
-
-Register* LoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
- return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
- return registers;
-}
-
-
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<String> name,
- Register name_reg,
- Label* miss) {
- __ cmp(name_reg, Immediate(name));
- __ j(not_equal, miss);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- __ push(edx);
- ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> global,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
- bool is_dont_delete) {
- Label success, miss;
-
- __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
- // Get the value from the cell.
- if (Serializer::enabled()) {
- __ mov(eax, Immediate(cell));
- __ mov(eax, FieldOperand(eax, JSGlobalPropertyCell::kValueOffset));
- } else {
- __ mov(eax, Operand::Cell(cell));
- }
-
- // Check for deleted property if property can actually be deleted.
- if (!is_dont_delete) {
- __ cmp(eax, factory()->the_hole_value());
- __ j(equal, &miss);
- } else if (FLAG_debug_code) {
- __ cmp(eax, factory()->the_hole_value());
- __ Check(not_equal, "DontDelete cells can't contain the hole");
- }
-
- HandlerFrontendFooter(&success, &miss);
- __ bind(&success);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1);
- // The code above already loads the result into the return register.
- __ ret(0);
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- ElementsKind elements_kind = receiver_map->elements_kind();
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements()) {
- Handle<Code> stub = KeyedLoadFastElementStub(
- receiver_map->instance_type() == JS_ARRAY_TYPE,
- elements_kind).GetCode(isolate());
- __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
- } else {
- Handle<Code> stub =
- KeyedLoadDictionaryElementStub().GetCode(isolate());
- __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
- }
-
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- Handle<String> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
- }
-
- __ JumpIfSmi(receiver(), &miss);
- Register map_reg = scratch1();
- __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
- for (int current = 0; current < receiver_count; ++current) {
- __ cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handlers->at(current));
- }
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), kind());
-
- // Return the generated code.
- InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(Code::IC_FRAGMENT, type, name, state);
-}
-
-
-// Specialized stub for constructing objects from functions which only have only
-// simple assignments of the form this.x = ...; in their body.
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- edi : constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_stub_call;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are jump to the generic constructor stub which calls the actual
- // code for the function thereby hitting the break points.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(ebx, factory()->undefined_value());
- __ j(not_equal, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // edi: constructor
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- __ JumpIfSmi(ebx, &generic_stub_call);
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ j(not_equal, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // ebx: initial map
- __ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
- __ Check(not_equal, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject on the heap by moving the new space allocation
- // top forward.
- // ebx: initial map
- ASSERT(function->has_initial_map());
- int instance_size = function->initial_map()->instance_size();
-#ifdef DEBUG
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ shl(ecx, kPointerSizeLog2);
- __ cmp(ecx, Immediate(instance_size));
- __ Check(equal, "Instance size of initial map changed.");
-#endif
- __ AllocateInNewSpace(instance_size, edx, ecx, no_reg,
- &generic_stub_call, NO_ALLOCATION_FLAGS);
-
- // Allocated the JSObject, now initialize the fields and add the heap tag.
- // ebx: initial map
- // edx: JSObject (untagged)
- __ mov(Operand(edx, JSObject::kMapOffset), ebx);
- __ mov(ebx, factory()->empty_fixed_array());
- __ mov(Operand(edx, JSObject::kPropertiesOffset), ebx);
- __ mov(Operand(edx, JSObject::kElementsOffset), ebx);
-
- // Push the allocated object to the stack. This is the object that will be
- // returned (after it is tagged).
- __ push(edx);
-
- // eax: argc
- // edx: JSObject (untagged)
- // Load the address of the first in-object property into edx.
- __ lea(edx, Operand(edx, JSObject::kHeaderSize));
- // Calculate the location of the first argument. The stack contains the
- // allocated object and the return address on top of the argc arguments.
- __ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize));
-
- // Use edi for holding undefined which is used in several places below.
- __ mov(edi, factory()->undefined_value());
-
- // eax: argc
- // ecx: first argument
- // edx: first in-object property of the JSObject
- // edi: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- // Check if the argument assigned to the property is actually passed.
- // If argument is not passed the property is set to undefined,
- // otherwise find it on the stack.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ mov(ebx, edi);
- __ cmp(eax, arg_number);
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatures::Scope use_cmov(CMOV);
- __ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize));
- } else {
- Label not_passed;
- __ j(below_equal, &not_passed);
- __ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
- __ bind(&not_passed);
- }
- // Store value in the property.
- __ mov(Operand(edx, i * kPointerSize), ebx);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
- isolate());
- __ mov(Operand(edx, i * kPointerSize), Immediate(constant));
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ mov(Operand(edx, i * kPointerSize), edi);
- }
-
- // Move argc to ebx and retrieve and tag the JSObject to return.
- __ mov(ebx, eax);
- __ pop(eax);
- __ or_(eax, Immediate(kHeapObjectTag));
-
- // Remove caller arguments and receiver from the stack and return.
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
- __ push(ecx);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- __ IncrementCounter(counters->constructed_objects_stub(), 1);
- __ ret(0);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(code, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
- __ JumpIfNotSmi(ecx, &miss_force_generic);
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-
- // Push receiver on the stack to free up a register for the dictionary
- // probing.
- __ push(edx);
- __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
- // Pop receiver before returning.
- __ pop(edx);
- __ ret(0);
-
- __ bind(&slow);
- __ pop(edx);
-
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Handle<Code> miss_force_generic_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_force_generic_ic, RelocInfo::CODE_TARGET);
-}
-
-
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch,
- XMMRegister xmm_scratch0,
- XMMRegister xmm_scratch1,
- Label* fail) {
- // Check that key is a smi and if SSE2 is available a heap number
- // containing a smi and branch if the check fails.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- Label key_ok;
- __ JumpIfSmi(key, &key_ok);
- __ cmp(FieldOperand(key, HeapObject::kMapOffset),
- Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
- __ j(not_equal, fail);
- __ movdbl(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
- __ cvttsd2si(scratch, Operand(xmm_scratch0));
- __ cvtsi2sd(xmm_scratch1, scratch);
- __ ucomisd(xmm_scratch1, xmm_scratch0);
- __ j(not_equal, fail);
- __ j(parity_even, fail); // NaN.
- // Check if the key fits in the smi range.
- __ cmp(scratch, 0xc0000000);
- __ j(sign, fail);
- __ SmiTag(scratch);
- __ mov(key, scratch);
- __ bind(&key_ok);
- } else {
- __ JumpIfNotSmi(key, fail);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, slow, check_heap_number;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(ecx, FieldOperand(edi, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- __ JumpIfNotSmi(eax, &slow);
- } else {
- __ JumpIfNotSmi(eax, &check_heap_number);
- }
-
- // smi case
- __ mov(ebx, eax); // Preserve the value in eax as the return value.
- __ SmiUntag(ebx);
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // edi: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- __ ClampUint8(ebx);
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ push(ebx);
- __ fild_s(Operand(esp, 0));
- __ pop(ebx);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ fstp_s(Operand(edi, ecx, times_2, 0));
- } else { // elements_kind == EXTERNAL_DOUBLE_ELEMENTS.
- __ fstp_d(Operand(edi, ecx, times_4, 0));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0); // Return the original value.
-
- // TODO(danno): handle heap number -> pixel array conversion
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- __ bind(&check_heap_number);
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(not_equal, &slow);
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // edi: base pointer of external storage
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fstp_s(Operand(edi, ecx, times_2, 0));
- __ ret(0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fstp_d(Operand(edi, ecx, times_4, 0));
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
-
- // For the moment we make the slow call to the runtime on
- // processors that don't support SSE2. The code in IntegerConvert
- // (code-stubs-ia32.cc) is roughly what is needed here though the
- // conversion failure case does not need to be handled.
- if (CpuFeatures::IsSupported(SSE2)) {
- if ((elements_kind == EXTERNAL_INT_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) &&
- CpuFeatures::IsSupported(SSE3)) {
- CpuFeatures::Scope scope(SSE3);
- // fisttp stores values as signed integers. To represent the
- // entire range of int and unsigned int arrays, store as a
- // 64-bit int and discard the high 32 bits.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ sub(esp, Immediate(2 * kPointerSize));
- __ fisttp_d(Operand(esp, 0));
-
- // If conversion failed (NaN, infinity, or a number outside
- // signed int64 range), the result is 0x8000000000000000, and
- // we must handle this case in the runtime.
- Label ok;
- __ cmp(Operand(esp, kPointerSize), Immediate(0x80000000u));
- __ j(not_equal, &ok);
- __ cmp(Operand(esp, 0), Immediate(0));
- __ j(not_equal, &ok);
- __ add(esp, Immediate(2 * kPointerSize)); // Restore the stack.
- __ jmp(&slow);
-
- __ bind(&ok);
- __ pop(ebx);
- __ add(esp, Immediate(kPointerSize));
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- } else {
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatures::Scope scope(SSE2);
- __ cvttsd2si(ebx, FieldOperand(eax, HeapNumber::kValueOffset));
- __ cmp(ebx, 0x80000000u);
- __ j(equal, &slow);
- // ebx: untagged integer value
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- __ ClampUint8(ebx);
- // Fall through.
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- __ ret(0); // Return original value.
- }
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, grow, slow, transition_elements_kind;
- Label check_capacity, prepare_slow, finish_store, commit_backing_store;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(eax, &transition_elements_kind);
- }
-
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- if (is_js_array) {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
- __ j(above_equal, &miss_force_generic);
- }
-
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ j(not_equal, &miss_force_generic);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- // ecx is a smi, use times_half_pointer_size instead of
- // times_pointer_size
- __ mov(FieldOperand(edi,
- ecx,
- times_half_pointer_size,
- FixedArray::kHeaderSize), eax);
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- // Do the store and update the write barrier.
- // ecx is a smi, use times_half_pointer_size instead of
- // times_pointer_size
- __ lea(ecx, FieldOperand(edi,
- ecx,
- times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(Operand(ecx, 0), eax);
- // Make sure to preserve the value in register eax.
- __ mov(ebx, eax);
- __ RecordWrite(edi, ecx, ebx, kDontSaveFPRegs);
- }
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic_force_generic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- // Handle transition to other elements kinds without using the generic stub.
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Handle transition requiring the array to grow.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags are already set by previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(not_equal, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
- // Restore the key, which is known to be the array length.
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ mov(FieldOperand(edi, JSObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ mov(ebx, Immediate(masm->isolate()->factory()->the_hole_value()));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ mov(FieldOperand(edi, FixedArray::SizeFor(i)), ebx);
- }
-
- // Store the element at index zero.
- __ mov(FieldOperand(edi, FixedArray::SizeFor(0)), eax);
-
- // Install the new backing store in the JSArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
- __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ ret(0);
-
- __ bind(&check_capacity);
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_cow_array_map()));
- __ j(equal, &miss_force_generic);
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&finish_store);
-
- __ bind(&prepare_slow);
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label check_capacity, prepare_slow, finish_store, commit_backing_store;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(edi);
-
- if (is_js_array) {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
- __ j(above_equal, &miss_force_generic);
- }
-
- __ bind(&finish_store);
- __ StoreNumberToDoubleElements(eax, edi, ecx, edx, xmm0,
- &transition_elements_kind, true);
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic_force_generic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- // Handle transition to other elements kinds without using the generic stub.
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Handle transition requiring the array to grow.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags are already set by previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(eax, &value_is_smi);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
- __ j(not_equal, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(not_equal, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
-
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Initialize the new FixedDoubleArray.
- __ mov(FieldOperand(edi, JSObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_double_array_map()));
- __ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
- Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
-
- __ StoreNumberToDoubleElements(eax, edi, ecx, ebx, xmm0,
- &transition_elements_kind, true);
-
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(FieldOperand(edi, offset), Immediate(kHoleNanLower32));
- __ mov(FieldOperand(edi, offset + kPointerSize),
- Immediate(kHoleNanUpper32));
- }
-
- // Install the new backing store in the JSArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
- __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ ret(0);
-
- __ bind(&check_capacity);
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmp(ecx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&finish_store);
-
- __ bind(&prepare_slow);
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ic-inl.h b/src/3rdparty/v8/src/ic-inl.h
deleted file mode 100644
index 9439792..0000000
--- a/src/3rdparty/v8/src/ic-inl.h
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IC_INL_H_
-#define V8_IC_INL_H_
-
-#include "ic.h"
-
-#include "compiler.h"
-#include "debug.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address IC::address() const {
- // Get the address of the call.
- Address result = Assembler::target_address_from_return_address(pc());
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ASSERT(Isolate::Current() == isolate());
- Debug* debug = isolate()->debug();
- // First check if any break points are active if not just return the address
- // of the call.
- if (!debug->has_break_points()) return result;
-
- // At least one break point is active perform additional test to ensure that
- // break point locations are updated correctly.
- if (debug->IsDebugBreak(Assembler::target_address_at(result))) {
- // If the call site is a call to debug break then return the address in
- // the original code instead of the address in the running code. This will
- // cause the original code to be updated and keeps the breakpoint active in
- // the running code.
- return OriginalCodeAddress();
- } else {
- // No break point here just return the address of the call.
- return result;
- }
-#else
- return result;
-#endif
-}
-
-
-Code* IC::GetTargetAtAddress(Address address) {
- // Get the target address of the IC.
- Address target = Assembler::target_address_at(address);
- // Convert target address to the code object. Code::GetCodeFromTargetAddress
- // is safe for use during GC where the map might be marked.
- Code* result = Code::GetCodeFromTargetAddress(target);
- ASSERT(result->is_inline_cache_stub());
- return result;
-}
-
-
-void IC::SetTargetAtAddress(Address address, Code* target) {
- ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
- Heap* heap = target->GetHeap();
- Code* old_target = GetTargetAtAddress(address);
-#ifdef DEBUG
- // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
- // ICs as strict mode. The strict-ness of the IC must be preserved.
- if (old_target->kind() == Code::STORE_IC ||
- old_target->kind() == Code::KEYED_STORE_IC) {
- ASSERT(Code::GetStrictMode(old_target->extra_ic_state()) ==
- Code::GetStrictMode(target->extra_ic_state()));
- }
-#endif
- Assembler::set_target_address_at(address, target->instruction_start());
- if (heap->gc_state() == Heap::MARK_COMPACT) {
- heap->mark_compact_collector()->RecordCodeTargetPatch(address, target);
- } else {
- heap->incremental_marking()->RecordCodeTargetPatch(address, target);
- }
- PostPatching(address, target, old_target);
-}
-
-
-InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
- JSObject* holder) {
- if (object->IsJSObject()) {
- return GetCodeCacheForObject(JSObject::cast(object), holder);
- }
- // If the object is a value, we use the prototype map for the cache.
- ASSERT(object->IsString() || object->IsSymbol() ||
- object->IsNumber() || object->IsBoolean());
- return DELEGATE_MAP;
-}
-
-
-InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
- JSObject* holder) {
- // Fast-properties and global objects store stubs in their own maps.
- // Slow properties objects use prototype's map (unless the property is its own
- // when holder == object). It works because slow properties objects having
- // the same prototype (or a prototype with the same map) and not having
- // the property are interchangeable for such a stub.
- if (holder != object &&
- !object->HasFastProperties() &&
- !object->IsJSGlobalProxy() &&
- !object->IsJSGlobalObject()) {
- return DELEGATE_MAP;
- }
- return OWN_MAP;
-}
-
-
-JSObject* IC::GetCodeCacheHolder(Isolate* isolate,
- Object* object,
- InlineCacheHolderFlag holder) {
- Object* map_owner = holder == OWN_MAP ? object : object->GetDelegate(isolate);
- ASSERT(map_owner->IsJSObject());
- return JSObject::cast(map_owner);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_IC_INL_H_
diff --git a/src/3rdparty/v8/src/ic.cc b/src/3rdparty/v8/src/ic.cc
deleted file mode 100644
index a9163db..0000000
--- a/src/3rdparty/v8/src/ic.cc
+++ /dev/null
@@ -1,2655 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "arguments.h"
-#include "codegen.h"
-#include "execution.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-char IC::TransitionMarkFromState(IC::State state) {
- switch (state) {
- case UNINITIALIZED: return '0';
- case PREMONOMORPHIC: return '.';
- case MONOMORPHIC: return '1';
- case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
- case POLYMORPHIC: return 'P';
- case MEGAMORPHIC: return 'N';
- case GENERIC: return 'G';
-
- // We never see the debugger states here, because the state is
- // computed from the original code - not the patched code. Let
- // these cases fall through to the unreachable code below.
- case DEBUG_STUB: break;
- }
- UNREACHABLE();
- return 0;
-}
-
-void IC::TraceIC(const char* type,
- Handle<Object> name,
- State old_state,
- Code* new_target) {
- if (FLAG_trace_ic) {
- Object* undef = new_target->GetHeap()->undefined_value();
- State new_state = StateFrom(new_target, undef, undef);
- PrintF("[%s in ", type);
- Isolate* isolate = new_target->GetIsolate();
- StackFrameIterator it(isolate);
- while (it.frame()->fp() != this->fp()) it.Advance();
- StackFrame* raw_frame = it.frame();
- if (raw_frame->is_internal()) {
- Code* apply_builtin = isolate->builtins()->builtin(
- Builtins::kFunctionApply);
- if (raw_frame->unchecked_code() == apply_builtin) {
- PrintF("apply from ");
- it.Advance();
- raw_frame = it.frame();
- }
- }
- JavaScriptFrame::PrintTop(isolate, stdout, false, true);
- bool new_can_grow =
- Code::GetKeyedAccessGrowMode(new_target->extra_ic_state()) ==
- ALLOW_JSARRAY_GROWTH;
- PrintF(" (%c->%c%s)",
- TransitionMarkFromState(old_state),
- TransitionMarkFromState(new_state),
- new_can_grow ? ".GROW" : "");
- name->Print();
- PrintF("]\n");
- }
-}
-
-#define TRACE_GENERIC_IC(isolate, type, reason) \
- do { \
- if (FLAG_trace_ic) { \
- PrintF("[%s patching generic stub in ", type); \
- JavaScriptFrame::PrintTop(isolate, stdout, false, true); \
- PrintF(" (%s)]\n", reason); \
- } \
- } while (false)
-
-#else
-#define TRACE_GENERIC_IC(isolate, type, reason)
-#endif // DEBUG
-
-#define TRACE_IC(type, name, old_state, new_target) \
- ASSERT((TraceIC(type, name, old_state, new_target), true))
-
-IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
- // To improve the performance of the (much used) IC code, we unfold a few
- // levels of the stack frame iteration code. This yields a ~35% speedup when
- // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
- const Address entry =
- Isolate::c_entry_fp(isolate->thread_local_top());
- Address* pc_address =
- reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
- Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- // If there's another JavaScript frame on the stack or a
- // StubFailureTrampoline, we need to look one frame further down the stack to
- // find the frame pointer and the return address stack slot.
- if (depth == EXTRA_CALL_FRAME) {
- const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
- pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
- fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
- }
-#ifdef DEBUG
- StackFrameIterator it(isolate);
- for (int i = 0; i < depth + 1; i++) it.Advance();
- StackFrame* frame = it.frame();
- ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
-#endif
- fp_ = fp;
- pc_address_ = pc_address;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Address IC::OriginalCodeAddress() const {
- HandleScope scope(isolate());
- // Compute the JavaScript frame for the frame pointer of this IC
- // structure. We need this to be able to find the function
- // corresponding to the frame.
- StackFrameIterator it(isolate());
- while (it.frame()->fp() != this->fp()) it.Advance();
- JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
- // Find the function on the stack and both the active code for the
- // function and the original code.
- JSFunction* function = JSFunction::cast(frame->function());
- Handle<SharedFunctionInfo> shared(function->shared());
- Code* code = shared->code();
- ASSERT(Debug::HasDebugInfo(shared));
- Code* original_code = Debug::GetDebugInfo(shared)->original_code();
- ASSERT(original_code->IsCode());
- // Get the address of the call site in the active code. This is the
- // place where the call to DebugBreakXXX is and where the IC
- // normally would be.
- Address addr = Assembler::target_address_from_return_address(pc());
- // Return the address in the original code. This is the place where
- // the call which has been overwritten by the DebugBreakXXX resides
- // and the place where the inline cache system should look.
- intptr_t delta =
- original_code->instruction_start() - code->instruction_start();
- return addr + delta;
-}
-#endif
-
-
-static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
- Object* receiver,
- Object* name) {
- InlineCacheHolderFlag cache_holder =
- Code::ExtractCacheHolderFromFlags(target->flags());
-
- Isolate* isolate = target->GetIsolate();
- if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
- // The stub was generated for JSObject but called for non-JSObject.
- // IC::GetCodeCacheHolder is not applicable.
- return false;
- } else if (cache_holder == DELEGATE_MAP &&
- receiver->GetPrototype(isolate)->IsNull()) {
- // IC::GetCodeCacheHolder is not applicable.
- return false;
- }
- Map* map = IC::GetCodeCacheHolder(isolate, receiver, cache_holder)->map();
-
- // Decide whether the inline cache failed because of changes to the
- // receiver itself or changes to one of its prototypes.
- //
- // If there are changes to the receiver itself, the map of the
- // receiver will have changed and the current target will not be in
- // the receiver map's code cache. Therefore, if the current target
- // is in the receiver map's code cache, the inline cache failed due
- // to prototype check failure.
- int index = map->IndexInCodeCache(name, target);
- if (index >= 0) {
- map->RemoveFromCodeCache(String::cast(name), target, index);
- return true;
- }
-
- return false;
-}
-
-
-IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
- IC::State state = target->ic_state();
-
- if (state != MONOMORPHIC || !name->IsString()) return state;
- if (receiver->IsUndefined() || receiver->IsNull()) return state;
-
- // For keyed load/store/call, the most likely cause of cache failure is
- // that the key has changed. We do not distinguish between
- // prototype and non-prototype failures for keyed access.
- Code::Kind kind = target->kind();
- if (kind == Code::KEYED_LOAD_IC ||
- kind == Code::KEYED_STORE_IC ||
- kind == Code::KEYED_CALL_IC) {
- return MONOMORPHIC;
- }
-
- // Remove the target from the code cache if it became invalid
- // because of changes in the prototype chain to avoid hitting it
- // again.
- // Call stubs handle this later to allow extra IC state
- // transitions.
- if (kind != Code::CALL_IC &&
- TryRemoveInvalidPrototypeDependentStub(target, receiver, name)) {
- return MONOMORPHIC_PROTOTYPE_FAILURE;
- }
-
- // The builtins object is special. It only changes when JavaScript
- // builtins are loaded lazily. It is important to keep inline
- // caches for the builtins object monomorphic. Therefore, if we get
- // an inline cache miss for the builtins object after lazily loading
- // JavaScript builtins, we return uninitialized as the state to
- // force the inline cache back to monomorphic state.
- if (receiver->IsJSBuiltinsObject()) {
- return UNINITIALIZED;
- }
-
- return MONOMORPHIC;
-}
-
-
-RelocInfo::Mode IC::ComputeMode() {
- Address addr = address();
- Code* code = Code::cast(isolate()->heap()->FindCodeObject(addr));
- for (RelocIterator it(code, RelocInfo::kCodeTargetMask);
- !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->pc() == addr) return info->rmode();
- }
- UNREACHABLE();
- return RelocInfo::NONE32;
-}
-
-
-Failure* IC::TypeError(const char* type,
- Handle<Object> object,
- Handle<Object> key) {
- HandleScope scope(isolate());
- Handle<Object> args[2] = { key, object };
- Handle<Object> error = isolate()->factory()->NewTypeError(
- type, HandleVector(args, 2));
- return isolate()->Throw(*error);
-}
-
-
-Failure* IC::ReferenceError(const char* type, Handle<String> name) {
- HandleScope scope(isolate());
- Handle<Object> error = isolate()->factory()->NewReferenceError(
- type, HandleVector(&name, 1));
- return isolate()->Throw(*error);
-}
-
-
-static int ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state) {
- bool was_uninitialized =
- old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
- bool is_uninitialized =
- new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
- return (was_uninitialized && !is_uninitialized) ? 1 :
- (!was_uninitialized && is_uninitialized) ? -1 : 0;
-}
-
-
-void IC::PostPatching(Address address, Code* target, Code* old_target) {
- if (FLAG_type_info_threshold == 0 && !FLAG_watch_ic_patching) {
- return;
- }
- Isolate* isolate = target->GetHeap()->isolate();
- Code* host = isolate->
- inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
- if (host->kind() != Code::FUNCTION) return;
-
- if (FLAG_type_info_threshold > 0 &&
- old_target->is_inline_cache_stub() &&
- target->is_inline_cache_stub()) {
- int delta = ComputeTypeInfoCountDelta(old_target->ic_state(),
- target->ic_state());
- // Not all Code objects have TypeFeedbackInfo.
- if (host->type_feedback_info()->IsTypeFeedbackInfo() && delta != 0) {
- TypeFeedbackInfo* info =
- TypeFeedbackInfo::cast(host->type_feedback_info());
- info->change_ic_with_type_info_count(delta);
- }
- }
- if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
- TypeFeedbackInfo* info =
- TypeFeedbackInfo::cast(host->type_feedback_info());
- info->change_own_type_change_checksum();
- }
- if (FLAG_watch_ic_patching) {
- host->set_profiler_ticks(0);
- isolate->runtime_profiler()->NotifyICChanged();
- }
- // TODO(2029): When an optimized function is patched, it would
- // be nice to propagate the corresponding type information to its
- // unoptimized version for the benefit of later inlining.
-}
-
-
-void IC::Clear(Address address) {
- Code* target = GetTargetAtAddress(address);
-
- // Don't clear debug break inline cache as it will remove the break point.
- if (target->is_debug_break()) return;
-
- switch (target->kind()) {
- case Code::LOAD_IC: return LoadIC::Clear(address, target);
- case Code::KEYED_LOAD_IC: return KeyedLoadIC::Clear(address, target);
- case Code::STORE_IC: return StoreIC::Clear(address, target);
- case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
- case Code::CALL_IC: return CallIC::Clear(address, target);
- case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
- case Code::COMPARE_IC: return CompareIC::Clear(address, target);
- case Code::UNARY_OP_IC:
- case Code::BINARY_OP_IC:
- case Code::TO_BOOLEAN_IC:
- // Clearing these is tricky and does not
- // make any performance difference.
- return;
- default: UNREACHABLE();
- }
-}
-
-
-void CallICBase::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
- bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
- Code* code =
- Isolate::Current()->stub_cache()->FindCallInitialize(
- target->arguments_count(),
- contextual ? RelocInfo::CODE_TARGET_CONTEXT : RelocInfo::CODE_TARGET,
- target->kind());
- SetTargetAtAddress(address, code);
-}
-
-
-void KeyedLoadIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
- // Make sure to also clear the map used in inline fast cases. If we
- // do not clear these maps, cached code can keep objects alive
- // through the embedded maps.
- SetTargetAtAddress(address, *initialize_stub());
-}
-
-
-void LoadIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
- SetTargetAtAddress(address, *initialize_stub());
-}
-
-
-void StoreIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
- SetTargetAtAddress(address,
- (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? *initialize_stub_strict()
- : *initialize_stub());
-}
-
-
-void KeyedStoreIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
- SetTargetAtAddress(address,
- (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? *initialize_stub_strict()
- : *initialize_stub());
-}
-
-
-void CompareIC::Clear(Address address, Code* target) {
- ASSERT(target->major_key() == CodeStub::CompareIC);
- CompareIC::State handler_state;
- Token::Value op;
- ICCompareStub::DecodeMinorKey(target->stub_info(), NULL, NULL,
- &handler_state, &op);
- // Only clear CompareICs that can retain objects.
- if (handler_state != KNOWN_OBJECT) return;
- SetTargetAtAddress(address, GetRawUninitialized(op));
- PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
-}
-
-
-static bool HasInterceptorGetter(JSObject* object) {
- return !object->GetNamedInterceptor()->getter()->IsUndefined();
-}
-
-
-static void LookupForRead(Handle<Object> object,
- Handle<String> name,
- LookupResult* lookup) {
- // Skip all the objects with named interceptors, but
- // without actual getter.
- while (true) {
- object->Lookup(*name, lookup);
- // Besides normal conditions (property not found or it's not
- // an interceptor), bail out if lookup is not cacheable: we won't
- // be able to IC it anyway and regular lookup should work fine.
- if (!lookup->IsInterceptor() || !lookup->IsCacheable()) {
- return;
- }
-
- Handle<JSObject> holder(lookup->holder());
- if (HasInterceptorGetter(*holder)) {
- return;
- }
-
- holder->LocalLookupRealNamedProperty(*name, lookup);
- if (lookup->IsFound()) {
- ASSERT(!lookup->IsInterceptor());
- return;
- }
-
- Handle<Object> proto(holder->GetPrototype(), name->GetIsolate());
- if (proto->IsNull()) {
- ASSERT(!lookup->IsFound());
- return;
- }
-
- object = proto;
- }
-}
-
-
-Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
- Handle<Object> delegate = Execution::GetFunctionDelegate(object);
-
- if (delegate->IsJSFunction() && !object->IsJSFunctionProxy()) {
- // Patch the receiver and use the delegate as the function to
- // invoke. This is used for invoking objects as if they were functions.
- const int argc = target()->arguments_count();
- StackFrameLocator locator(isolate());
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- int index = frame->ComputeExpressionsCount() - (argc + 1);
- frame->SetExpression(index, *object);
- }
-
- return delegate;
-}
-
-
-void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
- Handle<Object> object) {
- while (callee->IsJSFunctionProxy()) {
- callee = Handle<Object>(JSFunctionProxy::cast(*callee)->call_trap(),
- isolate());
- }
-
- if (callee->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(callee);
- if (!function->shared()->is_classic_mode() || function->IsBuiltin()) {
- // Do not wrap receiver for strict mode functions or for builtins.
- return;
- }
- }
-
- // And only wrap string, number or boolean.
- if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- // Change the receiver to the result of calling ToObject on it.
- const int argc = this->target()->arguments_count();
- StackFrameLocator locator(isolate());
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- int index = frame->ComputeExpressionsCount() - (argc + 1);
- frame->SetExpression(index, *isolate()->factory()->ToObject(object));
- }
-}
-
-
-MaybeObject* CallICBase::LoadFunction(State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- Handle<String> name) {
- // If the object is undefined or null it's illegal to try to get any
- // of its properties; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_call", object, name);
- }
-
- // Check if the name is trivially convertible to an index and get
- // the element if so.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- Handle<Object> result = Object::GetElement(object, index);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- if (result->IsJSFunction()) return *result;
-
- // Try to find a suitable function delegate for the object at hand.
- result = TryCallAsFunction(result);
- if (result->IsJSFunction()) return *result;
-
- // Otherwise, it will fail in the lookup step.
- }
-
- // Lookup the property in the object.
- LookupResult lookup(isolate());
- LookupForRead(object, name, &lookup);
-
- if (!lookup.IsFound()) {
- // If the object does not have the requested property, check which
- // exception we need to throw.
- return IsUndeclaredGlobal(object)
- ? ReferenceError("not_defined", name)
- : TypeError("undefined_method", object, name);
- }
-
- // Lookup is valid: Update inline cache and stub cache.
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, extra_ic_state, object, name);
- }
-
- // Get the property.
- PropertyAttributes attr;
- Handle<Object> result =
- Object::GetProperty(object, object, &lookup, name, &attr);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
-
- if (lookup.IsInterceptor() && attr == ABSENT) {
- // If the object does not have the requested property, check which
- // exception we need to throw.
- return IsUndeclaredGlobal(object)
- ? ReferenceError("not_defined", name)
- : TypeError("undefined_method", object, name);
- }
-
- ASSERT(!result->IsTheHole());
-
- // Make receiver an object if the callee requires it. Strict mode or builtin
- // functions do not wrap the receiver, non-strict functions and objects
- // called as functions do.
- ReceiverToObjectIfRequired(result, object);
-
- if (result->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(result);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Handle stepping into a function if step into is active.
- Debug* debug = isolate()->debug();
- if (debug->StepInActive()) {
- // Protect the result in a handle as the debugger can allocate and might
- // cause GC.
- debug->HandleStepIn(function, object, fp(), false);
- }
-#endif
- return *function;
- }
-
- // Try to find a suitable function delegate for the object at hand.
- result = TryCallAsFunction(result);
- if (result->IsJSFunction()) return *result;
-
- return TypeError("property_not_function", object, name);
-}
-
-
-bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
- Handle<Object> object,
- Code::ExtraICState* extra_ic_state) {
- ASSERT(kind_ == Code::CALL_IC);
- if (lookup->type() != CONSTANT_FUNCTION) return false;
- JSFunction* function = lookup->GetConstantFunction();
- if (!function->shared()->HasBuiltinFunctionId()) return false;
-
- // Fetch the arguments passed to the called function.
- const int argc = target()->arguments_count();
- Address entry = isolate()->c_entry_fp(isolate()->thread_local_top());
- Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- Arguments args(argc + 1,
- &Memory::Object_at(fp +
- StandardFrameConstants::kCallerSPOffset +
- argc * kPointerSize));
- switch (function->shared()->builtin_function_id()) {
- case kStringCharCodeAt:
- case kStringCharAt:
- if (object->IsString()) {
- String* string = String::cast(*object);
- // Check there's the right string value or wrapper in the receiver slot.
- ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
- // If we're in the default (fastest) state and the index is
- // out of bounds, update the state to record this fact.
- if (StringStubState::decode(*extra_ic_state) == DEFAULT_STRING_STUB &&
- argc >= 1 && args[1]->IsNumber()) {
- double index = DoubleToInteger(args.number_at(1));
- if (index < 0 || index >= string->length()) {
- *extra_ic_state =
- StringStubState::update(*extra_ic_state,
- STRING_INDEX_OUT_OF_BOUNDS);
- return true;
- }
- }
- }
- break;
- default:
- return false;
- }
- return false;
-}
-
-
-Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_state,
- Handle<Object> object,
- Handle<String> name) {
- int argc = target()->arguments_count();
- Handle<JSObject> holder(lookup->holder());
- switch (lookup->type()) {
- case FIELD: {
- PropertyIndex index = lookup->GetFieldIndex();
- return isolate()->stub_cache()->ComputeCallField(
- argc, kind_, extra_state, name, object, holder, index);
- }
- case CONSTANT_FUNCTION: {
- // Get the constant function and compute the code stub for this
- // call; used for rewriting to monomorphic state and making sure
- // that the code stub is in the stub cache.
- Handle<JSFunction> function(lookup->GetConstantFunction());
- return isolate()->stub_cache()->ComputeCallConstant(
- argc, kind_, extra_state, name, object, holder, function);
- }
- case NORMAL: {
- // If we return a null handle, the IC will not be patched.
- if (!object->IsJSObject()) return Handle<Code>::null();
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- if (holder->IsGlobalObject()) {
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
- if (!cell->value()->IsJSFunction()) return Handle<Code>::null();
- Handle<JSFunction> function(JSFunction::cast(cell->value()));
- return isolate()->stub_cache()->ComputeCallGlobal(
- argc, kind_, extra_state, name, receiver, global, cell, function);
- } else {
- // There is only one shared stub for calling normalized
- // properties. It does not traverse the prototype chain, so the
- // property must be found in the receiver for the stub to be
- // applicable.
- if (!holder.is_identical_to(receiver)) return Handle<Code>::null();
- return isolate()->stub_cache()->ComputeCallNormal(
- argc, kind_, extra_state, IsQmlGlobal(holder));
- }
- break;
- }
- case INTERCEPTOR:
- ASSERT(HasInterceptorGetter(*holder));
- return isolate()->stub_cache()->ComputeCallInterceptor(
- argc, kind_, extra_state, name, object, holder);
- default:
- return Handle<Code>::null();
- }
-}
-
-
-void CallICBase::UpdateCaches(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- Handle<String> name) {
- // Bail out if we didn't find a result.
- if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
-
- // Compute the number of arguments.
- int argc = target()->arguments_count();
- Handle<Code> code;
- if (state == UNINITIALIZED) {
- // This is the first time we execute this inline cache.
- // Set the target to the pre monomorphic stub to delay
- // setting the monomorphic state.
- code = isolate()->stub_cache()->ComputeCallPreMonomorphic(
- argc, kind_, extra_ic_state);
- } else if (state == MONOMORPHIC) {
- if (kind_ == Code::CALL_IC &&
- TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
- code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
- object, name);
- } else if (kind_ == Code::CALL_IC &&
- TryRemoveInvalidPrototypeDependentStub(target(),
- *object,
- *name)) {
- state = MONOMORPHIC_PROTOTYPE_FAILURE;
- code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
- object, name);
- } else {
- code = isolate()->stub_cache()->ComputeCallMegamorphic(
- argc, kind_, extra_ic_state);
- }
- } else {
- code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
- object, name);
- }
-
- // If there's no appropriate stub we simply avoid updating the caches.
- if (code.is_null()) return;
-
- // Patch the call site depending on the state of the cache.
- switch (state) {
- case UNINITIALIZED:
- case MONOMORPHIC_PROTOTYPE_FAILURE:
- case PREMONOMORPHIC:
- set_target(*code);
- break;
- case MONOMORPHIC:
- if (code->ic_state() != MONOMORPHIC) {
- Map* map = target()->FindFirstMap();
- if (map != NULL) {
- UpdateMegamorphicCache(map, *name, target());
- }
- }
- set_target(*code);
- break;
- case MEGAMORPHIC: {
- // Cache code holding map should be consistent with
- // GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
- Handle<JSObject> cache_object = object->IsJSObject()
- ? Handle<JSObject>::cast(object)
- : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())));
- // Update the stub cache.
- UpdateMegamorphicCache(cache_object->map(), *name, *code);
- break;
- }
- case DEBUG_STUB:
- break;
- case POLYMORPHIC:
- case GENERIC:
- UNREACHABLE();
- break;
- }
-
- TRACE_IC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
- name, state, target());
-}
-
-
-MaybeObject* KeyedCallIC::LoadFunction(State state,
- Handle<Object> object,
- Handle<Object> key) {
- if (key->IsInternalizedString()) {
- return CallICBase::LoadFunction(state,
- Code::kNoExtraICState,
- object,
- Handle<String>::cast(key));
- }
-
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_call", object, key);
- }
-
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
- ASSERT(!(use_ic && object->IsJSGlobalProxy()));
-
- if (use_ic && state != MEGAMORPHIC) {
- int argc = target()->arguments_count();
- Handle<Code> stub = isolate()->stub_cache()->ComputeCallMegamorphic(
- argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = isolate()->stub_cache()->ComputeCallArguments(argc);
- }
- }
- ASSERT(!stub.is_null());
- set_target(*stub);
- TRACE_IC("KeyedCallIC", key, state, target());
- }
-
- Handle<Object> result = GetProperty(isolate(), object, key);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
-
- // Make receiver an object if the callee requires it. Strict mode or builtin
- // functions do not wrap the receiver, non-strict functions and objects
- // called as functions do.
- ReceiverToObjectIfRequired(result, object);
- if (result->IsJSFunction()) return *result;
-
- result = TryCallAsFunction(result);
- if (result->IsJSFunction()) return *result;
-
- return TypeError("property_not_function", object, key);
-}
-
-
-MaybeObject* LoadIC::Load(State state,
- Handle<Object> object,
- Handle<String> name) {
- // If the object is undefined or null it's illegal to try to get any
- // of its properties; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_load", object, name);
- }
-
- if (FLAG_use_ic) {
- // Use specialized code for getting the length of strings and
- // string wrapper objects. The length property of string wrapper
- // objects is read-only and therefore always returns the length of
- // the underlying string value. See ECMA-262 15.5.5.1.
- if ((object->IsString() || object->IsStringWrapper()) &&
- name->Equals(isolate()->heap()->length_string())) {
- Handle<Code> stub;
- if (state == UNINITIALIZED) {
- stub = pre_monomorphic_stub();
- } else if (state == PREMONOMORPHIC) {
- StringLengthStub string_length_stub(kind(), !object->IsString());
- stub = string_length_stub.GetCode(isolate());
- } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
- StringLengthStub string_length_stub(kind(), true);
- stub = string_length_stub.GetCode(isolate());
- } else if (state != MEGAMORPHIC) {
- ASSERT(state != GENERIC);
- stub = megamorphic_stub();
- }
- if (!stub.is_null()) {
- set_target(*stub);
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
-#endif
- }
- // Get the string if we have a string wrapper object.
- Handle<Object> string = object->IsJSValue()
- ? Handle<Object>(Handle<JSValue>::cast(object)->value(), isolate())
- : object;
- return Smi::FromInt(String::cast(*string)->length());
- }
-
- // Use specialized code for getting the length of arrays.
- if (object->IsJSArray() &&
- name->Equals(isolate()->heap()->length_string())) {
- Handle<Code> stub;
- if (state == UNINITIALIZED) {
- stub = pre_monomorphic_stub();
- } else if (state == PREMONOMORPHIC) {
- ArrayLengthStub array_length_stub(kind());
- stub = array_length_stub.GetCode(isolate());
- } else if (state != MEGAMORPHIC) {
- ASSERT(state != GENERIC);
- stub = megamorphic_stub();
- }
- if (!stub.is_null()) {
- set_target(*stub);
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
-#endif
- }
- return JSArray::cast(*object)->length();
- }
-
- // Use specialized code for getting prototype of functions.
- if (object->IsJSFunction() &&
- name->Equals(isolate()->heap()->prototype_string()) &&
- Handle<JSFunction>::cast(object)->should_have_prototype()) {
- Handle<Code> stub;
- if (state == UNINITIALIZED) {
- stub = pre_monomorphic_stub();
- } else if (state == PREMONOMORPHIC) {
- FunctionPrototypeStub function_prototype_stub(kind());
- stub = function_prototype_stub.GetCode(isolate());
- } else if (state != MEGAMORPHIC) {
- ASSERT(state != GENERIC);
- stub = megamorphic_stub();
- }
- if (!stub.is_null()) {
- set_target(*stub);
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
-#endif
- }
- return Accessors::FunctionGetPrototype(*object, 0);
- }
- }
-
- // Check if the name is trivially convertible to an index and get
- // the element or char if so.
- uint32_t index;
- if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
- // Rewrite to the generic keyed load stub.
- if (FLAG_use_ic) set_target(*generic_stub());
- return Runtime::GetElementOrCharAt(isolate(), object, index);
- }
-
- // Named lookup in the object.
- LookupResult lookup(isolate());
- LookupForRead(object, name, &lookup);
-
- // If we did not find a property, check if we need to throw an exception.
- if (!lookup.IsFound()) {
- if (IsUndeclaredGlobal(object)) {
- return ReferenceError("not_defined", name);
- }
- LOG(isolate(), SuspectReadEvent(*name, *object));
- }
-
- // Update inline cache and stub cache.
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, object, name);
- }
-
- PropertyAttributes attr;
- if (lookup.IsInterceptor() || lookup.IsHandler()) {
- // Get the property.
- Handle<Object> result =
- Object::GetProperty(object, object, &lookup, name, &attr);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- // If the property is not present, check if we need to throw an
- // exception.
- if (attr == ABSENT && IsUndeclaredGlobal(object)) {
- return ReferenceError("not_defined", name);
- }
- return *result;
- }
-
- // Get the property.
- return object->GetProperty(*object, &lookup, *name, &attr);
-}
-
-
-static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
- Handle<Map> new_receiver_map) {
- ASSERT(!new_receiver_map.is_null());
- for (int current = 0; current < receiver_maps->length(); ++current) {
- if (!receiver_maps->at(current).is_null() &&
- receiver_maps->at(current).is_identical_to(new_receiver_map)) {
- return false;
- }
- }
- receiver_maps->Add(new_receiver_map);
- return true;
-}
-
-
-bool IC::UpdatePolymorphicIC(State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Code> code) {
- if (code->type() == Code::NORMAL) return false;
- if (target()->ic_state() == MONOMORPHIC &&
- target()->type() == Code::NORMAL) {
- return false;
- }
- MapHandleList receiver_maps;
- CodeHandleList handlers;
- target()->FindAllMaps(&receiver_maps);
- int number_of_maps = receiver_maps.length();
- if (number_of_maps == 0 || number_of_maps >= 4) return false;
-
- target()->FindAllCode(&handlers, receiver_maps.length());
-
- if (!AddOneReceiverMapIfMissing(&receiver_maps,
- Handle<Map>(receiver->map()))) {
- return false;
- }
-
- handlers.Add(code);
- Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
- &receiver_maps, &handlers, name);
- set_target(*ic);
- return true;
-}
-
-
-void LoadIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<String> name) {
- if (handler->type() == Code::NORMAL) return set_target(*handler);
- Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC(
- receiver, handler, name);
- set_target(*ic);
-}
-
-
-void KeyedLoadIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<String> name) {
- if (handler->type() == Code::NORMAL) return set_target(*handler);
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedMonomorphicIC(
- receiver, handler, name);
- set_target(*ic);
-}
-
-
-void IC::PatchCache(State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Code> code) {
- switch (state) {
- case UNINITIALIZED:
- case PREMONOMORPHIC:
- case MONOMORPHIC_PROTOTYPE_FAILURE:
- UpdateMonomorphicIC(receiver, code, name);
- break;
- case MONOMORPHIC:
- // Only move to megamorphic if the target changes.
- if (target() != *code) {
- if (target()->is_load_stub()) {
- if (UpdatePolymorphicIC(state, strict_mode, receiver, name, code)) {
- break;
- }
- }
- // We are transitioning from monomorphic to megamorphic case. Place the
- // stub compiled for the receiver into stub cache.
- Map* map = target()->FindFirstMap();
- if (map != NULL) {
- UpdateMegamorphicCache(map, *name, target());
- }
- UpdateMegamorphicCache(receiver->map(), *name, *code);
- set_target((strict_mode == kStrictMode)
- ? *megamorphic_stub_strict()
- : *megamorphic_stub());
- }
- break;
- case MEGAMORPHIC:
- // Update the stub cache.
- UpdateMegamorphicCache(receiver->map(), *name, *code);
- break;
- case POLYMORPHIC:
- if (target()->is_load_stub()) {
- if (UpdatePolymorphicIC(state, strict_mode, receiver, name, code)) {
- break;
- }
- MapHandleList receiver_maps;
- CodeHandleList handlers;
- target()->FindAllMaps(&receiver_maps);
- target()->FindAllCode(&handlers, receiver_maps.length());
- for (int i = 0; i < receiver_maps.length(); i++) {
- UpdateMegamorphicCache(*receiver_maps.at(i), *name, *handlers.at(i));
- }
- UpdateMegamorphicCache(receiver->map(), *name, *code);
- set_target(*megamorphic_stub());
- } else {
- // When trying to patch a polymorphic keyed load/store element stub
- // with anything other than another polymorphic stub, go generic.
- set_target((strict_mode == kStrictMode)
- ? *generic_stub_strict()
- : *generic_stub());
- }
- break;
- case DEBUG_STUB:
- break;
- case GENERIC:
- UNREACHABLE();
- break;
- }
-}
-
-
-static void GetReceiverMapsForStub(Handle<Code> stub,
- MapHandleList* result) {
- ASSERT(stub->is_inline_cache_stub());
- switch (stub->ic_state()) {
- case MONOMORPHIC: {
- Map* map = stub->FindFirstMap();
- if (map != NULL) {
- result->Add(Handle<Map>(map));
- }
- break;
- }
- case POLYMORPHIC: {
- AssertNoAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*stub, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Handle<Object> object(info->target_object(), stub->GetIsolate());
- if (object->IsString()) break;
- ASSERT(object->IsMap());
- AddOneReceiverMapIfMissing(result, Handle<Map>::cast(object));
- }
- break;
- }
- case MEGAMORPHIC:
- break;
- case UNINITIALIZED:
- case PREMONOMORPHIC:
- case MONOMORPHIC_PROTOTYPE_FAILURE:
- case GENERIC:
- case DEBUG_STUB:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LoadIC::UpdateCaches(LookupResult* lookup,
- State state,
- Handle<Object> object,
- Handle<String> name) {
- // Bail out if the result is not cacheable.
- if (!lookup->IsCacheable()) return;
-
- // Loading properties from values is not common, so don't try to
- // deal with non-JS objects here.
- if (!object->IsJSObject()) return;
-
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- Handle<Code> code;
- if (state == UNINITIALIZED) {
- // This is the first time we execute this inline cache.
- // Set the target to the pre monomorphic stub to delay
- // setting the monomorphic state.
- code = pre_monomorphic_stub();
- } else {
- code = ComputeLoadHandler(lookup, receiver, name);
- if (code.is_null()) return;
- }
-
- PatchCache(state, kNonStrictMode, receiver, name, code);
- TRACE_IC("LoadIC", name, state, target());
-}
-
-
-void IC::UpdateMegamorphicCache(Map* map, String* name, Code* code) {
- // Cache code holding map should be consistent with
- // GenerateMonomorphicCacheProbe.
- isolate()->stub_cache()->Set(name, map, code);
-}
-
-
-Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<String> name) {
- if (!lookup->IsProperty()) {
- // Nonexistent property. The result is undefined.
- return isolate()->stub_cache()->ComputeLoadNonexistent(name, receiver);
- }
-
- // Compute monomorphic stub.
- Handle<JSObject> holder(lookup->holder());
- switch (lookup->type()) {
- case FIELD:
- return isolate()->stub_cache()->ComputeLoadField(
- name, receiver, holder, lookup->GetFieldIndex());
- case CONSTANT_FUNCTION: {
- Handle<JSFunction> constant(lookup->GetConstantFunction());
- return isolate()->stub_cache()->ComputeLoadConstant(
- name, receiver, holder, constant);
- }
- case NORMAL:
- if (holder->IsGlobalObject()) {
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
- return isolate()->stub_cache()->ComputeLoadGlobal(
- name, receiver, global, cell, lookup->IsDontDelete());
- }
- // There is only one shared stub for loading normalized
- // properties. It does not traverse the prototype chain, so the
- // property must be found in the receiver for the stub to be
- // applicable.
- if (!holder.is_identical_to(receiver)) break;
- return isolate()->stub_cache()->ComputeLoadNormal(name, receiver);
- case CALLBACKS: {
-#ifdef _WIN32_WCE
- // Disable optimization for wince as the calling convention looks different.
- return;
-#endif
- Handle<Object> callback(lookup->GetCallbackObject(), isolate());
- if (callback->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(callback);
- if (v8::ToCData<Address>(info->getter()) == 0) break;
- if (!info->IsCompatibleReceiver(*receiver)) break;
- return isolate()->stub_cache()->ComputeLoadCallback(
- name, receiver, holder, info);
- } else if (callback->IsAccessorPair()) {
- Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter(),
- isolate());
- if (!getter->IsJSFunction()) break;
- if (holder->IsGlobalObject()) break;
- if (!holder->HasFastProperties()) break;
- return isolate()->stub_cache()->ComputeLoadViaGetter(
- name, receiver, holder, Handle<JSFunction>::cast(getter));
- }
- // TODO(dcarney): Handle correctly.
- if (callback->IsDeclaredAccessorInfo()) break;
- ASSERT(callback->IsForeign());
- // No IC support for old-style native accessors.
- break;
- }
- case INTERCEPTOR:
- ASSERT(HasInterceptorGetter(*holder));
- return isolate()->stub_cache()->ComputeLoadInterceptor(
- name, receiver, holder);
- default:
- break;
- }
- return Handle<Code>::null();
-}
-
-
-static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
- // This helper implements a few common fast cases for converting
- // non-smi keys of keyed loads/stores to a smi or a string.
- if (key->IsHeapNumber()) {
- double value = Handle<HeapNumber>::cast(key)->value();
- if (isnan(value)) {
- key = isolate->factory()->nan_string();
- } else {
- int int_value = FastD2I(value);
- if (value == int_value && Smi::IsValid(int_value)) {
- key = Handle<Smi>(Smi::FromInt(int_value), isolate);
- }
- }
- } else if (key->IsUndefined()) {
- key = isolate->factory()->undefined_string();
- }
- return key;
-}
-
-
-Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
- State ic_state = target()->ic_state();
-
- // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
- // via megamorphic stubs, since they don't have a map in their relocation info
- // and so the stubs can't be harvested for the object needed for a map check.
- if (target()->type() != Code::NORMAL) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
- return generic_stub();
- }
-
- Handle<Map> receiver_map(receiver->map());
- MapHandleList target_receiver_maps;
- if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
- // Optimistically assume that ICs that haven't reached the MONOMORPHIC state
- // yet will do so and stay there.
- return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
- }
-
- if (target() == *string_stub()) {
- target_receiver_maps.Add(isolate()->factory()->string_map());
- } else {
- GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
- if (target_receiver_maps.length() == 0) {
- return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
- }
- }
-
- // The first time a receiver is seen that is a transitioned version of the
- // previous monomorphic receiver type, assume the new ElementsKind is the
- // monomorphic type. This benefits global arrays that only transition
- // once, and all call sites accessing them are faster if they remain
- // monomorphic. If this optimistic assumption is not true, the IC will
- // miss again and it will become polymorphic and support both the
- // untransitioned and transitioned maps.
- if (ic_state == MONOMORPHIC &&
- IsMoreGeneralElementsKindTransition(
- target_receiver_maps.at(0)->elements_kind(),
- receiver->GetElementsKind())) {
- return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
- }
-
- ASSERT(ic_state != GENERIC);
-
- // Determine the list of receiver maps that this call site has seen,
- // adding the map that was just encountered.
- if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) {
- // If the miss wasn't due to an unseen map, a polymorphic stub
- // won't help, use the generic stub.
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
- return generic_stub();
- }
-
- // If the maximum number of receiver maps has been exceeded, use the generic
- // version of the IC.
- if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
- return generic_stub();
- }
-
- return isolate()->stub_cache()->ComputeLoadElementPolymorphic(
- &target_receiver_maps);
-}
-
-
-MaybeObject* KeyedLoadIC::Load(State state,
- Handle<Object> object,
- Handle<Object> key,
- ICMissMode miss_mode) {
- // Check for values that can be converted into an internalized string directly
- // or is representable as a smi.
- key = TryConvertKey(key, isolate());
-
- if (key->IsInternalizedString()) {
- return LoadIC::Load(state, object, Handle<String>::cast(key));
- }
-
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
- ASSERT(!(use_ic && object->IsJSGlobalProxy()));
-
- if (use_ic) {
- Handle<Code> stub = generic_stub();
- if (miss_mode != MISS_FORCE_GENERIC) {
- if (object->IsString() && key->IsNumber()) {
- if (state == UNINITIALIZED) {
- stub = string_stub();
- }
- } else if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
- } else if (receiver->HasIndexedInterceptor()) {
- stub = indexed_interceptor_stub();
- } else if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
- stub = LoadElementStub(receiver);
- }
- }
- } else {
- TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "force generic");
- }
- ASSERT(!stub.is_null());
- set_target(*stub);
- TRACE_IC("KeyedLoadIC", key, state, target());
- }
-
-
- return Runtime::GetObjectProperty(isolate(), object, key);
-}
-
-
-Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<String> name) {
- // Bail out if we didn't find a result.
- if (!lookup->IsProperty()) return Handle<Code>::null();
-
- // Compute a monomorphic stub.
- Handle<JSObject> holder(lookup->holder());
- switch (lookup->type()) {
- case FIELD:
- return isolate()->stub_cache()->ComputeKeyedLoadField(
- name, receiver, holder, lookup->GetFieldIndex());
- case CONSTANT_FUNCTION: {
- Handle<JSFunction> constant(lookup->GetConstantFunction());
- return isolate()->stub_cache()->ComputeKeyedLoadConstant(
- name, receiver, holder, constant);
- }
- case CALLBACKS: {
- Handle<Object> callback_object(lookup->GetCallbackObject(), isolate());
- // TODO(dcarney): Handle DeclaredAccessorInfo correctly.
- if (!callback_object->IsExecutableAccessorInfo()) break;
- Handle<ExecutableAccessorInfo> callback =
- Handle<ExecutableAccessorInfo>::cast(callback_object);
- if (v8::ToCData<Address>(callback->getter()) == 0) break;
- if (!callback->IsCompatibleReceiver(*receiver)) break;
- return isolate()->stub_cache()->ComputeKeyedLoadCallback(
- name, receiver, holder, callback);
- }
- case INTERCEPTOR:
- ASSERT(HasInterceptorGetter(lookup->holder()));
- return isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
- name, receiver, holder);
- default:
- // Always rewrite to the generic case so that we do not
- // repeatedly try to rewrite.
- return generic_stub();
- }
- return Handle<Code>::null();
-}
-
-
-static bool StoreICableLookup(LookupResult* lookup) {
- // Bail out if we didn't find a result.
- if (!lookup->IsFound()) return false;
-
- // Bail out if inline caching is not allowed.
- if (!lookup->IsCacheable()) return false;
-
- // If the property is read-only, we leave the IC in its current state.
- if (lookup->IsTransition()) {
- return !lookup->GetTransitionDetails().IsReadOnly();
- }
- return !lookup->IsReadOnly();
-}
-
-
-static bool LookupForWrite(Handle<JSObject> receiver,
- Handle<String> name,
- LookupResult* lookup) {
- receiver->LocalLookup(*name, lookup);
- if (!lookup->IsFound()) {
- receiver->map()->LookupTransition(*receiver, *name, lookup);
- }
- if (!StoreICableLookup(lookup)) {
- // 2nd chance: There can be accessors somewhere in the prototype chain.
- receiver->Lookup(*name, lookup);
- return lookup->IsPropertyCallbacks() && StoreICableLookup(lookup);
- }
-
- if (lookup->IsInterceptor() &&
- receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
- receiver->LocalLookupRealNamedProperty(*name, lookup);
- return StoreICableLookup(lookup);
- }
-
- return true;
-}
-
-
-MaybeObject* StoreIC::Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
- Handle<String> name,
- Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode) {
- // Handle proxies.
- if (object->IsJSProxy()) {
- return JSProxy::cast(*object)->
- SetProperty(*name, *value, NONE, strict_mode);
- }
-
- // If the object is undefined or null it's illegal to try to set any
- // properties on it; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_store", object, name);
- }
-
- // The length property of string values is read-only. Throw in strict mode.
- if (strict_mode == kStrictMode && object->IsString() &&
- name->Equals(isolate()->heap()->length_string())) {
- return TypeError("strict_read_only_property", object, name);
- }
-
- // Ignore other stores where the receiver is not a JSObject.
- // TODO(1475): Must check prototype chains of object wrappers.
- if (!object->IsJSObject()) return *value;
-
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- // Check if the given name is an array index.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- Handle<Object> result =
- JSObject::SetElement(receiver, index, value, NONE, strict_mode);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- return *value;
- }
-
- // Observed objects are always modified through the runtime.
- if (FLAG_harmony_observation && receiver->map()->is_observed()) {
- return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode);
- }
-
- // Use specialized code for setting the length of arrays with fast
- // properties. Slow properties might indicate redefinition of the length
- // property.
- if (FLAG_use_ic &&
- receiver->IsJSArray() &&
- name->Equals(isolate()->heap()->length_string()) &&
- Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
- receiver->HasFastProperties()) {
- Handle<Code> stub =
- StoreArrayLengthStub(kind(), strict_mode).GetCode(isolate());
- set_target(*stub);
- TRACE_IC("StoreIC", name, state, *stub);
- return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode);
- }
-
- if (receiver->IsJSGlobalProxy()) {
- if (FLAG_use_ic && kind() != Code::KEYED_STORE_IC) {
- // Generate a generic stub that goes to the runtime when we see a global
- // proxy as receiver.
- Handle<Code> stub = (strict_mode == kStrictMode)
- ? global_proxy_stub_strict()
- : global_proxy_stub();
- set_target(*stub);
- TRACE_IC("StoreIC", name, state, *stub);
- }
- return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode);
- }
-
- LookupResult lookup(isolate());
- if (LookupForWrite(receiver, name, &lookup)) {
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
- }
- } else if (strict_mode == kStrictMode &&
- !(lookup.IsProperty() && lookup.IsReadOnly()) &&
- IsUndeclaredGlobal(object)) {
- // Strict mode doesn't allow setting non-existent global property.
- return ReferenceError("not_defined", name);
- }
-
- // Set the property.
- return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode);
-}
-
-
-void StoreIC::UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value) {
- ASSERT(!receiver->IsJSGlobalProxy());
- ASSERT(StoreICableLookup(lookup));
- ASSERT(lookup->IsFound());
-
- // These are not cacheable, so we never see such LookupResults here.
- ASSERT(!lookup->IsHandler());
-
- Handle<Code> code =
- ComputeStoreMonomorphic(lookup, strict_mode, receiver, name);
- if (code.is_null()) return;
-
- PatchCache(state, strict_mode, receiver, name, code);
- TRACE_IC("StoreIC", name, state, target());
-}
-
-
-Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name) {
- Handle<JSObject> holder(lookup->holder());
- switch (lookup->type()) {
- case FIELD:
- return isolate()->stub_cache()->ComputeStoreField(
- name, receiver, lookup->GetFieldIndex().field_index(),
- Handle<Map>::null(), strict_mode);
- case NORMAL:
- if (receiver->IsGlobalObject()) {
- // The stub generated for the global object picks the value directly
- // from the property cell. So the property must be directly on the
- // global object.
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
- return isolate()->stub_cache()->ComputeStoreGlobal(
- name, global, cell, strict_mode);
- }
- if (!holder.is_identical_to(receiver)) break;
- return isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
- case CALLBACKS: {
- Handle<Object> callback(lookup->GetCallbackObject(), isolate());
- if (callback->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(callback);
- if (v8::ToCData<Address>(info->setter()) == 0) break;
- if (!holder->HasFastProperties()) break;
- if (!info->IsCompatibleReceiver(*receiver)) break;
- return isolate()->stub_cache()->ComputeStoreCallback(
- name, receiver, holder, info, strict_mode);
- } else if (callback->IsAccessorPair()) {
- Handle<Object> setter(Handle<AccessorPair>::cast(callback)->setter(),
- isolate());
- if (!setter->IsJSFunction()) break;
- if (holder->IsGlobalObject()) break;
- if (!holder->HasFastProperties()) break;
- return isolate()->stub_cache()->ComputeStoreViaSetter(
- name, receiver, holder, Handle<JSFunction>::cast(setter),
- strict_mode);
- }
- // TODO(dcarney): Handle correctly.
- if (callback->IsDeclaredAccessorInfo()) break;
- ASSERT(callback->IsForeign());
- // No IC support for old-style native accessors.
- break;
- }
- case INTERCEPTOR:
- ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
- return isolate()->stub_cache()->ComputeStoreInterceptor(
- name, receiver, strict_mode);
- case CONSTANT_FUNCTION:
- break;
- case TRANSITION: {
- Handle<Map> transition(lookup->GetTransitionTarget());
- int descriptor = transition->LastAdded();
-
- DescriptorArray* target_descriptors = transition->instance_descriptors();
- PropertyDetails details = target_descriptors->GetDetails(descriptor);
-
- if (details.type() != FIELD || details.attributes() != NONE) break;
-
- int field_index = target_descriptors->GetFieldIndex(descriptor);
- return isolate()->stub_cache()->ComputeStoreField(
- name, receiver, field_index, transition, strict_mode);
- }
- case NONEXISTENT:
- case HANDLER:
- UNREACHABLE();
- break;
- }
- return Handle<Code>::null();
-}
-
-
-Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
- StubKind stub_kind,
- StrictModeFlag strict_mode) {
- State ic_state = target()->ic_state();
- KeyedAccessGrowMode grow_mode = IsGrowStubKind(stub_kind)
- ? ALLOW_JSARRAY_GROWTH
- : DO_NOT_ALLOW_JSARRAY_GROWTH;
-
- // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
- // via megamorphic stubs, since they don't have a map in their relocation info
- // and so the stubs can't be harvested for the object needed for a map check.
- if (target()->type() != Code::NORMAL) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
- return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
- }
-
- Handle<Map> receiver_map(receiver->map());
- MapHandleList target_receiver_maps;
- if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
- // Optimistically assume that ICs that haven't reached the MONOMORPHIC state
- // yet will do so and stay there.
- Handle<Map> monomorphic_map = ComputeTransitionedMap(receiver, stub_kind);
- stub_kind = GetNoTransitionStubKind(stub_kind);
- return isolate()->stub_cache()->ComputeKeyedStoreElement(
- monomorphic_map, stub_kind, strict_mode, grow_mode);
- }
-
- GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
- if (target_receiver_maps.length() == 0) {
- // Optimistically assume that ICs that haven't reached the MONOMORPHIC state
- // yet will do so and stay there.
- stub_kind = GetNoTransitionStubKind(stub_kind);
- return isolate()->stub_cache()->ComputeKeyedStoreElement(
- receiver_map, stub_kind, strict_mode, grow_mode);
- }
- // The first time a receiver is seen that is a transitioned version of the
- // previous monomorphic receiver type, assume the new ElementsKind is the
- // monomorphic type. This benefits global arrays that only transition
- // once, and all call sites accessing them are faster if they remain
- // monomorphic. If this optimistic assumption is not true, the IC will
- // miss again and it will become polymorphic and support both the
- // untransitioned and transitioned maps.
- if (ic_state == MONOMORPHIC &&
- IsTransitionStubKind(stub_kind) &&
- IsMoreGeneralElementsKindTransition(
- target_receiver_maps.at(0)->elements_kind(),
- receiver->GetElementsKind())) {
- Handle<Map> monomorphic_map = ComputeTransitionedMap(receiver, stub_kind);
- ASSERT(*monomorphic_map != *receiver_map);
- stub_kind = GetNoTransitionStubKind(stub_kind);
- return isolate()->stub_cache()->ComputeKeyedStoreElement(
- monomorphic_map, stub_kind, strict_mode, grow_mode);
- }
-
- ASSERT(ic_state != GENERIC);
-
- bool map_added =
- AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
-
- if (IsTransitionStubKind(stub_kind)) {
- Handle<Map> new_map = ComputeTransitionedMap(receiver, stub_kind);
- map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps, new_map);
- }
-
- if (!map_added) {
- // If the miss wasn't due to an unseen map, a polymorphic stub
- // won't help, use the generic stub.
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
- return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
- }
-
- // If the maximum number of receiver maps has been exceeded, use the generic
- // version of the IC.
- if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
- return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
- }
-
- if ((Code::GetKeyedAccessGrowMode(target()->extra_ic_state()) ==
- ALLOW_JSARRAY_GROWTH)) {
- grow_mode = ALLOW_JSARRAY_GROWTH;
- }
-
- return isolate()->stub_cache()->ComputeStoreElementPolymorphic(
- &target_receiver_maps, grow_mode, strict_mode);
-}
-
-
-Handle<Map> KeyedStoreIC::ComputeTransitionedMap(Handle<JSObject> receiver,
- StubKind stub_kind) {
- switch (stub_kind) {
- case STORE_TRANSITION_SMI_TO_OBJECT:
- case STORE_TRANSITION_DOUBLE_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
- return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
- case STORE_TRANSITION_SMI_TO_DOUBLE:
- case STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
- return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
- case STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
- return JSObject::GetElementsTransitionMap(receiver,
- FAST_HOLEY_ELEMENTS);
- case STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
- case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
- return JSObject::GetElementsTransitionMap(receiver,
- FAST_HOLEY_DOUBLE_ELEMENTS);
- case STORE_NO_TRANSITION:
- case STORE_AND_GROW_NO_TRANSITION:
- return Handle<Map>(receiver->map());
- }
- return Handle<Map>::null();
-}
-
-
-KeyedStoreIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
- Handle<Object> key,
- Handle<Object> value) {
- ASSERT(key->IsSmi());
- int index = Smi::cast(*key)->value();
- bool allow_growth = receiver->IsJSArray() &&
- JSArray::cast(*receiver)->length()->IsSmi() &&
- index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
-
- if (allow_growth) {
- // Handle growing array in stub if necessary.
- if (receiver->HasFastSmiElements()) {
- if (value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE;
- } else {
- return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
- }
- }
- if (value->IsHeapObject()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT;
- } else {
- return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
- }
- }
- } else if (receiver->HasFastDoubleElements()) {
- if (!value->IsSmi() && !value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
- } else {
- return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
- }
- }
- }
- return STORE_AND_GROW_NO_TRANSITION;
- } else {
- // Handle only in-bounds elements accesses.
- if (receiver->HasFastSmiElements()) {
- if (value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE;
- } else {
- return STORE_TRANSITION_SMI_TO_DOUBLE;
- }
- } else if (value->IsHeapObject()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_SMI_TO_OBJECT;
- } else {
- return STORE_TRANSITION_SMI_TO_OBJECT;
- }
- }
- } else if (receiver->HasFastDoubleElements()) {
- if (!value->IsSmi() && !value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
- } else {
- return STORE_TRANSITION_DOUBLE_TO_OBJECT;
- }
- }
- }
- return STORE_NO_TRANSITION;
- }
-}
-
-
-MaybeObject* KeyedStoreIC::Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- ICMissMode miss_mode) {
- // Check for values that can be converted into an internalized string directly
- // or is representable as a smi.
- key = TryConvertKey(key, isolate());
-
- if (key->IsInternalizedString()) {
- return StoreIC::Store(state,
- strict_mode,
- object,
- Handle<String>::cast(key),
- value,
- JSReceiver::MAY_BE_STORE_FROM_KEYED);
- }
-
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
- !(FLAG_harmony_observation && object->IsJSObject() &&
- JSObject::cast(*object)->map()->is_observed());
- ASSERT(!(use_ic && object->IsJSGlobalProxy()));
-
- if (use_ic) {
- Handle<Code> stub = (strict_mode == kStrictMode)
- ? generic_stub_strict()
- : generic_stub();
- if (miss_mode != MISS_FORCE_GENERIC) {
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
- } else if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
- StubKind stub_kind = GetStubKind(receiver, key, value);
- stub = StoreElementStub(receiver, stub_kind, strict_mode);
- }
- }
- } else {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "force generic");
- }
- ASSERT(!stub.is_null());
- set_target(*stub);
- TRACE_IC("KeyedStoreIC", key, state, target());
- }
-
- return Runtime::SetObjectProperty(
- isolate(), object , key, value, NONE, strict_mode);
-}
-
-
-Handle<Code> KeyedStoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name) {
- // If the property has a non-field type allowing map transitions
- // where there is extra room in the object, we leave the IC in its
- // current state.
- switch (lookup->type()) {
- case FIELD:
- return isolate()->stub_cache()->ComputeKeyedStoreField(
- name, receiver, lookup->GetFieldIndex().field_index(),
- Handle<Map>::null(), strict_mode);
- case TRANSITION: {
- Handle<Map> transition(lookup->GetTransitionTarget());
- int descriptor = transition->LastAdded();
-
- DescriptorArray* target_descriptors = transition->instance_descriptors();
- PropertyDetails details = target_descriptors->GetDetails(descriptor);
-
- if (details.type() == FIELD && details.attributes() == NONE) {
- int field_index = target_descriptors->GetFieldIndex(descriptor);
- return isolate()->stub_cache()->ComputeKeyedStoreField(
- name, receiver, field_index, transition, strict_mode);
- }
- // fall through.
- }
- case NORMAL:
- case CONSTANT_FUNCTION:
- case CALLBACKS:
- case INTERCEPTOR:
- // Always rewrite to the generic case so that we do not
- // repeatedly try to rewrite.
- return (strict_mode == kStrictMode)
- ? generic_stub_strict()
- : generic_stub();
- case HANDLER:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- return Handle<Code>::null();
-}
-
-
-#undef TRACE_IC
-
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CallIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- MaybeObject* maybe_result = ic.LoadFunction(state,
- extra_ic_state,
- args.at<Object>(0),
- args.at<String>(1));
- JSFunction* raw_function;
- if (!maybe_result->To(&raw_function)) return maybe_result;
-
- // The first time the inline cache is updated may be the first time the
- // function it references gets called. If the function is lazily compiled
- // then the first call will trigger a compilation. We check for this case
- // and we do the compilation immediately, instead of waiting for the stub
- // currently attached to the JSFunction object to trigger compilation.
- if (raw_function->is_compiled()) return raw_function;
-
- Handle<JSFunction> function(raw_function);
- JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
- return *function;
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- KeyedCallIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- MaybeObject* maybe_result =
- ic.LoadFunction(state, args.at<Object>(0), args.at<Object>(1));
- // Result could be a function or a failure.
- JSFunction* raw_function = NULL;
- if (!maybe_result->To(&raw_function)) return maybe_result;
-
- if (raw_function->is_compiled()) return raw_function;
-
- Handle<JSFunction> function(raw_function);
- JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
- return *function;
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<String>(1));
-}
-
-
-// Used from ic-<arch>.cc
-RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state,
- args.at<Object>(0),
- args.at<Object>(1),
- MISS_FORCE_GENERIC);
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- StoreIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<String>(1),
- args.at<Object>(2));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
- NoHandleAllocation nha(isolate);
-
- ASSERT(args.length() == 2);
- JSArray* receiver = JSArray::cast(args[0]);
- Object* len = args[1];
-
- // The generated code should filter out non-Smis before we get here.
- ASSERT(len->IsSmi());
-
-#ifdef DEBUG
- // The length property has to be a writable callback property.
- LookupResult debug_lookup(isolate);
- receiver->LocalLookup(isolate->heap()->length_string(), &debug_lookup);
- ASSERT(debug_lookup.IsPropertyCallbacks() && !debug_lookup.IsReadOnly());
-#endif
-
- Object* result;
- MaybeObject* maybe_result = receiver->SetElementsLength(len);
- if (!maybe_result->To(&result)) return maybe_result;
-
- return len;
-}
-
-
-// Extend storage is called in a store inline cache when
-// it is necessary to extend the properties array of a
-// JSObject.
-RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
- NoHandleAllocation na(isolate);
- ASSERT(args.length() == 3);
-
- // Convert the parameters
- JSObject* object = JSObject::cast(args[0]);
- Map* transition = Map::cast(args[1]);
- Object* value = args[2];
-
- // Check the object has run out out property space.
- ASSERT(object->HasFastProperties());
- ASSERT(object->map()->unused_property_fields() == 0);
-
- // Expand the properties array.
- FixedArray* old_storage = object->properties();
- int new_unused = transition->unused_property_fields();
- int new_size = old_storage->length() + new_unused + 1;
- Object* result;
- { MaybeObject* maybe_result = old_storage->CopySize(new_size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- FixedArray* new_storage = FixedArray::cast(result);
- new_storage->set(old_storage->length(), value);
-
- // Set the new property value and do the map transition.
- object->set_properties(new_storage);
- object->set_map(transition);
-
- // Return the stored value.
- return value;
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- KeyedStoreIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<Object>(1),
- args.at<Object>(2),
- MISS);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
- NoHandleAllocation na(isolate);
- ASSERT(args.length() == 3);
- KeyedStoreIC ic(isolate);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
- return Runtime::SetObjectProperty(isolate,
- object,
- key,
- value,
- NONE,
- strict_mode);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- KeyedStoreIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<Object>(1),
- args.at<Object>(2),
- MISS_FORCE_GENERIC);
-}
-
-
-void UnaryOpIC::patch(Code* code) {
- set_target(code);
-}
-
-
-const char* UnaryOpIC::GetName(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED: return "Uninitialized";
- case SMI: return "Smi";
- case NUMBER: return "Number";
- case GENERIC: return "Generic";
- default: return "Invalid";
- }
-}
-
-
-UnaryOpIC::State UnaryOpIC::ToState(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED:
- return ::v8::internal::UNINITIALIZED;
- case SMI:
- case NUMBER:
- return MONOMORPHIC;
- case GENERIC:
- return ::v8::internal::GENERIC;
- }
- UNREACHABLE();
- return ::v8::internal::UNINITIALIZED;
-}
-
-UnaryOpIC::TypeInfo UnaryOpIC::GetTypeInfo(Handle<Object> operand) {
- ::v8::internal::TypeInfo operand_type =
- ::v8::internal::TypeInfo::TypeFromValue(operand);
- if (operand_type.IsSmi()) {
- return SMI;
- } else if (operand_type.IsNumber()) {
- return NUMBER;
- } else {
- return GENERIC;
- }
-}
-
-
-UnaryOpIC::TypeInfo UnaryOpIC::ComputeNewType(
- TypeInfo current_type,
- TypeInfo previous_type) {
- switch (previous_type) {
- case UNINITIALIZED:
- return current_type;
- case SMI:
- return (current_type == GENERIC) ? GENERIC : NUMBER;
- case NUMBER:
- return GENERIC;
- case GENERIC:
- // We should never do patching if we are in GENERIC state.
- UNREACHABLE();
- return GENERIC;
- }
- UNREACHABLE();
- return GENERIC;
-}
-
-
-void BinaryOpIC::patch(Code* code) {
- set_target(code);
-}
-
-
-const char* BinaryOpIC::GetName(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED: return "Uninitialized";
- case SMI: return "Smi";
- case INT32: return "Int32";
- case NUMBER: return "Number";
- case ODDBALL: return "Oddball";
- case STRING: return "String";
- case GENERIC: return "Generic";
- default: return "Invalid";
- }
-}
-
-
-BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED:
- return ::v8::internal::UNINITIALIZED;
- case SMI:
- case INT32:
- case NUMBER:
- case ODDBALL:
- case STRING:
- return MONOMORPHIC;
- case GENERIC:
- return ::v8::internal::GENERIC;
- }
- UNREACHABLE();
- return ::v8::internal::UNINITIALIZED;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
- ASSERT(args.length() == 4);
-
- HandleScope scope(isolate);
- Handle<Object> operand = args.at<Object>(0);
- Token::Value op = static_cast<Token::Value>(args.smi_at(1));
- UnaryOverwriteMode mode = static_cast<UnaryOverwriteMode>(args.smi_at(2));
- UnaryOpIC::TypeInfo previous_type =
- static_cast<UnaryOpIC::TypeInfo>(args.smi_at(3));
-
- UnaryOpIC::TypeInfo type = UnaryOpIC::GetTypeInfo(operand);
- type = UnaryOpIC::ComputeNewType(type, previous_type);
-
- UnaryOpStub stub(op, mode, type);
- Handle<Code> code = stub.GetCode(isolate);
- if (!code.is_null()) {
- if (FLAG_trace_ic) {
- PrintF("[UnaryOpIC in ");
- JavaScriptFrame::PrintTop(isolate, stdout, false, true);
- PrintF(" (%s->%s)#%s @ %p]\n",
- UnaryOpIC::GetName(previous_type),
- UnaryOpIC::GetName(type),
- Token::Name(op),
- static_cast<void*>(*code));
- }
- UnaryOpIC ic(isolate);
- ic.patch(*code);
- }
-
- Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
- isolate->thread_local_top()->context_->builtins(), isolate);
- Object* builtin = NULL; // Initialization calms down the compiler.
- switch (op) {
- case Token::SUB:
- builtin = builtins->javascript_builtin(Builtins::UNARY_MINUS);
- break;
- case Token::BIT_NOT:
- builtin = builtins->javascript_builtin(Builtins::BIT_NOT);
- break;
- default:
- UNREACHABLE();
- }
-
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
-
- bool caught_exception;
- Handle<Object> result = Execution::Call(builtin_function, operand, 0, NULL,
- &caught_exception);
- if (caught_exception) {
- return Failure::Exception();
- }
- return *result;
-}
-
-
-static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
- Token::Value op) {
- ::v8::internal::TypeInfo type =
- ::v8::internal::TypeInfo::TypeFromValue(value);
- if (type.IsSmi()) return BinaryOpIC::SMI;
- if (type.IsInteger32()) {
- if (kSmiValueSize == 32) return BinaryOpIC::SMI;
- return BinaryOpIC::INT32;
- }
- if (type.IsNumber()) return BinaryOpIC::NUMBER;
- if (type.IsString()) return BinaryOpIC::STRING;
- if (value->IsUndefined()) {
- if (op == Token::BIT_AND ||
- op == Token::BIT_OR ||
- op == Token::BIT_XOR ||
- op == Token::SAR ||
- op == Token::SHL ||
- op == Token::SHR) {
- if (kSmiValueSize == 32) return BinaryOpIC::SMI;
- return BinaryOpIC::INT32;
- }
- return BinaryOpIC::ODDBALL;
- }
- return BinaryOpIC::GENERIC;
-}
-
-
-static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type,
- Handle<Object> value,
- Token::Value op) {
- BinaryOpIC::TypeInfo new_type = TypeInfoFromValue(value, op);
- if (old_type == BinaryOpIC::STRING) {
- if (new_type == BinaryOpIC::STRING) return new_type;
- return BinaryOpIC::GENERIC;
- }
- return Max(old_type, new_type);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
- ASSERT(args.length() == 3);
-
- HandleScope scope(isolate);
- Handle<Object> left = args.at<Object>(0);
- Handle<Object> right = args.at<Object>(1);
- int key = args.smi_at(2);
- Token::Value op = BinaryOpStub::decode_op_from_minor_key(key);
- BinaryOpIC::TypeInfo previous_left, previous_right, unused_previous_result;
- BinaryOpStub::decode_types_from_minor_key(
- key, &previous_left, &previous_right, &unused_previous_result);
-
- BinaryOpIC::TypeInfo new_left = InputState(previous_left, left, op);
- BinaryOpIC::TypeInfo new_right = InputState(previous_right, right, op);
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
-
- // STRING is only used for ADD operations.
- if ((new_left == BinaryOpIC::STRING || new_right == BinaryOpIC::STRING) &&
- op != Token::ADD) {
- new_left = new_right = BinaryOpIC::GENERIC;
- }
-
- BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right);
- BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right);
-
- if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
- if (op == Token::DIV ||
- op == Token::MUL ||
- op == Token::SHR ||
- kSmiValueSize == 32) {
- // Arithmetic on two Smi inputs has yielded a heap number.
- // That is the only way to get here from the Smi stub.
- // With 32-bit Smis, all overflows give heap numbers, but with
- // 31-bit Smis, most operations overflow to int32 results.
- result_type = BinaryOpIC::NUMBER;
- } else {
- // Other operations on SMIs that overflow yield int32s.
- result_type = BinaryOpIC::INT32;
- }
- }
- if (new_overall == BinaryOpIC::INT32 &&
- previous_overall == BinaryOpIC::INT32) {
- if (new_left == previous_left && new_right == previous_right) {
- result_type = BinaryOpIC::NUMBER;
- }
- }
-
- BinaryOpStub stub(key, new_left, new_right, result_type);
- Handle<Code> code = stub.GetCode(isolate);
- if (!code.is_null()) {
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[BinaryOpIC in ");
- JavaScriptFrame::PrintTop(isolate, stdout, false, true);
- PrintF(" ((%s+%s)->((%s+%s)->%s))#%s @ %p]\n",
- BinaryOpIC::GetName(previous_left),
- BinaryOpIC::GetName(previous_right),
- BinaryOpIC::GetName(new_left),
- BinaryOpIC::GetName(new_right),
- BinaryOpIC::GetName(result_type),
- Token::Name(op),
- static_cast<void*>(*code));
- }
-#endif
- BinaryOpIC ic(isolate);
- ic.patch(*code);
-
- // Activate inlined smi code.
- if (previous_overall == BinaryOpIC::UNINITIALIZED) {
- PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
- }
- }
-
- Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
- isolate->thread_local_top()->context_->builtins(), isolate);
- Object* builtin = NULL; // Initialization calms down the compiler.
- switch (op) {
- case Token::ADD:
- builtin = builtins->javascript_builtin(Builtins::ADD);
- break;
- case Token::SUB:
- builtin = builtins->javascript_builtin(Builtins::SUB);
- break;
- case Token::MUL:
- builtin = builtins->javascript_builtin(Builtins::MUL);
- break;
- case Token::DIV:
- builtin = builtins->javascript_builtin(Builtins::DIV);
- break;
- case Token::MOD:
- builtin = builtins->javascript_builtin(Builtins::MOD);
- break;
- case Token::BIT_AND:
- builtin = builtins->javascript_builtin(Builtins::BIT_AND);
- break;
- case Token::BIT_OR:
- builtin = builtins->javascript_builtin(Builtins::BIT_OR);
- break;
- case Token::BIT_XOR:
- builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
- break;
- case Token::SHR:
- builtin = builtins->javascript_builtin(Builtins::SHR);
- break;
- case Token::SAR:
- builtin = builtins->javascript_builtin(Builtins::SAR);
- break;
- case Token::SHL:
- builtin = builtins->javascript_builtin(Builtins::SHL);
- break;
- default:
- UNREACHABLE();
- }
-
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
-
- bool caught_exception;
- Handle<Object> builtin_args[] = { right };
- Handle<Object> result = Execution::Call(builtin_function,
- left,
- ARRAY_SIZE(builtin_args),
- builtin_args,
- &caught_exception);
- if (caught_exception) {
- return Failure::Exception();
- }
- return *result;
-}
-
-
-Code* CompareIC::GetRawUninitialized(Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
- Code* code = NULL;
- CHECK(stub.FindCodeInCache(&code, Isolate::Current()));
- return code;
-}
-
-
-Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
- return stub.GetCode(isolate);
-}
-
-
-const char* CompareIC::GetStateName(State state) {
- switch (state) {
- case UNINITIALIZED: return "UNINITIALIZED";
- case SMI: return "SMI";
- case NUMBER: return "NUMBER";
- case INTERNALIZED_STRING: return "INTERNALIZED_STRING";
- case STRING: return "STRING";
- case UNIQUE_NAME: return "UNIQUE_NAME";
- case OBJECT: return "OBJECT";
- case KNOWN_OBJECT: return "KNOWN_OBJECT";
- case GENERIC: return "GENERIC";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-static CompareIC::State InputState(CompareIC::State old_state,
- Handle<Object> value) {
- switch (old_state) {
- case CompareIC::UNINITIALIZED:
- if (value->IsSmi()) return CompareIC::SMI;
- if (value->IsHeapNumber()) return CompareIC::NUMBER;
- if (value->IsInternalizedString()) return CompareIC::INTERNALIZED_STRING;
- if (value->IsString()) return CompareIC::STRING;
- if (value->IsSymbol()) return CompareIC::UNIQUE_NAME;
- if (value->IsJSObject()) return CompareIC::OBJECT;
- break;
- case CompareIC::SMI:
- if (value->IsSmi()) return CompareIC::SMI;
- if (value->IsHeapNumber()) return CompareIC::NUMBER;
- break;
- case CompareIC::NUMBER:
- if (value->IsNumber()) return CompareIC::NUMBER;
- break;
- case CompareIC::INTERNALIZED_STRING:
- if (value->IsInternalizedString()) return CompareIC::INTERNALIZED_STRING;
- if (value->IsString()) return CompareIC::STRING;
- if (value->IsSymbol()) return CompareIC::UNIQUE_NAME;
- break;
- case CompareIC::STRING:
- if (value->IsString()) return CompareIC::STRING;
- break;
- case CompareIC::UNIQUE_NAME:
- if (value->IsUniqueName()) return CompareIC::UNIQUE_NAME;
- break;
- case CompareIC::OBJECT:
- if (value->IsJSObject()) return CompareIC::OBJECT;
- break;
- case CompareIC::GENERIC:
- break;
- case CompareIC::KNOWN_OBJECT:
- UNREACHABLE();
- break;
- }
- return CompareIC::GENERIC;
-}
-
-
-CompareIC::State CompareIC::TargetState(State old_state,
- State old_left,
- State old_right,
- bool has_inlined_smi_code,
- Handle<Object> x,
- Handle<Object> y) {
- switch (old_state) {
- case UNINITIALIZED:
- if (x->IsSmi() && y->IsSmi()) return SMI;
- if (x->IsNumber() && y->IsNumber()) return NUMBER;
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- // Ordered comparisons treat undefined as NaN, so the
- // NUMBER stub will do the right thing.
- if ((x->IsNumber() && y->IsUndefined()) ||
- (y->IsNumber() && x->IsUndefined())) {
- return NUMBER;
- }
- }
- if (x->IsInternalizedString() && y->IsInternalizedString()) {
- // We compare internalized strings as plain ones if we need to determine
- // the order in a non-equality compare.
- return Token::IsEqualityOp(op_) ? INTERNALIZED_STRING : STRING;
- }
- if (x->IsString() && y->IsString()) return STRING;
- if (!Token::IsEqualityOp(op_)) return GENERIC;
- if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
- if (x->IsJSObject() && y->IsJSObject()) {
- if (Handle<JSObject>::cast(x)->map() ==
- Handle<JSObject>::cast(y)->map()) {
- return KNOWN_OBJECT;
- } else {
- return OBJECT;
- }
- }
- return GENERIC;
- case SMI:
- return x->IsNumber() && y->IsNumber() ? NUMBER : GENERIC;
- case INTERNALIZED_STRING:
- ASSERT(Token::IsEqualityOp(op_));
- if (x->IsString() && y->IsString()) return STRING;
- if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
- return GENERIC;
- case NUMBER:
- // If the failure was due to one side changing from smi to heap number,
- // then keep the state (if other changed at the same time, we will get
- // a second miss and then go to generic).
- if (old_left == SMI && x->IsHeapNumber()) return NUMBER;
- if (old_right == SMI && y->IsHeapNumber()) return NUMBER;
- return GENERIC;
- case KNOWN_OBJECT:
- ASSERT(Token::IsEqualityOp(op_));
- if (x->IsJSObject() && y->IsJSObject()) return OBJECT;
- return GENERIC;
- case STRING:
- case UNIQUE_NAME:
- case OBJECT:
- case GENERIC:
- return GENERIC;
- }
- UNREACHABLE();
- return GENERIC; // Make the compiler happy.
-}
-
-
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope(isolate());
- State previous_left, previous_right, previous_state;
- ICCompareStub::DecodeMinorKey(target()->stub_info(), &previous_left,
- &previous_right, &previous_state, NULL);
- State new_left = InputState(previous_left, x);
- State new_right = InputState(previous_right, y);
- State state = TargetState(previous_state, previous_left, previous_right,
- HasInlinedSmiCode(address()), x, y);
- ICCompareStub stub(op_, new_left, new_right, state);
- if (state == KNOWN_OBJECT) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- set_target(*stub.GetCode(isolate()));
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC in ");
- JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
- PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
- GetStateName(previous_left),
- GetStateName(previous_right),
- GetStateName(previous_state),
- GetStateName(new_left),
- GetStateName(new_right),
- GetStateName(state),
- Token::Name(op_),
- static_cast<void*>(*stub.GetCode(isolate())));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
-}
-
-
-// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
-RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
- NoHandleAllocation na(isolate);
- ASSERT(args.length() == 3);
- CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
- ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
- return ic.target();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, ToBoolean_Patch) {
- ASSERT(args.length() == 3);
-
- HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0);
- Register tos = Register::from_code(args.smi_at(1));
- ToBooleanStub::Types old_types(args.smi_at(2));
-
- ToBooleanStub::Types new_types(old_types);
- bool to_boolean_value = new_types.Record(object);
- old_types.TraceTransition(new_types);
-
- ToBooleanStub stub(tos, new_types);
- Handle<Code> code = stub.GetCode(isolate);
- ToBooleanIC ic(isolate);
- ic.patch(*code);
- return Smi::FromInt(to_boolean_value ? 1 : 0);
-}
-
-
-void ToBooleanIC::patch(Code* code) {
- set_target(code);
-}
-
-
-static const Address IC_utilities[] = {
-#define ADDR(name) FUNCTION_ADDR(name),
- IC_UTIL_LIST(ADDR)
- NULL
-#undef ADDR
-};
-
-
-Address IC::AddressFromUtilityId(IC::UtilityId id) {
- return IC_utilities[id];
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/ic.h b/src/3rdparty/v8/src/ic.h
deleted file mode 100644
index cdbdbbd..0000000
--- a/src/3rdparty/v8/src/ic.h
+++ /dev/null
@@ -1,850 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IC_H_
-#define V8_IC_H_
-
-#include "macro-assembler.h"
-#include "type-info.h"
-
-namespace v8 {
-namespace internal {
-
-
-// IC_UTIL_LIST defines all utility functions called from generated
-// inline caching code. The argument for the macro, ICU, is the function name.
-#define IC_UTIL_LIST(ICU) \
- ICU(LoadIC_Miss) \
- ICU(KeyedLoadIC_Miss) \
- ICU(KeyedLoadIC_MissForceGeneric) \
- ICU(CallIC_Miss) \
- ICU(KeyedCallIC_Miss) \
- ICU(StoreIC_Miss) \
- ICU(StoreIC_ArrayLength) \
- ICU(SharedStoreIC_ExtendStorage) \
- ICU(KeyedStoreIC_Miss) \
- ICU(KeyedStoreIC_MissForceGeneric) \
- ICU(KeyedStoreIC_Slow) \
- /* Utilities for IC stubs. */ \
- ICU(StoreCallbackProperty) \
- ICU(LoadPropertyWithInterceptorOnly) \
- ICU(LoadPropertyWithInterceptorForLoad) \
- ICU(LoadPropertyWithInterceptorForCall) \
- ICU(KeyedLoadPropertyWithInterceptor) \
- ICU(StoreInterceptorProperty) \
- ICU(UnaryOp_Patch) \
- ICU(BinaryOp_Patch) \
- ICU(CompareIC_Miss) \
- ICU(ToBoolean_Patch)
-//
-// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
-// and KeyedStoreIC.
-//
-class IC {
- public:
- // The ids for utility called from the generated code.
- enum UtilityId {
- #define CONST_NAME(name) k##name,
- IC_UTIL_LIST(CONST_NAME)
- #undef CONST_NAME
- kUtilityCount
- };
-
- // Looks up the address of the named utility.
- static Address AddressFromUtilityId(UtilityId id);
-
- // Alias the inline cache state type to make the IC code more readable.
- typedef InlineCacheState State;
-
- // The IC code is either invoked with no extra frames on the stack
- // or with a single extra frame for supporting calls.
- enum FrameDepth {
- NO_EXTRA_FRAME = 0,
- EXTRA_CALL_FRAME = 1
- };
-
- // Construct the IC structure with the given number of extra
- // JavaScript frames on the stack.
- IC(FrameDepth depth, Isolate* isolate);
- virtual ~IC() {}
-
- // Get the call-site target; used for determining the state.
- Code* target() const { return GetTargetAtAddress(address()); }
- inline Address address() const;
-
- // Compute the current IC state based on the target stub, receiver and name.
- static State StateFrom(Code* target, Object* receiver, Object* name);
-
- // Clear the inline cache to initial state.
- static void Clear(Address address);
-
- // Computes the reloc info for this IC. This is a fairly expensive
- // operation as it has to search through the heap to find the code
- // object that contains this IC site.
- RelocInfo::Mode ComputeMode();
-
- bool IsQmlGlobal(Handle<Object> receiver) {
- JSObject* qml_global = isolate_->context()->qml_global_object();
- return !qml_global->IsUndefined() && qml_global == *receiver;
- }
-
- // Returns if this IC is for contextual (no explicit receiver)
- // access to properties.
- bool IsUndeclaredGlobal(Handle<Object> receiver) {
- if (receiver->IsGlobalObject() ||
- IsQmlGlobal(receiver)) {
- return SlowIsUndeclaredGlobal();
- } else {
- ASSERT(!SlowIsUndeclaredGlobal());
- return false;
- }
- }
-
- bool SlowIsUndeclaredGlobal() {
- return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
- }
-
- // Determines which map must be used for keeping the code stub.
- // These methods should not be called with undefined or null.
- static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object,
- JSObject* holder);
- static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object,
- JSObject* holder);
- static inline JSObject* GetCodeCacheHolder(Isolate* isolate,
- Object* object,
- InlineCacheHolderFlag holder);
-
- protected:
- Address fp() const { return fp_; }
- Address pc() const { return *pc_address_; }
- Isolate* isolate() const { return isolate_; }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Computes the address in the original code when the code running is
- // containing break points (calls to DebugBreakXXX builtins).
- Address OriginalCodeAddress() const;
-#endif
-
- // Set the call-site target.
- void set_target(Code* code) { SetTargetAtAddress(address(), code); }
-
-#ifdef DEBUG
- char TransitionMarkFromState(IC::State state);
-
- void TraceIC(const char* type,
- Handle<Object> name,
- State old_state,
- Code* new_target);
-#endif
-
- Failure* TypeError(const char* type,
- Handle<Object> object,
- Handle<Object> key);
- Failure* ReferenceError(const char* type, Handle<String> name);
-
- // Access the target code for the given IC address.
- static inline Code* GetTargetAtAddress(Address address);
- static inline void SetTargetAtAddress(Address address, Code* target);
- static void PostPatching(Address address, Code* target, Code* old_target);
-
- virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<String> name) {
- set_target(*handler);
- }
- bool UpdatePolymorphicIC(State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Code> code);
- void PatchCache(State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Code> code);
- virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code);
- virtual Handle<Code> megamorphic_stub() {
- UNREACHABLE();
- return Handle<Code>::null();
- }
- virtual Handle<Code> megamorphic_stub_strict() {
- UNREACHABLE();
- return Handle<Code>::null();
- }
- virtual Handle<Code> generic_stub() const {
- UNREACHABLE();
- return Handle<Code>::null();
- }
- virtual Handle<Code> generic_stub_strict() const {
- UNREACHABLE();
- return Handle<Code>::null();
- }
-
- private:
- // Frame pointer for the frame that uses (calls) the IC.
- Address fp_;
-
- // All access to the program counter of an IC structure is indirect
- // to make the code GC safe. This feature is crucial since
- // GetProperty and SetProperty are called and they in turn might
- // invoke the garbage collector.
- Address* pc_address_;
-
- Isolate* isolate_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
-};
-
-
-// An IC_Utility encapsulates IC::UtilityId. It exists mainly because you
-// cannot make forward declarations to an enum.
-class IC_Utility {
- public:
- explicit IC_Utility(IC::UtilityId id)
- : address_(IC::AddressFromUtilityId(id)), id_(id) {}
-
- Address address() const { return address_; }
-
- IC::UtilityId id() const { return id_; }
- private:
- Address address_;
- IC::UtilityId id_;
-};
-
-
-class CallICBase: public IC {
- public:
- class Contextual: public BitField<bool, 0, 1> {};
- class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
-
- // Returns a JSFunction or a Failure.
- MUST_USE_RESULT MaybeObject* LoadFunction(State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- Handle<String> name);
-
- protected:
- CallICBase(Code::Kind kind, Isolate* isolate)
- : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
-
- bool TryUpdateExtraICState(LookupResult* lookup,
- Handle<Object> object,
- Code::ExtraICState* extra_ic_state);
-
- // Compute a monomorphic stub if possible, otherwise return a null handle.
- Handle<Code> ComputeMonomorphicStub(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_state,
- Handle<Object> object,
- Handle<String> name);
-
- // Update the inline cache and the global stub cache based on the lookup
- // result.
- void UpdateCaches(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- Handle<String> name);
-
- // Returns a JSFunction if the object can be called as a function, and
- // patches the stack to be ready for the call. Otherwise, it returns the
- // undefined value.
- Handle<Object> TryCallAsFunction(Handle<Object> object);
-
- void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
-
- static void Clear(Address address, Code* target);
-
- // Platform-specific code generation functions used by both call and
- // keyed call.
- static void GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state);
-
- static void GenerateNormal(MacroAssembler* masm, int argc);
-
- static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state);
-
- Code::Kind kind_;
-
- friend class IC;
-};
-
-
-class CallIC: public CallICBase {
- public:
- explicit CallIC(Isolate* isolate) : CallICBase(Code::CALL_IC, isolate) {
- ASSERT(target()->is_call_stub());
- }
-
- // Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_state) {
- GenerateMiss(masm, argc, extra_state);
- }
-
- static void GenerateMiss(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_state) {
- CallICBase::GenerateMiss(masm, argc, IC::kCallIC_Miss, extra_state);
- }
-
- static void GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state);
-
- static void GenerateNormal(MacroAssembler* masm, int argc) {
- CallICBase::GenerateNormal(masm, argc);
- GenerateMiss(masm, argc, Code::kNoExtraICState);
- }
-};
-
-
-class KeyedCallIC: public CallICBase {
- public:
- explicit KeyedCallIC(Isolate* isolate)
- : CallICBase(Code::KEYED_CALL_IC, isolate) {
- ASSERT(target()->is_keyed_call_stub());
- }
-
- MUST_USE_RESULT MaybeObject* LoadFunction(State state,
- Handle<Object> object,
- Handle<Object> key);
-
- // Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm, int argc) {
- GenerateMiss(masm, argc);
- }
-
- static void GenerateMiss(MacroAssembler* masm, int argc) {
- CallICBase::GenerateMiss(masm, argc, IC::kKeyedCallIC_Miss,
- Code::kNoExtraICState);
- }
-
- static void GenerateMegamorphic(MacroAssembler* masm, int argc);
- static void GenerateNormal(MacroAssembler* masm, int argc);
- static void GenerateNonStrictArguments(MacroAssembler* masm, int argc);
-};
-
-
-class LoadIC: public IC {
- public:
- explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
- ASSERT(target()->is_load_stub() || target()->is_keyed_load_stub());
- }
-
- // Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm);
- }
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm);
- static void GenerateNormal(MacroAssembler* masm);
-
- MUST_USE_RESULT MaybeObject* Load(State state,
- Handle<Object> object,
- Handle<String> name);
-
- protected:
- virtual Code::Kind kind() const { return Code::LOAD_IC; }
-
- virtual Handle<Code> generic_stub() const {
- UNREACHABLE();
- return Handle<Code>::null();
- }
-
- virtual Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->LoadIC_Megamorphic();
- }
-
- // Update the inline cache and the global stub cache based on the
- // lookup result.
- void UpdateCaches(LookupResult* lookup,
- State state,
- Handle<Object> object,
- Handle<String> name);
- virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<String> name);
- virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<String> name);
-
- private:
- // Stub accessors.
- static Handle<Code> initialize_stub() {
- return Isolate::Current()->builtins()->LoadIC_Initialize();
- }
- virtual Handle<Code> pre_monomorphic_stub() {
- return isolate()->builtins()->LoadIC_PreMonomorphic();
- }
-
- static void Clear(Address address, Code* target);
-
- friend class IC;
-};
-
-
-enum ICMissMode {
- MISS_FORCE_GENERIC,
- MISS
-};
-
-
-class KeyedLoadIC: public LoadIC {
- public:
- explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
- : LoadIC(depth, isolate) {
- ASSERT(target()->is_keyed_load_stub());
- }
-
- MUST_USE_RESULT MaybeObject* Load(State state,
- Handle<Object> object,
- Handle<Object> key,
- ICMissMode force_generic);
-
- // Code generator routines.
- static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
- static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- static void GenerateInitialize(MacroAssembler* masm) {
- GenerateMiss(masm, MISS);
- }
- static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm, MISS);
- }
- static void GenerateGeneric(MacroAssembler* masm);
- static void GenerateString(MacroAssembler* masm);
- static void GenerateIndexedInterceptor(MacroAssembler* masm);
- static void GenerateNonStrictArguments(MacroAssembler* masm);
-
- // Bit mask to be tested against bit field for the cases when
- // generic stub should go into slow case.
- // Access check is necessary explicitly since generic stub does not perform
- // map checks.
- static const int kSlowCaseBitFieldMask =
- (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
-
- protected:
- virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
-
- Handle<Code> LoadElementStub(Handle<JSObject> receiver);
-
- virtual Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->KeyedLoadIC_Generic();
- }
- virtual Handle<Code> generic_stub() const {
- return isolate()->builtins()->KeyedLoadIC_Generic();
- }
-
- // Update the inline cache.
- virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<String> name);
- virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<String> name);
- virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code) { }
-
- private:
- // Stub accessors.
- static Handle<Code> initialize_stub() {
- return Isolate::Current()->builtins()->KeyedLoadIC_Initialize();
- }
- virtual Handle<Code> pre_monomorphic_stub() {
- return isolate()->builtins()->KeyedLoadIC_PreMonomorphic();
- }
- Handle<Code> indexed_interceptor_stub() {
- return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
- }
- Handle<Code> non_strict_arguments_stub() {
- return isolate()->builtins()->KeyedLoadIC_NonStrictArguments();
- }
- Handle<Code> string_stub() {
- return isolate()->builtins()->KeyedLoadIC_String();
- }
-
- static void Clear(Address address, Code* target);
-
- friend class IC;
-};
-
-
-class StoreIC: public IC {
- public:
- explicit StoreIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
- ASSERT(target()->is_store_stub() || target()->is_keyed_store_stub());
- }
-
- // Code generators for stub routines. Only called once at startup.
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode);
- static void GenerateNormal(MacroAssembler* masm);
- static void GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT MaybeObject* Store(
- State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
- Handle<String> name,
- Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode =
- JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
-
- protected:
- virtual Code::Kind kind() const { return Code::STORE_IC; }
- virtual Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->StoreIC_Megamorphic();
- }
- // Stub accessors.
- virtual Handle<Code> megamorphic_stub_strict() {
- return isolate()->builtins()->StoreIC_Megamorphic_Strict();
- }
- virtual Handle<Code> global_proxy_stub() {
- return isolate()->builtins()->StoreIC_GlobalProxy();
- }
- virtual Handle<Code> global_proxy_stub_strict() {
- return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
- }
-
-
- // Update the inline cache and the global stub cache based on the
- // lookup result.
- void UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value);
- // Compute the code stub for this store; used for rewriting to
- // monomorphic state and making sure that the code stub is in the
- // stub cache.
- virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name);
-
- private:
- void set_target(Code* code) {
- // Strict mode must be preserved across IC patching.
- ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
- Code::GetStrictMode(target()->extra_ic_state()));
- IC::set_target(code);
- }
-
- static Handle<Code> initialize_stub() {
- return Isolate::Current()->builtins()->StoreIC_Initialize();
- }
- static Handle<Code> initialize_stub_strict() {
- return Isolate::Current()->builtins()->StoreIC_Initialize_Strict();
- }
- static void Clear(Address address, Code* target);
-
- friend class IC;
-};
-
-
-enum KeyedStoreCheckMap {
- kDontCheckMap,
- kCheckMap
-};
-
-
-enum KeyedStoreIncrementLength {
- kDontIncrementLength,
- kIncrementLength
-};
-
-
-class KeyedStoreIC: public StoreIC {
- public:
- enum StubKind {
- STORE_NO_TRANSITION,
- STORE_TRANSITION_SMI_TO_OBJECT,
- STORE_TRANSITION_SMI_TO_DOUBLE,
- STORE_TRANSITION_DOUBLE_TO_OBJECT,
- STORE_TRANSITION_HOLEY_SMI_TO_OBJECT,
- STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE,
- STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
- STORE_AND_GROW_NO_TRANSITION,
- STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
- STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE,
- STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT
- };
-
- static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
- STORE_NO_TRANSITION;
- STATIC_ASSERT(kGrowICDelta ==
- STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT -
- STORE_TRANSITION_SMI_TO_OBJECT);
- STATIC_ASSERT(kGrowICDelta ==
- STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE -
- STORE_TRANSITION_SMI_TO_DOUBLE);
- STATIC_ASSERT(kGrowICDelta ==
- STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT -
- STORE_TRANSITION_DOUBLE_TO_OBJECT);
-
- static inline StubKind GetGrowStubKind(StubKind stub_kind) {
- if (stub_kind < STORE_AND_GROW_NO_TRANSITION) {
- stub_kind = static_cast<StubKind>(static_cast<int>(stub_kind) +
- kGrowICDelta);
- }
- return stub_kind;
- }
-
- explicit KeyedStoreIC(Isolate* isolate) : StoreIC(isolate) {
- ASSERT(target()->is_keyed_store_stub());
- }
-
- MUST_USE_RESULT MaybeObject* Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
- Handle<Object> name,
- Handle<Object> value,
- ICMissMode force_generic);
-
- // Code generators for stub routines. Only called once at startup.
- static void GenerateInitialize(MacroAssembler* masm) {
- GenerateMiss(masm, MISS);
- }
- static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
- static void GenerateSlow(MacroAssembler* masm);
- static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode);
- static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
- static void GenerateNonStrictArguments(MacroAssembler* masm);
- static void GenerateTransitionElementsSmiToDouble(MacroAssembler* masm);
- static void GenerateTransitionElementsDoubleToObject(MacroAssembler* masm);
-
- protected:
- virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
-
- virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name);
- virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code) { }
-
- virtual Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->KeyedStoreIC_Generic();
- }
- virtual Handle<Code> megamorphic_stub_strict() {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
- }
-
- Handle<Code> StoreElementStub(Handle<JSObject> receiver,
- StubKind stub_kind,
- StrictModeFlag strict_mode);
-
- private:
- void set_target(Code* code) {
- // Strict mode must be preserved across IC patching.
- ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
- Code::GetStrictMode(target()->extra_ic_state()));
- IC::set_target(code);
- }
-
- // Stub accessors.
- static Handle<Code> initialize_stub() {
- return Isolate::Current()->builtins()->KeyedStoreIC_Initialize();
- }
- static Handle<Code> initialize_stub_strict() {
- return Isolate::Current()->builtins()->KeyedStoreIC_Initialize_Strict();
- }
- Handle<Code> generic_stub() const {
- return isolate()->builtins()->KeyedStoreIC_Generic();
- }
- Handle<Code> generic_stub_strict() const {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
- }
- Handle<Code> non_strict_arguments_stub() {
- return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
- }
-
- static void Clear(Address address, Code* target);
-
- StubKind GetStubKind(Handle<JSObject> receiver,
- Handle<Object> key,
- Handle<Object> value);
-
- static bool IsTransitionStubKind(StubKind stub_kind) {
- return stub_kind > STORE_NO_TRANSITION &&
- stub_kind != STORE_AND_GROW_NO_TRANSITION;
- }
-
- static bool IsGrowStubKind(StubKind stub_kind) {
- return stub_kind >= STORE_AND_GROW_NO_TRANSITION;
- }
-
- static StubKind GetNoTransitionStubKind(StubKind stub_kind) {
- if (!IsTransitionStubKind(stub_kind)) return stub_kind;
- if (IsGrowStubKind(stub_kind)) return STORE_AND_GROW_NO_TRANSITION;
- return STORE_NO_TRANSITION;
- }
-
- Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver,
- StubKind stub_kind);
-
- friend class IC;
-};
-
-
-class UnaryOpIC: public IC {
- public:
- // sorted: increasingly more unspecific (ignoring UNINITIALIZED)
- // TODO(svenpanne) Using enums+switch is an antipattern, use a class instead.
- enum TypeInfo {
- UNINITIALIZED,
- SMI,
- NUMBER,
- GENERIC
- };
-
- explicit UnaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
-
- void patch(Code* code);
-
- static const char* GetName(TypeInfo type_info);
-
- static State ToState(TypeInfo type_info);
-
- static TypeInfo GetTypeInfo(Handle<Object> operand);
-
- static TypeInfo ComputeNewType(TypeInfo type, TypeInfo previous);
-};
-
-
-// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
-class BinaryOpIC: public IC {
- public:
- enum TypeInfo {
- UNINITIALIZED,
- SMI,
- INT32,
- NUMBER,
- ODDBALL,
- STRING, // Only used for addition operation.
- GENERIC
- };
-
- explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
-
- void patch(Code* code);
-
- static const char* GetName(TypeInfo type_info);
-
- static State ToState(TypeInfo type_info);
-};
-
-
-class CompareIC: public IC {
- public:
- // The type/state lattice is defined by the following inequations:
- // UNINITIALIZED < ...
- // ... < GENERIC
- // SMI < NUMBER
- // INTERNALIZED_STRING < STRING
- // KNOWN_OBJECT < OBJECT
- enum State {
- UNINITIALIZED,
- SMI,
- NUMBER,
- STRING,
- INTERNALIZED_STRING,
- UNIQUE_NAME, // Symbol or InternalizedString
- OBJECT, // JSObject
- KNOWN_OBJECT, // JSObject with specific map (faster check)
- GENERIC
- };
-
- CompareIC(Isolate* isolate, Token::Value op)
- : IC(EXTRA_CALL_FRAME, isolate), op_(op) { }
-
- // Update the inline cache for the given operands.
- void UpdateCaches(Handle<Object> x, Handle<Object> y);
-
-
- // Factory method for getting an uninitialized compare stub.
- static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op);
-
- // Helper function for computing the condition for a compare operation.
- static Condition ComputeCondition(Token::Value op);
-
- static const char* GetStateName(State state);
-
- private:
- static bool HasInlinedSmiCode(Address address);
-
- State TargetState(State old_state,
- State old_left,
- State old_right,
- bool has_inlined_smi_code,
- Handle<Object> x,
- Handle<Object> y);
-
- bool strict() const { return op_ == Token::EQ_STRICT; }
- Condition GetCondition() const { return ComputeCondition(op_); }
-
- static Code* GetRawUninitialized(Token::Value op);
-
- static void Clear(Address address, Code* target);
-
- Token::Value op_;
-
- friend class IC;
-};
-
-
-class ToBooleanIC: public IC {
- public:
- explicit ToBooleanIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
-
- void patch(Code* code);
-};
-
-
-// Helper for BinaryOpIC and CompareIC.
-enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
-
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure);
-
-} } // namespace v8::internal
-
-#endif // V8_IC_H_
diff --git a/src/3rdparty/v8/src/incremental-marking-inl.h b/src/3rdparty/v8/src/incremental-marking-inl.h
deleted file mode 100644
index 1c30383..0000000
--- a/src/3rdparty/v8/src/incremental-marking-inl.h
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_INCREMENTAL_MARKING_INL_H_
-#define V8_INCREMENTAL_MARKING_INL_H_
-
-#include "incremental-marking.h"
-
-namespace v8 {
-namespace internal {
-
-
-bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
- Object** slot,
- Object* value) {
- HeapObject* value_heap_obj = HeapObject::cast(value);
- MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
- if (Marking::IsWhite(value_bit)) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- if (chunk->IsLeftOfProgressBar(slot)) {
- WhiteToGreyAndPush(value_heap_obj, value_bit);
- RestartIfNotMarking();
- } else {
- return false;
- }
- } else {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- return false;
- }
- } else {
- return false;
- }
- }
- if (!is_compacting_) return false;
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- return Marking::IsBlack(obj_bit);
-}
-
-
-void IncrementalMarking::RecordWrite(HeapObject* obj,
- Object** slot,
- Object* value) {
- if (IsMarking() && value->NonFailureIsHeapObject()) {
- RecordWriteSlow(obj, slot, value);
- }
-}
-
-
-void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
- Object** slot,
- Code* value) {
- if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
-}
-
-
-void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj,
- RelocInfo* rinfo,
- Object* value) {
- if (IsMarking() && value->NonFailureIsHeapObject()) {
- RecordWriteIntoCodeSlow(obj, rinfo, value);
- }
-}
-
-
-void IncrementalMarking::RecordWrites(HeapObject* obj) {
- if (IsMarking()) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- chunk->set_progress_bar(0);
- }
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- }
- }
-}
-
-
-void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
- MarkBit mark_bit) {
- ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
- ASSERT(obj->Size() >= 2*kPointerSize);
- ASSERT(IsMarking());
- Marking::BlackToGrey(mark_bit);
- int obj_size = obj->Size();
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), -obj_size);
- bytes_scanned_ -= obj_size;
- int64_t old_bytes_rescanned = bytes_rescanned_;
- bytes_rescanned_ = old_bytes_rescanned + obj_size;
- if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
- if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
- // If we have queued twice the heap size for rescanning then we are
- // going around in circles, scanning the same objects again and again
- // as the program mutates the heap faster than we can incrementally
- // trace it. In this case we switch to non-incremental marking in
- // order to finish off this marking phase.
- if (FLAG_trace_gc) {
- PrintPID("Hurrying incremental marking because of lack of progress\n");
- }
- marking_speed_ = kMaxMarkingSpeed;
- }
- }
-
- marking_deque_.UnshiftGrey(obj);
-}
-
-
-void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
- Marking::WhiteToGrey(mark_bit);
- marking_deque_.PushGrey(obj);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_INCREMENTAL_MARKING_INL_H_
diff --git a/src/3rdparty/v8/src/incremental-marking.cc b/src/3rdparty/v8/src/incremental-marking.cc
deleted file mode 100644
index e2fca5b..0000000
--- a/src/3rdparty/v8/src/incremental-marking.cc
+++ /dev/null
@@ -1,1012 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "incremental-marking.h"
-
-#include "code-stubs.h"
-#include "compilation-cache.h"
-#include "objects-visiting.h"
-#include "objects-visiting-inl.h"
-#include "v8conversions.h"
-
-namespace v8 {
-namespace internal {
-
-
-IncrementalMarking::IncrementalMarking(Heap* heap)
- : heap_(heap),
- state_(STOPPED),
- marking_deque_memory_(NULL),
- marking_deque_memory_committed_(false),
- steps_count_(0),
- steps_took_(0),
- longest_step_(0.0),
- old_generation_space_available_at_start_of_incremental_(0),
- old_generation_space_used_at_start_of_incremental_(0),
- steps_count_since_last_gc_(0),
- steps_took_since_last_gc_(0),
- should_hurry_(false),
- marking_speed_(0),
- allocated_(0),
- no_marking_scope_depth_(0) {
-}
-
-
-void IncrementalMarking::TearDown() {
- delete marking_deque_memory_;
-}
-
-
-void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
- Object** slot,
- Object* value) {
- if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- // Object is not going to be rescanned we need to record the slot.
- heap_->mark_compact_collector()->RecordSlot(
- HeapObject::RawField(obj, 0), slot, value);
- }
- }
-}
-
-
-void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
- Object** slot,
- Isolate* isolate) {
- ASSERT(obj->IsHeapObject());
- IncrementalMarking* marking = isolate->heap()->incremental_marking();
- ASSERT(!marking->is_compacting_);
-
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- int counter = chunk->write_barrier_counter();
- if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
- marking->write_barriers_invoked_since_last_step_ +=
- MemoryChunk::kWriteBarrierCounterGranularity -
- chunk->write_barrier_counter();
- chunk->set_write_barrier_counter(
- MemoryChunk::kWriteBarrierCounterGranularity);
- }
-
- marking->RecordWrite(obj, slot, *slot);
-}
-
-
-void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
- Object** slot,
- Isolate* isolate) {
- ASSERT(obj->IsHeapObject());
- IncrementalMarking* marking = isolate->heap()->incremental_marking();
- ASSERT(marking->is_compacting_);
-
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- int counter = chunk->write_barrier_counter();
- if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
- marking->write_barriers_invoked_since_last_step_ +=
- MemoryChunk::kWriteBarrierCounterGranularity -
- chunk->write_barrier_counter();
- chunk->set_write_barrier_counter(
- MemoryChunk::kWriteBarrierCounterGranularity);
- }
-
- marking->RecordWrite(obj, slot, *slot);
-}
-
-
-void IncrementalMarking::RecordCodeTargetPatch(Code* host,
- Address pc,
- HeapObject* value) {
- if (IsMarking()) {
- RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
- RecordWriteIntoCode(host, &rinfo, value);
- }
-}
-
-
-void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
- if (IsMarking()) {
- Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
- GcSafeFindCodeForInnerPointer(pc);
- RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
- RecordWriteIntoCode(host, &rinfo, value);
- }
-}
-
-
-void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
- Object** slot,
- Code* value) {
- if (BaseRecordWrite(host, slot, value)) {
- ASSERT(slot != NULL);
- heap_->mark_compact_collector()->
- RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
- }
-}
-
-
-void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
- RelocInfo* rinfo,
- Object* value) {
- MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
- if (Marking::IsWhite(value_bit)) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- }
- // Object is either grey or white. It will be scanned if survives.
- return;
- }
-
- if (is_compacting_) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- // Object is not going to be rescanned. We need to record the slot.
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
- Code::cast(value));
- }
- }
-}
-
-
-static void MarkObjectGreyDoNotEnqueue(Object* obj) {
- if (obj->IsHeapObject()) {
- HeapObject* heap_obj = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
- if (Marking::IsBlack(mark_bit)) {
- MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
- -heap_obj->Size());
- }
- Marking::AnyToGrey(mark_bit);
- }
-}
-
-
-static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
- MarkBit mark_bit,
- int size) {
- ASSERT(!Marking::IsImpossible(mark_bit));
- if (mark_bit.Get()) return;
- mark_bit.Set();
- MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
- ASSERT(Marking::IsBlack(mark_bit));
-}
-
-
-static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
- MarkBit mark_bit,
- int size) {
- ASSERT(!Marking::IsImpossible(mark_bit));
- if (Marking::IsBlack(mark_bit)) return;
- Marking::MarkBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
- ASSERT(Marking::IsBlack(mark_bit));
-}
-
-
-class IncrementalMarkingMarkingVisitor
- : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
- public:
- static void Initialize() {
- StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
- table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
- table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
- table_.Register(kVisitJSRegExp, &VisitJSRegExp);
- }
-
- static const int kProgressBarScanningChunk = 32 * 1024;
-
- static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- // TODO(mstarzinger): Move setting of the flag to the allocation site of
- // the array. The visitor should just check the flag.
- if (FLAG_use_marking_progress_bar &&
- chunk->owner()->identity() == LO_SPACE) {
- chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
- }
- if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- Heap* heap = map->GetHeap();
- // When using a progress bar for large fixed arrays, scan only a chunk of
- // the array and try to push it onto the marking deque again until it is
- // fully scanned. Fall back to scanning it through to the end in case this
- // fails because of a full deque.
- int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
- int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
- chunk->progress_bar());
- int end_offset = Min(object_size,
- start_offset + kProgressBarScanningChunk);
- bool scan_until_end = false;
- do {
- VisitPointersWithAnchor(heap,
- HeapObject::RawField(object, 0),
- HeapObject::RawField(object, start_offset),
- HeapObject::RawField(object, end_offset));
- start_offset = end_offset;
- end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
- scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
- } while (scan_until_end && start_offset < object_size);
- chunk->set_progress_bar(start_offset);
- if (start_offset < object_size) {
- heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
- }
- } else {
- FixedArrayVisitor::Visit(map, object);
- }
- }
-
- static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
- Context* context = Context::cast(object);
-
- // We will mark cache black with a separate pass
- // when we finish marking.
- MarkObjectGreyDoNotEnqueue(context->normalized_map_cache());
- VisitNativeContext(map, context);
- }
-
- static void VisitJSWeakMap(Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- VisitPointers(heap,
- HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
- HeapObject::RawField(object, JSWeakMap::kSize));
- }
-
- static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {}
-
- INLINE(static void VisitPointer(Heap* heap, Object** p)) {
- Object* obj = *p;
- if (obj->NonFailureIsHeapObject()) {
- heap->mark_compact_collector()->RecordSlot(p, p, obj);
- MarkObject(heap, obj);
- }
- }
-
- INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
- for (Object** p = start; p < end; p++) {
- Object* obj = *p;
- if (obj->NonFailureIsHeapObject()) {
- heap->mark_compact_collector()->RecordSlot(start, p, obj);
- MarkObject(heap, obj);
- }
- }
- }
-
- INLINE(static void VisitPointersWithAnchor(Heap* heap,
- Object** anchor,
- Object** start,
- Object** end)) {
- for (Object** p = start; p < end; p++) {
- Object* obj = *p;
- if (obj->NonFailureIsHeapObject()) {
- heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
- MarkObject(heap, obj);
- }
- }
- }
-
- // Marks the object grey and pushes it on the marking stack.
- INLINE(static void MarkObject(Heap* heap, Object* obj)) {
- HeapObject* heap_object = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
- if (mark_bit.data_only()) {
- MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
- } else if (Marking::IsWhite(mark_bit)) {
- heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
- }
- }
-
- // Marks the object black without pushing it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
- HeapObject* heap_object = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
- if (Marking::IsWhite(mark_bit)) {
- mark_bit.Set();
- MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
- heap_object->Size());
- return true;
- }
- return false;
- }
-};
-
-
-class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
- public:
- explicit IncrementalMarkingRootMarkingVisitor(
- IncrementalMarking* incremental_marking)
- : incremental_marking_(incremental_marking) {
- }
-
- void VisitPointer(Object** p) {
- MarkObjectByPointer(p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
- }
-
- private:
- void MarkObjectByPointer(Object** p) {
- Object* obj = *p;
- if (!obj->IsHeapObject()) return;
-
- HeapObject* heap_object = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
- if (mark_bit.data_only()) {
- MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
- } else {
- if (Marking::IsWhite(mark_bit)) {
- incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
- }
- }
- }
-
- IncrementalMarking* incremental_marking_;
-};
-
-
-void IncrementalMarking::Initialize() {
- IncrementalMarkingMarkingVisitor::Initialize();
-}
-
-
-void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
- bool is_marking,
- bool is_compacting) {
- if (is_marking) {
- chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
- // It's difficult to filter out slots recorded for large objects.
- if (chunk->owner()->identity() == LO_SPACE &&
- chunk->size() > static_cast<size_t>(Page::kPageSize) &&
- is_compacting) {
- chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
- }
- } else if (chunk->owner()->identity() == CELL_SPACE ||
- chunk->scan_on_scavenge()) {
- chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- } else {
- chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- }
-}
-
-
-void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
- bool is_marking) {
- chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- if (is_marking) {
- chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- } else {
- chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- }
- chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
-}
-
-
-void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
- PagedSpace* space) {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
- SetOldSpacePageFlags(p, false, false);
- }
-}
-
-
-void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
- NewSpace* space) {
- NewSpacePageIterator it(space);
- while (it.has_next()) {
- NewSpacePage* p = it.next();
- SetNewSpacePageFlags(p, false);
- }
-}
-
-
-void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
- DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
- DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
- DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
- DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
- DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
- DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
-
- LargePage* lop = heap_->lo_space()->first_page();
- while (lop->is_valid()) {
- SetOldSpacePageFlags(lop, false, false);
- lop = lop->next_page();
- }
-}
-
-
-void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
- SetOldSpacePageFlags(p, true, is_compacting_);
- }
-}
-
-
-void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
- NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
- while (it.has_next()) {
- NewSpacePage* p = it.next();
- SetNewSpacePageFlags(p, true);
- }
-}
-
-
-void IncrementalMarking::ActivateIncrementalWriteBarrier() {
- ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
- ActivateIncrementalWriteBarrier(heap_->old_data_space());
- ActivateIncrementalWriteBarrier(heap_->cell_space());
- ActivateIncrementalWriteBarrier(heap_->map_space());
- ActivateIncrementalWriteBarrier(heap_->code_space());
- ActivateIncrementalWriteBarrier(heap_->new_space());
-
- LargePage* lop = heap_->lo_space()->first_page();
- while (lop->is_valid()) {
- SetOldSpacePageFlags(lop, true, is_compacting_);
- lop = lop->next_page();
- }
-}
-
-
-bool IncrementalMarking::WorthActivating() {
-#ifndef DEBUG
- static const intptr_t kActivationThreshold = 8 * MB;
-#else
- // TODO(gc) consider setting this to some low level so that some
- // debug tests run with incremental marking and some without.
- static const intptr_t kActivationThreshold = 0;
-#endif
-
- return !FLAG_expose_gc &&
- FLAG_incremental_marking &&
- !Serializer::enabled() &&
- heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
-}
-
-
-void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
- ASSERT(RecordWriteStub::GetMode(stub) ==
- RecordWriteStub::STORE_BUFFER_ONLY);
-
- if (!IsMarking()) {
- // Initially stub is generated in STORE_BUFFER_ONLY mode thus
- // we don't need to do anything if incremental marking is
- // not active.
- } else if (IsCompacting()) {
- RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
- } else {
- RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
- }
-}
-
-
-static void PatchIncrementalMarkingRecordWriteStubs(
- Heap* heap, RecordWriteStub::Mode mode) {
- UnseededNumberDictionary* stubs = heap->code_stubs();
-
- int capacity = stubs->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = stubs->KeyAt(i);
- if (stubs->IsKey(k)) {
- uint32_t key = NumberToUint32(k);
-
- if (CodeStub::MajorKeyFromKey(key) ==
- CodeStub::RecordWrite) {
- Object* e = stubs->ValueAt(i);
- if (e->IsCode()) {
- RecordWriteStub::Patch(Code::cast(e), mode);
- }
- }
- }
- }
-}
-
-
-void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
- if (marking_deque_memory_ == NULL) {
- marking_deque_memory_ = new VirtualMemory(4 * MB);
- }
- if (!marking_deque_memory_committed_) {
- bool success = marking_deque_memory_->Commit(
- reinterpret_cast<Address>(marking_deque_memory_->address()),
- marking_deque_memory_->size(),
- false); // Not executable.
- CHECK(success);
- marking_deque_memory_committed_ = true;
- }
-}
-
-void IncrementalMarking::UncommitMarkingDeque() {
- if (state_ == STOPPED && marking_deque_memory_committed_) {
- bool success = marking_deque_memory_->Uncommit(
- reinterpret_cast<Address>(marking_deque_memory_->address()),
- marking_deque_memory_->size());
- CHECK(success);
- marking_deque_memory_committed_ = false;
- }
-}
-
-
-void IncrementalMarking::Start() {
- ASSERT(!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress());
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start\n");
- }
- ASSERT(FLAG_incremental_marking);
- ASSERT(state_ == STOPPED);
-
- ResetStepCounters();
-
- if (heap_->IsSweepingComplete()) {
- StartMarking(ALLOW_COMPACTION);
- } else {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start sweeping.\n");
- }
- state_ = SWEEPING;
- }
-
- heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
-}
-
-
-void IncrementalMarking::StartMarking(CompactionFlag flag) {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start marking\n");
- }
-
- is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
- heap_->mark_compact_collector()->StartCompaction(
- MarkCompactCollector::INCREMENTAL_COMPACTION);
-
- state_ = MARKING;
-
- RecordWriteStub::Mode mode = is_compacting_ ?
- RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
-
- PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
-
- EnsureMarkingDequeIsCommitted();
-
- // Initialize marking stack.
- Address addr = static_cast<Address>(marking_deque_memory_->address());
- size_t size = marking_deque_memory_->size();
- if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
- marking_deque_.Initialize(addr, addr + size);
-
- ActivateIncrementalWriteBarrier();
-
- // Marking bits are cleared by the sweeper.
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
- }
-#endif
-
- heap_->CompletelyClearInstanceofCache();
- heap_->isolate()->compilation_cache()->MarkCompactPrologue();
-
- if (FLAG_cleanup_code_caches_at_gc) {
- // We will mark cache black with a separate pass
- // when we finish marking.
- MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
- }
-
- // Mark strong roots grey.
- IncrementalMarkingRootMarkingVisitor visitor(this);
- heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
-
- // Ready to start incremental marking.
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Running\n");
- }
-}
-
-
-void IncrementalMarking::PrepareForScavenge() {
- if (!IsMarking()) return;
- NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
- heap_->new_space()->FromSpaceEnd());
- while (it.has_next()) {
- Bitmap::Clear(it.next());
- }
-}
-
-
-void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
- if (!IsMarking()) return;
-
- int current = marking_deque_.bottom();
- int mask = marking_deque_.mask();
- int limit = marking_deque_.top();
- HeapObject** array = marking_deque_.array();
- int new_top = current;
-
- Map* filler_map = heap_->one_pointer_filler_map();
-
- while (current != limit) {
- HeapObject* obj = array[current];
- ASSERT(obj->IsHeapObject());
- current = ((current + 1) & mask);
- if (heap_->InNewSpace(obj)) {
- MapWord map_word = obj->map_word();
- if (map_word.IsForwardingAddress()) {
- HeapObject* dest = map_word.ToForwardingAddress();
- array[new_top] = dest;
- new_top = ((new_top + 1) & mask);
- ASSERT(new_top != marking_deque_.bottom());
-#ifdef DEBUG
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- ASSERT(Marking::IsGrey(mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(mark_bit)));
-#endif
- }
- } else if (obj->map() != filler_map) {
- // Skip one word filler objects that appear on the
- // stack when we perform in place array shift.
- array[new_top] = obj;
- new_top = ((new_top + 1) & mask);
- ASSERT(new_top != marking_deque_.bottom());
-#ifdef DEBUG
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- ASSERT(Marking::IsGrey(mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
- (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
- Marking::IsBlack(mark_bit)));
-#endif
- }
- }
- marking_deque_.set_top(new_top);
-
- steps_took_since_last_gc_ = 0;
- steps_count_since_last_gc_ = 0;
- longest_step_ = 0.0;
-}
-
-
-void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
- MarkBit map_mark_bit = Marking::MarkBitFrom(map);
- if (Marking::IsWhite(map_mark_bit)) {
- WhiteToGreyAndPush(map, map_mark_bit);
- }
-
- IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
-
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
-#ifdef DEBUG
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
- (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
- Marking::IsBlack(mark_bit)));
-#endif
- MarkBlackOrKeepBlack(obj, mark_bit, size);
-}
-
-
-void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
- Map* filler_map = heap_->one_pointer_filler_map();
- while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
- HeapObject* obj = marking_deque_.Pop();
-
- // Explicitly skip one word fillers. Incremental markbit patterns are
- // correct only for objects that occupy at least two words.
- Map* map = obj->map();
- if (map == filler_map) continue;
-
- int size = obj->SizeFromMap(map);
- bytes_to_process -= size;
- VisitObject(map, obj, size);
- }
-}
-
-
-void IncrementalMarking::ProcessMarkingDeque() {
- Map* filler_map = heap_->one_pointer_filler_map();
- while (!marking_deque_.IsEmpty()) {
- HeapObject* obj = marking_deque_.Pop();
-
- // Explicitly skip one word fillers. Incremental markbit patterns are
- // correct only for objects that occupy at least two words.
- Map* map = obj->map();
- if (map == filler_map) continue;
-
- VisitObject(map, obj, obj->SizeFromMap(map));
- }
-}
-
-
-void IncrementalMarking::Hurry() {
- if (state() == MARKING) {
- double start = 0.0;
- if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
- start = OS::TimeCurrentMillis();
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Hurry\n");
- }
- }
- // TODO(gc) hurry can mark objects it encounters black as mutator
- // was stopped.
- ProcessMarkingDeque();
- state_ = COMPLETE;
- if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
- double end = OS::TimeCurrentMillis();
- double delta = end - start;
- heap_->AddMarkingTime(delta);
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
- static_cast<int>(delta));
- }
- }
- }
-
- if (FLAG_cleanup_code_caches_at_gc) {
- PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
- Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
- MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
- PolymorphicCodeCache::kSize);
- }
-
- Object* context = heap_->native_contexts_list();
- while (!context->IsUndefined()) {
- // GC can happen when the context is not fully initialized,
- // so the cache can be undefined.
- HeapObject* cache = HeapObject::cast(
- Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
- if (!cache->IsUndefined()) {
- MarkBit mark_bit = Marking::MarkBitFrom(cache);
- if (Marking::IsGrey(mark_bit)) {
- Marking::GreyToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
- }
- }
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
- }
-}
-
-
-void IncrementalMarking::Abort() {
- if (IsStopped()) return;
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Aborting.\n");
- }
- heap_->new_space()->LowerInlineAllocationLimit(0);
- IncrementalMarking::set_should_hurry(false);
- ResetStepCounters();
- if (IsMarking()) {
- PatchIncrementalMarkingRecordWriteStubs(heap_,
- RecordWriteStub::STORE_BUFFER_ONLY);
- DeactivateIncrementalWriteBarrier();
-
- if (is_compacting_) {
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- Page* p = Page::FromAddress(obj->address());
- if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- p->ClearFlag(Page::RESCAN_ON_EVACUATION);
- }
- }
- }
- }
- heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
- state_ = STOPPED;
- is_compacting_ = false;
-}
-
-
-void IncrementalMarking::Finalize() {
- Hurry();
- state_ = STOPPED;
- is_compacting_ = false;
- heap_->new_space()->LowerInlineAllocationLimit(0);
- IncrementalMarking::set_should_hurry(false);
- ResetStepCounters();
- PatchIncrementalMarkingRecordWriteStubs(heap_,
- RecordWriteStub::STORE_BUFFER_ONLY);
- DeactivateIncrementalWriteBarrier();
- ASSERT(marking_deque_.IsEmpty());
- heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
-}
-
-
-void IncrementalMarking::MarkingComplete(CompletionAction action) {
- state_ = COMPLETE;
- // We will set the stack guard to request a GC now. This will mean the rest
- // of the GC gets performed as soon as possible (we can't do a GC here in a
- // record-write context). If a few things get allocated between now and then
- // that shouldn't make us do a scavenge and keep being incremental, so we set
- // the should-hurry flag to indicate that there can't be much work left to do.
- set_should_hurry(true);
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Complete (normal).\n");
- }
- if (action == GC_VIA_STACK_GUARD) {
- heap_->isolate()->stack_guard()->RequestGC();
- }
-}
-
-
-void IncrementalMarking::Step(intptr_t allocated_bytes,
- CompletionAction action) {
- if (heap_->gc_state() != Heap::NOT_IN_GC ||
- !FLAG_incremental_marking ||
- !FLAG_incremental_marking_steps ||
- (state_ != SWEEPING && state_ != MARKING)) {
- return;
- }
-
- allocated_ += allocated_bytes;
-
- if (allocated_ < kAllocatedThreshold &&
- write_barriers_invoked_since_last_step_ <
- kWriteBarriersInvokedThreshold) {
- return;
- }
-
- if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
-
- // The marking speed is driven either by the allocation rate or by the rate
- // at which we are having to check the color of objects in the write barrier.
- // It is possible for a tight non-allocating loop to run a lot of write
- // barriers before we get here and check them (marking can only take place on
- // allocation), so to reduce the lumpiness we don't use the write barriers
- // invoked since last step directly to determine the amount of work to do.
- intptr_t bytes_to_process =
- marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
- allocated_ = 0;
- write_barriers_invoked_since_last_step_ = 0;
-
- bytes_scanned_ += bytes_to_process;
-
- double start = 0;
-
- if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
- FLAG_print_cumulative_gc_stat) {
- start = OS::TimeCurrentMillis();
- }
-
- if (state_ == SWEEPING) {
- if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
- bytes_scanned_ = 0;
- StartMarking(PREVENT_COMPACTION);
- }
- } else if (state_ == MARKING) {
- ProcessMarkingDeque(bytes_to_process);
- if (marking_deque_.IsEmpty()) MarkingComplete(action);
- }
-
- steps_count_++;
- steps_count_since_last_gc_++;
-
- bool speed_up = false;
-
- if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
- if (FLAG_trace_gc) {
- PrintPID("Speed up marking after %d steps\n",
- static_cast<int>(kMarkingSpeedAccellerationInterval));
- }
- speed_up = true;
- }
-
- bool space_left_is_very_small =
- (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
-
- bool only_1_nth_of_space_that_was_available_still_left =
- (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
- old_generation_space_available_at_start_of_incremental_);
-
- if (space_left_is_very_small ||
- only_1_nth_of_space_that_was_available_still_left) {
- if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
- speed_up = true;
- }
-
- bool size_of_old_space_multiplied_by_n_during_marking =
- (heap_->PromotedTotalSize() >
- (marking_speed_ + 1) *
- old_generation_space_used_at_start_of_incremental_);
- if (size_of_old_space_multiplied_by_n_during_marking) {
- speed_up = true;
- if (FLAG_trace_gc) {
- PrintPID("Speed up marking because of heap size increase\n");
- }
- }
-
- int64_t promoted_during_marking = heap_->PromotedTotalSize()
- - old_generation_space_used_at_start_of_incremental_;
- intptr_t delay = marking_speed_ * MB;
- intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
-
- // We try to scan at at least twice the speed that we are allocating.
- if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
- if (FLAG_trace_gc) {
- PrintPID("Speed up marking because marker was not keeping up\n");
- }
- speed_up = true;
- }
-
- if (speed_up) {
- if (state_ != MARKING) {
- if (FLAG_trace_gc) {
- PrintPID("Postponing speeding up marking until marking starts\n");
- }
- } else {
- marking_speed_ += kMarkingSpeedAccellerationInterval;
- marking_speed_ = static_cast<int>(
- Min(kMaxMarkingSpeed,
- static_cast<intptr_t>(marking_speed_ * 1.3)));
- if (FLAG_trace_gc) {
- PrintPID("Marking speed increased to %d\n", marking_speed_);
- }
- }
- }
-
- if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
- FLAG_print_cumulative_gc_stat) {
- double end = OS::TimeCurrentMillis();
- double delta = (end - start);
- longest_step_ = Max(longest_step_, delta);
- steps_took_ += delta;
- steps_took_since_last_gc_ += delta;
- heap_->AddMarkingTime(delta);
- }
-}
-
-
-void IncrementalMarking::ResetStepCounters() {
- steps_count_ = 0;
- steps_took_ = 0;
- longest_step_ = 0.0;
- old_generation_space_available_at_start_of_incremental_ =
- SpaceLeftInOldSpace();
- old_generation_space_used_at_start_of_incremental_ =
- heap_->PromotedTotalSize();
- steps_count_since_last_gc_ = 0;
- steps_took_since_last_gc_ = 0;
- bytes_rescanned_ = 0;
- marking_speed_ = kInitialMarkingSpeed;
- bytes_scanned_ = 0;
- write_barriers_invoked_since_last_step_ = 0;
-}
-
-
-int64_t IncrementalMarking::SpaceLeftInOldSpace() {
- return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/incremental-marking.h b/src/3rdparty/v8/src/incremental-marking.h
deleted file mode 100644
index fc5a978..0000000
--- a/src/3rdparty/v8/src/incremental-marking.h
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_INCREMENTAL_MARKING_H_
-#define V8_INCREMENTAL_MARKING_H_
-
-
-#include "execution.h"
-#include "mark-compact.h"
-#include "objects.h"
-
-namespace v8 {
-namespace internal {
-
-
-class IncrementalMarking {
- public:
- enum State {
- STOPPED,
- SWEEPING,
- MARKING,
- COMPLETE
- };
-
- enum CompletionAction {
- GC_VIA_STACK_GUARD,
- NO_GC_VIA_STACK_GUARD
- };
-
- explicit IncrementalMarking(Heap* heap);
-
- static void Initialize();
-
- void TearDown();
-
- State state() {
- ASSERT(state_ == STOPPED || FLAG_incremental_marking);
- return state_;
- }
-
- bool should_hurry() { return should_hurry_; }
- void set_should_hurry(bool val) { should_hurry_ = val; }
-
- inline bool IsStopped() { return state() == STOPPED; }
-
- INLINE(bool IsMarking()) { return state() >= MARKING; }
-
- inline bool IsMarkingIncomplete() { return state() == MARKING; }
-
- inline bool IsComplete() { return state() == COMPLETE; }
-
- bool WorthActivating();
-
- void Start();
-
- void Stop();
-
- void PrepareForScavenge();
-
- void UpdateMarkingDequeAfterScavenge();
-
- void Hurry();
-
- void Finalize();
-
- void Abort();
-
- void MarkingComplete(CompletionAction action);
-
- // It's hard to know how much work the incremental marker should do to make
- // progress in the face of the mutator creating new work for it. We start
- // of at a moderate rate of work and gradually increase the speed of the
- // incremental marker until it completes.
- // Do some marking every time this much memory has been allocated or that many
- // heavy (color-checking) write barriers have been invoked.
- static const intptr_t kAllocatedThreshold = 65536;
- static const intptr_t kWriteBarriersInvokedThreshold = 65536;
- // Start off by marking this many times more memory than has been allocated.
- static const intptr_t kInitialMarkingSpeed = 1;
- // But if we are promoting a lot of data we need to mark faster to keep up
- // with the data that is entering the old space through promotion.
- static const intptr_t kFastMarking = 3;
- // After this many steps we increase the marking/allocating factor.
- static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
- // This is how much we increase the marking/allocating factor by.
- static const intptr_t kMarkingSpeedAccelleration = 2;
- static const intptr_t kMaxMarkingSpeed = 1000;
-
- void OldSpaceStep(intptr_t allocated) {
- Step(allocated * kFastMarking / kInitialMarkingSpeed,
- GC_VIA_STACK_GUARD);
- }
-
- void Step(intptr_t allocated, CompletionAction action);
-
- inline void RestartIfNotMarking() {
- if (state_ == COMPLETE) {
- state_ = MARKING;
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
- }
- }
- }
-
- static void RecordWriteFromCode(HeapObject* obj,
- Object** slot,
- Isolate* isolate);
-
- static void RecordWriteForEvacuationFromCode(HeapObject* obj,
- Object** slot,
- Isolate* isolate);
-
- // Record a slot for compaction. Returns false for objects that are
- // guaranteed to be rescanned or not guaranteed to survive.
- //
- // No slots in white objects should be recorded, as some slots are typed and
- // cannot be interpreted correctly if the underlying object does not survive
- // the incremental cycle (stays white).
- INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
- INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
- INLINE(void RecordWriteIntoCode(HeapObject* obj,
- RelocInfo* rinfo,
- Object* value));
- INLINE(void RecordWriteOfCodeEntry(JSFunction* host,
- Object** slot,
- Code* value));
-
-
- void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
- void RecordWriteIntoCodeSlow(HeapObject* obj,
- RelocInfo* rinfo,
- Object* value);
- void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
- void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
- void RecordCodeTargetPatch(Address pc, HeapObject* value);
-
- inline void RecordWrites(HeapObject* obj);
-
- inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
-
- inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
-
- inline int steps_count() {
- return steps_count_;
- }
-
- inline double steps_took() {
- return steps_took_;
- }
-
- inline double longest_step() {
- return longest_step_;
- }
-
- inline int steps_count_since_last_gc() {
- return steps_count_since_last_gc_;
- }
-
- inline double steps_took_since_last_gc() {
- return steps_took_since_last_gc_;
- }
-
- inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
- SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
- }
-
- inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
- SetNewSpacePageFlags(chunk, IsMarking());
- }
-
- MarkingDeque* marking_deque() { return &marking_deque_; }
-
- bool IsCompacting() { return IsMarking() && is_compacting_; }
-
- void ActivateGeneratedStub(Code* stub);
-
- void NotifyOfHighPromotionRate() {
- if (IsMarking()) {
- if (marking_speed_ < kFastMarking) {
- if (FLAG_trace_gc) {
- PrintPID("Increasing marking speed to %d "
- "due to high promotion rate\n",
- static_cast<int>(kFastMarking));
- }
- marking_speed_ = kFastMarking;
- }
- }
- }
-
- void EnterNoMarkingScope() {
- no_marking_scope_depth_++;
- }
-
- void LeaveNoMarkingScope() {
- no_marking_scope_depth_--;
- }
-
- void UncommitMarkingDeque();
-
- private:
- int64_t SpaceLeftInOldSpace();
-
- void ResetStepCounters();
-
- enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
-
- void StartMarking(CompactionFlag flag);
-
- void ActivateIncrementalWriteBarrier(PagedSpace* space);
- static void ActivateIncrementalWriteBarrier(NewSpace* space);
- void ActivateIncrementalWriteBarrier();
-
- static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
- static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
- void DeactivateIncrementalWriteBarrier();
-
- static void SetOldSpacePageFlags(MemoryChunk* chunk,
- bool is_marking,
- bool is_compacting);
-
- static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
-
- void EnsureMarkingDequeIsCommitted();
-
- INLINE(void ProcessMarkingDeque());
-
- INLINE(void ProcessMarkingDeque(intptr_t bytes_to_process));
-
- INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
-
- Heap* heap_;
-
- State state_;
- bool is_compacting_;
-
- VirtualMemory* marking_deque_memory_;
- bool marking_deque_memory_committed_;
- MarkingDeque marking_deque_;
-
- int steps_count_;
- double steps_took_;
- double longest_step_;
- int64_t old_generation_space_available_at_start_of_incremental_;
- int64_t old_generation_space_used_at_start_of_incremental_;
- int steps_count_since_last_gc_;
- double steps_took_since_last_gc_;
- int64_t bytes_rescanned_;
- bool should_hurry_;
- int marking_speed_;
- intptr_t bytes_scanned_;
- intptr_t allocated_;
- intptr_t write_barriers_invoked_since_last_step_;
-
- int no_marking_scope_depth_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_INCREMENTAL_MARKING_H_
diff --git a/src/3rdparty/v8/src/interface.cc b/src/3rdparty/v8/src/interface.cc
deleted file mode 100644
index 603dfe9..0000000
--- a/src/3rdparty/v8/src/interface.cc
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "interface.h"
-
-namespace v8 {
-namespace internal {
-
-static bool Match(void* key1, void* key2) {
- String* name1 = *static_cast<String**>(key1);
- String* name2 = *static_cast<String**>(key2);
- ASSERT(name1->IsInternalizedString());
- ASSERT(name2->IsInternalizedString());
- return name1 == name2;
-}
-
-
-Interface* Interface::Lookup(Handle<String> name, Zone* zone) {
- ASSERT(IsModule());
- ZoneHashMap* map = Chase()->exports_;
- if (map == NULL) return NULL;
- ZoneAllocationPolicy allocator(zone);
- ZoneHashMap::Entry* p = map->Lookup(name.location(), name->Hash(), false,
- allocator);
- if (p == NULL) return NULL;
- ASSERT(*static_cast<String**>(p->key) == *name);
- ASSERT(p->value != NULL);
- return static_cast<Interface*>(p->value);
-}
-
-
-#ifdef DEBUG
-// Current nesting depth for debug output.
-class Nesting {
- public:
- Nesting() { current_ += 2; }
- ~Nesting() { current_ -= 2; }
- static int current() { return current_; }
- private:
- static int current_;
-};
-
-int Nesting::current_ = 0;
-#endif
-
-
-void Interface::DoAdd(
- void* name, uint32_t hash, Interface* interface, Zone* zone, bool* ok) {
- MakeModule(ok);
- if (!*ok) return;
-
-#ifdef DEBUG
- if (FLAG_print_interface_details) {
- PrintF("%*s# Adding...\n", Nesting::current(), "");
- PrintF("%*sthis = ", Nesting::current(), "");
- this->Print(Nesting::current());
- PrintF("%*s%s : ", Nesting::current(), "",
- (*static_cast<String**>(name))->ToAsciiArray());
- interface->Print(Nesting::current());
- }
-#endif
-
- ZoneHashMap** map = &Chase()->exports_;
- ZoneAllocationPolicy allocator(zone);
-
- if (*map == NULL)
- *map = new ZoneHashMap(Match, ZoneHashMap::kDefaultHashMapCapacity,
- allocator);
-
- ZoneHashMap::Entry* p = (*map)->Lookup(name, hash, !IsFrozen(), allocator);
- if (p == NULL) {
- // This didn't have name but was frozen already, that's an error.
- *ok = false;
- } else if (p->value == NULL) {
- p->value = interface;
- } else {
-#ifdef DEBUG
- Nesting nested;
-#endif
- static_cast<Interface*>(p->value)->Unify(interface, zone, ok);
- }
-
-#ifdef DEBUG
- if (FLAG_print_interface_details) {
- PrintF("%*sthis' = ", Nesting::current(), "");
- this->Print(Nesting::current());
- PrintF("%*s# Added.\n", Nesting::current(), "");
- }
-#endif
-}
-
-
-void Interface::Unify(Interface* that, Zone* zone, bool* ok) {
- if (this->forward_) return this->Chase()->Unify(that, zone, ok);
- if (that->forward_) return this->Unify(that->Chase(), zone, ok);
- ASSERT(this->forward_ == NULL);
- ASSERT(that->forward_ == NULL);
-
- *ok = true;
- if (this == that) return;
- if (this->IsValue()) {
- that->MakeValue(ok);
- if (*ok && this->IsConst()) that->MakeConst(ok);
- return;
- }
- if (that->IsValue()) {
- this->MakeValue(ok);
- if (*ok && that->IsConst()) this->MakeConst(ok);
- return;
- }
-
-#ifdef DEBUG
- if (FLAG_print_interface_details) {
- PrintF("%*s# Unifying...\n", Nesting::current(), "");
- PrintF("%*sthis = ", Nesting::current(), "");
- this->Print(Nesting::current());
- PrintF("%*sthat = ", Nesting::current(), "");
- that->Print(Nesting::current());
- }
-#endif
-
- // Merge the smaller interface into the larger, for performance.
- if (this->exports_ != NULL && (that->exports_ == NULL ||
- this->exports_->occupancy() >= that->exports_->occupancy())) {
- this->DoUnify(that, ok, zone);
- } else {
- that->DoUnify(this, ok, zone);
- }
-
-#ifdef DEBUG
- if (FLAG_print_interface_details) {
- PrintF("%*sthis' = ", Nesting::current(), "");
- this->Print(Nesting::current());
- PrintF("%*sthat' = ", Nesting::current(), "");
- that->Print(Nesting::current());
- PrintF("%*s# Unified.\n", Nesting::current(), "");
- }
-#endif
-}
-
-
-void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) {
- ASSERT(this->forward_ == NULL);
- ASSERT(that->forward_ == NULL);
- ASSERT(!this->IsValue());
- ASSERT(!that->IsValue());
- ASSERT(this->index_ == -1);
- ASSERT(that->index_ == -1);
- ASSERT(*ok);
-
-#ifdef DEBUG
- Nesting nested;
-#endif
-
- // Try to merge all members from that into this.
- ZoneHashMap* map = that->exports_;
- if (map != NULL) {
- for (ZoneHashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
- this->DoAdd(p->key, p->hash, static_cast<Interface*>(p->value), zone, ok);
- if (!*ok) return;
- }
- }
-
- // If the new interface is larger than that's, then there were members in
- // 'this' which 'that' didn't have. If 'that' was frozen that is an error.
- int this_size = this->exports_ == NULL ? 0 : this->exports_->occupancy();
- int that_size = map == NULL ? 0 : map->occupancy();
- if (that->IsFrozen() && this_size > that_size) {
- *ok = false;
- return;
- }
-
- // Merge interfaces.
- this->flags_ |= that->flags_;
- that->forward_ = this;
-}
-
-
-#ifdef DEBUG
-void Interface::Print(int n) {
- int n0 = n > 0 ? n : 0;
-
- if (FLAG_print_interface_details) {
- PrintF("%p", static_cast<void*>(this));
- for (Interface* link = this->forward_; link != NULL; link = link->forward_)
- PrintF("->%p", static_cast<void*>(link));
- PrintF(" ");
- }
-
- if (IsUnknown()) {
- PrintF("unknown\n");
- } else if (IsConst()) {
- PrintF("const\n");
- } else if (IsValue()) {
- PrintF("value\n");
- } else if (IsModule()) {
- PrintF("module %d %s{", Index(), IsFrozen() ? "" : "(unresolved) ");
- ZoneHashMap* map = Chase()->exports_;
- if (map == NULL || map->occupancy() == 0) {
- PrintF("}\n");
- } else if (n < 0 || n0 >= 2 * FLAG_print_interface_depth) {
- // Avoid infinite recursion on cyclic types.
- PrintF("...}\n");
- } else {
- PrintF("\n");
- for (ZoneHashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
- String* name = *static_cast<String**>(p->key);
- Interface* interface = static_cast<Interface*>(p->value);
- PrintF("%*s%s : ", n0 + 2, "", name->ToAsciiArray());
- interface->Print(n0 + 2);
- }
- PrintF("%*s}\n", n0, "");
- }
- }
-}
-#endif
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/interface.h b/src/3rdparty/v8/src/interface.h
deleted file mode 100644
index f824a9a..0000000
--- a/src/3rdparty/v8/src/interface.h
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_INTERFACE_H_
-#define V8_INTERFACE_H_
-
-#include "zone-inl.h" // For operator new.
-
-namespace v8 {
-namespace internal {
-
-
-// This class implements the following abstract grammar of interfaces
-// (i.e. module types):
-// interface ::= UNDETERMINED | VALUE | CONST | MODULE(exports)
-// exports ::= {name : interface, ...}
-// A frozen type is one that is fully determined. Unification does not
-// allow to turn non-const values into const, or adding additional exports to
-// frozen interfaces. Otherwise, unifying modules merges their exports.
-// Undetermined types are unification variables that can be unified freely.
-// There is a natural subsort lattice that reflects the increase of knowledge:
-//
-// undetermined
-// // | \\ .
-// value (frozen) module
-// // \\ / \ //
-// const fr.value fr.module
-// \\ /
-// fr.const
-//
-// where the bold lines are the only transitions allowed.
-
-class Interface : public ZoneObject {
- public:
- // ---------------------------------------------------------------------------
- // Factory methods.
-
- static Interface* NewUnknown(Zone* zone) {
- return new(zone) Interface(NONE);
- }
-
- static Interface* NewValue() {
- static Interface value_interface(VALUE + FROZEN); // Cached.
- return &value_interface;
- }
-
- static Interface* NewConst() {
- static Interface value_interface(VALUE + CONST + FROZEN); // Cached.
- return &value_interface;
- }
-
- static Interface* NewModule(Zone* zone) {
- return new(zone) Interface(MODULE);
- }
-
- // ---------------------------------------------------------------------------
- // Mutators.
-
- // Add a name to the list of exports. If it already exists, unify with
- // interface, otherwise insert unless this is closed.
- void Add(Handle<String> name, Interface* interface, Zone* zone, bool* ok) {
- DoAdd(name.location(), name->Hash(), interface, zone, ok);
- }
-
- // Unify with another interface. If successful, both interface objects will
- // represent the same type, and changes to one are reflected in the other.
- void Unify(Interface* that, Zone* zone, bool* ok);
-
- // Determine this interface to be a value interface.
- void MakeValue(bool* ok) {
- *ok = !IsModule();
- if (*ok) Chase()->flags_ |= VALUE;
- }
-
- // Determine this interface to be an immutable interface.
- void MakeConst(bool* ok) {
- *ok = !IsModule() && (IsConst() || !IsFrozen());
- if (*ok) Chase()->flags_ |= VALUE + CONST;
- }
-
- // Determine this interface to be a module interface.
- void MakeModule(bool* ok) {
- *ok = !IsValue();
- if (*ok) Chase()->flags_ |= MODULE;
- }
-
- // Do not allow any further refinements, directly or through unification.
- void Freeze(bool* ok) {
- *ok = IsValue() || IsModule();
- if (*ok) Chase()->flags_ |= FROZEN;
- }
-
- // Assign an index.
- void Allocate(int index) {
- ASSERT(IsModule() && IsFrozen() && Chase()->index_ == -1);
- Chase()->index_ = index;
- }
-
- // ---------------------------------------------------------------------------
- // Accessors.
-
- // Check whether this is still a fully undetermined type.
- bool IsUnknown() { return Chase()->flags_ == NONE; }
-
- // Check whether this is a value type.
- bool IsValue() { return Chase()->flags_ & VALUE; }
-
- // Check whether this is a constant type.
- bool IsConst() { return Chase()->flags_ & CONST; }
-
- // Check whether this is a module type.
- bool IsModule() { return Chase()->flags_ & MODULE; }
-
- // Check whether this is closed (i.e. fully determined).
- bool IsFrozen() { return Chase()->flags_ & FROZEN; }
-
- bool IsUnified(Interface* that) {
- return Chase() == that->Chase()
- || (this->IsValue() == that->IsValue() &&
- this->IsConst() == that->IsConst());
- }
-
- int Length() {
- ASSERT(IsModule() && IsFrozen());
- ZoneHashMap* exports = Chase()->exports_;
- return exports ? exports->occupancy() : 0;
- }
-
- // The context slot in the hosting global context pointing to this module.
- int Index() {
- ASSERT(IsModule() && IsFrozen());
- return Chase()->index_;
- }
-
- // Look up an exported name. Returns NULL if not (yet) defined.
- Interface* Lookup(Handle<String> name, Zone* zone);
-
- // ---------------------------------------------------------------------------
- // Iterators.
-
- // Use like:
- // for (auto it = interface->iterator(); !it.done(); it.Advance()) {
- // ... it.name() ... it.interface() ...
- // }
- class Iterator {
- public:
- bool done() const { return entry_ == NULL; }
- Handle<String> name() const {
- ASSERT(!done());
- return Handle<String>(*static_cast<String**>(entry_->key));
- }
- Interface* interface() const {
- ASSERT(!done());
- return static_cast<Interface*>(entry_->value);
- }
- void Advance() { entry_ = exports_->Next(entry_); }
-
- private:
- friend class Interface;
- explicit Iterator(const ZoneHashMap* exports)
- : exports_(exports), entry_(exports ? exports->Start() : NULL) {}
-
- const ZoneHashMap* exports_;
- ZoneHashMap::Entry* entry_;
- };
-
- Iterator iterator() const { return Iterator(this->exports_); }
-
- // ---------------------------------------------------------------------------
- // Debugging.
-#ifdef DEBUG
- void Print(int n = 0); // n = indentation; n < 0 => don't print recursively
-#endif
-
- // ---------------------------------------------------------------------------
- // Implementation.
- private:
- enum Flags { // All flags are monotonic
- NONE = 0,
- VALUE = 1, // This type describes a value
- CONST = 2, // This type describes a constant
- MODULE = 4, // This type describes a module
- FROZEN = 8 // This type is fully determined
- };
-
- int flags_;
- Interface* forward_; // Unification link
- ZoneHashMap* exports_; // Module exports and their types (allocated lazily)
- int index_;
-
- explicit Interface(int flags)
- : flags_(flags),
- forward_(NULL),
- exports_(NULL),
- index_(-1) {
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Creating %p\n", static_cast<void*>(this));
-#endif
- }
-
- Interface* Chase() {
- Interface* result = this;
- while (result->forward_ != NULL) result = result->forward_;
- if (result != this) forward_ = result; // On-the-fly path compression.
- return result;
- }
-
- void DoAdd(void* name, uint32_t hash, Interface* interface, Zone* zone,
- bool* ok);
- void DoUnify(Interface* that, bool* ok, Zone* zone);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_INTERFACE_H_
diff --git a/src/3rdparty/v8/src/interpreter-irregexp.cc b/src/3rdparty/v8/src/interpreter-irregexp.cc
deleted file mode 100644
index 5abeb5a..0000000
--- a/src/3rdparty/v8/src/interpreter-irregexp.cc
+++ /dev/null
@@ -1,641 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// A simple interpreter for the Irregexp byte code.
-
-
-#include "v8.h"
-#include "unicode.h"
-#include "utils.h"
-#include "ast.h"
-#include "bytecodes-irregexp.h"
-#include "interpreter-irregexp.h"
-#include "jsregexp.h"
-#include "regexp-macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-typedef unibrow::Mapping<unibrow::Ecma262Canonicalize> Canonicalize;
-
-static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
- int from,
- int current,
- int len,
- Vector<const uc16> subject) {
- for (int i = 0; i < len; i++) {
- unibrow::uchar old_char = subject[from++];
- unibrow::uchar new_char = subject[current++];
- if (old_char == new_char) continue;
- unibrow::uchar old_string[1] = { old_char };
- unibrow::uchar new_string[1] = { new_char };
- interp_canonicalize->get(old_char, '\0', old_string);
- interp_canonicalize->get(new_char, '\0', new_string);
- if (old_string[0] != new_string[0]) {
- return false;
- }
- }
- return true;
-}
-
-
-static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
- int from,
- int current,
- int len,
- Vector<const uint8_t> subject) {
- for (int i = 0; i < len; i++) {
- unsigned int old_char = subject[from++];
- unsigned int new_char = subject[current++];
- if (old_char == new_char) continue;
- if (old_char - 'A' <= 'Z' - 'A') old_char |= 0x20;
- if (new_char - 'A' <= 'Z' - 'A') new_char |= 0x20;
- if (old_char != new_char) return false;
- }
- return true;
-}
-
-
-#ifdef DEBUG
-static void TraceInterpreter(const byte* code_base,
- const byte* pc,
- int stack_depth,
- int current_position,
- uint32_t current_char,
- int bytecode_length,
- const char* bytecode_name) {
- if (FLAG_trace_regexp_bytecodes) {
- bool printable = (current_char < 127 && current_char >= 32);
- const char* format =
- printable ?
- "pc = %02x, sp = %d, curpos = %d, curchar = %08x (%c), bc = %s" :
- "pc = %02x, sp = %d, curpos = %d, curchar = %08x .%c., bc = %s";
- PrintF(format,
- pc - code_base,
- stack_depth,
- current_position,
- current_char,
- printable ? current_char : '.',
- bytecode_name);
- for (int i = 0; i < bytecode_length; i++) {
- printf(", %02x", pc[i]);
- }
- printf(" ");
- for (int i = 1; i < bytecode_length; i++) {
- unsigned char b = pc[i];
- if (b < 127 && b >= 32) {
- printf("%c", b);
- } else {
- printf(".");
- }
- }
- printf("\n");
- }
-}
-
-
-#define BYTECODE(name) \
- case BC_##name: \
- TraceInterpreter(code_base, \
- pc, \
- static_cast<int>(backtrack_sp - backtrack_stack_base), \
- current, \
- current_char, \
- BC_##name##_LENGTH, \
- #name);
-#else
-#define BYTECODE(name) \
- case BC_##name:
-#endif
-
-
-static int32_t Load32Aligned(const byte* pc) {
- ASSERT((reinterpret_cast<intptr_t>(pc) & 3) == 0);
- return *reinterpret_cast<const int32_t *>(pc);
-}
-
-
-static int32_t Load16Aligned(const byte* pc) {
- ASSERT((reinterpret_cast<intptr_t>(pc) & 1) == 0);
- return *reinterpret_cast<const uint16_t *>(pc);
-}
-
-
-// A simple abstraction over the backtracking stack used by the interpreter.
-// This backtracking stack does not grow automatically, but it ensures that the
-// the memory held by the stack is released or remembered in a cache if the
-// matching terminates.
-class BacktrackStack {
- public:
- explicit BacktrackStack(Isolate* isolate) : isolate_(isolate) {
- if (isolate->irregexp_interpreter_backtrack_stack_cache() != NULL) {
- // If the cache is not empty reuse the previously allocated stack.
- data_ = isolate->irregexp_interpreter_backtrack_stack_cache();
- isolate->set_irregexp_interpreter_backtrack_stack_cache(NULL);
- } else {
- // Cache was empty. Allocate a new backtrack stack.
- data_ = NewArray<int>(kBacktrackStackSize);
- }
- }
-
- ~BacktrackStack() {
- if (isolate_->irregexp_interpreter_backtrack_stack_cache() == NULL) {
- // The cache is empty. Keep this backtrack stack around.
- isolate_->set_irregexp_interpreter_backtrack_stack_cache(data_);
- } else {
- // A backtrack stack was already cached, just release this one.
- DeleteArray(data_);
- }
- }
-
- int* data() const { return data_; }
-
- int max_size() const { return kBacktrackStackSize; }
-
- private:
- static const int kBacktrackStackSize = 10000;
-
- int* data_;
- Isolate* isolate_;
-
- DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
-};
-
-
-template <typename Char>
-static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
- const byte* code_base,
- Vector<const Char> subject,
- int* registers,
- int current,
- uint32_t current_char) {
- const byte* pc = code_base;
- // BacktrackStack ensures that the memory allocated for the backtracking stack
- // is returned to the system or cached if there is no stack being cached at
- // the moment.
- BacktrackStack backtrack_stack(isolate);
- int* backtrack_stack_base = backtrack_stack.data();
- int* backtrack_sp = backtrack_stack_base;
- int backtrack_stack_space = backtrack_stack.max_size();
-#ifdef DEBUG
- if (FLAG_trace_regexp_bytecodes) {
- PrintF("\n\nStart bytecode interpreter\n\n");
- }
-#endif
- while (true) {
- int32_t insn = Load32Aligned(pc);
- switch (insn & BYTECODE_MASK) {
- BYTECODE(BREAK)
- UNREACHABLE();
- return RegExpImpl::RE_FAILURE;
- BYTECODE(PUSH_CP)
- if (--backtrack_stack_space < 0) {
- return RegExpImpl::RE_EXCEPTION;
- }
- *backtrack_sp++ = current;
- pc += BC_PUSH_CP_LENGTH;
- break;
- BYTECODE(PUSH_BT)
- if (--backtrack_stack_space < 0) {
- return RegExpImpl::RE_EXCEPTION;
- }
- *backtrack_sp++ = Load32Aligned(pc + 4);
- pc += BC_PUSH_BT_LENGTH;
- break;
- BYTECODE(PUSH_REGISTER)
- if (--backtrack_stack_space < 0) {
- return RegExpImpl::RE_EXCEPTION;
- }
- *backtrack_sp++ = registers[insn >> BYTECODE_SHIFT];
- pc += BC_PUSH_REGISTER_LENGTH;
- break;
- BYTECODE(SET_REGISTER)
- registers[insn >> BYTECODE_SHIFT] = Load32Aligned(pc + 4);
- pc += BC_SET_REGISTER_LENGTH;
- break;
- BYTECODE(ADVANCE_REGISTER)
- registers[insn >> BYTECODE_SHIFT] += Load32Aligned(pc + 4);
- pc += BC_ADVANCE_REGISTER_LENGTH;
- break;
- BYTECODE(SET_REGISTER_TO_CP)
- registers[insn >> BYTECODE_SHIFT] = current + Load32Aligned(pc + 4);
- pc += BC_SET_REGISTER_TO_CP_LENGTH;
- break;
- BYTECODE(SET_CP_TO_REGISTER)
- current = registers[insn >> BYTECODE_SHIFT];
- pc += BC_SET_CP_TO_REGISTER_LENGTH;
- break;
- BYTECODE(SET_REGISTER_TO_SP)
- registers[insn >> BYTECODE_SHIFT] =
- static_cast<int>(backtrack_sp - backtrack_stack_base);
- pc += BC_SET_REGISTER_TO_SP_LENGTH;
- break;
- BYTECODE(SET_SP_TO_REGISTER)
- backtrack_sp = backtrack_stack_base + registers[insn >> BYTECODE_SHIFT];
- backtrack_stack_space = backtrack_stack.max_size() -
- static_cast<int>(backtrack_sp - backtrack_stack_base);
- pc += BC_SET_SP_TO_REGISTER_LENGTH;
- break;
- BYTECODE(POP_CP)
- backtrack_stack_space++;
- --backtrack_sp;
- current = *backtrack_sp;
- pc += BC_POP_CP_LENGTH;
- break;
- BYTECODE(POP_BT)
- backtrack_stack_space++;
- --backtrack_sp;
- pc = code_base + *backtrack_sp;
- break;
- BYTECODE(POP_REGISTER)
- backtrack_stack_space++;
- --backtrack_sp;
- registers[insn >> BYTECODE_SHIFT] = *backtrack_sp;
- pc += BC_POP_REGISTER_LENGTH;
- break;
- BYTECODE(FAIL)
- return RegExpImpl::RE_FAILURE;
- BYTECODE(SUCCEED)
- return RegExpImpl::RE_SUCCESS;
- BYTECODE(ADVANCE_CP)
- current += insn >> BYTECODE_SHIFT;
- pc += BC_ADVANCE_CP_LENGTH;
- break;
- BYTECODE(GOTO)
- pc = code_base + Load32Aligned(pc + 4);
- break;
- BYTECODE(ADVANCE_CP_AND_GOTO)
- current += insn >> BYTECODE_SHIFT;
- pc = code_base + Load32Aligned(pc + 4);
- break;
- BYTECODE(CHECK_GREEDY)
- if (current == backtrack_sp[-1]) {
- backtrack_sp--;
- backtrack_stack_space++;
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_GREEDY_LENGTH;
- }
- break;
- BYTECODE(LOAD_CURRENT_CHAR) {
- int pos = current + (insn >> BYTECODE_SHIFT);
- if (pos >= subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- current_char = subject[pos];
- pc += BC_LOAD_CURRENT_CHAR_LENGTH;
- }
- break;
- }
- BYTECODE(LOAD_CURRENT_CHAR_UNCHECKED) {
- int pos = current + (insn >> BYTECODE_SHIFT);
- current_char = subject[pos];
- pc += BC_LOAD_CURRENT_CHAR_UNCHECKED_LENGTH;
- break;
- }
- BYTECODE(LOAD_2_CURRENT_CHARS) {
- int pos = current + (insn >> BYTECODE_SHIFT);
- if (pos + 2 > subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- Char next = subject[pos + 1];
- current_char =
- (subject[pos] | (next << (kBitsPerByte * sizeof(Char))));
- pc += BC_LOAD_2_CURRENT_CHARS_LENGTH;
- }
- break;
- }
- BYTECODE(LOAD_2_CURRENT_CHARS_UNCHECKED) {
- int pos = current + (insn >> BYTECODE_SHIFT);
- Char next = subject[pos + 1];
- current_char = (subject[pos] | (next << (kBitsPerByte * sizeof(Char))));
- pc += BC_LOAD_2_CURRENT_CHARS_UNCHECKED_LENGTH;
- break;
- }
- BYTECODE(LOAD_4_CURRENT_CHARS) {
- ASSERT(sizeof(Char) == 1);
- int pos = current + (insn >> BYTECODE_SHIFT);
- if (pos + 4 > subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- Char next1 = subject[pos + 1];
- Char next2 = subject[pos + 2];
- Char next3 = subject[pos + 3];
- current_char = (subject[pos] |
- (next1 << 8) |
- (next2 << 16) |
- (next3 << 24));
- pc += BC_LOAD_4_CURRENT_CHARS_LENGTH;
- }
- break;
- }
- BYTECODE(LOAD_4_CURRENT_CHARS_UNCHECKED) {
- ASSERT(sizeof(Char) == 1);
- int pos = current + (insn >> BYTECODE_SHIFT);
- Char next1 = subject[pos + 1];
- Char next2 = subject[pos + 2];
- Char next3 = subject[pos + 3];
- current_char = (subject[pos] |
- (next1 << 8) |
- (next2 << 16) |
- (next3 << 24));
- pc += BC_LOAD_4_CURRENT_CHARS_UNCHECKED_LENGTH;
- break;
- }
- BYTECODE(CHECK_4_CHARS) {
- uint32_t c = Load32Aligned(pc + 4);
- if (c == current_char) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_CHECK_4_CHARS_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
- if (c == current_char) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_CHAR_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_NOT_4_CHARS) {
- uint32_t c = Load32Aligned(pc + 4);
- if (c != current_char) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_CHECK_NOT_4_CHARS_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_NOT_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
- if (c != current_char) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_NOT_CHAR_LENGTH;
- }
- break;
- }
- BYTECODE(AND_CHECK_4_CHARS) {
- uint32_t c = Load32Aligned(pc + 4);
- if (c == (current_char & Load32Aligned(pc + 8))) {
- pc = code_base + Load32Aligned(pc + 12);
- } else {
- pc += BC_AND_CHECK_4_CHARS_LENGTH;
- }
- break;
- }
- BYTECODE(AND_CHECK_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
- if (c == (current_char & Load32Aligned(pc + 4))) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_AND_CHECK_CHAR_LENGTH;
- }
- break;
- }
- BYTECODE(AND_CHECK_NOT_4_CHARS) {
- uint32_t c = Load32Aligned(pc + 4);
- if (c != (current_char & Load32Aligned(pc + 8))) {
- pc = code_base + Load32Aligned(pc + 12);
- } else {
- pc += BC_AND_CHECK_NOT_4_CHARS_LENGTH;
- }
- break;
- }
- BYTECODE(AND_CHECK_NOT_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
- if (c != (current_char & Load32Aligned(pc + 4))) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_AND_CHECK_NOT_CHAR_LENGTH;
- }
- break;
- }
- BYTECODE(MINUS_AND_CHECK_NOT_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
- uint32_t minus = Load16Aligned(pc + 4);
- uint32_t mask = Load16Aligned(pc + 6);
- if (c != ((current_char - minus) & mask)) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_MINUS_AND_CHECK_NOT_CHAR_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_CHAR_IN_RANGE) {
- uint32_t from = Load16Aligned(pc + 4);
- uint32_t to = Load16Aligned(pc + 6);
- if (from <= current_char && current_char <= to) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_CHECK_CHAR_IN_RANGE_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_CHAR_NOT_IN_RANGE) {
- uint32_t from = Load16Aligned(pc + 4);
- uint32_t to = Load16Aligned(pc + 6);
- if (from > current_char || current_char > to) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_CHECK_CHAR_NOT_IN_RANGE_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_BIT_IN_TABLE) {
- int mask = RegExpMacroAssembler::kTableMask;
- byte b = pc[8 + ((current_char & mask) >> kBitsPerByteLog2)];
- int bit = (current_char & (kBitsPerByte - 1));
- if ((b & (1 << bit)) != 0) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_BIT_IN_TABLE_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_LT) {
- uint32_t limit = (insn >> BYTECODE_SHIFT);
- if (current_char < limit) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_LT_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_GT) {
- uint32_t limit = (insn >> BYTECODE_SHIFT);
- if (current_char > limit) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_GT_LENGTH;
- }
- break;
- }
- BYTECODE(CHECK_REGISTER_LT)
- if (registers[insn >> BYTECODE_SHIFT] < Load32Aligned(pc + 4)) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_CHECK_REGISTER_LT_LENGTH;
- }
- break;
- BYTECODE(CHECK_REGISTER_GE)
- if (registers[insn >> BYTECODE_SHIFT] >= Load32Aligned(pc + 4)) {
- pc = code_base + Load32Aligned(pc + 8);
- } else {
- pc += BC_CHECK_REGISTER_GE_LENGTH;
- }
- break;
- BYTECODE(CHECK_REGISTER_EQ_POS)
- if (registers[insn >> BYTECODE_SHIFT] == current) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_REGISTER_EQ_POS_LENGTH;
- }
- break;
- BYTECODE(CHECK_NOT_REGS_EQUAL)
- if (registers[insn >> BYTECODE_SHIFT] ==
- registers[Load32Aligned(pc + 4)]) {
- pc += BC_CHECK_NOT_REGS_EQUAL_LENGTH;
- } else {
- pc = code_base + Load32Aligned(pc + 8);
- }
- break;
- BYTECODE(CHECK_NOT_BACK_REF) {
- int from = registers[insn >> BYTECODE_SHIFT];
- int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
- if (from < 0 || len <= 0) {
- pc += BC_CHECK_NOT_BACK_REF_LENGTH;
- break;
- }
- if (current + len > subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- break;
- } else {
- int i;
- for (i = 0; i < len; i++) {
- if (subject[from + i] != subject[current + i]) {
- pc = code_base + Load32Aligned(pc + 4);
- break;
- }
- }
- if (i < len) break;
- current += len;
- }
- pc += BC_CHECK_NOT_BACK_REF_LENGTH;
- break;
- }
- BYTECODE(CHECK_NOT_BACK_REF_NO_CASE) {
- int from = registers[insn >> BYTECODE_SHIFT];
- int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
- if (from < 0 || len <= 0) {
- pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
- break;
- }
- if (current + len > subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- break;
- } else {
- if (BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
- from, current, len, subject)) {
- current += len;
- pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
- } else {
- pc = code_base + Load32Aligned(pc + 4);
- }
- }
- break;
- }
- BYTECODE(CHECK_AT_START)
- if (current == 0) {
- pc = code_base + Load32Aligned(pc + 4);
- } else {
- pc += BC_CHECK_AT_START_LENGTH;
- }
- break;
- BYTECODE(CHECK_NOT_AT_START)
- if (current == 0) {
- pc += BC_CHECK_NOT_AT_START_LENGTH;
- } else {
- pc = code_base + Load32Aligned(pc + 4);
- }
- break;
- BYTECODE(SET_CURRENT_POSITION_FROM_END) {
- int by = static_cast<uint32_t>(insn) >> BYTECODE_SHIFT;
- if (subject.length() - current > by) {
- current = subject.length() - by;
- current_char = subject[current - 1];
- }
- pc += BC_SET_CURRENT_POSITION_FROM_END_LENGTH;
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
- Isolate* isolate,
- Handle<ByteArray> code_array,
- Handle<String> subject,
- int* registers,
- int start_position) {
- ASSERT(subject->IsFlat());
-
- AssertNoAllocation a;
- const byte* code_base = code_array->GetDataStartAddress();
- uc16 previous_char = '\n';
- String::FlatContent subject_content = subject->GetFlatContent();
- if (subject_content.IsAscii()) {
- Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
- if (start_position != 0) previous_char = subject_vector[start_position - 1];
- return RawMatch(isolate,
- code_base,
- subject_vector,
- registers,
- start_position,
- previous_char);
- } else {
- ASSERT(subject_content.IsTwoByte());
- Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
- if (start_position != 0) previous_char = subject_vector[start_position - 1];
- return RawMatch(isolate,
- code_base,
- subject_vector,
- registers,
- start_position,
- previous_char);
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/interpreter-irregexp.h b/src/3rdparty/v8/src/interpreter-irregexp.h
deleted file mode 100644
index 0f45d98..0000000
--- a/src/3rdparty/v8/src/interpreter-irregexp.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// A simple interpreter for the Irregexp byte code.
-
-#ifndef V8_INTERPRETER_IRREGEXP_H_
-#define V8_INTERPRETER_IRREGEXP_H_
-
-namespace v8 {
-namespace internal {
-
-
-class IrregexpInterpreter {
- public:
- static RegExpImpl::IrregexpResult Match(Isolate* isolate,
- Handle<ByteArray> code,
- Handle<String> subject,
- int* captures,
- int start_position);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_INTERPRETER_IRREGEXP_H_
diff --git a/src/3rdparty/v8/src/isolate-inl.h b/src/3rdparty/v8/src/isolate-inl.h
deleted file mode 100644
index 9fb16fb..0000000
--- a/src/3rdparty/v8/src/isolate-inl.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ISOLATE_INL_H_
-#define V8_ISOLATE_INL_H_
-
-#include "isolate.h"
-
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-
-SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
- if (isolate->context() != NULL) {
- context_ = Handle<Context>(isolate->context());
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- dummy_ = Handle<Context>(isolate->context());
-#endif
- }
- isolate->set_save_context(this);
-
- c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
-}
-
-
-bool Isolate::IsDebuggerActive() {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!NoBarrier_Load(&debugger_initialized_)) return false;
- return debugger()->IsDebuggerActive();
-#else
- return false;
-#endif
-}
-
-
-bool Isolate::DebuggerHasBreakPoints() {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- return debug()->has_break_points();
-#else
- return false;
-#endif
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ISOLATE_INL_H_
diff --git a/src/3rdparty/v8/src/isolate.cc b/src/3rdparty/v8/src/isolate.cc
deleted file mode 100644
index eba1982..0000000
--- a/src/3rdparty/v8/src/isolate.cc
+++ /dev/null
@@ -1,2335 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "ast.h"
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "compilation-cache.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "heap-profiler.h"
-#include "hydrogen.h"
-#include "isolate.h"
-#include "lithium-allocator.h"
-#include "log.h"
-#include "marking-thread.h"
-#include "messages.h"
-#include "platform.h"
-#include "regexp-stack.h"
-#include "runtime-profiler.h"
-#include "scopeinfo.h"
-#include "serialize.h"
-#include "simulator.h"
-#include "spaces.h"
-#include "stub-cache.h"
-#include "sweeper-thread.h"
-#include "version.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-Atomic32 ThreadId::highest_thread_id_ = 0;
-
-int ThreadId::AllocateThreadId() {
- int new_id = NoBarrier_AtomicIncrement(&highest_thread_id_, 1);
- return new_id;
-}
-
-
-int ThreadId::GetCurrentThreadId() {
- int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
- if (thread_id == 0) {
- thread_id = AllocateThreadId();
- Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
- }
- return thread_id;
-}
-
-
-ThreadLocalTop::ThreadLocalTop() {
- InitializeInternal();
- // This flag may be set using v8::V8::IgnoreOutOfMemoryException()
- // before an isolate is initialized. The initialize methods below do
- // not touch it to preserve its value.
- ignore_out_of_memory_ = false;
-}
-
-
-void ThreadLocalTop::InitializeInternal() {
- c_entry_fp_ = 0;
- handler_ = 0;
-#ifdef USE_SIMULATOR
- simulator_ = NULL;
-#endif
- js_entry_sp_ = NULL;
- external_callback_ = NULL;
- current_vm_state_ = EXTERNAL;
- try_catch_handler_address_ = NULL;
- context_ = NULL;
- thread_id_ = ThreadId::Invalid();
- external_caught_exception_ = false;
- failed_access_check_callback_ = NULL;
- user_object_comparison_callback_ = NULL;
- save_context_ = NULL;
- catcher_ = NULL;
- top_lookup_result_ = NULL;
-
- // These members are re-initialized later after deserialization
- // is complete.
- pending_exception_ = NULL;
- has_pending_message_ = false;
- pending_message_obj_ = NULL;
- pending_message_script_ = NULL;
- scheduled_exception_ = NULL;
-}
-
-
-void ThreadLocalTop::Initialize() {
- InitializeInternal();
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
- simulator_ = Simulator::current(isolate_);
-#elif V8_TARGET_ARCH_MIPS
- simulator_ = Simulator::current(isolate_);
-#endif
-#endif
- thread_id_ = ThreadId::Current();
-}
-
-
-v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
- return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
-}
-
-
-int SystemThreadManager::NumberOfParallelSystemThreads(
- ParallelSystemComponent type) {
- int number_of_threads = Min(OS::NumberOfCores(), kMaxThreads);
- ASSERT(number_of_threads > 0);
- if (number_of_threads == 1) {
- return 0;
- }
- if (type == PARALLEL_SWEEPING) {
- return number_of_threads;
- } else if (type == CONCURRENT_SWEEPING) {
- return number_of_threads - 1;
- } else if (type == PARALLEL_MARKING) {
- return number_of_threads;
- }
- return 1;
-}
-
-
-// Create a dummy thread that will wait forever on a semaphore. The only
-// purpose for this thread is to have some stack area to save essential data
-// into for use by a stacks only core dump (aka minidump).
-class PreallocatedMemoryThread: public Thread {
- public:
- char* data() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return data_;
- }
-
- unsigned length() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return length_;
- }
-
- // Stop the PreallocatedMemoryThread and release its resources.
- void StopThread() {
- keep_running_ = false;
- wait_for_ever_semaphore_->Signal();
-
- // Wait for the thread to terminate.
- Join();
-
- if (data_ready_semaphore_ != NULL) {
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
-
- delete wait_for_ever_semaphore_;
- wait_for_ever_semaphore_ = NULL;
- }
-
- protected:
- // When the thread starts running it will allocate a fixed number of bytes
- // on the stack and publish the location of this memory for others to use.
- void Run() {
- EmbeddedVector<char, 15 * 1024> local_buffer;
-
- // Initialize the buffer with a known good value.
- OS::StrNCpy(local_buffer, "Trace data was not generated.\n",
- local_buffer.length());
-
- // Publish the local buffer and signal its availability.
- data_ = local_buffer.start();
- length_ = local_buffer.length();
- data_ready_semaphore_->Signal();
-
- while (keep_running_) {
- // This thread will wait here until the end of time.
- wait_for_ever_semaphore_->Wait();
- }
-
- // Make sure we access the buffer after the wait to remove all possibility
- // of it being optimized away.
- OS::StrNCpy(local_buffer, "PreallocatedMemoryThread shutting down.\n",
- local_buffer.length());
- }
-
-
- private:
- PreallocatedMemoryThread()
- : Thread("v8:PreallocMem"),
- keep_running_(true),
- wait_for_ever_semaphore_(OS::CreateSemaphore(0)),
- data_ready_semaphore_(OS::CreateSemaphore(0)),
- data_(NULL),
- length_(0) {
- }
-
- // Used to make sure that the thread keeps looping even for spurious wakeups.
- bool keep_running_;
-
- // This semaphore is used by the PreallocatedMemoryThread to wait for ever.
- Semaphore* wait_for_ever_semaphore_;
- // Semaphore to signal that the data has been initialized.
- Semaphore* data_ready_semaphore_;
-
- // Location and size of the preallocated memory block.
- char* data_;
- unsigned length_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(PreallocatedMemoryThread);
-};
-
-
-void Isolate::PreallocatedMemoryThreadStart() {
- if (preallocated_memory_thread_ != NULL) return;
- preallocated_memory_thread_ = new PreallocatedMemoryThread();
- preallocated_memory_thread_->Start();
-}
-
-
-void Isolate::PreallocatedMemoryThreadStop() {
- if (preallocated_memory_thread_ == NULL) return;
- preallocated_memory_thread_->StopThread();
- // Done with the thread entirely.
- delete preallocated_memory_thread_;
- preallocated_memory_thread_ = NULL;
-}
-
-
-void Isolate::PreallocatedStorageInit(size_t size) {
- ASSERT(free_list_.next_ == &free_list_);
- ASSERT(free_list_.previous_ == &free_list_);
- PreallocatedStorage* free_chunk =
- reinterpret_cast<PreallocatedStorage*>(new char[size]);
- free_list_.next_ = free_list_.previous_ = free_chunk;
- free_chunk->next_ = free_chunk->previous_ = &free_list_;
- free_chunk->size_ = size - sizeof(PreallocatedStorage);
- preallocated_storage_preallocated_ = true;
-}
-
-
-void* Isolate::PreallocatedStorageNew(size_t size) {
- if (!preallocated_storage_preallocated_) {
- return FreeStoreAllocationPolicy().New(size);
- }
- ASSERT(free_list_.next_ != &free_list_);
- ASSERT(free_list_.previous_ != &free_list_);
-
- size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
- // Search for exact fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ == size) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Search for first fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- PreallocatedStorage* left_over =
- reinterpret_cast<PreallocatedStorage*>(
- reinterpret_cast<char*>(storage + 1) + size);
- left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
- ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
- storage->size_);
- storage->size_ = size;
- left_over->LinkTo(&free_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Allocation failure.
- ASSERT(false);
- return NULL;
-}
-
-
-// We don't attempt to coalesce.
-void Isolate::PreallocatedStorageDelete(void* p) {
- if (p == NULL) {
- return;
- }
- if (!preallocated_storage_preallocated_) {
- FreeStoreAllocationPolicy::Delete(p);
- return;
- }
- PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
- ASSERT(storage->next_->previous_ == storage);
- ASSERT(storage->previous_->next_ == storage);
- storage->Unlink();
- storage->LinkTo(&free_list_);
-}
-
-Isolate* Isolate::default_isolate_ = NULL;
-Thread::LocalStorageKey Isolate::isolate_key_;
-Thread::LocalStorageKey Isolate::thread_id_key_;
-Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
-Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
-Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
-
-
-Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
- ThreadId thread_id) {
- ASSERT(!thread_id.Equals(ThreadId::Invalid()));
- PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
- {
- ScopedLock lock(process_wide_mutex_);
- ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
- thread_data_table_->Insert(per_thread);
- ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
- }
- return per_thread;
-}
-
-
-Isolate::PerIsolateThreadData*
- Isolate::FindOrAllocatePerThreadDataForThisThread() {
- ThreadId thread_id = ThreadId::Current();
- PerIsolateThreadData* per_thread = NULL;
- {
- ScopedLock lock(process_wide_mutex_);
- per_thread = thread_data_table_->Lookup(this, thread_id);
- if (per_thread == NULL) {
- per_thread = AllocatePerIsolateThreadData(thread_id);
- }
- }
- return per_thread;
-}
-
-
-Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
- ThreadId thread_id = ThreadId::Current();
- PerIsolateThreadData* per_thread = NULL;
- {
- ScopedLock lock(process_wide_mutex_);
- per_thread = thread_data_table_->Lookup(this, thread_id);
- }
- return per_thread;
-}
-
-
-void Isolate::EnsureDefaultIsolate() {
- ScopedLock lock(process_wide_mutex_);
- if (default_isolate_ == NULL) {
- isolate_key_ = Thread::CreateThreadLocalKey();
- thread_id_key_ = Thread::CreateThreadLocalKey();
- per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
- thread_data_table_ = new Isolate::ThreadDataTable();
- default_isolate_ = new Isolate();
- }
- // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
- // because a non-null thread data may be already set.
- if (Thread::GetThreadLocal(isolate_key_) == NULL) {
- Thread::SetThreadLocal(isolate_key_, default_isolate_);
- }
-}
-
-struct StaticInitializer {
- StaticInitializer() {
- Isolate::EnsureDefaultIsolate();
- }
-} static_initializer;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Debugger* Isolate::GetDefaultIsolateDebugger() {
- EnsureDefaultIsolate();
- return default_isolate_->debugger();
-}
-#endif
-
-
-StackGuard* Isolate::GetDefaultIsolateStackGuard() {
- EnsureDefaultIsolate();
- return default_isolate_->stack_guard();
-}
-
-
-void Isolate::EnterDefaultIsolate() {
- EnsureDefaultIsolate();
- ASSERT(default_isolate_ != NULL);
-
- PerIsolateThreadData* data = CurrentPerIsolateThreadData();
- // If not yet in default isolate - enter it.
- if (data == NULL || data->isolate() != default_isolate_) {
- default_isolate_->Enter();
- }
-}
-
-
-v8::Isolate* Isolate::GetDefaultIsolateForLocking() {
- EnsureDefaultIsolate();
- return reinterpret_cast<v8::Isolate*>(default_isolate_);
-}
-
-
-Address Isolate::get_address_from_id(Isolate::AddressId id) {
- return isolate_addresses_[id];
-}
-
-
-char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
- ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
- Iterate(v, thread);
- return thread_storage + sizeof(ThreadLocalTop);
-}
-
-
-void Isolate::IterateThread(ThreadVisitor* v, char* t) {
- ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
- v->VisitThread(this, thread);
-}
-
-
-void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
- // Visit the roots from the top for a given thread.
- Object* pending;
- // The pending exception can sometimes be a failure. We can't show
- // that to the GC, which only understands objects.
- if (thread->pending_exception_->ToObject(&pending)) {
- v->VisitPointer(&pending);
- thread->pending_exception_ = pending; // In case GC updated it.
- }
- v->VisitPointer(&(thread->pending_message_obj_));
- v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
- v->VisitPointer(BitCast<Object**>(&(thread->context_)));
- Object* scheduled;
- if (thread->scheduled_exception_->ToObject(&scheduled)) {
- v->VisitPointer(&scheduled);
- thread->scheduled_exception_ = scheduled;
- }
-
- for (v8::TryCatch* block = thread->TryCatchHandler();
- block != NULL;
- block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
- v->VisitPointer(BitCast<Object**>(&(block->exception_)));
- v->VisitPointer(BitCast<Object**>(&(block->message_)));
- }
-
- // Iterate over pointers on native execution stack.
- for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
- it.frame()->Iterate(v);
- }
-
- // Iterate pointers in live lookup results.
- thread->top_lookup_result_->Iterate(v);
-}
-
-
-void Isolate::Iterate(ObjectVisitor* v) {
- ThreadLocalTop* current_t = thread_local_top();
- Iterate(v, current_t);
-}
-
-void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) {
- for (DeferredHandles* deferred = deferred_handles_head_;
- deferred != NULL;
- deferred = deferred->next_) {
- deferred->Iterate(visitor);
- }
-}
-
-
-void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
- // The ARM simulator has a separate JS stack. We therefore register
- // the C++ try catch handler with the simulator and get back an
- // address that can be used for comparisons with addresses into the
- // JS stack. When running without the simulator, the address
- // returned will be the address of the C++ try catch handler itself.
- Address address = reinterpret_cast<Address>(
- SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
- thread_local_top()->set_try_catch_handler_address(address);
-}
-
-
-void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
- ASSERT(thread_local_top()->TryCatchHandler() == that);
- thread_local_top()->set_try_catch_handler_address(
- reinterpret_cast<Address>(that->next_));
- thread_local_top()->catcher_ = NULL;
- SimulatorStack::UnregisterCTryCatch();
-}
-
-
-Handle<String> Isolate::StackTraceString() {
- if (stack_trace_nesting_level_ == 0) {
- stack_trace_nesting_level_++;
- HeapStringAllocator allocator;
- StringStream::ClearMentionedObjectCache();
- StringStream accumulator(&allocator);
- incomplete_message_ = &accumulator;
- PrintStack(&accumulator);
- Handle<String> stack_trace = accumulator.ToString();
- incomplete_message_ = NULL;
- stack_trace_nesting_level_ = 0;
- return stack_trace;
- } else if (stack_trace_nesting_level_ == 1) {
- stack_trace_nesting_level_++;
- OS::PrintError(
- "\n\nAttempt to print stack while printing stack (double fault)\n");
- OS::PrintError(
- "If you are lucky you may find a partial stack dump on stdout.\n\n");
- incomplete_message_->OutputToStdOut();
- return factory()->empty_string();
- } else {
- OS::Abort();
- // Unreachable
- return factory()->empty_string();
- }
-}
-
-
-void Isolate::PushStackTraceAndDie(unsigned int magic,
- Object* object,
- Map* map,
- unsigned int magic2) {
- const int kMaxStackTraceSize = 8192;
- Handle<String> trace = StackTraceString();
- uint8_t buffer[kMaxStackTraceSize];
- int length = Min(kMaxStackTraceSize - 1, trace->length());
- String::WriteToFlat(*trace, buffer, 0, length);
- buffer[length] = '\0';
- // TODO(dcarney): convert buffer to utf8?
- OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n",
- magic, magic2,
- static_cast<void*>(object), static_cast<void*>(map),
- reinterpret_cast<char*>(buffer));
- OS::Abort();
-}
-
-
-// Determines whether the given stack frame should be displayed in
-// a stack trace. The caller is the error constructor that asked
-// for the stack trace to be collected. The first time a construct
-// call to this function is encountered it is skipped. The seen_caller
-// in/out parameter is used to remember if the caller has been seen
-// yet.
-static bool IsVisibleInStackTrace(StackFrame* raw_frame,
- Object* caller,
- bool* seen_caller) {
- // Only display JS frames.
- if (!raw_frame->is_java_script()) return false;
- JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- Object* raw_fun = frame->function();
- // Not sure when this can happen but skip it just in case.
- if (!raw_fun->IsJSFunction()) return false;
- if ((raw_fun == caller) && !(*seen_caller)) {
- *seen_caller = true;
- return false;
- }
- // Skip all frames until we've seen the caller.
- if (!(*seen_caller)) return false;
- // Also, skip non-visible built-in functions and any call with the builtins
- // object as receiver, so as to not reveal either the builtins object or
- // an internal function.
- // The --builtins-in-stack-traces command line flag allows including
- // internal call sites in the stack trace for debugging purposes.
- if (!FLAG_builtins_in_stack_traces) {
- JSFunction* fun = JSFunction::cast(raw_fun);
- if (frame->receiver()->IsJSBuiltinsObject() ||
- (fun->IsBuiltin() && !fun->shared()->native())) {
- return false;
- }
- }
- return true;
-}
-
-
-Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
- Handle<Object> caller,
- int limit) {
- limit = Max(limit, 0); // Ensure that limit is not negative.
- int initial_size = Min(limit, 10);
- Handle<FixedArray> elements =
- factory()->NewFixedArrayWithHoles(initial_size * 4);
-
- // If the caller parameter is a function we skip frames until we're
- // under it before starting to collect.
- bool seen_caller = !caller->IsJSFunction();
- int cursor = 0;
- int frames_seen = 0;
- for (StackFrameIterator iter(this);
- !iter.done() && frames_seen < limit;
- iter.Advance()) {
- StackFrame* raw_frame = iter.frame();
- if (IsVisibleInStackTrace(raw_frame, *caller, &seen_caller)) {
- frames_seen++;
- JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- // Set initial size to the maximum inlining level + 1 for the outermost
- // function.
- List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
- frame->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0; i--) {
- if (cursor + 4 > elements->length()) {
- int new_capacity = JSObject::NewElementsCapacity(elements->length());
- Handle<FixedArray> new_elements =
- factory()->NewFixedArrayWithHoles(new_capacity);
- for (int i = 0; i < cursor; i++) {
- new_elements->set(i, elements->get(i));
- }
- elements = new_elements;
- }
- ASSERT(cursor + 4 <= elements->length());
-
- Handle<Object> recv = frames[i].receiver();
- Handle<JSFunction> fun = frames[i].function();
- Handle<Code> code = frames[i].code();
- Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this);
- elements->set(cursor++, *recv);
- elements->set(cursor++, *fun);
- elements->set(cursor++, *code);
- elements->set(cursor++, *offset);
- }
- }
- }
- Handle<JSArray> result = factory()->NewJSArrayWithElements(elements);
- result->set_length(Smi::FromInt(cursor));
- return result;
-}
-
-
-void Isolate::CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object) {
- if (capture_stack_trace_for_uncaught_exceptions_) {
- // Capture stack trace for a detailed exception message.
- Handle<String> key = factory()->hidden_stack_trace_string();
- Handle<JSArray> stack_trace = CaptureCurrentStackTrace(
- stack_trace_for_uncaught_exceptions_frame_limit_,
- stack_trace_for_uncaught_exceptions_options_);
- JSObject::SetHiddenProperty(error_object, key, stack_trace);
- }
-}
-
-
-Handle<JSArray> Isolate::CaptureCurrentStackTrace(
- int frame_limit, StackTrace::StackTraceOptions options) {
- // Ensure no negative values.
- int limit = Max(frame_limit, 0);
- Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
-
- Handle<String> column_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("column"));
- Handle<String> line_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("lineNumber"));
- Handle<String> script_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptName"));
- Handle<String> script_name_or_source_url_key =
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("scriptNameOrSourceURL"));
- Handle<String> function_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("functionName"));
- Handle<String> eval_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isEval"));
- Handle<String> constructor_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isConstructor"));
-
- StackTraceFrameIterator it(this);
- int frames_seen = 0;
- while (!it.done() && (frames_seen < limit)) {
- JavaScriptFrame* frame = it.frame();
- // Set initial size to the maximum inlining level + 1 for the outermost
- // function.
- List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
- frame->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
- // Create a JSObject to hold the information for the StackFrame.
- Handle<JSObject> stack_frame = factory()->NewJSObject(object_function());
-
- Handle<JSFunction> fun = frames[i].function();
- Handle<Script> script(Script::cast(fun->shared()->script()));
-
- if (options & StackTrace::kLineNumber) {
- int script_line_offset = script->line_offset()->value();
- int position = frames[i].code()->SourcePosition(frames[i].pc());
- int line_number = GetScriptLineNumber(script, position);
- // line_number is already shifted by the script_line_offset.
- int relative_line_number = line_number - script_line_offset;
- if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
- Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
- int start = (relative_line_number == 0) ? 0 :
- Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
- int column_offset = position - start;
- if (relative_line_number == 0) {
- // For the case where the code is on the same line as the script
- // tag.
- column_offset += script->column_offset()->value();
- }
- CHECK_NOT_EMPTY_HANDLE(
- this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, column_key,
- Handle<Smi>(Smi::FromInt(column_offset + 1), this), NONE));
- }
- CHECK_NOT_EMPTY_HANDLE(
- this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, line_key,
- Handle<Smi>(Smi::FromInt(line_number + 1), this), NONE));
- }
-
- if (options & StackTrace::kScriptName) {
- Handle<Object> script_name(script->name(), this);
- CHECK_NOT_EMPTY_HANDLE(this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, script_key, script_name, NONE));
- }
-
- if (options & StackTrace::kScriptNameOrSourceURL) {
- Handle<Object> result = GetScriptNameOrSourceURL(script);
- CHECK_NOT_EMPTY_HANDLE(this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, script_name_or_source_url_key,
- result, NONE));
- }
-
- if (options & StackTrace::kFunctionName) {
- Handle<Object> fun_name(fun->shared()->name(), this);
- if (fun_name->ToBoolean()->IsFalse()) {
- fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
- }
- CHECK_NOT_EMPTY_HANDLE(this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, function_key, fun_name, NONE));
- }
-
- if (options & StackTrace::kIsEval) {
- int type = Smi::cast(script->compilation_type())->value();
- Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
- factory()->true_value() : factory()->false_value();
- CHECK_NOT_EMPTY_HANDLE(this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, eval_key, is_eval, NONE));
- }
-
- if (options & StackTrace::kIsConstructor) {
- Handle<Object> is_constructor = (frames[i].is_constructor()) ?
- factory()->true_value() : factory()->false_value();
- CHECK_NOT_EMPTY_HANDLE(this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, constructor_key,
- is_constructor, NONE));
- }
-
- FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame);
- frames_seen++;
- }
- it.Advance();
- }
-
- stack_trace->set_length(Smi::FromInt(frames_seen));
- return stack_trace;
-}
-
-
-void Isolate::PrintStack() {
- if (stack_trace_nesting_level_ == 0) {
- stack_trace_nesting_level_++;
-
- StringAllocator* allocator;
- if (preallocated_message_space_ == NULL) {
- allocator = new HeapStringAllocator();
- } else {
- allocator = preallocated_message_space_;
- }
-
- StringStream::ClearMentionedObjectCache();
- StringStream accumulator(allocator);
- incomplete_message_ = &accumulator;
- PrintStack(&accumulator);
- accumulator.OutputToStdOut();
- InitializeLoggingAndCounters();
- accumulator.Log();
- incomplete_message_ = NULL;
- stack_trace_nesting_level_ = 0;
- if (preallocated_message_space_ == NULL) {
- // Remove the HeapStringAllocator created above.
- delete allocator;
- }
- } else if (stack_trace_nesting_level_ == 1) {
- stack_trace_nesting_level_++;
- OS::PrintError(
- "\n\nAttempt to print stack while printing stack (double fault)\n");
- OS::PrintError(
- "If you are lucky you may find a partial stack dump on stdout.\n\n");
- incomplete_message_->OutputToStdOut();
- }
-}
-
-
-static void PrintFrames(Isolate* isolate,
- StringStream* accumulator,
- StackFrame::PrintMode mode) {
- StackFrameIterator it(isolate);
- for (int i = 0; !it.done(); it.Advance()) {
- it.frame()->Print(accumulator, mode, i++);
- }
-}
-
-
-void Isolate::PrintStack(StringStream* accumulator) {
- if (!IsInitialized()) {
- accumulator->Add(
- "\n==== JS stack trace is not available =======================\n\n");
- accumulator->Add(
- "\n==== Isolate for the thread is not initialized =============\n\n");
- return;
- }
- // The MentionedObjectCache is not GC-proof at the moment.
- AssertNoAllocation nogc;
- ASSERT(StringStream::IsMentionedObjectCacheClear());
-
- // Avoid printing anything if there are no frames.
- if (c_entry_fp(thread_local_top()) == 0) return;
-
- accumulator->Add(
- "\n==== JS stack trace =========================================\n\n");
- PrintFrames(this, accumulator, StackFrame::OVERVIEW);
-
- accumulator->Add(
- "\n==== Details ================================================\n\n");
- PrintFrames(this, accumulator, StackFrame::DETAILS);
-
- accumulator->PrintMentionedObjectCache();
- accumulator->Add("=====================\n\n");
-}
-
-
-void Isolate::SetFailedAccessCheckCallback(
- v8::FailedAccessCheckCallback callback) {
- thread_local_top()->failed_access_check_callback_ = callback;
-}
-
-
-void Isolate::SetUserObjectComparisonCallback(
- v8::UserObjectComparisonCallback callback) {
- thread_local_top()->user_object_comparison_callback_ = callback;
-}
-
-
-void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
- if (!thread_local_top()->failed_access_check_callback_) return;
-
- ASSERT(receiver->IsAccessCheckNeeded());
- ASSERT(context());
-
- // Get the data object from access check info.
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return;
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == heap_.undefined_value()) return;
-
- HandleScope scope(this);
- Handle<JSObject> receiver_handle(receiver);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
- { VMState state(this, EXTERNAL);
- thread_local_top()->failed_access_check_callback_(
- v8::Utils::ToLocal(receiver_handle),
- type,
- v8::Utils::ToLocal(data));
- }
-}
-
-
-enum MayAccessDecision {
- YES, NO, UNKNOWN
-};
-
-
-static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
- JSObject* receiver,
- v8::AccessType type) {
- // During bootstrapping, callback functions are not enabled yet.
- if (isolate->bootstrapper()->IsActive()) return YES;
-
- if (receiver->IsJSGlobalProxy()) {
- Object* receiver_context = JSGlobalProxy::cast(receiver)->native_context();
- if (!receiver_context->IsContext()) return NO;
-
- // Get the native context of current top context.
- // avoid using Isolate::native_context() because it uses Handle.
- Context* native_context =
- isolate->context()->global_object()->native_context();
- if (receiver_context == native_context) return YES;
-
- if (Context::cast(receiver_context)->security_token() ==
- native_context->security_token())
- return YES;
- }
-
- return UNKNOWN;
-}
-
-
-bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
- v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
-
- // The callers of this method are not expecting a GC.
- AssertNoAllocation no_gc;
-
- // Skip checks for hidden properties access. Note, we do not
- // require existence of a context in this case.
- if (key == heap_.hidden_string()) return true;
-
- // Check for compatibility between the security tokens in the
- // current lexical context and the accessed object.
- ASSERT(context());
-
- MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
- if (decision != UNKNOWN) return decision == YES;
-
- // Get named access check callback
- // TODO(dcarney): revert
- Map* map = receiver->map();
- CHECK(map->IsMap());
- CHECK(map->constructor()->IsJSFunction());
- JSFunction* constructor = JSFunction::cast(map->constructor());
- if (!constructor->shared()->IsApiFunction()) return false;
-
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == heap_.undefined_value()) return false;
-
- Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
- v8::NamedSecurityCallback callback =
- v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
-
- if (!callback) return false;
-
- HandleScope scope(this);
- Handle<JSObject> receiver_handle(receiver, this);
- Handle<Object> key_handle(key, this);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
- LOG(this, ApiNamedSecurityCheck(key));
- bool result = false;
- {
- // Leaving JavaScript.
- VMState state(this, EXTERNAL);
- result = callback(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(key_handle),
- type,
- v8::Utils::ToLocal(data));
- }
- return result;
-}
-
-
-bool Isolate::MayIndexedAccess(JSObject* receiver,
- uint32_t index,
- v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
- // Check for compatibility between the security tokens in the
- // current lexical context and the accessed object.
- ASSERT(context());
-
- MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
- if (decision != UNKNOWN) return decision == YES;
-
- // Get indexed access check callback
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return false;
-
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == heap_.undefined_value()) return false;
-
- Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
- v8::IndexedSecurityCallback callback =
- v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
-
- if (!callback) return false;
-
- HandleScope scope(this);
- Handle<JSObject> receiver_handle(receiver, this);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
- LOG(this, ApiIndexedSecurityCheck(index));
- bool result = false;
- {
- // Leaving JavaScript.
- VMState state(this, EXTERNAL);
- result = callback(v8::Utils::ToLocal(receiver_handle),
- index,
- type,
- v8::Utils::ToLocal(data));
- }
- return result;
-}
-
-
-const char* const Isolate::kStackOverflowMessage =
- "Uncaught RangeError: Maximum call stack size exceeded";
-
-
-Failure* Isolate::StackOverflow() {
- HandleScope scope(this);
- // At this point we cannot create an Error object using its javascript
- // constructor. Instead, we copy the pre-constructed boilerplate and
- // attach the stack trace as a hidden property.
- Handle<String> key = factory()->stack_overflow_string();
- Handle<JSObject> boilerplate =
- Handle<JSObject>::cast(GetProperty(this, js_builtins_object(), key));
- Handle<JSObject> exception = Copy(boilerplate);
- DoThrow(*exception, NULL);
-
- // Get stack trace limit.
- Handle<Object> error = GetProperty(js_builtins_object(), "$Error");
- if (!error->IsJSObject()) return Failure::Exception();
- Handle<Object> stack_trace_limit =
- GetProperty(Handle<JSObject>::cast(error), "stackTraceLimit");
- if (!stack_trace_limit->IsNumber()) return Failure::Exception();
- int limit = static_cast<int>(stack_trace_limit->Number());
-
- Handle<JSArray> stack_trace = CaptureSimpleStackTrace(
- exception, factory()->undefined_value(), limit);
- JSObject::SetHiddenProperty(exception,
- factory()->hidden_stack_trace_string(),
- stack_trace);
- return Failure::Exception();
-}
-
-
-Failure* Isolate::TerminateExecution() {
- DoThrow(heap_.termination_exception(), NULL);
- return Failure::Exception();
-}
-
-
-Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
- DoThrow(exception, location);
- return Failure::Exception();
-}
-
-
-Failure* Isolate::ReThrow(MaybeObject* exception) {
- bool can_be_caught_externally = false;
- bool catchable_by_javascript = is_catchable_by_javascript(exception);
- ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
-
- thread_local_top()->catcher_ = can_be_caught_externally ?
- try_catch_handler() : NULL;
-
- // Set the exception being re-thrown.
- set_pending_exception(exception);
- if (exception->IsFailure()) return exception->ToFailureUnchecked();
- return Failure::Exception();
-}
-
-
-Failure* Isolate::ThrowIllegalOperation() {
- return Throw(heap_.illegal_access_string());
-}
-
-
-void Isolate::ScheduleThrow(Object* exception) {
- // When scheduling a throw we first throw the exception to get the
- // error reporting if it is uncaught before rescheduling it.
- Throw(exception);
- PropagatePendingExceptionToExternalTryCatch();
- if (has_pending_exception()) {
- thread_local_top()->scheduled_exception_ = pending_exception();
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
- }
-}
-
-
-Failure* Isolate::PromoteScheduledException() {
- MaybeObject* thrown = scheduled_exception();
- clear_scheduled_exception();
- // Re-throw the exception to avoid getting repeated error reporting.
- return ReThrow(thrown);
-}
-
-
-void Isolate::PrintCurrentStackTrace(FILE* out) {
- StackTraceFrameIterator it(this);
- while (!it.done()) {
- HandleScope scope(this);
- // Find code position if recorded in relocation info.
- JavaScriptFrame* frame = it.frame();
- int pos = frame->LookupCode()->SourcePosition(frame->pc());
- Handle<Object> pos_obj(Smi::FromInt(pos), this);
- // Fetch function and receiver.
- Handle<JSFunction> fun(JSFunction::cast(frame->function()));
- Handle<Object> recv(frame->receiver(), this);
- // Advance to the next JavaScript frame and determine if the
- // current frame is the top-level frame.
- it.Advance();
- Handle<Object> is_top_level = it.done()
- ? factory()->true_value()
- : factory()->false_value();
- // Generate and print stack trace line.
- Handle<String> line =
- Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
- if (line->length() > 0) {
- line->PrintOn(out);
- fprintf(out, "\n");
- }
- }
-}
-
-
-void Isolate::ComputeLocation(MessageLocation* target) {
- *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
- StackTraceFrameIterator it(this);
- if (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- JSFunction* fun = JSFunction::cast(frame->function());
- Object* script = fun->shared()->script();
- if (script->IsScript() &&
- !(Script::cast(script)->source()->IsUndefined())) {
- int pos = frame->LookupCode()->SourcePosition(frame->pc());
- // Compute the location from the function and the reloc info.
- Handle<Script> casted_script(Script::cast(script));
- *target = MessageLocation(casted_script, pos, pos + 1);
- }
- }
-}
-
-
-bool Isolate::ShouldReportException(bool* can_be_caught_externally,
- bool catchable_by_javascript) {
- // Find the top-most try-catch handler.
- StackHandler* handler =
- StackHandler::FromAddress(Isolate::handler(thread_local_top()));
- while (handler != NULL && !handler->is_catch()) {
- handler = handler->next();
- }
-
- // Get the address of the external handler so we can compare the address to
- // determine which one is closer to the top of the stack.
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
-
- // The exception has been externally caught if and only if there is
- // an external handler which is on top of the top-most try-catch
- // handler.
- *can_be_caught_externally = external_handler_address != NULL &&
- (handler == NULL || handler->address() > external_handler_address ||
- !catchable_by_javascript);
-
- if (*can_be_caught_externally) {
- // Only report the exception if the external handler is verbose.
- return try_catch_handler()->is_verbose_;
- } else {
- // Report the exception if it isn't caught by JavaScript code.
- return handler == NULL;
- }
-}
-
-
-bool Isolate::IsErrorObject(Handle<Object> obj) {
- if (!obj->IsJSObject()) return false;
-
- String* error_key =
- *(factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("$Error")));
- Object* error_constructor =
- js_builtins_object()->GetPropertyNoExceptionThrown(error_key);
-
- for (Object* prototype = *obj; !prototype->IsNull();
- prototype = prototype->GetPrototype(this)) {
- if (!prototype->IsJSObject()) return false;
- if (JSObject::cast(prototype)->map()->constructor() == error_constructor) {
- return true;
- }
- }
- return false;
-}
-
-
-void Isolate::DoThrow(Object* exception, MessageLocation* location) {
- ASSERT(!has_pending_exception());
-
- HandleScope scope(this);
- Handle<Object> exception_handle(exception, this);
-
- // Determine reporting and whether the exception is caught externally.
- bool catchable_by_javascript = is_catchable_by_javascript(exception);
- bool can_be_caught_externally = false;
- bool should_report_exception =
- ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
- bool report_exception = catchable_by_javascript && should_report_exception;
- bool try_catch_needs_message =
- can_be_caught_externally && try_catch_handler()->capture_message_;
- bool bootstrapping = bootstrapper()->IsActive();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Notify debugger of exception.
- if (catchable_by_javascript) {
- debugger_->OnException(exception_handle, report_exception);
- }
-#endif
-
- // Generate the message if required.
- if (report_exception || try_catch_needs_message) {
- MessageLocation potential_computed_location;
- if (location == NULL) {
- // If no location was specified we use a computed one instead.
- ComputeLocation(&potential_computed_location);
- location = &potential_computed_location;
- }
- // It's not safe to try to make message objects or collect stack traces
- // while the bootstrapper is active since the infrastructure may not have
- // been properly initialized.
- if (!bootstrapping) {
- Handle<String> stack_trace;
- if (FLAG_trace_exception) stack_trace = StackTraceString();
- Handle<JSArray> stack_trace_object;
- if (capture_stack_trace_for_uncaught_exceptions_) {
- if (IsErrorObject(exception_handle)) {
- // We fetch the stack trace that corresponds to this error object.
- String* key = heap()->hidden_stack_trace_string();
- Object* stack_property =
- JSObject::cast(*exception_handle)->GetHiddenProperty(key);
- // Property lookup may have failed. In this case it's probably not
- // a valid Error object.
- if (stack_property->IsJSArray()) {
- stack_trace_object = Handle<JSArray>(JSArray::cast(stack_property));
- }
- }
- if (stack_trace_object.is_null()) {
- // Not an error object, we capture at throw site.
- stack_trace_object = CaptureCurrentStackTrace(
- stack_trace_for_uncaught_exceptions_frame_limit_,
- stack_trace_for_uncaught_exceptions_options_);
- }
- }
-
- Handle<Object> exception_arg = exception_handle;
- // If the exception argument is a custom object, turn it into a string
- // before throwing as uncaught exception. Note that the pending
- // exception object to be set later must not be turned into a string.
- if (exception_arg->IsJSObject() && !IsErrorObject(exception_arg)) {
- bool failed = false;
- exception_arg = Execution::ToDetailString(exception_arg, &failed);
- if (failed) {
- exception_arg = factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("exception"));
- }
- }
- Handle<Object> message_obj = MessageHandler::MakeMessageObject(
- "uncaught_exception",
- location,
- HandleVector<Object>(&exception_arg, 1),
- stack_trace,
- stack_trace_object);
- thread_local_top()->pending_message_obj_ = *message_obj;
- if (location != NULL) {
- thread_local_top()->pending_message_script_ = *location->script();
- thread_local_top()->pending_message_start_pos_ = location->start_pos();
- thread_local_top()->pending_message_end_pos_ = location->end_pos();
- }
- } else if (location != NULL && !location->script().is_null()) {
- // We are bootstrapping and caught an error where the location is set
- // and we have a script for the location.
- // In this case we could have an extension (or an internal error
- // somewhere) and we print out the line number at which the error occured
- // to the console for easier debugging.
- int line_number = GetScriptLineNumberSafe(location->script(),
- location->start_pos());
- if (exception->IsString()) {
- OS::PrintError(
- "Extension or internal compilation error: %s in %s at line %d.\n",
- *String::cast(exception)->ToCString(),
- *String::cast(location->script()->name())->ToCString(),
- line_number + 1);
- } else {
- OS::PrintError(
- "Extension or internal compilation error in %s at line %d.\n",
- *String::cast(location->script()->name())->ToCString(),
- line_number + 1);
- }
- }
- }
-
- // Save the message for reporting if the the exception remains uncaught.
- thread_local_top()->has_pending_message_ = report_exception;
-
- // Do not forget to clean catcher_ if currently thrown exception cannot
- // be caught. If necessary, ReThrow will update the catcher.
- thread_local_top()->catcher_ = can_be_caught_externally ?
- try_catch_handler() : NULL;
-
- set_pending_exception(*exception_handle);
-}
-
-
-bool Isolate::IsExternallyCaught() {
- ASSERT(has_pending_exception());
-
- if ((thread_local_top()->catcher_ == NULL) ||
- (try_catch_handler() != thread_local_top()->catcher_)) {
- // When throwing the exception, we found no v8::TryCatch
- // which should care about this exception.
- return false;
- }
-
- if (!is_catchable_by_javascript(pending_exception())) {
- return true;
- }
-
- // Get the address of the external handler so we can compare the address to
- // determine which one is closer to the top of the stack.
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
- ASSERT(external_handler_address != NULL);
-
- // The exception has been externally caught if and only if there is
- // an external handler which is on top of the top-most try-finally
- // handler.
- // There should be no try-catch blocks as they would prohibit us from
- // finding external catcher in the first place (see catcher_ check above).
- //
- // Note, that finally clause would rethrow an exception unless it's
- // aborted by jumps in control flow like return, break, etc. and we'll
- // have another chances to set proper v8::TryCatch.
- StackHandler* handler =
- StackHandler::FromAddress(Isolate::handler(thread_local_top()));
- while (handler != NULL && handler->address() < external_handler_address) {
- ASSERT(!handler->is_catch());
- if (handler->is_finally()) return false;
-
- handler = handler->next();
- }
-
- return true;
-}
-
-
-void Isolate::ReportPendingMessages() {
- ASSERT(has_pending_exception());
- PropagatePendingExceptionToExternalTryCatch();
-
- // If the pending exception is OutOfMemoryException set out_of_memory in
- // the native context. Note: We have to mark the native context here
- // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
- // set it.
- HandleScope scope(this);
- if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
- context()->mark_out_of_memory();
- } else if (thread_local_top_.pending_exception_ ==
- heap()->termination_exception()) {
- // Do nothing: if needed, the exception has been already propagated to
- // v8::TryCatch.
- } else {
- if (thread_local_top_.has_pending_message_) {
- thread_local_top_.has_pending_message_ = false;
- if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
- HandleScope scope(this);
- Handle<Object> message_obj(thread_local_top_.pending_message_obj_,
- this);
- if (thread_local_top_.pending_message_script_ != NULL) {
- Handle<Script> script(thread_local_top_.pending_message_script_);
- int start_pos = thread_local_top_.pending_message_start_pos_;
- int end_pos = thread_local_top_.pending_message_end_pos_;
- MessageLocation location(script, start_pos, end_pos);
- MessageHandler::ReportMessage(this, &location, message_obj);
- } else {
- MessageHandler::ReportMessage(this, NULL, message_obj);
- }
- }
- }
- }
- clear_pending_message();
-}
-
-
-MessageLocation Isolate::GetMessageLocation() {
- ASSERT(has_pending_exception());
-
- if (!thread_local_top_.pending_exception_->IsOutOfMemory() &&
- thread_local_top_.pending_exception_ != heap()->termination_exception() &&
- thread_local_top_.has_pending_message_ &&
- !thread_local_top_.pending_message_obj_->IsTheHole() &&
- thread_local_top_.pending_message_script_ != NULL) {
- Handle<Script> script(thread_local_top_.pending_message_script_);
- int start_pos = thread_local_top_.pending_message_start_pos_;
- int end_pos = thread_local_top_.pending_message_end_pos_;
- return MessageLocation(script, start_pos, end_pos);
- }
-
- return MessageLocation();
-}
-
-
-void Isolate::TraceException(bool flag) {
- FLAG_trace_exception = flag; // TODO(isolates): This is an unfortunate use.
-}
-
-
-bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
- ASSERT(has_pending_exception());
- PropagatePendingExceptionToExternalTryCatch();
-
- // Always reschedule out of memory exceptions.
- if (!is_out_of_memory()) {
- bool is_termination_exception =
- pending_exception() == heap_.termination_exception();
-
- // Do not reschedule the exception if this is the bottom call.
- bool clear_exception = is_bottom_call;
-
- if (is_termination_exception) {
- if (is_bottom_call) {
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
- return false;
- }
- } else if (thread_local_top()->external_caught_exception_) {
- // If the exception is externally caught, clear it if there are no
- // JavaScript frames on the way to the C++ frame that has the
- // external handler.
- ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
- JavaScriptFrameIterator it(this);
- if (it.done() || (it.frame()->sp() > external_handler_address)) {
- clear_exception = true;
- }
- }
-
- // Clear the exception if needed.
- if (clear_exception) {
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
- return false;
- }
- }
-
- // Reschedule the exception.
- thread_local_top()->scheduled_exception_ = pending_exception();
- clear_pending_exception();
- return true;
-}
-
-
-void Isolate::SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options) {
- capture_stack_trace_for_uncaught_exceptions_ = capture;
- stack_trace_for_uncaught_exceptions_frame_limit_ = frame_limit;
- stack_trace_for_uncaught_exceptions_options_ = options;
-}
-
-
-bool Isolate::is_out_of_memory() {
- if (has_pending_exception()) {
- MaybeObject* e = pending_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- if (has_scheduled_exception()) {
- MaybeObject* e = scheduled_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- return false;
-}
-
-
-Handle<Context> Isolate::native_context() {
- GlobalObject* global = thread_local_top()->context_->global_object();
- return Handle<Context>(global->native_context());
-}
-
-
-Handle<Context> Isolate::global_context() {
- GlobalObject* global = thread_local_top()->context_->global_object();
- return Handle<Context>(global->global_context());
-}
-
-
-Handle<Context> Isolate::GetCallingNativeContext() {
- JavaScriptFrameIterator it(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (debug_->InDebugger()) {
- while (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- Context* context = Context::cast(frame->context());
- if (context->native_context() == *debug_->debug_context()) {
- it.Advance();
- } else {
- break;
- }
- }
- }
-#endif // ENABLE_DEBUGGER_SUPPORT
- if (it.done()) return Handle<Context>::null();
- JavaScriptFrame* frame = it.frame();
- Context* context = Context::cast(frame->context());
- return Handle<Context>(context->native_context());
-}
-
-
-char* Isolate::ArchiveThread(char* to) {
- if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
- RuntimeProfiler::IsolateExitedJS(this);
- }
- memcpy(to, reinterpret_cast<char*>(thread_local_top()),
- sizeof(ThreadLocalTop));
- InitializeThreadLocal();
- clear_pending_exception();
- clear_pending_message();
- clear_scheduled_exception();
- return to + sizeof(ThreadLocalTop);
-}
-
-
-char* Isolate::RestoreThread(char* from) {
- memcpy(reinterpret_cast<char*>(thread_local_top()), from,
- sizeof(ThreadLocalTop));
- // This might be just paranoia, but it seems to be needed in case a
- // thread_local_top_ is restored on a separate OS thread.
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
- thread_local_top()->simulator_ = Simulator::current(this);
-#elif V8_TARGET_ARCH_MIPS
- thread_local_top()->simulator_ = Simulator::current(this);
-#endif
-#endif
- if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
- RuntimeProfiler::IsolateEnteredJS(this);
- }
- ASSERT(context() == NULL || context()->IsContext());
- return from + sizeof(ThreadLocalTop);
-}
-
-
-Isolate::ThreadDataTable::ThreadDataTable()
- : list_(NULL) {
-}
-
-
-Isolate::ThreadDataTable::~ThreadDataTable() {
- // TODO(svenpanne) The assertion below would fire if an embedder does not
- // cleanly dispose all Isolates before disposing v8, so we are conservative
- // and leave it out for now.
- // ASSERT_EQ(NULL, list_);
-}
-
-
-Isolate::PerIsolateThreadData*
- Isolate::ThreadDataTable::Lookup(Isolate* isolate,
- ThreadId thread_id) {
- for (PerIsolateThreadData* data = list_; data != NULL; data = data->next_) {
- if (data->Matches(isolate, thread_id)) return data;
- }
- return NULL;
-}
-
-
-void Isolate::ThreadDataTable::Insert(Isolate::PerIsolateThreadData* data) {
- if (list_ != NULL) list_->prev_ = data;
- data->next_ = list_;
- list_ = data;
-}
-
-
-void Isolate::ThreadDataTable::Remove(PerIsolateThreadData* data) {
- if (list_ == data) list_ = data->next_;
- if (data->next_ != NULL) data->next_->prev_ = data->prev_;
- if (data->prev_ != NULL) data->prev_->next_ = data->next_;
- delete data;
-}
-
-
-void Isolate::ThreadDataTable::Remove(Isolate* isolate,
- ThreadId thread_id) {
- PerIsolateThreadData* data = Lookup(isolate, thread_id);
- if (data != NULL) {
- Remove(data);
- }
-}
-
-
-void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
- PerIsolateThreadData* data = list_;
- while (data != NULL) {
- PerIsolateThreadData* next = data->next_;
- if (data->isolate() == isolate) Remove(data);
- data = next;
- }
-}
-
-
-#ifdef DEBUG
-#define TRACE_ISOLATE(tag) \
- do { \
- if (FLAG_trace_isolates) { \
- PrintF("Isolate %p " #tag "\n", reinterpret_cast<void*>(this)); \
- } \
- } while (false)
-#else
-#define TRACE_ISOLATE(tag)
-#endif
-
-
-Isolate::Isolate()
- : state_(UNINITIALIZED),
- embedder_data_(NULL),
- entry_stack_(NULL),
- stack_trace_nesting_level_(0),
- incomplete_message_(NULL),
- preallocated_memory_thread_(NULL),
- preallocated_message_space_(NULL),
- bootstrapper_(NULL),
- runtime_profiler_(NULL),
- compilation_cache_(NULL),
- counters_(NULL),
- code_range_(NULL),
- // Must be initialized early to allow v8::SetResourceConstraints calls.
- break_access_(OS::CreateMutex()),
- debugger_initialized_(false),
- // Must be initialized early to allow v8::Debug calls.
- debugger_access_(OS::CreateMutex()),
- logger_(NULL),
- stats_table_(NULL),
- stub_cache_(NULL),
- deoptimizer_data_(NULL),
- capture_stack_trace_for_uncaught_exceptions_(false),
- stack_trace_for_uncaught_exceptions_frame_limit_(0),
- stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
- transcendental_cache_(NULL),
- memory_allocator_(NULL),
- keyed_lookup_cache_(NULL),
- context_slot_cache_(NULL),
- descriptor_lookup_cache_(NULL),
- handle_scope_implementer_(NULL),
- unicode_cache_(NULL),
- runtime_zone_(this),
- in_use_list_(0),
- free_list_(0),
- preallocated_storage_preallocated_(false),
- inner_pointer_to_code_cache_(NULL),
- write_iterator_(NULL),
- global_handles_(NULL),
- context_switcher_(NULL),
- thread_manager_(NULL),
- fp_stubs_generated_(false),
- has_installed_extensions_(false),
- string_tracker_(NULL),
- regexp_stack_(NULL),
- date_cache_(NULL),
- code_stub_interface_descriptors_(NULL),
- context_exit_happened_(false),
- deferred_handles_head_(NULL),
- optimizing_compiler_thread_(this),
- marking_thread_(NULL),
- sweeper_thread_(NULL) {
- TRACE_ISOLATE(constructor);
-
- memset(isolate_addresses_, 0,
- sizeof(isolate_addresses_[0]) * (kIsolateAddressCount + 1));
-
- heap_.isolate_ = this;
- stack_guard_.isolate_ = this;
-
- // ThreadManager is initialized early to support locking an isolate
- // before it is entered.
- thread_manager_ = new ThreadManager();
- thread_manager_->isolate_ = this;
-
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
- simulator_initialized_ = false;
- simulator_i_cache_ = NULL;
- simulator_redirection_ = NULL;
-#endif
-
-#ifdef DEBUG
- // heap_histograms_ initializes itself.
- memset(&js_spill_information_, 0, sizeof(js_spill_information_));
- memset(code_kind_statistics_, 0,
- sizeof(code_kind_statistics_[0]) * Code::NUMBER_OF_KINDS);
-
- allow_handle_deref_ = true;
-#endif
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- debug_ = NULL;
- debugger_ = NULL;
-#endif
-
- handle_scope_data_.Initialize();
-
-#define ISOLATE_INIT_EXECUTE(type, name, initial_value) \
- name##_ = (initial_value);
- ISOLATE_INIT_LIST(ISOLATE_INIT_EXECUTE)
-#undef ISOLATE_INIT_EXECUTE
-
-#define ISOLATE_INIT_ARRAY_EXECUTE(type, name, length) \
- memset(name##_, 0, sizeof(type) * length);
- ISOLATE_INIT_ARRAY_LIST(ISOLATE_INIT_ARRAY_EXECUTE)
-#undef ISOLATE_INIT_ARRAY_EXECUTE
-}
-
-
-void Isolate::TearDown() {
- TRACE_ISOLATE(tear_down);
-
- // Temporarily set this isolate as current so that various parts of
- // the isolate can access it in their destructors without having a
- // direct pointer. We don't use Enter/Exit here to avoid
- // initializing the thread data.
- PerIsolateThreadData* saved_data = CurrentPerIsolateThreadData();
- Isolate* saved_isolate = UncheckedCurrent();
- SetIsolateThreadLocals(this, NULL);
-
- Deinit();
-
- { ScopedLock lock(process_wide_mutex_);
- thread_data_table_->RemoveAllThreads(this);
- }
-
- if (serialize_partial_snapshot_cache_ != NULL) {
- delete[] serialize_partial_snapshot_cache_;
- serialize_partial_snapshot_cache_ = NULL;
- }
-
- if (!IsDefaultIsolate()) {
- delete this;
- }
-
- // Restore the previous current isolate.
- SetIsolateThreadLocals(saved_isolate, saved_data);
-}
-
-
-void Isolate::GlobalTearDown() {
- delete thread_data_table_;
-}
-
-
-void Isolate::Deinit() {
- if (state_ == INITIALIZED) {
- TRACE_ISOLATE(deinit);
-
- if (FLAG_sweeper_threads > 0) {
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- sweeper_thread_[i]->Stop();
- delete sweeper_thread_[i];
- }
- delete[] sweeper_thread_;
- }
-
- if (FLAG_marking_threads > 0) {
- for (int i = 0; i < FLAG_marking_threads; i++) {
- marking_thread_[i]->Stop();
- delete marking_thread_[i];
- }
- delete[] marking_thread_;
- }
-
- if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
-
- if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
-
- // We must stop the logger before we tear down other components.
- logger_->EnsureTickerStopped();
-
- delete deoptimizer_data_;
- deoptimizer_data_ = NULL;
- if (FLAG_preemption) {
- v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
- v8::Locker::StopPreemption();
- }
- builtins_.TearDown();
- bootstrapper_->TearDown();
-
- // Remove the external reference to the preallocated stack memory.
- delete preallocated_message_space_;
- preallocated_message_space_ = NULL;
- PreallocatedMemoryThreadStop();
-
- HeapProfiler::TearDown();
- CpuProfiler::TearDown();
- if (runtime_profiler_ != NULL) {
- runtime_profiler_->TearDown();
- delete runtime_profiler_;
- runtime_profiler_ = NULL;
- }
- heap_.TearDown();
- logger_->TearDown();
-
- // The default isolate is re-initializable due to legacy API.
- state_ = UNINITIALIZED;
- }
-}
-
-
-void Isolate::PushToPartialSnapshotCache(Object* obj) {
- int length = serialize_partial_snapshot_cache_length();
- int capacity = serialize_partial_snapshot_cache_capacity();
-
- if (length >= capacity) {
- int new_capacity = static_cast<int>((capacity + 10) * 1.2);
- Object** new_array = new Object*[new_capacity];
- for (int i = 0; i < length; i++) {
- new_array[i] = serialize_partial_snapshot_cache()[i];
- }
- if (capacity != 0) delete[] serialize_partial_snapshot_cache();
- set_serialize_partial_snapshot_cache(new_array);
- set_serialize_partial_snapshot_cache_capacity(new_capacity);
- }
-
- serialize_partial_snapshot_cache()[length] = obj;
- set_serialize_partial_snapshot_cache_length(length + 1);
-}
-
-
-void Isolate::SetIsolateThreadLocals(Isolate* isolate,
- PerIsolateThreadData* data) {
- Thread::SetThreadLocal(isolate_key_, isolate);
- Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
-}
-
-
-Isolate::~Isolate() {
- TRACE_ISOLATE(destructor);
-
- // Has to be called while counters_ are still alive.
- runtime_zone_.DeleteKeptSegment();
-
- delete[] assembler_spare_buffer_;
- assembler_spare_buffer_ = NULL;
-
- delete unicode_cache_;
- unicode_cache_ = NULL;
-
- delete date_cache_;
- date_cache_ = NULL;
-
- delete[] code_stub_interface_descriptors_;
- code_stub_interface_descriptors_ = NULL;
-
- delete regexp_stack_;
- regexp_stack_ = NULL;
-
- delete descriptor_lookup_cache_;
- descriptor_lookup_cache_ = NULL;
- delete context_slot_cache_;
- context_slot_cache_ = NULL;
- delete keyed_lookup_cache_;
- keyed_lookup_cache_ = NULL;
-
- delete transcendental_cache_;
- transcendental_cache_ = NULL;
- delete stub_cache_;
- stub_cache_ = NULL;
- delete stats_table_;
- stats_table_ = NULL;
-
- delete logger_;
- logger_ = NULL;
-
- delete counters_;
- counters_ = NULL;
-
- delete handle_scope_implementer_;
- handle_scope_implementer_ = NULL;
- delete break_access_;
- break_access_ = NULL;
- delete debugger_access_;
- debugger_access_ = NULL;
-
- delete compilation_cache_;
- compilation_cache_ = NULL;
- delete bootstrapper_;
- bootstrapper_ = NULL;
- delete inner_pointer_to_code_cache_;
- inner_pointer_to_code_cache_ = NULL;
- delete write_iterator_;
- write_iterator_ = NULL;
-
- delete context_switcher_;
- context_switcher_ = NULL;
- delete thread_manager_;
- thread_manager_ = NULL;
-
- delete string_tracker_;
- string_tracker_ = NULL;
-
- delete memory_allocator_;
- memory_allocator_ = NULL;
- delete code_range_;
- code_range_ = NULL;
- delete global_handles_;
- global_handles_ = NULL;
-
- delete external_reference_table_;
- external_reference_table_ = NULL;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- delete debugger_;
- debugger_ = NULL;
- delete debug_;
- debug_ = NULL;
-#endif
-}
-
-
-void Isolate::InitializeThreadLocal() {
- thread_local_top_.isolate_ = this;
- thread_local_top_.Initialize();
-}
-
-
-void Isolate::PropagatePendingExceptionToExternalTryCatch() {
- ASSERT(has_pending_exception());
-
- bool external_caught = IsExternallyCaught();
- thread_local_top_.external_caught_exception_ = external_caught;
-
- if (!external_caught) return;
-
- if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
- // Do not propagate OOM exception: we should kill VM asap.
- } else if (thread_local_top_.pending_exception_ ==
- heap()->termination_exception()) {
- try_catch_handler()->can_continue_ = false;
- try_catch_handler()->exception_ = heap()->null_value();
- } else {
- // At this point all non-object (failure) exceptions have
- // been dealt with so this shouldn't fail.
- ASSERT(!pending_exception()->IsFailure());
- try_catch_handler()->can_continue_ = true;
- try_catch_handler()->exception_ = pending_exception();
- if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
- try_catch_handler()->message_ = thread_local_top_.pending_message_obj_;
- }
- }
-}
-
-
-void Isolate::InitializeLoggingAndCounters() {
- if (logger_ == NULL) {
- logger_ = new Logger(this);
- }
- if (counters_ == NULL) {
- counters_ = new Counters;
- }
-}
-
-
-void Isolate::InitializeDebugger() {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ScopedLock lock(debugger_access_);
- if (NoBarrier_Load(&debugger_initialized_)) return;
- InitializeLoggingAndCounters();
- debug_ = new Debug(this);
- debugger_ = new Debugger(this);
- Release_Store(&debugger_initialized_, true);
-#endif
-}
-
-
-bool Isolate::Init(Deserializer* des) {
- ASSERT(state_ != INITIALIZED);
- ASSERT(Isolate::Current() == this);
- TRACE_ISOLATE(init);
-
- // The initialization process does not handle memory exhaustion.
- DisallowAllocationFailure disallow_allocation_failure;
-
- InitializeLoggingAndCounters();
-
- InitializeDebugger();
-
- memory_allocator_ = new MemoryAllocator(this);
- code_range_ = new CodeRange(this);
-
- // Safe after setting Heap::isolate_, initializing StackGuard and
- // ensuring that Isolate::Current() == this.
- heap_.SetStackLimits();
-
-#define ASSIGN_ELEMENT(CamelName, hacker_name) \
- isolate_addresses_[Isolate::k##CamelName##Address] = \
- reinterpret_cast<Address>(hacker_name##_address());
- FOR_EACH_ISOLATE_ADDRESS_NAME(ASSIGN_ELEMENT)
-#undef C
-
- string_tracker_ = new StringTracker();
- string_tracker_->isolate_ = this;
- compilation_cache_ = new CompilationCache(this);
- transcendental_cache_ = new TranscendentalCache();
- keyed_lookup_cache_ = new KeyedLookupCache();
- context_slot_cache_ = new ContextSlotCache();
- descriptor_lookup_cache_ = new DescriptorLookupCache();
- unicode_cache_ = new UnicodeCache();
- inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
- write_iterator_ = new ConsStringIteratorOp();
- global_handles_ = new GlobalHandles(this);
- bootstrapper_ = new Bootstrapper(this);
- handle_scope_implementer_ = new HandleScopeImplementer(this);
- stub_cache_ = new StubCache(this, runtime_zone());
- regexp_stack_ = new RegExpStack();
- regexp_stack_->isolate_ = this;
- date_cache_ = new DateCache();
- code_stub_interface_descriptors_ =
- new CodeStubInterfaceDescriptor[CodeStub::NUMBER_OF_IDS];
-
- // Enable logging before setting up the heap
- logger_->SetUp();
-
- CpuProfiler::SetUp();
- HeapProfiler::SetUp();
-
- // Initialize other runtime facilities
-#if defined(USE_SIMULATOR)
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
- Simulator::Initialize(this);
-#endif
-#endif
-
- { // NOLINT
- // Ensure that the thread has a valid stack guard. The v8::Locker object
- // will ensure this too, but we don't have to use lockers if we are only
- // using one thread.
- ExecutionAccess lock(this);
- stack_guard_.InitThread(lock);
- }
-
- // SetUp the object heap.
- ASSERT(!heap_.HasBeenSetUp());
- if (!heap_.SetUp()) {
- V8::FatalProcessOutOfMemory("heap setup");
- return false;
- }
-
- deoptimizer_data_ = new DeoptimizerData;
-
- const bool create_heap_objects = (des == NULL);
- if (create_heap_objects && !heap_.CreateHeapObjects()) {
- V8::FatalProcessOutOfMemory("heap object creation");
- return false;
- }
-
- if (create_heap_objects) {
- // Terminate the cache array with the sentinel so we can iterate.
- PushToPartialSnapshotCache(heap_.undefined_value());
- }
-
- InitializeThreadLocal();
-
- bootstrapper_->Initialize(create_heap_objects);
- builtins_.SetUp(create_heap_objects);
-
- // Only preallocate on the first initialization.
- if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
- // Start the thread which will set aside some memory.
- PreallocatedMemoryThreadStart();
- preallocated_message_space_ =
- new NoAllocationStringAllocator(
- preallocated_memory_thread_->data(),
- preallocated_memory_thread_->length());
- PreallocatedStorageInit(preallocated_memory_thread_->length() / 4);
- }
-
- if (FLAG_preemption) {
- v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
- v8::Locker::StartPreemption(100);
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- debug_->SetUp(create_heap_objects);
-#endif
-
- // If we are deserializing, read the state into the now-empty heap.
- if (!create_heap_objects) {
- des->Deserialize();
- }
- stub_cache_->Initialize();
-
- // Finish initialization of ThreadLocal after deserialization is done.
- clear_pending_exception();
- clear_pending_message();
- clear_scheduled_exception();
-
- // Deserializing may put strange things in the root array's copy of the
- // stack guard.
- heap_.SetStackLimits();
-
- // Quiet the heap NaN if needed on target platform.
- if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
-
- runtime_profiler_ = new RuntimeProfiler(this);
- runtime_profiler_->SetUp();
-
- // If we are deserializing, log non-function code objects and compiled
- // functions found in the snapshot.
- if (!create_heap_objects &&
- (FLAG_log_code || FLAG_ll_prof || logger_->is_logging_code_events())) {
- HandleScope scope(this);
- LOG(this, LogCodeObjects());
- LOG(this, LogCompiledFunctions());
- }
-
- CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, state_)),
- Internals::kIsolateStateOffset);
- CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
- Internals::kIsolateEmbedderDataOffset);
- CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
- Internals::kIsolateRootsOffset);
-
- state_ = INITIALIZED;
- time_millis_at_init_ = OS::TimeCurrentMillis();
-
- if (!create_heap_objects) {
- // Now that the heap is consistent, it's OK to generate the code for the
- // deopt entry table that might have been referred to by optimized code in
- // the snapshot.
- HandleScope scope(this);
- Deoptimizer::EnsureCodeForDeoptimizationEntry(
- this,
- Deoptimizer::LAZY,
- kDeoptTableSerializeEntryCount - 1);
- }
-
- if (!Serializer::enabled()) {
- // Ensure that the stub failure trampoline has been generated.
- HandleScope scope(this);
- CodeStub::GenerateFPStubs(this);
- StubFailureTrampolineStub::GenerateAheadOfTime(this);
- }
-
- if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
-
- if (FLAG_parallel_marking && FLAG_marking_threads == 0) {
- FLAG_marking_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::PARALLEL_MARKING);
- }
- if (FLAG_marking_threads > 0) {
- marking_thread_ = new MarkingThread*[FLAG_marking_threads];
- for (int i = 0; i < FLAG_marking_threads; i++) {
- marking_thread_[i] = new MarkingThread(this);
- marking_thread_[i]->Start();
- }
- } else {
- FLAG_parallel_marking = false;
- }
-
- if (FLAG_sweeper_threads == 0) {
- if (FLAG_concurrent_sweeping) {
- FLAG_sweeper_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::CONCURRENT_SWEEPING);
- } else if (FLAG_parallel_sweeping) {
- FLAG_sweeper_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::PARALLEL_SWEEPING);
- }
- }
- if (FLAG_sweeper_threads > 0) {
- sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- sweeper_thread_[i] = new SweeperThread(this);
- sweeper_thread_[i]->Start();
- }
- } else {
- FLAG_concurrent_sweeping = false;
- FLAG_parallel_sweeping = false;
- }
- return true;
-}
-
-
-// Initialized lazily to allow early
-// v8::V8::SetAddHistogramSampleFunction calls.
-StatsTable* Isolate::stats_table() {
- if (stats_table_ == NULL) {
- stats_table_ = new StatsTable;
- }
- return stats_table_;
-}
-
-
-void Isolate::Enter() {
- Isolate* current_isolate = NULL;
- PerIsolateThreadData* current_data = CurrentPerIsolateThreadData();
- if (current_data != NULL) {
- current_isolate = current_data->isolate_;
- ASSERT(current_isolate != NULL);
- if (current_isolate == this) {
- ASSERT(Current() == this);
- ASSERT(entry_stack_ != NULL);
- ASSERT(entry_stack_->previous_thread_data == NULL ||
- entry_stack_->previous_thread_data->thread_id().Equals(
- ThreadId::Current()));
- // Same thread re-enters the isolate, no need to re-init anything.
- entry_stack_->entry_count++;
- return;
- }
- }
-
- // Threads can have default isolate set into TLS as Current but not yet have
- // PerIsolateThreadData for it, as it requires more advanced phase of the
- // initialization. For example, a thread might be the one that system used for
- // static initializers - in this case the default isolate is set in TLS but
- // the thread did not yet Enter the isolate. If PerisolateThreadData is not
- // there, use the isolate set in TLS.
- if (current_isolate == NULL) {
- current_isolate = Isolate::UncheckedCurrent();
- }
-
- PerIsolateThreadData* data = FindOrAllocatePerThreadDataForThisThread();
- ASSERT(data != NULL);
- ASSERT(data->isolate_ == this);
-
- EntryStackItem* item = new EntryStackItem(current_data,
- current_isolate,
- entry_stack_);
- entry_stack_ = item;
-
- SetIsolateThreadLocals(this, data);
-
- // In case it's the first time some thread enters the isolate.
- set_thread_id(data->thread_id());
-}
-
-
-void Isolate::Exit() {
- ASSERT(entry_stack_ != NULL);
- ASSERT(entry_stack_->previous_thread_data == NULL ||
- entry_stack_->previous_thread_data->thread_id().Equals(
- ThreadId::Current()));
-
- if (--entry_stack_->entry_count > 0) return;
-
- ASSERT(CurrentPerIsolateThreadData() != NULL);
- ASSERT(CurrentPerIsolateThreadData()->isolate_ == this);
-
- // Pop the stack.
- EntryStackItem* item = entry_stack_;
- entry_stack_ = item->previous_item;
-
- PerIsolateThreadData* previous_thread_data = item->previous_thread_data;
- Isolate* previous_isolate = item->previous_isolate;
-
- delete item;
-
- // Reinit the current thread for the isolate it was running before this one.
- SetIsolateThreadLocals(previous_isolate, previous_thread_data);
-}
-
-
-void Isolate::LinkDeferredHandles(DeferredHandles* deferred) {
- deferred->next_ = deferred_handles_head_;
- if (deferred_handles_head_ != NULL) {
- deferred_handles_head_->previous_ = deferred;
- }
- deferred_handles_head_ = deferred;
-}
-
-
-void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
-#ifdef DEBUG
- // In debug mode assert that the linked list is well-formed.
- DeferredHandles* deferred_iterator = deferred;
- while (deferred_iterator->previous_ != NULL) {
- deferred_iterator = deferred_iterator->previous_;
- }
- ASSERT(deferred_handles_head_ == deferred_iterator);
-#endif
- if (deferred_handles_head_ == deferred) {
- deferred_handles_head_ = deferred_handles_head_->next_;
- }
- if (deferred->next_ != NULL) {
- deferred->next_->previous_ = deferred->previous_;
- }
- if (deferred->previous_ != NULL) {
- deferred->previous_->next_ = deferred->next_;
- }
-}
-
-
-CodeStubInterfaceDescriptor*
- Isolate::code_stub_interface_descriptor(int index) {
- return code_stub_interface_descriptors_ + index;
-}
-
-
-#ifdef DEBUG
-#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
-const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
-ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
-ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
-#undef ISOLATE_FIELD_OFFSET
-#endif
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/isolate.h b/src/3rdparty/v8/src/isolate.h
deleted file mode 100644
index 53fece7..0000000
--- a/src/3rdparty/v8/src/isolate.h
+++ /dev/null
@@ -1,1494 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ISOLATE_H_
-#define V8_ISOLATE_H_
-
-#include "../include/v8-debug.h"
-#include "allocation.h"
-#include "apiutils.h"
-#include "atomicops.h"
-#include "builtins.h"
-#include "contexts.h"
-#include "execution.h"
-#include "frames.h"
-#include "date.h"
-#include "global-handles.h"
-#include "handles.h"
-#include "hashmap.h"
-#include "heap.h"
-#include "optimizing-compiler-thread.h"
-#include "regexp-stack.h"
-#include "runtime-profiler.h"
-#include "runtime.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-class Bootstrapper;
-class CodeGenerator;
-class CodeRange;
-struct CodeStubInterfaceDescriptor;
-class CompilationCache;
-class ContextSlotCache;
-class ContextSwitcher;
-class Counters;
-class CpuFeatures;
-class CpuProfiler;
-class DeoptimizerData;
-class Deserializer;
-class EmptyStatement;
-class ExternalReferenceTable;
-class Factory;
-class FunctionInfoListener;
-class HandleScopeImplementer;
-class HeapProfiler;
-class InlineRuntimeFunctionsTable;
-class NoAllocationStringAllocator;
-class InnerPointerToCodeCache;
-class MarkingThread;
-class PreallocatedMemoryThread;
-class RegExpStack;
-class SaveContext;
-class UnicodeCache;
-class ConsStringIteratorOp;
-class StringTracker;
-class StubCache;
-class SweeperThread;
-class ThreadManager;
-class ThreadState;
-class ThreadVisitor; // Defined in v8threads.h
-class VMState;
-
-// 'void function pointer', used to roundtrip the
-// ExternalReference::ExternalReferenceRedirector since we can not include
-// assembler.h, where it is defined, here.
-typedef void* ExternalReferenceRedirectorPointer();
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-class Debug;
-class Debugger;
-class DebuggerAgent;
-#endif
-
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
-class Redirection;
-class Simulator;
-#endif
-
-
-// Static indirection table for handles to constants. If a frame
-// element represents a constant, the data contains an index into
-// this table of handles to the actual constants.
-// Static indirection table for handles to constants. If a Result
-// represents a constant, the data contains an index into this table
-// of handles to the actual constants.
-typedef ZoneList<Handle<Object> > ZoneObjectList;
-
-#define RETURN_IF_SCHEDULED_EXCEPTION(isolate) \
- do { \
- Isolate* __isolate__ = (isolate); \
- if (__isolate__->has_scheduled_exception()) { \
- return __isolate__->PromoteScheduledException(); \
- } \
- } while (false)
-
-#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
- do { \
- if ((call).is_null()) { \
- ASSERT((isolate)->has_pending_exception()); \
- return (value); \
- } \
- } while (false)
-
-#define CHECK_NOT_EMPTY_HANDLE(isolate, call) \
- do { \
- ASSERT(!(isolate)->has_pending_exception()); \
- CHECK(!(call).is_null()); \
- CHECK(!(isolate)->has_pending_exception()); \
- } while (false)
-
-#define RETURN_IF_EMPTY_HANDLE(isolate, call) \
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())
-
-#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
- C(Handler, handler) \
- C(CEntryFP, c_entry_fp) \
- C(Context, context) \
- C(PendingException, pending_exception) \
- C(ExternalCaughtException, external_caught_exception) \
- C(JSEntrySP, js_entry_sp)
-
-
-// Platform-independent, reliable thread identifier.
-class ThreadId {
- public:
- // Creates an invalid ThreadId.
- ThreadId() : id_(kInvalidId) {}
-
- // Returns ThreadId for current thread.
- static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }
-
- // Returns invalid ThreadId (guaranteed not to be equal to any thread).
- static ThreadId Invalid() { return ThreadId(kInvalidId); }
-
- // Compares ThreadIds for equality.
- INLINE(bool Equals(const ThreadId& other) const) {
- return id_ == other.id_;
- }
-
- // Checks whether this ThreadId refers to any thread.
- INLINE(bool IsValid() const) {
- return id_ != kInvalidId;
- }
-
- // Converts ThreadId to an integer representation
- // (required for public API: V8::V8::GetCurrentThreadId).
- int ToInteger() const { return id_; }
-
- // Converts ThreadId to an integer representation
- // (required for public API: V8::V8::TerminateExecution).
- static ThreadId FromInteger(int id) { return ThreadId(id); }
-
- private:
- static const int kInvalidId = -1;
-
- explicit ThreadId(int id) : id_(id) {}
-
- static int AllocateThreadId();
-
- static int GetCurrentThreadId();
-
- int id_;
-
- static Atomic32 highest_thread_id_;
-
- friend class Isolate;
-};
-
-
-class ThreadLocalTop BASE_EMBEDDED {
- public:
- // Does early low-level initialization that does not depend on the
- // isolate being present.
- ThreadLocalTop();
-
- // Initialize the thread data.
- void Initialize();
-
- // Get the top C++ try catch handler or NULL if none are registered.
- //
- // This method is not guarenteed to return an address that can be
- // used for comparison with addresses into the JS stack. If such an
- // address is needed, use try_catch_handler_address.
- v8::TryCatch* TryCatchHandler();
-
- // Get the address of the top C++ try catch handler or NULL if
- // none are registered.
- //
- // This method always returns an address that can be compared to
- // pointers into the JavaScript stack. When running on actual
- // hardware, try_catch_handler_address and TryCatchHandler return
- // the same pointer. When running on a simulator with a separate JS
- // stack, try_catch_handler_address returns a JS stack address that
- // corresponds to the place on the JS stack where the C++ handler
- // would have been if the stack were not separate.
- inline Address try_catch_handler_address() {
- return try_catch_handler_address_;
- }
-
- // Set the address of the top C++ try catch handler.
- inline void set_try_catch_handler_address(Address address) {
- try_catch_handler_address_ = address;
- }
-
- void Free() {
- ASSERT(!has_pending_message_);
- ASSERT(!external_caught_exception_);
- ASSERT(try_catch_handler_address_ == NULL);
- }
-
- Isolate* isolate_;
- // The context where the current execution method is created and for variable
- // lookups.
- Context* context_;
- ThreadId thread_id_;
- MaybeObject* pending_exception_;
- bool has_pending_message_;
- Object* pending_message_obj_;
- Script* pending_message_script_;
- int pending_message_start_pos_;
- int pending_message_end_pos_;
- // Use a separate value for scheduled exceptions to preserve the
- // invariants that hold about pending_exception. We may want to
- // unify them later.
- MaybeObject* scheduled_exception_;
- bool external_caught_exception_;
- SaveContext* save_context_;
- v8::TryCatch* catcher_;
-
- // Stack.
- Address c_entry_fp_; // the frame pointer of the top c entry frame
- Address handler_; // try-blocks are chained through the stack
-
-#ifdef USE_SIMULATOR
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
- Simulator* simulator_;
-#endif
-#endif // USE_SIMULATOR
-
- Address js_entry_sp_; // the stack pointer of the bottom JS entry frame
- Address external_callback_; // the external callback we're currently in
- StateTag current_vm_state_;
-
- // Generated code scratch locations.
- int32_t formal_count_;
-
- // Call back function to report unsafe JS accesses.
- v8::FailedAccessCheckCallback failed_access_check_callback_;
-
- // Head of the list of live LookupResults.
- LookupResult* top_lookup_result_;
-
- // Call back function for user object comparisons
- v8::UserObjectComparisonCallback user_object_comparison_callback_;
-
- // Whether out of memory exceptions should be ignored.
- bool ignore_out_of_memory_;
-
- private:
- void InitializeInternal();
-
- Address try_catch_handler_address_;
-};
-
-
-class SystemThreadManager {
- public:
- enum ParallelSystemComponent {
- PARALLEL_SWEEPING,
- CONCURRENT_SWEEPING,
- PARALLEL_MARKING
- };
-
- static int NumberOfParallelSystemThreads(ParallelSystemComponent type);
-
- static const int kMaxThreads = 4;
-};
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-#define ISOLATE_DEBUGGER_INIT_LIST(V) \
- V(v8::Debug::EventCallback, debug_event_callback, NULL) \
- V(DebuggerAgent*, debugger_agent_instance, NULL)
-#else
-
-#define ISOLATE_DEBUGGER_INIT_LIST(V)
-
-#endif
-
-#ifdef DEBUG
-
-#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
- V(CommentStatistic, paged_space_comments_statistics, \
- CommentStatistic::kMaxComments + 1)
-#else
-
-#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
-
-#endif
-
-#define ISOLATE_INIT_ARRAY_LIST(V) \
- /* SerializerDeserializer state. */ \
- V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
- V(int, bad_char_shift_table, kUC16AlphabetSize) \
- V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
- V(int, suffix_table, (kBMMaxShift + 1)) \
- V(uint32_t, private_random_seed, 2) \
- ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
-
-typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
-
-#define ISOLATE_INIT_LIST(V) \
- /* SerializerDeserializer state. */ \
- V(int, serialize_partial_snapshot_cache_length, 0) \
- V(int, serialize_partial_snapshot_cache_capacity, 0) \
- V(Object**, serialize_partial_snapshot_cache, NULL) \
- /* Assembler state. */ \
- /* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \
- V(byte*, assembler_spare_buffer, NULL) \
- V(FatalErrorCallback, exception_behavior, NULL) \
- V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
- V(v8::Debug::MessageHandler, message_handler, NULL) \
- /* To distinguish the function templates, so that we can find them in the */ \
- /* function cache of the native context. */ \
- V(int, next_serial_number, 0) \
- V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
- V(bool, always_allow_natives_syntax, false) \
- /* Part of the state of liveedit. */ \
- V(FunctionInfoListener*, active_function_info_listener, NULL) \
- /* State for Relocatable. */ \
- V(Relocatable*, relocatable_top, NULL) \
- /* State for CodeEntry in profile-generator. */ \
- V(CodeGenerator*, current_code_generator, NULL) \
- V(bool, jump_target_compiling_deferred_code, false) \
- V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
- V(Object*, string_stream_current_security_token, NULL) \
- /* TODO(isolates): Release this on destruction? */ \
- V(int*, irregexp_interpreter_backtrack_stack_cache, NULL) \
- /* Serializer state. */ \
- V(ExternalReferenceTable*, external_reference_table, NULL) \
- /* AstNode state. */ \
- V(int, ast_node_id, 0) \
- V(unsigned, ast_node_count, 0) \
- /* SafeStackFrameIterator activations count. */ \
- V(int, safe_stack_iterator_counter, 0) \
- V(uint64_t, enabled_cpu_features, 0) \
- V(CpuProfiler*, cpu_profiler, NULL) \
- V(HeapProfiler*, heap_profiler, NULL) \
- V(bool, observer_delivery_pending, false) \
- ISOLATE_DEBUGGER_INIT_LIST(V)
-
-class Isolate {
- // These forward declarations are required to make the friend declarations in
- // PerIsolateThreadData work on some older versions of gcc.
- class ThreadDataTable;
- class EntryStackItem;
- public:
- ~Isolate();
-
- // A thread has a PerIsolateThreadData instance for each isolate that it has
- // entered. That instance is allocated when the isolate is initially entered
- // and reused on subsequent entries.
- class PerIsolateThreadData {
- public:
- PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
- : isolate_(isolate),
- thread_id_(thread_id),
- stack_limit_(0),
- thread_state_(NULL),
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
- simulator_(NULL),
-#endif
- next_(NULL),
- prev_(NULL) { }
- Isolate* isolate() const { return isolate_; }
- ThreadId thread_id() const { return thread_id_; }
- void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
- uintptr_t stack_limit() const { return stack_limit_; }
- ThreadState* thread_state() const { return thread_state_; }
- void set_thread_state(ThreadState* value) { thread_state_ = value; }
-
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
- Simulator* simulator() const { return simulator_; }
- void set_simulator(Simulator* simulator) {
- simulator_ = simulator;
- }
-#endif
-
- bool Matches(Isolate* isolate, ThreadId thread_id) const {
- return isolate_ == isolate && thread_id_.Equals(thread_id);
- }
-
- private:
- Isolate* isolate_;
- ThreadId thread_id_;
- uintptr_t stack_limit_;
- ThreadState* thread_state_;
-
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
- Simulator* simulator_;
-#endif
-
- PerIsolateThreadData* next_;
- PerIsolateThreadData* prev_;
-
- friend class Isolate;
- friend class ThreadDataTable;
- friend class EntryStackItem;
-
- DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
- };
-
-
- enum AddressId {
-#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
- FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
-#undef DECLARE_ENUM
- kIsolateAddressCount
- };
-
- // Returns the PerIsolateThreadData for the current thread (or NULL if one is
- // not currently set).
- static PerIsolateThreadData* CurrentPerIsolateThreadData() {
- return reinterpret_cast<PerIsolateThreadData*>(
- Thread::GetThreadLocal(per_isolate_thread_data_key_));
- }
-
- // Returns the isolate inside which the current thread is running.
- INLINE(static Isolate* Current()) {
- Isolate* isolate = reinterpret_cast<Isolate*>(
- Thread::GetExistingThreadLocal(isolate_key_));
- ASSERT(isolate != NULL);
- return isolate;
- }
-
- INLINE(static Isolate* UncheckedCurrent()) {
- return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
- }
-
- // Usually called by Init(), but can be called early e.g. to allow
- // testing components that require logging but not the whole
- // isolate.
- //
- // Safe to call more than once.
- void InitializeLoggingAndCounters();
-
- bool Init(Deserializer* des);
-
- bool IsInitialized() { return state_ == INITIALIZED; }
-
- // True if at least one thread Enter'ed this isolate.
- bool IsInUse() { return entry_stack_ != NULL; }
-
- // Destroys the non-default isolates.
- // Sets default isolate into "has_been_disposed" state rather then destroying,
- // for legacy API reasons.
- void TearDown();
-
- static void GlobalTearDown();
-
- bool IsDefaultIsolate() const { return this == default_isolate_; }
-
- // Ensures that process-wide resources and the default isolate have been
- // allocated. It is only necessary to call this method in rare cases, for
- // example if you are using V8 from within the body of a static initializer.
- // Safe to call multiple times.
- static void EnsureDefaultIsolate();
-
- // Find the PerThread for this particular (isolate, thread) combination
- // If one does not yet exist, return null.
- PerIsolateThreadData* FindPerThreadDataForThisThread();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Get the debugger from the default isolate. Preinitializes the
- // default isolate if needed.
- static Debugger* GetDefaultIsolateDebugger();
-#endif
-
- // Get the stack guard from the default isolate. Preinitializes the
- // default isolate if needed.
- static StackGuard* GetDefaultIsolateStackGuard();
-
- // Returns the key used to store the pointer to the current isolate.
- // Used internally for V8 threads that do not execute JavaScript but still
- // are part of the domain of an isolate (like the context switcher).
- static Thread::LocalStorageKey isolate_key() {
- return isolate_key_;
- }
-
- // Returns the key used to store process-wide thread IDs.
- static Thread::LocalStorageKey thread_id_key() {
- return thread_id_key_;
- }
-
- static Thread::LocalStorageKey per_isolate_thread_data_key();
-
- // If a client attempts to create a Locker without specifying an isolate,
- // we assume that the client is using legacy behavior. Set up the current
- // thread to be inside the implicit isolate (or fail a check if we have
- // switched to non-legacy behavior).
- static void EnterDefaultIsolate();
-
- // Mutex for serializing access to break control structures.
- Mutex* break_access() { return break_access_; }
-
- // Mutex for serializing access to debugger.
- Mutex* debugger_access() { return debugger_access_; }
-
- Address get_address_from_id(AddressId id);
-
- // Access to top context (where the current function object was created).
- Context* context() { return thread_local_top_.context_; }
- void set_context(Context* context) {
- ASSERT(context == NULL || context->IsContext());
- thread_local_top_.context_ = context;
- }
- Context** context_address() { return &thread_local_top_.context_; }
-
- SaveContext* save_context() {return thread_local_top_.save_context_; }
- void set_save_context(SaveContext* save) {
- thread_local_top_.save_context_ = save;
- }
-
- // Access to current thread id.
- ThreadId thread_id() { return thread_local_top_.thread_id_; }
- void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
-
- // Interface to pending exception.
- MaybeObject* pending_exception() {
- ASSERT(has_pending_exception());
- return thread_local_top_.pending_exception_;
- }
- bool external_caught_exception() {
- return thread_local_top_.external_caught_exception_;
- }
- void set_external_caught_exception(bool value) {
- thread_local_top_.external_caught_exception_ = value;
- }
- void set_pending_exception(MaybeObject* exception) {
- thread_local_top_.pending_exception_ = exception;
- }
- void clear_pending_exception() {
- thread_local_top_.pending_exception_ = heap_.the_hole_value();
- }
- MaybeObject** pending_exception_address() {
- return &thread_local_top_.pending_exception_;
- }
- bool has_pending_exception() {
- return !thread_local_top_.pending_exception_->IsTheHole();
- }
- void clear_pending_message() {
- thread_local_top_.has_pending_message_ = false;
- thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
- thread_local_top_.pending_message_script_ = NULL;
- }
- v8::TryCatch* try_catch_handler() {
- return thread_local_top_.TryCatchHandler();
- }
- Address try_catch_handler_address() {
- return thread_local_top_.try_catch_handler_address();
- }
- bool* external_caught_exception_address() {
- return &thread_local_top_.external_caught_exception_;
- }
- v8::TryCatch* catcher() {
- return thread_local_top_.catcher_;
- }
- void set_catcher(v8::TryCatch* catcher) {
- thread_local_top_.catcher_ = catcher;
- }
-
- MaybeObject** scheduled_exception_address() {
- return &thread_local_top_.scheduled_exception_;
- }
-
- Address pending_message_obj_address() {
- return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
- }
-
- Address has_pending_message_address() {
- return reinterpret_cast<Address>(&thread_local_top_.has_pending_message_);
- }
-
- Address pending_message_script_address() {
- return reinterpret_cast<Address>(
- &thread_local_top_.pending_message_script_);
- }
-
- MaybeObject* scheduled_exception() {
- ASSERT(has_scheduled_exception());
- return thread_local_top_.scheduled_exception_;
- }
- bool has_scheduled_exception() {
- return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
- }
- void clear_scheduled_exception() {
- thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
- }
-
- bool IsExternallyCaught();
-
- bool is_catchable_by_javascript(MaybeObject* exception) {
- return (!exception->IsOutOfMemory()) &&
- (exception != heap()->termination_exception());
- }
-
- // Serializer.
- void PushToPartialSnapshotCache(Object* obj);
-
- // JS execution stack (see frames.h).
- static Address c_entry_fp(ThreadLocalTop* thread) {
- return thread->c_entry_fp_;
- }
- static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
-
- inline Address* c_entry_fp_address() {
- return &thread_local_top_.c_entry_fp_;
- }
- inline Address* handler_address() { return &thread_local_top_.handler_; }
-
- // Bottom JS entry (see StackTracer::Trace in log.cc).
- static Address js_entry_sp(ThreadLocalTop* thread) {
- return thread->js_entry_sp_;
- }
- inline Address* js_entry_sp_address() {
- return &thread_local_top_.js_entry_sp_;
- }
-
- // Generated code scratch locations.
- void* formal_count_address() { return &thread_local_top_.formal_count_; }
-
- // Returns the global object of the current context. It could be
- // a builtin object, or a JS global object.
- Handle<GlobalObject> global_object() {
- return Handle<GlobalObject>(context()->global_object());
- }
-
- // Returns the global proxy object of the current context.
- Object* global_proxy() {
- return context()->global_proxy();
- }
-
- Handle<JSBuiltinsObject> js_builtins_object() {
- return Handle<JSBuiltinsObject>(thread_local_top_.context_->builtins());
- }
-
- static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
- void FreeThreadResources() { thread_local_top_.Free(); }
-
- // This method is called by the api after operations that may throw
- // exceptions. If an exception was thrown and not handled by an external
- // handler the exception is scheduled to be rethrown when we return to running
- // JavaScript code. If an exception is scheduled true is returned.
- bool OptionalRescheduleException(bool is_bottom_call);
-
- class ExceptionScope {
- public:
- explicit ExceptionScope(Isolate* isolate) :
- // Scope currently can only be used for regular exceptions, not
- // failures like OOM or termination exception.
- isolate_(isolate),
- pending_exception_(isolate_->pending_exception()->ToObjectUnchecked(),
- isolate_),
- catcher_(isolate_->catcher())
- { }
-
- ~ExceptionScope() {
- isolate_->set_catcher(catcher_);
- isolate_->set_pending_exception(*pending_exception_);
- }
-
- private:
- Isolate* isolate_;
- Handle<Object> pending_exception_;
- v8::TryCatch* catcher_;
- };
-
- void SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options);
-
- // Tells whether the current context has experienced an out of memory
- // exception.
- bool is_out_of_memory();
- bool ignore_out_of_memory() {
- return thread_local_top_.ignore_out_of_memory_;
- }
- void set_ignore_out_of_memory(bool value) {
- thread_local_top_.ignore_out_of_memory_ = value;
- }
-
- void PrintCurrentStackTrace(FILE* out);
- void PrintStackTrace(FILE* out, char* thread_data);
- void PrintStack(StringStream* accumulator);
- void PrintStack();
- Handle<String> StackTraceString();
- NO_INLINE(void PushStackTraceAndDie(unsigned int magic,
- Object* object,
- Map* map,
- unsigned int magic2));
- Handle<JSArray> CaptureCurrentStackTrace(
- int frame_limit,
- StackTrace::StackTraceOptions options);
-
- Handle<JSArray> CaptureSimpleStackTrace(Handle<JSObject> error_object,
- Handle<Object> caller,
- int limit);
- void CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object);
-
- // Returns if the top context may access the given global object. If
- // the result is false, the pending exception is guaranteed to be
- // set.
- bool MayNamedAccess(JSObject* receiver,
- Object* key,
- v8::AccessType type);
- bool MayIndexedAccess(JSObject* receiver,
- uint32_t index,
- v8::AccessType type);
-
- void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
- void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
-
- void SetUserObjectComparisonCallback(
- v8::UserObjectComparisonCallback callback);
- inline v8::UserObjectComparisonCallback UserObjectComparisonCallback() {
- return thread_local_top()->user_object_comparison_callback_;
- }
-
- // Exception throwing support. The caller should use the result
- // of Throw() as its return value.
- Failure* Throw(Object* exception, MessageLocation* location = NULL);
- // Re-throw an exception. This involves no error reporting since
- // error reporting was handled when the exception was thrown
- // originally.
- Failure* ReThrow(MaybeObject* exception);
- void ScheduleThrow(Object* exception);
- void ReportPendingMessages();
- // Return pending location if any or unfilled structure.
- MessageLocation GetMessageLocation();
- Failure* ThrowIllegalOperation();
-
- // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
- Failure* PromoteScheduledException();
- void DoThrow(Object* exception, MessageLocation* location);
- // Checks if exception should be reported and finds out if it's
- // caught externally.
- bool ShouldReportException(bool* can_be_caught_externally,
- bool catchable_by_javascript);
-
- // Attempts to compute the current source location, storing the
- // result in the target out parameter.
- void ComputeLocation(MessageLocation* target);
-
- // Override command line flag.
- void TraceException(bool flag);
-
- // Out of resource exception helpers.
- Failure* StackOverflow();
- Failure* TerminateExecution();
-
- // Administration
- void Iterate(ObjectVisitor* v);
- void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
- char* Iterate(ObjectVisitor* v, char* t);
- void IterateThread(ThreadVisitor* v, char* t);
-
-
- // Returns the current native and global context.
- Handle<Context> native_context();
- Handle<Context> global_context();
-
- // Returns the native context of the calling JavaScript code. That
- // is, the native context of the top-most JavaScript frame.
- Handle<Context> GetCallingNativeContext();
-
- void RegisterTryCatchHandler(v8::TryCatch* that);
- void UnregisterTryCatchHandler(v8::TryCatch* that);
-
- char* ArchiveThread(char* to);
- char* RestoreThread(char* from);
-
- static const char* const kStackOverflowMessage;
-
- static const int kUC16AlphabetSize = 256; // See StringSearchBase.
- static const int kBMMaxShift = 250; // See StringSearchBase.
-
- // Accessors.
-#define GLOBAL_ACCESSOR(type, name, initialvalue) \
- inline type name() const { \
- ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
- return name##_; \
- } \
- inline void set_##name(type value) { \
- ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
- name##_ = value; \
- }
- ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
-#undef GLOBAL_ACCESSOR
-
-#define GLOBAL_ARRAY_ACCESSOR(type, name, length) \
- inline type* name() { \
- ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
- return &(name##_)[0]; \
- }
- ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
-#undef GLOBAL_ARRAY_ACCESSOR
-
-#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
- Handle<type> name() { \
- return Handle<type>(context()->native_context()->name(), this); \
- }
- NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
-#undef NATIVE_CONTEXT_FIELD_ACCESSOR
-
- Bootstrapper* bootstrapper() { return bootstrapper_; }
- Counters* counters() {
- // Call InitializeLoggingAndCounters() if logging is needed before
- // the isolate is fully initialized.
- ASSERT(counters_ != NULL);
- return counters_;
- }
- CodeRange* code_range() { return code_range_; }
- RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
- CompilationCache* compilation_cache() { return compilation_cache_; }
- Logger* logger() {
- // Call InitializeLoggingAndCounters() if logging is needed before
- // the isolate is fully initialized.
- ASSERT(logger_ != NULL);
- return logger_;
- }
- StackGuard* stack_guard() { return &stack_guard_; }
- Heap* heap() { return &heap_; }
- StatsTable* stats_table();
- StubCache* stub_cache() { return stub_cache_; }
- DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
- ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
-
- TranscendentalCache* transcendental_cache() const {
- return transcendental_cache_;
- }
-
- MemoryAllocator* memory_allocator() {
- return memory_allocator_;
- }
-
- KeyedLookupCache* keyed_lookup_cache() {
- return keyed_lookup_cache_;
- }
-
- ContextSlotCache* context_slot_cache() {
- return context_slot_cache_;
- }
-
- DescriptorLookupCache* descriptor_lookup_cache() {
- return descriptor_lookup_cache_;
- }
-
- v8::ImplementationUtilities::HandleScopeData* handle_scope_data() {
- return &handle_scope_data_;
- }
- HandleScopeImplementer* handle_scope_implementer() {
- ASSERT(handle_scope_implementer_);
- return handle_scope_implementer_;
- }
- Zone* runtime_zone() { return &runtime_zone_; }
-
- UnicodeCache* unicode_cache() {
- return unicode_cache_;
- }
-
- InnerPointerToCodeCache* inner_pointer_to_code_cache() {
- return inner_pointer_to_code_cache_;
- }
-
- ConsStringIteratorOp* write_iterator() { return write_iterator_; }
-
- GlobalHandles* global_handles() { return global_handles_; }
-
- ThreadManager* thread_manager() { return thread_manager_; }
-
- ContextSwitcher* context_switcher() { return context_switcher_; }
-
- void set_context_switcher(ContextSwitcher* switcher) {
- context_switcher_ = switcher;
- }
-
- StringTracker* string_tracker() { return string_tracker_; }
-
- unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
- return &jsregexp_uncanonicalize_;
- }
-
- unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
- return &jsregexp_canonrange_;
- }
-
- ConsStringIteratorOp* objects_string_compare_iterator_a() {
- return &objects_string_compare_iterator_a_;
- }
-
- ConsStringIteratorOp* objects_string_compare_iterator_b() {
- return &objects_string_compare_iterator_b_;
- }
-
- StaticResource<ConsStringIteratorOp>* objects_string_iterator() {
- return &objects_string_iterator_;
- }
-
- RuntimeState* runtime_state() { return &runtime_state_; }
-
- void set_fp_stubs_generated(bool value) {
- fp_stubs_generated_ = value;
- }
-
- bool fp_stubs_generated() { return fp_stubs_generated_; }
-
- Builtins* builtins() { return &builtins_; }
-
- void NotifyExtensionInstalled() {
- has_installed_extensions_ = true;
- }
-
- bool has_installed_extensions() { return has_installed_extensions_; }
-
- unibrow::Mapping<unibrow::Ecma262Canonicalize>*
- regexp_macro_assembler_canonicalize() {
- return &regexp_macro_assembler_canonicalize_;
- }
-
- RegExpStack* regexp_stack() { return regexp_stack_; }
-
- unibrow::Mapping<unibrow::Ecma262Canonicalize>*
- interp_canonicalize_mapping() {
- return &interp_canonicalize_mapping_;
- }
-
- void* PreallocatedStorageNew(size_t size);
- void PreallocatedStorageDelete(void* p);
- void PreallocatedStorageInit(size_t size);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debugger* debugger() {
- if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
- return debugger_;
- }
- Debug* debug() {
- if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
- return debug_;
- }
-#endif
-
- inline bool IsDebuggerActive();
- inline bool DebuggerHasBreakPoints();
-
-#ifdef DEBUG
- HistogramInfo* heap_histograms() { return heap_histograms_; }
-
- JSObject::SpillInformation* js_spill_information() {
- return &js_spill_information_;
- }
-
- int* code_kind_statistics() { return code_kind_statistics_; }
-
- bool allow_handle_deref() { return allow_handle_deref_; }
- void set_allow_handle_deref(bool allow) { allow_handle_deref_ = allow; }
-#endif
-
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
- bool simulator_initialized() { return simulator_initialized_; }
- void set_simulator_initialized(bool initialized) {
- simulator_initialized_ = initialized;
- }
-
- HashMap* simulator_i_cache() { return simulator_i_cache_; }
- void set_simulator_i_cache(HashMap* hash_map) {
- simulator_i_cache_ = hash_map;
- }
-
- Redirection* simulator_redirection() {
- return simulator_redirection_;
- }
- void set_simulator_redirection(Redirection* redirection) {
- simulator_redirection_ = redirection;
- }
-#endif
-
- Factory* factory() { return reinterpret_cast<Factory*>(this); }
-
- static const int kJSRegexpStaticOffsetsVectorSize = 128;
-
- Address external_callback() {
- return thread_local_top_.external_callback_;
- }
- void set_external_callback(Address callback) {
- thread_local_top_.external_callback_ = callback;
- }
-
- StateTag current_vm_state() {
- return thread_local_top_.current_vm_state_;
- }
-
- void SetCurrentVMState(StateTag state) {
- if (RuntimeProfiler::IsEnabled()) {
- // Make sure thread local top is initialized.
- ASSERT(thread_local_top_.isolate_ == this);
- StateTag current_state = thread_local_top_.current_vm_state_;
- if (current_state != JS && state == JS) {
- // Non-JS -> JS transition.
- RuntimeProfiler::IsolateEnteredJS(this);
- } else if (current_state == JS && state != JS) {
- // JS -> non-JS transition.
- RuntimeProfiler::IsolateExitedJS(this);
- } else {
- // Other types of state transitions are not interesting to the
- // runtime profiler, because they don't affect whether we're
- // in JS or not.
- ASSERT((current_state == JS) == (state == JS));
- }
- }
- thread_local_top_.current_vm_state_ = state;
- }
-
- void SetData(void* data) { embedder_data_ = data; }
- void* GetData() { return embedder_data_; }
-
- LookupResult* top_lookup_result() {
- return thread_local_top_.top_lookup_result_;
- }
- void SetTopLookupResult(LookupResult* top) {
- thread_local_top_.top_lookup_result_ = top;
- }
-
- bool context_exit_happened() {
- return context_exit_happened_;
- }
- void set_context_exit_happened(bool context_exit_happened) {
- context_exit_happened_ = context_exit_happened;
- }
-
- double time_millis_since_init() {
- return OS::TimeCurrentMillis() - time_millis_at_init_;
- }
-
- DateCache* date_cache() {
- return date_cache_;
- }
-
- void set_date_cache(DateCache* date_cache) {
- if (date_cache != date_cache_) {
- delete date_cache_;
- }
- date_cache_ = date_cache;
- }
-
- CodeStubInterfaceDescriptor*
- code_stub_interface_descriptor(int index);
-
- void IterateDeferredHandles(ObjectVisitor* visitor);
- void LinkDeferredHandles(DeferredHandles* deferred_handles);
- void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
-
- OptimizingCompilerThread* optimizing_compiler_thread() {
- return &optimizing_compiler_thread_;
- }
-
- // PreInits and returns a default isolate. Needed when a new thread tries
- // to create a Locker for the first time (the lock itself is in the isolate).
- // TODO(svenpanne) This method is on death row...
- static v8::Isolate* GetDefaultIsolateForLocking();
-
- MarkingThread** marking_threads() {
- return marking_thread_;
- }
-
- SweeperThread** sweeper_threads() {
- return sweeper_thread_;
- }
-
- private:
- Isolate();
-
- friend struct GlobalState;
- friend struct InitializeGlobalState;
-
- enum State {
- UNINITIALIZED, // Some components may not have been allocated.
- INITIALIZED // All components are fully initialized.
- };
-
- // These fields are accessed through the API, offsets must be kept in sync
- // with v8::internal::Internals (in include/v8.h) constants. This is also
- // verified in Isolate::Init() using runtime checks.
- State state_; // Will be padded to kApiPointerSize.
- void* embedder_data_;
- Heap heap_;
-
- // The per-process lock should be acquired before the ThreadDataTable is
- // modified.
- class ThreadDataTable {
- public:
- ThreadDataTable();
- ~ThreadDataTable();
-
- PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
- void Insert(PerIsolateThreadData* data);
- void Remove(Isolate* isolate, ThreadId thread_id);
- void Remove(PerIsolateThreadData* data);
- void RemoveAllThreads(Isolate* isolate);
-
- private:
- PerIsolateThreadData* list_;
- };
-
- // These items form a stack synchronously with threads Enter'ing and Exit'ing
- // the Isolate. The top of the stack points to a thread which is currently
- // running the Isolate. When the stack is empty, the Isolate is considered
- // not entered by any thread and can be Disposed.
- // If the same thread enters the Isolate more then once, the entry_count_
- // is incremented rather then a new item pushed to the stack.
- class EntryStackItem {
- public:
- EntryStackItem(PerIsolateThreadData* previous_thread_data,
- Isolate* previous_isolate,
- EntryStackItem* previous_item)
- : entry_count(1),
- previous_thread_data(previous_thread_data),
- previous_isolate(previous_isolate),
- previous_item(previous_item) { }
-
- int entry_count;
- PerIsolateThreadData* previous_thread_data;
- Isolate* previous_isolate;
- EntryStackItem* previous_item;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
- };
-
- // This mutex protects highest_thread_id_, thread_data_table_ and
- // default_isolate_.
- static Mutex* process_wide_mutex_;
-
- static Thread::LocalStorageKey per_isolate_thread_data_key_;
- static Thread::LocalStorageKey isolate_key_;
- static Thread::LocalStorageKey thread_id_key_;
- static Isolate* default_isolate_;
- static ThreadDataTable* thread_data_table_;
-
- void Deinit();
-
- static void SetIsolateThreadLocals(Isolate* isolate,
- PerIsolateThreadData* data);
-
- // Allocate and insert PerIsolateThreadData into the ThreadDataTable
- // (regardless of whether such data already exists).
- PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);
-
- // Find the PerThread for this particular (isolate, thread) combination.
- // If one does not yet exist, allocate a new one.
- PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
-
- // Initializes the current thread to run this Isolate.
- // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
- // at the same time, this should be prevented using external locking.
- void Enter();
-
- // Exits the current thread. The previosuly entered Isolate is restored
- // for the thread.
- // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
- // at the same time, this should be prevented using external locking.
- void Exit();
-
- void PreallocatedMemoryThreadStart();
- void PreallocatedMemoryThreadStop();
- void InitializeThreadLocal();
-
- void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
- void MarkCompactPrologue(bool is_compacting,
- ThreadLocalTop* archived_thread_data);
- void MarkCompactEpilogue(bool is_compacting,
- ThreadLocalTop* archived_thread_data);
-
- void FillCache();
-
- void PropagatePendingExceptionToExternalTryCatch();
-
- void InitializeDebugger();
-
- // Traverse prototype chain to find out whether the object is derived from
- // the Error object.
- bool IsErrorObject(Handle<Object> obj);
-
- EntryStackItem* entry_stack_;
- int stack_trace_nesting_level_;
- StringStream* incomplete_message_;
- // The preallocated memory thread singleton.
- PreallocatedMemoryThread* preallocated_memory_thread_;
- Address isolate_addresses_[kIsolateAddressCount + 1]; // NOLINT
- NoAllocationStringAllocator* preallocated_message_space_;
- Bootstrapper* bootstrapper_;
- RuntimeProfiler* runtime_profiler_;
- CompilationCache* compilation_cache_;
- Counters* counters_;
- CodeRange* code_range_;
- Mutex* break_access_;
- Atomic32 debugger_initialized_;
- Mutex* debugger_access_;
- Logger* logger_;
- StackGuard stack_guard_;
- StatsTable* stats_table_;
- StubCache* stub_cache_;
- DeoptimizerData* deoptimizer_data_;
- ThreadLocalTop thread_local_top_;
- bool capture_stack_trace_for_uncaught_exceptions_;
- int stack_trace_for_uncaught_exceptions_frame_limit_;
- StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
- TranscendentalCache* transcendental_cache_;
- MemoryAllocator* memory_allocator_;
- KeyedLookupCache* keyed_lookup_cache_;
- ContextSlotCache* context_slot_cache_;
- DescriptorLookupCache* descriptor_lookup_cache_;
- v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
- HandleScopeImplementer* handle_scope_implementer_;
- UnicodeCache* unicode_cache_;
- Zone runtime_zone_;
- PreallocatedStorage in_use_list_;
- PreallocatedStorage free_list_;
- bool preallocated_storage_preallocated_;
- InnerPointerToCodeCache* inner_pointer_to_code_cache_;
- ConsStringIteratorOp* write_iterator_;
- GlobalHandles* global_handles_;
- ContextSwitcher* context_switcher_;
- ThreadManager* thread_manager_;
- RuntimeState runtime_state_;
- bool fp_stubs_generated_;
- Builtins builtins_;
- bool has_installed_extensions_;
- StringTracker* string_tracker_;
- unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
- unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
- ConsStringIteratorOp objects_string_compare_iterator_a_;
- ConsStringIteratorOp objects_string_compare_iterator_b_;
- StaticResource<ConsStringIteratorOp> objects_string_iterator_;
- unibrow::Mapping<unibrow::Ecma262Canonicalize>
- regexp_macro_assembler_canonicalize_;
- RegExpStack* regexp_stack_;
- DateCache* date_cache_;
- unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
- CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
-
- // The garbage collector should be a little more aggressive when it knows
- // that a context was recently exited.
- bool context_exit_happened_;
-
- // Time stamp at initialization.
- double time_millis_at_init_;
-
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
- bool simulator_initialized_;
- HashMap* simulator_i_cache_;
- Redirection* simulator_redirection_;
-#endif
-
-#ifdef DEBUG
- // A static array of histogram info for each type.
- HistogramInfo heap_histograms_[LAST_TYPE + 1];
- JSObject::SpillInformation js_spill_information_;
- int code_kind_statistics_[Code::NUMBER_OF_KINDS];
-
- bool allow_handle_deref_;
-#endif
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debugger* debugger_;
- Debug* debug_;
-#endif
-
-#define GLOBAL_BACKING_STORE(type, name, initialvalue) \
- type name##_;
- ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
-#undef GLOBAL_BACKING_STORE
-
-#define GLOBAL_ARRAY_BACKING_STORE(type, name, length) \
- type name##_[length];
- ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
-#undef GLOBAL_ARRAY_BACKING_STORE
-
-#ifdef DEBUG
- // This class is huge and has a number of fields controlled by
- // preprocessor defines. Make sure the offsets of these fields agree
- // between compilation units.
-#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
- static const intptr_t name##_debug_offset_;
- ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
- ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
-#undef ISOLATE_FIELD_OFFSET
-#endif
-
- DeferredHandles* deferred_handles_head_;
- OptimizingCompilerThread optimizing_compiler_thread_;
- MarkingThread** marking_thread_;
- SweeperThread** sweeper_thread_;
-
- friend class ExecutionAccess;
- friend class HandleScopeImplementer;
- friend class IsolateInitializer;
- friend class MarkingThread;
- friend class OptimizingCompilerThread;
- friend class SweeperThread;
- friend class ThreadManager;
- friend class Simulator;
- friend class StackGuard;
- friend class ThreadId;
- friend class TestMemoryAllocatorScope;
- friend class TestCodeRangeScope;
- friend class v8::Isolate;
- friend class v8::Locker;
- friend class v8::Unlocker;
-
- DISALLOW_COPY_AND_ASSIGN(Isolate);
-};
-
-
-// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
-// class as a work around for a bug in the generated code found with these
-// versions of GCC. See V8 issue 122 for details.
-class SaveContext BASE_EMBEDDED {
- public:
- inline explicit SaveContext(Isolate* isolate);
-
- ~SaveContext() {
- if (context_.is_null()) {
- Isolate* isolate = Isolate::Current();
- isolate->set_context(NULL);
- isolate->set_save_context(prev_);
- } else {
- Isolate* isolate = context_->GetIsolate();
- isolate->set_context(*context_);
- isolate->set_save_context(prev_);
- }
- }
-
- Handle<Context> context() { return context_; }
- SaveContext* prev() { return prev_; }
-
- // Returns true if this save context is below a given JavaScript frame.
- bool IsBelowFrame(JavaScriptFrame* frame) {
- return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
- }
-
- private:
- Handle<Context> context_;
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- Handle<Context> dummy_;
-#endif
- SaveContext* prev_;
- Address c_entry_fp_;
-};
-
-
-class AssertNoContextChange BASE_EMBEDDED {
-#ifdef DEBUG
- public:
- AssertNoContextChange() :
- scope_(Isolate::Current()),
- context_(Isolate::Current()->context(), Isolate::Current()) {
- }
-
- ~AssertNoContextChange() {
- ASSERT(Isolate::Current()->context() == *context_);
- }
-
- private:
- HandleScope scope_;
- Handle<Context> context_;
-#else
- public:
- AssertNoContextChange() { }
-#endif
-};
-
-
-class ExecutionAccess BASE_EMBEDDED {
- public:
- explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
- Lock(isolate);
- }
- ~ExecutionAccess() { Unlock(isolate_); }
-
- static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
- static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }
-
- static bool TryLock(Isolate* isolate) {
- return isolate->break_access_->TryLock();
- }
-
- private:
- Isolate* isolate_;
-};
-
-
-// Support for checking for stack-overflows in C++ code.
-class StackLimitCheck BASE_EMBEDDED {
- public:
- explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
-
- bool HasOverflowed() const {
- StackGuard* stack_guard = isolate_->stack_guard();
- return (reinterpret_cast<uintptr_t>(this) < stack_guard->real_climit());
- }
- private:
- Isolate* isolate_;
-};
-
-
-// Support for temporarily postponing interrupts. When the outermost
-// postpone scope is left the interrupts will be re-enabled and any
-// interrupts that occurred while in the scope will be taken into
-// account.
-class PostponeInterruptsScope BASE_EMBEDDED {
- public:
- explicit PostponeInterruptsScope(Isolate* isolate)
- : stack_guard_(isolate->stack_guard()) {
- stack_guard_->thread_local_.postpone_interrupts_nesting_++;
- stack_guard_->DisableInterrupts();
- }
-
- ~PostponeInterruptsScope() {
- if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) {
- stack_guard_->EnableInterrupts();
- }
- }
- private:
- StackGuard* stack_guard_;
-};
-
-
-// Temporary macros for accessing current isolate and its subobjects.
-// They provide better readability, especially when used a lot in the code.
-#define HEAP (v8::internal::Isolate::Current()->heap())
-#define FACTORY (v8::internal::Isolate::Current()->factory())
-#define ISOLATE (v8::internal::Isolate::Current())
-#define LOGGER (v8::internal::Isolate::Current()->logger())
-
-
-// Tells whether the native context is marked with out of memory.
-inline bool Context::has_out_of_memory() {
- return native_context()->out_of_memory()->IsTrue();
-}
-
-
-// Mark the native context with out of memory.
-inline void Context::mark_out_of_memory() {
- native_context()->set_out_of_memory(HEAP->true_value());
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ISOLATE_H_
diff --git a/src/3rdparty/v8/src/json-parser.h b/src/3rdparty/v8/src/json-parser.h
deleted file mode 100644
index 28ef8b3..0000000
--- a/src/3rdparty/v8/src/json-parser.h
+++ /dev/null
@@ -1,708 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JSON_PARSER_H_
-#define V8_JSON_PARSER_H_
-
-#include "v8.h"
-
-#include "char-predicates-inl.h"
-#include "v8conversions.h"
-#include "messages.h"
-#include "spaces-inl.h"
-#include "token.h"
-
-namespace v8 {
-namespace internal {
-
-// A simple json parser.
-template <bool seq_ascii>
-class JsonParser BASE_EMBEDDED {
- public:
- static Handle<Object> Parse(Handle<String> source, Zone* zone) {
- return JsonParser().ParseJson(source, zone);
- }
-
- static const int kEndOfString = -1;
-
- private:
- // Parse a string containing a single JSON value.
- Handle<Object> ParseJson(Handle<String> source, Zone* zone);
-
- inline void Advance() {
- position_++;
- if (position_ >= source_length_) {
- c0_ = kEndOfString;
- } else if (seq_ascii) {
- c0_ = seq_source_->SeqOneByteStringGet(position_);
- } else {
- c0_ = source_->Get(position_);
- }
- }
-
- // The JSON lexical grammar is specified in the ECMAScript 5 standard,
- // section 15.12.1.1. The only allowed whitespace characters between tokens
- // are tab, carriage-return, newline and space.
-
- inline void AdvanceSkipWhitespace() {
- do {
- Advance();
- } while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r');
- }
-
- inline void SkipWhitespace() {
- while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r') {
- Advance();
- }
- }
-
- inline uc32 AdvanceGetChar() {
- Advance();
- return c0_;
- }
-
- // Checks that current charater is c.
- // If so, then consume c and skip whitespace.
- inline bool MatchSkipWhiteSpace(uc32 c) {
- if (c0_ == c) {
- AdvanceSkipWhitespace();
- return true;
- }
- return false;
- }
-
- // A JSON string (production JSONString) is subset of valid JavaScript string
- // literals. The string must only be double-quoted (not single-quoted), and
- // the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
- // four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
- Handle<String> ParseJsonString() {
- return ScanJsonString<false>();
- }
- Handle<String> ParseJsonInternalizedString() {
- return ScanJsonString<true>();
- }
- template <bool is_internalized>
- Handle<String> ScanJsonString();
- // Creates a new string and copies prefix[start..end] into the beginning
- // of it. Then scans the rest of the string, adding characters after the
- // prefix. Called by ScanJsonString when reaching a '\' or non-ASCII char.
- template <typename StringType, typename SinkChar>
- Handle<String> SlowScanJsonString(Handle<String> prefix, int start, int end);
-
- // A JSON number (production JSONNumber) is a subset of the valid JavaScript
- // decimal number literals.
- // It includes an optional minus sign, must have at least one
- // digit before and after a decimal point, may not have prefixed zeros (unless
- // the integer part is zero), and may include an exponent part (e.g., "e-10").
- // Hexadecimal and octal numbers are not allowed.
- Handle<Object> ParseJsonNumber();
-
- // Parse a single JSON value from input (grammar production JSONValue).
- // A JSON value is either a (double-quoted) string literal, a number literal,
- // one of "true", "false", or "null", or an object or array literal.
- Handle<Object> ParseJsonValue();
-
- // Parse a JSON object literal (grammar production JSONObject).
- // An object literal is a squiggly-braced and comma separated sequence
- // (possibly empty) of key/value pairs, where the key is a JSON string
- // literal, the value is a JSON value, and the two are separated by a colon.
- // A JSON array doesn't allow numbers and identifiers as keys, like a
- // JavaScript array.
- Handle<Object> ParseJsonObject();
-
- // Parses a JSON array literal (grammar production JSONArray). An array
- // literal is a square-bracketed and comma separated sequence (possibly empty)
- // of JSON values.
- // A JSON array doesn't allow leaving out values from the sequence, nor does
- // it allow a terminal comma, like a JavaScript array does.
- Handle<Object> ParseJsonArray();
-
-
- // Mark that a parsing error has happened at the current token, and
- // return a null handle. Primarily for readability.
- inline Handle<Object> ReportUnexpectedCharacter() {
- return Handle<Object>::null();
- }
-
- inline Isolate* isolate() { return isolate_; }
- inline Factory* factory() { return factory_; }
- inline Handle<JSFunction> object_constructor() { return object_constructor_; }
- inline Zone* zone() const { return zone_; }
-
- static const int kInitialSpecialStringLength = 1024;
- static const int kPretenureTreshold = 100 * 1024;
-
-
- private:
- Handle<String> source_;
- int source_length_;
- Handle<SeqOneByteString> seq_source_;
-
- PretenureFlag pretenure_;
- Isolate* isolate_;
- Factory* factory_;
- Handle<JSFunction> object_constructor_;
- uc32 c0_;
- int position_;
- Zone* zone_;
-};
-
-template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
- Zone* zone) {
- isolate_ = source->map()->GetHeap()->isolate();
- factory_ = isolate_->factory();
- object_constructor_ = Handle<JSFunction>(
- isolate()->native_context()->object_function(), isolate());
- zone_ = zone;
- FlattenString(source);
- source_ = source;
- source_length_ = source_->length();
- pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
-
- // Optimized fast case where we only have ASCII characters.
- if (seq_ascii) {
- seq_source_ = Handle<SeqOneByteString>::cast(source_);
- }
-
- // Set initial position right before the string.
- position_ = -1;
- // Advance to the first character (possibly EOS)
- AdvanceSkipWhitespace();
- Handle<Object> result = ParseJsonValue();
- if (result.is_null() || c0_ != kEndOfString) {
- // Some exception (for example stack overflow) is already pending.
- if (isolate_->has_pending_exception()) return Handle<Object>::null();
-
- // Parse failed. Current character is the unexpected token.
- const char* message;
- Factory* factory = this->factory();
- Handle<JSArray> array;
-
- switch (c0_) {
- case kEndOfString:
- message = "unexpected_eos";
- array = factory->NewJSArray(0);
- break;
- case '-':
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- message = "unexpected_token_number";
- array = factory->NewJSArray(0);
- break;
- case '"':
- message = "unexpected_token_string";
- array = factory->NewJSArray(0);
- break;
- default:
- message = "unexpected_token";
- Handle<Object> name =
- LookupSingleCharacterStringFromCode(isolate_, c0_);
- Handle<FixedArray> element = factory->NewFixedArray(1);
- element->set(0, *name);
- array = factory->NewJSArrayWithElements(element);
- break;
- }
-
- MessageLocation location(factory->NewScript(source),
- position_,
- position_ + 1);
- Handle<Object> result = factory->NewSyntaxError(message, array);
- isolate()->Throw(*result, &location);
- return Handle<Object>::null();
- }
- return result;
-}
-
-
-// Parse any JSON value.
-template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
- StackLimitCheck stack_check(isolate_);
- if (stack_check.HasOverflowed()) {
- isolate_->StackOverflow();
- return Handle<Object>::null();
- }
-
- if (c0_ == '"') return ParseJsonString();
- if ((c0_ >= '0' && c0_ <= '9') || c0_ == '-') return ParseJsonNumber();
- if (c0_ == '{') return ParseJsonObject();
- if (c0_ == '[') return ParseJsonArray();
- if (c0_ == 'f') {
- if (AdvanceGetChar() == 'a' && AdvanceGetChar() == 'l' &&
- AdvanceGetChar() == 's' && AdvanceGetChar() == 'e') {
- AdvanceSkipWhitespace();
- return factory()->false_value();
- }
- return ReportUnexpectedCharacter();
- }
- if (c0_ == 't') {
- if (AdvanceGetChar() == 'r' && AdvanceGetChar() == 'u' &&
- AdvanceGetChar() == 'e') {
- AdvanceSkipWhitespace();
- return factory()->true_value();
- }
- return ReportUnexpectedCharacter();
- }
- if (c0_ == 'n') {
- if (AdvanceGetChar() == 'u' && AdvanceGetChar() == 'l' &&
- AdvanceGetChar() == 'l') {
- AdvanceSkipWhitespace();
- return factory()->null_value();
- }
- return ReportUnexpectedCharacter();
- }
- return ReportUnexpectedCharacter();
-}
-
-
-// Parse a JSON object. Position must be right at '{'.
-template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
- Handle<JSObject> json_object =
- factory()->NewJSObject(object_constructor(), pretenure_);
- ASSERT_EQ(c0_, '{');
-
- AdvanceSkipWhitespace();
- if (c0_ != '}') {
- do {
- if (c0_ != '"') return ReportUnexpectedCharacter();
-
- int start_position = position_;
- Advance();
-
- uint32_t index = 0;
- if (c0_ >= '0' && c0_ <= '9') {
- // Maybe an array index, try to parse it.
- if (c0_ == '0') {
- // With a leading zero, the string has to be "0" only to be an index.
- Advance();
- } else {
- do {
- int d = c0_ - '0';
- if (index > 429496729U - ((d > 5) ? 1 : 0)) break;
- index = (index * 10) + d;
- Advance();
- } while (c0_ >= '0' && c0_ <= '9');
- }
-
- if (c0_ == '"') {
- // Successfully parsed index, parse and store element.
- AdvanceSkipWhitespace();
-
- if (c0_ != ':') return ReportUnexpectedCharacter();
- AdvanceSkipWhitespace();
- Handle<Object> value = ParseJsonValue();
- if (value.is_null()) return ReportUnexpectedCharacter();
-
- JSObject::SetOwnElement(json_object, index, value, kNonStrictMode);
- continue;
- }
- // Not an index, fallback to the slow path.
- }
-
- position_ = start_position;
-#ifdef DEBUG
- c0_ = '"';
-#endif
-
- Handle<String> key = ParseJsonInternalizedString();
- if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
-
- AdvanceSkipWhitespace();
- Handle<Object> value = ParseJsonValue();
- if (value.is_null()) return ReportUnexpectedCharacter();
-
- if (JSObject::TryTransitionToField(json_object, key)) {
- int index = json_object->LastAddedFieldIndex();
- json_object->FastPropertyAtPut(index, *value);
- } else {
- JSObject::SetLocalPropertyIgnoreAttributes(
- json_object, key, value, NONE);
- }
- } while (MatchSkipWhiteSpace(','));
- if (c0_ != '}') {
- return ReportUnexpectedCharacter();
- }
- }
- AdvanceSkipWhitespace();
- return json_object;
-}
-
-// Parse a JSON array. Position must be right at '['.
-template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
- ZoneScope zone_scope(zone(), DELETE_ON_EXIT);
- ZoneList<Handle<Object> > elements(4, zone());
- ASSERT_EQ(c0_, '[');
-
- AdvanceSkipWhitespace();
- if (c0_ != ']') {
- do {
- Handle<Object> element = ParseJsonValue();
- if (element.is_null()) return ReportUnexpectedCharacter();
- elements.Add(element, zone());
- } while (MatchSkipWhiteSpace(','));
- if (c0_ != ']') {
- return ReportUnexpectedCharacter();
- }
- }
- AdvanceSkipWhitespace();
- // Allocate a fixed array with all the elements.
- Handle<FixedArray> fast_elements =
- factory()->NewFixedArray(elements.length(), pretenure_);
- for (int i = 0, n = elements.length(); i < n; i++) {
- fast_elements->set(i, *elements[i]);
- }
- return factory()->NewJSArrayWithElements(
- fast_elements, FAST_ELEMENTS, pretenure_);
-}
-
-
-template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJsonNumber() {
- bool negative = false;
- int beg_pos = position_;
- if (c0_ == '-') {
- Advance();
- negative = true;
- }
- if (c0_ == '0') {
- Advance();
- // Prefix zero is only allowed if it's the only digit before
- // a decimal point or exponent.
- if ('0' <= c0_ && c0_ <= '9') return ReportUnexpectedCharacter();
- } else {
- int i = 0;
- int digits = 0;
- if (c0_ < '1' || c0_ > '9') return ReportUnexpectedCharacter();
- do {
- i = i * 10 + c0_ - '0';
- digits++;
- Advance();
- } while (c0_ >= '0' && c0_ <= '9');
- if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
- SkipWhitespace();
- return Handle<Smi>(Smi::FromInt((negative ? -i : i)), isolate());
- }
- }
- if (c0_ == '.') {
- Advance();
- if (c0_ < '0' || c0_ > '9') return ReportUnexpectedCharacter();
- do {
- Advance();
- } while (c0_ >= '0' && c0_ <= '9');
- }
- if (AsciiAlphaToLower(c0_) == 'e') {
- Advance();
- if (c0_ == '-' || c0_ == '+') Advance();
- if (c0_ < '0' || c0_ > '9') return ReportUnexpectedCharacter();
- do {
- Advance();
- } while (c0_ >= '0' && c0_ <= '9');
- }
- int length = position_ - beg_pos;
- double number;
- if (seq_ascii) {
- Vector<const uint8_t> chars(seq_source_->GetChars() + beg_pos, length);
- number = StringToDouble(isolate()->unicode_cache(),
- Vector<const char>::cast(chars),
- NO_FLAGS, // Hex, octal or trailing junk.
- OS::nan_value());
- } else {
- Vector<uint8_t> buffer = Vector<uint8_t>::New(length);
- String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
- Vector<const uint8_t> result =
- Vector<const uint8_t>(buffer.start(), length);
- number = StringToDouble(isolate()->unicode_cache(),
- // TODO(dcarney): Convert StringToDouble to uint_t.
- Vector<const char>::cast(result),
- NO_FLAGS, // Hex, octal or trailing junk.
- 0.0);
- buffer.Dispose();
- }
- SkipWhitespace();
- return factory()->NewNumber(number, pretenure_);
-}
-
-
-template <typename StringType>
-inline void SeqStringSet(Handle<StringType> seq_str, int i, uc32 c);
-
-template <>
-inline void SeqStringSet(Handle<SeqTwoByteString> seq_str, int i, uc32 c) {
- seq_str->SeqTwoByteStringSet(i, c);
-}
-
-template <>
-inline void SeqStringSet(Handle<SeqOneByteString> seq_str, int i, uc32 c) {
- seq_str->SeqOneByteStringSet(i, c);
-}
-
-template <typename StringType>
-inline Handle<StringType> NewRawString(Factory* factory,
- int length,
- PretenureFlag pretenure);
-
-template <>
-inline Handle<SeqTwoByteString> NewRawString(Factory* factory,
- int length,
- PretenureFlag pretenure) {
- return factory->NewRawTwoByteString(length, pretenure);
-}
-
-template <>
-inline Handle<SeqOneByteString> NewRawString(Factory* factory,
- int length,
- PretenureFlag pretenure) {
- return factory->NewRawOneByteString(length, pretenure);
-}
-
-
-// Scans the rest of a JSON string starting from position_ and writes
-// prefix[start..end] along with the scanned characters into a
-// sequential string of type StringType.
-template <bool seq_ascii>
-template <typename StringType, typename SinkChar>
-Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
- Handle<String> prefix, int start, int end) {
- int count = end - start;
- int max_length = count + source_length_ - position_;
- int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
- Handle<StringType> seq_str =
- NewRawString<StringType>(factory(), length, pretenure_);
- // Copy prefix into seq_str.
- SinkChar* dest = seq_str->GetChars();
- String::WriteToFlat(*prefix, dest, start, end);
-
- while (c0_ != '"') {
- // Check for control character (0x00-0x1f) or unterminated string (<0).
- if (c0_ < 0x20) return Handle<String>::null();
- if (count >= length) {
- // We need to create a longer sequential string for the result.
- return SlowScanJsonString<StringType, SinkChar>(seq_str, 0, count);
- }
- if (c0_ != '\\') {
- // If the sink can contain UC16 characters, or source_ contains only
- // ASCII characters, there's no need to test whether we can store the
- // character. Otherwise check whether the UC16 source character can fit
- // in the ASCII sink.
- if (sizeof(SinkChar) == kUC16Size ||
- seq_ascii ||
- c0_ <= String::kMaxOneByteCharCode) {
- SeqStringSet(seq_str, count++, c0_);
- Advance();
- } else {
- // StringType is SeqOneByteString and we just read a non-ASCII char.
- return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str, 0, count);
- }
- } else {
- Advance(); // Advance past the \.
- switch (c0_) {
- case '"':
- case '\\':
- case '/':
- SeqStringSet(seq_str, count++, c0_);
- break;
- case 'b':
- SeqStringSet(seq_str, count++, '\x08');
- break;
- case 'f':
- SeqStringSet(seq_str, count++, '\x0c');
- break;
- case 'n':
- SeqStringSet(seq_str, count++, '\x0a');
- break;
- case 'r':
- SeqStringSet(seq_str, count++, '\x0d');
- break;
- case 't':
- SeqStringSet(seq_str, count++, '\x09');
- break;
- case 'u': {
- uc32 value = 0;
- for (int i = 0; i < 4; i++) {
- Advance();
- int digit = HexValue(c0_);
- if (digit < 0) {
- return Handle<String>::null();
- }
- value = value * 16 + digit;
- }
- if (sizeof(SinkChar) == kUC16Size ||
- value <= String::kMaxOneByteCharCode) {
- SeqStringSet(seq_str, count++, value);
- break;
- } else {
- // StringType is SeqOneByteString and we just read a non-ASCII char.
- position_ -= 6; // Rewind position_ to \ in \uxxxx.
- Advance();
- return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str,
- 0,
- count);
- }
- }
- default:
- return Handle<String>::null();
- }
- Advance();
- }
- }
- // Shrink seq_string length to count.
- if (isolate()->heap()->InNewSpace(*seq_str)) {
- isolate()->heap()->new_space()->
- template ShrinkStringAtAllocationBoundary<StringType>(
- *seq_str, count);
- } else {
- int string_size = StringType::SizeFor(count);
- int allocated_string_size = StringType::SizeFor(length);
- int delta = allocated_string_size - string_size;
- Address start_filler_object = seq_str->address() + string_size;
- seq_str->set_length(count);
- isolate()->heap()->CreateFillerObjectAt(start_filler_object, delta);
- }
- ASSERT_EQ('"', c0_);
- // Advance past the last '"'.
- AdvanceSkipWhitespace();
- return seq_str;
-}
-
-
-template <bool seq_ascii>
-template <bool is_internalized>
-Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
- ASSERT_EQ('"', c0_);
- Advance();
- if (c0_ == '"') {
- AdvanceSkipWhitespace();
- return factory()->empty_string();
- }
-
- if (seq_ascii && is_internalized) {
- // Fast path for existing internalized strings. If the the string being
- // parsed is not a known internalized string, contains backslashes or
- // unexpectedly reaches the end of string, return with an empty handle.
- uint32_t running_hash = isolate()->heap()->HashSeed();
- int position = position_;
- uc32 c0 = c0_;
- do {
- if (c0 == '\\') {
- c0_ = c0;
- int beg_pos = position_;
- position_ = position;
- return SlowScanJsonString<SeqOneByteString, uint8_t>(source_,
- beg_pos,
- position_);
- }
- if (c0 < 0x20) return Handle<String>::null();
- if (static_cast<uint32_t>(c0) >
- unibrow::Utf16::kMaxNonSurrogateCharCode) {
- running_hash =
- StringHasher::AddCharacterCore(running_hash,
- unibrow::Utf16::LeadSurrogate(c0));
- running_hash =
- StringHasher::AddCharacterCore(running_hash,
- unibrow::Utf16::TrailSurrogate(c0));
- } else {
- running_hash = StringHasher::AddCharacterCore(running_hash, c0);
- }
- position++;
- if (position >= source_length_) return Handle<String>::null();
- c0 = seq_source_->SeqOneByteStringGet(position);
- } while (c0 != '"');
- int length = position - position_;
- uint32_t hash = (length <= String::kMaxHashCalcLength)
- ? StringHasher::GetHashCore(running_hash) : length;
- Vector<const uint8_t> string_vector(
- seq_source_->GetChars() + position_, length);
- StringTable* string_table = isolate()->heap()->string_table();
- uint32_t capacity = string_table->Capacity();
- uint32_t entry = StringTable::FirstProbe(hash, capacity);
- uint32_t count = 1;
- while (true) {
- Object* element = string_table->KeyAt(entry);
- if (element == isolate()->heap()->undefined_value()) {
- // Lookup failure.
- break;
- }
- if (element != isolate()->heap()->the_hole_value() &&
- String::cast(element)->IsOneByteEqualTo(string_vector)) {
- // Lookup success, update the current position.
- position_ = position;
- // Advance past the last '"'.
- AdvanceSkipWhitespace();
- return Handle<String>(String::cast(element), isolate());
- }
- entry = StringTable::NextProbe(entry, count++, capacity);
- }
- }
-
- int beg_pos = position_;
- // Fast case for ASCII only without escape characters.
- do {
- // Check for control character (0x00-0x1f) or unterminated string (<0).
- if (c0_ < 0x20) return Handle<String>::null();
- if (c0_ != '\\') {
- if (seq_ascii || c0_ <= String::kMaxOneByteCharCode) {
- Advance();
- } else {
- return SlowScanJsonString<SeqTwoByteString, uc16>(source_,
- beg_pos,
- position_);
- }
- } else {
- return SlowScanJsonString<SeqOneByteString, uint8_t>(source_,
- beg_pos,
- position_);
- }
- } while (c0_ != '"');
- int length = position_ - beg_pos;
- Handle<String> result;
- if (seq_ascii && is_internalized) {
- result = factory()->InternalizeOneByteString(seq_source_, beg_pos, length);
- } else {
- result = factory()->NewRawOneByteString(length, pretenure_);
- uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
- String::WriteToFlat(*source_, dest, beg_pos, position_);
- }
- ASSERT_EQ('"', c0_);
- // Advance past the last '"'.
- AdvanceSkipWhitespace();
- return result;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_JSON_PARSER_H_
diff --git a/src/3rdparty/v8/src/json-stringifier.h b/src/3rdparty/v8/src/json-stringifier.h
deleted file mode 100644
index e9121d4..0000000
--- a/src/3rdparty/v8/src/json-stringifier.h
+++ /dev/null
@@ -1,788 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JSON_STRINGIFIER_H_
-#define V8_JSON_STRINGIFIER_H_
-
-#include "v8.h"
-#include "v8utils.h"
-#include "v8conversions.h"
-
-namespace v8 {
-namespace internal {
-
-class BasicJsonStringifier BASE_EMBEDDED {
- public:
- explicit BasicJsonStringifier(Isolate* isolate);
-
- MaybeObject* Stringify(Handle<Object> object);
-
- private:
- static const int kInitialPartLength = 32;
- static const int kMaxPartLength = 16 * 1024;
- static const int kPartLengthGrowthFactor = 2;
-
- enum Result { UNCHANGED, SUCCESS, EXCEPTION, CIRCULAR, STACK_OVERFLOW };
-
- void Extend();
-
- void ChangeEncoding();
-
- void ShrinkCurrentPart();
-
- template <bool is_ascii, typename Char>
- INLINE(void Append_(Char c));
-
- template <bool is_ascii, typename Char>
- INLINE(void Append_(const Char* chars));
-
- INLINE(void Append(uint8_t c)) {
- if (is_ascii_) {
- Append_<true>(c);
- } else {
- Append_<false>(c);
- }
- }
-
- INLINE(void AppendAscii(const char* chars)) {
- if (is_ascii_) {
- Append_<true>(reinterpret_cast<const uint8_t*>(chars));
- } else {
- Append_<false>(reinterpret_cast<const uint8_t*>(chars));
- }
- }
-
- Handle<Object> ApplyToJsonFunction(Handle<Object> object,
- Handle<Object> key);
-
- Result SerializeGeneric(Handle<Object> object,
- Handle<Object> key,
- bool deferred_comma,
- bool deferred_key);
-
- // Entry point to serialize the object.
- INLINE(Result SerializeObject(Handle<Object> obj)) {
- return Serialize_<false>(obj, false, factory_->empty_string());
- }
-
- // Serialize an array element.
- // The index may serve as argument for the toJSON function.
- INLINE(Result SerializeElement(Isolate* isolate,
- Handle<Object> object,
- int i)) {
- return Serialize_<false>(object,
- false,
- Handle<Object>(Smi::FromInt(i), isolate));
- }
-
- // Serialize a object property.
- // The key may or may not be serialized depending on the property.
- // The key may also serve as argument for the toJSON function.
- INLINE(Result SerializeProperty(Handle<Object> object,
- bool deferred_comma,
- Handle<String> deferred_key)) {
- ASSERT(!deferred_key.is_null());
- return Serialize_<true>(object, deferred_comma, deferred_key);
- }
-
- template <bool deferred_string_key>
- Result Serialize_(Handle<Object> object, bool comma, Handle<Object> key);
-
- void SerializeDeferredKey(bool deferred_comma, Handle<Object> deferred_key) {
- if (deferred_comma) Append(',');
- SerializeString(Handle<String>::cast(deferred_key));
- Append(':');
- }
-
- Result SerializeSmi(Smi* object);
-
- Result SerializeDouble(double number);
- INLINE(Result SerializeHeapNumber(Handle<HeapNumber> object)) {
- return SerializeDouble(object->value());
- }
-
- Result SerializeJSValue(Handle<JSValue> object);
-
- INLINE(Result SerializeJSArray(Handle<JSArray> object));
- INLINE(Result SerializeJSObject(Handle<JSObject> object));
-
- Result SerializeJSArraySlow(Handle<JSArray> object, int length);
-
- void SerializeString(Handle<String> object);
-
- template <typename SrcChar, typename DestChar>
- INLINE(void SerializeStringUnchecked_(const SrcChar* src,
- DestChar* dest,
- int length));
-
- template <bool is_ascii, typename Char>
- INLINE(void SerializeString_(Handle<String> string));
-
- template <typename Char>
- INLINE(bool DoNotEscape(Char c));
-
- template <typename Char>
- INLINE(Vector<const Char> GetCharVector(Handle<String> string));
-
- Result StackPush(Handle<Object> object);
- void StackPop();
-
- INLINE(Handle<String> accumulator()) {
- return Handle<String>(String::cast(accumulator_store_->value()), isolate_);
- }
-
- INLINE(void set_accumulator(Handle<String> string)) {
- return accumulator_store_->set_value(*string);
- }
-
- Isolate* isolate_;
- Factory* factory_;
- // We use a value wrapper for the string accumulator to keep the
- // (indirect) handle to it in the outermost handle scope.
- Handle<JSValue> accumulator_store_;
- Handle<String> current_part_;
- Handle<String> tojson_string_;
- Handle<JSArray> stack_;
- int current_index_;
- int part_length_;
- bool is_ascii_;
-
- static const int kJsonEscapeTableEntrySize = 8;
- static const char* const JsonEscapeTable;
-};
-
-
-// Translation table to escape ASCII characters.
-// Table entries start at a multiple of 8 and are null-terminated.
-const char* const BasicJsonStringifier::JsonEscapeTable =
- "\\u0000\0 \\u0001\0 \\u0002\0 \\u0003\0 "
- "\\u0004\0 \\u0005\0 \\u0006\0 \\u0007\0 "
- "\\b\0 \\t\0 \\n\0 \\u000b\0 "
- "\\f\0 \\r\0 \\u000e\0 \\u000f\0 "
- "\\u0010\0 \\u0011\0 \\u0012\0 \\u0013\0 "
- "\\u0014\0 \\u0015\0 \\u0016\0 \\u0017\0 "
- "\\u0018\0 \\u0019\0 \\u001a\0 \\u001b\0 "
- "\\u001c\0 \\u001d\0 \\u001e\0 \\u001f\0 "
- " \0 !\0 \\\"\0 #\0 "
- "$\0 %\0 &\0 '\0 "
- "(\0 )\0 *\0 +\0 "
- ",\0 -\0 .\0 /\0 "
- "0\0 1\0 2\0 3\0 "
- "4\0 5\0 6\0 7\0 "
- "8\0 9\0 :\0 ;\0 "
- "<\0 =\0 >\0 ?\0 "
- "@\0 A\0 B\0 C\0 "
- "D\0 E\0 F\0 G\0 "
- "H\0 I\0 J\0 K\0 "
- "L\0 M\0 N\0 O\0 "
- "P\0 Q\0 R\0 S\0 "
- "T\0 U\0 V\0 W\0 "
- "X\0 Y\0 Z\0 [\0 "
- "\\\\\0 ]\0 ^\0 _\0 "
- "`\0 a\0 b\0 c\0 "
- "d\0 e\0 f\0 g\0 "
- "h\0 i\0 j\0 k\0 "
- "l\0 m\0 n\0 o\0 "
- "p\0 q\0 r\0 s\0 "
- "t\0 u\0 v\0 w\0 "
- "x\0 y\0 z\0 {\0 "
- "|\0 }\0 ~\0 \177\0 "
- "\200\0 \201\0 \202\0 \203\0 "
- "\204\0 \205\0 \206\0 \207\0 "
- "\210\0 \211\0 \212\0 \213\0 "
- "\214\0 \215\0 \216\0 \217\0 "
- "\220\0 \221\0 \222\0 \223\0 "
- "\224\0 \225\0 \226\0 \227\0 "
- "\230\0 \231\0 \232\0 \233\0 "
- "\234\0 \235\0 \236\0 \237\0 "
- "\240\0 \241\0 \242\0 \243\0 "
- "\244\0 \245\0 \246\0 \247\0 "
- "\250\0 \251\0 \252\0 \253\0 "
- "\254\0 \255\0 \256\0 \257\0 "
- "\260\0 \261\0 \262\0 \263\0 "
- "\264\0 \265\0 \266\0 \267\0 "
- "\270\0 \271\0 \272\0 \273\0 "
- "\274\0 \275\0 \276\0 \277\0 "
- "\300\0 \301\0 \302\0 \303\0 "
- "\304\0 \305\0 \306\0 \307\0 "
- "\310\0 \311\0 \312\0 \313\0 "
- "\314\0 \315\0 \316\0 \317\0 "
- "\320\0 \321\0 \322\0 \323\0 "
- "\324\0 \325\0 \326\0 \327\0 "
- "\330\0 \331\0 \332\0 \333\0 "
- "\334\0 \335\0 \336\0 \337\0 "
- "\340\0 \341\0 \342\0 \343\0 "
- "\344\0 \345\0 \346\0 \347\0 "
- "\350\0 \351\0 \352\0 \353\0 "
- "\354\0 \355\0 \356\0 \357\0 "
- "\360\0 \361\0 \362\0 \363\0 "
- "\364\0 \365\0 \366\0 \367\0 "
- "\370\0 \371\0 \372\0 \373\0 "
- "\374\0 \375\0 \376\0 \377\0 ";
-
-BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
- : isolate_(isolate), current_index_(0), is_ascii_(true) {
- factory_ = isolate_->factory();
- accumulator_store_ = Handle<JSValue>::cast(
- factory_->ToObject(factory_->empty_string()));
- part_length_ = kInitialPartLength;
- current_part_ = factory_->NewRawOneByteString(kInitialPartLength);
- tojson_string_ =
- factory_->InternalizeOneByteString(STATIC_ASCII_VECTOR("toJSON"));
- stack_ = factory_->NewJSArray(8);
-}
-
-
-MaybeObject* BasicJsonStringifier::Stringify(Handle<Object> object) {
- switch (SerializeObject(object)) {
- case UNCHANGED:
- return isolate_->heap()->undefined_value();
- case SUCCESS:
- ShrinkCurrentPart();
- return *factory_->NewConsString(accumulator(), current_part_);
- case CIRCULAR:
- return isolate_->Throw(*factory_->NewTypeError(
- "circular_structure", HandleVector<Object>(NULL, 0)));
- case STACK_OVERFLOW:
- return isolate_->StackOverflow();
- default:
- return Failure::Exception();
- }
-}
-
-
-template <bool is_ascii, typename Char>
-void BasicJsonStringifier::Append_(Char c) {
- if (is_ascii) {
- SeqOneByteString::cast(*current_part_)->SeqOneByteStringSet(
- current_index_++, c);
- } else {
- SeqTwoByteString::cast(*current_part_)->SeqTwoByteStringSet(
- current_index_++, c);
- }
- if (current_index_ == part_length_) Extend();
-}
-
-
-template <bool is_ascii, typename Char>
-void BasicJsonStringifier::Append_(const Char* chars) {
- for ( ; *chars != '\0'; chars++) Append_<is_ascii, Char>(*chars);
-}
-
-
-Handle<Object> BasicJsonStringifier::ApplyToJsonFunction(
- Handle<Object> object, Handle<Object> key) {
- LookupResult lookup(isolate_);
- JSObject::cast(*object)->LookupRealNamedProperty(*tojson_string_, &lookup);
- if (!lookup.IsProperty()) return object;
- PropertyAttributes attr;
- Handle<Object> fun =
- Object::GetProperty(object, object, &lookup, tojson_string_, &attr);
- if (!fun->IsJSFunction()) return object;
-
- // Call toJSON function.
- if (key->IsSmi()) key = factory_->NumberToString(key);
- Handle<Object> argv[] = { key };
- bool has_exception = false;
- HandleScope scope(isolate_);
- object = Execution::Call(fun, object, 1, argv, &has_exception);
- // Return empty handle to signal an exception.
- if (has_exception) return Handle<Object>::null();
- return scope.CloseAndEscape(object);
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::StackPush(
- Handle<Object> object) {
- StackLimitCheck check(isolate_);
- if (check.HasOverflowed()) return STACK_OVERFLOW;
-
- int length = Smi::cast(stack_->length())->value();
- FixedArray* elements = FixedArray::cast(stack_->elements());
- for (int i = 0; i < length; i++) {
- if (elements->get(i) == *object) {
- return CIRCULAR;
- }
- }
- stack_->EnsureSize(length + 1);
- FixedArray::cast(stack_->elements())->set(length, *object);
- stack_->set_length(Smi::FromInt(length + 1));
- return SUCCESS;
-}
-
-
-void BasicJsonStringifier::StackPop() {
- int length = Smi::cast(stack_->length())->value();
- stack_->set_length(Smi::FromInt(length - 1));
-}
-
-
-template <bool deferred_string_key>
-BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
- Handle<Object> object, bool comma, Handle<Object> key) {
- if (object->IsJSObject()) {
- object = ApplyToJsonFunction(object, key);
- if (object.is_null()) return EXCEPTION;
- }
-
- if (object->IsSmi()) {
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeSmi(Smi::cast(*object));
- }
-
- switch (HeapObject::cast(*object)->map()->instance_type()) {
- case HEAP_NUMBER_TYPE:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeHeapNumber(Handle<HeapNumber>::cast(object));
- case ODDBALL_TYPE:
- switch (Oddball::cast(*object)->kind()) {
- case Oddball::kFalse:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- AppendAscii("false");
- return SUCCESS;
- case Oddball::kTrue:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- AppendAscii("true");
- return SUCCESS;
- case Oddball::kNull:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- AppendAscii("null");
- return SUCCESS;
- default:
- return UNCHANGED;
- }
- case JS_ARRAY_TYPE:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeJSArray(Handle<JSArray>::cast(object));
- case JS_VALUE_TYPE:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeJSValue(Handle<JSValue>::cast(object));
- case JS_FUNCTION_TYPE:
- return UNCHANGED;
- default:
- if (object->IsString()) {
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- SerializeString(Handle<String>::cast(object));
- return SUCCESS;
- } else if (object->IsJSObject()) {
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeJSObject(Handle<JSObject>::cast(object));
- } else {
- return SerializeGeneric(object, key, comma, deferred_string_key);
- }
- }
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
- Handle<Object> object,
- Handle<Object> key,
- bool deferred_comma,
- bool deferred_key) {
- Handle<JSObject> builtins(isolate_->native_context()->builtins());
- Handle<JSFunction> builtin =
- Handle<JSFunction>::cast(GetProperty(builtins, "JSONSerializeAdapter"));
-
- Handle<Object> argv[] = { key, object };
- bool has_exception = false;
- Handle<Object> result =
- Execution::Call(builtin, object, 2, argv, &has_exception);
- if (has_exception) return EXCEPTION;
- if (result->IsUndefined()) return UNCHANGED;
- if (deferred_key) {
- if (key->IsSmi()) key = factory_->NumberToString(key);
- SerializeDeferredKey(deferred_comma, key);
- }
-
- Handle<String> result_string = Handle<String>::cast(result);
- // Shrink current part, attach it to the accumulator, also attach the result
- // string to the accumulator, and allocate a new part.
- ShrinkCurrentPart(); // Shrink.
- part_length_ = kInitialPartLength; // Allocate conservatively.
- Extend(); // Attach current part and allocate new part.
- // Attach result string to the accumulator.
- set_accumulator(factory_->NewConsString(accumulator(), result_string));
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
- Handle<JSValue> object) {
- bool has_exception = false;
- String* class_name = object->class_name();
- if (class_name == isolate_->heap()->String_string()) {
- Handle<Object> value = Execution::ToString(object, &has_exception);
- if (has_exception) return EXCEPTION;
- SerializeString(Handle<String>::cast(value));
- } else if (class_name == isolate_->heap()->Number_string()) {
- Handle<Object> value = Execution::ToNumber(object, &has_exception);
- if (has_exception) return EXCEPTION;
- if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
- SerializeHeapNumber(Handle<HeapNumber>::cast(value));
- } else {
- ASSERT(class_name == isolate_->heap()->Boolean_string());
- Object* value = JSValue::cast(*object)->value();
- ASSERT(value->IsBoolean());
- AppendAscii(value->IsTrue() ? "true" : "false");
- }
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) {
- static const int kBufferSize = 100;
- char chars[kBufferSize];
- Vector<char> buffer(chars, kBufferSize);
- AppendAscii(IntToCString(object->value(), buffer));
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeDouble(
- double number) {
- if (isinf(number) || isnan(number)) {
- AppendAscii("null");
- return SUCCESS;
- }
- static const int kBufferSize = 100;
- char chars[kBufferSize];
- Vector<char> buffer(chars, kBufferSize);
- AppendAscii(DoubleToCString(number, buffer));
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
- Handle<JSArray> object) {
- HandleScope handle_scope(isolate_);
- Result stack_push = StackPush(object);
- if (stack_push != SUCCESS) return stack_push;
- int length = Smi::cast(object->length())->value();
- Append('[');
- switch (object->GetElementsKind()) {
- case FAST_SMI_ELEMENTS: {
- Handle<FixedArray> elements(
- FixedArray::cast(object->elements()), isolate_);
- for (int i = 0; i < length; i++) {
- if (i > 0) Append(',');
- SerializeSmi(Smi::cast(elements->get(i)));
- }
- break;
- }
- case FAST_DOUBLE_ELEMENTS: {
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(object->elements()), isolate_);
- for (int i = 0; i < length; i++) {
- if (i > 0) Append(',');
- SerializeDouble(elements->get_scalar(i));
- }
- break;
- }
- case FAST_ELEMENTS: {
- Handle<FixedArray> elements(
- FixedArray::cast(object->elements()), isolate_);
- for (int i = 0; i < length; i++) {
- if (i > 0) Append(',');
- Result result =
- SerializeElement(isolate_,
- Handle<Object>(elements->get(i), isolate_),
- i);
- if (result == SUCCESS) continue;
- if (result == UNCHANGED) {
- AppendAscii("null");
- } else {
- return result;
- }
- }
- break;
- }
- // TODO(yangguo): The FAST_HOLEY_* cases could be handled in a faster way.
- // They resemble the non-holey cases except that a prototype chain lookup
- // is necessary for holes.
- default: {
- Result result = SerializeJSArraySlow(object, length);
- if (result != SUCCESS) return result;
- break;
- }
- }
- Append(']');
- StackPop();
- current_part_ = handle_scope.CloseAndEscape(current_part_);
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
- Handle<JSArray> object, int length) {
- for (int i = 0; i < length; i++) {
- if (i > 0) Append(',');
- Handle<Object> element = Object::GetElement(object, i);
- if (element->IsUndefined()) {
- AppendAscii("null");
- } else {
- Result result = SerializeElement(object->GetIsolate(), element, i);
- if (result == SUCCESS) continue;
- if (result == UNCHANGED) {
- AppendAscii("null");
- } else {
- return result;
- }
- }
- }
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
- Handle<JSObject> object) {
- HandleScope handle_scope(isolate_);
- Result stack_push = StackPush(object);
- if (stack_push != SUCCESS) return stack_push;
- if (object->IsJSGlobalProxy()) {
- object = Handle<JSObject>(
- JSObject::cast(object->GetPrototype()), isolate_);
- ASSERT(object->IsGlobalObject());
- }
-
- Append('{');
- bool comma = false;
-
- if (object->HasFastProperties() &&
- !object->HasIndexedInterceptor() &&
- !object->HasNamedInterceptor() &&
- object->elements()->length() == 0) {
- Handle<Map> map(object->map());
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
- Handle<String> key(map->instance_descriptors()->GetKey(i), isolate_);
- PropertyDetails details = map->instance_descriptors()->GetDetails(i);
- if (details.IsDontEnum() || details.IsDeleted()) continue;
- Handle<Object> property;
- if (details.type() == FIELD && *map == object->map()) {
- property = Handle<Object>(
- object->FastPropertyAt(
- map->instance_descriptors()->GetFieldIndex(i)),
- isolate_);
- } else {
- property = GetProperty(isolate_, object, key);
- if (property.is_null()) return EXCEPTION;
- }
- Result result = SerializeProperty(property, comma, key);
- if (!comma && result == SUCCESS) comma = true;
- if (result >= EXCEPTION) return result;
- }
- } else {
- bool has_exception = false;
- Handle<FixedArray> contents =
- GetKeysInFixedArrayFor(object, LOCAL_ONLY, &has_exception);
- if (has_exception) return EXCEPTION;
-
- for (int i = 0; i < contents->length(); i++) {
- Object* key = contents->get(i);
- Handle<String> key_handle;
- Handle<Object> property;
- if (key->IsString()) {
- key_handle = Handle<String>(String::cast(key), isolate_);
- property = GetProperty(isolate_, object, key_handle);
- } else {
- ASSERT(key->IsNumber());
- key_handle = factory_->NumberToString(Handle<Object>(key, isolate_));
- uint32_t index;
- if (key->IsSmi()) {
- property = Object::GetElement(object, Smi::cast(key)->value());
- } else if (key_handle->AsArrayIndex(&index)) {
- property = Object::GetElement(object, index);
- } else {
- property = GetProperty(isolate_, object, key_handle);
- }
- }
- if (property.is_null()) return EXCEPTION;
- Result result = SerializeProperty(property, comma, key_handle);
- if (!comma && result == SUCCESS) comma = true;
- if (result >= EXCEPTION) return result;
- }
- }
-
- Append('}');
- StackPop();
- current_part_ = handle_scope.CloseAndEscape(current_part_);
- return SUCCESS;
-}
-
-
-void BasicJsonStringifier::ShrinkCurrentPart() {
- ASSERT(current_index_ < part_length_);
- current_part_ = Handle<String>(
- SeqString::cast(*current_part_)->Truncate(current_index_), isolate_);
-}
-
-
-void BasicJsonStringifier::Extend() {
- set_accumulator(factory_->NewConsString(accumulator(), current_part_));
- if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) {
- part_length_ *= kPartLengthGrowthFactor;
- }
- if (is_ascii_) {
- current_part_ = factory_->NewRawOneByteString(part_length_);
- } else {
- current_part_ = factory_->NewRawTwoByteString(part_length_);
- }
- current_index_ = 0;
-}
-
-
-void BasicJsonStringifier::ChangeEncoding() {
- ShrinkCurrentPart();
- set_accumulator(factory_->NewConsString(accumulator(), current_part_));
- current_part_ = factory_->NewRawTwoByteString(part_length_);
- current_index_ = 0;
- is_ascii_ = false;
-}
-
-
-template <typename SrcChar, typename DestChar>
-void BasicJsonStringifier::SerializeStringUnchecked_(const SrcChar* src,
- DestChar* dest,
- int length) {
- dest += current_index_;
- DestChar* dest_start = dest;
-
- // Assert that uc16 character is not truncated down to 8 bit.
- // The <uc16, char> version of this method must not be called.
- ASSERT(sizeof(*dest) >= sizeof(*src));
-
- for (int i = 0; i < length; i++) {
- SrcChar c = src[i];
- if (DoNotEscape(c)) {
- *(dest++) = static_cast<DestChar>(c);
- } else {
- const char* chars = &JsonEscapeTable[c * kJsonEscapeTableEntrySize];
- while (*chars != '\0') *(dest++) = *(chars++);
- }
- }
-
- current_index_ += static_cast<int>(dest - dest_start);
-}
-
-
-template <bool is_ascii, typename Char>
-void BasicJsonStringifier::SerializeString_(Handle<String> string) {
- int length = string->length();
- Append_<is_ascii, char>('"');
- // We make a rough estimate to find out if the current string can be
- // serialized without allocating a new string part. The worst case length of
- // an escaped character is 6. Shifting the remainin string length right by 3
- // is a more pessimistic estimate, but faster to calculate.
-
- if (((part_length_ - current_index_) >> 3) > length) {
- AssertNoAllocation no_allocation;
- Vector<const Char> vector = GetCharVector<Char>(string);
- if (is_ascii) {
- SerializeStringUnchecked_(
- vector.start(),
- SeqOneByteString::cast(*current_part_)->GetChars(),
- length);
- } else {
- SerializeStringUnchecked_(
- vector.start(),
- SeqTwoByteString::cast(*current_part_)->GetChars(),
- length);
- }
- } else {
- String* string_location = *string;
- Vector<const Char> vector = GetCharVector<Char>(string);
- for (int i = 0; i < length; i++) {
- Char c = vector[i];
- if (DoNotEscape(c)) {
- Append_<is_ascii, Char>(c);
- } else {
- Append_<is_ascii, uint8_t>(
- reinterpret_cast<const uint8_t*>(
- &JsonEscapeTable[c * kJsonEscapeTableEntrySize]));
- }
- // If GC moved the string, we need to refresh the vector.
- if (*string != string_location) {
- vector = GetCharVector<Char>(string);
- string_location = *string;
- }
- }
- }
-
- Append_<is_ascii, uint8_t>('"');
-}
-
-
-template <>
-bool BasicJsonStringifier::DoNotEscape(uint8_t c) {
- return c >= '#' && c <= '~' && c != '\\';
-}
-
-
-template <>
-bool BasicJsonStringifier::DoNotEscape(uint16_t c) {
- return c >= '#' && c != '\\' && c != 0x7f;
-}
-
-
-template <>
-Vector<const uint8_t> BasicJsonStringifier::GetCharVector(
- Handle<String> string) {
- String::FlatContent flat = string->GetFlatContent();
- ASSERT(flat.IsAscii());
- return flat.ToOneByteVector();
-}
-
-
-template <>
-Vector<const uc16> BasicJsonStringifier::GetCharVector(Handle<String> string) {
- String::FlatContent flat = string->GetFlatContent();
- ASSERT(flat.IsTwoByte());
- return flat.ToUC16Vector();
-}
-
-
-void BasicJsonStringifier::SerializeString(Handle<String> object) {
- FlattenString(object);
- String::FlatContent flat = object->GetFlatContent();
- if (is_ascii_) {
- if (flat.IsAscii()) {
- SerializeString_<true, uint8_t>(object);
- } else {
- ChangeEncoding();
- SerializeString(object);
- }
- } else {
- if (flat.IsAscii()) {
- SerializeString_<false, uint8_t>(object);
- } else {
- SerializeString_<false, uc16>(object);
- }
- }
-}
-
-} } // namespace v8::internal
-
-#endif // V8_JSON_STRINGIFIER_H_
diff --git a/src/3rdparty/v8/src/json.js b/src/3rdparty/v8/src/json.js
deleted file mode 100644
index e94d3c8..0000000
--- a/src/3rdparty/v8/src/json.js
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-var $JSON = global.JSON;
-
-function Revive(holder, name, reviver) {
- var val = holder[name];
- if (IS_OBJECT(val)) {
- if (IS_ARRAY(val)) {
- var length = val.length;
- for (var i = 0; i < length; i++) {
- var newElement = Revive(val, $String(i), reviver);
- val[i] = newElement;
- }
- } else {
- for (var p in val) {
- if (%_CallFunction(val, p, ObjectHasOwnProperty)) {
- var newElement = Revive(val, p, reviver);
- if (IS_UNDEFINED(newElement)) {
- delete val[p];
- } else {
- val[p] = newElement;
- }
- }
- }
- }
- }
- return %_CallFunction(holder, name, val, reviver);
-}
-
-function JSONParse(text, reviver) {
- var unfiltered = %ParseJson(TO_STRING_INLINE(text));
- if (IS_SPEC_FUNCTION(reviver)) {
- return Revive({'': unfiltered}, '', reviver);
- } else {
- return unfiltered;
- }
-}
-
-function SerializeArray(value, replacer, stack, indent, gap) {
- if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', $Array());
- }
- var stepback = indent;
- indent += gap;
- var partial = new InternalArray();
- var len = value.length;
- for (var i = 0; i < len; i++) {
- var strP = JSONSerialize($String(i), value, replacer, stack,
- indent, gap);
- if (IS_UNDEFINED(strP)) {
- strP = "null";
- }
- partial.push(strP);
- }
- var final;
- if (gap == "") {
- final = "[" + partial.join(",") + "]";
- } else if (partial.length > 0) {
- var separator = ",\n" + indent;
- final = "[\n" + indent + partial.join(separator) + "\n" +
- stepback + "]";
- } else {
- final = "[]";
- }
- stack.pop();
- return final;
-}
-
-function SerializeObject(value, replacer, stack, indent, gap) {
- if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', $Array());
- }
- var stepback = indent;
- indent += gap;
- var partial = new InternalArray();
- if (IS_ARRAY(replacer)) {
- var length = replacer.length;
- for (var i = 0; i < length; i++) {
- if (%_CallFunction(replacer, i, ObjectHasOwnProperty)) {
- var p = replacer[i];
- var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
- if (!IS_UNDEFINED(strP)) {
- var member = %QuoteJSONString(p) + ":";
- if (gap != "") member += " ";
- member += strP;
- partial.push(member);
- }
- }
- }
- } else {
- for (var p in value) {
- if (%_CallFunction(value, p, ObjectHasOwnProperty)) {
- var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
- if (!IS_UNDEFINED(strP)) {
- var member = %QuoteJSONString(p) + ":";
- if (gap != "") member += " ";
- member += strP;
- partial.push(member);
- }
- }
- }
- }
- var final;
- if (gap == "") {
- final = "{" + partial.join(",") + "}";
- } else if (partial.length > 0) {
- var separator = ",\n" + indent;
- final = "{\n" + indent + partial.join(separator) + "\n" +
- stepback + "}";
- } else {
- final = "{}";
- }
- stack.pop();
- return final;
-}
-
-function JSONSerialize(key, holder, replacer, stack, indent, gap) {
- var value = holder[key];
- if (IS_SPEC_OBJECT(value)) {
- var toJSON = value.toJSON;
- if (IS_SPEC_FUNCTION(toJSON)) {
- value = %_CallFunction(value, key, toJSON);
- }
- }
- if (IS_SPEC_FUNCTION(replacer)) {
- value = %_CallFunction(holder, key, value, replacer);
- }
- if (IS_STRING(value)) {
- return %QuoteJSONString(value);
- } else if (IS_NUMBER(value)) {
- return JSON_NUMBER_TO_STRING(value);
- } else if (IS_BOOLEAN(value)) {
- return value ? "true" : "false";
- } else if (IS_NULL(value)) {
- return "null";
- } else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) {
- // Non-callable object. If it's a primitive wrapper, it must be unwrapped.
- if (IS_ARRAY(value)) {
- return SerializeArray(value, replacer, stack, indent, gap);
- } else if (IS_NUMBER_WRAPPER(value)) {
- value = ToNumber(value);
- return JSON_NUMBER_TO_STRING(value);
- } else if (IS_STRING_WRAPPER(value)) {
- return %QuoteJSONString(ToString(value));
- } else if (IS_BOOLEAN_WRAPPER(value)) {
- return %_ValueOf(value) ? "true" : "false";
- } else {
- return SerializeObject(value, replacer, stack, indent, gap);
- }
- }
- // Undefined or a callable object.
- return void 0;
-}
-
-
-function JSONStringify(value, replacer, space) {
- if (%_ArgumentsLength() == 1) {
- return %BasicJSONStringify(value);
- }
- if (IS_OBJECT(space)) {
- // Unwrap 'space' if it is wrapped
- if (IS_NUMBER_WRAPPER(space)) {
- space = ToNumber(space);
- } else if (IS_STRING_WRAPPER(space)) {
- space = ToString(space);
- }
- }
- var gap;
- if (IS_NUMBER(space)) {
- space = MathMax(0, MathMin(ToInteger(space), 10));
- gap = %_SubString(" ", 0, space);
- } else if (IS_STRING(space)) {
- if (space.length > 10) {
- gap = %_SubString(space, 0, 10);
- } else {
- gap = space;
- }
- } else {
- gap = "";
- }
- return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
-}
-
-
-function SetUpJSON() {
- %CheckIsBootstrapping();
- InstallFunctions($JSON, DONT_ENUM, $Array(
- "parse", JSONParse,
- "stringify", JSONStringify
- ));
-}
-
-
-function JSONSerializeAdapter(key, object) {
- var holder = {};
- holder[key] = object;
- // No need to pass the actual holder since there is no replacer function.
- return JSONSerialize(key, holder, void 0, new InternalArray(), "", "");
-}
-
-SetUpJSON();
diff --git a/src/3rdparty/v8/src/jsregexp-inl.h b/src/3rdparty/v8/src/jsregexp-inl.h
deleted file mode 100644
index 3ef07d8..0000000
--- a/src/3rdparty/v8/src/jsregexp-inl.h
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_JSREGEXP_INL_H_
-#define V8_JSREGEXP_INL_H_
-
-#include "allocation.h"
-#include "handles.h"
-#include "heap.h"
-#include "jsregexp.h"
-#include "objects.h"
-
-namespace v8 {
-namespace internal {
-
-
-RegExpImpl::GlobalCache::~GlobalCache() {
- // Deallocate the register array if we allocated it in the constructor
- // (as opposed to using the existing jsregexp_static_offsets_vector).
- if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- DeleteArray(register_array_);
- }
-}
-
-
-int32_t* RegExpImpl::GlobalCache::FetchNext() {
- current_match_index_++;
- if (current_match_index_ >= num_matches_) {
- // Current batch of results exhausted.
- // Fail if last batch was not even fully filled.
- if (num_matches_ < max_matches_) {
- num_matches_ = 0; // Signal failed match.
- return NULL;
- }
-
- int32_t* last_match =
- &register_array_[(current_match_index_ - 1) * registers_per_match_];
- int last_end_index = last_match[1];
-
- if (regexp_->TypeTag() == JSRegExp::ATOM) {
- num_matches_ = RegExpImpl::AtomExecRaw(regexp_,
- subject_,
- last_end_index,
- register_array_,
- register_array_size_);
- } else {
- int last_start_index = last_match[0];
- if (last_start_index == last_end_index) last_end_index++;
- if (last_end_index > subject_->length()) {
- num_matches_ = 0; // Signal failed match.
- return NULL;
- }
- num_matches_ = RegExpImpl::IrregexpExecRaw(regexp_,
- subject_,
- last_end_index,
- register_array_,
- register_array_size_);
- }
-
- if (num_matches_ <= 0) return NULL;
- current_match_index_ = 0;
- return register_array_;
- } else {
- return &register_array_[current_match_index_ * registers_per_match_];
- }
-}
-
-
-int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
- int index = current_match_index_ * registers_per_match_;
- if (num_matches_ == 0) {
- // After a failed match we shift back by one result.
- index -= registers_per_match_;
- }
- return &register_array_[index];
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_JSREGEXP_INL_H_
diff --git a/src/3rdparty/v8/src/jsregexp.cc b/src/3rdparty/v8/src/jsregexp.cc
deleted file mode 100644
index e73b1d4..0000000
--- a/src/3rdparty/v8/src/jsregexp.cc
+++ /dev/null
@@ -1,6150 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "compiler.h"
-#include "execution.h"
-#include "factory.h"
-#include "jsregexp.h"
-#include "jsregexp-inl.h"
-#include "platform.h"
-#include "string-search.h"
-#include "runtime.h"
-#include "compilation-cache.h"
-#include "string-stream.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-macro-assembler-tracer.h"
-#include "regexp-macro-assembler-irregexp.h"
-#include "regexp-stack.h"
-
-#ifndef V8_INTERPRETED_REGEXP
-#if V8_TARGET_ARCH_IA32
-#include "ia32/regexp-macro-assembler-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/regexp-macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/regexp-macro-assembler-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/regexp-macro-assembler-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-#endif
-
-#include "interpreter-irregexp.h"
-
-
-namespace v8 {
-namespace internal {
-
-Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor,
- Handle<String> pattern,
- Handle<String> flags,
- bool* has_pending_exception) {
- // Call the construct code with 2 arguments.
- Handle<Object> argv[] = { pattern, flags };
- return Execution::New(constructor, ARRAY_SIZE(argv), argv,
- has_pending_exception);
-}
-
-
-static JSRegExp::Flags RegExpFlagsFromString(Handle<String> str) {
- int flags = JSRegExp::NONE;
- for (int i = 0; i < str->length(); i++) {
- switch (str->Get(i)) {
- case 'i':
- flags |= JSRegExp::IGNORE_CASE;
- break;
- case 'g':
- flags |= JSRegExp::GLOBAL;
- break;
- case 'm':
- flags |= JSRegExp::MULTILINE;
- break;
- }
- }
- return JSRegExp::Flags(flags);
-}
-
-
-static inline void ThrowRegExpException(Handle<JSRegExp> re,
- Handle<String> pattern,
- Handle<String> error_text,
- const char* message) {
- Isolate* isolate = re->GetIsolate();
- Factory* factory = isolate->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(2);
- elements->set(0, *pattern);
- elements->set(1, *error_text);
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> regexp_err = factory->NewSyntaxError(message, array);
- isolate->Throw(*regexp_err);
-}
-
-
-ContainedInLattice AddRange(ContainedInLattice containment,
- const int* ranges,
- int ranges_length,
- Interval new_range) {
- ASSERT((ranges_length & 1) == 1);
- ASSERT(ranges[ranges_length - 1] == String::kMaxUtf16CodeUnit + 1);
- if (containment == kLatticeUnknown) return containment;
- bool inside = false;
- int last = 0;
- for (int i = 0; i < ranges_length; inside = !inside, last = ranges[i], i++) {
- // Consider the range from last to ranges[i].
- // We haven't got to the new range yet.
- if (ranges[i] <= new_range.from()) continue;
- // New range is wholly inside last-ranges[i]. Note that new_range.to() is
- // inclusive, but the values in ranges are not.
- if (last <= new_range.from() && new_range.to() < ranges[i]) {
- return Combine(containment, inside ? kLatticeIn : kLatticeOut);
- }
- return kLatticeUnknown;
- }
- return containment;
-}
-
-
-// More makes code generation slower, less makes V8 benchmark score lower.
-const int kMaxLookaheadForBoyerMoore = 8;
-// In a 3-character pattern you can maximally step forwards 3 characters
-// at a time, which is not always enough to pay for the extra logic.
-const int kPatternTooShortForBoyerMoore = 2;
-
-
-// Identifies the sort of regexps where the regexp engine is faster
-// than the code used for atom matches.
-static bool HasFewDifferentCharacters(Handle<String> pattern) {
- int length = Min(kMaxLookaheadForBoyerMoore, pattern->length());
- if (length <= kPatternTooShortForBoyerMoore) return false;
- const int kMod = 128;
- bool character_found[kMod];
- int different = 0;
- memset(&character_found[0], 0, sizeof(character_found));
- for (int i = 0; i < length; i++) {
- int ch = (pattern->Get(i) & (kMod - 1));
- if (!character_found[ch]) {
- character_found[ch] = true;
- different++;
- // We declare a regexp low-alphabet if it has at least 3 times as many
- // characters as it has different characters.
- if (different * 3 > length) return false;
- }
- }
- return true;
-}
-
-
-// Generic RegExp methods. Dispatches to implementation specific methods.
-
-
-Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
- Handle<String> pattern,
- Handle<String> flag_str,
- Zone* zone) {
- ZoneScope zone_scope(zone, DELETE_ON_EXIT);
- Isolate* isolate = re->GetIsolate();
- JSRegExp::Flags flags = RegExpFlagsFromString(flag_str);
- CompilationCache* compilation_cache = isolate->compilation_cache();
- Handle<FixedArray> cached = compilation_cache->LookupRegExp(pattern, flags);
- bool in_cache = !cached.is_null();
- LOG(isolate, RegExpCompileEvent(re, in_cache));
-
- Handle<Object> result;
- if (in_cache) {
- re->set_data(*cached);
- return re;
- }
- pattern = FlattenGetString(pattern);
- PostponeInterruptsScope postpone(isolate);
- RegExpCompileData parse_result;
- FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
- &parse_result, zone)) {
- // Throw an exception if we fail to parse the pattern.
- ThrowRegExpException(re,
- pattern,
- parse_result.error,
- "malformed_regexp");
- return Handle<Object>::null();
- }
-
- bool has_been_compiled = false;
-
- if (parse_result.simple &&
- !flags.is_ignore_case() &&
- !HasFewDifferentCharacters(pattern)) {
- // Parse-tree is a single atom that is equal to the pattern.
- AtomCompile(re, pattern, flags, pattern);
- has_been_compiled = true;
- } else if (parse_result.tree->IsAtom() &&
- !flags.is_ignore_case() &&
- parse_result.capture_count == 0) {
- RegExpAtom* atom = parse_result.tree->AsAtom();
- Vector<const uc16> atom_pattern = atom->data();
- Handle<String> atom_string =
- isolate->factory()->NewStringFromTwoByte(atom_pattern);
- if (!HasFewDifferentCharacters(atom_string)) {
- AtomCompile(re, pattern, flags, atom_string);
- has_been_compiled = true;
- }
- }
- if (!has_been_compiled) {
- IrregexpInitialize(re, pattern, flags, parse_result.capture_count);
- }
- ASSERT(re->data()->IsFixedArray());
- // Compilation succeeded so the data is set on the regexp
- // and we can store it in the cache.
- Handle<FixedArray> data(FixedArray::cast(re->data()));
- compilation_cache->PutRegExp(pattern, flags, data);
-
- return re;
-}
-
-
-Handle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> last_match_info) {
- switch (regexp->TypeTag()) {
- case JSRegExp::ATOM:
- return AtomExec(regexp, subject, index, last_match_info);
- case JSRegExp::IRREGEXP: {
- Handle<Object> result =
- IrregexpExec(regexp, subject, index, last_match_info);
- ASSERT(!result.is_null() ||
- regexp->GetIsolate()->has_pending_exception());
- return result;
- }
- default:
- UNREACHABLE();
- return Handle<Object>::null();
- }
-}
-
-
-// RegExp Atom implementation: Simple string search using indexOf.
-
-
-void RegExpImpl::AtomCompile(Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
- Handle<String> match_pattern) {
- re->GetIsolate()->factory()->SetRegExpAtomData(re,
- JSRegExp::ATOM,
- pattern,
- flags,
- match_pattern);
-}
-
-
-static void SetAtomLastCapture(FixedArray* array,
- String* subject,
- int from,
- int to) {
- NoHandleAllocation no_handles(array->GetIsolate());
- RegExpImpl::SetLastCaptureCount(array, 2);
- RegExpImpl::SetLastSubject(array, subject);
- RegExpImpl::SetLastInput(array, subject);
- RegExpImpl::SetCapture(array, 0, from);
- RegExpImpl::SetCapture(array, 1, to);
-}
-
-
-int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- int32_t* output,
- int output_size) {
- Isolate* isolate = regexp->GetIsolate();
-
- ASSERT(0 <= index);
- ASSERT(index <= subject->length());
-
- if (!subject->IsFlat()) FlattenString(subject);
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
-
- String* needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
- int needle_len = needle->length();
- ASSERT(needle->IsFlat());
- ASSERT_LT(0, needle_len);
-
- if (index + needle_len > subject->length()) {
- return RegExpImpl::RE_FAILURE;
- }
-
- for (int i = 0; i < output_size; i += 2) {
- String::FlatContent needle_content = needle->GetFlatContent();
- String::FlatContent subject_content = subject->GetFlatContent();
- ASSERT(needle_content.IsFlat());
- ASSERT(subject_content.IsFlat());
- // dispatch on type of strings
- index = (needle_content.IsAscii()
- ? (subject_content.IsAscii()
- ? SearchString(isolate,
- subject_content.ToOneByteVector(),
- needle_content.ToOneByteVector(),
- index)
- : SearchString(isolate,
- subject_content.ToUC16Vector(),
- needle_content.ToOneByteVector(),
- index))
- : (subject_content.IsAscii()
- ? SearchString(isolate,
- subject_content.ToOneByteVector(),
- needle_content.ToUC16Vector(),
- index)
- : SearchString(isolate,
- subject_content.ToUC16Vector(),
- needle_content.ToUC16Vector(),
- index)));
- if (index == -1) {
- return i / 2; // Return number of matches.
- } else {
- output[i] = index;
- output[i+1] = index + needle_len;
- index += needle_len;
- }
- }
- return output_size / 2;
-}
-
-
-Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
- Handle<String> subject,
- int index,
- Handle<JSArray> last_match_info) {
- Isolate* isolate = re->GetIsolate();
-
- static const int kNumRegisters = 2;
- STATIC_ASSERT(kNumRegisters <= Isolate::kJSRegexpStaticOffsetsVectorSize);
- int32_t* output_registers = isolate->jsregexp_static_offsets_vector();
-
- int res = AtomExecRaw(re, subject, index, output_registers, kNumRegisters);
-
- if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value();
-
- ASSERT_EQ(res, RegExpImpl::RE_SUCCESS);
- NoHandleAllocation no_handles(isolate);
- FixedArray* array = FixedArray::cast(last_match_info->elements());
- SetAtomLastCapture(array, *subject, output_registers[0], output_registers[1]);
- return last_match_info;
-}
-
-
-// Irregexp implementation.
-
-// Ensures that the regexp object contains a compiled version of the
-// source for either ASCII or non-ASCII strings.
-// If the compiled version doesn't already exist, it is compiled
-// from the source pattern.
-// If compilation fails, an exception is thrown and this function
-// returns false.
-bool RegExpImpl::EnsureCompiledIrregexp(
- Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii) {
- Object* compiled_code = re->DataAt(JSRegExp::code_index(is_ascii));
-#ifdef V8_INTERPRETED_REGEXP
- if (compiled_code->IsByteArray()) return true;
-#else // V8_INTERPRETED_REGEXP (RegExp native code)
- if (compiled_code->IsCode()) return true;
-#endif
- // We could potentially have marked this as flushable, but have kept
- // a saved version if we did not flush it yet.
- Object* saved_code = re->DataAt(JSRegExp::saved_code_index(is_ascii));
- if (saved_code->IsCode()) {
- // Reinstate the code in the original place.
- re->SetDataAt(JSRegExp::code_index(is_ascii), saved_code);
- ASSERT(compiled_code->IsSmi());
- return true;
- }
- return CompileIrregexp(re, sample_subject, is_ascii);
-}
-
-
-static bool CreateRegExpErrorObjectAndThrow(Handle<JSRegExp> re,
- bool is_ascii,
- Handle<String> error_message,
- Isolate* isolate) {
- Factory* factory = isolate->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(2);
- elements->set(0, re->Pattern());
- elements->set(1, *error_message);
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> regexp_err =
- factory->NewSyntaxError("malformed_regexp", array);
- isolate->Throw(*regexp_err);
- return false;
-}
-
-
-bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
- Handle<String> sample_subject,
- bool is_ascii) {
- // Compile the RegExp.
- Isolate* isolate = re->GetIsolate();
- ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
- PostponeInterruptsScope postpone(isolate);
- // If we had a compilation error the last time this is saved at the
- // saved code index.
- Object* entry = re->DataAt(JSRegExp::code_index(is_ascii));
- // When arriving here entry can only be a smi, either representing an
- // uncompiled regexp, a previous compilation error, or code that has
- // been flushed.
- ASSERT(entry->IsSmi());
- int entry_value = Smi::cast(entry)->value();
- ASSERT(entry_value == JSRegExp::kUninitializedValue ||
- entry_value == JSRegExp::kCompilationErrorValue ||
- (entry_value < JSRegExp::kCodeAgeMask && entry_value >= 0));
-
- if (entry_value == JSRegExp::kCompilationErrorValue) {
- // A previous compilation failed and threw an error which we store in
- // the saved code index (we store the error message, not the actual
- // error). Recreate the error object and throw it.
- Object* error_string = re->DataAt(JSRegExp::saved_code_index(is_ascii));
- ASSERT(error_string->IsString());
- Handle<String> error_message(String::cast(error_string));
- CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate);
- return false;
- }
-
- JSRegExp::Flags flags = re->GetFlags();
-
- Handle<String> pattern(re->Pattern());
- if (!pattern->IsFlat()) FlattenString(pattern);
- RegExpCompileData compile_data;
- FlatStringReader reader(isolate, pattern);
- Zone* zone = isolate->runtime_zone();
- if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
- &compile_data,
- zone)) {
- // Throw an exception if we fail to parse the pattern.
- // THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
- ThrowRegExpException(re,
- pattern,
- compile_data.error,
- "malformed_regexp");
- return false;
- }
- RegExpEngine::CompilationResult result =
- RegExpEngine::Compile(&compile_data,
- flags.is_ignore_case(),
- flags.is_global(),
- flags.is_multiline(),
- pattern,
- sample_subject,
- is_ascii,
- zone);
- if (result.error_message != NULL) {
- // Unable to compile regexp.
- Handle<String> error_message =
- isolate->factory()->NewStringFromUtf8(CStrVector(result.error_message));
- CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate);
- return false;
- }
-
- Handle<FixedArray> data = Handle<FixedArray>(FixedArray::cast(re->data()));
- data->set(JSRegExp::code_index(is_ascii), result.code);
- int register_max = IrregexpMaxRegisterCount(*data);
- if (result.num_registers > register_max) {
- SetIrregexpMaxRegisterCount(*data, result.num_registers);
- }
-
- return true;
-}
-
-
-int RegExpImpl::IrregexpMaxRegisterCount(FixedArray* re) {
- return Smi::cast(
- re->get(JSRegExp::kIrregexpMaxRegisterCountIndex))->value();
-}
-
-
-void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray* re, int value) {
- re->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value));
-}
-
-
-int RegExpImpl::IrregexpNumberOfCaptures(FixedArray* re) {
- return Smi::cast(re->get(JSRegExp::kIrregexpCaptureCountIndex))->value();
-}
-
-
-int RegExpImpl::IrregexpNumberOfRegisters(FixedArray* re) {
- return Smi::cast(re->get(JSRegExp::kIrregexpMaxRegisterCountIndex))->value();
-}
-
-
-ByteArray* RegExpImpl::IrregexpByteCode(FixedArray* re, bool is_ascii) {
- return ByteArray::cast(re->get(JSRegExp::code_index(is_ascii)));
-}
-
-
-Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_ascii) {
- return Code::cast(re->get(JSRegExp::code_index(is_ascii)));
-}
-
-
-void RegExpImpl::IrregexpInitialize(Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
- int capture_count) {
- // Initialize compiled code entries to null.
- re->GetIsolate()->factory()->SetRegExpIrregexpData(re,
- JSRegExp::IRREGEXP,
- pattern,
- flags,
- capture_count);
-}
-
-
-int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
- Handle<String> subject) {
- if (!subject->IsFlat()) FlattenString(subject);
-
- // Check the asciiness of the underlying storage.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
- if (!EnsureCompiledIrregexp(regexp, subject, is_ascii)) return -1;
-
-#ifdef V8_INTERPRETED_REGEXP
- // Byte-code regexp needs space allocated for all its registers.
- // The result captures are copied to the start of the registers array
- // if the match succeeds. This way those registers are not clobbered
- // when we set the last match info from last successful match.
- return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data())) +
- (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
-#else // V8_INTERPRETED_REGEXP
- // Native regexp only needs room to output captures. Registers are handled
- // internally.
- return (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- int32_t* output,
- int output_size) {
- Isolate* isolate = regexp->GetIsolate();
-
- Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
-
- ASSERT(index >= 0);
- ASSERT(index <= subject->length());
- ASSERT(subject->IsFlat());
-
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
-
-#ifndef V8_INTERPRETED_REGEXP
- ASSERT(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
- do {
- EnsureCompiledIrregexp(regexp, subject, is_ascii);
- Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
- // The stack is used to allocate registers for the compiled regexp code.
- // This means that in case of failure, the output registers array is left
- // untouched and contains the capture results from the previous successful
- // match. We can use that to set the last match info lazily.
- NativeRegExpMacroAssembler::Result res =
- NativeRegExpMacroAssembler::Match(code,
- subject,
- output,
- output_size,
- index,
- isolate);
- if (res != NativeRegExpMacroAssembler::RETRY) {
- ASSERT(res != NativeRegExpMacroAssembler::EXCEPTION ||
- isolate->has_pending_exception());
- STATIC_ASSERT(
- static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) == RE_SUCCESS);
- STATIC_ASSERT(
- static_cast<int>(NativeRegExpMacroAssembler::FAILURE) == RE_FAILURE);
- STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::EXCEPTION)
- == RE_EXCEPTION);
- return static_cast<IrregexpResult>(res);
- }
- // If result is RETRY, the string has changed representation, and we
- // must restart from scratch.
- // In this case, it means we must make sure we are prepared to handle
- // the, potentially, different subject (the string can switch between
- // being internal and external, and even between being ASCII and UC16,
- // but the characters are always the same).
- IrregexpPrepare(regexp, subject);
- is_ascii = subject->IsOneByteRepresentationUnderneath();
- } while (true);
- UNREACHABLE();
- return RE_EXCEPTION;
-#else // V8_INTERPRETED_REGEXP
-
- ASSERT(output_size >= IrregexpNumberOfRegisters(*irregexp));
- // We must have done EnsureCompiledIrregexp, so we can get the number of
- // registers.
- int number_of_capture_registers =
- (IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
- int32_t* raw_output = &output[number_of_capture_registers];
- // We do not touch the actual capture result registers until we know there
- // has been a match so that we can use those capture results to set the
- // last match info.
- for (int i = number_of_capture_registers - 1; i >= 0; i--) {
- raw_output[i] = -1;
- }
- Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii), isolate);
-
- IrregexpResult result = IrregexpInterpreter::Match(isolate,
- byte_codes,
- subject,
- raw_output,
- index);
- if (result == RE_SUCCESS) {
- // Copy capture results to the start of the registers array.
- memcpy(output, raw_output, number_of_capture_registers * sizeof(int32_t));
- }
- if (result == RE_EXCEPTION) {
- ASSERT(!isolate->has_pending_exception());
- isolate->StackOverflow();
- }
- return result;
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int previous_index,
- Handle<JSArray> last_match_info) {
- Isolate* isolate = regexp->GetIsolate();
- ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
-
- // Prepare space for the return values.
-#if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG)
- if (FLAG_trace_regexp_bytecodes) {
- String* pattern = regexp->Pattern();
- PrintF("\n\nRegexp match: /%s/\n\n", *(pattern->ToCString()));
- PrintF("\n\nSubject string: '%s'\n\n", *(subject->ToCString()));
- }
-#endif
- int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
- if (required_registers < 0) {
- // Compiling failed with an exception.
- ASSERT(isolate->has_pending_exception());
- return Handle<Object>::null();
- }
-
- int32_t* output_registers = NULL;
- if (required_registers > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- output_registers = NewArray<int32_t>(required_registers);
- }
- SmartArrayPointer<int32_t> auto_release(output_registers);
- if (output_registers == NULL) {
- output_registers = isolate->jsregexp_static_offsets_vector();
- }
-
- int res = RegExpImpl::IrregexpExecRaw(
- regexp, subject, previous_index, output_registers, required_registers);
- if (res == RE_SUCCESS) {
- int capture_count =
- IrregexpNumberOfCaptures(FixedArray::cast(regexp->data()));
- return SetLastMatchInfo(
- last_match_info, subject, capture_count, output_registers);
- }
- if (res == RE_EXCEPTION) {
- ASSERT(isolate->has_pending_exception());
- return Handle<Object>::null();
- }
- ASSERT(res == RE_FAILURE);
- return isolate->factory()->null_value();
-}
-
-
-Handle<JSArray> RegExpImpl::SetLastMatchInfo(Handle<JSArray> last_match_info,
- Handle<String> subject,
- int capture_count,
- int32_t* match) {
- ASSERT(last_match_info->HasFastObjectElements());
- int capture_register_count = (capture_count + 1) * 2;
- last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
- AssertNoAllocation no_gc;
- FixedArray* array = FixedArray::cast(last_match_info->elements());
- if (match != NULL) {
- for (int i = 0; i < capture_register_count; i += 2) {
- SetCapture(array, i, match[i]);
- SetCapture(array, i + 1, match[i + 1]);
- }
- }
- SetLastCaptureCount(array, capture_register_count);
- SetLastSubject(array, *subject);
- SetLastInput(array, *subject);
- return last_match_info;
-}
-
-
-RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
- Handle<String> subject,
- bool is_global,
- Isolate* isolate)
- : register_array_(NULL),
- register_array_size_(0),
- regexp_(regexp),
- subject_(subject) {
-#ifdef V8_INTERPRETED_REGEXP
- bool interpreted = true;
-#else
- bool interpreted = false;
-#endif // V8_INTERPRETED_REGEXP
-
- if (regexp_->TypeTag() == JSRegExp::ATOM) {
- static const int kAtomRegistersPerMatch = 2;
- registers_per_match_ = kAtomRegistersPerMatch;
- // There is no distinction between interpreted and native for atom regexps.
- interpreted = false;
- } else {
- registers_per_match_ = RegExpImpl::IrregexpPrepare(regexp_, subject_);
- if (registers_per_match_ < 0) {
- num_matches_ = -1; // Signal exception.
- return;
- }
- }
-
- if (is_global && !interpreted) {
- register_array_size_ =
- Max(registers_per_match_, Isolate::kJSRegexpStaticOffsetsVectorSize);
- max_matches_ = register_array_size_ / registers_per_match_;
- } else {
- // Global loop in interpreted regexp is not implemented. We choose
- // the size of the offsets vector so that it can only store one match.
- register_array_size_ = registers_per_match_;
- max_matches_ = 1;
- }
-
- if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- register_array_ = NewArray<int32_t>(register_array_size_);
- } else {
- register_array_ = isolate->jsregexp_static_offsets_vector();
- }
-
- // Set state so that fetching the results the first time triggers a call
- // to the compiled regexp.
- current_match_index_ = max_matches_ - 1;
- num_matches_ = max_matches_;
- ASSERT(registers_per_match_ >= 2); // Each match has at least one capture.
- ASSERT_GE(register_array_size_, registers_per_match_);
- int32_t* last_match =
- &register_array_[current_match_index_ * registers_per_match_];
- last_match[0] = -1;
- last_match[1] = 0;
-}
-
-
-// -------------------------------------------------------------------
-// Implementation of the Irregexp regular expression engine.
-//
-// The Irregexp regular expression engine is intended to be a complete
-// implementation of ECMAScript regular expressions. It generates either
-// bytecodes or native code.
-
-// The Irregexp regexp engine is structured in three steps.
-// 1) The parser generates an abstract syntax tree. See ast.cc.
-// 2) From the AST a node network is created. The nodes are all
-// subclasses of RegExpNode. The nodes represent states when
-// executing a regular expression. Several optimizations are
-// performed on the node network.
-// 3) From the nodes we generate either byte codes or native code
-// that can actually execute the regular expression (perform
-// the search). The code generation step is described in more
-// detail below.
-
-// Code generation.
-//
-// The nodes are divided into four main categories.
-// * Choice nodes
-// These represent places where the regular expression can
-// match in more than one way. For example on entry to an
-// alternation (foo|bar) or a repetition (*, +, ? or {}).
-// * Action nodes
-// These represent places where some action should be
-// performed. Examples include recording the current position
-// in the input string to a register (in order to implement
-// captures) or other actions on register for example in order
-// to implement the counters needed for {} repetitions.
-// * Matching nodes
-// These attempt to match some element part of the input string.
-// Examples of elements include character classes, plain strings
-// or back references.
-// * End nodes
-// These are used to implement the actions required on finding
-// a successful match or failing to find a match.
-//
-// The code generated (whether as byte codes or native code) maintains
-// some state as it runs. This consists of the following elements:
-//
-// * The capture registers. Used for string captures.
-// * Other registers. Used for counters etc.
-// * The current position.
-// * The stack of backtracking information. Used when a matching node
-// fails to find a match and needs to try an alternative.
-//
-// Conceptual regular expression execution model:
-//
-// There is a simple conceptual model of regular expression execution
-// which will be presented first. The actual code generated is a more
-// efficient simulation of the simple conceptual model:
-//
-// * Choice nodes are implemented as follows:
-// For each choice except the last {
-// push current position
-// push backtrack code location
-// <generate code to test for choice>
-// backtrack code location:
-// pop current position
-// }
-// <generate code to test for last choice>
-//
-// * Actions nodes are generated as follows
-// <push affected registers on backtrack stack>
-// <generate code to perform action>
-// push backtrack code location
-// <generate code to test for following nodes>
-// backtrack code location:
-// <pop affected registers to restore their state>
-// <pop backtrack location from stack and go to it>
-//
-// * Matching nodes are generated as follows:
-// if input string matches at current position
-// update current position
-// <generate code to test for following nodes>
-// else
-// <pop backtrack location from stack and go to it>
-//
-// Thus it can be seen that the current position is saved and restored
-// by the choice nodes, whereas the registers are saved and restored by
-// by the action nodes that manipulate them.
-//
-// The other interesting aspect of this model is that nodes are generated
-// at the point where they are needed by a recursive call to Emit(). If
-// the node has already been code generated then the Emit() call will
-// generate a jump to the previously generated code instead. In order to
-// limit recursion it is possible for the Emit() function to put the node
-// on a work list for later generation and instead generate a jump. The
-// destination of the jump is resolved later when the code is generated.
-//
-// Actual regular expression code generation.
-//
-// Code generation is actually more complicated than the above. In order
-// to improve the efficiency of the generated code some optimizations are
-// performed
-//
-// * Choice nodes have 1-character lookahead.
-// A choice node looks at the following character and eliminates some of
-// the choices immediately based on that character. This is not yet
-// implemented.
-// * Simple greedy loops store reduced backtracking information.
-// A quantifier like /.*foo/m will greedily match the whole input. It will
-// then need to backtrack to a point where it can match "foo". The naive
-// implementation of this would push each character position onto the
-// backtracking stack, then pop them off one by one. This would use space
-// proportional to the length of the input string. However since the "."
-// can only match in one way and always has a constant length (in this case
-// of 1) it suffices to store the current position on the top of the stack
-// once. Matching now becomes merely incrementing the current position and
-// backtracking becomes decrementing the current position and checking the
-// result against the stored current position. This is faster and saves
-// space.
-// * The current state is virtualized.
-// This is used to defer expensive operations until it is clear that they
-// are needed and to generate code for a node more than once, allowing
-// specialized an efficient versions of the code to be created. This is
-// explained in the section below.
-//
-// Execution state virtualization.
-//
-// Instead of emitting code, nodes that manipulate the state can record their
-// manipulation in an object called the Trace. The Trace object can record a
-// current position offset, an optional backtrack code location on the top of
-// the virtualized backtrack stack and some register changes. When a node is
-// to be emitted it can flush the Trace or update it. Flushing the Trace
-// will emit code to bring the actual state into line with the virtual state.
-// Avoiding flushing the state can postpone some work (e.g. updates of capture
-// registers). Postponing work can save time when executing the regular
-// expression since it may be found that the work never has to be done as a
-// failure to match can occur. In addition it is much faster to jump to a
-// known backtrack code location than it is to pop an unknown backtrack
-// location from the stack and jump there.
-//
-// The virtual state found in the Trace affects code generation. For example
-// the virtual state contains the difference between the actual current
-// position and the virtual current position, and matching code needs to use
-// this offset to attempt a match in the correct location of the input
-// string. Therefore code generated for a non-trivial trace is specialized
-// to that trace. The code generator therefore has the ability to generate
-// code for each node several times. In order to limit the size of the
-// generated code there is an arbitrary limit on how many specialized sets of
-// code may be generated for a given node. If the limit is reached, the
-// trace is flushed and a generic version of the code for a node is emitted.
-// This is subsequently used for that node. The code emitted for non-generic
-// trace is not recorded in the node and so it cannot currently be reused in
-// the event that code generation is requested for an identical trace.
-
-
-void RegExpTree::AppendToText(RegExpText* text, Zone* zone) {
- UNREACHABLE();
-}
-
-
-void RegExpAtom::AppendToText(RegExpText* text, Zone* zone) {
- text->AddElement(TextElement::Atom(this), zone);
-}
-
-
-void RegExpCharacterClass::AppendToText(RegExpText* text, Zone* zone) {
- text->AddElement(TextElement::CharClass(this), zone);
-}
-
-
-void RegExpText::AppendToText(RegExpText* text, Zone* zone) {
- for (int i = 0; i < elements()->length(); i++)
- text->AddElement(elements()->at(i), zone);
-}
-
-
-TextElement TextElement::Atom(RegExpAtom* atom) {
- TextElement result = TextElement(ATOM);
- result.data.u_atom = atom;
- return result;
-}
-
-
-TextElement TextElement::CharClass(
- RegExpCharacterClass* char_class) {
- TextElement result = TextElement(CHAR_CLASS);
- result.data.u_char_class = char_class;
- return result;
-}
-
-
-int TextElement::length() {
- if (type == ATOM) {
- return data.u_atom->length();
- } else {
- ASSERT(type == CHAR_CLASS);
- return 1;
- }
-}
-
-
-DispatchTable* ChoiceNode::GetTable(bool ignore_case) {
- if (table_ == NULL) {
- table_ = new(zone()) DispatchTable(zone());
- DispatchTableConstructor cons(table_, ignore_case, zone());
- cons.BuildTable(this);
- }
- return table_;
-}
-
-
-class FrequencyCollator {
- public:
- FrequencyCollator() : total_samples_(0) {
- for (int i = 0; i < RegExpMacroAssembler::kTableSize; i++) {
- frequencies_[i] = CharacterFrequency(i);
- }
- }
-
- void CountCharacter(int character) {
- int index = (character & RegExpMacroAssembler::kTableMask);
- frequencies_[index].Increment();
- total_samples_++;
- }
-
- // Does not measure in percent, but rather per-128 (the table size from the
- // regexp macro assembler).
- int Frequency(int in_character) {
- ASSERT((in_character & RegExpMacroAssembler::kTableMask) == in_character);
- if (total_samples_ < 1) return 1; // Division by zero.
- int freq_in_per128 =
- (frequencies_[in_character].counter() * 128) / total_samples_;
- return freq_in_per128;
- }
-
- private:
- class CharacterFrequency {
- public:
- CharacterFrequency() : counter_(0), character_(-1) { }
- explicit CharacterFrequency(int character)
- : counter_(0), character_(character) { }
-
- void Increment() { counter_++; }
- int counter() { return counter_; }
- int character() { return character_; }
-
- private:
- int counter_;
- int character_;
- };
-
-
- private:
- CharacterFrequency frequencies_[RegExpMacroAssembler::kTableSize];
- int total_samples_;
-};
-
-
-class RegExpCompiler {
- public:
- RegExpCompiler(int capture_count, bool ignore_case, bool is_ascii,
- Zone* zone);
-
- int AllocateRegister() {
- if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
- reg_exp_too_big_ = true;
- return next_register_;
- }
- return next_register_++;
- }
-
- RegExpEngine::CompilationResult Assemble(RegExpMacroAssembler* assembler,
- RegExpNode* start,
- int capture_count,
- Handle<String> pattern);
-
- inline void AddWork(RegExpNode* node) { work_list_->Add(node); }
-
- static const int kImplementationOffset = 0;
- static const int kNumberOfRegistersOffset = 0;
- static const int kCodeOffset = 1;
-
- RegExpMacroAssembler* macro_assembler() { return macro_assembler_; }
- EndNode* accept() { return accept_; }
-
- static const int kMaxRecursion = 100;
- inline int recursion_depth() { return recursion_depth_; }
- inline void IncrementRecursionDepth() { recursion_depth_++; }
- inline void DecrementRecursionDepth() { recursion_depth_--; }
-
- void SetRegExpTooBig() { reg_exp_too_big_ = true; }
-
- inline bool ignore_case() { return ignore_case_; }
- inline bool ascii() { return ascii_; }
- FrequencyCollator* frequency_collator() { return &frequency_collator_; }
-
- int current_expansion_factor() { return current_expansion_factor_; }
- void set_current_expansion_factor(int value) {
- current_expansion_factor_ = value;
- }
-
- Zone* zone() const { return zone_; }
-
- static const int kNoRegister = -1;
-
- private:
- EndNode* accept_;
- int next_register_;
- List<RegExpNode*>* work_list_;
- int recursion_depth_;
- RegExpMacroAssembler* macro_assembler_;
- bool ignore_case_;
- bool ascii_;
- bool reg_exp_too_big_;
- int current_expansion_factor_;
- FrequencyCollator frequency_collator_;
- Zone* zone_;
-};
-
-
-class RecursionCheck {
- public:
- explicit RecursionCheck(RegExpCompiler* compiler) : compiler_(compiler) {
- compiler->IncrementRecursionDepth();
- }
- ~RecursionCheck() { compiler_->DecrementRecursionDepth(); }
- private:
- RegExpCompiler* compiler_;
-};
-
-
-static RegExpEngine::CompilationResult IrregexpRegExpTooBig() {
- return RegExpEngine::CompilationResult("RegExp too big");
-}
-
-
-// Attempts to compile the regexp using an Irregexp code generator. Returns
-// a fixed array or a null handle depending on whether it succeeded.
-RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case, bool ascii,
- Zone* zone)
- : next_register_(2 * (capture_count + 1)),
- work_list_(NULL),
- recursion_depth_(0),
- ignore_case_(ignore_case),
- ascii_(ascii),
- reg_exp_too_big_(false),
- current_expansion_factor_(1),
- frequency_collator_(),
- zone_(zone) {
- accept_ = new(zone) EndNode(EndNode::ACCEPT, zone);
- ASSERT(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister);
-}
-
-
-RegExpEngine::CompilationResult RegExpCompiler::Assemble(
- RegExpMacroAssembler* macro_assembler,
- RegExpNode* start,
- int capture_count,
- Handle<String> pattern) {
- Heap* heap = pattern->GetHeap();
-
- bool use_slow_safe_regexp_compiler = false;
- if (heap->total_regexp_code_generated() >
- RegExpImpl::kRegWxpCompiledLimit &&
- heap->isolate()->memory_allocator()->SizeExecutable() >
- RegExpImpl::kRegExpExecutableMemoryLimit) {
- use_slow_safe_regexp_compiler = true;
- }
-
- macro_assembler->set_slow_safe(use_slow_safe_regexp_compiler);
-
-#ifdef DEBUG
- if (FLAG_trace_regexp_assembler)
- macro_assembler_ = new RegExpMacroAssemblerTracer(macro_assembler);
- else
-#endif
- macro_assembler_ = macro_assembler;
-
- List <RegExpNode*> work_list(0);
- work_list_ = &work_list;
- Label fail;
- macro_assembler_->PushBacktrack(&fail);
- Trace new_trace;
- start->Emit(this, &new_trace);
- macro_assembler_->Bind(&fail);
- macro_assembler_->Fail();
- while (!work_list.is_empty()) {
- work_list.RemoveLast()->Emit(this, &new_trace);
- }
- if (reg_exp_too_big_) return IrregexpRegExpTooBig();
-
- Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
- heap->IncreaseTotalRegexpCodeGenerated(code->Size());
- work_list_ = NULL;
-#ifdef DEBUG
- if (FLAG_print_code) {
- Handle<Code>::cast(code)->Disassemble(*pattern->ToCString());
- }
- if (FLAG_trace_regexp_assembler) {
- delete macro_assembler_;
- }
-#endif
- return RegExpEngine::CompilationResult(*code, next_register_);
-}
-
-
-bool Trace::DeferredAction::Mentions(int that) {
- if (type() == ActionNode::CLEAR_CAPTURES) {
- Interval range = static_cast<DeferredClearCaptures*>(this)->range();
- return range.Contains(that);
- } else {
- return reg() == that;
- }
-}
-
-
-bool Trace::mentions_reg(int reg) {
- for (DeferredAction* action = actions_;
- action != NULL;
- action = action->next()) {
- if (action->Mentions(reg))
- return true;
- }
- return false;
-}
-
-
-bool Trace::GetStoredPosition(int reg, int* cp_offset) {
- ASSERT_EQ(0, *cp_offset);
- for (DeferredAction* action = actions_;
- action != NULL;
- action = action->next()) {
- if (action->Mentions(reg)) {
- if (action->type() == ActionNode::STORE_POSITION) {
- *cp_offset = static_cast<DeferredCapture*>(action)->cp_offset();
- return true;
- } else {
- return false;
- }
- }
- }
- return false;
-}
-
-
-int Trace::FindAffectedRegisters(OutSet* affected_registers,
- Zone* zone) {
- int max_register = RegExpCompiler::kNoRegister;
- for (DeferredAction* action = actions_;
- action != NULL;
- action = action->next()) {
- if (action->type() == ActionNode::CLEAR_CAPTURES) {
- Interval range = static_cast<DeferredClearCaptures*>(action)->range();
- for (int i = range.from(); i <= range.to(); i++)
- affected_registers->Set(i, zone);
- if (range.to() > max_register) max_register = range.to();
- } else {
- affected_registers->Set(action->reg(), zone);
- if (action->reg() > max_register) max_register = action->reg();
- }
- }
- return max_register;
-}
-
-
-void Trace::RestoreAffectedRegisters(RegExpMacroAssembler* assembler,
- int max_register,
- OutSet& registers_to_pop,
- OutSet& registers_to_clear) {
- for (int reg = max_register; reg >= 0; reg--) {
- if (registers_to_pop.Get(reg)) assembler->PopRegister(reg);
- else if (registers_to_clear.Get(reg)) {
- int clear_to = reg;
- while (reg > 0 && registers_to_clear.Get(reg - 1)) {
- reg--;
- }
- assembler->ClearRegisters(reg, clear_to);
- }
- }
-}
-
-
-void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
- int max_register,
- OutSet& affected_registers,
- OutSet* registers_to_pop,
- OutSet* registers_to_clear,
- Zone* zone) {
- // The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1.
- const int push_limit = (assembler->stack_limit_slack() + 1) / 2;
-
- // Count pushes performed to force a stack limit check occasionally.
- int pushes = 0;
-
- for (int reg = 0; reg <= max_register; reg++) {
- if (!affected_registers.Get(reg)) {
- continue;
- }
-
- // The chronologically first deferred action in the trace
- // is used to infer the action needed to restore a register
- // to its previous state (or not, if it's safe to ignore it).
- enum DeferredActionUndoType { IGNORE, RESTORE, CLEAR };
- DeferredActionUndoType undo_action = IGNORE;
-
- int value = 0;
- bool absolute = false;
- bool clear = false;
- int store_position = -1;
- // This is a little tricky because we are scanning the actions in reverse
- // historical order (newest first).
- for (DeferredAction* action = actions_;
- action != NULL;
- action = action->next()) {
- if (action->Mentions(reg)) {
- switch (action->type()) {
- case ActionNode::SET_REGISTER: {
- Trace::DeferredSetRegister* psr =
- static_cast<Trace::DeferredSetRegister*>(action);
- if (!absolute) {
- value += psr->value();
- absolute = true;
- }
- // SET_REGISTER is currently only used for newly introduced loop
- // counters. They can have a significant previous value if they
- // occour in a loop. TODO(lrn): Propagate this information, so
- // we can set undo_action to IGNORE if we know there is no value to
- // restore.
- undo_action = RESTORE;
- ASSERT_EQ(store_position, -1);
- ASSERT(!clear);
- break;
- }
- case ActionNode::INCREMENT_REGISTER:
- if (!absolute) {
- value++;
- }
- ASSERT_EQ(store_position, -1);
- ASSERT(!clear);
- undo_action = RESTORE;
- break;
- case ActionNode::STORE_POSITION: {
- Trace::DeferredCapture* pc =
- static_cast<Trace::DeferredCapture*>(action);
- if (!clear && store_position == -1) {
- store_position = pc->cp_offset();
- }
-
- // For captures we know that stores and clears alternate.
- // Other register, are never cleared, and if the occur
- // inside a loop, they might be assigned more than once.
- if (reg <= 1) {
- // Registers zero and one, aka "capture zero", is
- // always set correctly if we succeed. There is no
- // need to undo a setting on backtrack, because we
- // will set it again or fail.
- undo_action = IGNORE;
- } else {
- undo_action = pc->is_capture() ? CLEAR : RESTORE;
- }
- ASSERT(!absolute);
- ASSERT_EQ(value, 0);
- break;
- }
- case ActionNode::CLEAR_CAPTURES: {
- // Since we're scanning in reverse order, if we've already
- // set the position we have to ignore historically earlier
- // clearing operations.
- if (store_position == -1) {
- clear = true;
- }
- undo_action = RESTORE;
- ASSERT(!absolute);
- ASSERT_EQ(value, 0);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- }
- }
- // Prepare for the undo-action (e.g., push if it's going to be popped).
- if (undo_action == RESTORE) {
- pushes++;
- RegExpMacroAssembler::StackCheckFlag stack_check =
- RegExpMacroAssembler::kNoStackLimitCheck;
- if (pushes == push_limit) {
- stack_check = RegExpMacroAssembler::kCheckStackLimit;
- pushes = 0;
- }
-
- assembler->PushRegister(reg, stack_check);
- registers_to_pop->Set(reg, zone);
- } else if (undo_action == CLEAR) {
- registers_to_clear->Set(reg, zone);
- }
- // Perform the chronologically last action (or accumulated increment)
- // for the register.
- if (store_position != -1) {
- assembler->WriteCurrentPositionToRegister(reg, store_position);
- } else if (clear) {
- assembler->ClearRegisters(reg, reg);
- } else if (absolute) {
- assembler->SetRegister(reg, value);
- } else if (value != 0) {
- assembler->AdvanceRegister(reg, value);
- }
- }
-}
-
-
-// This is called as we come into a loop choice node and some other tricky
-// nodes. It normalizes the state of the code generator to ensure we can
-// generate generic code.
-void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
-
- ASSERT(!is_trivial());
-
- if (actions_ == NULL && backtrack() == NULL) {
- // Here we just have some deferred cp advances to fix and we are back to
- // a normal situation. We may also have to forget some information gained
- // through a quick check that was already performed.
- if (cp_offset_ != 0) assembler->AdvanceCurrentPosition(cp_offset_);
- // Create a new trivial state and generate the node with that.
- Trace new_state;
- successor->Emit(compiler, &new_state);
- return;
- }
-
- // Generate deferred actions here along with code to undo them again.
- OutSet affected_registers;
-
- if (backtrack() != NULL) {
- // Here we have a concrete backtrack location. These are set up by choice
- // nodes and so they indicate that we have a deferred save of the current
- // position which we may need to emit here.
- assembler->PushCurrentPosition();
- }
-
- int max_register = FindAffectedRegisters(&affected_registers,
- compiler->zone());
- OutSet registers_to_pop;
- OutSet registers_to_clear;
- PerformDeferredActions(assembler,
- max_register,
- affected_registers,
- &registers_to_pop,
- &registers_to_clear,
- compiler->zone());
- if (cp_offset_ != 0) {
- assembler->AdvanceCurrentPosition(cp_offset_);
- }
-
- // Create a new trivial state and generate the node with that.
- Label undo;
- assembler->PushBacktrack(&undo);
- Trace new_state;
- successor->Emit(compiler, &new_state);
-
- // On backtrack we need to restore state.
- assembler->Bind(&undo);
- RestoreAffectedRegisters(assembler,
- max_register,
- registers_to_pop,
- registers_to_clear);
- if (backtrack() == NULL) {
- assembler->Backtrack();
- } else {
- assembler->PopCurrentPosition();
- assembler->GoTo(backtrack());
- }
-}
-
-
-void NegativeSubmatchSuccess::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
-
- // Omit flushing the trace. We discard the entire stack frame anyway.
-
- if (!label()->is_bound()) {
- // We are completely independent of the trace, since we ignore it,
- // so this code can be used as the generic version.
- assembler->Bind(label());
- }
-
- // Throw away everything on the backtrack stack since the start
- // of the negative submatch and restore the character position.
- assembler->ReadCurrentPositionFromRegister(current_position_register_);
- assembler->ReadStackPointerFromRegister(stack_pointer_register_);
- if (clear_capture_count_ > 0) {
- // Clear any captures that might have been performed during the success
- // of the body of the negative look-ahead.
- int clear_capture_end = clear_capture_start_ + clear_capture_count_ - 1;
- assembler->ClearRegisters(clear_capture_start_, clear_capture_end);
- }
- // Now that we have unwound the stack we find at the top of the stack the
- // backtrack that the BeginSubmatch node got.
- assembler->Backtrack();
-}
-
-
-void EndNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- if (!label()->is_bound()) {
- assembler->Bind(label());
- }
- switch (action_) {
- case ACCEPT:
- assembler->Succeed();
- return;
- case BACKTRACK:
- assembler->GoTo(trace->backtrack());
- return;
- case NEGATIVE_SUBMATCH_SUCCESS:
- // This case is handled in a different virtual method.
- UNREACHABLE();
- }
- UNIMPLEMENTED();
-}
-
-
-void GuardedAlternative::AddGuard(Guard* guard, Zone* zone) {
- if (guards_ == NULL)
- guards_ = new(zone) ZoneList<Guard*>(1, zone);
- guards_->Add(guard, zone);
-}
-
-
-ActionNode* ActionNode::SetRegister(int reg,
- int val,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(SET_REGISTER, on_success);
- result->data_.u_store_register.reg = reg;
- result->data_.u_store_register.value = val;
- return result;
-}
-
-
-ActionNode* ActionNode::IncrementRegister(int reg, RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(INCREMENT_REGISTER, on_success);
- result->data_.u_increment_register.reg = reg;
- return result;
-}
-
-
-ActionNode* ActionNode::StorePosition(int reg,
- bool is_capture,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(STORE_POSITION, on_success);
- result->data_.u_position_register.reg = reg;
- result->data_.u_position_register.is_capture = is_capture;
- return result;
-}
-
-
-ActionNode* ActionNode::ClearCaptures(Interval range,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(CLEAR_CAPTURES, on_success);
- result->data_.u_clear_captures.range_from = range.from();
- result->data_.u_clear_captures.range_to = range.to();
- return result;
-}
-
-
-ActionNode* ActionNode::BeginSubmatch(int stack_reg,
- int position_reg,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(BEGIN_SUBMATCH, on_success);
- result->data_.u_submatch.stack_pointer_register = stack_reg;
- result->data_.u_submatch.current_position_register = position_reg;
- return result;
-}
-
-
-ActionNode* ActionNode::PositiveSubmatchSuccess(int stack_reg,
- int position_reg,
- int clear_register_count,
- int clear_register_from,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(POSITIVE_SUBMATCH_SUCCESS, on_success);
- result->data_.u_submatch.stack_pointer_register = stack_reg;
- result->data_.u_submatch.current_position_register = position_reg;
- result->data_.u_submatch.clear_register_count = clear_register_count;
- result->data_.u_submatch.clear_register_from = clear_register_from;
- return result;
-}
-
-
-ActionNode* ActionNode::EmptyMatchCheck(int start_register,
- int repetition_register,
- int repetition_limit,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(EMPTY_MATCH_CHECK, on_success);
- result->data_.u_empty_match_check.start_register = start_register;
- result->data_.u_empty_match_check.repetition_register = repetition_register;
- result->data_.u_empty_match_check.repetition_limit = repetition_limit;
- return result;
-}
-
-
-#define DEFINE_ACCEPT(Type) \
- void Type##Node::Accept(NodeVisitor* visitor) { \
- visitor->Visit##Type(this); \
- }
-FOR_EACH_NODE_TYPE(DEFINE_ACCEPT)
-#undef DEFINE_ACCEPT
-
-
-void LoopChoiceNode::Accept(NodeVisitor* visitor) {
- visitor->VisitLoopChoice(this);
-}
-
-
-// -------------------------------------------------------------------
-// Emit code.
-
-
-void ChoiceNode::GenerateGuard(RegExpMacroAssembler* macro_assembler,
- Guard* guard,
- Trace* trace) {
- switch (guard->op()) {
- case Guard::LT:
- ASSERT(!trace->mentions_reg(guard->reg()));
- macro_assembler->IfRegisterGE(guard->reg(),
- guard->value(),
- trace->backtrack());
- break;
- case Guard::GEQ:
- ASSERT(!trace->mentions_reg(guard->reg()));
- macro_assembler->IfRegisterLT(guard->reg(),
- guard->value(),
- trace->backtrack());
- break;
- }
-}
-
-
-// Returns the number of characters in the equivalence class, omitting those
-// that cannot occur in the source string because it is ASCII.
-static int GetCaseIndependentLetters(Isolate* isolate,
- uc16 character,
- bool ascii_subject,
- unibrow::uchar* letters) {
- int length =
- isolate->jsregexp_uncanonicalize()->get(character, '\0', letters);
- // Unibrow returns 0 or 1 for characters where case independence is
- // trivial.
- if (length == 0) {
- letters[0] = character;
- length = 1;
- }
- if (!ascii_subject || character <= String::kMaxOneByteCharCode) {
- return length;
- }
- // The standard requires that non-ASCII characters cannot have ASCII
- // character codes in their equivalence class.
- return 0;
-}
-
-
-static inline bool EmitSimpleCharacter(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- bool bound_checked = false;
- if (!preloaded) {
- assembler->LoadCurrentCharacter(
- cp_offset,
- on_failure,
- check);
- bound_checked = true;
- }
- assembler->CheckNotCharacter(c, on_failure);
- return bound_checked;
-}
-
-
-// Only emits non-letters (things that don't have case). Only used for case
-// independent matches.
-static inline bool EmitAtomNonLetter(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- bool ascii = compiler->ascii();
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(isolate, c, ascii, chars);
- if (length < 1) {
- // This can't match. Must be an ASCII subject and a non-ASCII character.
- // We do not need to do anything since the ASCII pass already handled this.
- return false; // Bounds not checked.
- }
- bool checked = false;
- // We handle the length > 1 case in a later pass.
- if (length == 1) {
- if (ascii && c > String::kMaxOneByteCharCodeU) {
- // Can't match - see above.
- return false; // Bounds not checked.
- }
- if (!preloaded) {
- macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
- checked = check;
- }
- macro_assembler->CheckNotCharacter(c, on_failure);
- }
- return checked;
-}
-
-
-static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
- bool ascii,
- uc16 c1,
- uc16 c2,
- Label* on_failure) {
- uc16 char_mask;
- if (ascii) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
- uc16 exor = c1 ^ c2;
- // Check whether exor has only one bit set.
- if (((exor - 1) & exor) == 0) {
- // If c1 and c2 differ only by one bit.
- // Ecma262UnCanonicalize always gives the highest number last.
- ASSERT(c2 > c1);
- uc16 mask = char_mask ^ exor;
- macro_assembler->CheckNotCharacterAfterAnd(c1, mask, on_failure);
- return true;
- }
- ASSERT(c2 > c1);
- uc16 diff = c2 - c1;
- if (((diff - 1) & diff) == 0 && c1 >= diff) {
- // If the characters differ by 2^n but don't differ by one bit then
- // subtract the difference from the found character, then do the or
- // trick. We avoid the theoretical case where negative numbers are
- // involved in order to simplify code generation.
- uc16 mask = char_mask ^ diff;
- macro_assembler->CheckNotCharacterAfterMinusAnd(c1 - diff,
- diff,
- mask,
- on_failure);
- return true;
- }
- return false;
-}
-
-
-typedef bool EmitCharacterFunction(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded);
-
-// Only emits letters (things that have case). Only used for case independent
-// matches.
-static inline bool EmitAtomLetter(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- bool ascii = compiler->ascii();
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(isolate, c, ascii, chars);
- if (length <= 1) return false;
- // We may not need to check against the end of the input string
- // if this character lies before a character that matched.
- if (!preloaded) {
- macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
- }
- Label ok;
- ASSERT(unibrow::Ecma262UnCanonicalize::kMaxWidth == 4);
- switch (length) {
- case 2: {
- if (ShortCutEmitCharacterPair(macro_assembler,
- ascii,
- chars[0],
- chars[1],
- on_failure)) {
- } else {
- macro_assembler->CheckCharacter(chars[0], &ok);
- macro_assembler->CheckNotCharacter(chars[1], on_failure);
- macro_assembler->Bind(&ok);
- }
- break;
- }
- case 4:
- macro_assembler->CheckCharacter(chars[3], &ok);
- // Fall through!
- case 3:
- macro_assembler->CheckCharacter(chars[0], &ok);
- macro_assembler->CheckCharacter(chars[1], &ok);
- macro_assembler->CheckNotCharacter(chars[2], on_failure);
- macro_assembler->Bind(&ok);
- break;
- default:
- UNREACHABLE();
- break;
- }
- return true;
-}
-
-
-static void EmitBoundaryTest(RegExpMacroAssembler* masm,
- int border,
- Label* fall_through,
- Label* above_or_equal,
- Label* below) {
- if (below != fall_through) {
- masm->CheckCharacterLT(border, below);
- if (above_or_equal != fall_through) masm->GoTo(above_or_equal);
- } else {
- masm->CheckCharacterGT(border - 1, above_or_equal);
- }
-}
-
-
-static void EmitDoubleBoundaryTest(RegExpMacroAssembler* masm,
- int first,
- int last,
- Label* fall_through,
- Label* in_range,
- Label* out_of_range) {
- if (in_range == fall_through) {
- if (first == last) {
- masm->CheckNotCharacter(first, out_of_range);
- } else {
- masm->CheckCharacterNotInRange(first, last, out_of_range);
- }
- } else {
- if (first == last) {
- masm->CheckCharacter(first, in_range);
- } else {
- masm->CheckCharacterInRange(first, last, in_range);
- }
- if (out_of_range != fall_through) masm->GoTo(out_of_range);
- }
-}
-
-
-// even_label is for ranges[i] to ranges[i + 1] where i - start_index is even.
-// odd_label is for ranges[i] to ranges[i + 1] where i - start_index is odd.
-static void EmitUseLookupTable(
- RegExpMacroAssembler* masm,
- ZoneList<int>* ranges,
- int start_index,
- int end_index,
- int min_char,
- Label* fall_through,
- Label* even_label,
- Label* odd_label) {
- static const int kSize = RegExpMacroAssembler::kTableSize;
- static const int kMask = RegExpMacroAssembler::kTableMask;
-
- int base = (min_char & ~kMask);
- USE(base);
-
- // Assert that everything is on one kTableSize page.
- for (int i = start_index; i <= end_index; i++) {
- ASSERT_EQ(ranges->at(i) & ~kMask, base);
- }
- ASSERT(start_index == 0 || (ranges->at(start_index - 1) & ~kMask) <= base);
-
- char templ[kSize];
- Label* on_bit_set;
- Label* on_bit_clear;
- int bit;
- if (even_label == fall_through) {
- on_bit_set = odd_label;
- on_bit_clear = even_label;
- bit = 1;
- } else {
- on_bit_set = even_label;
- on_bit_clear = odd_label;
- bit = 0;
- }
- for (int i = 0; i < (ranges->at(start_index) & kMask) && i < kSize; i++) {
- templ[i] = bit;
- }
- int j = 0;
- bit ^= 1;
- for (int i = start_index; i < end_index; i++) {
- for (j = (ranges->at(i) & kMask); j < (ranges->at(i + 1) & kMask); j++) {
- templ[j] = bit;
- }
- bit ^= 1;
- }
- for (int i = j; i < kSize; i++) {
- templ[i] = bit;
- }
- // TODO(erikcorry): Cache these.
- Handle<ByteArray> ba = FACTORY->NewByteArray(kSize, TENURED);
- for (int i = 0; i < kSize; i++) {
- ba->set(i, templ[i]);
- }
- masm->CheckBitInTable(ba, on_bit_set);
- if (on_bit_clear != fall_through) masm->GoTo(on_bit_clear);
-}
-
-
-static void CutOutRange(RegExpMacroAssembler* masm,
- ZoneList<int>* ranges,
- int start_index,
- int end_index,
- int cut_index,
- Label* even_label,
- Label* odd_label) {
- bool odd = (((cut_index - start_index) & 1) == 1);
- Label* in_range_label = odd ? odd_label : even_label;
- Label dummy;
- EmitDoubleBoundaryTest(masm,
- ranges->at(cut_index),
- ranges->at(cut_index + 1) - 1,
- &dummy,
- in_range_label,
- &dummy);
- ASSERT(!dummy.is_linked());
- // Cut out the single range by rewriting the array. This creates a new
- // range that is a merger of the two ranges on either side of the one we
- // are cutting out. The oddity of the labels is preserved.
- for (int j = cut_index; j > start_index; j--) {
- ranges->at(j) = ranges->at(j - 1);
- }
- for (int j = cut_index + 1; j < end_index; j++) {
- ranges->at(j) = ranges->at(j + 1);
- }
-}
-
-
-// Unicode case. Split the search space into kSize spaces that are handled
-// with recursion.
-static void SplitSearchSpace(ZoneList<int>* ranges,
- int start_index,
- int end_index,
- int* new_start_index,
- int* new_end_index,
- int* border) {
- static const int kSize = RegExpMacroAssembler::kTableSize;
- static const int kMask = RegExpMacroAssembler::kTableMask;
-
- int first = ranges->at(start_index);
- int last = ranges->at(end_index) - 1;
-
- *new_start_index = start_index;
- *border = (ranges->at(start_index) & ~kMask) + kSize;
- while (*new_start_index < end_index) {
- if (ranges->at(*new_start_index) > *border) break;
- (*new_start_index)++;
- }
- // new_start_index is the index of the first edge that is beyond the
- // current kSize space.
-
- // For very large search spaces we do a binary chop search of the non-ASCII
- // space instead of just going to the end of the current kSize space. The
- // heuristics are complicated a little by the fact that any 128-character
- // encoding space can be quickly tested with a table lookup, so we don't
- // wish to do binary chop search at a smaller granularity than that. A
- // 128-character space can take up a lot of space in the ranges array if,
- // for example, we only want to match every second character (eg. the lower
- // case characters on some Unicode pages).
- int binary_chop_index = (end_index + start_index) / 2;
- // The first test ensures that we get to the code that handles the ASCII
- // range with a single not-taken branch, speeding up this important
- // character range (even non-ASCII charset-based text has spaces and
- // punctuation).
- if (*border - 1 > String::kMaxOneByteCharCode && // ASCII case.
- end_index - start_index > (*new_start_index - start_index) * 2 &&
- last - first > kSize * 2 &&
- binary_chop_index > *new_start_index &&
- ranges->at(binary_chop_index) >= first + 2 * kSize) {
- int scan_forward_for_section_border = binary_chop_index;;
- int new_border = (ranges->at(binary_chop_index) | kMask) + 1;
-
- while (scan_forward_for_section_border < end_index) {
- if (ranges->at(scan_forward_for_section_border) > new_border) {
- *new_start_index = scan_forward_for_section_border;
- *border = new_border;
- break;
- }
- scan_forward_for_section_border++;
- }
- }
-
- ASSERT(*new_start_index > start_index);
- *new_end_index = *new_start_index - 1;
- if (ranges->at(*new_end_index) == *border) {
- (*new_end_index)--;
- }
- if (*border >= ranges->at(end_index)) {
- *border = ranges->at(end_index);
- *new_start_index = end_index; // Won't be used.
- *new_end_index = end_index - 1;
- }
-}
-
-
-// Gets a series of segment boundaries representing a character class. If the
-// character is in the range between an even and an odd boundary (counting from
-// start_index) then go to even_label, otherwise go to odd_label. We already
-// know that the character is in the range of min_char to max_char inclusive.
-// Either label can be NULL indicating backtracking. Either label can also be
-// equal to the fall_through label.
-static void GenerateBranches(RegExpMacroAssembler* masm,
- ZoneList<int>* ranges,
- int start_index,
- int end_index,
- uc16 min_char,
- uc16 max_char,
- Label* fall_through,
- Label* even_label,
- Label* odd_label) {
- int first = ranges->at(start_index);
- int last = ranges->at(end_index) - 1;
-
- ASSERT_LT(min_char, first);
-
- // Just need to test if the character is before or on-or-after
- // a particular character.
- if (start_index == end_index) {
- EmitBoundaryTest(masm, first, fall_through, even_label, odd_label);
- return;
- }
-
- // Another almost trivial case: There is one interval in the middle that is
- // different from the end intervals.
- if (start_index + 1 == end_index) {
- EmitDoubleBoundaryTest(
- masm, first, last, fall_through, even_label, odd_label);
- return;
- }
-
- // It's not worth using table lookup if there are very few intervals in the
- // character class.
- if (end_index - start_index <= 6) {
- // It is faster to test for individual characters, so we look for those
- // first, then try arbitrary ranges in the second round.
- static int kNoCutIndex = -1;
- int cut = kNoCutIndex;
- for (int i = start_index; i < end_index; i++) {
- if (ranges->at(i) == ranges->at(i + 1) - 1) {
- cut = i;
- break;
- }
- }
- if (cut == kNoCutIndex) cut = start_index;
- CutOutRange(
- masm, ranges, start_index, end_index, cut, even_label, odd_label);
- ASSERT_GE(end_index - start_index, 2);
- GenerateBranches(masm,
- ranges,
- start_index + 1,
- end_index - 1,
- min_char,
- max_char,
- fall_through,
- even_label,
- odd_label);
- return;
- }
-
- // If there are a lot of intervals in the regexp, then we will use tables to
- // determine whether the character is inside or outside the character class.
- static const int kBits = RegExpMacroAssembler::kTableSizeBits;
-
- if ((max_char >> kBits) == (min_char >> kBits)) {
- EmitUseLookupTable(masm,
- ranges,
- start_index,
- end_index,
- min_char,
- fall_through,
- even_label,
- odd_label);
- return;
- }
-
- if ((min_char >> kBits) != (first >> kBits)) {
- masm->CheckCharacterLT(first, odd_label);
- GenerateBranches(masm,
- ranges,
- start_index + 1,
- end_index,
- first,
- max_char,
- fall_through,
- odd_label,
- even_label);
- return;
- }
-
- int new_start_index = 0;
- int new_end_index = 0;
- int border = 0;
-
- SplitSearchSpace(ranges,
- start_index,
- end_index,
- &new_start_index,
- &new_end_index,
- &border);
-
- Label handle_rest;
- Label* above = &handle_rest;
- if (border == last + 1) {
- // We didn't find any section that started after the limit, so everything
- // above the border is one of the terminal labels.
- above = (end_index & 1) != (start_index & 1) ? odd_label : even_label;
- ASSERT(new_end_index == end_index - 1);
- }
-
- ASSERT_LE(start_index, new_end_index);
- ASSERT_LE(new_start_index, end_index);
- ASSERT_LT(start_index, new_start_index);
- ASSERT_LT(new_end_index, end_index);
- ASSERT(new_end_index + 1 == new_start_index ||
- (new_end_index + 2 == new_start_index &&
- border == ranges->at(new_end_index + 1)));
- ASSERT_LT(min_char, border - 1);
- ASSERT_LT(border, max_char);
- ASSERT_LT(ranges->at(new_end_index), border);
- ASSERT(border < ranges->at(new_start_index) ||
- (border == ranges->at(new_start_index) &&
- new_start_index == end_index &&
- new_end_index == end_index - 1 &&
- border == last + 1));
- ASSERT(new_start_index == 0 || border >= ranges->at(new_start_index - 1));
-
- masm->CheckCharacterGT(border - 1, above);
- Label dummy;
- GenerateBranches(masm,
- ranges,
- start_index,
- new_end_index,
- min_char,
- border - 1,
- &dummy,
- even_label,
- odd_label);
- if (handle_rest.is_linked()) {
- masm->Bind(&handle_rest);
- bool flip = (new_start_index & 1) != (start_index & 1);
- GenerateBranches(masm,
- ranges,
- new_start_index,
- end_index,
- border,
- max_char,
- &dummy,
- flip ? odd_label : even_label,
- flip ? even_label : odd_label);
- }
-}
-
-
-static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
- RegExpCharacterClass* cc,
- bool ascii,
- Label* on_failure,
- int cp_offset,
- bool check_offset,
- bool preloaded,
- Zone* zone) {
- ZoneList<CharacterRange>* ranges = cc->ranges(zone);
- if (!CharacterRange::IsCanonical(ranges)) {
- CharacterRange::Canonicalize(ranges);
- }
-
- int max_char;
- if (ascii) {
- max_char = String::kMaxOneByteCharCode;
- } else {
- max_char = String::kMaxUtf16CodeUnit;
- }
-
- int range_count = ranges->length();
-
- int last_valid_range = range_count - 1;
- while (last_valid_range >= 0) {
- CharacterRange& range = ranges->at(last_valid_range);
- if (range.from() <= max_char) {
- break;
- }
- last_valid_range--;
- }
-
- if (last_valid_range < 0) {
- if (!cc->is_negated()) {
- macro_assembler->GoTo(on_failure);
- }
- if (check_offset) {
- macro_assembler->CheckPosition(cp_offset, on_failure);
- }
- return;
- }
-
- if (last_valid_range == 0 &&
- ranges->at(0).IsEverything(max_char)) {
- if (cc->is_negated()) {
- macro_assembler->GoTo(on_failure);
- } else {
- // This is a common case hit by non-anchored expressions.
- if (check_offset) {
- macro_assembler->CheckPosition(cp_offset, on_failure);
- }
- }
- return;
- }
- if (last_valid_range == 0 &&
- !cc->is_negated() &&
- ranges->at(0).IsEverything(max_char)) {
- // This is a common case hit by non-anchored expressions.
- if (check_offset) {
- macro_assembler->CheckPosition(cp_offset, on_failure);
- }
- return;
- }
-
- if (!preloaded) {
- macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
- }
-
- if (cc->is_standard(zone) &&
- macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
- on_failure)) {
- return;
- }
-
-
- // A new list with ascending entries. Each entry is a code unit
- // where there is a boundary between code units that are part of
- // the class and code units that are not. Normally we insert an
- // entry at zero which goes to the failure label, but if there
- // was already one there we fall through for success on that entry.
- // Subsequent entries have alternating meaning (success/failure).
- ZoneList<int>* range_boundaries =
- new(zone) ZoneList<int>(last_valid_range, zone);
-
- bool zeroth_entry_is_failure = !cc->is_negated();
-
- for (int i = 0; i <= last_valid_range; i++) {
- CharacterRange& range = ranges->at(i);
- if (range.from() == 0) {
- ASSERT_EQ(i, 0);
- zeroth_entry_is_failure = !zeroth_entry_is_failure;
- } else {
- range_boundaries->Add(range.from(), zone);
- }
- range_boundaries->Add(range.to() + 1, zone);
- }
- int end_index = range_boundaries->length() - 1;
- if (range_boundaries->at(end_index) > max_char) {
- end_index--;
- }
-
- Label fall_through;
- GenerateBranches(macro_assembler,
- range_boundaries,
- 0, // start_index.
- end_index,
- 0, // min_char.
- max_char,
- &fall_through,
- zeroth_entry_is_failure ? &fall_through : on_failure,
- zeroth_entry_is_failure ? on_failure : &fall_through);
- macro_assembler->Bind(&fall_through);
-}
-
-
-RegExpNode::~RegExpNode() {
-}
-
-
-RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler,
- Trace* trace) {
- // If we are generating a greedy loop then don't stop and don't reuse code.
- if (trace->stop_node() != NULL) {
- return CONTINUE;
- }
-
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- if (trace->is_trivial()) {
- if (label_.is_bound()) {
- // We are being asked to generate a generic version, but that's already
- // been done so just go to it.
- macro_assembler->GoTo(&label_);
- return DONE;
- }
- if (compiler->recursion_depth() >= RegExpCompiler::kMaxRecursion) {
- // To avoid too deep recursion we push the node to the work queue and just
- // generate a goto here.
- compiler->AddWork(this);
- macro_assembler->GoTo(&label_);
- return DONE;
- }
- // Generate generic version of the node and bind the label for later use.
- macro_assembler->Bind(&label_);
- return CONTINUE;
- }
-
- // We are being asked to make a non-generic version. Keep track of how many
- // non-generic versions we generate so as not to overdo it.
- trace_count_++;
- if (FLAG_regexp_optimization &&
- trace_count_ < kMaxCopiesCodeGenerated &&
- compiler->recursion_depth() <= RegExpCompiler::kMaxRecursion) {
- return CONTINUE;
- }
-
- // If we get here code has been generated for this node too many times or
- // recursion is too deep. Time to switch to a generic version. The code for
- // generic versions above can handle deep recursion properly.
- trace->Flush(compiler, this);
- return DONE;
-}
-
-
-int ActionNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- if (budget <= 0) return 0;
- if (type_ == POSITIVE_SUBMATCH_SUCCESS) return 0; // Rewinds input!
- return on_success()->EatsAtLeast(still_to_find,
- budget - 1,
- not_at_start);
-}
-
-
-void ActionNode::FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- if (type_ == BEGIN_SUBMATCH) {
- bm->SetRest(offset);
- } else if (type_ != POSITIVE_SUBMATCH_SUCCESS) {
- on_success()->FillInBMInfo(offset, budget - 1, bm, not_at_start);
- }
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-int AssertionNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- if (budget <= 0) return 0;
- // If we know we are not at the start and we are asked "how many characters
- // will you match if you succeed?" then we can answer anything since false
- // implies false. So lets just return the max answer (still_to_find) since
- // that won't prevent us from preloading a lot of characters for the other
- // branches in the node graph.
- if (type() == AT_START && not_at_start) return still_to_find;
- return on_success()->EatsAtLeast(still_to_find,
- budget - 1,
- not_at_start);
-}
-
-
-void AssertionNode::FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- // Match the behaviour of EatsAtLeast on this node.
- if (type() == AT_START && not_at_start) return;
- on_success()->FillInBMInfo(offset, budget - 1, bm, not_at_start);
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-int BackReferenceNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- if (budget <= 0) return 0;
- return on_success()->EatsAtLeast(still_to_find,
- budget - 1,
- not_at_start);
-}
-
-
-int TextNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- int answer = Length();
- if (answer >= still_to_find) return answer;
- if (budget <= 0) return answer;
- // We are not at start after this node so we set the last argument to 'true'.
- return answer + on_success()->EatsAtLeast(still_to_find - answer,
- budget - 1,
- true);
-}
-
-
-int NegativeLookaheadChoiceNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- if (budget <= 0) return 0;
- // Alternative 0 is the negative lookahead, alternative 1 is what comes
- // afterwards.
- RegExpNode* node = alternatives_->at(1).node();
- return node->EatsAtLeast(still_to_find, budget - 1, not_at_start);
-}
-
-
-void NegativeLookaheadChoiceNode::GetQuickCheckDetails(
- QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
- bool not_at_start) {
- // Alternative 0 is the negative lookahead, alternative 1 is what comes
- // afterwards.
- RegExpNode* node = alternatives_->at(1).node();
- return node->GetQuickCheckDetails(details, compiler, filled_in, not_at_start);
-}
-
-
-int ChoiceNode::EatsAtLeastHelper(int still_to_find,
- int budget,
- RegExpNode* ignore_this_node,
- bool not_at_start) {
- if (budget <= 0) return 0;
- int min = 100;
- int choice_count = alternatives_->length();
- budget = (budget - 1) / choice_count;
- for (int i = 0; i < choice_count; i++) {
- RegExpNode* node = alternatives_->at(i).node();
- if (node == ignore_this_node) continue;
- int node_eats_at_least =
- node->EatsAtLeast(still_to_find, budget, not_at_start);
- if (node_eats_at_least < min) min = node_eats_at_least;
- if (min == 0) return 0;
- }
- return min;
-}
-
-
-int LoopChoiceNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- return EatsAtLeastHelper(still_to_find,
- budget - 1,
- loop_node_,
- not_at_start);
-}
-
-
-int ChoiceNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- return EatsAtLeastHelper(still_to_find,
- budget,
- NULL,
- not_at_start);
-}
-
-
-// Takes the left-most 1-bit and smears it out, setting all bits to its right.
-static inline uint32_t SmearBitsRight(uint32_t v) {
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- return v;
-}
-
-
-bool QuickCheckDetails::Rationalize(bool asc) {
- bool found_useful_op = false;
- uint32_t char_mask;
- if (asc) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
- mask_ = 0;
- value_ = 0;
- int char_shift = 0;
- for (int i = 0; i < characters_; i++) {
- Position* pos = &positions_[i];
- if ((pos->mask & String::kMaxOneByteCharCode) != 0) {
- found_useful_op = true;
- }
- mask_ |= (pos->mask & char_mask) << char_shift;
- value_ |= (pos->value & char_mask) << char_shift;
- char_shift += asc ? 8 : 16;
- }
- return found_useful_op;
-}
-
-
-bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
- Trace* trace,
- bool preload_has_checked_bounds,
- Label* on_possible_success,
- QuickCheckDetails* details,
- bool fall_through_on_failure) {
- if (details->characters() == 0) return false;
- GetQuickCheckDetails(details, compiler, 0, trace->at_start() == Trace::FALSE);
- if (details->cannot_match()) return false;
- if (!details->Rationalize(compiler->ascii())) return false;
- ASSERT(details->characters() == 1 ||
- compiler->macro_assembler()->CanReadUnaligned());
- uint32_t mask = details->mask();
- uint32_t value = details->value();
-
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
-
- if (trace->characters_preloaded() != details->characters()) {
- assembler->LoadCurrentCharacter(trace->cp_offset(),
- trace->backtrack(),
- !preload_has_checked_bounds,
- details->characters());
- }
-
-
- bool need_mask = true;
-
- if (details->characters() == 1) {
- // If number of characters preloaded is 1 then we used a byte or 16 bit
- // load so the value is already masked down.
- uint32_t char_mask;
- if (compiler->ascii()) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
- if ((mask & char_mask) == char_mask) need_mask = false;
- mask &= char_mask;
- } else {
- // For 2-character preloads in ASCII mode or 1-character preloads in
- // TWO_BYTE mode we also use a 16 bit load with zero extend.
- if (details->characters() == 2 && compiler->ascii()) {
-#ifndef ENABLE_LATIN_1
- if ((mask & 0x7f7f) == 0xffff) need_mask = false;
-#else
- if ((mask & 0xffff) == 0xffff) need_mask = false;
-#endif
- } else if (details->characters() == 1 && !compiler->ascii()) {
- if ((mask & 0xffff) == 0xffff) need_mask = false;
- } else {
- if (mask == 0xffffffff) need_mask = false;
- }
- }
-
- if (fall_through_on_failure) {
- if (need_mask) {
- assembler->CheckCharacterAfterAnd(value, mask, on_possible_success);
- } else {
- assembler->CheckCharacter(value, on_possible_success);
- }
- } else {
- if (need_mask) {
- assembler->CheckNotCharacterAfterAnd(value, mask, trace->backtrack());
- } else {
- assembler->CheckNotCharacter(value, trace->backtrack());
- }
- }
- return true;
-}
-
-
-// Here is the meat of GetQuickCheckDetails (see also the comment on the
-// super-class in the .h file).
-//
-// We iterate along the text object, building up for each character a
-// mask and value that can be used to test for a quick failure to match.
-// The masks and values for the positions will be combined into a single
-// machine word for the current character width in order to be used in
-// generating a quick check.
-void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- Isolate* isolate = Isolate::Current();
- ASSERT(characters_filled_in < details->characters());
- int characters = details->characters();
- int char_mask;
- if (compiler->ascii()) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
- for (int k = 0; k < elms_->length(); k++) {
- TextElement elm = elms_->at(k);
- if (elm.type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
- for (int i = 0; i < characters && i < quarks.length(); i++) {
- QuickCheckDetails::Position* pos =
- details->positions(characters_filled_in);
- uc16 c = quarks[i];
- if (c > char_mask) {
- // If we expect a non-ASCII character from an ASCII string,
- // there is no way we can match. Not even case independent
- // matching can turn an ASCII character into non-ASCII or
- // vice versa.
- details->set_cannot_match();
- pos->determines_perfectly = false;
- return;
- }
- if (compiler->ignore_case()) {
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(isolate, c, compiler->ascii(),
- chars);
- ASSERT(length != 0); // Can only happen if c > char_mask (see above).
- if (length == 1) {
- // This letter has no case equivalents, so it's nice and simple
- // and the mask-compare will determine definitely whether we have
- // a match at this character position.
- pos->mask = char_mask;
- pos->value = c;
- pos->determines_perfectly = true;
- } else {
- uint32_t common_bits = char_mask;
- uint32_t bits = chars[0];
- for (int j = 1; j < length; j++) {
- uint32_t differing_bits = ((chars[j] & common_bits) ^ bits);
- common_bits ^= differing_bits;
- bits &= common_bits;
- }
- // If length is 2 and common bits has only one zero in it then
- // our mask and compare instruction will determine definitely
- // whether we have a match at this character position. Otherwise
- // it can only be an approximate check.
- uint32_t one_zero = (common_bits | ~char_mask);
- if (length == 2 && ((~one_zero) & ((~one_zero) - 1)) == 0) {
- pos->determines_perfectly = true;
- }
- pos->mask = common_bits;
- pos->value = bits;
- }
- } else {
- // Don't ignore case. Nice simple case where the mask-compare will
- // determine definitely whether we have a match at this character
- // position.
- pos->mask = char_mask;
- pos->value = c;
- pos->determines_perfectly = true;
- }
- characters_filled_in++;
- ASSERT(characters_filled_in <= details->characters());
- if (characters_filled_in == details->characters()) {
- return;
- }
- }
- } else {
- QuickCheckDetails::Position* pos =
- details->positions(characters_filled_in);
- RegExpCharacterClass* tree = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = tree->ranges(zone());
- if (tree->is_negated()) {
- // A quick check uses multi-character mask and compare. There is no
- // useful way to incorporate a negative char class into this scheme
- // so we just conservatively create a mask and value that will always
- // succeed.
- pos->mask = 0;
- pos->value = 0;
- } else {
- int first_range = 0;
- while (ranges->at(first_range).from() > char_mask) {
- first_range++;
- if (first_range == ranges->length()) {
- details->set_cannot_match();
- pos->determines_perfectly = false;
- return;
- }
- }
- CharacterRange range = ranges->at(first_range);
- uc16 from = range.from();
- uc16 to = range.to();
- if (to > char_mask) {
- to = char_mask;
- }
- uint32_t differing_bits = (from ^ to);
- // A mask and compare is only perfect if the differing bits form a
- // number like 00011111 with one single block of trailing 1s.
- if ((differing_bits & (differing_bits + 1)) == 0 &&
- from + differing_bits == to) {
- pos->determines_perfectly = true;
- }
- uint32_t common_bits = ~SmearBitsRight(differing_bits);
- uint32_t bits = (from & common_bits);
- for (int i = first_range + 1; i < ranges->length(); i++) {
- CharacterRange range = ranges->at(i);
- uc16 from = range.from();
- uc16 to = range.to();
- if (from > char_mask) continue;
- if (to > char_mask) to = char_mask;
- // Here we are combining more ranges into the mask and compare
- // value. With each new range the mask becomes more sparse and
- // so the chances of a false positive rise. A character class
- // with multiple ranges is assumed never to be equivalent to a
- // mask and compare operation.
- pos->determines_perfectly = false;
- uint32_t new_common_bits = (from ^ to);
- new_common_bits = ~SmearBitsRight(new_common_bits);
- common_bits &= new_common_bits;
- bits &= new_common_bits;
- uint32_t differing_bits = (from & common_bits) ^ bits;
- common_bits ^= differing_bits;
- bits &= common_bits;
- }
- pos->mask = common_bits;
- pos->value = bits;
- }
- characters_filled_in++;
- ASSERT(characters_filled_in <= details->characters());
- if (characters_filled_in == details->characters()) {
- return;
- }
- }
- }
- ASSERT(characters_filled_in != details->characters());
- if (!details->cannot_match()) {
- on_success()-> GetQuickCheckDetails(details,
- compiler,
- characters_filled_in,
- true);
- }
-}
-
-
-void QuickCheckDetails::Clear() {
- for (int i = 0; i < characters_; i++) {
- positions_[i].mask = 0;
- positions_[i].value = 0;
- positions_[i].determines_perfectly = false;
- }
- characters_ = 0;
-}
-
-
-void QuickCheckDetails::Advance(int by, bool ascii) {
- ASSERT(by >= 0);
- if (by >= characters_) {
- Clear();
- return;
- }
- for (int i = 0; i < characters_ - by; i++) {
- positions_[i] = positions_[by + i];
- }
- for (int i = characters_ - by; i < characters_; i++) {
- positions_[i].mask = 0;
- positions_[i].value = 0;
- positions_[i].determines_perfectly = false;
- }
- characters_ -= by;
- // We could change mask_ and value_ here but we would never advance unless
- // they had already been used in a check and they won't be used again because
- // it would gain us nothing. So there's no point.
-}
-
-
-void QuickCheckDetails::Merge(QuickCheckDetails* other, int from_index) {
- ASSERT(characters_ == other->characters_);
- if (other->cannot_match_) {
- return;
- }
- if (cannot_match_) {
- *this = *other;
- return;
- }
- for (int i = from_index; i < characters_; i++) {
- QuickCheckDetails::Position* pos = positions(i);
- QuickCheckDetails::Position* other_pos = other->positions(i);
- if (pos->mask != other_pos->mask ||
- pos->value != other_pos->value ||
- !other_pos->determines_perfectly) {
- // Our mask-compare operation will be approximate unless we have the
- // exact same operation on both sides of the alternation.
- pos->determines_perfectly = false;
- }
- pos->mask &= other_pos->mask;
- pos->value &= pos->mask;
- other_pos->value &= pos->mask;
- uc16 differing_bits = (pos->value ^ other_pos->value);
- pos->mask &= ~differing_bits;
- pos->value &= pos->mask;
- }
-}
-
-
-class VisitMarker {
- public:
- explicit VisitMarker(NodeInfo* info) : info_(info) {
- ASSERT(!info->visited);
- info->visited = true;
- }
- ~VisitMarker() {
- info_->visited = false;
- }
- private:
- NodeInfo* info_;
-};
-
-
-RegExpNode* SeqRegExpNode::FilterASCII(int depth, bool ignore_case) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- ASSERT(!info()->visited);
- VisitMarker marker(info());
- return FilterSuccessor(depth - 1, ignore_case);
-}
-
-
-RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, bool ignore_case) {
- RegExpNode* next = on_success_->FilterASCII(depth - 1, ignore_case);
- if (next == NULL) return set_replacement(NULL);
- on_success_ = next;
- return set_replacement(this);
-}
-
-
-// We need to check for the following characters: 0x39c 0x3bc 0x178.
-static inline bool RangeContainsLatin1Equivalents(CharacterRange range) {
-#ifdef ENABLE_LATIN_1
- // TODO(dcarney): this could be a lot more efficient.
- return range.Contains(0x39c) ||
- range.Contains(0x3bc) || range.Contains(0x178);
-#else
- return false;
-#endif
-}
-
-
-#ifdef ENABLE_LATIN_1
-static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) {
- for (int i = 0; i < ranges->length(); i++) {
- // TODO(dcarney): this could be a lot more efficient.
- if (RangeContainsLatin1Equivalents(ranges->at(i))) return true;
- }
- return false;
-}
-#endif
-
-
-RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- ASSERT(!info()->visited);
- VisitMarker marker(info());
- int element_count = elms_->length();
- for (int i = 0; i < element_count; i++) {
- TextElement elm = elms_->at(i);
- if (elm.type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
- for (int j = 0; j < quarks.length(); j++) {
-#ifndef ENABLE_LATIN_1
- if (quarks[j] > String::kMaxOneByteCharCode) {
- return set_replacement(NULL);
- }
-#else
- uint16_t c = quarks[j];
- if (c <= String::kMaxOneByteCharCode) continue;
- if (!ignore_case) return set_replacement(NULL);
- // Here, we need to check for characters whose upper and lower cases
- // are outside the Latin-1 range.
- uint16_t converted = unibrow::Latin1::ConvertNonLatin1ToLatin1(c);
- // Character is outside Latin-1 completely
- if (converted == 0) return set_replacement(NULL);
- // Convert quark to Latin-1 in place.
- uint16_t* copy = const_cast<uint16_t*>(quarks.start());
- copy[j] = converted;
-#endif
- }
- } else {
- ASSERT(elm.type == TextElement::CHAR_CLASS);
- RegExpCharacterClass* cc = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = cc->ranges(zone());
- if (!CharacterRange::IsCanonical(ranges)) {
- CharacterRange::Canonicalize(ranges);
- }
- // Now they are in order so we only need to look at the first.
- int range_count = ranges->length();
- if (cc->is_negated()) {
- if (range_count != 0 &&
- ranges->at(0).from() == 0 &&
- ranges->at(0).to() >= String::kMaxOneByteCharCode) {
-#ifdef ENABLE_LATIN_1
- // This will be handled in a later filter.
- if (ignore_case && RangesContainLatin1Equivalents(ranges)) continue;
-#endif
- return set_replacement(NULL);
- }
- } else {
- if (range_count == 0 ||
- ranges->at(0).from() > String::kMaxOneByteCharCode) {
-#ifdef ENABLE_LATIN_1
- // This will be handled in a later filter.
- if (ignore_case && RangesContainLatin1Equivalents(ranges)) continue;
-#endif
- return set_replacement(NULL);
- }
- }
- }
- }
- return FilterSuccessor(depth - 1, ignore_case);
-}
-
-
-RegExpNode* LoopChoiceNode::FilterASCII(int depth, bool ignore_case) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- if (info()->visited) return this;
- {
- VisitMarker marker(info());
-
- RegExpNode* continue_replacement =
- continue_node_->FilterASCII(depth - 1, ignore_case);
- // If we can't continue after the loop then there is no sense in doing the
- // loop.
- if (continue_replacement == NULL) return set_replacement(NULL);
- }
-
- return ChoiceNode::FilterASCII(depth - 1, ignore_case);
-}
-
-
-RegExpNode* ChoiceNode::FilterASCII(int depth, bool ignore_case) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- if (info()->visited) return this;
- VisitMarker marker(info());
- int choice_count = alternatives_->length();
-
- for (int i = 0; i < choice_count; i++) {
- GuardedAlternative alternative = alternatives_->at(i);
- if (alternative.guards() != NULL && alternative.guards()->length() != 0) {
- set_replacement(this);
- return this;
- }
- }
-
- int surviving = 0;
- RegExpNode* survivor = NULL;
- for (int i = 0; i < choice_count; i++) {
- GuardedAlternative alternative = alternatives_->at(i);
- RegExpNode* replacement =
- alternative.node()->FilterASCII(depth - 1, ignore_case);
- ASSERT(replacement != this); // No missing EMPTY_MATCH_CHECK.
- if (replacement != NULL) {
- alternatives_->at(i).set_node(replacement);
- surviving++;
- survivor = replacement;
- }
- }
- if (surviving < 2) return set_replacement(survivor);
-
- set_replacement(this);
- if (surviving == choice_count) {
- return this;
- }
- // Only some of the nodes survived the filtering. We need to rebuild the
- // alternatives list.
- ZoneList<GuardedAlternative>* new_alternatives =
- new(zone()) ZoneList<GuardedAlternative>(surviving, zone());
- for (int i = 0; i < choice_count; i++) {
- RegExpNode* replacement =
- alternatives_->at(i).node()->FilterASCII(depth - 1, ignore_case);
- if (replacement != NULL) {
- alternatives_->at(i).set_node(replacement);
- new_alternatives->Add(alternatives_->at(i), zone());
- }
- }
- alternatives_ = new_alternatives;
- return this;
-}
-
-
-RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth,
- bool ignore_case) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- if (info()->visited) return this;
- VisitMarker marker(info());
- // Alternative 0 is the negative lookahead, alternative 1 is what comes
- // afterwards.
- RegExpNode* node = alternatives_->at(1).node();
- RegExpNode* replacement = node->FilterASCII(depth - 1, ignore_case);
- if (replacement == NULL) return set_replacement(NULL);
- alternatives_->at(1).set_node(replacement);
-
- RegExpNode* neg_node = alternatives_->at(0).node();
- RegExpNode* neg_replacement = neg_node->FilterASCII(depth - 1, ignore_case);
- // If the negative lookahead is always going to fail then
- // we don't need to check it.
- if (neg_replacement == NULL) return set_replacement(replacement);
- alternatives_->at(0).set_node(neg_replacement);
- return set_replacement(this);
-}
-
-
-void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- if (body_can_be_zero_length_ || info()->visited) return;
- VisitMarker marker(info());
- return ChoiceNode::GetQuickCheckDetails(details,
- compiler,
- characters_filled_in,
- not_at_start);
-}
-
-
-void LoopChoiceNode::FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- if (body_can_be_zero_length_ || budget <= 0) {
- bm->SetRest(offset);
- SaveBMInfo(bm, not_at_start, offset);
- return;
- }
- ChoiceNode::FillInBMInfo(offset, budget - 1, bm, not_at_start);
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-void ChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- not_at_start = (not_at_start || not_at_start_);
- int choice_count = alternatives_->length();
- ASSERT(choice_count > 0);
- alternatives_->at(0).node()->GetQuickCheckDetails(details,
- compiler,
- characters_filled_in,
- not_at_start);
- for (int i = 1; i < choice_count; i++) {
- QuickCheckDetails new_details(details->characters());
- RegExpNode* node = alternatives_->at(i).node();
- node->GetQuickCheckDetails(&new_details, compiler,
- characters_filled_in,
- not_at_start);
- // Here we merge the quick match details of the two branches.
- details->Merge(&new_details, characters_filled_in);
- }
-}
-
-
-// Check for [0-9A-Z_a-z].
-static void EmitWordCheck(RegExpMacroAssembler* assembler,
- Label* word,
- Label* non_word,
- bool fall_through_on_word) {
- if (assembler->CheckSpecialCharacterClass(
- fall_through_on_word ? 'w' : 'W',
- fall_through_on_word ? non_word : word)) {
- // Optimized implementation available.
- return;
- }
- assembler->CheckCharacterGT('z', non_word);
- assembler->CheckCharacterLT('0', non_word);
- assembler->CheckCharacterGT('a' - 1, word);
- assembler->CheckCharacterLT('9' + 1, word);
- assembler->CheckCharacterLT('A', non_word);
- assembler->CheckCharacterLT('Z' + 1, word);
- if (fall_through_on_word) {
- assembler->CheckNotCharacter('_', non_word);
- } else {
- assembler->CheckCharacter('_', word);
- }
-}
-
-
-// Emit the code to check for a ^ in multiline mode (1-character lookbehind
-// that matches newline or the start of input).
-static void EmitHat(RegExpCompiler* compiler,
- RegExpNode* on_success,
- Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- // We will be loading the previous character into the current character
- // register.
- Trace new_trace(*trace);
- new_trace.InvalidateCurrentCharacter();
-
- Label ok;
- if (new_trace.cp_offset() == 0) {
- // The start of input counts as a newline in this context, so skip to
- // ok if we are at the start.
- assembler->CheckAtStart(&ok);
- }
- // We already checked that we are not at the start of input so it must be
- // OK to load the previous character.
- assembler->LoadCurrentCharacter(new_trace.cp_offset() -1,
- new_trace.backtrack(),
- false);
- if (!assembler->CheckSpecialCharacterClass('n',
- new_trace.backtrack())) {
- // Newline means \n, \r, 0x2028 or 0x2029.
- if (!compiler->ascii()) {
- assembler->CheckCharacterAfterAnd(0x2028, 0xfffe, &ok);
- }
- assembler->CheckCharacter('\n', &ok);
- assembler->CheckNotCharacter('\r', new_trace.backtrack());
- }
- assembler->Bind(&ok);
- on_success->Emit(compiler, &new_trace);
-}
-
-
-// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
-void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- Trace::TriBool next_is_word_character = Trace::UNKNOWN;
- bool not_at_start = (trace->at_start() == Trace::FALSE);
- BoyerMooreLookahead* lookahead = bm_info(not_at_start);
- if (lookahead == NULL) {
- int eats_at_least =
- Min(kMaxLookaheadForBoyerMoore, EatsAtLeast(kMaxLookaheadForBoyerMoore,
- kRecursionBudget,
- not_at_start));
- if (eats_at_least >= 1) {
- BoyerMooreLookahead* bm =
- new(zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
- FillInBMInfo(0, kRecursionBudget, bm, not_at_start);
- if (bm->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
- if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE;
- }
- } else {
- if (lookahead->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
- if (lookahead->at(0)->is_word()) next_is_word_character = Trace::TRUE;
- }
- bool at_boundary = (type_ == AssertionNode::AT_BOUNDARY);
- if (next_is_word_character == Trace::UNKNOWN) {
- Label before_non_word;
- Label before_word;
- if (trace->characters_preloaded() != 1) {
- assembler->LoadCurrentCharacter(trace->cp_offset(), &before_non_word);
- }
- // Fall through on non-word.
- EmitWordCheck(assembler, &before_word, &before_non_word, false);
- // Next character is not a word character.
- assembler->Bind(&before_non_word);
- Label ok;
- BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
- assembler->GoTo(&ok);
-
- assembler->Bind(&before_word);
- BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
- assembler->Bind(&ok);
- } else if (next_is_word_character == Trace::TRUE) {
- BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
- } else {
- ASSERT(next_is_word_character == Trace::FALSE);
- BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
- }
-}
-
-
-void AssertionNode::BacktrackIfPrevious(
- RegExpCompiler* compiler,
- Trace* trace,
- AssertionNode::IfPrevious backtrack_if_previous) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- Trace new_trace(*trace);
- new_trace.InvalidateCurrentCharacter();
-
- Label fall_through, dummy;
-
- Label* non_word = backtrack_if_previous == kIsNonWord ?
- new_trace.backtrack() :
- &fall_through;
- Label* word = backtrack_if_previous == kIsNonWord ?
- &fall_through :
- new_trace.backtrack();
-
- if (new_trace.cp_offset() == 0) {
- // The start of input counts as a non-word character, so the question is
- // decided if we are at the start.
- assembler->CheckAtStart(non_word);
- }
- // We already checked that we are not at the start of input so it must be
- // OK to load the previous character.
- assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1, &dummy, false);
- EmitWordCheck(assembler, word, non_word, backtrack_if_previous == kIsNonWord);
-
- assembler->Bind(&fall_through);
- on_success()->Emit(compiler, &new_trace);
-}
-
-
-void AssertionNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
- bool not_at_start) {
- if (type_ == AT_START && not_at_start) {
- details->set_cannot_match();
- return;
- }
- return on_success()->GetQuickCheckDetails(details,
- compiler,
- filled_in,
- not_at_start);
-}
-
-
-void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- switch (type_) {
- case AT_END: {
- Label ok;
- assembler->CheckPosition(trace->cp_offset(), &ok);
- assembler->GoTo(trace->backtrack());
- assembler->Bind(&ok);
- break;
- }
- case AT_START: {
- if (trace->at_start() == Trace::FALSE) {
- assembler->GoTo(trace->backtrack());
- return;
- }
- if (trace->at_start() == Trace::UNKNOWN) {
- assembler->CheckNotAtStart(trace->backtrack());
- Trace at_start_trace = *trace;
- at_start_trace.set_at_start(true);
- on_success()->Emit(compiler, &at_start_trace);
- return;
- }
- }
- break;
- case AFTER_NEWLINE:
- EmitHat(compiler, on_success(), trace);
- return;
- case AT_BOUNDARY:
- case AT_NON_BOUNDARY: {
- EmitBoundaryCheck(compiler, trace);
- return;
- }
- }
- on_success()->Emit(compiler, trace);
-}
-
-
-static bool DeterminedAlready(QuickCheckDetails* quick_check, int offset) {
- if (quick_check == NULL) return false;
- if (offset >= quick_check->characters()) return false;
- return quick_check->positions(offset)->determines_perfectly;
-}
-
-
-static void UpdateBoundsCheck(int index, int* checked_up_to) {
- if (index > *checked_up_to) {
- *checked_up_to = index;
- }
-}
-
-
-// We call this repeatedly to generate code for each pass over the text node.
-// The passes are in increasing order of difficulty because we hope one
-// of the first passes will fail in which case we are saved the work of the
-// later passes. for example for the case independent regexp /%[asdfghjkl]a/
-// we will check the '%' in the first pass, the case independent 'a' in the
-// second pass and the character class in the last pass.
-//
-// The passes are done from right to left, so for example to test for /bar/
-// we will first test for an 'r' with offset 2, then an 'a' with offset 1
-// and then a 'b' with offset 0. This means we can avoid the end-of-input
-// bounds check most of the time. In the example we only need to check for
-// end-of-input when loading the putative 'r'.
-//
-// A slight complication involves the fact that the first character may already
-// be fetched into a register by the previous node. In this case we want to
-// do the test for that character first. We do this in separate passes. The
-// 'preloaded' argument indicates that we are doing such a 'pass'. If such a
-// pass has been performed then subsequent passes will have true in
-// first_element_checked to indicate that that character does not need to be
-// checked again.
-//
-// In addition to all this we are passed a Trace, which can
-// contain an AlternativeGeneration object. In this AlternativeGeneration
-// object we can see details of any quick check that was already passed in
-// order to get to the code we are now generating. The quick check can involve
-// loading characters, which means we do not need to recheck the bounds
-// up to the limit the quick check already checked. In addition the quick
-// check can have involved a mask and compare operation which may simplify
-// or obviate the need for further checks at some character positions.
-void TextNode::TextEmitPass(RegExpCompiler* compiler,
- TextEmitPassType pass,
- bool preloaded,
- Trace* trace,
- bool first_element_checked,
- int* checked_up_to) {
- Isolate* isolate = Isolate::Current();
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- bool ascii = compiler->ascii();
- Label* backtrack = trace->backtrack();
- QuickCheckDetails* quick_check = trace->quick_check_performed();
- int element_count = elms_->length();
- for (int i = preloaded ? 0 : element_count - 1; i >= 0; i--) {
- TextElement elm = elms_->at(i);
- int cp_offset = trace->cp_offset() + elm.cp_offset;
- if (elm.type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
- for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
- if (first_element_checked && i == 0 && j == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset + j)) continue;
- EmitCharacterFunction* emit_function = NULL;
- switch (pass) {
- case NON_ASCII_MATCH:
- ASSERT(ascii);
- if (quarks[j] > String::kMaxOneByteCharCode) {
- assembler->GoTo(backtrack);
- return;
- }
- break;
- case NON_LETTER_CHARACTER_MATCH:
- emit_function = &EmitAtomNonLetter;
- break;
- case SIMPLE_CHARACTER_MATCH:
- emit_function = &EmitSimpleCharacter;
- break;
- case CASE_CHARACTER_MATCH:
- emit_function = &EmitAtomLetter;
- break;
- default:
- break;
- }
- if (emit_function != NULL) {
- bool bound_checked = emit_function(isolate,
- compiler,
- quarks[j],
- backtrack,
- cp_offset + j,
- *checked_up_to < cp_offset + j,
- preloaded);
- if (bound_checked) UpdateBoundsCheck(cp_offset + j, checked_up_to);
- }
- }
- } else {
- ASSERT_EQ(elm.type, TextElement::CHAR_CLASS);
- if (pass == CHARACTER_CLASS_MATCH) {
- if (first_element_checked && i == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset)) continue;
- RegExpCharacterClass* cc = elm.data.u_char_class;
- EmitCharClass(assembler,
- cc,
- ascii,
- backtrack,
- cp_offset,
- *checked_up_to < cp_offset,
- preloaded,
- zone());
- UpdateBoundsCheck(cp_offset, checked_up_to);
- }
- }
- }
-}
-
-
-int TextNode::Length() {
- TextElement elm = elms_->last();
- ASSERT(elm.cp_offset >= 0);
- if (elm.type == TextElement::ATOM) {
- return elm.cp_offset + elm.data.u_atom->data().length();
- } else {
- return elm.cp_offset + 1;
- }
-}
-
-
-bool TextNode::SkipPass(int int_pass, bool ignore_case) {
- TextEmitPassType pass = static_cast<TextEmitPassType>(int_pass);
- if (ignore_case) {
- return pass == SIMPLE_CHARACTER_MATCH;
- } else {
- return pass == NON_LETTER_CHARACTER_MATCH || pass == CASE_CHARACTER_MATCH;
- }
-}
-
-
-// This generates the code to match a text node. A text node can contain
-// straight character sequences (possibly to be matched in a case-independent
-// way) and character classes. For efficiency we do not do this in a single
-// pass from left to right. Instead we pass over the text node several times,
-// emitting code for some character positions every time. See the comment on
-// TextEmitPass for details.
-void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- ASSERT(limit_result == CONTINUE);
-
- if (trace->cp_offset() + Length() > RegExpMacroAssembler::kMaxCPOffset) {
- compiler->SetRegExpTooBig();
- return;
- }
-
- if (compiler->ascii()) {
- int dummy = 0;
- TextEmitPass(compiler, NON_ASCII_MATCH, false, trace, false, &dummy);
- }
-
- bool first_elt_done = false;
- int bound_checked_to = trace->cp_offset() - 1;
- bound_checked_to += trace->bound_checked_up_to();
-
- // If a character is preloaded into the current character register then
- // check that now.
- if (trace->characters_preloaded() == 1) {
- for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
- if (!SkipPass(pass, compiler->ignore_case())) {
- TextEmitPass(compiler,
- static_cast<TextEmitPassType>(pass),
- true,
- trace,
- false,
- &bound_checked_to);
- }
- }
- first_elt_done = true;
- }
-
- for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
- if (!SkipPass(pass, compiler->ignore_case())) {
- TextEmitPass(compiler,
- static_cast<TextEmitPassType>(pass),
- false,
- trace,
- first_elt_done,
- &bound_checked_to);
- }
- }
-
- Trace successor_trace(*trace);
- successor_trace.set_at_start(false);
- successor_trace.AdvanceCurrentPositionInTrace(Length(), compiler);
- RecursionCheck rc(compiler);
- on_success()->Emit(compiler, &successor_trace);
-}
-
-
-void Trace::InvalidateCurrentCharacter() {
- characters_preloaded_ = 0;
-}
-
-
-void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
- ASSERT(by > 0);
- // We don't have an instruction for shifting the current character register
- // down or for using a shifted value for anything so lets just forget that
- // we preloaded any characters into it.
- characters_preloaded_ = 0;
- // Adjust the offsets of the quick check performed information. This
- // information is used to find out what we already determined about the
- // characters by means of mask and compare.
- quick_check_performed_.Advance(by, compiler->ascii());
- cp_offset_ += by;
- if (cp_offset_ > RegExpMacroAssembler::kMaxCPOffset) {
- compiler->SetRegExpTooBig();
- cp_offset_ = 0;
- }
- bound_checked_up_to_ = Max(0, bound_checked_up_to_ - by);
-}
-
-
-void TextNode::MakeCaseIndependent(bool is_ascii) {
- int element_count = elms_->length();
- for (int i = 0; i < element_count; i++) {
- TextElement elm = elms_->at(i);
- if (elm.type == TextElement::CHAR_CLASS) {
- RegExpCharacterClass* cc = elm.data.u_char_class;
- // None of the standard character classes is different in the case
- // independent case and it slows us down if we don't know that.
- if (cc->is_standard(zone())) continue;
- ZoneList<CharacterRange>* ranges = cc->ranges(zone());
- int range_count = ranges->length();
- for (int j = 0; j < range_count; j++) {
- ranges->at(j).AddCaseEquivalents(ranges, is_ascii, zone());
- }
- }
- }
-}
-
-
-int TextNode::GreedyLoopTextLength() {
- TextElement elm = elms_->at(elms_->length() - 1);
- if (elm.type == TextElement::CHAR_CLASS) {
- return elm.cp_offset + 1;
- } else {
- return elm.cp_offset + elm.data.u_atom->data().length();
- }
-}
-
-
-RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
- RegExpCompiler* compiler) {
- if (elms_->length() != 1) return NULL;
- TextElement elm = elms_->at(0);
- if (elm.type != TextElement::CHAR_CLASS) return NULL;
- RegExpCharacterClass* node = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = node->ranges(zone());
- if (!CharacterRange::IsCanonical(ranges)) {
- CharacterRange::Canonicalize(ranges);
- }
- if (node->is_negated()) {
- return ranges->length() == 0 ? on_success() : NULL;
- }
- if (ranges->length() != 1) return NULL;
- uint32_t max_char;
- if (compiler->ascii()) {
- max_char = String::kMaxOneByteCharCode;
- } else {
- max_char = String::kMaxUtf16CodeUnit;
- }
- return ranges->at(0).IsEverything(max_char) ? on_success() : NULL;
-}
-
-
-// Finds the fixed match length of a sequence of nodes that goes from
-// this alternative and back to this choice node. If there are variable
-// length nodes or other complications in the way then return a sentinel
-// value indicating that a greedy loop cannot be constructed.
-int ChoiceNode::GreedyLoopTextLengthForAlternative(
- GuardedAlternative* alternative) {
- int length = 0;
- RegExpNode* node = alternative->node();
- // Later we will generate code for all these text nodes using recursion
- // so we have to limit the max number.
- int recursion_depth = 0;
- while (node != this) {
- if (recursion_depth++ > RegExpCompiler::kMaxRecursion) {
- return kNodeIsTooComplexForGreedyLoops;
- }
- int node_length = node->GreedyLoopTextLength();
- if (node_length == kNodeIsTooComplexForGreedyLoops) {
- return kNodeIsTooComplexForGreedyLoops;
- }
- length += node_length;
- SeqRegExpNode* seq_node = static_cast<SeqRegExpNode*>(node);
- node = seq_node->on_success();
- }
- return length;
-}
-
-
-void LoopChoiceNode::AddLoopAlternative(GuardedAlternative alt) {
- ASSERT_EQ(loop_node_, NULL);
- AddAlternative(alt);
- loop_node_ = alt.node();
-}
-
-
-void LoopChoiceNode::AddContinueAlternative(GuardedAlternative alt) {
- ASSERT_EQ(continue_node_, NULL);
- AddAlternative(alt);
- continue_node_ = alt.node();
-}
-
-
-void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- if (trace->stop_node() == this) {
- int text_length =
- GreedyLoopTextLengthForAlternative(&(alternatives_->at(0)));
- ASSERT(text_length != kNodeIsTooComplexForGreedyLoops);
- // Update the counter-based backtracking info on the stack. This is an
- // optimization for greedy loops (see below).
- ASSERT(trace->cp_offset() == text_length);
- macro_assembler->AdvanceCurrentPosition(text_length);
- macro_assembler->GoTo(trace->loop_label());
- return;
- }
- ASSERT(trace->stop_node() == NULL);
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
- ChoiceNode::Emit(compiler, trace);
-}
-
-
-int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler,
- int eats_at_least) {
- int preload_characters = Min(4, eats_at_least);
- if (compiler->macro_assembler()->CanReadUnaligned()) {
- bool ascii = compiler->ascii();
- if (ascii) {
- if (preload_characters > 4) preload_characters = 4;
- // We can't preload 3 characters because there is no machine instruction
- // to do that. We can't just load 4 because we could be reading
- // beyond the end of the string, which could cause a memory fault.
- if (preload_characters == 3) preload_characters = 2;
- } else {
- if (preload_characters > 2) preload_characters = 2;
- }
- } else {
- if (preload_characters > 1) preload_characters = 1;
- }
- return preload_characters;
-}
-
-
-// This class is used when generating the alternatives in a choice node. It
-// records the way the alternative is being code generated.
-class AlternativeGeneration: public Malloced {
- public:
- AlternativeGeneration()
- : possible_success(),
- expects_preload(false),
- after(),
- quick_check_details() { }
- Label possible_success;
- bool expects_preload;
- Label after;
- QuickCheckDetails quick_check_details;
-};
-
-
-// Creates a list of AlternativeGenerations. If the list has a reasonable
-// size then it is on the stack, otherwise the excess is on the heap.
-class AlternativeGenerationList {
- public:
- AlternativeGenerationList(int count, Zone* zone)
- : alt_gens_(count, zone) {
- for (int i = 0; i < count && i < kAFew; i++) {
- alt_gens_.Add(a_few_alt_gens_ + i, zone);
- }
- for (int i = kAFew; i < count; i++) {
- alt_gens_.Add(new AlternativeGeneration(), zone);
- }
- }
- ~AlternativeGenerationList() {
- for (int i = kAFew; i < alt_gens_.length(); i++) {
- delete alt_gens_[i];
- alt_gens_[i] = NULL;
- }
- }
-
- AlternativeGeneration* at(int i) {
- return alt_gens_[i];
- }
-
- private:
- static const int kAFew = 10;
- ZoneList<AlternativeGeneration*> alt_gens_;
- AlternativeGeneration a_few_alt_gens_[kAFew];
-};
-
-
-// The '2' variant is has inclusive from and exclusive to.
-static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0,
- 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B, 0x2028, 0x202A,
- 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001, 0xFEFF, 0xFF00, 0x10000 };
-static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges);
-
-static const int kWordRanges[] = {
- '0', '9' + 1, 'A', 'Z' + 1, '_', '_' + 1, 'a', 'z' + 1, 0x10000 };
-static const int kWordRangeCount = ARRAY_SIZE(kWordRanges);
-static const int kDigitRanges[] = { '0', '9' + 1, 0x10000 };
-static const int kDigitRangeCount = ARRAY_SIZE(kDigitRanges);
-static const int kSurrogateRanges[] = { 0xd800, 0xe000, 0x10000 };
-static const int kSurrogateRangeCount = ARRAY_SIZE(kSurrogateRanges);
-static const int kLineTerminatorRanges[] = { 0x000A, 0x000B, 0x000D, 0x000E,
- 0x2028, 0x202A, 0x10000 };
-static const int kLineTerminatorRangeCount = ARRAY_SIZE(kLineTerminatorRanges);
-
-
-void BoyerMoorePositionInfo::Set(int character) {
- SetInterval(Interval(character, character));
-}
-
-
-void BoyerMoorePositionInfo::SetInterval(const Interval& interval) {
- s_ = AddRange(s_, kSpaceRanges, kSpaceRangeCount, interval);
- w_ = AddRange(w_, kWordRanges, kWordRangeCount, interval);
- d_ = AddRange(d_, kDigitRanges, kDigitRangeCount, interval);
- surrogate_ =
- AddRange(surrogate_, kSurrogateRanges, kSurrogateRangeCount, interval);
- if (interval.to() - interval.from() >= kMapSize - 1) {
- if (map_count_ != kMapSize) {
- map_count_ = kMapSize;
- for (int i = 0; i < kMapSize; i++) map_->at(i) = true;
- }
- return;
- }
- for (int i = interval.from(); i <= interval.to(); i++) {
- int mod_character = (i & kMask);
- if (!map_->at(mod_character)) {
- map_count_++;
- map_->at(mod_character) = true;
- }
- if (map_count_ == kMapSize) return;
- }
-}
-
-
-void BoyerMoorePositionInfo::SetAll() {
- s_ = w_ = d_ = kLatticeUnknown;
- if (map_count_ != kMapSize) {
- map_count_ = kMapSize;
- for (int i = 0; i < kMapSize; i++) map_->at(i) = true;
- }
-}
-
-
-BoyerMooreLookahead::BoyerMooreLookahead(
- int length, RegExpCompiler* compiler, Zone* zone)
- : length_(length),
- compiler_(compiler) {
- if (compiler->ascii()) {
- max_char_ = String::kMaxOneByteCharCode;
- } else {
- max_char_ = String::kMaxUtf16CodeUnit;
- }
- bitmaps_ = new(zone) ZoneList<BoyerMoorePositionInfo*>(length, zone);
- for (int i = 0; i < length; i++) {
- bitmaps_->Add(new(zone) BoyerMoorePositionInfo(zone), zone);
- }
-}
-
-
-// Find the longest range of lookahead that has the fewest number of different
-// characters that can occur at a given position. Since we are optimizing two
-// different parameters at once this is a tradeoff.
-bool BoyerMooreLookahead::FindWorthwhileInterval(int* from, int* to) {
- int biggest_points = 0;
- // If more than 32 characters out of 128 can occur it is unlikely that we can
- // be lucky enough to step forwards much of the time.
- const int kMaxMax = 32;
- for (int max_number_of_chars = 4;
- max_number_of_chars < kMaxMax;
- max_number_of_chars *= 2) {
- biggest_points =
- FindBestInterval(max_number_of_chars, biggest_points, from, to);
- }
- if (biggest_points == 0) return false;
- return true;
-}
-
-
-// Find the highest-points range between 0 and length_ where the character
-// information is not too vague. 'Too vague' means that there are more than
-// max_number_of_chars that can occur at this position. Calculates the number
-// of points as the product of width-of-the-range and
-// probability-of-finding-one-of-the-characters, where the probability is
-// calculated using the frequency distribution of the sample subject string.
-int BoyerMooreLookahead::FindBestInterval(
- int max_number_of_chars, int old_biggest_points, int* from, int* to) {
- int biggest_points = old_biggest_points;
- static const int kSize = RegExpMacroAssembler::kTableSize;
- for (int i = 0; i < length_; ) {
- while (i < length_ && Count(i) > max_number_of_chars) i++;
- if (i == length_) break;
- int remembered_from = i;
- bool union_map[kSize];
- for (int j = 0; j < kSize; j++) union_map[j] = false;
- while (i < length_ && Count(i) <= max_number_of_chars) {
- BoyerMoorePositionInfo* map = bitmaps_->at(i);
- for (int j = 0; j < kSize; j++) union_map[j] |= map->at(j);
- i++;
- }
- int frequency = 0;
- for (int j = 0; j < kSize; j++) {
- if (union_map[j]) {
- // Add 1 to the frequency to give a small per-character boost for
- // the cases where our sampling is not good enough and many
- // characters have a frequency of zero. This means the frequency
- // can theoretically be up to 2*kSize though we treat it mostly as
- // a fraction of kSize.
- frequency += compiler_->frequency_collator()->Frequency(j) + 1;
- }
- }
- // We use the probability of skipping times the distance we are skipping to
- // judge the effectiveness of this. Actually we have a cut-off: By
- // dividing by 2 we switch off the skipping if the probability of skipping
- // is less than 50%. This is because the multibyte mask-and-compare
- // skipping in quickcheck is more likely to do well on this case.
- bool in_quickcheck_range = ((i - remembered_from < 4) ||
- (compiler_->ascii() ? remembered_from <= 4 : remembered_from <= 2));
- // Called 'probability' but it is only a rough estimate and can actually
- // be outside the 0-kSize range.
- int probability = (in_quickcheck_range ? kSize / 2 : kSize) - frequency;
- int points = (i - remembered_from) * probability;
- if (points > biggest_points) {
- *from = remembered_from;
- *to = i - 1;
- biggest_points = points;
- }
- }
- return biggest_points;
-}
-
-
-// Take all the characters that will not prevent a successful match if they
-// occur in the subject string in the range between min_lookahead and
-// max_lookahead (inclusive) measured from the current position. If the
-// character at max_lookahead offset is not one of these characters, then we
-// can safely skip forwards by the number of characters in the range.
-int BoyerMooreLookahead::GetSkipTable(int min_lookahead,
- int max_lookahead,
- Handle<ByteArray> boolean_skip_table) {
- const int kSize = RegExpMacroAssembler::kTableSize;
-
- const int kSkipArrayEntry = 0;
- const int kDontSkipArrayEntry = 1;
-
- for (int i = 0; i < kSize; i++) {
- boolean_skip_table->set(i, kSkipArrayEntry);
- }
- int skip = max_lookahead + 1 - min_lookahead;
-
- for (int i = max_lookahead; i >= min_lookahead; i--) {
- BoyerMoorePositionInfo* map = bitmaps_->at(i);
- for (int j = 0; j < kSize; j++) {
- if (map->at(j)) {
- boolean_skip_table->set(j, kDontSkipArrayEntry);
- }
- }
- }
-
- return skip;
-}
-
-
-// See comment above on the implementation of GetSkipTable.
-bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
- const int kSize = RegExpMacroAssembler::kTableSize;
-
- int min_lookahead = 0;
- int max_lookahead = 0;
-
- if (!FindWorthwhileInterval(&min_lookahead, &max_lookahead)) return false;
-
- bool found_single_character = false;
- int single_character = 0;
- for (int i = max_lookahead; i >= min_lookahead; i--) {
- BoyerMoorePositionInfo* map = bitmaps_->at(i);
- if (map->map_count() > 1 ||
- (found_single_character && map->map_count() != 0)) {
- found_single_character = false;
- break;
- }
- for (int j = 0; j < kSize; j++) {
- if (map->at(j)) {
- found_single_character = true;
- single_character = j;
- break;
- }
- }
- }
-
- int lookahead_width = max_lookahead + 1 - min_lookahead;
-
- if (found_single_character && lookahead_width == 1 && max_lookahead < 3) {
- // The mask-compare can probably handle this better.
- return false;
- }
-
- if (found_single_character) {
- Label cont, again;
- masm->Bind(&again);
- masm->LoadCurrentCharacter(max_lookahead, &cont, true);
- if (max_char_ > kSize) {
- masm->CheckCharacterAfterAnd(single_character,
- RegExpMacroAssembler::kTableMask,
- &cont);
- } else {
- masm->CheckCharacter(single_character, &cont);
- }
- masm->AdvanceCurrentPosition(lookahead_width);
- masm->GoTo(&again);
- masm->Bind(&cont);
- return true;
- }
-
- Handle<ByteArray> boolean_skip_table =
- FACTORY->NewByteArray(kSize, TENURED);
- int skip_distance = GetSkipTable(
- min_lookahead, max_lookahead, boolean_skip_table);
- ASSERT(skip_distance != 0);
-
- Label cont, again;
- masm->Bind(&again);
- masm->LoadCurrentCharacter(max_lookahead, &cont, true);
- masm->CheckBitInTable(boolean_skip_table, &cont);
- masm->AdvanceCurrentPosition(skip_distance);
- masm->GoTo(&again);
- masm->Bind(&cont);
-
- return true;
-}
-
-
-/* Code generation for choice nodes.
- *
- * We generate quick checks that do a mask and compare to eliminate a
- * choice. If the quick check succeeds then it jumps to the continuation to
- * do slow checks and check subsequent nodes. If it fails (the common case)
- * it falls through to the next choice.
- *
- * Here is the desired flow graph. Nodes directly below each other imply
- * fallthrough. Alternatives 1 and 2 have quick checks. Alternative
- * 3 doesn't have a quick check so we have to call the slow check.
- * Nodes are marked Qn for quick checks and Sn for slow checks. The entire
- * regexp continuation is generated directly after the Sn node, up to the
- * next GoTo if we decide to reuse some already generated code. Some
- * nodes expect preload_characters to be preloaded into the current
- * character register. R nodes do this preloading. Vertices are marked
- * F for failures and S for success (possible success in the case of quick
- * nodes). L, V, < and > are used as arrow heads.
- *
- * ----------> R
- * |
- * V
- * Q1 -----> S1
- * | S /
- * F| /
- * | F/
- * | /
- * | R
- * | /
- * V L
- * Q2 -----> S2
- * | S /
- * F| /
- * | F/
- * | /
- * | R
- * | /
- * V L
- * S3
- * |
- * F|
- * |
- * R
- * |
- * backtrack V
- * <----------Q4
- * \ F |
- * \ |S
- * \ F V
- * \-----S4
- *
- * For greedy loops we reverse our expectation and expect to match rather
- * than fail. Therefore we want the loop code to look like this (U is the
- * unwind code that steps back in the greedy loop). The following alternatives
- * look the same as above.
- * _____
- * / \
- * V |
- * ----------> S1 |
- * /| |
- * / |S |
- * F/ \_____/
- * /
- * |<-----------
- * | \
- * V \
- * Q2 ---> S2 \
- * | S / |
- * F| / |
- * | F/ |
- * | / |
- * | R |
- * | / |
- * F VL |
- * <------U |
- * back |S |
- * \______________/
- */
-
-void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- int choice_count = alternatives_->length();
-#ifdef DEBUG
- for (int i = 0; i < choice_count - 1; i++) {
- GuardedAlternative alternative = alternatives_->at(i);
- ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == NULL) ? 0 : guards->length();
- for (int j = 0; j < guard_count; j++) {
- ASSERT(!trace->mentions_reg(guards->at(j)->reg()));
- }
- }
-#endif
-
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- ASSERT(limit_result == CONTINUE);
-
- int new_flush_budget = trace->flush_budget() / choice_count;
- if (trace->flush_budget() == 0 && trace->actions() != NULL) {
- trace->Flush(compiler, this);
- return;
- }
-
- RecursionCheck rc(compiler);
-
- Trace* current_trace = trace;
-
- int text_length = GreedyLoopTextLengthForAlternative(&(alternatives_->at(0)));
- bool greedy_loop = false;
- Label greedy_loop_label;
- Trace counter_backtrack_trace;
- counter_backtrack_trace.set_backtrack(&greedy_loop_label);
- if (not_at_start()) counter_backtrack_trace.set_at_start(false);
-
- if (choice_count > 1 && text_length != kNodeIsTooComplexForGreedyLoops) {
- // Here we have special handling for greedy loops containing only text nodes
- // and other simple nodes. These are handled by pushing the current
- // position on the stack and then incrementing the current position each
- // time around the switch. On backtrack we decrement the current position
- // and check it against the pushed value. This avoids pushing backtrack
- // information for each iteration of the loop, which could take up a lot of
- // space.
- greedy_loop = true;
- ASSERT(trace->stop_node() == NULL);
- macro_assembler->PushCurrentPosition();
- current_trace = &counter_backtrack_trace;
- Label greedy_match_failed;
- Trace greedy_match_trace;
- if (not_at_start()) greedy_match_trace.set_at_start(false);
- greedy_match_trace.set_backtrack(&greedy_match_failed);
- Label loop_label;
- macro_assembler->Bind(&loop_label);
- greedy_match_trace.set_stop_node(this);
- greedy_match_trace.set_loop_label(&loop_label);
- alternatives_->at(0).node()->Emit(compiler, &greedy_match_trace);
- macro_assembler->Bind(&greedy_match_failed);
- }
-
- Label second_choice; // For use in greedy matches.
- macro_assembler->Bind(&second_choice);
-
- int first_normal_choice = greedy_loop ? 1 : 0;
-
- bool not_at_start = current_trace->at_start() == Trace::FALSE;
- const int kEatsAtLeastNotYetInitialized = -1;
- int eats_at_least = kEatsAtLeastNotYetInitialized;
-
- bool skip_was_emitted = false;
-
- if (!greedy_loop && choice_count == 2) {
- GuardedAlternative alt1 = alternatives_->at(1);
- if (alt1.guards() == NULL || alt1.guards()->length() == 0) {
- RegExpNode* eats_anything_node = alt1.node();
- if (eats_anything_node->GetSuccessorOfOmnivorousTextNode(compiler) ==
- this) {
- // At this point we know that we are at a non-greedy loop that will eat
- // any character one at a time. Any non-anchored regexp has such a
- // loop prepended to it in order to find where it starts. We look for
- // a pattern of the form ...abc... where we can look 6 characters ahead
- // and step forwards 3 if the character is not one of abc. Abc need
- // not be atoms, they can be any reasonably limited character class or
- // small alternation.
- ASSERT(trace->is_trivial()); // This is the case on LoopChoiceNodes.
- BoyerMooreLookahead* lookahead = bm_info(not_at_start);
- if (lookahead == NULL) {
- eats_at_least = Min(kMaxLookaheadForBoyerMoore,
- EatsAtLeast(kMaxLookaheadForBoyerMoore,
- kRecursionBudget,
- not_at_start));
- if (eats_at_least >= 1) {
- BoyerMooreLookahead* bm =
- new(zone()) BoyerMooreLookahead(eats_at_least,
- compiler,
- zone());
- GuardedAlternative alt0 = alternatives_->at(0);
- alt0.node()->FillInBMInfo(0, kRecursionBudget, bm, not_at_start);
- skip_was_emitted = bm->EmitSkipInstructions(macro_assembler);
- }
- } else {
- skip_was_emitted = lookahead->EmitSkipInstructions(macro_assembler);
- }
- }
- }
- }
-
- if (eats_at_least == kEatsAtLeastNotYetInitialized) {
- // Save some time by looking at most one machine word ahead.
- eats_at_least =
- EatsAtLeast(compiler->ascii() ? 4 : 2, kRecursionBudget, not_at_start);
- }
- int preload_characters = CalculatePreloadCharacters(compiler, eats_at_least);
-
- bool preload_is_current = !skip_was_emitted &&
- (current_trace->characters_preloaded() == preload_characters);
- bool preload_has_checked_bounds = preload_is_current;
-
- AlternativeGenerationList alt_gens(choice_count, zone());
-
- // For now we just call all choices one after the other. The idea ultimately
- // is to use the Dispatch table to try only the relevant ones.
- for (int i = first_normal_choice; i < choice_count; i++) {
- GuardedAlternative alternative = alternatives_->at(i);
- AlternativeGeneration* alt_gen = alt_gens.at(i);
- alt_gen->quick_check_details.set_characters(preload_characters);
- ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == NULL) ? 0 : guards->length();
- Trace new_trace(*current_trace);
- new_trace.set_characters_preloaded(preload_is_current ?
- preload_characters :
- 0);
- if (preload_has_checked_bounds) {
- new_trace.set_bound_checked_up_to(preload_characters);
- }
- new_trace.quick_check_performed()->Clear();
- if (not_at_start_) new_trace.set_at_start(Trace::FALSE);
- alt_gen->expects_preload = preload_is_current;
- bool generate_full_check_inline = false;
- if (FLAG_regexp_optimization &&
- try_to_emit_quick_check_for_alternative(i) &&
- alternative.node()->EmitQuickCheck(compiler,
- &new_trace,
- preload_has_checked_bounds,
- &alt_gen->possible_success,
- &alt_gen->quick_check_details,
- i < choice_count - 1)) {
- // Quick check was generated for this choice.
- preload_is_current = true;
- preload_has_checked_bounds = true;
- // On the last choice in the ChoiceNode we generated the quick
- // check to fall through on possible success. So now we need to
- // generate the full check inline.
- if (i == choice_count - 1) {
- macro_assembler->Bind(&alt_gen->possible_success);
- new_trace.set_quick_check_performed(&alt_gen->quick_check_details);
- new_trace.set_characters_preloaded(preload_characters);
- new_trace.set_bound_checked_up_to(preload_characters);
- generate_full_check_inline = true;
- }
- } else if (alt_gen->quick_check_details.cannot_match()) {
- if (i == choice_count - 1 && !greedy_loop) {
- macro_assembler->GoTo(trace->backtrack());
- }
- continue;
- } else {
- // No quick check was generated. Put the full code here.
- // If this is not the first choice then there could be slow checks from
- // previous cases that go here when they fail. There's no reason to
- // insist that they preload characters since the slow check we are about
- // to generate probably can't use it.
- if (i != first_normal_choice) {
- alt_gen->expects_preload = false;
- new_trace.InvalidateCurrentCharacter();
- }
- if (i < choice_count - 1) {
- new_trace.set_backtrack(&alt_gen->after);
- }
- generate_full_check_inline = true;
- }
- if (generate_full_check_inline) {
- if (new_trace.actions() != NULL) {
- new_trace.set_flush_budget(new_flush_budget);
- }
- for (int j = 0; j < guard_count; j++) {
- GenerateGuard(macro_assembler, guards->at(j), &new_trace);
- }
- alternative.node()->Emit(compiler, &new_trace);
- preload_is_current = false;
- }
- macro_assembler->Bind(&alt_gen->after);
- }
- if (greedy_loop) {
- macro_assembler->Bind(&greedy_loop_label);
- // If we have unwound to the bottom then backtrack.
- macro_assembler->CheckGreedyLoop(trace->backtrack());
- // Otherwise try the second priority at an earlier position.
- macro_assembler->AdvanceCurrentPosition(-text_length);
- macro_assembler->GoTo(&second_choice);
- }
-
- // At this point we need to generate slow checks for the alternatives where
- // the quick check was inlined. We can recognize these because the associated
- // label was bound.
- for (int i = first_normal_choice; i < choice_count - 1; i++) {
- AlternativeGeneration* alt_gen = alt_gens.at(i);
- Trace new_trace(*current_trace);
- // If there are actions to be flushed we have to limit how many times
- // they are flushed. Take the budget of the parent trace and distribute
- // it fairly amongst the children.
- if (new_trace.actions() != NULL) {
- new_trace.set_flush_budget(new_flush_budget);
- }
- EmitOutOfLineContinuation(compiler,
- &new_trace,
- alternatives_->at(i),
- alt_gen,
- preload_characters,
- alt_gens.at(i + 1)->expects_preload);
- }
-}
-
-
-void ChoiceNode::EmitOutOfLineContinuation(RegExpCompiler* compiler,
- Trace* trace,
- GuardedAlternative alternative,
- AlternativeGeneration* alt_gen,
- int preload_characters,
- bool next_expects_preload) {
- if (!alt_gen->possible_success.is_linked()) return;
-
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- macro_assembler->Bind(&alt_gen->possible_success);
- Trace out_of_line_trace(*trace);
- out_of_line_trace.set_characters_preloaded(preload_characters);
- out_of_line_trace.set_quick_check_performed(&alt_gen->quick_check_details);
- if (not_at_start_) out_of_line_trace.set_at_start(Trace::FALSE);
- ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == NULL) ? 0 : guards->length();
- if (next_expects_preload) {
- Label reload_current_char;
- out_of_line_trace.set_backtrack(&reload_current_char);
- for (int j = 0; j < guard_count; j++) {
- GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
- }
- alternative.node()->Emit(compiler, &out_of_line_trace);
- macro_assembler->Bind(&reload_current_char);
- // Reload the current character, since the next quick check expects that.
- // We don't need to check bounds here because we only get into this
- // code through a quick check which already did the checked load.
- macro_assembler->LoadCurrentCharacter(trace->cp_offset(),
- NULL,
- false,
- preload_characters);
- macro_assembler->GoTo(&(alt_gen->after));
- } else {
- out_of_line_trace.set_backtrack(&(alt_gen->after));
- for (int j = 0; j < guard_count; j++) {
- GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
- }
- alternative.node()->Emit(compiler, &out_of_line_trace);
- }
-}
-
-
-void ActionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- ASSERT(limit_result == CONTINUE);
-
- RecursionCheck rc(compiler);
-
- switch (type_) {
- case STORE_POSITION: {
- Trace::DeferredCapture
- new_capture(data_.u_position_register.reg,
- data_.u_position_register.is_capture,
- trace);
- Trace new_trace = *trace;
- new_trace.add_action(&new_capture);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case INCREMENT_REGISTER: {
- Trace::DeferredIncrementRegister
- new_increment(data_.u_increment_register.reg);
- Trace new_trace = *trace;
- new_trace.add_action(&new_increment);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case SET_REGISTER: {
- Trace::DeferredSetRegister
- new_set(data_.u_store_register.reg, data_.u_store_register.value);
- Trace new_trace = *trace;
- new_trace.add_action(&new_set);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case CLEAR_CAPTURES: {
- Trace::DeferredClearCaptures
- new_capture(Interval(data_.u_clear_captures.range_from,
- data_.u_clear_captures.range_to));
- Trace new_trace = *trace;
- new_trace.add_action(&new_capture);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case BEGIN_SUBMATCH:
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- } else {
- assembler->WriteCurrentPositionToRegister(
- data_.u_submatch.current_position_register, 0);
- assembler->WriteStackPointerToRegister(
- data_.u_submatch.stack_pointer_register);
- on_success()->Emit(compiler, trace);
- }
- break;
- case EMPTY_MATCH_CHECK: {
- int start_pos_reg = data_.u_empty_match_check.start_register;
- int stored_pos = 0;
- int rep_reg = data_.u_empty_match_check.repetition_register;
- bool has_minimum = (rep_reg != RegExpCompiler::kNoRegister);
- bool know_dist = trace->GetStoredPosition(start_pos_reg, &stored_pos);
- if (know_dist && !has_minimum && stored_pos == trace->cp_offset()) {
- // If we know we haven't advanced and there is no minimum we
- // can just backtrack immediately.
- assembler->GoTo(trace->backtrack());
- } else if (know_dist && stored_pos < trace->cp_offset()) {
- // If we know we've advanced we can generate the continuation
- // immediately.
- on_success()->Emit(compiler, trace);
- } else if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- } else {
- Label skip_empty_check;
- // If we have a minimum number of repetitions we check the current
- // number first and skip the empty check if it's not enough.
- if (has_minimum) {
- int limit = data_.u_empty_match_check.repetition_limit;
- assembler->IfRegisterLT(rep_reg, limit, &skip_empty_check);
- }
- // If the match is empty we bail out, otherwise we fall through
- // to the on-success continuation.
- assembler->IfRegisterEqPos(data_.u_empty_match_check.start_register,
- trace->backtrack());
- assembler->Bind(&skip_empty_check);
- on_success()->Emit(compiler, trace);
- }
- break;
- }
- case POSITIVE_SUBMATCH_SUCCESS: {
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
- assembler->ReadCurrentPositionFromRegister(
- data_.u_submatch.current_position_register);
- assembler->ReadStackPointerFromRegister(
- data_.u_submatch.stack_pointer_register);
- int clear_register_count = data_.u_submatch.clear_register_count;
- if (clear_register_count == 0) {
- on_success()->Emit(compiler, trace);
- return;
- }
- int clear_registers_from = data_.u_submatch.clear_register_from;
- Label clear_registers_backtrack;
- Trace new_trace = *trace;
- new_trace.set_backtrack(&clear_registers_backtrack);
- on_success()->Emit(compiler, &new_trace);
-
- assembler->Bind(&clear_registers_backtrack);
- int clear_registers_to = clear_registers_from + clear_register_count - 1;
- assembler->ClearRegisters(clear_registers_from, clear_registers_to);
-
- ASSERT(trace->backtrack() == NULL);
- assembler->Backtrack();
- return;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
-
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- ASSERT(limit_result == CONTINUE);
-
- RecursionCheck rc(compiler);
-
- ASSERT_EQ(start_reg_ + 1, end_reg_);
- if (compiler->ignore_case()) {
- assembler->CheckNotBackReferenceIgnoreCase(start_reg_,
- trace->backtrack());
- } else {
- assembler->CheckNotBackReference(start_reg_, trace->backtrack());
- }
- on_success()->Emit(compiler, trace);
-}
-
-
-// -------------------------------------------------------------------
-// Dot/dotty output
-
-
-#ifdef DEBUG
-
-
-class DotPrinter: public NodeVisitor {
- public:
- explicit DotPrinter(bool ignore_case)
- : ignore_case_(ignore_case),
- stream_(&alloc_) { }
- void PrintNode(const char* label, RegExpNode* node);
- void Visit(RegExpNode* node);
- void PrintAttributes(RegExpNode* from);
- StringStream* stream() { return &stream_; }
- void PrintOnFailure(RegExpNode* from, RegExpNode* to);
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that);
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
- private:
- bool ignore_case_;
- HeapStringAllocator alloc_;
- StringStream stream_;
-};
-
-
-void DotPrinter::PrintNode(const char* label, RegExpNode* node) {
- stream()->Add("digraph G {\n graph [label=\"");
- for (int i = 0; label[i]; i++) {
- switch (label[i]) {
- case '\\':
- stream()->Add("\\\\");
- break;
- case '"':
- stream()->Add("\"");
- break;
- default:
- stream()->Put(label[i]);
- break;
- }
- }
- stream()->Add("\"];\n");
- Visit(node);
- stream()->Add("}\n");
- printf("%s", *(stream()->ToCString()));
-}
-
-
-void DotPrinter::Visit(RegExpNode* node) {
- if (node->info()->visited) return;
- node->info()->visited = true;
- node->Accept(this);
-}
-
-
-void DotPrinter::PrintOnFailure(RegExpNode* from, RegExpNode* on_failure) {
- stream()->Add(" n%p -> n%p [style=dotted];\n", from, on_failure);
- Visit(on_failure);
-}
-
-
-class TableEntryBodyPrinter {
- public:
- TableEntryBodyPrinter(StringStream* stream, ChoiceNode* choice)
- : stream_(stream), choice_(choice) { }
- void Call(uc16 from, DispatchTable::Entry entry) {
- OutSet* out_set = entry.out_set();
- for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
- if (out_set->Get(i)) {
- stream()->Add(" n%p:s%io%i -> n%p;\n",
- choice(),
- from,
- i,
- choice()->alternatives()->at(i).node());
- }
- }
- }
- private:
- StringStream* stream() { return stream_; }
- ChoiceNode* choice() { return choice_; }
- StringStream* stream_;
- ChoiceNode* choice_;
-};
-
-
-class TableEntryHeaderPrinter {
- public:
- explicit TableEntryHeaderPrinter(StringStream* stream)
- : first_(true), stream_(stream) { }
- void Call(uc16 from, DispatchTable::Entry entry) {
- if (first_) {
- first_ = false;
- } else {
- stream()->Add("|");
- }
- stream()->Add("{\\%k-\\%k|{", from, entry.to());
- OutSet* out_set = entry.out_set();
- int priority = 0;
- for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
- if (out_set->Get(i)) {
- if (priority > 0) stream()->Add("|");
- stream()->Add("<s%io%i> %i", from, i, priority);
- priority++;
- }
- }
- stream()->Add("}}");
- }
-
- private:
- bool first_;
- StringStream* stream() { return stream_; }
- StringStream* stream_;
-};
-
-
-class AttributePrinter {
- public:
- explicit AttributePrinter(DotPrinter* out)
- : out_(out), first_(true) { }
- void PrintSeparator() {
- if (first_) {
- first_ = false;
- } else {
- out_->stream()->Add("|");
- }
- }
- void PrintBit(const char* name, bool value) {
- if (!value) return;
- PrintSeparator();
- out_->stream()->Add("{%s}", name);
- }
- void PrintPositive(const char* name, int value) {
- if (value < 0) return;
- PrintSeparator();
- out_->stream()->Add("{%s|%x}", name, value);
- }
- private:
- DotPrinter* out_;
- bool first_;
-};
-
-
-void DotPrinter::PrintAttributes(RegExpNode* that) {
- stream()->Add(" a%p [shape=Mrecord, color=grey, fontcolor=grey, "
- "margin=0.1, fontsize=10, label=\"{",
- that);
- AttributePrinter printer(this);
- NodeInfo* info = that->info();
- printer.PrintBit("NI", info->follows_newline_interest);
- printer.PrintBit("WI", info->follows_word_interest);
- printer.PrintBit("SI", info->follows_start_interest);
- Label* label = that->label();
- if (label->is_bound())
- printer.PrintPositive("@", label->pos());
- stream()->Add("}\"];\n");
- stream()->Add(" a%p -> n%p [style=dashed, color=grey, "
- "arrowhead=none];\n", that, that);
-}
-
-
-static const bool kPrintDispatchTable = false;
-void DotPrinter::VisitChoice(ChoiceNode* that) {
- if (kPrintDispatchTable) {
- stream()->Add(" n%p [shape=Mrecord, label=\"", that);
- TableEntryHeaderPrinter header_printer(stream());
- that->GetTable(ignore_case_)->ForEach(&header_printer);
- stream()->Add("\"]\n", that);
- PrintAttributes(that);
- TableEntryBodyPrinter body_printer(stream(), that);
- that->GetTable(ignore_case_)->ForEach(&body_printer);
- } else {
- stream()->Add(" n%p [shape=Mrecord, label=\"?\"];\n", that);
- for (int i = 0; i < that->alternatives()->length(); i++) {
- GuardedAlternative alt = that->alternatives()->at(i);
- stream()->Add(" n%p -> n%p;\n", that, alt.node());
- }
- }
- for (int i = 0; i < that->alternatives()->length(); i++) {
- GuardedAlternative alt = that->alternatives()->at(i);
- alt.node()->Accept(this);
- }
-}
-
-
-void DotPrinter::VisitText(TextNode* that) {
- Zone* zone = that->zone();
- stream()->Add(" n%p [label=\"", that);
- for (int i = 0; i < that->elements()->length(); i++) {
- if (i > 0) stream()->Add(" ");
- TextElement elm = that->elements()->at(i);
- switch (elm.type) {
- case TextElement::ATOM: {
- stream()->Add("'%w'", elm.data.u_atom->data());
- break;
- }
- case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* node = elm.data.u_char_class;
- stream()->Add("[");
- if (node->is_negated())
- stream()->Add("^");
- for (int j = 0; j < node->ranges(zone)->length(); j++) {
- CharacterRange range = node->ranges(zone)->at(j);
- stream()->Add("%k-%k", range.from(), range.to());
- }
- stream()->Add("]");
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- stream()->Add("\", shape=box, peripheries=2];\n");
- PrintAttributes(that);
- stream()->Add(" n%p -> n%p;\n", that, that->on_success());
- Visit(that->on_success());
-}
-
-
-void DotPrinter::VisitBackReference(BackReferenceNode* that) {
- stream()->Add(" n%p [label=\"$%i..$%i\", shape=doubleoctagon];\n",
- that,
- that->start_register(),
- that->end_register());
- PrintAttributes(that);
- stream()->Add(" n%p -> n%p;\n", that, that->on_success());
- Visit(that->on_success());
-}
-
-
-void DotPrinter::VisitEnd(EndNode* that) {
- stream()->Add(" n%p [style=bold, shape=point];\n", that);
- PrintAttributes(that);
-}
-
-
-void DotPrinter::VisitAssertion(AssertionNode* that) {
- stream()->Add(" n%p [", that);
- switch (that->type()) {
- case AssertionNode::AT_END:
- stream()->Add("label=\"$\", shape=septagon");
- break;
- case AssertionNode::AT_START:
- stream()->Add("label=\"^\", shape=septagon");
- break;
- case AssertionNode::AT_BOUNDARY:
- stream()->Add("label=\"\\b\", shape=septagon");
- break;
- case AssertionNode::AT_NON_BOUNDARY:
- stream()->Add("label=\"\\B\", shape=septagon");
- break;
- case AssertionNode::AFTER_NEWLINE:
- stream()->Add("label=\"(?<=\\n)\", shape=septagon");
- break;
- }
- stream()->Add("];\n");
- PrintAttributes(that);
- RegExpNode* successor = that->on_success();
- stream()->Add(" n%p -> n%p;\n", that, successor);
- Visit(successor);
-}
-
-
-void DotPrinter::VisitAction(ActionNode* that) {
- stream()->Add(" n%p [", that);
- switch (that->type_) {
- case ActionNode::SET_REGISTER:
- stream()->Add("label=\"$%i:=%i\", shape=octagon",
- that->data_.u_store_register.reg,
- that->data_.u_store_register.value);
- break;
- case ActionNode::INCREMENT_REGISTER:
- stream()->Add("label=\"$%i++\", shape=octagon",
- that->data_.u_increment_register.reg);
- break;
- case ActionNode::STORE_POSITION:
- stream()->Add("label=\"$%i:=$pos\", shape=octagon",
- that->data_.u_position_register.reg);
- break;
- case ActionNode::BEGIN_SUBMATCH:
- stream()->Add("label=\"$%i:=$pos,begin\", shape=septagon",
- that->data_.u_submatch.current_position_register);
- break;
- case ActionNode::POSITIVE_SUBMATCH_SUCCESS:
- stream()->Add("label=\"escape\", shape=septagon");
- break;
- case ActionNode::EMPTY_MATCH_CHECK:
- stream()->Add("label=\"$%i=$pos?,$%i<%i?\", shape=septagon",
- that->data_.u_empty_match_check.start_register,
- that->data_.u_empty_match_check.repetition_register,
- that->data_.u_empty_match_check.repetition_limit);
- break;
- case ActionNode::CLEAR_CAPTURES: {
- stream()->Add("label=\"clear $%i to $%i\", shape=septagon",
- that->data_.u_clear_captures.range_from,
- that->data_.u_clear_captures.range_to);
- break;
- }
- }
- stream()->Add("];\n");
- PrintAttributes(that);
- RegExpNode* successor = that->on_success();
- stream()->Add(" n%p -> n%p;\n", that, successor);
- Visit(successor);
-}
-
-
-class DispatchTableDumper {
- public:
- explicit DispatchTableDumper(StringStream* stream) : stream_(stream) { }
- void Call(uc16 key, DispatchTable::Entry entry);
- StringStream* stream() { return stream_; }
- private:
- StringStream* stream_;
-};
-
-
-void DispatchTableDumper::Call(uc16 key, DispatchTable::Entry entry) {
- stream()->Add("[%k-%k]: {", key, entry.to());
- OutSet* set = entry.out_set();
- bool first = true;
- for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
- if (set->Get(i)) {
- if (first) {
- first = false;
- } else {
- stream()->Add(", ");
- }
- stream()->Add("%i", i);
- }
- }
- stream()->Add("}\n");
-}
-
-
-void DispatchTable::Dump() {
- HeapStringAllocator alloc;
- StringStream stream(&alloc);
- DispatchTableDumper dumper(&stream);
- tree()->ForEach(&dumper);
- OS::PrintError("%s", *stream.ToCString());
-}
-
-
-void RegExpEngine::DotPrint(const char* label,
- RegExpNode* node,
- bool ignore_case) {
- DotPrinter printer(ignore_case);
- printer.PrintNode(label, node);
-}
-
-
-#endif // DEBUG
-
-
-// -------------------------------------------------------------------
-// Tree to graph conversion
-
-RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- ZoneList<TextElement>* elms =
- new(compiler->zone()) ZoneList<TextElement>(1, compiler->zone());
- elms->Add(TextElement::Atom(this), compiler->zone());
- return new(compiler->zone()) TextNode(elms, on_success);
-}
-
-
-RegExpNode* RegExpText::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return new(compiler->zone()) TextNode(elements(), on_success);
-}
-
-
-static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
- const int* special_class,
- int length) {
- length--; // Remove final 0x10000.
- ASSERT(special_class[length] == 0x10000);
- ASSERT(ranges->length() != 0);
- ASSERT(length != 0);
- ASSERT(special_class[0] != 0);
- if (ranges->length() != (length >> 1) + 1) {
- return false;
- }
- CharacterRange range = ranges->at(0);
- if (range.from() != 0) {
- return false;
- }
- for (int i = 0; i < length; i += 2) {
- if (special_class[i] != (range.to() + 1)) {
- return false;
- }
- range = ranges->at((i >> 1) + 1);
- if (special_class[i+1] != range.from()) {
- return false;
- }
- }
- if (range.to() != 0xffff) {
- return false;
- }
- return true;
-}
-
-
-static bool CompareRanges(ZoneList<CharacterRange>* ranges,
- const int* special_class,
- int length) {
- length--; // Remove final 0x10000.
- ASSERT(special_class[length] == 0x10000);
- if (ranges->length() * 2 != length) {
- return false;
- }
- for (int i = 0; i < length; i += 2) {
- CharacterRange range = ranges->at(i >> 1);
- if (range.from() != special_class[i] ||
- range.to() != special_class[i + 1] - 1) {
- return false;
- }
- }
- return true;
-}
-
-
-bool RegExpCharacterClass::is_standard(Zone* zone) {
- // TODO(lrn): Remove need for this function, by not throwing away information
- // along the way.
- if (is_negated_) {
- return false;
- }
- if (set_.is_standard()) {
- return true;
- }
- if (CompareRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
- set_.set_standard_set_type('s');
- return true;
- }
- if (CompareInverseRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
- set_.set_standard_set_type('S');
- return true;
- }
- if (CompareInverseRanges(set_.ranges(zone),
- kLineTerminatorRanges,
- kLineTerminatorRangeCount)) {
- set_.set_standard_set_type('.');
- return true;
- }
- if (CompareRanges(set_.ranges(zone),
- kLineTerminatorRanges,
- kLineTerminatorRangeCount)) {
- set_.set_standard_set_type('n');
- return true;
- }
- if (CompareRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
- set_.set_standard_set_type('w');
- return true;
- }
- if (CompareInverseRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
- set_.set_standard_set_type('W');
- return true;
- }
- return false;
-}
-
-
-RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return new(compiler->zone()) TextNode(this, on_success);
-}
-
-
-RegExpNode* RegExpDisjunction::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- int length = alternatives->length();
- ChoiceNode* result =
- new(compiler->zone()) ChoiceNode(length, compiler->zone());
- for (int i = 0; i < length; i++) {
- GuardedAlternative alternative(alternatives->at(i)->ToNode(compiler,
- on_success));
- result->AddAlternative(alternative);
- }
- return result;
-}
-
-
-RegExpNode* RegExpQuantifier::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return ToNode(min(),
- max(),
- is_greedy(),
- body(),
- compiler,
- on_success);
-}
-
-
-// Scoped object to keep track of how much we unroll quantifier loops in the
-// regexp graph generator.
-class RegExpExpansionLimiter {
- public:
- static const int kMaxExpansionFactor = 6;
- RegExpExpansionLimiter(RegExpCompiler* compiler, int factor)
- : compiler_(compiler),
- saved_expansion_factor_(compiler->current_expansion_factor()),
- ok_to_expand_(saved_expansion_factor_ <= kMaxExpansionFactor) {
- ASSERT(factor > 0);
- if (ok_to_expand_) {
- if (factor > kMaxExpansionFactor) {
- // Avoid integer overflow of the current expansion factor.
- ok_to_expand_ = false;
- compiler->set_current_expansion_factor(kMaxExpansionFactor + 1);
- } else {
- int new_factor = saved_expansion_factor_ * factor;
- ok_to_expand_ = (new_factor <= kMaxExpansionFactor);
- compiler->set_current_expansion_factor(new_factor);
- }
- }
- }
-
- ~RegExpExpansionLimiter() {
- compiler_->set_current_expansion_factor(saved_expansion_factor_);
- }
-
- bool ok_to_expand() { return ok_to_expand_; }
-
- private:
- RegExpCompiler* compiler_;
- int saved_expansion_factor_;
- bool ok_to_expand_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpExpansionLimiter);
-};
-
-
-RegExpNode* RegExpQuantifier::ToNode(int min,
- int max,
- bool is_greedy,
- RegExpTree* body,
- RegExpCompiler* compiler,
- RegExpNode* on_success,
- bool not_at_start) {
- // x{f, t} becomes this:
- //
- // (r++)<-.
- // | `
- // | (x)
- // v ^
- // (r=0)-->(?)---/ [if r < t]
- // |
- // [if r >= f] \----> ...
- //
-
- // 15.10.2.5 RepeatMatcher algorithm.
- // The parser has already eliminated the case where max is 0. In the case
- // where max_match is zero the parser has removed the quantifier if min was
- // > 0 and removed the atom if min was 0. See AddQuantifierToAtom.
-
- // If we know that we cannot match zero length then things are a little
- // simpler since we don't need to make the special zero length match check
- // from step 2.1. If the min and max are small we can unroll a little in
- // this case.
- static const int kMaxUnrolledMinMatches = 3; // Unroll (foo)+ and (foo){3,}
- static const int kMaxUnrolledMaxMatches = 3; // Unroll (foo)? and (foo){x,3}
- if (max == 0) return on_success; // This can happen due to recursion.
- bool body_can_be_empty = (body->min_match() == 0);
- int body_start_reg = RegExpCompiler::kNoRegister;
- Interval capture_registers = body->CaptureRegisters();
- bool needs_capture_clearing = !capture_registers.is_empty();
- Zone* zone = compiler->zone();
-
- if (body_can_be_empty) {
- body_start_reg = compiler->AllocateRegister();
- } else if (FLAG_regexp_optimization && !needs_capture_clearing) {
- // Only unroll if there are no captures and the body can't be
- // empty.
- {
- RegExpExpansionLimiter limiter(
- compiler, min + ((max != min) ? 1 : 0));
- if (min > 0 && min <= kMaxUnrolledMinMatches && limiter.ok_to_expand()) {
- int new_max = (max == kInfinity) ? max : max - min;
- // Recurse once to get the loop or optional matches after the fixed
- // ones.
- RegExpNode* answer = ToNode(
- 0, new_max, is_greedy, body, compiler, on_success, true);
- // Unroll the forced matches from 0 to min. This can cause chains of
- // TextNodes (which the parser does not generate). These should be
- // combined if it turns out they hinder good code generation.
- for (int i = 0; i < min; i++) {
- answer = body->ToNode(compiler, answer);
- }
- return answer;
- }
- }
- if (max <= kMaxUnrolledMaxMatches && min == 0) {
- ASSERT(max > 0); // Due to the 'if' above.
- RegExpExpansionLimiter limiter(compiler, max);
- if (limiter.ok_to_expand()) {
- // Unroll the optional matches up to max.
- RegExpNode* answer = on_success;
- for (int i = 0; i < max; i++) {
- ChoiceNode* alternation = new(zone) ChoiceNode(2, zone);
- if (is_greedy) {
- alternation->AddAlternative(
- GuardedAlternative(body->ToNode(compiler, answer)));
- alternation->AddAlternative(GuardedAlternative(on_success));
- } else {
- alternation->AddAlternative(GuardedAlternative(on_success));
- alternation->AddAlternative(
- GuardedAlternative(body->ToNode(compiler, answer)));
- }
- answer = alternation;
- if (not_at_start) alternation->set_not_at_start();
- }
- return answer;
- }
- }
- }
- bool has_min = min > 0;
- bool has_max = max < RegExpTree::kInfinity;
- bool needs_counter = has_min || has_max;
- int reg_ctr = needs_counter
- ? compiler->AllocateRegister()
- : RegExpCompiler::kNoRegister;
- LoopChoiceNode* center = new(zone) LoopChoiceNode(body->min_match() == 0,
- zone);
- if (not_at_start) center->set_not_at_start();
- RegExpNode* loop_return = needs_counter
- ? static_cast<RegExpNode*>(ActionNode::IncrementRegister(reg_ctr, center))
- : static_cast<RegExpNode*>(center);
- if (body_can_be_empty) {
- // If the body can be empty we need to check if it was and then
- // backtrack.
- loop_return = ActionNode::EmptyMatchCheck(body_start_reg,
- reg_ctr,
- min,
- loop_return);
- }
- RegExpNode* body_node = body->ToNode(compiler, loop_return);
- if (body_can_be_empty) {
- // If the body can be empty we need to store the start position
- // so we can bail out if it was empty.
- body_node = ActionNode::StorePosition(body_start_reg, false, body_node);
- }
- if (needs_capture_clearing) {
- // Before entering the body of this loop we need to clear captures.
- body_node = ActionNode::ClearCaptures(capture_registers, body_node);
- }
- GuardedAlternative body_alt(body_node);
- if (has_max) {
- Guard* body_guard =
- new(zone) Guard(reg_ctr, Guard::LT, max);
- body_alt.AddGuard(body_guard, zone);
- }
- GuardedAlternative rest_alt(on_success);
- if (has_min) {
- Guard* rest_guard = new(compiler->zone()) Guard(reg_ctr, Guard::GEQ, min);
- rest_alt.AddGuard(rest_guard, zone);
- }
- if (is_greedy) {
- center->AddLoopAlternative(body_alt);
- center->AddContinueAlternative(rest_alt);
- } else {
- center->AddContinueAlternative(rest_alt);
- center->AddLoopAlternative(body_alt);
- }
- if (needs_counter) {
- return ActionNode::SetRegister(reg_ctr, 0, center);
- } else {
- return center;
- }
-}
-
-
-RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- NodeInfo info;
- Zone* zone = compiler->zone();
-
- switch (type()) {
- case START_OF_LINE:
- return AssertionNode::AfterNewline(on_success);
- case START_OF_INPUT:
- return AssertionNode::AtStart(on_success);
- case BOUNDARY:
- return AssertionNode::AtBoundary(on_success);
- case NON_BOUNDARY:
- return AssertionNode::AtNonBoundary(on_success);
- case END_OF_INPUT:
- return AssertionNode::AtEnd(on_success);
- case END_OF_LINE: {
- // Compile $ in multiline regexps as an alternation with a positive
- // lookahead in one side and an end-of-input on the other side.
- // We need two registers for the lookahead.
- int stack_pointer_register = compiler->AllocateRegister();
- int position_register = compiler->AllocateRegister();
- // The ChoiceNode to distinguish between a newline and end-of-input.
- ChoiceNode* result = new(zone) ChoiceNode(2, zone);
- // Create a newline atom.
- ZoneList<CharacterRange>* newline_ranges =
- new(zone) ZoneList<CharacterRange>(3, zone);
- CharacterRange::AddClassEscape('n', newline_ranges, zone);
- RegExpCharacterClass* newline_atom = new(zone) RegExpCharacterClass('n');
- TextNode* newline_matcher = new(zone) TextNode(
- newline_atom,
- ActionNode::PositiveSubmatchSuccess(stack_pointer_register,
- position_register,
- 0, // No captures inside.
- -1, // Ignored if no captures.
- on_success));
- // Create an end-of-input matcher.
- RegExpNode* end_of_line = ActionNode::BeginSubmatch(
- stack_pointer_register,
- position_register,
- newline_matcher);
- // Add the two alternatives to the ChoiceNode.
- GuardedAlternative eol_alternative(end_of_line);
- result->AddAlternative(eol_alternative);
- GuardedAlternative end_alternative(AssertionNode::AtEnd(on_success));
- result->AddAlternative(end_alternative);
- return result;
- }
- default:
- UNREACHABLE();
- }
- return on_success;
-}
-
-
-RegExpNode* RegExpBackReference::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return new(compiler->zone())
- BackReferenceNode(RegExpCapture::StartRegister(index()),
- RegExpCapture::EndRegister(index()),
- on_success);
-}
-
-
-RegExpNode* RegExpEmpty::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return on_success;
-}
-
-
-RegExpNode* RegExpLookahead::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- int stack_pointer_register = compiler->AllocateRegister();
- int position_register = compiler->AllocateRegister();
-
- const int registers_per_capture = 2;
- const int register_of_first_capture = 2;
- int register_count = capture_count_ * registers_per_capture;
- int register_start =
- register_of_first_capture + capture_from_ * registers_per_capture;
-
- RegExpNode* success;
- if (is_positive()) {
- RegExpNode* node = ActionNode::BeginSubmatch(
- stack_pointer_register,
- position_register,
- body()->ToNode(
- compiler,
- ActionNode::PositiveSubmatchSuccess(stack_pointer_register,
- position_register,
- register_count,
- register_start,
- on_success)));
- return node;
- } else {
- // We use a ChoiceNode for a negative lookahead because it has most of
- // the characteristics we need. It has the body of the lookahead as its
- // first alternative and the expression after the lookahead of the second
- // alternative. If the first alternative succeeds then the
- // NegativeSubmatchSuccess will unwind the stack including everything the
- // choice node set up and backtrack. If the first alternative fails then
- // the second alternative is tried, which is exactly the desired result
- // for a negative lookahead. The NegativeLookaheadChoiceNode is a special
- // ChoiceNode that knows to ignore the first exit when calculating quick
- // checks.
- Zone* zone = compiler->zone();
-
- GuardedAlternative body_alt(
- body()->ToNode(
- compiler,
- success = new(zone) NegativeSubmatchSuccess(stack_pointer_register,
- position_register,
- register_count,
- register_start,
- zone)));
- ChoiceNode* choice_node =
- new(zone) NegativeLookaheadChoiceNode(body_alt,
- GuardedAlternative(on_success),
- zone);
- return ActionNode::BeginSubmatch(stack_pointer_register,
- position_register,
- choice_node);
- }
-}
-
-
-RegExpNode* RegExpCapture::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return ToNode(body(), index(), compiler, on_success);
-}
-
-
-RegExpNode* RegExpCapture::ToNode(RegExpTree* body,
- int index,
- RegExpCompiler* compiler,
- RegExpNode* on_success) {
- int start_reg = RegExpCapture::StartRegister(index);
- int end_reg = RegExpCapture::EndRegister(index);
- RegExpNode* store_end = ActionNode::StorePosition(end_reg, true, on_success);
- RegExpNode* body_node = body->ToNode(compiler, store_end);
- return ActionNode::StorePosition(start_reg, true, body_node);
-}
-
-
-RegExpNode* RegExpAlternative::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- ZoneList<RegExpTree*>* children = nodes();
- RegExpNode* current = on_success;
- for (int i = children->length() - 1; i >= 0; i--) {
- current = children->at(i)->ToNode(compiler, current);
- }
- return current;
-}
-
-
-static void AddClass(const int* elmv,
- int elmc,
- ZoneList<CharacterRange>* ranges,
- Zone* zone) {
- elmc--;
- ASSERT(elmv[elmc] == 0x10000);
- for (int i = 0; i < elmc; i += 2) {
- ASSERT(elmv[i] < elmv[i + 1]);
- ranges->Add(CharacterRange(elmv[i], elmv[i + 1] - 1), zone);
- }
-}
-
-
-static void AddClassNegated(const int *elmv,
- int elmc,
- ZoneList<CharacterRange>* ranges,
- Zone* zone) {
- elmc--;
- ASSERT(elmv[elmc] == 0x10000);
- ASSERT(elmv[0] != 0x0000);
- ASSERT(elmv[elmc-1] != String::kMaxUtf16CodeUnit);
- uc16 last = 0x0000;
- for (int i = 0; i < elmc; i += 2) {
- ASSERT(last <= elmv[i] - 1);
- ASSERT(elmv[i] < elmv[i + 1]);
- ranges->Add(CharacterRange(last, elmv[i] - 1), zone);
- last = elmv[i + 1];
- }
- ranges->Add(CharacterRange(last, String::kMaxUtf16CodeUnit), zone);
-}
-
-
-void CharacterRange::AddClassEscape(uc16 type,
- ZoneList<CharacterRange>* ranges,
- Zone* zone) {
- switch (type) {
- case 's':
- AddClass(kSpaceRanges, kSpaceRangeCount, ranges, zone);
- break;
- case 'S':
- AddClassNegated(kSpaceRanges, kSpaceRangeCount, ranges, zone);
- break;
- case 'w':
- AddClass(kWordRanges, kWordRangeCount, ranges, zone);
- break;
- case 'W':
- AddClassNegated(kWordRanges, kWordRangeCount, ranges, zone);
- break;
- case 'd':
- AddClass(kDigitRanges, kDigitRangeCount, ranges, zone);
- break;
- case 'D':
- AddClassNegated(kDigitRanges, kDigitRangeCount, ranges, zone);
- break;
- case '.':
- AddClassNegated(kLineTerminatorRanges,
- kLineTerminatorRangeCount,
- ranges,
- zone);
- break;
- // This is not a character range as defined by the spec but a
- // convenient shorthand for a character class that matches any
- // character.
- case '*':
- ranges->Add(CharacterRange::Everything(), zone);
- break;
- // This is the set of characters matched by the $ and ^ symbols
- // in multiline mode.
- case 'n':
- AddClass(kLineTerminatorRanges,
- kLineTerminatorRangeCount,
- ranges,
- zone);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-Vector<const int> CharacterRange::GetWordBounds() {
- return Vector<const int>(kWordRanges, kWordRangeCount - 1);
-}
-
-
-class CharacterRangeSplitter {
- public:
- CharacterRangeSplitter(ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded,
- Zone* zone)
- : included_(included),
- excluded_(excluded),
- zone_(zone) { }
- void Call(uc16 from, DispatchTable::Entry entry);
-
- static const int kInBase = 0;
- static const int kInOverlay = 1;
-
- private:
- ZoneList<CharacterRange>** included_;
- ZoneList<CharacterRange>** excluded_;
- Zone* zone_;
-};
-
-
-void CharacterRangeSplitter::Call(uc16 from, DispatchTable::Entry entry) {
- if (!entry.out_set()->Get(kInBase)) return;
- ZoneList<CharacterRange>** target = entry.out_set()->Get(kInOverlay)
- ? included_
- : excluded_;
- if (*target == NULL) *target = new(zone_) ZoneList<CharacterRange>(2, zone_);
- (*target)->Add(CharacterRange(entry.from(), entry.to()), zone_);
-}
-
-
-void CharacterRange::Split(ZoneList<CharacterRange>* base,
- Vector<const int> overlay,
- ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded,
- Zone* zone) {
- ASSERT_EQ(NULL, *included);
- ASSERT_EQ(NULL, *excluded);
- DispatchTable table(zone);
- for (int i = 0; i < base->length(); i++)
- table.AddRange(base->at(i), CharacterRangeSplitter::kInBase, zone);
- for (int i = 0; i < overlay.length(); i += 2) {
- table.AddRange(CharacterRange(overlay[i], overlay[i + 1] - 1),
- CharacterRangeSplitter::kInOverlay, zone);
- }
- CharacterRangeSplitter callback(included, excluded, zone);
- table.ForEach(&callback);
-}
-
-
-void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
- bool is_ascii,
- Zone* zone) {
- Isolate* isolate = Isolate::Current();
- uc16 bottom = from();
- uc16 top = to();
- if (is_ascii && !RangeContainsLatin1Equivalents(*this)) {
- if (bottom > String::kMaxOneByteCharCode) return;
- if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
- }
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- if (top == bottom) {
- // If this is a singleton we just expand the one character.
- int length = isolate->jsregexp_uncanonicalize()->get(bottom, '\0', chars);
- for (int i = 0; i < length; i++) {
- uc32 chr = chars[i];
- if (chr != bottom) {
- ranges->Add(CharacterRange::Singleton(chars[i]), zone);
- }
- }
- } else {
- // If this is a range we expand the characters block by block,
- // expanding contiguous subranges (blocks) one at a time.
- // The approach is as follows. For a given start character we
- // look up the remainder of the block that contains it (represented
- // by the end point), for instance we find 'z' if the character
- // is 'c'. A block is characterized by the property
- // that all characters uncanonicalize in the same way, except that
- // each entry in the result is incremented by the distance from the first
- // element. So a-z is a block because 'a' uncanonicalizes to ['a', 'A'] and
- // the k'th letter uncanonicalizes to ['a' + k, 'A' + k].
- // Once we've found the end point we look up its uncanonicalization
- // and produce a range for each element. For instance for [c-f]
- // we look up ['z', 'Z'] and produce [c-f] and [C-F]. We then only
- // add a range if it is not already contained in the input, so [c-f]
- // will be skipped but [C-F] will be added. If this range is not
- // completely contained in a block we do this for all the blocks
- // covered by the range (handling characters that is not in a block
- // as a "singleton block").
- unibrow::uchar range[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int pos = bottom;
- while (pos <= top) {
- int length = isolate->jsregexp_canonrange()->get(pos, '\0', range);
- uc16 block_end;
- if (length == 0) {
- block_end = pos;
- } else {
- ASSERT_EQ(1, length);
- block_end = range[0];
- }
- int end = (block_end > top) ? top : block_end;
- length = isolate->jsregexp_uncanonicalize()->get(block_end, '\0', range);
- for (int i = 0; i < length; i++) {
- uc32 c = range[i];
- uc16 range_from = c - (block_end - pos);
- uc16 range_to = c - (block_end - end);
- if (!(bottom <= range_from && range_to <= top)) {
- ranges->Add(CharacterRange(range_from, range_to), zone);
- }
- }
- pos = end + 1;
- }
- }
-}
-
-
-bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
- ASSERT_NOT_NULL(ranges);
- int n = ranges->length();
- if (n <= 1) return true;
- int max = ranges->at(0).to();
- for (int i = 1; i < n; i++) {
- CharacterRange next_range = ranges->at(i);
- if (next_range.from() <= max + 1) return false;
- max = next_range.to();
- }
- return true;
-}
-
-
-ZoneList<CharacterRange>* CharacterSet::ranges(Zone* zone) {
- if (ranges_ == NULL) {
- ranges_ = new(zone) ZoneList<CharacterRange>(2, zone);
- CharacterRange::AddClassEscape(standard_set_type_, ranges_, zone);
- }
- return ranges_;
-}
-
-
-// Move a number of elements in a zonelist to another position
-// in the same list. Handles overlapping source and target areas.
-static void MoveRanges(ZoneList<CharacterRange>* list,
- int from,
- int to,
- int count) {
- // Ranges are potentially overlapping.
- if (from < to) {
- for (int i = count - 1; i >= 0; i--) {
- list->at(to + i) = list->at(from + i);
- }
- } else {
- for (int i = 0; i < count; i++) {
- list->at(to + i) = list->at(from + i);
- }
- }
-}
-
-
-static int InsertRangeInCanonicalList(ZoneList<CharacterRange>* list,
- int count,
- CharacterRange insert) {
- // Inserts a range into list[0..count[, which must be sorted
- // by from value and non-overlapping and non-adjacent, using at most
- // list[0..count] for the result. Returns the number of resulting
- // canonicalized ranges. Inserting a range may collapse existing ranges into
- // fewer ranges, so the return value can be anything in the range 1..count+1.
- uc16 from = insert.from();
- uc16 to = insert.to();
- int start_pos = 0;
- int end_pos = count;
- for (int i = count - 1; i >= 0; i--) {
- CharacterRange current = list->at(i);
- if (current.from() > to + 1) {
- end_pos = i;
- } else if (current.to() + 1 < from) {
- start_pos = i + 1;
- break;
- }
- }
-
- // Inserted range overlaps, or is adjacent to, ranges at positions
- // [start_pos..end_pos[. Ranges before start_pos or at or after end_pos are
- // not affected by the insertion.
- // If start_pos == end_pos, the range must be inserted before start_pos.
- // if start_pos < end_pos, the entire range from start_pos to end_pos
- // must be merged with the insert range.
-
- if (start_pos == end_pos) {
- // Insert between existing ranges at position start_pos.
- if (start_pos < count) {
- MoveRanges(list, start_pos, start_pos + 1, count - start_pos);
- }
- list->at(start_pos) = insert;
- return count + 1;
- }
- if (start_pos + 1 == end_pos) {
- // Replace single existing range at position start_pos.
- CharacterRange to_replace = list->at(start_pos);
- int new_from = Min(to_replace.from(), from);
- int new_to = Max(to_replace.to(), to);
- list->at(start_pos) = CharacterRange(new_from, new_to);
- return count;
- }
- // Replace a number of existing ranges from start_pos to end_pos - 1.
- // Move the remaining ranges down.
-
- int new_from = Min(list->at(start_pos).from(), from);
- int new_to = Max(list->at(end_pos - 1).to(), to);
- if (end_pos < count) {
- MoveRanges(list, end_pos, start_pos + 1, count - end_pos);
- }
- list->at(start_pos) = CharacterRange(new_from, new_to);
- return count - (end_pos - start_pos) + 1;
-}
-
-
-void CharacterSet::Canonicalize() {
- // Special/default classes are always considered canonical. The result
- // of calling ranges() will be sorted.
- if (ranges_ == NULL) return;
- CharacterRange::Canonicalize(ranges_);
-}
-
-
-void CharacterRange::Canonicalize(ZoneList<CharacterRange>* character_ranges) {
- if (character_ranges->length() <= 1) return;
- // Check whether ranges are already canonical (increasing, non-overlapping,
- // non-adjacent).
- int n = character_ranges->length();
- int max = character_ranges->at(0).to();
- int i = 1;
- while (i < n) {
- CharacterRange current = character_ranges->at(i);
- if (current.from() <= max + 1) {
- break;
- }
- max = current.to();
- i++;
- }
- // Canonical until the i'th range. If that's all of them, we are done.
- if (i == n) return;
-
- // The ranges at index i and forward are not canonicalized. Make them so by
- // doing the equivalent of insertion sort (inserting each into the previous
- // list, in order).
- // Notice that inserting a range can reduce the number of ranges in the
- // result due to combining of adjacent and overlapping ranges.
- int read = i; // Range to insert.
- int num_canonical = i; // Length of canonicalized part of list.
- do {
- num_canonical = InsertRangeInCanonicalList(character_ranges,
- num_canonical,
- character_ranges->at(read));
- read++;
- } while (read < n);
- character_ranges->Rewind(num_canonical);
-
- ASSERT(CharacterRange::IsCanonical(character_ranges));
-}
-
-
-void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
- ZoneList<CharacterRange>* negated_ranges,
- Zone* zone) {
- ASSERT(CharacterRange::IsCanonical(ranges));
- ASSERT_EQ(0, negated_ranges->length());
- int range_count = ranges->length();
- uc16 from = 0;
- int i = 0;
- if (range_count > 0 && ranges->at(0).from() == 0) {
- from = ranges->at(0).to();
- i = 1;
- }
- while (i < range_count) {
- CharacterRange range = ranges->at(i);
- negated_ranges->Add(CharacterRange(from + 1, range.from() - 1), zone);
- from = range.to();
- i++;
- }
- if (from < String::kMaxUtf16CodeUnit) {
- negated_ranges->Add(CharacterRange(from + 1, String::kMaxUtf16CodeUnit),
- zone);
- }
-}
-
-
-// -------------------------------------------------------------------
-// Splay tree
-
-
-OutSet* OutSet::Extend(unsigned value, Zone* zone) {
- if (Get(value))
- return this;
- if (successors(zone) != NULL) {
- for (int i = 0; i < successors(zone)->length(); i++) {
- OutSet* successor = successors(zone)->at(i);
- if (successor->Get(value))
- return successor;
- }
- } else {
- successors_ = new(zone) ZoneList<OutSet*>(2, zone);
- }
- OutSet* result = new(zone) OutSet(first_, remaining_);
- result->Set(value, zone);
- successors(zone)->Add(result, zone);
- return result;
-}
-
-
-void OutSet::Set(unsigned value, Zone *zone) {
- if (value < kFirstLimit) {
- first_ |= (1 << value);
- } else {
- if (remaining_ == NULL)
- remaining_ = new(zone) ZoneList<unsigned>(1, zone);
- if (remaining_->is_empty() || !remaining_->Contains(value))
- remaining_->Add(value, zone);
- }
-}
-
-
-bool OutSet::Get(unsigned value) {
- if (value < kFirstLimit) {
- return (first_ & (1 << value)) != 0;
- } else if (remaining_ == NULL) {
- return false;
- } else {
- return remaining_->Contains(value);
- }
-}
-
-
-const uc16 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
-
-
-void DispatchTable::AddRange(CharacterRange full_range, int value,
- Zone* zone) {
- CharacterRange current = full_range;
- if (tree()->is_empty()) {
- // If this is the first range we just insert into the table.
- ZoneSplayTree<Config>::Locator loc;
- ASSERT_RESULT(tree()->Insert(current.from(), &loc));
- loc.set_value(Entry(current.from(), current.to(),
- empty()->Extend(value, zone)));
- return;
- }
- // First see if there is a range to the left of this one that
- // overlaps.
- ZoneSplayTree<Config>::Locator loc;
- if (tree()->FindGreatestLessThan(current.from(), &loc)) {
- Entry* entry = &loc.value();
- // If we've found a range that overlaps with this one, and it
- // starts strictly to the left of this one, we have to fix it
- // because the following code only handles ranges that start on
- // or after the start point of the range we're adding.
- if (entry->from() < current.from() && entry->to() >= current.from()) {
- // Snap the overlapping range in half around the start point of
- // the range we're adding.
- CharacterRange left(entry->from(), current.from() - 1);
- CharacterRange right(current.from(), entry->to());
- // The left part of the overlapping range doesn't overlap.
- // Truncate the whole entry to be just the left part.
- entry->set_to(left.to());
- // The right part is the one that overlaps. We add this part
- // to the map and let the next step deal with merging it with
- // the range we're adding.
- ZoneSplayTree<Config>::Locator loc;
- ASSERT_RESULT(tree()->Insert(right.from(), &loc));
- loc.set_value(Entry(right.from(),
- right.to(),
- entry->out_set()));
- }
- }
- while (current.is_valid()) {
- if (tree()->FindLeastGreaterThan(current.from(), &loc) &&
- (loc.value().from() <= current.to()) &&
- (loc.value().to() >= current.from())) {
- Entry* entry = &loc.value();
- // We have overlap. If there is space between the start point of
- // the range we're adding and where the overlapping range starts
- // then we have to add a range covering just that space.
- if (current.from() < entry->from()) {
- ZoneSplayTree<Config>::Locator ins;
- ASSERT_RESULT(tree()->Insert(current.from(), &ins));
- ins.set_value(Entry(current.from(),
- entry->from() - 1,
- empty()->Extend(value, zone)));
- current.set_from(entry->from());
- }
- ASSERT_EQ(current.from(), entry->from());
- // If the overlapping range extends beyond the one we want to add
- // we have to snap the right part off and add it separately.
- if (entry->to() > current.to()) {
- ZoneSplayTree<Config>::Locator ins;
- ASSERT_RESULT(tree()->Insert(current.to() + 1, &ins));
- ins.set_value(Entry(current.to() + 1,
- entry->to(),
- entry->out_set()));
- entry->set_to(current.to());
- }
- ASSERT(entry->to() <= current.to());
- // The overlapping range is now completely contained by the range
- // we're adding so we can just update it and move the start point
- // of the range we're adding just past it.
- entry->AddValue(value, zone);
- // Bail out if the last interval ended at 0xFFFF since otherwise
- // adding 1 will wrap around to 0.
- if (entry->to() == String::kMaxUtf16CodeUnit)
- break;
- ASSERT(entry->to() + 1 > current.from());
- current.set_from(entry->to() + 1);
- } else {
- // There is no overlap so we can just add the range
- ZoneSplayTree<Config>::Locator ins;
- ASSERT_RESULT(tree()->Insert(current.from(), &ins));
- ins.set_value(Entry(current.from(),
- current.to(),
- empty()->Extend(value, zone)));
- break;
- }
- }
-}
-
-
-OutSet* DispatchTable::Get(uc16 value) {
- ZoneSplayTree<Config>::Locator loc;
- if (!tree()->FindGreatestLessThan(value, &loc))
- return empty();
- Entry* entry = &loc.value();
- if (value <= entry->to())
- return entry->out_set();
- else
- return empty();
-}
-
-
-// -------------------------------------------------------------------
-// Analysis
-
-
-void Analysis::EnsureAnalyzed(RegExpNode* that) {
- StackLimitCheck check(Isolate::Current());
- if (check.HasOverflowed()) {
- fail("Stack overflow");
- return;
- }
- if (that->info()->been_analyzed || that->info()->being_analyzed)
- return;
- that->info()->being_analyzed = true;
- that->Accept(this);
- that->info()->being_analyzed = false;
- that->info()->been_analyzed = true;
-}
-
-
-void Analysis::VisitEnd(EndNode* that) {
- // nothing to do
-}
-
-
-void TextNode::CalculateOffsets() {
- int element_count = elements()->length();
- // Set up the offsets of the elements relative to the start. This is a fixed
- // quantity since a TextNode can only contain fixed-width things.
- int cp_offset = 0;
- for (int i = 0; i < element_count; i++) {
- TextElement& elm = elements()->at(i);
- elm.cp_offset = cp_offset;
- if (elm.type == TextElement::ATOM) {
- cp_offset += elm.data.u_atom->data().length();
- } else {
- cp_offset++;
- }
- }
-}
-
-
-void Analysis::VisitText(TextNode* that) {
- if (ignore_case_) {
- that->MakeCaseIndependent(is_ascii_);
- }
- EnsureAnalyzed(that->on_success());
- if (!has_failed()) {
- that->CalculateOffsets();
- }
-}
-
-
-void Analysis::VisitAction(ActionNode* that) {
- RegExpNode* target = that->on_success();
- EnsureAnalyzed(target);
- if (!has_failed()) {
- // If the next node is interested in what it follows then this node
- // has to be interested too so it can pass the information on.
- that->info()->AddFromFollowing(target->info());
- }
-}
-
-
-void Analysis::VisitChoice(ChoiceNode* that) {
- NodeInfo* info = that->info();
- for (int i = 0; i < that->alternatives()->length(); i++) {
- RegExpNode* node = that->alternatives()->at(i).node();
- EnsureAnalyzed(node);
- if (has_failed()) return;
- // Anything the following nodes need to know has to be known by
- // this node also, so it can pass it on.
- info->AddFromFollowing(node->info());
- }
-}
-
-
-void Analysis::VisitLoopChoice(LoopChoiceNode* that) {
- NodeInfo* info = that->info();
- for (int i = 0; i < that->alternatives()->length(); i++) {
- RegExpNode* node = that->alternatives()->at(i).node();
- if (node != that->loop_node()) {
- EnsureAnalyzed(node);
- if (has_failed()) return;
- info->AddFromFollowing(node->info());
- }
- }
- // Check the loop last since it may need the value of this node
- // to get a correct result.
- EnsureAnalyzed(that->loop_node());
- if (!has_failed()) {
- info->AddFromFollowing(that->loop_node()->info());
- }
-}
-
-
-void Analysis::VisitBackReference(BackReferenceNode* that) {
- EnsureAnalyzed(that->on_success());
-}
-
-
-void Analysis::VisitAssertion(AssertionNode* that) {
- EnsureAnalyzed(that->on_success());
-}
-
-
-void BackReferenceNode::FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- // Working out the set of characters that a backreference can match is too
- // hard, so we just say that any character can match.
- bm->SetRest(offset);
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
- RegExpMacroAssembler::kTableSize);
-
-
-void ChoiceNode::FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- ZoneList<GuardedAlternative>* alts = alternatives();
- budget = (budget - 1) / alts->length();
- for (int i = 0; i < alts->length(); i++) {
- GuardedAlternative& alt = alts->at(i);
- if (alt.guards() != NULL && alt.guards()->length() != 0) {
- bm->SetRest(offset); // Give up trying to fill in info.
- SaveBMInfo(bm, not_at_start, offset);
- return;
- }
- alt.node()->FillInBMInfo(offset, budget, bm, not_at_start);
- }
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-void TextNode::FillInBMInfo(int initial_offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- if (initial_offset >= bm->length()) return;
- int offset = initial_offset;
- int max_char = bm->max_char();
- for (int i = 0; i < elements()->length(); i++) {
- if (offset >= bm->length()) {
- if (initial_offset == 0) set_bm_info(not_at_start, bm);
- return;
- }
- TextElement text = elements()->at(i);
- if (text.type == TextElement::ATOM) {
- RegExpAtom* atom = text.data.u_atom;
- for (int j = 0; j < atom->length(); j++, offset++) {
- if (offset >= bm->length()) {
- if (initial_offset == 0) set_bm_info(not_at_start, bm);
- return;
- }
- uc16 character = atom->data()[j];
- if (bm->compiler()->ignore_case()) {
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(
- ISOLATE,
- character,
- bm->max_char() == String::kMaxOneByteCharCode,
- chars);
- for (int j = 0; j < length; j++) {
- bm->Set(offset, chars[j]);
- }
- } else {
- if (character <= max_char) bm->Set(offset, character);
- }
- }
- } else {
- ASSERT(text.type == TextElement::CHAR_CLASS);
- RegExpCharacterClass* char_class = text.data.u_char_class;
- ZoneList<CharacterRange>* ranges = char_class->ranges(zone());
- if (char_class->is_negated()) {
- bm->SetAll(offset);
- } else {
- for (int k = 0; k < ranges->length(); k++) {
- CharacterRange& range = ranges->at(k);
- if (range.from() > max_char) continue;
- int to = Min(max_char, static_cast<int>(range.to()));
- bm->SetInterval(offset, Interval(range.from(), to));
- }
- }
- offset++;
- }
- }
- if (offset >= bm->length()) {
- if (initial_offset == 0) set_bm_info(not_at_start, bm);
- return;
- }
- on_success()->FillInBMInfo(offset,
- budget - 1,
- bm,
- true); // Not at start after a text node.
- if (initial_offset == 0) set_bm_info(not_at_start, bm);
-}
-
-
-// -------------------------------------------------------------------
-// Dispatch table construction
-
-
-void DispatchTableConstructor::VisitEnd(EndNode* that) {
- AddRange(CharacterRange::Everything());
-}
-
-
-void DispatchTableConstructor::BuildTable(ChoiceNode* node) {
- node->set_being_calculated(true);
- ZoneList<GuardedAlternative>* alternatives = node->alternatives();
- for (int i = 0; i < alternatives->length(); i++) {
- set_choice_index(i);
- alternatives->at(i).node()->Accept(this);
- }
- node->set_being_calculated(false);
-}
-
-
-class AddDispatchRange {
- public:
- explicit AddDispatchRange(DispatchTableConstructor* constructor)
- : constructor_(constructor) { }
- void Call(uc32 from, DispatchTable::Entry entry);
- private:
- DispatchTableConstructor* constructor_;
-};
-
-
-void AddDispatchRange::Call(uc32 from, DispatchTable::Entry entry) {
- CharacterRange range(from, entry.to());
- constructor_->AddRange(range);
-}
-
-
-void DispatchTableConstructor::VisitChoice(ChoiceNode* node) {
- if (node->being_calculated())
- return;
- DispatchTable* table = node->GetTable(ignore_case_);
- AddDispatchRange adder(this);
- table->ForEach(&adder);
-}
-
-
-void DispatchTableConstructor::VisitBackReference(BackReferenceNode* that) {
- // TODO(160): Find the node that we refer back to and propagate its start
- // set back to here. For now we just accept anything.
- AddRange(CharacterRange::Everything());
-}
-
-
-void DispatchTableConstructor::VisitAssertion(AssertionNode* that) {
- RegExpNode* target = that->on_success();
- target->Accept(this);
-}
-
-
-static int CompareRangeByFrom(const CharacterRange* a,
- const CharacterRange* b) {
- return Compare<uc16>(a->from(), b->from());
-}
-
-
-void DispatchTableConstructor::AddInverse(ZoneList<CharacterRange>* ranges) {
- ranges->Sort(CompareRangeByFrom);
- uc16 last = 0;
- for (int i = 0; i < ranges->length(); i++) {
- CharacterRange range = ranges->at(i);
- if (last < range.from())
- AddRange(CharacterRange(last, range.from() - 1));
- if (range.to() >= last) {
- if (range.to() == String::kMaxUtf16CodeUnit) {
- return;
- } else {
- last = range.to() + 1;
- }
- }
- }
- AddRange(CharacterRange(last, String::kMaxUtf16CodeUnit));
-}
-
-
-void DispatchTableConstructor::VisitText(TextNode* that) {
- TextElement elm = that->elements()->at(0);
- switch (elm.type) {
- case TextElement::ATOM: {
- uc16 c = elm.data.u_atom->data()[0];
- AddRange(CharacterRange(c, c));
- break;
- }
- case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* tree = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = tree->ranges(that->zone());
- if (tree->is_negated()) {
- AddInverse(ranges);
- } else {
- for (int i = 0; i < ranges->length(); i++)
- AddRange(ranges->at(i));
- }
- break;
- }
- default: {
- UNIMPLEMENTED();
- }
- }
-}
-
-
-void DispatchTableConstructor::VisitAction(ActionNode* that) {
- RegExpNode* target = that->on_success();
- target->Accept(this);
-}
-
-
-RegExpEngine::CompilationResult RegExpEngine::Compile(
- RegExpCompileData* data,
- bool ignore_case,
- bool is_global,
- bool is_multiline,
- Handle<String> pattern,
- Handle<String> sample_subject,
- bool is_ascii,
- Zone* zone) {
- if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
- return IrregexpRegExpTooBig();
- }
- RegExpCompiler compiler(data->capture_count, ignore_case, is_ascii, zone);
-
- // Sample some characters from the middle of the string.
- static const int kSampleSize = 128;
-
- FlattenString(sample_subject);
- int chars_sampled = 0;
- int half_way = (sample_subject->length() - kSampleSize) / 2;
- for (int i = Max(0, half_way);
- i < sample_subject->length() && chars_sampled < kSampleSize;
- i++, chars_sampled++) {
- compiler.frequency_collator()->CountCharacter(sample_subject->Get(i));
- }
-
- // Wrap the body of the regexp in capture #0.
- RegExpNode* captured_body = RegExpCapture::ToNode(data->tree,
- 0,
- &compiler,
- compiler.accept());
- RegExpNode* node = captured_body;
- bool is_end_anchored = data->tree->IsAnchoredAtEnd();
- bool is_start_anchored = data->tree->IsAnchoredAtStart();
- int max_length = data->tree->max_match();
- if (!is_start_anchored) {
- // Add a .*? at the beginning, outside the body capture, unless
- // this expression is anchored at the beginning.
- RegExpNode* loop_node =
- RegExpQuantifier::ToNode(0,
- RegExpTree::kInfinity,
- false,
- new(zone) RegExpCharacterClass('*'),
- &compiler,
- captured_body,
- data->contains_anchor);
-
- if (data->contains_anchor) {
- // Unroll loop once, to take care of the case that might start
- // at the start of input.
- ChoiceNode* first_step_node = new(zone) ChoiceNode(2, zone);
- first_step_node->AddAlternative(GuardedAlternative(captured_body));
- first_step_node->AddAlternative(GuardedAlternative(
- new(zone) TextNode(new(zone) RegExpCharacterClass('*'), loop_node)));
- node = first_step_node;
- } else {
- node = loop_node;
- }
- }
- if (is_ascii) {
- node = node->FilterASCII(RegExpCompiler::kMaxRecursion, ignore_case);
- // Do it again to propagate the new nodes to places where they were not
- // put because they had not been calculated yet.
- if (node != NULL) {
- node = node->FilterASCII(RegExpCompiler::kMaxRecursion, ignore_case);
- }
- }
-
- if (node == NULL) node = new(zone) EndNode(EndNode::BACKTRACK, zone);
- data->node = node;
- Analysis analysis(ignore_case, is_ascii);
- analysis.EnsureAnalyzed(node);
- if (analysis.has_failed()) {
- const char* error_message = analysis.error_message();
- return CompilationResult(error_message);
- }
-
- // Create the correct assembler for the architecture.
-#ifndef V8_INTERPRETED_REGEXP
- // Native regexp implementation.
-
- NativeRegExpMacroAssembler::Mode mode =
- is_ascii ? NativeRegExpMacroAssembler::ASCII
- : NativeRegExpMacroAssembler::UC16;
-
-#if V8_TARGET_ARCH_IA32
- RegExpMacroAssemblerIA32 macro_assembler(mode, (data->capture_count + 1) * 2,
- zone);
-#elif V8_TARGET_ARCH_X64
- RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2,
- zone);
-#elif V8_TARGET_ARCH_ARM
- RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2,
- zone);
-#elif V8_TARGET_ARCH_MIPS
- RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
- zone);
-#endif
-
-#else // V8_INTERPRETED_REGEXP
- // Interpreted regexp implementation.
- EmbeddedVector<byte, 1024> codes;
- RegExpMacroAssemblerIrregexp macro_assembler(codes, zone);
-#endif // V8_INTERPRETED_REGEXP
-
- // Inserted here, instead of in Assembler, because it depends on information
- // in the AST that isn't replicated in the Node structure.
- static const int kMaxBacksearchLimit = 1024;
- if (is_end_anchored &&
- !is_start_anchored &&
- max_length < kMaxBacksearchLimit) {
- macro_assembler.SetCurrentPositionFromEnd(max_length);
- }
-
- if (is_global) {
- macro_assembler.set_global_mode(
- (data->tree->min_match() > 0)
- ? RegExpMacroAssembler::GLOBAL_NO_ZERO_LENGTH_CHECK
- : RegExpMacroAssembler::GLOBAL);
- }
-
- return compiler.Assemble(&macro_assembler,
- node,
- data->capture_count,
- pattern);
-}
-
-
-}} // namespace v8::internal
diff --git a/src/3rdparty/v8/src/jsregexp.h b/src/3rdparty/v8/src/jsregexp.h
deleted file mode 100644
index 625f192..0000000
--- a/src/3rdparty/v8/src/jsregexp.h
+++ /dev/null
@@ -1,1624 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JSREGEXP_H_
-#define V8_JSREGEXP_H_
-
-#include "allocation.h"
-#include "assembler.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-class NodeVisitor;
-class RegExpCompiler;
-class RegExpMacroAssembler;
-class RegExpNode;
-class RegExpTree;
-class BoyerMooreLookahead;
-
-class RegExpImpl {
- public:
- // Whether V8 is compiled with native regexp support or not.
- static bool UsesNativeRegExp() {
-#ifdef V8_INTERPRETED_REGEXP
- return false;
-#else
- return true;
-#endif
- }
-
- // Creates a regular expression literal in the old space.
- // This function calls the garbage collector if necessary.
- static Handle<Object> CreateRegExpLiteral(Handle<JSFunction> constructor,
- Handle<String> pattern,
- Handle<String> flags,
- bool* has_pending_exception);
-
- // Returns a string representation of a regular expression.
- // Implements RegExp.prototype.toString, see ECMA-262 section 15.10.6.4.
- // This function calls the garbage collector if necessary.
- static Handle<String> ToString(Handle<Object> value);
-
- // Parses the RegExp pattern and prepares the JSRegExp object with
- // generic data and choice of implementation - as well as what
- // the implementation wants to store in the data field.
- // Returns false if compilation fails.
- static Handle<Object> Compile(Handle<JSRegExp> re,
- Handle<String> pattern,
- Handle<String> flags,
- Zone* zone);
-
- // See ECMA-262 section 15.10.6.2.
- // This function calls the garbage collector if necessary.
- static Handle<Object> Exec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> lastMatchInfo);
-
- // Prepares a JSRegExp object with Irregexp-specific data.
- static void IrregexpInitialize(Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
- int capture_register_count);
-
-
- static void AtomCompile(Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
- Handle<String> match_pattern);
-
-
- static int AtomExecRaw(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- int32_t* output,
- int output_size);
-
-
- static Handle<Object> AtomExec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> lastMatchInfo);
-
- enum IrregexpResult { RE_FAILURE = 0, RE_SUCCESS = 1, RE_EXCEPTION = -1 };
-
- // Prepare a RegExp for being executed one or more times (using
- // IrregexpExecOnce) on the subject.
- // This ensures that the regexp is compiled for the subject, and that
- // the subject is flat.
- // Returns the number of integer spaces required by IrregexpExecOnce
- // as its "registers" argument. If the regexp cannot be compiled,
- // an exception is set as pending, and this function returns negative.
- static int IrregexpPrepare(Handle<JSRegExp> regexp,
- Handle<String> subject);
-
- // Execute a regular expression on the subject, starting from index.
- // If matching succeeds, return the number of matches. This can be larger
- // than one in the case of global regular expressions.
- // The captures and subcaptures are stored into the registers vector.
- // If matching fails, returns RE_FAILURE.
- // If execution fails, sets a pending exception and returns RE_EXCEPTION.
- static int IrregexpExecRaw(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- int32_t* output,
- int output_size);
-
- // Execute an Irregexp bytecode pattern.
- // On a successful match, the result is a JSArray containing
- // captured positions. On a failure, the result is the null value.
- // Returns an empty handle in case of an exception.
- static Handle<Object> IrregexpExec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> lastMatchInfo);
-
- // Set last match info. If match is NULL, then setting captures is omitted.
- static Handle<JSArray> SetLastMatchInfo(Handle<JSArray> last_match_info,
- Handle<String> subject,
- int capture_count,
- int32_t* match);
-
-
- class GlobalCache {
- public:
- GlobalCache(Handle<JSRegExp> regexp,
- Handle<String> subject,
- bool is_global,
- Isolate* isolate);
-
- INLINE(~GlobalCache());
-
- // Fetch the next entry in the cache for global regexp match results.
- // This does not set the last match info. Upon failure, NULL is returned.
- // The cause can be checked with Result(). The previous
- // result is still in available in memory when a failure happens.
- INLINE(int32_t* FetchNext());
-
- INLINE(int32_t* LastSuccessfulMatch());
-
- INLINE(bool HasException()) { return num_matches_ < 0; }
-
- private:
- int num_matches_;
- int max_matches_;
- int current_match_index_;
- int registers_per_match_;
- // Pointer to the last set of captures.
- int32_t* register_array_;
- int register_array_size_;
- Handle<JSRegExp> regexp_;
- Handle<String> subject_;
- };
-
-
- // Array index in the lastMatchInfo array.
- static const int kLastCaptureCount = 0;
- static const int kLastSubject = 1;
- static const int kLastInput = 2;
- static const int kFirstCapture = 3;
- static const int kLastMatchOverhead = 3;
-
- // Direct offset into the lastMatchInfo array.
- static const int kLastCaptureCountOffset =
- FixedArray::kHeaderSize + kLastCaptureCount * kPointerSize;
- static const int kLastSubjectOffset =
- FixedArray::kHeaderSize + kLastSubject * kPointerSize;
- static const int kLastInputOffset =
- FixedArray::kHeaderSize + kLastInput * kPointerSize;
- static const int kFirstCaptureOffset =
- FixedArray::kHeaderSize + kFirstCapture * kPointerSize;
-
- // Used to access the lastMatchInfo array.
- static int GetCapture(FixedArray* array, int index) {
- return Smi::cast(array->get(index + kFirstCapture))->value();
- }
-
- static void SetLastCaptureCount(FixedArray* array, int to) {
- array->set(kLastCaptureCount, Smi::FromInt(to));
- }
-
- static void SetLastSubject(FixedArray* array, String* to) {
- array->set(kLastSubject, to);
- }
-
- static void SetLastInput(FixedArray* array, String* to) {
- array->set(kLastInput, to);
- }
-
- static void SetCapture(FixedArray* array, int index, int to) {
- array->set(index + kFirstCapture, Smi::FromInt(to));
- }
-
- static int GetLastCaptureCount(FixedArray* array) {
- return Smi::cast(array->get(kLastCaptureCount))->value();
- }
-
- // For acting on the JSRegExp data FixedArray.
- static int IrregexpMaxRegisterCount(FixedArray* re);
- static void SetIrregexpMaxRegisterCount(FixedArray* re, int value);
- static int IrregexpNumberOfCaptures(FixedArray* re);
- static int IrregexpNumberOfRegisters(FixedArray* re);
- static ByteArray* IrregexpByteCode(FixedArray* re, bool is_ascii);
- static Code* IrregexpNativeCode(FixedArray* re, bool is_ascii);
-
- // Limit the space regexps take up on the heap. In order to limit this we
- // would like to keep track of the amount of regexp code on the heap. This
- // is not tracked, however. As a conservative approximation we track the
- // total regexp code compiled including code that has subsequently been freed
- // and the total executable memory at any point.
- static const int kRegExpExecutableMemoryLimit = 16 * MB;
- static const int kRegWxpCompiledLimit = 1 * MB;
-
- private:
- static bool CompileIrregexp(
- Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
- static inline bool EnsureCompiledIrregexp(
- Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
-};
-
-
-// Represents the location of one element relative to the intersection of
-// two sets. Corresponds to the four areas of a Venn diagram.
-enum ElementInSetsRelation {
- kInsideNone = 0,
- kInsideFirst = 1,
- kInsideSecond = 2,
- kInsideBoth = 3
-};
-
-
-// Represents code units in the range from from_ to to_, both ends are
-// inclusive.
-class CharacterRange {
- public:
- CharacterRange() : from_(0), to_(0) { }
- // For compatibility with the CHECK_OK macro
- CharacterRange(void* null) { ASSERT_EQ(NULL, null); } //NOLINT
- CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { }
- static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges,
- Zone* zone);
- static Vector<const int> GetWordBounds();
- static inline CharacterRange Singleton(uc16 value) {
- return CharacterRange(value, value);
- }
- static inline CharacterRange Range(uc16 from, uc16 to) {
- ASSERT(from <= to);
- return CharacterRange(from, to);
- }
- static inline CharacterRange Everything() {
- return CharacterRange(0, 0xFFFF);
- }
- bool Contains(uc16 i) { return from_ <= i && i <= to_; }
- uc16 from() const { return from_; }
- void set_from(uc16 value) { from_ = value; }
- uc16 to() const { return to_; }
- void set_to(uc16 value) { to_ = value; }
- bool is_valid() { return from_ <= to_; }
- bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
- bool IsSingleton() { return (from_ == to_); }
- void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii,
- Zone* zone);
- static void Split(ZoneList<CharacterRange>* base,
- Vector<const int> overlay,
- ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded,
- Zone* zone);
- // Whether a range list is in canonical form: Ranges ordered by from value,
- // and ranges non-overlapping and non-adjacent.
- static bool IsCanonical(ZoneList<CharacterRange>* ranges);
- // Convert range list to canonical form. The characters covered by the ranges
- // will still be the same, but no character is in more than one range, and
- // adjacent ranges are merged. The resulting list may be shorter than the
- // original, but cannot be longer.
- static void Canonicalize(ZoneList<CharacterRange>* ranges);
- // Negate the contents of a character range in canonical form.
- static void Negate(ZoneList<CharacterRange>* src,
- ZoneList<CharacterRange>* dst,
- Zone* zone);
- static const int kStartMarker = (1 << 24);
- static const int kPayloadMask = (1 << 24) - 1;
-
- private:
- uc16 from_;
- uc16 to_;
-};
-
-
-// A set of unsigned integers that behaves especially well on small
-// integers (< 32). May do zone-allocation.
-class OutSet: public ZoneObject {
- public:
- OutSet() : first_(0), remaining_(NULL), successors_(NULL) { }
- OutSet* Extend(unsigned value, Zone* zone);
- bool Get(unsigned value);
- static const unsigned kFirstLimit = 32;
-
- private:
- // Destructively set a value in this set. In most cases you want
- // to use Extend instead to ensure that only one instance exists
- // that contains the same values.
- void Set(unsigned value, Zone* zone);
-
- // The successors are a list of sets that contain the same values
- // as this set and the one more value that is not present in this
- // set.
- ZoneList<OutSet*>* successors(Zone* zone) { return successors_; }
-
- OutSet(uint32_t first, ZoneList<unsigned>* remaining)
- : first_(first), remaining_(remaining), successors_(NULL) { }
- uint32_t first_;
- ZoneList<unsigned>* remaining_;
- ZoneList<OutSet*>* successors_;
- friend class Trace;
-};
-
-
-// A mapping from integers, specified as ranges, to a set of integers.
-// Used for mapping character ranges to choices.
-class DispatchTable : public ZoneObject {
- public:
- explicit DispatchTable(Zone* zone) : tree_(zone) { }
-
- class Entry {
- public:
- Entry() : from_(0), to_(0), out_set_(NULL) { }
- Entry(uc16 from, uc16 to, OutSet* out_set)
- : from_(from), to_(to), out_set_(out_set) { }
- uc16 from() { return from_; }
- uc16 to() { return to_; }
- void set_to(uc16 value) { to_ = value; }
- void AddValue(int value, Zone* zone) {
- out_set_ = out_set_->Extend(value, zone);
- }
- OutSet* out_set() { return out_set_; }
- private:
- uc16 from_;
- uc16 to_;
- OutSet* out_set_;
- };
-
- class Config {
- public:
- typedef uc16 Key;
- typedef Entry Value;
- static const uc16 kNoKey;
- static const Entry NoValue() { return Value(); }
- static inline int Compare(uc16 a, uc16 b) {
- if (a == b)
- return 0;
- else if (a < b)
- return -1;
- else
- return 1;
- }
- };
-
- void AddRange(CharacterRange range, int value, Zone* zone);
- OutSet* Get(uc16 value);
- void Dump();
-
- template <typename Callback>
- void ForEach(Callback* callback) {
- return tree()->ForEach(callback);
- }
-
- private:
- // There can't be a static empty set since it allocates its
- // successors in a zone and caches them.
- OutSet* empty() { return &empty_; }
- OutSet empty_;
- ZoneSplayTree<Config>* tree() { return &tree_; }
- ZoneSplayTree<Config> tree_;
-};
-
-
-#define FOR_EACH_NODE_TYPE(VISIT) \
- VISIT(End) \
- VISIT(Action) \
- VISIT(Choice) \
- VISIT(BackReference) \
- VISIT(Assertion) \
- VISIT(Text)
-
-
-#define FOR_EACH_REG_EXP_TREE_TYPE(VISIT) \
- VISIT(Disjunction) \
- VISIT(Alternative) \
- VISIT(Assertion) \
- VISIT(CharacterClass) \
- VISIT(Atom) \
- VISIT(Quantifier) \
- VISIT(Capture) \
- VISIT(Lookahead) \
- VISIT(BackReference) \
- VISIT(Empty) \
- VISIT(Text)
-
-
-#define FORWARD_DECLARE(Name) class RegExp##Name;
-FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
-#undef FORWARD_DECLARE
-
-
-class TextElement {
- public:
- enum Type {UNINITIALIZED, ATOM, CHAR_CLASS};
- TextElement() : type(UNINITIALIZED) { }
- explicit TextElement(Type t) : type(t), cp_offset(-1) { }
- static TextElement Atom(RegExpAtom* atom);
- static TextElement CharClass(RegExpCharacterClass* char_class);
- int length();
- Type type;
- union {
- RegExpAtom* u_atom;
- RegExpCharacterClass* u_char_class;
- } data;
- int cp_offset;
-};
-
-
-class Trace;
-
-
-struct NodeInfo {
- NodeInfo()
- : being_analyzed(false),
- been_analyzed(false),
- follows_word_interest(false),
- follows_newline_interest(false),
- follows_start_interest(false),
- at_end(false),
- visited(false),
- replacement_calculated(false) { }
-
- // Returns true if the interests and assumptions of this node
- // matches the given one.
- bool Matches(NodeInfo* that) {
- return (at_end == that->at_end) &&
- (follows_word_interest == that->follows_word_interest) &&
- (follows_newline_interest == that->follows_newline_interest) &&
- (follows_start_interest == that->follows_start_interest);
- }
-
- // Updates the interests of this node given the interests of the
- // node preceding it.
- void AddFromPreceding(NodeInfo* that) {
- at_end |= that->at_end;
- follows_word_interest |= that->follows_word_interest;
- follows_newline_interest |= that->follows_newline_interest;
- follows_start_interest |= that->follows_start_interest;
- }
-
- bool HasLookbehind() {
- return follows_word_interest ||
- follows_newline_interest ||
- follows_start_interest;
- }
-
- // Sets the interests of this node to include the interests of the
- // following node.
- void AddFromFollowing(NodeInfo* that) {
- follows_word_interest |= that->follows_word_interest;
- follows_newline_interest |= that->follows_newline_interest;
- follows_start_interest |= that->follows_start_interest;
- }
-
- void ResetCompilationState() {
- being_analyzed = false;
- been_analyzed = false;
- }
-
- bool being_analyzed: 1;
- bool been_analyzed: 1;
-
- // These bits are set of this node has to know what the preceding
- // character was.
- bool follows_word_interest: 1;
- bool follows_newline_interest: 1;
- bool follows_start_interest: 1;
-
- bool at_end: 1;
- bool visited: 1;
- bool replacement_calculated: 1;
-};
-
-
-// Details of a quick mask-compare check that can look ahead in the
-// input stream.
-class QuickCheckDetails {
- public:
- QuickCheckDetails()
- : characters_(0),
- mask_(0),
- value_(0),
- cannot_match_(false) { }
- explicit QuickCheckDetails(int characters)
- : characters_(characters),
- mask_(0),
- value_(0),
- cannot_match_(false) { }
- bool Rationalize(bool ascii);
- // Merge in the information from another branch of an alternation.
- void Merge(QuickCheckDetails* other, int from_index);
- // Advance the current position by some amount.
- void Advance(int by, bool ascii);
- void Clear();
- bool cannot_match() { return cannot_match_; }
- void set_cannot_match() { cannot_match_ = true; }
- struct Position {
- Position() : mask(0), value(0), determines_perfectly(false) { }
- uc16 mask;
- uc16 value;
- bool determines_perfectly;
- };
- int characters() { return characters_; }
- void set_characters(int characters) { characters_ = characters; }
- Position* positions(int index) {
- ASSERT(index >= 0);
- ASSERT(index < characters_);
- return positions_ + index;
- }
- uint32_t mask() { return mask_; }
- uint32_t value() { return value_; }
-
- private:
- // How many characters do we have quick check information from. This is
- // the same for all branches of a choice node.
- int characters_;
- Position positions_[4];
- // These values are the condensate of the above array after Rationalize().
- uint32_t mask_;
- uint32_t value_;
- // If set to true, there is no way this quick check can match at all.
- // E.g., if it requires to be at the start of the input, and isn't.
- bool cannot_match_;
-};
-
-
-extern int kUninitializedRegExpNodePlaceHolder;
-
-
-class RegExpNode: public ZoneObject {
- public:
- explicit RegExpNode(Zone* zone)
- : replacement_(NULL), trace_count_(0), zone_(zone) {
- bm_info_[0] = bm_info_[1] = NULL;
- }
- virtual ~RegExpNode();
- virtual void Accept(NodeVisitor* visitor) = 0;
- // Generates a goto to this node or actually generates the code at this point.
- virtual void Emit(RegExpCompiler* compiler, Trace* trace) = 0;
- // How many characters must this node consume at a minimum in order to
- // succeed. If we have found at least 'still_to_find' characters that
- // must be consumed there is no need to ask any following nodes whether
- // they are sure to eat any more characters. The not_at_start argument is
- // used to indicate that we know we are not at the start of the input. In
- // this case anchored branches will always fail and can be ignored when
- // determining how many characters are consumed on success.
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start) = 0;
- // Emits some quick code that checks whether the preloaded characters match.
- // Falls through on certain failure, jumps to the label on possible success.
- // If the node cannot make a quick check it does nothing and returns false.
- bool EmitQuickCheck(RegExpCompiler* compiler,
- Trace* trace,
- bool preload_has_checked_bounds,
- Label* on_possible_success,
- QuickCheckDetails* details_return,
- bool fall_through_on_failure);
- // For a given number of characters this returns a mask and a value. The
- // next n characters are anded with the mask and compared with the value.
- // A comparison failure indicates the node cannot match the next n characters.
- // A comparison success indicates the node may match.
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) = 0;
- static const int kNodeIsTooComplexForGreedyLoops = -1;
- virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
- // Only returns the successor for a text node of length 1 that matches any
- // character and that has no guards on it.
- virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
- RegExpCompiler* compiler) {
- return NULL;
- }
-
- // Collects information on the possible code units (mod 128) that can match if
- // we look forward. This is used for a Boyer-Moore-like string searching
- // implementation. TODO(erikcorry): This should share more code with
- // EatsAtLeast, GetQuickCheckDetails. The budget argument is used to limit
- // the number of nodes we are willing to look at in order to create this data.
- static const int kRecursionBudget = 200;
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- UNREACHABLE();
- }
-
- // If we know that the input is ASCII then there are some nodes that can
- // never match. This method returns a node that can be substituted for
- // itself, or NULL if the node can never match.
- virtual RegExpNode* FilterASCII(int depth, bool ignore_case) { return this; }
- // Helper for FilterASCII.
- RegExpNode* replacement() {
- ASSERT(info()->replacement_calculated);
- return replacement_;
- }
- RegExpNode* set_replacement(RegExpNode* replacement) {
- info()->replacement_calculated = true;
- replacement_ = replacement;
- return replacement; // For convenience.
- }
-
- // We want to avoid recalculating the lookahead info, so we store it on the
- // node. Only info that is for this node is stored. We can tell that the
- // info is for this node when offset == 0, so the information is calculated
- // relative to this node.
- void SaveBMInfo(BoyerMooreLookahead* bm, bool not_at_start, int offset) {
- if (offset == 0) set_bm_info(not_at_start, bm);
- }
-
- Label* label() { return &label_; }
- // If non-generic code is generated for a node (i.e. the node is not at the
- // start of the trace) then it cannot be reused. This variable sets a limit
- // on how often we allow that to happen before we insist on starting a new
- // trace and generating generic code for a node that can be reused by flushing
- // the deferred actions in the current trace and generating a goto.
- static const int kMaxCopiesCodeGenerated = 10;
-
- NodeInfo* info() { return &info_; }
-
- BoyerMooreLookahead* bm_info(bool not_at_start) {
- return bm_info_[not_at_start ? 1 : 0];
- }
-
- Zone* zone() const { return zone_; }
-
- protected:
- enum LimitResult { DONE, CONTINUE };
- RegExpNode* replacement_;
-
- LimitResult LimitVersions(RegExpCompiler* compiler, Trace* trace);
-
- void set_bm_info(bool not_at_start, BoyerMooreLookahead* bm) {
- bm_info_[not_at_start ? 1 : 0] = bm;
- }
-
- private:
- static const int kFirstCharBudget = 10;
- Label label_;
- NodeInfo info_;
- // This variable keeps track of how many times code has been generated for
- // this node (in different traces). We don't keep track of where the
- // generated code is located unless the code is generated at the start of
- // a trace, in which case it is generic and can be reused by flushing the
- // deferred operations in the current trace and generating a goto.
- int trace_count_;
- BoyerMooreLookahead* bm_info_[2];
-
- Zone* zone_;
-};
-
-
-// A simple closed interval.
-class Interval {
- public:
- Interval() : from_(kNone), to_(kNone) { }
- Interval(int from, int to) : from_(from), to_(to) { }
- Interval Union(Interval that) {
- if (that.from_ == kNone)
- return *this;
- else if (from_ == kNone)
- return that;
- else
- return Interval(Min(from_, that.from_), Max(to_, that.to_));
- }
- bool Contains(int value) {
- return (from_ <= value) && (value <= to_);
- }
- bool is_empty() { return from_ == kNone; }
- int from() const { return from_; }
- int to() const { return to_; }
- static Interval Empty() { return Interval(); }
- static const int kNone = -1;
- private:
- int from_;
- int to_;
-};
-
-
-class SeqRegExpNode: public RegExpNode {
- public:
- explicit SeqRegExpNode(RegExpNode* on_success)
- : RegExpNode(on_success->zone()), on_success_(on_success) { }
- RegExpNode* on_success() { return on_success_; }
- void set_on_success(RegExpNode* node) { on_success_ = node; }
- virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- on_success_->FillInBMInfo(offset, budget - 1, bm, not_at_start);
- if (offset == 0) set_bm_info(not_at_start, bm);
- }
-
- protected:
- RegExpNode* FilterSuccessor(int depth, bool ignore_case);
-
- private:
- RegExpNode* on_success_;
-};
-
-
-class ActionNode: public SeqRegExpNode {
- public:
- enum Type {
- SET_REGISTER,
- INCREMENT_REGISTER,
- STORE_POSITION,
- BEGIN_SUBMATCH,
- POSITIVE_SUBMATCH_SUCCESS,
- EMPTY_MATCH_CHECK,
- CLEAR_CAPTURES
- };
- static ActionNode* SetRegister(int reg, int val, RegExpNode* on_success);
- static ActionNode* IncrementRegister(int reg, RegExpNode* on_success);
- static ActionNode* StorePosition(int reg,
- bool is_capture,
- RegExpNode* on_success);
- static ActionNode* ClearCaptures(Interval range, RegExpNode* on_success);
- static ActionNode* BeginSubmatch(int stack_pointer_reg,
- int position_reg,
- RegExpNode* on_success);
- static ActionNode* PositiveSubmatchSuccess(int stack_pointer_reg,
- int restore_reg,
- int clear_capture_count,
- int clear_capture_from,
- RegExpNode* on_success);
- static ActionNode* EmptyMatchCheck(int start_register,
- int repetition_register,
- int repetition_limit,
- RegExpNode* on_success);
- virtual void Accept(NodeVisitor* visitor);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
- bool not_at_start) {
- return on_success()->GetQuickCheckDetails(
- details, compiler, filled_in, not_at_start);
- }
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start);
- Type type() { return type_; }
- // TODO(erikcorry): We should allow some action nodes in greedy loops.
- virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
-
- private:
- union {
- struct {
- int reg;
- int value;
- } u_store_register;
- struct {
- int reg;
- } u_increment_register;
- struct {
- int reg;
- bool is_capture;
- } u_position_register;
- struct {
- int stack_pointer_register;
- int current_position_register;
- int clear_register_count;
- int clear_register_from;
- } u_submatch;
- struct {
- int start_register;
- int repetition_register;
- int repetition_limit;
- } u_empty_match_check;
- struct {
- int range_from;
- int range_to;
- } u_clear_captures;
- } data_;
- ActionNode(Type type, RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- type_(type) { }
- Type type_;
- friend class DotPrinter;
-};
-
-
-class TextNode: public SeqRegExpNode {
- public:
- TextNode(ZoneList<TextElement>* elms,
- RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- elms_(elms) { }
- TextNode(RegExpCharacterClass* that,
- RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- elms_(new(zone()) ZoneList<TextElement>(1, zone())) {
- elms_->Add(TextElement::CharClass(that), zone());
- }
- virtual void Accept(NodeVisitor* visitor);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start);
- ZoneList<TextElement>* elements() { return elms_; }
- void MakeCaseIndependent(bool is_ascii);
- virtual int GreedyLoopTextLength();
- virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
- RegExpCompiler* compiler);
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start);
- void CalculateOffsets();
- virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
-
- private:
- enum TextEmitPassType {
- NON_ASCII_MATCH, // Check for characters that can't match.
- SIMPLE_CHARACTER_MATCH, // Case-dependent single character check.
- NON_LETTER_CHARACTER_MATCH, // Check characters that have no case equivs.
- CASE_CHARACTER_MATCH, // Case-independent single character check.
- CHARACTER_CLASS_MATCH // Character class.
- };
- static bool SkipPass(int pass, bool ignore_case);
- static const int kFirstRealPass = SIMPLE_CHARACTER_MATCH;
- static const int kLastPass = CHARACTER_CLASS_MATCH;
- void TextEmitPass(RegExpCompiler* compiler,
- TextEmitPassType pass,
- bool preloaded,
- Trace* trace,
- bool first_element_checked,
- int* checked_up_to);
- int Length();
- ZoneList<TextElement>* elms_;
-};
-
-
-class AssertionNode: public SeqRegExpNode {
- public:
- enum AssertionNodeType {
- AT_END,
- AT_START,
- AT_BOUNDARY,
- AT_NON_BOUNDARY,
- AFTER_NEWLINE
- };
- static AssertionNode* AtEnd(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AT_END, on_success);
- }
- static AssertionNode* AtStart(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AT_START, on_success);
- }
- static AssertionNode* AtBoundary(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AT_BOUNDARY, on_success);
- }
- static AssertionNode* AtNonBoundary(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AT_NON_BOUNDARY, on_success);
- }
- static AssertionNode* AfterNewline(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AFTER_NEWLINE, on_success);
- }
- virtual void Accept(NodeVisitor* visitor);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
- bool not_at_start);
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start);
- AssertionNodeType type() { return type_; }
- void set_type(AssertionNodeType type) { type_ = type; }
-
- private:
- void EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace);
- enum IfPrevious { kIsNonWord, kIsWord };
- void BacktrackIfPrevious(RegExpCompiler* compiler,
- Trace* trace,
- IfPrevious backtrack_if_previous);
- AssertionNode(AssertionNodeType t, RegExpNode* on_success)
- : SeqRegExpNode(on_success), type_(t) { }
- AssertionNodeType type_;
-};
-
-
-class BackReferenceNode: public SeqRegExpNode {
- public:
- BackReferenceNode(int start_reg,
- int end_reg,
- RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- start_reg_(start_reg),
- end_reg_(end_reg) { }
- virtual void Accept(NodeVisitor* visitor);
- int start_register() { return start_reg_; }
- int end_register() { return end_reg_; }
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- return;
- }
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start);
-
- private:
- int start_reg_;
- int end_reg_;
-};
-
-
-class EndNode: public RegExpNode {
- public:
- enum Action { ACCEPT, BACKTRACK, NEGATIVE_SUBMATCH_SUCCESS };
- explicit EndNode(Action action, Zone* zone)
- : RegExpNode(zone), action_(action) { }
- virtual void Accept(NodeVisitor* visitor);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start) { return 0; }
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- // Returning 0 from EatsAtLeast should ensure we never get here.
- UNREACHABLE();
- }
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- // Returning 0 from EatsAtLeast should ensure we never get here.
- UNREACHABLE();
- }
-
- private:
- Action action_;
-};
-
-
-class NegativeSubmatchSuccess: public EndNode {
- public:
- NegativeSubmatchSuccess(int stack_pointer_reg,
- int position_reg,
- int clear_capture_count,
- int clear_capture_start,
- Zone* zone)
- : EndNode(NEGATIVE_SUBMATCH_SUCCESS, zone),
- stack_pointer_register_(stack_pointer_reg),
- current_position_register_(position_reg),
- clear_capture_count_(clear_capture_count),
- clear_capture_start_(clear_capture_start) { }
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
-
- private:
- int stack_pointer_register_;
- int current_position_register_;
- int clear_capture_count_;
- int clear_capture_start_;
-};
-
-
-class Guard: public ZoneObject {
- public:
- enum Relation { LT, GEQ };
- Guard(int reg, Relation op, int value)
- : reg_(reg),
- op_(op),
- value_(value) { }
- int reg() { return reg_; }
- Relation op() { return op_; }
- int value() { return value_; }
-
- private:
- int reg_;
- Relation op_;
- int value_;
-};
-
-
-class GuardedAlternative {
- public:
- explicit GuardedAlternative(RegExpNode* node) : node_(node), guards_(NULL) { }
- void AddGuard(Guard* guard, Zone* zone);
- RegExpNode* node() { return node_; }
- void set_node(RegExpNode* node) { node_ = node; }
- ZoneList<Guard*>* guards() { return guards_; }
-
- private:
- RegExpNode* node_;
- ZoneList<Guard*>* guards_;
-};
-
-
-class AlternativeGeneration;
-
-
-class ChoiceNode: public RegExpNode {
- public:
- explicit ChoiceNode(int expected_size, Zone* zone)
- : RegExpNode(zone),
- alternatives_(new(zone)
- ZoneList<GuardedAlternative>(expected_size, zone)),
- table_(NULL),
- not_at_start_(false),
- being_calculated_(false) { }
- virtual void Accept(NodeVisitor* visitor);
- void AddAlternative(GuardedAlternative node) {
- alternatives()->Add(node, zone());
- }
- ZoneList<GuardedAlternative>* alternatives() { return alternatives_; }
- DispatchTable* GetTable(bool ignore_case);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
- int EatsAtLeastHelper(int still_to_find,
- int budget,
- RegExpNode* ignore_this_node,
- bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start);
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start);
-
- bool being_calculated() { return being_calculated_; }
- bool not_at_start() { return not_at_start_; }
- void set_not_at_start() { not_at_start_ = true; }
- void set_being_calculated(bool b) { being_calculated_ = b; }
- virtual bool try_to_emit_quick_check_for_alternative(int i) { return true; }
- virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
-
- protected:
- int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);
- ZoneList<GuardedAlternative>* alternatives_;
-
- private:
- friend class DispatchTableConstructor;
- friend class Analysis;
- void GenerateGuard(RegExpMacroAssembler* macro_assembler,
- Guard* guard,
- Trace* trace);
- int CalculatePreloadCharacters(RegExpCompiler* compiler, int eats_at_least);
- void EmitOutOfLineContinuation(RegExpCompiler* compiler,
- Trace* trace,
- GuardedAlternative alternative,
- AlternativeGeneration* alt_gen,
- int preload_characters,
- bool next_expects_preload);
- DispatchTable* table_;
- // If true, this node is never checked at the start of the input.
- // Allows a new trace to start with at_start() set to false.
- bool not_at_start_;
- bool being_calculated_;
-};
-
-
-class NegativeLookaheadChoiceNode: public ChoiceNode {
- public:
- explicit NegativeLookaheadChoiceNode(GuardedAlternative this_must_fail,
- GuardedAlternative then_do_this,
- Zone* zone)
- : ChoiceNode(2, zone) {
- AddAlternative(this_must_fail);
- AddAlternative(then_do_this);
- }
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start);
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- alternatives_->at(1).node()->FillInBMInfo(
- offset, budget - 1, bm, not_at_start);
- if (offset == 0) set_bm_info(not_at_start, bm);
- }
- // For a negative lookahead we don't emit the quick check for the
- // alternative that is expected to fail. This is because quick check code
- // starts by loading enough characters for the alternative that takes fewest
- // characters, but on a negative lookahead the negative branch did not take
- // part in that calculation (EatsAtLeast) so the assumptions don't hold.
- virtual bool try_to_emit_quick_check_for_alternative(int i) { return i != 0; }
- virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
-};
-
-
-class LoopChoiceNode: public ChoiceNode {
- public:
- explicit LoopChoiceNode(bool body_can_be_zero_length, Zone* zone)
- : ChoiceNode(2, zone),
- loop_node_(NULL),
- continue_node_(NULL),
- body_can_be_zero_length_(body_can_be_zero_length) { }
- void AddLoopAlternative(GuardedAlternative alt);
- void AddContinueAlternative(GuardedAlternative alt);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start);
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start);
- RegExpNode* loop_node() { return loop_node_; }
- RegExpNode* continue_node() { return continue_node_; }
- bool body_can_be_zero_length() { return body_can_be_zero_length_; }
- virtual void Accept(NodeVisitor* visitor);
- virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
-
- private:
- // AddAlternative is made private for loop nodes because alternatives
- // should not be added freely, we need to keep track of which node
- // goes back to the node itself.
- void AddAlternative(GuardedAlternative node) {
- ChoiceNode::AddAlternative(node);
- }
-
- RegExpNode* loop_node_;
- RegExpNode* continue_node_;
- bool body_can_be_zero_length_;
-};
-
-
-// Improve the speed that we scan for an initial point where a non-anchored
-// regexp can match by using a Boyer-Moore-like table. This is done by
-// identifying non-greedy non-capturing loops in the nodes that eat any
-// character one at a time. For example in the middle of the regexp
-// /foo[\s\S]*?bar/ we find such a loop. There is also such a loop implicitly
-// inserted at the start of any non-anchored regexp.
-//
-// When we have found such a loop we look ahead in the nodes to find the set of
-// characters that can come at given distances. For example for the regexp
-// /.?foo/ we know that there are at least 3 characters ahead of us, and the
-// sets of characters that can occur are [any, [f, o], [o]]. We find a range in
-// the lookahead info where the set of characters is reasonably constrained. In
-// our example this is from index 1 to 2 (0 is not constrained). We can now
-// look 3 characters ahead and if we don't find one of [f, o] (the union of
-// [f, o] and [o]) then we can skip forwards by the range size (in this case 2).
-//
-// For Unicode input strings we do the same, but modulo 128.
-//
-// We also look at the first string fed to the regexp and use that to get a hint
-// of the character frequencies in the inputs. This affects the assessment of
-// whether the set of characters is 'reasonably constrained'.
-//
-// We also have another lookahead mechanism (called quick check in the code),
-// which uses a wide load of multiple characters followed by a mask and compare
-// to determine whether a match is possible at this point.
-enum ContainedInLattice {
- kNotYet = 0,
- kLatticeIn = 1,
- kLatticeOut = 2,
- kLatticeUnknown = 3 // Can also mean both in and out.
-};
-
-
-inline ContainedInLattice Combine(ContainedInLattice a, ContainedInLattice b) {
- return static_cast<ContainedInLattice>(a | b);
-}
-
-
-ContainedInLattice AddRange(ContainedInLattice a,
- const int* ranges,
- int ranges_size,
- Interval new_range);
-
-
-class BoyerMoorePositionInfo : public ZoneObject {
- public:
- explicit BoyerMoorePositionInfo(Zone* zone)
- : map_(new(zone) ZoneList<bool>(kMapSize, zone)),
- map_count_(0),
- w_(kNotYet),
- s_(kNotYet),
- d_(kNotYet),
- surrogate_(kNotYet) {
- for (int i = 0; i < kMapSize; i++) {
- map_->Add(false, zone);
- }
- }
-
- bool& at(int i) { return map_->at(i); }
-
- static const int kMapSize = 128;
- static const int kMask = kMapSize - 1;
-
- int map_count() const { return map_count_; }
-
- void Set(int character);
- void SetInterval(const Interval& interval);
- void SetAll();
- bool is_non_word() { return w_ == kLatticeOut; }
- bool is_word() { return w_ == kLatticeIn; }
-
- private:
- ZoneList<bool>* map_;
- int map_count_; // Number of set bits in the map.
- ContainedInLattice w_; // The \w character class.
- ContainedInLattice s_; // The \s character class.
- ContainedInLattice d_; // The \d character class.
- ContainedInLattice surrogate_; // Surrogate UTF-16 code units.
-};
-
-
-class BoyerMooreLookahead : public ZoneObject {
- public:
- BoyerMooreLookahead(int length, RegExpCompiler* compiler, Zone* zone);
-
- int length() { return length_; }
- int max_char() { return max_char_; }
- RegExpCompiler* compiler() { return compiler_; }
-
- int Count(int map_number) {
- return bitmaps_->at(map_number)->map_count();
- }
-
- BoyerMoorePositionInfo* at(int i) { return bitmaps_->at(i); }
-
- void Set(int map_number, int character) {
- if (character > max_char_) return;
- BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
- info->Set(character);
- }
-
- void SetInterval(int map_number, const Interval& interval) {
- if (interval.from() > max_char_) return;
- BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
- if (interval.to() > max_char_) {
- info->SetInterval(Interval(interval.from(), max_char_));
- } else {
- info->SetInterval(interval);
- }
- }
-
- void SetAll(int map_number) {
- bitmaps_->at(map_number)->SetAll();
- }
-
- void SetRest(int from_map) {
- for (int i = from_map; i < length_; i++) SetAll(i);
- }
- bool EmitSkipInstructions(RegExpMacroAssembler* masm);
-
- private:
- // This is the value obtained by EatsAtLeast. If we do not have at least this
- // many characters left in the sample string then the match is bound to fail.
- // Therefore it is OK to read a character this far ahead of the current match
- // point.
- int length_;
- RegExpCompiler* compiler_;
- // 0x7f for ASCII, 0xffff for UTF-16.
- int max_char_;
- ZoneList<BoyerMoorePositionInfo*>* bitmaps_;
-
- int GetSkipTable(int min_lookahead,
- int max_lookahead,
- Handle<ByteArray> boolean_skip_table);
- bool FindWorthwhileInterval(int* from, int* to);
- int FindBestInterval(
- int max_number_of_chars, int old_biggest_points, int* from, int* to);
-};
-
-
-// There are many ways to generate code for a node. This class encapsulates
-// the current way we should be generating. In other words it encapsulates
-// the current state of the code generator. The effect of this is that we
-// generate code for paths that the matcher can take through the regular
-// expression. A given node in the regexp can be code-generated several times
-// as it can be part of several traces. For example for the regexp:
-// /foo(bar|ip)baz/ the code to match baz will be generated twice, once as part
-// of the foo-bar-baz trace and once as part of the foo-ip-baz trace. The code
-// to match foo is generated only once (the traces have a common prefix). The
-// code to store the capture is deferred and generated (twice) after the places
-// where baz has been matched.
-class Trace {
- public:
- // A value for a property that is either known to be true, know to be false,
- // or not known.
- enum TriBool {
- UNKNOWN = -1, FALSE = 0, TRUE = 1
- };
-
- class DeferredAction {
- public:
- DeferredAction(ActionNode::Type type, int reg)
- : type_(type), reg_(reg), next_(NULL) { }
- DeferredAction* next() { return next_; }
- bool Mentions(int reg);
- int reg() { return reg_; }
- ActionNode::Type type() { return type_; }
- private:
- ActionNode::Type type_;
- int reg_;
- DeferredAction* next_;
- friend class Trace;
- };
-
- class DeferredCapture : public DeferredAction {
- public:
- DeferredCapture(int reg, bool is_capture, Trace* trace)
- : DeferredAction(ActionNode::STORE_POSITION, reg),
- cp_offset_(trace->cp_offset()),
- is_capture_(is_capture) { }
- int cp_offset() { return cp_offset_; }
- bool is_capture() { return is_capture_; }
- private:
- int cp_offset_;
- bool is_capture_;
- void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
- };
-
- class DeferredSetRegister : public DeferredAction {
- public:
- DeferredSetRegister(int reg, int value)
- : DeferredAction(ActionNode::SET_REGISTER, reg),
- value_(value) { }
- int value() { return value_; }
- private:
- int value_;
- };
-
- class DeferredClearCaptures : public DeferredAction {
- public:
- explicit DeferredClearCaptures(Interval range)
- : DeferredAction(ActionNode::CLEAR_CAPTURES, -1),
- range_(range) { }
- Interval range() { return range_; }
- private:
- Interval range_;
- };
-
- class DeferredIncrementRegister : public DeferredAction {
- public:
- explicit DeferredIncrementRegister(int reg)
- : DeferredAction(ActionNode::INCREMENT_REGISTER, reg) { }
- };
-
- Trace()
- : cp_offset_(0),
- actions_(NULL),
- backtrack_(NULL),
- stop_node_(NULL),
- loop_label_(NULL),
- characters_preloaded_(0),
- bound_checked_up_to_(0),
- flush_budget_(100),
- at_start_(UNKNOWN) { }
-
- // End the trace. This involves flushing the deferred actions in the trace
- // and pushing a backtrack location onto the backtrack stack. Once this is
- // done we can start a new trace or go to one that has already been
- // generated.
- void Flush(RegExpCompiler* compiler, RegExpNode* successor);
- int cp_offset() { return cp_offset_; }
- DeferredAction* actions() { return actions_; }
- // A trivial trace is one that has no deferred actions or other state that
- // affects the assumptions used when generating code. There is no recorded
- // backtrack location in a trivial trace, so with a trivial trace we will
- // generate code that, on a failure to match, gets the backtrack location
- // from the backtrack stack rather than using a direct jump instruction. We
- // always start code generation with a trivial trace and non-trivial traces
- // are created as we emit code for nodes or add to the list of deferred
- // actions in the trace. The location of the code generated for a node using
- // a trivial trace is recorded in a label in the node so that gotos can be
- // generated to that code.
- bool is_trivial() {
- return backtrack_ == NULL &&
- actions_ == NULL &&
- cp_offset_ == 0 &&
- characters_preloaded_ == 0 &&
- bound_checked_up_to_ == 0 &&
- quick_check_performed_.characters() == 0 &&
- at_start_ == UNKNOWN;
- }
- TriBool at_start() { return at_start_; }
- void set_at_start(bool at_start) { at_start_ = at_start ? TRUE : FALSE; }
- Label* backtrack() { return backtrack_; }
- Label* loop_label() { return loop_label_; }
- RegExpNode* stop_node() { return stop_node_; }
- int characters_preloaded() { return characters_preloaded_; }
- int bound_checked_up_to() { return bound_checked_up_to_; }
- int flush_budget() { return flush_budget_; }
- QuickCheckDetails* quick_check_performed() { return &quick_check_performed_; }
- bool mentions_reg(int reg);
- // Returns true if a deferred position store exists to the specified
- // register and stores the offset in the out-parameter. Otherwise
- // returns false.
- bool GetStoredPosition(int reg, int* cp_offset);
- // These set methods and AdvanceCurrentPositionInTrace should be used only on
- // new traces - the intention is that traces are immutable after creation.
- void add_action(DeferredAction* new_action) {
- ASSERT(new_action->next_ == NULL);
- new_action->next_ = actions_;
- actions_ = new_action;
- }
- void set_backtrack(Label* backtrack) { backtrack_ = backtrack; }
- void set_stop_node(RegExpNode* node) { stop_node_ = node; }
- void set_loop_label(Label* label) { loop_label_ = label; }
- void set_characters_preloaded(int count) { characters_preloaded_ = count; }
- void set_bound_checked_up_to(int to) { bound_checked_up_to_ = to; }
- void set_flush_budget(int to) { flush_budget_ = to; }
- void set_quick_check_performed(QuickCheckDetails* d) {
- quick_check_performed_ = *d;
- }
- void InvalidateCurrentCharacter();
- void AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler);
-
- private:
- int FindAffectedRegisters(OutSet* affected_registers, Zone* zone);
- void PerformDeferredActions(RegExpMacroAssembler* macro,
- int max_register,
- OutSet& affected_registers,
- OutSet* registers_to_pop,
- OutSet* registers_to_clear,
- Zone* zone);
- void RestoreAffectedRegisters(RegExpMacroAssembler* macro,
- int max_register,
- OutSet& registers_to_pop,
- OutSet& registers_to_clear);
- int cp_offset_;
- DeferredAction* actions_;
- Label* backtrack_;
- RegExpNode* stop_node_;
- Label* loop_label_;
- int characters_preloaded_;
- int bound_checked_up_to_;
- QuickCheckDetails quick_check_performed_;
- int flush_budget_;
- TriBool at_start_;
-};
-
-
-class NodeVisitor {
- public:
- virtual ~NodeVisitor() { }
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that) = 0;
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
- virtual void VisitLoopChoice(LoopChoiceNode* that) { VisitChoice(that); }
-};
-
-
-// Node visitor used to add the start set of the alternatives to the
-// dispatch table of a choice node.
-class DispatchTableConstructor: public NodeVisitor {
- public:
- DispatchTableConstructor(DispatchTable* table, bool ignore_case,
- Zone* zone)
- : table_(table),
- choice_index_(-1),
- ignore_case_(ignore_case),
- zone_(zone) { }
-
- void BuildTable(ChoiceNode* node);
-
- void AddRange(CharacterRange range) {
- table()->AddRange(range, choice_index_, zone_);
- }
-
- void AddInverse(ZoneList<CharacterRange>* ranges);
-
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that);
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- DispatchTable* table() { return table_; }
- void set_choice_index(int value) { choice_index_ = value; }
-
- protected:
- DispatchTable* table_;
- int choice_index_;
- bool ignore_case_;
- Zone* zone_;
-};
-
-
-// Assertion propagation moves information about assertions such as
-// \b to the affected nodes. For instance, in /.\b./ information must
-// be propagated to the first '.' that whatever follows needs to know
-// if it matched a word or a non-word, and to the second '.' that it
-// has to check if it succeeds a word or non-word. In this case the
-// result will be something like:
-//
-// +-------+ +------------+
-// | . | | . |
-// +-------+ ---> +------------+
-// | word? | | check word |
-// +-------+ +------------+
-class Analysis: public NodeVisitor {
- public:
- Analysis(bool ignore_case, bool is_ascii)
- : ignore_case_(ignore_case),
- is_ascii_(is_ascii),
- error_message_(NULL) { }
- void EnsureAnalyzed(RegExpNode* node);
-
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that);
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
- virtual void VisitLoopChoice(LoopChoiceNode* that);
-
- bool has_failed() { return error_message_ != NULL; }
- const char* error_message() {
- ASSERT(error_message_ != NULL);
- return error_message_;
- }
- void fail(const char* error_message) {
- error_message_ = error_message;
- }
-
- private:
- bool ignore_case_;
- bool is_ascii_;
- const char* error_message_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
-};
-
-
-struct RegExpCompileData {
- RegExpCompileData()
- : tree(NULL),
- node(NULL),
- simple(true),
- contains_anchor(false),
- capture_count(0) { }
- RegExpTree* tree;
- RegExpNode* node;
- bool simple;
- bool contains_anchor;
- Handle<String> error;
- int capture_count;
-};
-
-
-class RegExpEngine: public AllStatic {
- public:
- struct CompilationResult {
- explicit CompilationResult(const char* error_message)
- : error_message(error_message),
- code(HEAP->the_hole_value()),
- num_registers(0) {}
- CompilationResult(Object* code, int registers)
- : error_message(NULL),
- code(code),
- num_registers(registers) {}
- const char* error_message;
- Object* code;
- int num_registers;
- };
-
- static CompilationResult Compile(RegExpCompileData* input,
- bool ignore_case,
- bool global,
- bool multiline,
- Handle<String> pattern,
- Handle<String> sample_subject,
- bool is_ascii, Zone* zone);
-
- static void DotPrint(const char* label, RegExpNode* node, bool ignore_case);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_JSREGEXP_H_
diff --git a/src/3rdparty/v8/src/lazy-instance.h b/src/3rdparty/v8/src/lazy-instance.h
deleted file mode 100644
index 9d68b8c..0000000
--- a/src/3rdparty/v8/src/lazy-instance.h
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The LazyInstance<Type, Traits> class manages a single instance of Type,
-// which will be lazily created on the first time it's accessed. This class is
-// useful for places you would normally use a function-level static, but you
-// need to have guaranteed thread-safety. The Type constructor will only ever
-// be called once, even if two threads are racing to create the object. Get()
-// and Pointer() will always return the same, completely initialized instance.
-//
-// LazyInstance is completely thread safe, assuming that you create it safely.
-// The class was designed to be POD initialized, so it shouldn't require a
-// static constructor. It really only makes sense to declare a LazyInstance as
-// a global variable using the LAZY_INSTANCE_INITIALIZER initializer.
-//
-// LazyInstance is similar to Singleton, except it does not have the singleton
-// property. You can have multiple LazyInstance's of the same type, and each
-// will manage a unique instance. It also preallocates the space for Type, as
-// to avoid allocating the Type instance on the heap. This may help with the
-// performance of creating the instance, and reducing heap fragmentation. This
-// requires that Type be a complete type so we can determine the size. See
-// notes for advanced users below for more explanations.
-//
-// Example usage:
-// static LazyInstance<MyClass>::type my_instance = LAZY_INSTANCE_INITIALIZER;
-// void SomeMethod() {
-// my_instance.Get().SomeMethod(); // MyClass::SomeMethod()
-//
-// MyClass* ptr = my_instance.Pointer();
-// ptr->DoDoDo(); // MyClass::DoDoDo
-// }
-//
-// Additionally you can override the way your instance is constructed by
-// providing your own trait:
-// Example usage:
-// struct MyCreateTrait {
-// static void Construct(MyClass* allocated_ptr) {
-// new (allocated_ptr) MyClass(/* extra parameters... */);
-// }
-// };
-// static LazyInstance<MyClass, MyCreateTrait>::type my_instance =
-// LAZY_INSTANCE_INITIALIZER;
-//
-// WARNINGS:
-// - This implementation of LazyInstance is NOT THREAD-SAFE by default. See
-// ThreadSafeInitOnceTrait declared below for that.
-// - Lazy initialization comes with a cost. Make sure that you don't use it on
-// critical path. Consider adding your initialization code to a function
-// which is explicitly called once.
-//
-// Notes for advanced users:
-// LazyInstance can actually be used in two different ways:
-//
-// - "Static mode" which is the default mode since it is the most efficient
-// (no extra heap allocation). In this mode, the instance is statically
-// allocated (stored in the global data section at compile time).
-// The macro LAZY_STATIC_INSTANCE_INITIALIZER (= LAZY_INSTANCE_INITIALIZER)
-// must be used to initialize static lazy instances.
-//
-// - "Dynamic mode". In this mode, the instance is dynamically allocated and
-// constructed (using new) by default. This mode is useful if you have to
-// deal with some code already allocating the instance for you (e.g.
-// OS::Mutex() which returns a new private OS-dependent subclass of Mutex).
-// The macro LAZY_DYNAMIC_INSTANCE_INITIALIZER must be used to initialize
-// dynamic lazy instances.
-
-#ifndef V8_LAZY_INSTANCE_H_
-#define V8_LAZY_INSTANCE_H_
-
-#include "once.h"
-
-namespace v8 {
-namespace internal {
-
-#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, {} }
-#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 }
-
-// Default to static mode.
-#define LAZY_INSTANCE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
-
-
-template <typename T>
-struct LeakyInstanceTrait {
- static void Destroy(T* /* instance */) {}
-};
-
-
-// Traits that define how an instance is allocated and accessed.
-
-// TODO(kalmard): __alignof__ is only defined for GCC > 4.2. Fix alignment issue
-// on MIPS with other compilers.
-#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
-#define LAZY_ALIGN(x) __attribute__((aligned(__alignof__(x))))
-#else
-#define LAZY_ALIGN(x)
-#endif
-
-template <typename T>
-struct StaticallyAllocatedInstanceTrait {
- typedef char StorageType[sizeof(T)] LAZY_ALIGN(T);
-
- static T* MutableInstance(StorageType* storage) {
- return reinterpret_cast<T*>(storage);
- }
-
- template <typename ConstructTrait>
- static void InitStorageUsingTrait(StorageType* storage) {
- ConstructTrait::Construct(MutableInstance(storage));
- }
-};
-
-#undef LAZY_ALIGN
-
-
-template <typename T>
-struct DynamicallyAllocatedInstanceTrait {
- typedef T* StorageType;
-
- static T* MutableInstance(StorageType* storage) {
- return *storage;
- }
-
- template <typename CreateTrait>
- static void InitStorageUsingTrait(StorageType* storage) {
- *storage = CreateTrait::Create();
- }
-};
-
-
-template <typename T>
-struct DefaultConstructTrait {
- // Constructs the provided object which was already allocated.
- static void Construct(T* allocated_ptr) {
- new(allocated_ptr) T();
- }
-};
-
-
-template <typename T>
-struct DefaultCreateTrait {
- static T* Create() {
- return new T();
- }
-};
-
-
-struct ThreadSafeInitOnceTrait {
- template <typename Function, typename Storage>
- static void Init(OnceType* once, Function function, Storage storage) {
- CallOnce(once, function, storage);
- }
-};
-
-
-// Initialization trait for users who don't care about thread-safety.
-struct SingleThreadInitOnceTrait {
- template <typename Function, typename Storage>
- static void Init(OnceType* once, Function function, Storage storage) {
- if (*once == ONCE_STATE_UNINITIALIZED) {
- function(storage);
- *once = ONCE_STATE_DONE;
- }
- }
-};
-
-
-// TODO(pliard): Handle instances destruction (using global destructors).
-template <typename T, typename AllocationTrait, typename CreateTrait,
- typename InitOnceTrait, typename DestroyTrait /* not used yet. */>
-struct LazyInstanceImpl {
- public:
- typedef typename AllocationTrait::StorageType StorageType;
-
- private:
- static void InitInstance(StorageType* storage) {
- AllocationTrait::template InitStorageUsingTrait<CreateTrait>(storage);
- }
-
- void Init() const {
- InitOnceTrait::Init(
- &once_,
- // Casts to void* are needed here to avoid breaking strict aliasing
- // rules.
- reinterpret_cast<void(*)(void*)>(&InitInstance), // NOLINT
- reinterpret_cast<void*>(&storage_));
- }
-
- public:
- T* Pointer() {
- Init();
- return AllocationTrait::MutableInstance(&storage_);
- }
-
- const T& Get() const {
- Init();
- return *AllocationTrait::MutableInstance(&storage_);
- }
-
- mutable OnceType once_;
- // Note that the previous field, OnceType, is an AtomicWord which guarantees
- // 4-byte alignment of the storage field below. If compiling with GCC (>4.2),
- // the LAZY_ALIGN macro above will guarantee correctness for any alignment.
- mutable StorageType storage_;
-};
-
-
-template <typename T,
- typename CreateTrait = DefaultConstructTrait<T>,
- typename InitOnceTrait = SingleThreadInitOnceTrait,
- typename DestroyTrait = LeakyInstanceTrait<T> >
-struct LazyStaticInstance {
- typedef LazyInstanceImpl<T, StaticallyAllocatedInstanceTrait<T>,
- CreateTrait, InitOnceTrait, DestroyTrait> type;
-};
-
-
-template <typename T,
- typename CreateTrait = DefaultConstructTrait<T>,
- typename InitOnceTrait = SingleThreadInitOnceTrait,
- typename DestroyTrait = LeakyInstanceTrait<T> >
-struct LazyInstance {
- // A LazyInstance is a LazyStaticInstance.
- typedef typename LazyStaticInstance<T, CreateTrait, InitOnceTrait,
- DestroyTrait>::type type;
-};
-
-
-template <typename T,
- typename CreateTrait = DefaultCreateTrait<T>,
- typename InitOnceTrait = SingleThreadInitOnceTrait,
- typename DestroyTrait = LeakyInstanceTrait<T> >
-struct LazyDynamicInstance {
- typedef LazyInstanceImpl<T, DynamicallyAllocatedInstanceTrait<T>,
- CreateTrait, InitOnceTrait, DestroyTrait> type;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_LAZY_INSTANCE_H_
diff --git a/src/3rdparty/v8/src/list-inl.h b/src/3rdparty/v8/src/list-inl.h
deleted file mode 100644
index 7a84313..0000000
--- a/src/3rdparty/v8/src/list-inl.h
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LIST_INL_H_
-#define V8_LIST_INL_H_
-
-#include "list.h"
-
-namespace v8 {
-namespace internal {
-
-
-template<typename T, class P>
-void List<T, P>::Add(const T& element, P alloc) {
- if (length_ < capacity_) {
- data_[length_++] = element;
- } else {
- List<T, P>::ResizeAdd(element, alloc);
- }
-}
-
-
-template<typename T, class P>
-void List<T, P>::AddAll(const List<T, P>& other, P alloc) {
- AddAll(other.ToVector(), alloc);
-}
-
-
-template<typename T, class P>
-void List<T, P>::AddAll(const Vector<T>& other, P alloc) {
- int result_length = length_ + other.length();
- if (capacity_ < result_length) Resize(result_length, alloc);
- for (int i = 0; i < other.length(); i++) {
- data_[length_ + i] = other.at(i);
- }
- length_ = result_length;
-}
-
-
-// Use two layers of inlining so that the non-inlined function can
-// use the same implementation as the inlined version.
-template<typename T, class P>
-void List<T, P>::ResizeAdd(const T& element, P alloc) {
- ResizeAddInternal(element, alloc);
-}
-
-
-template<typename T, class P>
-void List<T, P>::ResizeAddInternal(const T& element, P alloc) {
- ASSERT(length_ >= capacity_);
- // Grow the list capacity by 100%, but make sure to let it grow
- // even when the capacity is zero (possible initial case).
- int new_capacity = 1 + 2 * capacity_;
- // Since the element reference could be an element of the list, copy
- // it out of the old backing storage before resizing.
- T temp = element;
- Resize(new_capacity, alloc);
- data_[length_++] = temp;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Resize(int new_capacity, P alloc) {
- ASSERT_LE(length_, new_capacity);
- T* new_data = NewData(new_capacity, alloc);
- memcpy(new_data, data_, length_ * sizeof(T));
- List<T, P>::DeleteData(data_);
- data_ = new_data;
- capacity_ = new_capacity;
-}
-
-
-template<typename T, class P>
-Vector<T> List<T, P>::AddBlock(T value, int count, P alloc) {
- int start = length_;
- for (int i = 0; i < count; i++) Add(value, alloc);
- return Vector<T>(&data_[start], count);
-}
-
-
-template<typename T, class P>
-void List<T, P>::InsertAt(int index, const T& elm, P alloc) {
- ASSERT(index >= 0 && index <= length_);
- Add(elm, alloc);
- for (int i = length_ - 1; i > index; --i) {
- data_[i] = data_[i - 1];
- }
- data_[index] = elm;
-}
-
-
-template<typename T, class P>
-T List<T, P>::Remove(int i) {
- T element = at(i);
- length_--;
- while (i < length_) {
- data_[i] = data_[i + 1];
- i++;
- }
- return element;
-}
-
-
-template<typename T, class P>
-bool List<T, P>::RemoveElement(const T& elm) {
- for (int i = 0; i < length_; i++) {
- if (data_[i] == elm) {
- Remove(i);
- return true;
- }
- }
- return false;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Allocate(int length, P allocator) {
- DeleteData(data_);
- Initialize(length, allocator);
- length_ = length;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Clear() {
- DeleteData(data_);
- // We don't call Initialize(0) since that requires passing a Zone,
- // which we don't really need.
- data_ = NULL;
- capacity_ = 0;
- length_ = 0;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Rewind(int pos) {
- length_ = pos;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Trim(P alloc) {
- if (length_ < capacity_ / 4) {
- Resize(capacity_ / 2, alloc);
- }
-}
-
-
-template<typename T, class P>
-void List<T, P>::Iterate(void (*callback)(T* x)) {
- for (int i = 0; i < length_; i++) callback(&data_[i]);
-}
-
-
-template<typename T, class P>
-template<class Visitor>
-void List<T, P>::Iterate(Visitor* visitor) {
- for (int i = 0; i < length_; i++) visitor->Apply(&data_[i]);
-}
-
-
-template<typename T, class P>
-bool List<T, P>::Contains(const T& elm) const {
- for (int i = 0; i < length_; i++) {
- if (data_[i] == elm)
- return true;
- }
- return false;
-}
-
-
-template<typename T, class P>
-int List<T, P>::CountOccurrences(const T& elm, int start, int end) const {
- int result = 0;
- for (int i = start; i <= end; i++) {
- if (data_[i] == elm) ++result;
- }
- return result;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Sort(int (*cmp)(const T* x, const T* y)) {
- ToVector().Sort(cmp);
-#ifdef DEBUG
- for (int i = 1; i < length_; i++)
- ASSERT(cmp(&data_[i - 1], &data_[i]) <= 0);
-#endif
-}
-
-
-template<typename T, class P>
-void List<T, P>::Sort() {
- Sort(PointerValueCompare<T>);
-}
-
-
-template<typename T, class P>
-void List<T, P>::Initialize(int capacity, P allocator) {
- ASSERT(capacity >= 0);
- data_ = (capacity > 0) ? NewData(capacity, allocator) : NULL;
- capacity_ = capacity;
- length_ = 0;
-}
-
-
-template <typename T, typename P>
-int SortedListBSearch(const List<T>& list, P cmp) {
- int low = 0;
- int high = list.length() - 1;
- while (low <= high) {
- int mid = (low + high) / 2;
- T mid_elem = list[mid];
-
- if (cmp(&mid_elem) > 0) {
- high = mid - 1;
- continue;
- }
- if (cmp(&mid_elem) < 0) {
- low = mid + 1;
- continue;
- }
- // Found the elememt.
- return mid;
- }
- return -1;
-}
-
-
-template<typename T>
-class ElementCmp {
- public:
- explicit ElementCmp(T e) : elem_(e) {}
- int operator()(const T* other) {
- return PointerValueCompare(other, &elem_);
- }
- private:
- T elem_;
-};
-
-
-template <typename T>
-int SortedListBSearch(const List<T>& list, T elem) {
- return SortedListBSearch<T, ElementCmp<T> > (list, ElementCmp<T>(elem));
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_LIST_INL_H_
diff --git a/src/3rdparty/v8/src/list.h b/src/3rdparty/v8/src/list.h
deleted file mode 100644
index 43d982f..0000000
--- a/src/3rdparty/v8/src/list.h
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LIST_H_
-#define V8_LIST_H_
-
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-
-// ----------------------------------------------------------------------------
-// The list is a template for very light-weight lists. We are not
-// using the STL because we want full control over space and speed of
-// the code. This implementation is based on code by Robert Griesemer
-// and Rob Pike.
-//
-// The list is parameterized by the type of its elements (T) and by an
-// allocation policy (P). The policy is used for allocating lists in
-// the C free store or the zone; see zone.h.
-
-// Forward defined as
-// template <typename T,
-// class AllocationPolicy = FreeStoreAllocationPolicy> class List;
-template <typename T, class AllocationPolicy>
-class List {
- public:
- explicit List(AllocationPolicy allocator = AllocationPolicy()) {
- Initialize(0, allocator);
- }
- INLINE(explicit List(int capacity,
- AllocationPolicy allocator = AllocationPolicy())) {
- Initialize(capacity, allocator);
- }
- INLINE(~List()) { DeleteData(data_); }
-
- // Deallocates memory used by the list and leaves the list in a consistent
- // empty state.
- void Free() {
- DeleteData(data_);
- Initialize(0);
- }
-
- INLINE(void* operator new(size_t size,
- AllocationPolicy allocator = AllocationPolicy())) {
- return allocator.New(static_cast<int>(size));
- }
- INLINE(void operator delete(void* p)) {
- AllocationPolicy::Delete(p);
- }
-
- // Please the MSVC compiler. We should never have to execute this.
- INLINE(void operator delete(void* p, AllocationPolicy allocator)) {
- UNREACHABLE();
- }
-
- // Returns a reference to the element at index i. This reference is
- // not safe to use after operations that can change the list's
- // backing store (e.g. Add).
- inline T& operator[](int i) const {
- ASSERT(0 <= i);
- ASSERT(i < length_);
- return data_[i];
- }
- inline T& at(int i) const { return operator[](i); }
- inline T& last() const { return at(length_ - 1); }
- inline T& first() const { return at(0); }
-
- INLINE(bool is_empty() const) { return length_ == 0; }
- INLINE(int length() const) { return length_; }
- INLINE(int capacity() const) { return capacity_; }
-
- Vector<T> ToVector() const { return Vector<T>(data_, length_); }
-
- Vector<const T> ToConstVector() { return Vector<const T>(data_, length_); }
-
- // Adds a copy of the given 'element' to the end of the list,
- // expanding the list if necessary.
- void Add(const T& element, AllocationPolicy allocator = AllocationPolicy());
-
- // Add all the elements from the argument list to this list.
- void AddAll(const List<T, AllocationPolicy>& other,
- AllocationPolicy allocator = AllocationPolicy());
-
- // Add all the elements from the vector to this list.
- void AddAll(const Vector<T>& other,
- AllocationPolicy allocator = AllocationPolicy());
-
- // Inserts the element at the specific index.
- void InsertAt(int index, const T& element,
- AllocationPolicy allocator = AllocationPolicy());
-
- // Added 'count' elements with the value 'value' and returns a
- // vector that allows access to the elements. The vector is valid
- // until the next change is made to this list.
- Vector<T> AddBlock(T value, int count,
- AllocationPolicy allocator = AllocationPolicy());
-
- // Removes the i'th element without deleting it even if T is a
- // pointer type; moves all elements above i "down". Returns the
- // removed element. This function's complexity is linear in the
- // size of the list.
- T Remove(int i);
-
- // Remove the given element from the list. Returns whether or not
- // the input is included in the list in the first place.
- bool RemoveElement(const T& elm);
-
- // Removes the last element without deleting it even if T is a
- // pointer type. Returns the removed element.
- INLINE(T RemoveLast()) { return Remove(length_ - 1); }
-
- // Deletes current list contents and allocates space for 'length' elements.
- INLINE(void Allocate(int length,
- AllocationPolicy allocator = AllocationPolicy()));
-
- // Clears the list by setting the length to zero. Even if T is a
- // pointer type, clearing the list doesn't delete the entries.
- INLINE(void Clear());
-
- // Drops all but the first 'pos' elements from the list.
- INLINE(void Rewind(int pos));
-
- // Drop the last 'count' elements from the list.
- INLINE(void RewindBy(int count)) { Rewind(length_ - count); }
-
- // Halve the capacity if fill level is less than a quarter.
- INLINE(void Trim(AllocationPolicy allocator = AllocationPolicy()));
-
- bool Contains(const T& elm) const;
- int CountOccurrences(const T& elm, int start, int end) const;
-
- // Iterate through all list entries, starting at index 0.
- void Iterate(void (*callback)(T* x));
- template<class Visitor>
- void Iterate(Visitor* visitor);
-
- // Sort all list entries (using QuickSort)
- void Sort(int (*cmp)(const T* x, const T* y));
- void Sort();
-
- INLINE(void Initialize(int capacity,
- AllocationPolicy allocator = AllocationPolicy()));
-
- private:
- T* data_;
- int capacity_;
- int length_;
-
- INLINE(T* NewData(int n, AllocationPolicy allocator)) {
- return static_cast<T*>(allocator.New(n * sizeof(T)));
- }
- INLINE(void DeleteData(T* data)) {
- AllocationPolicy::Delete(data);
- }
-
- // Increase the capacity of a full list, and add an element.
- // List must be full already.
- void ResizeAdd(const T& element, AllocationPolicy allocator);
-
- // Inlined implementation of ResizeAdd, shared by inlined and
- // non-inlined versions of ResizeAdd.
- void ResizeAddInternal(const T& element, AllocationPolicy allocator);
-
- // Resize the list.
- void Resize(int new_capacity, AllocationPolicy allocator);
-
- DISALLOW_COPY_AND_ASSIGN(List);
-};
-
-class Map;
-class Code;
-template<typename T> class Handle;
-typedef List<Map*> MapList;
-typedef List<Code*> CodeList;
-typedef List<Handle<Map> > MapHandleList;
-typedef List<Handle<Code> > CodeHandleList;
-
-// Perform binary search for an element in an already sorted
-// list. Returns the index of the element of -1 if it was not found.
-// |cmp| is a predicate that takes a pointer to an element of the List
-// and returns +1 if it is greater, -1 if it is less than the element
-// being searched.
-template <typename T, class P>
-int SortedListBSearch(const List<T>& list, P cmp);
-template <typename T>
-int SortedListBSearch(const List<T>& list, T elem);
-
-
-} } // namespace v8::internal
-
-
-#endif // V8_LIST_H_
diff --git a/src/3rdparty/v8/src/lithium-allocator-inl.h b/src/3rdparty/v8/src/lithium-allocator-inl.h
deleted file mode 100644
index a6d053a..0000000
--- a/src/3rdparty/v8/src/lithium-allocator-inl.h
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LITHIUM_ALLOCATOR_INL_H_
-#define V8_LITHIUM_ALLOCATOR_INL_H_
-
-#include "lithium-allocator.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-mips.h"
-#else
-#error "Unknown architecture."
-#endif
-
-namespace v8 {
-namespace internal {
-
-bool LAllocator::IsGapAt(int index) { return chunk_->IsGapAt(index); }
-
-
-LInstruction* LAllocator::InstructionAt(int index) {
- return chunk_->instructions()->at(index);
-}
-
-
-LGap* LAllocator::GapAt(int index) {
- return chunk_->GetGapAt(index);
-}
-
-
-TempIterator::TempIterator(LInstruction* instr)
- : instr_(instr),
- limit_(instr->TempCount()),
- current_(0) {
- SkipUninteresting();
-}
-
-
-bool TempIterator::Done() { return current_ >= limit_; }
-
-
-LOperand* TempIterator::Current() {
- ASSERT(!Done());
- return instr_->TempAt(current_);
-}
-
-
-void TempIterator::SkipUninteresting() {
- while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_;
-}
-
-
-void TempIterator::Advance() {
- ++current_;
- SkipUninteresting();
-}
-
-
-InputIterator::InputIterator(LInstruction* instr)
- : instr_(instr),
- limit_(instr->InputCount()),
- current_(0) {
- SkipUninteresting();
-}
-
-
-bool InputIterator::Done() { return current_ >= limit_; }
-
-
-LOperand* InputIterator::Current() {
- ASSERT(!Done());
- ASSERT(instr_->InputAt(current_) != NULL);
- return instr_->InputAt(current_);
-}
-
-
-void InputIterator::Advance() {
- ++current_;
- SkipUninteresting();
-}
-
-
-void InputIterator::SkipUninteresting() {
- while (current_ < limit_) {
- LOperand* current = instr_->InputAt(current_);
- if (current != NULL && !current->IsConstantOperand()) break;
- ++current_;
- }
-}
-
-
-UseIterator::UseIterator(LInstruction* instr)
- : input_iterator_(instr), env_iterator_(instr->environment()) { }
-
-
-bool UseIterator::Done() {
- return input_iterator_.Done() && env_iterator_.Done();
-}
-
-
-LOperand* UseIterator::Current() {
- ASSERT(!Done());
- LOperand* result = input_iterator_.Done()
- ? env_iterator_.Current()
- : input_iterator_.Current();
- ASSERT(result != NULL);
- return result;
-}
-
-
-void UseIterator::Advance() {
- input_iterator_.Done()
- ? env_iterator_.Advance()
- : input_iterator_.Advance();
-}
-
-
-void LAllocator::SetLiveRangeAssignedRegister(
- LiveRange* range,
- int reg,
- RegisterKind register_kind,
- Zone* zone) {
- if (register_kind == DOUBLE_REGISTERS) {
- assigned_double_registers_->Add(reg);
- } else {
- assigned_registers_->Add(reg);
- }
- range->set_assigned_register(reg, register_kind, zone);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_LITHIUM_ALLOCATOR_INL_H_
diff --git a/src/3rdparty/v8/src/lithium-allocator.cc b/src/3rdparty/v8/src/lithium-allocator.cc
deleted file mode 100644
index dcfbead..0000000
--- a/src/3rdparty/v8/src/lithium-allocator.cc
+++ /dev/null
@@ -1,2133 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "lithium-allocator-inl.h"
-
-#include "hydrogen.h"
-#include "string-stream.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-mips.h"
-#else
-#error "Unknown architecture."
-#endif
-
-namespace v8 {
-namespace internal {
-
-static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
- return a.Value() < b.Value() ? a : b;
-}
-
-
-static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
- return a.Value() > b.Value() ? a : b;
-}
-
-
-UsePosition::UsePosition(LifetimePosition pos, LOperand* operand)
- : operand_(operand),
- hint_(NULL),
- pos_(pos),
- next_(NULL),
- requires_reg_(false),
- register_beneficial_(true) {
- if (operand_ != NULL && operand_->IsUnallocated()) {
- LUnallocated* unalloc = LUnallocated::cast(operand_);
- requires_reg_ = unalloc->HasRegisterPolicy();
- register_beneficial_ = !unalloc->HasAnyPolicy();
- }
- ASSERT(pos_.IsValid());
-}
-
-
-bool UsePosition::HasHint() const {
- return hint_ != NULL && !hint_->IsUnallocated();
-}
-
-
-bool UsePosition::RequiresRegister() const {
- return requires_reg_;
-}
-
-
-bool UsePosition::RegisterIsBeneficial() const {
- return register_beneficial_;
-}
-
-
-void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
- ASSERT(Contains(pos) && pos.Value() != start().Value());
- UseInterval* after = new(zone) UseInterval(pos, end_);
- after->next_ = next_;
- next_ = after;
- end_ = pos;
-}
-
-
-#ifdef DEBUG
-
-
-void LiveRange::Verify() const {
- UsePosition* cur = first_pos_;
- while (cur != NULL) {
- ASSERT(Start().Value() <= cur->pos().Value() &&
- cur->pos().Value() <= End().Value());
- cur = cur->next();
- }
-}
-
-
-bool LiveRange::HasOverlap(UseInterval* target) const {
- UseInterval* current_interval = first_interval_;
- while (current_interval != NULL) {
- // Intervals overlap if the start of one is contained in the other.
- if (current_interval->Contains(target->start()) ||
- target->Contains(current_interval->start())) {
- return true;
- }
- current_interval = current_interval->next();
- }
- return false;
-}
-
-
-#endif
-
-
-LiveRange::LiveRange(int id, Zone* zone)
- : id_(id),
- spilled_(false),
- is_double_(false),
- assigned_register_(kInvalidAssignment),
- last_interval_(NULL),
- first_interval_(NULL),
- first_pos_(NULL),
- parent_(NULL),
- next_(NULL),
- current_interval_(NULL),
- last_processed_use_(NULL),
- spill_operand_(new(zone) LOperand()),
- spill_start_index_(kMaxInt) { }
-
-
-void LiveRange::set_assigned_register(int reg,
- RegisterKind register_kind,
- Zone* zone) {
- ASSERT(!HasRegisterAssigned() && !IsSpilled());
- assigned_register_ = reg;
- is_double_ = (register_kind == DOUBLE_REGISTERS);
- ConvertOperands(zone);
-}
-
-
-void LiveRange::MakeSpilled(Zone* zone) {
- ASSERT(!IsSpilled());
- ASSERT(TopLevel()->HasAllocatedSpillOperand());
- spilled_ = true;
- assigned_register_ = kInvalidAssignment;
- ConvertOperands(zone);
-}
-
-
-bool LiveRange::HasAllocatedSpillOperand() const {
- ASSERT(spill_operand_ != NULL);
- return !spill_operand_->IsIgnored();
-}
-
-
-void LiveRange::SetSpillOperand(LOperand* operand) {
- ASSERT(!operand->IsUnallocated());
- ASSERT(spill_operand_ != NULL);
- ASSERT(spill_operand_->IsIgnored());
- spill_operand_->ConvertTo(operand->kind(), operand->index());
-}
-
-
-UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
- UsePosition* use_pos = last_processed_use_;
- if (use_pos == NULL) use_pos = first_pos();
- while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
- use_pos = use_pos->next();
- }
- last_processed_use_ = use_pos;
- return use_pos;
-}
-
-
-UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
- LifetimePosition start) {
- UsePosition* pos = NextUsePosition(start);
- while (pos != NULL && !pos->RegisterIsBeneficial()) {
- pos = pos->next();
- }
- return pos;
-}
-
-
-UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
- UsePosition* pos = NextUsePosition(start);
- while (pos != NULL && !pos->RequiresRegister()) {
- pos = pos->next();
- }
- return pos;
-}
-
-
-bool LiveRange::CanBeSpilled(LifetimePosition pos) {
- // TODO(kmillikin): Comment. Now.
- if (pos.Value() <= Start().Value() && HasRegisterAssigned()) return false;
-
- // We cannot spill a live range that has a use requiring a register
- // at the current or the immediate next position.
- UsePosition* use_pos = NextRegisterPosition(pos);
- if (use_pos == NULL) return true;
- return
- use_pos->pos().Value() > pos.NextInstruction().InstructionEnd().Value();
-}
-
-
-UsePosition* LiveRange::FirstPosWithHint() const {
- UsePosition* pos = first_pos_;
- while (pos != NULL && !pos->HasHint()) pos = pos->next();
- return pos;
-}
-
-
-LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
- LOperand* op = NULL;
- if (HasRegisterAssigned()) {
- ASSERT(!IsSpilled());
- if (IsDouble()) {
- op = LDoubleRegister::Create(assigned_register(), zone);
- } else {
- op = LRegister::Create(assigned_register(), zone);
- }
- } else if (IsSpilled()) {
- ASSERT(!HasRegisterAssigned());
- op = TopLevel()->GetSpillOperand();
- ASSERT(!op->IsUnallocated());
- } else {
- LUnallocated* unalloc = new(zone) LUnallocated(LUnallocated::NONE);
- unalloc->set_virtual_register(id_);
- op = unalloc;
- }
- return op;
-}
-
-
-UseInterval* LiveRange::FirstSearchIntervalForPosition(
- LifetimePosition position) const {
- if (current_interval_ == NULL) return first_interval_;
- if (current_interval_->start().Value() > position.Value()) {
- current_interval_ = NULL;
- return first_interval_;
- }
- return current_interval_;
-}
-
-
-void LiveRange::AdvanceLastProcessedMarker(
- UseInterval* to_start_of, LifetimePosition but_not_past) const {
- if (to_start_of == NULL) return;
- if (to_start_of->start().Value() > but_not_past.Value()) return;
- LifetimePosition start =
- current_interval_ == NULL ? LifetimePosition::Invalid()
- : current_interval_->start();
- if (to_start_of->start().Value() > start.Value()) {
- current_interval_ = to_start_of;
- }
-}
-
-
-void LiveRange::SplitAt(LifetimePosition position,
- LiveRange* result,
- Zone* zone) {
- ASSERT(Start().Value() < position.Value());
- ASSERT(result->IsEmpty());
- // Find the last interval that ends before the position. If the
- // position is contained in one of the intervals in the chain, we
- // split that interval and use the first part.
- UseInterval* current = FirstSearchIntervalForPosition(position);
-
- // If the split position coincides with the beginning of a use interval
- // we need to split use positons in a special way.
- bool split_at_start = false;
-
- if (current->start().Value() == position.Value()) {
- // When splitting at start we need to locate the previous use interval.
- current = first_interval_;
- }
-
- while (current != NULL) {
- if (current->Contains(position)) {
- current->SplitAt(position, zone);
- break;
- }
- UseInterval* next = current->next();
- if (next->start().Value() >= position.Value()) {
- split_at_start = (next->start().Value() == position.Value());
- break;
- }
- current = next;
- }
-
- // Partition original use intervals to the two live ranges.
- UseInterval* before = current;
- UseInterval* after = before->next();
- result->last_interval_ = (last_interval_ == before)
- ? after // Only interval in the range after split.
- : last_interval_; // Last interval of the original range.
- result->first_interval_ = after;
- last_interval_ = before;
-
- // Find the last use position before the split and the first use
- // position after it.
- UsePosition* use_after = first_pos_;
- UsePosition* use_before = NULL;
- if (split_at_start) {
- // The split position coincides with the beginning of a use interval (the
- // end of a lifetime hole). Use at this position should be attributed to
- // the split child because split child owns use interval covering it.
- while (use_after != NULL && use_after->pos().Value() < position.Value()) {
- use_before = use_after;
- use_after = use_after->next();
- }
- } else {
- while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
- use_before = use_after;
- use_after = use_after->next();
- }
- }
-
- // Partition original use positions to the two live ranges.
- if (use_before != NULL) {
- use_before->next_ = NULL;
- } else {
- first_pos_ = NULL;
- }
- result->first_pos_ = use_after;
-
- // Discard cached iteration state. It might be pointing
- // to the use that no longer belongs to this live range.
- last_processed_use_ = NULL;
- current_interval_ = NULL;
-
- // Link the new live range in the chain before any of the other
- // ranges linked from the range before the split.
- result->parent_ = (parent_ == NULL) ? this : parent_;
- result->next_ = next_;
- next_ = result;
-
-#ifdef DEBUG
- Verify();
- result->Verify();
-#endif
-}
-
-
-// This implements an ordering on live ranges so that they are ordered by their
-// start positions. This is needed for the correctness of the register
-// allocation algorithm. If two live ranges start at the same offset then there
-// is a tie breaker based on where the value is first used. This part of the
-// ordering is merely a heuristic.
-bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
- LifetimePosition start = Start();
- LifetimePosition other_start = other->Start();
- if (start.Value() == other_start.Value()) {
- UsePosition* pos = FirstPosWithHint();
- if (pos == NULL) return false;
- UsePosition* other_pos = other->first_pos();
- if (other_pos == NULL) return true;
- return pos->pos().Value() < other_pos->pos().Value();
- }
- return start.Value() < other_start.Value();
-}
-
-
-void LiveRange::ShortenTo(LifetimePosition start) {
- LAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
- ASSERT(first_interval_ != NULL);
- ASSERT(first_interval_->start().Value() <= start.Value());
- ASSERT(start.Value() < first_interval_->end().Value());
- first_interval_->set_start(start);
-}
-
-
-void LiveRange::EnsureInterval(LifetimePosition start,
- LifetimePosition end,
- Zone* zone) {
- LAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
- id_,
- start.Value(),
- end.Value());
- LifetimePosition new_end = end;
- while (first_interval_ != NULL &&
- first_interval_->start().Value() <= end.Value()) {
- if (first_interval_->end().Value() > end.Value()) {
- new_end = first_interval_->end();
- }
- first_interval_ = first_interval_->next();
- }
-
- UseInterval* new_interval = new(zone) UseInterval(start, new_end);
- new_interval->next_ = first_interval_;
- first_interval_ = new_interval;
- if (new_interval->next() == NULL) {
- last_interval_ = new_interval;
- }
-}
-
-
-void LiveRange::AddUseInterval(LifetimePosition start,
- LifetimePosition end,
- Zone* zone) {
- LAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n",
- id_,
- start.Value(),
- end.Value());
- if (first_interval_ == NULL) {
- UseInterval* interval = new(zone) UseInterval(start, end);
- first_interval_ = interval;
- last_interval_ = interval;
- } else {
- if (end.Value() == first_interval_->start().Value()) {
- first_interval_->set_start(start);
- } else if (end.Value() < first_interval_->start().Value()) {
- UseInterval* interval = new(zone) UseInterval(start, end);
- interval->set_next(first_interval_);
- first_interval_ = interval;
- } else {
- // Order of instruction's processing (see ProcessInstructions) guarantees
- // that each new use interval either precedes or intersects with
- // last added interval.
- ASSERT(start.Value() < first_interval_->end().Value());
- first_interval_->start_ = Min(start, first_interval_->start_);
- first_interval_->end_ = Max(end, first_interval_->end_);
- }
- }
-}
-
-
-UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
- LOperand* operand,
- Zone* zone) {
- LAllocator::TraceAlloc("Add to live range %d use position %d\n",
- id_,
- pos.Value());
- UsePosition* use_pos = new(zone) UsePosition(pos, operand);
- UsePosition* prev = NULL;
- UsePosition* current = first_pos_;
- while (current != NULL && current->pos().Value() < pos.Value()) {
- prev = current;
- current = current->next();
- }
-
- if (prev == NULL) {
- use_pos->set_next(first_pos_);
- first_pos_ = use_pos;
- } else {
- use_pos->next_ = prev->next_;
- prev->next_ = use_pos;
- }
-
- return use_pos;
-}
-
-
-void LiveRange::ConvertOperands(Zone* zone) {
- LOperand* op = CreateAssignedOperand(zone);
- UsePosition* use_pos = first_pos();
- while (use_pos != NULL) {
- ASSERT(Start().Value() <= use_pos->pos().Value() &&
- use_pos->pos().Value() <= End().Value());
-
- if (use_pos->HasOperand()) {
- ASSERT(op->IsRegister() || op->IsDoubleRegister() ||
- !use_pos->RequiresRegister());
- use_pos->operand()->ConvertTo(op->kind(), op->index());
- }
- use_pos = use_pos->next();
- }
-}
-
-
-bool LiveRange::CanCover(LifetimePosition position) const {
- if (IsEmpty()) return false;
- return Start().Value() <= position.Value() &&
- position.Value() < End().Value();
-}
-
-
-bool LiveRange::Covers(LifetimePosition position) {
- if (!CanCover(position)) return false;
- UseInterval* start_search = FirstSearchIntervalForPosition(position);
- for (UseInterval* interval = start_search;
- interval != NULL;
- interval = interval->next()) {
- ASSERT(interval->next() == NULL ||
- interval->next()->start().Value() >= interval->start().Value());
- AdvanceLastProcessedMarker(interval, position);
- if (interval->Contains(position)) return true;
- if (interval->start().Value() > position.Value()) return false;
- }
- return false;
-}
-
-
-LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
- UseInterval* b = other->first_interval();
- if (b == NULL) return LifetimePosition::Invalid();
- LifetimePosition advance_last_processed_up_to = b->start();
- UseInterval* a = FirstSearchIntervalForPosition(b->start());
- while (a != NULL && b != NULL) {
- if (a->start().Value() > other->End().Value()) break;
- if (b->start().Value() > End().Value()) break;
- LifetimePosition cur_intersection = a->Intersect(b);
- if (cur_intersection.IsValid()) {
- return cur_intersection;
- }
- if (a->start().Value() < b->start().Value()) {
- a = a->next();
- if (a == NULL || a->start().Value() > other->End().Value()) break;
- AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
- } else {
- b = b->next();
- }
- }
- return LifetimePosition::Invalid();
-}
-
-
-LAllocator::LAllocator(int num_values, HGraph* graph)
- : zone_(graph->zone()),
- chunk_(NULL),
- live_in_sets_(graph->blocks()->length(), zone_),
- live_ranges_(num_values * 2, zone_),
- fixed_live_ranges_(NULL),
- fixed_double_live_ranges_(NULL),
- unhandled_live_ranges_(num_values * 2, zone_),
- active_live_ranges_(8, zone_),
- inactive_live_ranges_(8, zone_),
- reusable_slots_(8, zone_),
- next_virtual_register_(num_values),
- first_artificial_register_(num_values),
- mode_(GENERAL_REGISTERS),
- num_registers_(-1),
- graph_(graph),
- has_osr_entry_(false),
- allocation_ok_(true) { }
-
-
-void LAllocator::InitializeLivenessAnalysis() {
- // Initialize the live_in sets for each block to NULL.
- int block_count = graph_->blocks()->length();
- live_in_sets_.Initialize(block_count, zone());
- live_in_sets_.AddBlock(NULL, block_count, zone());
-}
-
-
-BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
- // Compute live out for the given block, except not including backward
- // successor edges.
- BitVector* live_out = new(zone_) BitVector(next_virtual_register_, zone_);
-
- // Process all successor blocks.
- for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
- // Add values live on entry to the successor. Note the successor's
- // live_in will not be computed yet for backwards edges.
- HBasicBlock* successor = it.Current();
- BitVector* live_in = live_in_sets_[successor->block_id()];
- if (live_in != NULL) live_out->Union(*live_in);
-
- // All phi input operands corresponding to this successor edge are live
- // out from this block.
- int index = successor->PredecessorIndexOf(block);
- const ZoneList<HPhi*>* phis = successor->phis();
- for (int i = 0; i < phis->length(); ++i) {
- HPhi* phi = phis->at(i);
- if (!phi->OperandAt(index)->IsConstant()) {
- live_out->Add(phi->OperandAt(index)->id());
- }
- }
- }
-
- return live_out;
-}
-
-
-void LAllocator::AddInitialIntervals(HBasicBlock* block,
- BitVector* live_out) {
- // Add an interval that includes the entire block to the live range for
- // each live_out value.
- LifetimePosition start = LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
- LifetimePosition end = LifetimePosition::FromInstructionIndex(
- block->last_instruction_index()).NextInstruction();
- BitVector::Iterator iterator(live_out);
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
- LiveRange* range = LiveRangeFor(operand_index);
- range->AddUseInterval(start, end, zone_);
- iterator.Advance();
- }
-}
-
-
-int LAllocator::FixedDoubleLiveRangeID(int index) {
- return -index - 1 - Register::kMaxNumAllocatableRegisters;
-}
-
-
-LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
- int pos,
- bool is_tagged) {
- TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
- ASSERT(operand->HasFixedPolicy());
- if (operand->policy() == LUnallocated::FIXED_SLOT) {
- operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_index());
- } else if (operand->policy() == LUnallocated::FIXED_REGISTER) {
- int reg_index = operand->fixed_index();
- operand->ConvertTo(LOperand::REGISTER, reg_index);
- } else if (operand->policy() == LUnallocated::FIXED_DOUBLE_REGISTER) {
- int reg_index = operand->fixed_index();
- operand->ConvertTo(LOperand::DOUBLE_REGISTER, reg_index);
- } else {
- UNREACHABLE();
- }
- if (is_tagged) {
- TraceAlloc("Fixed reg is tagged at %d\n", pos);
- LInstruction* instr = InstructionAt(pos);
- if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(operand, zone());
- }
- }
- return operand;
-}
-
-
-LiveRange* LAllocator::FixedLiveRangeFor(int index) {
- ASSERT(index < Register::kMaxNumAllocatableRegisters);
- LiveRange* result = fixed_live_ranges_[index];
- if (result == NULL) {
- result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
- ASSERT(result->IsFixed());
- SetLiveRangeAssignedRegister(result, index, GENERAL_REGISTERS, zone_);
- fixed_live_ranges_[index] = result;
- }
- return result;
-}
-
-
-LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
- ASSERT(index < DoubleRegister::NumAllocatableRegisters());
- LiveRange* result = fixed_double_live_ranges_[index];
- if (result == NULL) {
- result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
- ASSERT(result->IsFixed());
- SetLiveRangeAssignedRegister(result, index, DOUBLE_REGISTERS, zone_);
- fixed_double_live_ranges_[index] = result;
- }
- return result;
-}
-
-
-LiveRange* LAllocator::LiveRangeFor(int index) {
- if (index >= live_ranges_.length()) {
- live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
- }
- LiveRange* result = live_ranges_[index];
- if (result == NULL) {
- result = new(zone_) LiveRange(index, zone_);
- live_ranges_[index] = result;
- }
- return result;
-}
-
-
-LGap* LAllocator::GetLastGap(HBasicBlock* block) {
- int last_instruction = block->last_instruction_index();
- int index = chunk_->NearestGapPos(last_instruction);
- return GapAt(index);
-}
-
-
-HPhi* LAllocator::LookupPhi(LOperand* operand) const {
- if (!operand->IsUnallocated()) return NULL;
- int index = LUnallocated::cast(operand)->virtual_register();
- HValue* instr = graph_->LookupValue(index);
- if (instr != NULL && instr->IsPhi()) {
- return HPhi::cast(instr);
- }
- return NULL;
-}
-
-
-LiveRange* LAllocator::LiveRangeFor(LOperand* operand) {
- if (operand->IsUnallocated()) {
- return LiveRangeFor(LUnallocated::cast(operand)->virtual_register());
- } else if (operand->IsRegister()) {
- return FixedLiveRangeFor(operand->index());
- } else if (operand->IsDoubleRegister()) {
- return FixedDoubleLiveRangeFor(operand->index());
- } else {
- return NULL;
- }
-}
-
-
-void LAllocator::Define(LifetimePosition position,
- LOperand* operand,
- LOperand* hint) {
- LiveRange* range = LiveRangeFor(operand);
- if (range == NULL) return;
-
- if (range->IsEmpty() || range->Start().Value() > position.Value()) {
- // Can happen if there is a definition without use.
- range->AddUseInterval(position, position.NextInstruction(), zone_);
- range->AddUsePosition(position.NextInstruction(), NULL, zone_);
- } else {
- range->ShortenTo(position);
- }
-
- if (operand->IsUnallocated()) {
- LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
- }
-}
-
-
-void LAllocator::Use(LifetimePosition block_start,
- LifetimePosition position,
- LOperand* operand,
- LOperand* hint) {
- LiveRange* range = LiveRangeFor(operand);
- if (range == NULL) return;
- if (operand->IsUnallocated()) {
- LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
- }
- range->AddUseInterval(block_start, position, zone_);
-}
-
-
-void LAllocator::AddConstraintsGapMove(int index,
- LOperand* from,
- LOperand* to) {
- LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
- if (from->IsUnallocated()) {
- const ZoneList<LMoveOperands>* move_operands = move->move_operands();
- for (int i = 0; i < move_operands->length(); ++i) {
- LMoveOperands cur = move_operands->at(i);
- LOperand* cur_to = cur.destination();
- if (cur_to->IsUnallocated()) {
- if (LUnallocated::cast(cur_to)->virtual_register() ==
- LUnallocated::cast(from)->virtual_register()) {
- move->AddMove(cur.source(), to, zone());
- return;
- }
- }
- }
- }
- move->AddMove(from, to, zone());
-}
-
-
-void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
- int start = block->first_instruction_index();
- int end = block->last_instruction_index();
- if (start == -1) return;
- for (int i = start; i <= end; ++i) {
- if (IsGapAt(i)) {
- LInstruction* instr = NULL;
- LInstruction* prev_instr = NULL;
- if (i < end) instr = InstructionAt(i + 1);
- if (i > start) prev_instr = InstructionAt(i - 1);
- MeetConstraintsBetween(prev_instr, instr, i);
- if (!AllocationOk()) return;
- }
- }
-}
-
-
-void LAllocator::MeetConstraintsBetween(LInstruction* first,
- LInstruction* second,
- int gap_index) {
- // Handle fixed temporaries.
- if (first != NULL) {
- for (TempIterator it(first); !it.Done(); it.Advance()) {
- LUnallocated* temp = LUnallocated::cast(it.Current());
- if (temp->HasFixedPolicy()) {
- AllocateFixed(temp, gap_index - 1, false);
- }
- }
- }
-
- // Handle fixed output operand.
- if (first != NULL && first->Output() != NULL) {
- LUnallocated* first_output = LUnallocated::cast(first->Output());
- LiveRange* range = LiveRangeFor(first_output->virtual_register());
- bool assigned = false;
- if (first_output->HasFixedPolicy()) {
- LUnallocated* output_copy = first_output->CopyUnconstrained(zone());
- bool is_tagged = HasTaggedValue(first_output->virtual_register());
- AllocateFixed(first_output, gap_index, is_tagged);
-
- // This value is produced on the stack, we never need to spill it.
- if (first_output->IsStackSlot()) {
- range->SetSpillOperand(first_output);
- range->SetSpillStartIndex(gap_index - 1);
- assigned = true;
- }
- chunk_->AddGapMove(gap_index, first_output, output_copy);
- }
-
- if (!assigned) {
- range->SetSpillStartIndex(gap_index);
-
- // This move to spill operand is not a real use. Liveness analysis
- // and splitting of live ranges do not account for it.
- // Thus it should be inserted to a lifetime position corresponding to
- // the instruction end.
- LGap* gap = GapAt(gap_index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE, zone());
- move->AddMove(first_output, range->GetSpillOperand(), zone());
- }
- }
-
- // Handle fixed input operands of second instruction.
- if (second != NULL) {
- for (UseIterator it(second); !it.Done(); it.Advance()) {
- LUnallocated* cur_input = LUnallocated::cast(it.Current());
- if (cur_input->HasFixedPolicy()) {
- LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
- bool is_tagged = HasTaggedValue(cur_input->virtual_register());
- AllocateFixed(cur_input, gap_index + 1, is_tagged);
- AddConstraintsGapMove(gap_index, input_copy, cur_input);
- } else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
- // The live range of writable input registers always goes until the end
- // of the instruction.
- ASSERT(!cur_input->IsUsedAtStart());
-
- LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
- cur_input->set_virtual_register(GetVirtualRegister());
- if (!AllocationOk()) return;
-
- if (RequiredRegisterKind(input_copy->virtual_register()) ==
- DOUBLE_REGISTERS) {
- double_artificial_registers_.Add(
- cur_input->virtual_register() - first_artificial_register_,
- zone_);
- }
-
- AddConstraintsGapMove(gap_index, input_copy, cur_input);
- }
- }
- }
-
- // Handle "output same as input" for second instruction.
- if (second != NULL && second->Output() != NULL) {
- LUnallocated* second_output = LUnallocated::cast(second->Output());
- if (second_output->HasSameAsInputPolicy()) {
- LUnallocated* cur_input = LUnallocated::cast(second->FirstInput());
- int output_vreg = second_output->virtual_register();
- int input_vreg = cur_input->virtual_register();
-
- LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
- cur_input->set_virtual_register(second_output->virtual_register());
- AddConstraintsGapMove(gap_index, input_copy, cur_input);
-
- if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
- int index = gap_index + 1;
- LInstruction* instr = InstructionAt(index);
- if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(input_copy, zone());
- }
- } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
- // The input is assumed to immediately have a tagged representation,
- // before the pointer map can be used. I.e. the pointer map at the
- // instruction will include the output operand (whose value at the
- // beginning of the instruction is equal to the input operand). If
- // this is not desired, then the pointer map at this instruction needs
- // to be adjusted manually.
- }
- }
- }
-}
-
-
-void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
- int block_start = block->first_instruction_index();
- int index = block->last_instruction_index();
-
- LifetimePosition block_start_position =
- LifetimePosition::FromInstructionIndex(block_start);
-
- while (index >= block_start) {
- LifetimePosition curr_position =
- LifetimePosition::FromInstructionIndex(index);
-
- if (IsGapAt(index)) {
- // We have a gap at this position.
- LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
- const ZoneList<LMoveOperands>* move_operands = move->move_operands();
- for (int i = 0; i < move_operands->length(); ++i) {
- LMoveOperands* cur = &move_operands->at(i);
- if (cur->IsIgnored()) continue;
- LOperand* from = cur->source();
- LOperand* to = cur->destination();
- HPhi* phi = LookupPhi(to);
- LOperand* hint = to;
- if (phi != NULL) {
- // This is a phi resolving move.
- if (!phi->block()->IsLoopHeader()) {
- hint = LiveRangeFor(phi->id())->FirstHint();
- }
- } else {
- if (to->IsUnallocated()) {
- if (live->Contains(LUnallocated::cast(to)->virtual_register())) {
- Define(curr_position, to, from);
- live->Remove(LUnallocated::cast(to)->virtual_register());
- } else {
- cur->Eliminate();
- continue;
- }
- } else {
- Define(curr_position, to, from);
- }
- }
- Use(block_start_position, curr_position, from, hint);
- if (from->IsUnallocated()) {
- live->Add(LUnallocated::cast(from)->virtual_register());
- }
- }
- } else {
- ASSERT(!IsGapAt(index));
- LInstruction* instr = InstructionAt(index);
-
- if (instr != NULL) {
- LOperand* output = instr->Output();
- if (output != NULL) {
- if (output->IsUnallocated()) {
- live->Remove(LUnallocated::cast(output)->virtual_register());
- }
- Define(curr_position, output, NULL);
- }
-
- if (instr->ClobbersRegisters()) {
- for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
- if (output == NULL || !output->IsRegister() ||
- output->index() != i) {
- LiveRange* range = FixedLiveRangeFor(i);
- range->AddUseInterval(curr_position,
- curr_position.InstructionEnd(),
- zone_);
- }
- }
- }
-
- if (instr->ClobbersDoubleRegisters()) {
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- if (output == NULL || !output->IsDoubleRegister() ||
- output->index() != i) {
- LiveRange* range = FixedDoubleLiveRangeFor(i);
- range->AddUseInterval(curr_position,
- curr_position.InstructionEnd(),
- zone_);
- }
- }
- }
-
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LOperand* input = it.Current();
-
- LifetimePosition use_pos;
- if (input->IsUnallocated() &&
- LUnallocated::cast(input)->IsUsedAtStart()) {
- use_pos = curr_position;
- } else {
- use_pos = curr_position.InstructionEnd();
- }
-
- Use(block_start_position, use_pos, input, NULL);
- if (input->IsUnallocated()) {
- live->Add(LUnallocated::cast(input)->virtual_register());
- }
- }
-
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LOperand* temp = it.Current();
- if (instr->ClobbersTemps()) {
- if (temp->IsRegister()) continue;
- if (temp->IsUnallocated()) {
- LUnallocated* temp_unalloc = LUnallocated::cast(temp);
- if (temp_unalloc->HasFixedPolicy()) {
- continue;
- }
- }
- }
- Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
- Define(curr_position, temp, NULL);
- }
- }
- }
-
- index = index - 1;
- }
-}
-
-
-void LAllocator::ResolvePhis(HBasicBlock* block) {
- const ZoneList<HPhi*>* phis = block->phis();
- for (int i = 0; i < phis->length(); ++i) {
- HPhi* phi = phis->at(i);
- LUnallocated* phi_operand = new(zone_) LUnallocated(LUnallocated::NONE);
- phi_operand->set_virtual_register(phi->id());
- for (int j = 0; j < phi->OperandCount(); ++j) {
- HValue* op = phi->OperandAt(j);
- LOperand* operand = NULL;
- if (op->IsConstant() && op->EmitAtUses()) {
- HConstant* constant = HConstant::cast(op);
- operand = chunk_->DefineConstantOperand(constant);
- } else {
- ASSERT(!op->EmitAtUses());
- LUnallocated* unalloc = new(zone_) LUnallocated(LUnallocated::ANY);
- unalloc->set_virtual_register(op->id());
- operand = unalloc;
- }
- HBasicBlock* cur_block = block->predecessors()->at(j);
- // The gap move must be added without any special processing as in
- // the AddConstraintsGapMove.
- chunk_->AddGapMove(cur_block->last_instruction_index() - 1,
- operand,
- phi_operand);
-
- // We are going to insert a move before the branch instruction.
- // Some branch instructions (e.g. loops' back edges)
- // can potentially cause a GC so they have a pointer map.
- // By inserting a move we essentially create a copy of a
- // value which is invisible to PopulatePointerMaps(), because we store
- // it into a location different from the operand of a live range
- // covering a branch instruction.
- // Thus we need to manually record a pointer.
- LInstruction* branch =
- InstructionAt(cur_block->last_instruction_index());
- if (branch->HasPointerMap()) {
- if (phi->representation().IsTagged()) {
- branch->pointer_map()->RecordPointer(phi_operand, zone());
- } else if (!phi->representation().IsDouble()) {
- branch->pointer_map()->RecordUntagged(phi_operand, zone());
- }
- }
- }
-
- LiveRange* live_range = LiveRangeFor(phi->id());
- LLabel* label = chunk_->GetLabel(phi->block()->block_id());
- label->GetOrCreateParallelMove(LGap::START, zone())->
- AddMove(phi_operand, live_range->GetSpillOperand(), zone());
- live_range->SetSpillStartIndex(phi->block()->first_instruction_index());
- }
-}
-
-
-bool LAllocator::Allocate(LChunk* chunk) {
- ASSERT(chunk_ == NULL);
- chunk_ = static_cast<LPlatformChunk*>(chunk);
- assigned_registers_ =
- new(zone()) BitVector(Register::NumAllocatableRegisters(), zone());
- assigned_registers_->Clear();
- assigned_double_registers_ =
- new(zone()) BitVector(DoubleRegister::NumAllocatableRegisters(),
- zone());
- assigned_double_registers_->Clear();
- MeetRegisterConstraints();
- if (!AllocationOk()) return false;
- ResolvePhis();
- BuildLiveRanges();
- AllocateGeneralRegisters();
- if (!AllocationOk()) return false;
- AllocateDoubleRegisters();
- if (!AllocationOk()) return false;
- PopulatePointerMaps();
- if (has_osr_entry_) ProcessOsrEntry();
- ConnectRanges();
- ResolveControlFlow();
- return true;
-}
-
-
-void LAllocator::MeetRegisterConstraints() {
- HPhase phase("L_Register constraints", chunk_);
- first_artificial_register_ = next_virtual_register_;
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int i = 0; i < blocks->length(); ++i) {
- HBasicBlock* block = blocks->at(i);
- MeetRegisterConstraints(block);
- if (!AllocationOk()) return;
- }
-}
-
-
-void LAllocator::ResolvePhis() {
- HPhase phase("L_Resolve phis", chunk_);
-
- // Process the blocks in reverse order.
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
- HBasicBlock* block = blocks->at(block_id);
- ResolvePhis(block);
- }
-}
-
-
-void LAllocator::ResolveControlFlow(LiveRange* range,
- HBasicBlock* block,
- HBasicBlock* pred) {
- LifetimePosition pred_end =
- LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
- LifetimePosition cur_start =
- LifetimePosition::FromInstructionIndex(block->first_instruction_index());
- LiveRange* pred_cover = NULL;
- LiveRange* cur_cover = NULL;
- LiveRange* cur_range = range;
- while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
- if (cur_range->CanCover(cur_start)) {
- ASSERT(cur_cover == NULL);
- cur_cover = cur_range;
- }
- if (cur_range->CanCover(pred_end)) {
- ASSERT(pred_cover == NULL);
- pred_cover = cur_range;
- }
- cur_range = cur_range->next();
- }
-
- if (cur_cover->IsSpilled()) return;
- ASSERT(pred_cover != NULL && cur_cover != NULL);
- if (pred_cover != cur_cover) {
- LOperand* pred_op = pred_cover->CreateAssignedOperand(zone_);
- LOperand* cur_op = cur_cover->CreateAssignedOperand(zone_);
- if (!pred_op->Equals(cur_op)) {
- LGap* gap = NULL;
- if (block->predecessors()->length() == 1) {
- gap = GapAt(block->first_instruction_index());
- } else {
- ASSERT(pred->end()->SecondSuccessor() == NULL);
- gap = GetLastGap(pred);
-
- // We are going to insert a move before the branch instruction.
- // Some branch instructions (e.g. loops' back edges)
- // can potentially cause a GC so they have a pointer map.
- // By inserting a move we essentially create a copy of a
- // value which is invisible to PopulatePointerMaps(), because we store
- // it into a location different from the operand of a live range
- // covering a branch instruction.
- // Thus we need to manually record a pointer.
- LInstruction* branch = InstructionAt(pred->last_instruction_index());
- if (branch->HasPointerMap()) {
- if (HasTaggedValue(range->id())) {
- branch->pointer_map()->RecordPointer(cur_op, zone());
- } else if (!cur_op->IsDoubleStackSlot() &&
- !cur_op->IsDoubleRegister()) {
- branch->pointer_map()->RemovePointer(cur_op);
- }
- }
- }
- gap->GetOrCreateParallelMove(
- LGap::START, zone())->AddMove(pred_op, cur_op, zone());
- }
- }
-}
-
-
-LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
- int index = pos.InstructionIndex();
- if (IsGapAt(index)) {
- LGap* gap = GapAt(index);
- return gap->GetOrCreateParallelMove(
- pos.IsInstructionStart() ? LGap::START : LGap::END, zone());
- }
- int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
- return GapAt(gap_pos)->GetOrCreateParallelMove(
- (gap_pos < index) ? LGap::AFTER : LGap::BEFORE, zone());
-}
-
-
-HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
- LGap* gap = GapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
- return gap->block();
-}
-
-
-void LAllocator::ConnectRanges() {
- HPhase phase("L_Connect ranges", this);
- for (int i = 0; i < live_ranges()->length(); ++i) {
- LiveRange* first_range = live_ranges()->at(i);
- if (first_range == NULL || first_range->parent() != NULL) continue;
-
- LiveRange* second_range = first_range->next();
- while (second_range != NULL) {
- LifetimePosition pos = second_range->Start();
-
- if (!second_range->IsSpilled()) {
- // Add gap move if the two live ranges touch and there is no block
- // boundary.
- if (first_range->End().Value() == pos.Value()) {
- bool should_insert = true;
- if (IsBlockBoundary(pos)) {
- should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
- }
- if (should_insert) {
- LParallelMove* move = GetConnectingParallelMove(pos);
- LOperand* prev_operand = first_range->CreateAssignedOperand(zone_);
- LOperand* cur_operand = second_range->CreateAssignedOperand(zone_);
- move->AddMove(prev_operand, cur_operand, zone());
- }
- }
- }
-
- first_range = second_range;
- second_range = second_range->next();
- }
- }
-}
-
-
-bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const {
- if (block->predecessors()->length() != 1) return false;
- return block->predecessors()->first()->block_id() == block->block_id() - 1;
-}
-
-
-void LAllocator::ResolveControlFlow() {
- HPhase phase("L_Resolve control flow", this);
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int block_id = 1; block_id < blocks->length(); ++block_id) {
- HBasicBlock* block = blocks->at(block_id);
- if (CanEagerlyResolveControlFlow(block)) continue;
- BitVector* live = live_in_sets_[block->block_id()];
- BitVector::Iterator iterator(live);
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
- for (int i = 0; i < block->predecessors()->length(); ++i) {
- HBasicBlock* cur = block->predecessors()->at(i);
- LiveRange* cur_range = LiveRangeFor(operand_index);
- ResolveControlFlow(cur_range, block, cur);
- }
- iterator.Advance();
- }
- }
-}
-
-
-void LAllocator::BuildLiveRanges() {
- HPhase phase("L_Build live ranges", this);
- InitializeLivenessAnalysis();
- // Process the blocks in reverse order.
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
- HBasicBlock* block = blocks->at(block_id);
- BitVector* live = ComputeLiveOut(block);
- // Initially consider all live_out values live for the entire block. We
- // will shorten these intervals if necessary.
- AddInitialIntervals(block, live);
-
- // Process the instructions in reverse order, generating and killing
- // live values.
- ProcessInstructions(block, live);
- // All phi output operands are killed by this block.
- const ZoneList<HPhi*>* phis = block->phis();
- for (int i = 0; i < phis->length(); ++i) {
- // The live range interval already ends at the first instruction of the
- // block.
- HPhi* phi = phis->at(i);
- live->Remove(phi->id());
-
- LOperand* hint = NULL;
- LOperand* phi_operand = NULL;
- LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
- for (int j = 0; j < move->move_operands()->length(); ++j) {
- LOperand* to = move->move_operands()->at(j).destination();
- if (to->IsUnallocated() &&
- LUnallocated::cast(to)->virtual_register() == phi->id()) {
- hint = move->move_operands()->at(j).source();
- phi_operand = to;
- break;
- }
- }
- ASSERT(hint != NULL);
-
- LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
- Define(block_start, phi_operand, hint);
- }
-
- // Now live is live_in for this block except not including values live
- // out on backward successor edges.
- live_in_sets_[block_id] = live;
-
- // If this block is a loop header go back and patch up the necessary
- // predecessor blocks.
- if (block->IsLoopHeader()) {
- // TODO(kmillikin): Need to be able to get the last block of the loop
- // in the loop information. Add a live range stretching from the first
- // loop instruction to the last for each value live on entry to the
- // header.
- HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
- BitVector::Iterator iterator(live);
- LifetimePosition start = LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
- LifetimePosition end = LifetimePosition::FromInstructionIndex(
- back_edge->last_instruction_index()).NextInstruction();
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
- LiveRange* range = LiveRangeFor(operand_index);
- range->EnsureInterval(start, end, zone_);
- iterator.Advance();
- }
-
- for (int i = block->block_id() + 1; i <= back_edge->block_id(); ++i) {
- live_in_sets_[i]->Union(*live);
- }
- }
-
-#ifdef DEBUG
- if (block_id == 0) {
- BitVector::Iterator iterator(live);
- bool found = false;
- while (!iterator.Done()) {
- found = true;
- int operand_index = iterator.Current();
- if (chunk_->info()->IsStub()) {
- CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
- PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
- } else {
- ASSERT(chunk_->info()->IsOptimizing());
- PrintF("Function: %s\n",
- *chunk_->info()->function()->debug_name()->ToCString());
- }
- PrintF("Value %d used before first definition!\n", operand_index);
- LiveRange* range = LiveRangeFor(operand_index);
- PrintF("First use is at %d\n", range->first_pos()->pos().Value());
- iterator.Advance();
- }
- ASSERT(!found);
- }
-#endif
- }
-}
-
-
-bool LAllocator::SafePointsAreInOrder() const {
- const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
- int safe_point = 0;
- for (int i = 0; i < pointer_maps->length(); ++i) {
- LPointerMap* map = pointer_maps->at(i);
- if (safe_point > map->lithium_position()) return false;
- safe_point = map->lithium_position();
- }
- return true;
-}
-
-
-void LAllocator::PopulatePointerMaps() {
- HPhase phase("L_Populate pointer maps", this);
- const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
-
- ASSERT(SafePointsAreInOrder());
-
- // Iterate over all safe point positions and record a pointer
- // for all spilled live ranges at this point.
- int first_safe_point_index = 0;
- int last_range_start = 0;
- for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
- LiveRange* range = live_ranges()->at(range_idx);
- if (range == NULL) continue;
- // Iterate over the first parts of multi-part live ranges.
- if (range->parent() != NULL) continue;
- // Skip non-pointer values.
- if (!HasTaggedValue(range->id())) continue;
- // Skip empty live ranges.
- if (range->IsEmpty()) continue;
-
- // Find the extent of the range and its children.
- int start = range->Start().InstructionIndex();
- int end = 0;
- for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
- LifetimePosition this_end = cur->End();
- if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
- ASSERT(cur->Start().InstructionIndex() >= start);
- }
-
- // Most of the ranges are in order, but not all. Keep an eye on when
- // they step backwards and reset the first_safe_point_index so we don't
- // miss any safe points.
- if (start < last_range_start) {
- first_safe_point_index = 0;
- }
- last_range_start = start;
-
- // Step across all the safe points that are before the start of this range,
- // recording how far we step in order to save doing this for the next range.
- while (first_safe_point_index < pointer_maps->length()) {
- LPointerMap* map = pointer_maps->at(first_safe_point_index);
- int safe_point = map->lithium_position();
- if (safe_point >= start) break;
- first_safe_point_index++;
- }
-
- // Step through the safe points to see whether they are in the range.
- for (int safe_point_index = first_safe_point_index;
- safe_point_index < pointer_maps->length();
- ++safe_point_index) {
- LPointerMap* map = pointer_maps->at(safe_point_index);
- int safe_point = map->lithium_position();
-
- // The safe points are sorted so we can stop searching here.
- if (safe_point - 1 > end) break;
-
- // Advance to the next active range that covers the current
- // safe point position.
- LifetimePosition safe_point_pos =
- LifetimePosition::FromInstructionIndex(safe_point);
- LiveRange* cur = range;
- while (cur != NULL && !cur->Covers(safe_point_pos)) {
- cur = cur->next();
- }
- if (cur == NULL) continue;
-
- // Check if the live range is spilled and the safe point is after
- // the spill position.
- if (range->HasAllocatedSpillOperand() &&
- safe_point >= range->spill_start_index()) {
- TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
- range->id(), range->spill_start_index(), safe_point);
- map->RecordPointer(range->GetSpillOperand(), zone());
- }
-
- if (!cur->IsSpilled()) {
- TraceAlloc("Pointer in register for range %d (start at %d) "
- "at safe point %d\n",
- cur->id(), cur->Start().Value(), safe_point);
- LOperand* operand = cur->CreateAssignedOperand(zone_);
- ASSERT(!operand->IsStackSlot());
- map->RecordPointer(operand, zone());
- }
- }
- }
-}
-
-
-void LAllocator::ProcessOsrEntry() {
- const ZoneList<LInstruction*>* instrs = chunk_->instructions();
-
- // Linear search for the OSR entry instruction in the chunk.
- int index = -1;
- while (++index < instrs->length() &&
- !instrs->at(index)->IsOsrEntry()) {
- }
- ASSERT(index < instrs->length());
- LOsrEntry* instruction = LOsrEntry::cast(instrs->at(index));
-
- LifetimePosition position = LifetimePosition::FromInstructionIndex(index);
- for (int i = 0; i < live_ranges()->length(); ++i) {
- LiveRange* range = live_ranges()->at(i);
- if (range != NULL) {
- if (range->Covers(position) &&
- range->HasRegisterAssigned() &&
- range->TopLevel()->HasAllocatedSpillOperand()) {
- int reg_index = range->assigned_register();
- LOperand* spill_operand = range->TopLevel()->GetSpillOperand();
- if (range->IsDouble()) {
- instruction->MarkSpilledDoubleRegister(reg_index, spill_operand);
- } else {
- instruction->MarkSpilledRegister(reg_index, spill_operand);
- }
- }
- }
- }
-}
-
-
-void LAllocator::AllocateGeneralRegisters() {
- HPhase phase("L_Allocate general registers", this);
- num_registers_ = Register::NumAllocatableRegisters();
- AllocateRegisters();
-}
-
-
-void LAllocator::AllocateDoubleRegisters() {
- HPhase phase("L_Allocate double registers", this);
- num_registers_ = DoubleRegister::NumAllocatableRegisters();
- mode_ = DOUBLE_REGISTERS;
- AllocateRegisters();
-}
-
-
-void LAllocator::AllocateRegisters() {
- ASSERT(unhandled_live_ranges_.is_empty());
-
- for (int i = 0; i < live_ranges_.length(); ++i) {
- if (live_ranges_[i] != NULL) {
- if (RequiredRegisterKind(live_ranges_[i]->id()) == mode_) {
- AddToUnhandledUnsorted(live_ranges_[i]);
- }
- }
- }
- SortUnhandled();
- ASSERT(UnhandledIsSorted());
-
- ASSERT(reusable_slots_.is_empty());
- ASSERT(active_live_ranges_.is_empty());
- ASSERT(inactive_live_ranges_.is_empty());
-
- if (mode_ == DOUBLE_REGISTERS) {
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- LiveRange* current = fixed_double_live_ranges_.at(i);
- if (current != NULL) {
- AddToInactive(current);
- }
- }
- } else {
- for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
- LiveRange* current = fixed_live_ranges_.at(i);
- if (current != NULL) {
- AddToInactive(current);
- }
- }
- }
-
- while (!unhandled_live_ranges_.is_empty()) {
- ASSERT(UnhandledIsSorted());
- LiveRange* current = unhandled_live_ranges_.RemoveLast();
- ASSERT(UnhandledIsSorted());
- LifetimePosition position = current->Start();
- TraceAlloc("Processing interval %d start=%d\n",
- current->id(),
- position.Value());
-
- if (current->HasAllocatedSpillOperand()) {
- TraceAlloc("Live range %d already has a spill operand\n", current->id());
- LifetimePosition next_pos = position;
- if (IsGapAt(next_pos.InstructionIndex())) {
- next_pos = next_pos.NextInstruction();
- }
- UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
- // If the range already has a spill operand and it doesn't need a
- // register immediately, split it and spill the first part of the range.
- if (pos == NULL) {
- Spill(current);
- continue;
- } else if (pos->pos().Value() >
- current->Start().NextInstruction().Value()) {
- // Do not spill live range eagerly if use position that can benefit from
- // the register is too close to the start of live range.
- SpillBetween(current, current->Start(), pos->pos());
- if (!AllocationOk()) return;
- ASSERT(UnhandledIsSorted());
- continue;
- }
- }
-
- for (int i = 0; i < active_live_ranges_.length(); ++i) {
- LiveRange* cur_active = active_live_ranges_.at(i);
- if (cur_active->End().Value() <= position.Value()) {
- ActiveToHandled(cur_active);
- --i; // The live range was removed from the list of active live ranges.
- } else if (!cur_active->Covers(position)) {
- ActiveToInactive(cur_active);
- --i; // The live range was removed from the list of active live ranges.
- }
- }
-
- for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
- LiveRange* cur_inactive = inactive_live_ranges_.at(i);
- if (cur_inactive->End().Value() <= position.Value()) {
- InactiveToHandled(cur_inactive);
- --i; // Live range was removed from the list of inactive live ranges.
- } else if (cur_inactive->Covers(position)) {
- InactiveToActive(cur_inactive);
- --i; // Live range was removed from the list of inactive live ranges.
- }
- }
-
- ASSERT(!current->HasRegisterAssigned() && !current->IsSpilled());
-
- bool result = TryAllocateFreeReg(current);
- if (!AllocationOk()) return;
-
- if (!result) AllocateBlockedReg(current);
- if (!AllocationOk()) return;
-
- if (current->HasRegisterAssigned()) {
- AddToActive(current);
- }
- }
-
- reusable_slots_.Rewind(0);
- active_live_ranges_.Rewind(0);
- inactive_live_ranges_.Rewind(0);
-}
-
-
-const char* LAllocator::RegisterName(int allocation_index) {
- if (mode_ == GENERAL_REGISTERS) {
- return Register::AllocationIndexToString(allocation_index);
- } else {
- return DoubleRegister::AllocationIndexToString(allocation_index);
- }
-}
-
-
-void LAllocator::TraceAlloc(const char* msg, ...) {
- if (FLAG_trace_alloc) {
- va_list arguments;
- va_start(arguments, msg);
- OS::VPrint(msg, arguments);
- va_end(arguments);
- }
-}
-
-
-bool LAllocator::HasTaggedValue(int virtual_register) const {
- HValue* value = graph_->LookupValue(virtual_register);
- if (value == NULL) return false;
- return value->representation().IsTagged();
-}
-
-
-RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
- if (virtual_register < first_artificial_register_) {
- HValue* value = graph_->LookupValue(virtual_register);
- if (value != NULL && value->representation().IsDouble()) {
- return DOUBLE_REGISTERS;
- }
- } else if (double_artificial_registers_.Contains(
- virtual_register - first_artificial_register_)) {
- return DOUBLE_REGISTERS;
- }
-
- return GENERAL_REGISTERS;
-}
-
-
-void LAllocator::AddToActive(LiveRange* range) {
- TraceAlloc("Add live range %d to active\n", range->id());
- active_live_ranges_.Add(range, zone());
-}
-
-
-void LAllocator::AddToInactive(LiveRange* range) {
- TraceAlloc("Add live range %d to inactive\n", range->id());
- inactive_live_ranges_.Add(range, zone());
-}
-
-
-void LAllocator::AddToUnhandledSorted(LiveRange* range) {
- if (range == NULL || range->IsEmpty()) return;
- ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
- for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
- LiveRange* cur_range = unhandled_live_ranges_.at(i);
- if (range->ShouldBeAllocatedBefore(cur_range)) {
- TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
- unhandled_live_ranges_.InsertAt(i + 1, range, zone());
- ASSERT(UnhandledIsSorted());
- return;
- }
- }
- TraceAlloc("Add live range %d to unhandled at start\n", range->id());
- unhandled_live_ranges_.InsertAt(0, range, zone());
- ASSERT(UnhandledIsSorted());
-}
-
-
-void LAllocator::AddToUnhandledUnsorted(LiveRange* range) {
- if (range == NULL || range->IsEmpty()) return;
- ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
- TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
- unhandled_live_ranges_.Add(range, zone());
-}
-
-
-static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
- ASSERT(!(*a)->ShouldBeAllocatedBefore(*b) ||
- !(*b)->ShouldBeAllocatedBefore(*a));
- if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
- if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
- return (*a)->id() - (*b)->id();
-}
-
-
-// Sort the unhandled live ranges so that the ranges to be processed first are
-// at the end of the array list. This is convenient for the register allocation
-// algorithm because it is efficient to remove elements from the end.
-void LAllocator::SortUnhandled() {
- TraceAlloc("Sort unhandled\n");
- unhandled_live_ranges_.Sort(&UnhandledSortHelper);
-}
-
-
-bool LAllocator::UnhandledIsSorted() {
- int len = unhandled_live_ranges_.length();
- for (int i = 1; i < len; i++) {
- LiveRange* a = unhandled_live_ranges_.at(i - 1);
- LiveRange* b = unhandled_live_ranges_.at(i);
- if (a->Start().Value() < b->Start().Value()) return false;
- }
- return true;
-}
-
-
-void LAllocator::FreeSpillSlot(LiveRange* range) {
- // Check that we are the last range.
- if (range->next() != NULL) return;
-
- if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
-
- int index = range->TopLevel()->GetSpillOperand()->index();
- if (index >= 0) {
- reusable_slots_.Add(range, zone());
- }
-}
-
-
-LOperand* LAllocator::TryReuseSpillSlot(LiveRange* range) {
- if (reusable_slots_.is_empty()) return NULL;
- if (reusable_slots_.first()->End().Value() >
- range->TopLevel()->Start().Value()) {
- return NULL;
- }
- LOperand* result = reusable_slots_.first()->TopLevel()->GetSpillOperand();
- reusable_slots_.Remove(0);
- return result;
-}
-
-
-void LAllocator::ActiveToHandled(LiveRange* range) {
- ASSERT(active_live_ranges_.Contains(range));
- active_live_ranges_.RemoveElement(range);
- TraceAlloc("Moving live range %d from active to handled\n", range->id());
- FreeSpillSlot(range);
-}
-
-
-void LAllocator::ActiveToInactive(LiveRange* range) {
- ASSERT(active_live_ranges_.Contains(range));
- active_live_ranges_.RemoveElement(range);
- inactive_live_ranges_.Add(range, zone());
- TraceAlloc("Moving live range %d from active to inactive\n", range->id());
-}
-
-
-void LAllocator::InactiveToHandled(LiveRange* range) {
- ASSERT(inactive_live_ranges_.Contains(range));
- inactive_live_ranges_.RemoveElement(range);
- TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
- FreeSpillSlot(range);
-}
-
-
-void LAllocator::InactiveToActive(LiveRange* range) {
- ASSERT(inactive_live_ranges_.Contains(range));
- inactive_live_ranges_.RemoveElement(range);
- active_live_ranges_.Add(range, zone());
- TraceAlloc("Moving live range %d from inactive to active\n", range->id());
-}
-
-
-// TryAllocateFreeReg and AllocateBlockedReg assume this
-// when allocating local arrays.
-STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
- Register::kMaxNumAllocatableRegisters);
-
-
-bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
- LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
-
- for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; i++) {
- free_until_pos[i] = LifetimePosition::MaxPosition();
- }
-
- for (int i = 0; i < active_live_ranges_.length(); ++i) {
- LiveRange* cur_active = active_live_ranges_.at(i);
- free_until_pos[cur_active->assigned_register()] =
- LifetimePosition::FromInstructionIndex(0);
- }
-
- for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
- LiveRange* cur_inactive = inactive_live_ranges_.at(i);
- ASSERT(cur_inactive->End().Value() > current->Start().Value());
- LifetimePosition next_intersection =
- cur_inactive->FirstIntersection(current);
- if (!next_intersection.IsValid()) continue;
- int cur_reg = cur_inactive->assigned_register();
- free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
- }
-
- UsePosition* hinted_use = current->FirstPosWithHint();
- if (hinted_use != NULL) {
- LOperand* hint = hinted_use->hint();
- if (hint->IsRegister() || hint->IsDoubleRegister()) {
- int register_index = hint->index();
- TraceAlloc(
- "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
- RegisterName(register_index),
- free_until_pos[register_index].Value(),
- current->id(),
- current->End().Value());
-
- // The desired register is free until the end of the current live range.
- if (free_until_pos[register_index].Value() >= current->End().Value()) {
- TraceAlloc("Assigning preferred reg %s to live range %d\n",
- RegisterName(register_index),
- current->id());
- SetLiveRangeAssignedRegister(current, register_index, mode_, zone_);
- return true;
- }
- }
- }
-
- // Find the register which stays free for the longest time.
- int reg = 0;
- for (int i = 1; i < RegisterCount(); ++i) {
- if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
- reg = i;
- }
- }
-
- LifetimePosition pos = free_until_pos[reg];
-
- if (pos.Value() <= current->Start().Value()) {
- // All registers are blocked.
- return false;
- }
-
- if (pos.Value() < current->End().Value()) {
- // Register reg is available at the range start but becomes blocked before
- // the range end. Split current at position where it becomes blocked.
- LiveRange* tail = SplitRangeAt(current, pos);
- if (!AllocationOk()) return false;
- AddToUnhandledSorted(tail);
- }
-
-
- // Register reg is available at the range start and is free until
- // the range end.
- ASSERT(pos.Value() >= current->End().Value());
- TraceAlloc("Assigning free reg %s to live range %d\n",
- RegisterName(reg),
- current->id());
- SetLiveRangeAssignedRegister(current, reg, mode_, zone_);
-
- return true;
-}
-
-
-void LAllocator::AllocateBlockedReg(LiveRange* current) {
- UsePosition* register_use = current->NextRegisterPosition(current->Start());
- if (register_use == NULL) {
- // There is no use in the current live range that requires a register.
- // We can just spill it.
- Spill(current);
- return;
- }
-
-
- LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
- LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
-
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
- use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
- }
-
- for (int i = 0; i < active_live_ranges_.length(); ++i) {
- LiveRange* range = active_live_ranges_[i];
- int cur_reg = range->assigned_register();
- if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
- block_pos[cur_reg] = use_pos[cur_reg] =
- LifetimePosition::FromInstructionIndex(0);
- } else {
- UsePosition* next_use = range->NextUsePositionRegisterIsBeneficial(
- current->Start());
- if (next_use == NULL) {
- use_pos[cur_reg] = range->End();
- } else {
- use_pos[cur_reg] = next_use->pos();
- }
- }
- }
-
- for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
- LiveRange* range = inactive_live_ranges_.at(i);
- ASSERT(range->End().Value() > current->Start().Value());
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (!next_intersection.IsValid()) continue;
- int cur_reg = range->assigned_register();
- if (range->IsFixed()) {
- block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
- use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
- } else {
- use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
- }
- }
-
- int reg = 0;
- for (int i = 1; i < RegisterCount(); ++i) {
- if (use_pos[i].Value() > use_pos[reg].Value()) {
- reg = i;
- }
- }
-
- LifetimePosition pos = use_pos[reg];
-
- if (pos.Value() < register_use->pos().Value()) {
- // All registers are blocked before the first use that requires a register.
- // Spill starting part of live range up to that use.
- //
- // Corner case: the first use position is equal to the start of the range.
- // In this case we have nothing to spill and SpillBetween will just return
- // this range to the list of unhandled ones. This will lead to the infinite
- // loop.
- ASSERT(current->Start().Value() < register_use->pos().Value());
- SpillBetween(current, current->Start(), register_use->pos());
- return;
- }
-
- if (block_pos[reg].Value() < current->End().Value()) {
- // Register becomes blocked before the current range end. Split before that
- // position.
- LiveRange* tail = SplitBetween(current,
- current->Start(),
- block_pos[reg].InstructionStart());
- AddToUnhandledSorted(tail);
- }
-
- // Register reg is not blocked for the whole range.
- ASSERT(block_pos[reg].Value() >= current->End().Value());
- TraceAlloc("Assigning blocked reg %s to live range %d\n",
- RegisterName(reg),
- current->id());
- SetLiveRangeAssignedRegister(current, reg, mode_, zone_);
-
- // This register was not free. Thus we need to find and spill
- // parts of active and inactive live regions that use the same register
- // at the same lifetime positions as current.
- SplitAndSpillIntersecting(current);
-}
-
-
-void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
- ASSERT(current->HasRegisterAssigned());
- int reg = current->assigned_register();
- LifetimePosition split_pos = current->Start();
- for (int i = 0; i < active_live_ranges_.length(); ++i) {
- LiveRange* range = active_live_ranges_[i];
- if (range->assigned_register() == reg) {
- UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- if (next_pos == NULL) {
- SpillAfter(range, split_pos);
- } else {
- SpillBetween(range, split_pos, next_pos->pos());
- }
- ActiveToHandled(range);
- --i;
- }
- }
-
- for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
- LiveRange* range = inactive_live_ranges_[i];
- ASSERT(range->End().Value() > current->Start().Value());
- if (range->assigned_register() == reg && !range->IsFixed()) {
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (next_intersection.IsValid()) {
- UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- if (next_pos == NULL) {
- SpillAfter(range, split_pos);
- } else {
- next_intersection = Min(next_intersection, next_pos->pos());
- SpillBetween(range, split_pos, next_intersection);
- }
- InactiveToHandled(range);
- --i;
- }
- }
- }
-}
-
-
-bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
- return pos.IsInstructionStart() &&
- InstructionAt(pos.InstructionIndex())->IsLabel();
-}
-
-
-LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) {
- ASSERT(!range->IsFixed());
- TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
-
- if (pos.Value() <= range->Start().Value()) return range;
-
- // We can't properly connect liveranges if split occured at the end
- // of control instruction.
- ASSERT(pos.IsInstructionStart() ||
- !chunk_->instructions()->at(pos.InstructionIndex())->IsControl());
-
- LiveRange* result = LiveRangeFor(GetVirtualRegister());
- if (!AllocationOk()) return NULL;
- range->SplitAt(pos, result, zone_);
- return result;
-}
-
-
-LiveRange* LAllocator::SplitBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end) {
- ASSERT(!range->IsFixed());
- TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
- range->id(),
- start.Value(),
- end.Value());
-
- LifetimePosition split_pos = FindOptimalSplitPos(start, end);
- ASSERT(split_pos.Value() >= start.Value());
- return SplitRangeAt(range, split_pos);
-}
-
-
-LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
- LifetimePosition end) {
- int start_instr = start.InstructionIndex();
- int end_instr = end.InstructionIndex();
- ASSERT(start_instr <= end_instr);
-
- // We have no choice
- if (start_instr == end_instr) return end;
-
- HBasicBlock* start_block = GetBlock(start);
- HBasicBlock* end_block = GetBlock(end);
-
- if (end_block == start_block) {
- // The interval is split in the same basic block. Split at the latest
- // possible position.
- return end;
- }
-
- HBasicBlock* block = end_block;
- // Find header of outermost loop.
- while (block->parent_loop_header() != NULL &&
- block->parent_loop_header()->block_id() > start_block->block_id()) {
- block = block->parent_loop_header();
- }
-
- // We did not find any suitable outer loop. Split at the latest possible
- // position unless end_block is a loop header itself.
- if (block == end_block && !end_block->IsLoopHeader()) return end;
-
- return LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
-}
-
-
-void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
- LiveRange* second_part = SplitRangeAt(range, pos);
- if (!AllocationOk()) return;
- Spill(second_part);
-}
-
-
-void LAllocator::SpillBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end) {
- ASSERT(start.Value() < end.Value());
- LiveRange* second_part = SplitRangeAt(range, start);
- if (!AllocationOk()) return;
-
- if (second_part->Start().Value() < end.Value()) {
- // The split result intersects with [start, end[.
- // Split it at position between ]start+1, end[, spill the middle part
- // and put the rest to unhandled.
- LiveRange* third_part = SplitBetween(
- second_part,
- second_part->Start().InstructionEnd(),
- end.PrevInstruction().InstructionEnd());
-
- ASSERT(third_part != second_part);
-
- Spill(second_part);
- AddToUnhandledSorted(third_part);
- } else {
- // The split result does not intersect with [start, end[.
- // Nothing to spill. Just put it to unhandled as whole.
- AddToUnhandledSorted(second_part);
- }
-}
-
-
-void LAllocator::Spill(LiveRange* range) {
- ASSERT(!range->IsSpilled());
- TraceAlloc("Spilling live range %d\n", range->id());
- LiveRange* first = range->TopLevel();
-
- if (!first->HasAllocatedSpillOperand()) {
- LOperand* op = TryReuseSpillSlot(range);
- if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
- first->SetSpillOperand(op);
- }
- range->MakeSpilled(zone_);
-}
-
-
-int LAllocator::RegisterCount() const {
- return num_registers_;
-}
-
-
-#ifdef DEBUG
-
-
-void LAllocator::Verify() const {
- for (int i = 0; i < live_ranges()->length(); ++i) {
- LiveRange* current = live_ranges()->at(i);
- if (current != NULL) current->Verify();
- }
-}
-
-
-#endif
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/lithium-allocator.h b/src/3rdparty/v8/src/lithium-allocator.h
deleted file mode 100644
index 2953550..0000000
--- a/src/3rdparty/v8/src/lithium-allocator.h
+++ /dev/null
@@ -1,622 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LITHIUM_ALLOCATOR_H_
-#define V8_LITHIUM_ALLOCATOR_H_
-
-#include "v8.h"
-
-#include "allocation.h"
-#include "lithium.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class HBasicBlock;
-class HGraph;
-class HInstruction;
-class HPhi;
-class HTracer;
-class HValue;
-class BitVector;
-class StringStream;
-
-class LArgument;
-class LPlatformChunk;
-class LOperand;
-class LUnallocated;
-class LConstantOperand;
-class LGap;
-class LParallelMove;
-class LPointerMap;
-class LStackSlot;
-class LRegister;
-
-
-// This class represents a single point of a LOperand's lifetime.
-// For each lithium instruction there are exactly two lifetime positions:
-// the beginning and the end of the instruction. Lifetime positions for
-// different lithium instructions are disjoint.
-class LifetimePosition {
- public:
- // Return the lifetime position that corresponds to the beginning of
- // the instruction with the given index.
- static LifetimePosition FromInstructionIndex(int index) {
- return LifetimePosition(index * kStep);
- }
-
- // Returns a numeric representation of this lifetime position.
- int Value() const {
- return value_;
- }
-
- // Returns the index of the instruction to which this lifetime position
- // corresponds.
- int InstructionIndex() const {
- ASSERT(IsValid());
- return value_ / kStep;
- }
-
- // Returns true if this lifetime position corresponds to the instruction
- // start.
- bool IsInstructionStart() const {
- return (value_ & (kStep - 1)) == 0;
- }
-
- // Returns the lifetime position for the start of the instruction which
- // corresponds to this lifetime position.
- LifetimePosition InstructionStart() const {
- ASSERT(IsValid());
- return LifetimePosition(value_ & ~(kStep - 1));
- }
-
- // Returns the lifetime position for the end of the instruction which
- // corresponds to this lifetime position.
- LifetimePosition InstructionEnd() const {
- ASSERT(IsValid());
- return LifetimePosition(InstructionStart().Value() + kStep/2);
- }
-
- // Returns the lifetime position for the beginning of the next instruction.
- LifetimePosition NextInstruction() const {
- ASSERT(IsValid());
- return LifetimePosition(InstructionStart().Value() + kStep);
- }
-
- // Returns the lifetime position for the beginning of the previous
- // instruction.
- LifetimePosition PrevInstruction() const {
- ASSERT(IsValid());
- ASSERT(value_ > 1);
- return LifetimePosition(InstructionStart().Value() - kStep);
- }
-
- // Constructs the lifetime position which does not correspond to any
- // instruction.
- LifetimePosition() : value_(-1) {}
-
- // Returns true if this lifetime positions corrensponds to some
- // instruction.
- bool IsValid() const { return value_ != -1; }
-
- static inline LifetimePosition Invalid() { return LifetimePosition(); }
-
- static inline LifetimePosition MaxPosition() {
- // We have to use this kind of getter instead of static member due to
- // crash bug in GDB.
- return LifetimePosition(kMaxInt);
- }
-
- private:
- static const int kStep = 2;
-
- // Code relies on kStep being a power of two.
- STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
-
- explicit LifetimePosition(int value) : value_(value) { }
-
- int value_;
-};
-
-
-enum RegisterKind {
- GENERAL_REGISTERS,
- DOUBLE_REGISTERS
-};
-
-
-// A register-allocator view of a Lithium instruction. It contains the id of
-// the output operand and a list of input operand uses.
-
-class LInstruction;
-class LEnvironment;
-
-// Iterator for non-null temp operands.
-class TempIterator BASE_EMBEDDED {
- public:
- inline explicit TempIterator(LInstruction* instr);
- inline bool Done();
- inline LOperand* Current();
- inline void Advance();
-
- private:
- inline void SkipUninteresting();
- LInstruction* instr_;
- int limit_;
- int current_;
-};
-
-
-// Iterator for non-constant input operands.
-class InputIterator BASE_EMBEDDED {
- public:
- inline explicit InputIterator(LInstruction* instr);
- inline bool Done();
- inline LOperand* Current();
- inline void Advance();
-
- private:
- inline void SkipUninteresting();
- LInstruction* instr_;
- int limit_;
- int current_;
-};
-
-
-class UseIterator BASE_EMBEDDED {
- public:
- inline explicit UseIterator(LInstruction* instr);
- inline bool Done();
- inline LOperand* Current();
- inline void Advance();
-
- private:
- InputIterator input_iterator_;
- DeepIterator env_iterator_;
-};
-
-
-// Representation of the non-empty interval [start,end[.
-class UseInterval: public ZoneObject {
- public:
- UseInterval(LifetimePosition start, LifetimePosition end)
- : start_(start), end_(end), next_(NULL) {
- ASSERT(start.Value() < end.Value());
- }
-
- LifetimePosition start() const { return start_; }
- LifetimePosition end() const { return end_; }
- UseInterval* next() const { return next_; }
-
- // Split this interval at the given position without effecting the
- // live range that owns it. The interval must contain the position.
- void SplitAt(LifetimePosition pos, Zone* zone);
-
- // If this interval intersects with other return smallest position
- // that belongs to both of them.
- LifetimePosition Intersect(const UseInterval* other) const {
- if (other->start().Value() < start_.Value()) return other->Intersect(this);
- if (other->start().Value() < end_.Value()) return other->start();
- return LifetimePosition::Invalid();
- }
-
- bool Contains(LifetimePosition point) const {
- return start_.Value() <= point.Value() && point.Value() < end_.Value();
- }
-
- private:
- void set_start(LifetimePosition start) { start_ = start; }
- void set_next(UseInterval* next) { next_ = next; }
-
- LifetimePosition start_;
- LifetimePosition end_;
- UseInterval* next_;
-
- friend class LiveRange; // Assigns to start_.
-};
-
-// Representation of a use position.
-class UsePosition: public ZoneObject {
- public:
- UsePosition(LifetimePosition pos, LOperand* operand);
-
- LOperand* operand() const { return operand_; }
- bool HasOperand() const { return operand_ != NULL; }
-
- LOperand* hint() const { return hint_; }
- void set_hint(LOperand* hint) { hint_ = hint; }
- bool HasHint() const;
- bool RequiresRegister() const;
- bool RegisterIsBeneficial() const;
-
- LifetimePosition pos() const { return pos_; }
- UsePosition* next() const { return next_; }
-
- private:
- void set_next(UsePosition* next) { next_ = next; }
-
- LOperand* operand_;
- LOperand* hint_;
- LifetimePosition pos_;
- UsePosition* next_;
- bool requires_reg_;
- bool register_beneficial_;
-
- friend class LiveRange;
-};
-
-// Representation of SSA values' live ranges as a collection of (continuous)
-// intervals over the instruction ordering.
-class LiveRange: public ZoneObject {
- public:
- static const int kInvalidAssignment = 0x7fffffff;
-
- LiveRange(int id, Zone* zone);
-
- UseInterval* first_interval() const { return first_interval_; }
- UsePosition* first_pos() const { return first_pos_; }
- LiveRange* parent() const { return parent_; }
- LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
- LiveRange* next() const { return next_; }
- bool IsChild() const { return parent() != NULL; }
- int id() const { return id_; }
- bool IsFixed() const { return id_ < 0; }
- bool IsEmpty() const { return first_interval() == NULL; }
- LOperand* CreateAssignedOperand(Zone* zone);
- int assigned_register() const { return assigned_register_; }
- int spill_start_index() const { return spill_start_index_; }
- void set_assigned_register(int reg,
- RegisterKind register_kind,
- Zone* zone);
- void MakeSpilled(Zone* zone);
-
- // Returns use position in this live range that follows both start
- // and last processed use position.
- // Modifies internal state of live range!
- UsePosition* NextUsePosition(LifetimePosition start);
-
- // Returns use position for which register is required in this live
- // range and which follows both start and last processed use position
- // Modifies internal state of live range!
- UsePosition* NextRegisterPosition(LifetimePosition start);
-
- // Returns use position for which register is beneficial in this live
- // range and which follows both start and last processed use position
- // Modifies internal state of live range!
- UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
-
- // Can this live range be spilled at this position.
- bool CanBeSpilled(LifetimePosition pos);
-
- // Split this live range at the given position which must follow the start of
- // the range.
- // All uses following the given position will be moved from this
- // live range to the result live range.
- void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
-
- bool IsDouble() const { return is_double_; }
- bool HasRegisterAssigned() const {
- return assigned_register_ != kInvalidAssignment;
- }
- bool IsSpilled() const { return spilled_; }
- UsePosition* FirstPosWithHint() const;
-
- LOperand* FirstHint() const {
- UsePosition* pos = FirstPosWithHint();
- if (pos != NULL) return pos->hint();
- return NULL;
- }
-
- LifetimePosition Start() const {
- ASSERT(!IsEmpty());
- return first_interval()->start();
- }
-
- LifetimePosition End() const {
- ASSERT(!IsEmpty());
- return last_interval_->end();
- }
-
- bool HasAllocatedSpillOperand() const;
- LOperand* GetSpillOperand() const { return spill_operand_; }
- void SetSpillOperand(LOperand* operand);
-
- void SetSpillStartIndex(int start) {
- spill_start_index_ = Min(start, spill_start_index_);
- }
-
- bool ShouldBeAllocatedBefore(const LiveRange* other) const;
- bool CanCover(LifetimePosition position) const;
- bool Covers(LifetimePosition position);
- LifetimePosition FirstIntersection(LiveRange* other);
-
- // Add a new interval or a new use position to this live range.
- void EnsureInterval(LifetimePosition start,
- LifetimePosition end,
- Zone* zone);
- void AddUseInterval(LifetimePosition start,
- LifetimePosition end,
- Zone* zone);
- UsePosition* AddUsePosition(LifetimePosition pos,
- LOperand* operand,
- Zone* zone);
-
- // Shorten the most recently added interval by setting a new start.
- void ShortenTo(LifetimePosition start);
-
-#ifdef DEBUG
- // True if target overlaps an existing interval.
- bool HasOverlap(UseInterval* target) const;
- void Verify() const;
-#endif
-
- private:
- void ConvertOperands(Zone* zone);
- UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
- void AdvanceLastProcessedMarker(UseInterval* to_start_of,
- LifetimePosition but_not_past) const;
-
- int id_;
- bool spilled_;
- bool is_double_;
- int assigned_register_;
- UseInterval* last_interval_;
- UseInterval* first_interval_;
- UsePosition* first_pos_;
- LiveRange* parent_;
- LiveRange* next_;
- // This is used as a cache, it doesn't affect correctness.
- mutable UseInterval* current_interval_;
- UsePosition* last_processed_use_;
- LOperand* spill_operand_;
- int spill_start_index_;
-};
-
-
-class LAllocator BASE_EMBEDDED {
- public:
- LAllocator(int first_virtual_register, HGraph* graph);
-
- static void TraceAlloc(const char* msg, ...);
-
- // Checks whether the value of a given virtual register is tagged.
- bool HasTaggedValue(int virtual_register) const;
-
- // Returns the register kind required by the given virtual register.
- RegisterKind RequiredRegisterKind(int virtual_register) const;
-
- bool Allocate(LChunk* chunk);
-
- const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
- const Vector<LiveRange*>* fixed_live_ranges() const {
- return &fixed_live_ranges_;
- }
- const Vector<LiveRange*>* fixed_double_live_ranges() const {
- return &fixed_double_live_ranges_;
- }
-
- LPlatformChunk* chunk() const { return chunk_; }
- HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
-
- int GetVirtualRegister() {
- if (next_virtual_register_ > LUnallocated::kMaxVirtualRegisters) {
- allocation_ok_ = false;
- }
- return next_virtual_register_++;
- }
-
- bool AllocationOk() { return allocation_ok_; }
-
- void MarkAsOsrEntry() {
- // There can be only one.
- ASSERT(!has_osr_entry_);
- // Simply set a flag to find and process instruction later.
- has_osr_entry_ = true;
- }
-
-#ifdef DEBUG
- void Verify() const;
-#endif
-
- BitVector* assigned_registers() {
- return assigned_registers_;
- }
- BitVector* assigned_double_registers() {
- return assigned_double_registers_;
- }
-
- private:
- void MeetRegisterConstraints();
- void ResolvePhis();
- void BuildLiveRanges();
- void AllocateGeneralRegisters();
- void AllocateDoubleRegisters();
- void ConnectRanges();
- void ResolveControlFlow();
- void PopulatePointerMaps();
- void ProcessOsrEntry();
- void AllocateRegisters();
- bool CanEagerlyResolveControlFlow(HBasicBlock* block) const;
- inline bool SafePointsAreInOrder() const;
-
- // Liveness analysis support.
- void InitializeLivenessAnalysis();
- BitVector* ComputeLiveOut(HBasicBlock* block);
- void AddInitialIntervals(HBasicBlock* block, BitVector* live_out);
- void ProcessInstructions(HBasicBlock* block, BitVector* live);
- void MeetRegisterConstraints(HBasicBlock* block);
- void MeetConstraintsBetween(LInstruction* first,
- LInstruction* second,
- int gap_index);
- void ResolvePhis(HBasicBlock* block);
-
- // Helper methods for building intervals.
- LOperand* AllocateFixed(LUnallocated* operand, int pos, bool is_tagged);
- LiveRange* LiveRangeFor(LOperand* operand);
- void Define(LifetimePosition position, LOperand* operand, LOperand* hint);
- void Use(LifetimePosition block_start,
- LifetimePosition position,
- LOperand* operand,
- LOperand* hint);
- void AddConstraintsGapMove(int index, LOperand* from, LOperand* to);
-
- // Helper methods for updating the life range lists.
- void AddToActive(LiveRange* range);
- void AddToInactive(LiveRange* range);
- void AddToUnhandledSorted(LiveRange* range);
- void AddToUnhandledUnsorted(LiveRange* range);
- void SortUnhandled();
- bool UnhandledIsSorted();
- void ActiveToHandled(LiveRange* range);
- void ActiveToInactive(LiveRange* range);
- void InactiveToHandled(LiveRange* range);
- void InactiveToActive(LiveRange* range);
- void FreeSpillSlot(LiveRange* range);
- LOperand* TryReuseSpillSlot(LiveRange* range);
-
- // Helper methods for allocating registers.
- bool TryAllocateFreeReg(LiveRange* range);
- void AllocateBlockedReg(LiveRange* range);
-
- // Live range splitting helpers.
-
- // Split the given range at the given position.
- // If range starts at or after the given position then the
- // original range is returned.
- // Otherwise returns the live range that starts at pos and contains
- // all uses from the original range that follow pos. Uses at pos will
- // still be owned by the original range after splitting.
- LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
-
- // Split the given range in a position from the interval [start, end].
- LiveRange* SplitBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end);
-
- // Find a lifetime position in the interval [start, end] which
- // is optimal for splitting: it is either header of the outermost
- // loop covered by this interval or the latest possible position.
- LifetimePosition FindOptimalSplitPos(LifetimePosition start,
- LifetimePosition end);
-
- // Spill the given life range after position pos.
- void SpillAfter(LiveRange* range, LifetimePosition pos);
-
- // Spill the given life range after position start and up to position end.
- void SpillBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end);
-
- void SplitAndSpillIntersecting(LiveRange* range);
-
- void Spill(LiveRange* range);
- bool IsBlockBoundary(LifetimePosition pos);
-
- // Helper methods for resolving control flow.
- void ResolveControlFlow(LiveRange* range,
- HBasicBlock* block,
- HBasicBlock* pred);
-
- inline void SetLiveRangeAssignedRegister(LiveRange* range,
- int reg,
- RegisterKind register_kind,
- Zone* zone);
-
- // Return parallel move that should be used to connect ranges split at the
- // given position.
- LParallelMove* GetConnectingParallelMove(LifetimePosition pos);
-
- // Return the block which contains give lifetime position.
- HBasicBlock* GetBlock(LifetimePosition pos);
-
- // Helper methods for the fixed registers.
- int RegisterCount() const;
- static int FixedLiveRangeID(int index) { return -index - 1; }
- static int FixedDoubleLiveRangeID(int index);
- LiveRange* FixedLiveRangeFor(int index);
- LiveRange* FixedDoubleLiveRangeFor(int index);
- LiveRange* LiveRangeFor(int index);
- HPhi* LookupPhi(LOperand* operand) const;
- LGap* GetLastGap(HBasicBlock* block);
-
- const char* RegisterName(int allocation_index);
-
- inline bool IsGapAt(int index);
-
- inline LInstruction* InstructionAt(int index);
-
- inline LGap* GapAt(int index);
-
- Zone* zone_;
-
- LPlatformChunk* chunk_;
-
- // During liveness analysis keep a mapping from block id to live_in sets
- // for blocks already analyzed.
- ZoneList<BitVector*> live_in_sets_;
-
- // Liveness analysis results.
- ZoneList<LiveRange*> live_ranges_;
-
- // Lists of live ranges
- EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
- fixed_live_ranges_;
- EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
- fixed_double_live_ranges_;
- ZoneList<LiveRange*> unhandled_live_ranges_;
- ZoneList<LiveRange*> active_live_ranges_;
- ZoneList<LiveRange*> inactive_live_ranges_;
- ZoneList<LiveRange*> reusable_slots_;
-
- // Next virtual register number to be assigned to temporaries.
- int next_virtual_register_;
- int first_artificial_register_;
- GrowableBitVector double_artificial_registers_;
-
- RegisterKind mode_;
- int num_registers_;
-
- BitVector* assigned_registers_;
- BitVector* assigned_double_registers_;
-
- HGraph* graph_;
-
- bool has_osr_entry_;
-
- // Indicates success or failure during register allocation.
- bool allocation_ok_;
-
- DISALLOW_COPY_AND_ASSIGN(LAllocator);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_LITHIUM_ALLOCATOR_H_
diff --git a/src/3rdparty/v8/src/lithium.cc b/src/3rdparty/v8/src/lithium.cc
deleted file mode 100644
index 09c0f44..0000000
--- a/src/3rdparty/v8/src/lithium.cc
+++ /dev/null
@@ -1,495 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "lithium.h"
-#include "scopes.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-ia32.h"
-#include "ia32/lithium-codegen-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-x64.h"
-#include "x64/lithium-codegen-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-arm.h"
-#include "arm/lithium-codegen-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-mips.h"
-#include "mips/lithium-codegen-mips.h"
-#else
-#error "Unknown architecture."
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-void LOperand::PrintTo(StringStream* stream) {
- LUnallocated* unalloc = NULL;
- switch (kind()) {
- case INVALID:
- stream->Add("(0)");
- break;
- case UNALLOCATED:
- unalloc = LUnallocated::cast(this);
- stream->Add("v%d", unalloc->virtual_register());
- switch (unalloc->policy()) {
- case LUnallocated::NONE:
- break;
- case LUnallocated::FIXED_REGISTER: {
- const char* register_name =
- Register::AllocationIndexToString(unalloc->fixed_index());
- stream->Add("(=%s)", register_name);
- break;
- }
- case LUnallocated::FIXED_DOUBLE_REGISTER: {
- const char* double_register_name =
- DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
- stream->Add("(=%s)", double_register_name);
- break;
- }
- case LUnallocated::FIXED_SLOT:
- stream->Add("(=%dS)", unalloc->fixed_index());
- break;
- case LUnallocated::MUST_HAVE_REGISTER:
- stream->Add("(R)");
- break;
- case LUnallocated::WRITABLE_REGISTER:
- stream->Add("(WR)");
- break;
- case LUnallocated::SAME_AS_FIRST_INPUT:
- stream->Add("(1)");
- break;
- case LUnallocated::ANY:
- stream->Add("(-)");
- break;
- }
- break;
- case CONSTANT_OPERAND:
- stream->Add("[constant:%d]", index());
- break;
- case STACK_SLOT:
- stream->Add("[stack:%d]", index());
- break;
- case DOUBLE_STACK_SLOT:
- stream->Add("[double_stack:%d]", index());
- break;
- case REGISTER:
- stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
- break;
- case DOUBLE_REGISTER:
- stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
- break;
- case ARGUMENT:
- stream->Add("[arg:%d]", index());
- break;
- }
-}
-
-#define DEFINE_OPERAND_CACHE(name, type) \
- L##name* L##name::cache = NULL; \
- \
- void L##name::SetUpCache() { \
- if (cache) return; \
- cache = new L##name[kNumCachedOperands]; \
- for (int i = 0; i < kNumCachedOperands; i++) { \
- cache[i].ConvertTo(type, i); \
- } \
- } \
- \
- void L##name::TearDownCache() { \
- delete[] cache; \
- }
-
-LITHIUM_OPERAND_LIST(DEFINE_OPERAND_CACHE)
-#undef DEFINE_OPERAND_CACHE
-
-void LOperand::SetUpCaches() {
-#define LITHIUM_OPERAND_SETUP(name, type) L##name::SetUpCache();
- LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
-#undef LITHIUM_OPERAND_SETUP
-}
-
-
-void LOperand::TearDownCaches() {
-#define LITHIUM_OPERAND_TEARDOWN(name, type) L##name::TearDownCache();
- LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
-#undef LITHIUM_OPERAND_TEARDOWN
-}
-
-
-bool LParallelMove::IsRedundant() const {
- for (int i = 0; i < move_operands_.length(); ++i) {
- if (!move_operands_[i].IsRedundant()) return false;
- }
- return true;
-}
-
-
-void LParallelMove::PrintDataTo(StringStream* stream) const {
- bool first = true;
- for (int i = 0; i < move_operands_.length(); ++i) {
- if (!move_operands_[i].IsEliminated()) {
- LOperand* source = move_operands_[i].source();
- LOperand* destination = move_operands_[i].destination();
- if (!first) stream->Add(" ");
- first = false;
- if (source->Equals(destination)) {
- destination->PrintTo(stream);
- } else {
- destination->PrintTo(stream);
- stream->Add(" = ");
- source->PrintTo(stream);
- }
- stream->Add(";");
- }
- }
-}
-
-
-void LEnvironment::PrintTo(StringStream* stream) {
- stream->Add("[id=%d|", ast_id().ToInt());
- if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
- stream->Add("deopt_id=%d|", deoptimization_index());
- }
- stream->Add("[parameters=%d|", parameter_count());
- stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
- for (int i = 0; i < values_.length(); ++i) {
- if (i != 0) stream->Add(";");
- if (values_[i] == NULL) {
- stream->Add("[hole]");
- } else {
- values_[i]->PrintTo(stream);
- }
- }
- stream->Add("]");
-}
-
-
-void LPointerMap::RecordPointer(LOperand* op, Zone* zone) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- pointer_operands_.Add(op, zone);
-}
-
-
-void LPointerMap::RemovePointer(LOperand* op) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- for (int i = 0; i < pointer_operands_.length(); ++i) {
- if (pointer_operands_[i]->Equals(op)) {
- pointer_operands_.Remove(i);
- --i;
- }
- }
-}
-
-
-void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- untagged_operands_.Add(op, zone);
-}
-
-
-void LPointerMap::PrintTo(StringStream* stream) {
- stream->Add("{");
- for (int i = 0; i < pointer_operands_.length(); ++i) {
- if (i != 0) stream->Add(";");
- pointer_operands_[i]->PrintTo(stream);
- }
- stream->Add("} @%d", position());
-}
-
-
-int ElementsKindToShiftSize(ElementsKind elements_kind) {
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return 0;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return 1;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- return 2;
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return 3;
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- return kPointerSizeLog2;
- }
- UNREACHABLE();
- return 0;
-}
-
-
-int StackSlotOffset(int index) {
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return -(index + 3) * kPointerSize;
- } else {
- // Incoming parameter. Skip the return address.
- return -(index - 1) * kPointerSize;
- }
-}
-
-
-LChunk::LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32, graph->zone()),
- pointer_maps_(8, graph->zone()),
- inlined_closures_(1, graph->zone()) {
-}
-
-
-LLabel* LChunk::GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
-}
-
-
-int LChunk::LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
-}
-
-Label* LChunk::GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- ASSERT(!label->HasReplacement());
- return label->label();
-}
-
-void LChunk::MarkEmptyBlocks() {
- HPhase phase("L_Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
-
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
-
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
- }
- }
-}
-
-
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap, zone());
- index = instructions_.length();
- instructions_.Add(instr, zone());
- } else {
- index = instructions_.length();
- instructions_.Add(instr, zone());
- instructions_.Add(gap, zone());
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map(), zone());
- instr->pointer_map()->set_lithium_position(index);
- }
-}
-
-
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id(), zone());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
- ASSERT(result < 0);
- return result;
-}
-
-
-// A parameter relative to ebp in the arguments stub.
-int LChunk::ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(
- LGap::START, zone())->AddMove(from, to, zone());
-}
-
-
-HConstant* LChunk::LookupConstant(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()));
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunk::NewChunk(HGraph* graph) {
- NoHandleAllocation no_handles(graph->isolate());
- AssertNoAllocation no_gc;
-
- int values = graph->GetMaximumValueID();
- CompilationInfo* info = graph->info();
- if (values > LUnallocated::kMaxVirtualRegisters) {
- info->set_bailout_reason("not enough virtual registers for values");
- return NULL;
- }
- LAllocator allocator(values, graph);
- LChunkBuilder builder(info, graph, &allocator);
- LChunk* chunk = builder.Build();
- if (chunk == NULL) return NULL;
-
- if (!allocator.Allocate(chunk)) {
- info->set_bailout_reason("not enough virtual registers (regalloc)");
- return NULL;
- }
-
- chunk->set_allocated_double_registers(
- allocator.assigned_double_registers());
-
- return chunk;
-}
-
-
-Handle<Code> LChunk::Codegen(Code::Kind kind) {
- MacroAssembler assembler(info()->isolate(), NULL, 0);
- LOG_CODE_EVENT(info()->isolate(),
- CodeStartLinePosInfoRecordEvent(
- assembler.positions_recorder()));
- LCodeGen generator(this, &assembler, info());
-
- MarkEmptyBlocks();
-
- if (generator.GenerateCode()) {
- if (FLAG_trace_codegen) {
- PrintF("Crankshaft Compiler - ");
- }
- CodeGenerator::MakeCodePrologue(info());
- Code::Flags flags = Code::ComputeFlags(kind);
- Handle<Code> code =
- CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
- generator.FinishCode(code);
-
- if (!code.is_null()) {
- void* jit_handler_data =
- assembler.positions_recorder()->DetachJITHandlerData();
- LOG_CODE_EVENT(info()->isolate(),
- CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
- }
-
- CodeGenerator::PrintCode(code, info());
- return code;
- }
- return Handle<Code>::null();
-}
-
-
-void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
- allocated_double_registers_ = allocated_registers;
- BitVector* doubles = allocated_double_registers();
- BitVector::Iterator iterator(doubles);
- while (!iterator.Done()) {
- if (info()->saves_caller_doubles()) {
- if (kDoubleSize == kPointerSize * 2) {
- spill_slot_count_ += 2;
- } else {
- spill_slot_count_++;
- }
- }
- iterator.Advance();
- }
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/lithium.h b/src/3rdparty/v8/src/lithium.h
deleted file mode 100644
index 420a262..0000000
--- a/src/3rdparty/v8/src/lithium.h
+++ /dev/null
@@ -1,722 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LITHIUM_H_
-#define V8_LITHIUM_H_
-
-#include "allocation.h"
-#include "hydrogen.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-#define LITHIUM_OPERAND_LIST(V) \
- V(ConstantOperand, CONSTANT_OPERAND) \
- V(StackSlot, STACK_SLOT) \
- V(DoubleStackSlot, DOUBLE_STACK_SLOT) \
- V(Register, REGISTER) \
- V(DoubleRegister, DOUBLE_REGISTER)
-
-
-class LOperand: public ZoneObject {
- public:
- enum Kind {
- INVALID,
- UNALLOCATED,
- CONSTANT_OPERAND,
- STACK_SLOT,
- DOUBLE_STACK_SLOT,
- REGISTER,
- DOUBLE_REGISTER,
- ARGUMENT
- };
-
- LOperand() : value_(KindField::encode(INVALID)) { }
-
- Kind kind() const { return KindField::decode(value_); }
- int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
-#define LITHIUM_OPERAND_PREDICATE(name, type) \
- bool Is##name() const { return kind() == type; }
- LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_PREDICATE)
- LITHIUM_OPERAND_PREDICATE(Argument, ARGUMENT)
- LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED)
- LITHIUM_OPERAND_PREDICATE(Ignored, INVALID)
-#undef LITHIUM_OPERAND_PREDICATE
- bool Equals(LOperand* other) const { return value_ == other->value_; }
-
- void PrintTo(StringStream* stream);
- void ConvertTo(Kind kind, int index) {
- value_ = KindField::encode(kind);
- value_ |= index << kKindFieldWidth;
- ASSERT(this->index() == index);
- }
-
- // Calls SetUpCache()/TearDownCache() for each subclass.
- static void SetUpCaches();
- static void TearDownCaches();
-
- protected:
- static const int kKindFieldWidth = 3;
- class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
-
- LOperand(Kind kind, int index) { ConvertTo(kind, index); }
-
- unsigned value_;
-};
-
-
-class LUnallocated: public LOperand {
- public:
- enum Policy {
- NONE,
- ANY,
- FIXED_REGISTER,
- FIXED_DOUBLE_REGISTER,
- FIXED_SLOT,
- MUST_HAVE_REGISTER,
- WRITABLE_REGISTER,
- SAME_AS_FIRST_INPUT
- };
-
- // Lifetime of operand inside the instruction.
- enum Lifetime {
- // USED_AT_START operand is guaranteed to be live only at
- // instruction start. Register allocator is free to assign the same register
- // to some other operand used inside instruction (i.e. temporary or
- // output).
- USED_AT_START,
-
- // USED_AT_END operand is treated as live until the end of
- // instruction. This means that register allocator will not reuse it's
- // register for any other operand inside instruction.
- USED_AT_END
- };
-
- explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, USED_AT_END);
- }
-
- LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, fixed_index, USED_AT_END);
- }
-
- LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, lifetime);
- }
-
- // The superclass has a KindField. Some policies have a signed fixed
- // index in the upper bits.
- static const int kPolicyWidth = 3;
- static const int kLifetimeWidth = 1;
- static const int kVirtualRegisterWidth = 15;
-
- static const int kPolicyShift = kKindFieldWidth;
- static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
- static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
- static const int kFixedIndexShift =
- kVirtualRegisterShift + kVirtualRegisterWidth;
- static const int kFixedIndexWidth = 32 - kFixedIndexShift;
- STATIC_ASSERT(kFixedIndexWidth > 5);
-
- class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
-
- class LifetimeField
- : public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
- };
-
- class VirtualRegisterField
- : public BitField<unsigned,
- kVirtualRegisterShift,
- kVirtualRegisterWidth> {
- };
-
- static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
- static const int kMaxFixedIndex = (1 << (kFixedIndexWidth - 1)) - 1;
- static const int kMinFixedIndex = -(1 << (kFixedIndexWidth - 1));
-
- bool HasAnyPolicy() const {
- return policy() == ANY;
- }
- bool HasFixedPolicy() const {
- return policy() == FIXED_REGISTER ||
- policy() == FIXED_DOUBLE_REGISTER ||
- policy() == FIXED_SLOT;
- }
- bool HasRegisterPolicy() const {
- return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
- }
- bool HasSameAsInputPolicy() const {
- return policy() == SAME_AS_FIRST_INPUT;
- }
- Policy policy() const { return PolicyField::decode(value_); }
- void set_policy(Policy policy) {
- value_ = PolicyField::update(value_, policy);
- }
- int fixed_index() const {
- return static_cast<int>(value_) >> kFixedIndexShift;
- }
-
- int virtual_register() const {
- return VirtualRegisterField::decode(value_);
- }
-
- void set_virtual_register(unsigned id) {
- value_ = VirtualRegisterField::update(value_, id);
- }
-
- LUnallocated* CopyUnconstrained(Zone* zone) {
- LUnallocated* result = new(zone) LUnallocated(ANY);
- result->set_virtual_register(virtual_register());
- return result;
- }
-
- static LUnallocated* cast(LOperand* op) {
- ASSERT(op->IsUnallocated());
- return reinterpret_cast<LUnallocated*>(op);
- }
-
- bool IsUsedAtStart() {
- return LifetimeField::decode(value_) == USED_AT_START;
- }
-
- private:
- void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
- value_ |= PolicyField::encode(policy);
- value_ |= LifetimeField::encode(lifetime);
- value_ |= fixed_index << kFixedIndexShift;
- ASSERT(this->fixed_index() == fixed_index);
- }
-};
-
-
-class LMoveOperands BASE_EMBEDDED {
- public:
- LMoveOperands(LOperand* source, LOperand* destination)
- : source_(source), destination_(destination) {
- }
-
- LOperand* source() const { return source_; }
- void set_source(LOperand* operand) { source_ = operand; }
-
- LOperand* destination() const { return destination_; }
- void set_destination(LOperand* operand) { destination_ = operand; }
-
- // The gap resolver marks moves as "in-progress" by clearing the
- // destination (but not the source).
- bool IsPending() const {
- return destination_ == NULL && source_ != NULL;
- }
-
- // True if this move a move into the given destination operand.
- bool Blocks(LOperand* operand) const {
- return !IsEliminated() && source()->Equals(operand);
- }
-
- // A move is redundant if it's been eliminated, if its source and
- // destination are the same, or if its destination is unneeded.
- bool IsRedundant() const {
- return IsEliminated() || source_->Equals(destination_) || IsIgnored();
- }
-
- bool IsIgnored() const {
- return destination_ != NULL && destination_->IsIgnored();
- }
-
- // We clear both operands to indicate move that's been eliminated.
- void Eliminate() { source_ = destination_ = NULL; }
- bool IsEliminated() const {
- ASSERT(source_ != NULL || destination_ == NULL);
- return source_ == NULL;
- }
-
- private:
- LOperand* source_;
- LOperand* destination_;
-};
-
-
-class LConstantOperand: public LOperand {
- public:
- static LConstantOperand* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LConstantOperand(index);
- }
-
- static LConstantOperand* cast(LOperand* op) {
- ASSERT(op->IsConstantOperand());
- return reinterpret_cast<LConstantOperand*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LConstantOperand* cache;
-
- LConstantOperand() : LOperand() { }
- explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
-};
-
-
-class LArgument: public LOperand {
- public:
- explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
-
- static LArgument* cast(LOperand* op) {
- ASSERT(op->IsArgument());
- return reinterpret_cast<LArgument*>(op);
- }
-};
-
-
-class LStackSlot: public LOperand {
- public:
- static LStackSlot* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LStackSlot(index);
- }
-
- static LStackSlot* cast(LOperand* op) {
- ASSERT(op->IsStackSlot());
- return reinterpret_cast<LStackSlot*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LStackSlot* cache;
-
- LStackSlot() : LOperand() { }
- explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
-};
-
-
-class LDoubleStackSlot: public LOperand {
- public:
- static LDoubleStackSlot* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LDoubleStackSlot(index);
- }
-
- static LDoubleStackSlot* cast(LOperand* op) {
- ASSERT(op->IsStackSlot());
- return reinterpret_cast<LDoubleStackSlot*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LDoubleStackSlot* cache;
-
- LDoubleStackSlot() : LOperand() { }
- explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
-};
-
-
-class LRegister: public LOperand {
- public:
- static LRegister* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LRegister(index);
- }
-
- static LRegister* cast(LOperand* op) {
- ASSERT(op->IsRegister());
- return reinterpret_cast<LRegister*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 16;
- static LRegister* cache;
-
- LRegister() : LOperand() { }
- explicit LRegister(int index) : LOperand(REGISTER, index) { }
-};
-
-
-class LDoubleRegister: public LOperand {
- public:
- static LDoubleRegister* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LDoubleRegister(index);
- }
-
- static LDoubleRegister* cast(LOperand* op) {
- ASSERT(op->IsDoubleRegister());
- return reinterpret_cast<LDoubleRegister*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 16;
- static LDoubleRegister* cache;
-
- LDoubleRegister() : LOperand() { }
- explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
-};
-
-
-class LParallelMove : public ZoneObject {
- public:
- explicit LParallelMove(Zone* zone) : move_operands_(4, zone) { }
-
- void AddMove(LOperand* from, LOperand* to, Zone* zone) {
- move_operands_.Add(LMoveOperands(from, to), zone);
- }
-
- bool IsRedundant() const;
-
- const ZoneList<LMoveOperands>* move_operands() const {
- return &move_operands_;
- }
-
- void PrintDataTo(StringStream* stream) const;
-
- private:
- ZoneList<LMoveOperands> move_operands_;
-};
-
-
-class LPointerMap: public ZoneObject {
- public:
- explicit LPointerMap(int position, Zone* zone)
- : pointer_operands_(8, zone),
- untagged_operands_(0, zone),
- position_(position),
- lithium_position_(-1) { }
-
- const ZoneList<LOperand*>* GetNormalizedOperands() {
- for (int i = 0; i < untagged_operands_.length(); ++i) {
- RemovePointer(untagged_operands_[i]);
- }
- untagged_operands_.Clear();
- return &pointer_operands_;
- }
- int position() const { return position_; }
- int lithium_position() const { return lithium_position_; }
-
- void set_lithium_position(int pos) {
- ASSERT(lithium_position_ == -1);
- lithium_position_ = pos;
- }
-
- void RecordPointer(LOperand* op, Zone* zone);
- void RemovePointer(LOperand* op);
- void RecordUntagged(LOperand* op, Zone* zone);
- void PrintTo(StringStream* stream);
-
- private:
- ZoneList<LOperand*> pointer_operands_;
- ZoneList<LOperand*> untagged_operands_;
- int position_;
- int lithium_position_;
-};
-
-
-class LEnvironment: public ZoneObject {
- public:
- LEnvironment(Handle<JSFunction> closure,
- FrameType frame_type,
- BailoutId ast_id,
- int parameter_count,
- int argument_count,
- int value_count,
- LEnvironment* outer,
- HEnterInlined* entry,
- Zone* zone)
- : closure_(closure),
- frame_type_(frame_type),
- arguments_stack_height_(argument_count),
- deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
- translation_index_(-1),
- ast_id_(ast_id),
- parameter_count_(parameter_count),
- pc_offset_(-1),
- values_(value_count, zone),
- is_tagged_(value_count, zone),
- is_uint32_(value_count, zone),
- spilled_registers_(NULL),
- spilled_double_registers_(NULL),
- outer_(outer),
- entry_(entry),
- zone_(zone) { }
-
- Handle<JSFunction> closure() const { return closure_; }
- FrameType frame_type() const { return frame_type_; }
- int arguments_stack_height() const { return arguments_stack_height_; }
- int deoptimization_index() const { return deoptimization_index_; }
- int translation_index() const { return translation_index_; }
- BailoutId ast_id() const { return ast_id_; }
- int parameter_count() const { return parameter_count_; }
- int pc_offset() const { return pc_offset_; }
- LOperand** spilled_registers() const { return spilled_registers_; }
- LOperand** spilled_double_registers() const {
- return spilled_double_registers_;
- }
- const ZoneList<LOperand*>* values() const { return &values_; }
- LEnvironment* outer() const { return outer_; }
- HEnterInlined* entry() { return entry_; }
-
- void AddValue(LOperand* operand,
- Representation representation,
- bool is_uint32) {
- values_.Add(operand, zone());
- if (representation.IsTagged()) {
- ASSERT(!is_uint32);
- is_tagged_.Add(values_.length() - 1);
- }
-
- if (is_uint32) {
- is_uint32_.Add(values_.length() - 1);
- }
- }
-
- bool HasTaggedValueAt(int index) const {
- return is_tagged_.Contains(index);
- }
-
- bool HasUint32ValueAt(int index) const {
- return is_uint32_.Contains(index);
- }
-
- void Register(int deoptimization_index,
- int translation_index,
- int pc_offset) {
- ASSERT(!HasBeenRegistered());
- deoptimization_index_ = deoptimization_index;
- translation_index_ = translation_index;
- pc_offset_ = pc_offset;
- }
- bool HasBeenRegistered() const {
- return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
- }
-
- void SetSpilledRegisters(LOperand** registers,
- LOperand** double_registers) {
- spilled_registers_ = registers;
- spilled_double_registers_ = double_registers;
- }
-
- void PrintTo(StringStream* stream);
-
- Zone* zone() const { return zone_; }
-
- private:
- Handle<JSFunction> closure_;
- FrameType frame_type_;
- int arguments_stack_height_;
- int deoptimization_index_;
- int translation_index_;
- BailoutId ast_id_;
- int parameter_count_;
- int pc_offset_;
- ZoneList<LOperand*> values_;
- BitVector is_tagged_;
- BitVector is_uint32_;
-
- // Allocation index indexed arrays of spill slot operands for registers
- // that are also in spill slots at an OSR entry. NULL for environments
- // that do not correspond to an OSR entry.
- LOperand** spilled_registers_;
- LOperand** spilled_double_registers_;
-
- LEnvironment* outer_;
- HEnterInlined* entry_;
-
- Zone* zone_;
-};
-
-
-// Iterates over the non-null, non-constant operands in an environment.
-class ShallowIterator BASE_EMBEDDED {
- public:
- explicit ShallowIterator(LEnvironment* env)
- : env_(env),
- limit_(env != NULL ? env->values()->length() : 0),
- current_(0) {
- SkipUninteresting();
- }
-
- bool Done() { return current_ >= limit_; }
-
- LOperand* Current() {
- ASSERT(!Done());
- ASSERT(env_->values()->at(current_) != NULL);
- return env_->values()->at(current_);
- }
-
- void Advance() {
- ASSERT(!Done());
- ++current_;
- SkipUninteresting();
- }
-
- LEnvironment* env() { return env_; }
-
- private:
- bool ShouldSkip(LOperand* op) {
- return op == NULL || op->IsConstantOperand() || op->IsArgument();
- }
-
- // Skip until something interesting, beginning with and including current_.
- void SkipUninteresting() {
- while (current_ < limit_ && ShouldSkip(env_->values()->at(current_))) {
- ++current_;
- }
- }
-
- LEnvironment* env_;
- int limit_;
- int current_;
-};
-
-
-// Iterator for non-null, non-constant operands incl. outer environments.
-class DeepIterator BASE_EMBEDDED {
- public:
- explicit DeepIterator(LEnvironment* env)
- : current_iterator_(env) {
- SkipUninteresting();
- }
-
- bool Done() { return current_iterator_.Done(); }
-
- LOperand* Current() {
- ASSERT(!current_iterator_.Done());
- ASSERT(current_iterator_.Current() != NULL);
- return current_iterator_.Current();
- }
-
- void Advance() {
- current_iterator_.Advance();
- SkipUninteresting();
- }
-
- private:
- void SkipUninteresting() {
- while (current_iterator_.env() != NULL && current_iterator_.Done()) {
- current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
- }
- }
-
- ShallowIterator current_iterator_;
-};
-
-
-class LPlatformChunk;
-class LGap;
-class LLabel;
-
-// Superclass providing data and behavior common to all the
-// arch-specific LPlatformChunk classes.
-class LChunk: public ZoneObject {
- public:
- static LChunk* NewChunk(HGraph* graph);
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- HConstant* LookupConstant(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
-
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const;
- int LookupDestination(int block_id) const;
- Label* GetAssemblyLabel(int block_id) const;
-
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
-
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure, zone());
- }
-
- Zone* zone() const { return info_->zone(); }
-
- Handle<Code> Codegen(Code::Kind kind);
-
- void set_allocated_double_registers(BitVector* allocated_registers);
- BitVector* allocated_double_registers() {
- return allocated_double_registers_;
- }
-
- protected:
- LChunk(CompilationInfo* info, HGraph* graph);
-
- int spill_slot_count_;
-
- private:
- CompilationInfo* info_;
- HGraph* const graph_;
- BitVector* allocated_double_registers_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
-};
-
-
-int ElementsKindToShiftSize(ElementsKind elements_kind);
-int StackSlotOffset(int index);
-
-enum NumberUntagDMode {
- NUMBER_CANDIDATE_IS_SMI,
- NUMBER_CANDIDATE_IS_SMI_OR_HOLE,
- NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE,
- NUMBER_CANDIDATE_IS_ANY_TAGGED
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_LITHIUM_H_
diff --git a/src/3rdparty/v8/src/liveedit-debugger.js b/src/3rdparty/v8/src/liveedit-debugger.js
deleted file mode 100644
index 451b146..0000000
--- a/src/3rdparty/v8/src/liveedit-debugger.js
+++ /dev/null
@@ -1,1137 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// LiveEdit feature implementation. The script should be executed after
-// debug-debugger.js.
-
-// A LiveEdit namespace. It contains functions that modifies JavaScript code
-// according to changes of script source (if possible).
-//
-// When new script source is put in, the difference is calculated textually,
-// in form of list of delete/add/change chunks. The functions that include
-// change chunk(s) get recompiled, or their enclosing functions are
-// recompiled instead.
-// If the function may not be recompiled (e.g. it was completely erased in new
-// version of the script) it remains unchanged, but the code that could
-// create a new instance of this function goes away. An old version of script
-// is created to back up this obsolete function.
-// All unchanged functions have their positions updated accordingly.
-//
-// LiveEdit namespace is declared inside a single function constructor.
-Debug.LiveEdit = new function() {
-
- // Forward declaration for minifier.
- var FunctionStatus;
-
- var NEEDS_STEP_IN_PROPERTY_NAME = "stack_update_needs_step_in";
-
- // Applies the change to the script.
- // The change is in form of list of chunks encoded in a single array as
- // a series of triplets (pos1_start, pos1_end, pos2_end)
- function ApplyPatchMultiChunk(script, diff_array, new_source, preview_only,
- change_log) {
-
- var old_source = script.source;
-
- // Gather compile information about old version of script.
- var old_compile_info = GatherCompileInfo(old_source, script);
-
- // Build tree structures for old and new versions of the script.
- var root_old_node = BuildCodeInfoTree(old_compile_info);
-
- var pos_translator = new PosTranslator(diff_array);
-
- // Analyze changes.
- MarkChangedFunctions(root_old_node, pos_translator.GetChunks());
-
- // Find all SharedFunctionInfo's that were compiled from this script.
- FindLiveSharedInfos(root_old_node, script);
-
- // Gather compile information about new version of script.
- var new_compile_info;
- try {
- new_compile_info = GatherCompileInfo(new_source, script);
- } catch (e) {
- var failure =
- new Failure("Failed to compile new version of script: " + e);
- if (e instanceof SyntaxError) {
- var details = {
- type: "liveedit_compile_error",
- syntaxErrorMessage: e.message
- };
- CopyErrorPositionToDetails(e, details);
- failure.details = details;
- }
- throw failure;
- }
- var root_new_node = BuildCodeInfoTree(new_compile_info);
-
- // Link recompiled script data with other data.
- FindCorrespondingFunctions(root_old_node, root_new_node);
-
- // Prepare to-do lists.
- var replace_code_list = new Array();
- var link_to_old_script_list = new Array();
- var link_to_original_script_list = new Array();
- var update_positions_list = new Array();
-
- function HarvestTodo(old_node) {
- function CollectDamaged(node) {
- link_to_old_script_list.push(node);
- for (var i = 0; i < node.children.length; i++) {
- CollectDamaged(node.children[i]);
- }
- }
-
- // Recursively collects all newly compiled functions that are going into
- // business and should have link to the actual script updated.
- function CollectNew(node_list) {
- for (var i = 0; i < node_list.length; i++) {
- link_to_original_script_list.push(node_list[i]);
- CollectNew(node_list[i].children);
- }
- }
-
- if (old_node.status == FunctionStatus.DAMAGED) {
- CollectDamaged(old_node);
- return;
- }
- if (old_node.status == FunctionStatus.UNCHANGED) {
- update_positions_list.push(old_node);
- } else if (old_node.status == FunctionStatus.SOURCE_CHANGED) {
- update_positions_list.push(old_node);
- } else if (old_node.status == FunctionStatus.CHANGED) {
- replace_code_list.push(old_node);
- CollectNew(old_node.unmatched_new_nodes);
- }
- for (var i = 0; i < old_node.children.length; i++) {
- HarvestTodo(old_node.children[i]);
- }
- }
-
- var preview_description = {
- change_tree: DescribeChangeTree(root_old_node),
- textual_diff: {
- old_len: old_source.length,
- new_len: new_source.length,
- chunks: diff_array
- },
- updated: false
- };
-
- if (preview_only) {
- return preview_description;
- }
-
- HarvestTodo(root_old_node);
-
- // Collect shared infos for functions whose code need to be patched.
- var replaced_function_infos = new Array();
- for (var i = 0; i < replace_code_list.length; i++) {
- var live_shared_function_infos =
- replace_code_list[i].live_shared_function_infos;
-
- if (live_shared_function_infos) {
- for (var j = 0; j < live_shared_function_infos.length; j++) {
- replaced_function_infos.push(live_shared_function_infos[j]);
- }
- }
- }
-
- // We haven't changed anything before this line yet.
- // Committing all changes.
-
- // Check that function being patched is not currently on stack or drop them.
- var dropped_functions_number =
- CheckStackActivations(replaced_function_infos, change_log);
-
- preview_description.stack_modified = dropped_functions_number != 0;
-
- // Our current implementation requires client to manually issue "step in"
- // command for correct stack state.
- preview_description[NEEDS_STEP_IN_PROPERTY_NAME] =
- preview_description.stack_modified;
-
- // Start with breakpoints. Convert their line/column positions and
- // temporary remove.
- var break_points_restorer = TemporaryRemoveBreakPoints(script, change_log);
-
- var old_script;
-
- // Create an old script only if there are function that should be linked
- // to old version.
- if (link_to_old_script_list.length == 0) {
- %LiveEditReplaceScript(script, new_source, null);
- old_script = void 0;
- } else {
- var old_script_name = CreateNameForOldScript(script);
-
- // Update the script text and create a new script representing an old
- // version of the script.
- old_script = %LiveEditReplaceScript(script, new_source,
- old_script_name);
-
- var link_to_old_script_report = new Array();
- change_log.push( { linked_to_old_script: link_to_old_script_report } );
-
- // We need to link to old script all former nested functions.
- for (var i = 0; i < link_to_old_script_list.length; i++) {
- LinkToOldScript(link_to_old_script_list[i], old_script,
- link_to_old_script_report);
- }
-
- preview_description.created_script_name = old_script_name;
- }
-
- // Link to an actual script all the functions that we are going to use.
- for (var i = 0; i < link_to_original_script_list.length; i++) {
- %LiveEditFunctionSetScript(
- link_to_original_script_list[i].info.shared_function_info, script);
- }
-
- for (var i = 0; i < replace_code_list.length; i++) {
- PatchFunctionCode(replace_code_list[i], change_log);
- }
-
- var position_patch_report = new Array();
- change_log.push( {position_patched: position_patch_report} );
-
- for (var i = 0; i < update_positions_list.length; i++) {
- // TODO(LiveEdit): take into account wether it's source_changed or
- // unchanged and whether positions changed at all.
- PatchPositions(update_positions_list[i], diff_array,
- position_patch_report);
-
- if (update_positions_list[i].live_shared_function_infos) {
- update_positions_list[i].live_shared_function_infos.
- forEach(function (info) {
- %LiveEditFunctionSourceUpdated(info.raw_array);
- });
- }
- }
-
- break_points_restorer(pos_translator, old_script);
-
- preview_description.updated = true;
- return preview_description;
- }
- // Function is public.
- this.ApplyPatchMultiChunk = ApplyPatchMultiChunk;
-
-
- // Fully compiles source string as a script. Returns Array of
- // FunctionCompileInfo -- a descriptions of all functions of the script.
- // Elements of array are ordered by start positions of functions (from top
- // to bottom) in the source. Fields outer_index and next_sibling_index help
- // to navigate the nesting structure of functions.
- //
- // All functions get compiled linked to script provided as parameter script.
- // TODO(LiveEdit): consider not using actual scripts as script, because
- // we have to manually erase all links right after compile.
- function GatherCompileInfo(source, script) {
- // Get function info, elements are partially sorted (it is a tree of
- // nested functions serialized as parent followed by serialized children.
- var raw_compile_info = %LiveEditGatherCompileInfo(script, source);
-
- // Sort function infos by start position field.
- var compile_info = new Array();
- var old_index_map = new Array();
- for (var i = 0; i < raw_compile_info.length; i++) {
- var info = new FunctionCompileInfo(raw_compile_info[i]);
- // Remove all links to the actual script. Breakpoints system and
- // LiveEdit itself believe that any function in heap that points to a
- // particular script is a regular function.
- // For some functions we will restore this link later.
- %LiveEditFunctionSetScript(info.shared_function_info, void 0);
- compile_info.push(info);
- old_index_map.push(i);
- }
-
- for (var i = 0; i < compile_info.length; i++) {
- var k = i;
- for (var j = i + 1; j < compile_info.length; j++) {
- if (compile_info[k].start_position > compile_info[j].start_position) {
- k = j;
- }
- }
- if (k != i) {
- var temp_info = compile_info[k];
- var temp_index = old_index_map[k];
- compile_info[k] = compile_info[i];
- old_index_map[k] = old_index_map[i];
- compile_info[i] = temp_info;
- old_index_map[i] = temp_index;
- }
- }
-
- // After sorting update outer_inder field using old_index_map. Also
- // set next_sibling_index field.
- var current_index = 0;
-
- // The recursive function, that goes over all children of a particular
- // node (i.e. function info).
- function ResetIndexes(new_parent_index, old_parent_index) {
- var previous_sibling = -1;
- while (current_index < compile_info.length &&
- compile_info[current_index].outer_index == old_parent_index) {
- var saved_index = current_index;
- compile_info[saved_index].outer_index = new_parent_index;
- if (previous_sibling != -1) {
- compile_info[previous_sibling].next_sibling_index = saved_index;
- }
- previous_sibling = saved_index;
- current_index++;
- ResetIndexes(saved_index, old_index_map[saved_index]);
- }
- if (previous_sibling != -1) {
- compile_info[previous_sibling].next_sibling_index = -1;
- }
- }
-
- ResetIndexes(-1, -1);
- Assert(current_index == compile_info.length);
-
- return compile_info;
- }
-
-
- // Replaces function's Code.
- function PatchFunctionCode(old_node, change_log) {
- var new_info = old_node.corresponding_node.info;
- if (old_node.live_shared_function_infos) {
- old_node.live_shared_function_infos.forEach(function (old_info) {
- %LiveEditReplaceFunctionCode(new_info.raw_array,
- old_info.raw_array);
-
- // The function got a new code. However, this new code brings all new
- // instances of SharedFunctionInfo for nested functions. However,
- // we want the original instances to be used wherever possible.
- // (This is because old instances and new instances will be both
- // linked to a script and breakpoints subsystem does not really
- // expects this; neither does LiveEdit subsystem on next call).
- for (var i = 0; i < old_node.children.length; i++) {
- if (old_node.children[i].corresponding_node) {
- var corresponding_child_info =
- old_node.children[i].corresponding_node.info.
- shared_function_info;
-
- if (old_node.children[i].live_shared_function_infos) {
- old_node.children[i].live_shared_function_infos.
- forEach(function (old_child_info) {
- %LiveEditReplaceRefToNestedFunction(
- old_info.info,
- corresponding_child_info,
- old_child_info.info);
- });
- }
- }
- }
- });
-
- change_log.push( {function_patched: new_info.function_name} );
- } else {
- change_log.push( {function_patched: new_info.function_name,
- function_info_not_found: true} );
- }
- }
-
-
- // Makes a function associated with another instance of a script (the
- // one representing its old version). This way the function still
- // may access its own text.
- function LinkToOldScript(old_info_node, old_script, report_array) {
- if (old_info_node.live_shared_function_infos) {
- old_info_node.live_shared_function_infos.
- forEach(function (info) {
- %LiveEditFunctionSetScript(info.info, old_script);
- });
-
- report_array.push( { name: old_info_node.info.function_name } );
- } else {
- report_array.push(
- { name: old_info_node.info.function_name, not_found: true } );
- }
- }
-
-
- // Returns function that restores breakpoints.
- function TemporaryRemoveBreakPoints(original_script, change_log) {
- var script_break_points = GetScriptBreakPoints(original_script);
-
- var break_points_update_report = [];
- change_log.push( { break_points_update: break_points_update_report } );
-
- var break_point_old_positions = [];
- for (var i = 0; i < script_break_points.length; i++) {
- var break_point = script_break_points[i];
-
- break_point.clear();
-
- // TODO(LiveEdit): be careful with resource offset here.
- var break_point_position = Debug.findScriptSourcePosition(original_script,
- break_point.line(), break_point.column());
-
- var old_position_description = {
- position: break_point_position,
- line: break_point.line(),
- column: break_point.column()
- };
- break_point_old_positions.push(old_position_description);
- }
-
-
- // Restores breakpoints and creates their copies in the "old" copy of
- // the script.
- return function (pos_translator, old_script_copy_opt) {
- // Update breakpoints (change positions and restore them in old version
- // of script.
- for (var i = 0; i < script_break_points.length; i++) {
- var break_point = script_break_points[i];
- if (old_script_copy_opt) {
- var clone = break_point.cloneForOtherScript(old_script_copy_opt);
- clone.set(old_script_copy_opt);
-
- break_points_update_report.push( {
- type: "copied_to_old",
- id: break_point.number(),
- new_id: clone.number(),
- positions: break_point_old_positions[i]
- } );
- }
-
- var updated_position = pos_translator.Translate(
- break_point_old_positions[i].position,
- PosTranslator.ShiftWithTopInsideChunkHandler);
-
- var new_location =
- original_script.locationFromPosition(updated_position, false);
-
- break_point.update_positions(new_location.line, new_location.column);
-
- var new_position_description = {
- position: updated_position,
- line: new_location.line,
- column: new_location.column
- };
-
- break_point.set(original_script);
-
- break_points_update_report.push( { type: "position_changed",
- id: break_point.number(),
- old_positions: break_point_old_positions[i],
- new_positions: new_position_description
- } );
- }
- };
- }
-
-
- function Assert(condition, message) {
- if (!condition) {
- if (message) {
- throw "Assert " + message;
- } else {
- throw "Assert";
- }
- }
- }
-
- function DiffChunk(pos1, pos2, len1, len2) {
- this.pos1 = pos1;
- this.pos2 = pos2;
- this.len1 = len1;
- this.len2 = len2;
- }
-
- function PosTranslator(diff_array) {
- var chunks = new Array();
- var current_diff = 0;
- for (var i = 0; i < diff_array.length; i += 3) {
- var pos1_begin = diff_array[i];
- var pos2_begin = pos1_begin + current_diff;
- var pos1_end = diff_array[i + 1];
- var pos2_end = diff_array[i + 2];
- chunks.push(new DiffChunk(pos1_begin, pos2_begin, pos1_end - pos1_begin,
- pos2_end - pos2_begin));
- current_diff = pos2_end - pos1_end;
- }
- this.chunks = chunks;
- }
- PosTranslator.prototype.GetChunks = function() {
- return this.chunks;
- };
-
- PosTranslator.prototype.Translate = function(pos, inside_chunk_handler) {
- var array = this.chunks;
- if (array.length == 0 || pos < array[0].pos1) {
- return pos;
- }
- var chunk_index1 = 0;
- var chunk_index2 = array.length - 1;
-
- while (chunk_index1 < chunk_index2) {
- var middle_index = Math.floor((chunk_index1 + chunk_index2) / 2);
- if (pos < array[middle_index + 1].pos1) {
- chunk_index2 = middle_index;
- } else {
- chunk_index1 = middle_index + 1;
- }
- }
- var chunk = array[chunk_index1];
- if (pos >= chunk.pos1 + chunk.len1) {
- return pos + chunk.pos2 + chunk.len2 - chunk.pos1 - chunk.len1;
- }
-
- if (!inside_chunk_handler) {
- inside_chunk_handler = PosTranslator.DefaultInsideChunkHandler;
- }
- return inside_chunk_handler(pos, chunk);
- };
-
- PosTranslator.DefaultInsideChunkHandler = function(pos, diff_chunk) {
- Assert(false, "Cannot translate position in changed area");
- };
-
- PosTranslator.ShiftWithTopInsideChunkHandler =
- function(pos, diff_chunk) {
- // We carelessly do not check whether we stay inside the chunk after
- // translation.
- return pos - diff_chunk.pos1 + diff_chunk.pos2;
- };
-
- var FunctionStatus = {
- // No change to function or its inner functions; however its positions
- // in script may have been shifted.
- UNCHANGED: "unchanged",
- // The code of a function remains unchanged, but something happened inside
- // some inner functions.
- SOURCE_CHANGED: "source changed",
- // The code of a function is changed or some nested function cannot be
- // properly patched so this function must be recompiled.
- CHANGED: "changed",
- // Function is changed but cannot be patched.
- DAMAGED: "damaged"
- };
-
- function CodeInfoTreeNode(code_info, children, array_index) {
- this.info = code_info;
- this.children = children;
- // an index in array of compile_info
- this.array_index = array_index;
- this.parent = void 0;
-
- this.status = FunctionStatus.UNCHANGED;
- // Status explanation is used for debugging purposes and will be shown
- // in user UI if some explanations are needed.
- this.status_explanation = void 0;
- this.new_start_pos = void 0;
- this.new_end_pos = void 0;
- this.corresponding_node = void 0;
- this.unmatched_new_nodes = void 0;
-
- // 'Textual' correspondence/matching is weaker than 'pure'
- // correspondence/matching. We need 'textual' level for visual presentation
- // in UI, we use 'pure' level for actual code manipulation.
- // Sometimes only function body is changed (functions in old and new script
- // textually correspond), but we cannot patch the code, so we see them
- // as an old function deleted and new function created.
- this.textual_corresponding_node = void 0;
- this.textually_unmatched_new_nodes = void 0;
-
- this.live_shared_function_infos = void 0;
- }
-
- // From array of function infos that is implicitly a tree creates
- // an actual tree of functions in script.
- function BuildCodeInfoTree(code_info_array) {
- // Throughtout all function we iterate over input array.
- var index = 0;
-
- // Recursive function that builds a branch of tree.
- function BuildNode() {
- var my_index = index;
- index++;
- var child_array = new Array();
- while (index < code_info_array.length &&
- code_info_array[index].outer_index == my_index) {
- child_array.push(BuildNode());
- }
- var node = new CodeInfoTreeNode(code_info_array[my_index], child_array,
- my_index);
- for (var i = 0; i < child_array.length; i++) {
- child_array[i].parent = node;
- }
- return node;
- }
-
- var root = BuildNode();
- Assert(index == code_info_array.length);
- return root;
- }
-
- // Applies a list of the textual diff chunks onto the tree of functions.
- // Determines status of each function (from unchanged to damaged). However
- // children of unchanged functions are ignored.
- function MarkChangedFunctions(code_info_tree, chunks) {
-
- // A convenient iterator over diff chunks that also translates
- // positions from old to new in a current non-changed part of script.
- var chunk_it = new function() {
- var chunk_index = 0;
- var pos_diff = 0;
- this.current = function() { return chunks[chunk_index]; };
- this.next = function() {
- var chunk = chunks[chunk_index];
- pos_diff = chunk.pos2 + chunk.len2 - (chunk.pos1 + chunk.len1);
- chunk_index++;
- };
- this.done = function() { return chunk_index >= chunks.length; };
- this.TranslatePos = function(pos) { return pos + pos_diff; };
- };
-
- // A recursive function that processes internals of a function and all its
- // inner functions. Iterator chunk_it initially points to a chunk that is
- // below function start.
- function ProcessInternals(info_node) {
- info_node.new_start_pos = chunk_it.TranslatePos(
- info_node.info.start_position);
- var child_index = 0;
- var code_changed = false;
- var source_changed = false;
- // Simultaneously iterates over child functions and over chunks.
- while (!chunk_it.done() &&
- chunk_it.current().pos1 < info_node.info.end_position) {
- if (child_index < info_node.children.length) {
- var child = info_node.children[child_index];
-
- if (child.info.end_position <= chunk_it.current().pos1) {
- ProcessUnchangedChild(child);
- child_index++;
- continue;
- } else if (child.info.start_position >=
- chunk_it.current().pos1 + chunk_it.current().len1) {
- code_changed = true;
- chunk_it.next();
- continue;
- } else if (child.info.start_position <= chunk_it.current().pos1 &&
- child.info.end_position >= chunk_it.current().pos1 +
- chunk_it.current().len1) {
- ProcessInternals(child);
- source_changed = source_changed ||
- ( child.status != FunctionStatus.UNCHANGED );
- code_changed = code_changed ||
- ( child.status == FunctionStatus.DAMAGED );
- child_index++;
- continue;
- } else {
- code_changed = true;
- child.status = FunctionStatus.DAMAGED;
- child.status_explanation =
- "Text diff overlaps with function boundary";
- child_index++;
- continue;
- }
- } else {
- if (chunk_it.current().pos1 + chunk_it.current().len1 <=
- info_node.info.end_position) {
- info_node.status = FunctionStatus.CHANGED;
- chunk_it.next();
- continue;
- } else {
- info_node.status = FunctionStatus.DAMAGED;
- info_node.status_explanation =
- "Text diff overlaps with function boundary";
- return;
- }
- }
- Assert("Unreachable", false);
- }
- while (child_index < info_node.children.length) {
- var child = info_node.children[child_index];
- ProcessUnchangedChild(child);
- child_index++;
- }
- if (code_changed) {
- info_node.status = FunctionStatus.CHANGED;
- } else if (source_changed) {
- info_node.status = FunctionStatus.SOURCE_CHANGED;
- }
- info_node.new_end_pos =
- chunk_it.TranslatePos(info_node.info.end_position);
- }
-
- function ProcessUnchangedChild(node) {
- node.new_start_pos = chunk_it.TranslatePos(node.info.start_position);
- node.new_end_pos = chunk_it.TranslatePos(node.info.end_position);
- }
-
- ProcessInternals(code_info_tree);
- }
-
- // For ecah old function (if it is not damaged) tries to find a corresponding
- // function in new script. Typically it should succeed (non-damaged functions
- // by definition may only have changes inside their bodies). However there are
- // reasons for corresponence not to be found; function with unmodified text
- // in new script may become enclosed into other function; the innocent change
- // inside function body may in fact be something like "} function B() {" that
- // splits a function into 2 functions.
- function FindCorrespondingFunctions(old_code_tree, new_code_tree) {
-
- // A recursive function that tries to find a correspondence for all
- // child functions and for their inner functions.
- function ProcessChildren(old_node, new_node) {
- var old_children = old_node.children;
- var new_children = new_node.children;
-
- var unmatched_new_nodes_list = [];
- var textually_unmatched_new_nodes_list = [];
-
- var old_index = 0;
- var new_index = 0;
- while (old_index < old_children.length) {
- if (old_children[old_index].status == FunctionStatus.DAMAGED) {
- old_index++;
- } else if (new_index < new_children.length) {
- if (new_children[new_index].info.start_position <
- old_children[old_index].new_start_pos) {
- unmatched_new_nodes_list.push(new_children[new_index]);
- textually_unmatched_new_nodes_list.push(new_children[new_index]);
- new_index++;
- } else if (new_children[new_index].info.start_position ==
- old_children[old_index].new_start_pos) {
- if (new_children[new_index].info.end_position ==
- old_children[old_index].new_end_pos) {
- old_children[old_index].corresponding_node =
- new_children[new_index];
- old_children[old_index].textual_corresponding_node =
- new_children[new_index];
- if (old_children[old_index].status != FunctionStatus.UNCHANGED) {
- ProcessChildren(old_children[old_index],
- new_children[new_index]);
- if (old_children[old_index].status == FunctionStatus.DAMAGED) {
- unmatched_new_nodes_list.push(
- old_children[old_index].corresponding_node);
- old_children[old_index].corresponding_node = void 0;
- old_node.status = FunctionStatus.CHANGED;
- }
- }
- } else {
- old_children[old_index].status = FunctionStatus.DAMAGED;
- old_children[old_index].status_explanation =
- "No corresponding function in new script found";
- old_node.status = FunctionStatus.CHANGED;
- unmatched_new_nodes_list.push(new_children[new_index]);
- textually_unmatched_new_nodes_list.push(new_children[new_index]);
- }
- new_index++;
- old_index++;
- } else {
- old_children[old_index].status = FunctionStatus.DAMAGED;
- old_children[old_index].status_explanation =
- "No corresponding function in new script found";
- old_node.status = FunctionStatus.CHANGED;
- old_index++;
- }
- } else {
- old_children[old_index].status = FunctionStatus.DAMAGED;
- old_children[old_index].status_explanation =
- "No corresponding function in new script found";
- old_node.status = FunctionStatus.CHANGED;
- old_index++;
- }
- }
-
- while (new_index < new_children.length) {
- unmatched_new_nodes_list.push(new_children[new_index]);
- textually_unmatched_new_nodes_list.push(new_children[new_index]);
- new_index++;
- }
-
- if (old_node.status == FunctionStatus.CHANGED) {
- var why_wrong_expectations =
- WhyFunctionExpectationsDiffer(old_node.info, new_node.info);
- if (why_wrong_expectations) {
- old_node.status = FunctionStatus.DAMAGED;
- old_node.status_explanation = why_wrong_expectations;
- }
- }
- old_node.unmatched_new_nodes = unmatched_new_nodes_list;
- old_node.textually_unmatched_new_nodes =
- textually_unmatched_new_nodes_list;
- }
-
- ProcessChildren(old_code_tree, new_code_tree);
-
- old_code_tree.corresponding_node = new_code_tree;
- old_code_tree.textual_corresponding_node = new_code_tree;
-
- Assert(old_code_tree.status != FunctionStatus.DAMAGED,
- "Script became damaged");
- }
-
- function FindLiveSharedInfos(old_code_tree, script) {
- var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script);
-
- var shared_infos = new Array();
-
- for (var i = 0; i < shared_raw_list.length; i++) {
- shared_infos.push(new SharedInfoWrapper(shared_raw_list[i]));
- }
-
- // Finds all SharedFunctionInfos that corresponds to compile info
- // in old version of the script.
- function FindFunctionInfos(compile_info) {
- var wrappers = [];
-
- for (var i = 0; i < shared_infos.length; i++) {
- var wrapper = shared_infos[i];
- if (wrapper.start_position == compile_info.start_position &&
- wrapper.end_position == compile_info.end_position) {
- wrappers.push(wrapper);
- }
- }
-
- if (wrappers.length > 0) {
- return wrappers;
- }
- }
-
- function TraverseTree(node) {
- node.live_shared_function_infos = FindFunctionInfos(node.info);
-
- for (var i = 0; i < node.children.length; i++) {
- TraverseTree(node.children[i]);
- }
- }
-
- TraverseTree(old_code_tree);
- }
-
-
- // An object describing function compilation details. Its index fields
- // apply to indexes inside array that stores these objects.
- function FunctionCompileInfo(raw_array) {
- this.function_name = raw_array[0];
- this.start_position = raw_array[1];
- this.end_position = raw_array[2];
- this.param_num = raw_array[3];
- this.code = raw_array[4];
- this.code_scope_info = raw_array[5];
- this.scope_info = raw_array[6];
- this.outer_index = raw_array[7];
- this.shared_function_info = raw_array[8];
- this.next_sibling_index = null;
- this.raw_array = raw_array;
- }
-
- function SharedInfoWrapper(raw_array) {
- this.function_name = raw_array[0];
- this.start_position = raw_array[1];
- this.end_position = raw_array[2];
- this.info = raw_array[3];
- this.raw_array = raw_array;
- }
-
- // Changes positions (including all statments) in function.
- function PatchPositions(old_info_node, diff_array, report_array) {
- if (old_info_node.live_shared_function_infos) {
- old_info_node.live_shared_function_infos.forEach(function (info) {
- %LiveEditPatchFunctionPositions(info.raw_array,
- diff_array);
- });
-
- report_array.push( { name: old_info_node.info.function_name } );
- } else {
- // TODO(LiveEdit): function is not compiled yet or is already collected.
- report_array.push(
- { name: old_info_node.info.function_name, info_not_found: true } );
- }
- }
-
- // Adds a suffix to script name to mark that it is old version.
- function CreateNameForOldScript(script) {
- // TODO(635): try better than this; support several changes.
- return script.name + " (old)";
- }
-
- // Compares a function interface old and new version, whether it
- // changed or not. Returns explanation if they differ.
- function WhyFunctionExpectationsDiffer(function_info1, function_info2) {
- // Check that function has the same number of parameters (there may exist
- // an adapter, that won't survive function parameter number change).
- if (function_info1.param_num != function_info2.param_num) {
- return "Changed parameter number: " + function_info1.param_num +
- " and " + function_info2.param_num;
- }
- var scope_info1 = function_info1.scope_info;
- var scope_info2 = function_info2.scope_info;
-
- var scope_info1_text;
- var scope_info2_text;
-
- if (scope_info1) {
- scope_info1_text = scope_info1.toString();
- } else {
- scope_info1_text = "";
- }
- if (scope_info2) {
- scope_info2_text = scope_info2.toString();
- } else {
- scope_info2_text = "";
- }
-
- if (scope_info1_text != scope_info2_text) {
- return "Incompatible variable maps: [" + scope_info1_text +
- "] and [" + scope_info2_text + "]";
- }
- // No differences. Return undefined.
- return;
- }
-
- // Minifier forward declaration.
- var FunctionPatchabilityStatus;
-
- // For array of wrapped shared function infos checks that none of them
- // have activations on stack (of any thread). Throws a Failure exception
- // if this proves to be false.
- function CheckStackActivations(shared_wrapper_list, change_log) {
- var shared_list = new Array();
- for (var i = 0; i < shared_wrapper_list.length; i++) {
- shared_list[i] = shared_wrapper_list[i].info;
- }
- var result = %LiveEditCheckAndDropActivations(shared_list, true);
- if (result[shared_list.length]) {
- // Extra array element may contain error message.
- throw new Failure(result[shared_list.length]);
- }
-
- var problems = new Array();
- var dropped = new Array();
- for (var i = 0; i < shared_list.length; i++) {
- var shared = shared_wrapper_list[i];
- if (result[i] == FunctionPatchabilityStatus.REPLACED_ON_ACTIVE_STACK) {
- dropped.push({ name: shared.function_name } );
- } else if (result[i] != FunctionPatchabilityStatus.AVAILABLE_FOR_PATCH) {
- var description = {
- name: shared.function_name,
- start_pos: shared.start_position,
- end_pos: shared.end_position,
- replace_problem:
- FunctionPatchabilityStatus.SymbolName(result[i])
- };
- problems.push(description);
- }
- }
- if (dropped.length > 0) {
- change_log.push({ dropped_from_stack: dropped });
- }
- if (problems.length > 0) {
- change_log.push( { functions_on_stack: problems } );
- throw new Failure("Blocked by functions on stack");
- }
-
- return dropped.length;
- }
-
- // A copy of the FunctionPatchabilityStatus enum from liveedit.h
- var FunctionPatchabilityStatus = {
- AVAILABLE_FOR_PATCH: 1,
- BLOCKED_ON_ACTIVE_STACK: 2,
- BLOCKED_ON_OTHER_STACK: 3,
- BLOCKED_UNDER_NATIVE_CODE: 4,
- REPLACED_ON_ACTIVE_STACK: 5
- };
-
- FunctionPatchabilityStatus.SymbolName = function(code) {
- var enumeration = FunctionPatchabilityStatus;
- for (name in enumeration) {
- if (enumeration[name] == code) {
- return name;
- }
- }
- };
-
-
- // A logical failure in liveedit process. This means that change_log
- // is valid and consistent description of what happened.
- function Failure(message) {
- this.message = message;
- }
- // Function (constructor) is public.
- this.Failure = Failure;
-
- Failure.prototype.toString = function() {
- return "LiveEdit Failure: " + this.message;
- };
-
- function CopyErrorPositionToDetails(e, details) {
- function createPositionStruct(script, position) {
- if (position == -1) return;
- var location = script.locationFromPosition(position, true);
- if (location == null) return;
- return {
- line: location.line + 1,
- column: location.column + 1,
- position: position
- };
- }
-
- if (!("scriptObject" in e) || !("startPosition" in e)) {
- return;
- }
-
- var script = e.scriptObject;
-
- var position_struct = {
- start: createPositionStruct(script, e.startPosition),
- end: createPositionStruct(script, e.endPosition)
- };
- details.position = position_struct;
- }
-
- // A testing entry.
- function GetPcFromSourcePos(func, source_pos) {
- return %GetFunctionCodePositionFromSource(func, source_pos);
- }
- // Function is public.
- this.GetPcFromSourcePos = GetPcFromSourcePos;
-
- // LiveEdit main entry point: changes a script text to a new string.
- function SetScriptSource(script, new_source, preview_only, change_log) {
- var old_source = script.source;
- var diff = CompareStrings(old_source, new_source);
- return ApplyPatchMultiChunk(script, diff, new_source, preview_only,
- change_log);
- }
- // Function is public.
- this.SetScriptSource = SetScriptSource;
-
- function CompareStrings(s1, s2) {
- return %LiveEditCompareStrings(s1, s2);
- }
-
- // Applies the change to the script.
- // The change is always a substring (change_pos, change_pos + change_len)
- // being replaced with a completely different string new_str.
- // This API is a legacy and is obsolete.
- //
- // @param {Script} script that is being changed
- // @param {Array} change_log a list that collects engineer-readable
- // description of what happened.
- function ApplySingleChunkPatch(script, change_pos, change_len, new_str,
- change_log) {
- var old_source = script.source;
-
- // Prepare new source string.
- var new_source = old_source.substring(0, change_pos) +
- new_str + old_source.substring(change_pos + change_len);
-
- return ApplyPatchMultiChunk(script,
- [ change_pos, change_pos + change_len, change_pos + new_str.length],
- new_source, false, change_log);
- }
-
- // Creates JSON description for a change tree.
- function DescribeChangeTree(old_code_tree) {
-
- function ProcessOldNode(node) {
- var child_infos = [];
- for (var i = 0; i < node.children.length; i++) {
- var child = node.children[i];
- if (child.status != FunctionStatus.UNCHANGED) {
- child_infos.push(ProcessOldNode(child));
- }
- }
- var new_child_infos = [];
- if (node.textually_unmatched_new_nodes) {
- for (var i = 0; i < node.textually_unmatched_new_nodes.length; i++) {
- var child = node.textually_unmatched_new_nodes[i];
- new_child_infos.push(ProcessNewNode(child));
- }
- }
- var res = {
- name: node.info.function_name,
- positions: DescribePositions(node),
- status: node.status,
- children: child_infos,
- new_children: new_child_infos
- };
- if (node.status_explanation) {
- res.status_explanation = node.status_explanation;
- }
- if (node.textual_corresponding_node) {
- res.new_positions = DescribePositions(node.textual_corresponding_node);
- }
- return res;
- }
-
- function ProcessNewNode(node) {
- var child_infos = [];
- // Do not list ancestors.
- if (false) {
- for (var i = 0; i < node.children.length; i++) {
- child_infos.push(ProcessNewNode(node.children[i]));
- }
- }
- var res = {
- name: node.info.function_name,
- positions: DescribePositions(node),
- children: child_infos,
- };
- return res;
- }
-
- function DescribePositions(node) {
- return {
- start_position: node.info.start_position,
- end_position: node.info.end_position
- };
- }
-
- return ProcessOldNode(old_code_tree);
- }
-
- // Restarts call frame and returns value similar to what LiveEdit returns.
- function RestartFrame(frame_mirror) {
- var result = frame_mirror.restart();
- if (IS_STRING(result)) {
- throw new Failure("Failed to restart frame: " + result);
- }
- var result = {};
- result[NEEDS_STEP_IN_PROPERTY_NAME] = true;
- return result;
- }
- // Function is public.
- this.RestartFrame = RestartFrame;
-
- // Functions are public for tests.
- this.TestApi = {
- PosTranslator: PosTranslator,
- CompareStrings: CompareStrings,
- ApplySingleChunkPatch: ApplySingleChunkPatch
- };
-};
diff --git a/src/3rdparty/v8/src/liveedit.cc b/src/3rdparty/v8/src/liveedit.cc
deleted file mode 100644
index 382f209..0000000
--- a/src/3rdparty/v8/src/liveedit.cc
+++ /dev/null
@@ -1,2128 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-
-#include "liveedit.h"
-
-#include "code-stubs.h"
-#include "compilation-cache.h"
-#include "compiler.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "global-handles.h"
-#include "messages.h"
-#include "parser.h"
-#include "scopeinfo.h"
-#include "scopes.h"
-#include "v8memory.h"
-
-namespace v8 {
-namespace internal {
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-
-void SetElementNonStrict(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value) {
- // Ignore return value from SetElement. It can only be a failure if there
- // are element setters causing exceptions and the debugger context has none
- // of these.
- Handle<Object> no_failure =
- JSObject::SetElement(object, index, value, NONE, kNonStrictMode);
- ASSERT(!no_failure.is_null());
- USE(no_failure);
-}
-
-// A simple implementation of dynamic programming algorithm. It solves
-// the problem of finding the difference of 2 arrays. It uses a table of results
-// of subproblems. Each cell contains a number together with 2-bit flag
-// that helps building the chunk list.
-class Differencer {
- public:
- explicit Differencer(Comparator::Input* input)
- : input_(input), len1_(input->GetLength1()), len2_(input->GetLength2()) {
- buffer_ = NewArray<int>(len1_ * len2_);
- }
- ~Differencer() {
- DeleteArray(buffer_);
- }
-
- void Initialize() {
- int array_size = len1_ * len2_;
- for (int i = 0; i < array_size; i++) {
- buffer_[i] = kEmptyCellValue;
- }
- }
-
- // Makes sure that result for the full problem is calculated and stored
- // in the table together with flags showing a path through subproblems.
- void FillTable() {
- CompareUpToTail(0, 0);
- }
-
- void SaveResult(Comparator::Output* chunk_writer) {
- ResultWriter writer(chunk_writer);
-
- int pos1 = 0;
- int pos2 = 0;
- while (true) {
- if (pos1 < len1_) {
- if (pos2 < len2_) {
- Direction dir = get_direction(pos1, pos2);
- switch (dir) {
- case EQ:
- writer.eq();
- pos1++;
- pos2++;
- break;
- case SKIP1:
- writer.skip1(1);
- pos1++;
- break;
- case SKIP2:
- case SKIP_ANY:
- writer.skip2(1);
- pos2++;
- break;
- default:
- UNREACHABLE();
- }
- } else {
- writer.skip1(len1_ - pos1);
- break;
- }
- } else {
- if (len2_ != pos2) {
- writer.skip2(len2_ - pos2);
- }
- break;
- }
- }
- writer.close();
- }
-
- private:
- Comparator::Input* input_;
- int* buffer_;
- int len1_;
- int len2_;
-
- enum Direction {
- EQ = 0,
- SKIP1,
- SKIP2,
- SKIP_ANY,
-
- MAX_DIRECTION_FLAG_VALUE = SKIP_ANY
- };
-
- // Computes result for a subtask and optionally caches it in the buffer table.
- // All results values are shifted to make space for flags in the lower bits.
- int CompareUpToTail(int pos1, int pos2) {
- if (pos1 < len1_) {
- if (pos2 < len2_) {
- int cached_res = get_value4(pos1, pos2);
- if (cached_res == kEmptyCellValue) {
- Direction dir;
- int res;
- if (input_->Equals(pos1, pos2)) {
- res = CompareUpToTail(pos1 + 1, pos2 + 1);
- dir = EQ;
- } else {
- int res1 = CompareUpToTail(pos1 + 1, pos2) +
- (1 << kDirectionSizeBits);
- int res2 = CompareUpToTail(pos1, pos2 + 1) +
- (1 << kDirectionSizeBits);
- if (res1 == res2) {
- res = res1;
- dir = SKIP_ANY;
- } else if (res1 < res2) {
- res = res1;
- dir = SKIP1;
- } else {
- res = res2;
- dir = SKIP2;
- }
- }
- set_value4_and_dir(pos1, pos2, res, dir);
- cached_res = res;
- }
- return cached_res;
- } else {
- return (len1_ - pos1) << kDirectionSizeBits;
- }
- } else {
- return (len2_ - pos2) << kDirectionSizeBits;
- }
- }
-
- inline int& get_cell(int i1, int i2) {
- return buffer_[i1 + i2 * len1_];
- }
-
- // Each cell keeps a value plus direction. Value is multiplied by 4.
- void set_value4_and_dir(int i1, int i2, int value4, Direction dir) {
- ASSERT((value4 & kDirectionMask) == 0);
- get_cell(i1, i2) = value4 | dir;
- }
-
- int get_value4(int i1, int i2) {
- return get_cell(i1, i2) & (kMaxUInt32 ^ kDirectionMask);
- }
- Direction get_direction(int i1, int i2) {
- return static_cast<Direction>(get_cell(i1, i2) & kDirectionMask);
- }
-
- static const int kDirectionSizeBits = 2;
- static const int kDirectionMask = (1 << kDirectionSizeBits) - 1;
- static const int kEmptyCellValue = -1 << kDirectionSizeBits;
-
- // This method only holds static assert statement (unfortunately you cannot
- // place one in class scope).
- void StaticAssertHolder() {
- STATIC_ASSERT(MAX_DIRECTION_FLAG_VALUE < (1 << kDirectionSizeBits));
- }
-
- class ResultWriter {
- public:
- explicit ResultWriter(Comparator::Output* chunk_writer)
- : chunk_writer_(chunk_writer), pos1_(0), pos2_(0),
- pos1_begin_(-1), pos2_begin_(-1), has_open_chunk_(false) {
- }
- void eq() {
- FlushChunk();
- pos1_++;
- pos2_++;
- }
- void skip1(int len1) {
- StartChunk();
- pos1_ += len1;
- }
- void skip2(int len2) {
- StartChunk();
- pos2_ += len2;
- }
- void close() {
- FlushChunk();
- }
-
- private:
- Comparator::Output* chunk_writer_;
- int pos1_;
- int pos2_;
- int pos1_begin_;
- int pos2_begin_;
- bool has_open_chunk_;
-
- void StartChunk() {
- if (!has_open_chunk_) {
- pos1_begin_ = pos1_;
- pos2_begin_ = pos2_;
- has_open_chunk_ = true;
- }
- }
-
- void FlushChunk() {
- if (has_open_chunk_) {
- chunk_writer_->AddChunk(pos1_begin_, pos2_begin_,
- pos1_ - pos1_begin_, pos2_ - pos2_begin_);
- has_open_chunk_ = false;
- }
- }
- };
-};
-
-
-void Comparator::CalculateDifference(Comparator::Input* input,
- Comparator::Output* result_writer) {
- Differencer differencer(input);
- differencer.Initialize();
- differencer.FillTable();
- differencer.SaveResult(result_writer);
-}
-
-
-static bool CompareSubstrings(Handle<String> s1, int pos1,
- Handle<String> s2, int pos2, int len) {
- for (int i = 0; i < len; i++) {
- if (s1->Get(i + pos1) != s2->Get(i + pos2)) {
- return false;
- }
- }
- return true;
-}
-
-
-// Additional to Input interface. Lets switch Input range to subrange.
-// More elegant way would be to wrap one Input as another Input object
-// and translate positions there, but that would cost us additional virtual
-// call per comparison.
-class SubrangableInput : public Comparator::Input {
- public:
- virtual void SetSubrange1(int offset, int len) = 0;
- virtual void SetSubrange2(int offset, int len) = 0;
-};
-
-
-class SubrangableOutput : public Comparator::Output {
- public:
- virtual void SetSubrange1(int offset, int len) = 0;
- virtual void SetSubrange2(int offset, int len) = 0;
-};
-
-
-static int min(int a, int b) {
- return a < b ? a : b;
-}
-
-
-// Finds common prefix and suffix in input. This parts shouldn't take space in
-// linear programming table. Enable subranging in input and output.
-static void NarrowDownInput(SubrangableInput* input,
- SubrangableOutput* output) {
- const int len1 = input->GetLength1();
- const int len2 = input->GetLength2();
-
- int common_prefix_len;
- int common_suffix_len;
-
- {
- common_prefix_len = 0;
- int prefix_limit = min(len1, len2);
- while (common_prefix_len < prefix_limit &&
- input->Equals(common_prefix_len, common_prefix_len)) {
- common_prefix_len++;
- }
-
- common_suffix_len = 0;
- int suffix_limit = min(len1 - common_prefix_len, len2 - common_prefix_len);
-
- while (common_suffix_len < suffix_limit &&
- input->Equals(len1 - common_suffix_len - 1,
- len2 - common_suffix_len - 1)) {
- common_suffix_len++;
- }
- }
-
- if (common_prefix_len > 0 || common_suffix_len > 0) {
- int new_len1 = len1 - common_suffix_len - common_prefix_len;
- int new_len2 = len2 - common_suffix_len - common_prefix_len;
-
- input->SetSubrange1(common_prefix_len, new_len1);
- input->SetSubrange2(common_prefix_len, new_len2);
-
- output->SetSubrange1(common_prefix_len, new_len1);
- output->SetSubrange2(common_prefix_len, new_len2);
- }
-}
-
-
-// A helper class that writes chunk numbers into JSArray.
-// Each chunk is stored as 3 array elements: (pos1_begin, pos1_end, pos2_end).
-class CompareOutputArrayWriter {
- public:
- explicit CompareOutputArrayWriter(Isolate* isolate)
- : array_(isolate->factory()->NewJSArray(10)), current_size_(0) {}
-
- Handle<JSArray> GetResult() {
- return array_;
- }
-
- void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) {
- Isolate* isolate = array_->GetIsolate();
- SetElementNonStrict(array_,
- current_size_,
- Handle<Object>(Smi::FromInt(char_pos1), isolate));
- SetElementNonStrict(array_,
- current_size_ + 1,
- Handle<Object>(Smi::FromInt(char_pos1 + char_len1),
- isolate));
- SetElementNonStrict(array_,
- current_size_ + 2,
- Handle<Object>(Smi::FromInt(char_pos2 + char_len2),
- isolate));
- current_size_ += 3;
- }
-
- private:
- Handle<JSArray> array_;
- int current_size_;
-};
-
-
-// Represents 2 strings as 2 arrays of tokens.
-// TODO(LiveEdit): Currently it's actually an array of charactres.
-// Make array of tokens instead.
-class TokensCompareInput : public Comparator::Input {
- public:
- TokensCompareInput(Handle<String> s1, int offset1, int len1,
- Handle<String> s2, int offset2, int len2)
- : s1_(s1), offset1_(offset1), len1_(len1),
- s2_(s2), offset2_(offset2), len2_(len2) {
- }
- virtual int GetLength1() {
- return len1_;
- }
- virtual int GetLength2() {
- return len2_;
- }
- bool Equals(int index1, int index2) {
- return s1_->Get(offset1_ + index1) == s2_->Get(offset2_ + index2);
- }
-
- private:
- Handle<String> s1_;
- int offset1_;
- int len1_;
- Handle<String> s2_;
- int offset2_;
- int len2_;
-};
-
-
-// Stores compare result in JSArray. Converts substring positions
-// to absolute positions.
-class TokensCompareOutput : public Comparator::Output {
- public:
- TokensCompareOutput(CompareOutputArrayWriter* array_writer,
- int offset1, int offset2)
- : array_writer_(array_writer), offset1_(offset1), offset2_(offset2) {
- }
-
- void AddChunk(int pos1, int pos2, int len1, int len2) {
- array_writer_->WriteChunk(pos1 + offset1_, pos2 + offset2_, len1, len2);
- }
-
- private:
- CompareOutputArrayWriter* array_writer_;
- int offset1_;
- int offset2_;
-};
-
-
-// Wraps raw n-elements line_ends array as a list of n+1 lines. The last line
-// never has terminating new line character.
-class LineEndsWrapper {
- public:
- explicit LineEndsWrapper(Handle<String> string)
- : ends_array_(CalculateLineEnds(string, false)),
- string_len_(string->length()) {
- }
- int length() {
- return ends_array_->length() + 1;
- }
- // Returns start for any line including start of the imaginary line after
- // the last line.
- int GetLineStart(int index) {
- if (index == 0) {
- return 0;
- } else {
- return GetLineEnd(index - 1);
- }
- }
- int GetLineEnd(int index) {
- if (index == ends_array_->length()) {
- // End of the last line is always an end of the whole string.
- // If the string ends with a new line character, the last line is an
- // empty string after this character.
- return string_len_;
- } else {
- return GetPosAfterNewLine(index);
- }
- }
-
- private:
- Handle<FixedArray> ends_array_;
- int string_len_;
-
- int GetPosAfterNewLine(int index) {
- return Smi::cast(ends_array_->get(index))->value() + 1;
- }
-};
-
-
-// Represents 2 strings as 2 arrays of lines.
-class LineArrayCompareInput : public SubrangableInput {
- public:
- LineArrayCompareInput(Handle<String> s1, Handle<String> s2,
- LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
- : s1_(s1), s2_(s2), line_ends1_(line_ends1),
- line_ends2_(line_ends2),
- subrange_offset1_(0), subrange_offset2_(0),
- subrange_len1_(line_ends1_.length()),
- subrange_len2_(line_ends2_.length()) {
- }
- int GetLength1() {
- return subrange_len1_;
- }
- int GetLength2() {
- return subrange_len2_;
- }
- bool Equals(int index1, int index2) {
- index1 += subrange_offset1_;
- index2 += subrange_offset2_;
-
- int line_start1 = line_ends1_.GetLineStart(index1);
- int line_start2 = line_ends2_.GetLineStart(index2);
- int line_end1 = line_ends1_.GetLineEnd(index1);
- int line_end2 = line_ends2_.GetLineEnd(index2);
- int len1 = line_end1 - line_start1;
- int len2 = line_end2 - line_start2;
- if (len1 != len2) {
- return false;
- }
- return CompareSubstrings(s1_, line_start1, s2_, line_start2,
- len1);
- }
- void SetSubrange1(int offset, int len) {
- subrange_offset1_ = offset;
- subrange_len1_ = len;
- }
- void SetSubrange2(int offset, int len) {
- subrange_offset2_ = offset;
- subrange_len2_ = len;
- }
-
- private:
- Handle<String> s1_;
- Handle<String> s2_;
- LineEndsWrapper line_ends1_;
- LineEndsWrapper line_ends2_;
- int subrange_offset1_;
- int subrange_offset2_;
- int subrange_len1_;
- int subrange_len2_;
-};
-
-
-// Stores compare result in JSArray. For each chunk tries to conduct
-// a fine-grained nested diff token-wise.
-class TokenizingLineArrayCompareOutput : public SubrangableOutput {
- public:
- TokenizingLineArrayCompareOutput(LineEndsWrapper line_ends1,
- LineEndsWrapper line_ends2,
- Handle<String> s1, Handle<String> s2)
- : array_writer_(s1->GetIsolate()),
- line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2),
- subrange_offset1_(0), subrange_offset2_(0) {
- }
-
- void AddChunk(int line_pos1, int line_pos2, int line_len1, int line_len2) {
- line_pos1 += subrange_offset1_;
- line_pos2 += subrange_offset2_;
-
- int char_pos1 = line_ends1_.GetLineStart(line_pos1);
- int char_pos2 = line_ends2_.GetLineStart(line_pos2);
- int char_len1 = line_ends1_.GetLineStart(line_pos1 + line_len1) - char_pos1;
- int char_len2 = line_ends2_.GetLineStart(line_pos2 + line_len2) - char_pos2;
-
- if (char_len1 < CHUNK_LEN_LIMIT && char_len2 < CHUNK_LEN_LIMIT) {
- // Chunk is small enough to conduct a nested token-level diff.
- HandleScope subTaskScope(s1_->GetIsolate());
-
- TokensCompareInput tokens_input(s1_, char_pos1, char_len1,
- s2_, char_pos2, char_len2);
- TokensCompareOutput tokens_output(&array_writer_, char_pos1,
- char_pos2);
-
- Comparator::CalculateDifference(&tokens_input, &tokens_output);
- } else {
- array_writer_.WriteChunk(char_pos1, char_pos2, char_len1, char_len2);
- }
- }
- void SetSubrange1(int offset, int len) {
- subrange_offset1_ = offset;
- }
- void SetSubrange2(int offset, int len) {
- subrange_offset2_ = offset;
- }
-
- Handle<JSArray> GetResult() {
- return array_writer_.GetResult();
- }
-
- private:
- static const int CHUNK_LEN_LIMIT = 800;
-
- CompareOutputArrayWriter array_writer_;
- LineEndsWrapper line_ends1_;
- LineEndsWrapper line_ends2_;
- Handle<String> s1_;
- Handle<String> s2_;
- int subrange_offset1_;
- int subrange_offset2_;
-};
-
-
-Handle<JSArray> LiveEdit::CompareStrings(Handle<String> s1,
- Handle<String> s2) {
- s1 = FlattenGetString(s1);
- s2 = FlattenGetString(s2);
-
- LineEndsWrapper line_ends1(s1);
- LineEndsWrapper line_ends2(s2);
-
- LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
- TokenizingLineArrayCompareOutput output(line_ends1, line_ends2, s1, s2);
-
- NarrowDownInput(&input, &output);
-
- Comparator::CalculateDifference(&input, &output);
-
- return output.GetResult();
-}
-
-
-static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
- // TODO(635): support extensions.
- PostponeInterruptsScope postpone(isolate);
-
- // Build AST.
- CompilationInfoWithZone info(script);
- info.MarkAsGlobal();
- // Parse and don't allow skipping lazy functions.
- if (ParserApi::Parse(&info, kNoParsingFlags)) {
- // Compile the code.
- LiveEditFunctionTracker tracker(info.isolate(), info.function());
- if (Compiler::MakeCodeForLiveEdit(&info)) {
- ASSERT(!info.code().is_null());
- tracker.RecordRootFunctionInfo(info.code());
- } else {
- info.isolate()->StackOverflow();
- }
- }
-}
-
-
-// Unwraps JSValue object, returning its field "value"
-static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
- return Handle<Object>(jsValue->value(), jsValue->GetIsolate());
-}
-
-
-// Wraps any object into a OpaqueReference, that will hide the object
-// from JavaScript.
-static Handle<JSValue> WrapInJSValue(Handle<Object> object) {
- Handle<JSFunction> constructor =
- Isolate::Current()->opaque_reference_function();
- Handle<JSValue> result =
- Handle<JSValue>::cast(FACTORY->NewJSObject(constructor));
- result->set_value(*object);
- return result;
-}
-
-
-static Handle<SharedFunctionInfo> UnwrapSharedFunctionInfoFromJSValue(
- Handle<JSValue> jsValue) {
- Object* shared = jsValue->value();
- CHECK(shared->IsSharedFunctionInfo());
- return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(shared));
-}
-
-
-static int GetArrayLength(Handle<JSArray> array) {
- Object* length = array->length();
- CHECK(length->IsSmi());
- return Smi::cast(length)->value();
-}
-
-
-// Simple helper class that creates more or less typed structures over
-// JSArray object. This is an adhoc method of passing structures from C++
-// to JavaScript.
-template<typename S>
-class JSArrayBasedStruct {
- public:
- static S Create() {
- Handle<JSArray> array = FACTORY->NewJSArray(S::kSize_);
- return S(array);
- }
- static S cast(Object* object) {
- JSArray* array = JSArray::cast(object);
- Handle<JSArray> array_handle(array);
- return S(array_handle);
- }
- explicit JSArrayBasedStruct(Handle<JSArray> array) : array_(array) {
- }
- Handle<JSArray> GetJSArray() {
- return array_;
- }
- Isolate* isolate() const {
- return array_->GetIsolate();
- }
-
- protected:
- void SetField(int field_position, Handle<Object> value) {
- SetElementNonStrict(array_, field_position, value);
- }
- void SetSmiValueField(int field_position, int value) {
- SetElementNonStrict(array_,
- field_position,
- Handle<Smi>(Smi::FromInt(value), isolate()));
- }
- Object* GetField(int field_position) {
- return array_->GetElementNoExceptionThrown(field_position);
- }
- int GetSmiValueField(int field_position) {
- Object* res = GetField(field_position);
- CHECK(res->IsSmi());
- return Smi::cast(res)->value();
- }
-
- private:
- Handle<JSArray> array_;
-};
-
-
-// Represents some function compilation details. This structure will be used
-// from JavaScript. It contains Code object, which is kept wrapped
-// into a BlindReference for sanitizing reasons.
-class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
- public:
- explicit FunctionInfoWrapper(Handle<JSArray> array)
- : JSArrayBasedStruct<FunctionInfoWrapper>(array) {
- }
- void SetInitialProperties(Handle<String> name, int start_position,
- int end_position, int param_num,
- int literal_count, int parent_index) {
- HandleScope scope(isolate());
- this->SetField(kFunctionNameOffset_, name);
- this->SetSmiValueField(kStartPositionOffset_, start_position);
- this->SetSmiValueField(kEndPositionOffset_, end_position);
- this->SetSmiValueField(kParamNumOffset_, param_num);
- this->SetSmiValueField(kLiteralNumOffset_, literal_count);
- this->SetSmiValueField(kParentIndexOffset_, parent_index);
- }
- void SetFunctionCode(Handle<Code> function_code,
- Handle<Object> code_scope_info) {
- Handle<JSValue> code_wrapper = WrapInJSValue(function_code);
- this->SetField(kCodeOffset_, code_wrapper);
-
- Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info);
- this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
- }
- void SetOuterScopeInfo(Handle<Object> scope_info_array) {
- this->SetField(kOuterScopeInfoOffset_, scope_info_array);
- }
- void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
- Handle<JSValue> info_holder = WrapInJSValue(info);
- this->SetField(kSharedFunctionInfoOffset_, info_holder);
- }
- int GetLiteralCount() {
- return this->GetSmiValueField(kLiteralNumOffset_);
- }
- int GetParentIndex() {
- return this->GetSmiValueField(kParentIndexOffset_);
- }
- Handle<Code> GetFunctionCode() {
- Object* element = this->GetField(kCodeOffset_);
- CHECK(element->IsJSValue());
- Handle<JSValue> value_wrapper(JSValue::cast(element));
- Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
- CHECK(raw_result->IsCode());
- return Handle<Code>::cast(raw_result);
- }
- Handle<Object> GetCodeScopeInfo() {
- Object* element = this->GetField(kCodeScopeInfoOffset_);
- CHECK(element->IsJSValue());
- return UnwrapJSValue(Handle<JSValue>(JSValue::cast(element)));
- }
- int GetStartPosition() {
- return this->GetSmiValueField(kStartPositionOffset_);
- }
- int GetEndPosition() {
- return this->GetSmiValueField(kEndPositionOffset_);
- }
-
- private:
- static const int kFunctionNameOffset_ = 0;
- static const int kStartPositionOffset_ = 1;
- static const int kEndPositionOffset_ = 2;
- static const int kParamNumOffset_ = 3;
- static const int kCodeOffset_ = 4;
- static const int kCodeScopeInfoOffset_ = 5;
- static const int kOuterScopeInfoOffset_ = 6;
- static const int kParentIndexOffset_ = 7;
- static const int kSharedFunctionInfoOffset_ = 8;
- static const int kLiteralNumOffset_ = 9;
- static const int kSize_ = 10;
-
- friend class JSArrayBasedStruct<FunctionInfoWrapper>;
-};
-
-
-// Wraps SharedFunctionInfo along with some of its fields for passing it
-// back to JavaScript. SharedFunctionInfo object itself is additionally
-// wrapped into BlindReference for sanitizing reasons.
-class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
- public:
- static bool IsInstance(Handle<JSArray> array) {
- return array->length() == Smi::FromInt(kSize_) &&
- array->GetElementNoExceptionThrown(kSharedInfoOffset_)->IsJSValue();
- }
-
- explicit SharedInfoWrapper(Handle<JSArray> array)
- : JSArrayBasedStruct<SharedInfoWrapper>(array) {
- }
-
- void SetProperties(Handle<String> name, int start_position, int end_position,
- Handle<SharedFunctionInfo> info) {
- HandleScope scope(isolate());
- this->SetField(kFunctionNameOffset_, name);
- Handle<JSValue> info_holder = WrapInJSValue(info);
- this->SetField(kSharedInfoOffset_, info_holder);
- this->SetSmiValueField(kStartPositionOffset_, start_position);
- this->SetSmiValueField(kEndPositionOffset_, end_position);
- }
- Handle<SharedFunctionInfo> GetInfo() {
- Object* element = this->GetField(kSharedInfoOffset_);
- CHECK(element->IsJSValue());
- Handle<JSValue> value_wrapper(JSValue::cast(element));
- return UnwrapSharedFunctionInfoFromJSValue(value_wrapper);
- }
-
- private:
- static const int kFunctionNameOffset_ = 0;
- static const int kStartPositionOffset_ = 1;
- static const int kEndPositionOffset_ = 2;
- static const int kSharedInfoOffset_ = 3;
- static const int kSize_ = 4;
-
- friend class JSArrayBasedStruct<SharedInfoWrapper>;
-};
-
-
-class FunctionInfoListener {
- public:
- explicit FunctionInfoListener(Isolate* isolate) {
- current_parent_index_ = -1;
- len_ = 0;
- result_ = isolate->factory()->NewJSArray(10);
- }
-
- void FunctionStarted(FunctionLiteral* fun) {
- HandleScope scope(isolate());
- FunctionInfoWrapper info = FunctionInfoWrapper::Create();
- info.SetInitialProperties(fun->name(), fun->start_position(),
- fun->end_position(), fun->parameter_count(),
- fun->materialized_literal_count(),
- current_parent_index_);
- current_parent_index_ = len_;
- SetElementNonStrict(result_, len_, info.GetJSArray());
- len_++;
- }
-
- void FunctionDone() {
- HandleScope scope(isolate());
- FunctionInfoWrapper info =
- FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
- current_parent_index_ = info.GetParentIndex();
- }
-
- // Saves only function code, because for a script function we
- // may never create a SharedFunctionInfo object.
- void FunctionCode(Handle<Code> function_code) {
- FunctionInfoWrapper info =
- FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
- info.SetFunctionCode(function_code,
- Handle<Object>(isolate()->heap()->null_value(),
- isolate()));
- }
-
- // Saves full information about a function: its code, its scope info
- // and a SharedFunctionInfo object.
- void FunctionInfo(Handle<SharedFunctionInfo> shared, Scope* scope,
- Zone* zone) {
- if (!shared->IsSharedFunctionInfo()) {
- return;
- }
- FunctionInfoWrapper info =
- FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
- info.SetFunctionCode(Handle<Code>(shared->code()),
- Handle<Object>(shared->scope_info(), isolate()));
- info.SetSharedFunctionInfo(shared);
-
- Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone),
- isolate());
- info.SetOuterScopeInfo(scope_info_list);
- }
-
- Handle<JSArray> GetResult() { return result_; }
-
- private:
- Isolate* isolate() const { return result_->GetIsolate(); }
-
- Object* SerializeFunctionScope(Scope* scope, Zone* zone) {
- HandleScope handle_scope(isolate());
-
- Handle<JSArray> scope_info_list = isolate()->factory()->NewJSArray(10);
- int scope_info_length = 0;
-
- // Saves some description of scope. It stores name and indexes of
- // variables in the whole scope chain. Null-named slots delimit
- // scopes of this chain.
- Scope* outer_scope = scope->outer_scope();
- if (outer_scope == NULL) {
- return isolate()->heap()->undefined_value();
- }
- do {
- ZoneList<Variable*> stack_list(outer_scope->StackLocalCount(), zone);
- ZoneList<Variable*> context_list(outer_scope->ContextLocalCount(), zone);
- outer_scope->CollectStackAndContextLocals(&stack_list, &context_list);
- context_list.Sort(&Variable::CompareIndex);
-
- for (int i = 0; i < context_list.length(); i++) {
- SetElementNonStrict(scope_info_list,
- scope_info_length,
- context_list[i]->name());
- scope_info_length++;
- SetElementNonStrict(
- scope_info_list,
- scope_info_length,
- Handle<Smi>(Smi::FromInt(context_list[i]->index()), isolate()));
- scope_info_length++;
- }
- SetElementNonStrict(scope_info_list,
- scope_info_length,
- Handle<Object>(isolate()->heap()->null_value(),
- isolate()));
- scope_info_length++;
-
- outer_scope = outer_scope->outer_scope();
- } while (outer_scope != NULL);
-
- return *scope_info_list;
- }
-
- Handle<JSArray> result_;
- int len_;
- int current_parent_index_;
-};
-
-
-JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
- Handle<String> source) {
- Isolate* isolate = Isolate::Current();
-
- FunctionInfoListener listener(isolate);
- Handle<Object> original_source =
- Handle<Object>(script->source(), isolate);
- script->set_source(*source);
- isolate->set_active_function_info_listener(&listener);
-
- {
- // Creating verbose TryCatch from public API is currently the only way to
- // force code save location. We do not use this the object directly.
- v8::TryCatch try_catch;
- try_catch.SetVerbose(true);
-
- // A logical 'try' section.
- CompileScriptForTracker(isolate, script);
- }
-
- // A logical 'catch' section.
- Handle<JSObject> rethrow_exception;
- if (isolate->has_pending_exception()) {
- Handle<Object> exception(isolate->pending_exception()->ToObjectChecked(),
- isolate);
- MessageLocation message_location = isolate->GetMessageLocation();
-
- isolate->clear_pending_message();
- isolate->clear_pending_exception();
-
- // If possible, copy positions from message object to exception object.
- if (exception->IsJSObject() && !message_location.script().is_null()) {
- rethrow_exception = Handle<JSObject>::cast(exception);
-
- Factory* factory = isolate->factory();
- Handle<String> start_pos_key = factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("startPosition"));
- Handle<String> end_pos_key = factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("endPosition"));
- Handle<String> script_obj_key = factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("scriptObject"));
- Handle<Smi> start_pos(
- Smi::FromInt(message_location.start_pos()), isolate);
- Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()), isolate);
- Handle<JSValue> script_obj = GetScriptWrapper(message_location.script());
- JSReceiver::SetProperty(
- rethrow_exception, start_pos_key, start_pos, NONE, kNonStrictMode);
- JSReceiver::SetProperty(
- rethrow_exception, end_pos_key, end_pos, NONE, kNonStrictMode);
- JSReceiver::SetProperty(
- rethrow_exception, script_obj_key, script_obj, NONE, kNonStrictMode);
- }
- }
-
- // A logical 'finally' section.
- isolate->set_active_function_info_listener(NULL);
- script->set_source(*original_source);
-
- if (rethrow_exception.is_null()) {
- return *(listener.GetResult());
- } else {
- isolate->Throw(*rethrow_exception);
- return 0;
- }
-}
-
-
-void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
- HandleScope scope(array->GetIsolate());
- int len = GetArrayLength(array);
- for (int i = 0; i < len; i++) {
- Handle<SharedFunctionInfo> info(
- SharedFunctionInfo::cast(array->GetElementNoExceptionThrown(i)));
- SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create();
- Handle<String> name_handle(String::cast(info->name()));
- info_wrapper.SetProperties(name_handle, info->start_position(),
- info->end_position(), info);
- SetElementNonStrict(array, i, info_wrapper.GetJSArray());
- }
-}
-
-
-// Visitor that finds all references to a particular code object,
-// including "CODE_TARGET" references in other code objects and replaces
-// them on the fly.
-class ReplacingVisitor : public ObjectVisitor {
- public:
- explicit ReplacingVisitor(Code* original, Code* substitution)
- : original_(original), substitution_(substitution) {
- }
-
- virtual void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if (*p == original_) {
- *p = substitution_;
- }
- }
- }
-
- virtual void VisitCodeEntry(Address entry) {
- if (Code::GetObjectFromEntryAddress(entry) == original_) {
- Address substitution_entry = substitution_->instruction_start();
- Memory::Address_at(entry) = substitution_entry;
- }
- }
-
- virtual void VisitCodeTarget(RelocInfo* rinfo) {
- if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
- Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
- Address substitution_entry = substitution_->instruction_start();
- rinfo->set_target_address(substitution_entry);
- }
- }
-
- virtual void VisitDebugTarget(RelocInfo* rinfo) {
- VisitCodeTarget(rinfo);
- }
-
- private:
- Code* original_;
- Code* substitution_;
-};
-
-
-// Finds all references to original and replaces them with substitution.
-static void ReplaceCodeObject(Handle<Code> original,
- Handle<Code> substitution) {
- // Perform a full GC in order to ensure that we are not in the middle of an
- // incremental marking phase when we are replacing the code object.
- // Since we are not in an incremental marking phase we can write pointers
- // to code objects (that are never in new space) without worrying about
- // write barriers.
- Heap* heap = original->GetHeap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "liveedit.cc ReplaceCodeObject");
-
- ASSERT(!heap->InNewSpace(*substitution));
-
- AssertNoAllocation no_allocations_please;
-
- ReplacingVisitor visitor(*original, *substitution);
-
- // Iterate over all roots. Stack frames may have pointer into original code,
- // so temporary replace the pointers with offset numbers
- // in prologue/epilogue.
- heap->IterateRoots(&visitor, VISIT_ALL);
-
- // Now iterate over all pointers of all objects, including code_target
- // implicit pointers.
- HeapIterator iterator(heap);
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- obj->Iterate(&visitor);
- }
-}
-
-
-// Patch function literals.
-// Name 'literals' is a misnomer. Rather it's a cache for complex object
-// boilerplates and for a native context. We must clean cached values.
-// Additionally we may need to allocate a new array if number of literals
-// changed.
-class LiteralFixer {
- public:
- static void PatchLiterals(FunctionInfoWrapper* compile_info_wrapper,
- Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate) {
- int new_literal_count = compile_info_wrapper->GetLiteralCount();
- if (new_literal_count > 0) {
- new_literal_count += JSFunction::kLiteralsPrefixSize;
- }
- int old_literal_count = shared_info->num_literals();
-
- if (old_literal_count == new_literal_count) {
- // If literal count didn't change, simply go over all functions
- // and clear literal arrays.
- ClearValuesVisitor visitor;
- IterateJSFunctions(*shared_info, &visitor);
- } else {
- // When literal count changes, we have to create new array instances.
- // Since we cannot create instances when iterating heap, we should first
- // collect all functions and fix their literal arrays.
- Handle<FixedArray> function_instances =
- CollectJSFunctions(shared_info, isolate);
- for (int i = 0; i < function_instances->length(); i++) {
- Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
- Handle<FixedArray> old_literals(fun->literals());
- Handle<FixedArray> new_literals =
- isolate->factory()->NewFixedArray(new_literal_count);
- if (new_literal_count > 0) {
- Handle<Context> native_context;
- if (old_literals->length() >
- JSFunction::kLiteralNativeContextIndex) {
- native_context = Handle<Context>(
- JSFunction::NativeContextFromLiterals(fun->literals()));
- } else {
- native_context = Handle<Context>(fun->context()->native_context());
- }
- new_literals->set(JSFunction::kLiteralNativeContextIndex,
- *native_context);
- }
- fun->set_literals(*new_literals);
- }
-
- shared_info->set_num_literals(new_literal_count);
- }
- }
-
- private:
- // Iterates all function instances in the HEAP that refers to the
- // provided shared_info.
- template<typename Visitor>
- static void IterateJSFunctions(SharedFunctionInfo* shared_info,
- Visitor* visitor) {
- AssertNoAllocation no_allocations_please;
-
- HeapIterator iterator(shared_info->GetHeap());
- for (HeapObject* obj = iterator.next(); obj != NULL;
- obj = iterator.next()) {
- if (obj->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(obj);
- if (function->shared() == shared_info) {
- visitor->visit(function);
- }
- }
- }
- }
-
- // Finds all instances of JSFunction that refers to the provided shared_info
- // and returns array with them.
- static Handle<FixedArray> CollectJSFunctions(
- Handle<SharedFunctionInfo> shared_info, Isolate* isolate) {
- CountVisitor count_visitor;
- count_visitor.count = 0;
- IterateJSFunctions(*shared_info, &count_visitor);
- int size = count_visitor.count;
-
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(size);
- if (size > 0) {
- CollectVisitor collect_visitor(result);
- IterateJSFunctions(*shared_info, &collect_visitor);
- }
- return result;
- }
-
- class ClearValuesVisitor {
- public:
- void visit(JSFunction* fun) {
- FixedArray* literals = fun->literals();
- int len = literals->length();
- for (int j = JSFunction::kLiteralsPrefixSize; j < len; j++) {
- literals->set_undefined(j);
- }
- }
- };
-
- class CountVisitor {
- public:
- void visit(JSFunction* fun) {
- count++;
- }
- int count;
- };
-
- class CollectVisitor {
- public:
- explicit CollectVisitor(Handle<FixedArray> output)
- : m_output(output), m_pos(0) {}
-
- void visit(JSFunction* fun) {
- m_output->set(m_pos, fun);
- m_pos++;
- }
- private:
- Handle<FixedArray> m_output;
- int m_pos;
- };
-};
-
-
-// Check whether the code is natural function code (not a lazy-compile stub
-// code).
-static bool IsJSFunctionCode(Code* code) {
- return code->kind() == Code::FUNCTION;
-}
-
-
-// Returns true if an instance of candidate were inlined into function's code.
-static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
- AssertNoAllocation no_gc;
-
- if (function->code()->kind() != Code::OPTIMIZED_FUNCTION) return false;
-
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(function->code()->deoptimization_data());
-
- if (data == HEAP->empty_fixed_array()) return false;
-
- FixedArray* literals = data->LiteralArray();
-
- int inlined_count = data->InlinedFunctionCount()->value();
- for (int i = 0; i < inlined_count; ++i) {
- JSFunction* inlined = JSFunction::cast(literals->get(i));
- if (inlined->shared() == candidate) return true;
- }
-
- return false;
-}
-
-
-class DependentFunctionFilter : public OptimizedFunctionFilter {
- public:
- explicit DependentFunctionFilter(
- SharedFunctionInfo* function_info)
- : function_info_(function_info) {}
-
- virtual bool TakeFunction(JSFunction* function) {
- return (function->shared() == function_info_ ||
- IsInlined(function, function_info_));
- }
-
- private:
- SharedFunctionInfo* function_info_;
-};
-
-
-static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) {
- AssertNoAllocation no_allocation;
-
- DependentFunctionFilter filter(function_info);
- Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
-}
-
-
-MaybeObject* LiveEdit::ReplaceFunctionCode(
- Handle<JSArray> new_compile_info_array,
- Handle<JSArray> shared_info_array) {
- Isolate* isolate = Isolate::Current();
- HandleScope scope(isolate);
-
- if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return isolate->ThrowIllegalOperation();
- }
-
- FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
- SharedInfoWrapper shared_info_wrapper(shared_info_array);
-
- Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
-
- isolate->heap()->EnsureHeapIsIterable();
-
- if (IsJSFunctionCode(shared_info->code())) {
- Handle<Code> code = compile_info_wrapper.GetFunctionCode();
- ReplaceCodeObject(Handle<Code>(shared_info->code()), code);
- Handle<Object> code_scope_info = compile_info_wrapper.GetCodeScopeInfo();
- if (code_scope_info->IsFixedArray()) {
- shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
- }
- }
-
- if (shared_info->debug_info()->IsDebugInfo()) {
- Handle<DebugInfo> debug_info(DebugInfo::cast(shared_info->debug_info()));
- Handle<Code> new_original_code =
- FACTORY->CopyCode(compile_info_wrapper.GetFunctionCode());
- debug_info->set_original_code(*new_original_code);
- }
-
- int start_position = compile_info_wrapper.GetStartPosition();
- int end_position = compile_info_wrapper.GetEndPosition();
- shared_info->set_start_position(start_position);
- shared_info->set_end_position(end_position);
-
- LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info, isolate);
-
- shared_info->set_construct_stub(
- isolate->builtins()->builtin(Builtins::kJSConstructStubGeneric));
-
- DeoptimizeDependentFunctions(*shared_info);
- isolate->compilation_cache()->Remove(shared_info);
-
- return isolate->heap()->undefined_value();
-}
-
-
-MaybeObject* LiveEdit::FunctionSourceUpdated(
- Handle<JSArray> shared_info_array) {
- Isolate* isolate = shared_info_array->GetIsolate();
- HandleScope scope(isolate);
-
- if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return isolate->ThrowIllegalOperation();
- }
-
- SharedInfoWrapper shared_info_wrapper(shared_info_array);
- Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
-
- DeoptimizeDependentFunctions(*shared_info);
- isolate->compilation_cache()->Remove(shared_info);
-
- return isolate->heap()->undefined_value();
-}
-
-
-void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
- Handle<Object> script_handle) {
- Handle<SharedFunctionInfo> shared_info =
- UnwrapSharedFunctionInfoFromJSValue(function_wrapper);
- CHECK(script_handle->IsScript() || script_handle->IsUndefined());
- shared_info->set_script(*script_handle);
-
- Isolate::Current()->compilation_cache()->Remove(shared_info);
-}
-
-
-// For a script text change (defined as position_change_array), translates
-// position in unchanged text to position in changed text.
-// Text change is a set of non-overlapping regions in text, that have changed
-// their contents and length. It is specified as array of groups of 3 numbers:
-// (change_begin, change_end, change_end_new_position).
-// Each group describes a change in text; groups are sorted by change_begin.
-// Only position in text beyond any changes may be successfully translated.
-// If a positions is inside some region that changed, result is currently
-// undefined.
-static int TranslatePosition(int original_position,
- Handle<JSArray> position_change_array) {
- int position_diff = 0;
- int array_len = GetArrayLength(position_change_array);
- // TODO(635): binary search may be used here
- for (int i = 0; i < array_len; i += 3) {
- Object* element = position_change_array->GetElementNoExceptionThrown(i);
- CHECK(element->IsSmi());
- int chunk_start = Smi::cast(element)->value();
- if (original_position < chunk_start) {
- break;
- }
- element = position_change_array->GetElementNoExceptionThrown(i + 1);
- CHECK(element->IsSmi());
- int chunk_end = Smi::cast(element)->value();
- // Position mustn't be inside a chunk.
- ASSERT(original_position >= chunk_end);
- element = position_change_array->GetElementNoExceptionThrown(i + 2);
- CHECK(element->IsSmi());
- int chunk_changed_end = Smi::cast(element)->value();
- position_diff = chunk_changed_end - chunk_end;
- }
-
- return original_position + position_diff;
-}
-
-
-// Auto-growing buffer for writing relocation info code section. This buffer
-// is a simplified version of buffer from Assembler. Unlike Assembler, this
-// class is platform-independent and it works without dealing with instructions.
-// As specified by RelocInfo format, the buffer is filled in reversed order:
-// from upper to lower addresses.
-// It uses NewArray/DeleteArray for memory management.
-class RelocInfoBuffer {
- public:
- RelocInfoBuffer(int buffer_initial_capicity, byte* pc) {
- buffer_size_ = buffer_initial_capicity + kBufferGap;
- buffer_ = NewArray<byte>(buffer_size_);
-
- reloc_info_writer_.Reposition(buffer_ + buffer_size_, pc);
- }
- ~RelocInfoBuffer() {
- DeleteArray(buffer_);
- }
-
- // As specified by RelocInfo format, the buffer is filled in reversed order:
- // from upper to lower addresses.
- void Write(const RelocInfo* rinfo) {
- if (buffer_ + kBufferGap >= reloc_info_writer_.pos()) {
- Grow();
- }
- reloc_info_writer_.Write(rinfo);
- }
-
- Vector<byte> GetResult() {
- // Return the bytes from pos up to end of buffer.
- int result_size =
- static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer_.pos());
- return Vector<byte>(reloc_info_writer_.pos(), result_size);
- }
-
- private:
- void Grow() {
- // Compute new buffer size.
- int new_buffer_size;
- if (buffer_size_ < 2 * KB) {
- new_buffer_size = 4 * KB;
- } else {
- new_buffer_size = 2 * buffer_size_;
- }
- // Some internal data structures overflow for very large buffers,
- // they must ensure that kMaximalBufferSize is not too large.
- if (new_buffer_size > kMaximalBufferSize) {
- V8::FatalProcessOutOfMemory("RelocInfoBuffer::GrowBuffer");
- }
-
- // Set up new buffer.
- byte* new_buffer = NewArray<byte>(new_buffer_size);
-
- // Copy the data.
- int curently_used_size =
- static_cast<int>(buffer_ + buffer_size_ - reloc_info_writer_.pos());
- memmove(new_buffer + new_buffer_size - curently_used_size,
- reloc_info_writer_.pos(), curently_used_size);
-
- reloc_info_writer_.Reposition(
- new_buffer + new_buffer_size - curently_used_size,
- reloc_info_writer_.last_pc());
-
- DeleteArray(buffer_);
- buffer_ = new_buffer;
- buffer_size_ = new_buffer_size;
- }
-
- RelocInfoWriter reloc_info_writer_;
- byte* buffer_;
- int buffer_size_;
-
- static const int kBufferGap = RelocInfoWriter::kMaxSize;
- static const int kMaximalBufferSize = 512*MB;
-};
-
-// Patch positions in code (changes relocation info section) and possibly
-// returns new instance of code.
-static Handle<Code> PatchPositionsInCode(
- Handle<Code> code,
- Handle<JSArray> position_change_array) {
-
- RelocInfoBuffer buffer_writer(code->relocation_size(),
- code->instruction_start());
-
- {
- AssertNoAllocation no_allocations_please;
- for (RelocIterator it(*code); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- if (RelocInfo::IsPosition(rinfo->rmode())) {
- int position = static_cast<int>(rinfo->data());
- int new_position = TranslatePosition(position,
- position_change_array);
- if (position != new_position) {
- RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position, NULL);
- buffer_writer.Write(&info_copy);
- continue;
- }
- }
- if (RelocInfo::IsRealRelocMode(rinfo->rmode())) {
- buffer_writer.Write(it.rinfo());
- }
- }
- }
-
- Vector<byte> buffer = buffer_writer.GetResult();
-
- if (buffer.length() == code->relocation_size()) {
- // Simply patch relocation area of code.
- memcpy(code->relocation_start(), buffer.start(), buffer.length());
- return code;
- } else {
- // Relocation info section now has different size. We cannot simply
- // rewrite it inside code object. Instead we have to create a new
- // code object.
- Handle<Code> result(FACTORY->CopyCode(code, buffer));
- return result;
- }
-}
-
-
-MaybeObject* LiveEdit::PatchFunctionPositions(
- Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) {
- if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Isolate::Current()->ThrowIllegalOperation();
- }
-
- SharedInfoWrapper shared_info_wrapper(shared_info_array);
- Handle<SharedFunctionInfo> info = shared_info_wrapper.GetInfo();
-
- int old_function_start = info->start_position();
- int new_function_start = TranslatePosition(old_function_start,
- position_change_array);
- int new_function_end = TranslatePosition(info->end_position(),
- position_change_array);
- int new_function_token_pos =
- TranslatePosition(info->function_token_position(), position_change_array);
-
- info->set_start_position(new_function_start);
- info->set_end_position(new_function_end);
- info->set_function_token_position(new_function_token_pos);
-
- HEAP->EnsureHeapIsIterable();
-
- if (IsJSFunctionCode(info->code())) {
- // Patch relocation info section of the code.
- Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
- position_change_array);
- if (*patched_code != info->code()) {
- // Replace all references to the code across the heap. In particular,
- // some stubs may refer to this code and this code may be being executed
- // on stack (it is safe to substitute the code object on stack, because
- // we only change the structure of rinfo and leave instructions
- // untouched).
- ReplaceCodeObject(Handle<Code>(info->code()), patched_code);
- }
- }
-
- return HEAP->undefined_value();
-}
-
-
-static Handle<Script> CreateScriptCopy(Handle<Script> original) {
- Handle<String> original_source(String::cast(original->source()));
-
- Handle<Script> copy = FACTORY->NewScript(original_source);
-
- copy->set_name(original->name());
- copy->set_line_offset(original->line_offset());
- copy->set_column_offset(original->column_offset());
- copy->set_data(original->data());
- copy->set_type(original->type());
- copy->set_context_data(original->context_data());
- copy->set_compilation_type(original->compilation_type());
- copy->set_eval_from_shared(original->eval_from_shared());
- copy->set_eval_from_instructions_offset(
- original->eval_from_instructions_offset());
-
- return copy;
-}
-
-
-Object* LiveEdit::ChangeScriptSource(Handle<Script> original_script,
- Handle<String> new_source,
- Handle<Object> old_script_name) {
- Isolate* isolate = original_script->GetIsolate();
- Handle<Object> old_script_object;
- if (old_script_name->IsString()) {
- Handle<Script> old_script = CreateScriptCopy(original_script);
- old_script->set_name(String::cast(*old_script_name));
- old_script_object = old_script;
- isolate->debugger()->OnAfterCompile(
- old_script, Debugger::SEND_WHEN_DEBUGGING);
- } else {
- old_script_object = isolate->factory()->null_value();
- }
-
- original_script->set_source(*new_source);
-
- // Drop line ends so that they will be recalculated.
- original_script->set_line_ends(HEAP->undefined_value());
-
- return *old_script_object;
-}
-
-
-
-void LiveEdit::ReplaceRefToNestedFunction(
- Handle<JSValue> parent_function_wrapper,
- Handle<JSValue> orig_function_wrapper,
- Handle<JSValue> subst_function_wrapper) {
-
- Handle<SharedFunctionInfo> parent_shared =
- UnwrapSharedFunctionInfoFromJSValue(parent_function_wrapper);
- Handle<SharedFunctionInfo> orig_shared =
- UnwrapSharedFunctionInfoFromJSValue(orig_function_wrapper);
- Handle<SharedFunctionInfo> subst_shared =
- UnwrapSharedFunctionInfoFromJSValue(subst_function_wrapper);
-
- for (RelocIterator it(parent_shared->code()); !it.done(); it.next()) {
- if (it.rinfo()->rmode() == RelocInfo::EMBEDDED_OBJECT) {
- if (it.rinfo()->target_object() == *orig_shared) {
- it.rinfo()->set_target_object(*subst_shared);
- }
- }
- }
-}
-
-
-// Check an activation against list of functions. If there is a function
-// that matches, its status in result array is changed to status argument value.
-static bool CheckActivation(Handle<JSArray> shared_info_array,
- Handle<JSArray> result,
- StackFrame* frame,
- LiveEdit::FunctionPatchabilityStatus status) {
- if (!frame->is_java_script()) return false;
-
- Handle<JSFunction> function(
- JSFunction::cast(JavaScriptFrame::cast(frame)->function()));
-
- Isolate* isolate = shared_info_array->GetIsolate();
- int len = GetArrayLength(shared_info_array);
- for (int i = 0; i < len; i++) {
- Object* element = shared_info_array->GetElementNoExceptionThrown(i);
- CHECK(element->IsJSValue());
- Handle<JSValue> jsvalue(JSValue::cast(element));
- Handle<SharedFunctionInfo> shared =
- UnwrapSharedFunctionInfoFromJSValue(jsvalue);
-
- if (function->shared() == *shared || IsInlined(*function, *shared)) {
- SetElementNonStrict(result, i, Handle<Smi>(Smi::FromInt(status),
- isolate));
- return true;
- }
- }
- return false;
-}
-
-
-// Iterates over handler chain and removes all elements that are inside
-// frames being dropped.
-static bool FixTryCatchHandler(StackFrame* top_frame,
- StackFrame* bottom_frame) {
- Address* pointer_address =
- &Memory::Address_at(Isolate::Current()->get_address_from_id(
- Isolate::kHandlerAddress));
-
- while (*pointer_address < top_frame->sp()) {
- pointer_address = &Memory::Address_at(*pointer_address);
- }
- Address* above_frame_address = pointer_address;
- while (*pointer_address < bottom_frame->fp()) {
- pointer_address = &Memory::Address_at(*pointer_address);
- }
- bool change = *above_frame_address != *pointer_address;
- *above_frame_address = *pointer_address;
- return change;
-}
-
-
-// Removes specified range of frames from stack. There may be 1 or more
-// frames in range. Anyway the bottom frame is restarted rather than dropped,
-// and therefore has to be a JavaScript frame.
-// Returns error message or NULL.
-static const char* DropFrames(Vector<StackFrame*> frames,
- int top_frame_index,
- int bottom_js_frame_index,
- Debug::FrameDropMode* mode,
- Object*** restarter_frame_function_pointer) {
- if (!Debug::kFrameDropperSupported) {
- return "Stack manipulations are not supported in this architecture.";
- }
-
- StackFrame* pre_top_frame = frames[top_frame_index - 1];
- StackFrame* top_frame = frames[top_frame_index];
- StackFrame* bottom_js_frame = frames[bottom_js_frame_index];
-
- ASSERT(bottom_js_frame->is_java_script());
-
- // Check the nature of the top frame.
- Isolate* isolate = Isolate::Current();
- Code* pre_top_frame_code = pre_top_frame->LookupCode();
- bool frame_has_padding;
- if (pre_top_frame_code->is_inline_cache_stub() &&
- pre_top_frame_code->is_debug_break()) {
- // OK, we can drop inline cache calls.
- *mode = Debug::FRAME_DROPPED_IN_IC_CALL;
- frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
- } else if (pre_top_frame_code ==
- isolate->debug()->debug_break_slot()) {
- // OK, we can drop debug break slot.
- *mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
- frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
- } else if (pre_top_frame_code ==
- isolate->builtins()->builtin(
- Builtins::kFrameDropper_LiveEdit)) {
- // OK, we can drop our own code.
- pre_top_frame = frames[top_frame_index - 2];
- top_frame = frames[top_frame_index - 1];
- *mode = Debug::CURRENTLY_SET_MODE;
- frame_has_padding = false;
- } else if (pre_top_frame_code ==
- isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
- *mode = Debug::FRAME_DROPPED_IN_RETURN_CALL;
- frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
- } else if (pre_top_frame_code->kind() == Code::STUB &&
- pre_top_frame_code->major_key() == CodeStub::CEntry) {
- // Entry from our unit tests on 'debugger' statement.
- // It's fine, we support this case.
- *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
- // We don't have a padding from 'debugger' statement call.
- // Here the stub is CEntry, it's not debug-only and can't be padded.
- // If anyone would complain, a proxy padded stub could be added.
- frame_has_padding = false;
- } else if (pre_top_frame->type() == StackFrame::ARGUMENTS_ADAPTOR) {
- // This must be adaptor that remain from the frame dropping that
- // is still on stack. A frame dropper frame must be above it.
- ASSERT(frames[top_frame_index - 2]->LookupCode() ==
- isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit));
- pre_top_frame = frames[top_frame_index - 3];
- top_frame = frames[top_frame_index - 2];
- *mode = Debug::CURRENTLY_SET_MODE;
- frame_has_padding = false;
- } else {
- return "Unknown structure of stack above changing function";
- }
-
- Address unused_stack_top = top_frame->sp();
- Address unused_stack_bottom = bottom_js_frame->fp()
- - Debug::kFrameDropperFrameSize * kPointerSize // Size of the new frame.
- + kPointerSize; // Bigger address end is exclusive.
-
- Address* top_frame_pc_address = top_frame->pc_address();
-
- // top_frame may be damaged below this point. Do not used it.
- ASSERT(!(top_frame = NULL));
-
- if (unused_stack_top > unused_stack_bottom) {
- if (frame_has_padding) {
- int shortage_bytes =
- static_cast<int>(unused_stack_top - unused_stack_bottom);
-
- Address padding_start = pre_top_frame->fp() -
- Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize;
-
- Address padding_pointer = padding_start;
- Smi* padding_object =
- Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue);
- while (Memory::Object_at(padding_pointer) == padding_object) {
- padding_pointer -= kPointerSize;
- }
- int padding_counter =
- Smi::cast(Memory::Object_at(padding_pointer))->value();
- if (padding_counter * kPointerSize < shortage_bytes) {
- return "Not enough space for frame dropper frame "
- "(even with padding frame)";
- }
- Memory::Object_at(padding_pointer) =
- Smi::FromInt(padding_counter - shortage_bytes / kPointerSize);
-
- StackFrame* pre_pre_frame = frames[top_frame_index - 2];
-
- memmove(padding_start + kPointerSize - shortage_bytes,
- padding_start + kPointerSize,
- Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize);
-
- pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
- pre_pre_frame->SetCallerFp(pre_top_frame->fp());
- unused_stack_top -= shortage_bytes;
-
- STATIC_ASSERT(sizeof(Address) == kPointerSize);
- top_frame_pc_address -= shortage_bytes / kPointerSize;
- } else {
- return "Not enough space for frame dropper frame";
- }
- }
-
- // Committing now. After this point we should return only NULL value.
-
- FixTryCatchHandler(pre_top_frame, bottom_js_frame);
- // Make sure FixTryCatchHandler is idempotent.
- ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
-
- Handle<Code> code = Isolate::Current()->builtins()->FrameDropper_LiveEdit();
- *top_frame_pc_address = code->entry();
- pre_top_frame->SetCallerFp(bottom_js_frame->fp());
-
- *restarter_frame_function_pointer =
- Debug::SetUpFrameDropperFrame(bottom_js_frame, code);
-
- ASSERT((**restarter_frame_function_pointer)->IsJSFunction());
-
- for (Address a = unused_stack_top;
- a < unused_stack_bottom;
- a += kPointerSize) {
- Memory::Object_at(a) = Smi::FromInt(0);
- }
-
- return NULL;
-}
-
-
-static bool IsDropableFrame(StackFrame* frame) {
- return !frame->is_exit();
-}
-
-
-// Describes a set of call frames that execute any of listed functions.
-// Finding no such frames does not mean error.
-class MultipleFunctionTarget {
- public:
- MultipleFunctionTarget(Handle<JSArray> shared_info_array,
- Handle<JSArray> result)
- : m_shared_info_array(shared_info_array),
- m_result(result) {}
- bool MatchActivation(StackFrame* frame,
- LiveEdit::FunctionPatchabilityStatus status) {
- return CheckActivation(m_shared_info_array, m_result, frame, status);
- }
- const char* GetNotFoundMessage() {
- return NULL;
- }
- private:
- Handle<JSArray> m_shared_info_array;
- Handle<JSArray> m_result;
-};
-
-// Drops all call frame matched by target and all frames above them.
-template<typename TARGET>
-static const char* DropActivationsInActiveThreadImpl(
- TARGET& target, bool do_drop, Zone* zone) {
- Isolate* isolate = Isolate::Current();
- Debug* debug = isolate->debug();
- ZoneScope scope(zone, DELETE_ON_EXIT);
- Vector<StackFrame*> frames = CreateStackMap(isolate, zone);
-
-
- int top_frame_index = -1;
- int frame_index = 0;
- for (; frame_index < frames.length(); frame_index++) {
- StackFrame* frame = frames[frame_index];
- if (frame->id() == debug->break_frame_id()) {
- top_frame_index = frame_index;
- break;
- }
- if (target.MatchActivation(
- frame, LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
- // We are still above break_frame. It is not a target frame,
- // it is a problem.
- return "Debugger mark-up on stack is not found";
- }
- }
-
- if (top_frame_index == -1) {
- // We haven't found break frame, but no function is blocking us anyway.
- return target.GetNotFoundMessage();
- }
-
- bool target_frame_found = false;
- int bottom_js_frame_index = top_frame_index;
- bool c_code_found = false;
-
- for (; frame_index < frames.length(); frame_index++) {
- StackFrame* frame = frames[frame_index];
- if (!IsDropableFrame(frame)) {
- c_code_found = true;
- break;
- }
- if (target.MatchActivation(
- frame, LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
- target_frame_found = true;
- bottom_js_frame_index = frame_index;
- }
- }
-
- if (c_code_found) {
- // There is a C frames on stack. Check that there are no target frames
- // below them.
- for (; frame_index < frames.length(); frame_index++) {
- StackFrame* frame = frames[frame_index];
- if (frame->is_java_script()) {
- if (target.MatchActivation(
- frame, LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
- // Cannot drop frame under C frames.
- return NULL;
- }
- }
- }
- }
-
- if (!do_drop) {
- // We are in check-only mode.
- return NULL;
- }
-
- if (!target_frame_found) {
- // Nothing to drop.
- return target.GetNotFoundMessage();
- }
-
- Debug::FrameDropMode drop_mode = Debug::FRAMES_UNTOUCHED;
- Object** restarter_frame_function_pointer = NULL;
- const char* error_message = DropFrames(frames, top_frame_index,
- bottom_js_frame_index, &drop_mode,
- &restarter_frame_function_pointer);
-
- if (error_message != NULL) {
- return error_message;
- }
-
- // Adjust break_frame after some frames has been dropped.
- StackFrame::Id new_id = StackFrame::NO_ID;
- for (int i = bottom_js_frame_index + 1; i < frames.length(); i++) {
- if (frames[i]->type() == StackFrame::JAVA_SCRIPT) {
- new_id = frames[i]->id();
- break;
- }
- }
- debug->FramesHaveBeenDropped(new_id, drop_mode,
- restarter_frame_function_pointer);
- return NULL;
-}
-
-// Fills result array with statuses of functions. Modifies the stack
-// removing all listed function if possible and if do_drop is true.
-static const char* DropActivationsInActiveThread(
- Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop,
- Zone* zone) {
- MultipleFunctionTarget target(shared_info_array, result);
-
- const char* message =
- DropActivationsInActiveThreadImpl(target, do_drop, zone);
- if (message) {
- return message;
- }
-
- Isolate* isolate = shared_info_array->GetIsolate();
- int array_len = GetArrayLength(shared_info_array);
-
- // Replace "blocked on active" with "replaced on active" status.
- for (int i = 0; i < array_len; i++) {
- if (result->GetElement(i) ==
- Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
- Handle<Object> replaced(
- Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK), isolate);
- SetElementNonStrict(result, i, replaced);
- }
- }
- return NULL;
-}
-
-
-class InactiveThreadActivationsChecker : public ThreadVisitor {
- public:
- InactiveThreadActivationsChecker(Handle<JSArray> shared_info_array,
- Handle<JSArray> result)
- : shared_info_array_(shared_info_array), result_(result),
- has_blocked_functions_(false) {
- }
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- has_blocked_functions_ |= CheckActivation(
- shared_info_array_, result_, it.frame(),
- LiveEdit::FUNCTION_BLOCKED_ON_OTHER_STACK);
- }
- }
- bool HasBlockedFunctions() {
- return has_blocked_functions_;
- }
-
- private:
- Handle<JSArray> shared_info_array_;
- Handle<JSArray> result_;
- bool has_blocked_functions_;
-};
-
-
-Handle<JSArray> LiveEdit::CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop, Zone* zone) {
- Isolate* isolate = shared_info_array->GetIsolate();
- int len = GetArrayLength(shared_info_array);
-
- Handle<JSArray> result = isolate->factory()->NewJSArray(len);
-
- // Fill the default values.
- for (int i = 0; i < len; i++) {
- SetElementNonStrict(
- result,
- i,
- Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH), isolate));
- }
-
-
- // First check inactive threads. Fail if some functions are blocked there.
- InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
- result);
- Isolate::Current()->thread_manager()->IterateArchivedThreads(
- &inactive_threads_checker);
- if (inactive_threads_checker.HasBlockedFunctions()) {
- return result;
- }
-
- // Try to drop activations from the current stack.
- const char* error_message =
- DropActivationsInActiveThread(shared_info_array, result, do_drop, zone);
- if (error_message != NULL) {
- // Add error message as an array extra element.
- Vector<const char> vector_message(error_message, StrLength(error_message));
- Handle<String> str = FACTORY->NewStringFromAscii(vector_message);
- SetElementNonStrict(result, len, str);
- }
- return result;
-}
-
-
-// Describes a single callframe a target. Not finding this frame
-// means an error.
-class SingleFrameTarget {
- public:
- explicit SingleFrameTarget(JavaScriptFrame* frame)
- : m_frame(frame),
- m_saved_status(LiveEdit::FUNCTION_AVAILABLE_FOR_PATCH) {}
-
- bool MatchActivation(StackFrame* frame,
- LiveEdit::FunctionPatchabilityStatus status) {
- if (frame->fp() == m_frame->fp()) {
- m_saved_status = status;
- return true;
- }
- return false;
- }
- const char* GetNotFoundMessage() {
- return "Failed to found requested frame";
- }
- LiveEdit::FunctionPatchabilityStatus saved_status() {
- return m_saved_status;
- }
- private:
- JavaScriptFrame* m_frame;
- LiveEdit::FunctionPatchabilityStatus m_saved_status;
-};
-
-
-// Finds a drops required frame and all frames above.
-// Returns error message or NULL.
-const char* LiveEdit::RestartFrame(JavaScriptFrame* frame, Zone* zone) {
- SingleFrameTarget target(frame);
-
- const char* result = DropActivationsInActiveThreadImpl(target, true, zone);
- if (result != NULL) {
- return result;
- }
- if (target.saved_status() == LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE) {
- return "Function is blocked under native code";
- }
- return NULL;
-}
-
-
-LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
- FunctionLiteral* fun)
- : isolate_(isolate) {
- if (isolate_->active_function_info_listener() != NULL) {
- isolate_->active_function_info_listener()->FunctionStarted(fun);
- }
-}
-
-
-LiveEditFunctionTracker::~LiveEditFunctionTracker() {
- if (isolate_->active_function_info_listener() != NULL) {
- isolate_->active_function_info_listener()->FunctionDone();
- }
-}
-
-
-void LiveEditFunctionTracker::RecordFunctionInfo(
- Handle<SharedFunctionInfo> info, FunctionLiteral* lit,
- Zone* zone) {
- if (isolate_->active_function_info_listener() != NULL) {
- isolate_->active_function_info_listener()->FunctionInfo(info, lit->scope(),
- zone);
- }
-}
-
-
-void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
- isolate_->active_function_info_listener()->FunctionCode(code);
-}
-
-
-bool LiveEditFunctionTracker::IsActive(Isolate* isolate) {
- return isolate->active_function_info_listener() != NULL;
-}
-
-
-#else // ENABLE_DEBUGGER_SUPPORT
-
-// This ifdef-else-endif section provides working or stub implementation of
-// LiveEditFunctionTracker.
-LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
- FunctionLiteral* fun) {
-}
-
-
-LiveEditFunctionTracker::~LiveEditFunctionTracker() {
-}
-
-
-void LiveEditFunctionTracker::RecordFunctionInfo(
- Handle<SharedFunctionInfo> info, FunctionLiteral* lit,
- Zone* zone) {
-}
-
-
-void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
-}
-
-
-bool LiveEditFunctionTracker::IsActive(Isolate* isolate) {
- return false;
-}
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/liveedit.h b/src/3rdparty/v8/src/liveedit.h
deleted file mode 100644
index 5b12854..0000000
--- a/src/3rdparty/v8/src/liveedit.h
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LIVEEDIT_H_
-#define V8_LIVEEDIT_H_
-
-
-
-// Live Edit feature implementation.
-// User should be able to change script on already running VM. This feature
-// matches hot swap features in other frameworks.
-//
-// The basic use-case is when user spots some mistake in function body
-// from debugger and wishes to change the algorithm without restart.
-//
-// A single change always has a form of a simple replacement (in pseudo-code):
-// script.source[positions, positions+length] = new_string;
-// Implementation first determines, which function's body includes this
-// change area. Then both old and new versions of script are fully compiled
-// in order to analyze, whether the function changed its outer scope
-// expectations (or number of parameters). If it didn't, function's code is
-// patched with a newly compiled code. If it did change, enclosing function
-// gets patched. All inner functions are left untouched, whatever happened
-// to them in a new script version. However, new version of code will
-// instantiate newly compiled functions.
-
-
-#include "allocation.h"
-#include "compiler.h"
-
-namespace v8 {
-namespace internal {
-
-// This class collects some specific information on structure of functions
-// in a particular script. It gets called from compiler all the time, but
-// actually records any data only when liveedit operation is in process;
-// in any other time this class is very cheap.
-//
-// The primary interest of the Tracker is to record function scope structures
-// in order to analyze whether function code maybe safely patched (with new
-// code successfully reading existing data from function scopes). The Tracker
-// also collects compiled function codes.
-class LiveEditFunctionTracker {
- public:
- explicit LiveEditFunctionTracker(Isolate* isolate, FunctionLiteral* fun);
- ~LiveEditFunctionTracker();
- void RecordFunctionInfo(Handle<SharedFunctionInfo> info,
- FunctionLiteral* lit, Zone* zone);
- void RecordRootFunctionInfo(Handle<Code> code);
-
- static bool IsActive(Isolate* isolate);
-
- private:
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Isolate* isolate_;
-#endif
-};
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-class LiveEdit : AllStatic {
- public:
- static JSArray* GatherCompileInfo(Handle<Script> script,
- Handle<String> source);
-
- static void WrapSharedFunctionInfos(Handle<JSArray> array);
-
- MUST_USE_RESULT static MaybeObject* ReplaceFunctionCode(
- Handle<JSArray> new_compile_info_array,
- Handle<JSArray> shared_info_array);
-
- static MaybeObject* FunctionSourceUpdated(Handle<JSArray> shared_info_array);
-
- // Updates script field in FunctionSharedInfo.
- static void SetFunctionScript(Handle<JSValue> function_wrapper,
- Handle<Object> script_handle);
-
- MUST_USE_RESULT static MaybeObject* PatchFunctionPositions(
- Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array);
-
- // For a script updates its source field. If old_script_name is provided
- // (i.e. is a String), also creates a copy of the script with its original
- // source and sends notification to debugger.
- static Object* ChangeScriptSource(Handle<Script> original_script,
- Handle<String> new_source,
- Handle<Object> old_script_name);
-
- // In a code of a parent function replaces original function as embedded
- // object with a substitution one.
- static void ReplaceRefToNestedFunction(Handle<JSValue> parent_function_shared,
- Handle<JSValue> orig_function_shared,
- Handle<JSValue> subst_function_shared);
-
- // Checks listed functions on stack and return array with corresponding
- // FunctionPatchabilityStatus statuses; extra array element may
- // contain general error message. Modifies the current stack and
- // has restart the lowest found frames and drops all other frames above
- // if possible and if do_drop is true.
- static Handle<JSArray> CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop, Zone* zone);
-
- // Restarts the call frame and completely drops all frames above it.
- // Return error message or NULL.
- static const char* RestartFrame(JavaScriptFrame* frame, Zone* zone);
-
- // A copy of this is in liveedit-debugger.js.
- enum FunctionPatchabilityStatus {
- FUNCTION_AVAILABLE_FOR_PATCH = 1,
- FUNCTION_BLOCKED_ON_ACTIVE_STACK = 2,
- FUNCTION_BLOCKED_ON_OTHER_STACK = 3,
- FUNCTION_BLOCKED_UNDER_NATIVE_CODE = 4,
- FUNCTION_REPLACED_ON_ACTIVE_STACK = 5
- };
-
- // Compares 2 strings line-by-line, then token-wise and returns diff in form
- // of array of triplets (pos1, pos1_end, pos2_end) describing list
- // of diff chunks.
- static Handle<JSArray> CompareStrings(Handle<String> s1,
- Handle<String> s2);
-};
-
-
-// A general-purpose comparator between 2 arrays.
-class Comparator {
- public:
- // Holds 2 arrays of some elements allowing to compare any pair of
- // element from the first array and element from the second array.
- class Input {
- public:
- virtual int GetLength1() = 0;
- virtual int GetLength2() = 0;
- virtual bool Equals(int index1, int index2) = 0;
-
- protected:
- virtual ~Input() {}
- };
-
- // Receives compare result as a series of chunks.
- class Output {
- public:
- // Puts another chunk in result list. Note that technically speaking
- // only 3 arguments actually needed with 4th being derivable.
- virtual void AddChunk(int pos1, int pos2, int len1, int len2) = 0;
-
- protected:
- virtual ~Output() {}
- };
-
- // Finds the difference between 2 arrays of elements.
- static void CalculateDifference(Input* input,
- Output* result_writer);
-};
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-} } // namespace v8::internal
-
-#endif /* V*_LIVEEDIT_H_ */
diff --git a/src/3rdparty/v8/src/log-inl.h b/src/3rdparty/v8/src/log-inl.h
deleted file mode 100644
index 8aebbc7..0000000
--- a/src/3rdparty/v8/src/log-inl.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LOG_INL_H_
-#define V8_LOG_INL_H_
-
-#include "log.h"
-#include "cpu-profiler.h"
-
-namespace v8 {
-namespace internal {
-
-Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag,
- Script* script) {
- if ((tag == FUNCTION_TAG || tag == LAZY_COMPILE_TAG || tag == SCRIPT_TAG)
- && script->type()->value() == Script::TYPE_NATIVE) {
- switch (tag) {
- case FUNCTION_TAG: return NATIVE_FUNCTION_TAG;
- case LAZY_COMPILE_TAG: return NATIVE_LAZY_COMPILE_TAG;
- case SCRIPT_TAG: return NATIVE_SCRIPT_TAG;
- default: return tag;
- }
- } else {
- return tag;
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_LOG_INL_H_
diff --git a/src/3rdparty/v8/src/log-utils.cc b/src/3rdparty/v8/src/log-utils.cc
deleted file mode 100644
index 830c3da..0000000
--- a/src/3rdparty/v8/src/log-utils.cc
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "log-utils.h"
-#include "string-stream.h"
-
-namespace v8 {
-namespace internal {
-
-
-const char* const Log::kLogToTemporaryFile = "&";
-
-
-Log::Log(Logger* logger)
- : is_stopped_(false),
- output_handle_(NULL),
- ll_output_handle_(NULL),
- mutex_(NULL),
- message_buffer_(NULL),
- logger_(logger) {
-}
-
-
-static void AddIsolateIdIfNeeded(StringStream* stream) {
- Isolate* isolate = Isolate::Current();
- if (isolate->IsDefaultIsolate()) return;
- stream->Add("isolate-%p-", isolate);
-}
-
-
-void Log::Initialize() {
- mutex_ = OS::CreateMutex();
- message_buffer_ = NewArray<char>(kMessageBufferSize);
-
- // --log-all enables all the log flags.
- if (FLAG_log_all) {
- FLAG_log_runtime = true;
- FLAG_log_api = true;
- FLAG_log_code = true;
- FLAG_log_gc = true;
- FLAG_log_suspect = true;
- FLAG_log_handles = true;
- FLAG_log_regexp = true;
- FLAG_log_internal_timer_events = true;
- }
-
- // --prof implies --log-code.
- if (FLAG_prof) FLAG_log_code = true;
-
- // --prof_lazy controls --log-code, implies --noprof_auto.
- if (FLAG_prof_lazy) {
- FLAG_log_code = false;
- FLAG_prof_auto = false;
- }
-
- bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api
- || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof
- || FLAG_log_internal_timer_events;
-
- // If we're logging anything, we need to open the log file.
- if (open_log_file) {
- if (strcmp(FLAG_logfile, "-") == 0) {
- OpenStdout();
- } else if (strcmp(FLAG_logfile, kLogToTemporaryFile) == 0) {
- OpenTemporaryFile();
- } else {
- if (strchr(FLAG_logfile, '%') != NULL ||
- !Isolate::Current()->IsDefaultIsolate()) {
- // If there's a '%' in the log file name we have to expand
- // placeholders.
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- AddIsolateIdIfNeeded(&stream);
- for (const char* p = FLAG_logfile; *p; p++) {
- if (*p == '%') {
- p++;
- switch (*p) {
- case '\0':
- // If there's a % at the end of the string we back up
- // one character so we can escape the loop properly.
- p--;
- break;
- case 't': {
- // %t expands to the current time in milliseconds.
- double time = OS::TimeCurrentMillis();
- stream.Add("%.0f", FmtElm(time));
- break;
- }
- case '%':
- // %% expands (contracts really) to %.
- stream.Put('%');
- break;
- default:
- // All other %'s expand to themselves.
- stream.Put('%');
- stream.Put(*p);
- break;
- }
- } else {
- stream.Put(*p);
- }
- }
- SmartArrayPointer<const char> expanded = stream.ToCString();
- OpenFile(*expanded);
- } else {
- OpenFile(FLAG_logfile);
- }
- }
- }
-}
-
-
-void Log::OpenStdout() {
- ASSERT(!IsEnabled());
- output_handle_ = stdout;
-}
-
-
-void Log::OpenTemporaryFile() {
- ASSERT(!IsEnabled());
- output_handle_ = i::OS::OpenTemporaryFile();
-}
-
-
-// Extension added to V8 log file name to get the low-level log name.
-static const char kLowLevelLogExt[] = ".ll";
-
-// File buffer size of the low-level log. We don't use the default to
-// minimize the associated overhead.
-static const int kLowLevelLogBufferSize = 2 * MB;
-
-
-void Log::OpenFile(const char* name) {
- ASSERT(!IsEnabled());
- output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
- if (FLAG_ll_prof) {
- // Open the low-level log file.
- size_t len = strlen(name);
- ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLowLevelLogExt)));
- memcpy(ll_name.start(), name, len);
- memcpy(ll_name.start() + len, kLowLevelLogExt, sizeof(kLowLevelLogExt));
- ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode);
- setvbuf(ll_output_handle_, NULL, _IOFBF, kLowLevelLogBufferSize);
- }
-}
-
-
-FILE* Log::Close() {
- FILE* result = NULL;
- if (output_handle_ != NULL) {
- if (strcmp(FLAG_logfile, kLogToTemporaryFile) != 0) {
- fclose(output_handle_);
- } else {
- result = output_handle_;
- }
- }
- output_handle_ = NULL;
- if (ll_output_handle_ != NULL) fclose(ll_output_handle_);
- ll_output_handle_ = NULL;
-
- DeleteArray(message_buffer_);
- message_buffer_ = NULL;
-
- delete mutex_;
- mutex_ = NULL;
-
- is_stopped_ = false;
- return result;
-}
-
-
-LogMessageBuilder::LogMessageBuilder(Logger* logger)
- : log_(logger->log_),
- sl(log_->mutex_),
- pos_(0) {
- ASSERT(log_->message_buffer_ != NULL);
-}
-
-
-void LogMessageBuilder::Append(const char* format, ...) {
- Vector<char> buf(log_->message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
- va_list args;
- va_start(args, format);
- AppendVA(format, args);
- va_end(args);
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-void LogMessageBuilder::AppendVA(const char* format, va_list args) {
- Vector<char> buf(log_->message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
-
- // Result is -1 if output was truncated.
- if (result >= 0) {
- pos_ += result;
- } else {
- pos_ = Log::kMessageBufferSize;
- }
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-void LogMessageBuilder::Append(const char c) {
- if (pos_ < Log::kMessageBufferSize) {
- log_->message_buffer_[pos_++] = c;
- }
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-void LogMessageBuilder::Append(String* str) {
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
- int length = str->length();
- for (int i = 0; i < length; i++) {
- Append(static_cast<char>(str->Get(i)));
- }
-}
-
-
-void LogMessageBuilder::AppendAddress(Address addr) {
- Append("0x%" V8PRIxPTR, addr);
-}
-
-
-void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
- if (str == NULL) return;
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
- int len = str->length();
- if (len > 0x1000)
- len = 0x1000;
- if (show_impl_info) {
- Append(str->IsOneByteRepresentation() ? 'a' : '2');
- if (StringShape(str).IsExternal())
- Append('e');
- if (StringShape(str).IsInternalized())
- Append('#');
- Append(":%i:", str->length());
- }
- for (int i = 0; i < len; i++) {
- uc32 c = str->Get(i);
- if (c > 0xff) {
- Append("\\u%04x", c);
- } else if (c < 32 || c > 126) {
- Append("\\x%02x", c);
- } else if (c == ',') {
- Append("\\,");
- } else if (c == '\\') {
- Append("\\\\");
- } else if (c == '\"') {
- Append("\"\"");
- } else {
- Append("%lc", c);
- }
- }
-}
-
-
-void LogMessageBuilder::AppendStringPart(const char* str, int len) {
- if (pos_ + len > Log::kMessageBufferSize) {
- len = Log::kMessageBufferSize - pos_;
- ASSERT(len >= 0);
- if (len == 0) return;
- }
- Vector<char> buf(log_->message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
- OS::StrNCpy(buf, str, len);
- pos_ += len;
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-void LogMessageBuilder::WriteToLogFile() {
- ASSERT(pos_ <= Log::kMessageBufferSize);
- const int written = log_->WriteToFile(log_->message_buffer_, pos_);
- if (written != pos_) {
- log_->stop();
- log_->logger_->LogFailure();
- }
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/log-utils.h b/src/3rdparty/v8/src/log-utils.h
deleted file mode 100644
index d0cb828..0000000
--- a/src/3rdparty/v8/src/log-utils.h
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LOG_UTILS_H_
-#define V8_LOG_UTILS_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-class Logger;
-
-// Functions and data for performing output of log messages.
-class Log {
- public:
- // Performs process-wide initialization.
- void Initialize();
-
- // Disables logging, but preserves acquired resources.
- void stop() { is_stopped_ = true; }
-
- // Frees all resources acquired in Initialize and Open... functions.
- // When a temporary file is used for the log, returns its stream descriptor,
- // leaving the file open.
- FILE* Close();
-
- // Returns whether logging is enabled.
- bool IsEnabled() {
- return !is_stopped_ && output_handle_ != NULL;
- }
-
- // Size of buffer used for formatting log messages.
- static const int kMessageBufferSize = 2048;
-
- // This mode is only used in tests, as temporary files are automatically
- // deleted on close and thus can't be accessed afterwards.
- static const char* const kLogToTemporaryFile;
-
- private:
- explicit Log(Logger* logger);
-
- // Opens stdout for logging.
- void OpenStdout();
-
- // Opens file for logging.
- void OpenFile(const char* name);
-
- // Opens a temporary file for logging.
- void OpenTemporaryFile();
-
- // Implementation of writing to a log file.
- int WriteToFile(const char* msg, int length) {
- ASSERT(output_handle_ != NULL);
- size_t rv = fwrite(msg, 1, length, output_handle_);
- ASSERT(static_cast<size_t>(length) == rv);
- USE(rv);
- fflush(output_handle_);
- return length;
- }
-
- // Whether logging is stopped (e.g. due to insufficient resources).
- bool is_stopped_;
-
- // When logging is active output_handle_ is used to store a pointer to log
- // destination. mutex_ should be acquired before using output_handle_.
- FILE* output_handle_;
-
- // Used when low-level profiling is active.
- FILE* ll_output_handle_;
-
- // mutex_ is a Mutex used for enforcing exclusive
- // access to the formatting buffer and the log file or log memory buffer.
- Mutex* mutex_;
-
- // Buffer used for formatting log messages. This is a singleton buffer and
- // mutex_ should be acquired before using it.
- char* message_buffer_;
-
- Logger* logger_;
-
- friend class Logger;
- friend class LogMessageBuilder;
-};
-
-
-// Utility class for formatting log messages. It fills the message into the
-// static buffer in Log.
-class LogMessageBuilder BASE_EMBEDDED {
- public:
- // Create a message builder starting from position 0. This acquires the mutex
- // in the log as well.
- explicit LogMessageBuilder(Logger* logger);
- ~LogMessageBuilder() { }
-
- // Append string data to the log message.
- void Append(const char* format, ...);
-
- // Append string data to the log message.
- void AppendVA(const char* format, va_list args);
-
- // Append a character to the log message.
- void Append(const char c);
-
- // Append a heap string.
- void Append(String* str);
-
- // Appends an address.
- void AppendAddress(Address addr);
-
- void AppendDetailed(String* str, bool show_impl_info);
-
- // Append a portion of a string.
- void AppendStringPart(const char* str, int len);
-
- // Write the log message to the log file currently opened.
- void WriteToLogFile();
-
- private:
- Log* log_;
- ScopedLock sl;
- int pos_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_LOG_UTILS_H_
diff --git a/src/3rdparty/v8/src/log.cc b/src/3rdparty/v8/src/log.cc
deleted file mode 100644
index 2ed0141..0000000
--- a/src/3rdparty/v8/src/log.cc
+++ /dev/null
@@ -1,1912 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "deoptimizer.h"
-#include "global-handles.h"
-#include "log.h"
-#include "macro-assembler.h"
-#include "platform.h"
-#include "runtime-profiler.h"
-#include "serialize.h"
-#include "string-stream.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// The Profiler samples pc and sp values for the main thread.
-// Each sample is appended to a circular buffer.
-// An independent thread removes data and writes it to the log.
-// This design minimizes the time spent in the sampler.
-//
-class Profiler: public Thread {
- public:
- explicit Profiler(Isolate* isolate);
- void Engage();
- void Disengage();
-
- // Inserts collected profiling data into buffer.
- void Insert(TickSample* sample) {
- if (paused_)
- return;
-
- if (Succ(head_) == tail_) {
- overflow_ = true;
- } else {
- buffer_[head_] = *sample;
- head_ = Succ(head_);
- buffer_semaphore_->Signal(); // Tell we have an element.
- }
- }
-
- // Waits for a signal and removes profiling data.
- bool Remove(TickSample* sample) {
- buffer_semaphore_->Wait(); // Wait for an element.
- *sample = buffer_[tail_];
- bool result = overflow_;
- tail_ = Succ(tail_);
- overflow_ = false;
- return result;
- }
-
- void Run();
-
- // Pause and Resume TickSample data collection.
- bool paused() const { return paused_; }
- void pause() { paused_ = true; }
- void resume() { paused_ = false; }
-
- private:
- // Returns the next index in the cyclic buffer.
- int Succ(int index) { return (index + 1) % kBufferSize; }
-
- Isolate* isolate_;
- // Cyclic buffer for communicating profiling samples
- // between the signal handler and the worker thread.
- static const int kBufferSize = 128;
- TickSample buffer_[kBufferSize]; // Buffer storage.
- int head_; // Index to the buffer head.
- int tail_; // Index to the buffer tail.
- bool overflow_; // Tell whether a buffer overflow has occurred.
- Semaphore* buffer_semaphore_; // Sempahore used for buffer synchronization.
-
- // Tells whether profiler is engaged, that is, processing thread is stated.
- bool engaged_;
-
- // Tells whether worker thread should continue running.
- bool running_;
-
- // Tells whether we are currently recording tick samples.
- bool paused_;
-};
-
-
-//
-// StackTracer implementation
-//
-DISABLE_ASAN void StackTracer::Trace(Isolate* isolate, TickSample* sample) {
- ASSERT(isolate->IsInitialized());
-
- // Avoid collecting traces while doing GC.
- if (sample->state == GC) return;
-
- const Address js_entry_sp =
- Isolate::js_entry_sp(isolate->thread_local_top());
- if (js_entry_sp == 0) {
- // Not executing JS now.
- return;
- }
-
- const Address callback = isolate->external_callback();
- if (callback != NULL) {
- sample->external_callback = callback;
- sample->has_external_callback = true;
- } else {
- // Sample potential return address value for frameless invocation of
- // stubs (we'll figure out later, if this value makes sense).
- sample->tos = Memory::Address_at(sample->sp);
- sample->has_external_callback = false;
- }
-
- SafeStackTraceFrameIterator it(isolate,
- sample->fp, sample->sp,
- sample->sp, js_entry_sp);
- int i = 0;
- while (!it.done() && i < TickSample::kMaxFramesCount) {
- sample->stack[i++] = it.frame()->pc();
- it.Advance();
- }
- sample->frames_count = i;
-}
-
-
-//
-// Ticker used to provide ticks to the profiler and the sliding state
-// window.
-//
-class Ticker: public Sampler {
- public:
- Ticker(Isolate* isolate, int interval):
- Sampler(isolate, interval),
- profiler_(NULL) {}
-
- ~Ticker() { if (IsActive()) Stop(); }
-
- virtual void Tick(TickSample* sample) {
- if (profiler_) profiler_->Insert(sample);
- }
-
- void SetProfiler(Profiler* profiler) {
- ASSERT(profiler_ == NULL);
- profiler_ = profiler;
- IncreaseProfilingDepth();
- if (!FLAG_prof_lazy && !IsActive()) Start();
- }
-
- void ClearProfiler() {
- DecreaseProfilingDepth();
- profiler_ = NULL;
- if (IsActive()) Stop();
- }
-
- protected:
- virtual void DoSampleStack(TickSample* sample) {
- StackTracer::Trace(isolate(), sample);
- }
-
- private:
- Profiler* profiler_;
-};
-
-
-//
-// Profiler implementation.
-//
-Profiler::Profiler(Isolate* isolate)
- : Thread("v8:Profiler"),
- isolate_(isolate),
- head_(0),
- tail_(0),
- overflow_(false),
- buffer_semaphore_(OS::CreateSemaphore(0)),
- engaged_(false),
- running_(false),
- paused_(false) {
-}
-
-
-void Profiler::Engage() {
- if (engaged_) return;
- engaged_ = true;
-
- OS::LogSharedLibraryAddresses();
-
- // Start thread processing the profiler buffer.
- running_ = true;
- Start();
-
- // Register to get ticks.
- LOGGER->ticker_->SetProfiler(this);
-
- LOGGER->ProfilerBeginEvent();
-}
-
-
-void Profiler::Disengage() {
- if (!engaged_) return;
-
- // Stop receiving ticks.
- LOGGER->ticker_->ClearProfiler();
-
- // Terminate the worker thread by setting running_ to false,
- // inserting a fake element in the queue and then wait for
- // the thread to terminate.
- running_ = false;
- TickSample sample;
- // Reset 'paused_' flag, otherwise semaphore may not be signalled.
- resume();
- Insert(&sample);
- Join();
-
- LOG(ISOLATE, UncheckedStringEvent("profiler", "end"));
-}
-
-
-void Profiler::Run() {
- TickSample sample;
- bool overflow = Remove(&sample);
- while (running_) {
- LOG(isolate_, TickEvent(&sample, overflow));
- overflow = Remove(&sample);
- }
-}
-
-
-// Low-level profiling event structures.
-
-struct LowLevelCodeCreateStruct {
- static const char kTag = 'C';
-
- int32_t name_size;
- Address code_address;
- int32_t code_size;
-};
-
-
-struct LowLevelCodeMoveStruct {
- static const char kTag = 'M';
-
- Address from_address;
- Address to_address;
-};
-
-
-struct LowLevelCodeDeleteStruct {
- static const char kTag = 'D';
-
- Address address;
-};
-
-
-struct LowLevelSnapshotPositionStruct {
- static const char kTag = 'P';
-
- Address address;
- int32_t position;
-};
-
-
-static const char kCodeMovingGCTag = 'G';
-
-
-//
-// Logger class implementation.
-//
-
-class Logger::NameMap {
- public:
- NameMap() : impl_(&PointerEquals) {}
-
- ~NameMap() {
- for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
- DeleteArray(static_cast<const char*>(p->value));
- }
- }
-
- void Insert(Address code_address, const char* name, int name_size) {
- HashMap::Entry* entry = FindOrCreateEntry(code_address);
- if (entry->value == NULL) {
- entry->value = CopyName(name, name_size);
- }
- }
-
- const char* Lookup(Address code_address) {
- HashMap::Entry* entry = FindEntry(code_address);
- return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
- }
-
- void Remove(Address code_address) {
- HashMap::Entry* entry = FindEntry(code_address);
- if (entry != NULL) {
- DeleteArray(static_cast<char*>(entry->value));
- RemoveEntry(entry);
- }
- }
-
- void Move(Address from, Address to) {
- if (from == to) return;
- HashMap::Entry* from_entry = FindEntry(from);
- ASSERT(from_entry != NULL);
- void* value = from_entry->value;
- RemoveEntry(from_entry);
- HashMap::Entry* to_entry = FindOrCreateEntry(to);
- ASSERT(to_entry->value == NULL);
- to_entry->value = value;
- }
-
- private:
- static bool PointerEquals(void* lhs, void* rhs) {
- return lhs == rhs;
- }
-
- static char* CopyName(const char* name, int name_size) {
- char* result = NewArray<char>(name_size + 1);
- for (int i = 0; i < name_size; ++i) {
- char c = name[i];
- if (c == '\0') c = ' ';
- result[i] = c;
- }
- result[name_size] = '\0';
- return result;
- }
-
- HashMap::Entry* FindOrCreateEntry(Address code_address) {
- return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
- }
-
- HashMap::Entry* FindEntry(Address code_address) {
- return impl_.Lookup(code_address, ComputePointerHash(code_address), false);
- }
-
- void RemoveEntry(HashMap::Entry* entry) {
- impl_.Remove(entry->key, entry->hash);
- }
-
- HashMap impl_;
-
- DISALLOW_COPY_AND_ASSIGN(NameMap);
-};
-
-
-class Logger::NameBuffer {
- public:
- NameBuffer() { Reset(); }
-
- void Reset() {
- utf8_pos_ = 0;
- }
-
- void AppendString(String* str) {
- if (str == NULL) return;
- if (str->HasOnlyAsciiChars()) {
- int utf8_length = Min(str->length(), kUtf8BufferSize - utf8_pos_);
- String::WriteToFlat(str,
- reinterpret_cast<uint8_t*>(utf8_buffer_ + utf8_pos_),
- 0,
- utf8_length);
- utf8_pos_ += utf8_length;
- return;
- }
- int uc16_length = Min(str->length(), kUtf16BufferSize);
- String::WriteToFlat(str, utf16_buffer, 0, uc16_length);
- int previous = unibrow::Utf16::kNoPreviousCharacter;
- for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
- uc16 c = utf16_buffer[i];
- if (c <= unibrow::Utf8::kMaxOneByteChar) {
- utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
- } else {
- int char_length = unibrow::Utf8::Length(c, previous);
- if (utf8_pos_ + char_length > kUtf8BufferSize) break;
- unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c, previous);
- utf8_pos_ += char_length;
- }
- previous = c;
- }
- }
-
- void AppendBytes(const char* bytes, int size) {
- size = Min(size, kUtf8BufferSize - utf8_pos_);
- memcpy(utf8_buffer_ + utf8_pos_, bytes, size);
- utf8_pos_ += size;
- }
-
- void AppendBytes(const char* bytes) {
- AppendBytes(bytes, StrLength(bytes));
- }
-
- void AppendByte(char c) {
- if (utf8_pos_ >= kUtf8BufferSize) return;
- utf8_buffer_[utf8_pos_++] = c;
- }
-
- void AppendInt(int n) {
- Vector<char> buffer(utf8_buffer_ + utf8_pos_, kUtf8BufferSize - utf8_pos_);
- int size = OS::SNPrintF(buffer, "%d", n);
- if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
- utf8_pos_ += size;
- }
- }
-
- const char* get() { return utf8_buffer_; }
- int size() const { return utf8_pos_; }
-
- private:
- static const int kUtf8BufferSize = 512;
- static const int kUtf16BufferSize = 128;
-
- int utf8_pos_;
- char utf8_buffer_[kUtf8BufferSize];
- uc16 utf16_buffer[kUtf16BufferSize];
-};
-
-
-Logger::Logger(Isolate* isolate)
- : isolate_(isolate),
- ticker_(NULL),
- profiler_(NULL),
- log_events_(NULL),
- logging_nesting_(0),
- cpu_profiler_nesting_(0),
- log_(new Log(this)),
- name_buffer_(new NameBuffer),
- address_to_name_map_(NULL),
- is_initialized_(false),
- code_event_handler_(NULL),
- last_address_(NULL),
- prev_sp_(NULL),
- prev_function_(NULL),
- prev_to_(NULL),
- prev_code_(NULL),
- epoch_(0) {
-}
-
-
-Logger::~Logger() {
- delete address_to_name_map_;
- delete name_buffer_;
- delete log_;
-}
-
-
-void Logger::IssueCodeAddedEvent(Code* code,
- Script* script,
- const char* name,
- size_t name_len) {
- JitCodeEvent event;
- memset(&event, 0, sizeof(event));
- event.type = JitCodeEvent::CODE_ADDED;
- event.code_start = code->instruction_start();
- event.code_len = code->instruction_size();
- Handle<Script> script_handle =
- script != NULL ? Handle<Script>(script) : Handle<Script>();
- event.script = v8::Handle<v8::Script>(ToApi<v8::Script>(script_handle));
- event.name.str = name;
- event.name.len = name_len;
-
- code_event_handler_(&event);
-}
-
-
-void Logger::IssueCodeMovedEvent(Address from, Address to) {
- Code* from_code = Code::cast(HeapObject::FromAddress(from));
-
- JitCodeEvent event;
- event.type = JitCodeEvent::CODE_MOVED;
- event.code_start = from_code->instruction_start();
- event.code_len = from_code->instruction_size();
-
- // Calculate the header size.
- const size_t header_size =
- from_code->instruction_start() - reinterpret_cast<byte*>(from_code);
-
- // Calculate the new start address of the instructions.
- event.new_code_start =
- reinterpret_cast<byte*>(HeapObject::FromAddress(to)) + header_size;
-
- code_event_handler_(&event);
-}
-
-
-void Logger::IssueCodeRemovedEvent(Address from) {
- Code* from_code = Code::cast(HeapObject::FromAddress(from));
-
- JitCodeEvent event;
- event.type = JitCodeEvent::CODE_REMOVED;
- event.code_start = from_code->instruction_start();
- event.code_len = from_code->instruction_size();
-
- code_event_handler_(&event);
-}
-
-void Logger::IssueAddCodeLinePosInfoEvent(
- void* jit_handler_data,
- int pc_offset,
- int position,
- JitCodeEvent::PositionType position_type) {
- JitCodeEvent event;
- memset(&event, 0, sizeof(event));
- event.type = JitCodeEvent::CODE_ADD_LINE_POS_INFO;
- event.user_data = jit_handler_data;
- event.line_info.offset = pc_offset;
- event.line_info.pos = position;
- event.line_info.position_type = position_type;
-
- code_event_handler_(&event);
-}
-
-void* Logger::IssueStartCodePosInfoEvent() {
- JitCodeEvent event;
- memset(&event, 0, sizeof(event));
- event.type = JitCodeEvent::CODE_START_LINE_INFO_RECORDING;
-
- code_event_handler_(&event);
- return event.user_data;
-}
-
-void Logger::IssueEndCodePosInfoEvent(Code* code, void* jit_handler_data) {
- JitCodeEvent event;
- memset(&event, 0, sizeof(event));
- event.type = JitCodeEvent::CODE_END_LINE_INFO_RECORDING;
- event.code_start = code->instruction_start();
- event.user_data = jit_handler_data;
-
- code_event_handler_(&event);
-}
-
-#define DECLARE_EVENT(ignore1, name) name,
-static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
- LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
-};
-#undef DECLARE_EVENT
-
-
-void Logger::ProfilerBeginEvent() {
- if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
- msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
- msg.WriteToLogFile();
-}
-
-
-void Logger::StringEvent(const char* name, const char* value) {
- if (FLAG_log) UncheckedStringEvent(name, value);
-}
-
-
-void Logger::UncheckedStringEvent(const char* name, const char* value) {
- if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,\"%s\"\n", name, value);
- msg.WriteToLogFile();
-}
-
-
-void Logger::IntEvent(const char* name, int value) {
- if (FLAG_log) UncheckedIntEvent(name, value);
-}
-
-
-void Logger::IntPtrTEvent(const char* name, intptr_t value) {
- if (FLAG_log) UncheckedIntPtrTEvent(name, value);
-}
-
-
-void Logger::UncheckedIntEvent(const char* name, int value) {
- if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%d\n", name, value);
- msg.WriteToLogFile();
-}
-
-
-void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
- if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
- msg.WriteToLogFile();
-}
-
-
-void Logger::HandleEvent(const char* name, Object** location) {
- if (!log_->IsEnabled() || !FLAG_log_handles) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
- msg.WriteToLogFile();
-}
-
-
-// ApiEvent is private so all the calls come from the Logger class. It is the
-// caller's responsibility to ensure that log is enabled and that
-// FLAG_log_api is true.
-void Logger::ApiEvent(const char* format, ...) {
- ASSERT(log_->IsEnabled() && FLAG_log_api);
- LogMessageBuilder msg(this);
- va_list ap;
- va_start(ap, format);
- msg.AppendVA(format, ap);
- va_end(ap);
- msg.WriteToLogFile();
-}
-
-
-void Logger::ApiNamedSecurityCheck(Object* key) {
- if (!log_->IsEnabled() || !FLAG_log_api) return;
- if (key->IsString()) {
- SmartArrayPointer<char> str =
- String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,check-security,\"%s\"\n", *str);
- } else if (key->IsUndefined()) {
- ApiEvent("api,check-security,undefined\n");
- } else {
- ApiEvent("api,check-security,['no-name']\n");
- }
-}
-
-
-void Logger::SharedLibraryEvent(const char* library_path,
- uintptr_t start,
- uintptr_t end) {
- if (!log_->IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg(this);
- msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
- library_path,
- start,
- end);
- msg.WriteToLogFile();
-}
-
-
-void Logger::SharedLibraryEvent(const wchar_t* library_path,
- uintptr_t start,
- uintptr_t end) {
- if (!log_->IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg(this);
- msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
- library_path,
- start,
- end);
- msg.WriteToLogFile();
-}
-
-
-void Logger::TimerEvent(StartEnd se, const char* name) {
- if (!log_->IsEnabled()) return;
- ASSERT(FLAG_log_internal_timer_events);
- LogMessageBuilder msg(this);
- int since_epoch = static_cast<int>(OS::Ticks() - epoch_);
- const char* format = (se == START) ? "timer-event-start,\"%s\",%ld\n"
- : "timer-event-end,\"%s\",%ld\n";
- msg.Append(format, name, since_epoch);
- msg.WriteToLogFile();
-}
-
-
-void Logger::EnterExternal() {
- LOG(ISOLATE, TimerEvent(START, TimerEventScope::v8_external));
-}
-
-
-void Logger::LeaveExternal() {
- LOG(ISOLATE, TimerEvent(END, TimerEventScope::v8_external));
-}
-
-
-void Logger::TimerEventScope::LogTimerEvent(StartEnd se) {
- LOG(isolate_, TimerEvent(se, name_));
-}
-
-
-const char* Logger::TimerEventScope::v8_recompile_synchronous =
- "V8.RecompileSynchronous";
-const char* Logger::TimerEventScope::v8_recompile_parallel =
- "V8.RecompileParallel";
-const char* Logger::TimerEventScope::v8_compile_full_code =
- "V8.CompileFullCode";
-const char* Logger::TimerEventScope::v8_execute = "V8.Execute";
-const char* Logger::TimerEventScope::v8_external = "V8.External";
-
-
-void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
- // Prints "/" + re.source + "/" +
- // (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
- LogMessageBuilder msg(this);
-
- Handle<Object> source = GetProperty(regexp, "source");
- if (!source->IsString()) {
- msg.Append("no source");
- return;
- }
-
- switch (regexp->TypeTag()) {
- case JSRegExp::ATOM:
- msg.Append('a');
- break;
- default:
- break;
- }
- msg.Append('/');
- msg.AppendDetailed(*Handle<String>::cast(source), false);
- msg.Append('/');
-
- // global flag
- Handle<Object> global = GetProperty(regexp, "global");
- if (global->IsTrue()) {
- msg.Append('g');
- }
- // ignorecase flag
- Handle<Object> ignorecase = GetProperty(regexp, "ignoreCase");
- if (ignorecase->IsTrue()) {
- msg.Append('i');
- }
- // multiline flag
- Handle<Object> multiline = GetProperty(regexp, "multiline");
- if (multiline->IsTrue()) {
- msg.Append('m');
- }
-
- msg.WriteToLogFile();
-}
-
-
-void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
- if (!log_->IsEnabled() || !FLAG_log_regexp) return;
- LogMessageBuilder msg(this);
- msg.Append("regexp-compile,");
- LogRegExpSource(regexp);
- msg.Append(in_cache ? ",hit\n" : ",miss\n");
- msg.WriteToLogFile();
-}
-
-
-void Logger::LogRuntime(Isolate* isolate,
- Vector<const char> format,
- JSArray* args) {
- if (!log_->IsEnabled() || !FLAG_log_runtime) return;
- HandleScope scope(isolate);
- LogMessageBuilder msg(this);
- for (int i = 0; i < format.length(); i++) {
- char c = format[i];
- if (c == '%' && i <= format.length() - 2) {
- i++;
- ASSERT('0' <= format[i] && format[i] <= '9');
- MaybeObject* maybe = args->GetElement(format[i] - '0');
- Object* obj;
- if (!maybe->ToObject(&obj)) {
- msg.Append("<exception>");
- continue;
- }
- i++;
- switch (format[i]) {
- case 's':
- msg.AppendDetailed(String::cast(obj), false);
- break;
- case 'S':
- msg.AppendDetailed(String::cast(obj), true);
- break;
- case 'r':
- Logger::LogRegExpSource(Handle<JSRegExp>(JSRegExp::cast(obj)));
- break;
- case 'x':
- msg.Append("0x%x", Smi::cast(obj)->value());
- break;
- case 'i':
- msg.Append("%i", Smi::cast(obj)->value());
- break;
- default:
- UNREACHABLE();
- }
- } else {
- msg.Append(c);
- }
- }
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-void Logger::ApiIndexedSecurityCheck(uint32_t index) {
- if (!log_->IsEnabled() || !FLAG_log_api) return;
- ApiEvent("api,check-security,%u\n", index);
-}
-
-
-void Logger::ApiNamedPropertyAccess(const char* tag,
- JSObject* holder,
- Object* name) {
- ASSERT(name->IsString());
- if (!log_->IsEnabled() || !FLAG_log_api) return;
- String* class_name_obj = holder->class_name();
- SmartArrayPointer<char> class_name =
- class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- SmartArrayPointer<char> property_name =
- String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
-}
-
-void Logger::ApiIndexedPropertyAccess(const char* tag,
- JSObject* holder,
- uint32_t index) {
- if (!log_->IsEnabled() || !FLAG_log_api) return;
- String* class_name_obj = holder->class_name();
- SmartArrayPointer<char> class_name =
- class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
-}
-
-void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
- if (!log_->IsEnabled() || !FLAG_log_api) return;
- String* class_name_obj = object->class_name();
- SmartArrayPointer<char> class_name =
- class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
-}
-
-
-void Logger::ApiEntryCall(const char* name) {
- if (!log_->IsEnabled() || !FLAG_log_api) return;
- ApiEvent("api,%s\n", name);
-}
-
-
-void Logger::NewEvent(const char* name, void* object, size_t size) {
- if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
- msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
- static_cast<unsigned int>(size));
- msg.WriteToLogFile();
-}
-
-
-void Logger::DeleteEvent(const char* name, void* object) {
- if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
- msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
- msg.WriteToLogFile();
-}
-
-
-void Logger::NewEventStatic(const char* name, void* object, size_t size) {
- LOGGER->NewEvent(name, object, size);
-}
-
-
-void Logger::DeleteEventStatic(const char* name, void* object) {
- LOGGER->DeleteEvent(name, object);
-}
-
-void Logger::CallbackEventInternal(const char* prefix, const char* name,
- Address entry_point) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,-3,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[CALLBACK_TAG]);
- msg.AppendAddress(entry_point);
- msg.Append(",1,\"%s%s\"", prefix, name);
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-void Logger::CallbackEvent(String* name, Address entry_point) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- SmartArrayPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- CallbackEventInternal("", *str, entry_point);
-}
-
-
-void Logger::GetterCallbackEvent(String* name, Address entry_point) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- SmartArrayPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- CallbackEventInternal("get ", *str, entry_point);
-}
-
-
-void Logger::SetterCallbackEvent(String* name, Address entry_point) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- SmartArrayPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- CallbackEventInternal("set ", *str, entry_point);
-}
-
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- const char* comment) {
- if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendBytes(comment);
- }
- if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,%d,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"", code->ExecutableSize());
- for (const char* p = comment; *p != '\0'; p++) {
- if (*p == '"') {
- msg.Append('\\');
- }
- msg.Append(*p);
- }
- msg.Append('"');
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- String* name) {
- if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendString(name);
- }
- if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,%d,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"", code->ExecutableSize());
- msg.AppendDetailed(name, false);
- msg.Append('"');
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-// ComputeMarker must only be used when SharedFunctionInfo is known.
-static const char* ComputeMarker(Code* code) {
- switch (code->kind()) {
- case Code::FUNCTION: return code->optimizable() ? "~" : "";
- case Code::OPTIMIZED_FUNCTION: return "*";
- default: return "";
- }
-}
-
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* name) {
- if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendBytes(ComputeMarker(code));
- name_buffer_->AppendString(name);
- }
- if (code_event_handler_ != NULL) {
- Script* script =
- shared->script()->IsScript() ? Script::cast(shared->script()) : NULL;
- IssueCodeAddedEvent(code,
- script,
- name_buffer_->get(),
- name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
- if (code == Isolate::Current()->builtins()->builtin(
- Builtins::kLazyCompile))
- return;
-
- LogMessageBuilder msg(this);
- SmartArrayPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s,%s,%d,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str);
- msg.AppendAddress(shared->address());
- msg.Append(",%s", ComputeMarker(code));
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-// Although, it is possible to extract source and line from
-// the SharedFunctionInfo object, we left it to caller
-// to leave logging functions free from heap allocations.
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* source, int line) {
- if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendBytes(ComputeMarker(code));
- name_buffer_->AppendString(shared->DebugName());
- name_buffer_->AppendByte(' ');
- name_buffer_->AppendString(source);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendInt(line);
- }
- if (code_event_handler_ != NULL) {
- Script* script =
- shared->script()->IsScript() ? Script::cast(shared->script()) : NULL;
- IssueCodeAddedEvent(code,
- script,
- name_buffer_->get(),
- name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
- LogMessageBuilder msg(this);
- SmartArrayPointer<char> name =
- shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- SmartArrayPointer<char> sourcestr =
- source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s,%s,%d,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"%s %s:%d\",",
- code->ExecutableSize(),
- *name,
- *sourcestr,
- line);
- msg.AppendAddress(shared->address());
- msg.Append(",%s", ComputeMarker(code));
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
- if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendInt(args_count);
- }
- if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,%d,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-void Logger::CodeMovingGCEvent() {
- if (!log_->IsEnabled() || !FLAG_ll_prof) return;
- LowLevelLogWriteBytes(&kCodeMovingGCTag, sizeof(kCodeMovingGCTag));
- OS::SignalCodeMovingGC();
-}
-
-
-void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
- if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[REG_EXP_TAG]);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendString(source);
- }
- if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,-2,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[REG_EXP_TAG]);
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"", code->ExecutableSize());
- msg.AppendDetailed(source, false);
- msg.Append('\"');
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-void Logger::CodeMoveEvent(Address from, Address to) {
- if (code_event_handler_ != NULL) IssueCodeMovedEvent(from, to);
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) LowLevelCodeMoveEvent(from, to);
- if (Serializer::enabled() && address_to_name_map_ != NULL) {
- address_to_name_map_->Move(from, to);
- }
- MoveEventInternal(CODE_MOVE_EVENT, from, to);
-}
-
-
-void Logger::CodeDeleteEvent(Address from) {
- if (code_event_handler_ != NULL) IssueCodeRemovedEvent(from);
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) LowLevelCodeDeleteEvent(from);
- if (Serializer::enabled() && address_to_name_map_ != NULL) {
- address_to_name_map_->Remove(from);
- }
- DeleteEventInternal(CODE_DELETE_EVENT, from);
-}
-
-void Logger::CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
- int pc_offset,
- int position) {
- if (code_event_handler_ != NULL) {
- IssueAddCodeLinePosInfoEvent(jit_handler_data,
- pc_offset,
- position,
- JitCodeEvent::POSITION);
- }
-}
-
-void Logger::CodeLinePosInfoAddStatementPositionEvent(void* jit_handler_data,
- int pc_offset,
- int position) {
- if (code_event_handler_ != NULL) {
- IssueAddCodeLinePosInfoEvent(jit_handler_data,
- pc_offset,
- position,
- JitCodeEvent::STATEMENT_POSITION);
- }
-}
-
-void Logger::CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder) {
- if (code_event_handler_ != NULL) {
- pos_recorder->AttachJITHandlerData(IssueStartCodePosInfoEvent());
- }
-}
-
-void Logger::CodeEndLinePosInfoRecordEvent(Code* code,
- void* jit_handler_data) {
- if (code_event_handler_ != NULL) {
- IssueEndCodePosInfoEvent(code, jit_handler_data);
- }
-}
-
-void Logger::SnapshotPositionEvent(Address addr, int pos) {
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) LowLevelSnapshotPositionEvent(addr, pos);
- if (Serializer::enabled() && address_to_name_map_ != NULL) {
- const char* code_name = address_to_name_map_->Lookup(addr);
- if (code_name == NULL) return; // Not a code object.
- LogMessageBuilder msg(this);
- msg.Append("%s,%d,\"", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
- for (const char* p = code_name; *p != '\0'; ++p) {
- if (*p == '"') msg.Append('\\');
- msg.Append(*p);
- }
- msg.Append("\"\n");
- msg.WriteToLogFile();
- }
- if (!FLAG_log_snapshot_positions) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
- msg.AppendAddress(addr);
- msg.Append(",%d", pos);
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
- MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
-}
-
-
-void Logger::MoveEventInternal(LogEventsAndTags event,
- Address from,
- Address to) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,", kLogEventsNames[event]);
- msg.AppendAddress(from);
- msg.Append(',');
- msg.AppendAddress(to);
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,", kLogEventsNames[event]);
- msg.AppendAddress(from);
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-void Logger::ResourceEvent(const char* name, const char* tag) {
- if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,", name, tag);
-
- uint32_t sec, usec;
- if (OS::GetUserTime(&sec, &usec) != -1) {
- msg.Append("%d,%d,", sec, usec);
- }
- msg.Append("%.0f", OS::TimeCurrentMillis());
-
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-void Logger::SuspectReadEvent(String* name, Object* obj) {
- if (!log_->IsEnabled() || !FLAG_log_suspect) return;
- LogMessageBuilder msg(this);
- String* class_name = obj->IsJSObject()
- ? JSObject::cast(obj)->class_name()
- : isolate_->heap()->empty_string();
- msg.Append("suspect-read,");
- msg.Append(class_name);
- msg.Append(',');
- msg.Append('"');
- msg.Append(name);
- msg.Append('"');
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
- // Using non-relative system time in order to be able to synchronize with
- // external memory profiling events (e.g. DOM memory size).
- msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
- space, kind, OS::TimeCurrentMillis());
- msg.WriteToLogFile();
-}
-
-
-void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
- msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
- msg.WriteToLogFile();
-}
-
-
-void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
- msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
- msg.WriteToLogFile();
-}
-
-
-void Logger::DebugTag(const char* call_site_tag) {
- if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
- msg.Append("debug-tag,%s\n", call_site_tag);
- msg.WriteToLogFile();
-}
-
-
-void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
- if (!log_->IsEnabled() || !FLAG_log) return;
- StringBuilder s(parameter.length() + 1);
- for (int i = 0; i < parameter.length(); ++i) {
- s.AddCharacter(static_cast<char>(parameter[i]));
- }
- char* parameter_string = s.Finalize();
- LogMessageBuilder msg(this);
- msg.Append("debug-queue-event,%s,%15.3f,%s\n",
- event_type,
- OS::TimeCurrentMillis(),
- parameter_string);
- DeleteArray(parameter_string);
- msg.WriteToLogFile();
-}
-
-
-void Logger::TickEvent(TickSample* sample, bool overflow) {
- if (!log_->IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
- msg.AppendAddress(sample->pc);
- msg.Append(',');
- msg.AppendAddress(sample->sp);
- msg.Append(",%ld", static_cast<int>(OS::Ticks() - epoch_));
- if (sample->has_external_callback) {
- msg.Append(",1,");
- msg.AppendAddress(sample->external_callback);
- } else {
- msg.Append(",0,");
- msg.AppendAddress(sample->tos);
- }
- msg.Append(",%d", static_cast<int>(sample->state));
- if (overflow) {
- msg.Append(",overflow");
- }
- for (int i = 0; i < sample->frames_count; ++i) {
- msg.Append(',');
- msg.AppendAddress(sample->stack[i]);
- }
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
-bool Logger::IsProfilerPaused() {
- return profiler_ == NULL || profiler_->paused();
-}
-
-
-void Logger::PauseProfiler() {
- if (!log_->IsEnabled()) return;
- if (profiler_ != NULL) {
- // It is OK to have negative nesting.
- if (--cpu_profiler_nesting_ == 0) {
- profiler_->pause();
- if (FLAG_prof_lazy) {
- ticker_->Stop();
- FLAG_log_code = false;
- LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
- }
- --logging_nesting_;
- }
- }
-}
-
-
-void Logger::ResumeProfiler() {
- if (!log_->IsEnabled()) return;
- if (profiler_ != NULL) {
- if (cpu_profiler_nesting_++ == 0) {
- ++logging_nesting_;
- if (FLAG_prof_lazy) {
- profiler_->Engage();
- LOG(ISOLATE, UncheckedStringEvent("profiler", "resume"));
- FLAG_log_code = true;
- LogCompiledFunctions();
- LogAccessorCallbacks();
- if (!ticker_->IsActive()) ticker_->Start();
- }
- profiler_->resume();
- }
- }
-}
-
-
-// This function can be called when Log's mutex is acquired,
-// either from main or Profiler's thread.
-void Logger::LogFailure() {
- PauseProfiler();
-}
-
-
-bool Logger::IsProfilerSamplerActive() {
- return ticker_->IsActive();
-}
-
-
-class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
- public:
- EnumerateOptimizedFunctionsVisitor(Handle<SharedFunctionInfo>* sfis,
- Handle<Code>* code_objects,
- int* count)
- : sfis_(sfis), code_objects_(code_objects), count_(count) { }
-
- virtual void EnterContext(Context* context) {}
- virtual void LeaveContext(Context* context) {}
-
- virtual void VisitFunction(JSFunction* function) {
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(function->shared());
- Object* maybe_script = sfi->script();
- if (maybe_script->IsScript()
- && !Script::cast(maybe_script)->HasValidSource()) return;
- if (sfis_ != NULL) {
- sfis_[*count_] = Handle<SharedFunctionInfo>(sfi);
- }
- if (code_objects_ != NULL) {
- ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- code_objects_[*count_] = Handle<Code>(function->code());
- }
- *count_ = *count_ + 1;
- }
-
- private:
- Handle<SharedFunctionInfo>* sfis_;
- Handle<Code>* code_objects_;
- int* count_;
-};
-
-
-static int EnumerateCompiledFunctions(Heap* heap,
- Handle<SharedFunctionInfo>* sfis,
- Handle<Code>* code_objects) {
- HeapIterator iterator(heap);
- AssertNoAllocation no_alloc;
- int compiled_funcs_count = 0;
-
- // Iterate the heap to find shared function info objects and record
- // the unoptimized code for them.
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (!obj->IsSharedFunctionInfo()) continue;
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
- if (sfi->is_compiled()
- && (!sfi->script()->IsScript()
- || Script::cast(sfi->script())->HasValidSource())) {
- if (sfis != NULL) {
- sfis[compiled_funcs_count] = Handle<SharedFunctionInfo>(sfi);
- }
- if (code_objects != NULL) {
- code_objects[compiled_funcs_count] = Handle<Code>(sfi->code());
- }
- ++compiled_funcs_count;
- }
- }
-
- // Iterate all optimized functions in all contexts.
- EnumerateOptimizedFunctionsVisitor visitor(sfis,
- code_objects,
- &compiled_funcs_count);
- Deoptimizer::VisitAllOptimizedFunctions(&visitor);
-
- return compiled_funcs_count;
-}
-
-
-void Logger::LogCodeObject(Object* object) {
- if (FLAG_log_code || FLAG_ll_prof || is_logging_code_events()) {
- Code* code_object = Code::cast(object);
- LogEventsAndTags tag = Logger::STUB_TAG;
- const char* description = "Unknown code from the snapshot";
- switch (code_object->kind()) {
- case Code::FUNCTION:
- case Code::OPTIMIZED_FUNCTION:
- return; // We log this later using LogCompiledFunctions.
- case Code::UNARY_OP_IC: // fall through
- case Code::BINARY_OP_IC: // fall through
- case Code::COMPARE_IC: // fall through
- case Code::TO_BOOLEAN_IC: // fall through
- case Code::COMPILED_STUB: // fall through
- case Code::STUB:
- description =
- CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
- if (description == NULL)
- description = "A stub from the snapshot";
- tag = Logger::STUB_TAG;
- break;
- case Code::BUILTIN:
- description = "A builtin from the snapshot";
- tag = Logger::BUILTIN_TAG;
- break;
- case Code::KEYED_LOAD_IC:
- description = "A keyed load IC from the snapshot";
- tag = Logger::KEYED_LOAD_IC_TAG;
- break;
- case Code::LOAD_IC:
- description = "A load IC from the snapshot";
- tag = Logger::LOAD_IC_TAG;
- break;
- case Code::STORE_IC:
- description = "A store IC from the snapshot";
- tag = Logger::STORE_IC_TAG;
- break;
- case Code::KEYED_STORE_IC:
- description = "A keyed store IC from the snapshot";
- tag = Logger::KEYED_STORE_IC_TAG;
- break;
- case Code::CALL_IC:
- description = "A call IC from the snapshot";
- tag = Logger::CALL_IC_TAG;
- break;
- case Code::KEYED_CALL_IC:
- description = "A keyed call IC from the snapshot";
- tag = Logger::KEYED_CALL_IC_TAG;
- break;
- }
- PROFILE(ISOLATE, CodeCreateEvent(tag, code_object, description));
- }
-}
-
-
-void Logger::LogCodeInfo() {
- if (!log_->IsEnabled() || !FLAG_ll_prof) return;
-#if V8_TARGET_ARCH_IA32
- const char arch[] = "ia32";
-#elif V8_TARGET_ARCH_X64
- const char arch[] = "x64";
-#elif V8_TARGET_ARCH_ARM
- const char arch[] = "arm";
-#elif V8_TARGET_ARCH_MIPS
- const char arch[] = "mips";
-#else
- const char arch[] = "unknown";
-#endif
- LowLevelLogWriteBytes(arch, sizeof(arch));
-}
-
-
-void Logger::RegisterSnapshotCodeName(Code* code,
- const char* name,
- int name_size) {
- ASSERT(Serializer::enabled());
- if (address_to_name_map_ == NULL) {
- address_to_name_map_ = new NameMap;
- }
- address_to_name_map_->Insert(code->address(), name, name_size);
-}
-
-
-void Logger::LowLevelCodeCreateEvent(Code* code,
- const char* name,
- int name_size) {
- if (log_->ll_output_handle_ == NULL) return;
- LowLevelCodeCreateStruct event;
- event.name_size = name_size;
- event.code_address = code->instruction_start();
- ASSERT(event.code_address == code->address() + Code::kHeaderSize);
- event.code_size = code->instruction_size();
- LowLevelLogWriteStruct(event);
- LowLevelLogWriteBytes(name, name_size);
- LowLevelLogWriteBytes(
- reinterpret_cast<const char*>(code->instruction_start()),
- code->instruction_size());
-}
-
-
-void Logger::LowLevelCodeMoveEvent(Address from, Address to) {
- if (log_->ll_output_handle_ == NULL) return;
- LowLevelCodeMoveStruct event;
- event.from_address = from + Code::kHeaderSize;
- event.to_address = to + Code::kHeaderSize;
- LowLevelLogWriteStruct(event);
-}
-
-
-void Logger::LowLevelCodeDeleteEvent(Address from) {
- if (log_->ll_output_handle_ == NULL) return;
- LowLevelCodeDeleteStruct event;
- event.address = from + Code::kHeaderSize;
- LowLevelLogWriteStruct(event);
-}
-
-
-void Logger::LowLevelSnapshotPositionEvent(Address addr, int pos) {
- if (log_->ll_output_handle_ == NULL) return;
- LowLevelSnapshotPositionStruct event;
- event.address = addr + Code::kHeaderSize;
- event.position = pos;
- LowLevelLogWriteStruct(event);
-}
-
-
-void Logger::LowLevelLogWriteBytes(const char* bytes, int size) {
- size_t rv = fwrite(bytes, 1, size, log_->ll_output_handle_);
- ASSERT(static_cast<size_t>(size) == rv);
- USE(rv);
-}
-
-
-void Logger::LogCodeObjects() {
- Heap* heap = isolate_->heap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "Logger::LogCodeObjects");
- HeapIterator iterator(heap);
- AssertNoAllocation no_alloc;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (obj->IsCode()) LogCodeObject(obj);
- }
-}
-
-
-void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
- Handle<Code> code) {
- Handle<String> func_name(shared->DebugName());
- if (shared->script()->IsScript()) {
- Handle<Script> script(Script::cast(shared->script()));
- if (script->name()->IsString()) {
- Handle<String> script_name(String::cast(script->name()));
- int line_num = GetScriptLineNumber(script, shared->start_position());
- if (line_num > 0) {
- PROFILE(ISOLATE,
- CodeCreateEvent(
- Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code, *shared,
- *script_name, line_num + 1));
- } else {
- // Can't distinguish eval and script here, so always use Script.
- PROFILE(ISOLATE,
- CodeCreateEvent(
- Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *code, *shared, *script_name));
- }
- } else {
- PROFILE(ISOLATE,
- CodeCreateEvent(
- Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code, *shared, *func_name));
- }
- } else if (shared->IsApiFunction()) {
- // API function.
- FunctionTemplateInfo* fun_data = shared->get_api_func_data();
- Object* raw_call_data = fun_data->call_code();
- if (!raw_call_data->IsUndefined()) {
- CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
- Object* callback_obj = call_data->callback();
- Address entry_point = v8::ToCData<Address>(callback_obj);
- PROFILE(ISOLATE, CallbackEvent(*func_name, entry_point));
- }
- } else {
- PROFILE(ISOLATE,
- CodeCreateEvent(
- Logger::LAZY_COMPILE_TAG, *code, *shared, *func_name));
- }
-}
-
-
-void Logger::LogCompiledFunctions() {
- Heap* heap = isolate_->heap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "Logger::LogCompiledFunctions");
- HandleScope scope(isolate_);
- const int compiled_funcs_count = EnumerateCompiledFunctions(heap, NULL, NULL);
- ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
- ScopedVector< Handle<Code> > code_objects(compiled_funcs_count);
- EnumerateCompiledFunctions(heap, sfis.start(), code_objects.start());
-
- // During iteration, there can be heap allocation due to
- // GetScriptLineNumber call.
- for (int i = 0; i < compiled_funcs_count; ++i) {
- if (*code_objects[i] == Isolate::Current()->builtins()->builtin(
- Builtins::kLazyCompile))
- continue;
- LogExistingFunction(sfis[i], code_objects[i]);
- }
-}
-
-
-void Logger::LogAccessorCallbacks() {
- Heap* heap = isolate_->heap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "Logger::LogAccessorCallbacks");
- HeapIterator iterator(heap);
- AssertNoAllocation no_alloc;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (!obj->IsExecutableAccessorInfo()) continue;
- ExecutableAccessorInfo* ai = ExecutableAccessorInfo::cast(obj);
- if (!ai->name()->IsString()) continue;
- String* name = String::cast(ai->name());
- Address getter_entry = v8::ToCData<Address>(ai->getter());
- if (getter_entry != 0) {
- PROFILE(ISOLATE, GetterCallbackEvent(name, getter_entry));
- }
- Address setter_entry = v8::ToCData<Address>(ai->setter());
- if (setter_entry != 0) {
- PROFILE(ISOLATE, SetterCallbackEvent(name, setter_entry));
- }
- }
-}
-
-
-bool Logger::SetUp() {
- // Tests and EnsureInitialize() can call this twice in a row. It's harmless.
- if (is_initialized_) return true;
- is_initialized_ = true;
-
- // --ll-prof implies --log-code and --log-snapshot-positions.
- if (FLAG_ll_prof) {
- FLAG_log_snapshot_positions = true;
- }
-
- // --prof_lazy controls --log-code, implies --noprof_auto.
- if (FLAG_prof_lazy) {
- FLAG_log_code = false;
- FLAG_prof_auto = false;
- }
-
- // TODO(isolates): this assert introduces cyclic dependency (logger
- // -> thread local top -> heap -> logger).
- // ASSERT(VMState::is_outermost_external());
-
- log_->Initialize();
-
- if (FLAG_ll_prof) LogCodeInfo();
-
- Isolate* isolate = Isolate::Current();
- ticker_ = new Ticker(isolate, kSamplingIntervalMs);
-
- bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
- || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof
- || FLAG_log_internal_timer_events;
-
- if (start_logging) {
- logging_nesting_ = 1;
- }
-
- if (FLAG_prof) {
- profiler_ = new Profiler(isolate);
- if (!FLAG_prof_auto) {
- profiler_->pause();
- } else {
- logging_nesting_ = 1;
- }
- if (!FLAG_prof_lazy) {
- profiler_->Engage();
- }
- }
-
- if (FLAG_log_internal_timer_events || FLAG_prof) epoch_ = OS::Ticks();
-
- return true;
-}
-
-
-void Logger::SetCodeEventHandler(uint32_t options,
- JitCodeEventHandler event_handler) {
- code_event_handler_ = event_handler;
-
- if (code_event_handler_ != NULL && (options & kJitCodeEventEnumExisting)) {
- HandleScope scope(Isolate::Current());
- LogCodeObjects();
- LogCompiledFunctions();
- }
-}
-
-
-Sampler* Logger::sampler() {
- return ticker_;
-}
-
-
-void Logger::EnsureTickerStarted() {
- ASSERT(ticker_ != NULL);
- if (!ticker_->IsActive()) ticker_->Start();
-}
-
-
-void Logger::EnsureTickerStopped() {
- if (ticker_ != NULL && ticker_->IsActive()) ticker_->Stop();
-}
-
-
-FILE* Logger::TearDown() {
- if (!is_initialized_) return NULL;
- is_initialized_ = false;
-
- // Stop the profiler before closing the file.
- if (profiler_ != NULL) {
- profiler_->Disengage();
- delete profiler_;
- profiler_ = NULL;
- }
-
- delete ticker_;
- ticker_ = NULL;
-
- return log_->Close();
-}
-
-
-// Protects the state below.
-static Mutex* active_samplers_mutex = NULL;
-
-List<Sampler*>* SamplerRegistry::active_samplers_ = NULL;
-
-
-void SamplerRegistry::SetUp() {
- if (!active_samplers_mutex) {
- active_samplers_mutex = OS::CreateMutex();
- }
-}
-
-
-bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
- ScopedLock lock(active_samplers_mutex);
- for (int i = 0;
- ActiveSamplersExist() && i < active_samplers_->length();
- ++i) {
- func(active_samplers_->at(i), param);
- }
- return ActiveSamplersExist();
-}
-
-
-static void ComputeCpuProfiling(Sampler* sampler, void* flag_ptr) {
- bool* flag = reinterpret_cast<bool*>(flag_ptr);
- *flag |= sampler->IsProfiling();
-}
-
-
-SamplerRegistry::State SamplerRegistry::GetState() {
- bool flag = false;
- if (!IterateActiveSamplers(&ComputeCpuProfiling, &flag)) {
- return HAS_NO_SAMPLERS;
- }
- return flag ? HAS_CPU_PROFILING_SAMPLERS : HAS_SAMPLERS;
-}
-
-
-void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
- ASSERT(sampler->IsActive());
- ScopedLock lock(active_samplers_mutex);
- if (active_samplers_ == NULL) {
- active_samplers_ = new List<Sampler*>;
- } else {
- ASSERT(!active_samplers_->Contains(sampler));
- }
- active_samplers_->Add(sampler);
-}
-
-
-void SamplerRegistry::RemoveActiveSampler(Sampler* sampler) {
- ASSERT(sampler->IsActive());
- ScopedLock lock(active_samplers_mutex);
- ASSERT(active_samplers_ != NULL);
- bool removed = active_samplers_->RemoveElement(sampler);
- ASSERT(removed);
- USE(removed);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/log.h b/src/3rdparty/v8/src/log.h
deleted file mode 100644
index 718dc02..0000000
--- a/src/3rdparty/v8/src/log.h
+++ /dev/null
@@ -1,554 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LOG_H_
-#define V8_LOG_H_
-
-#include "allocation.h"
-#include "objects.h"
-#include "platform.h"
-#include "log-utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Logger is used for collecting logging information from V8 during
-// execution. The result is dumped to a file.
-//
-// Available command line flags:
-//
-// --log
-// Minimal logging (no API, code, or GC sample events), default is off.
-//
-// --log-all
-// Log all events to the file, default is off. This is the same as combining
-// --log-api, --log-code, --log-gc, and --log-regexp.
-//
-// --log-api
-// Log API events to the logfile, default is off. --log-api implies --log.
-//
-// --log-code
-// Log code (create, move, and delete) events to the logfile, default is off.
-// --log-code implies --log.
-//
-// --log-gc
-// Log GC heap samples after each GC that can be processed by hp2ps, default
-// is off. --log-gc implies --log.
-//
-// --log-regexp
-// Log creation and use of regular expressions, Default is off.
-// --log-regexp implies --log.
-//
-// --logfile <filename>
-// Specify the name of the logfile, default is "v8.log".
-//
-// --prof
-// Collect statistical profiling information (ticks), default is off. The
-// tick profiler requires code events, so --prof implies --log-code.
-
-// Forward declarations.
-class LogMessageBuilder;
-class Profiler;
-class Semaphore;
-class Ticker;
-class Isolate;
-class PositionsRecorder;
-
-#undef LOG
-#define LOG(isolate, Call) \
- do { \
- v8::internal::Logger* logger = \
- (isolate)->logger(); \
- if (logger->is_logging()) \
- logger->Call; \
- } while (false)
-
-#define LOG_CODE_EVENT(isolate, Call) \
- do { \
- v8::internal::Logger* logger = \
- (isolate)->logger(); \
- if (logger->is_logging_code_events()) \
- logger->Call; \
- } while (false)
-
-
-#define LOG_EVENTS_AND_TAGS_LIST(V) \
- V(CODE_CREATION_EVENT, "code-creation") \
- V(CODE_MOVE_EVENT, "code-move") \
- V(CODE_DELETE_EVENT, "code-delete") \
- V(CODE_MOVING_GC, "code-moving-gc") \
- V(SHARED_FUNC_MOVE_EVENT, "sfi-move") \
- V(SNAPSHOT_POSITION_EVENT, "snapshot-pos") \
- V(SNAPSHOT_CODE_NAME_EVENT, "snapshot-code-name") \
- V(TICK_EVENT, "tick") \
- V(REPEAT_META_EVENT, "repeat") \
- V(BUILTIN_TAG, "Builtin") \
- V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak") \
- V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn") \
- V(CALL_IC_TAG, "CallIC") \
- V(CALL_INITIALIZE_TAG, "CallInitialize") \
- V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic") \
- V(CALL_MISS_TAG, "CallMiss") \
- V(CALL_NORMAL_TAG, "CallNormal") \
- V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic") \
- V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak") \
- V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, \
- "KeyedCallDebugPrepareStepIn") \
- V(KEYED_CALL_IC_TAG, "KeyedCallIC") \
- V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize") \
- V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic") \
- V(KEYED_CALL_MISS_TAG, "KeyedCallMiss") \
- V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal") \
- V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic") \
- V(CALLBACK_TAG, "Callback") \
- V(EVAL_TAG, "Eval") \
- V(FUNCTION_TAG, "Function") \
- V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
- V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC") \
- V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
- V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \
- V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC") \
- V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC") \
- V(LAZY_COMPILE_TAG, "LazyCompile") \
- V(LOAD_IC_TAG, "LoadIC") \
- V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC") \
- V(REG_EXP_TAG, "RegExp") \
- V(SCRIPT_TAG, "Script") \
- V(STORE_IC_TAG, "StoreIC") \
- V(STUB_TAG, "Stub") \
- V(NATIVE_FUNCTION_TAG, "Function") \
- V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
- V(NATIVE_SCRIPT_TAG, "Script")
-// Note that 'NATIVE_' cases for functions and scripts are mapped onto
-// original tags when writing to the log.
-
-
-class Sampler;
-
-
-class Logger {
- public:
-#define DECLARE_ENUM(enum_item, ignore) enum_item,
- enum LogEventsAndTags {
- LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM)
- NUMBER_OF_LOG_EVENTS
- };
-#undef DECLARE_ENUM
-
- // Acquires resources for logging if the right flags are set.
- bool SetUp();
-
- // Sets the current code event handler.
- void SetCodeEventHandler(uint32_t options,
- JitCodeEventHandler event_handler);
-
- void EnsureTickerStarted();
- void EnsureTickerStopped();
-
- Sampler* sampler();
-
- // Frees resources acquired in SetUp.
- // When a temporary file is used for the log, returns its stream descriptor,
- // leaving the file open.
- FILE* TearDown();
-
- // Emits an event with a string value -> (name, value).
- void StringEvent(const char* name, const char* value);
-
- // Emits an event with an int value -> (name, value).
- void IntEvent(const char* name, int value);
- void IntPtrTEvent(const char* name, intptr_t value);
-
- // Emits an event with an handle value -> (name, location).
- void HandleEvent(const char* name, Object** location);
-
- // Emits memory management events for C allocated structures.
- void NewEvent(const char* name, void* object, size_t size);
- void DeleteEvent(const char* name, void* object);
-
- // Static versions of the above, operate on current isolate's logger.
- // Used in TRACK_MEMORY(TypeName) defined in globals.h
- static void NewEventStatic(const char* name, void* object, size_t size);
- static void DeleteEventStatic(const char* name, void* object);
-
- // Emits an event with a tag, and some resource usage information.
- // -> (name, tag, <rusage information>).
- // Currently, the resource usage information is a process time stamp
- // and a real time timestamp.
- void ResourceEvent(const char* name, const char* tag);
-
- // Emits an event that an undefined property was read from an
- // object.
- void SuspectReadEvent(String* name, Object* obj);
-
- // Emits an event when a message is put on or read from a debugging queue.
- // DebugTag lets us put a call-site specific label on the event.
- void DebugTag(const char* call_site_tag);
- void DebugEvent(const char* event_type, Vector<uint16_t> parameter);
-
-
- // ==== Events logged by --log-api. ====
- void ApiNamedSecurityCheck(Object* key);
- void ApiIndexedSecurityCheck(uint32_t index);
- void ApiNamedPropertyAccess(const char* tag, JSObject* holder, Object* name);
- void ApiIndexedPropertyAccess(const char* tag,
- JSObject* holder,
- uint32_t index);
- void ApiObjectAccess(const char* tag, JSObject* obj);
- void ApiEntryCall(const char* name);
-
-
- // ==== Events logged by --log-code. ====
- // Emits a code event for a callback function.
- void CallbackEvent(String* name, Address entry_point);
- void GetterCallbackEvent(String* name, Address entry_point);
- void SetterCallbackEvent(String* name, Address entry_point);
- // Emits a code create event.
- void CodeCreateEvent(LogEventsAndTags tag,
- Code* code, const char* source);
- void CodeCreateEvent(LogEventsAndTags tag,
- Code* code, String* name);
- void CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* name);
- void CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* source, int line);
- void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
- void CodeMovingGCEvent();
- // Emits a code create event for a RegExp.
- void RegExpCodeCreateEvent(Code* code, String* source);
- // Emits a code move event.
- void CodeMoveEvent(Address from, Address to);
- // Emits a code delete event.
- void CodeDeleteEvent(Address from);
- // Emits a code line info add event with Postion type.
- void CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
- int pc_offset,
- int position);
- // Emits a code line info add event with StatementPostion type.
- void CodeLinePosInfoAddStatementPositionEvent(void* jit_handler_data,
- int pc_offset,
- int position);
- // Emits a code line info start to record event
- void CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder);
- // Emits a code line info finish record event.
- // It's the callee's responsibility to dispose the parameter jit_handler_data.
- void CodeEndLinePosInfoRecordEvent(Code* code, void* jit_handler_data);
-
- void SharedFunctionInfoMoveEvent(Address from, Address to);
-
- void SnapshotPositionEvent(Address addr, int pos);
-
- // ==== Events logged by --log-gc. ====
- // Heap sampling events: start, end, and individual types.
- void HeapSampleBeginEvent(const char* space, const char* kind);
- void HeapSampleEndEvent(const char* space, const char* kind);
- void HeapSampleItemEvent(const char* type, int number, int bytes);
- void HeapSampleJSConstructorEvent(const char* constructor,
- int number, int bytes);
- void HeapSampleJSRetainersEvent(const char* constructor,
- const char* event);
- void HeapSampleJSProducerEvent(const char* constructor,
- Address* stack);
- void HeapSampleStats(const char* space, const char* kind,
- intptr_t capacity, intptr_t used);
-
- void SharedLibraryEvent(const char* library_path,
- uintptr_t start,
- uintptr_t end);
- void SharedLibraryEvent(const wchar_t* library_path,
- uintptr_t start,
- uintptr_t end);
-
- // ==== Events logged by --log-timer-events. ====
- enum StartEnd { START, END };
-
- void TimerEvent(StartEnd se, const char* name);
-
- static void EnterExternal();
- static void LeaveExternal();
-
- class TimerEventScope {
- public:
- TimerEventScope(Isolate* isolate, const char* name)
- : isolate_(isolate), name_(name) {
- if (FLAG_log_internal_timer_events) LogTimerEvent(START);
- }
-
- ~TimerEventScope() {
- if (FLAG_log_internal_timer_events) LogTimerEvent(END);
- }
-
- void LogTimerEvent(StartEnd se);
-
- static const char* v8_recompile_synchronous;
- static const char* v8_recompile_parallel;
- static const char* v8_compile_full_code;
- static const char* v8_execute;
- static const char* v8_external;
-
- private:
- Isolate* isolate_;
- const char* name_;
- };
-
- // ==== Events logged by --log-regexp ====
- // Regexp compilation and execution events.
-
- void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
-
- // Log an event reported from generated code
- void LogRuntime(Isolate* isolate, Vector<const char> format, JSArray* args);
-
- bool is_logging() {
- return logging_nesting_ > 0;
- }
-
- bool is_code_event_handler_enabled() {
- return code_event_handler_ != NULL;
- }
-
- bool is_logging_code_events() {
- return is_logging() || code_event_handler_ != NULL;
- }
-
- // Pause/Resume collection of profiling data.
- // When data collection is paused, CPU Tick events are discarded until
- // data collection is Resumed.
- void PauseProfiler();
- void ResumeProfiler();
- bool IsProfilerPaused();
-
- void LogExistingFunction(Handle<SharedFunctionInfo> shared,
- Handle<Code> code);
- // Logs all compiled functions found in the heap.
- void LogCompiledFunctions();
- // Logs all accessor callbacks found in the heap.
- void LogAccessorCallbacks();
- // Used for logging stubs found in the snapshot.
- void LogCodeObjects();
-
- // Converts tag to a corresponding NATIVE_... if the script is native.
- INLINE(static LogEventsAndTags ToNativeByScript(LogEventsAndTags, Script*));
-
- // Profiler's sampling interval (in milliseconds).
-#if defined(ANDROID)
- // Phones and tablets have processors that are much slower than desktop
- // and laptop computers for which current heuristics are tuned.
- static const int kSamplingIntervalMs = 5;
-#else
- static const int kSamplingIntervalMs = 1;
-#endif
-
- // Callback from Log, stops profiling in case of insufficient resources.
- void LogFailure();
-
- private:
- class NameBuffer;
- class NameMap;
-
- explicit Logger(Isolate* isolate);
- ~Logger();
-
- // Issue code notifications.
- void IssueCodeAddedEvent(Code* code,
- Script* script,
- const char* name,
- size_t name_len);
- void IssueCodeMovedEvent(Address from, Address to);
- void IssueCodeRemovedEvent(Address from);
- void IssueAddCodeLinePosInfoEvent(void* jit_handler_data,
- int pc_offset,
- int position,
- JitCodeEvent::PositionType position_Type);
- void* IssueStartCodePosInfoEvent();
- void IssueEndCodePosInfoEvent(Code* code, void* jit_handler_data);
- // Emits the profiler's first message.
- void ProfilerBeginEvent();
-
- // Emits callback event messages.
- void CallbackEventInternal(const char* prefix,
- const char* name,
- Address entry_point);
-
- // Internal configurable move event.
- void MoveEventInternal(LogEventsAndTags event, Address from, Address to);
-
- // Internal configurable move event.
- void DeleteEventInternal(LogEventsAndTags event, Address from);
-
- // Emits the source code of a regexp. Used by regexp events.
- void LogRegExpSource(Handle<JSRegExp> regexp);
-
- // Used for logging stubs found in the snapshot.
- void LogCodeObject(Object* code_object);
-
- // Emits general information about generated code.
- void LogCodeInfo();
-
- void RegisterSnapshotCodeName(Code* code, const char* name, int name_size);
-
- // Low-level logging support.
-
- void LowLevelCodeCreateEvent(Code* code, const char* name, int name_size);
-
- void LowLevelCodeMoveEvent(Address from, Address to);
-
- void LowLevelCodeDeleteEvent(Address from);
-
- void LowLevelSnapshotPositionEvent(Address addr, int pos);
-
- void LowLevelLogWriteBytes(const char* bytes, int size);
-
- template <typename T>
- void LowLevelLogWriteStruct(const T& s) {
- char tag = T::kTag;
- LowLevelLogWriteBytes(reinterpret_cast<const char*>(&tag), sizeof(tag));
- LowLevelLogWriteBytes(reinterpret_cast<const char*>(&s), sizeof(s));
- }
-
- // Emits a profiler tick event. Used by the profiler thread.
- void TickEvent(TickSample* sample, bool overflow);
-
- void ApiEvent(const char* name, ...);
-
- // Logs a StringEvent regardless of whether FLAG_log is true.
- void UncheckedStringEvent(const char* name, const char* value);
-
- // Logs an IntEvent regardless of whether FLAG_log is true.
- void UncheckedIntEvent(const char* name, int value);
- void UncheckedIntPtrTEvent(const char* name, intptr_t value);
-
- // Returns whether profiler's sampler is active.
- bool IsProfilerSamplerActive();
-
- Isolate* isolate_;
-
- // The sampler used by the profiler and the sliding state window.
- Ticker* ticker_;
-
- // When the statistical profile is active, profiler_
- // points to a Profiler, that handles collection
- // of samples.
- Profiler* profiler_;
-
- // An array of log events names.
- const char* const* log_events_;
-
- // Internal implementation classes with access to
- // private members.
- friend class EventLog;
- friend class Isolate;
- friend class LogMessageBuilder;
- friend class TimeLog;
- friend class Profiler;
- friend class StackTracer;
- friend class VMState;
-
- friend class LoggerTestHelper;
-
-
- int logging_nesting_;
- int cpu_profiler_nesting_;
-
- Log* log_;
-
- NameBuffer* name_buffer_;
-
- NameMap* address_to_name_map_;
-
- // Guards against multiple calls to TearDown() that can happen in some tests.
- // 'true' between SetUp() and TearDown().
- bool is_initialized_;
-
- // The code event handler - if any.
- JitCodeEventHandler code_event_handler_;
-
- // Support for 'incremental addresses' in compressed logs:
- // LogMessageBuilder::AppendAddress(Address addr)
- Address last_address_;
- // Logger::TickEvent(...)
- Address prev_sp_;
- Address prev_function_;
- // Logger::MoveEventInternal(...)
- Address prev_to_;
- // Logger::FunctionCreateEvent(...)
- Address prev_code_;
-
- int64_t epoch_;
-
- friend class CpuProfiler;
-};
-
-
-// Process wide registry of samplers.
-class SamplerRegistry : public AllStatic {
- public:
- enum State {
- HAS_NO_SAMPLERS,
- HAS_SAMPLERS,
- HAS_CPU_PROFILING_SAMPLERS
- };
-
- static void SetUp();
-
- typedef void (*VisitSampler)(Sampler*, void*);
-
- static State GetState();
-
- // Iterates over all active samplers keeping the internal lock held.
- // Returns whether there are any active samplers.
- static bool IterateActiveSamplers(VisitSampler func, void* param);
-
- // Adds/Removes an active sampler.
- static void AddActiveSampler(Sampler* sampler);
- static void RemoveActiveSampler(Sampler* sampler);
-
- private:
- static bool ActiveSamplersExist() {
- return active_samplers_ != NULL && !active_samplers_->is_empty();
- }
-
- static List<Sampler*>* active_samplers_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(SamplerRegistry);
-};
-
-
-// Class that extracts stack trace, used for profiling.
-class StackTracer : public AllStatic {
- public:
- static void Trace(Isolate* isolate, TickSample* sample);
-};
-
-} } // namespace v8::internal
-
-
-#endif // V8_LOG_H_
diff --git a/src/3rdparty/v8/src/macro-assembler.h b/src/3rdparty/v8/src/macro-assembler.h
deleted file mode 100644
index 9e71123..0000000
--- a/src/3rdparty/v8/src/macro-assembler.h
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MACRO_ASSEMBLER_H_
-#define V8_MACRO_ASSEMBLER_H_
-
-
-// Helper types to make boolean flag easier to read at call-site.
-enum InvokeFlag {
- CALL_FUNCTION,
- JUMP_FUNCTION
-};
-
-
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1,
- // Specify that the requested size of the space to allocate is specified in
- // words instead of bytes.
- SIZE_IN_WORDS = 1 << 2,
- // Align the allocation to a multiple of kDoubleSize
- DOUBLE_ALIGNMENT = 1 << 3
-};
-
-
-// Invalid depth in prototype chain.
-const int kInvalidProtoDepth = -1;
-
-#if V8_TARGET_ARCH_IA32
-#include "assembler.h"
-#include "ia32/assembler-ia32.h"
-#include "ia32/assembler-ia32-inl.h"
-#include "code.h" // must be after assembler_*.h
-#include "ia32/macro-assembler-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "assembler.h"
-#include "x64/assembler-x64.h"
-#include "x64/assembler-x64-inl.h"
-#include "code.h" // must be after assembler_*.h
-#include "x64/macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/constants-arm.h"
-#include "assembler.h"
-#include "arm/assembler-arm.h"
-#include "arm/assembler-arm-inl.h"
-#include "code.h" // must be after assembler_*.h
-#include "arm/macro-assembler-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/constants-mips.h"
-#include "assembler.h"
-#include "mips/assembler-mips.h"
-#include "mips/assembler-mips-inl.h"
-#include "code.h" // must be after assembler_*.h
-#include "mips/macro-assembler-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-class FrameScope {
- public:
- explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
- : masm_(masm), type_(type), old_has_frame_(masm->has_frame()) {
- masm->set_has_frame(true);
- if (type != StackFrame::MANUAL && type_ != StackFrame::NONE) {
- masm->EnterFrame(type);
- }
- }
-
- ~FrameScope() {
- if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
- masm_->LeaveFrame(type_);
- }
- masm_->set_has_frame(old_has_frame_);
- }
-
- // Normally we generate the leave-frame code when this object goes
- // out of scope. Sometimes we may need to generate the code somewhere else
- // in addition. Calling this will achieve that, but the object stays in
- // scope, the MacroAssembler is still marked as being in a frame scope, and
- // the code will be generated again when it goes out of scope.
- void GenerateLeaveFrame() {
- masm_->LeaveFrame(type_);
- }
-
- private:
- MacroAssembler* masm_;
- StackFrame::Type type_;
- bool old_has_frame_;
-};
-
-
-class AllowExternalCallThatCantCauseGC: public FrameScope {
- public:
- explicit AllowExternalCallThatCantCauseGC(MacroAssembler* masm)
- : FrameScope(masm, StackFrame::NONE) { }
-};
-
-
-class NoCurrentFrameScope {
- public:
- explicit NoCurrentFrameScope(MacroAssembler* masm)
- : masm_(masm), saved_(masm->has_frame()) {
- masm->set_has_frame(false);
- }
-
- ~NoCurrentFrameScope() {
- masm_->set_has_frame(saved_);
- }
-
- private:
- MacroAssembler* masm_;
- bool saved_;
-};
-
-
-// Support for "structured" code comments.
-#ifdef DEBUG
-
-class Comment {
- public:
- Comment(MacroAssembler* masm, const char* msg);
- ~Comment();
-
- private:
- MacroAssembler* masm_;
- const char* msg_;
-};
-
-#else
-
-class Comment {
- public:
- Comment(MacroAssembler*, const char*) {}
-};
-
-#endif // DEBUG
-
-} } // namespace v8::internal
-
-#endif // V8_MACRO_ASSEMBLER_H_
diff --git a/src/3rdparty/v8/src/macros.py b/src/3rdparty/v8/src/macros.py
deleted file mode 100644
index 291a898..0000000
--- a/src/3rdparty/v8/src/macros.py
+++ /dev/null
@@ -1,242 +0,0 @@
-# Copyright 2006-2009 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Dictionary that is passed as defines for js2c.py.
-# Used for defines that must be defined for all native JS files.
-
-const NONE = 0;
-const READ_ONLY = 1;
-const DONT_ENUM = 2;
-const DONT_DELETE = 4;
-const NEW_ONE_BYTE_STRING = true;
-const NEW_TWO_BYTE_STRING = false;
-
-# Constants used for getter and setter operations.
-const GETTER = 0;
-const SETTER = 1;
-
-# These definitions must match the index of the properties in objects.h.
-const kApiTagOffset = 0;
-const kApiPropertyListOffset = 1;
-const kApiSerialNumberOffset = 2;
-const kApiConstructorOffset = 2;
-const kApiPrototypeTemplateOffset = 5;
-const kApiParentTemplateOffset = 6;
-const kApiFlagOffset = 14;
-
-const NO_HINT = 0;
-const NUMBER_HINT = 1;
-const STRING_HINT = 2;
-
-const kFunctionTag = 0;
-const kNewObjectTag = 1;
-
-# For date.js.
-const HoursPerDay = 24;
-const MinutesPerHour = 60;
-const SecondsPerMinute = 60;
-const msPerSecond = 1000;
-const msPerMinute = 60000;
-const msPerHour = 3600000;
-const msPerDay = 86400000;
-const msPerMonth = 2592000000;
-
-# For apinatives.js
-const kUninitialized = -1;
-const kReadOnlyPrototypeBit = 3; # For FunctionTemplateInfo, matches objects.h
-
-# Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1).
-const kInvalidDate = 'Invalid Date';
-const kDayZeroInJulianDay = 2440588;
-const kMonthMask = 0x1e0;
-const kDayMask = 0x01f;
-const kYearShift = 9;
-const kMonthShift = 5;
-
-# Limits for parts of the date, so that we support all the dates that
-# ECMA 262 - 15.9.1.1 requires us to, but at the same time be sure that
-# the date (days since 1970) is in SMI range.
-const kMinYear = -1000000;
-const kMaxYear = 1000000;
-const kMinMonth = -10000000;
-const kMaxMonth = 10000000;
-
-# Native cache ids.
-const STRING_TO_REGEXP_CACHE_ID = 0;
-
-# Type query macros.
-#
-# Note: We have special support for typeof(foo) === 'bar' in the compiler.
-# It will *not* generate a runtime typeof call for the most important
-# values of 'bar'.
-macro IS_NULL(arg) = (arg === null);
-macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
-macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined');
-macro IS_NUMBER(arg) = (typeof(arg) === 'number');
-macro IS_STRING(arg) = (typeof(arg) === 'string');
-macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
-macro IS_SYMBOL(arg) = (%_IsSymbol(arg));
-macro IS_OBJECT(arg) = (%_IsObject(arg));
-macro IS_ARRAY(arg) = (%_IsArray(arg));
-macro IS_FUNCTION(arg) = (%_IsFunction(arg));
-macro IS_REGEXP(arg) = (%_IsRegExp(arg));
-macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
-macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
-macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
-macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
-macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
-macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
-macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
-macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
-macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
-macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
-macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
-macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
-macro FLOOR(arg) = $floor(arg);
-
-# Macro for ECMAScript 5 queries of the type:
-# "Type(O) is object."
-# This is the same as being either a function or an object in V8 terminology
-# (including proxies).
-# In addition, an undetectable object is also included by this.
-macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
-
-# Macro for ECMAScript 5 queries of the type:
-# "IsCallable(O)"
-# We assume here that this is the same as being either a function or a function
-# proxy. That ignores host objects with [[Call]] methods, but in most situations
-# we cannot handle those anyway.
-macro IS_SPEC_FUNCTION(arg) = (%_ClassOf(arg) === 'Function');
-
-# Indices in bound function info retrieved by %BoundFunctionGetBindings(...).
-const kBoundFunctionIndex = 0;
-const kBoundThisIndex = 1;
-const kBoundArgumentsStartIndex = 2;
-
-# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
-macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
-macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
-macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
-macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
-macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
-macro TO_UINT32(arg) = (arg >>> 0);
-macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg));
-macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber(arg));
-macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg));
-macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
-
-# Macros implemented in Python.
-python macro CHAR_CODE(str) = ord(str[1]);
-
-# Constants used on an array to implement the properties of the RegExp object.
-const REGEXP_NUMBER_OF_CAPTURES = 0;
-const REGEXP_FIRST_CAPTURE = 3;
-
-# We can't put macros in macros so we use constants here.
-# REGEXP_NUMBER_OF_CAPTURES
-macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
-
-# Limit according to ECMA 262 15.9.1.1
-const MAX_TIME_MS = 8640000000000000;
-# Limit which is MAX_TIME_MS + msPerMonth.
-const MAX_TIME_BEFORE_UTC = 8640002592000000;
-
-# Gets the value of a Date object. If arg is not a Date object
-# a type error is thrown.
-macro CHECK_DATE(arg) = if (%_ClassOf(arg) !== 'Date') ThrowDateTypeError();
-macro LOCAL_DATE_VALUE(arg) = (%_DateField(arg, 0) + %_DateField(arg, 21));
-macro UTC_DATE_VALUE(arg) = (%_DateField(arg, 0));
-
-macro LOCAL_YEAR(arg) = (%_DateField(arg, 1));
-macro LOCAL_MONTH(arg) = (%_DateField(arg, 2));
-macro LOCAL_DAY(arg) = (%_DateField(arg, 3));
-macro LOCAL_WEEKDAY(arg) = (%_DateField(arg, 4));
-macro LOCAL_HOUR(arg) = (%_DateField(arg, 5));
-macro LOCAL_MIN(arg) = (%_DateField(arg, 6));
-macro LOCAL_SEC(arg) = (%_DateField(arg, 7));
-macro LOCAL_MS(arg) = (%_DateField(arg, 8));
-macro LOCAL_DAYS(arg) = (%_DateField(arg, 9));
-macro LOCAL_TIME_IN_DAY(arg) = (%_DateField(arg, 10));
-
-macro UTC_YEAR(arg) = (%_DateField(arg, 11));
-macro UTC_MONTH(arg) = (%_DateField(arg, 12));
-macro UTC_DAY(arg) = (%_DateField(arg, 13));
-macro UTC_WEEKDAY(arg) = (%_DateField(arg, 14));
-macro UTC_HOUR(arg) = (%_DateField(arg, 15));
-macro UTC_MIN(arg) = (%_DateField(arg, 16));
-macro UTC_SEC(arg) = (%_DateField(arg, 17));
-macro UTC_MS(arg) = (%_DateField(arg, 18));
-macro UTC_DAYS(arg) = (%_DateField(arg, 19));
-macro UTC_TIME_IN_DAY(arg) = (%_DateField(arg, 20));
-
-macro TIMEZONE_OFFSET(arg) = (%_DateField(arg, 21));
-
-macro SET_UTC_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 1));
-macro SET_LOCAL_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 0));
-
-# Last input and last subject of regexp matches.
-const LAST_SUBJECT_INDEX = 1;
-macro LAST_SUBJECT(array) = ((array)[1]);
-macro LAST_INPUT(array) = ((array)[2]);
-
-# REGEXP_FIRST_CAPTURE
-macro CAPTURE(index) = (3 + (index));
-const CAPTURE0 = 3;
-const CAPTURE1 = 4;
-
-# For the regexp capture override array. This has the same
-# format as the arguments to a function called from
-# String.prototype.replace.
-macro OVERRIDE_MATCH(override) = ((override)[0]);
-macro OVERRIDE_POS(override) = ((override)[(override).length - 2]);
-macro OVERRIDE_SUBJECT(override) = ((override)[(override).length - 1]);
-# 1-based so index of 1 returns the first capture
-macro OVERRIDE_CAPTURE(override, index) = ((override)[(index)]);
-
-# PropertyDescriptor return value indices - must match
-# PropertyDescriptorIndices in runtime.cc.
-const IS_ACCESSOR_INDEX = 0;
-const VALUE_INDEX = 1;
-const GETTER_INDEX = 2;
-const SETTER_INDEX = 3;
-const WRITABLE_INDEX = 4;
-const ENUMERABLE_INDEX = 5;
-const CONFIGURABLE_INDEX = 6;
-
-# For messages.js
-# Matches Script::Type from objects.h
-const TYPE_NATIVE = 0;
-const TYPE_EXTENSION = 1;
-const TYPE_NORMAL = 2;
-
-# Matches Script::CompilationType from objects.h
-const COMPILATION_TYPE_HOST = 0;
-const COMPILATION_TYPE_EVAL = 1;
-const COMPILATION_TYPE_JSON = 2;
-
-# Matches Messages::kNoLineNumberInfo from v8.h
-const kNoLineNumberInfo = 0;
diff --git a/src/3rdparty/v8/src/mark-compact-inl.h b/src/3rdparty/v8/src/mark-compact-inl.h
deleted file mode 100644
index 10773e7..0000000
--- a/src/3rdparty/v8/src/mark-compact-inl.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MARK_COMPACT_INL_H_
-#define V8_MARK_COMPACT_INL_H_
-
-#include "isolate.h"
-#include "memory.h"
-#include "mark-compact.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-MarkBit Marking::MarkBitFrom(Address addr) {
- MemoryChunk* p = MemoryChunk::FromAddress(addr);
- return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
- p->ContainsOnlyData());
-}
-
-
-void MarkCompactCollector::SetFlags(int flags) {
- sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0);
- reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
- abort_incremental_marking_ =
- ((flags & Heap::kAbortIncrementalMarkingMask) != 0);
-}
-
-
-void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
- ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
- if (!mark_bit.Get()) {
- mark_bit.Set();
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
- ASSERT(IsMarked(obj));
- ASSERT(HEAP->Contains(obj));
- marking_deque_.PushBlack(obj);
- }
-}
-
-
-void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
- ASSERT(!mark_bit.Get());
- ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
- mark_bit.Set();
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
-}
-
-
-bool MarkCompactCollector::IsMarked(Object* obj) {
- ASSERT(obj->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(obj);
- return Marking::MarkBitFrom(heap_object).Get();
-}
-
-
-void MarkCompactCollector::RecordSlot(Object** anchor_slot,
- Object** slot,
- Object* object) {
- Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
- if (object_page->IsEvacuationCandidate() &&
- !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
- if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
- object_page->slots_buffer_address(),
- slot,
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
- EvictEvacuationCandidate(object_page);
- }
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_MARK_COMPACT_INL_H_
diff --git a/src/3rdparty/v8/src/mark-compact.cc b/src/3rdparty/v8/src/mark-compact.cc
deleted file mode 100644
index ba19bf3..0000000
--- a/src/3rdparty/v8/src/mark-compact.cc
+++ /dev/null
@@ -1,4132 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "code-stubs.h"
-#include "compilation-cache.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "gdb-jit.h"
-#include "global-handles.h"
-#include "heap-profiler.h"
-#include "ic-inl.h"
-#include "incremental-marking.h"
-#include "mark-compact.h"
-#include "marking-thread.h"
-#include "objects-visiting.h"
-#include "objects-visiting-inl.h"
-#include "stub-cache.h"
-#include "sweeper-thread.h"
-
-namespace v8 {
-namespace internal {
-
-
-const char* Marking::kWhiteBitPattern = "00";
-const char* Marking::kBlackBitPattern = "10";
-const char* Marking::kGreyBitPattern = "11";
-const char* Marking::kImpossibleBitPattern = "01";
-
-
-// -------------------------------------------------------------------------
-// MarkCompactCollector
-
-MarkCompactCollector::MarkCompactCollector() : // NOLINT
-#ifdef DEBUG
- state_(IDLE),
-#endif
- sweep_precisely_(false),
- reduce_memory_footprint_(false),
- abort_incremental_marking_(false),
- marking_parity_(ODD_MARKING_PARITY),
- compacting_(false),
- was_marked_incrementally_(false),
- sweeping_pending_(false),
- tracer_(NULL),
- migration_slots_buffer_(NULL),
- heap_(NULL),
- code_flusher_(NULL),
- encountered_weak_maps_(NULL) { }
-
-
-#ifdef VERIFY_HEAP
-class VerifyMarkingVisitor: public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- CHECK(HEAP->mark_compact_collector()->IsMarked(object));
- }
- }
- }
-
- void VisitEmbeddedPointer(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps ||
- rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
- !rinfo->target_object()->IsMap() ||
- !Map::cast(rinfo->target_object())->CanTransition()) {
- VisitPointer(rinfo->target_object_address());
- }
- }
-};
-
-
-static void VerifyMarking(Address bottom, Address top) {
- VerifyMarkingVisitor visitor;
- HeapObject* object;
- Address next_object_must_be_here_or_later = bottom;
-
- for (Address current = bottom;
- current < top;
- current += kPointerSize) {
- object = HeapObject::FromAddress(current);
- if (MarkCompactCollector::IsMarked(object)) {
- CHECK(current >= next_object_must_be_here_or_later);
- object->Iterate(&visitor);
- next_object_must_be_here_or_later = current + object->Size();
- }
- }
-}
-
-
-static void VerifyMarking(NewSpace* space) {
- Address end = space->top();
- NewSpacePageIterator it(space->bottom(), end);
- // The bottom position is at the start of its page. Allows us to use
- // page->area_start() as start of range on all pages.
- CHECK_EQ(space->bottom(),
- NewSpacePage::FromAddress(space->bottom())->area_start());
- while (it.has_next()) {
- NewSpacePage* page = it.next();
- Address limit = it.has_next() ? page->area_end() : end;
- CHECK(limit == end || !page->Contains(end));
- VerifyMarking(page->area_start(), limit);
- }
-}
-
-
-static void VerifyMarking(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
- VerifyMarking(p->area_start(), p->area_end());
- }
-}
-
-
-static void VerifyMarking(Heap* heap) {
- VerifyMarking(heap->old_pointer_space());
- VerifyMarking(heap->old_data_space());
- VerifyMarking(heap->code_space());
- VerifyMarking(heap->cell_space());
- VerifyMarking(heap->map_space());
- VerifyMarking(heap->new_space());
-
- VerifyMarkingVisitor visitor;
-
- LargeObjectIterator it(heap->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- if (MarkCompactCollector::IsMarked(obj)) {
- obj->Iterate(&visitor);
- }
- }
-
- heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
-}
-
-
-class VerifyEvacuationVisitor: public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
- }
- }
- }
-};
-
-
-static void VerifyEvacuation(Address bottom, Address top) {
- VerifyEvacuationVisitor visitor;
- HeapObject* object;
- Address next_object_must_be_here_or_later = bottom;
-
- for (Address current = bottom;
- current < top;
- current += kPointerSize) {
- object = HeapObject::FromAddress(current);
- if (MarkCompactCollector::IsMarked(object)) {
- CHECK(current >= next_object_must_be_here_or_later);
- object->Iterate(&visitor);
- next_object_must_be_here_or_later = current + object->Size();
- }
- }
-}
-
-
-static void VerifyEvacuation(NewSpace* space) {
- NewSpacePageIterator it(space->bottom(), space->top());
- VerifyEvacuationVisitor visitor;
-
- while (it.has_next()) {
- NewSpacePage* page = it.next();
- Address current = page->area_start();
- Address limit = it.has_next() ? page->area_end() : space->top();
- CHECK(limit == space->top() || !page->Contains(space->top()));
- while (current < limit) {
- HeapObject* object = HeapObject::FromAddress(current);
- object->Iterate(&visitor);
- current += object->Size();
- }
- }
-}
-
-
-static void VerifyEvacuation(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
- if (p->IsEvacuationCandidate()) continue;
- VerifyEvacuation(p->area_start(), p->area_end());
- }
-}
-
-
-static void VerifyEvacuation(Heap* heap) {
- VerifyEvacuation(heap->old_pointer_space());
- VerifyEvacuation(heap->old_data_space());
- VerifyEvacuation(heap->code_space());
- VerifyEvacuation(heap->cell_space());
- VerifyEvacuation(heap->map_space());
- VerifyEvacuation(heap->new_space());
-
- VerifyEvacuationVisitor visitor;
- heap->IterateStrongRoots(&visitor, VISIT_ALL);
-}
-#endif // VERIFY_HEAP
-
-
-#ifdef DEBUG
-class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
- public:
- VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- if (object->IsString()) continue;
- switch (object->map()->instance_type()) {
- case JS_FUNCTION_TYPE:
- CheckContext(JSFunction::cast(object)->context());
- break;
- case JS_GLOBAL_PROXY_TYPE:
- CheckContext(JSGlobalProxy::cast(object)->native_context());
- break;
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
- CheckContext(GlobalObject::cast(object)->native_context());
- break;
- case JS_ARRAY_TYPE:
- case JS_DATE_TYPE:
- case JS_OBJECT_TYPE:
- case JS_REGEXP_TYPE:
- VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
- break;
- case MAP_TYPE:
- VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
- VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
- break;
- case FIXED_ARRAY_TYPE:
- if (object->IsContext()) {
- CheckContext(object);
- } else {
- FixedArray* array = FixedArray::cast(object);
- int length = array->length();
- // Set array length to zero to prevent cycles while iterating
- // over array bodies, this is easier than intrusive marking.
- array->set_length(0);
- array->IterateBody(
- FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
- array->set_length(length);
- }
- break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- case JS_PROXY_TYPE:
- case JS_VALUE_TYPE:
- case TYPE_FEEDBACK_INFO_TYPE:
- object->Iterate(this);
- break;
- case DECLARED_ACCESSOR_INFO_TYPE:
- case EXECUTABLE_ACCESSOR_INFO_TYPE:
- case BYTE_ARRAY_TYPE:
- case CALL_HANDLER_INFO_TYPE:
- case CODE_TYPE:
- case FIXED_DOUBLE_ARRAY_TYPE:
- case HEAP_NUMBER_TYPE:
- case INTERCEPTOR_INFO_TYPE:
- case ODDBALL_TYPE:
- case SCRIPT_TYPE:
- case SHARED_FUNCTION_INFO_TYPE:
- break;
- default:
- UNREACHABLE();
- }
- }
- }
- }
-
- private:
- void CheckContext(Object* context) {
- if (!context->IsContext()) return;
- Context* native_context = Context::cast(context)->native_context();
- if (current_native_context_ == NULL) {
- current_native_context_ = native_context;
- } else {
- CHECK_EQ(current_native_context_, native_context);
- }
- }
-
- Context* current_native_context_;
-};
-
-
-static void VerifyNativeContextSeparation(Heap* heap) {
- HeapObjectIterator it(heap->code_space());
-
- for (Object* object = it.Next(); object != NULL; object = it.Next()) {
- VerifyNativeContextSeparationVisitor visitor;
- Code::cast(object)->CodeIterateBody(&visitor);
- }
-}
-#endif
-
-
-void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
- p->MarkEvacuationCandidate();
- evacuation_candidates_.Add(p);
-}
-
-
-static void TraceFragmentation(PagedSpace* space) {
- int number_of_pages = space->CountTotalPages();
- intptr_t reserved = (number_of_pages * space->AreaSize());
- intptr_t free = reserved - space->SizeOfObjects();
- PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
- AllocationSpaceName(space->identity()),
- number_of_pages,
- static_cast<int>(free),
- static_cast<double>(free) * 100 / reserved);
-}
-
-
-bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
- if (!compacting_) {
- ASSERT(evacuation_candidates_.length() == 0);
-
-#ifdef ENABLE_GDB_JIT_INTERFACE
- // If GDBJIT interface is active disable compaction.
- if (FLAG_gdbjit) return false;
-#endif
-
- CollectEvacuationCandidates(heap()->old_pointer_space());
- CollectEvacuationCandidates(heap()->old_data_space());
-
- if (FLAG_compact_code_space &&
- (mode == NON_INCREMENTAL_COMPACTION ||
- FLAG_incremental_code_compaction)) {
- CollectEvacuationCandidates(heap()->code_space());
- } else if (FLAG_trace_fragmentation) {
- TraceFragmentation(heap()->code_space());
- }
-
- if (FLAG_trace_fragmentation) {
- TraceFragmentation(heap()->map_space());
- TraceFragmentation(heap()->cell_space());
- }
-
- heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
- heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
- heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
-
- compacting_ = evacuation_candidates_.length() > 0;
- }
-
- return compacting_;
-}
-
-
-void MarkCompactCollector::CollectGarbage() {
- // Make sure that Prepare() has been called. The individual steps below will
- // update the state as they proceed.
- ASSERT(state_ == PREPARE_GC);
- ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
-
- MarkLiveObjects();
- ASSERT(heap_->incremental_marking()->IsStopped());
-
- if (FLAG_collect_maps) ClearNonLiveReferences();
-
- ClearWeakMaps();
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- VerifyMarking(heap_);
- }
-#endif
-
- SweepSpaces();
-
- if (!FLAG_collect_maps) ReattachInitialMaps();
-
-#ifdef DEBUG
- if (FLAG_verify_native_context_separation) {
- VerifyNativeContextSeparation(heap_);
- }
-#endif
-
-#ifdef VERIFY_HEAP
- if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_optimized_code &&
- heap()->weak_embedded_maps_verification_enabled()) {
- VerifyWeakEmbeddedMapsInOptimizedCode();
- }
- if (FLAG_collect_maps && FLAG_omit_prototype_checks_for_leaf_maps) {
- VerifyOmittedPrototypeChecks();
- }
-#endif
-
- Finish();
-
- if (marking_parity_ == EVEN_MARKING_PARITY) {
- marking_parity_ = ODD_MARKING_PARITY;
- } else {
- ASSERT(marking_parity_ == ODD_MARKING_PARITY);
- marking_parity_ = EVEN_MARKING_PARITY;
- }
-
- tracer_ = NULL;
-}
-
-
-#ifdef VERIFY_HEAP
-void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
- CHECK(p->markbits()->IsClean());
- CHECK_EQ(0, p->LiveBytes());
- }
-}
-
-
-void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
- NewSpacePageIterator it(space->bottom(), space->top());
-
- while (it.has_next()) {
- NewSpacePage* p = it.next();
- CHECK(p->markbits()->IsClean());
- CHECK_EQ(0, p->LiveBytes());
- }
-}
-
-
-void MarkCompactCollector::VerifyMarkbitsAreClean() {
- VerifyMarkbitsAreClean(heap_->old_pointer_space());
- VerifyMarkbitsAreClean(heap_->old_data_space());
- VerifyMarkbitsAreClean(heap_->code_space());
- VerifyMarkbitsAreClean(heap_->cell_space());
- VerifyMarkbitsAreClean(heap_->map_space());
- VerifyMarkbitsAreClean(heap_->new_space());
-
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- CHECK(Marking::IsWhite(mark_bit));
- CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
- }
-}
-
-
-void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() {
- HeapObjectIterator code_iterator(heap()->code_space());
- for (HeapObject* obj = code_iterator.Next();
- obj != NULL;
- obj = code_iterator.Next()) {
- Code* code = Code::cast(obj);
- if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
- if (code->marked_for_deoptimization()) continue;
- code->VerifyEmbeddedMapsDependency();
- }
-}
-
-
-void MarkCompactCollector::VerifyOmittedPrototypeChecks() {
- HeapObjectIterator iterator(heap()->map_space());
- for (HeapObject* obj = iterator.Next();
- obj != NULL;
- obj = iterator.Next()) {
- Map* map = Map::cast(obj);
- map->VerifyOmittedPrototypeChecks();
- }
-}
-#endif // VERIFY_HEAP
-
-
-static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Bitmap::Clear(it.next());
- }
-}
-
-
-static void ClearMarkbitsInNewSpace(NewSpace* space) {
- NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
-
- while (it.has_next()) {
- Bitmap::Clear(it.next());
- }
-}
-
-
-void MarkCompactCollector::ClearMarkbits() {
- ClearMarkbitsInPagedSpace(heap_->code_space());
- ClearMarkbitsInPagedSpace(heap_->map_space());
- ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
- ClearMarkbitsInPagedSpace(heap_->old_data_space());
- ClearMarkbitsInPagedSpace(heap_->cell_space());
- ClearMarkbitsInNewSpace(heap_->new_space());
-
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- mark_bit.Clear();
- mark_bit.Next().Clear();
- Page::FromAddress(obj->address())->ResetProgressBar();
- Page::FromAddress(obj->address())->ResetLiveBytes();
- }
-}
-
-
-void MarkCompactCollector::StartSweeperThreads() {
- sweeping_pending_ = true;
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- heap()->isolate()->sweeper_threads()[i]->StartSweeping();
- }
-}
-
-
-void MarkCompactCollector::WaitUntilSweepingCompleted() {
- if (sweeping_pending_) {
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread();
- }
- sweeping_pending_ = false;
- StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
- StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
- heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
- heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
- }
-}
-
-
-intptr_t MarkCompactCollector::
- StealMemoryFromSweeperThreads(PagedSpace* space) {
- intptr_t freed_bytes = 0;
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space);
- }
- space->AddToAccountingStats(freed_bytes);
- space->DecrementUnsweptFreeBytes(freed_bytes);
- return freed_bytes;
-}
-
-
-bool MarkCompactCollector::AreSweeperThreadsActivated() {
- return heap()->isolate()->sweeper_threads() != NULL;
-}
-
-
-bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
- return sweeping_pending_;
-}
-
-
-void MarkCompactCollector::FinalizeSweeping() {
- ASSERT(sweeping_pending_ == false);
- ReleaseEvacuationCandidates();
- heap()->FreeQueuedChunks();
-}
-
-
-void MarkCompactCollector::MarkInParallel() {
- for (int i = 0; i < FLAG_marking_threads; i++) {
- heap()->isolate()->marking_threads()[i]->StartMarking();
- }
-}
-
-
-void MarkCompactCollector::WaitUntilMarkingCompleted() {
- for (int i = 0; i < FLAG_marking_threads; i++) {
- heap()->isolate()->marking_threads()[i]->WaitForMarkingThread();
- }
-}
-
-
-bool Marking::TransferMark(Address old_start, Address new_start) {
- // This is only used when resizing an object.
- ASSERT(MemoryChunk::FromAddress(old_start) ==
- MemoryChunk::FromAddress(new_start));
-
- // If the mark doesn't move, we don't check the color of the object.
- // It doesn't matter whether the object is black, since it hasn't changed
- // size, so the adjustment to the live data count will be zero anyway.
- if (old_start == new_start) return false;
-
- MarkBit new_mark_bit = MarkBitFrom(new_start);
- MarkBit old_mark_bit = MarkBitFrom(old_start);
-
-#ifdef DEBUG
- ObjectColor old_color = Color(old_mark_bit);
-#endif
-
- if (Marking::IsBlack(old_mark_bit)) {
- old_mark_bit.Clear();
- ASSERT(IsWhite(old_mark_bit));
- Marking::MarkBlack(new_mark_bit);
- return true;
- } else if (Marking::IsGrey(old_mark_bit)) {
- ASSERT(heap_->incremental_marking()->IsMarking());
- old_mark_bit.Clear();
- old_mark_bit.Next().Clear();
- ASSERT(IsWhite(old_mark_bit));
- heap_->incremental_marking()->WhiteToGreyAndPush(
- HeapObject::FromAddress(new_start), new_mark_bit);
- heap_->incremental_marking()->RestartIfNotMarking();
- }
-
-#ifdef DEBUG
- ObjectColor new_color = Color(new_mark_bit);
- ASSERT(new_color == old_color);
-#endif
-
- return false;
-}
-
-
-const char* AllocationSpaceName(AllocationSpace space) {
- switch (space) {
- case NEW_SPACE: return "NEW_SPACE";
- case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
- case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
- case CODE_SPACE: return "CODE_SPACE";
- case MAP_SPACE: return "MAP_SPACE";
- case CELL_SPACE: return "CELL_SPACE";
- case LO_SPACE: return "LO_SPACE";
- default:
- UNREACHABLE();
- }
-
- return NULL;
-}
-
-
-// Returns zero for pages that have so little fragmentation that it is not
-// worth defragmenting them. Otherwise a positive integer that gives an
-// estimate of fragmentation on an arbitrary scale.
-static int FreeListFragmentation(PagedSpace* space, Page* p) {
- // If page was not swept then there are no free list items on it.
- if (!p->WasSwept()) {
- if (FLAG_trace_fragmentation) {
- PrintF("%p [%s]: %d bytes live (unswept)\n",
- reinterpret_cast<void*>(p),
- AllocationSpaceName(space->identity()),
- p->LiveBytes());
- }
- return 0;
- }
-
- FreeList::SizeStats sizes;
- space->CountFreeListItems(p, &sizes);
-
- intptr_t ratio;
- intptr_t ratio_threshold;
- intptr_t area_size = space->AreaSize();
- if (space->identity() == CODE_SPACE) {
- ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
- area_size;
- ratio_threshold = 10;
- } else {
- ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
- area_size;
- ratio_threshold = 15;
- }
-
- if (FLAG_trace_fragmentation) {
- PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
- reinterpret_cast<void*>(p),
- AllocationSpaceName(space->identity()),
- static_cast<int>(sizes.small_size_),
- static_cast<double>(sizes.small_size_ * 100) /
- area_size,
- static_cast<int>(sizes.medium_size_),
- static_cast<double>(sizes.medium_size_ * 100) /
- area_size,
- static_cast<int>(sizes.large_size_),
- static_cast<double>(sizes.large_size_ * 100) /
- area_size,
- static_cast<int>(sizes.huge_size_),
- static_cast<double>(sizes.huge_size_ * 100) /
- area_size,
- (ratio > ratio_threshold) ? "[fragmented]" : "");
- }
-
- if (FLAG_always_compact && sizes.Total() != area_size) {
- return 1;
- }
-
- if (ratio <= ratio_threshold) return 0; // Not fragmented.
-
- return static_cast<int>(ratio - ratio_threshold);
-}
-
-
-void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
- ASSERT(space->identity() == OLD_POINTER_SPACE ||
- space->identity() == OLD_DATA_SPACE ||
- space->identity() == CODE_SPACE);
-
- static const int kMaxMaxEvacuationCandidates = 1000;
- int number_of_pages = space->CountTotalPages();
- int max_evacuation_candidates =
- static_cast<int>(sqrt(number_of_pages / 2.0) + 1);
-
- if (FLAG_stress_compaction || FLAG_always_compact) {
- max_evacuation_candidates = kMaxMaxEvacuationCandidates;
- }
-
- class Candidate {
- public:
- Candidate() : fragmentation_(0), page_(NULL) { }
- Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
-
- int fragmentation() { return fragmentation_; }
- Page* page() { return page_; }
-
- private:
- int fragmentation_;
- Page* page_;
- };
-
- enum CompactionMode {
- COMPACT_FREE_LISTS,
- REDUCE_MEMORY_FOOTPRINT
- };
-
- CompactionMode mode = COMPACT_FREE_LISTS;
-
- intptr_t reserved = number_of_pages * space->AreaSize();
- intptr_t over_reserved = reserved - space->SizeOfObjects();
- static const intptr_t kFreenessThreshold = 50;
-
- if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
- // If reduction of memory footprint was requested, we are aggressive
- // about choosing pages to free. We expect that half-empty pages
- // are easier to compact so slightly bump the limit.
- mode = REDUCE_MEMORY_FOOTPRINT;
- max_evacuation_candidates += 2;
- }
-
-
- if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
- // If over-usage is very high (more than a third of the space), we
- // try to free all mostly empty pages. We expect that almost empty
- // pages are even easier to compact so bump the limit even more.
- mode = REDUCE_MEMORY_FOOTPRINT;
- max_evacuation_candidates *= 2;
- }
-
- if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
- PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
- static_cast<double>(over_reserved) / MB,
- static_cast<double>(reserved) / MB,
- static_cast<int>(kFreenessThreshold));
- }
-
- intptr_t estimated_release = 0;
-
- Candidate candidates[kMaxMaxEvacuationCandidates];
-
- max_evacuation_candidates =
- Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
-
- int count = 0;
- int fragmentation = 0;
- Candidate* least = NULL;
-
- PageIterator it(space);
- if (it.has_next()) it.next(); // Never compact the first page.
-
- while (it.has_next()) {
- Page* p = it.next();
- p->ClearEvacuationCandidate();
-
- if (FLAG_stress_compaction) {
- unsigned int counter = space->heap()->ms_count();
- uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
- if ((counter & 1) == (page_number & 1)) fragmentation = 1;
- } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
- // Don't try to release too many pages.
- if (estimated_release >= ((over_reserved * 3) / 4)) {
- continue;
- }
-
- intptr_t free_bytes = 0;
-
- if (!p->WasSwept()) {
- free_bytes = (p->area_size() - p->LiveBytes());
- } else {
- FreeList::SizeStats sizes;
- space->CountFreeListItems(p, &sizes);
- free_bytes = sizes.Total();
- }
-
- int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
-
- if (free_pct >= kFreenessThreshold) {
- estimated_release += 2 * p->area_size() - free_bytes;
- fragmentation = free_pct;
- } else {
- fragmentation = 0;
- }
-
- if (FLAG_trace_fragmentation) {
- PrintF("%p [%s]: %d (%.2f%%) free %s\n",
- reinterpret_cast<void*>(p),
- AllocationSpaceName(space->identity()),
- static_cast<int>(free_bytes),
- static_cast<double>(free_bytes * 100) / p->area_size(),
- (fragmentation > 0) ? "[fragmented]" : "");
- }
- } else {
- fragmentation = FreeListFragmentation(space, p);
- }
-
- if (fragmentation != 0) {
- if (count < max_evacuation_candidates) {
- candidates[count++] = Candidate(fragmentation, p);
- } else {
- if (least == NULL) {
- for (int i = 0; i < max_evacuation_candidates; i++) {
- if (least == NULL ||
- candidates[i].fragmentation() < least->fragmentation()) {
- least = candidates + i;
- }
- }
- }
- if (least->fragmentation() < fragmentation) {
- *least = Candidate(fragmentation, p);
- least = NULL;
- }
- }
- }
- }
-
- for (int i = 0; i < count; i++) {
- AddEvacuationCandidate(candidates[i].page());
- }
-
- if (count > 0 && FLAG_trace_fragmentation) {
- PrintF("Collected %d evacuation candidates for space %s\n",
- count,
- AllocationSpaceName(space->identity()));
- }
-}
-
-
-void MarkCompactCollector::AbortCompaction() {
- if (compacting_) {
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
- slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
- p->ClearEvacuationCandidate();
- p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
- }
- compacting_ = false;
- evacuation_candidates_.Rewind(0);
- invalidated_code_.Rewind(0);
- }
- ASSERT_EQ(0, evacuation_candidates_.length());
-}
-
-
-void MarkCompactCollector::Prepare(GCTracer* tracer) {
- was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
-
- // Rather than passing the tracer around we stash it in a static member
- // variable.
- tracer_ = tracer;
-
-#ifdef DEBUG
- ASSERT(state_ == IDLE);
- state_ = PREPARE_GC;
-#endif
-
- ASSERT(!FLAG_never_compact || !FLAG_always_compact);
-
- if (AreSweeperThreadsActivated() && FLAG_concurrent_sweeping) {
- // Instead of waiting we could also abort the sweeper threads here.
- WaitUntilSweepingCompleted();
- FinalizeSweeping();
- }
-
- // Clear marking bits if incremental marking is aborted.
- if (was_marked_incrementally_ && abort_incremental_marking_) {
- heap()->incremental_marking()->Abort();
- ClearMarkbits();
- AbortCompaction();
- was_marked_incrementally_ = false;
- }
-
- // Don't start compaction if we are in the middle of incremental
- // marking cycle. We did not collect any slots.
- if (!FLAG_never_compact && !was_marked_incrementally_) {
- StartCompaction(NON_INCREMENTAL_COMPACTION);
- }
-
- PagedSpaces spaces(heap());
- for (PagedSpace* space = spaces.next();
- space != NULL;
- space = spaces.next()) {
- space->PrepareForMarkCompact();
- }
-
-#ifdef VERIFY_HEAP
- if (!was_marked_incrementally_ && FLAG_verify_heap) {
- VerifyMarkbitsAreClean();
- }
-#endif
-}
-
-
-class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter {
- public:
- virtual bool TakeFunction(JSFunction* function) {
- return function->code()->marked_for_deoptimization();
- }
-};
-
-
-void MarkCompactCollector::Finish() {
-#ifdef DEBUG
- ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
- state_ = IDLE;
-#endif
- // The stub cache is not traversed during GC; clear the cache to
- // force lazy re-initialization of it. This must be done after the
- // GC, because it relies on the new address of certain old space
- // objects (empty string, illegal builtin).
- heap()->isolate()->stub_cache()->Clear();
-
- DeoptimizeMarkedCodeFilter filter;
- Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
-}
-
-
-// -------------------------------------------------------------------------
-// Phase 1: tracing and marking live objects.
-// before: all objects are in normal state.
-// after: a live object's map pointer is marked as '00'.
-
-// Marking all live objects in the heap as part of mark-sweep or mark-compact
-// collection. Before marking, all objects are in their normal state. After
-// marking, live objects' map pointers are marked indicating that the object
-// has been found reachable.
-//
-// The marking algorithm is a (mostly) depth-first (because of possible stack
-// overflow) traversal of the graph of objects reachable from the roots. It
-// uses an explicit stack of pointers rather than recursion. The young
-// generation's inactive ('from') space is used as a marking stack. The
-// objects in the marking stack are the ones that have been reached and marked
-// but their children have not yet been visited.
-//
-// The marking stack can overflow during traversal. In that case, we set an
-// overflow flag. When the overflow flag is set, we continue marking objects
-// reachable from the objects on the marking stack, but no longer push them on
-// the marking stack. Instead, we mark them as both marked and overflowed.
-// When the stack is in the overflowed state, objects marked as overflowed
-// have been reached and marked but their children have not been visited yet.
-// After emptying the marking stack, we clear the overflow flag and traverse
-// the heap looking for objects marked as overflowed, push them on the stack,
-// and continue with marking. This process repeats until all reachable
-// objects have been marked.
-
-void CodeFlusher::ProcessJSFunctionCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
- Object* undefined = isolate_->heap()->undefined_value();
-
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- ClearNextCandidate(candidate, undefined);
-
- SharedFunctionInfo* shared = candidate->shared();
-
- Code* code = shared->code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- if (!code_mark.Get()) {
- shared->set_code(lazy_compile);
- candidate->set_code(lazy_compile);
- } else {
- candidate->set_code(code);
- }
-
- // We are in the middle of a GC cycle so the write barrier in the code
- // setter did not record the slot update and we have to do that manually.
- Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
- Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
- isolate_->heap()->mark_compact_collector()->
- RecordCodeEntrySlot(slot, target);
-
- Object** shared_code_slot =
- HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);
-
- candidate = next_candidate;
- }
-
- jsfunction_candidates_head_ = NULL;
-}
-
-
-void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
-
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- ClearNextCandidate(candidate);
-
- Code* code = candidate->code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- if (!code_mark.Get()) {
- candidate->set_code(lazy_compile);
- }
-
- Object** code_slot =
- HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(code_slot, code_slot, *code_slot);
-
- candidate = next_candidate;
- }
-
- shared_function_info_candidates_head_ = NULL;
-}
-
-
-bool CodeFlusher::ContainsCandidate(SharedFunctionInfo* shared_info) {
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- while (candidate != NULL) {
- if (candidate == shared_info) return true;
- candidate = GetNextCandidate(candidate);
- }
- return false;
-}
-
-
-void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
- // Make sure previous flushing decisions are revisited.
- isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
-
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- if (candidate == shared_info) {
- next_candidate = GetNextCandidate(shared_info);
- shared_function_info_candidates_head_ = next_candidate;
- ClearNextCandidate(shared_info);
- } else {
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
-
- if (next_candidate == shared_info) {
- next_candidate = GetNextCandidate(shared_info);
- SetNextCandidate(candidate, next_candidate);
- ClearNextCandidate(shared_info);
- break;
- }
-
- candidate = next_candidate;
- }
- }
-}
-
-
-void CodeFlusher::EvictCandidate(JSFunction* function) {
- ASSERT(!function->next_function_link()->IsUndefined());
- Object* undefined = isolate_->heap()->undefined_value();
-
- // Make sure previous flushing decisions are revisited.
- isolate_->heap()->incremental_marking()->RecordWrites(function);
- isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
-
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- if (candidate == function) {
- next_candidate = GetNextCandidate(function);
- jsfunction_candidates_head_ = next_candidate;
- ClearNextCandidate(function, undefined);
- } else {
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
-
- if (next_candidate == function) {
- next_candidate = GetNextCandidate(function);
- SetNextCandidate(candidate, next_candidate);
- ClearNextCandidate(function, undefined);
- break;
- }
-
- candidate = next_candidate;
- }
- }
-}
-
-
-void CodeFlusher::EvictJSFunctionCandidates() {
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- EvictCandidate(candidate);
- candidate = next_candidate;
- }
- ASSERT(jsfunction_candidates_head_ == NULL);
-}
-
-
-void CodeFlusher::EvictSharedFunctionInfoCandidates() {
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- EvictCandidate(candidate);
- candidate = next_candidate;
- }
- ASSERT(shared_function_info_candidates_head_ == NULL);
-}
-
-
-void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
- Heap* heap = isolate_->heap();
-
- JSFunction** slot = &jsfunction_candidates_head_;
- JSFunction* candidate = jsfunction_candidates_head_;
- while (candidate != NULL) {
- if (heap->InFromSpace(candidate)) {
- v->VisitPointer(reinterpret_cast<Object**>(slot));
- }
- candidate = GetNextCandidate(*slot);
- slot = GetNextCandidateSlot(*slot);
- }
-}
-
-
-MarkCompactCollector::~MarkCompactCollector() {
- if (code_flusher_ != NULL) {
- delete code_flusher_;
- code_flusher_ = NULL;
- }
-}
-
-
-static inline HeapObject* ShortCircuitConsString(Object** p) {
- // Optimization: If the heap object pointed to by p is a non-internalized
- // cons string whose right substring is HEAP->empty_string, update
- // it in place to its left substring. Return the updated value.
- //
- // Here we assume that if we change *p, we replace it with a heap object
- // (i.e., the left substring of a cons string is always a heap object).
- //
- // The check performed is:
- // object->IsConsString() && !object->IsInternalizedString() &&
- // (ConsString::cast(object)->second() == HEAP->empty_string())
- // except the maps for the object and its possible substrings might be
- // marked.
- HeapObject* object = HeapObject::cast(*p);
- if (!FLAG_clever_optimizations) return object;
- Map* map = object->map();
- InstanceType type = map->instance_type();
- if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
-
- Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
- Heap* heap = map->GetHeap();
- if (second != heap->empty_string()) {
- return object;
- }
-
- // Since we don't have the object's start, it is impossible to update the
- // page dirty marks. Therefore, we only replace the string with its left
- // substring when page dirty marks do not change.
- Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
- if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
-
- *p = first;
- return HeapObject::cast(first);
-}
-
-
-class MarkCompactMarkingVisitor
- : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
- public:
- static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
- Map* map, HeapObject* obj);
-
- static void ObjectStatsCountFixedArray(
- FixedArrayBase* fixed_array,
- FixedArraySubInstanceType fast_type,
- FixedArraySubInstanceType dictionary_type);
-
- template<MarkCompactMarkingVisitor::VisitorId id>
- class ObjectStatsTracker {
- public:
- static inline void Visit(Map* map, HeapObject* obj);
- };
-
- static void Initialize();
-
- INLINE(static void VisitPointer(Heap* heap, Object** p)) {
- MarkObjectByPointer(heap->mark_compact_collector(), p, p);
- }
-
- INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
- // Mark all objects pointed to in [start, end).
- const int kMinRangeForMarkingRecursion = 64;
- if (end - start >= kMinRangeForMarkingRecursion) {
- if (VisitUnmarkedObjects(heap, start, end)) return;
- // We are close to a stack overflow, so just mark the objects.
- }
- MarkCompactCollector* collector = heap->mark_compact_collector();
- for (Object** p = start; p < end; p++) {
- MarkObjectByPointer(collector, start, p);
- }
- }
-
- // Marks the object black and pushes it on the marking stack.
- INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
- MarkBit mark = Marking::MarkBitFrom(object);
- heap->mark_compact_collector()->MarkObject(object, mark);
- }
-
- // Marks the object black without pushing it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
- MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (!mark_bit.Get()) {
- heap->mark_compact_collector()->SetMark(object, mark_bit);
- return true;
- }
- return false;
- }
-
- // Mark object pointed to by p.
- INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
- Object** anchor_slot,
- Object** p)) {
- if (!(*p)->IsHeapObject()) return;
- HeapObject* object = ShortCircuitConsString(p);
- collector->RecordSlot(anchor_slot, p, object);
- MarkBit mark = Marking::MarkBitFrom(object);
- collector->MarkObject(object, mark);
- }
-
-
- // Visit an unmarked object.
- INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
- HeapObject* obj)) {
-#ifdef DEBUG
- ASSERT(Isolate::Current()->heap()->Contains(obj));
- ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
-#endif
- Map* map = obj->map();
- Heap* heap = obj->GetHeap();
- MarkBit mark = Marking::MarkBitFrom(obj);
- heap->mark_compact_collector()->SetMark(obj, mark);
- // Mark the map pointer and the body.
- MarkBit map_mark = Marking::MarkBitFrom(map);
- heap->mark_compact_collector()->MarkObject(map, map_mark);
- IterateBody(map, obj);
- }
-
- // Visit all unmarked objects pointed to by [start, end).
- // Returns false if the operation fails (lack of stack space).
- INLINE(static bool VisitUnmarkedObjects(Heap* heap,
- Object** start,
- Object** end)) {
- // Return false is we are close to the stack limit.
- StackLimitCheck check(heap->isolate());
- if (check.HasOverflowed()) return false;
-
- MarkCompactCollector* collector = heap->mark_compact_collector();
- // Visit the unmarked objects.
- for (Object** p = start; p < end; p++) {
- Object* o = *p;
- if (!o->IsHeapObject()) continue;
- collector->RecordSlot(start, p, o);
- HeapObject* obj = HeapObject::cast(o);
- MarkBit mark = Marking::MarkBitFrom(obj);
- if (mark.Get()) continue;
- VisitUnmarkedObject(collector, obj);
- }
- return true;
- }
-
- INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
- shared->BeforeVisitingPointers();
- }
-
- static void VisitJSWeakMap(Map* map, HeapObject* object) {
- MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
- JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
-
- // Enqueue weak map in linked list of encountered weak maps.
- if (weak_map->next() == Smi::FromInt(0)) {
- weak_map->set_next(collector->encountered_weak_maps());
- collector->set_encountered_weak_maps(weak_map);
- }
-
- // Skip visiting the backing hash table containing the mappings.
- int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
- BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
- map->GetHeap(),
- object,
- JSWeakMap::BodyDescriptor::kStartOffset,
- JSWeakMap::kTableOffset);
- BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
- map->GetHeap(),
- object,
- JSWeakMap::kTableOffset + kPointerSize,
- object_size);
-
- // Mark the backing hash table without pushing it on the marking stack.
- Object* table_object = weak_map->table();
- if (!table_object->IsHashTable()) return;
- ObjectHashTable* table = ObjectHashTable::cast(table_object);
- Object** table_slot =
- HeapObject::RawField(weak_map, JSWeakMap::kTableOffset);
- MarkBit table_mark = Marking::MarkBitFrom(table);
- collector->RecordSlot(table_slot, table_slot, table);
- if (!table_mark.Get()) collector->SetMark(table, table_mark);
- // Recording the map slot can be skipped, because maps are not compacted.
- collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
- ASSERT(MarkCompactCollector::IsMarked(table->map()));
- }
-
- private:
- template<int id>
- static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
-
- // Code flushing support.
-
- static const int kRegExpCodeThreshold = 5;
-
- static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
- JSRegExp* re,
- bool is_ascii) {
- // Make sure that the fixed array is in fact initialized on the RegExp.
- // We could potentially trigger a GC when initializing the RegExp.
- if (HeapObject::cast(re->data())->map()->instance_type() !=
- FIXED_ARRAY_TYPE) return;
-
- // Make sure this is a RegExp that actually contains code.
- if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
-
- Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
- if (!code->IsSmi() &&
- HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
- // Save a copy that can be reinstated if we need the code again.
- re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
- code,
- heap);
-
- // Saving a copy might create a pointer into compaction candidate
- // that was not observed by marker. This might happen if JSRegExp data
- // was marked through the compilation cache before marker reached JSRegExp
- // object.
- FixedArray* data = FixedArray::cast(re->data());
- Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
- heap->mark_compact_collector()->
- RecordSlot(slot, slot, code);
-
- // Set a number in the 0-255 range to guarantee no smi overflow.
- re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
- Smi::FromInt(heap->sweep_generation() & 0xff),
- heap);
- } else if (code->IsSmi()) {
- int value = Smi::cast(code)->value();
- // The regexp has not been compiled yet or there was a compilation error.
- if (value == JSRegExp::kUninitializedValue ||
- value == JSRegExp::kCompilationErrorValue) {
- return;
- }
-
- // Check if we should flush now.
- if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
- re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
- Smi::FromInt(JSRegExp::kUninitializedValue),
- heap);
- re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
- Smi::FromInt(JSRegExp::kUninitializedValue),
- heap);
- }
- }
- }
-
-
- // Works by setting the current sweep_generation (as a smi) in the
- // code object place in the data array of the RegExp and keeps a copy
- // around that can be reinstated if we reuse the RegExp before flushing.
- // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
- // we flush the code.
- static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (!collector->is_code_flushing_enabled()) {
- VisitJSRegExp(map, object);
- return;
- }
- JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
- // Flush code or set age on both ASCII and two byte code.
- UpdateRegExpCodeAgeAndFlush(heap, re, true);
- UpdateRegExpCodeAgeAndFlush(heap, re, false);
- // Visit the fields of the RegExp, including the updated FixedArray.
- VisitJSRegExp(map, object);
- }
-
- static VisitorDispatchTable<Callback> non_count_table_;
-};
-
-
-void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
- FixedArrayBase* fixed_array,
- FixedArraySubInstanceType fast_type,
- FixedArraySubInstanceType dictionary_type) {
- Heap* heap = fixed_array->map()->GetHeap();
- if (fixed_array->map() != heap->fixed_cow_array_map() &&
- fixed_array->map() != heap->fixed_double_array_map() &&
- fixed_array != heap->empty_fixed_array()) {
- if (fixed_array->IsDictionary()) {
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- dictionary_type,
- fixed_array->Size());
- } else {
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- fast_type,
- fixed_array->Size());
- }
- }
-}
-
-
-void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
- MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- int object_size = obj->Size();
- heap->RecordObjectStats(map->instance_type(), -1, object_size);
- non_count_table_.GetVisitorById(id)(map, obj);
- if (obj->IsJSObject()) {
- JSObject* object = JSObject::cast(obj);
- ObjectStatsCountFixedArray(object->elements(),
- DICTIONARY_ELEMENTS_SUB_TYPE,
- FAST_ELEMENTS_SUB_TYPE);
- ObjectStatsCountFixedArray(object->properties(),
- DICTIONARY_PROPERTIES_SUB_TYPE,
- FAST_PROPERTIES_SUB_TYPE);
- }
-}
-
-
-template<MarkCompactMarkingVisitor::VisitorId id>
-void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
- Map* map, HeapObject* obj) {
- ObjectStatsVisitBase(id, map, obj);
-}
-
-
-template<>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
- MarkCompactMarkingVisitor::kVisitMap> {
- public:
- static inline void Visit(Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- Map* map_obj = Map::cast(obj);
- ASSERT(map->instance_type() == MAP_TYPE);
- DescriptorArray* array = map_obj->instance_descriptors();
- if (map_obj->owns_descriptors() &&
- array != heap->empty_descriptor_array()) {
- int fixed_array_size = array->Size();
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- DESCRIPTOR_ARRAY_SUB_TYPE,
- fixed_array_size);
- }
- if (map_obj->HasTransitionArray()) {
- int fixed_array_size = map_obj->transitions()->Size();
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- TRANSITION_ARRAY_SUB_TYPE,
- fixed_array_size);
- }
- if (map_obj->code_cache() != heap->empty_fixed_array()) {
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
- MAP_CODE_CACHE_SUB_TYPE,
- FixedArray::cast(map_obj->code_cache())->Size());
- }
- ObjectStatsVisitBase(kVisitMap, map, obj);
- }
-};
-
-
-template<>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
- MarkCompactMarkingVisitor::kVisitCode> {
- public:
- static inline void Visit(Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- int object_size = obj->Size();
- ASSERT(map->instance_type() == CODE_TYPE);
- heap->RecordObjectStats(CODE_TYPE, Code::cast(obj)->kind(), object_size);
- ObjectStatsVisitBase(kVisitCode, map, obj);
- }
-};
-
-
-template<>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
- MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
- public:
- static inline void Visit(Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
- if (sfi->scope_info() != heap->empty_fixed_array()) {
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
- SCOPE_INFO_SUB_TYPE,
- FixedArray::cast(sfi->scope_info())->Size());
- }
- ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
- }
-};
-
-
-template<>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
- MarkCompactMarkingVisitor::kVisitFixedArray> {
- public:
- static inline void Visit(Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- FixedArray* fixed_array = FixedArray::cast(obj);
- if (fixed_array == heap->string_table()) {
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
- STRING_TABLE_SUB_TYPE,
- fixed_array->Size());
- }
- ObjectStatsVisitBase(kVisitFixedArray, map, obj);
- }
-};
-
-
-void MarkCompactMarkingVisitor::Initialize() {
- StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
-
- table_.Register(kVisitJSRegExp,
- &VisitRegExpAndFlushCode);
-
- if (FLAG_track_gc_object_stats) {
- // Copy the visitor table to make call-through possible.
- non_count_table_.CopyFrom(&table_);
-#define VISITOR_ID_COUNT_FUNCTION(id) \
- table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
- VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
-#undef VISITOR_ID_COUNT_FUNCTION
- }
-}
-
-
-VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
- MarkCompactMarkingVisitor::non_count_table_;
-
-
-class MarkingVisitor : public ObjectVisitor {
- public:
- explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
-
- void VisitPointer(Object** p) {
- MarkCompactMarkingVisitor::VisitPointer(heap_, p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- MarkCompactMarkingVisitor::VisitPointers(heap_, start, end);
- }
-
- private:
- Heap* heap_;
-};
-
-
-class CodeMarkingVisitor : public ThreadVisitor {
- public:
- explicit CodeMarkingVisitor(MarkCompactCollector* collector)
- : collector_(collector) {}
-
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- collector_->PrepareThreadForCodeFlushing(isolate, top);
- }
-
- private:
- MarkCompactCollector* collector_;
-};
-
-
-class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
- public:
- explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
- : collector_(collector) {}
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) VisitPointer(p);
- }
-
- void VisitPointer(Object** slot) {
- Object* obj = *slot;
- if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
- MarkBit shared_mark = Marking::MarkBitFrom(shared);
- MarkBit code_mark = Marking::MarkBitFrom(shared->code());
- collector_->MarkObject(shared->code(), code_mark);
- collector_->MarkObject(shared, shared_mark);
- }
- }
-
- private:
- MarkCompactCollector* collector_;
-};
-
-
-void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
- ThreadLocalTop* top) {
- for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- // Note: for the frame that has a pending lazy deoptimization
- // StackFrame::unchecked_code will return a non-optimized code object for
- // the outermost function and StackFrame::LookupCode will return
- // actual optimized code object.
- StackFrame* frame = it.frame();
- Code* code = frame->unchecked_code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- MarkObject(code, code_mark);
- if (frame->is_optimized()) {
- MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
- frame->LookupCode());
- }
- }
-}
-
-
-void MarkCompactCollector::PrepareForCodeFlushing() {
- ASSERT(heap() == Isolate::Current()->heap());
-
- // Enable code flushing for non-incremental cycles.
- if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
- EnableCodeFlushing(!was_marked_incrementally_);
- }
-
- // If code flushing is disabled, there is no need to prepare for it.
- if (!is_code_flushing_enabled()) return;
-
- // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
- // relies on it being marked before any other descriptor array.
- HeapObject* descriptor_array = heap()->empty_descriptor_array();
- MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
- MarkObject(descriptor_array, descriptor_array_mark);
-
- // Make sure we are not referencing the code from the stack.
- ASSERT(this == heap()->mark_compact_collector());
- PrepareThreadForCodeFlushing(heap()->isolate(),
- heap()->isolate()->thread_local_top());
-
- // Iterate the archived stacks in all threads to check if
- // the code is referenced.
- CodeMarkingVisitor code_marking_visitor(this);
- heap()->isolate()->thread_manager()->IterateArchivedThreads(
- &code_marking_visitor);
-
- SharedFunctionInfoMarkingVisitor visitor(this);
- heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
- heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
-
- ProcessMarkingDeque();
-}
-
-
-// Visitor class for marking heap roots.
-class RootMarkingVisitor : public ObjectVisitor {
- public:
- explicit RootMarkingVisitor(Heap* heap)
- : collector_(heap->mark_compact_collector()) { }
-
- void VisitPointer(Object** p) {
- MarkObjectByPointer(p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
- }
-
- private:
- void MarkObjectByPointer(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- // Replace flat cons strings in place.
- HeapObject* object = ShortCircuitConsString(p);
- MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (mark_bit.Get()) return;
-
- Map* map = object->map();
- // Mark the object.
- collector_->SetMark(object, mark_bit);
-
- // Mark the map pointer and body, and push them on the marking stack.
- MarkBit map_mark = Marking::MarkBitFrom(map);
- collector_->MarkObject(map, map_mark);
- MarkCompactMarkingVisitor::IterateBody(map, object);
-
- // Mark all the objects reachable from the map and body. May leave
- // overflowed objects in the heap.
- collector_->EmptyMarkingDeque();
- }
-
- MarkCompactCollector* collector_;
-};
-
-
-// Helper class for pruning the string table.
-class StringTableCleaner : public ObjectVisitor {
- public:
- explicit StringTableCleaner(Heap* heap)
- : heap_(heap), pointers_removed_(0) { }
-
- virtual void VisitPointers(Object** start, Object** end) {
- // Visit all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) {
- Object* o = *p;
- if (o->IsHeapObject() &&
- !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
- // Check if the internalized string being pruned is external. We need to
- // delete the associated external data as this string is going away.
-
- // Since no objects have yet been moved we can safely access the map of
- // the object.
- if (o->IsExternalString() ||
- (o->IsHeapObject() &&
- HeapObject::cast(o)->map()->has_external_resource())) {
- heap_->FinalizeExternalString(HeapObject::cast(*p));
- }
- // Set the entry to the_hole_value (as deleted).
- *p = heap_->the_hole_value();
- pointers_removed_++;
- }
- }
- }
-
- int PointersRemoved() {
- return pointers_removed_;
- }
-
- private:
- Heap* heap_;
- int pointers_removed_;
-};
-
-
-// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
-// are retained.
-class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
- public:
- virtual Object* RetainAs(Object* object) {
- if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
- return object;
- } else {
- return NULL;
- }
- }
-};
-
-
-// Fill the marking stack with overflowed objects returned by the given
-// iterator. Stop when the marking stack is filled or the end of the space
-// is reached, whichever comes first.
-template<class T>
-static void DiscoverGreyObjectsWithIterator(Heap* heap,
- MarkingDeque* marking_deque,
- T* it) {
- // The caller should ensure that the marking stack is initially not full,
- // so that we don't waste effort pointlessly scanning for objects.
- ASSERT(!marking_deque->IsFull());
-
- Map* filler_map = heap->one_pointer_filler_map();
- for (HeapObject* object = it->Next();
- object != NULL;
- object = it->Next()) {
- MarkBit markbit = Marking::MarkBitFrom(object);
- if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
- Marking::GreyToBlack(markbit);
- MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
- marking_deque->PushBlack(object);
- if (marking_deque->IsFull()) return;
- }
- }
-}
-
-
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
-
-
-static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
- ASSERT(!marking_deque->IsFull());
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- MarkBit::CellType* cells = p->markbits()->cells();
-
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_end())));
-
- Address cell_base = p->area_start();
- int cell_index = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base)));
-
-
- for (;
- cell_index < last_cell_index;
- cell_index++, cell_base += 32 * kPointerSize) {
- ASSERT(static_cast<unsigned>(cell_index) ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base))));
-
- const MarkBit::CellType current_cell = cells[cell_index];
- if (current_cell == 0) continue;
-
- const MarkBit::CellType next_cell = cells[cell_index + 1];
- MarkBit::CellType grey_objects = current_cell &
- ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
-
- int offset = 0;
- while (grey_objects != 0) {
- int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
- grey_objects >>= trailing_zeros;
- offset += trailing_zeros;
- MarkBit markbit(&cells[cell_index], 1 << offset, false);
- ASSERT(Marking::IsGrey(markbit));
- Marking::GreyToBlack(markbit);
- Address addr = cell_base + offset * kPointerSize;
- HeapObject* object = HeapObject::FromAddress(addr);
- MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
- marking_deque->PushBlack(object);
- if (marking_deque->IsFull()) return;
- offset += 2;
- grey_objects >>= 2;
- }
-
- grey_objects >>= (Bitmap::kBitsPerCell - 1);
- }
-}
-
-
-static void DiscoverGreyObjectsInSpace(Heap* heap,
- MarkingDeque* marking_deque,
- PagedSpace* space) {
- if (!space->was_swept_conservatively()) {
- HeapObjectIterator it(space);
- DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
- } else {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
- DiscoverGreyObjectsOnPage(marking_deque, p);
- if (marking_deque->IsFull()) return;
- }
- }
-}
-
-
-bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
- Object* o = *p;
- if (!o->IsHeapObject()) return false;
- HeapObject* heap_object = HeapObject::cast(o);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
- return !mark.Get();
-}
-
-
-bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
- Object** p) {
- Object* o = *p;
- ASSERT(o->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(o);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
- return !mark.Get();
-}
-
-
-void MarkCompactCollector::MarkStringTable() {
- StringTable* string_table = heap()->string_table();
- // Mark the string table itself.
- MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
- SetMark(string_table, string_table_mark);
- // Explicitly mark the prefix.
- MarkingVisitor marker(heap());
- string_table->IteratePrefix(&marker);
- ProcessMarkingDeque();
-}
-
-
-void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
- // Mark the heap roots including global variables, stack variables,
- // etc., and all objects reachable from them.
- heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
-
- // Handle the string table specially.
- MarkStringTable();
-
- // There may be overflowed objects in the heap. Visit them now.
- while (marking_deque_.overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
- }
-}
-
-
-void MarkCompactCollector::MarkImplicitRefGroups() {
- List<ImplicitRefGroup*>* ref_groups =
- heap()->isolate()->global_handles()->implicit_ref_groups();
-
- int last = 0;
- for (int i = 0; i < ref_groups->length(); i++) {
- ImplicitRefGroup* entry = ref_groups->at(i);
- ASSERT(entry != NULL);
-
- if (!IsMarked(*entry->parent_)) {
- (*ref_groups)[last++] = entry;
- continue;
- }
-
- Object*** children = entry->children_;
- // A parent object is marked, so mark all child heap objects.
- for (size_t j = 0; j < entry->length_; ++j) {
- if ((*children[j])->IsHeapObject()) {
- HeapObject* child = HeapObject::cast(*children[j]);
- MarkBit mark = Marking::MarkBitFrom(child);
- MarkObject(child, mark);
- }
- }
-
- // Once the entire group has been marked, dispose it because it's
- // not needed anymore.
- entry->Dispose();
- }
- ref_groups->Rewind(last);
-}
-
-
-// Mark all objects reachable from the objects on the marking stack.
-// Before: the marking stack contains zero or more heap object pointers.
-// After: the marking stack is empty, and all objects reachable from the
-// marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingDeque() {
- while (!marking_deque_.IsEmpty()) {
- while (!marking_deque_.IsEmpty()) {
- HeapObject* object = marking_deque_.Pop();
- ASSERT(object->IsHeapObject());
- ASSERT(heap()->Contains(object));
- ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
- Map* map = object->map();
- MarkBit map_mark = Marking::MarkBitFrom(map);
- MarkObject(map, map_mark);
-
- MarkCompactMarkingVisitor::IterateBody(map, object);
- }
-
- // Process encountered weak maps, mark objects only reachable by those
- // weak maps and repeat until fix-point is reached.
- ProcessWeakMaps();
- }
-}
-
-
-// Sweep the heap for overflowed objects, clear their overflow bits, and
-// push them on the marking stack. Stop early if the marking stack fills
-// before sweeping completes. If sweeping completes, there are no remaining
-// overflowed objects in the heap so the overflow flag on the markings stack
-// is cleared.
-void MarkCompactCollector::RefillMarkingDeque() {
- ASSERT(marking_deque_.overflowed());
-
- SemiSpaceIterator new_it(heap()->new_space());
- DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
- if (marking_deque_.IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->old_pointer_space());
- if (marking_deque_.IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->old_data_space());
- if (marking_deque_.IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->code_space());
- if (marking_deque_.IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->map_space());
- if (marking_deque_.IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->cell_space());
- if (marking_deque_.IsFull()) return;
-
- LargeObjectIterator lo_it(heap()->lo_space());
- DiscoverGreyObjectsWithIterator(heap(),
- &marking_deque_,
- &lo_it);
- if (marking_deque_.IsFull()) return;
-
- marking_deque_.ClearOverflowed();
-}
-
-
-// Mark all objects reachable (transitively) from objects on the marking
-// stack. Before: the marking stack contains zero or more heap object
-// pointers. After: the marking stack is empty and there are no overflowed
-// objects in the heap.
-void MarkCompactCollector::ProcessMarkingDeque() {
- EmptyMarkingDeque();
- while (marking_deque_.overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
- }
-}
-
-
-void MarkCompactCollector::ProcessExternalMarking(RootMarkingVisitor* visitor) {
- bool work_to_do = true;
- ASSERT(marking_deque_.IsEmpty());
- while (work_to_do) {
- heap()->isolate()->global_handles()->IterateObjectGroups(
- visitor, &IsUnmarkedHeapObjectWithHeap);
- MarkImplicitRefGroups();
- work_to_do = !marking_deque_.IsEmpty();
- ProcessMarkingDeque();
- }
-}
-
-
-void MarkCompactCollector::MarkLiveObjects() {
- GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
- // The recursive GC marker detects when it is nearing stack overflow,
- // and switches to a different marking system. JS interrupts interfere
- // with the C stack limit check.
- PostponeInterruptsScope postpone(heap()->isolate());
-
- bool incremental_marking_overflowed = false;
- IncrementalMarking* incremental_marking = heap_->incremental_marking();
- if (was_marked_incrementally_) {
- // Finalize the incremental marking and check whether we had an overflow.
- // Both markers use grey color to mark overflowed objects so
- // non-incremental marker can deal with them as if overflow
- // occured during normal marking.
- // But incremental marker uses a separate marking deque
- // so we have to explicitly copy its overflow state.
- incremental_marking->Finalize();
- incremental_marking_overflowed =
- incremental_marking->marking_deque()->overflowed();
- incremental_marking->marking_deque()->ClearOverflowed();
- } else {
- // Abort any pending incremental activities e.g. incremental sweeping.
- incremental_marking->Abort();
- }
-
-#ifdef DEBUG
- ASSERT(state_ == PREPARE_GC);
- state_ = MARK_LIVE_OBJECTS;
-#endif
- // The to space contains live objects, a page in from space is used as a
- // marking stack.
- Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
- Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
- if (FLAG_force_marking_deque_overflows) {
- marking_deque_end = marking_deque_start + 64 * kPointerSize;
- }
- marking_deque_.Initialize(marking_deque_start,
- marking_deque_end);
- ASSERT(!marking_deque_.overflowed());
-
- if (incremental_marking_overflowed) {
- // There are overflowed objects left in the heap after incremental marking.
- marking_deque_.SetOverflowed();
- }
-
- PrepareForCodeFlushing();
-
- if (was_marked_incrementally_) {
- // There is no write barrier on cells so we have to scan them now at the end
- // of the incremental marking.
- {
- HeapObjectIterator cell_iterator(heap()->cell_space());
- HeapObject* cell;
- while ((cell = cell_iterator.Next()) != NULL) {
- ASSERT(cell->IsJSGlobalPropertyCell());
- if (IsMarked(cell)) {
- int offset = JSGlobalPropertyCell::kValueOffset;
- MarkCompactMarkingVisitor::VisitPointer(
- heap(),
- reinterpret_cast<Object**>(cell->address() + offset));
- }
- }
- }
- }
-
- RootMarkingVisitor root_visitor(heap());
- MarkRoots(&root_visitor);
-
- // The objects reachable from the roots are marked, yet unreachable
- // objects are unmarked. Mark objects reachable due to host
- // application specific logic.
- ProcessExternalMarking(&root_visitor);
-
- // The objects reachable from the roots or object groups are marked,
- // yet unreachable objects are unmarked. Mark objects reachable
- // only from weak global handles.
- //
- // First we identify nonlive weak handles and mark them as pending
- // destruction.
- heap()->isolate()->global_handles()->IdentifyWeakHandles(
- &IsUnmarkedHeapObject);
- // Then we mark the objects and process the transitive closure.
- heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
- while (marking_deque_.overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
- }
-
- // Repeat host application specific marking to mark unmarked objects
- // reachable from the weak roots.
- ProcessExternalMarking(&root_visitor);
-
- AfterMarking();
-}
-
-
-void MarkCompactCollector::AfterMarking() {
- // Object literal map caches reference strings (cache keys) and maps
- // (cache values). At this point still useful maps have already been
- // marked. Mark the keys for the alive values before we process the
- // string table.
- ProcessMapCaches();
-
- // Prune the string table removing all strings only pointed to by the
- // string table. Cannot use string_table() here because the string
- // table is marked.
- StringTable* string_table = heap()->string_table();
- StringTableCleaner v(heap());
- string_table->IterateElements(&v);
- string_table->ElementsRemoved(v.PointersRemoved());
- heap()->external_string_table_.Iterate(&v);
- heap()->external_string_table_.CleanUp();
- heap()->error_object_list_.RemoveUnmarked(heap());
-
- // Process the weak references.
- MarkCompactWeakObjectRetainer mark_compact_object_retainer;
- heap()->ProcessWeakReferences(&mark_compact_object_retainer);
-
- // Remove object groups after marking phase.
- heap()->isolate()->global_handles()->RemoveObjectGroups();
- heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
-
- // Flush code from collected candidates.
- if (is_code_flushing_enabled()) {
- code_flusher_->ProcessCandidates();
- // If incremental marker does not support code flushing, we need to
- // disable it before incremental marking steps for next cycle.
- if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
- EnableCodeFlushing(false);
- }
- }
-
- if (!FLAG_watch_ic_patching) {
- // Clean up dead objects from the runtime profiler.
- heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
- }
-
- if (FLAG_track_gc_object_stats) {
- heap()->CheckpointObjectStats();
- }
-}
-
-
-void MarkCompactCollector::ProcessMapCaches() {
- Object* raw_context = heap()->native_contexts_list_;
- while (raw_context != heap()->undefined_value()) {
- Context* context = reinterpret_cast<Context*>(raw_context);
- if (IsMarked(context)) {
- HeapObject* raw_map_cache =
- HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
- // A map cache may be reachable from the stack. In this case
- // it's already transitively marked and it's too late to clean
- // up its parts.
- if (!IsMarked(raw_map_cache) &&
- raw_map_cache != heap()->undefined_value()) {
- MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
- int existing_elements = map_cache->NumberOfElements();
- int used_elements = 0;
- for (int i = MapCache::kElementsStartIndex;
- i < map_cache->length();
- i += MapCache::kEntrySize) {
- Object* raw_key = map_cache->get(i);
- if (raw_key == heap()->undefined_value() ||
- raw_key == heap()->the_hole_value()) continue;
- STATIC_ASSERT(MapCache::kEntrySize == 2);
- Object* raw_map = map_cache->get(i + 1);
- if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
- ++used_elements;
- } else {
- // Delete useless entries with unmarked maps.
- ASSERT(raw_map->IsMap());
- map_cache->set_the_hole(i);
- map_cache->set_the_hole(i + 1);
- }
- }
- if (used_elements == 0) {
- context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
- } else {
- // Note: we don't actually shrink the cache here to avoid
- // extra complexity during GC. We rely on subsequent cache
- // usages (EnsureCapacity) to do this.
- map_cache->ElementsRemoved(existing_elements - used_elements);
- MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
- MarkObject(map_cache, map_cache_markbit);
- }
- }
- }
- // Move to next element in the list.
- raw_context = context->get(Context::NEXT_CONTEXT_LINK);
- }
- ProcessMarkingDeque();
-}
-
-
-void MarkCompactCollector::ReattachInitialMaps() {
- HeapObjectIterator map_iterator(heap()->map_space());
- for (HeapObject* obj = map_iterator.Next();
- obj != NULL;
- obj = map_iterator.Next()) {
- if (obj->IsFreeSpace()) continue;
- Map* map = Map::cast(obj);
-
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
-
- if (map->attached_to_shared_function_info()) {
- JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
- }
- }
-}
-
-
-void MarkCompactCollector::ClearNonLiveReferences() {
- HeapObjectIterator map_iterator(heap()->map_space());
- // Iterate over the map space, setting map transitions that go from
- // a marked map to an unmarked map to null transitions. This action
- // is carried out only on maps of JSObjects and related subtypes.
- for (HeapObject* obj = map_iterator.Next();
- obj != NULL; obj = map_iterator.Next()) {
- Map* map = reinterpret_cast<Map*>(obj);
- MarkBit map_mark = Marking::MarkBitFrom(map);
- if (map->IsFreeSpace()) continue;
-
- ASSERT(map->IsMap());
- if (!map->CanTransition()) continue;
-
- if (map_mark.Get() &&
- map->attached_to_shared_function_info()) {
- // This map is used for inobject slack tracking and has been detached
- // from SharedFunctionInfo during the mark phase.
- // Since it survived the GC, reattach it now.
- map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
- }
-
- ClearNonLivePrototypeTransitions(map);
- ClearNonLiveMapTransitions(map, map_mark);
-
- if (map_mark.Get()) {
- ClearNonLiveDependentCode(map);
- } else {
- ClearAndDeoptimizeDependentCode(map);
- }
- }
-}
-
-
-void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
- int number_of_transitions = map->NumberOfProtoTransitions();
- FixedArray* prototype_transitions = map->GetPrototypeTransitions();
-
- int new_number_of_transitions = 0;
- const int header = Map::kProtoTransitionHeaderSize;
- const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
- const int map_offset = header + Map::kProtoTransitionMapOffset;
- const int step = Map::kProtoTransitionElementsPerEntry;
- for (int i = 0; i < number_of_transitions; i++) {
- Object* prototype = prototype_transitions->get(proto_offset + i * step);
- Object* cached_map = prototype_transitions->get(map_offset + i * step);
- if (IsMarked(prototype) && IsMarked(cached_map)) {
- int proto_index = proto_offset + new_number_of_transitions * step;
- int map_index = map_offset + new_number_of_transitions * step;
- if (new_number_of_transitions != i) {
- prototype_transitions->set_unchecked(
- heap_,
- proto_index,
- prototype,
- UPDATE_WRITE_BARRIER);
- prototype_transitions->set_unchecked(
- heap_,
- map_index,
- cached_map,
- SKIP_WRITE_BARRIER);
- }
- Object** slot =
- HeapObject::RawField(prototype_transitions,
- FixedArray::OffsetOfElementAt(proto_index));
- RecordSlot(slot, slot, prototype);
- new_number_of_transitions++;
- }
- }
-
- if (new_number_of_transitions != number_of_transitions) {
- map->SetNumberOfProtoTransitions(new_number_of_transitions);
- }
-
- // Fill slots that became free with undefined value.
- for (int i = new_number_of_transitions * step;
- i < number_of_transitions * step;
- i++) {
- prototype_transitions->set_undefined(heap_, header + i);
- }
-}
-
-
-void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
- MarkBit map_mark) {
- Object* potential_parent = map->GetBackPointer();
- if (!potential_parent->IsMap()) return;
- Map* parent = Map::cast(potential_parent);
-
- // Follow back pointer, check whether we are dealing with a map transition
- // from a live map to a dead path and in case clear transitions of parent.
- bool current_is_alive = map_mark.Get();
- bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
- if (!current_is_alive && parent_is_alive) {
- parent->ClearNonLiveTransitions(heap());
- }
-}
-
-
-void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
- AssertNoAllocation no_allocation_scope;
- DependentCode* entries = map->dependent_code();
- DependentCode::GroupStartIndexes starts(entries);
- int number_of_entries = starts.number_of_entries();
- if (number_of_entries == 0) return;
- for (int i = 0; i < number_of_entries; i++) {
- Code* code = entries->code_at(i);
- if (IsMarked(code) && !code->marked_for_deoptimization()) {
- code->set_marked_for_deoptimization(true);
- }
- entries->clear_code_at(i);
- }
- map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
-}
-
-
-void MarkCompactCollector::ClearNonLiveDependentCode(Map* map) {
- AssertNoAllocation no_allocation_scope;
- DependentCode* entries = map->dependent_code();
- DependentCode::GroupStartIndexes starts(entries);
- int number_of_entries = starts.number_of_entries();
- if (number_of_entries == 0) return;
- int new_number_of_entries = 0;
- // Go through all groups, remove dead codes and compact.
- for (int g = 0; g < DependentCode::kGroupCount; g++) {
- int group_number_of_entries = 0;
- for (int i = starts.at(g); i < starts.at(g + 1); i++) {
- Code* code = entries->code_at(i);
- if (IsMarked(code) && !code->marked_for_deoptimization()) {
- if (new_number_of_entries + group_number_of_entries != i) {
- entries->set_code_at(new_number_of_entries +
- group_number_of_entries, code);
- }
- Object** slot = entries->code_slot_at(new_number_of_entries +
- group_number_of_entries);
- RecordSlot(slot, slot, code);
- group_number_of_entries++;
- }
- }
- entries->set_number_of_entries(
- static_cast<DependentCode::DependencyGroup>(g),
- group_number_of_entries);
- new_number_of_entries += group_number_of_entries;
- }
- for (int i = new_number_of_entries; i < number_of_entries; i++) {
- entries->clear_code_at(i);
- }
-}
-
-
-void MarkCompactCollector::ProcessWeakMaps() {
- Object* weak_map_obj = encountered_weak_maps();
- while (weak_map_obj != Smi::FromInt(0)) {
- ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
- JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
- ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
- Object** anchor = reinterpret_cast<Object**>(table->address());
- for (int i = 0; i < table->Capacity(); i++) {
- if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
- Object** key_slot =
- HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
- ObjectHashTable::EntryToIndex(i)));
- RecordSlot(anchor, key_slot, *key_slot);
- Object** value_slot =
- HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
- ObjectHashTable::EntryToValueIndex(i)));
- MarkCompactMarkingVisitor::MarkObjectByPointer(
- this, anchor, value_slot);
- }
- }
- weak_map_obj = weak_map->next();
- }
-}
-
-
-void MarkCompactCollector::ClearWeakMaps() {
- Object* weak_map_obj = encountered_weak_maps();
- while (weak_map_obj != Smi::FromInt(0)) {
- ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
- JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
- ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
- for (int i = 0; i < table->Capacity(); i++) {
- if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
- table->RemoveEntry(i);
- }
- }
- weak_map_obj = weak_map->next();
- weak_map->set_next(Smi::FromInt(0));
- }
- set_encountered_weak_maps(Smi::FromInt(0));
-}
-
-
-// We scavange new space simultaneously with sweeping. This is done in two
-// passes.
-//
-// The first pass migrates all alive objects from one semispace to another or
-// promotes them to old space. Forwarding address is written directly into
-// first word of object without any encoding. If object is dead we write
-// NULL as a forwarding address.
-//
-// The second pass updates pointers to new space in all spaces. It is possible
-// to encounter pointers to dead new space objects during traversal of pointers
-// to new space. We should clear them to avoid encountering them during next
-// pointer iteration. This is an issue if the store buffer overflows and we
-// have to scan the entire old space, including dead objects, looking for
-// pointers to new space.
-void MarkCompactCollector::MigrateObject(Address dst,
- Address src,
- int size,
- AllocationSpace dest) {
- HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
- if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
- Address src_slot = src;
- Address dst_slot = dst;
- ASSERT(IsAligned(size, kPointerSize));
-
- for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
- Object* value = Memory::Object_at(src_slot);
-
- Memory::Object_at(dst_slot) = value;
-
- if (heap_->InNewSpace(value)) {
- heap_->store_buffer()->Mark(dst_slot);
- } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_,
- &migration_slots_buffer_,
- reinterpret_cast<Object**>(dst_slot),
- SlotsBuffer::IGNORE_OVERFLOW);
- }
-
- src_slot += kPointerSize;
- dst_slot += kPointerSize;
- }
-
- if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
- Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
- Address code_entry = Memory::Address_at(code_entry_slot);
-
- if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_,
- &migration_slots_buffer_,
- SlotsBuffer::CODE_ENTRY_SLOT,
- code_entry_slot,
- SlotsBuffer::IGNORE_OVERFLOW);
- }
- }
- } else if (dest == CODE_SPACE) {
- PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
- heap()->MoveBlock(dst, src, size);
- SlotsBuffer::AddTo(&slots_buffer_allocator_,
- &migration_slots_buffer_,
- SlotsBuffer::RELOCATED_CODE_OBJECT,
- dst,
- SlotsBuffer::IGNORE_OVERFLOW);
- Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
- } else {
- ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
- heap()->MoveBlock(dst, src, size);
- }
- Memory::Address_at(src) = dst;
-}
-
-
-// Visitor for updating pointers from live objects in old spaces to new space.
-// It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor: public ObjectVisitor {
- public:
- explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
-
- void VisitPointer(Object** p) {
- UpdatePointer(p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) UpdatePointer(p);
- }
-
- void VisitEmbeddedPointer(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- Object* target = rinfo->target_object();
- Object* old_target = target;
- VisitPointer(&target);
- // Avoid unnecessary changes that might unnecessary flush the instruction
- // cache.
- if (target != old_target) {
- rinfo->set_target_object(target);
- }
- }
-
- void VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Object* old_target = target;
- VisitPointer(&target);
- if (target != old_target) {
- rinfo->set_target_address(Code::cast(target)->instruction_start());
- }
- }
-
- void VisitCodeAgeSequence(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
- Object* stub = rinfo->code_age_stub();
- ASSERT(stub != NULL);
- VisitPointer(&stub);
- if (stub != rinfo->code_age_stub()) {
- rinfo->set_code_age_stub(Code::cast(stub));
- }
- }
-
- void VisitDebugTarget(RelocInfo* rinfo) {
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
- VisitPointer(&target);
- rinfo->set_call_address(Code::cast(target)->instruction_start());
- }
-
- static inline void UpdateSlot(Heap* heap, Object** slot) {
- Object* obj = *slot;
-
- if (!obj->IsHeapObject()) return;
-
- HeapObject* heap_obj = HeapObject::cast(obj);
-
- MapWord map_word = heap_obj->map_word();
- if (map_word.IsForwardingAddress()) {
- ASSERT(heap->InFromSpace(heap_obj) ||
- MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
- HeapObject* target = map_word.ToForwardingAddress();
- *slot = target;
- ASSERT(!heap->InFromSpace(target) &&
- !MarkCompactCollector::IsOnEvacuationCandidate(target));
- }
- }
-
- private:
- inline void UpdatePointer(Object** p) {
- UpdateSlot(heap_, p);
- }
-
- Heap* heap_;
-};
-
-
-static void UpdatePointer(HeapObject** p, HeapObject* object) {
- ASSERT(*p == object);
-
- Address old_addr = object->address();
-
- Address new_addr = Memory::Address_at(old_addr);
-
- // The new space sweep will overwrite the map word of dead objects
- // with NULL. In this case we do not need to transfer this entry to
- // the store buffer which we are rebuilding.
- if (new_addr != NULL) {
- *p = HeapObject::FromAddress(new_addr);
- } else {
- // We have to zap this pointer, because the store buffer may overflow later,
- // and then we have to scan the entire heap and we don't want to find
- // spurious newspace pointers in the old space.
- // TODO(mstarzinger): This was changed to a sentinel value to track down
- // rare crashes, change it back to Smi::FromInt(0) later.
- *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1)); // flood
- }
-}
-
-
-static HeapObject* UpdateReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
- MapWord map_word = HeapObject::cast(*p)->map_word();
-
- if (map_word.IsForwardingAddress()) {
- return HeapObject::cast(map_word.ToForwardingAddress());
- }
-
- return HeapObject::cast(*p);
-}
-
-
-bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
- int object_size) {
- Object* result;
-
- if (object_size > Page::kMaxNonCodeHeapObjectSize) {
- MaybeObject* maybe_result =
- heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
- object->address(),
- object_size,
- LO_SPACE);
- heap()->mark_compact_collector()->tracer()->
- increment_promoted_objects_size(object_size);
- return true;
- }
- } else {
- OldSpace* target_space = heap()->TargetSpace(object);
-
- ASSERT(target_space == heap()->old_pointer_space() ||
- target_space == heap()->old_data_space());
- MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
- object->address(),
- object_size,
- target_space->identity());
- heap()->mark_compact_collector()->tracer()->
- increment_promoted_objects_size(object_size);
- return true;
- }
- }
-
- return false;
-}
-
-
-void MarkCompactCollector::EvacuateNewSpace() {
- // There are soft limits in the allocation code, designed trigger a mark
- // sweep collection by failing allocations. But since we are already in
- // a mark-sweep allocation, there is no sense in trying to trigger one.
- AlwaysAllocateScope scope;
- heap()->CheckNewSpaceExpansionCriteria();
-
- NewSpace* new_space = heap()->new_space();
-
- // Store allocation range before flipping semispaces.
- Address from_bottom = new_space->bottom();
- Address from_top = new_space->top();
-
- // Flip the semispaces. After flipping, to space is empty, from space has
- // live objects.
- new_space->Flip();
- new_space->ResetAllocationInfo();
-
- int survivors_size = 0;
-
- // First pass: traverse all objects in inactive semispace, remove marks,
- // migrate live objects and write forwarding addresses. This stage puts
- // new entries in the store buffer and may cause some pages to be marked
- // scan-on-scavenge.
- SemiSpaceIterator from_it(from_bottom, from_top);
- for (HeapObject* object = from_it.Next();
- object != NULL;
- object = from_it.Next()) {
- MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (mark_bit.Get()) {
- mark_bit.Clear();
- // Don't bother decrementing live bytes count. We'll discard the
- // entire page at the end.
- int size = object->Size();
- survivors_size += size;
-
- // Aggressively promote young survivors to the old space.
- if (TryPromoteObject(object, size)) {
- continue;
- }
-
- // Promotion failed. Just migrate object to another semispace.
- MaybeObject* allocation = new_space->AllocateRaw(size);
- if (allocation->IsFailure()) {
- if (!new_space->AddFreshPage()) {
- // Shouldn't happen. We are sweeping linearly, and to-space
- // has the same number of pages as from-space, so there is
- // always room.
- UNREACHABLE();
- }
- allocation = new_space->AllocateRaw(size);
- ASSERT(!allocation->IsFailure());
- }
- Object* target = allocation->ToObjectUnchecked();
-
- MigrateObject(HeapObject::cast(target)->address(),
- object->address(),
- size,
- NEW_SPACE);
- } else {
- // Mark dead objects in the new space with null in their map field.
- Memory::Address_at(object->address()) = NULL;
- }
- }
-
- heap_->IncrementYoungSurvivorsCounter(survivors_size);
- new_space->set_age_mark(new_space->top());
-}
-
-
-void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
- AlwaysAllocateScope always_allocate;
- PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
- MarkBit::CellType* cells = p->markbits()->cells();
- p->MarkSweptPrecisely();
-
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_end())));
-
- Address cell_base = p->area_start();
- int cell_index = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base)));
-
- int offsets[16];
-
- for (;
- cell_index < last_cell_index;
- cell_index++, cell_base += 32 * kPointerSize) {
- ASSERT(static_cast<unsigned>(cell_index) ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base))));
- if (cells[cell_index] == 0) continue;
-
- int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
- for (int i = 0; i < live_objects; i++) {
- Address object_addr = cell_base + offsets[i] * kPointerSize;
- HeapObject* object = HeapObject::FromAddress(object_addr);
- ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
- int size = object->Size();
-
- MaybeObject* target = space->AllocateRaw(size);
- if (target->IsFailure()) {
- // OS refused to give us memory.
- V8::FatalProcessOutOfMemory("Evacuation");
- return;
- }
-
- Object* target_object = target->ToObjectUnchecked();
-
- MigrateObject(HeapObject::cast(target_object)->address(),
- object_addr,
- size,
- space->identity());
- ASSERT(object->map_word().IsForwardingAddress());
- }
-
- // Clear marking bits for current cell.
- cells[cell_index] = 0;
- }
- p->ResetLiveBytes();
-}
-
-
-void MarkCompactCollector::EvacuatePages() {
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
- ASSERT(p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
- if (p->IsEvacuationCandidate()) {
- // During compaction we might have to request a new page.
- // Check that space still have room for that.
- if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
- EvacuateLiveObjectsFromPage(p);
- } else {
- // Without room for expansion evacuation is not guaranteed to succeed.
- // Pessimistically abandon unevacuated pages.
- for (int j = i; j < npages; j++) {
- Page* page = evacuation_candidates_[j];
- slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
- page->ClearEvacuationCandidate();
- page->SetFlag(Page::RESCAN_ON_EVACUATION);
- }
- return;
- }
- }
- }
-}
-
-
-class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
- public:
- virtual Object* RetainAs(Object* object) {
- if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
- MapWord map_word = heap_object->map_word();
- if (map_word.IsForwardingAddress()) {
- return map_word.ToForwardingAddress();
- }
- }
- return object;
- }
-};
-
-
-static inline void UpdateSlot(ObjectVisitor* v,
- SlotsBuffer::SlotType slot_type,
- Address addr) {
- switch (slot_type) {
- case SlotsBuffer::CODE_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
- rinfo.Visit(v);
- break;
- }
- case SlotsBuffer::CODE_ENTRY_SLOT: {
- v->VisitCodeEntry(addr);
- break;
- }
- case SlotsBuffer::RELOCATED_CODE_OBJECT: {
- HeapObject* obj = HeapObject::FromAddress(addr);
- Code::cast(obj)->CodeIterateBody(v);
- break;
- }
- case SlotsBuffer::DEBUG_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
- if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
- break;
- }
- case SlotsBuffer::JS_RETURN_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
- if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
- break;
- }
- case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
- rinfo.Visit(v);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-enum SweepingMode {
- SWEEP_ONLY,
- SWEEP_AND_VISIT_LIVE_OBJECTS
-};
-
-
-enum SkipListRebuildingMode {
- REBUILD_SKIP_LIST,
- IGNORE_SKIP_LIST
-};
-
-
-// Sweep a space precisely. After this has been done the space can
-// be iterated precisely, hitting only the live objects. Code space
-// is always swept precisely because we want to be able to iterate
-// over it. Map space is swept precisely, because it is not compacted.
-// Slots in live objects pointing into evacuation candidates are updated
-// if requested.
-template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
-static void SweepPrecisely(PagedSpace* space,
- Page* p,
- ObjectVisitor* v) {
- ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
- ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
- space->identity() == CODE_SPACE);
- ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
-
- double start_time = 0.0;
- if (FLAG_print_cumulative_gc_stat) {
- start_time = OS::TimeCurrentMillis();
- }
-
- MarkBit::CellType* cells = p->markbits()->cells();
- p->MarkSweptPrecisely();
-
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_end())));
-
- Address free_start = p->area_start();
- int cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(free_start)));
-
- ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
- Address object_address = free_start;
- int offsets[16];
-
- SkipList* skip_list = p->skip_list();
- int curr_region = -1;
- if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
- skip_list->Clear();
- }
-
- for (;
- cell_index < last_cell_index;
- cell_index++, object_address += 32 * kPointerSize) {
- ASSERT(static_cast<unsigned>(cell_index) ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(object_address))));
- int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
- int live_index = 0;
- for ( ; live_objects != 0; live_objects--) {
- Address free_end = object_address + offsets[live_index++] * kPointerSize;
- if (free_end != free_start) {
- space->Free(free_start, static_cast<int>(free_end - free_start));
- }
- HeapObject* live_object = HeapObject::FromAddress(free_end);
- ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
- Map* map = live_object->map();
- int size = live_object->SizeFromMap(map);
- if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
- live_object->IterateBody(map->instance_type(), size, v);
- }
- if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
- int new_region_start =
- SkipList::RegionNumber(free_end);
- int new_region_end =
- SkipList::RegionNumber(free_end + size - kPointerSize);
- if (new_region_start != curr_region ||
- new_region_end != curr_region) {
- skip_list->AddObject(free_end, size);
- curr_region = new_region_end;
- }
- }
- free_start = free_end + size;
- }
- // Clear marking bits for current cell.
- cells[cell_index] = 0;
- }
- if (free_start != p->area_end()) {
- space->Free(free_start, static_cast<int>(p->area_end() - free_start));
- }
- p->ResetLiveBytes();
- if (FLAG_print_cumulative_gc_stat) {
- space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
- }
-}
-
-
-static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
- Page* p = Page::FromAddress(code->address());
-
- if (p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- return false;
- }
-
- Address code_start = code->address();
- Address code_end = code_start + code->Size();
-
- uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
- uint32_t end_index =
- MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
-
- Bitmap* b = p->markbits();
-
- MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
- MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
-
- MarkBit::CellType* start_cell = start_mark_bit.cell();
- MarkBit::CellType* end_cell = end_mark_bit.cell();
-
- if (value) {
- MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
- MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
-
- if (start_cell == end_cell) {
- *start_cell |= start_mask & end_mask;
- } else {
- *start_cell |= start_mask;
- for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
- *cell = ~0;
- }
- *end_cell |= end_mask;
- }
- } else {
- for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
- *cell = 0;
- }
- }
-
- return true;
-}
-
-
-static bool IsOnInvalidatedCodeObject(Address addr) {
- // We did not record any slots in large objects thus
- // we can safely go to the page from the slot address.
- Page* p = Page::FromAddress(addr);
-
- // First check owner's identity because old pointer and old data spaces
- // are swept lazily and might still have non-zero mark-bits on some
- // pages.
- if (p->owner()->identity() != CODE_SPACE) return false;
-
- // In code space only bits on evacuation candidates (but we don't record
- // any slots on them) and under invalidated code objects are non-zero.
- MarkBit mark_bit =
- p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
-
- return mark_bit.Get();
-}
-
-
-void MarkCompactCollector::InvalidateCode(Code* code) {
- if (heap_->incremental_marking()->IsCompacting() &&
- !ShouldSkipEvacuationSlotRecording(code)) {
- ASSERT(compacting_);
-
- // If the object is white than no slots were recorded on it yet.
- MarkBit mark_bit = Marking::MarkBitFrom(code);
- if (Marking::IsWhite(mark_bit)) return;
-
- invalidated_code_.Add(code);
- }
-}
-
-
-bool MarkCompactCollector::MarkInvalidatedCode() {
- bool code_marked = false;
-
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- Code* code = invalidated_code_[i];
-
- if (SetMarkBitsUnderInvalidatedCode(code, true)) {
- code_marked = true;
- }
- }
-
- return code_marked;
-}
-
-
-void MarkCompactCollector::RemoveDeadInvalidatedCode() {
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
- }
-}
-
-
-void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- Code* code = invalidated_code_[i];
- if (code != NULL) {
- code->Iterate(visitor);
- SetMarkBitsUnderInvalidatedCode(code, false);
- }
- }
- invalidated_code_.Rewind(0);
-}
-
-
-void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
- Heap::RelocationLock relocation_lock(heap());
-
- bool code_slots_filtering_required;
- { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
- code_slots_filtering_required = MarkInvalidatedCode();
-
- EvacuateNewSpace();
- }
-
-
- { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
- EvacuatePages();
- }
-
- // Second pass: find pointers to new space and update them.
- PointersUpdatingVisitor updating_visitor(heap());
-
- { GCTracer::Scope gc_scope(tracer_,
- GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
- // Update pointers in to space.
- SemiSpaceIterator to_it(heap()->new_space()->bottom(),
- heap()->new_space()->top());
- for (HeapObject* object = to_it.Next();
- object != NULL;
- object = to_it.Next()) {
- Map* map = object->map();
- object->IterateBody(map->instance_type(),
- object->SizeFromMap(map),
- &updating_visitor);
- }
- }
-
- { GCTracer::Scope gc_scope(tracer_,
- GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
- // Update roots.
- heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- }
-
- { GCTracer::Scope gc_scope(tracer_,
- GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
- StoreBufferRebuildScope scope(heap_,
- heap_->store_buffer(),
- &Heap::ScavengeStoreBufferCallback);
- heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
- }
-
- { GCTracer::Scope gc_scope(tracer_,
- GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
- SlotsBuffer::UpdateSlotsRecordedIn(heap_,
- migration_slots_buffer_,
- code_slots_filtering_required);
- if (FLAG_trace_fragmentation) {
- PrintF(" migration slots buffer: %d\n",
- SlotsBuffer::SizeOfChain(migration_slots_buffer_));
- }
-
- if (compacting_ && was_marked_incrementally_) {
- // It's difficult to filter out slots recorded for large objects.
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- // LargeObjectSpace is not swept yet thus we have to skip
- // dead objects explicitly.
- if (!IsMarked(obj)) continue;
-
- Page* p = Page::FromAddress(obj->address());
- if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- obj->Iterate(&updating_visitor);
- p->ClearFlag(Page::RESCAN_ON_EVACUATION);
- }
- }
- }
- }
-
- int npages = evacuation_candidates_.length();
- { GCTracer::Scope gc_scope(
- tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
- ASSERT(p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-
- if (p->IsEvacuationCandidate()) {
- SlotsBuffer::UpdateSlotsRecordedIn(heap_,
- p->slots_buffer(),
- code_slots_filtering_required);
- if (FLAG_trace_fragmentation) {
- PrintF(" page %p slots buffer: %d\n",
- reinterpret_cast<void*>(p),
- SlotsBuffer::SizeOfChain(p->slots_buffer()));
- }
-
- // Important: skip list should be cleared only after roots were updated
- // because root iteration traverses the stack and might have to find
- // code objects from non-updated pc pointing into evacuation candidate.
- SkipList* list = p->skip_list();
- if (list != NULL) list->Clear();
- } else {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
- reinterpret_cast<intptr_t>(p));
- }
- PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
-
- switch (space->identity()) {
- case OLD_DATA_SPACE:
- SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
- break;
- case OLD_POINTER_SPACE:
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
- space, p, &updating_visitor);
- break;
- case CODE_SPACE:
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
- space, p, &updating_visitor);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- }
- }
-
- GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
-
- // Update pointers from cells.
- HeapObjectIterator cell_iterator(heap_->cell_space());
- for (HeapObject* cell = cell_iterator.Next();
- cell != NULL;
- cell = cell_iterator.Next()) {
- if (cell->IsJSGlobalPropertyCell()) {
- Address value_address =
- reinterpret_cast<Address>(cell) +
- (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
- updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
- }
- }
-
- // Update pointer from the native contexts list.
- updating_visitor.VisitPointer(heap_->native_contexts_list_address());
-
- heap_->string_table()->Iterate(&updating_visitor);
-
- // Update pointers from external string table.
- heap_->UpdateReferencesInExternalStringTable(
- &UpdateReferenceInExternalStringTableEntry);
-
- // Update pointers in the new error object list.
- heap_->error_object_list()->UpdateReferences();
-
- if (!FLAG_watch_ic_patching) {
- // Update JSFunction pointers from the runtime profiler.
- heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
- &updating_visitor);
- }
-
- EvacuationWeakObjectRetainer evacuation_object_retainer;
- heap()->ProcessWeakReferences(&evacuation_object_retainer);
-
- // Visit invalidated code (we ignored all slots on it) and clear mark-bits
- // under it.
- ProcessInvalidatedCode(&updating_visitor);
-
- heap_->isolate()->inner_pointer_to_code_cache()->Flush();
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- VerifyEvacuation(heap_);
- }
-#endif
-
- slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
- ASSERT(migration_slots_buffer_ == NULL);
-}
-
-
-void MarkCompactCollector::ReleaseEvacuationCandidates() {
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
- if (!p->IsEvacuationCandidate()) continue;
- PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- space->Free(p->area_start(), p->area_size());
- p->set_scan_on_scavenge(false);
- slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
- p->ResetLiveBytes();
- space->ReleasePage(p);
- }
- evacuation_candidates_.Rewind(0);
- compacting_ = false;
-}
-
-
-static const int kStartTableEntriesPerLine = 5;
-static const int kStartTableLines = 171;
-static const int kStartTableInvalidLine = 127;
-static const int kStartTableUnusedEntry = 126;
-
-#define _ kStartTableUnusedEntry
-#define X kStartTableInvalidLine
-// Mark-bit to object start offset table.
-//
-// The line is indexed by the mark bits in a byte. The first number on
-// the line describes the number of live object starts for the line and the
-// other numbers on the line describe the offsets (in words) of the object
-// starts.
-//
-// Since objects are at least 2 words large we don't have entries for two
-// consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
-char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
- 0, _, _, _, _, // 0
- 1, 0, _, _, _, // 1
- 1, 1, _, _, _, // 2
- X, _, _, _, _, // 3
- 1, 2, _, _, _, // 4
- 2, 0, 2, _, _, // 5
- X, _, _, _, _, // 6
- X, _, _, _, _, // 7
- 1, 3, _, _, _, // 8
- 2, 0, 3, _, _, // 9
- 2, 1, 3, _, _, // 10
- X, _, _, _, _, // 11
- X, _, _, _, _, // 12
- X, _, _, _, _, // 13
- X, _, _, _, _, // 14
- X, _, _, _, _, // 15
- 1, 4, _, _, _, // 16
- 2, 0, 4, _, _, // 17
- 2, 1, 4, _, _, // 18
- X, _, _, _, _, // 19
- 2, 2, 4, _, _, // 20
- 3, 0, 2, 4, _, // 21
- X, _, _, _, _, // 22
- X, _, _, _, _, // 23
- X, _, _, _, _, // 24
- X, _, _, _, _, // 25
- X, _, _, _, _, // 26
- X, _, _, _, _, // 27
- X, _, _, _, _, // 28
- X, _, _, _, _, // 29
- X, _, _, _, _, // 30
- X, _, _, _, _, // 31
- 1, 5, _, _, _, // 32
- 2, 0, 5, _, _, // 33
- 2, 1, 5, _, _, // 34
- X, _, _, _, _, // 35
- 2, 2, 5, _, _, // 36
- 3, 0, 2, 5, _, // 37
- X, _, _, _, _, // 38
- X, _, _, _, _, // 39
- 2, 3, 5, _, _, // 40
- 3, 0, 3, 5, _, // 41
- 3, 1, 3, 5, _, // 42
- X, _, _, _, _, // 43
- X, _, _, _, _, // 44
- X, _, _, _, _, // 45
- X, _, _, _, _, // 46
- X, _, _, _, _, // 47
- X, _, _, _, _, // 48
- X, _, _, _, _, // 49
- X, _, _, _, _, // 50
- X, _, _, _, _, // 51
- X, _, _, _, _, // 52
- X, _, _, _, _, // 53
- X, _, _, _, _, // 54
- X, _, _, _, _, // 55
- X, _, _, _, _, // 56
- X, _, _, _, _, // 57
- X, _, _, _, _, // 58
- X, _, _, _, _, // 59
- X, _, _, _, _, // 60
- X, _, _, _, _, // 61
- X, _, _, _, _, // 62
- X, _, _, _, _, // 63
- 1, 6, _, _, _, // 64
- 2, 0, 6, _, _, // 65
- 2, 1, 6, _, _, // 66
- X, _, _, _, _, // 67
- 2, 2, 6, _, _, // 68
- 3, 0, 2, 6, _, // 69
- X, _, _, _, _, // 70
- X, _, _, _, _, // 71
- 2, 3, 6, _, _, // 72
- 3, 0, 3, 6, _, // 73
- 3, 1, 3, 6, _, // 74
- X, _, _, _, _, // 75
- X, _, _, _, _, // 76
- X, _, _, _, _, // 77
- X, _, _, _, _, // 78
- X, _, _, _, _, // 79
- 2, 4, 6, _, _, // 80
- 3, 0, 4, 6, _, // 81
- 3, 1, 4, 6, _, // 82
- X, _, _, _, _, // 83
- 3, 2, 4, 6, _, // 84
- 4, 0, 2, 4, 6, // 85
- X, _, _, _, _, // 86
- X, _, _, _, _, // 87
- X, _, _, _, _, // 88
- X, _, _, _, _, // 89
- X, _, _, _, _, // 90
- X, _, _, _, _, // 91
- X, _, _, _, _, // 92
- X, _, _, _, _, // 93
- X, _, _, _, _, // 94
- X, _, _, _, _, // 95
- X, _, _, _, _, // 96
- X, _, _, _, _, // 97
- X, _, _, _, _, // 98
- X, _, _, _, _, // 99
- X, _, _, _, _, // 100
- X, _, _, _, _, // 101
- X, _, _, _, _, // 102
- X, _, _, _, _, // 103
- X, _, _, _, _, // 104
- X, _, _, _, _, // 105
- X, _, _, _, _, // 106
- X, _, _, _, _, // 107
- X, _, _, _, _, // 108
- X, _, _, _, _, // 109
- X, _, _, _, _, // 110
- X, _, _, _, _, // 111
- X, _, _, _, _, // 112
- X, _, _, _, _, // 113
- X, _, _, _, _, // 114
- X, _, _, _, _, // 115
- X, _, _, _, _, // 116
- X, _, _, _, _, // 117
- X, _, _, _, _, // 118
- X, _, _, _, _, // 119
- X, _, _, _, _, // 120
- X, _, _, _, _, // 121
- X, _, _, _, _, // 122
- X, _, _, _, _, // 123
- X, _, _, _, _, // 124
- X, _, _, _, _, // 125
- X, _, _, _, _, // 126
- X, _, _, _, _, // 127
- 1, 7, _, _, _, // 128
- 2, 0, 7, _, _, // 129
- 2, 1, 7, _, _, // 130
- X, _, _, _, _, // 131
- 2, 2, 7, _, _, // 132
- 3, 0, 2, 7, _, // 133
- X, _, _, _, _, // 134
- X, _, _, _, _, // 135
- 2, 3, 7, _, _, // 136
- 3, 0, 3, 7, _, // 137
- 3, 1, 3, 7, _, // 138
- X, _, _, _, _, // 139
- X, _, _, _, _, // 140
- X, _, _, _, _, // 141
- X, _, _, _, _, // 142
- X, _, _, _, _, // 143
- 2, 4, 7, _, _, // 144
- 3, 0, 4, 7, _, // 145
- 3, 1, 4, 7, _, // 146
- X, _, _, _, _, // 147
- 3, 2, 4, 7, _, // 148
- 4, 0, 2, 4, 7, // 149
- X, _, _, _, _, // 150
- X, _, _, _, _, // 151
- X, _, _, _, _, // 152
- X, _, _, _, _, // 153
- X, _, _, _, _, // 154
- X, _, _, _, _, // 155
- X, _, _, _, _, // 156
- X, _, _, _, _, // 157
- X, _, _, _, _, // 158
- X, _, _, _, _, // 159
- 2, 5, 7, _, _, // 160
- 3, 0, 5, 7, _, // 161
- 3, 1, 5, 7, _, // 162
- X, _, _, _, _, // 163
- 3, 2, 5, 7, _, // 164
- 4, 0, 2, 5, 7, // 165
- X, _, _, _, _, // 166
- X, _, _, _, _, // 167
- 3, 3, 5, 7, _, // 168
- 4, 0, 3, 5, 7, // 169
- 4, 1, 3, 5, 7 // 170
-};
-#undef _
-#undef X
-
-
-// Takes a word of mark bits. Returns the number of objects that start in the
-// range. Puts the offsets of the words in the supplied array.
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
- int objects = 0;
- int offset = 0;
-
- // No consecutive 1 bits.
- ASSERT((mark_bits & 0x180) != 0x180);
- ASSERT((mark_bits & 0x18000) != 0x18000);
- ASSERT((mark_bits & 0x1800000) != 0x1800000);
-
- while (mark_bits != 0) {
- int byte = (mark_bits & 0xff);
- mark_bits >>= 8;
- if (byte != 0) {
- ASSERT(byte < kStartTableLines); // No consecutive 1 bits.
- char* table = kStartTable + byte * kStartTableEntriesPerLine;
- int objects_in_these_8_words = table[0];
- ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
- ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
- for (int i = 0; i < objects_in_these_8_words; i++) {
- starts[objects++] = offset + table[1 + i];
- }
- }
- offset += 8;
- }
- return objects;
-}
-
-
-static inline Address DigestFreeStart(Address approximate_free_start,
- uint32_t free_start_cell) {
- ASSERT(free_start_cell != 0);
-
- // No consecutive 1 bits.
- ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
-
- int offsets[16];
- uint32_t cell = free_start_cell;
- int offset_of_last_live;
- if ((cell & 0x80000000u) != 0) {
- // This case would overflow below.
- offset_of_last_live = 31;
- } else {
- // Remove all but one bit, the most significant. This is an optimization
- // that may or may not be worthwhile.
- cell |= cell >> 16;
- cell |= cell >> 8;
- cell |= cell >> 4;
- cell |= cell >> 2;
- cell |= cell >> 1;
- cell = (cell + 1) >> 1;
- int live_objects = MarkWordToObjectStarts(cell, offsets);
- ASSERT(live_objects == 1);
- offset_of_last_live = offsets[live_objects - 1];
- }
- Address last_live_start =
- approximate_free_start + offset_of_last_live * kPointerSize;
- HeapObject* last_live = HeapObject::FromAddress(last_live_start);
- Address free_start = last_live_start + last_live->Size();
- return free_start;
-}
-
-
-static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
- ASSERT(cell != 0);
-
- // No consecutive 1 bits.
- ASSERT((cell & (cell << 1)) == 0);
-
- int offsets[16];
- if (cell == 0x80000000u) { // Avoid overflow below.
- return block_address + 31 * kPointerSize;
- }
- uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
- ASSERT((first_set_bit & cell) == first_set_bit);
- int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
- ASSERT(live_objects == 1);
- USE(live_objects);
- return block_address + offsets[0] * kPointerSize;
-}
-
-
-template<MarkCompactCollector::SweepingParallelism mode>
-static intptr_t Free(PagedSpace* space,
- FreeList* free_list,
- Address start,
- int size) {
- if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
- return space->Free(start, size);
- } else {
- return size - free_list->Free(start, size);
- }
-}
-
-
-// Force instantiation of templatized SweepConservatively method for
-// SWEEP_SEQUENTIALLY mode.
-template intptr_t MarkCompactCollector::
- SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
- PagedSpace*, FreeList*, Page*);
-
-
-// Force instantiation of templatized SweepConservatively method for
-// SWEEP_IN_PARALLEL mode.
-template intptr_t MarkCompactCollector::
- SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
- PagedSpace*, FreeList*, Page*);
-
-
-// Sweeps a space conservatively. After this has been done the larger free
-// spaces have been put on the free list and the smaller ones have been
-// ignored and left untouched. A free space is always either ignored or put
-// on the free list, never split up into two parts. This is important
-// because it means that any FreeSpace maps left actually describe a region of
-// memory that can be ignored when scanning. Dead objects other than free
-// spaces will not contain the free space map.
-template<MarkCompactCollector::SweepingParallelism mode>
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
- FreeList* free_list,
- Page* p) {
- ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
- ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
- free_list != NULL) ||
- (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
- free_list == NULL));
-
- MarkBit::CellType* cells = p->markbits()->cells();
- p->MarkSweptConservatively();
-
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_end())));
-
- int cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_start())));
-
- intptr_t freed_bytes = 0;
-
- // This is the start of the 32 word block that we are currently looking at.
- Address block_address = p->area_start();
-
- // Skip over all the dead objects at the start of the page and mark them free.
- for (;
- cell_index < last_cell_index;
- cell_index++, block_address += 32 * kPointerSize) {
- if (cells[cell_index] != 0) break;
- }
- size_t size = block_address - p->area_start();
- if (cell_index == last_cell_index) {
- freed_bytes += Free<mode>(space, free_list, p->area_start(),
- static_cast<int>(size));
- ASSERT_EQ(0, p->LiveBytes());
- return freed_bytes;
- }
- // Grow the size of the start-of-page free space a little to get up to the
- // first live object.
- Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
- // Free the first free space.
- size = free_end - p->area_start();
- freed_bytes += Free<mode>(space, free_list, p->area_start(),
- static_cast<int>(size));
-
- // The start of the current free area is represented in undigested form by
- // the address of the last 32-word section that contained a live object and
- // the marking bitmap for that cell, which describes where the live object
- // started. Unless we find a large free space in the bitmap we will not
- // digest this pair into a real address. We start the iteration here at the
- // first word in the marking bit map that indicates a live object.
- Address free_start = block_address;
- uint32_t free_start_cell = cells[cell_index];
-
- for ( ;
- cell_index < last_cell_index;
- cell_index++, block_address += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(block_address))));
- uint32_t cell = cells[cell_index];
- if (cell != 0) {
- // We have a live object. Check approximately whether it is more than 32
- // words since the last live object.
- if (block_address - free_start > 32 * kPointerSize) {
- free_start = DigestFreeStart(free_start, free_start_cell);
- if (block_address - free_start > 32 * kPointerSize) {
- // Now that we know the exact start of the free space it still looks
- // like we have a large enough free space to be worth bothering with.
- // so now we need to find the start of the first live object at the
- // end of the free space.
- free_end = StartOfLiveObject(block_address, cell);
- freed_bytes += Free<mode>(space, free_list, free_start,
- static_cast<int>(free_end - free_start));
- }
- }
- // Update our undigested record of where the current free area started.
- free_start = block_address;
- free_start_cell = cell;
- // Clear marking bits for current cell.
- cells[cell_index] = 0;
- }
- }
-
- // Handle the free space at the end of the page.
- if (block_address - free_start > 32 * kPointerSize) {
- free_start = DigestFreeStart(free_start, free_start_cell);
- freed_bytes += Free<mode>(space, free_list, free_start,
- static_cast<int>(block_address - free_start));
- }
-
- p->ResetLiveBytes();
- return freed_bytes;
-}
-
-
-void MarkCompactCollector::SweepInParallel(PagedSpace* space,
- FreeList* private_free_list,
- FreeList* free_list) {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
-
- if (p->TryParallelSweeping()) {
- SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
- free_list->Concatenate(private_free_list);
- }
- }
-}
-
-
-void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
- space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
- sweeper == LAZY_CONSERVATIVE ||
- sweeper == PARALLEL_CONSERVATIVE ||
- sweeper == CONCURRENT_CONSERVATIVE);
- space->ClearStats();
-
- PageIterator it(space);
-
- intptr_t freed_bytes = 0;
- int pages_swept = 0;
- bool lazy_sweeping_active = false;
- bool unused_page_present = false;
-
- while (it.has_next()) {
- Page* p = it.next();
-
- ASSERT(p->parallel_sweeping() == 0);
- // Clear sweeping flags indicating that marking bits are still intact.
- p->ClearSweptPrecisely();
- p->ClearSweptConservatively();
-
- if (p->IsEvacuationCandidate()) {
- ASSERT(evacuation_candidates_.length() > 0);
- continue;
- }
-
- if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- // Will be processed in EvacuateNewSpaceAndCandidates.
- continue;
- }
-
- // One unused page is kept, all further are released before sweeping them.
- if (p->LiveBytes() == 0) {
- if (unused_page_present) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
- reinterpret_cast<intptr_t>(p));
- }
- // Adjust unswept free bytes because releasing a page expects said
- // counter to be accurate for unswept pages.
- space->IncreaseUnsweptFreeBytes(p);
- space->ReleasePage(p);
- continue;
- }
- unused_page_present = true;
- }
-
- if (lazy_sweeping_active) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
- reinterpret_cast<intptr_t>(p));
- }
- space->IncreaseUnsweptFreeBytes(p);
- continue;
- }
-
- switch (sweeper) {
- case CONSERVATIVE: {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
- reinterpret_cast<intptr_t>(p));
- }
- SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
- pages_swept++;
- break;
- }
- case LAZY_CONSERVATIVE: {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
- reinterpret_cast<intptr_t>(p));
- }
- freed_bytes += SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
- pages_swept++;
- space->SetPagesToSweep(p->next_page());
- lazy_sweeping_active = true;
- break;
- }
- case CONCURRENT_CONSERVATIVE:
- case PARALLEL_CONSERVATIVE: {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
- reinterpret_cast<intptr_t>(p));
- }
- p->set_parallel_sweeping(1);
- space->IncreaseUnsweptFreeBytes(p);
- break;
- }
- case PRECISE: {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
- reinterpret_cast<intptr_t>(p));
- }
- if (space->identity() == CODE_SPACE) {
- SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
- } else {
- SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
- }
- pages_swept++;
- break;
- }
- default: {
- UNREACHABLE();
- }
- }
- }
-
- if (FLAG_gc_verbose) {
- PrintF("SweepSpace: %s (%d pages swept)\n",
- AllocationSpaceName(space->identity()),
- pages_swept);
- }
-
- // Give pages that are queued to be freed back to the OS.
- heap()->FreeQueuedChunks();
-}
-
-
-void MarkCompactCollector::SweepSpaces() {
- GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
-#ifdef DEBUG
- state_ = SWEEP_SPACES;
-#endif
- SweeperType how_to_sweep =
- FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
- if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
- if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
- if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
- if (sweep_precisely_) how_to_sweep = PRECISE;
- // Noncompacting collections simply sweep the spaces to clear the mark
- // bits and free the nonlive blocks (for old and map spaces). We sweep
- // the map space last because freeing non-live maps overwrites them and
- // the other spaces rely on possibly non-live maps to get the sizes for
- // non-live objects.
-
- SweepSpace(heap()->old_pointer_space(), how_to_sweep);
- SweepSpace(heap()->old_data_space(), how_to_sweep);
-
- if (how_to_sweep == PARALLEL_CONSERVATIVE ||
- how_to_sweep == CONCURRENT_CONSERVATIVE) {
- // TODO(hpayer): fix race with concurrent sweeper
- StartSweeperThreads();
- }
-
- if (how_to_sweep == PARALLEL_CONSERVATIVE) {
- WaitUntilSweepingCompleted();
- }
-
- RemoveDeadInvalidatedCode();
- SweepSpace(heap()->code_space(), PRECISE);
-
- SweepSpace(heap()->cell_space(), PRECISE);
-
- EvacuateNewSpaceAndCandidates();
-
- // ClearNonLiveTransitions depends on precise sweeping of map space to
- // detect whether unmarked map became dead in this collection or in one
- // of the previous ones.
- SweepSpace(heap()->map_space(), PRECISE);
-
- // Deallocate unmarked objects and clear marked bits for marked objects.
- heap_->lo_space()->FreeUnmarkedObjects();
-
- if (how_to_sweep != CONCURRENT_CONSERVATIVE) {
- FinalizeSweeping();
- }
-}
-
-
-void MarkCompactCollector::EnableCodeFlushing(bool enable) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (heap()->isolate()->debug()->IsLoaded() ||
- heap()->isolate()->debug()->has_break_points()) {
- enable = false;
- }
-#endif
-
- if (enable) {
- if (code_flusher_ != NULL) return;
- code_flusher_ = new CodeFlusher(heap()->isolate());
- } else {
- if (code_flusher_ == NULL) return;
- code_flusher_->EvictAllCandidates();
- delete code_flusher_;
- code_flusher_ = NULL;
- }
-}
-
-
-// TODO(1466) ReportDeleteIfNeeded is not called currently.
-// Our profiling tools do not expect intersections between
-// code objects. We should either reenable it or change our tools.
-void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
- Isolate* isolate) {
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (obj->IsCode()) {
- GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
- }
-#endif
- if (obj->IsCode()) {
- PROFILE(isolate, CodeDeleteEvent(obj->address()));
- }
-}
-
-
-void MarkCompactCollector::Initialize() {
- MarkCompactMarkingVisitor::Initialize();
- IncrementalMarking::Initialize();
-}
-
-
-bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
- return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
-}
-
-
-bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address,
- SlotType type,
- Address addr,
- AdditionMode mode) {
- SlotsBuffer* buffer = *buffer_address;
- if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
- if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
- allocator->DeallocateChain(buffer_address);
- return false;
- }
- buffer = allocator->AllocateBuffer(buffer);
- *buffer_address = buffer;
- }
- ASSERT(buffer->HasSpaceForTypedSlot());
- buffer->Add(reinterpret_cast<ObjectSlot>(type));
- buffer->Add(reinterpret_cast<ObjectSlot>(addr));
- return true;
-}
-
-
-static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
- if (RelocInfo::IsCodeTarget(rmode)) {
- return SlotsBuffer::CODE_TARGET_SLOT;
- } else if (RelocInfo::IsEmbeddedObject(rmode)) {
- return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
- } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
- return SlotsBuffer::DEBUG_TARGET_SLOT;
- } else if (RelocInfo::IsJSReturn(rmode)) {
- return SlotsBuffer::JS_RETURN_SLOT;
- }
- UNREACHABLE();
- return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
-}
-
-
-void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
- Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
- if (target_page->IsEvacuationCandidate() &&
- (rinfo->host() == NULL ||
- !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
- if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
- target_page->slots_buffer_address(),
- SlotTypeForRMode(rinfo->rmode()),
- rinfo->pc(),
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
- EvictEvacuationCandidate(target_page);
- }
- }
-}
-
-
-void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
- Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
- if (target_page->IsEvacuationCandidate() &&
- !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
- if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
- target_page->slots_buffer_address(),
- SlotsBuffer::CODE_ENTRY_SLOT,
- slot,
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
- EvictEvacuationCandidate(target_page);
- }
- }
-}
-
-
-void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
- ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
- if (is_compacting()) {
- Code* host = heap()->isolate()->inner_pointer_to_code_cache()->
- GcSafeFindCodeForInnerPointer(pc);
- MarkBit mark_bit = Marking::MarkBitFrom(host);
- if (Marking::IsBlack(mark_bit)) {
- RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
- RecordRelocSlot(&rinfo, target);
- }
- }
-}
-
-
-static inline SlotsBuffer::SlotType DecodeSlotType(
- SlotsBuffer::ObjectSlot slot) {
- return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
-}
-
-
-void SlotsBuffer::UpdateSlots(Heap* heap) {
- PointersUpdatingVisitor v(heap);
-
- for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
- ObjectSlot slot = slots_[slot_idx];
- if (!IsTypedSlot(slot)) {
- PointersUpdatingVisitor::UpdateSlot(heap, slot);
- } else {
- ++slot_idx;
- ASSERT(slot_idx < idx_);
- UpdateSlot(&v,
- DecodeSlotType(slot),
- reinterpret_cast<Address>(slots_[slot_idx]));
- }
- }
-}
-
-
-void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
- PointersUpdatingVisitor v(heap);
-
- for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
- ObjectSlot slot = slots_[slot_idx];
- if (!IsTypedSlot(slot)) {
- if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
- PointersUpdatingVisitor::UpdateSlot(heap, slot);
- }
- } else {
- ++slot_idx;
- ASSERT(slot_idx < idx_);
- Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
- if (!IsOnInvalidatedCodeObject(pc)) {
- UpdateSlot(&v,
- DecodeSlotType(slot),
- reinterpret_cast<Address>(slots_[slot_idx]));
- }
- }
- }
-}
-
-
-SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
- return new SlotsBuffer(next_buffer);
-}
-
-
-void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
- delete buffer;
-}
-
-
-void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
- SlotsBuffer* buffer = *buffer_address;
- while (buffer != NULL) {
- SlotsBuffer* next_buffer = buffer->next();
- DeallocateBuffer(buffer);
- buffer = next_buffer;
- }
- *buffer_address = NULL;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mark-compact.h b/src/3rdparty/v8/src/mark-compact.h
deleted file mode 100644
index b5d60fd..0000000
--- a/src/3rdparty/v8/src/mark-compact.h
+++ /dev/null
@@ -1,911 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MARK_COMPACT_H_
-#define V8_MARK_COMPACT_H_
-
-#include "compiler-intrinsics.h"
-#include "spaces.h"
-
-namespace v8 {
-namespace internal {
-
-// Callback function, returns whether an object is alive. The heap size
-// of the object is returned in size. It optionally updates the offset
-// to the first live object in the page (only used for old and map objects).
-typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
-
-// Forward declarations.
-class CodeFlusher;
-class GCTracer;
-class MarkCompactCollector;
-class MarkingVisitor;
-class RootMarkingVisitor;
-
-
-class Marking {
- public:
- explicit Marking(Heap* heap)
- : heap_(heap) {
- }
-
- INLINE(static MarkBit MarkBitFrom(Address addr));
-
- INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
- return MarkBitFrom(reinterpret_cast<Address>(obj));
- }
-
- // Impossible markbits: 01
- static const char* kImpossibleBitPattern;
- INLINE(static bool IsImpossible(MarkBit mark_bit)) {
- return !mark_bit.Get() && mark_bit.Next().Get();
- }
-
- // Black markbits: 10 - this is required by the sweeper.
- static const char* kBlackBitPattern;
- INLINE(static bool IsBlack(MarkBit mark_bit)) {
- return mark_bit.Get() && !mark_bit.Next().Get();
- }
-
- // White markbits: 00 - this is required by the mark bit clearer.
- static const char* kWhiteBitPattern;
- INLINE(static bool IsWhite(MarkBit mark_bit)) {
- return !mark_bit.Get();
- }
-
- // Grey markbits: 11
- static const char* kGreyBitPattern;
- INLINE(static bool IsGrey(MarkBit mark_bit)) {
- return mark_bit.Get() && mark_bit.Next().Get();
- }
-
- INLINE(static void MarkBlack(MarkBit mark_bit)) {
- mark_bit.Set();
- mark_bit.Next().Clear();
- }
-
- INLINE(static void BlackToGrey(MarkBit markbit)) {
- markbit.Next().Set();
- }
-
- INLINE(static void WhiteToGrey(MarkBit markbit)) {
- markbit.Set();
- markbit.Next().Set();
- }
-
- INLINE(static void GreyToBlack(MarkBit markbit)) {
- markbit.Next().Clear();
- }
-
- INLINE(static void BlackToGrey(HeapObject* obj)) {
- BlackToGrey(MarkBitFrom(obj));
- }
-
- INLINE(static void AnyToGrey(MarkBit markbit)) {
- markbit.Set();
- markbit.Next().Set();
- }
-
- // Returns true if the the object whose mark is transferred is marked black.
- bool TransferMark(Address old_start, Address new_start);
-
-#ifdef DEBUG
- enum ObjectColor {
- BLACK_OBJECT,
- WHITE_OBJECT,
- GREY_OBJECT,
- IMPOSSIBLE_COLOR
- };
-
- static const char* ColorName(ObjectColor color) {
- switch (color) {
- case BLACK_OBJECT: return "black";
- case WHITE_OBJECT: return "white";
- case GREY_OBJECT: return "grey";
- case IMPOSSIBLE_COLOR: return "impossible";
- }
- return "error";
- }
-
- static ObjectColor Color(HeapObject* obj) {
- return Color(Marking::MarkBitFrom(obj));
- }
-
- static ObjectColor Color(MarkBit mark_bit) {
- if (IsBlack(mark_bit)) return BLACK_OBJECT;
- if (IsWhite(mark_bit)) return WHITE_OBJECT;
- if (IsGrey(mark_bit)) return GREY_OBJECT;
- UNREACHABLE();
- return IMPOSSIBLE_COLOR;
- }
-#endif
-
- // Returns true if the transferred color is black.
- INLINE(static bool TransferColor(HeapObject* from,
- HeapObject* to)) {
- MarkBit from_mark_bit = MarkBitFrom(from);
- MarkBit to_mark_bit = MarkBitFrom(to);
- bool is_black = false;
- if (from_mark_bit.Get()) {
- to_mark_bit.Set();
- is_black = true; // Looks black so far.
- }
- if (from_mark_bit.Next().Get()) {
- to_mark_bit.Next().Set();
- is_black = false; // Was actually gray.
- }
- return is_black;
- }
-
- private:
- Heap* heap_;
-};
-
-// ----------------------------------------------------------------------------
-// Marking deque for tracing live objects.
-class MarkingDeque {
- public:
- MarkingDeque()
- : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
-
- void Initialize(Address low, Address high) {
- HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
- HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
- array_ = obj_low;
- mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
- top_ = bottom_ = 0;
- overflowed_ = false;
- }
-
- inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
-
- inline bool IsEmpty() { return top_ == bottom_; }
-
- bool overflowed() const { return overflowed_; }
-
- void ClearOverflowed() { overflowed_ = false; }
-
- void SetOverflowed() { overflowed_ = true; }
-
- // Push the (marked) object on the marking stack if there is room,
- // otherwise mark the object as overflowed and wait for a rescan of the
- // heap.
- INLINE(void PushBlack(HeapObject* object)) {
- ASSERT(object->IsHeapObject());
- if (IsFull()) {
- Marking::BlackToGrey(object);
- MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
- SetOverflowed();
- } else {
- array_[top_] = object;
- top_ = ((top_ + 1) & mask_);
- }
- }
-
- INLINE(void PushGrey(HeapObject* object)) {
- ASSERT(object->IsHeapObject());
- if (IsFull()) {
- SetOverflowed();
- } else {
- array_[top_] = object;
- top_ = ((top_ + 1) & mask_);
- }
- }
-
- INLINE(HeapObject* Pop()) {
- ASSERT(!IsEmpty());
- top_ = ((top_ - 1) & mask_);
- HeapObject* object = array_[top_];
- ASSERT(object->IsHeapObject());
- return object;
- }
-
- INLINE(void UnshiftGrey(HeapObject* object)) {
- ASSERT(object->IsHeapObject());
- if (IsFull()) {
- SetOverflowed();
- } else {
- bottom_ = ((bottom_ - 1) & mask_);
- array_[bottom_] = object;
- }
- }
-
- HeapObject** array() { return array_; }
- int bottom() { return bottom_; }
- int top() { return top_; }
- int mask() { return mask_; }
- void set_top(int top) { top_ = top; }
-
- private:
- HeapObject** array_;
- // array_[(top - 1) & mask_] is the top element in the deque. The Deque is
- // empty when top_ == bottom_. It is full when top_ + 1 == bottom
- // (mod mask + 1).
- int top_;
- int bottom_;
- int mask_;
- bool overflowed_;
-
- DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
-};
-
-
-class SlotsBufferAllocator {
- public:
- SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
- void DeallocateBuffer(SlotsBuffer* buffer);
-
- void DeallocateChain(SlotsBuffer** buffer_address);
-};
-
-
-// SlotsBuffer records a sequence of slots that has to be updated
-// after live objects were relocated from evacuation candidates.
-// All slots are either untyped or typed:
-// - Untyped slots are expected to contain a tagged object pointer.
-// They are recorded by an address.
-// - Typed slots are expected to contain an encoded pointer to a heap
-// object where the way of encoding depends on the type of the slot.
-// They are recorded as a pair (SlotType, slot address).
-// We assume that zero-page is never mapped this allows us to distinguish
-// untyped slots from typed slots during iteration by a simple comparison:
-// if element of slots buffer is less than NUMBER_OF_SLOT_TYPES then it
-// is the first element of typed slot's pair.
-class SlotsBuffer {
- public:
- typedef Object** ObjectSlot;
-
- explicit SlotsBuffer(SlotsBuffer* next_buffer)
- : idx_(0), chain_length_(1), next_(next_buffer) {
- if (next_ != NULL) {
- chain_length_ = next_->chain_length_ + 1;
- }
- }
-
- ~SlotsBuffer() {
- }
-
- void Add(ObjectSlot slot) {
- ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
- slots_[idx_++] = slot;
- }
-
- enum SlotType {
- EMBEDDED_OBJECT_SLOT,
- RELOCATED_CODE_OBJECT,
- CODE_TARGET_SLOT,
- CODE_ENTRY_SLOT,
- DEBUG_TARGET_SLOT,
- JS_RETURN_SLOT,
- NUMBER_OF_SLOT_TYPES
- };
-
- static const char* SlotTypeToString(SlotType type) {
- switch (type) {
- case EMBEDDED_OBJECT_SLOT:
- return "EMBEDDED_OBJECT_SLOT";
- case RELOCATED_CODE_OBJECT:
- return "RELOCATED_CODE_OBJECT";
- case CODE_TARGET_SLOT:
- return "CODE_TARGET_SLOT";
- case CODE_ENTRY_SLOT:
- return "CODE_ENTRY_SLOT";
- case DEBUG_TARGET_SLOT:
- return "DEBUG_TARGET_SLOT";
- case JS_RETURN_SLOT:
- return "JS_RETURN_SLOT";
- case NUMBER_OF_SLOT_TYPES:
- return "NUMBER_OF_SLOT_TYPES";
- }
- return "UNKNOWN SlotType";
- }
-
- void UpdateSlots(Heap* heap);
-
- void UpdateSlotsWithFilter(Heap* heap);
-
- SlotsBuffer* next() { return next_; }
-
- static int SizeOfChain(SlotsBuffer* buffer) {
- if (buffer == NULL) return 0;
- return static_cast<int>(buffer->idx_ +
- (buffer->chain_length_ - 1) * kNumberOfElements);
- }
-
- inline bool IsFull() {
- return idx_ == kNumberOfElements;
- }
-
- inline bool HasSpaceForTypedSlot() {
- return idx_ < kNumberOfElements - 1;
- }
-
- static void UpdateSlotsRecordedIn(Heap* heap,
- SlotsBuffer* buffer,
- bool code_slots_filtering_required) {
- while (buffer != NULL) {
- if (code_slots_filtering_required) {
- buffer->UpdateSlotsWithFilter(heap);
- } else {
- buffer->UpdateSlots(heap);
- }
- buffer = buffer->next();
- }
- }
-
- enum AdditionMode {
- FAIL_ON_OVERFLOW,
- IGNORE_OVERFLOW
- };
-
- static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
- return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
- }
-
- INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address,
- ObjectSlot slot,
- AdditionMode mode)) {
- SlotsBuffer* buffer = *buffer_address;
- if (buffer == NULL || buffer->IsFull()) {
- if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
- allocator->DeallocateChain(buffer_address);
- return false;
- }
- buffer = allocator->AllocateBuffer(buffer);
- *buffer_address = buffer;
- }
- buffer->Add(slot);
- return true;
- }
-
- static bool IsTypedSlot(ObjectSlot slot);
-
- static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address,
- SlotType type,
- Address addr,
- AdditionMode mode);
-
- static const int kNumberOfElements = 1021;
-
- private:
- static const int kChainLengthThreshold = 15;
-
- intptr_t idx_;
- intptr_t chain_length_;
- SlotsBuffer* next_;
- ObjectSlot slots_[kNumberOfElements];
-};
-
-
-// CodeFlusher collects candidates for code flushing during marking and
-// processes those candidates after marking has completed in order to
-// reset those functions referencing code objects that would otherwise
-// be unreachable. Code objects can be referenced in two ways:
-// - SharedFunctionInfo references unoptimized code.
-// - JSFunction references either unoptimized or optimized code.
-// We are not allowed to flush unoptimized code for functions that got
-// optimized or inlined into optimized code, because we might bailout
-// into the unoptimized code again during deoptimization.
-class CodeFlusher {
- public:
- explicit CodeFlusher(Isolate* isolate)
- : isolate_(isolate),
- jsfunction_candidates_head_(NULL),
- shared_function_info_candidates_head_(NULL) {}
-
- void AddCandidate(SharedFunctionInfo* shared_info) {
- if (GetNextCandidate(shared_info) == NULL) {
- SetNextCandidate(shared_info, shared_function_info_candidates_head_);
- shared_function_info_candidates_head_ = shared_info;
- } else {
- // TODO(mstarzinger): Active in release mode to flush out problems.
- // Should be turned back into an ASSERT or removed completely.
- CHECK(ContainsCandidate(shared_info));
- }
- }
-
- void AddCandidate(JSFunction* function) {
- ASSERT(function->code() == function->shared()->code());
- if (GetNextCandidate(function)->IsUndefined()) {
- SetNextCandidate(function, jsfunction_candidates_head_);
- jsfunction_candidates_head_ = function;
- }
- }
-
- bool ContainsCandidate(SharedFunctionInfo* shared_info);
-
- void EvictCandidate(SharedFunctionInfo* shared_info);
- void EvictCandidate(JSFunction* function);
-
- void ProcessCandidates() {
- ProcessSharedFunctionInfoCandidates();
- ProcessJSFunctionCandidates();
- }
-
- void EvictAllCandidates() {
- EvictJSFunctionCandidates();
- EvictSharedFunctionInfoCandidates();
- }
-
- void IteratePointersToFromSpace(ObjectVisitor* v);
-
- private:
- void ProcessJSFunctionCandidates();
- void ProcessSharedFunctionInfoCandidates();
- void EvictJSFunctionCandidates();
- void EvictSharedFunctionInfoCandidates();
-
- static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
- return reinterpret_cast<JSFunction**>(
- HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
- }
-
- static JSFunction* GetNextCandidate(JSFunction* candidate) {
- Object* next_candidate = candidate->next_function_link();
- return reinterpret_cast<JSFunction*>(next_candidate);
- }
-
- static void SetNextCandidate(JSFunction* candidate,
- JSFunction* next_candidate) {
- candidate->set_next_function_link(next_candidate);
- }
-
- static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
- ASSERT(undefined->IsUndefined());
- candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
- }
-
- static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
- Object* next_candidate = candidate->code()->gc_metadata();
- return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
- }
-
- static void SetNextCandidate(SharedFunctionInfo* candidate,
- SharedFunctionInfo* next_candidate) {
- candidate->code()->set_gc_metadata(next_candidate);
- }
-
- static void ClearNextCandidate(SharedFunctionInfo* candidate) {
- candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
- }
-
- Isolate* isolate_;
- JSFunction* jsfunction_candidates_head_;
- SharedFunctionInfo* shared_function_info_candidates_head_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
-};
-
-
-// Defined in isolate.h.
-class ThreadLocalTop;
-
-
-// -------------------------------------------------------------------------
-// Mark-Compact collector
-class MarkCompactCollector {
- public:
- // Type of functions to compute forwarding addresses of objects in
- // compacted spaces. Given an object and its size, return a (non-failure)
- // Object* that will be the object after forwarding. There is a separate
- // allocation function for each (compactable) space based on the location
- // of the object before compaction.
- typedef MaybeObject* (*AllocationFunction)(Heap* heap,
- HeapObject* object,
- int object_size);
-
- // Type of functions to encode the forwarding address for an object.
- // Given the object, its size, and the new (non-failure) object it will be
- // forwarded to, encode the forwarding address. For paged spaces, the
- // 'offset' input/output parameter contains the offset of the forwarded
- // object from the forwarding address of the previous live object in the
- // page as input, and is updated to contain the offset to be used for the
- // next live object in the same page. For spaces using a different
- // encoding (i.e., contiguous spaces), the offset parameter is ignored.
- typedef void (*EncodingFunction)(Heap* heap,
- HeapObject* old_object,
- int object_size,
- Object* new_object,
- int* offset);
-
- // Type of functions to process non-live objects.
- typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);
-
- // Pointer to member function, used in IterateLiveObjects.
- typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
-
- // Set the global flags, it must be called before Prepare to take effect.
- inline void SetFlags(int flags);
-
- static void Initialize();
-
- void CollectEvacuationCandidates(PagedSpace* space);
-
- void AddEvacuationCandidate(Page* p);
-
- // Prepares for GC by resetting relocation info in old and map spaces and
- // choosing spaces to compact.
- void Prepare(GCTracer* tracer);
-
- // Performs a global garbage collection.
- void CollectGarbage();
-
- enum CompactionMode {
- INCREMENTAL_COMPACTION,
- NON_INCREMENTAL_COMPACTION
- };
-
- bool StartCompaction(CompactionMode mode);
-
- void AbortCompaction();
-
- // During a full GC, there is a stack-allocated GCTracer that is used for
- // bookkeeping information. Return a pointer to that tracer.
- GCTracer* tracer() { return tracer_; }
-
-#ifdef DEBUG
- // Checks whether performing mark-compact collection.
- bool in_use() { return state_ > PREPARE_GC; }
- bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
-#endif
-
- // Determine type of object and emit deletion log event.
- static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
-
- // Distinguishable invalid map encodings (for single word and multiple words)
- // that indicate free regions.
- static const uint32_t kSingleFreeEncoding = 0;
- static const uint32_t kMultiFreeEncoding = 1;
-
- static inline bool IsMarked(Object* obj);
-
- inline Heap* heap() const { return heap_; }
-
- CodeFlusher* code_flusher() { return code_flusher_; }
- inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
- void EnableCodeFlushing(bool enable);
-
- enum SweeperType {
- CONSERVATIVE,
- LAZY_CONSERVATIVE,
- PARALLEL_CONSERVATIVE,
- CONCURRENT_CONSERVATIVE,
- PRECISE
- };
-
- enum SweepingParallelism {
- SWEEP_SEQUENTIALLY,
- SWEEP_IN_PARALLEL
- };
-
-#ifdef VERIFY_HEAP
- void VerifyMarkbitsAreClean();
- static void VerifyMarkbitsAreClean(PagedSpace* space);
- static void VerifyMarkbitsAreClean(NewSpace* space);
- void VerifyWeakEmbeddedMapsInOptimizedCode();
- void VerifyOmittedPrototypeChecks();
-#endif
-
- // Sweep a single page from the given space conservatively.
- // Return a number of reclaimed bytes.
- template<SweepingParallelism type>
- static intptr_t SweepConservatively(PagedSpace* space,
- FreeList* free_list,
- Page* p);
-
- INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
- return Page::FromAddress(reinterpret_cast<Address>(anchor))->
- ShouldSkipEvacuationSlotRecording();
- }
-
- INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
- return Page::FromAddress(reinterpret_cast<Address>(host))->
- ShouldSkipEvacuationSlotRecording();
- }
-
- INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
- return Page::FromAddress(reinterpret_cast<Address>(obj))->
- IsEvacuationCandidate();
- }
-
- INLINE(void EvictEvacuationCandidate(Page* page)) {
- if (FLAG_trace_fragmentation) {
- PrintF("Page %p is too popular. Disabling evacuation.\n",
- reinterpret_cast<void*>(page));
- }
-
- // TODO(gc) If all evacuation candidates are too popular we
- // should stop slots recording entirely.
- page->ClearEvacuationCandidate();
-
- // We were not collecting slots on this page that point
- // to other evacuation candidates thus we have to
- // rescan the page after evacuation to discover and update all
- // pointers to evacuated objects.
- if (page->owner()->identity() == OLD_DATA_SPACE) {
- evacuation_candidates_.RemoveElement(page);
- } else {
- page->SetFlag(Page::RESCAN_ON_EVACUATION);
- }
- }
-
- void RecordRelocSlot(RelocInfo* rinfo, Object* target);
- void RecordCodeEntrySlot(Address slot, Code* target);
- void RecordCodeTargetPatch(Address pc, Code* target);
-
- INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
-
- void MigrateObject(Address dst,
- Address src,
- int size,
- AllocationSpace to_old_space);
-
- bool TryPromoteObject(HeapObject* object, int object_size);
-
- inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
- inline void set_encountered_weak_maps(Object* weak_map) {
- encountered_weak_maps_ = weak_map;
- }
-
- void InvalidateCode(Code* code);
-
- void ClearMarkbits();
-
- bool abort_incremental_marking() const { return abort_incremental_marking_; }
-
- bool is_compacting() const { return compacting_; }
-
- MarkingParity marking_parity() { return marking_parity_; }
-
- // Concurrent and parallel sweeping support.
- void SweepInParallel(PagedSpace* space,
- FreeList* private_free_list,
- FreeList* free_list);
-
- void WaitUntilSweepingCompleted();
-
- intptr_t StealMemoryFromSweeperThreads(PagedSpace* space);
-
- bool AreSweeperThreadsActivated();
-
- bool IsConcurrentSweepingInProgress();
-
- void FinalizeSweeping();
-
- // Parallel marking support.
- void MarkInParallel();
-
- void WaitUntilMarkingCompleted();
-
- private:
- MarkCompactCollector();
- ~MarkCompactCollector();
-
- bool MarkInvalidatedCode();
- void RemoveDeadInvalidatedCode();
- void ProcessInvalidatedCode(ObjectVisitor* visitor);
-
- void ReleaseEvacuationCandidates();
-
- void StartSweeperThreads();
-
-#ifdef DEBUG
- enum CollectorState {
- IDLE,
- PREPARE_GC,
- MARK_LIVE_OBJECTS,
- SWEEP_SPACES,
- ENCODE_FORWARDING_ADDRESSES,
- UPDATE_POINTERS,
- RELOCATE_OBJECTS
- };
-
- // The current stage of the collector.
- CollectorState state_;
-#endif
-
- // Global flag that forces sweeping to be precise, so we can traverse the
- // heap.
- bool sweep_precisely_;
-
- bool reduce_memory_footprint_;
-
- bool abort_incremental_marking_;
-
- MarkingParity marking_parity_;
-
- // True if we are collecting slots to perform evacuation from evacuation
- // candidates.
- bool compacting_;
-
- bool was_marked_incrementally_;
-
- // True if concurrent or parallel sweeping is currently in progress.
- bool sweeping_pending_;
-
- // A pointer to the current stack-allocated GC tracer object during a full
- // collection (NULL before and after).
- GCTracer* tracer_;
-
- SlotsBufferAllocator slots_buffer_allocator_;
-
- SlotsBuffer* migration_slots_buffer_;
-
- // Finishes GC, performs heap verification if enabled.
- void Finish();
-
- // -----------------------------------------------------------------------
- // Phase 1: Marking live objects.
- //
- // Before: The heap has been prepared for garbage collection by
- // MarkCompactCollector::Prepare() and is otherwise in its
- // normal state.
- //
- // After: Live objects are marked and non-live objects are unmarked.
-
- friend class RootMarkingVisitor;
- friend class MarkingVisitor;
- friend class MarkCompactMarkingVisitor;
- friend class CodeMarkingVisitor;
- friend class SharedFunctionInfoMarkingVisitor;
-
- // Mark code objects that are active on the stack to prevent them
- // from being flushed.
- void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
-
- void PrepareForCodeFlushing();
-
- // Marking operations for objects reachable from roots.
- void MarkLiveObjects();
-
- void AfterMarking();
-
- // Marks the object black and pushes it on the marking stack.
- // This is for non-incremental marking only.
- INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
-
- // Marks the object black assuming that it is not yet marked.
- // This is for non-incremental marking only.
- INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
-
- // Mark the heap roots and all objects reachable from them.
- void MarkRoots(RootMarkingVisitor* visitor);
-
- // Mark the string table specially. References to internalized strings from
- // the string table are weak.
- void MarkStringTable();
-
- // Mark objects in implicit references groups if their parent object
- // is marked.
- void MarkImplicitRefGroups();
-
- // Mark all objects which are reachable due to host application
- // logic like object groups or implicit references' groups.
- void ProcessExternalMarking(RootMarkingVisitor* visitor);
-
- // Mark objects reachable (transitively) from objects in the marking stack
- // or overflowed in the heap.
- void ProcessMarkingDeque();
-
- // Mark objects reachable (transitively) from objects in the marking
- // stack. This function empties the marking stack, but may leave
- // overflowed objects in the heap, in which case the marking stack's
- // overflow flag will be set.
- void EmptyMarkingDeque();
-
- // Refill the marking stack with overflowed objects from the heap. This
- // function either leaves the marking stack full or clears the overflow
- // flag on the marking stack.
- void RefillMarkingDeque();
-
- // After reachable maps have been marked process per context object
- // literal map caches removing unmarked entries.
- void ProcessMapCaches();
-
- // Callback function for telling whether the object *p is an unmarked
- // heap object.
- static bool IsUnmarkedHeapObject(Object** p);
- static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
-
- // Map transitions from a live map to a dead map must be killed.
- // We replace them with a null descriptor, with the same key.
- void ClearNonLiveReferences();
- void ClearNonLivePrototypeTransitions(Map* map);
- void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
-
- void ClearAndDeoptimizeDependentCode(Map* map);
- void ClearNonLiveDependentCode(Map* map);
-
- // Marking detaches initial maps from SharedFunctionInfo objects
- // to make this reference weak. We need to reattach initial maps
- // back after collection. This is either done during
- // ClearNonLiveTransitions pass or by calling this function.
- void ReattachInitialMaps();
-
- // Mark all values associated with reachable keys in weak maps encountered
- // so far. This might push new object or even new weak maps onto the
- // marking stack.
- void ProcessWeakMaps();
-
- // After all reachable objects have been marked those weak map entries
- // with an unreachable key are removed from all encountered weak maps.
- // The linked list of all encountered weak maps is destroyed.
- void ClearWeakMaps();
-
- // -----------------------------------------------------------------------
- // Phase 2: Sweeping to clear mark bits and free non-live objects for
- // a non-compacting collection.
- //
- // Before: Live objects are marked and non-live objects are unmarked.
- //
- // After: Live objects are unmarked, non-live regions have been added to
- // their space's free list. Active eden semispace is compacted by
- // evacuation.
- //
-
- // If we are not compacting the heap, we simply sweep the spaces except
- // for the large object space, clearing mark bits and adding unmarked
- // regions to each space's free list.
- void SweepSpaces();
-
- void EvacuateNewSpace();
-
- void EvacuateLiveObjectsFromPage(Page* p);
-
- void EvacuatePages();
-
- void EvacuateNewSpaceAndCandidates();
-
- void SweepSpace(PagedSpace* space, SweeperType sweeper);
-
-#ifdef DEBUG
- friend class MarkObjectVisitor;
- static void VisitObject(HeapObject* obj);
-
- friend class UnmarkObjectVisitor;
- static void UnmarkObject(HeapObject* obj);
-#endif
-
- Heap* heap_;
- MarkingDeque marking_deque_;
- CodeFlusher* code_flusher_;
- Object* encountered_weak_maps_;
-
- List<Page*> evacuation_candidates_;
- List<Code*> invalidated_code_;
-
- friend class Heap;
-};
-
-
-const char* AllocationSpaceName(AllocationSpace space);
-
-} } // namespace v8::internal
-
-#endif // V8_MARK_COMPACT_H_
diff --git a/src/3rdparty/v8/src/marking-thread.cc b/src/3rdparty/v8/src/marking-thread.cc
deleted file mode 100644
index ac64381..0000000
--- a/src/3rdparty/v8/src/marking-thread.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "marking-thread.h"
-
-#include "v8.h"
-
-#include "isolate.h"
-#include "v8threads.h"
-
-namespace v8 {
-namespace internal {
-
-MarkingThread::MarkingThread(Isolate* isolate)
- : Thread("MarkingThread"),
- isolate_(isolate),
- heap_(isolate->heap()),
- start_marking_semaphore_(OS::CreateSemaphore(0)),
- end_marking_semaphore_(OS::CreateSemaphore(0)),
- stop_semaphore_(OS::CreateSemaphore(0)) {
- NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
- id_ = NoBarrier_AtomicIncrement(&id_counter_, 1);
-}
-
-
-Atomic32 MarkingThread::id_counter_ = -1;
-
-
-void MarkingThread::Run() {
- Isolate::SetIsolateThreadLocals(isolate_, NULL);
-
- while (true) {
- start_marking_semaphore_->Wait();
-
- if (Acquire_Load(&stop_thread_)) {
- stop_semaphore_->Signal();
- return;
- }
-
- end_marking_semaphore_->Signal();
- }
-}
-
-
-void MarkingThread::Stop() {
- Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
- start_marking_semaphore_->Signal();
- stop_semaphore_->Wait();
-}
-
-
-void MarkingThread::StartMarking() {
- start_marking_semaphore_->Signal();
-}
-
-
-void MarkingThread::WaitForMarkingThread() {
- end_marking_semaphore_->Wait();
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/marking-thread.h b/src/3rdparty/v8/src/marking-thread.h
deleted file mode 100644
index 9efa3af..0000000
--- a/src/3rdparty/v8/src/marking-thread.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MARKING_THREAD_H_
-#define V8_MARKING_THREAD_H_
-
-#include "atomicops.h"
-#include "flags.h"
-#include "platform.h"
-#include "v8utils.h"
-
-#include "spaces.h"
-
-#include "heap.h"
-
-namespace v8 {
-namespace internal {
-
-class MarkingThread : public Thread {
- public:
- explicit MarkingThread(Isolate* isolate);
-
- void Run();
- void Stop();
- void StartMarking();
- void WaitForMarkingThread();
-
- ~MarkingThread() {
- delete start_marking_semaphore_;
- delete end_marking_semaphore_;
- delete stop_semaphore_;
- }
-
- private:
- Isolate* isolate_;
- Heap* heap_;
- Semaphore* start_marking_semaphore_;
- Semaphore* end_marking_semaphore_;
- Semaphore* stop_semaphore_;
- volatile AtomicWord stop_thread_;
- int id_;
- static Atomic32 id_counter_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_MARKING_THREAD_H_
diff --git a/src/3rdparty/v8/src/math.js b/src/3rdparty/v8/src/math.js
deleted file mode 100644
index 4686328..0000000
--- a/src/3rdparty/v8/src/math.js
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Keep reference to original values of some global properties. This
-// has the added benefit that the code in this file is isolated from
-// changes to these properties.
-var $floor = MathFloor;
-var $abs = MathAbs;
-
-// Instance class name can only be set on functions. That is the only
-// purpose for MathConstructor.
-function MathConstructor() {}
-%FunctionSetInstanceClassName(MathConstructor, 'Math');
-var $Math = new MathConstructor();
-$Math.__proto__ = $Object.prototype;
-%SetProperty(global, "Math", $Math, DONT_ENUM);
-
-// ECMA 262 - 15.8.2.1
-function MathAbs(x) {
- if (%_IsSmi(x)) return x >= 0 ? x : -x;
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- if (x === 0) return 0; // To handle -0.
- return x > 0 ? x : -x;
-}
-
-// ECMA 262 - 15.8.2.2
-function MathAcos(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_acos(x);
-}
-
-// ECMA 262 - 15.8.2.3
-function MathAsin(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_asin(x);
-}
-
-// ECMA 262 - 15.8.2.4
-function MathAtan(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_atan(x);
-}
-
-// ECMA 262 - 15.8.2.5
-// The naming of y and x matches the spec, as does the order in which
-// ToNumber (valueOf) is called.
-function MathAtan2(y, x) {
- if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_atan2(y, x);
-}
-
-// ECMA 262 - 15.8.2.6
-function MathCeil(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_ceil(x);
-}
-
-// ECMA 262 - 15.8.2.7
-function MathCos(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathCos(x);
-}
-
-// ECMA 262 - 15.8.2.8
-function MathExp(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_exp(x);
-}
-
-// ECMA 262 - 15.8.2.9
-function MathFloor(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- // It's more common to call this with a positive number that's out
- // of range than negative numbers; check the upper bound first.
- if (x < 0x80000000 && x > 0) {
- // Numbers in the range [0, 2^31) can be floored by converting
- // them to an unsigned 32-bit value using the shift operator.
- // We avoid doing so for -0, because the result of Math.floor(-0)
- // has to be -0, which wouldn't be the case with the shift.
- return TO_UINT32(x);
- } else {
- return %Math_floor(x);
- }
-}
-
-// ECMA 262 - 15.8.2.10
-function MathLog(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathLog(x);
-}
-
-// ECMA 262 - 15.8.2.11
-function MathMax(arg1, arg2) { // length == 2
- var length = %_ArgumentsLength();
- if (length == 2) {
- if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
- if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
- if (arg2 > arg1) return arg2;
- if (arg1 > arg2) return arg1;
- if (arg1 == arg2) {
- // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be
- // a Smi or a heap number.
- return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg2 : arg1;
- }
- // All comparisons failed, one of the arguments must be NaN.
- return 0/0; // Compiler constant-folds this to NaN.
- }
- var r = -1/0; // Compiler constant-folds this to -Infinity.
- for (var i = 0; i < length; i++) {
- var n = %_Arguments(i);
- if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- // Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
- // a Smi or heap number.
- if (NUMBER_IS_NAN(n) || n > r ||
- (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) {
- r = n;
- }
- }
- return r;
-}
-
-// ECMA 262 - 15.8.2.12
-function MathMin(arg1, arg2) { // length == 2
- var length = %_ArgumentsLength();
- if (length == 2) {
- if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
- if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
- if (arg2 > arg1) return arg1;
- if (arg1 > arg2) return arg2;
- if (arg1 == arg2) {
- // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be
- // a Smi or a heap number.
- return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg1 : arg2;
- }
- // All comparisons failed, one of the arguments must be NaN.
- return 0/0; // Compiler constant-folds this to NaN.
- }
- var r = 1/0; // Compiler constant-folds this to Infinity.
- for (var i = 0; i < length; i++) {
- var n = %_Arguments(i);
- if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be a
- // Smi or a heap number.
- if (NUMBER_IS_NAN(n) || n < r ||
- (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) {
- r = n;
- }
- }
- return r;
-}
-
-// ECMA 262 - 15.8.2.13
-function MathPow(x, y) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
- return %_MathPow(x, y);
-}
-
-// ECMA 262 - 15.8.2.14
-function MathRandom() {
- return %_RandomHeapNumber();
-}
-
-// ECMA 262 - 15.8.2.15
-function MathRound(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %RoundNumber(x);
-}
-
-// ECMA 262 - 15.8.2.16
-function MathSin(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathSin(x);
-}
-
-// ECMA 262 - 15.8.2.17
-function MathSqrt(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathSqrt(x);
-}
-
-// ECMA 262 - 15.8.2.18
-function MathTan(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathTan(x);
-}
-
-
-// -------------------------------------------------------------------
-
-function SetUpMath() {
- %CheckIsBootstrapping();
- // Set up math constants.
- // ECMA-262, section 15.8.1.1.
- %OptimizeObjectForAddingMultipleProperties($Math, 8);
- %SetProperty($Math,
- "E",
- 2.7182818284590452354,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- // ECMA-262, section 15.8.1.2.
- %SetProperty($Math,
- "LN10",
- 2.302585092994046,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- // ECMA-262, section 15.8.1.3.
- %SetProperty($Math,
- "LN2",
- 0.6931471805599453,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- // ECMA-262, section 15.8.1.4.
- %SetProperty($Math,
- "LOG2E",
- 1.4426950408889634,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "LOG10E",
- 0.4342944819032518,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "PI",
- 3.1415926535897932,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "SQRT1_2",
- 0.7071067811865476,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "SQRT2",
- 1.4142135623730951,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %ToFastProperties($Math);
-
- // Set up non-enumerable functions of the Math object and
- // set their names.
- InstallFunctions($Math, DONT_ENUM, $Array(
- "random", MathRandom,
- "abs", MathAbs,
- "acos", MathAcos,
- "asin", MathAsin,
- "atan", MathAtan,
- "ceil", MathCeil,
- "cos", MathCos,
- "exp", MathExp,
- "floor", MathFloor,
- "log", MathLog,
- "round", MathRound,
- "sin", MathSin,
- "sqrt", MathSqrt,
- "tan", MathTan,
- "atan2", MathAtan2,
- "pow", MathPow,
- "max", MathMax,
- "min", MathMin
- ));
-}
-
-SetUpMath();
diff --git a/src/3rdparty/v8/src/messages.cc b/src/3rdparty/v8/src/messages.cc
deleted file mode 100644
index de18a4b..0000000
--- a/src/3rdparty/v8/src/messages.cc
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "execution.h"
-#include "messages.h"
-#include "spaces-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// If no message listeners have been registered this one is called
-// by default.
-void MessageHandler::DefaultMessageReport(Isolate* isolate,
- const MessageLocation* loc,
- Handle<Object> message_obj) {
- SmartArrayPointer<char> str = GetLocalizedMessage(isolate, message_obj);
- if (loc == NULL) {
- PrintF("%s\n", *str);
- } else {
- HandleScope scope(isolate);
- Handle<Object> data(loc->script()->name(), isolate);
- SmartArrayPointer<char> data_str;
- if (data->IsString())
- data_str = Handle<String>::cast(data)->ToCString(DISALLOW_NULLS);
- PrintF("%s:%i: %s\n", *data_str ? *data_str : "<unknown>",
- loc->start_pos(), *str);
- }
-}
-
-
-Handle<JSMessageObject> MessageHandler::MakeMessageObject(
- const char* type,
- MessageLocation* loc,
- Vector< Handle<Object> > args,
- Handle<String> stack_trace,
- Handle<JSArray> stack_frames) {
- Handle<String> type_handle = FACTORY->InternalizeUtf8String(type);
- Handle<FixedArray> arguments_elements =
- FACTORY->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- arguments_elements->set(i, *args[i]);
- }
- Handle<JSArray> arguments_handle =
- FACTORY->NewJSArrayWithElements(arguments_elements);
-
- int start = 0;
- int end = 0;
- Handle<Object> script_handle = FACTORY->undefined_value();
- if (loc) {
- start = loc->start_pos();
- end = loc->end_pos();
- script_handle = GetScriptWrapper(loc->script());
- }
-
- Handle<Object> stack_trace_handle = stack_trace.is_null()
- ? Handle<Object>::cast(FACTORY->undefined_value())
- : Handle<Object>::cast(stack_trace);
-
- Handle<Object> stack_frames_handle = stack_frames.is_null()
- ? Handle<Object>::cast(FACTORY->undefined_value())
- : Handle<Object>::cast(stack_frames);
-
- Handle<JSMessageObject> message =
- FACTORY->NewJSMessageObject(type_handle,
- arguments_handle,
- start,
- end,
- script_handle,
- stack_trace_handle,
- stack_frames_handle);
-
- return message;
-}
-
-
-void MessageHandler::ReportMessage(Isolate* isolate,
- MessageLocation* loc,
- Handle<Object> message) {
- // We are calling into embedder's code which can throw exceptions.
- // Thus we need to save current exception state, reset it to the clean one
- // and ignore scheduled exceptions callbacks can throw.
-
- // We pass the exception object into the message handler callback though.
- Object* exception_object = isolate->heap()->undefined_value();
- if (isolate->has_pending_exception()) {
- isolate->pending_exception()->ToObject(&exception_object);
- }
- Handle<Object> exception_handle(exception_object, isolate);
-
- Isolate::ExceptionScope exception_scope(isolate);
- isolate->clear_pending_exception();
- isolate->set_external_caught_exception(false);
-
- v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
- v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception_handle);
-
- v8::NeanderArray global_listeners(FACTORY->message_listeners());
- int global_length = global_listeners.length();
- if (global_length == 0) {
- DefaultMessageReport(isolate, loc, message);
- if (isolate->has_scheduled_exception()) {
- isolate->clear_scheduled_exception();
- }
- } else {
- for (int i = 0; i < global_length; i++) {
- HandleScope scope(isolate);
- if (global_listeners.get(i)->IsUndefined()) continue;
- v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
- Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
- v8::MessageCallback callback =
- FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
- Handle<Object> callback_data(listener.get(1), isolate);
- {
- // Do not allow exceptions to propagate.
- v8::TryCatch try_catch;
- callback(api_message_obj, callback_data->IsUndefined()
- ? api_exception_obj
- : v8::Utils::ToLocal(callback_data));
- }
- if (isolate->has_scheduled_exception()) {
- isolate->clear_scheduled_exception();
- }
- }
- }
-}
-
-
-Handle<String> MessageHandler::GetMessage(Isolate* isolate,
- Handle<Object> data) {
- Factory* factory = isolate->factory();
- Handle<String> fmt_str =
- factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("FormatMessage"));
- Handle<JSFunction> fun =
- Handle<JSFunction>(
- JSFunction::cast(
- isolate->js_builtins_object()->
- GetPropertyNoExceptionThrown(*fmt_str)));
- Handle<JSMessageObject> message = Handle<JSMessageObject>::cast(data);
- Handle<Object> argv[] = { Handle<Object>(message->type(), isolate),
- Handle<Object>(message->arguments(), isolate) };
-
- bool caught_exception;
- Handle<Object> result =
- Execution::TryCall(fun,
- isolate->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
-
- if (caught_exception || !result->IsString()) {
- return factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("<error>"));
- }
- Handle<String> result_string = Handle<String>::cast(result);
- // A string that has been obtained from JS code in this way is
- // likely to be a complicated ConsString of some sort. We flatten it
- // here to improve the efficiency of converting it to a C string and
- // other operations that are likely to take place (see GetLocalizedMessage
- // for example).
- FlattenString(result_string);
- return result_string;
-}
-
-
-SmartArrayPointer<char> MessageHandler::GetLocalizedMessage(
- Isolate* isolate,
- Handle<Object> data) {
- HandleScope scope(isolate);
- return GetMessage(isolate, data)->ToCString(DISALLOW_NULLS);
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/messages.h b/src/3rdparty/v8/src/messages.h
deleted file mode 100644
index 3361abe..0000000
--- a/src/3rdparty/v8/src/messages.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The infrastructure used for (localized) message reporting in V8.
-//
-// Note: there's a big unresolved issue about ownership of the data
-// structures used by this framework.
-
-#ifndef V8_MESSAGES_H_
-#define V8_MESSAGES_H_
-
-#include "handles-inl.h"
-
-// Forward declaration of MessageLocation.
-namespace v8 {
-namespace internal {
-class MessageLocation;
-} } // namespace v8::internal
-
-
-class V8Message {
- public:
- V8Message(char* type,
- v8::internal::Handle<v8::internal::JSArray> args,
- const v8::internal::MessageLocation* loc) :
- type_(type), args_(args), loc_(loc) { }
- char* type() const { return type_; }
- v8::internal::Handle<v8::internal::JSArray> args() const { return args_; }
- const v8::internal::MessageLocation* loc() const { return loc_; }
- private:
- char* type_;
- v8::internal::Handle<v8::internal::JSArray> const args_;
- const v8::internal::MessageLocation* loc_;
-};
-
-
-namespace v8 {
-namespace internal {
-
-struct Language;
-class SourceInfo;
-
-class MessageLocation {
- public:
- MessageLocation(Handle<Script> script,
- int start_pos,
- int end_pos)
- : script_(script),
- start_pos_(start_pos),
- end_pos_(end_pos) { }
- MessageLocation() : start_pos_(-1), end_pos_(-1) { }
-
- Handle<Script> script() const { return script_; }
- int start_pos() const { return start_pos_; }
- int end_pos() const { return end_pos_; }
-
- private:
- Handle<Script> script_;
- int start_pos_;
- int end_pos_;
-};
-
-
-// A message handler is a convenience interface for accessing the list
-// of message listeners registered in an environment
-class MessageHandler {
- public:
- // Returns a message object for the API to use.
- static Handle<JSMessageObject> MakeMessageObject(
- const char* type,
- MessageLocation* loc,
- Vector< Handle<Object> > args,
- Handle<String> stack_trace,
- Handle<JSArray> stack_frames);
-
- // Report a formatted message (needs JS allocation).
- static void ReportMessage(Isolate* isolate,
- MessageLocation* loc,
- Handle<Object> message);
-
- static void DefaultMessageReport(Isolate* isolate,
- const MessageLocation* loc,
- Handle<Object> message_obj);
- static Handle<String> GetMessage(Isolate* isolate, Handle<Object> data);
- static SmartArrayPointer<char> GetLocalizedMessage(Isolate* isolate,
- Handle<Object> data);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_MESSAGES_H_
diff --git a/src/3rdparty/v8/src/messages.js b/src/3rdparty/v8/src/messages.js
deleted file mode 100644
index 14ba73f..0000000
--- a/src/3rdparty/v8/src/messages.js
+++ /dev/null
@@ -1,1311 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// -------------------------------------------------------------------
-
-var kMessages = {
- // Error
- cyclic_proto: ["Cyclic __proto__ value"],
- code_gen_from_strings: ["%0"],
- // TypeError
- unexpected_token: ["Unexpected token ", "%0"],
- unexpected_token_number: ["Unexpected number"],
- unexpected_token_string: ["Unexpected string"],
- unexpected_token_identifier: ["Unexpected identifier"],
- unexpected_reserved: ["Unexpected reserved word"],
- unexpected_strict_reserved: ["Unexpected strict mode reserved word"],
- unexpected_eos: ["Unexpected end of input"],
- malformed_regexp: ["Invalid regular expression: /", "%0", "/: ", "%1"],
- unterminated_regexp: ["Invalid regular expression: missing /"],
- regexp_flags: ["Cannot supply flags when constructing one RegExp from another"],
- incompatible_method_receiver: ["Method ", "%0", " called on incompatible receiver ", "%1"],
- invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"],
- invalid_lhs_in_for_in: ["Invalid left-hand side in for-in"],
- invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"],
- invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"],
- multiple_defaults_in_switch: ["More than one default clause in switch statement"],
- newline_after_throw: ["Illegal newline after throw"],
- redeclaration: ["%0", " '", "%1", "' has already been declared"],
- no_catch_or_finally: ["Missing catch or finally after try"],
- unknown_label: ["Undefined label '", "%0", "'"],
- uncaught_exception: ["Uncaught ", "%0"],
- stack_trace: ["Stack Trace:\n", "%0"],
- called_non_callable: ["%0", " is not a function"],
- undefined_method: ["Object ", "%1", " has no method '", "%0", "'"],
- property_not_function: ["Property '", "%0", "' of object ", "%1", " is not a function"],
- cannot_convert_to_primitive: ["Cannot convert object to primitive value"],
- not_constructor: ["%0", " is not a constructor"],
- not_defined: ["%0", " is not defined"],
- non_object_property_load: ["Cannot read property '", "%0", "' of ", "%1"],
- non_object_property_store: ["Cannot set property '", "%0", "' of ", "%1"],
- non_object_property_call: ["Cannot call method '", "%0", "' of ", "%1"],
- with_expression: ["%0", " has no properties"],
- illegal_invocation: ["Illegal invocation"],
- no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
- apply_non_function: ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
- apply_wrong_args: ["Function.prototype.apply: Arguments list has wrong type"],
- invalid_in_operator_use: ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
- instanceof_function_expected: ["Expecting a function in instanceof check, but got ", "%0"],
- instanceof_nonobject_proto: ["Function has non-object prototype '", "%0", "' in instanceof check"],
- null_to_object: ["Cannot convert null to object"],
- reduce_no_initial: ["Reduce of empty array with no initial value"],
- getter_must_be_callable: ["Getter must be a function: ", "%0"],
- setter_must_be_callable: ["Setter must be a function: ", "%0"],
- value_and_accessor: ["Invalid property. A property cannot both have accessors and be writable or have a value, ", "%0"],
- proto_object_or_null: ["Object prototype may only be an Object or null"],
- property_desc_object: ["Property description must be an object: ", "%0"],
- redefine_disallowed: ["Cannot redefine property: ", "%0"],
- define_disallowed: ["Cannot define property:", "%0", ", object is not extensible."],
- non_extensible_proto: ["%0", " is not extensible"],
- handler_non_object: ["Proxy.", "%0", " called with non-object as handler"],
- proto_non_object: ["Proxy.", "%0", " called with non-object as prototype"],
- trap_function_expected: ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"],
- handler_trap_missing: ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
- handler_trap_must_be_callable: ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
- handler_returned_false: ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"],
- handler_returned_undefined: ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"],
- proxy_prop_not_configurable: ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"],
- proxy_non_object_prop_names: ["Trap '", "%1", "' returned non-object ", "%0"],
- proxy_repeated_prop_name: ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
- invalid_weakmap_key: ["Invalid value used as weak map key"],
- not_date_object: ["this is not a Date object."],
- observe_non_object: ["Object.", "%0", " cannot ", "%0", " non-object"],
- observe_non_function: ["Object.", "%0", " cannot deliver to non-function"],
- observe_callback_frozen: ["Object.observe cannot deliver to a frozen function object"],
- observe_type_non_string: ["Invalid changeRecord with non-string 'type' property"],
- observe_notify_non_notifier: ["notify called on non-notifier object"],
- // RangeError
- invalid_array_length: ["Invalid array length"],
- stack_overflow: ["Maximum call stack size exceeded"],
- invalid_time_value: ["Invalid time value"],
- // SyntaxError
- unable_to_parse: ["Parse error"],
- invalid_regexp_flags: ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
- invalid_regexp: ["Invalid RegExp pattern /", "%0", "/"],
- illegal_break: ["Illegal break statement"],
- illegal_continue: ["Illegal continue statement"],
- illegal_return: ["Illegal return statement"],
- illegal_let: ["Illegal let declaration outside extended mode"],
- error_loading_debugger: ["Error loading debugger"],
- no_input_to_regexp: ["No input to ", "%0"],
- invalid_json: ["String '", "%0", "' is not valid JSON"],
- circular_structure: ["Converting circular structure to JSON"],
- called_on_non_object: ["%0", " called on non-object"],
- called_on_null_or_undefined: ["%0", " called on null or undefined"],
- array_indexof_not_defined: ["Array.getIndexOf: Argument undefined"],
- object_not_extensible: ["Can't add property ", "%0", ", object is not extensible"],
- illegal_access: ["Illegal access"],
- invalid_preparser_data: ["Invalid preparser data for function ", "%0"],
- strict_mode_with: ["Strict mode code may not include a with statement"],
- strict_catch_variable: ["Catch variable may not be eval or arguments in strict mode"],
- too_many_arguments: ["Too many arguments in function call (only 32766 allowed)"],
- too_many_parameters: ["Too many parameters in function definition (only 32766 allowed)"],
- too_many_variables: ["Too many variables declared (only 131071 allowed)"],
- strict_param_name: ["Parameter name eval or arguments is not allowed in strict mode"],
- strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
- strict_var_name: ["Variable name may not be eval or arguments in strict mode"],
- strict_function_name: ["Function name may not be eval or arguments in strict mode"],
- strict_octal_literal: ["Octal literals are not allowed in strict mode."],
- strict_duplicate_property: ["Duplicate data property in object literal not allowed in strict mode"],
- accessor_data_property: ["Object literal may not have data and accessor property with the same name"],
- accessor_get_set: ["Object literal may not have multiple get/set accessors with the same name"],
- strict_lhs_assignment: ["Assignment to eval or arguments is not allowed in strict mode"],
- strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
- strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
- strict_reserved_word: ["Use of future reserved word in strict mode"],
- strict_delete: ["Delete of an unqualified identifier in strict mode."],
- strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
- strict_const: ["Use of const in strict mode."],
- strict_function: ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
- strict_read_only_property: ["Cannot assign to read only property '", "%0", "' of ", "%1"],
- strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"],
- strict_poison_pill: ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
- strict_caller: ["Illegal access to a strict mode caller function."],
- unprotected_let: ["Illegal let declaration in unprotected statement context."],
- unprotected_const: ["Illegal const declaration in unprotected statement context."],
- cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"],
- redef_external_array_element: ["Cannot redefine a property of an object with external array elements"],
- harmony_const_assign: ["Assignment to constant variable."],
- invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"],
- module_type_error: ["Module '", "%0", "' used improperly"],
- module_export_undefined: ["Export '", "%0", "' is not defined in module"],
-};
-
-
-function FormatString(format, args) {
- var result = "";
- var arg_num = 0;
- for (var i = 0; i < format.length; i++) {
- var str = format[i];
- if (str.length == 2 && %_StringCharCodeAt(str, 0) == 0x25) {
- // Two-char string starts with "%".
- var arg_num = (%_StringCharCodeAt(str, 1) - 0x30) >>> 0;
- if (arg_num < 4) {
- // str is one of %0, %1, %2 or %3.
- try {
- str = NoSideEffectToString(args[arg_num]);
- } catch (e) {
- if (%IsJSModule(args[arg_num]))
- str = "module";
- else if (IS_SPEC_OBJECT(args[arg_num]))
- str = "object";
- else
- str = "#<error>";
- }
- }
- }
- result += str;
- }
- return result;
-}
-
-
-function NoSideEffectToString(obj) {
- if (IS_STRING(obj)) return obj;
- if (IS_NUMBER(obj)) return %_NumberToString(obj);
- if (IS_BOOLEAN(obj)) return x ? 'true' : 'false';
- if (IS_UNDEFINED(obj)) return 'undefined';
- if (IS_NULL(obj)) return 'null';
- if (IS_FUNCTION(obj)) return %_CallFunction(obj, FunctionToString);
- if (IS_OBJECT(obj) && %GetDataProperty(obj, "toString") === ObjectToString) {
- var constructor = %GetDataProperty(obj, "constructor");
- if (typeof constructor == "function") {
- var constructorName = constructor.name;
- if (IS_STRING(constructorName) && constructorName !== "") {
- return "#<" + constructorName + ">";
- }
- }
- }
- if (IsNativeErrorObject(obj)) return %_CallFunction(obj, ErrorToString);
- return %_CallFunction(obj, ObjectToString);
-}
-
-
-// To check if something is a native error we need to check the
-// concrete native error types. It is not sufficient to use instanceof
-// since it possible to create an object that has Error.prototype on
-// its prototype chain. This is the case for DOMException for example.
-function IsNativeErrorObject(obj) {
- switch (%_ClassOf(obj)) {
- case 'Error':
- case 'EvalError':
- case 'RangeError':
- case 'ReferenceError':
- case 'SyntaxError':
- case 'TypeError':
- case 'URIError':
- return true;
- }
- return false;
-}
-
-
-// When formatting internally created error messages, do not
-// invoke overwritten error toString methods but explicitly use
-// the error to string method. This is to avoid leaking error
-// objects between script tags in a browser setting.
-function ToStringCheckErrorObject(obj) {
- if (IsNativeErrorObject(obj)) {
- return %_CallFunction(obj, ErrorToString);
- } else {
- return ToString(obj);
- }
-}
-
-
-function ToDetailString(obj) {
- if (obj != null && IS_OBJECT(obj) && obj.toString === ObjectToString) {
- var constructor = obj.constructor;
- if (typeof constructor == "function") {
- var constructorName = constructor.name;
- if (IS_STRING(constructorName) && constructorName !== "") {
- return "#<" + constructorName + ">";
- }
- }
- }
- return ToStringCheckErrorObject(obj);
-}
-
-
-function MakeGenericError(constructor, type, args) {
- if (IS_UNDEFINED(args)) args = [];
- return new constructor(FormatMessage(type, args));
-}
-
-
-/**
- * Set up the Script function and constructor.
- */
-%FunctionSetInstanceClassName(Script, 'Script');
-%SetProperty(Script.prototype, 'constructor', Script,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-%SetCode(Script, function(x) {
- // Script objects can only be created by the VM.
- throw new $Error("Not supported");
-});
-
-
-// Helper functions; called from the runtime system.
-function FormatMessage(type, args) {
- var format = kMessages[type];
- if (!format) return "<unknown message " + type + ">";
- return FormatString(format, args);
-}
-
-
-function GetLineNumber(message) {
- var start_position = %MessageGetStartPosition(message);
- if (start_position == -1) return kNoLineNumberInfo;
- var script = %MessageGetScript(message);
- var location = script.locationFromPosition(start_position, true);
- if (location == null) return kNoLineNumberInfo;
- return location.line + 1;
-}
-
-
-// Returns the source code line containing the given source
-// position, or the empty string if the position is invalid.
-function GetSourceLine(message) {
- var script = %MessageGetScript(message);
- var start_position = %MessageGetStartPosition(message);
- var location = script.locationFromPosition(start_position, true);
- if (location == null) return "";
- location.restrict();
- return location.sourceText();
-}
-
-
-function MakeTypeError(type, args) {
- return MakeGenericError($TypeError, type, args);
-}
-
-
-function MakeRangeError(type, args) {
- return MakeGenericError($RangeError, type, args);
-}
-
-
-function MakeSyntaxError(type, args) {
- return MakeGenericError($SyntaxError, type, args);
-}
-
-
-function MakeReferenceError(type, args) {
- return MakeGenericError($ReferenceError, type, args);
-}
-
-
-function MakeEvalError(type, args) {
- return MakeGenericError($EvalError, type, args);
-}
-
-
-function MakeError(type, args) {
- return MakeGenericError($Error, type, args);
-}
-
-/**
- * Find a line number given a specific source position.
- * @param {number} position The source position.
- * @return {number} 0 if input too small, -1 if input too large,
- else the line number.
- */
-function ScriptLineFromPosition(position) {
- var lower = 0;
- var upper = this.lineCount() - 1;
- var line_ends = this.line_ends;
-
- // We'll never find invalid positions so bail right away.
- if (position > line_ends[upper]) {
- return -1;
- }
-
- // This means we don't have to safe-guard indexing line_ends[i - 1].
- if (position <= line_ends[0]) {
- return 0;
- }
-
- // Binary search to find line # from position range.
- while (upper >= 1) {
- var i = (lower + upper) >> 1;
-
- if (position > line_ends[i]) {
- lower = i + 1;
- } else if (position <= line_ends[i - 1]) {
- upper = i - 1;
- } else {
- return i;
- }
- }
-
- return -1;
-}
-
-/**
- * Get information on a specific source position.
- * @param {number} position The source position
- * @param {boolean} include_resource_offset Set to true to have the resource
- * offset added to the location
- * @return {SourceLocation}
- * If line is negative or not in the source null is returned.
- */
-function ScriptLocationFromPosition(position,
- include_resource_offset) {
- var line = this.lineFromPosition(position);
- if (line == -1) return null;
-
- // Determine start, end and column.
- var line_ends = this.line_ends;
- var start = line == 0 ? 0 : line_ends[line - 1] + 1;
- var end = line_ends[line];
- if (end > 0 && %_CallFunction(this.source, end - 1, StringCharAt) == '\r') {
- end--;
- }
- var column = position - start;
-
- // Adjust according to the offset within the resource.
- if (include_resource_offset) {
- line += this.line_offset;
- if (line == this.line_offset) {
- column += this.column_offset;
- }
- }
-
- return new SourceLocation(this, position, line, column, start, end);
-}
-
-
-/**
- * Get information on a specific source line and column possibly offset by a
- * fixed source position. This function is used to find a source position from
- * a line and column position. The fixed source position offset is typically
- * used to find a source position in a function based on a line and column in
- * the source for the function alone. The offset passed will then be the
- * start position of the source for the function within the full script source.
- * @param {number} opt_line The line within the source. Default value is 0
- * @param {number} opt_column The column in within the line. Default value is 0
- * @param {number} opt_offset_position The offset from the begining of the
- * source from where the line and column calculation starts.
- * Default value is 0
- * @return {SourceLocation}
- * If line is negative or not in the source null is returned.
- */
-function ScriptLocationFromLine(opt_line, opt_column, opt_offset_position) {
- // Default is the first line in the script. Lines in the script is relative
- // to the offset within the resource.
- var line = 0;
- if (!IS_UNDEFINED(opt_line)) {
- line = opt_line - this.line_offset;
- }
-
- // Default is first column. If on the first line add the offset within the
- // resource.
- var column = opt_column || 0;
- if (line == 0) {
- column -= this.column_offset;
- }
-
- var offset_position = opt_offset_position || 0;
- if (line < 0 || column < 0 || offset_position < 0) return null;
- if (line == 0) {
- return this.locationFromPosition(offset_position + column, false);
- } else {
- // Find the line where the offset position is located.
- var offset_line = this.lineFromPosition(offset_position);
-
- if (offset_line == -1 || offset_line + line >= this.lineCount()) {
- return null;
- }
-
- return this.locationFromPosition(
- this.line_ends[offset_line + line - 1] + 1 + column); // line > 0 here.
- }
-}
-
-
-/**
- * Get a slice of source code from the script. The boundaries for the slice is
- * specified in lines.
- * @param {number} opt_from_line The first line (zero bound) in the slice.
- * Default is 0
- * @param {number} opt_to_column The last line (zero bound) in the slice (non
- * inclusive). Default is the number of lines in the script
- * @return {SourceSlice} The source slice or null of the parameters where
- * invalid
- */
-function ScriptSourceSlice(opt_from_line, opt_to_line) {
- var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset
- : opt_from_line;
- var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount()
- : opt_to_line;
-
- // Adjust according to the offset within the resource.
- from_line -= this.line_offset;
- to_line -= this.line_offset;
- if (from_line < 0) from_line = 0;
- if (to_line > this.lineCount()) to_line = this.lineCount();
-
- // Check parameters.
- if (from_line >= this.lineCount() ||
- to_line < 0 ||
- from_line > to_line) {
- return null;
- }
-
- var line_ends = this.line_ends;
- var from_position = from_line == 0 ? 0 : line_ends[from_line - 1] + 1;
- var to_position = to_line == 0 ? 0 : line_ends[to_line - 1] + 1;
-
- // Return a source slice with line numbers re-adjusted to the resource.
- return new SourceSlice(this,
- from_line + this.line_offset,
- to_line + this.line_offset,
- from_position, to_position);
-}
-
-
-function ScriptSourceLine(opt_line) {
- // Default is the first line in the script. Lines in the script are relative
- // to the offset within the resource.
- var line = 0;
- if (!IS_UNDEFINED(opt_line)) {
- line = opt_line - this.line_offset;
- }
-
- // Check parameter.
- if (line < 0 || this.lineCount() <= line) {
- return null;
- }
-
- // Return the source line.
- var line_ends = this.line_ends;
- var start = line == 0 ? 0 : line_ends[line - 1] + 1;
- var end = line_ends[line];
- return %_CallFunction(this.source, start, end, StringSubstring);
-}
-
-
-/**
- * Returns the number of source lines.
- * @return {number}
- * Number of source lines.
- */
-function ScriptLineCount() {
- // Return number of source lines.
- return this.line_ends.length;
-}
-
-
-/**
- * If sourceURL comment is available and script starts at zero returns sourceURL
- * comment contents. Otherwise, script name is returned. See
- * http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
- * for details on using //@ sourceURL comment to identify scritps that don't
- * have name.
- *
- * @return {?string} script name if present, value for //@ sourceURL comment
- * otherwise.
- */
-function ScriptNameOrSourceURL() {
- if (this.line_offset > 0 || this.column_offset > 0) {
- return this.name;
- }
-
- // The result is cached as on long scripts it takes noticable time to search
- // for the sourceURL.
- if (this.hasCachedNameOrSourceURL) {
- return this.cachedNameOrSourceURL;
- }
- this.hasCachedNameOrSourceURL = true;
-
- // TODO(608): the spaces in a regexp below had to be escaped as \040
- // because this file is being processed by js2c whose handling of spaces
- // in regexps is broken. Also, ['"] are excluded from allowed URLs to
- // avoid matches against sources that invoke evals with sourceURL.
- // A better solution would be to detect these special comments in
- // the scanner/parser.
- var source = ToString(this.source);
- var sourceUrlPos = %StringIndexOf(source, "sourceURL=", 0);
- this.cachedNameOrSourceURL = this.name;
- if (sourceUrlPos > 4) {
- var sourceUrlPattern =
- /\/\/@[\040\t]sourceURL=[\040\t]*([^\s\'\"]*)[\040\t]*$/gm;
- // Don't reuse lastMatchInfo here, so we create a new array with room
- // for four captures (array with length one longer than the index
- // of the fourth capture, where the numbering is zero-based).
- var matchInfo = new InternalArray(CAPTURE(3) + 1);
- var match =
- %_RegExpExec(sourceUrlPattern, source, sourceUrlPos - 4, matchInfo);
- if (match) {
- this.cachedNameOrSourceURL =
- %_SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
- }
- }
- return this.cachedNameOrSourceURL;
-}
-
-
-SetUpLockedPrototype(Script,
- $Array("source", "name", "line_ends", "line_offset", "column_offset",
- "cachedNameOrSourceURL", "hasCachedNameOrSourceURL" ),
- $Array(
- "lineFromPosition", ScriptLineFromPosition,
- "locationFromPosition", ScriptLocationFromPosition,
- "locationFromLine", ScriptLocationFromLine,
- "sourceSlice", ScriptSourceSlice,
- "sourceLine", ScriptSourceLine,
- "lineCount", ScriptLineCount,
- "nameOrSourceURL", ScriptNameOrSourceURL
- )
-);
-
-
-/**
- * Class for source location. A source location is a position within some
- * source with the following properties:
- * script : script object for the source
- * line : source line number
- * column : source column within the line
- * position : position within the source
- * start : position of start of source context (inclusive)
- * end : position of end of source context (not inclusive)
- * Source text for the source context is the character interval
- * [start, end[. In most cases end will point to a newline character.
- * It might point just past the final position of the source if the last
- * source line does not end with a newline character.
- * @param {Script} script The Script object for which this is a location
- * @param {number} position Source position for the location
- * @param {number} line The line number for the location
- * @param {number} column The column within the line for the location
- * @param {number} start Source position for start of source context
- * @param {number} end Source position for end of source context
- * @constructor
- */
-function SourceLocation(script, position, line, column, start, end) {
- this.script = script;
- this.position = position;
- this.line = line;
- this.column = column;
- this.start = start;
- this.end = end;
-}
-
-var kLineLengthLimit = 78;
-
-/**
- * Restrict source location start and end positions to make the source slice
- * no more that a certain number of characters wide.
- * @param {number} opt_limit The with limit of the source text with a default
- * of 78
- * @param {number} opt_before The number of characters to prefer before the
- * position with a default value of 10 less that the limit
- */
-function SourceLocationRestrict(opt_limit, opt_before) {
- // Find the actual limit to use.
- var limit;
- var before;
- if (!IS_UNDEFINED(opt_limit)) {
- limit = opt_limit;
- } else {
- limit = kLineLengthLimit;
- }
- if (!IS_UNDEFINED(opt_before)) {
- before = opt_before;
- } else {
- // If no before is specified center for small limits and perfer more source
- // before the the position that after for longer limits.
- if (limit <= 20) {
- before = $floor(limit / 2);
- } else {
- before = limit - 10;
- }
- }
- if (before >= limit) {
- before = limit - 1;
- }
-
- // If the [start, end[ interval is too big we restrict
- // it in one or both ends. We make sure to always produce
- // restricted intervals of maximum allowed size.
- if (this.end - this.start > limit) {
- var start_limit = this.position - before;
- var end_limit = this.position + limit - before;
- if (this.start < start_limit && end_limit < this.end) {
- this.start = start_limit;
- this.end = end_limit;
- } else if (this.start < start_limit) {
- this.start = this.end - limit;
- } else {
- this.end = this.start + limit;
- }
- }
-}
-
-
-/**
- * Get the source text for a SourceLocation
- * @return {String}
- * Source text for this location.
- */
-function SourceLocationSourceText() {
- return %_CallFunction(this.script.source,
- this.start,
- this.end,
- StringSubstring);
-}
-
-
-SetUpLockedPrototype(SourceLocation,
- $Array("script", "position", "line", "column", "start", "end"),
- $Array(
- "restrict", SourceLocationRestrict,
- "sourceText", SourceLocationSourceText
- )
-);
-
-
-/**
- * Class for a source slice. A source slice is a part of a script source with
- * the following properties:
- * script : script object for the source
- * from_line : line number for the first line in the slice
- * to_line : source line number for the last line in the slice
- * from_position : position of the first character in the slice
- * to_position : position of the last character in the slice
- * The to_line and to_position are not included in the slice, that is the lines
- * in the slice are [from_line, to_line[. Likewise the characters in the slice
- * are [from_position, to_position[.
- * @param {Script} script The Script object for the source slice
- * @param {number} from_line
- * @param {number} to_line
- * @param {number} from_position
- * @param {number} to_position
- * @constructor
- */
-function SourceSlice(script, from_line, to_line, from_position, to_position) {
- this.script = script;
- this.from_line = from_line;
- this.to_line = to_line;
- this.from_position = from_position;
- this.to_position = to_position;
-}
-
-/**
- * Get the source text for a SourceSlice
- * @return {String} Source text for this slice. The last line will include
- * the line terminating characters (if any)
- */
-function SourceSliceSourceText() {
- return %_CallFunction(this.script.source,
- this.from_position,
- this.to_position,
- StringSubstring);
-}
-
-SetUpLockedPrototype(SourceSlice,
- $Array("script", "from_line", "to_line", "from_position", "to_position"),
- $Array("sourceText", SourceSliceSourceText)
-);
-
-
-// Returns the offset of the given position within the containing
-// line.
-function GetPositionInLine(message) {
- var script = %MessageGetScript(message);
- var start_position = %MessageGetStartPosition(message);
- var location = script.locationFromPosition(start_position, false);
- if (location == null) return -1;
- location.restrict();
- return start_position - location.start;
-}
-
-
-function GetStackTraceLine(recv, fun, pos, isGlobal) {
- return new CallSite(recv, fun, pos).toString();
-}
-
-// ----------------------------------------------------------------------------
-// Error implementation
-
-function CallSite(receiver, fun, pos) {
- this.receiver = receiver;
- this.fun = fun;
- this.pos = pos;
-}
-
-function CallSiteGetThis() {
- return this.receiver;
-}
-
-function CallSiteGetTypeName() {
- return GetTypeName(this, false);
-}
-
-function CallSiteIsToplevel() {
- if (this.receiver == null) {
- return true;
- }
- return IS_GLOBAL(this.receiver);
-}
-
-function CallSiteIsEval() {
- var script = %FunctionGetScript(this.fun);
- return script && script.compilation_type == COMPILATION_TYPE_EVAL;
-}
-
-function CallSiteGetEvalOrigin() {
- var script = %FunctionGetScript(this.fun);
- return FormatEvalOrigin(script);
-}
-
-function CallSiteGetScriptNameOrSourceURL() {
- var script = %FunctionGetScript(this.fun);
- return script ? script.nameOrSourceURL() : null;
-}
-
-function CallSiteGetFunction() {
- return this.fun;
-}
-
-function CallSiteGetFunctionName() {
- // See if the function knows its own name
- var name = this.fun.name;
- if (name) {
- return name;
- }
- name = %FunctionGetInferredName(this.fun);
- if (name) {
- return name;
- }
- // Maybe this is an evaluation?
- var script = %FunctionGetScript(this.fun);
- if (script && script.compilation_type == COMPILATION_TYPE_EVAL) {
- return "eval";
- }
- return null;
-}
-
-function CallSiteGetMethodName() {
- // See if we can find a unique property on the receiver that holds
- // this function.
- var ownName = this.fun.name;
- if (ownName && this.receiver &&
- (%_CallFunction(this.receiver,
- ownName,
- ObjectLookupGetter) === this.fun ||
- %_CallFunction(this.receiver,
- ownName,
- ObjectLookupSetter) === this.fun ||
- (IS_OBJECT(this.receiver) &&
- %GetDataProperty(this.receiver, ownName) === this.fun))) {
- // To handle DontEnum properties we guess that the method has
- // the same name as the function.
- return ownName;
- }
- var name = null;
- for (var prop in this.receiver) {
- if (%_CallFunction(this.receiver, prop, ObjectLookupGetter) === this.fun ||
- %_CallFunction(this.receiver, prop, ObjectLookupSetter) === this.fun ||
- (IS_OBJECT(this.receiver) &&
- %GetDataProperty(this.receiver, prop) === this.fun)) {
- // If we find more than one match bail out to avoid confusion.
- if (name) {
- return null;
- }
- name = prop;
- }
- }
- if (name) {
- return name;
- }
- return null;
-}
-
-function CallSiteGetFileName() {
- var script = %FunctionGetScript(this.fun);
- return script ? script.name : null;
-}
-
-function CallSiteGetLineNumber() {
- if (this.pos == -1) {
- return null;
- }
- var script = %FunctionGetScript(this.fun);
- var location = null;
- if (script) {
- location = script.locationFromPosition(this.pos, true);
- }
- return location ? location.line + 1 : null;
-}
-
-function CallSiteGetColumnNumber() {
- if (this.pos == -1) {
- return null;
- }
- var script = %FunctionGetScript(this.fun);
- var location = null;
- if (script) {
- location = script.locationFromPosition(this.pos, true);
- }
- return location ? location.column + 1: null;
-}
-
-function CallSiteIsNative() {
- var script = %FunctionGetScript(this.fun);
- return script ? (script.type == TYPE_NATIVE) : false;
-}
-
-function CallSiteGetPosition() {
- return this.pos;
-}
-
-function CallSiteIsConstructor() {
- var receiver = this.receiver;
- var constructor =
- IS_OBJECT(receiver) ? %GetDataProperty(receiver, "constructor") : null;
- if (!constructor) return false;
- return this.fun === constructor;
-}
-
-function CallSiteToString() {
- var fileName;
- var fileLocation = "";
- if (this.isNative()) {
- fileLocation = "native";
- } else {
- if (this.isEval()) {
- fileName = this.getScriptNameOrSourceURL();
- if (!fileName) {
- fileLocation = this.getEvalOrigin();
- fileLocation += ", "; // Expecting source position to follow.
- }
- } else {
- fileName = this.getFileName();
- }
-
- if (fileName) {
- fileLocation += fileName;
- } else {
- // Source code does not originate from a file and is not native, but we
- // can still get the source position inside the source string, e.g. in
- // an eval string.
- fileLocation += "<anonymous>";
- }
- var lineNumber = this.getLineNumber();
- if (lineNumber != null) {
- fileLocation += ":" + lineNumber;
- var columnNumber = this.getColumnNumber();
- if (columnNumber) {
- fileLocation += ":" + columnNumber;
- }
- }
- }
-
- var line = "";
- var functionName = this.getFunctionName();
- var addSuffix = true;
- var isConstructor = this.isConstructor();
- var isMethodCall = !(this.isToplevel() || isConstructor);
- if (isMethodCall) {
- var typeName = GetTypeName(this, true);
- var methodName = this.getMethodName();
- if (functionName) {
- if (typeName &&
- %_CallFunction(functionName, typeName, StringIndexOf) != 0) {
- line += typeName + ".";
- }
- line += functionName;
- if (methodName &&
- (%_CallFunction(functionName, "." + methodName, StringIndexOf) !=
- functionName.length - methodName.length - 1)) {
- line += " [as " + methodName + "]";
- }
- } else {
- line += typeName + "." + (methodName || "<anonymous>");
- }
- } else if (isConstructor) {
- line += "new " + (functionName || "<anonymous>");
- } else if (functionName) {
- line += functionName;
- } else {
- line += fileLocation;
- addSuffix = false;
- }
- if (addSuffix) {
- line += " (" + fileLocation + ")";
- }
- return line;
-}
-
-SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
- "getThis", CallSiteGetThis,
- "getTypeName", CallSiteGetTypeName,
- "isToplevel", CallSiteIsToplevel,
- "isEval", CallSiteIsEval,
- "getEvalOrigin", CallSiteGetEvalOrigin,
- "getScriptNameOrSourceURL", CallSiteGetScriptNameOrSourceURL,
- "getFunction", CallSiteGetFunction,
- "getFunctionName", CallSiteGetFunctionName,
- "getMethodName", CallSiteGetMethodName,
- "getFileName", CallSiteGetFileName,
- "getLineNumber", CallSiteGetLineNumber,
- "getColumnNumber", CallSiteGetColumnNumber,
- "isNative", CallSiteIsNative,
- "getPosition", CallSiteGetPosition,
- "isConstructor", CallSiteIsConstructor,
- "toString", CallSiteToString
-));
-
-
-function FormatEvalOrigin(script) {
- var sourceURL = script.nameOrSourceURL();
- if (sourceURL) {
- return sourceURL;
- }
-
- var eval_origin = "eval at ";
- if (script.eval_from_function_name) {
- eval_origin += script.eval_from_function_name;
- } else {
- eval_origin += "<anonymous>";
- }
-
- var eval_from_script = script.eval_from_script;
- if (eval_from_script) {
- if (eval_from_script.compilation_type == COMPILATION_TYPE_EVAL) {
- // eval script originated from another eval.
- eval_origin += " (" + FormatEvalOrigin(eval_from_script) + ")";
- } else {
- // eval script originated from "real" source.
- if (eval_from_script.name) {
- eval_origin += " (" + eval_from_script.name;
- var location = eval_from_script.locationFromPosition(
- script.eval_from_script_position, true);
- if (location) {
- eval_origin += ":" + (location.line + 1);
- eval_origin += ":" + (location.column + 1);
- }
- eval_origin += ")";
- } else {
- eval_origin += " (unknown source)";
- }
- }
- }
-
- return eval_origin;
-}
-
-
-function FormatErrorString(error) {
- try {
- return %_CallFunction(error, ErrorToString);
- } catch (e) {
- try {
- return "<error: " + e + ">";
- } catch (ee) {
- return "<error>";
- }
- }
-}
-
-
-function GetStackFrames(raw_stack) {
- var frames = new InternalArray();
- for (var i = 0; i < raw_stack.length; i += 4) {
- var recv = raw_stack[i];
- var fun = raw_stack[i + 1];
- var code = raw_stack[i + 2];
- var pc = raw_stack[i + 3];
- var pos = %FunctionGetPositionForOffset(code, pc);
- frames.push(new CallSite(recv, fun, pos));
- }
- return frames;
-}
-
-
-function FormatStackTrace(error_string, frames) {
- var lines = new InternalArray();
- lines.push(error_string);
- for (var i = 0; i < frames.length; i++) {
- var frame = frames[i];
- var line;
- try {
- line = frame.toString();
- } catch (e) {
- try {
- line = "<error: " + e + ">";
- } catch (ee) {
- // Any code that reaches this point is seriously nasty!
- line = "<error>";
- }
- }
- lines.push(" at " + line);
- }
- return %_CallFunction(lines, "\n", ArrayJoin);
-}
-
-
-function GetTypeName(obj, requireConstructor) {
- var constructor = obj.receiver.constructor;
- if (!constructor) {
- return requireConstructor ? null :
- %_CallFunction(obj.receiver, ObjectToString);
- }
- var constructorName = constructor.name;
- if (!constructorName) {
- return requireConstructor ? null :
- %_CallFunction(obj.receiver, ObjectToString);
- }
- return constructorName;
-}
-
-
-// Flag to prevent recursive call of Error.prepareStackTrace.
-var formatting_custom_stack_trace = false;
-
-
-function captureStackTrace(obj, cons_opt) {
- var stackTraceLimit = $Error.stackTraceLimit;
- if (!stackTraceLimit || !IS_NUMBER(stackTraceLimit)) return;
- if (stackTraceLimit < 0 || stackTraceLimit > 10000) {
- stackTraceLimit = 10000;
- }
- var stack = %CollectStackTrace(obj,
- cons_opt ? cons_opt : captureStackTrace,
- stackTraceLimit);
-
- // Don't be lazy if the error stack formatting is custom (observable).
- if (IS_FUNCTION($Error.prepareStackTrace) && !formatting_custom_stack_trace) {
- var array = [];
- %MoveArrayContents(GetStackFrames(stack), array);
- formatting_custom_stack_trace = true;
- try {
- obj.stack = $Error.prepareStackTrace(obj, array);
- } catch (e) {
- throw e; // The custom formatting function threw. Rethrow.
- } finally {
- formatting_custom_stack_trace = false;
- }
- return;
- }
-
- var error_string = FormatErrorString(obj);
- // Note that 'obj' and 'this' maybe different when called on objects that
- // have the error object on its prototype chain. The getter replaces itself
- // with a data property as soon as the stack trace has been formatted.
- // The getter must not change the object layout as it may be called after GC.
- var getter = function() {
- if (IS_STRING(stack)) return stack;
- // Stack is still a raw array awaiting to be formatted.
- stack = FormatStackTrace(error_string, GetStackFrames(stack));
- // Release context value.
- error_string = void 0;
- return stack;
- };
- %MarkOneShotGetter(getter);
-
- // The 'stack' property of the receiver is set as data property. If
- // the receiver is the same as holder, this accessor pair is replaced.
- var setter = function(v) {
- %DefineOrRedefineDataProperty(this, 'stack', v, NONE);
- };
-
- %DefineOrRedefineAccessorProperty(obj, 'stack', getter, setter, DONT_ENUM);
-}
-
-
-function SetUpError() {
- // Define special error type constructors.
-
- var DefineError = function(f) {
- // Store the error function in both the global object
- // and the runtime object. The function is fetched
- // from the runtime object when throwing errors from
- // within the runtime system to avoid strange side
- // effects when overwriting the error functions from
- // user code.
- var name = f.name;
- %SetProperty(global, name, f, DONT_ENUM);
- %SetProperty(builtins, '$' + name, f, DONT_ENUM | DONT_DELETE | READ_ONLY);
- // Configure the error function.
- if (name == 'Error') {
- // The prototype of the Error object must itself be an error.
- // However, it can't be an instance of the Error object because
- // it hasn't been properly configured yet. Instead we create a
- // special not-a-true-error-but-close-enough object.
- var ErrorPrototype = function() {};
- %FunctionSetPrototype(ErrorPrototype, $Object.prototype);
- %FunctionSetInstanceClassName(ErrorPrototype, 'Error');
- %FunctionSetPrototype(f, new ErrorPrototype());
- } else {
- %FunctionSetPrototype(f, new $Error());
- }
- %FunctionSetInstanceClassName(f, 'Error');
- %SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
- %SetProperty(f.prototype, "name", name, DONT_ENUM);
- %SetCode(f, function(m) {
- if (%_IsConstructCall()) {
- // Define all the expected properties directly on the error
- // object. This avoids going through getters and setters defined
- // on prototype objects.
- %IgnoreAttributesAndSetProperty(this, 'stack', void 0, DONT_ENUM);
- if (!IS_UNDEFINED(m)) {
- %IgnoreAttributesAndSetProperty(
- this, 'message', ToString(m), DONT_ENUM);
- }
- captureStackTrace(this, f);
- } else {
- return new f(m);
- }
- });
- %SetNativeFlag(f);
- };
-
- DefineError(function Error() { });
- DefineError(function TypeError() { });
- DefineError(function RangeError() { });
- DefineError(function SyntaxError() { });
- DefineError(function ReferenceError() { });
- DefineError(function EvalError() { });
- DefineError(function URIError() { });
-}
-
-SetUpError();
-
-$Error.captureStackTrace = captureStackTrace;
-
-%SetProperty($Error.prototype, 'message', '', DONT_ENUM);
-
-// Global list of error objects visited during ErrorToString. This is
-// used to detect cycles in error toString formatting.
-var visited_errors = new InternalArray();
-var cyclic_error_marker = new $Object();
-
-function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
- // Climb the prototype chain until we find the holder.
- while (error && !%HasLocalProperty(error, name)) {
- error = error.__proto__;
- }
- if (error === null) return void 0;
- if (!IS_OBJECT(error)) return error[name];
- // If the property is an accessor on one of the predefined errors that can be
- // generated statically by the compiler, don't touch it. This is to address
- // http://code.google.com/p/chromium/issues/detail?id=69187
- var desc = %GetOwnProperty(error, name);
- if (desc && desc[IS_ACCESSOR_INDEX]) {
- var isName = name === "name";
- if (error === $ReferenceError.prototype)
- return isName ? "ReferenceError" : void 0;
- if (error === $SyntaxError.prototype)
- return isName ? "SyntaxError" : void 0;
- if (error === $TypeError.prototype)
- return isName ? "TypeError" : void 0;
- }
- // Otherwise, read normally.
- return error[name];
-}
-
-function ErrorToStringDetectCycle(error) {
- if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
- try {
- var name = GetPropertyWithoutInvokingMonkeyGetters(error, "name");
- name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
- var message = GetPropertyWithoutInvokingMonkeyGetters(error, "message");
- message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
- if (name === "") return message;
- if (message === "") return name;
- return name + ": " + message;
- } finally {
- visited_errors.length = visited_errors.length - 1;
- }
-}
-
-function ErrorToString() {
- if (!IS_SPEC_OBJECT(this)) {
- throw MakeTypeError("called_on_non_object", ["Error.prototype.toString"]);
- }
-
- try {
- return ErrorToStringDetectCycle(this);
- } catch(e) {
- // If this error message was encountered already return the empty
- // string for it instead of recursively formatting it.
- if (e === cyclic_error_marker) {
- return '';
- }
- throw e;
- }
-}
-
-
-InstallFunctions($Error.prototype, DONT_ENUM, ['toString', ErrorToString]);
-
-// Boilerplate for exceptions for stack overflows. Used from
-// Isolate::StackOverflow().
-function SetUpStackOverflowBoilerplate() {
- var boilerplate = MakeRangeError('stack_overflow', []);
-
- // The raw stack trace is stored as hidden property of the copy of this
- // boilerplate error object. Note that the receiver 'this' may not be that
- // error object copy, but can be found on the prototype chain of 'this'.
- // When the stack trace is formatted, this accessor property is replaced by
- // a data property.
- var error_string = boilerplate.name + ": " + boilerplate.message;
-
- // The getter must not change the object layout as it may be called after GC.
- function getter() {
- var holder = this;
- while (!IS_ERROR(holder)) {
- holder = %GetPrototype(holder);
- if (holder == null) return MakeSyntaxError('illegal_access', []);
- }
- var stack = %GetOverflowedStackTrace(holder);
- if (IS_STRING(stack)) return stack;
- if (IS_ARRAY(stack)) {
- var result = FormatStackTrace(error_string, GetStackFrames(stack));
- %SetOverflowedStackTrace(holder, result);
- return result;
- }
- return void 0;
- }
- %MarkOneShotGetter(getter);
-
- // The 'stack' property of the receiver is set as data property. If
- // the receiver is the same as holder, this accessor pair is replaced.
- function setter(v) {
- %DefineOrRedefineDataProperty(this, 'stack', v, NONE);
- // Release the stack trace that is stored as hidden property, if exists.
- %SetOverflowedStackTrace(this, void 0);
- }
-
- %DefineOrRedefineAccessorProperty(
- boilerplate, 'stack', getter, setter, DONT_ENUM);
-
- return boilerplate;
-}
-
-var kStackOverflowBoilerplate = SetUpStackOverflowBoilerplate();
diff --git a/src/3rdparty/v8/src/mips/assembler-mips-inl.h b/src/3rdparty/v8/src/mips/assembler-mips-inl.h
deleted file mode 100644
index 0499d36..0000000
--- a/src/3rdparty/v8/src/mips/assembler-mips-inl.h
+++ /dev/null
@@ -1,425 +0,0 @@
-
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-
-#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
-#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
-
-#include "mips/assembler-mips.h"
-
-#include "cpu.h"
-#include "debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Operand and MemOperand.
-
-Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
- rm_ = no_reg;
- imm32_ = immediate;
- rmode_ = rmode;
-}
-
-
-Operand::Operand(const ExternalReference& f) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(f.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Operand::Operand(Smi* value) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE32;
-}
-
-
-Operand::Operand(Register rm) {
- rm_ = rm;
-}
-
-
-bool Operand::is_reg() const {
- return rm_.is_valid();
-}
-
-
-int Register::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(FPU)) {
- return kMaxNumAllocatableRegisters;
- } else {
- return kMaxNumAllocatableRegisters - kGPRsPerNonFPUDouble;
- }
-}
-
-
-int DoubleRegister::NumRegisters() {
- if (CpuFeatures::IsSupported(FPU)) {
- return FPURegister::kMaxNumRegisters;
- } else {
- return 1;
- }
-}
-
-
-int DoubleRegister::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(FPU)) {
- return FPURegister::kMaxNumAllocatableRegisters;
- } else {
- return 1;
- }
-}
-
-
-int FPURegister::ToAllocationIndex(FPURegister reg) {
- ASSERT(reg.code() % 2 == 0);
- ASSERT(reg.code() / 2 < kMaxNumAllocatableRegisters);
- ASSERT(reg.is_valid());
- ASSERT(!reg.is(kDoubleRegZero));
- ASSERT(!reg.is(kLithiumScratchDouble));
- return (reg.code() / 2);
-}
-
-
-// -----------------------------------------------------------------------------
-// RelocInfo.
-
-void RelocInfo::apply(intptr_t delta) {
- if (IsCodeTarget(rmode_)) {
- uint32_t scope1 = (uint32_t) target_address() & ~kImm28Mask;
- uint32_t scope2 = reinterpret_cast<uint32_t>(pc_) & ~kImm28Mask;
-
- if (scope1 != scope2) {
- Assembler::JumpLabelToJumpRegister(pc_);
- }
- }
- if (IsInternalReference(rmode_)) {
- // Absolute code pointer inside code object moves with the code object.
- byte* p = reinterpret_cast<byte*>(pc_);
- int count = Assembler::RelocateInternalReference(p, delta);
- CPU::FlushICache(p, count * sizeof(uint32_t));
- }
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) ||
- rmode_ == RUNTIME_ENTRY ||
- rmode_ == EMBEDDED_OBJECT ||
- rmode_ == EXTERNAL_REFERENCE);
- // Read the address of the word containing the target_address in an
- // instruction stream.
- // The only architecture-independent user of this function is the serializer.
- // The serializer uses it to find out how many raw bytes of instruction to
- // output before the next target.
- // For an instruction like LUI/ORI where the target bits are mixed into the
- // instruction bits, the size of the target will be zero, indicating that the
- // serializer should not step forward in memory after a target is resolved
- // and written. In this case the target_address_address function should
- // return the end of the instructions to be patched, allowing the
- // deserializer to deserialize the instructions as raw bytes and put them in
- // place, ready to be patched with the target. After jump optimization,
- // that is the address of the instruction that follows J/JAL/JR/JALR
- // instruction.
- return reinterpret_cast<Address>(
- pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
-}
-
-
-int RelocInfo::target_address_size() {
- return Assembler::kSpecialTargetSize;
-}
-
-
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-Address Assembler::target_address_from_return_address(Address pc) {
- return pc - kCallTargetAddressOffset;
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_address_at(pc_)));
-}
-
-
-Object** RelocInfo::target_object_address() {
- // Provide a "natural pointer" to the embedded object,
- // which can be de-referenced during heap iteration.
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- reconstructed_obj_ptr_ =
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
- return &reconstructed_obj_ptr_;
-}
-
-
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
- if (mode == UPDATE_WRITE_BARRIER &&
- host() != NULL &&
- target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
- }
-}
-
-
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == EXTERNAL_REFERENCE);
- reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
- return &reconstructed_adr_ptr_;
-}
-
-
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
-}
-
-
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
-}
-
-
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
- Memory::Address_at(pc_) = address;
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
- }
-}
-
-
-static const int kNoCodeAgeSequenceLength = 7;
-
-Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- return Code::GetCodeFromTargetAddress(
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)) =
- stub->instruction_start();
-}
-
-
-Address RelocInfo::call_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- // The pc_ offset of 0 assumes mips patched return sequence per
- // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
- // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
- return Assembler::target_address_at(pc_);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- // The pc_ offset of 0 assumes mips patched return sequence per
- // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
- // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
- Assembler::set_target_address_at(pc_, target);
- if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-Object** RelocInfo::call_object_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- Instr instr0 = Assembler::instr_at(pc_);
- Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
- Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
- bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
- (instr1 & kOpcodeMask) == ORI &&
- ((instr2 & kOpcodeMask) == JAL ||
- ((instr2 & kOpcodeMask) == SPECIAL &&
- (instr2 & kFunctionFieldMask) == JALR)));
- return patched_return;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
- visitor->VisitDebugTarget(this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- visitor->VisitRuntimeEntry(this);
- }
-}
-
-
-template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(heap, this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Assembler.
-
-
-void Assembler::CheckBuffer() {
- if (buffer_space() <= kGap) {
- GrowBuffer();
- }
-}
-
-
-void Assembler::CheckTrampolinePoolQuick() {
- if (pc_offset() >= next_buffer_check_) {
- CheckTrampolinePool();
- }
-}
-
-
-void Assembler::emit(Instr x) {
- if (!is_buffer_growth_blocked()) {
- CheckBuffer();
- }
- *reinterpret_cast<Instr*>(pc_) = x;
- pc_ += kInstrSize;
- CheckTrampolinePoolQuick();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
diff --git a/src/3rdparty/v8/src/mips/assembler-mips.cc b/src/3rdparty/v8/src/mips/assembler-mips.cc
deleted file mode 100644
index 962255d..0000000
--- a/src/3rdparty/v8/src/mips/assembler-mips.cc
+++ /dev/null
@@ -1,2305 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "mips/assembler-mips-inl.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_ = 0;
-
-
-ExternalReference ExternalReference::cpu_features() {
- ASSERT(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-
-// Get the CPU features enabled by the build. For cross compilation the
-// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
-// can be defined to enable FPU instructions when building the
-// snapshot.
-static uint64_t CpuFeaturesImpliedByCompiler() {
- uint64_t answer = 0;
-#ifdef CAN_USE_FPU_INSTRUCTIONS
- answer |= 1u << FPU;
-#endif // def CAN_USE_FPU_INSTRUCTIONS
-
-#ifdef __mips__
- // If the compiler is allowed to use FPU then we can use FPU too in our code
- // generation even when generating snapshots. This won't work for cross
- // compilation.
-#if(defined(__mips_hard_float) && __mips_hard_float != 0)
- answer |= 1u << FPU;
-#endif // defined(__mips_hard_float) && __mips_hard_float != 0
-#endif // def __mips__
-
- return answer;
-}
-
-
-const char* DoubleRegister::AllocationIndexToString(int index) {
- if (CpuFeatures::IsSupported(FPU)) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "f0",
- "f2",
- "f4",
- "f6",
- "f8",
- "f10",
- "f12",
- "f14",
- "f16",
- "f18",
- "f20",
- "f22",
- "f24",
- "f26"
- };
- return names[index];
- } else {
- ASSERT(index == 0);
- return "sfpd0";
- }
-}
-
-
-void CpuFeatures::Probe() {
- unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
- CpuFeaturesImpliedByCompiler());
- ASSERT(supported_ == 0 || supported_ == standard_features);
-#ifdef DEBUG
- initialized_ = true;
-#endif
-
- // Get the features implied by the OS and the compiler settings. This is the
- // minimal set of features which is also allowed for generated code in the
- // snapshot.
- supported_ |= standard_features;
-
- if (Serializer::enabled()) {
- // No probing for features if we might serialize (generate snapshot).
- return;
- }
-
- // If the compiler is allowed to use fpu then we can use fpu too in our
- // code generation.
-#if !defined(__mips__)
- // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
- if (FLAG_enable_fpu) {
- supported_ |= 1u << FPU;
- }
-#else
- // Probe for additional features not already known to be available.
- if (OS::MipsCpuHasFeature(FPU)) {
- // This implementation also sets the FPU flags if
- // runtime detection of FPU returns true.
- supported_ |= 1u << FPU;
- found_by_runtime_probing_ |= 1u << FPU;
- }
-#endif
-}
-
-
-int ToNumber(Register reg) {
- ASSERT(reg.is_valid());
- const int kNumbers[] = {
- 0, // zero_reg
- 1, // at
- 2, // v0
- 3, // v1
- 4, // a0
- 5, // a1
- 6, // a2
- 7, // a3
- 8, // t0
- 9, // t1
- 10, // t2
- 11, // t3
- 12, // t4
- 13, // t5
- 14, // t6
- 15, // t7
- 16, // s0
- 17, // s1
- 18, // s2
- 19, // s3
- 20, // s4
- 21, // s5
- 22, // s6
- 23, // s7
- 24, // t8
- 25, // t9
- 26, // k0
- 27, // k1
- 28, // gp
- 29, // sp
- 30, // fp
- 31, // ra
- };
- return kNumbers[reg.code()];
-}
-
-
-Register ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] = {
- zero_reg,
- at,
- v0, v1,
- a0, a1, a2, a3,
- t0, t1, t2, t3, t4, t5, t6, t7,
- s0, s1, s2, s3, s4, s5, s6, s7,
- t8, t9,
- k0, k1,
- gp,
- sp,
- fp,
- ra
- };
- return kRegisters[num];
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo.
-
-const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::INTERNAL_REFERENCE;
-
-
-bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on MIPS means that it is a lui/ori instruction, and that is
- // always the case inside code objects.
- return true;
-}
-
-
-// Patch the code at the current address with the supplied instructions.
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Patch the code at the current address with a call to the target.
- UNIMPLEMENTED_MIPS();
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand and MemOperand.
-// See assembler-mips-inl.h for inlined constructors.
-
-Operand::Operand(Handle<Object> handle) {
- rm_ = no_reg;
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- ASSERT(!HEAP->InNewSpace(obj));
- if (obj->IsHeapObject()) {
- imm32_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // No relocation needed.
- imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE32;
- }
-}
-
-
-MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
- offset_ = offset;
-}
-
-
-// -----------------------------------------------------------------------------
-// Specific instructions, constants, and masks.
-
-static const int kNegOffset = 0x00008000;
-// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
-// operations as post-increment of sp.
-const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift) | (kPointerSize & kImm16Mask);
-// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
-const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift) | (-kPointerSize & kImm16Mask);
-// sw(r, MemOperand(sp, 0))
-const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask);
-// lw(r, MemOperand(sp, 0))
-const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask);
-
-const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask);
-
-const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask);
-
-const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask);
-
-const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask);
-// A mask for the Rt register for push, pop, lw, sw instructions.
-const Instr kRtMask = kRtFieldMask;
-const Instr kLwSwInstrTypeMask = 0xffe00000;
-const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
-const Instr kLwSwOffsetMask = kImm16Mask;
-
-
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
- recorded_ast_id_(TypeFeedbackId::None()),
- positions_recorder_(this) {
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-
- last_trampoline_pool_end_ = 0;
- no_trampoline_pool_before_ = 0;
- trampoline_pool_blocked_nesting_ = 0;
- // We leave space (16 * kTrampolineSlotsSize)
- // for BlockTrampolinePoolScope buffer.
- next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
- internal_trampoline_exception_ = false;
- last_bound_pos_ = 0;
-
- trampoline_emitted_ = false;
- unbound_labels_count_ = 0;
- block_buffer_growth_ = false;
-
- ClearRecordedAstId();
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(m >= 4 && IsPowerOf2(m));
- while ((pc_offset() & (m - 1)) != 0) {
- nop();
- }
-}
-
-
-void Assembler::CodeTargetAlign() {
- // No advantage to aligning branch/call targets to more than
- // single instruction, that I am aware of.
- Align(4);
-}
-
-
-Register Assembler::GetRtReg(Instr instr) {
- Register rt;
- rt.code_ = (instr & kRtFieldMask) >> kRtShift;
- return rt;
-}
-
-
-Register Assembler::GetRsReg(Instr instr) {
- Register rs;
- rs.code_ = (instr & kRsFieldMask) >> kRsShift;
- return rs;
-}
-
-
-Register Assembler::GetRdReg(Instr instr) {
- Register rd;
- rd.code_ = (instr & kRdFieldMask) >> kRdShift;
- return rd;
-}
-
-
-uint32_t Assembler::GetRt(Instr instr) {
- return (instr & kRtFieldMask) >> kRtShift;
-}
-
-
-uint32_t Assembler::GetRtField(Instr instr) {
- return instr & kRtFieldMask;
-}
-
-
-uint32_t Assembler::GetRs(Instr instr) {
- return (instr & kRsFieldMask) >> kRsShift;
-}
-
-
-uint32_t Assembler::GetRsField(Instr instr) {
- return instr & kRsFieldMask;
-}
-
-
-uint32_t Assembler::GetRd(Instr instr) {
- return (instr & kRdFieldMask) >> kRdShift;
-}
-
-
-uint32_t Assembler::GetRdField(Instr instr) {
- return instr & kRdFieldMask;
-}
-
-
-uint32_t Assembler::GetSa(Instr instr) {
- return (instr & kSaFieldMask) >> kSaShift;
-}
-
-
-uint32_t Assembler::GetSaField(Instr instr) {
- return instr & kSaFieldMask;
-}
-
-
-uint32_t Assembler::GetOpcodeField(Instr instr) {
- return instr & kOpcodeMask;
-}
-
-
-uint32_t Assembler::GetFunction(Instr instr) {
- return (instr & kFunctionFieldMask) >> kFunctionShift;
-}
-
-
-uint32_t Assembler::GetFunctionField(Instr instr) {
- return instr & kFunctionFieldMask;
-}
-
-
-uint32_t Assembler::GetImmediate16(Instr instr) {
- return instr & kImm16Mask;
-}
-
-
-uint32_t Assembler::GetLabelConst(Instr instr) {
- return instr & ~kImm16Mask;
-}
-
-
-bool Assembler::IsPop(Instr instr) {
- return (instr & ~kRtMask) == kPopRegPattern;
-}
-
-
-bool Assembler::IsPush(Instr instr) {
- return (instr & ~kRtMask) == kPushRegPattern;
-}
-
-
-bool Assembler::IsSwRegFpOffset(Instr instr) {
- return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
-}
-
-
-bool Assembler::IsLwRegFpOffset(Instr instr) {
- return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
-}
-
-
-bool Assembler::IsSwRegFpNegOffset(Instr instr) {
- return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
- kSwRegFpNegOffsetPattern);
-}
-
-
-bool Assembler::IsLwRegFpNegOffset(Instr instr) {
- return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
- kLwRegFpNegOffsetPattern);
-}
-
-
-// Labels refer to positions in the (to be) generated code.
-// There are bound, linked, and unused labels.
-//
-// Bound labels refer to known positions in the already
-// generated code. pos() is the position the label refers to.
-//
-// Linked labels refer to unknown positions in the code
-// to be generated; pos() is the position of the last
-// instruction using the label.
-
-// The link chain is terminated by a value in the instruction of -1,
-// which is an otherwise illegal value (branch -1 is inf loop).
-// The instruction 16-bit offset field addresses 32-bit words, but in
-// code is conv to an 18-bit value addressing bytes, hence the -4 value.
-
-const int kEndOfChain = -4;
-// Determines the end of the Jump chain (a subset of the label link chain).
-const int kEndOfJumpChain = 0;
-
-
-bool Assembler::IsBranch(Instr instr) {
- uint32_t opcode = GetOpcodeField(instr);
- uint32_t rt_field = GetRtField(instr);
- uint32_t rs_field = GetRsField(instr);
- uint32_t label_constant = GetLabelConst(instr);
- // Checks if the instruction is a branch.
- return opcode == BEQ ||
- opcode == BNE ||
- opcode == BLEZ ||
- opcode == BGTZ ||
- opcode == BEQL ||
- opcode == BNEL ||
- opcode == BLEZL ||
- opcode == BGTZL ||
- (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
- rt_field == BLTZAL || rt_field == BGEZAL)) ||
- (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
- label_constant == 0; // Emitted label const in reg-exp engine.
-}
-
-
-bool Assembler::IsBeq(Instr instr) {
- return GetOpcodeField(instr) == BEQ;
-}
-
-
-bool Assembler::IsBne(Instr instr) {
- return GetOpcodeField(instr) == BNE;
-}
-
-
-bool Assembler::IsJump(Instr instr) {
- uint32_t opcode = GetOpcodeField(instr);
- uint32_t rt_field = GetRtField(instr);
- uint32_t rd_field = GetRdField(instr);
- uint32_t function_field = GetFunctionField(instr);
- // Checks if the instruction is a jump.
- return opcode == J || opcode == JAL ||
- (opcode == SPECIAL && rt_field == 0 &&
- ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
-}
-
-
-bool Assembler::IsJ(Instr instr) {
- uint32_t opcode = GetOpcodeField(instr);
- // Checks if the instruction is a jump.
- return opcode == J;
-}
-
-
-bool Assembler::IsJal(Instr instr) {
- return GetOpcodeField(instr) == JAL;
-}
-
-bool Assembler::IsJr(Instr instr) {
- return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
-}
-
-bool Assembler::IsJalr(Instr instr) {
- return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
-}
-
-
-bool Assembler::IsLui(Instr instr) {
- uint32_t opcode = GetOpcodeField(instr);
- // Checks if the instruction is a load upper immediate.
- return opcode == LUI;
-}
-
-
-bool Assembler::IsOri(Instr instr) {
- uint32_t opcode = GetOpcodeField(instr);
- // Checks if the instruction is a load upper immediate.
- return opcode == ORI;
-}
-
-
-bool Assembler::IsNop(Instr instr, unsigned int type) {
- // See Assembler::nop(type).
- ASSERT(type < 32);
- uint32_t opcode = GetOpcodeField(instr);
- uint32_t function = GetFunctionField(instr);
- uint32_t rt = GetRt(instr);
- uint32_t rd = GetRd(instr);
- uint32_t sa = GetSa(instr);
-
- // Traditional mips nop == sll(zero_reg, zero_reg, 0)
- // When marking non-zero type, use sll(zero_reg, at, type)
- // to avoid use of mips ssnop and ehb special encodings
- // of the sll instruction.
-
- Register nop_rt_reg = (type == 0) ? zero_reg : at;
- bool ret = (opcode == SPECIAL && function == SLL &&
- rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
- rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
- sa == type);
-
- return ret;
-}
-
-
-int32_t Assembler::GetBranchOffset(Instr instr) {
- ASSERT(IsBranch(instr));
- return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
-}
-
-
-bool Assembler::IsLw(Instr instr) {
- return ((instr & kOpcodeMask) == LW);
-}
-
-
-int16_t Assembler::GetLwOffset(Instr instr) {
- ASSERT(IsLw(instr));
- return ((instr & kImm16Mask));
-}
-
-
-Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
- ASSERT(IsLw(instr));
-
- // We actually create a new lw instruction based on the original one.
- Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
- | (offset & kImm16Mask);
-
- return temp_instr;
-}
-
-
-bool Assembler::IsSw(Instr instr) {
- return ((instr & kOpcodeMask) == SW);
-}
-
-
-Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
- ASSERT(IsSw(instr));
- return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
-}
-
-
-bool Assembler::IsAddImmediate(Instr instr) {
- return ((instr & kOpcodeMask) == ADDIU);
-}
-
-
-Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
- ASSERT(IsAddImmediate(instr));
- return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
-}
-
-
-bool Assembler::IsAndImmediate(Instr instr) {
- return GetOpcodeField(instr) == ANDI;
-}
-
-
-int Assembler::target_at(int32_t pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~kImm16Mask) == 0) {
- // Emitted label constant, not part of a branch.
- if (instr == 0) {
- return kEndOfChain;
- } else {
- int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
- return (imm18 + pos);
- }
- }
- // Check we have a branch or jump instruction.
- ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
- // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
- // the compiler uses arithmectic shifts for signed integers.
- if (IsBranch(instr)) {
- int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
-
- if (imm18 == kEndOfChain) {
- // EndOfChain sentinel is returned directly, not relative to pc or pos.
- return kEndOfChain;
- } else {
- return pos + kBranchPCOffset + imm18;
- }
- } else if (IsLui(instr)) {
- Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
- Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
- ASSERT(IsOri(instr_ori));
- int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
-
- if (imm == kEndOfJumpChain) {
- // EndOfChain sentinel is returned directly, not relative to pc or pos.
- return kEndOfChain;
- } else {
- uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
- int32_t delta = instr_address - imm;
- ASSERT(pos > delta);
- return pos - delta;
- }
- } else {
- int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
- if (imm28 == kEndOfJumpChain) {
- // EndOfChain sentinel is returned directly, not relative to pc or pos.
- return kEndOfChain;
- } else {
- uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
- instr_address &= kImm28Mask;
- int32_t delta = instr_address - imm28;
- ASSERT(pos > delta);
- return pos - delta;
- }
- }
-}
-
-
-void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~kImm16Mask) == 0) {
- ASSERT(target_pos == kEndOfChain || target_pos >= 0);
- // Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- return;
- }
-
- ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
- if (IsBranch(instr)) {
- int32_t imm18 = target_pos - (pos + kBranchPCOffset);
- ASSERT((imm18 & 3) == 0);
-
- instr &= ~kImm16Mask;
- int32_t imm16 = imm18 >> 2;
- ASSERT(is_int16(imm16));
-
- instr_at_put(pos, instr | (imm16 & kImm16Mask));
- } else if (IsLui(instr)) {
- Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
- Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
- ASSERT(IsOri(instr_ori));
- uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
- ASSERT((imm & 3) == 0);
-
- instr_lui &= ~kImm16Mask;
- instr_ori &= ~kImm16Mask;
-
- instr_at_put(pos + 0 * Assembler::kInstrSize,
- instr_lui | ((imm & kHiMask) >> kLuiShift));
- instr_at_put(pos + 1 * Assembler::kInstrSize,
- instr_ori | (imm & kImm16Mask));
- } else {
- uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
- imm28 &= kImm28Mask;
- ASSERT((imm28 & 3) == 0);
-
- instr &= ~kImm26Mask;
- uint32_t imm26 = imm28 >> 2;
- ASSERT(is_uint26(imm26));
-
- instr_at_put(pos, instr | (imm26 & kImm26Mask));
- }
-}
-
-
-void Assembler::print(Label* L) {
- if (L->is_unused()) {
- PrintF("unused label\n");
- } else if (L->is_bound()) {
- PrintF("bound label to %d\n", L->pos());
- } else if (L->is_linked()) {
- Label l = *L;
- PrintF("unbound label");
- while (l.is_linked()) {
- PrintF("@ %d ", l.pos());
- Instr instr = instr_at(l.pos());
- if ((instr & ~kImm16Mask) == 0) {
- PrintF("value\n");
- } else {
- PrintF("%d\n", instr);
- }
- next(&l);
- }
- } else {
- PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
- }
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
- int32_t trampoline_pos = kInvalidSlotPos;
- if (L->is_linked() && !trampoline_emitted_) {
- unbound_labels_count_--;
- next_buffer_check_ += kTrampolineSlotsSize;
- }
-
- while (L->is_linked()) {
- int32_t fixup_pos = L->pos();
- int32_t dist = pos - fixup_pos;
- next(L); // Call next before overwriting link with target at fixup_pos.
- Instr instr = instr_at(fixup_pos);
- if (IsBranch(instr)) {
- if (dist > kMaxBranchOffset) {
- if (trampoline_pos == kInvalidSlotPos) {
- trampoline_pos = get_trampoline_entry(fixup_pos);
- CHECK(trampoline_pos != kInvalidSlotPos);
- }
- ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
- target_at_put(fixup_pos, trampoline_pos);
- fixup_pos = trampoline_pos;
- dist = pos - fixup_pos;
- }
- target_at_put(fixup_pos, pos);
- } else {
- ASSERT(IsJ(instr) || IsLui(instr));
- target_at_put(fixup_pos, pos);
- }
- }
- L->bind_to(pos);
-
- // Keep track of the last bound label so we don't eliminate any instructions
- // before a bound label.
- if (pos > last_bound_pos_)
- last_bound_pos_ = pos;
-}
-
-
-void Assembler::bind(Label* L) {
- ASSERT(!L->is_bound()); // Label can only be bound once.
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::next(Label* L) {
- ASSERT(L->is_linked());
- int link = target_at(L->pos());
- if (link == kEndOfChain) {
- L->Unuse();
- } else {
- ASSERT(link >= 0);
- L->link_to(link);
- }
-}
-
-bool Assembler::is_near(Label* L) {
- if (L->is_bound()) {
- return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
- }
- return false;
-}
-
-// We have to use a temporary register for things that can be relocated even
-// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
-// space. There is no guarantee that the relocated location can be similarly
-// encoded.
-bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
- return !RelocInfo::IsNone(rmode);
-}
-
-void Assembler::GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- Register rd,
- uint16_t sa,
- SecondaryField func) {
- ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
- Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (sa << kSaShift) | func;
- emit(instr);
-}
-
-
-void Assembler::GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- uint16_t msb,
- uint16_t lsb,
- SecondaryField func) {
- ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
- Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (msb << kRdShift) | (lsb << kSaShift) | func;
- emit(instr);
-}
-
-
-void Assembler::GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func) {
- ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
- ASSERT(CpuFeatures::IsEnabled(FPU));
- Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
- | (fd.code() << kFdShift) | func;
- emit(instr);
-}
-
-
-void Assembler::GenInstrRegister(Opcode opcode,
- FPURegister fr,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func) {
- ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
- ASSERT(CpuFeatures::IsEnabled(FPU));
- Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
- | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
- emit(instr);
-}
-
-
-void Assembler::GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func) {
- ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
- ASSERT(CpuFeatures::IsEnabled(FPU));
- Instr instr = opcode | fmt | (rt.code() << kRtShift)
- | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
- emit(instr);
-}
-
-
-void Assembler::GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPUControlRegister fs,
- SecondaryField func) {
- ASSERT(fs.is_valid() && rt.is_valid());
- ASSERT(CpuFeatures::IsEnabled(FPU));
- Instr instr =
- opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
- emit(instr);
-}
-
-
-// Instructions with immediate value.
-// Registers are in the order of the instruction encoding, from left to right.
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- Register rt,
- int32_t j) {
- ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
- Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (j & kImm16Mask);
- emit(instr);
-}
-
-
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- SecondaryField SF,
- int32_t j) {
- ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
- Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
- emit(instr);
-}
-
-
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- FPURegister ft,
- int32_t j) {
- ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
- ASSERT(CpuFeatures::IsEnabled(FPU));
- Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
- | (j & kImm16Mask);
- emit(instr);
-}
-
-
-void Assembler::GenInstrJump(Opcode opcode,
- uint32_t address) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- ASSERT(is_uint26(address));
- Instr instr = opcode | address;
- emit(instr);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-// Returns the next free trampoline entry.
-int32_t Assembler::get_trampoline_entry(int32_t pos) {
- int32_t trampoline_entry = kInvalidSlotPos;
-
- if (!internal_trampoline_exception_) {
- if (trampoline_.start() > pos) {
- trampoline_entry = trampoline_.take_slot();
- }
-
- if (kInvalidSlotPos == trampoline_entry) {
- internal_trampoline_exception_ = true;
- }
- }
- return trampoline_entry;
-}
-
-
-uint32_t Assembler::jump_address(Label* L) {
- int32_t target_pos;
-
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link.
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- return kEndOfJumpChain;
- }
- }
-
- uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
- ASSERT((imm & 3) == 0);
-
- return imm;
-}
-
-
-int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
- int32_t target_pos;
-
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
- ASSERT((offset & 3) == 0);
- ASSERT(is_int16(offset >> 2));
-
- return offset;
-}
-
-
-void Assembler::label_at_put(Label* L, int at_offset) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link.
- int32_t imm18 = target_pos - at_offset;
- ASSERT((imm18 & 3) == 0);
- int32_t imm16 = imm18 >> 2;
- ASSERT(is_int16(imm16));
- instr_at_put(at_offset, (imm16 & kImm16Mask));
- } else {
- target_pos = kEndOfChain;
- instr_at_put(at_offset, 0);
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- }
- L->link_to(at_offset);
- }
-}
-
-
-//------- Branch and jump instructions --------
-
-void Assembler::b(int16_t offset) {
- beq(zero_reg, zero_reg, offset);
-}
-
-
-void Assembler::bal(int16_t offset) {
- positions_recorder()->WriteRecordedPositions();
- bgezal(zero_reg, offset);
-}
-
-
-void Assembler::beq(Register rs, Register rt, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrImmediate(BEQ, rs, rt, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::bgez(Register rs, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrImmediate(REGIMM, rs, BGEZ, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::bgezal(Register rs, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
- GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::bgtz(Register rs, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrImmediate(BGTZ, rs, zero_reg, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::blez(Register rs, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrImmediate(BLEZ, rs, zero_reg, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::bltz(Register rs, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrImmediate(REGIMM, rs, BLTZ, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::bltzal(Register rs, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
- GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::bne(Register rs, Register rt, int16_t offset) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrImmediate(BNE, rs, rt, offset);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::j(int32_t target) {
-#if DEBUG
- // Get pc of delay slot.
- uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
- (kImm26Bits + kImmFieldShift)) == 0;
- ASSERT(in_range && ((target & 3) == 0));
-#endif
- GenInstrJump(J, target >> 2);
-}
-
-
-void Assembler::jr(Register rs) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (rs.is(ra)) {
- positions_recorder()->WriteRecordedPositions();
- }
- GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::jal(int32_t target) {
-#ifdef DEBUG
- // Get pc of delay slot.
- uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
- (kImm26Bits + kImmFieldShift)) == 0;
- ASSERT(in_range && ((target & 3) == 0));
-#endif
- positions_recorder()->WriteRecordedPositions();
- GenInstrJump(JAL, target >> 2);
-}
-
-
-void Assembler::jalr(Register rs, Register rd) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
- GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
-void Assembler::j_or_jr(int32_t target, Register rs) {
- // Get pc of delay slot.
- uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
- (kImm26Bits + kImmFieldShift)) == 0;
- if (in_range) {
- j(target);
- } else {
- jr(t9);
- }
-}
-
-
-void Assembler::jal_or_jalr(int32_t target, Register rs) {
- // Get pc of delay slot.
- uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
- (kImm26Bits+kImmFieldShift)) == 0;
- if (in_range) {
- jal(target);
- } else {
- jalr(t9);
- }
-}
-
-
-//-------Data-processing-instructions---------
-
-// Arithmetic.
-
-void Assembler::addu(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
-}
-
-
-void Assembler::addiu(Register rd, Register rs, int32_t j) {
- GenInstrImmediate(ADDIU, rs, rd, j);
-}
-
-
-void Assembler::subu(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
-}
-
-
-void Assembler::mul(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
-}
-
-
-void Assembler::mult(Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
-}
-
-
-void Assembler::multu(Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
-}
-
-
-void Assembler::div(Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
-}
-
-
-void Assembler::divu(Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
-}
-
-
-// Logical.
-
-void Assembler::and_(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
-}
-
-
-void Assembler::andi(Register rt, Register rs, int32_t j) {
- ASSERT(is_uint16(j));
- GenInstrImmediate(ANDI, rs, rt, j);
-}
-
-
-void Assembler::or_(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
-}
-
-
-void Assembler::ori(Register rt, Register rs, int32_t j) {
- ASSERT(is_uint16(j));
- GenInstrImmediate(ORI, rs, rt, j);
-}
-
-
-void Assembler::xor_(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
-}
-
-
-void Assembler::xori(Register rt, Register rs, int32_t j) {
- ASSERT(is_uint16(j));
- GenInstrImmediate(XORI, rs, rt, j);
-}
-
-
-void Assembler::nor(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
-}
-
-
-// Shifts.
-void Assembler::sll(Register rd,
- Register rt,
- uint16_t sa,
- bool coming_from_nop) {
- // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
- // generated using the sll instruction. They must be generated using
- // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
- // instructions.
- ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
-}
-
-
-void Assembler::sllv(Register rd, Register rt, Register rs) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
-}
-
-
-void Assembler::srl(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
-}
-
-
-void Assembler::srlv(Register rd, Register rt, Register rs) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
-}
-
-
-void Assembler::sra(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
-}
-
-
-void Assembler::srav(Register rd, Register rt, Register rs) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
-}
-
-
-void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
- // Should be called via MacroAssembler::Ror.
- ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
- ASSERT(kArchVariant == kMips32r2);
- Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
- emit(instr);
-}
-
-
-void Assembler::rotrv(Register rd, Register rt, Register rs) {
- // Should be called via MacroAssembler::Ror.
- ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
- ASSERT(kArchVariant == kMips32r2);
- Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
- emit(instr);
-}
-
-
-//------------Memory-instructions-------------
-
-// Helper for base-reg + offset, when offset is larger than int16.
-void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
- ASSERT(!src.rm().is(at));
- lui(at, src.offset_ >> kLuiShift);
- ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
- addu(at, at, src.rm()); // Add base register.
-}
-
-
-void Assembler::lb(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::lbu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::lh(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::lhu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::lw(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::lwl(Register rd, const MemOperand& rs) {
- GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
-}
-
-
-void Assembler::lwr(Register rd, const MemOperand& rs) {
- GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
-}
-
-
-void Assembler::sb(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::sh(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::sw(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
- }
-}
-
-
-void Assembler::swl(Register rd, const MemOperand& rs) {
- GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
-}
-
-
-void Assembler::swr(Register rd, const MemOperand& rs) {
- GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
-}
-
-
-void Assembler::lui(Register rd, int32_t j) {
- ASSERT(is_uint16(j));
- GenInstrImmediate(LUI, zero_reg, rd, j);
-}
-
-
-//-------------Misc-instructions--------------
-
-// Break / Trap instructions.
-void Assembler::break_(uint32_t code, bool break_as_stop) {
- ASSERT((code & ~0xfffff) == 0);
- // We need to invalidate breaks that could be stops as well because the
- // simulator expects a char pointer after the stop instruction.
- // See constants-mips.h for explanation.
- ASSERT((break_as_stop &&
- code <= kMaxStopCode &&
- code > kMaxWatchpointCode) ||
- (!break_as_stop &&
- (code > kMaxStopCode ||
- code <= kMaxWatchpointCode)));
- Instr break_instr = SPECIAL | BREAK | (code << 6);
- emit(break_instr);
-}
-
-
-void Assembler::stop(const char* msg, uint32_t code) {
- ASSERT(code > kMaxWatchpointCode);
- ASSERT(code <= kMaxStopCode);
-#if defined(V8_HOST_ARCH_MIPS)
- break_(0x54321);
-#else // V8_HOST_ARCH_MIPS
- BlockTrampolinePoolFor(2);
- // The Simulator will handle the stop instruction and get the message address.
- // On MIPS stop() is just a special kind of break_().
- break_(code, true);
- emit(reinterpret_cast<Instr>(msg));
-#endif
-}
-
-
-void Assembler::tge(Register rs, Register rt, uint16_t code) {
- ASSERT(is_uint10(code));
- Instr instr = SPECIAL | TGE | rs.code() << kRsShift
- | rt.code() << kRtShift | code << 6;
- emit(instr);
-}
-
-
-void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
- ASSERT(is_uint10(code));
- Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
- | rt.code() << kRtShift | code << 6;
- emit(instr);
-}
-
-
-void Assembler::tlt(Register rs, Register rt, uint16_t code) {
- ASSERT(is_uint10(code));
- Instr instr =
- SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
- emit(instr);
-}
-
-
-void Assembler::tltu(Register rs, Register rt, uint16_t code) {
- ASSERT(is_uint10(code));
- Instr instr =
- SPECIAL | TLTU | rs.code() << kRsShift
- | rt.code() << kRtShift | code << 6;
- emit(instr);
-}
-
-
-void Assembler::teq(Register rs, Register rt, uint16_t code) {
- ASSERT(is_uint10(code));
- Instr instr =
- SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
- emit(instr);
-}
-
-
-void Assembler::tne(Register rs, Register rt, uint16_t code) {
- ASSERT(is_uint10(code));
- Instr instr =
- SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
- emit(instr);
-}
-
-
-// Move from HI/LO register.
-
-void Assembler::mfhi(Register rd) {
- GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
-}
-
-
-void Assembler::mflo(Register rd) {
- GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
-}
-
-
-// Set on less than instructions.
-void Assembler::slt(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
-}
-
-
-void Assembler::sltu(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
-}
-
-
-void Assembler::slti(Register rt, Register rs, int32_t j) {
- GenInstrImmediate(SLTI, rs, rt, j);
-}
-
-
-void Assembler::sltiu(Register rt, Register rs, int32_t j) {
- GenInstrImmediate(SLTIU, rs, rt, j);
-}
-
-
-// Conditional move.
-void Assembler::movz(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
-}
-
-
-void Assembler::movn(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
-}
-
-
-void Assembler::movt(Register rd, Register rs, uint16_t cc) {
- Register rt;
- rt.code_ = (cc & 0x0007) << 2 | 1;
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
-}
-
-
-void Assembler::movf(Register rd, Register rs, uint16_t cc) {
- Register rt;
- rt.code_ = (cc & 0x0007) << 2 | 0;
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
-}
-
-
-// Bit twiddling.
-void Assembler::clz(Register rd, Register rs) {
- // Clz instr requires same GPR number in 'rd' and 'rt' fields.
- GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
-}
-
-
-void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
- // Should be called via MacroAssembler::Ins.
- // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
- ASSERT(kArchVariant == kMips32r2);
- GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
-}
-
-
-void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
- // Should be called via MacroAssembler::Ext.
- // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
- ASSERT(kArchVariant == kMips32r2);
- GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
-}
-
-
-//--------Coprocessor-instructions----------------
-
-// Load, store, move.
-void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
-}
-
-
-void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
- // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
- // load to two 32-bit loads.
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
-}
-
-
-void Assembler::swc1(FPURegister fd, const MemOperand& src) {
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
-}
-
-
-void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
- // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
- // store to two 32-bit stores.
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
-}
-
-
-void Assembler::mtc1(Register rt, FPURegister fs) {
- GenInstrRegister(COP1, MTC1, rt, fs, f0);
-}
-
-
-void Assembler::mfc1(Register rt, FPURegister fs) {
- GenInstrRegister(COP1, MFC1, rt, fs, f0);
-}
-
-
-void Assembler::ctc1(Register rt, FPUControlRegister fs) {
- GenInstrRegister(COP1, CTC1, rt, fs);
-}
-
-
-void Assembler::cfc1(Register rt, FPUControlRegister fs) {
- GenInstrRegister(COP1, CFC1, rt, fs);
-}
-
-void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
- uint64_t i;
- memcpy(&i, &d, 8);
-
- *lo = i & 0xffffffff;
- *hi = i >> 32;
-}
-
-// Arithmetic.
-
-void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
- GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
-}
-
-
-void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
- GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
-}
-
-
-void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
- GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
-}
-
-
-void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
- FPURegister ft) {
- GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
-}
-
-
-void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
- GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
-}
-
-
-void Assembler::abs_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
-}
-
-
-void Assembler::mov_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
-}
-
-
-void Assembler::neg_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
-}
-
-
-void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
-}
-
-
-// Conversions.
-
-void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
-}
-
-
-void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
-}
-
-
-void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
-}
-
-
-void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
-}
-
-
-void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
-}
-
-
-void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
-}
-
-
-void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
-}
-
-
-void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
-}
-
-
-void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
-}
-
-
-void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
-}
-
-
-void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- ASSERT(kArchVariant == kMips32r2);
- GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
-}
-
-
-void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- ASSERT(kArchVariant == kMips32r2);
- GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
-}
-
-
-void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
- ASSERT(kArchVariant == kMips32r2);
- GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
-}
-
-
-void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
- ASSERT(kArchVariant == kMips32r2);
- GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
-}
-
-
-void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
-}
-
-
-void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
-}
-
-
-void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
-}
-
-
-void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
-}
-
-
-void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
-}
-
-
-void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
-}
-
-
-void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
-}
-
-
-void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- ASSERT(kArchVariant == kMips32r2);
- GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
-}
-
-
-void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
-}
-
-
-void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
-}
-
-
-void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- ASSERT(kArchVariant == kMips32r2);
- GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
-}
-
-
-void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
-}
-
-
-// Conditions.
-void Assembler::c(FPUCondition cond, SecondaryField fmt,
- FPURegister fs, FPURegister ft, uint16_t cc) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
- ASSERT(is_uint3(cc));
- ASSERT((fmt & ~(31 << kRsShift)) == 0);
- Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
- | cc << 8 | 3 << 4 | cond;
- emit(instr);
-}
-
-
-void Assembler::fcmp(FPURegister src1, const double src2,
- FPUCondition cond) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
- ASSERT(src2 == 0.0);
- mtc1(zero_reg, f14);
- cvt_d_w(f14, f14);
- c(cond, D, src1, f14, 0);
-}
-
-
-void Assembler::bc1f(int16_t offset, uint16_t cc) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
- ASSERT(is_uint3(cc));
- Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
- emit(instr);
-}
-
-
-void Assembler::bc1t(int16_t offset, uint16_t cc) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
- ASSERT(is_uint3(cc));
- Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
- emit(instr);
-}
-
-
-// Debugging.
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
- Instr instr = instr_at(pc);
- ASSERT(IsJ(instr) || IsLui(instr));
- if (IsLui(instr)) {
- Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
- Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
- ASSERT(IsOri(instr_ori));
- int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
- if (imm == kEndOfJumpChain) {
- return 0; // Number of instructions patched.
- }
- imm += pc_delta;
- ASSERT((imm & 3) == 0);
-
- instr_lui &= ~kImm16Mask;
- instr_ori &= ~kImm16Mask;
-
- instr_at_put(pc + 0 * Assembler::kInstrSize,
- instr_lui | ((imm >> kLuiShift) & kImm16Mask));
- instr_at_put(pc + 1 * Assembler::kInstrSize,
- instr_ori | (imm & kImm16Mask));
- return 2; // Number of instructions patched.
- } else {
- uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
- if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
- return 0; // Number of instructions patched.
- }
- imm28 += pc_delta;
- imm28 &= kImm28Mask;
- ASSERT((imm28 & 3) == 0);
-
- instr &= ~kImm26Mask;
- uint32_t imm26 = imm28 >> 2;
- ASSERT(is_uint26(imm26));
-
- instr_at_put(pc, instr | (imm26 & kImm26Mask));
- return 1; // Number of instructions patched.
- }
-}
-
-
-void Assembler::GrowBuffer() {
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // The new buffer.
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else if (buffer_size_ < 1*MB) {
- desc.buffer_size = 2*buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1*MB;
- }
- CHECK_GT(desc.buffer_size, 0); // No overflow.
-
- // Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
-
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-
- // Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
-
- // Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
- RelocateInternalReference(p, pc_delta);
- }
- }
-
- ASSERT(!overflow());
-}
-
-
-void Assembler::db(uint8_t data) {
- CheckBuffer();
- *reinterpret_cast<uint8_t*>(pc_) = data;
- pc_ += sizeof(uint8_t);
-}
-
-
-void Assembler::dd(uint32_t data) {
- CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) = data;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- // We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, rmode, data, NULL);
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
- // Adjust code for new modes.
- ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode));
- // These modes do not need an entry in the constant pool.
- }
- if (!RelocInfo::IsNone(rinfo.rmode())) {
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
- }
- ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_,
- rmode,
- RecordedAstId().ToInt(),
- NULL);
- ClearRecordedAstId();
- reloc_info_writer.Write(&reloc_info_with_ast_id);
- } else {
- reloc_info_writer.Write(&rinfo);
- }
- }
-}
-
-
-void Assembler::BlockTrampolinePoolFor(int instructions) {
- BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
-}
-
-
-void Assembler::CheckTrampolinePool() {
- // Some small sequences of instructions must not be broken up by the
- // insertion of a trampoline pool; such sequences are protected by setting
- // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
- // which are both checked here. Also, recursive calls to CheckTrampolinePool
- // are blocked by trampoline_pool_blocked_nesting_.
- if ((trampoline_pool_blocked_nesting_ > 0) ||
- (pc_offset() < no_trampoline_pool_before_)) {
- // Emission is currently blocked; make sure we try again as soon as
- // possible.
- if (trampoline_pool_blocked_nesting_ > 0) {
- next_buffer_check_ = pc_offset() + kInstrSize;
- } else {
- next_buffer_check_ = no_trampoline_pool_before_;
- }
- return;
- }
-
- ASSERT(!trampoline_emitted_);
- ASSERT(unbound_labels_count_ >= 0);
- if (unbound_labels_count_ > 0) {
- // First we emit jump (2 instructions), then we emit trampoline pool.
- { BlockTrampolinePoolScope block_trampoline_pool(this);
- Label after_pool;
- b(&after_pool);
- nop();
-
- int pool_start = pc_offset();
- for (int i = 0; i < unbound_labels_count_; i++) {
- uint32_t imm32;
- imm32 = jump_address(&after_pool);
- { BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal
- // references until associated instructions are emitted and available
- // to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
- lui(at, (imm32 & kHiMask) >> kLuiShift);
- ori(at, at, (imm32 & kImm16Mask));
- }
- jr(at);
- nop();
- }
- bind(&after_pool);
- trampoline_ = Trampoline(pool_start, unbound_labels_count_);
-
- trampoline_emitted_ = true;
- // As we are only going to emit trampoline once, we need to prevent any
- // further emission.
- next_buffer_check_ = kMaxInt;
- }
- } else {
- // Number of branches to unbound label at this point is zero, so we can
- // move next buffer check to maximum.
- next_buffer_check_ = pc_offset() +
- kMaxBranchOffset - kTrampolineSlotsSize * 16;
- }
- return;
-}
-
-
-Address Assembler::target_address_at(Address pc) {
- Instr instr1 = instr_at(pc);
- Instr instr2 = instr_at(pc + kInstrSize);
- // Interpret 2 instructions generated by li: lui/ori
- if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
- // Assemble the 32 bit value.
- return reinterpret_cast<Address>(
- (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
- }
-
- // We should never get here, force a bad address if we do.
- UNREACHABLE();
- return (Address)0x0;
-}
-
-
-// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
-// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
-// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
-// OS::nan_value() returns a qNaN.
-void Assembler::QuietNaN(HeapObject* object) {
- HeapNumber::cast(object)->set_value(OS::nan_value());
-}
-
-
-// On Mips, a target address is stored in a lui/ori instruction pair, each
-// of which load 16 bits of the 32-bit address to a register.
-// Patching the address must replace both instr, and flush the i-cache.
-//
-// There is an optimization below, which emits a nop when the address
-// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
-// and possibly removed.
-void Assembler::set_target_address_at(Address pc, Address target) {
- Instr instr2 = instr_at(pc + kInstrSize);
- uint32_t rt_code = GetRtField(instr2);
- uint32_t* p = reinterpret_cast<uint32_t*>(pc);
- uint32_t itarget = reinterpret_cast<uint32_t>(target);
-
-#ifdef DEBUG
- // Check we have the result from a li macro-instruction, using instr pair.
- Instr instr1 = instr_at(pc);
- CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
-#endif
-
- // Must use 2 instructions to insure patchable code => just use lui and ori.
- // lui rt, upper-16.
- // ori rt rt, lower-16.
- *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
- *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
-
- // The following code is an optimization for the common case of Call()
- // or Jump() which is load to register, and jump through register:
- // li(t9, address); jalr(t9) (or jr(t9)).
- // If the destination address is in the same 256 MB page as the call, it
- // is faster to do a direct jal, or j, rather than jump thru register, since
- // that lets the cpu pipeline prefetch the target address. However each
- // time the address above is patched, we have to patch the direct jal/j
- // instruction, as well as possibly revert to jalr/jr if we now cross a
- // 256 MB page. Note that with the jal/j instructions, we do not need to
- // load the register, but that code is left, since it makes it easy to
- // revert this process. A further optimization could try replacing the
- // li sequence with nops.
- // This optimization can only be applied if the rt-code from instr2 is the
- // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
- // mips return. Occasionally this lands after an li().
-
- Instr instr3 = instr_at(pc + 2 * kInstrSize);
- uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(itarget) >>
- (kImm26Bits + kImmFieldShift)) == 0;
- uint32_t target_field =
- static_cast<uint32_t>(itarget & kJumpAddrMask) >>kImmFieldShift;
- bool patched_jump = false;
-
-#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
- // This is a workaround to the 24k core E156 bug (affect some 34k cores also).
- // Since the excluded space is only 64KB out of 256MB (0.02 %), we will just
- // apply this workaround for all cores so we don't have to identify the core.
- if (in_range) {
- // The 24k core E156 bug has some very specific requirements, we only check
- // the most simple one: if the address of the delay slot instruction is in
- // the first or last 32 KB of the 256 MB segment.
- uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
- uint32_t ipc_segment_addr = ipc & segment_mask;
- if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
- in_range = false;
- }
-#endif
-
- if (IsJalr(instr3)) {
- // Try to convert JALR to JAL.
- if (in_range && GetRt(instr2) == GetRs(instr3)) {
- *(p+2) = JAL | target_field;
- patched_jump = true;
- }
- } else if (IsJr(instr3)) {
- // Try to convert JR to J, skip returns (jr ra).
- bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
- if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
- *(p+2) = J | target_field;
- patched_jump = true;
- }
- } else if (IsJal(instr3)) {
- if (in_range) {
- // We are patching an already converted JAL.
- *(p+2) = JAL | target_field;
- } else {
- // Patch JAL, but out of range, revert to JALR.
- // JALR rs reg is the rt reg specified in the ORI instruction.
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
- *(p+2) = SPECIAL | rs_field | rd_field | JALR;
- }
- patched_jump = true;
- } else if (IsJ(instr3)) {
- if (in_range) {
- // We are patching an already converted J (jump).
- *(p+2) = J | target_field;
- } else {
- // Trying patch J, but out of range, just go back to JR.
- // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- *(p+2) = SPECIAL | rs_field | JR;
- }
- patched_jump = true;
- }
-
- CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
-}
-
-void Assembler::JumpLabelToJumpRegister(Address pc) {
- // Address pc points to lui/ori instructions.
- // Jump to label may follow at pc + 2 * kInstrSize.
- uint32_t* p = reinterpret_cast<uint32_t*>(pc);
-#ifdef DEBUG
- Instr instr1 = instr_at(pc);
-#endif
- Instr instr2 = instr_at(pc + 1 * kInstrSize);
- Instr instr3 = instr_at(pc + 2 * kInstrSize);
- bool patched = false;
-
- if (IsJal(instr3)) {
- ASSERT(GetOpcodeField(instr1) == LUI);
- ASSERT(GetOpcodeField(instr2) == ORI);
-
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
- *(p+2) = SPECIAL | rs_field | rd_field | JALR;
- patched = true;
- } else if (IsJ(instr3)) {
- ASSERT(GetOpcodeField(instr1) == LUI);
- ASSERT(GetOpcodeField(instr2) == ORI);
-
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- *(p+2) = SPECIAL | rs_field | JR;
- patched = true;
- }
-
- if (patched) {
- CPU::FlushICache(pc+2, sizeof(Address));
- }
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/assembler-mips.h b/src/3rdparty/v8/src/mips/assembler-mips.h
deleted file mode 100644
index d108edc..0000000
--- a/src/3rdparty/v8/src/mips/assembler-mips.h
+++ /dev/null
@@ -1,1282 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-
-#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
-#define V8_MIPS_ASSEMBLER_MIPS_H_
-
-#include <stdio.h>
-#include "assembler.h"
-#include "constants-mips.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Register and FPURegister.
-
-// Core register.
-struct Register {
- static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kMaxNumAllocatableRegisters = 14; // v0 through t7.
- static const int kSizeInBytes = 4;
- static const int kGPRsPerNonFPUDouble = 2;
-
- inline static int NumAllocatableRegisters();
-
- static int ToAllocationIndex(Register reg) {
- return reg.code() - 2; // zero_reg and 'at' are skipped.
- }
-
- static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index + 2); // zero_reg and 'at' are skipped.
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "v0",
- "v1",
- "a0",
- "a1",
- "a2",
- "a3",
- "t0",
- "t1",
- "t2",
- "t3",
- "t4",
- "t5",
- "t6",
- "t7",
- };
- return names[index];
- }
-
- static Register from_code(int code) {
- Register r = { code };
- return r;
- }
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-#define REGISTER(N, C) \
- const int kRegister_ ## N ## _Code = C; \
- const Register N = { C }
-
-REGISTER(no_reg, -1);
-// Always zero.
-REGISTER(zero_reg, 0);
-// at: Reserved for synthetic instructions.
-REGISTER(at, 1);
-// v0, v1: Used when returning multiple values from subroutines.
-REGISTER(v0, 2);
-REGISTER(v1, 3);
-// a0 - a4: Used to pass non-FP parameters.
-REGISTER(a0, 4);
-REGISTER(a1, 5);
-REGISTER(a2, 6);
-REGISTER(a3, 7);
-// t0 - t9: Can be used without reservation, act as temporary registers and are
-// allowed to be destroyed by subroutines.
-REGISTER(t0, 8);
-REGISTER(t1, 9);
-REGISTER(t2, 10);
-REGISTER(t3, 11);
-REGISTER(t4, 12);
-REGISTER(t5, 13);
-REGISTER(t6, 14);
-REGISTER(t7, 15);
-// s0 - s7: Subroutine register variables. Subroutines that write to these
-// registers must restore their values before exiting so that the caller can
-// expect the values to be preserved.
-REGISTER(s0, 16);
-REGISTER(s1, 17);
-REGISTER(s2, 18);
-REGISTER(s3, 19);
-REGISTER(s4, 20);
-REGISTER(s5, 21);
-REGISTER(s6, 22);
-REGISTER(s7, 23);
-REGISTER(t8, 24);
-REGISTER(t9, 25);
-// k0, k1: Reserved for system calls and interrupt handlers.
-REGISTER(k0, 26);
-REGISTER(k1, 27);
-// gp: Reserved.
-REGISTER(gp, 28);
-// sp: Stack pointer.
-REGISTER(sp, 29);
-// fp: Frame pointer.
-REGISTER(fp, 30);
-// ra: Return address pointer.
-REGISTER(ra, 31);
-
-#undef REGISTER
-
-
-int ToNumber(Register reg);
-
-Register ToRegister(int num);
-
-// Coprocessor register.
-struct FPURegister {
- static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;
-
- // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
- // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
- // number of Double regs (64-bit regs, or FPU-reg-pairs).
-
- // A few double registers are reserved: one as a scratch register and one to
- // hold 0.0.
- // f28: 0.0
- // f30: scratch register.
- static const int kNumReservedRegisters = 2;
- static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
- kNumReservedRegisters;
-
- inline static int NumRegisters();
- inline static int NumAllocatableRegisters();
- inline static int ToAllocationIndex(FPURegister reg);
- static const char* AllocationIndexToString(int index);
-
- static FPURegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index * 2);
- }
-
- static FPURegister from_code(int code) {
- FPURegister r = { code };
- return r;
- }
-
- bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; }
- bool is(FPURegister creg) const { return code_ == creg.code_; }
- FPURegister low() const {
- // Find low reg of a Double-reg pair, which is the reg itself.
- ASSERT(code_ % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.code_ = code_;
- ASSERT(reg.is_valid());
- return reg;
- }
- FPURegister high() const {
- // Find high reg of a Doubel-reg pair, which is reg + 1.
- ASSERT(code_ % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.code_ = code_ + 1;
- ASSERT(reg.is_valid());
- return reg;
- }
-
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
- void setcode(int f) {
- code_ = f;
- ASSERT(is_valid());
- }
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
-// 32-bit registers, f0 through f31. When used as 'double' they are used
-// in pairs, starting with the even numbered register. So a double operation
-// on f0 really uses f0 and f1.
-// (Modern mips hardware also supports 32 64-bit registers, via setting
-// (priviledged) Status Register FR bit to 1. This is used by the N32 ABI,
-// but it is not in common use. Someday we will want to support this in v8.)
-
-// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
-typedef FPURegister DoubleRegister;
-typedef FPURegister FloatRegister;
-
-const FPURegister no_freg = { -1 };
-
-const FPURegister f0 = { 0 }; // Return value in hard float mode.
-const FPURegister f1 = { 1 };
-const FPURegister f2 = { 2 };
-const FPURegister f3 = { 3 };
-const FPURegister f4 = { 4 };
-const FPURegister f5 = { 5 };
-const FPURegister f6 = { 6 };
-const FPURegister f7 = { 7 };
-const FPURegister f8 = { 8 };
-const FPURegister f9 = { 9 };
-const FPURegister f10 = { 10 };
-const FPURegister f11 = { 11 };
-const FPURegister f12 = { 12 }; // Arg 0 in hard float mode.
-const FPURegister f13 = { 13 };
-const FPURegister f14 = { 14 }; // Arg 1 in hard float mode.
-const FPURegister f15 = { 15 };
-const FPURegister f16 = { 16 };
-const FPURegister f17 = { 17 };
-const FPURegister f18 = { 18 };
-const FPURegister f19 = { 19 };
-const FPURegister f20 = { 20 };
-const FPURegister f21 = { 21 };
-const FPURegister f22 = { 22 };
-const FPURegister f23 = { 23 };
-const FPURegister f24 = { 24 };
-const FPURegister f25 = { 25 };
-const FPURegister f26 = { 26 };
-const FPURegister f27 = { 27 };
-const FPURegister f28 = { 28 };
-const FPURegister f29 = { 29 };
-const FPURegister f30 = { 30 };
-const FPURegister f31 = { 31 };
-
-const Register sfpd_lo = { kRegister_t6_Code };
-const Register sfpd_hi = { kRegister_t7_Code };
-
-// Register aliases.
-// cp is assumed to be a callee saved register.
-// Defined using #define instead of "static const Register&" because Clang
-// complains otherwise when a compilation unit that includes this header
-// doesn't use the variables.
-#define kRootRegister s6
-#define cp s7
-#define kLithiumScratchReg s3
-#define kLithiumScratchReg2 s4
-#define kLithiumScratchDouble f30
-#define kDoubleRegZero f28
-
-// FPU (coprocessor 1) control registers.
-// Currently only FCSR (#31) is implemented.
-struct FPUControlRegister {
- bool is_valid() const { return code_ == kFCSRRegister; }
- bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
- void setcode(int f) {
- code_ = f;
- ASSERT(is_valid());
- }
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
-const FPUControlRegister FCSR = { kFCSRRegister };
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands.
-
-// Class Operand represents a shifter operand in data processing instructions.
-class Operand BASE_EMBEDDED {
- public:
- // Immediate.
- INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE32));
- INLINE(explicit Operand(const ExternalReference& f));
- INLINE(explicit Operand(const char* s));
- INLINE(explicit Operand(Object** opp));
- INLINE(explicit Operand(Context** cpp));
- explicit Operand(Handle<Object> handle);
- INLINE(explicit Operand(Smi* value));
-
- // Register.
- INLINE(explicit Operand(Register rm));
-
- // Return true if this is a register operand.
- INLINE(bool is_reg() const);
-
- Register rm() const { return rm_; }
-
- private:
- Register rm_;
- int32_t imm32_; // Valid if rm_ == no_reg.
- RelocInfo::Mode rmode_;
-
- friend class Assembler;
- friend class MacroAssembler;
-};
-
-
-// On MIPS we have only one adressing mode with base_reg + offset.
-// Class MemOperand represents a memory operand in load and store instructions.
-class MemOperand : public Operand {
- public:
- explicit MemOperand(Register rn, int32_t offset = 0);
- int32_t offset() const { return offset_; }
-
- bool OffsetIsInt16Encodable() const {
- return is_int16(offset_);
- }
-
- private:
- int32_t offset_;
-
- friend class Assembler;
-};
-
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == FPU && !FLAG_enable_fpu) return false;
- return (supported_ & (1u << f)) != 0;
- }
-
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
- return (enabled & (1u << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- unsigned mask = 1u << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- unsigned old_enabled_;
-#else
-
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (1u << f);
- }
- }
-
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
-
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
-
- const unsigned old_supported_;
- };
-
- private:
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static unsigned supported_;
- static unsigned found_by_runtime_probing_;
-
- friend class ExternalReference;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
-class Assembler : public AssemblerBase {
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~Assembler() { }
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Label operations & relative jumps (PPUM Appendix D).
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
- void bind(Label* L); // Binds an unbound label L to current code position.
- // Determines if Label is bound and near enough so that branch instruction
- // can be used to reach it, instead of jump instruction.
- bool is_near(Label* L);
-
- // Returns the branch offset to the given label from the current code
- // position. Links the label to the current position if it is still unbound.
- // Manages the jump elimination optimization if the second parameter is true.
- int32_t branch_offset(Label* L, bool jump_elimination_allowed);
- int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
- int32_t o = branch_offset(L, jump_elimination_allowed);
- ASSERT((o & 3) == 0); // Assert the offset is aligned.
- return o >> 2;
- }
- uint32_t jump_address(Label* L);
-
- // Puts a labels target address at the given position.
- // The high 8 bits are set to zero.
- void label_at_put(Label* L, int at_offset);
-
- // Read/Modify the code target address in the branch/call instruction at pc.
- static Address target_address_at(Address pc);
- static void set_target_address_at(Address pc, Address target);
-
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- inline static Address target_address_from_return_address(Address pc);
-
- static void JumpLabelToJumpRegister(Address pc);
-
- static void QuietNaN(HeapObject* nan);
-
- // This sets the branch destination (which gets loaded at the call address).
- // This is for calls and branches within generated code. The serializer
- // has already deserialized the lui/ori instructions etc.
- inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
- set_target_address_at(
- instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
- target);
- }
-
- // This sets the branch destination.
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
- // Size of an instruction.
- static const int kInstrSize = sizeof(Instr);
-
- // Difference between address of current opcode and target address offset.
- static const int kBranchPCOffset = 4;
-
- // Here we are patching the address in the LUI/ORI instruction pair.
- // These values are used in the serialization process and must be zero for
- // MIPS platform, as Code, Embedded Object or External-reference pointers
- // are split across two consecutive instructions and don't exist separately
- // in the code, so the serializer should not step forwards in memory after
- // a target is resolved and written.
- static const int kSpecialTargetSize = 0;
-
- // Number of consecutive instructions used to store 32bit constant.
- // Before jump-optimizations, this constant was used in
- // RelocInfo::target_address_address() function to tell serializer address of
- // the instruction that follows LUI/ORI instruction pair. Now, with new jump
- // optimization, where jump-through-register instruction that usually
- // follows LUI/ORI pair is substituted with J/JAL, this constant equals
- // to 3 instructions (LUI+ORI+J/JAL/JR/JALR).
- static const int kInstructionsFor32BitConstant = 3;
-
- // Distance between the instruction referring to the address of the call
- // target and the return address.
- static const int kCallTargetAddressOffset = 4 * kInstrSize;
-
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = 0;
-
- // Distance between start of patched debug break slot and the emitted address
- // to jump to.
- static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
-
- // Difference between address of current opcode and value read from pc
- // register.
- static const int kPcLoadDelta = 4;
-
- static const int kPatchDebugBreakSlotReturnOffset = 4 * kInstrSize;
-
- // Number of instructions used for the JS return sequence. The constant is
- // used by the debugger to patch the JS return sequence.
- static const int kJSReturnSequenceInstructions = 7;
- static const int kDebugBreakSlotInstructions = 4;
- static const int kDebugBreakSlotLength =
- kDebugBreakSlotInstructions * kInstrSize;
-
-
- // ---------------------------------------------------------------------------
- // Code generation.
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m. m must be a power of 2 (>= 4).
- void Align(int m);
- // Aligns code to something that's optimal for a jump target for the platform.
- void CodeTargetAlign();
-
- // Different nop operations are used by the code generator to detect certain
- // states of the generated code.
- enum NopMarkerTypes {
- NON_MARKING_NOP = 0,
- DEBUG_BREAK_NOP,
- // IC markers.
- PROPERTY_ACCESS_INLINED,
- PROPERTY_ACCESS_INLINED_CONTEXT,
- PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
- // Helper values.
- LAST_CODE_MARKER,
- FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
- // Code aging
- CODE_AGE_MARKER_NOP = 6
- };
-
- // Type == 0 is the default non-marking nop. For mips this is a
- // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
- // marking, to avoid conflict with ssnop and ehb instructions.
- void nop(unsigned int type = 0) {
- ASSERT(type < 32);
- Register nop_rt_reg = (type == 0) ? zero_reg : at;
- sll(zero_reg, nop_rt_reg, type, true);
- }
-
-
- // --------Branch-and-jump-instructions----------
- // We don't use likely variant of instructions.
- void b(int16_t offset);
- void b(Label* L) { b(branch_offset(L, false)>>2); }
- void bal(int16_t offset);
- void bal(Label* L) { bal(branch_offset(L, false)>>2); }
-
- void beq(Register rs, Register rt, int16_t offset);
- void beq(Register rs, Register rt, Label* L) {
- beq(rs, rt, branch_offset(L, false) >> 2);
- }
- void bgez(Register rs, int16_t offset);
- void bgezal(Register rs, int16_t offset);
- void bgtz(Register rs, int16_t offset);
- void blez(Register rs, int16_t offset);
- void bltz(Register rs, int16_t offset);
- void bltzal(Register rs, int16_t offset);
- void bne(Register rs, Register rt, int16_t offset);
- void bne(Register rs, Register rt, Label* L) {
- bne(rs, rt, branch_offset(L, false)>>2);
- }
-
- // Never use the int16_t b(l)cond version with a branch offset
- // instead of using the Label* version.
-
- // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits.
- void j(int32_t target);
- void jal(int32_t target);
- void jalr(Register rs, Register rd = ra);
- void jr(Register target);
- void j_or_jr(int32_t target, Register rs);
- void jal_or_jalr(int32_t target, Register rs);
-
-
- //-------Data-processing-instructions---------
-
- // Arithmetic.
- void addu(Register rd, Register rs, Register rt);
- void subu(Register rd, Register rs, Register rt);
- void mult(Register rs, Register rt);
- void multu(Register rs, Register rt);
- void div(Register rs, Register rt);
- void divu(Register rs, Register rt);
- void mul(Register rd, Register rs, Register rt);
-
- void addiu(Register rd, Register rs, int32_t j);
-
- // Logical.
- void and_(Register rd, Register rs, Register rt);
- void or_(Register rd, Register rs, Register rt);
- void xor_(Register rd, Register rs, Register rt);
- void nor(Register rd, Register rs, Register rt);
-
- void andi(Register rd, Register rs, int32_t j);
- void ori(Register rd, Register rs, int32_t j);
- void xori(Register rd, Register rs, int32_t j);
- void lui(Register rd, int32_t j);
-
- // Shifts.
- // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
- // and may cause problems in normal code. coming_from_nop makes sure this
- // doesn't happen.
- void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
- void sllv(Register rd, Register rt, Register rs);
- void srl(Register rd, Register rt, uint16_t sa);
- void srlv(Register rd, Register rt, Register rs);
- void sra(Register rt, Register rd, uint16_t sa);
- void srav(Register rt, Register rd, Register rs);
- void rotr(Register rd, Register rt, uint16_t sa);
- void rotrv(Register rd, Register rt, Register rs);
-
-
- //------------Memory-instructions-------------
-
- void lb(Register rd, const MemOperand& rs);
- void lbu(Register rd, const MemOperand& rs);
- void lh(Register rd, const MemOperand& rs);
- void lhu(Register rd, const MemOperand& rs);
- void lw(Register rd, const MemOperand& rs);
- void lwl(Register rd, const MemOperand& rs);
- void lwr(Register rd, const MemOperand& rs);
- void sb(Register rd, const MemOperand& rs);
- void sh(Register rd, const MemOperand& rs);
- void sw(Register rd, const MemOperand& rs);
- void swl(Register rd, const MemOperand& rs);
- void swr(Register rd, const MemOperand& rs);
-
-
- //-------------Misc-instructions--------------
-
- // Break / Trap instructions.
- void break_(uint32_t code, bool break_as_stop = false);
- void stop(const char* msg, uint32_t code = kMaxStopCode);
- void tge(Register rs, Register rt, uint16_t code);
- void tgeu(Register rs, Register rt, uint16_t code);
- void tlt(Register rs, Register rt, uint16_t code);
- void tltu(Register rs, Register rt, uint16_t code);
- void teq(Register rs, Register rt, uint16_t code);
- void tne(Register rs, Register rt, uint16_t code);
-
- // Move from HI/LO register.
- void mfhi(Register rd);
- void mflo(Register rd);
-
- // Set on less than.
- void slt(Register rd, Register rs, Register rt);
- void sltu(Register rd, Register rs, Register rt);
- void slti(Register rd, Register rs, int32_t j);
- void sltiu(Register rd, Register rs, int32_t j);
-
- // Conditional move.
- void movz(Register rd, Register rs, Register rt);
- void movn(Register rd, Register rs, Register rt);
- void movt(Register rd, Register rs, uint16_t cc = 0);
- void movf(Register rd, Register rs, uint16_t cc = 0);
-
- // Bit twiddling.
- void clz(Register rd, Register rs);
- void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
- void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
-
- //--------Coprocessor-instructions----------------
-
- // Load, store, and move.
- void lwc1(FPURegister fd, const MemOperand& src);
- void ldc1(FPURegister fd, const MemOperand& src);
-
- void swc1(FPURegister fs, const MemOperand& dst);
- void sdc1(FPURegister fs, const MemOperand& dst);
-
- void mtc1(Register rt, FPURegister fs);
- void mfc1(Register rt, FPURegister fs);
-
- void ctc1(Register rt, FPUControlRegister fs);
- void cfc1(Register rt, FPUControlRegister fs);
-
- // Arithmetic.
- void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
- void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
- void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
- void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
- void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
- void abs_d(FPURegister fd, FPURegister fs);
- void mov_d(FPURegister fd, FPURegister fs);
- void neg_d(FPURegister fd, FPURegister fs);
- void sqrt_d(FPURegister fd, FPURegister fs);
-
- // Conversion.
- void cvt_w_s(FPURegister fd, FPURegister fs);
- void cvt_w_d(FPURegister fd, FPURegister fs);
- void trunc_w_s(FPURegister fd, FPURegister fs);
- void trunc_w_d(FPURegister fd, FPURegister fs);
- void round_w_s(FPURegister fd, FPURegister fs);
- void round_w_d(FPURegister fd, FPURegister fs);
- void floor_w_s(FPURegister fd, FPURegister fs);
- void floor_w_d(FPURegister fd, FPURegister fs);
- void ceil_w_s(FPURegister fd, FPURegister fs);
- void ceil_w_d(FPURegister fd, FPURegister fs);
-
- void cvt_l_s(FPURegister fd, FPURegister fs);
- void cvt_l_d(FPURegister fd, FPURegister fs);
- void trunc_l_s(FPURegister fd, FPURegister fs);
- void trunc_l_d(FPURegister fd, FPURegister fs);
- void round_l_s(FPURegister fd, FPURegister fs);
- void round_l_d(FPURegister fd, FPURegister fs);
- void floor_l_s(FPURegister fd, FPURegister fs);
- void floor_l_d(FPURegister fd, FPURegister fs);
- void ceil_l_s(FPURegister fd, FPURegister fs);
- void ceil_l_d(FPURegister fd, FPURegister fs);
-
- void cvt_s_w(FPURegister fd, FPURegister fs);
- void cvt_s_l(FPURegister fd, FPURegister fs);
- void cvt_s_d(FPURegister fd, FPURegister fs);
-
- void cvt_d_w(FPURegister fd, FPURegister fs);
- void cvt_d_l(FPURegister fd, FPURegister fs);
- void cvt_d_s(FPURegister fd, FPURegister fs);
-
- // Conditions and branches.
- void c(FPUCondition cond, SecondaryField fmt,
- FPURegister ft, FPURegister fs, uint16_t cc = 0);
-
- void bc1f(int16_t offset, uint16_t cc = 0);
- void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
- void bc1t(int16_t offset, uint16_t cc = 0);
- void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
- void fcmp(FPURegister src1, const double src2, FPUCondition cond);
-
- // Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* label) {
- return pc_offset() - label->pos();
- }
-
- // Check the number of instructions generated from label to here.
- int InstructionsGeneratedSince(Label* label) {
- return SizeOfCodeGeneratedSince(label) / kInstrSize;
- }
-
- // Class for scoping postponing the trampoline pool generation.
- class BlockTrampolinePoolScope {
- public:
- explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
- assem_->StartBlockTrampolinePool();
- }
- ~BlockTrampolinePoolScope() {
- assem_->EndBlockTrampolinePool();
- }
-
- private:
- Assembler* assem_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
- };
-
- // Class for postponing the assembly buffer growth. Typically used for
- // sequences of instructions that must be emitted as a unit, before
- // buffer growth (and relocation) can occur.
- // This blocking scope is not nestable.
- class BlockGrowBufferScope {
- public:
- explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
- assem_->StartBlockGrowBuffer();
- }
- ~BlockGrowBufferScope() {
- assem_->EndBlockGrowBuffer();
- }
-
- private:
- Assembler* assem_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
- };
-
- // Debugging.
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot();
-
- // Record the AST id of the CallIC being compiled, so that it can be placed
- // in the relocation information.
- void SetRecordedAstId(TypeFeedbackId ast_id) {
- ASSERT(recorded_ast_id_.IsNone());
- recorded_ast_id_ = ast_id;
- }
-
- TypeFeedbackId RecordedAstId() {
- ASSERT(!recorded_ast_id_.IsNone());
- return recorded_ast_id_;
- }
-
- void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
- static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
-
- // Writes a single byte or word of data in the code stream. Used for
- // inline tables, e.g., jump-tables.
- void db(uint8_t data);
- void dd(uint32_t data);
-
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
-
- // Postpone the generation of the trampoline pool for the specified number of
- // instructions.
- void BlockTrampolinePoolFor(int instructions);
-
- // Check if there is less than kGap bytes available in the buffer.
- // If this is the case, we need to grow the buffer before emitting
- // an instruction or relocation information.
- inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
-
- // Get the number of bytes available in the buffer.
- inline int available_space() const { return reloc_info_writer.pos() - pc_; }
-
- // Read/patch instructions.
- static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
- static void instr_at_put(byte* pc, Instr instr) {
- *reinterpret_cast<Instr*>(pc) = instr;
- }
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
- void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
- }
-
- // Check if an instruction is a branch of some kind.
- static bool IsBranch(Instr instr);
- static bool IsBeq(Instr instr);
- static bool IsBne(Instr instr);
-
- static bool IsJump(Instr instr);
- static bool IsJ(Instr instr);
- static bool IsLui(Instr instr);
- static bool IsOri(Instr instr);
-
- static bool IsJal(Instr instr);
- static bool IsJr(Instr instr);
- static bool IsJalr(Instr instr);
-
- static bool IsNop(Instr instr, unsigned int type);
- static bool IsPop(Instr instr);
- static bool IsPush(Instr instr);
- static bool IsLwRegFpOffset(Instr instr);
- static bool IsSwRegFpOffset(Instr instr);
- static bool IsLwRegFpNegOffset(Instr instr);
- static bool IsSwRegFpNegOffset(Instr instr);
-
- static Register GetRtReg(Instr instr);
- static Register GetRsReg(Instr instr);
- static Register GetRdReg(Instr instr);
-
- static uint32_t GetRt(Instr instr);
- static uint32_t GetRtField(Instr instr);
- static uint32_t GetRs(Instr instr);
- static uint32_t GetRsField(Instr instr);
- static uint32_t GetRd(Instr instr);
- static uint32_t GetRdField(Instr instr);
- static uint32_t GetSa(Instr instr);
- static uint32_t GetSaField(Instr instr);
- static uint32_t GetOpcodeField(Instr instr);
- static uint32_t GetFunction(Instr instr);
- static uint32_t GetFunctionField(Instr instr);
- static uint32_t GetImmediate16(Instr instr);
- static uint32_t GetLabelConst(Instr instr);
-
- static int32_t GetBranchOffset(Instr instr);
- static bool IsLw(Instr instr);
- static int16_t GetLwOffset(Instr instr);
- static Instr SetLwOffset(Instr instr, int16_t offset);
-
- static bool IsSw(Instr instr);
- static Instr SetSwOffset(Instr instr, int16_t offset);
- static bool IsAddImmediate(Instr instr);
- static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
-
- static bool IsAndImmediate(Instr instr);
-
- void CheckTrampolinePool();
-
- protected:
- // Relocation for a type-recording IC has the AST id added to it. This
- // member variable is a way to pass the information from the call site to
- // the relocation info.
- TypeFeedbackId recorded_ast_id_;
-
- int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
- // Decode branch instruction at pos and return branch target pos.
- int target_at(int32_t pos);
-
- // Patch branch instruction at pos to branch to given branch target pos.
- void target_at_put(int32_t pos, int32_t target_pos);
-
- // Say if we need to relocate with this mode.
- bool MustUseReg(RelocInfo::Mode rmode);
-
- // Record reloc info for current pc_.
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
- // Block the emission of the trampoline pool before pc_offset.
- void BlockTrampolinePoolBefore(int pc_offset) {
- if (no_trampoline_pool_before_ < pc_offset)
- no_trampoline_pool_before_ = pc_offset;
- }
-
- void StartBlockTrampolinePool() {
- trampoline_pool_blocked_nesting_++;
- }
-
- void EndBlockTrampolinePool() {
- trampoline_pool_blocked_nesting_--;
- }
-
- bool is_trampoline_pool_blocked() const {
- return trampoline_pool_blocked_nesting_ > 0;
- }
-
- bool has_exception() const {
- return internal_trampoline_exception_;
- }
-
- void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);
-
- bool is_trampoline_emitted() const {
- return trampoline_emitted_;
- }
-
- // Temporarily block automatic assembly buffer growth.
- void StartBlockGrowBuffer() {
- ASSERT(!block_buffer_growth_);
- block_buffer_growth_ = true;
- }
-
- void EndBlockGrowBuffer() {
- ASSERT(block_buffer_growth_);
- block_buffer_growth_ = false;
- }
-
- bool is_buffer_growth_blocked() const {
- return block_buffer_growth_;
- }
-
- private:
- // Buffer size and constant pool distance are checked together at regular
- // intervals of kBufferCheckInterval emitted bytes.
- static const int kBufferCheckInterval = 1*KB/2;
-
- // Code generation.
- // The relocation writer's position is at least kGap bytes below the end of
- // the generated instructions. This is so that multi-instruction sequences do
- // not have to check for overflow. The same is true for writes of large
- // relocation info entries.
- static const int kGap = 32;
-
-
- // Repeated checking whether the trampoline pool should be emitted is rather
- // expensive. By default we only check again once a number of instructions
- // has been generated.
- static const int kCheckConstIntervalInst = 32;
- static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
-
- int next_buffer_check_; // pc offset of next buffer check.
-
- // Emission of the trampoline pool may be blocked in some code sequences.
- int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
- int no_trampoline_pool_before_; // Block emission before this pc offset.
-
- // Keep track of the last emitted pool to guarantee a maximal distance.
- int last_trampoline_pool_end_; // pc offset of the end of the last pool.
-
- // Automatic growth of the assembly buffer may be blocked for some sequences.
- bool block_buffer_growth_; // Block growth when true.
-
- // Relocation information generation.
- // Each relocation is encoded as a variable size value.
- static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
- RelocInfoWriter reloc_info_writer;
-
- // The bound position, before this we cannot do instruction elimination.
- int last_bound_pos_;
-
- // Code emission.
- inline void CheckBuffer();
- void GrowBuffer();
- inline void emit(Instr x);
- inline void CheckTrampolinePoolQuick();
-
- // Instruction generation.
- // We have 3 different kind of encoding layout on MIPS.
- // However due to many different types of objects encoded in the same fields
- // we have quite a few aliases for each mode.
- // Using the same structure to refer to Register and FPURegister would spare a
- // few aliases, but mixing both does not look clean to me.
- // Anyway we could surely implement this differently.
-
- void GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- Register rd,
- uint16_t sa = 0,
- SecondaryField func = NULLSF);
-
- void GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- uint16_t msb,
- uint16_t lsb,
- SecondaryField func);
-
- void GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func = NULLSF);
-
- void GenInstrRegister(Opcode opcode,
- FPURegister fr,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func = NULLSF);
-
- void GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func = NULLSF);
-
- void GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPUControlRegister fs,
- SecondaryField func = NULLSF);
-
-
- void GenInstrImmediate(Opcode opcode,
- Register rs,
- Register rt,
- int32_t j);
- void GenInstrImmediate(Opcode opcode,
- Register rs,
- SecondaryField SF,
- int32_t j);
- void GenInstrImmediate(Opcode opcode,
- Register r1,
- FPURegister r2,
- int32_t j);
-
-
- void GenInstrJump(Opcode opcode,
- uint32_t address);
-
- // Helpers.
- void LoadRegPlusOffsetToAt(const MemOperand& src);
-
- // Labels.
- void print(Label* L);
- void bind_to(Label* L, int pos);
- void next(Label* L);
-
- // One trampoline consists of:
- // - space for trampoline slots,
- // - space for labels.
- //
- // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
- // Space for trampoline slots preceeds space for labels. Each label is of one
- // instruction size, so total amount for labels is equal to
- // label_count * kInstrSize.
- class Trampoline {
- public:
- Trampoline() {
- start_ = 0;
- next_slot_ = 0;
- free_slot_count_ = 0;
- end_ = 0;
- }
- Trampoline(int start, int slot_count) {
- start_ = start;
- next_slot_ = start;
- free_slot_count_ = slot_count;
- end_ = start + slot_count * kTrampolineSlotsSize;
- }
- int start() {
- return start_;
- }
- int end() {
- return end_;
- }
- int take_slot() {
- int trampoline_slot = kInvalidSlotPos;
- if (free_slot_count_ <= 0) {
- // We have run out of space on trampolines.
- // Make sure we fail in debug mode, so we become aware of each case
- // when this happens.
- ASSERT(0);
- // Internal exception will be caught.
- } else {
- trampoline_slot = next_slot_;
- free_slot_count_--;
- next_slot_ += kTrampolineSlotsSize;
- }
- return trampoline_slot;
- }
-
- private:
- int start_;
- int end_;
- int next_slot_;
- int free_slot_count_;
- };
-
- int32_t get_trampoline_entry(int32_t pos);
- int unbound_labels_count_;
- // If trampoline is emitted, generated code is becoming large. As this is
- // already a slow case which can possibly break our code generation for the
- // extreme case, we use this information to trigger different mode of
- // branch instruction generation, where we use jump instructions rather
- // than regular branch instructions.
- bool trampoline_emitted_;
- static const int kTrampolineSlotsSize = 4 * kInstrSize;
- static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
- static const int kInvalidSlotPos = -1;
-
- Trampoline trampoline_;
- bool internal_trampoline_exception_;
-
- friend class RegExpMacroAssemblerMIPS;
- friend class RelocInfo;
- friend class CodePatcher;
- friend class BlockTrampolinePoolScope;
-
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
- friend class EnsureSpace;
-};
-
-
-class EnsureSpace BASE_EMBEDDED {
- public:
- explicit EnsureSpace(Assembler* assembler) {
- assembler->CheckBuffer();
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_ASSEMBLER_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/builtins-mips.cc b/src/3rdparty/v8/src/mips/builtins-mips.cc
deleted file mode 100644
index 58c213b..0000000
--- a/src/3rdparty/v8/src/mips/builtins-mips.cc
+++ /dev/null
@@ -1,1941 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments excluding receiver
- // -- a1 : called function (only guaranteed when
- // -- extra_args requires it)
- // -- cp : context
- // -- sp[0] : last argument
- // -- ...
- // -- sp[4 * (argc - 1)] : first argument
- // -- sp[4 * agrc] : receiver
- // -----------------------------------
-
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ push(a1);
- } else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- }
-
- // JumpToExternalReference expects s0 to contain the number of arguments
- // including the receiver and the extra arguments.
- __ Addu(s0, a0, num_extra_args + 1);
- __ sll(s1, s0, kPointerSizeLog2);
- __ Subu(s1, s1, kPointerSize);
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
-
-
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the native context.
-
- __ lw(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- // Load the InternalArray function from the native context.
- __ lw(result,
- MemOperand(result,
- Context::SlotOffset(
- Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
-}
-
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
-
- __ lw(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- // Load the Array function from the native context.
- __ lw(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. An elements backing store is allocated with size initial_capacity
-// and filled with the hole values.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ sw(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- __ sw(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(scratch3, zero_reg);
- __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
-
- if (initial_capacity == 0) {
- __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ Addu(scratch1, result, Operand(JSArray::kSize));
- __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- __ And(scratch1, scratch1, Operand(~kHeapObjectTagMask));
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array (untagged)
- // scratch2: start of next object
- __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
- STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
- __ sw(scratch3, MemOperand(scratch1));
- __ Addu(scratch1, scratch1, kPointerSize);
- __ li(scratch3, Operand(Smi::FromInt(initial_capacity)));
- STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
- __ sw(scratch3, MemOperand(scratch1));
- __ Addu(scratch1, scratch1, kPointerSize);
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- static const int kLoopUnfoldLimit = 4;
- if (initial_capacity <= kLoopUnfoldLimit) {
- for (int i = 0; i < initial_capacity; i++) {
- __ sw(scratch3, MemOperand(scratch1, i * kPointerSize));
- }
- } else {
- Label loop, entry;
- __ Addu(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
- __ Branch(&entry);
- __ bind(&loop);
- __ sw(scratch3, MemOperand(scratch1));
- __ Addu(scratch1, scratch1, kPointerSize);
- __ bind(&entry);
- __ Branch(&loop, lt, scratch1, Operand(scratch2));
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array_storage and elements_array_end
-// (see below for when that is not the case). If the parameter fill_with_holes
-// is true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array_storage is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array_storage,
- Register elements_array_end,
- Register scratch1,
- Register scratch2,
- bool fill_with_hole,
- Label* gc_required) {
- // Load the initial map from the array function.
- __ LoadInitialArrayMap(array_function, scratch2,
- elements_array_storage, fill_with_hole);
-
- if (FLAG_debug_code) { // Assert that array size is not zero.
- __ Assert(
- ne, "array size is unexpectedly 0", array_size, Operand(zero_reg));
- }
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested number of elements.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ li(elements_array_end,
- (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
- __ sra(scratch1, array_size, kSmiTagSize);
- __ Addu(elements_array_end, elements_array_end, scratch1);
- __ AllocateInNewSpace(
- elements_array_end,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array_storage: initial map
- // array_size: size of array (smi)
- __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
- __ sw(elements_array_storage,
- FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ sw(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // array_size: size of array (smi)
- __ Addu(elements_array_storage, result, Operand(JSArray::kSize));
- __ sw(elements_array_storage,
- FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- __ And(elements_array_storage,
- elements_array_storage,
- Operand(~kHeapObjectTagMask));
- // Initialize the fixed array and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // elements_array_storage: elements array (untagged)
- // array_size: size of array (smi)
- __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
- __ sw(scratch1, MemOperand(elements_array_storage));
- __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
-
- // Length of the FixedArray is the number of pre-allocated elements if
- // the actual JSArray has length 0 and the size of the JSArray for non-empty
- // JSArrays. The length of a FixedArray is stored as a smi.
- STATIC_ASSERT(kSmiTag == 0);
-
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ sw(array_size, MemOperand(elements_array_storage));
- __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
-
- // Calculate elements array and elements array end.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // array_size: smi-tagged size of elements array
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(elements_array_end, array_size, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(elements_array_end, elements_array_storage, elements_array_end);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
- __ Branch(&entry);
- __ bind(&loop);
- __ sw(scratch1, MemOperand(elements_array_storage));
- __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
-
- __ bind(&entry);
- __ Branch(&loop, lt, elements_array_storage, Operand(elements_array_end));
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// a0: argc
-// a1: constructor (built-in Array function)
-// ra: return address
-// sp[0]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in a1 needs to be preserved for
-// entering the generic code. In both cases argc in a0 needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// construct call and normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
- Label* call_generic_code) {
- Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
- has_non_smi_element, finish, cant_transition_map, not_double;
-
- // Check for array construction with zero arguments or one.
- __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
- // Handle construction of an empty array.
- __ bind(&empty_array);
- AllocateEmptyJSArray(masm,
- a1,
- a2,
- a3,
- t0,
- t1,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
- // Set up return value, remove receiver from stack and return.
- __ mov(v0, a2);
- __ Addu(sp, sp, Operand(kPointerSize));
- __ Ret();
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ Branch(&argc_two_or_more, ne, a0, Operand(1));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ lw(a2, MemOperand(sp)); // Get the argument from the stack.
- __ Branch(&not_empty_array, ne, a2, Operand(zero_reg));
- __ Drop(1); // Adjust stack.
- __ mov(a0, zero_reg); // Treat this as a call with argc of zero.
- __ Branch(&empty_array);
-
- __ bind(&not_empty_array);
- __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
- __ Branch(call_generic_code, eq, a3, Operand(zero_reg));
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is too large to actually allocate an elements array.
- STATIC_ASSERT(kSmiTag == 0);
- __ Branch(call_generic_code, Ugreater_equal, a2,
- Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
-
- // a0: argc
- // a1: constructor
- // a2: array_size (smi)
- // sp[0]: argument
- AllocateJSArray(masm,
- a1,
- a2,
- a3,
- t0,
- t1,
- t2,
- t3,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
-
- // Set up return value, remove receiver and argument from stack and return.
- __ mov(v0, a3);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ sll(a2, a0, kSmiTagSize); // Convert argc to a smi.
-
- // a0: argc
- // a1: constructor
- // a2: array_size (smi)
- // sp[0]: last argument
- AllocateJSArray(masm,
- a1,
- a2,
- a3,
- t0,
- t1,
- t2,
- t3,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, a2, t2);
-
- // Fill arguments as array elements. Copy from the top of the stack (last
- // element) to the array backing store filling it backwards. Note:
- // elements_array_end points after the backing store.
- // a0: argc
- // a3: JSArray
- // t0: elements_array storage start (untagged)
- // t1: elements_array_end (untagged)
- // sp[0]: last argument
-
- Label loop, entry;
- __ Branch(USE_DELAY_SLOT, &entry);
- __ mov(t3, sp);
- __ bind(&loop);
- __ lw(a2, MemOperand(t3));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(a2, &has_non_smi_element);
- }
- __ Addu(t3, t3, kPointerSize);
- __ Addu(t1, t1, -kPointerSize);
- __ sw(a2, MemOperand(t1));
- __ bind(&entry);
- __ Branch(&loop, lt, t0, Operand(t1));
-
- __ bind(&finish);
- __ mov(sp, t3);
-
- // Remove caller arguments and receiver from the stack, setup return value and
- // return.
- // a0: argc
- // a3: JSArray
- // sp[0]: receiver
- __ Addu(sp, sp, Operand(kPointerSize));
- __ mov(v0, a3);
- __ Ret();
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(
- a2, t5, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- __ UndoAllocationInNewSpace(a3, t0);
- __ Branch(call_generic_code);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- // a3: JSArray
- __ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- a2,
- t5,
- &cant_transition_map);
- __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ RecordWriteField(a3,
- HeapObject::kMapOffset,
- a2,
- t5,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- Label loop2;
- __ bind(&loop2);
- __ lw(a2, MemOperand(t3));
- __ Addu(t3, t3, kPointerSize);
- __ Subu(t1, t1, kPointerSize);
- __ sw(a2, MemOperand(t1));
- __ Branch(&loop2, lt, t0, Operand(t1));
- __ Branch(&finish);
-}
-
-
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, a1);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin InternalArray functions should be maps.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for InternalArray function",
- t0, Operand(zero_reg));
- __ GetObjectType(a2, a3, t0);
- __ Assert(eq, "Unexpected initial map for InternalArray function",
- t0, Operand(MAP_TYPE));
- }
-
- // Run the native code for the InternalArray function called as a normal
- // function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle the
- // construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_array_code;
-
- // Get the Array function.
- GenerateLoadArrayFunction(masm, a1);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function (1)",
- t0, Operand(zero_reg));
- __ GetObjectType(a2, a3, t0);
- __ Assert(eq, "Unexpected initial map for Array function (2)",
- t0, Operand(MAP_TYPE));
- }
-
- // Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- a2 : type info cell
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
- // Initial map for the builtin Array function should be a map.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a3, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function (3)",
- t0, Operand(zero_reg));
- __ GetObjectType(a1, a3, t0);
- __ Assert(eq, "Unexpected initial map for Array function (4)",
- t0, Operand(MAP_TYPE));
-
- // We should either have undefined in a2 or a valid jsglobalpropertycell
- Label okay_here;
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- Handle<Map> global_property_cell_map(
- masm->isolate()->heap()->global_property_cell_map());
- __ Branch(&okay_here, eq, a2, Operand(undefined_sentinel));
- __ lw(a3, FieldMemOperand(a2, 0));
- __ Assert(eq, "Expected property cell in register a3",
- a3, Operand(global_property_cell_map));
- __ bind(&okay_here);
- }
-
- if (FLAG_optimize_constructed_arrays) {
- Label not_zero_case, not_one_case;
- __ Branch(&not_zero_case, ne, a0, Operand(zero_reg));
- ArrayNoArgumentConstructorStub no_argument_stub;
- __ TailCallStub(&no_argument_stub);
-
- __ bind(&not_zero_case);
- __ Branch(&not_one_case, gt, a0, Operand(1));
- ArraySingleArgumentConstructorStub single_argument_stub;
- __ TailCallStub(&single_argument_stub);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub n_argument_stub;
- __ TailCallStub(&n_argument_stub);
- } else {
- Label generic_constructor;
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);
-
- Register function = a1;
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
- __ Assert(eq, "Unexpected String function", function, Operand(a2));
- }
-
- // Load the first arguments in a0 and get rid of the rest.
- Label no_arguments;
- __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
- // First args = sp[(argc - 1) * 4].
- __ Subu(a0, a0, Operand(1));
- __ sll(a0, a0, kPointerSizeLog2);
- __ Addu(sp, a0, sp);
- __ lw(a0, MemOperand(sp));
- // sp now point to args[0], drop args[0] + receiver.
- __ Drop(2);
-
- Register argument = a2;
- Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- a0, // Input.
- argument, // Result.
- a3, // Scratch.
- t0, // Scratch.
- t1, // Scratch.
- false, // Is it a Smi?
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
- __ bind(&argument_is_string);
-
- // ----------- S t a t e -------------
- // -- a2 : argument converted to string
- // -- a1 : constructor function
- // -- ra : return address
- // -----------------------------------
-
- Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- v0, // Result.
- a3, // Scratch.
- t0, // Scratch.
- &gc_required,
- TAG_OBJECT);
-
- // Initialising the String Object.
- Register map = a3;
- __ LoadGlobalFunctionInitialMap(function, map, t0);
- if (FLAG_debug_code) {
- __ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ Assert(eq, "Unexpected string wrapper instance size",
- t0, Operand(JSValue::kSize >> kPointerSizeLog2));
- __ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ Assert(eq, "Unexpected unused properties of string wrapper",
- t0, Operand(zero_reg));
- }
- __ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- __ sw(argument, FieldMemOperand(v0, JSValue::kValueOffset));
-
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-
- __ Ret();
-
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- __ JumpIfSmi(a0, &convert_argument);
-
- // Is it a String?
- __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ And(t0, a3, Operand(kIsNotStringMask));
- __ Branch(&convert_argument, ne, t0, Operand(zero_reg));
- __ mov(argument, a0);
- __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
- __ Branch(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into a2.
- __ bind(&convert_argument);
- __ push(function); // Preserve the function.
- __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(v0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- }
- __ pop(function);
- __ mov(argument, v0);
- __ Branch(&argument_is_string);
-
- // Load the empty string into a2, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kempty_stringRootIndex);
- __ Drop(1);
- __ Branch(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- }
- __ Ret();
-}
-
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
-}
-
-
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
-
- __ push(a1); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(t1);
- // Restore receiver.
- __ pop(a1);
-
- // Tear down internal frame.
- }
-
- GenerateTailCallToSharedCode(masm);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
-
- Isolate* isolate = masm->isolate();
-
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- // Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
-
- // Preserve the two incoming parameters on the stack.
- __ sll(a0, a0, kSmiTagSize); // Tag arguments count.
- __ MultiPushReversed(a0.bit() | a1.bit());
-
- // Use t7 to hold undefined, which is used in several places below.
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-
- Label rt_call, allocated;
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- if (FLAG_inline_new) {
- Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ li(a2, Operand(debug_step_in_fp));
- __ lw(a2, MemOperand(a2));
- __ Branch(&rt_call, ne, a2, Operand(zero_reg));
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // a1: constructor function
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &rt_call);
- __ GetObjectType(a2, a3, t4);
- __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // a1: constructor function
- // a2: initial map
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
- __ lbu(t0, constructor_count);
- __ Subu(t0, t0, Operand(1));
- __ sb(t0, constructor_count);
- __ Branch(&allocate, ne, t0, Operand(zero_reg));
-
- __ Push(a1, a2);
-
- __ push(a1); // Constructor.
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(a2);
- __ pop(a1);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // a1: constructor function
- // a2: initial map
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // a1: constructor function
- // a2: initial map
- // a3: object size
- // t4: JSObject (not tagged)
- __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t5, t4);
- __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
- __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
- __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
- __ Addu(t5, t5, Operand(3*kPointerSize));
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
- // Fill all the in-object properties with appropriate filler.
- // a1: constructor function
- // a2: initial map
- // a3: object size (in words)
- // t4: JSObject (not tagged)
- // t5: First in-object property of JSObject (not tagged)
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t6, t4, t0); // End of object.
- ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
- if (count_constructions) {
- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
- __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ sll(t0, a0, kPointerSizeLog2);
- __ addu(a0, t5, t0);
- // a0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ Assert(le, "Unexpected number of pre-allocated property fields.",
- a0, Operand(t6));
- }
- __ InitializeFieldsWithFiller(t5, a0, t7);
- // To allow for truncation.
- __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
- }
- __ InitializeFieldsWithFiller(t5, t6, t7);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- __ Addu(t4, t4, Operand(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed. Continue with
- // allocated object if not fall through to runtime call if it is.
- // a1: constructor function
- // t4: JSObject
- // t5: start of next object (not tagged)
- __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields
- // and in-object properties.
- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
- __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ Addu(a3, a3, Operand(t6));
- __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
- kBitsPerByte);
- __ subu(a3, a3, t6);
-
- // Done if no extra properties are to be allocated.
- __ Branch(&allocated, eq, a3, Operand(zero_reg));
- __ Assert(greater_equal, "Property allocation count failed.",
- a3, Operand(zero_reg));
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // a1: constructor
- // a3: number of elements in properties array
- // t4: JSObject
- // t5: start of next object
- __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
- a0,
- t5,
- t6,
- a2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // a1: constructor
- // a3: number of elements in properties array (untagged)
- // t4: JSObject
- // t5: start of next object
- __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
- __ mov(a2, t5);
- __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
- __ sll(a0, a3, kSmiTagSize);
- __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
- __ Addu(a2, a2, Operand(2 * kPointerSize));
-
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-
- // Initialize the fields to undefined.
- // a1: constructor
- // a2: First element of FixedArray (not tagged)
- // a3: number of elements in properties array
- // t4: JSObject
- // t5: FixedArray (not tagged)
- __ sll(t3, a3, kPointerSizeLog2);
- __ addu(t6, a2, t3); // End of object.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
- __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
- }
- __ jmp(&entry);
- __ bind(&loop);
- __ sw(t7, MemOperand(a2));
- __ addiu(a2, a2, kPointerSize);
- __ bind(&entry);
- __ Branch(&loop, less, a2, Operand(t6));
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject.
- // a1: constructor function
- // t4: JSObject
- // t5: FixedArray (not tagged)
- __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
-
- // Continue with JSObject being successfully allocated.
- // a1: constructor function
- // a4: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // t4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(t4, t5);
- }
-
- __ bind(&rt_call);
- // Allocate the new receiver object using the runtime call.
- // a1: constructor function
- __ push(a1); // Argument for Runtime_NewObject.
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(t4, v0);
-
- // Receiver for constructor call allocated.
- // t4: JSObject
- __ bind(&allocated);
- __ push(t4);
- __ push(t4);
-
- // Reload the number of arguments from the stack.
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, 2 * kPointerSize));
- __ lw(a3, MemOperand(sp, 3 * kPointerSize));
-
- // Set up pointer to last argument.
- __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Set up number of arguments for function call below.
- __ srl(a0, a3, kSmiTagSize);
-
- // Copy arguments and receiver to the expression stack.
- // a0: number of arguments
- // a1: constructor function
- // a2: address of last argument (caller sp)
- // a3: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
- Label loop, entry;
- __ jmp(&entry);
- __ bind(&loop);
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
- __ lw(t1, MemOperand(t0));
- __ push(t1);
- __ bind(&entry);
- __ Addu(a3, a3, Operand(-2));
- __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
-
- // Call the function.
- // a0: number of arguments
- // a1: constructor function
- if (is_api_function) {
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
-
- // Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context from the frame.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(v0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a1, a3);
- __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Symbols are "objects".
- __ lbu(a3, FieldMemOperand(a1, Map::kInstanceTypeOffset));
- __ Branch(&exit, eq, a3, Operand(SYMBOL_TYPE));
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ lw(v0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, 2 * kPointerSize));
-
- // Leave construct frame.
- }
-
- __ sll(t0, a1, kPointerSizeLog2 - 1);
- __ Addu(sp, sp, t0);
- __ Addu(sp, sp, kPointerSize);
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
- __ Ret();
-}
-
-
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
-}
-
-
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- // Called from JSEntryStub::GenerateBody
-
- // ----------- S t a t e -------------
- // -- a0: code entry
- // -- a1: function
- // -- a2: receiver_pointer
- // -- a3: argc
- // -- s0: argv
- // -----------------------------------
-
- // Clear the context before we push it when entering the JS frame.
- __ mov(cp, zero_reg);
-
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Set up the context from the function argument.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Push the function and the receiver onto the stack.
- __ Push(a1, a2);
-
- // Copy arguments to the stack in a loop.
- // a3: argc
- // s0: argv, i.e. points to first arg
- Label loop, entry;
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t2, s0, t0);
- __ b(&entry);
- __ nop(); // Branch delay slot nop.
- // t2 points past last arg.
- __ bind(&loop);
- __ lw(t0, MemOperand(s0)); // Read next parameter.
- __ addiu(s0, s0, kPointerSize);
- __ lw(t0, MemOperand(t0)); // Dereference handle.
- __ push(t0); // Push parameter.
- __ bind(&entry);
- __ Branch(&loop, ne, s0, Operand(t2));
-
- // Initialize all JavaScript callee-saved registers, since they will be seen
- // by the garbage collector as part of handlers.
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ mov(s1, t0);
- __ mov(s2, t0);
- __ mov(s3, t0);
- __ mov(s4, t0);
- __ mov(s5, t0);
- // s6 holds the root address. Do not clobber.
- // s7 is cp. Do not init.
-
- // Invoke the code and pass argc as a0.
- __ mov(a0, a3);
- if (is_construct) {
- // No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ li(a2, Operand(undefined_sentinel));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
-
- // Leave internal frame.
- }
-
- __ Jump(ra);
-}
-
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
-}
-
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
-}
-
-
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- // Call the runtime function.
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
-
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
-
- // Tear down temporary frame.
- }
-
- // Do a tail-call of the compiled function.
- __ Jump(t9);
-}
-
-
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
-
- // Tear down temporary frame.
- }
-
- // Do a tail-call of the compiled function.
- __ Jump(t9);
-}
-
-
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- __ mov(a0, ra);
- // Adjust a0 to point to the head of the PlatformCodeAge sequence
- __ Subu(a0, a0,
- Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
- // Restore the original return address of the function
- __ mov(ra, at);
-
- // The following registers must be saved and restored when calling through to
- // the runtime:
- // a0 - contains return address (beginning of patch sequence)
- // a1 - function object
- RegList saved_regs =
- (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
- FrameScope scope(masm, StackFrame::MANUAL);
- __ MultiPush(saved_regs);
- __ PrepareCallCFunction(1, 0, a1);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
- __ MultiPop(saved_regs);
- __ Jump(a0);
-}
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
-
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ MultiPush(kJSCallerSaved | kCalleeSaved);
- // Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
- __ MultiPop(kJSCallerSaved | kCalleeSaved);
- }
-
- __ Addu(sp, sp, Operand(kPointerSize)); // Ignore state
- __ Jump(ra); // Jump to miss handler
-}
-
-
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the function and deoptimization type to the runtime system.
- __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(a0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- }
-
- // Get the full codegen state from the stack and untag it -> t2.
- __ lw(t2, MemOperand(sp, 0 * kPointerSize));
- __ SmiUntag(t2);
- // Switch on the state.
- Label with_tos_register, unknown_state;
- __ Branch(&with_tos_register,
- ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
- __ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
- __ Ret();
-
- __ bind(&with_tos_register);
- __ lw(v0, MemOperand(sp, 1 * kPointerSize));
- __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
-
- __ Addu(sp, sp, Operand(2 * kPointerSize)); // Remove state.
- __ Ret();
-
- __ bind(&unknown_state);
- __ stop("no cases left");
-}
-
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- RegList saved_regs =
- (kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
- __ MultiPush(saved_regs);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ MultiPop(saved_regs);
- __ Ret();
-}
-
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- CpuFeatures::TryForceFeatureScope scope(VFP3);
- if (!CpuFeatures::IsSupported(FPU)) {
- __ Abort("Unreachable code: Cannot optimize without FPU support.");
- return;
- }
-
- // Lookup the function in the JavaScript frame and push it as an
- // argument to the on-stack replacement function.
- __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- }
-
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
- __ Ret(eq, v0, Operand(Smi::FromInt(-1)));
-
- // Untag the AST id and push it on the stack.
- __ SmiUntag(v0);
- __ push(v0);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- // 1. Make sure we have at least one argument.
- // a0: actual number of arguments
- { Label done;
- __ Branch(&done, ne, a0, Operand(zero_reg));
- __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
- __ push(t2);
- __ Addu(a0, a0, Operand(1));
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- // a0: actual number of arguments
- Label slow, non_function;
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
- __ lw(a1, MemOperand(at));
- __ JumpIfSmi(a1, &non_function);
- __ GetObjectType(a1, a2, a2);
- __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- // 3a. Patch the first argument if necessary when calling a function.
- // a0: actual number of arguments
- // a1: function
- Label shift_arguments;
- __ li(t0, Operand(0, RelocInfo::NONE32)); // Indicate regular JS_FUNCTION.
- { Label convert_to_object, use_global_receiver, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
-
- // Do not transform the receiver for native (Compilerhints already in a3).
- __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
-
- // Compute the receiver in non-strict mode.
- // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(a2, sp, at);
- __ lw(a2, MemOperand(a2, -kPointerSize));
- // a0: actual number of arguments
- // a1: function
- // a2: first argument
- __ JumpIfSmi(a2, &convert_to_object, t2);
-
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- __ Branch(&use_global_receiver, eq, a2, Operand(a3));
- __ LoadRoot(a3, Heap::kNullValueRootIndex);
- __ Branch(&use_global_receiver, eq, a2, Operand(a3));
-
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ GetObjectType(a2, a3, a3);
- __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- __ bind(&convert_to_object);
- // Enter an internal frame in order to preserve argument count.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ sll(a0, a0, kSmiTagSize); // Smi tagged.
- __ push(a0);
-
- __ push(a2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(a2, v0);
-
- __ pop(a0);
- __ sra(a0, a0, kSmiTagSize); // Un-tag.
- // Leave internal frame.
- }
- // Restore the function to a1, and the flag to t0.
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
- __ lw(a1, MemOperand(at));
- __ li(t0, Operand(0, RelocInfo::NONE32));
- __ Branch(&patch_receiver);
-
- // Use the global receiver object from the called function as the
- // receiver.
- __ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ lw(a2, FieldMemOperand(cp, kGlobalIndex));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
- __ lw(a2, FieldMemOperand(a2, kGlobalIndex));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
-
- __ bind(&patch_receiver);
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(a3, sp, at);
- __ sw(a2, MemOperand(a3, -kPointerSize));
-
- __ Branch(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ li(t0, Operand(1, RelocInfo::NONE32)); // Indicate function proxy.
- __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
-
- __ bind(&non_function);
- __ li(t0, Operand(2, RelocInfo::NONE32)); // Indicate non-function.
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- // a0: actual number of arguments
- // a1: function
- // t0: call type (0: JS function, 1: function proxy, 2: non-function)
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(a2, sp, at);
- __ sw(a1, MemOperand(a2, -kPointerSize));
-
- // 4. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // a0: actual number of arguments
- // a1: function
- // t0: call type (0: JS function, 1: function proxy, 2: non-function)
- __ bind(&shift_arguments);
- { Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(a2, sp, at);
-
- __ bind(&loop);
- __ lw(at, MemOperand(a2, -kPointerSize));
- __ sw(at, MemOperand(a2));
- __ Subu(a2, a2, Operand(kPointerSize));
- __ Branch(&loop, ne, a2, Operand(sp));
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ Subu(a0, a0, Operand(1));
- __ Pop();
- }
-
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- // a0: actual number of arguments
- // a1: function
- // t0: call type (0: JS function, 1: function proxy, 2: non-function)
- { Label function, non_proxy;
- __ Branch(&function, eq, t0, Operand(zero_reg));
- // Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ mov(a2, zero_reg);
- __ SetCallKind(t1, CALL_AS_METHOD);
- __ Branch(&non_proxy, ne, t0, Operand(1));
-
- __ push(a1); // Re-add proxy object as additional argument.
- __ Addu(a0, a0, Operand(1));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- // a0: actual number of arguments
- // a1: function
- __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2,
- FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ sra(a2, a2, kSmiTagSize);
- __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ SetCallKind(t1, CALL_AS_METHOD);
- // Check formal and actual parameter counts.
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
-
- ParameterCount expected(0);
- __ InvokeCode(a3, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset = -5 * kPointerSize;
- const int kLimitOffset = -4 * kPointerSize;
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
- __ push(a0);
- __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
- __ push(a0);
- // Returns (in v0) number of arguments to copy to stack as Smi.
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
- // Make a2 the space we have left. The stack might already be overflowed
- // here which will cause a2 to become negative.
- __ subu(a2, sp, a2);
- // Check if the arguments will overflow the stack.
- __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize);
- __ Branch(&okay, gt, a2, Operand(t3)); // Signed comparison.
-
- // Out of stack space.
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ push(a1);
- __ push(v0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
-
- // Push current limit and index.
- __ bind(&okay);
- __ push(v0); // Limit.
- __ mov(a1, zero_reg); // Initial index.
- __ push(a1);
-
- // Get the receiver.
- __ lw(a0, MemOperand(fp, kRecvOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ GetObjectType(a1, a2, a2);
- __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- // Change context eagerly to get the right global object if necessary.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in a1.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
-
- // Do not transform the receiver for native (Compilerhints already in a2).
- __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
-
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(a0, &call_to_object);
- __ LoadRoot(a1, Heap::kNullValueRootIndex);
- __ Branch(&use_global_receiver, eq, a0, Operand(a1));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Branch(&use_global_receiver, eq, a0, Operand(a2));
-
- // Check if the receiver is already a JavaScript object.
- // a0: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ GetObjectType(a0, a1, a1);
- __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Convert the receiver to a regular object.
- // a0: receiver
- __ bind(&call_to_object);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
- __ Branch(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
- __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- // a0: receiver
- __ bind(&push_receiver);
- __ push(a0);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ lw(a0, MemOperand(fp, kIndexOffset));
- __ Branch(&entry);
-
- // Load the current argument from the arguments array and push it to the
- // stack.
- // a0: current argument index
- __ bind(&loop);
- __ lw(a1, MemOperand(fp, kArgsOffset));
- __ push(a1);
- __ push(a0);
-
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(v0);
-
- // Use inline caching to access the arguments.
- __ lw(a0, MemOperand(fp, kIndexOffset));
- __ Addu(a0, a0, Operand(1 << kSmiTagSize));
- __ sw(a0, MemOperand(fp, kIndexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ lw(a1, MemOperand(fp, kLimitOffset));
- __ Branch(&loop, ne, a0, Operand(a1));
-
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(a0);
- __ sra(a0, a0, kSmiTagSize);
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ GetObjectType(a1, a2, a2);
- __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-
- frame_scope.GenerateLeaveFrame();
- __ Ret(USE_DELAY_SLOT);
- __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
-
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(a1); // Add function proxy as last argument.
- __ Addu(a0, a0, Operand(1));
- __ li(a2, Operand(0, RelocInfo::NONE32));
- __ SetCallKind(t1, CALL_AS_METHOD);
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- // Tear down the internal frame and remove function, receiver and args.
- }
-
- __ Ret(USE_DELAY_SLOT);
- __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
-}
-
-
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ sll(a0, a0, kSmiTagSize);
- __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
- __ Addu(fp, sp, Operand(3 * kPointerSize));
-}
-
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- v0 : result being passed through
- // -----------------------------------
- // Get the number of arguments passed (as a smi), tear down the frame and
- // then tear down the parameters.
- __ lw(a1, MemOperand(fp, -3 * kPointerSize));
- __ mov(sp, fp);
- __ MultiPop(fp.bit() | ra.bit());
- __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(sp, sp, t0);
- // Adjust for the receiver.
- __ Addu(sp, sp, Operand(kPointerSize));
-}
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // State setup as expected by MacroAssembler::InvokePrologue.
- // ----------- S t a t e -------------
- // -- a0: actual arguments count
- // -- a1: function (passed through to callee)
- // -- a2: expected arguments count
- // -- a3: callee code entry
- // -- t1: call kind information
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments;
-
- Label enough, too_few;
- __ Branch(&dont_adapt_arguments, eq,
- a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- // We use Uless as the number of argument should always be greater than 0.
- __ Branch(&too_few, Uless, a0, Operand(a2));
-
- { // Enough parameters: actual >= expected.
- // a0: actual number of arguments as a smi
- // a1: function
- // a2: expected number of arguments
- // a3: code entry to call
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
-
- // Calculate copy start address into a0 and copy end address into a2.
- __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a0, fp, a0);
- // Adjust for return address and receiver.
- __ Addu(a0, a0, Operand(2 * kPointerSize));
- // Compute copy end address.
- __ sll(a2, a2, kPointerSizeLog2);
- __ subu(a2, a0, a2);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // a0: copy start address
- // a1: function
- // a2: copy end address
- // a3: code entry to call
-
- Label copy;
- __ bind(&copy);
- __ lw(t0, MemOperand(a0));
- __ push(t0);
- __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2));
- __ addiu(a0, a0, -kPointerSize); // In delay slot.
-
- __ jmp(&invoke);
- }
-
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
-
- // Calculate copy start address into a0 and copy end address is fp.
- // a0: actual number of arguments as a smi
- // a1: function
- // a2: expected number of arguments
- // a3: code entry to call
- __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a0, fp, a0);
- // Adjust for return address and receiver.
- __ Addu(a0, a0, Operand(2 * kPointerSize));
- // Compute copy end address. Also adjust for return address.
- __ Addu(t3, fp, kPointerSize);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // a0: copy start address
- // a1: function
- // a2: expected number of arguments
- // a3: code entry to call
- // t3: copy end address
- Label copy;
- __ bind(&copy);
- __ lw(t0, MemOperand(a0)); // Adjusted above for return addr and receiver.
- __ Subu(sp, sp, kPointerSize);
- __ Subu(a0, a0, kPointerSize);
- __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t3));
- __ sw(t0, MemOperand(sp)); // In the delay slot.
-
- // Fill the remaining expected arguments with undefined.
- // a1: function
- // a2: expected number of arguments
- // a3: code entry to call
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ sll(t2, a2, kPointerSizeLog2);
- __ Subu(a2, fp, Operand(t2));
- __ Addu(a2, a2, Operand(-4 * kPointerSize)); // Adjust for frame.
-
- Label fill;
- __ bind(&fill);
- __ Subu(sp, sp, kPointerSize);
- __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a2));
- __ sw(t0, MemOperand(sp));
- }
-
- // Call the entry point.
- __ bind(&invoke);
-
- __ Call(a3);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Ret();
-
-
- // -------------------------------------------
- // Don't adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ Jump(a3);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/code-stubs-mips.cc b/src/3rdparty/v8/src/mips/code-stubs-mips.cc
deleted file mode 100644
index 6abccaf..0000000
--- a/src/3rdparty/v8/src/mips/code-stubs-mips.cc
+++ /dev/null
@@ -1,8292 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a3, a2, a1, a0 };
- descriptor->register_param_count_ = 4;
- descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a1, a0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a0, a1 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- Address entry =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
-}
-
-
-static void InitializeArrayConstructorDescriptor(Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // register state
- // a1 -- constructor function
- // a2 -- type info cell with elements kind
- // a0 -- number of arguments to the constructor function
- static Register registers[] = { a1, a2 };
- descriptor->register_param_count_ = 2;
- // stack param count needs (constructor pointer, and single argument)
- descriptor->stack_parameter_count_ = &a0;
- descriptor->register_params_ = registers;
- descriptor->extra_expression_stack_count_ = 1;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ArrayConstructor_StubFailure);
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cc);
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* rhs_not_nan,
- Label* slow,
- bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs);
-
-
-// Check if the operand is a heap number.
-static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
- Register scratch1, Register scratch2,
- Label* not_a_heap_number) {
- __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
- __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
- __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
-}
-
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in a0.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(a0, &check_heap_number);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- __ bind(&check_heap_number);
- EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- __ bind(&call_builtin);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in cp.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
-
- // Pop the function info from the stack.
- __ pop(a3);
-
- // Attempt to allocate new JSFunction in new space.
- __ AllocateInNewSpace(JSFunction::kSize,
- v0,
- a1,
- a2,
- &gc,
- TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
-
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
- __ lw(t1, MemOperand(a2, Context::SlotOffset(map_index)));
- __ sw(t1, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
- __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sw(t1, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
- __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
- __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
- __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ lw(a1,
- FieldMemOperand(a3, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ And(at, a1, a1);
- __ Branch(&check_optimized, ne, at, Operand(zero_reg));
- }
- __ bind(&install_unoptimized);
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
- __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
- __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Return result. The argument function info has been popped already.
- __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
- __ Ret();
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);
-
- // a2 holds native context, a1 points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into t0.
- __ lw(t0, FieldMemOperand(a1, FixedArray::kHeaderSize + kPointerSize));
- __ lw(t1, FieldMemOperand(a1, FixedArray::kHeaderSize));
- __ Branch(&install_optimized, eq, a2, Operand(t1));
-
- // Iterate through the rest of map backwards. t0 holds an index as a Smi.
- Label loop;
- __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double check first entry.
-
- __ Branch(&install_unoptimized, eq, t0,
- Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ Subu(t0, t0, Operand(
- Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
- __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t1, t1, Operand(at));
- __ lw(t1, MemOperand(t1));
- __ Branch(&loop, ne, a2, Operand(t1));
- // Hit: fetch the optimized code.
- __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t1, t1, Operand(at));
- __ Addu(t1, t1, Operand(kPointerSize));
- __ lw(t0, MemOperand(t1));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(),
- 1, t2, t3);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sw(t0, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
-
- // Now link a function into a list of optimized functions.
- __ lw(t0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
- // No need for write barrier as JSFunction (eax) is in the new space.
-
- __ sw(v0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Store JSFunction (eax) into edx before issuing write barrier as
- // it clobbers all the registers passed.
- __ mov(t0, v0);
- __ RecordWriteContextSlot(
- a2,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- t0,
- a1,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ LoadRoot(t0, Heap::kFalseValueRootIndex);
- __ Push(cp, a3, t0);
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-
- // Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- v0,
- a1,
- a2,
- &gc,
- TAG_OBJECT);
-
- // Load the function from the stack.
- __ lw(a3, MemOperand(sp, 0));
-
- // Set up the object header.
- __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
- __ li(a2, Operand(Smi::FromInt(length)));
- __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
- __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ li(a1, Operand(Smi::FromInt(0)));
- __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-
- // Copy the qml global object from the surrounding context.
- __ lw(a1,
- MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
- __ sw(a1,
- MemOperand(v0, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
-
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, v0);
- __ DropAndRet(1);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: function.
- // [sp + kPointerSize]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- v0, a1, a2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ lw(a3, MemOperand(sp, 0));
-
- // Load the serialized scope info from the stack.
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ li(a2, Operand(Smi::FromInt(length)));
- __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(a3, &after_sentinel);
- if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ Assert(eq, message, a3, Operand(zero_reg));
- }
- __ lw(a3, GlobalObjectOperand());
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
- __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
- __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
- __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
- __ sw(a2, ContextOperand(v0, Context::GLOBAL_OBJECT_INDEX));
-
- // Copy the qml global object from the surrounding context.
- __ lw(a1, ContextOperand(cp, Context::QML_GLOBAL_OBJECT_INDEX));
- __ sw(a1, ContextOperand(v0, Context::QML_GLOBAL_OBJECT_INDEX));
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, v0);
- __ DropAndRet(2);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- AllocationSiteMode allocation_site_mode,
- Label* fail) {
- // Registers on entry:
- // a3: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
-
- int size = JSArray::kSize;
- int allocation_info_start = size;
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- size += AllocationSiteInfo::kSize;
- }
- size += elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- v0,
- a1,
- a2,
- fail,
- TAG_OBJECT);
-
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ li(a2, Operand(Handle<Map>(masm->isolate()->heap()->
- allocation_site_info_map())));
- __ sw(a2, FieldMemOperand(v0, allocation_info_start));
- __ sw(a3, FieldMemOperand(v0, allocation_info_start + kPointerSize));
- }
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ lw(a1, FieldMemOperand(a3, i));
- __ sw(a1, FieldMemOperand(v0, i));
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ Addu(a2, v0, Operand(JSArray::kSize + AllocationSiteInfo::kSize));
- } else {
- __ Addu(a2, v0, Operand(JSArray::kSize));
- }
- __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
-
- // Copy the elements array.
- ASSERT((elements_size % kPointerSize) == 0);
- __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
- }
-}
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: constant elements.
- // [sp + kPointerSize]: literal index.
- // [sp + (2 * kPointerSize)]: literals array.
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ lw(a3, MemOperand(sp, 2 * kPointerSize));
- __ lw(a0, MemOperand(sp, 1 * kPointerSize));
- __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a3, t0);
- __ lw(a3, MemOperand(t0));
- __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
- __ Branch(&slow_case, eq, a3, Operand(t1));
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
- __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
- __ Branch(&check_fast_elements, ne, v0, Operand(t1));
- GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- // Return and remove the on-stack parameters.
- __ DropAndRet(3);
-
- __ bind(&check_fast_elements);
- __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
- __ Branch(&double_elements, ne, v0, Operand(t1));
- GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- // Return and remove the on-stack parameters.
- __ DropAndRet(3);
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(a3);
- __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
- __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ LoadRoot(at, expected_map_index);
- __ Assert(eq, message, a3, Operand(at));
- __ pop(a3);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode,
- allocation_site_mode_,
- &slow_case);
-
- // Return and remove the on-stack parameters.
- __ DropAndRet(3);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
-// scratch register. Destroys the source register. No GC occurs during this
-// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public PlatformCodeStub {
- public:
- ConvertToDoubleStub(Register result_reg_1,
- Register result_reg_2,
- Register source_reg,
- Register scratch_reg)
- : result1_(result_reg_1),
- result2_(result_reg_2),
- source_(source_reg),
- zeros_(scratch_reg) { }
-
- private:
- Register result1_;
- Register result2_;
- Register source_;
- Register zeros_;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
-
- Major MajorKey() { return ConvertToDouble; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return result1_.code() +
- (result2_.code() << 4) +
- (source_.code() << 8) +
- (zeros_.code() << 12);
- }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
- Register exponent = result1_;
- Register mantissa = result2_;
-#else
- Register exponent = result2_;
- Register mantissa = result1_;
-#endif
- Label not_special;
- // Convert from Smi to integer.
- __ sra(source_, source_, kSmiTagSize);
- // Move sign bit from source to destination. This works because the sign bit
- // in the exponent word of the double has the same position and polarity as
- // the 2's complement sign bit in a Smi.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ And(exponent, source_, Operand(HeapNumber::kSignMask));
- // Subtract from 0 if source was negative.
- __ subu(at, zero_reg, source_);
- __ Movn(source_, at, exponent);
-
- // We have -1, 0 or 1, which we treat specially. Register source_ contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ Branch(&not_special, gt, source_, Operand(1));
-
- // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- const uint32_t exponent_word_for_1 =
- HeapNumber::kExponentBias << HeapNumber::kExponentShift;
- // Safe to use 'at' as dest reg here.
- __ Or(at, exponent, Operand(exponent_word_for_1));
- __ Movn(exponent, at, source_); // Write exp when source not 0.
- // 1, 0 and -1 all have 0 for the second word.
- __ Ret(USE_DELAY_SLOT);
- __ mov(mantissa, zero_reg);
-
- __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- __ Clz(zeros_, source_);
- // Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here.
- __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
- __ subu(mantissa, mantissa, zeros_);
- __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
- __ Or(exponent, exponent, mantissa);
-
- // Shift up the source chopping the top bit off.
- __ Addu(zeros_, zeros_, Operand(1));
- // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
- __ sllv(source_, source_, zeros_);
- // Compute lower part of fraction (last 12 bits).
- __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
- // And the top (top 20 bits).
- __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
-
- __ Ret(USE_DELAY_SLOT);
- __ or_(exponent, exponent, source_);
-}
-
-
-void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ sra(scratch1, a0, kSmiTagSize);
- __ mtc1(scratch1, f14);
- __ cvt_d_w(f14, f14);
- __ sra(scratch1, a1, kSmiTagSize);
- __ mtc1(scratch1, f12);
- __ cvt_d_w(f12, f12);
- if (destination == kCoreRegisters) {
- __ Move(a2, a3, f14);
- __ Move(a0, a1, f12);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write Smi from a0 to a3 and a2 in double format.
- __ mov(scratch1, a0);
- ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
- __ push(ra);
- __ Call(stub1.GetCode(masm->isolate()));
- // Write Smi from a1 to a1 and a0 in double format.
- __ mov(scratch1, a1);
- ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
- __ Call(stub2.GetCode(masm->isolate()));
- __ pop(ra);
- }
-}
-
-
-void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
- Destination destination,
- Register object,
- FPURegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
-
- Label is_smi, done;
-
- // Smi-check
- __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
- // Heap number check
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
-
- // Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(FPU) &&
- destination == kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
- // Load the double from tagged HeapNumber to double register.
-
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
- // point in generating even more instructions.
- __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
- } else {
- ASSERT(destination == kCoreRegisters);
- // Load the double from heap number to dst1 and dst2 in double format.
- __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ lw(dst2, FieldMemOperand(object,
- HeapNumber::kValueOffset + kPointerSize));
- }
- __ Branch(&done);
-
- // Handle loading a double from a smi.
- __ bind(&is_smi);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Convert smi to double using FPU instructions.
- __ mtc1(scratch1, dst);
- __ cvt_d_w(dst, dst);
- if (destination == kCoreRegisters) {
- // Load the converted smi to dst1 and dst2 in double format.
- __ Move(dst1, dst2, dst);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write smi to dst1 and dst2 double format.
- __ mov(scratch1, object);
- ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
- __ push(ra);
- __ Call(stub.GetCode(masm->isolate()));
- __ pop(ra);
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- Label done;
- Label not_in_int32_range;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
- __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
- __ ConvertToInt32(object,
- dst,
- scratch1,
- scratch2,
- double_scratch,
- &not_in_int32_range);
- __ jmp(&done);
-
- __ bind(&not_in_int32_range);
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- __ EmitOutOfInt32RangeTruncate(dst,
- scratch1,
- scratch2,
- scratch3);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- FPURegister double_dst,
- Register dst_mantissa,
- Register dst_exponent,
- Register scratch2,
- FPURegister single_scratch) {
- ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst_mantissa));
- ASSERT(!int_scratch.is(dst_exponent));
-
- Label done;
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ mtc1(int_scratch, single_scratch);
- __ cvt_d_w(double_dst, single_scratch);
- if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
- }
- } else {
- Label fewer_than_20_useful_bits;
- // Expected output:
- // | dst_exponent | dst_mantissa |
- // | s | exp | mantissa |
-
- // Check for zero.
- __ mov(dst_exponent, int_scratch);
- __ mov(dst_mantissa, int_scratch);
- __ Branch(&done, eq, int_scratch, Operand(zero_reg));
-
- // Preload the sign of the value.
- __ And(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask));
- // Get the absolute value of the object (as an unsigned integer).
- Label skip_sub;
- __ Branch(&skip_sub, ge, dst_exponent, Operand(zero_reg));
- __ Subu(int_scratch, zero_reg, int_scratch);
- __ bind(&skip_sub);
-
- // Get mantissa[51:20].
-
- // Get the position of the first set bit.
- __ Clz(dst_mantissa, int_scratch);
- __ li(scratch2, 31);
- __ Subu(dst_mantissa, scratch2, dst_mantissa);
-
- // Set the exponent.
- __ Addu(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
- __ Ins(dst_exponent, scratch2,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-
- // Clear the first non null bit.
- __ li(scratch2, Operand(1));
- __ sllv(scratch2, scratch2, dst_mantissa);
- __ li(at, -1);
- __ Xor(scratch2, scratch2, at);
- __ And(int_scratch, int_scratch, scratch2);
-
- // Get the number of bits to set in the lower part of the mantissa.
- __ Subu(scratch2, dst_mantissa,
- Operand(HeapNumber::kMantissaBitsInTopWord));
- __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
- // Set the higher 20 bits of the mantissa.
- __ srlv(at, int_scratch, scratch2);
- __ or_(dst_exponent, dst_exponent, at);
- __ li(at, 32);
- __ subu(scratch2, at, scratch2);
- __ sllv(dst_mantissa, int_scratch, scratch2);
- __ Branch(&done);
-
- __ bind(&fewer_than_20_useful_bits);
- __ li(at, HeapNumber::kMantissaBitsInTopWord);
- __ subu(scratch2, at, dst_mantissa);
- __ sllv(scratch2, int_scratch, scratch2);
- __ Or(dst_exponent, dst_exponent, scratch2);
- // Set dst_mantissa to 0.
- __ mov(dst_mantissa, zero_reg);
- }
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DoubleRegister double_dst,
- DoubleRegister double_scratch,
- Register dst_mantissa,
- Register dst_exponent,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister single_scratch,
- Label* not_int32) {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!heap_number_map.is(object) &&
- !heap_number_map.is(scratch1) &&
- !heap_number_map.is(scratch2));
-
- Label done, obj_is_not_smi;
-
- __ JumpIfNotSmi(object, &obj_is_not_smi);
- __ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
- dst_exponent, scratch2, single_scratch);
- __ Branch(&done);
-
- __ bind(&obj_is_not_smi);
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Load the number.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Load the double value.
- __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- scratch1,
- double_dst,
- at,
- double_scratch,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
-
- if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
- }
-
- } else {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers.
- bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
- if (save_registers) {
- // Save both output registers, because the other one probably holds
- // an important value too.
- __ Push(dst_exponent, dst_mantissa);
- }
- __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- // Check for 0 and -0.
- Label zero;
- __ And(scratch1, dst_exponent, Operand(~HeapNumber::kSignMask));
- __ Or(scratch1, scratch1, Operand(dst_mantissa));
- __ Branch(&zero, eq, scratch1, Operand(zero_reg));
-
- // Check that the value can be exactly represented by a 32-bit integer.
- // Jump to not_int32 if that's not the case.
- Label restore_input_and_miss;
- DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
- &restore_input_and_miss);
-
- // dst_* were trashed. Reload the double value.
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
- __ Branch(&done);
-
- __ bind(&restore_input_and_miss);
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ Branch(not_int32);
-
- __ bind(&zero);
- if (save_registers) {
- __ Drop(2);
- }
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DoubleRegister double_scratch0,
- DoubleRegister double_scratch1,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
- ASSERT(!scratch1.is(scratch2) &&
- !scratch1.is(scratch3) &&
- !scratch2.is(scratch3));
-
- Label done, maybe_undefined;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
-
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
-
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
-
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Load the double value.
- __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- dst,
- double_scratch0,
- scratch1,
- double_scratch1,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
- } else {
- // Load the double value in the destination registers.
- __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- // Check for 0 and -0.
- __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
- __ Or(dst, scratch2, Operand(dst));
- __ Branch(&done, eq, dst, Operand(zero_reg));
-
- DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
-
- // Registers state after DoubleIs32BitInteger.
- // dst: mantissa[51:20].
- // scratch2: 1
-
- // Shift back the higher bits of the mantissa.
- __ srlv(dst, dst, scratch3);
- // Set the implicit first bit.
- __ li(at, 32);
- __ subu(scratch3, at, scratch3);
- __ sllv(scratch2, scratch2, scratch3);
- __ Or(dst, dst, scratch2);
- // Set the sign.
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- Label skip_sub;
- __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
- __ Subu(dst, zero_reg, dst);
- __ bind(&skip_sub);
- }
- __ Branch(&done);
-
- __ bind(&maybe_undefined);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(not_int32, ne, object, Operand(at));
- // |undefined| is truncated to 0.
- __ li(dst, Operand(Smi::FromInt(0)));
- // Fall through.
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src_exponent,
- Register src_mantissa,
- Register dst,
- Register scratch,
- Label* not_int32) {
- // Get exponent alone in scratch.
- __ Ext(scratch,
- src_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Substract the bias from the exponent.
- __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
-
- // src1: higher (exponent) part of the double value.
- // src2: lower (mantissa) part of the double value.
- // scratch: unbiased exponent.
-
- // Fast cases. Check for obvious non 32-bit integer values.
- // Negative exponent cannot yield 32-bit integers.
- __ Branch(not_int32, lt, scratch, Operand(zero_reg));
- // Exponent greater than 31 cannot yield 32-bit integers.
- // Also, a positive value with an exponent equal to 31 is outside of the
- // signed 32-bit integer range.
- // Another way to put it is that if (exponent - signbit) > 30 then the
- // number cannot be represented as an int32.
- Register tmp = dst;
- __ srl(at, src_exponent, 31);
- __ subu(tmp, scratch, at);
- __ Branch(not_int32, gt, tmp, Operand(30));
- // - Bits [21:0] in the mantissa are not null.
- __ And(tmp, src_mantissa, 0x3fffff);
- __ Branch(not_int32, ne, tmp, Operand(zero_reg));
-
- // Otherwise the exponent needs to be big enough to shift left all the
- // non zero bits left. So we need the (30 - exponent) last bits of the
- // 31 higher bits of the mantissa to be null.
- // Because bits [21:0] are null, we can check instead that the
- // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
-
- // Get the 32 higher bits of the mantissa in dst.
- __ Ext(dst,
- src_mantissa,
- HeapNumber::kMantissaBitsInTopWord,
- 32 - HeapNumber::kMantissaBitsInTopWord);
- __ sll(at, src_exponent, HeapNumber::kNonMantissaBitsInTopWord);
- __ or_(dst, dst, at);
-
- // Create the mask and test the lower bits (of the higher bits).
- __ li(at, 32);
- __ subu(scratch, at, scratch);
- __ li(src_mantissa, 1);
- __ sllv(src_exponent, src_mantissa, scratch);
- __ Subu(src_exponent, src_exponent, Operand(1));
- __ And(src_exponent, dst, src_exponent);
- __ Branch(not_int32, ne, src_exponent, Operand(zero_reg));
-}
-
-
-void FloatingPointHelper::CallCCodeForDoubleOperation(
- MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Using core registers:
- // a0: Left value (least significant part of mantissa).
- // a1: Left value (sign, exponent, top of mantissa).
- // a2: Right value (least significant part of mantissa).
- // a3: Right value (sign, exponent, top of mantissa).
-
- // Assert that heap_number_result is saved.
- // We currently always use s0 to pass it.
- ASSERT(heap_number_result.is(s0));
-
- // Push the current return address before the C call.
- __ push(ra);
- __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
- if (!IsMipsSoftFloatABI) {
- CpuFeatures::Scope scope(FPU);
- // We are not using MIPS FPU instructions, and parameters for the runtime
- // function call are prepaired in a0-a3 registers, but function we are
- // calling is compiled with hard-float flag and expecting hard float ABI
- // (parameters in f12/f14 registers). We need to copy parameters from
- // a0-a3 registers to f12/f14 register pairs.
- __ Move(f12, a0, a1);
- __ Move(f14, a2, a3);
- }
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number.
- if (!IsMipsSoftFloatABI) {
- CpuFeatures::Scope scope(FPU);
- // Double returned in register f0.
- __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- } else {
- // Double returned in registers v0 and v1.
- __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
- __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
- }
- // Place heap_number_result in v0 and return to the pushed return address.
- __ pop(ra);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
-}
-
-
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
- // These variants are compiled ahead of time. See next method.
- if (the_int_.is(a1) &&
- the_heap_number_.is(v0) &&
- scratch_.is(a2) &&
- sign_.is(a3)) {
- return true;
- }
- if (the_int_.is(a2) &&
- the_heap_number_.is(v0) &&
- scratch_.is(a3) &&
- sign_.is(a0)) {
- return true;
- }
- // Other register combinations are generated as and when they are needed,
- // so it is unsafe to call them from stubs (we can't generate a stub while
- // we are generating a stub).
- return false;
-}
-
-
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
- WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-// See comment for class, this does NOT work for int32's that are in Smi range.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
- Label max_negative_int;
- // the_int_ has the answer which is a signed int32 but not a Smi.
- // We test for the special value that has a different exponent.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- // Test sign, and save for later conditionals.
- __ And(sign_, the_int_, Operand(0x80000000u));
- __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
-
- // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
- uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ li(scratch_, Operand(non_smi_exponent));
- // Set the sign bit in scratch_ if the value was negative.
- __ or_(scratch_, scratch_, sign_);
- // Subtract from 0 if the value was negative.
- __ subu(at, zero_reg, the_int_);
- __ Movn(the_int_, at, sign_);
- // We should be masking the implict first digit of the mantissa away here,
- // but it just ends up combining harmlessly with the last digit of the
- // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
- // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
- ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ srl(at, the_int_, shift_distance);
- __ or_(scratch_, scratch_, at);
- __ sw(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kExponentOffset));
- __ sll(scratch_, the_int_, 32 - shift_distance);
- __ sw(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kMantissaOffset));
- __ Ret();
-
- __ bind(&max_negative_int);
- // The max negative int32 is stored as a positive number in the mantissa of
- // a double because it uses a sign bit instead of using two's complement.
- // The actual mantissa bits stored are all 0 because the implicit most
- // significant 1 bit is not stored.
- non_smi_exponent += 1 << HeapNumber::kExponentShift;
- __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ sw(scratch_,
- FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
- __ mov(scratch_, zero_reg);
- __ sw(scratch_,
- FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
- __ Ret();
-}
-
-
-// Handle the case where the lhs and rhs are the same object.
-// Equality is almost reflexive (everything but NaN), so this is a test
-// for "identity and not NaN".
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cc) {
- Label not_identical;
- Label heap_number, return_equal;
- Register exp_mask_reg = t5;
-
- __ Branch(&not_identical, ne, a0, Operand(a1));
-
- __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
-
- // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cc == less || cc == greater) {
- __ GetObjectType(a0, t4, t4);
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
- } else {
- __ GetObjectType(a0, t4, t4);
- __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
- // Comparing JS objects with <=, >= is complicated.
- if (cc != eq) {
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cc == less_equal || cc == greater_equal) {
- __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
- __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
- __ Branch(&return_equal, ne, a0, Operand(t2));
- if (cc == le) {
- // undefined <= undefined should fail.
- __ li(v0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ li(v0, Operand(LESS));
- }
- __ Ret();
- }
- }
- }
-
- __ bind(&return_equal);
-
- if (cc == less) {
- __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
- } else if (cc == greater) {
- __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
- } else {
- __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
- }
- __ Ret();
-
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cc != lt && cc != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ And(t3, t2, Operand(exp_mask_reg));
- // If all bits not set (ne cond), then not a NaN, objects are equal.
- __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
- // Or with all low-bits of mantissa.
- __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
- __ Or(v0, t3, Operand(t2));
- // For equal we already have the right value in v0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load v0 with the failing
- // value if it's a NaN.
- if (cc != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq, v0, Operand(zero_reg));
- if (cc == le) {
- __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
- }
- }
- __ Ret();
- }
- // No fall through here.
-
- __ bind(&not_identical);
-}
-
-
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* both_loaded_as_doubles,
- Label* slow,
- bool strict) {
- ASSERT((lhs.is(a0) && rhs.is(a1)) ||
- (lhs.is(a1) && rhs.is(a0)));
-
- Label lhs_is_smi;
- __ JumpIfSmi(lhs, &lhs_is_smi);
- // Rhs is a Smi.
- // Check whether the non-smi is a heap number.
- __ GetObjectType(lhs, t4, t4);
- if (strict) {
- // If lhs was not a number and rhs was a Smi then strict equality cannot
- // succeed. Return non-equal (lhs is already not zero).
- __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
- __ mov(v0, lhs);
- } else {
- // Smi compared non-strictly with a non-Smi non-heap-number. Call
- // the runtime.
- __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
- }
-
- // Rhs is a smi, lhs is a number.
- // Convert smi rhs to double.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ sra(at, rhs, kSmiTagSize);
- __ mtc1(at, f14);
- __ cvt_d_w(f14, f14);
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- } else {
- // Load lhs to a double in a2, a3.
- __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
- __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
-
- // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
- __ mov(t6, rhs);
- ConvertToDoubleStub stub1(a1, a0, t6, t5);
- __ push(ra);
- __ Call(stub1.GetCode(masm->isolate()));
-
- __ pop(ra);
- }
-
- // We now have both loaded as doubles.
- __ jmp(both_loaded_as_doubles);
-
- __ bind(&lhs_is_smi);
- // Lhs is a Smi. Check whether the non-smi is a heap number.
- __ GetObjectType(rhs, t4, t4);
- if (strict) {
- // If lhs was not a number and rhs was a Smi then strict equality cannot
- // succeed. Return non-equal.
- __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
- __ li(v0, Operand(1));
- } else {
- // Smi compared non-strictly with a non-Smi non-heap-number. Call
- // the runtime.
- __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
- }
-
- // Lhs is a smi, rhs is a number.
- // Convert smi lhs to double.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ sra(at, lhs, kSmiTagSize);
- __ mtc1(at, f12);
- __ cvt_d_w(f12, f12);
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- // Convert lhs to a double format. t5 is scratch.
- __ mov(t6, lhs);
- ConvertToDoubleStub stub2(a3, a2, t6, t5);
- __ push(ra);
- __ Call(stub2.GetCode(masm->isolate()));
- __ pop(ra);
- // Load rhs to a double in a1, a0.
- if (rhs.is(a0)) {
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- }
- }
- // Fall through to both_loaded_as_doubles.
-}
-
-
-void EmitNanCheck(MacroAssembler* masm, Condition cc) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Lhs and rhs are already loaded to f12 and f14 register pairs.
- __ Move(t0, t1, f14);
- __ Move(t2, t3, f12);
- } else {
- // Lhs and rhs are already loaded to GP registers.
- __ mov(t0, a0); // a0 has LS 32 bits of rhs.
- __ mov(t1, a1); // a1 has MS 32 bits of rhs.
- __ mov(t2, a2); // a2 has LS 32 bits of lhs.
- __ mov(t3, a3); // a3 has MS 32 bits of lhs.
- }
- Register rhs_exponent = exp_first ? t0 : t1;
- Register lhs_exponent = exp_first ? t2 : t3;
- Register rhs_mantissa = exp_first ? t1 : t0;
- Register lhs_mantissa = exp_first ? t3 : t2;
- Label one_is_nan, neither_is_nan;
- Label lhs_not_nan_exp_mask_is_loaded;
-
- Register exp_mask_reg = t4;
- __ li(exp_mask_reg, HeapNumber::kExponentMask);
- __ and_(t5, lhs_exponent, exp_mask_reg);
- __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
-
- __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
- __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
-
- __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
-
- __ li(exp_mask_reg, HeapNumber::kExponentMask);
- __ bind(&lhs_not_nan_exp_mask_is_loaded);
- __ and_(t5, rhs_exponent, exp_mask_reg);
-
- __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
-
- __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
- __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
-
- __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
-
- __ bind(&one_is_nan);
- // NaN comparisons always fail.
- // Load whatever we need in v0 to make the comparison fail.
-
- if (cc == lt || cc == le) {
- __ li(v0, Operand(GREATER));
- } else {
- __ li(v0, Operand(LESS));
- }
- __ Ret();
-
- __ bind(&neither_is_nan);
-}
-
-
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
- // f12 and f14 have the two doubles. Neither is a NaN.
- // Call a native function to do a comparison between two non-NaNs.
- // Call C routine that may not cause GC or other trouble.
- // We use a call_was and return manually because we need arguments slots to
- // be freed.
-
- Label return_result_not_equal, return_result_equal;
- if (cc == eq) {
- // Doubles are not equal unless they have the same bit pattern.
- // Exception: 0 and -0.
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Lhs and rhs are already loaded to f12 and f14 register pairs.
- __ Move(t0, t1, f14);
- __ Move(t2, t3, f12);
- } else {
- // Lhs and rhs are already loaded to GP registers.
- __ mov(t0, a0); // a0 has LS 32 bits of rhs.
- __ mov(t1, a1); // a1 has MS 32 bits of rhs.
- __ mov(t2, a2); // a2 has LS 32 bits of lhs.
- __ mov(t3, a3); // a3 has MS 32 bits of lhs.
- }
- Register rhs_exponent = exp_first ? t0 : t1;
- Register lhs_exponent = exp_first ? t2 : t3;
- Register rhs_mantissa = exp_first ? t1 : t0;
- Register lhs_mantissa = exp_first ? t3 : t2;
-
- __ xor_(v0, rhs_mantissa, lhs_mantissa);
- __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
-
- __ subu(v0, rhs_exponent, lhs_exponent);
- __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
- // 0, -0 case.
- __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
- __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
- __ or_(t4, rhs_exponent, lhs_exponent);
- __ or_(t4, t4, rhs_mantissa);
-
- __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
-
- __ bind(&return_result_equal);
-
- __ li(v0, Operand(EQUAL));
- __ Ret();
- }
-
- __ bind(&return_result_not_equal);
-
- if (!CpuFeatures::IsSupported(FPU)) {
- __ push(ra);
- __ PrepareCallCFunction(0, 2, t4);
- if (!IsMipsSoftFloatABI) {
- // We are not using MIPS FPU instructions, and parameters for the runtime
- // function call are prepaired in a0-a3 registers, but function we are
- // calling is compiled with hard-float flag and expecting hard float ABI
- // (parameters in f12/f14 registers). We need to copy parameters from
- // a0-a3 registers to f12/f14 register pairs.
- __ Move(f12, a0, a1);
- __ Move(f14, a2, a3);
- }
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
- 0, 2);
- __ pop(ra); // Because this function returns int, result is in v0.
- __ Ret();
- } else {
- CpuFeatures::Scope scope(FPU);
- Label equal, less_than;
- __ BranchF(&equal, NULL, eq, f12, f14);
- __ BranchF(&less_than, NULL, lt, f12, f14);
-
- // Not equal, not less, not NaN, must be greater.
-
- __ li(v0, Operand(GREATER));
- __ Ret();
-
- __ bind(&equal);
- __ li(v0, Operand(EQUAL));
- __ Ret();
-
- __ bind(&less_than);
- __ li(v0, Operand(LESS));
- __ Ret();
- }
-}
-
-
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- // If either operand is a JS object or an oddball value, then they are
- // not equal since their pointers are different.
- // There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
- Label first_non_object;
- // Get the type of the first operand into a2 and compare it with
- // FIRST_SPEC_OBJECT_TYPE.
- __ GetObjectType(lhs, a2, a2);
- __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Return non-zero.
- Label return_not_equal;
- __ bind(&return_not_equal);
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(1));
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
-
- __ GetObjectType(rhs, a3, a3);
- __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Check for oddballs: true, false, null, undefined.
- __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
-
- // Now that we have the types we might as well check for
- // internalized-internalized.
- // Ensure that no non-strings have the internalized bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
- STATIC_ASSERT(kInternalizedTag != 0);
- __ And(t2, a2, Operand(a3));
- __ And(t0, t2, Operand(kIsInternalizedMask));
- __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
-}
-
-
-static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* both_loaded_as_doubles,
- Label* not_heap_numbers,
- Label* slow) {
- __ GetObjectType(lhs, a3, a2);
- __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
- __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
- // If first was a heap number & second wasn't, go to slow case.
- __ Branch(slow, ne, a3, Operand(a2));
-
- // Both are heap numbers. Load them up then jump to the code we have
- // for that.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
- if (rhs.is(a0)) {
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- }
- }
- __ jmp(both_loaded_as_doubles);
-}
-
-
-// Fast negative check for internalized-to-internalized equality.
-static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* possible_strings,
- Label* not_both_strings) {
- ASSERT((lhs.is(a0) && rhs.is(a1)) ||
- (lhs.is(a1) && rhs.is(a0)));
-
- // a2 is object type of lhs.
- // Ensure that no non-strings have the internalized bit set.
- Label object_test;
- STATIC_ASSERT(kInternalizedTag != 0);
- __ And(at, a2, Operand(kIsNotStringMask));
- __ Branch(&object_test, ne, at, Operand(zero_reg));
- __ And(at, a2, Operand(kIsInternalizedMask));
- __ Branch(possible_strings, eq, at, Operand(zero_reg));
- __ GetObjectType(rhs, a3, a3);
- __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
- __ And(at, a3, Operand(kIsInternalizedMask));
- __ Branch(possible_strings, eq, at, Operand(zero_reg));
-
- // Both are internalized strings. We already checked they weren't the same
- // pointer so they are not equal.
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(1)); // Non-zero indicates not equal.
-
- __ bind(&object_test);
- __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ GetObjectType(rhs, a2, a3);
- __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // If both objects are undetectable, they are equal. Otherwise, they
- // are not equal, since they are different objects and an object is not
- // equal to undefined.
- __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
- __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
- __ and_(a0, a2, a3);
- __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
- __ Ret(USE_DELAY_SLOT);
- __ xori(v0, a0, 1 << Map::kIsUndetectable);
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ sra(mask, mask, kSmiTagSize + 1);
- __ Addu(mask, mask, -1); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ Addu(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ lw(scratch2, MemOperand(scratch1, kPointerSize));
- __ lw(scratch1, MemOperand(scratch1, 0));
- __ Xor(scratch1, scratch1, Operand(scratch2));
- __ And(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
- __ Addu(scratch1, number_string_cache, scratch1);
-
- Register probe = mask;
- __ lw(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
- __ Branch(not_found);
- } else {
- // Note that there is no cache check for non-FPU case, even though
- // it seems there could be. May be a tiny opimization for non-FPU
- // cores.
- __ Branch(not_found);
- }
- }
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ sra(scratch, object, 1); // Shift away the tag.
- __ And(scratch, mask, Operand(scratch));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch, scratch, kPointerSizeLog2 + 1);
- __ Addu(scratch, number_string_cache, scratch);
-
- // Check if the entry is the smi we are looking for.
- Register probe = mask;
- __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ Branch(not_found, ne, object, Operand(probe));
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ lw(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
-
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ lw(a1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
- __ DropAndRet(1);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
-}
-
-
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
- Register scratch,
- CompareIC::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareIC::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
- DONT_DO_SMI_CHECK);
- }
- // We could be strict about internalized/string here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
-
-// On entry a1 and a2 are the values to be compared.
-// On exit a0 is 0, positive or negative to indicate the result of
-// the comparison.
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
- Register lhs = a1;
- Register rhs = a0;
- Condition cc = GetCondition();
-
- Label miss;
- ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
-
- Label slow; // Call builtin.
- Label not_smis, both_loaded_as_doubles;
-
- Label not_two_smis, smi_done;
- __ Or(a2, a1, a0);
- __ JumpIfNotSmi(a2, &not_two_smis);
- __ sra(a1, a1, 1);
- __ sra(a0, a0, 1);
- __ Ret(USE_DELAY_SLOT);
- __ subu(v0, a1, a0);
- __ bind(&not_two_smis);
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
- {
- // This is optimized for reading the code and not benchmarked for
- // speed or amount of instructions. The code is not ordered for speed
- // or anything like this
- Label miss, user_compare;
-
- // No global compare if both operands are SMIs
- __ And(a2, a1, Operand(a0));
- __ JumpIfSmi(a2, &miss);
-
-
- // We need to check if lhs and rhs are both objects, if not we are
- // jumping out of the function. We will keep the 'map' in t0 (lhs) and
- // t1 (rhs) for later usage.
- __ GetObjectType(a0, t0, a3);
- __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
-
- __ GetObjectType(a1, t1, a3);
- __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
-
- // Check if the UseUserComparison flag is set by using the map of t0 for lhs
- __ lbu(t0, FieldMemOperand(t0, Map::kBitField2Offset));
- __ And(t0, t0, Operand(1 << Map::kUseUserObjectComparison));
- __ Branch(&user_compare,
- eq,
- t0,
- Operand(1 << Map::kUseUserObjectComparison));
-
-
- // Check if the UseUserComparison flag is _not_ set by using the map of t1
- // for rhs and then jump to the miss label.
- __ lbu(t1, FieldMemOperand(t1, Map::kBitField2Offset));
- __ And(t1, t1, Operand(1 << Map::kUseUserObjectComparison));
- __ Branch(&miss, ne, t1, Operand(1 << Map::kUseUserObjectComparison));
-
- // Invoke the runtime function here
- __ bind(&user_compare);
- __ Push(a0, a1);
- __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
- // We exit here without doing anything
- __ bind(&miss);
- }
-
- // Handle the case where the objects are identical. Either returns the answer
- // or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc);
-
- // If either is a Smi (we know that not both are), then they can only
- // be strictly equal if the other is a HeapNumber.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ And(t2, lhs, Operand(rhs));
- __ JumpIfNotSmi(t2, &not_smis, t0);
- // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
- // 1) Return the answer.
- // 2) Go to slow.
- // 3) Fall through to both_loaded_as_doubles.
- // 4) Jump to rhs_not_nan.
- // In cases 3 and 4 we have found out we were dealing with a number-number
- // comparison and the numbers have been loaded into f12 and f14 as doubles,
- // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
- EmitSmiNonsmiComparison(masm, lhs, rhs,
- &both_loaded_as_doubles, &slow, strict());
-
- __ bind(&both_loaded_as_doubles);
- // f12, f14 are the double representations of the left hand side
- // and the right hand side if we have FPU. Otherwise a2, a3 represent
- // left hand side and a0, a1 represent right hand side.
-
- Isolate* isolate = masm->isolate();
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- Label nan;
- __ li(t0, Operand(LESS));
- __ li(t1, Operand(GREATER));
- __ li(t2, Operand(EQUAL));
-
- // Check if either rhs or lhs is NaN.
- __ BranchF(NULL, &nan, eq, f12, f14);
-
- // Check if LESS condition is satisfied. If true, move conditionally
- // result to v0.
- __ c(OLT, D, f12, f14);
- __ Movt(v0, t0);
- // Use previous check to store conditionally to v0 oposite condition
- // (GREATER). If rhs is equal to lhs, this will be corrected in next
- // check.
- __ Movf(v0, t1);
- // Check if EQUAL condition is satisfied. If true, move conditionally
- // result to v0.
- __ c(EQ, D, f12, f14);
- __ Movt(v0, t2);
-
- __ Ret();
-
- __ bind(&nan);
- // NaN comparisons always fail.
- // Load whatever we need in v0 to make the comparison fail.
- if (cc == lt || cc == le) {
- __ li(v0, Operand(GREATER));
- } else {
- __ li(v0, Operand(LESS));
- }
- __ Ret();
- } else {
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds rhs_not_nan.
- EmitNanCheck(masm, cc);
-
- // Compares two doubles that are not NaNs. Returns the answer.
- // Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc);
- }
-
- __ bind(&not_smis);
- // At this point we know we are dealing with two different objects,
- // and neither of them is a Smi. The objects are in lhs_ and rhs_.
- if (strict()) {
- // This returns non-equal for some object types, or falls through if it
- // was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
- }
-
- Label check_for_internalized_strings;
- Label flat_string_check;
- // Check for heap-number-heap-number comparison. Can jump to slow case,
- // or load both doubles and jump to the code that handles
- // that case. If the inputs are not doubles then jumps to
- // check_for_internalized_strings.
- // In this case a2 will contain the type of lhs_.
- EmitCheckForTwoHeapNumbers(masm,
- lhs,
- rhs,
- &both_loaded_as_doubles,
- &check_for_internalized_strings,
- &flat_string_check);
-
- __ bind(&check_for_internalized_strings);
- if (cc == eq && !strict()) {
- // Returns an answer for two internalized strings or two
- // detectable objects.
- // Otherwise jumps to string case or not both strings case.
- // Assumes that a2 is the type of lhs_ on entry.
- EmitCheckForInternalizedStringsOrObjects(
- masm, lhs, rhs, &flat_string_check, &slow);
- }
-
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
- __ bind(&flat_string_check);
-
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
-
- __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
- if (cc == eq) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs,
- rhs,
- a2,
- a3,
- t0);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs,
- rhs,
- a2,
- a3,
- t0,
- t1);
- }
- // Never falls through to here.
-
- __ bind(&slow);
- // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
- // a1 (rhs) second.
- __ Push(lhs, rhs);
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- if (cc == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- native = Builtins::COMPARE;
- int ncr; // NaN compare result.
- if (cc == lt || cc == le) {
- ncr = GREATER;
- } else {
- ASSERT(cc == gt || cc == ge); // Remaining cases.
- ncr = LESS;
- }
- __ li(a0, Operand(Smi::FromInt(ncr)));
- __ push(a0);
- }
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-// The stub expects its argument in the tos_ register and returns its result in
-// it, too: zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub uses FPU instructions.
- CpuFeatures::Scope scope(FPU);
-
- Label patch;
- const Register map = t5.is(tos_) ? t3 : t5;
-
- // undefined -> false.
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value.
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- __ And(at, tos_, kSmiTagMask);
- // tos_ contains the correct return value already
- __ Ret(eq, at, Operand(zero_reg));
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(tos_, &patch);
- }
-
- if (types_.NeedsMap()) {
- __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, at, Operand(1 << Map::kIsUndetectable));
- // Undetectable -> false.
- __ Movn(tos_, zero_reg, at);
- __ Ret(ne, at, Operand(zero_reg));
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // Spec object -> true.
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- // tos_ contains the correct non-zero return value already.
- __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- Label skip;
- __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
- __ Ret(USE_DELAY_SLOT); // the string length is OK as the return value
- __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- __ bind(&skip);
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // Heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&not_heap_number, ne, map, Operand(at));
- Label zero_or_nan, number;
- __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ bind(&zero_or_nan);
- __ mov(tos_, zero_reg);
- __ bind(&number);
- __ Ret();
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
-
-
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- __ LoadRoot(at, value);
- __ Subu(at, at, tos_); // This is a check for equality for the movz below.
- // The value of a root is never NULL, so we can avoid loading a non-null
- // value into tos_ when we want to return 'true'.
- if (!result) {
- __ Movz(tos_, zero_reg, at);
- }
- __ Ret(eq, at, Operand(zero_reg));
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ Move(a3, tos_);
- __ li(a2, Operand(Smi::FromInt(tos_.code())));
- __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
- __ Push(a3, a2, a1);
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ MultiPush(kJSCallerSaved | ra.bit());
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
- __ MultiPushFPU(kCallerSavedFPU);
- }
- const int argument_count = 1;
- const int fp_argument_count = 0;
- const Register scratch = a1;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ li(a0, Operand(ExternalReference::isolate_address()));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- argument_count);
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
- __ MultiPopFPU(kCallerSavedFPU);
- }
-
- __ MultiPop(kJSCallerSaved | ra.bit());
- __ Ret();
-}
-
-
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- // Argument is in a0 and v0 at this point, so we can overwrite a0.
- __ li(a2, Operand(Smi::FromInt(op_)));
- __ li(a1, Operand(Smi::FromInt(mode_)));
- __ li(a0, Operand(Smi::FromInt(operand_type_)));
- __ Push(v0, a2, a1, a0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow) {
- __ JumpIfNotSmi(a0, non_smi);
-
- // The result of negating zero or the smallest negative smi is not a smi.
- __ And(t0, a0, ~0x80000000);
- __ Branch(slow, eq, t0, Operand(zero_reg));
-
- // Return '0 - value'.
- __ Ret(USE_DELAY_SLOT);
- __ subu(v0, zero_reg, a0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi) {
- __ JumpIfNotSmi(a0, non_smi);
-
- // Flip bits and revert inverted smi-tag.
- __ Neg(v0, a0);
- __ And(v0, v0, ~kSmiTagMask);
- __ Ret();
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
- Label non_smi, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
- // a0 is a heap number. Get a new heap number in a1.
- if (mode_ == UNARY_OVERWRITE) {
- __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- } else {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(a1, v0);
- __ pop(a0);
- }
-
- __ bind(&heapnumber_allocated);
- __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
- __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
- __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
- __ mov(v0, a1);
- }
- __ Ret();
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(
- MacroAssembler* masm,
- Label* slow) {
- Label impossible;
-
- EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
- // Convert the heap number in a0 to an untagged integer in a1.
- __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ Neg(a1, a1);
- __ Addu(a2, a1, Operand(0x40000000));
- __ Branch(&try_float, lt, a2, Operand(zero_reg));
-
- // Tag the result as a smi and we're done.
- __ SmiTag(v0, a1);
- __ Ret();
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (mode_ == UNARY_NO_OVERWRITE) {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- // Allocate a new heap number without zapping v0, which we need if it fails.
- __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(v0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(a2, v0); // Move the new heap number into a2.
- // Get the heap number into v0, now that the new heap number is in a2.
- __ pop(v0);
- }
-
- // Convert the heap number in v0 to an untagged integer in a1.
- // This can't go slow-case because it's the same number we already
- // converted once again.
- __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
- // Negate the result.
- __ Xor(a1, a1, -1);
-
- __ bind(&heapnumber_allocated);
- __ mov(v0, a2); // Move newly allocated heap number to v0.
- }
-
- if (CpuFeatures::IsSupported(FPU)) {
- // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
- CpuFeatures::Scope scope(FPU);
- __ mtc1(a1, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- }
-
- __ bind(&impossible);
- if (FLAG_debug_code) {
- __ stop("Incorrect assumption in bit-not stub");
- }
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(
- MacroAssembler* masm) {
- // Handle the slow case by jumping to the JavaScript builtin.
- __ push(a0);
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(FPU);
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(a1, a0);
-
- __ li(a2, Operand(Smi::FromInt(MinorKey())));
- __ push(a2);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op) {
- Register left = a1;
- Register right = a0;
-
- Register scratch1 = t0;
- Register scratch2 = t1;
-
- ASSERT(right.is(a0));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op) {
- case Token::ADD:
- __ AdduAndCheckForOverflow(v0, left, right, scratch1);
- __ RetOnNoOverflow(scratch1);
- // No need to revert anything - right and left are intact.
- break;
- case Token::SUB:
- __ SubuAndCheckForOverflow(v0, left, right, scratch1);
- __ RetOnNoOverflow(scratch1);
- // No need to revert anything - right and left are intact.
- break;
- case Token::MUL: {
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(scratch1, right);
- // Do multiplication.
- // lo = lower 32 bits of scratch1 * left.
- // hi = higher 32 bits of scratch1 * left.
- __ Mult(left, scratch1);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ mflo(scratch1);
- __ mfhi(scratch2);
- __ sra(scratch1, scratch1, 31);
- __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
- // Go slow on zero result to handle -0.
- __ mflo(v0);
- __ Ret(ne, v0, Operand(zero_reg));
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ Addu(scratch2, right, left);
- Label skip;
- // ARM uses the 'pl' condition, which is 'ge'.
- // Negating it results in 'lt'.
- __ Branch(&skip, lt, scratch2, Operand(zero_reg));
- ASSERT(Smi::FromInt(0) == 0);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
- __ bind(&skip);
- // We fall through here if we multiplied a negative number with 0, because
- // that would mean we should produce -0.
- }
- break;
- case Token::DIV: {
- Label done;
- __ SmiUntag(scratch2, right);
- __ SmiUntag(scratch1, left);
- __ Div(scratch1, scratch2);
- // A minor optimization: div may be calculated asynchronously, so we check
- // for division by zero before getting the result.
- __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
- // If the result is 0, we need to make sure the dividsor (right) is
- // positive, otherwise it is a -0 case.
- // Quotient is in 'lo', remainder is in 'hi'.
- // Check for no remainder first.
- __ mfhi(scratch1);
- __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
- __ mflo(scratch1);
- __ Branch(&done, ne, scratch1, Operand(zero_reg));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ bind(&done);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ SmiTag(v0, scratch1);
- __ Ret();
- }
- break;
- case Token::MOD: {
- Label done;
- __ SmiUntag(scratch2, right);
- __ SmiUntag(scratch1, left);
- __ Div(scratch1, scratch2);
- // A minor optimization: div may be calculated asynchronously, so we check
- // for division by 0 before calling mfhi.
- // Check for zero on the right hand side.
- __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
- // If the result is 0, we need to make sure the dividend (left) is
- // positive (or 0), otherwise it is a -0 case.
- // Remainder is in 'hi'.
- __ mfhi(scratch2);
- __ Branch(&done, ne, scratch2, Operand(zero_reg));
- __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
- __ bind(&done);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch1, scratch2, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
- __ SmiTag(v0, scratch2);
- __ Ret();
- }
- break;
- case Token::BIT_OR:
- __ Ret(USE_DELAY_SLOT);
- __ or_(v0, left, right);
- break;
- case Token::BIT_AND:
- __ Ret(USE_DELAY_SLOT);
- __ and_(v0, left, right);
- break;
- case Token::BIT_XOR:
- __ Ret(USE_DELAY_SLOT);
- __ xor_(v0, left, right);
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ srav(scratch1, left, scratch1);
- // Smi tag result.
- __ And(v0, scratch1, ~kSmiTagMask);
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ srlv(v0, scratch1, scratch2);
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ And(scratch1, v0, Operand(0xc0000000));
- __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
- // Smi tag result.
- __ SmiTag(v0);
- __ Ret();
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ sllv(scratch1, scratch1, scratch2);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ SmiTag(v0, scratch1);
- __ Ret();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&not_smi_result);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required,
- Label* miss,
- Token::Value op,
- OverwriteMode mode) {
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
- Register scratch2 = t5;
- Register scratch3 = t0;
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands) {
- __ AssertSmi(left);
- __ AssertSmi(right);
- }
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, miss);
- }
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, miss);
- }
-
- Register heap_number_map = t2;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
- // depending on whether FPU is available or not.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(FPU) &&
- op != Token::MOD ?
- FloatingPointHelper::kFPURegisters :
- FloatingPointHelper::kCoreRegisters;
-
- // Allocate new heap number for result.
- Register result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
-
- // Load the operands.
- if (smi_operands) {
- FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
- } else {
- // Load right operand to f14 or a2/a3.
- if (right_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, right, destination, f14, f16, a2, a3, heap_number_map,
- scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, right, f14, a2, a3, heap_number_map,
- scratch1, scratch2, fail);
- }
- // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
- // jumps to |miss|.
- if (left_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, left, destination, f12, f16, a0, a1, heap_number_map,
- scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, left, f12, a0, a1, heap_number_map,
- scratch1, scratch2, fail);
- }
- }
-
- // Calculate the result.
- if (destination == FloatingPointHelper::kFPURegisters) {
- // Using FPU registers:
- // f12: Left value.
- // f14: Right value.
- CpuFeatures::Scope scope(FPU);
- switch (op) {
- case Token::ADD:
- __ add_d(f10, f12, f14);
- break;
- case Token::SUB:
- __ sub_d(f10, f12, f14);
- break;
- case Token::MUL:
- __ mul_d(f10, f12, f14);
- break;
- case Token::DIV:
- __ div_d(f10, f12, f14);
- break;
- default:
- UNREACHABLE();
- }
-
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1 so
- // there's no point in generating even more instructions.
- __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, result);
- } else {
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op,
- result,
- scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(a3, left);
- __ SmiUntag(a2, right);
- } else {
- // Convert operands to 32-bit integers. Right in a2 and left in a3.
- FloatingPointHelper::ConvertNumberToInt32(masm,
- left,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- not_numbers);
- FloatingPointHelper::ConvertNumberToInt32(masm,
- right,
- a2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- not_numbers);
- }
- Label result_not_a_smi;
- switch (op) {
- case Token::BIT_OR:
- __ Or(a2, a3, Operand(a2));
- break;
- case Token::BIT_XOR:
- __ Xor(a2, a3, Operand(a2));
- break;
- case Token::BIT_AND:
- __ And(a2, a3, Operand(a2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ srav(a2, a3, a2);
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ srlv(a2, a3, a2);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int so we go to slow case if we
- // hit this case.
- if (CpuFeatures::IsSupported(FPU)) {
- __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
- } else {
- __ Branch(not_numbers, lt, a2, Operand(zero_reg));
- }
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ sllv(a2, a3, a2);
- break;
- default:
- UNREACHABLE();
- }
- // Check that the *signed* result fits in a smi.
- __ Addu(a3, a2, Operand(0x40000000));
- __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
- __ SmiTag(v0, a2);
- __ Ret();
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- Register result = t1;
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required,
- mode);
- }
-
- // a2: Answer as signed int32.
- // t1: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to v0, which is the
- // result.
- __ mov(v0, t1);
-
- if (CpuFeatures::IsSupported(FPU)) {
- // Convert the int32 in a2 to the heap number in a0. As
- // mentioned above SHR needs to always produce a positive result.
- CpuFeatures::Scope scope(FPU);
- __ mtc1(a2, f0);
- if (op == Token::SHR) {
- __ Cvt_d_uw(f0, f0, f22);
- } else {
- __ cvt_d_w(f0, f0);
- }
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1 so
- // there's no point in generating even more instructions.
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // Tail call that writes the int32 in a2 to the heap number in v0, using
- // a3 and a0 as scratch. v0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
- __ TailCallStub(&stub);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-// Generate the smi code. If the operation on smis are successful this return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- Token::Value op,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode) {
- Label not_smis;
-
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
-
- // Perform combined smi check on both operands.
- __ Or(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(scratch1, &not_smis);
-
- // If the smi-smi operation results in a smi return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op);
-
- // If heap number results are possible generate the result in an allocated
- // heap number.
- if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
- BinaryOpStub_GenerateFPOperation(
- masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode);
- }
- __ bind(&not_smis);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label not_smis, call_runtime;
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
- mode_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = a1;
- Register right = a0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ GetObjectType(left, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ GetObjectType(right, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
- Register scratch2 = t5;
- FPURegister double_scratch = f0;
- FPURegister single_scratch = f6;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = t2;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
- // Both label are currently actually bound to the same position. We use two
- // different label to differentiate the cause leading to type transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ Or(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, &transition);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, &transition);
- }
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers a0 and a1 (right
- // and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
- ? FloatingPointHelper::kFPURegisters
- : FloatingPointHelper::kCoreRegisters;
-
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- f14,
- f16,
- a2,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- f2,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- f12,
- f16,
- t0,
- t1,
- heap_number_map,
- scratch1,
- scratch2,
- f2,
- &transition);
-
- if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ add_d(f10, f12, f14);
- break;
- case Token::SUB:
- __ sub_d(f10, f12, f14);
- break;
- case Token::MUL:
- __ mul_d(f10, f12, f14);
- break;
- case Token::DIV:
- __ div_d(f10, f12, f14);
- break;
- default:
- UNREACHABLE();
- }
-
- if (op_ != Token::DIV) {
- // These operations produce an integer result.
- // Try to return a smi if we can.
- // Otherwise return a heap number if allowed, or jump to type
- // transition.
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- scratch1,
- f10,
- at,
- f16,
- except_flag);
-
- if (result_type_ <= BinaryOpIC::INT32) {
- // If except_flag != 0, result does not fit in a 32-bit integer.
- __ Branch(&transition, ne, except_flag, Operand(zero_reg));
- }
-
- // Check if the result fits in a smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- // If not try to return a heap number.
- __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
- // Check for minus zero. Return heap number for minus zero.
- Label not_zero;
- __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
- __ mfc1(scratch2, f11);
- __ And(scratch2, scratch2, HeapNumber::kSignMask);
- __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
- __ bind(&not_zero);
-
- // Tag the result and return.
- __ SmiTag(v0, scratch1);
- __ Ret();
- } else {
- // DIV just falls through to allocating a heap number.
- }
-
- __ bind(&return_heap_number);
- // Return a heap number, or fall through to type transition or runtime
- // call if we can't.
- if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::NUMBER
- : BinaryOpIC::INT32)) {
- // We are using FPU registers so s0 is available.
- heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
- __ mov(v0, heap_number_result);
- __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- }
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- // We preserved a0 and a1 to be able to call runtime.
- // Save the left value on the stack.
- __ Push(t1, t0);
-
- Label pop_and_call_runtime;
-
- // Allocate a heap number to store the result.
- heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime,
- mode_);
-
- // Load the left value from the value saved on the stack.
- __ Pop(a1, a0);
-
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(
- masm, op_, heap_number_result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-
- __ bind(&pop_and_call_runtime);
- __ Drop(2);
- __ Branch(&call_runtime);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- Register scratch3 = t1;
- // Convert operands to 32-bit integers. Right in a2 and left in a3. The
- // registers a0 and a1 (right and left) are preserved for the runtime
- // call.
- FloatingPointHelper::LoadNumberAsInt32(masm,
- left,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- f2,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32(masm,
- right,
- a2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- f2,
- &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ Or(a2, a3, Operand(a2));
- break;
- case Token::BIT_XOR:
- __ Xor(a2, a3, Operand(a2));
- break;
- case Token::BIT_AND:
- __ And(a2, a3, Operand(a2));
- break;
- case Token::SAR:
- __ And(a2, a2, Operand(0x1f));
- __ srav(a2, a3, a2);
- break;
- case Token::SHR:
- __ And(a2, a2, Operand(0x1f));
- __ srlv(a2, a3, a2);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (a2) is 0.
- // This result cannot be respresented as a signed 32-bit integer, try
- // to return a heap number if we can.
- // The non FPU code does not support this special case, so jump to
- // runtime if we don't support it.
- if (CpuFeatures::IsSupported(FPU)) {
- __ Branch((result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number,
- lt,
- a2,
- Operand(zero_reg));
- } else {
- __ Branch((result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &call_runtime,
- lt,
- a2,
- Operand(zero_reg));
- }
- break;
- case Token::SHL:
- __ And(a2, a2, Operand(0x1f));
- __ sllv(a2, a3, a2);
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi.
- __ Addu(scratch1, a2, Operand(0x40000000));
- // If not try to return a heap number. (We know the result is an int32.)
- __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
- // Tag the result and return.
- __ SmiTag(v0, a2);
- __ Ret();
-
- __ bind(&return_heap_number);
- heap_number_result = t1;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
-
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ mtc1(a2, double_scratch);
- __ cvt_d_w(double_scratch, double_scratch);
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ mtc1(a2, double_scratch);
- __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
- }
-
- // Store the result.
- __ mov(v0, heap_number_result);
- __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // Tail call that writes the int32 in a2 to the heap number in v0, using
- // a3 and a0 as scratch. v0 is preserved and returned.
- __ mov(v0, t1);
- WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
- __ TailCallStub(&stub);
- }
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- // We never expect DIV to yield an integer result, so we always generate
- // type transition code for DIV operations expecting an integer result: the
- // code will fall through to this type transition.
- if (transition.is_linked() ||
- ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
// BinaryOpStub path for operands that may be oddballs (undefined).
// Replaces an undefined operand with Smi 0 for bitwise operators or with
// the canonical NaN heap number for arithmetic operators, then falls
// through to the number stub. For ADD, string concatenation is attempted
// first because ADD does not apply ToNumber to its operands.
// NOTE(review): |call_runtime| is declared but never bound or referenced
// in this function.
void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
  Label call_runtime;

  if (op_ == Token::ADD) {
    // Handle string addition here, because it is the only operation
    // that does not do a ToNumber conversion on the operands.
    GenerateAddStrings(masm);
  }

  // Convert oddball arguments to numbers.
  Label check, done;
  // Left operand (a1): replace undefined with 0 (bit ops) or NaN.
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ Branch(&check, ne, a1, Operand(t0));
  if (Token::IsBitOp(op_)) {
    __ li(a1, Operand(Smi::FromInt(0)));
  } else {
    __ LoadRoot(a1, Heap::kNanValueRootIndex);
  }
  __ jmp(&done);
  __ bind(&check);
  // Right operand (a0): same substitution.
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ Branch(&done, ne, a0, Operand(t0));
  if (Token::IsBitOp(op_)) {
    __ li(a0, Operand(Smi::FromInt(0)));
  } else {
    __ LoadRoot(a0, Heap::kNanValueRootIndex);
  }
  __ bind(&done);

  GenerateNumberStub(masm);
}
-
-
// BinaryOpStub fast path for number operands. Emits the floating-point
// operation; inputs that do not match the recorded types trigger a type
// transition, and FP-path failures fall back to the runtime.
// NOTE(review): the first and third label arguments both point at
// |transition| — confirm their exact roles against the declaration of
// BinaryOpStub_GenerateFPOperation.
void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
  Label call_runtime, transition;
  BinaryOpStub_GenerateFPOperation(
      masm, left_type_, right_type_, false,
      &transition, &call_runtime, &transition, op_, mode_);

  __ bind(&transition);
  GenerateTypeTransition(masm);

  __ bind(&call_runtime);
  GenerateRegisterArgsPush(masm);
  GenerateCallRuntime(masm);
}
-
-
// Fully generic BinaryOpStub path: try smi arithmetic first, then the
// floating-point path, then (for ADD only) string concatenation, and
// finally fall back to the runtime.
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  Label call_runtime, call_string_add_or_runtime, transition;

  // Fast case: both operands are smis.
  BinaryOpStub_GenerateSmiCode(
      masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);

  // Heap-number case; non-number operands continue at
  // |call_string_add_or_runtime|.
  BinaryOpStub_GenerateFPOperation(
      masm, left_type_, right_type_, false,
      &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);

  __ bind(&transition);
  GenerateTypeTransition(masm);

  __ bind(&call_string_add_or_runtime);
  if (op_ == Token::ADD) {
    // ADD is the only operator with string semantics.
    GenerateAddStrings(masm);
  }

  __ bind(&call_runtime);
  GenerateRegisterArgsPush(masm);
  GenerateCallRuntime(masm);
}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = a1;
- Register right = a0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ GetObjectType(left, a2, a2);
- __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ GetObjectType(right, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
// Produces in |result| a heap number that can hold the result of a binary
// operation. In OVERWRITE_LEFT/OVERWRITE_RIGHT mode the corresponding
// input operand (a1 or a0) is reused when it is already a heap object
// (i.e. not a smi); otherwise a fresh heap number is allocated, branching
// to |gc_required| if allocation fails.
void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
                                               Register result,
                                               Register heap_number_map,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required,
                                               OverwriteMode mode) {
  // Code below will scratch result if allocation fails. To keep both arguments
  // intact for the runtime call result cannot be one of these.
  ASSERT(!result.is(a0) && !result.is(a1));

  if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
    Label skip_allocation, allocated;
    Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0;
    // If the overwritable operand is already an object, we skip the
    // allocation of a heap number.
    __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
    // Allocate a heap number for the result.
    __ AllocateHeapNumber(
        result, scratch1, scratch2, heap_number_map, gc_required);
    __ Branch(&allocated);
    __ bind(&skip_allocation);
    // Use object holding the overwritable operand for result.
    __ mov(result, overwritable_operand);
    __ bind(&allocated);
  } else {
    ASSERT(mode == NO_OVERWRITE);
    __ AllocateHeapNumber(
        result, scratch1, scratch2, heap_number_map, gc_required);
  }
}
-
-
// Pushes the stub's two operand registers (left in a1, right in a0) onto
// the stack, as expected by the runtime/stub calls made by this file.
void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ Push(a1, a0);
}
-
-
-
// Computes a transcendental function (sin/cos/tan/log, per |type_|) with a
// lookup in the isolate's transcendental cache, falling back to a C call
// (and, in the tagged case, the runtime) on a cache miss.
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  // Untagged case: double input in f4, double result goes
  // into f4.
  // Tagged case: tagged input on top of stack and in a0,
  // tagged result (heap number) goes into v0.

  Label input_not_smi;
  Label loaded;
  Label calculate;
  Label invalid_cache;
  const Register scratch0 = t5;
  const Register scratch1 = t3;
  const Register cache_entry = a0;
  const bool tagged = (argument_type_ == TAGGED);

  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);

    if (tagged) {
      // Argument is a number and is on stack and in a0.
      // Load argument and check if it is a smi.
      __ JumpIfNotSmi(a0, &input_not_smi);

      // Input is a smi. Convert to double and load the low and high words
      // of the double into a2, a3.
      __ sra(t0, a0, kSmiTagSize);
      __ mtc1(t0, f4);
      __ cvt_d_w(f4, f4);
      __ Move(a2, a3, f4);
      __ Branch(&loaded);

      __ bind(&input_not_smi);
      // Check if input is a HeapNumber.
      __ CheckMap(a0,
                  a1,
                  Heap::kHeapNumberMapRootIndex,
                  &calculate,
                  DONT_DO_SMI_CHECK);
      // Input is a HeapNumber. Store the
      // low and high words into a2, a3.
      __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
      __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
    } else {
      // Input is untagged double in f4. Output goes to f4.
      __ Move(a2, a3, f4);
    }
    __ bind(&loaded);
    // a2 = low 32 bits of double value.
    // a3 = high 32 bits of double value.
    // Compute hash (the shifts are arithmetic):
    //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
    __ Xor(a1, a2, a3);
    __ sra(t0, a1, 16);
    __ Xor(a1, a1, t0);
    __ sra(t0, a1, 8);
    __ Xor(a1, a1, t0);
    ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
    __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));

    // a2 = low 32 bits of double value.
    // a3 = high 32 bits of double value.
    // a1 = TranscendentalCache::hash(double value).
    __ li(cache_entry, Operand(
        ExternalReference::transcendental_cache_array_address(
            masm->isolate())));
    // a0 points to cache array.
    __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
        Isolate::Current()->transcendental_cache()->caches_[0])));
    // a0 points to the cache for the type type_.
    // If NULL, the cache hasn't been initialized yet, so go through runtime.
    __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));

#ifdef DEBUG
    // Check that the layout of cache elements match expectations.
    { TranscendentalCache::SubCache::Element test_elem[2];
      char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
      char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
      char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
      char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
      char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
      CHECK_EQ(12, elem2_start - elem_start);  // Two uint_32's and a pointer.
      CHECK_EQ(0, elem_in0 - elem_start);
      CHECK_EQ(kIntSize, elem_in1 - elem_start);
      CHECK_EQ(2 * kIntSize, elem_out - elem_start);
    }
#endif

    // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
    // Computed as ((a1 * 3) << 2) since each element is 12 bytes.
    __ sll(t0, a1, 1);
    __ Addu(a1, a1, t0);
    __ sll(t0, a1, 2);
    __ Addu(cache_entry, cache_entry, t0);

    // Check if cache matches: Double value is stored in uint32_t[2] array.
    __ lw(t0, MemOperand(cache_entry, 0));
    __ lw(t1, MemOperand(cache_entry, 4));
    __ lw(t2, MemOperand(cache_entry, 8));
    __ Branch(&calculate, ne, a2, Operand(t0));
    __ Branch(&calculate, ne, a3, Operand(t1));
    // Cache hit. Load result, cleanup and return.
    Counters* counters = masm->isolate()->counters();
    __ IncrementCounter(
        counters->transcendental_cache_hit(), 1, scratch0, scratch1);
    if (tagged) {
      // Pop input value from stack and load result into v0.
      __ Drop(1);
      __ mov(v0, t2);
    } else {
      // Load result into f4.
      __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
    }
    __ Ret();
  }  // if (CpuFeatures::IsSupported(FPU))

  __ bind(&calculate);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(
      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
  if (tagged) {
    // Tagged miss (or no FPU at all): let the runtime compute the result.
    __ bind(&invalid_cache);
    __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
                                                   masm->isolate()),
                                 1,
                                 1);
  } else {
    // The untagged case is only reachable with FPU support.
    ASSERT(CpuFeatures::IsSupported(FPU));
    CpuFeatures::Scope scope(FPU);

    Label no_update;
    Label skip_cache;

    // Call C function to calculate the result and update the cache.
    // a0: precalculated cache entry address.
    // a2 and a3: parts of the double value.
    // Store a0, a2 and a3 on stack for later before calling C function.
    __ Push(a3, a2, cache_entry);
    GenerateCallCFunction(masm, scratch0);
    __ GetCFunctionDoubleResult(f4);

    // Try to update the cache. If we cannot allocate a
    // heap number, we return the result without updating.
    __ Pop(a3, a2, cache_entry);
    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
    __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));

    // Write the new entry: input words and result heap number.
    __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
    __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
    __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));

    // v0 is set in the branch delay slot of Ret.
    __ Ret(USE_DELAY_SLOT);
    __ mov(v0, cache_entry);

    __ bind(&invalid_cache);
    // The cache is invalid. Call runtime which will recreate the
    // cache.
    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
    __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(a0);
      __ CallRuntime(RuntimeFunction(), 1);
    }
    __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
    __ Ret();

    __ bind(&skip_cache);
    // Call C function to calculate the result and answer directly
    // without updating the cache.
    GenerateCallCFunction(masm, scratch0);
    __ GetCFunctionDoubleResult(f4);
    __ bind(&no_update);

    // We return the value in f4 without adding it to the cache, but
    // we cause a scavenging GC so that future allocations will succeed.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);

      // Allocate an aligned object larger than a HeapNumber.
      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
      __ li(scratch0, Operand(4 * kPointerSize));
      __ push(scratch0);
      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
    }
    __ Ret();
  }
}
-
-
-void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
- Register scratch) {
- __ push(ra);
- __ PrepareCallCFunction(2, scratch);
- if (IsMipsSoftFloatABI) {
- __ Move(a0, a1, f4);
- } else {
- __ mov_d(f12, f4);
- }
- AllowExternalCallThatCantCauseGC scope(masm);
- Isolate* isolate = masm->isolate();
- switch (type_) {
- case TranscendentalCache::SIN:
- __ CallCFunction(
- ExternalReference::math_sin_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::COS:
- __ CallCFunction(
- ExternalReference::math_cos_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::TAN:
- __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::LOG:
- __ CallCFunction(
- ExternalReference::math_log_double_function(isolate),
- 0, 1);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- __ pop(ra);
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
// Tail-calls the stack-guard runtime routine; no arguments, one result.
void StackCheckStub::Generate(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
}
-
-
// Tail-calls the interrupt runtime routine; no arguments, one result.
void InterruptStub::Generate(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
}
-
-
// Computes Math.pow. Depending on |exponent_type_| the inputs arrive on
// the stack (ON_STACK), as tagged values (TAGGED), or as an untagged
// integer exponent (INTEGER). Integer exponents use a square-and-multiply
// loop; non-integer exponents go to the C pow() routine, with special
// handling of +/-0.5 in the ON_STACK case (per ES5.1 15.8.2.13).
void MathPowStub::Generate(MacroAssembler* masm) {
  CpuFeatures::Scope fpu_scope(FPU);
  const Register base = a1;
  const Register exponent = a2;
  const Register heapnumbermap = t1;
  const Register heapnumber = v0;
  const DoubleRegister double_base = f2;
  const DoubleRegister double_exponent = f4;
  const DoubleRegister double_result = f0;
  const DoubleRegister double_scratch = f6;
  const FPURegister single_scratch = f8;
  const Register scratch = t5;
  const Register scratch2 = t3;

  Label call_runtime, done, int_exponent;
  if (exponent_type_ == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack to double registers.
    __ lw(base, MemOperand(sp, 1 * kPointerSize));
    __ lw(exponent, MemOperand(sp, 0 * kPointerSize));

    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);

    // Base must be a smi or a heap number; anything else goes to runtime.
    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));

    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent);

    __ bind(&base_is_smi);
    __ mtc1(scratch, single_scratch);
    __ cvt_d_w(double_base, single_scratch);
    __ bind(&unpack_exponent);

    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type_ == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type_ != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ EmitFPUTruncate(kRoundToMinusInf,
                       scratch,
                       double_exponent,
                       at,
                       double_scratch,
                       scratch2,
                       kCheckForInexactConversion);
    // scratch2 == 0 means there was no conversion error.
    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));

    if (exponent_type_ == ON_STACK) {
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label not_plus_half;

      // Test for 0.5.
      __ Move(double_scratch, 0.5);
      __ BranchF(USE_DELAY_SLOT,
                 &not_plus_half,
                 NULL,
                 ne,
                 double_exponent,
                 double_scratch);
      // double_scratch can be overwritten in the delay slot.
      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      __ Move(double_scratch, -V8_INFINITY);
      // neg_d below executes in BranchF's delay slot, so when base is
      // -Infinity the returned result is -(-Infinity) == +Infinity.
      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
      __ neg_d(double_result, double_scratch);

      // Add +0 to convert -0 to +0.
      __ add_d(double_scratch, double_base, kDoubleRegZero);
      __ sqrt_d(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&not_plus_half);
      __ Move(double_scratch, -0.5);
      __ BranchF(USE_DELAY_SLOT,
                 &call_runtime,
                 NULL,
                 ne,
                 double_exponent,
                 double_scratch);
      // double_scratch can be overwritten in the delay slot.
      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      __ Move(double_scratch, -V8_INFINITY);
      // The Move below executes in the delay slot: -Infinity^-0.5 == +0.
      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
      __ Move(double_result, kDoubleRegZero);

      // Add +0 to convert -0 to +0.
      __ add_d(double_scratch, double_base, kDoubleRegZero);
      // Compute 1 / sqrt(base).
      __ Move(double_result, 1);
      __ sqrt_d(double_scratch, double_scratch);
      __ div_d(double_result, double_result, double_scratch);
      __ jmp(&done);
    }

    // General non-integer exponent: call the C pow() implementation.
    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch2);
      __ SetCallCDoubleArguments(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(masm->isolate()),
          0, 2);
    }
    __ pop(ra);
    __ GetCFunctionDoubleResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type_ == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }

  __ mov_d(double_scratch, double_base);  // Back up base.
  __ Move(double_result, 1.0);

  // Get absolute value of exponent.
  Label positive_exponent;
  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
  __ Subu(scratch, zero_reg, scratch);
  __ bind(&positive_exponent);

  // Square-and-multiply loop over the bits of |scratch|.
  Label while_true, no_carry, loop_end;
  __ bind(&while_true);

  __ And(scratch2, scratch, 1);

  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
  __ mul_d(double_result, double_result, double_scratch);
  __ bind(&no_carry);

  __ sra(scratch, scratch, 1);

  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
  __ mul_d(double_scratch, double_scratch, double_scratch);

  __ Branch(&while_true);

  __ bind(&loop_end);

  // Negative exponent: invert the accumulated result.
  __ Branch(&done, ge, exponent, Operand(zero_reg));
  __ Move(double_scratch, 1.0);
  __ div_d(double_result, double_scratch, double_result);
  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);

  // double_exponent may not contain the exponent value if the input was a
  // smi.  We set it with exponent value before bailing out.
  __ mtc1(exponent, single_scratch);
  __ cvt_d_w(double_exponent, single_scratch);

  // Returning or bailing out.
  Counters* counters = masm->isolate()->counters();
  if (exponent_type_ == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in exponent.
    __ bind(&done);
    __ AllocateHeapNumber(
        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
    __ sdc1(double_result,
            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    ASSERT(heapnumber.is(v0));
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ DropAndRet(2);
  } else {
    // Subnormal (or smi-input) bailout: recompute via the C function.
    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ SetCallCDoubleArguments(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(masm->isolate()),
          0, 2);
    }
    __ pop(ra);
    __ GetCFunctionDoubleResult(double_result);

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ Ret();
  }
}
-
-
// CEntryStub stores its own return address on the stack (see the
// find_ra sequence in GenerateCore), so the generated code must never be
// moved by the GC.
bool CEntryStub::NeedsImmovableCode() {
  return true;
}
-
-
// A CEntryStub counts as pregenerated only for single-word results; the
// save-doubles variant additionally requires the isolate's FP stubs to
// have been generated already (see CodeStub::GenerateFPStubs).
bool CEntryStub::IsPregenerated() {
  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
          result_size_ == 1;
}
-
-
// Generates the stubs that other generated code assumes to already exist:
// the C-entry stub and the fixed-register variants of the write-int32,
// store-buffer-overflow and record-write stubs.
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
}
-
-
// Pre-generates the FP-dependent stubs (the save-doubles C-entry stub and
// the store-buffer-overflow stub) and marks them pregenerated, unless they
// already exist (e.g. came in via the snapshot).
void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Save FP registers only when the CPU actually has an FPU.
  SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
      ? kSaveFPRegs
      : kDontSaveFPRegs;
  CEntryStub save_doubles(1, mode);
  StoreBufferOverflowStub stub(mode);
  // These stubs might already be in the snapshot, detect that and don't
  // regenerate, which would lead to code stub initialization state being messed
  // up.
  Code* save_doubles_code = NULL;
  Code* store_buffer_overflow_code = NULL;
  if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
    // NOTE(review): the two branches below emit identical calls and differ
    // only in the live CpuFeatures::Scope — presumably the scope permits
    // FPU instruction emission while the stubs are generated; confirm
    // against CpuFeatures::Scope before simplifying.
    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope2(FPU);
      save_doubles_code = *save_doubles.GetCode(isolate);
      store_buffer_overflow_code = *stub.GetCode(isolate);
    } else {
      save_doubles_code = *save_doubles.GetCode(isolate);
      store_buffer_overflow_code = *stub.GetCode(isolate);
    }
    save_doubles_code->set_is_pregenerated(true);
    store_buffer_overflow_code->set_is_pregenerated(true);
  }
  ISOLATE->set_fp_stubs_generated(true);
}
-
-
// Generates the plain (no FP save, single-result) C-entry stub ahead of
// time and marks it pregenerated.
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(1, kDontSaveFPRegs);
  Handle<Code> code = stub.GetCode(isolate);
  code->set_is_pregenerated(true);
}
-
-
// Branches to |oom_label| when |value| is an out-of-memory failure object.
// The check relies on the two STATIC_ASSERTed constants combining to 0xf
// in the value's low nibble. |scratch| is clobbered.
static void JumpIfOOM(MacroAssembler* masm,
                      Register value,
                      Register scratch,
                      Label* oom_label) {
  STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
  STATIC_ASSERT(kFailureTag == 3);
  __ andi(scratch, value, 0xf);
  __ Branch(oom_label, eq, scratch, Operand(0xf));
}
-
-
// Emits one attempt at calling the C builtin: optionally performs a GC
// first (|do_gc|), optionally bumps the always-allocate scope depth
// (|always_allocate|), makes the call, then dispatches failure results to
// the supplied throw labels or falls through to the retry point.
void CEntryStub::GenerateCore(MacroAssembler* masm,
                              Label* throw_normal_exception,
                              Label* throw_termination_exception,
                              Label* throw_out_of_memory_exception,
                              bool do_gc,
                              bool always_allocate) {
  // v0: result parameter for PerformGC, if any
  // s0: number of arguments including receiver (C callee-saved)
  // s1: pointer to the first argument          (C callee-saved)
  // s2: pointer to builtin function            (C callee-saved)

  Isolate* isolate = masm->isolate();

  if (do_gc) {
    // Move result passed in v0 into a0 to call PerformGC.
    __ mov(a0, v0);
    __ PrepareCallCFunction(1, 0, a1);
    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
  }

  ExternalReference scope_depth =
      ExternalReference::heap_always_allocate_scope_depth(isolate);
  if (always_allocate) {
    // Increment the scope depth; decremented again after the call below.
    __ li(a0, Operand(scope_depth));
    __ lw(a1, MemOperand(a0));
    __ Addu(a1, a1, Operand(1));
    __ sw(a1, MemOperand(a0));
  }

  // Prepare arguments for C routine.
  // a0 = argc
  __ mov(a0, s0);
  // a1 = argv (set in the delay slot after find_ra below).

  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
  // also need to reserve the 4 argument slots on the stack.

  __ AssertStackIsAligned();

  __ li(a2, Operand(ExternalReference::isolate_address()));

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
    // This branch-and-link sequence is needed to find the current PC on mips,
    // saved to the ra register.
    // Use masm-> here instead of the double-underscore macro since extra
    // coverage code can interfere with the proper calculation of ra.
    Label find_ra;
    masm->bal(&find_ra);  // bal exposes branch delay slot.
    masm->mov(a1, s1);
    masm->bind(&find_ra);

    // Adjust the value in ra to point to the correct return location, 2nd
    // instruction past the real call into C code (the jalr(t9)), and push it.
    // This is the return address of the exit frame.
    const int kNumInstructionsToJump = 5;
    masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
    masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
    // Stack space reservation moved to the branch delay slot below.
    // Stack is still aligned.

    // Call the C routine.
    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
    masm->jalr(t9);
    // Set up sp in the delay slot.
    masm->addiu(sp, sp, -kCArgsSlotsSize);
    // Make sure the stored 'ra' points to this position.
    ASSERT_EQ(kNumInstructionsToJump,
              masm->InstructionsGeneratedSince(&find_ra));
  }

  if (always_allocate) {
    // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
    __ li(a2, Operand(scope_depth));
    __ lw(a3, MemOperand(a2));
    __ Subu(a3, a3, Operand(1));
    __ sw(a3, MemOperand(a2));
  }

  // Check for failure result.
  Label failure_returned;
  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
  __ addiu(a2, v0, 1);
  __ andi(t0, a2, kFailureTagMask);
  __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
  // Restore stack (remove arg slots) in branch delay slot.
  __ addiu(sp, sp, kCArgsSlotsSize);


  // Exit C frame and return.
  // v0:v1: result
  // sp: stack pointer
  // fp: frame pointer
  __ LeaveExitFrame(save_doubles_, s0, true);

  // Check if we should retry or throw exception.
  Label retry;
  __ bind(&failure_returned);
  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
  __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
  __ Branch(&retry, eq, t0, Operand(zero_reg));

  // Special handling of out of memory exceptions.
  JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);

  // Retrieve the pending exception and clear the variable.
  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                      isolate)));
  __ lw(v0, MemOperand(t0));
  __ sw(a3, MemOperand(t0));

  // Special handling of termination exceptions which are uncatchable
  // by javascript code.
  __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
  __ Branch(throw_termination_exception, eq, v0, Operand(t0));

  // Handle normal exception.
  __ jmp(throw_normal_exception);

  __ bind(&retry);
  // Last failure (v0) will be moved to (a0) for parameter when retrying.
}
-
-
// Entry point from JavaScript into C++ builtins. Sets up an exit frame,
// then tries the builtin up to three times (plain, after a GC, after a
// full GC with always-allocate) before throwing; the three throw labels
// route the corresponding failure kinds.
void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function
  // s0: number of arguments including receiver
  // s1: size of arguments excluding receiver
  // s2: pointer to builtin function
  // fp: frame pointer    (restored after C call)
  // sp: stack pointer    (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)

  // NOTE: Invocations of builtins may return failure objects
  // instead of a proper result. The builtin entry handles
  // this by performing a garbage collection and retrying the
  // builtin once.

  // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
  // The reason for this is that these arguments would need to be saved anyway
  // so it's faster to set them up directly.
  // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.

  // Compute the argv pointer in a callee-saved register.
  __ Addu(s1, sp, s1);

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles_);

  // s0: number of arguments (C callee-saved)
  // s1: pointer to first argument (C callee-saved)
  // s2: pointer to builtin function (C callee-saved)

  Label throw_normal_exception;
  Label throw_termination_exception;
  Label throw_out_of_memory_exception;

  // Call into the runtime system.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               false,
               false);

  // Do space-specific GC and retry runtime call.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               false);

  // Do full GC and retry runtime call one final time.
  Failure* failure = Failure::InternalError();
  __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               true);

  __ bind(&throw_out_of_memory_exception);
  // Set external caught exception to false.
  Isolate* isolate = masm->isolate();
  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
                                    isolate);
  __ li(a0, Operand(false, RelocInfo::NONE32));
  __ li(a2, Operand(external_caught));
  __ sw(a0, MemOperand(a2));

  // Set pending exception and v0 to out of memory exception.
  Label already_have_failure;
  // Keep an OOM failure already in v0; otherwise synthesize one.
  JumpIfOOM(masm, v0, t0, &already_have_failure);
  Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
  __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
  __ bind(&already_have_failure);
  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                      isolate)));
  __ sw(v0, MemOperand(a2));
  // Fall through to the next label.

  __ bind(&throw_termination_exception);
  __ ThrowUncatchable(v0);

  __ bind(&throw_normal_exception);
  __ Throw(v0);
}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, handler_entry, exit;
- Isolate* isolate = masm->isolate();
-
- // Registers:
- // a0: entry address
- // a1: function
- // a2: receiver
- // a3: argc
- //
- // Stack:
- // 4 args slots
- // args
-
- // Save callee saved registers on the stack.
- __ MultiPush(kCalleeSaved | ra.bit());
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Save callee-saved FPU registers.
- __ MultiPushFPU(kCalleeSavedFPU);
- // Set up the reserved register for 0.0.
- __ Move(kDoubleRegZero, 0.0);
- }
-
-
- // Load argv in s0 register.
- int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- if (CpuFeatures::IsSupported(FPU)) {
- offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
- }
-
- __ InitializeRootRegister();
- __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
-
- // We build an EntryFrame.
- __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ li(t2, Operand(Smi::FromInt(marker)));
- __ li(t1, Operand(Smi::FromInt(marker)));
- __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
- isolate)));
- __ lw(t0, MemOperand(t0));
- __ Push(t3, t2, t1, t0);
- // Set up frame pointer for the frame to be pushed.
- __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: receiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // caller fp |
- // function slot | entry frame
- // context slot |
- // bad fp (0xff...f) |
- // callee saved registers + ra
- // 4 args slots
- // args
-
- // If this is the outermost JS call, set js_entry_sp value.
- Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
- __ li(t1, Operand(ExternalReference(js_entry_sp)));
- __ lw(t2, MemOperand(t1));
- __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
- __ sw(fp, MemOperand(t1));
- __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- Label cont;
- __ b(&cont);
- __ nop(); // Branch delay slot nop.
- __ bind(&non_outermost_js);
- __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
- __ bind(&cont);
- __ push(t0);
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
- __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
- __ b(&exit); // b exposes branch delay slot.
- __ nop(); // Branch delay slot nop.
-
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
- __ bind(&invoke);
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the bal(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Clear any pending exceptions.
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
- __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ sw(t1, MemOperand(t0));
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: receiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // handler frame
- // entry frame
- // callee saved registers + ra
- // 4 args slots
- // args
-
- if (is_construct) {
- ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate);
- __ li(t0, Operand(construct_entry));
- } else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
- __ li(t0, Operand(entry));
- }
- __ lw(t9, MemOperand(t0)); // Deref address.
-
- // Call JSEntryTrampoline.
- __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
- __ Call(t9);
-
- // Unlink this frame from the handler chain.
- __ PopTryHandler();
-
- __ bind(&exit); // v0 holds result
- // Check if the current stack frame is marked as the outermost JS frame.
- Label non_outermost_js_2;
- __ pop(t1);
- __ Branch(&non_outermost_js_2,
- ne,
- t1,
- Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- __ li(t1, Operand(ExternalReference(js_entry_sp)));
- __ sw(zero_reg, MemOperand(t1));
- __ bind(&non_outermost_js_2);
-
- // Restore the top frame descriptors from the stack.
- __ pop(t1);
- __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
- isolate)));
- __ sw(t1, MemOperand(t0));
-
- // Reset the stack to the callee saved registers.
- __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Restore callee-saved fpu registers.
- __ MultiPopFPU(kCalleeSavedFPU);
- }
-
- // Restore callee saved registers from the stack.
- __ MultiPop(kCalleeSaved | ra.bit());
- // Return.
- __ Jump(ra);
-}
-
-
-// Uses registers a0 to t0.
-// Expected input (depending on whether args are in registers or on the stack):
-// * object: a0 or at sp + 1 * kPointerSize.
-// * function: a1 or at sp.
-//
-// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register t0.
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
- // ReturnTrueFalse is only implemented for inlined call sites.
- ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub:
- const Register object = a0; // Object (lhs).
- Register map = a3; // Map of the object.
- const Register function = a1; // Function (rhs).
- const Register prototype = t0; // Prototype of the function.
- const Register inline_site = t5;
- const Register scratch = a2;
-
- const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
-
- Label slow, loop, is_instance, is_not_instance, not_js_object;
-
- if (!HasArgsInRegisters()) {
- __ lw(object, MemOperand(sp, 1 * kPointerSize));
- __ lw(function, MemOperand(sp, 0));
- }
-
- // Check that the left hand is a JS object and load map.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
- Label miss;
- __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
- __ Branch(&miss, ne, function, Operand(at));
- __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
- __ Branch(&miss, ne, map, Operand(at));
- __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&miss);
- }
-
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
-
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- } else {
- ASSERT(HasArgsInRegisters());
- // Patch the (relocated) inlined map check.
-
- // The offset was stored in t0 safepoint slot.
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
- __ LoadFromSafepointRegisterSlot(scratch, t0);
- __ Subu(inline_site, ra, scratch);
- // Get the map location in scratch and patch it.
- __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
- __ sw(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- }
-
- // Register mapping: a3 is object map and t0 is function prototype.
- // Get prototype of object into a2.
- __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
-
- // We don't need map any more. Use it as a scratch register.
- Register scratch2 = map;
- map = no_reg;
-
- // Loop through the prototype chain looking for the function prototype.
- __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
- __ bind(&loop);
- __ Branch(&is_instance, eq, scratch, Operand(prototype));
- __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
- __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
- __ Branch(&loop);
-
- __ bind(&is_instance);
- ASSERT(Smi::FromInt(0) == 0);
- if (!HasCallSiteInlineCheck()) {
- __ mov(v0, zero_reg);
- __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Patch the call site to return true.
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ PatchRelocatedValue(inline_site, scratch, v0);
-
- if (!ReturnTrueFalseObject()) {
- ASSERT_EQ(Smi::FromInt(0), 0);
- __ mov(v0, zero_reg);
- }
- }
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ li(v0, Operand(Smi::FromInt(1)));
- __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Patch the call site to return false.
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ PatchRelocatedValue(inline_site, scratch, v0);
-
- if (!ReturnTrueFalseObject()) {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
- }
-
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before null, smi and string value checks, check that the rhs is a function
- // as for a non-function rhs an exception needs to be thrown.
- __ JumpIfSmi(function, &slow);
- __ GetObjectType(function, scratch2, scratch);
- __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- // Null is not instance of anything.
- __ Branch(&object_not_null,
- ne,
- scratch,
- Operand(masm->isolate()->factory()->null_value()));
- __ li(v0, Operand(Smi::FromInt(1)));
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null);
- // Smi values are not instances of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi);
- __ li(v0, Operand(Smi::FromInt(1)));
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- __ IsObjectJSStringType(object, scratch, &slow);
- __ li(v0, Operand(Smi::FromInt(1)));
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- // Slow-case. Tail call builtin.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- if (HasArgsInRegisters()) {
- __ Push(a0, a1);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- __ mov(a0, v0);
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
- }
-}
-
-
-void ArrayLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a0,
- Operand(masm->isolate()->factory()->length_string()));
- receiver = a1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = a0;
- }
-
- StubCompiler::GenerateLoadArrayLength(masm, receiver, a3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a0,
- Operand(masm->isolate()->factory()->prototype_string()));
- receiver = a1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = a0;
- }
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a0,
- Operand(masm->isolate()->factory()->length_string()));
- receiver = a1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = a0;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss,
- support_wrapper_);
-
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
- Label miss;
-
- Register receiver;
- Register value;
- if (kind() == Code::KEYED_STORE_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a1,
- Operand(masm->isolate()->factory()->length_string()));
- receiver = a2;
- value = a0;
- } else {
- ASSERT(kind() == Code::STORE_IC);
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : key
- // -----------------------------------
- receiver = a1;
- value = a0;
- }
- Register scratch = a3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ GetObjectType(receiver, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ GetObjectType(scratch, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&miss, eq, scratch, Operand(at));
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::GenerateStoreMiss(masm, kind());
-}
-
-
-Register InstanceofStub::left() { return a0; }
-
-
-Register InstanceofStub::right() { return a1; }
-
-
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, v0, reg_, inobject_, index_);
- __ Ret();
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The displacement is the offset of the last parameter (if any)
- // relative to the frame pointer.
- const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
-
- // Check that the key is a smiGenerateReadElement.
- Label slow;
- __ JumpIfNotSmi(a1, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor,
- eq,
- a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Check index (a1) against formal parameters count limit passed in
- // through register a0. Use unsigned comparison to get negative
- // check for free.
- __ Branch(&slow, hs, a1, Operand(a0));
-
- // Read the argument from the stack and return it.
- __ subu(a3, a0, a1);
- __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, fp, Operand(t3));
- __ lw(v0, MemOperand(a3, kDisplacement));
- __ Ret();
-
- // Arguments adaptor case: Check index (a1) against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
-
- // Read the argument from the adaptor frame and return it.
- __ subu(a3, a0, a1);
- __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, a2, Operand(t3));
- __ lw(v0, MemOperand(a3, kDisplacement));
- __ Ret();
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ push(a1);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime,
- ne,
- a2,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer in the current frame.
- __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sw(a2, MemOperand(sp, 0 * kPointerSize));
- __ sll(t3, a2, 1);
- __ Addu(a3, a3, Operand(t3));
- __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
- __ sw(a3, MemOperand(sp, 1 * kPointerSize));
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
- // Stack layout:
- // sp[0] : number of parameters (tagged)
- // sp[4] : address of receiver argument
- // sp[8] : function
- // Registers used over whole function:
- // t2 : allocated object (tagged)
- // t5 : mapped parameter count (tagged)
-
- __ lw(a1, MemOperand(sp, 0 * kPointerSize));
- // a1 = parameter count (tagged)
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame,
- eq,
- a2,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // No adaptor, parameter count = argument count.
- __ mov(a2, a1);
- __ b(&try_allocate);
- __ nop(); // Branch delay slot nop.
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sll(t6, a2, 1);
- __ Addu(a3, a3, Operand(t6));
- __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ sw(a3, MemOperand(sp, 1 * kPointerSize));
-
- // a1 = parameter count (tagged)
- // a2 = argument count (tagged)
- // Compute the mapped parameter count = min(a1, a2) in a1.
- Label skip_min;
- __ Branch(&skip_min, lt, a1, Operand(a2));
- __ mov(a1, a2);
- __ bind(&skip_min);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- Label param_map_size;
- ASSERT_EQ(0, Smi::FromInt(0));
- __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
- __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
- __ sll(t5, a1, 1);
- __ addiu(t5, t5, kParameterMapHeaderSize);
- __ bind(&param_map_size);
-
- // 2. Backing store.
- __ sll(t6, a2, 1);
- __ Addu(t5, t5, Operand(t6));
- __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
-
- // v0 = address of new object(s) (tagged)
- // a2 = argument count (tagged)
- // Get the arguments boilerplate from the current native context into t0.
- const int kNormalOffset =
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
-
- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
- Label skip2_ne, skip2_eq;
- __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
- __ lw(t0, MemOperand(t0, kNormalOffset));
- __ bind(&skip2_ne);
-
- __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
- __ lw(t0, MemOperand(t0, kAliasedOffset));
- __ bind(&skip2_eq);
-
- // v0 = address of new object (tagged)
- // a1 = mapped parameter count (tagged)
- // a2 = argument count (tagged)
- // t0 = address of boilerplate object (tagged)
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ lw(a3, FieldMemOperand(t0, i));
- __ sw(a3, FieldMemOperand(v0, i));
- }
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ lw(a3, MemOperand(sp, 2 * kPointerSize));
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
-
- // Use the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset = JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize;
- __ sw(a2, FieldMemOperand(v0, kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, t0 will point there, otherwise
- // it will point to the backing store.
- __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
- __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // v0 = address of new object (tagged)
- // a1 = mapped parameter count (tagged)
- // a2 = argument count (tagged)
- // t0 = address of parameter map or backing store (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- Label skip3;
- __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
- // Move backing store address to a3, because it is
- // expected there when filling in the unmapped arguments.
- __ mov(a3, t0);
- __ bind(&skip3);
-
- __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
-
- __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
- __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
- __ Addu(t2, a1, Operand(Smi::FromInt(2)));
- __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
- __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ sll(t6, a1, 1);
- __ Addu(t2, t0, Operand(t6));
- __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
- __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameter thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ mov(t2, a1);
- __ lw(t5, MemOperand(sp, 0 * kPointerSize));
- __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ Subu(t5, t5, Operand(a1));
- __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
- __ sll(t6, t2, 1);
- __ Addu(a3, t0, Operand(t6));
- __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
-
- // t2 = loop variable (tagged)
- // a1 = mapping index (tagged)
- // a3 = address of backing store (tagged)
- // t0 = address of parameter map (tagged)
- // t1 = temporary scratch (a.o., for address calculation)
- // t3 = the hole value
- __ jmp(&parameters_test);
-
- __ bind(&parameters_loop);
- __ Subu(t2, t2, Operand(Smi::FromInt(1)));
- __ sll(t1, t2, 1);
- __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ Addu(t6, t0, t1);
- __ sw(t5, MemOperand(t6));
- __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ Addu(t6, a3, t1);
- __ sw(t3, MemOperand(t6));
- __ Addu(t5, t5, Operand(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
-
- __ bind(&skip_parameter_map);
- // a2 = argument count (tagged)
- // a3 = address of backing store (tagged)
- // t1 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
- __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
- __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
-
- Label arguments_loop, arguments_test;
- __ mov(t5, a1);
- __ lw(t0, MemOperand(sp, 1 * kPointerSize));
- __ sll(t6, t5, 1);
- __ Subu(t0, t0, Operand(t6));
- __ jmp(&arguments_test);
-
- __ bind(&arguments_loop);
- __ Subu(t0, t0, Operand(kPointerSize));
- __ lw(t2, MemOperand(t0, 0));
- __ sll(t6, t5, 1);
- __ Addu(t1, a3, Operand(t6));
- __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
- __ Addu(t5, t5, Operand(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ Branch(&arguments_loop, lt, t5, Operand(a2));
-
- // Return and remove the on-stack parameters.
- __ DropAndRet(3);
-
- // Do the runtime call to allocate the arguments object.
- // a2 = argument count (tagged)
- __ bind(&runtime);
- __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame,
- eq,
- a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Get the length from the frame.
- __ lw(a1, MemOperand(sp, 0));
- __ Branch(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sw(a1, MemOperand(sp, 0));
- __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, a2, Operand(at));
-
- __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ sw(a3, MemOperand(sp, 1 * kPointerSize));
-
- // Try the new space allocation. Start out with computing the size
- // of the arguments object and the elements array in words.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
- __ srl(a1, a1, kSmiTagSize);
-
- __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ bind(&add_arguments_object);
- __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(a1,
- v0,
- a2,
- a3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT |
- SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current native context.
- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
- __ lw(t0, MemOperand(t0, Context::SlotOffset(
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
-
- // Copy the JS object part.
- __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ lw(a1, MemOperand(sp, 0 * kPointerSize));
- __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
-
- Label done;
- __ Branch(&done, eq, a1, Operand(zero_reg));
-
- // Get the parameters pointer from the stack.
- __ lw(a2, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
- __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
- __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
- __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
- // Untag the length for the loop.
- __ srl(a1, a1, kSmiTagSize);
-
- // Copy the fixed array slots.
- Label loop;
- // Set up t0 to point to the first array slot.
- __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- // Pre-decrement a2 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Addu(a2, a2, Operand(-kPointerSize));
- __ lw(a3, MemOperand(a2));
- // Post-increment t0 with kPointerSize on each iteration.
- __ sw(a3, MemOperand(t0));
- __ Addu(t0, t0, Operand(kPointerSize));
- __ Subu(a1, a1, Operand(1));
- __ Branch(&loop, ne, a1, Operand(zero_reg));
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ DropAndRet(3);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time or if regexp entry in generated code is turned off runtime switch or
- // at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
-
- // Stack frame on entry.
- // sp[0]: last_match_info (expected JSArray)
- // sp[4]: previous index
- // sp[8]: subject string
- // sp[12]: JSRegExp object
-
- const int kLastMatchInfoOffset = 0 * kPointerSize;
- const int kPreviousIndexOffset = 1 * kPointerSize;
- const int kSubjectOffset = 2 * kPointerSize;
- const int kJSRegExpOffset = 3 * kPointerSize;
-
- Isolate* isolate = masm->isolate();
-
- Label runtime;
- // Allocation of registers for this function. These are in callee save
- // registers and will be preserved by the call to the native RegExp code, as
- // this code is called using the normal C calling convention. When calling
- // directly from generated code the native RegExp code will not do a GC and
- // therefore the content of these registers are safe to use after the call.
- // MIPS - using s0..s2, since we are not using CEntry Stub.
- Register subject = s0;
- Register regexp_data = s1;
- Register last_match_info_elements = s2;
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(
- isolate);
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
- __ li(a0, Operand(address_of_regexp_stack_memory_size));
- __ lw(a0, MemOperand(a0, 0));
- __ Branch(&runtime, eq, a0, Operand(zero_reg));
-
- // Check that the first argument is a JSRegExp object.
- __ lw(a0, MemOperand(sp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(a0, &runtime);
- __ GetObjectType(a0, a1, a1);
- __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
-
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ And(t0, regexp_data, Operand(kSmiTagMask));
- __ Check(nz,
- "Unexpected type for RegExp data, FixedArray expected",
- t0,
- Operand(zero_reg));
- __ GetObjectType(regexp_data, a0, a0);
- __ Check(eq,
- "Unexpected type for RegExp data, FixedArray expected",
- a0,
- Operand(FIXED_ARRAY_TYPE));
- }
-
- // regexp_data: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
- __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
-
- // regexp_data: RegExp data (FixedArray)
- // Check that the number of captures fit in the static offsets vector buffer.
- __ lw(a2,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Check (number_of_captures + 1) * 2 <= offsets vector size
- // Or number_of_captures * 2 <= offsets vector size - 2
- // Multiplying by 2 comes for free since a2 is smi-tagged.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
- __ Branch(
- &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
-
- // Reset offset for possibly sliced string.
- __ mov(t0, zero_reg);
- __ lw(subject, MemOperand(sp, kSubjectOffset));
- __ JumpIfSmi(subject, &runtime);
- __ mov(a3, subject); // Make a copy of the original subject string.
- __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
- // subject: subject string
- // a3: subject string
- // a0: subject string instance type
- // regexp_data: RegExp data (FixedArray)
- // Handle subject string according to its encoding and representation:
- // (1) Sequential string? If yes, go to (5).
- // (2) Anything but sequential or cons? If yes, go to (6).
- // (3) Cons string. If the string is flat, replace subject with first string.
- // Otherwise bailout.
- // (4) Is subject external? If yes, go to (7).
- // (5) Sequential string. Load regexp code according to encoding.
- // (E) Carry on.
- /// [...]
-
- // Deferred code at the end of the stub:
- // (6) Not a long external string? If yes, go to (8).
- // (7) External string. Make it, offset-wise, look like a sequential string.
- // Go to (5).
- // (8) Short external string or not a string? If yes, bail out to runtime.
- // (9) Sliced string. Replace subject with parent. Go to (4).
-
- Label seq_string /* 5 */, external_string /* 7 */,
- check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
- not_long_external /* 8 */;
-
- // (1) Sequential string? If yes, go to (5).
- __ And(a1,
- a0,
- Operand(kIsNotStringMask |
- kStringRepresentationMask |
- kShortExternalStringMask));
- STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
-
- // (2) Anything but sequential or cons? If yes, go to (6).
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
- STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- // Go to (6).
- __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
-
- // (3) Cons string. Check that it's flat.
- // Replace subject with first string and reload instance type.
- __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ LoadRoot(a1, Heap::kempty_stringRootIndex);
- __ Branch(&runtime, ne, a0, Operand(a1));
- __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
-
- // (4) Is subject external? If yes, go to (7).
- __ bind(&check_underlying);
- __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(at, a0, Operand(kStringRepresentationMask));
- // The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
- __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
-
- // (5) Sequential string. Load regexp code according to encoding.
- __ bind(&seq_string);
- // subject: sequential subject string (or look-alike, external string)
- // a3: original subject string
- // Load previous index and check range before a3 is overwritten. We have to
- // use a3 instead of subject here because subject might have been only made
- // to look like a sequential string when it actually is an external string.
- __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
- __ JumpIfNotSmi(a1, &runtime);
- __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
- __ Branch(&runtime, ls, a3, Operand(a1));
- __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
-
- STATIC_ASSERT(kStringEncodingMask == 4);
- STATIC_ASSERT(kOneByteStringTag == 4);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
- __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
- __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
- __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
- __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
-
- // (E) Carry on. String handling is done.
- // t9: irregexp code
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // a smi (code flushing support).
- __ JumpIfSmi(t9, &runtime);
-
- // a1: previous index
- // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // t9: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate->counters()->regexp_entry_native(),
- 1, a0, a2);
-
- // Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 9;
- const int kParameterRegisters = 4;
- __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
-
- // Stack pointer now points to cell where return address is to be written.
- // Arguments are before that on the stack or in registers, meaning we
- // treat the return address as argument 5. Thus every argument after that
- // needs to be shifted back by 1. Since DirectCEntryStub will handle
- // allocating space for the c argument slots, we don't need to calculate
- // that into the argument positions on the stack. This is how the stack will
- // look (sp meaning the value of sp at this moment):
- // [sp + 5] - Argument 9
- // [sp + 4] - Argument 8
- // [sp + 3] - Argument 7
- // [sp + 2] - Argument 6
- // [sp + 1] - Argument 5
- // [sp + 0] - saved ra
-
- // Argument 9: Pass current isolate address.
- // CFunctionArgumentOperand handles MIPS stack argument slots.
- __ li(a0, Operand(ExternalReference::isolate_address()));
- __ sw(a0, MemOperand(sp, 5 * kPointerSize));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ li(a0, Operand(1));
- __ sw(a0, MemOperand(sp, 4 * kPointerSize));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- __ li(a0, Operand(address_of_regexp_stack_memory_address));
- __ lw(a0, MemOperand(a0, 0));
- __ li(a2, Operand(address_of_regexp_stack_memory_size));
- __ lw(a2, MemOperand(a2, 0));
- __ addu(a0, a0, a2);
- __ sw(a0, MemOperand(sp, 3 * kPointerSize));
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(a0, zero_reg);
- __ sw(a0, MemOperand(sp, 2 * kPointerSize));
-
- // Argument 5: static offsets vector buffer.
- __ li(a0, Operand(
- ExternalReference::address_of_static_offsets_vector(isolate)));
- __ sw(a0, MemOperand(sp, 1 * kPointerSize));
-
- // For arguments 4 and 3 get string length, calculate start of string data
- // and calculate the shift of the index (0 for ASCII and 1 for two byte).
- __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
- // Load the length from the original subject string from the previous stack
- // frame. Therefore we have to use fp, which points exactly to two pointer
- // sizes below the previous sp. (Because creating a new stack frame pushes
- // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
- __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
- // If slice offset is not 0, load the length from the original sliced string.
- // Argument 4, a3: End of string data
- // Argument 3, a2: Start of string data
- // Prepare start and end index of the input.
- __ sllv(t1, t0, a3);
- __ addu(t0, t2, t1);
- __ sllv(t1, a1, a3);
- __ addu(a2, t0, t1);
-
- __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
- __ sra(t2, t2, kSmiTagSize);
- __ sllv(t1, t2, a3);
- __ addu(a3, t0, t1);
- // Argument 2 (a1): Previous index.
- // Already there
-
- // Argument 1 (a0): Subject string.
- __ mov(a0, subject);
-
- // Locate the code entry and call it.
- __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
- DirectCEntryStub stub;
- stub.GenerateCall(masm, t9);
-
- __ LeaveExitFrame(false, no_reg);
-
- // v0: result
- // subject: subject string (callee saved)
- // regexp_data: RegExp data (callee saved)
- // last_match_info_elements: Last match info elements (callee saved)
- // Check the result.
- Label success;
- __ Branch(&success, eq, v0, Operand(1));
- // We expect exactly one result since we force the called regexp to behave
- // as non-global.
- Label failure;
- __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
- // If not exception it can only be retry. Handle that in the runtime system.
- __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
- // Result must now be exception. If there is no pending exception already a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // haven't created the exception yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ li(a1, Operand(isolate->factory()->the_hole_value()));
- __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ lw(v0, MemOperand(a2, 0));
- __ Branch(&runtime, eq, v0, Operand(a1));
-
- __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
-
- // Check if the exception is a termination. If so, throw as uncatchable.
- __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
- Label termination_exception;
- __ Branch(&termination_exception, eq, v0, Operand(a0));
-
- __ Throw(v0);
-
- __ bind(&termination_exception);
- __ ThrowUncatchable(v0);
-
- __ bind(&failure);
- // For failure and exception return null.
- __ li(v0, Operand(isolate->factory()->null_value()));
- __ DropAndRet(4);
-
- // Process the result from the native regexp code.
- __ bind(&success);
- __ lw(a1,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- // Multiplying by 2 comes for free since r1 is smi-tagged.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ Addu(a1, a1, Operand(2)); // a1 was a smi.
-
- __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(a0, &runtime);
- __ GetObjectType(a0, a2, a2);
- __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
- // Check that the JSArray is in fast case.
- __ lw(last_match_info_elements,
- FieldMemOperand(a0, JSArray::kElementsOffset));
- __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(&runtime, ne, a0, Operand(at));
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ lw(a0,
- FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
- __ sra(at, a0, kSmiTagSize);
- __ Branch(&runtime, gt, a2, Operand(at));
-
- // a1: number of capture registers
- // subject: subject string
- // Store the capture count.
- __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
- __ sw(a2, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset));
- // Store last subject and last input.
- __ sw(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset));
- __ mov(a2, subject);
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset,
- subject,
- t3,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- __ mov(subject, a2);
- __ sw(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset));
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastInputOffset,
- subject,
- t3,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate);
- __ li(a2, Operand(address_of_static_offsets_vector));
-
- // a1: number of capture registers
- // a2: offsets vector
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wrapping after zero.
- __ Addu(a0,
- last_match_info_elements,
- Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
- __ bind(&next_capture);
- __ Subu(a1, a1, Operand(1));
- __ Branch(&done, lt, a1, Operand(zero_reg));
- // Read the value from the static offsets vector buffer.
- __ lw(a3, MemOperand(a2, 0));
- __ addiu(a2, a2, kPointerSize);
- // Store the smi value in the last match info.
- __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
- __ sw(a3, MemOperand(a0, 0));
- __ Branch(&next_capture, USE_DELAY_SLOT);
- __ addiu(a0, a0, kPointerSize); // In branch delay slot.
-
- __ bind(&done);
-
- // Return last match info.
- __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
- __ DropAndRet(4);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-
- // Deferred code for string handling.
- // (6) Not a long external string? If yes, go to (8).
- __ bind(&not_seq_nor_cons);
- // Go to (8).
- __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
-
- // (7) External string. Make it, offset-wise, look like a sequential string.
- __ bind(&external_string);
- __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ And(at, a0, Operand(kIsIndirectStringMask));
- __ Assert(eq,
- "external string expected, but not found",
- at,
- Operand(zero_reg));
- }
- __ lw(subject,
- FieldMemOperand(subject, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Subu(subject,
- subject,
- SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ jmp(&seq_string); // Go to (5).
-
- // (8) Short external string or not a string? If yes, bail out to runtime.
- __ bind(&not_long_external);
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
- __ Branch(&runtime, ne, at, Operand(zero_reg));
-
- // (9) Sliced string. Replace subject with parent. Go to (4).
- // Load offset into t0 and replace subject string with parent.
- __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ sra(t0, t0, kSmiTagSize);
- __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
- __ jmp(&check_underlying); // Go to (4).
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- __ lw(a1, MemOperand(sp, kPointerSize * 2));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ JumpIfNotSmi(a1, &slowcase);
- __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
- // Smi-tagging is equivalent to multiplying by 2.
- // Allocate RegExpResult followed by FixedArray with size in ebx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- // Size of JSArray with two in-object properties and the header of a
- // FixedArray.
- int objects_size =
- (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
- __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
- __ Addu(a2, t1, Operand(objects_size));
- __ AllocateInNewSpace(
- a2, // In: Size, in words.
- v0, // Out: Start of allocation (tagged).
- a3, // Scratch register.
- t0, // Scratch register.
- &slowcase,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
- // v0: Start of allocated area, object-tagged.
- // a1: Number of elements in array, as smi.
- // t1: Number of elements, untagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
- __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
- __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
- __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- // Set input, index and length fields from arguments.
- __ lw(a1, MemOperand(sp, kPointerSize * 0));
- __ lw(a2, MemOperand(sp, kPointerSize * 1));
- __ lw(t2, MemOperand(sp, kPointerSize * 2));
- __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
- __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
- __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
-
- // Fill out the elements FixedArray.
- // v0: JSArray, tagged.
- // a3: FixedArray, tagged.
- // t1: Number of elements in array, untagged.
-
- // Set map.
- __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
- __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
- // Set FixedArray length.
- __ sll(t2, t1, kSmiTagSize);
- __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with undefined.
- // v0: JSArray, tagged.
- // a2: undefined.
- // a3: Start of elements in FixedArray.
- // t1: Number of elements to fill.
- Label loop;
- __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
- __ addu(t1, t1, a3); // Point past last element to store.
- __ bind(&loop);
- __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
- __ sw(a2, MemOperand(a3));
- __ Branch(&loop, USE_DELAY_SLOT);
- __ addiu(a3, a3, kPointerSize); // In branch delay slot.
-
- __ bind(&done);
- __ DropAndRet(3);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // a1 : the function to call
- // a2 : cache cell for call target
- ASSERT(!FLAG_optimize_constructed_arrays);
- Label done;
-
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
-
- // Load the cache state into a3.
- __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ Branch(&done, eq, a3, Operand(a1));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&done, eq, a3, Operand(at));
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-
- __ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
- // An uninitialized cache is patched with the function.
- // Store a1 in the delay slot. This may or may not get overwritten depending
- // on the result of the comparison.
- __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
-
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-
- __ bind(&done);
-}
-
-
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // a1 : the function to call
- // a2 : cache cell for call target
- ASSERT(FLAG_optimize_constructed_arrays);
- Label initialize, done, miss, megamorphic, not_array_function;
-
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
-
- // Load the cache state into a3.
- __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ Branch(&done, eq, a3, Operand(a1));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&done, eq, a3, Operand(at));
-
- // Special handling of the Array() function, which caches not only the
- // monomorphic Array function but the initial ElementsKind with special
- // sentinels
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- LAST_FAST_ELEMENTS_KIND);
- __ Branch(&miss, ne, a3, Operand(terminal_kind_sentinel));
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&megamorphic, ne, a1, Operand(a3));
- __ jmp(&done);
-
- __ bind(&miss);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&initialize, eq, a3, Operand(at));
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ bind(&megamorphic);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
- __ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&not_array_function, ne, a1, Operand(a3));
-
- // The target function is the Array constructor, install a sentinel value in
- // the constructor's type info cell that will track the initial ElementsKind
- // that should be used for the array when its constructed.
- Handle<Object> initial_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- GetInitialFastElementsKind());
- __ li(a3, Operand(initial_kind_sentinel));
- __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
- __ Branch(&done);
-
- __ bind(&not_array_function);
- __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // a1 : the function to call
- // a2 : cache cell for call target
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
- // Call as function is indicated with the hole.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&call, ne, t0, Operand(at));
- // Patch the receiver on the stack with the global receiver object.
- __ lw(a3,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, argc_ * kPointerSize));
- __ bind(&call);
- }
-
- // Check that the function is really a JavaScript function.
- // a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &non_function);
- // Get the map of the function object.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Fast-case: Invoke the function now.
- // a1: pushed function
- ParameterCount actual(argc_);
-
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&call_as_function, eq, t0, Operand(at));
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
- }
- // Check for function proxy.
- __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ push(a1); // Put proxy as additional argument.
- __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
- __ li(a2, Operand(0, RelocInfo::NONE32));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(t1, CALL_AS_METHOD);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(&non_function);
- __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
- __ li(a0, Operand(argc_)); // Set up the number of arguments.
- __ mov(a2, zero_reg);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
- __ SetCallKind(t1, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-void CallConstructStub::Generate(MacroAssembler* masm) {
- // a0 : number of arguments
- // a1 : the function to call
- // a2 : cache cell for call target
- Label slow, non_function_call;
-
- // Check that the function is not a smi.
- __ JumpIfSmi(a1, &non_function_call);
- // Check that the function is a JSFunction.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Jump to the function-specific construct stub.
- Register jmp_reg = FLAG_optimize_constructed_arrays ? a3 : a2;
- __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(jmp_reg, FieldMemOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
-
- // a0: number of arguments
- // a1: called object
- // a3: object type
- Label do_call;
- __ bind(&slow);
- __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing r0).
- __ li(a2, Operand(0, RelocInfo::NONE32));
- __ SetCallKind(t1, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-// StringCharCodeAtGenerator.
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
- Label sliced_string;
-
- ASSERT(!t0.is(index_));
- ASSERT(!t0.is(result_));
- ASSERT(!t0.is(object_));
-
- // If the receiver is a smi trigger the non-string case.
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ And(t0, result_, Operand(kIsNotStringMask));
- __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
-
- // If the index is non-smi trigger the non-smi case.
- __ JumpIfNotSmi(index_, &index_not_smi_);
-
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
- __ Branch(index_out_of_range_, ls, t0, Operand(index_));
-
- __ sra(index_, index_, kSmiTagSize);
-
- StringCharLoadGenerator::Generate(masm,
- object_,
- index_,
- result_,
- &call_runtime_);
-
- __ sll(result_, result_, kSmiTagSize);
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- result_,
- Heap::kHeapNumberMapRootIndex,
- index_not_number_,
- DONT_DO_SMI_CHECK);
- call_helper.BeforeCall(masm);
- // Consumed by runtime conversion function:
- __ Push(object_, index_);
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
-
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
-
- __ Move(index_, v0);
- __ pop(object_);
- // Reload the instance type.
- __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(index_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ Branch(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code of getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ sll(index_, index_, kSmiTagSize);
- __ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
-
- __ Move(result_, v0);
-
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
-
- ASSERT(!t0.is(result_));
- ASSERT(!t0.is(code_));
-
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
- __ And(t0,
- code_,
- Operand(kSmiTagMask |
- ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
- __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ASCII char code.
- STATIC_ASSERT(kSmiTag == 0);
- __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(result_, result_, t0);
- __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&slow_case_, eq, result_, Operand(t0));
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- __ Move(result_, v0);
-
- call_helper.AfterCall(masm);
- __ Branch(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- Label done;
- // This loop just copies one character at a time, as it is only used for
- // very short strings.
- if (!ascii) {
- __ addu(count, count, count);
- }
- __ Branch(&done, eq, count, Operand(zero_reg));
- __ addu(count, dest, count); // Count now points to the last dest byte.
-
- __ bind(&loop);
- __ lbu(scratch, MemOperand(src));
- __ addiu(src, src, 1);
- __ sb(scratch, MemOperand(dest));
- __ addiu(dest, dest, 1);
- __ Branch(&loop, lt, dest, Operand(count));
-
- __ bind(&done);
-}
-
-
-enum CopyCharactersFlags {
- COPY_ASCII = 1,
- DEST_ALWAYS_ALIGNED = 2
-};
-
-
-void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags) {
- bool ascii = (flags & COPY_ASCII) != 0;
- bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
-
- if (dest_always_aligned && FLAG_debug_code) {
- // Check that destination is actually word aligned if the flag says
- // that it is.
- __ And(scratch4, dest, Operand(kPointerAlignmentMask));
- __ Check(eq,
- "Destination of copy not aligned.",
- scratch4,
- Operand(zero_reg));
- }
-
- const int kReadAlignment = 4;
- const int kReadAlignmentMask = kReadAlignment - 1;
- // Ensure that reading an entire aligned word containing the last character
- // of a string will not read outside the allocated area (because we pad up
- // to kObjectAlignment).
- STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
- // Assumes word reads and writes are little endian.
- // Nothing to do for zero characters.
- Label done;
-
- if (!ascii) {
- __ addu(count, count, count);
- }
- __ Branch(&done, eq, count, Operand(zero_reg));
-
- Label byte_loop;
- // Must copy at least eight bytes, otherwise just do it one byte at a time.
- __ Subu(scratch1, count, Operand(8));
- __ Addu(count, dest, Operand(count));
- Register limit = count; // Read until src equals this.
- __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
-
- if (!dest_always_aligned) {
- // Align dest by byte copying. Copies between zero and three bytes.
- __ And(scratch4, dest, Operand(kReadAlignmentMask));
- Label dest_aligned;
- __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
- Label aligned_loop;
- __ bind(&aligned_loop);
- __ lbu(scratch1, MemOperand(src));
- __ addiu(src, src, 1);
- __ sb(scratch1, MemOperand(dest));
- __ addiu(dest, dest, 1);
- __ addiu(scratch4, scratch4, 1);
- __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
- __ bind(&dest_aligned);
- }
-
- Label simple_loop;
-
- __ And(scratch4, src, Operand(kReadAlignmentMask));
- __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
-
- // Loop for src/dst that are not aligned the same way.
- // This loop uses lwl and lwr instructions. These instructions
- // depend on the endianness, and the implementation assumes little-endian.
- {
- Label loop;
- __ bind(&loop);
- __ lwr(scratch1, MemOperand(src));
- __ Addu(src, src, Operand(kReadAlignment));
- __ lwl(scratch1, MemOperand(src, -1));
- __ sw(scratch1, MemOperand(dest));
- __ Addu(dest, dest, Operand(kReadAlignment));
- __ Subu(scratch2, limit, dest);
- __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
- }
-
- __ Branch(&byte_loop);
-
- // Simple loop.
- // Copy words from src to dest, until less than four bytes left.
- // Both src and dest are word aligned.
- __ bind(&simple_loop);
- {
- Label loop;
- __ bind(&loop);
- __ lw(scratch1, MemOperand(src));
- __ Addu(src, src, Operand(kReadAlignment));
- __ sw(scratch1, MemOperand(dest));
- __ Addu(dest, dest, Operand(kReadAlignment));
- __ Subu(scratch2, limit, dest);
- __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
- }
-
- // Copy bytes from src to dest until dest hits limit.
- __ bind(&byte_loop);
- // Test if dest has already reached the limit.
- __ Branch(&done, ge, dest, Operand(limit));
- __ lbu(scratch1, MemOperand(src));
- __ addiu(src, src, 1);
- __ sb(scratch1, MemOperand(dest));
- __ addiu(dest, dest, 1);
- __ Branch(&byte_loop);
-
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits as such strings has a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ Subu(scratch, c1, Operand(static_cast<int>('0')));
- __ Branch(&not_array_index,
- Ugreater,
- scratch,
- Operand(static_cast<int>('9' - '0')));
- __ Subu(scratch, c2, Operand(static_cast<int>('0')));
-
- // If check failed combine both characters into single halfword.
- // This is required by the contract of the method: code at the
- // not_found branch expects this combination in c1 register.
- Label tmp;
- __ sll(scratch1, c2, kBitsPerByte);
- __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
- __ Or(c1, c1, scratch1);
- __ bind(&tmp);
- __ Branch(
- not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- StringHelper::GenerateHashInit(masm, hash, c1);
- StringHelper::GenerateHashAddCharacter(masm, hash, c2);
- StringHelper::GenerateHashGetHash(masm, hash);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ sll(scratch, c2, kBitsPerByte);
- __ Or(chars, chars, scratch);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load string table.
- // Load address of first element of the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ lw(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
- __ sra(mask, mask, 1);
- __ Addu(mask, mask, -1);
-
- // Calculate untagged address of the first element of the string table.
- Register first_string_table_element = string_table;
- __ Addu(first_string_table_element, string_table,
- Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
-
- // Registers.
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // mask: capacity mask
- // first_string_table_element: address of the first element of
- // the string table
- // undefined: the undefined object
- // scratch: -
-
- // Perform a number of probes in the string table.
- const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch5; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- if (i > 0) {
- __ Addu(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
- } else {
- __ mov(candidate, hash);
- }
-
- __ And(candidate, candidate, Operand(mask));
-
- // Load the entry from the symble table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ sll(scratch, candidate, kPointerSizeLog2);
- __ Addu(scratch, scratch, first_string_table_element);
- __ lw(candidate, MemOperand(scratch));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ GetObjectType(candidate, scratch, scratch);
- __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
-
- __ Branch(not_found, eq, undefined, Operand(candidate));
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, "oddball in string table is not undefined or the hole",
- scratch, Operand(candidate));
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // Check that the candidate is a non-external ASCII string. The instance
- // type is still in the scratch register from the CompareObjectType
- // operation.
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
-
- // Check if the two characters match.
- // Assumes that word load is little endian.
- __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
- __ Branch(&found_in_string_table, eq, chars, Operand(scratch));
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- __ mov(v0, result);
-}
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash = seed + character + ((seed + character) << 10);
- __ LoadRoot(hash, Heap::kHashSeedRootIndex);
- // Untag smi seed and add the character.
- __ SmiUntag(hash);
- __ addu(hash, hash, character);
- __ sll(at, hash, 10);
- __ addu(hash, hash, at);
- // hash ^= hash >> 6;
- __ srl(at, hash, 6);
- __ xor_(hash, hash, at);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash += character;
- __ addu(hash, hash, character);
- // hash += hash << 10;
- __ sll(at, hash, 10);
- __ addu(hash, hash, at);
- // hash ^= hash >> 6;
- __ srl(at, hash, 6);
- __ xor_(hash, hash, at);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
- // hash += hash << 3;
- __ sll(at, hash, 3);
- __ addu(hash, hash, at);
- // hash ^= hash >> 11;
- __ srl(at, hash, 11);
- __ xor_(hash, hash, at);
- // hash += hash << 15;
- __ sll(at, hash, 15);
- __ addu(hash, hash, at);
-
- __ li(at, Operand(String::kHashBitMask));
- __ and_(hash, hash, at);
-
- // if (hash == 0) hash = 27;
- __ ori(at, zero_reg, StringHasher::kZeroHash);
- __ Movz(hash, at, hash);
-}
-
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
- // Stack frame on entry.
- // ra: return address
- // sp[0]: to
- // sp[4]: from
- // sp[8]: string
-
- // This stub is called from the native-call %_SubString(...), so
- // nothing can be assumed about the arguments. It is tested that:
- // "string" is a sequential string,
- // both "from" and "to" are smis, and
- // 0 <= from <= to <= string.length.
- // If any of these assumptions fail, we call the runtime system.
-
- const int kToOffset = 0 * kPointerSize;
- const int kFromOffset = 1 * kPointerSize;
- const int kStringOffset = 2 * kPointerSize;
-
- __ lw(a2, MemOperand(sp, kToOffset));
- __ lw(a3, MemOperand(sp, kFromOffset));
- STATIC_ASSERT(kFromOffset == kToOffset + 4);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-
- // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
- // safe in this case.
- __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
- __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
- // Both a2 and a3 are untagged integers.
-
- __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
-
- __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
- __ Subu(a2, a2, a3);
-
- // Make sure first argument is a string.
- __ lw(v0, MemOperand(sp, kStringOffset));
- __ JumpIfSmi(v0, &runtime);
- __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
- __ And(t0, a1, Operand(kIsNotStringMask));
-
- __ Branch(&runtime, ne, t0, Operand(zero_reg));
-
- Label single_char;
- __ Branch(&single_char, eq, a2, Operand(1));
-
- // Short-cut for the case of trivial substring.
- Label return_v0;
- // v0: original string
- // a2: result string length
- __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
- __ sra(t0, t0, 1);
- // Return original string.
- __ Branch(&return_v0, eq, a2, Operand(t0));
- // Longer than original string's length or negative: unsafe arguments.
- __ Branch(&runtime, hi, a2, Operand(t0));
- // Shorter than original string's length: an actual substring.
-
- // Deal with different string types: update the index if necessary
- // and put the underlying string into t1.
- // v0: original string
- // a1: instance type
- // a2: length
- // a3: from index (untagged)
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ And(t0, a1, Operand(kIsIndirectStringMask));
- __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
- // t0 is used as a scratch register and can be overwritten in either case.
- __ And(t0, a1, Operand(kSlicedNotConsMask));
- __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
- // Cons string. Check whether it is flat, then fetch first part.
- __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
- __ LoadRoot(t0, Heap::kempty_stringRootIndex);
- __ Branch(&runtime, ne, t1, Operand(t0));
- __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
- // Update instance type.
- __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
- __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
- __ sra(t0, t0, 1); // Add offset to index.
- __ Addu(a3, a3, t0);
- // Update instance type.
- __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the expected register.
- __ mov(t1, v0);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // t1: underlying subject string
- // a1: instance type of underlying subject string
- // a2: length
- // a3: adjusted start index (untagged)
- // Short slice. Copy instead of slicing.
- __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ And(t0, a1, Operand(kStringEncodingMask));
- __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
- __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
- __ jmp(&set_slice_header);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
- __ bind(&set_slice_header);
- __ sll(a3, a3, 1);
- __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
- __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
- __ jmp(&return_v0);
-
- __ bind(&copy_routine);
- }
-
- // t1: underlying subject string
- // a1: instance type of underlying subject string
- // a2: length
- // a3: adjusted start index (untagged)
- Label two_byte_sequential, sequential_string, allocate_result;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t0, a1, Operand(kExternalStringTag));
- __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ And(t0, a1, Operand(kShortExternalStringTag));
- __ Branch(&runtime, ne, t0, Operand(zero_reg));
- __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
- // t1 already points to the first character of underlying string.
- __ jmp(&allocate_result);
-
- __ bind(&sequential_string);
- // Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&allocate_result);
- // Sequential acii string. Allocate the result.
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ And(t0, a1, Operand(kStringEncodingMask));
- __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
-
- // Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
-
- // Locate first character of substring to copy.
- __ Addu(t1, t1, a3);
-
- // Locate first character of result.
- __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // v0: result string
- // a1: first character of result string
- // a2: result string length
- // t1: first character of substring to copy
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(
- masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
- __ jmp(&return_v0);
-
- // Allocate and copy the resulting two-byte string.
- __ bind(&two_byte_sequential);
- __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
-
- // Locate first character of substring to copy.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ sll(t0, a3, 1);
- __ Addu(t1, t1, t0);
- // Locate first character of result.
- __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // v0: result string.
- // a1: first character of result.
- // a2: result length.
- // t1: first character of substring to copy.
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(
- masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
-
- __ bind(&return_v0);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
- __ DropAndRet(3);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-
- __ bind(&single_char);
- // v0: original string
- // a1: instance type
- // a2: length
- // a3: from index (untagged)
- __ SmiTag(a3, a3);
- StringCharAtGenerator generator(
- v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- __ DropAndRet(3);
- generator.SkipSlow(masm, &runtime);
-}
-
-
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- Register length = scratch1;
-
- // Compare lengths.
- Label strings_not_equal, check_zero_length;
- __ lw(length, FieldMemOperand(left, String::kLengthOffset));
- __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ Branch(&check_zero_length, eq, length, Operand(scratch2));
- __ bind(&strings_not_equal);
- __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
- __ Ret();
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ Branch(&compare_chars, ne, length, Operand(zero_reg));
- __ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
-
- // Compare characters.
- __ bind(&compare_chars);
-
- GenerateAsciiCharsCompareLoop(masm,
- left, right, length, scratch2, scratch3, v0,
- &strings_not_equal);
-
- // Characters are equal.
- __ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- Label result_not_equal, compare_lengths;
- // Find minimum length and length difference.
- __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ Subu(scratch3, scratch1, Operand(scratch2));
- Register length_delta = scratch3;
- __ slt(scratch4, scratch2, scratch1);
- __ Movn(scratch1, scratch2, scratch4);
- Register min_length = scratch1;
- STATIC_ASSERT(kSmiTag == 0);
- __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
-
- // Compare loop.
- GenerateAsciiCharsCompareLoop(masm,
- left, right, min_length, scratch2, scratch4, v0,
- &result_not_equal);
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // Use length_delta as result if it's zero.
- __ mov(scratch2, length_delta);
- __ mov(scratch4, zero_reg);
- __ mov(v0, zero_reg);
-
- __ bind(&result_not_equal);
- // Conditionally update the result based either on length_delta or
- // the last comparion performed in the loop above.
- Label ret;
- __ Branch(&ret, eq, scratch2, Operand(scratch4));
- __ li(v0, Operand(Smi::FromInt(GREATER)));
- __ Branch(&ret, gt, scratch2, Operand(scratch4));
- __ li(v0, Operand(Smi::FromInt(LESS)));
- __ bind(&ret);
- __ Ret();
-}
-
-
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* chars_not_equal) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ Addu(scratch1, length,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ Addu(left, left, Operand(scratch1));
- __ Addu(right, right, Operand(scratch1));
- __ Subu(length, zero_reg, length);
- Register index = length; // index = -length;
-
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ Addu(scratch3, left, index);
- __ lbu(scratch1, MemOperand(scratch3));
- __ Addu(scratch3, right, index);
- __ lbu(scratch2, MemOperand(scratch3));
- __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
- __ Addu(index, index, 1);
- __ Branch(&loop, ne, index, Operand(zero_reg));
-}
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- Counters* counters = masm->isolate()->counters();
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[4]: left string
- __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
- __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
-
- Label not_same;
- __ Branch(&not_same, ne, a0, Operand(a1));
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
- __ DropAndRet(2);
-
- __ bind(&not_same);
-
- // Check that both objects are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
-
- // Compare flat ASCII strings natively. Remove arguments from stack first.
- __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- Counters* counters = masm->isolate()->counters();
-
- // Stack on entry:
- // sp[0]: second argument (right).
- // sp[4]: first argument (left).
-
- // Load the two arguments.
- __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
- __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfEitherSmi(a0, a1, &call_runtime);
- // Load instance types.
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kStringTag == 0);
- // If either is not a string, go to runtime.
- __ Or(t4, t0, Operand(t1));
- __ And(t4, t4, Operand(kIsNotStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
- }
-
- // Both arguments are strings.
- // a0: first string
- // a1: second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- {
- Label strings_not_empty;
- // Check if either of the strings are empty. In that case return the other.
- // These tests use zero-length check on string-length whch is an Smi.
- // Assert that Smi::FromInt(0) is really 0.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT(Smi::FromInt(0) == 0);
- __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
- __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
- __ mov(v0, a0); // Assume we'll return first string (from a0).
- __ Movz(v0, a1, a2); // If first is empty, return second (from a1).
- __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
- __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
- __ and_(t4, t4, t5); // Branch if both strings were non-empty.
- __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
-
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&strings_not_empty);
- }
-
- // Untag both string-lengths.
- __ sra(a2, a2, kSmiTagSize);
- __ sra(a3, a3, kSmiTagSize);
-
- // Both strings are non-empty.
- // a0: first string
- // a1: second string
- // a2: length of first string
- // a3: length of second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- // Adding two lengths can't overflow.
- STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
- __ Addu(t2, a2, Operand(a3));
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return a string here.
- __ Branch(&longer_than_two, ne, t2, Operand(2));
-
- // Check that both strings are non-external ASCII strings.
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
- __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&make_two_character_string);
- // Resulting string has length 2 and first chars of two strings
- // are combined into single halfword in a2 register.
- // So we can fill resulting string without two loops by a single
- // halfword store instruction (which assumes that processor is
- // in a little endian mode).
- __ li(t2, Operand(2));
- __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- ASSERT(IsPowerOf2(String::kMaxLength + 1));
- // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
- __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
-
- // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ASCII the result is an ASCII cons string.
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
- Label non_ascii, allocated, ascii_data;
- STATIC_ASSERT(kTwoByteStringTag == 0);
- // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
- __ And(t4, t0, Operand(t1));
- __ And(t4, t4, Operand(kStringEncodingMask));
- __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
-
- // Allocate an ASCII cons string.
- __ bind(&ascii_data);
- __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
- __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
- // t0: first instance type.
- // t1: second instance type.
- // Branch to if _both_ instances have kAsciiDataHintMask set.
- __ And(at, t0, Operand(kAsciiDataHintMask));
- __ and_(at, at, t1);
- __ Branch(&ascii_data, ne, at, Operand(zero_reg));
- __ Xor(t0, t0, Operand(t1));
- STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
- __ And(t0, t0, Operand(kOneByteStringTag | kAsciiDataHintTag));
- __ Branch(&ascii_data, eq, t0,
- Operand(kOneByteStringTag | kAsciiDataHintTag));
-
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
- __ Branch(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // a0: first string
- // a1: second string
- // a2: length of first string
- // a3: length of second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t2: sum of lengths.
- Label first_prepared, second_prepared;
- __ bind(&string_add_flat_result);
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
- // Check whether both strings have same encoding
- __ Xor(t3, t0, Operand(t1));
- __ And(t3, t3, Operand(kStringEncodingMask));
- __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t4, t0, Operand(kStringRepresentationMask));
-
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- Label skip_first_add;
- __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
- __ Branch(USE_DELAY_SLOT, &first_prepared);
- __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ bind(&skip_first_add);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(t4, t0, Operand(kShortExternalStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
- __ bind(&first_prepared);
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t4, t1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- Label skip_second_add;
- __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
- __ Branch(USE_DELAY_SLOT, &second_prepared);
- __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ bind(&skip_second_add);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(t4, t1, Operand(kShortExternalStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // t3: first character of first string
- // a1: first character of second string
- // a2: length of first string
- // a3: length of second string
- // t2: sum of lengths.
- // Both strings have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ And(t4, t1, Operand(kStringEncodingMask));
- __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
-
- __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // v0: result string.
- // t3: first character of first string.
- // a1: first character of second string
- // a2: length of first string.
- // a3: length of second string.
- // t2: first character of result.
-
- StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
- // t2: next character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&non_ascii_string_add_flat_result);
- __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // v0: result string.
- // t3: first character of first string.
- // a1: first character of second string.
- // a2: length of first string.
- // a3: length of second string.
- // t2: first character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
- // t2: next character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
-
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ GetObjectType(arg, scratch1, scratch1);
- __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
-
- // Check the number to string cache.
- Label not_cached;
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- false,
- &not_cached);
- __ mov(arg, scratch1);
- __ sw(arg, MemOperand(sp, stack_offset));
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
- __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
- __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ And(scratch2, scratch2, scratch4);
- __ Branch(slow, ne, scratch2, Operand(scratch4));
- __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
- __ sw(arg, MemOperand(sp, stack_offset));
-
- __ bind(&done);
-}
-
-
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
- Label miss;
- __ Or(a2, a1, a0);
- __ JumpIfNotSmi(a2, &miss);
-
- if (GetCondition() == eq) {
- // For equality we do not care about the sign of the result.
- __ Subu(v0, a0, a1);
- } else {
- // Untag before subtracting to avoid handling overflow.
- __ SmiUntag(a1);
- __ SmiUntag(a0);
- __ Subu(v0, a1, a0);
- }
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::NUMBER);
-
- Label generic_stub;
- Label unordered, maybe_undefined1, maybe_undefined2;
- Label miss;
-
- if (left_ == CompareIC::SMI) {
- __ JumpIfNotSmi(a1, &miss);
- }
- if (right_ == CompareIC::SMI) {
- __ JumpIfNotSmi(a0, &miss);
- }
-
- // Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or FPU is unsupported.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
-
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(a0, &right_smi);
- __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
- __ Subu(a2, a0, Operand(kHeapObjectTag));
- __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
- __ Branch(&left);
- __ bind(&right_smi);
- __ SmiUntag(a2, a0); // Can't clobber a0 yet.
- FPURegister single_scratch = f6;
- __ mtc1(a2, single_scratch);
- __ cvt_d_w(f2, single_scratch);
-
- __ bind(&left);
- __ JumpIfSmi(a1, &left_smi);
- __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
- __ Subu(a2, a1, Operand(kHeapObjectTag));
- __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
- __ Branch(&done);
- __ bind(&left_smi);
- __ SmiUntag(a2, a1); // Can't clobber a1 yet.
- single_scratch = f8;
- __ mtc1(a2, single_scratch);
- __ cvt_d_w(f0, single_scratch);
-
- __ bind(&done);
-
- // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
- Label fpu_eq, fpu_lt;
- // Test if equal, and also handle the unordered/NaN case.
- __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
-
- // Test if less (unordered case is already handled).
- __ BranchF(&fpu_lt, NULL, lt, f0, f2);
-
- // Otherwise it's greater, so just fall thru, and return.
- __ li(v0, Operand(GREATER));
- __ Ret();
-
- __ bind(&fpu_eq);
- __ li(v0, Operand(EQUAL));
- __ Ret();
-
- __ bind(&fpu_lt);
- __ li(v0, Operand(LESS));
- __ Ret();
- }
-
- __ bind(&unordered);
- __ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
-
- __ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&miss, ne, a0, Operand(at));
- __ JumpIfSmi(a1, &unordered);
- __ GetObjectType(a1, a2, a2);
- __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
- __ jmp(&unordered);
- }
-
- __ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&unordered, eq, a1, Operand(at));
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
- Label miss;
-
- // Registers containing left and right operands respectively.
- Register left = a1;
- Register right = a0;
- Register tmp1 = a2;
- Register tmp2 = a3;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(left, right, &miss);
-
- // Check that both operands are internalized strings.
- __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ And(tmp1, tmp1, Operand(tmp2));
- __ And(tmp1, tmp1, kIsInternalizedMask);
- __ Branch(&miss, eq, tmp1, Operand(zero_reg));
- // Make sure a0 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(a0));
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(v0, right);
- // Internalized strings are compared by identity.
- __ Ret(ne, left, Operand(right));
- __ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::UNIQUE_NAME);
- ASSERT(GetCondition() == eq);
- Label miss;
-
- // Registers containing left and right operands respectively.
- Register left = a1;
- Register right = a0;
- Register tmp1 = a2;
- Register tmp2 = a3;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(left, right, &miss);
-
- // Check that both operands are unique names. This leaves the instance
- // types loaded in tmp1 and tmp2.
- STATIC_ASSERT(kInternalizedTag != 0);
- __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
-
- Label succeed1;
- __ And(at, tmp1, Operand(kIsInternalizedMask));
- __ Branch(&succeed1, ne, at, Operand(zero_reg));
- __ Branch(&miss, ne, tmp1, Operand(SYMBOL_TYPE));
- __ bind(&succeed1);
-
- Label succeed2;
- __ And(at, tmp2, Operand(kIsInternalizedMask));
- __ Branch(&succeed2, ne, at, Operand(zero_reg));
- __ Branch(&miss, ne, tmp2, Operand(SYMBOL_TYPE));
- __ bind(&succeed2);
-
- // Use a0 as result
- __ mov(v0, a0);
-
- // Unique names are compared by identity.
- Label done;
- __ Branch(&done, ne, left, Operand(right));
- // Make sure a0 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(a0));
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ bind(&done);
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
- Label miss;
-
- bool equality = Token::IsEqualityOp(op_);
-
- // Registers containing left and right operands respectively.
- Register left = a1;
- Register right = a0;
- Register tmp1 = a2;
- Register tmp2 = a3;
- Register tmp3 = t0;
- Register tmp4 = t1;
- Register tmp5 = t2;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(left, right, &miss);
-
- // Check that both operands are strings. This leaves the instance
- // types loaded in tmp1 and tmp2.
- __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ Or(tmp3, tmp1, tmp2);
- __ And(tmp5, tmp3, Operand(kIsNotStringMask));
- __ Branch(&miss, ne, tmp5, Operand(zero_reg));
-
- // Fast check for identical strings.
- Label left_ne_right;
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Branch(&left_ne_right, ne, left, Operand(right));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, zero_reg); // In the delay slot.
- __ bind(&left_ne_right);
-
- // Handle not identical strings.
-
- // Check that both strings are internalized strings. If they are, we're done
- // because we already know they are not identical.
- if (equality) {
- ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kInternalizedTag != 0);
- __ And(tmp3, tmp1, Operand(tmp2));
- __ And(tmp5, tmp3, Operand(kIsInternalizedMask));
- Label is_symbol;
- __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
- // Make sure a0 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(a0));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0); // In the delay slot.
- __ bind(&is_symbol);
- }
-
- // Check that both strings are sequential ASCII.
- Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(
- tmp1, tmp2, tmp3, tmp4, &runtime);
-
- // Compare flat ASCII strings. Returns when done.
- if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2, tmp3);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3, tmp4);
- }
-
- // Handle more complex cases in runtime.
- __ bind(&runtime);
- __ Push(left, right);
- if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
- } else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
- Label miss;
- __ And(a2, a1, Operand(a0));
- __ JumpIfSmi(a2, &miss);
-
- // Compare lhs, a2 holds the map, a3 holds the type_reg
- __ GetObjectType(a0, a2, a3);
- __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
- __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- __ And(a2, a2, Operand(1 << Map::kUseUserObjectComparison));
- __ Branch(&miss, eq, a2, Operand(1 << Map::kUseUserObjectComparison));
-
-
- // Compare rhs, a2 holds the map, a3 holds the type_reg
- __ GetObjectType(a1, a2, a3);
- __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
- __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- __ And(a2, a2, Operand(1 << Map::kUseUserObjectComparison));
- __ Branch(&miss, eq, a2, Operand(1 << Map::kUseUserObjectComparison));
-
- ASSERT(GetCondition() == eq);
- __ Ret(USE_DELAY_SLOT);
- __ subu(v0, a0, a1);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
- Label miss;
- __ And(a2, a1, a0);
- __ JumpIfSmi(a2, &miss);
- __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
-
- // Check object in a0
- __ Branch(&miss, ne, a2, Operand(known_map_));
- __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- __ And(a2, a2, Operand(1 << Map::kUseUserObjectComparison));
- __ Branch(&miss, eq, a2, Operand(1 << Map::kUseUserObjectComparison));
-
- // Check object in a1
- __ Branch(&miss, ne, a3, Operand(known_map_));
- __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
- __ And(a3, a3, Operand(1 << Map::kUseUserObjectComparison));
- __ Branch(&miss, eq, a3, Operand(1 << Map::kUseUserObjectComparison));
-
- __ Ret(USE_DELAY_SLOT);
- __ subu(v0, a0, a1);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- {
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a0);
- __ push(ra);
- __ Push(a1, a0);
- __ li(t0, Operand(Smi::FromInt(op_)));
- __ addiu(sp, sp, -kPointerSize);
- __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
- __ sw(t0, MemOperand(sp)); // In the delay slot.
- // Compute the entry point of the rewritten stub.
- __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore registers.
- __ Pop(a1, a0, ra);
- }
- __ Jump(a2);
-}
-
-
-void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // No need to pop or drop anything, LeaveExitFrame will restore the old
- // stack, thus dropping the allocated space for the return value.
- // The saved ra is after the reserved stack space for the 4 args.
- __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
-
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- // In case of an error the return address may point to a memory area
- // filled with kZapValue by the GC.
- // Dereference the address and check for this.
- __ lw(t0, MemOperand(t9));
- __ Assert(ne, "Received invalid return address.", t0,
- Operand(reinterpret_cast<uint32_t>(kZapValue)));
- }
- __ Jump(t9);
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- ExternalReference function) {
- __ li(t9, Operand(function));
- this->GenerateCall(masm, t9);
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- Register target) {
- __ Move(t9, target);
- __ AssertStackIsAligned();
- // Allocate space for arg slots.
- __ Subu(sp, sp, kCArgsSlotsSize);
-
- // Block the trampoline pool through the whole function to make sure the
- // number of generated instructions is constant.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
-
- // We need to get the current 'pc' value, which is not available on MIPS.
- Label find_ra;
- masm->bal(&find_ra); // ra = pc + 8.
- masm->nop(); // Branch delay slot nop.
- masm->bind(&find_ra);
-
- const int kNumInstructionsToJump = 6;
- masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
- // Push return address (accessible to GC through exit frame pc).
- // This spot for ra was reserved in EnterExitFrame.
- masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
- intptr_t loc =
- reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
- masm->li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
- // Call the function.
- masm->Jump(t9);
- // Make sure the stored 'ra' points to this position.
- ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
-}
-
-
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<String> name,
- Register scratch0) {
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ lw(index, FieldMemOperand(properties, kCapacityOffset));
- __ Subu(index, index, Operand(1));
- __ And(index, index, Operand(
- Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ sll(at, index, 1);
- __ Addu(index, index, at);
-
- Register entity_name = scratch0;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- Register tmp = properties;
- __ sll(scratch0, index, 1);
- __ Addu(tmp, properties, scratch0);
- __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- ASSERT(!tmp.is(entity_name));
- __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
- __ Branch(done, eq, entity_name, Operand(tmp));
-
- if (i != kInlinedProbes - 1) {
- // Load the hole ready for use below:
- __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
-
- // Stop if found the property.
- __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
-
- Label the_hole;
- __ Branch(&the_hole, eq, entity_name, Operand(tmp));
-
- // Check if the entry name is not a internalized string.
- __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ lbu(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ And(scratch0, entity_name, Operand(kIsInternalizedMask));
- __ Branch(miss, eq, scratch0, Operand(zero_reg));
-
- __ bind(&the_hole);
-
- // Restore the properties.
- __ lw(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- }
- }
-
- const int spill_mask =
- (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
- a2.bit() | a1.bit() | a0.bit() | v0.bit());
-
- __ MultiPush(spill_mask);
- __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ li(a1, Operand(Handle<String>(name)));
- StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- __ mov(at, v0);
- __ MultiPop(spill_mask);
-
- __ Branch(done, eq, at, Operand(zero_reg));
- __ Branch(miss, ne, at, Operand(zero_reg));
-}
-
-
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- ASSERT(!elements.is(scratch1));
- ASSERT(!elements.is(scratch2));
- ASSERT(!name.is(scratch1));
- ASSERT(!name.is(scratch2));
-
- __ AssertString(name);
-
- // Compute the capacity mask.
- __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
- __ Subu(scratch1, scratch1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
- __ Addu(scratch2, scratch2, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
- }
- __ srl(scratch2, scratch2, String::kHashShift);
- __ And(scratch2, scratch1, scratch2);
-
- // Scale the index by multiplying by the element size.
- ASSERT(StringDictionary::kEntrySize == 3);
- // scratch2 = scratch2 * 3.
-
- __ sll(at, scratch2, 1);
- __ Addu(scratch2, scratch2, at);
-
- // Check if the key is identical to the name.
- __ sll(at, scratch2, 2);
- __ Addu(scratch2, elements, at);
- __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
- __ Branch(done, eq, name, Operand(at));
- }
-
- const int spill_mask =
- (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
- a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
- ~(scratch1.bit() | scratch2.bit());
-
- __ MultiPush(spill_mask);
- if (name.is(a0)) {
- ASSERT(!elements.is(a1));
- __ Move(a1, name);
- __ Move(a0, elements);
- } else {
- __ Move(a0, elements);
- __ Move(a1, name);
- }
- StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ mov(scratch2, a2);
- __ mov(at, v0);
- __ MultiPop(spill_mask);
-
- __ Branch(done, ne, at, Operand(zero_reg));
- __ Branch(miss, eq, at, Operand(zero_reg));
-}
-
-
-void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Registers:
- // result: StringDictionary to probe
- // a1: key
- // : StringDictionary to probe.
- // index_: will hold an index of entry if lookup is successful.
- // might alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non zero otherwise.
-
- Register result = v0;
- Register dictionary = a0;
- Register key = a1;
- Register index = a2;
- Register mask = a3;
- Register hash = t0;
- Register undefined = t1;
- Register entry_key = t2;
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
- __ sra(mask, mask, kSmiTagSize);
- __ Subu(mask, mask, Operand(1));
-
- __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
-
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- // Capacity is smi 2^n.
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
- __ Addu(index, hash, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
- } else {
- __ mov(index, hash);
- }
- __ srl(index, index, String::kHashShift);
- __ And(index, mask, index);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- // index *= 3.
- __ mov(at, index);
- __ sll(index, index, 1);
- __ Addu(index, index, at);
-
-
- ASSERT_EQ(kSmiTagSize, 1);
- __ sll(index, index, 2);
- __ Addu(index, index, dictionary);
- __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
-
- // Having undefined at this place means the name is not contained.
- __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
-
- // Stop if found the property.
- __ Branch(&in_dictionary, eq, entry_key, Operand(key));
-
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a internalized string.
- __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ lbu(entry_key,
- FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ And(result, entry_key, Operand(kIsInternalizedMask));
- __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
- __ Ret(USE_DELAY_SLOT);
- __ mov(result, zero_reg);
- }
-
- __ bind(&in_dictionary);
- __ Ret(USE_DELAY_SLOT);
- __ li(result, 1);
-
- __ bind(&not_in_dictionary);
- __ Ret(USE_DELAY_SLOT);
- __ mov(result, zero_reg);
-}
-
-
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
- // Also used in KeyedStoreIC::GenerateGeneric.
- { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(t0), REG(a1), REG(a2), OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
- { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
- // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
- { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
- { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
- { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
- { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
- // StoreArrayLiteralElementStub::Generate
- { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
- // FastNewClosureStub::Generate
- { REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-
-bool RecordWriteStub::IsPregenerated() {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
-bool StoreBufferOverflowStub::IsPregenerated() {
- return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return CpuFeatures::IsSupported(FPU);
-}
-
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two branch+nop instructions are generated with labels so as to
- // get the offset fixed up correctly by the bind(Label*) call. We patch it
- // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
- // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
- // incremental heap marking.
- // See RecordWriteStub::Patch for details.
- __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
- __ nop();
- __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
- __ nop();
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- }
- __ Ret();
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
-
- PatchBranchIntoNop(masm, 0);
- PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- ne,
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address =
- a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- ASSERT(!address.is(regs_.object()));
- ASSERT(!address.is(a0));
- __ Move(address, regs_.address());
- __ Move(a0, regs_.object());
- __ Move(a1, address);
- __ li(a2, Operand(ExternalReference::isolate_address()));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label on_black;
- Label need_incremental;
- Label need_incremental_pop_scratch;
-
- __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
- __ lw(regs_.scratch1(),
- MemOperand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
- __ sw(regs_.scratch1(),
- MemOperand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ bind(&on_black);
-
- // Get the value from the slot.
- __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- eq,
- &ensure_not_white);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- eq,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : element value to store
- // -- a1 : array literal
- // -- a2 : map of array literal
- // -- a3 : element index as smi
- // -- t0 : array literal index in function as smi
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- __ CheckFastElements(a2, t1, &double_elements);
- // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
- __ JumpIfSmi(a0, &smi_element);
- __ CheckFastSmiElements(a2, t1, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- // call.
- __ Push(a1, a3, a0);
- __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
- __ Push(t1, t0);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t2, t1, t2);
- __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sw(a0, MemOperand(t2, 0));
- // Update the write barrier for the array store.
- __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t2, t1, t2);
- __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3,
- // Overwrites all regs after this.
- t1, t2, t3, t5, a2,
- &slow_elements);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-}
-
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- ASSERT(!Serializer::enabled());
- bool save_fp_regs = CpuFeatures::IsSupported(FPU);
- CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ lw(a1, MemOperand(fp, parameter_count_offset));
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ sll(a1, a1, kPointerSizeLog2);
- __ Addu(sp, sp, a1);
- __ Ret();
-}
-
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
- ProfileEntryHookStub stub;
- __ push(ra);
- __ CallStub(&stub);
- __ pop(ra);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // The entry hook is a "push ra" instruction, followed by a call.
- // Note: on MIPS "push" is 2 instruction
- const int32_t kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
-
- // Save live volatile registers.
- __ Push(ra, t1, a1);
- const int32_t kNumSavedRegs = 3;
-
- // Compute the function's address for the first argument.
- __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
-
- // The caller's return address is above the saved temporaries.
- // Grab that for the second argument to the hook.
- __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
-
- // Align the stack if necessary.
- int frame_alignment = masm->ActivationFrameAlignment();
- if (frame_alignment > kPointerSize) {
- __ mov(t1, sp);
- ASSERT(IsPowerOf2(frame_alignment));
- __ And(sp, sp, Operand(-frame_alignment));
- }
-
-#if defined(V8_HOST_ARCH_MIPS)
- __ li(at, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
- __ lw(at, MemOperand(at));
-#else
- // Under the simulator we need to indirect the entry hook through a
- // trampoline function at a known address.
- Address trampoline_address = reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(EntryHookTrampoline));
- ApiFunction dispatcher(trampoline_address);
- __ li(at, Operand(ExternalReference(&dispatcher,
- ExternalReference::BUILTIN_CALL,
- masm->isolate())));
-#endif
- __ Call(at);
-
- // Restore the stack pointer if needed.
- if (frame_alignment > kPointerSize) {
- __ mov(sp, t1);
- }
-
- __ Pop(ra, t1, a1);
- __ Ret();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/code-stubs-mips.h b/src/3rdparty/v8/src/mips/code-stubs-mips.h
deleted file mode 100644
index cc7ac28..0000000
--- a/src/3rdparty/v8/src/mips/code-stubs-mips.h
+++ /dev/null
@@ -1,794 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_CODE_STUBS_ARM_H_
-#define V8_MIPS_CODE_STUBS_ARM_H_
-
-#include "ic-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) { }
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
- void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
-class UnaryOpStub: public PlatformCodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
- void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateNumberStubSub(MacroAssembler* masm);
- void GenerateNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
-
-
- // Probe the string table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in register r0.
- // Contents of both c1 and c2 registers are modified. At the exit c1 is
- // guaranteed to contain halfword with low and high bytes equal to
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow);
-
- const StringAddFlags flags_;
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- StringCompareStub() { }
-
- // Compare two flat ASCII strings and returns result in v0.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compares two flat ASCII strings for equality and returns result
- // in v0.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- virtual Major MajorKey() { return StringCompare; }
- virtual int MinorKey() { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* chars_not_equal);
-};
-
-
-// This stub can convert a signed int32 to a heap number (double). It does
-// not work for int32s that are in Smi range! No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
- public:
- WriteInt32ToHeapNumberStub(Register the_int,
- Register the_heap_number,
- Register scratch,
- Register scratch2)
- : the_int_(the_int),
- the_heap_number_(the_heap_number),
- scratch_(scratch),
- sign_(scratch2) {
- ASSERT(IntRegisterBits::is_valid(the_int_.code()));
- ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
- ASSERT(ScratchRegisterBits::is_valid(scratch_.code()));
- ASSERT(SignRegisterBits::is_valid(sign_.code()));
- }
-
- bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-
- private:
- Register the_int_;
- Register the_heap_number_;
- Register scratch_;
- Register sign_;
-
- // Minor key encoding in 16 bits.
- class IntRegisterBits: public BitField<int, 0, 4> {};
- class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
- class ScratchRegisterBits: public BitField<int, 8, 4> {};
- class SignRegisterBits: public BitField<int, 12, 4> {};
-
- Major MajorKey() { return WriteInt32ToHeapNumber; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return IntRegisterBits::encode(the_int_.code())
- | HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code())
- | SignRegisterBits::encode(sign_.code());
- }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- RecordWriteStub(Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- }
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
- const unsigned offset = masm->instr_at(pos) & kImm16Mask;
- masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
- (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
- ASSERT(Assembler::IsBne(masm->instr_at(pos)));
- }
-
- static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
- const unsigned offset = masm->instr_at(pos) & kImm16Mask;
- masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
- (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
- ASSERT(Assembler::IsBeq(masm->instr_at(pos)));
- }
-
- static Mode GetMode(Code* stub) {
- Instr first_instruction = Assembler::instr_at(stub->instruction_start());
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- 2 * Assembler::kInstrSize);
-
- if (Assembler::IsBeq(first_instruction)) {
- return INCREMENTAL;
- }
-
- ASSERT(Assembler::IsBne(first_instruction));
-
- if (Assembler::IsBeq(second_instruction)) {
- return INCREMENTAL_COMPACTION;
- }
-
- ASSERT(Assembler::IsBne(second_instruction));
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL,
- stub->instruction_start(),
- stub->instruction_size());
- switch (mode) {
- case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- PatchBranchIntoNop(&masm, 0);
- PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
- break;
- case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 0);
- break;
- case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
- break;
- }
- ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 4 * Assembler::kInstrSize);
- }
-
- private:
- // This is a helper class for freeing up 3 scratch registers. The input is
- // two registers that must be preserved and one scratch register provided by
- // the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
- : object_(object),
- address_(address),
- scratch0_(scratch0) {
- ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
- }
-
- void Save(MacroAssembler* masm) {
- ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
- // We don't have to save scratch0_ because it was given to us as
- // a scratch register.
- masm->push(scratch1_);
- }
-
- void Restore(MacroAssembler* masm) {
- masm->pop(scratch1_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The scratch registers
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
- masm->MultiPushFPU(kCallerSavedFPU);
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
- masm->MultiPopFPU(kCallerSavedFPU);
- }
- masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
-
- Register GetRegThatIsNotOneOf(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
- }
-
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
-
- class ObjectBits: public BitField<int, 0, 5> {};
- class ValueBits: public BitField<int, 5, 5> {};
- class AddressBits: public BitField<int, 10, 5> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
-
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
- Label slow_;
- RegisterAllocation regs_;
-};
-
-
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM and MIPS.
-class RegExpCEntryStub: public PlatformCodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
-// Trampoline stub to call into native code. To call safely into native code
-// in the presence of compacting GC (which can move code objects) we need to
-// keep the code which called into native pinned in the memory. Currently the
-// simplest approach is to generate such stub early enough so it can never be
-// moved by GC
-class DirectCEntryStub: public PlatformCodeStub {
- public:
- DirectCEntryStub() {}
- void Generate(MacroAssembler* masm);
- void GenerateCall(MacroAssembler* masm,
- ExternalReference function);
- void GenerateCall(MacroAssembler* masm, Register target);
-
- private:
- Major MajorKey() { return DirectCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
-class FloatingPointHelper : public AllStatic {
- public:
- enum Destination {
- kFPURegisters,
- kCoreRegisters
- };
-
-
- // Loads smis from a0 and a1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
- // is floating point registers FPU must be supported. If core registers are
- // requested when FPU is supported f12 and f14 will be scratched.
- static void LoadSmis(MacroAssembler* masm,
- Destination destination,
- Register scratch1,
- Register scratch2);
-
- // Convert the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1.
- static void ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch,
- Label* not_int32);
-
- // Converts the integer (untagged smi) in |int_scratch| to a double, storing
- // the result either in |double_dst| or |dst2:dst1|, depending on
- // |destination|.
- // Warning: The value in |int_scratch| will be changed in the process!
- static void ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- FPURegister double_dst,
- Register dst1,
- Register dst2,
- Register scratch2,
- FPURegister single_scratch);
-
- // Load the number from object into double_dst in the double format.
- // Control will jump to not_int32 if the value cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be loaded.
- static void LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- FPURegister double_dst,
- FPURegister double_scratch,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister single_scratch,
- Label* not_int32);
-
- // Loads the number from object into dst as a 32-bit integer.
- // Control will jump to not_int32 if the object cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be converted.
- // scratch3 is not used when FPU is supported.
- static void LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch0,
- FPURegister double_scratch1,
- Label* not_int32);
-
- // Generate non FPU code to check if a double can be exactly represented by a
- // 32-bit integer. This does not check for 0 or -0, which need
- // to be checked for separately.
- // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
- // through otherwise.
- // src1 and src2 will be cloberred.
- //
- // Expected input:
- // - src1: higher (exponent) part of the double value.
- // - src2: lower (mantissa) part of the double value.
- // Output status:
- // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
- // - src2: contains 1.
- // - other registers are clobbered.
- static void DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32);
-
- // Generates code to call a C function to do a double operation using core
- // registers. (Used when FPU is not supported.)
- // This code never falls through, but returns with a heap number containing
- // the result in v0.
- // Register heapnumber_result must be a heap number in which the
- // result of the operation will be stored.
- // Requires the following layout on entry:
- // a0: Left value (least significant part of mantissa).
- // a1: Left value (sign, exponent, top of mantissa).
- // a2: Right value (least significant part of mantissa).
- // a3: Right value (sign, exponent, top of mantissa).
- static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch);
-
- // Loads the objects from |object| into floating point registers.
- // Depending on |destination| the value ends up either in |dst| or
- // in |dst1|/|dst2|. If |destination| is kFPURegisters, then FPU
- // must be supported. If kCoreRegisters are requested and FPU is
- // supported, |dst| will be scratched. If |object| is neither smi nor
- // heap number, |not_number| is jumped to with |object| still intact.
- static void LoadNumber(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register object,
- FPURegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-};
-
-
-class StringDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<String> name,
- Register scratch0);
-
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- Major MajorKey() { return StringDictionaryLookup; }
-
- int MinorKey() {
- return LookupModeBits::encode(mode_);
- }
-
- class LookupModeBits: public BitField<LookupMode, 0, 1> {};
-
- LookupMode mode_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_CODE_STUBS_ARM_H_
diff --git a/src/3rdparty/v8/src/mips/codegen-mips.cc b/src/3rdparty/v8/src/mips/codegen-mips.cc
deleted file mode 100644
index bbb1a31..0000000
--- a/src/3rdparty/v8/src/mips/codegen-mips.cc
+++ /dev/null
@@ -1,729 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen.h"
-#include "macro-assembler.h"
-#include "simulator-mips.h"
-
-namespace v8 {
-namespace internal {
-
-
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- return NULL;
-}
-
-
-#define __ masm.
-
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_mips_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
- fast_exp_mips_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(FPU)) return &exp;
- if (!FLAG_fast_math) return &exp;
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-
- {
- CpuFeatures::Scope use_fpu(FPU);
- DoubleRegister input = f12;
- DoubleRegister result = f0;
- DoubleRegister double_scratch1 = f4;
- DoubleRegister double_scratch2 = f6;
- Register temp1 = t0;
- Register temp2 = t1;
- Register temp3 = t2;
-
- if (!IsMipsSoftFloatABI) {
- // Input value is in f12 anyway, nothing to do.
- } else {
- __ Move(input, a0, a1);
- }
- __ Push(temp3, temp2, temp1);
- MathExpGenerator::EmitMathExp(
- &masm, input, result, double_scratch1, double_scratch2,
- temp1, temp2, temp3);
- __ Pop(temp3, temp2, temp1);
- if (!IsMipsSoftFloatABI) {
- // Result is already in f0, nothing to do.
- } else {
- __ Move(a0, a1, result);
- }
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#else
- fast_exp_mips_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
-
-#undef __
-
-
-UnaryMathFunction CreateSqrtFunction() {
- return &sqrt;
-}
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
- masm->set_has_frame(true);
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
- masm->set_has_frame(false);
-}
-
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm, AllocationSiteMode mode,
- Label* allocation_site_info_found) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : target map, scratch for subsequent call
- // -- t0 : scratch (elements)
- // -----------------------------------
- if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_site_info_found != NULL);
- masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq,
- allocation_site_info_found);
- }
-
- // Set transitioned map.
- __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ RecordWriteField(a2,
- HeapObject::kMapOffset,
- a3,
- t5,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : target map, scratch for subsequent call
- // -- t0 : scratch (elements)
- // -----------------------------------
- Label loop, entry, convert_hole, gc_required, only_change_map, done;
- bool fpu_supported = CpuFeatures::IsSupported(FPU);
-
- Register scratch = t6;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&only_change_map, eq, at, Operand(t0));
-
- __ push(ra);
- __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
- // t0: source FixedArray
- // t1: number of elements (smi-tagged)
-
- // Allocate new FixedDoubleArray.
- __ sll(scratch, t1, 2);
- __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
- __ AllocateInNewSpace(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
- // t2: destination FixedDoubleArray, not tagged as heap object
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
- __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
- __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
- // Update receiver's map.
-
- __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ RecordWriteField(a2,
- HeapObject::kMapOffset,
- a3,
- t5,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ Addu(a3, t2, Operand(kHeapObjectTag));
- __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
- __ RecordWriteField(a2,
- JSObject::kElementsOffset,
- a3,
- t5,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
-
- // Prepare for conversion loop.
- __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
- __ sll(t2, t1, 2);
- __ Addu(t2, t2, t3);
- __ li(t0, Operand(kHoleNanLower32));
- __ li(t1, Operand(kHoleNanUpper32));
- // t0: kHoleNanLower32
- // t1: kHoleNanUpper32
- // t2: end of destination FixedDoubleArray, not tagged
- // t3: begin of FixedDoubleArray element fields, not tagged
-
- if (!fpu_supported) __ Push(a1, a0);
-
- __ Branch(&entry);
-
- __ bind(&only_change_map);
- __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ RecordWriteField(a2,
- HeapObject::kMapOffset,
- a3,
- t5,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Branch(&done);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ pop(ra);
- __ Branch(fail);
-
- // Convert and copy elements.
- __ bind(&loop);
- __ lw(t5, MemOperand(a3));
- __ Addu(a3, a3, kIntSize);
- // t5: current element
- __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
-
- // Normal smi, convert to double and store.
- if (fpu_supported) {
- CpuFeatures::Scope scope(FPU);
- __ mtc1(t5, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, MemOperand(t3));
- __ Addu(t3, t3, kDoubleSize);
- } else {
- FloatingPointHelper::ConvertIntToDouble(masm,
- t5,
- FloatingPointHelper::kCoreRegisters,
- f0,
- a0,
- a1,
- t7,
- f0);
- __ sw(a0, MemOperand(t3)); // mantissa
- __ sw(a1, MemOperand(t3, kIntSize)); // exponent
- __ Addu(t3, t3, kDoubleSize);
- }
- __ Branch(&entry);
-
- // Hole found, store the-hole NaN.
- __ bind(&convert_hole);
- if (FLAG_debug_code) {
- // Restore a "smi-untagged" heap object.
- __ SmiTag(t5);
- __ Or(t5, t5, Operand(1));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, "object found in smi-only array", at, Operand(t5));
- }
- __ sw(t0, MemOperand(t3)); // mantissa
- __ sw(t1, MemOperand(t3, kIntSize)); // exponent
- __ Addu(t3, t3, kDoubleSize);
-
- __ bind(&entry);
- __ Branch(&loop, lt, t3, Operand(t2));
-
- if (!fpu_supported) __ Pop(a1, a0);
- __ pop(ra);
- __ bind(&done);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : target map, scratch for subsequent call
- // -- t0 : scratch (elements)
- // -----------------------------------
- Label entry, loop, convert_hole, gc_required, only_change_map;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&only_change_map, eq, at, Operand(t0));
-
- __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
-
- __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
- // t0: source FixedArray
- // t1: number of elements (smi-tagged)
-
- // Allocate new FixedArray.
- __ sll(a0, t1, 1);
- __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
- __ AllocateInNewSpace(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
- // t2: destination FixedArray, not tagged as heap object
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
- __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
- __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
-
- // Prepare for conversion loop.
- __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
- __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
- __ Addu(t2, t2, Operand(kHeapObjectTag));
- __ sll(t1, t1, 1);
- __ Addu(t1, a3, t1);
- __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
- // Using offsetted addresses.
- // a3: begin of destination FixedArray element fields, not tagged
- // t0: begin of source FixedDoubleArray element fields, not tagged, +4
- // t1: end of destination FixedArray, not tagged
- // t2: destination FixedArray
- // t3: the-hole pointer
- // t5: heap number map
- __ Branch(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
-
- __ Branch(fail);
-
- __ bind(&loop);
- __ lw(a1, MemOperand(t0));
- __ Addu(t0, t0, kDoubleSize);
- // a1: current element's upper 32 bit
- // t0: address of next element's upper 32 bit
- __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
-
- // Non-hole double, copy value into a heap number.
- __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
- // a2: new heap number
- __ lw(a0, MemOperand(t0, -12));
- __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
- __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
- __ mov(a0, a3);
- __ sw(a2, MemOperand(a3));
- __ Addu(a3, a3, kIntSize);
- __ RecordWrite(t2,
- a0,
- a2,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Branch(&entry);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ sw(t3, MemOperand(a3));
- __ Addu(a3, a3, kIntSize);
-
- __ bind(&entry);
- __ Branch(&loop, lt, a3, Operand(t1));
-
- __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
- __ RecordWriteField(a2,
- JSObject::kElementsOffset,
- t2,
- t5,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(ra);
-
- __ bind(&only_change_map);
- // Update receiver's map.
- __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ RecordWriteField(a2,
- HeapObject::kMapOffset,
- a3,
- t5,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- // Fetch the instance type of the receiver into result register.
- __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ And(at, result, Operand(kIsIndirectStringMask));
- __ Branch(&check_sequential, eq, at, Operand(zero_reg));
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ And(at, result, Operand(kSlicedNotConsMask));
- __ Branch(&cons_string, eq, at, Operand(zero_reg));
-
- // Handle slices.
- Label indirect_string_loaded;
- __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ sra(at, result, kSmiTagSize);
- __ Addu(index, index, at);
- __ jmp(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ LoadRoot(at, Heap::kempty_stringRootIndex);
- __ Branch(call_runtime, ne, result, Operand(at));
- // Get the first of the two strings and load its instance type.
- __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label external_string, check_encoding;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(at, result, Operand(kStringRepresentationMask));
- __ Branch(&external_string, ne, at, Operand(zero_reg));
-
- // Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Addu(string,
- string,
- SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ jmp(&check_encoding);
-
- // Handle external strings.
- __ bind(&external_string);
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ And(at, result, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found",
- at, Operand(zero_reg));
- }
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ And(at, result, Operand(kShortExternalStringMask));
- __ Branch(call_runtime, ne, at, Operand(zero_reg));
- __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
-
- Label ascii, done;
- __ bind(&check_encoding);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ And(at, result, Operand(kStringEncodingMask));
- __ Branch(&ascii, ne, at, Operand(zero_reg));
- // Two-byte string.
- __ sll(at, index, 1);
- __ Addu(at, string, at);
- __ lhu(result, MemOperand(at));
- __ jmp(&done);
- __ bind(&ascii);
- // Ascii string.
- __ Addu(at, string, index);
- __ lbu(result, MemOperand(at));
- __ bind(&done);
-}
-
-
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ And(at, index, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi index", at, Operand(zero_reg));
- __ And(at, value, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi value", at, Operand(zero_reg));
-
- __ lw(at, FieldMemOperand(string, String::kLengthOffset));
- __ Check(lt, "Index is too large", index, Operand(at));
-
- __ Check(ge, "Index is negative", index, Operand(zero_reg));
-
- __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
-
- __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, "Unexpected string type", at, Operand(zero_reg));
- }
-
- __ Addu(at,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ SmiUntag(value);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ SmiUntag(index);
- __ Addu(at, at, index);
- __ sb(value, MemOperand(at));
- } else {
- // No need to untag a smi for two-byte addressing.
- __ Addu(at, at, index);
- __ sh(value, MemOperand(at));
- }
-}
-
-
-static MemOperand ExpConstant(int index, Register base) {
- return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3) {
- ASSERT(!input.is(result));
- ASSERT(!input.is(double_scratch1));
- ASSERT(!input.is(double_scratch2));
- ASSERT(!result.is(double_scratch1));
- ASSERT(!result.is(double_scratch2));
- ASSERT(!double_scratch1.is(double_scratch2));
- ASSERT(!temp1.is(temp2));
- ASSERT(!temp1.is(temp3));
- ASSERT(!temp2.is(temp3));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
- Label done;
-
- __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
- __ ldc1(double_scratch1, ExpConstant(0, temp3));
- __ Move(result, kDoubleRegZero);
- __ BranchF(&done, NULL, ge, double_scratch1, input);
- __ ldc1(double_scratch2, ExpConstant(1, temp3));
- __ ldc1(result, ExpConstant(2, temp3));
- __ BranchF(&done, NULL, ge, input, double_scratch2);
- __ ldc1(double_scratch1, ExpConstant(3, temp3));
- __ ldc1(result, ExpConstant(4, temp3));
- __ mul_d(double_scratch1, double_scratch1, input);
- __ add_d(double_scratch1, double_scratch1, result);
- __ Move(temp2, temp1, double_scratch1);
- __ sub_d(double_scratch1, double_scratch1, result);
- __ ldc1(result, ExpConstant(6, temp3));
- __ ldc1(double_scratch2, ExpConstant(5, temp3));
- __ mul_d(double_scratch1, double_scratch1, double_scratch2);
- __ sub_d(double_scratch1, double_scratch1, input);
- __ sub_d(result, result, double_scratch1);
- __ mul_d(input, double_scratch1, double_scratch1);
- __ mul_d(result, result, input);
- __ srl(temp1, temp2, 11);
- __ ldc1(double_scratch2, ExpConstant(7, temp3));
- __ mul_d(result, result, double_scratch2);
- __ sub_d(result, result, double_scratch1);
- __ ldc1(double_scratch2, ExpConstant(8, temp3));
- __ add_d(result, result, double_scratch2);
- __ li(at, 0x7ff);
- __ And(temp2, temp2, at);
- __ Addu(temp1, temp1, Operand(0x3ff));
- __ sll(temp1, temp1, 20);
-
- // Must not call ExpConstant() after overwriting temp3!
- __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ sll(at, temp2, 3);
- __ addu(at, at, temp3);
- __ lw(at, MemOperand(at));
- __ Addu(temp3, temp3, Operand(kPointerSize));
- __ sll(temp2, temp2, 3);
- __ addu(temp2, temp2, temp3);
- __ lw(temp2, MemOperand(temp2));
- __ Or(temp1, temp1, temp2);
- __ Move(input, at, temp1);
- __ mul_d(result, result, input);
- __ bind(&done);
-}
-
-
-// nop(CODE_AGE_MARKER_NOP)
-static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
-
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found in FUNCTIONS
- static bool initialized = false;
- static uint32_t sequence[kNoCodeAgeSequenceLength];
- byte* byte_sequence = reinterpret_cast<byte*>(sequence);
- *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
- if (!initialized) {
- CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->Push(ra, fp, cp, a1);
- patcher.masm()->LoadRoot(at, Heap::kUndefinedValueRootIndex);
- patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize));
- initialized = true;
- }
- return byte_sequence;
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = !memcmp(sequence, young_sequence, young_length);
- ASSERT(result ||
- Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
- return result;
-}
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAge;
- *parity = NO_MARKING_PARITY;
- } else {
- Address target_address = Memory::Address_at(
- sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
- memcpy(sequence, young_sequence, young_length);
- CPU::FlushICache(sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
- // Mark this code sequence for FindPlatformCodeAgeSequence()
- patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
- // Save the function's original return address
- // (it will be clobbered by Call(t9))
- patcher.masm()->mov(at, ra);
- // Load the stub address to t9 and call it
- patcher.masm()->li(t9,
- Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
- patcher.masm()->Call(t9);
- // Record the stub address in the empty space for GetCodeAgeAndParity()
- patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/codegen-mips.h b/src/3rdparty/v8/src/mips/codegen-mips.h
deleted file mode 100644
index d429443..0000000
--- a/src/3rdparty/v8/src/mips/codegen-mips.h
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_CODEGEN_MIPS_H_
-#define V8_MIPS_CODEGEN_MIPS_H_
-
-
-#include "ast.h"
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations
-class CompilationInfo;
-
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- CodeGenerator() {
- InitializeAstVisitor();
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Expression* type);
-
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/constants-mips.cc b/src/3rdparty/v8/src/mips/constants-mips.cc
deleted file mode 100644
index ddfa891..0000000
--- a/src/3rdparty/v8/src/mips/constants-mips.cc
+++ /dev/null
@@ -1,355 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "constants-mips.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// Registers.
-
-
-// These register names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-const char* Registers::names_[kNumSimuRegisters] = {
- "zero_reg",
- "at",
- "v0", "v1",
- "a0", "a1", "a2", "a3",
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "t8", "t9",
- "k0", "k1",
- "gp",
- "sp",
- "fp",
- "ra",
- "LO", "HI",
- "pc"
-};
-
-// List of alias names which can be used when referring to MIPS registers.
-const Registers::RegisterAlias Registers::aliases_[] = {
- {0, "zero"},
- {23, "cp"},
- {30, "s8"},
- {30, "s8_fp"},
- {kInvalidRegister, NULL}
-};
-
-const char* Registers::Name(int reg) {
- const char* result;
- if ((0 <= reg) && (reg < kNumSimuRegisters)) {
- result = names_[reg];
- } else {
- result = "noreg";
- }
- return result;
-}
-
-
-int Registers::Number(const char* name) {
- // Look through the canonical names.
- for (int i = 0; i < kNumSimuRegisters; i++) {
- if (strcmp(names_[i], name) == 0) {
- return i;
- }
- }
-
- // Look through the alias names.
- int i = 0;
- while (aliases_[i].reg != kInvalidRegister) {
- if (strcmp(aliases_[i].name, name) == 0) {
- return aliases_[i].reg;
- }
- i++;
- }
-
- // No register with the reguested name found.
- return kInvalidRegister;
-}
-
-
-const char* FPURegisters::names_[kNumFPURegisters] = {
- "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
- "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
- "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
-};
-
-// List of alias names which can be used when referring to MIPS registers.
-const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
- {kInvalidRegister, NULL}
-};
-
-const char* FPURegisters::Name(int creg) {
- const char* result;
- if ((0 <= creg) && (creg < kNumFPURegisters)) {
- result = names_[creg];
- } else {
- result = "nocreg";
- }
- return result;
-}
-
-
-int FPURegisters::Number(const char* name) {
- // Look through the canonical names.
- for (int i = 0; i < kNumFPURegisters; i++) {
- if (strcmp(names_[i], name) == 0) {
- return i;
- }
- }
-
- // Look through the alias names.
- int i = 0;
- while (aliases_[i].creg != kInvalidRegister) {
- if (strcmp(aliases_[i].name, name) == 0) {
- return aliases_[i].creg;
- }
- i++;
- }
-
- // No Cregister with the reguested name found.
- return kInvalidFPURegister;
-}
-
-
-// -----------------------------------------------------------------------------
-// Instructions.
-
-bool Instruction::IsForbiddenInBranchDelay() const {
- const int op = OpcodeFieldRaw();
- switch (op) {
- case J:
- case JAL:
- case BEQ:
- case BNE:
- case BLEZ:
- case BGTZ:
- case BEQL:
- case BNEL:
- case BLEZL:
- case BGTZL:
- return true;
- case REGIMM:
- switch (RtFieldRaw()) {
- case BLTZ:
- case BGEZ:
- case BLTZAL:
- case BGEZAL:
- return true;
- default:
- return false;
- };
- break;
- case SPECIAL:
- switch (FunctionFieldRaw()) {
- case JR:
- case JALR:
- return true;
- default:
- return false;
- };
- break;
- default:
- return false;
- };
-}
-
-
-bool Instruction::IsLinkingInstruction() const {
- const int op = OpcodeFieldRaw();
- switch (op) {
- case JAL:
- return true;
- case REGIMM:
- switch (RtFieldRaw()) {
- case BGEZAL:
- case BLTZAL:
- return true;
- default:
- return false;
- };
- case SPECIAL:
- switch (FunctionFieldRaw()) {
- case JALR:
- return true;
- default:
- return false;
- };
- default:
- return false;
- };
-}
-
-
-bool Instruction::IsTrap() const {
- if (OpcodeFieldRaw() != SPECIAL) {
- return false;
- } else {
- switch (FunctionFieldRaw()) {
- case BREAK:
- case TGE:
- case TGEU:
- case TLT:
- case TLTU:
- case TEQ:
- case TNE:
- return true;
- default:
- return false;
- };
- }
-}
-
-
-Instruction::Type Instruction::InstructionType() const {
- switch (OpcodeFieldRaw()) {
- case SPECIAL:
- switch (FunctionFieldRaw()) {
- case JR:
- case JALR:
- case BREAK:
- case SLL:
- case SRL:
- case SRA:
- case SLLV:
- case SRLV:
- case SRAV:
- case MFHI:
- case MFLO:
- case MULT:
- case MULTU:
- case DIV:
- case DIVU:
- case ADD:
- case ADDU:
- case SUB:
- case SUBU:
- case AND:
- case OR:
- case XOR:
- case NOR:
- case SLT:
- case SLTU:
- case TGE:
- case TGEU:
- case TLT:
- case TLTU:
- case TEQ:
- case TNE:
- case MOVZ:
- case MOVN:
- case MOVCI:
- return kRegisterType;
- default:
- return kUnsupported;
- };
- break;
- case SPECIAL2:
- switch (FunctionFieldRaw()) {
- case MUL:
- case CLZ:
- return kRegisterType;
- default:
- return kUnsupported;
- };
- break;
- case SPECIAL3:
- switch (FunctionFieldRaw()) {
- case INS:
- case EXT:
- return kRegisterType;
- default:
- return kUnsupported;
- };
- break;
- case COP1: // Coprocessor instructions.
- switch (RsFieldRawNoAssert()) {
- case BC1: // Branch on coprocessor condition.
- return kImmediateType;
- default:
- return kRegisterType;
- };
- break;
- case COP1X:
- return kRegisterType;
- // 16 bits Immediate type instructions. e.g.: addi dest, src, imm16.
- case REGIMM:
- case BEQ:
- case BNE:
- case BLEZ:
- case BGTZ:
- case ADDI:
- case ADDIU:
- case SLTI:
- case SLTIU:
- case ANDI:
- case ORI:
- case XORI:
- case LUI:
- case BEQL:
- case BNEL:
- case BLEZL:
- case BGTZL:
- case LB:
- case LH:
- case LWL:
- case LW:
- case LBU:
- case LHU:
- case LWR:
- case SB:
- case SH:
- case SWL:
- case SW:
- case SWR:
- case LWC1:
- case LDC1:
- case SWC1:
- case SDC1:
- return kImmediateType;
- // 26 bits immediate type instructions. e.g.: j imm26.
- case J:
- case JAL:
- return kJumpType;
- default:
- return kUnsupported;
- };
- return kUnsupported;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/constants-mips.h b/src/3rdparty/v8/src/mips/constants-mips.h
deleted file mode 100644
index e7c55f5..0000000
--- a/src/3rdparty/v8/src/mips/constants-mips.h
+++ /dev/null
@@ -1,803 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_CONSTANTS_H_
-#define V8_MIPS_CONSTANTS_H_
-
-// UNIMPLEMENTED_ macro for MIPS.
-#ifdef DEBUG
-#define UNIMPLEMENTED_MIPS() \
- v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
- __FILE__, __LINE__, __func__)
-#else
-#define UNIMPLEMENTED_MIPS()
-#endif
-
-#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
-
-enum ArchVariants {
- kMips32r2,
- kMips32r1,
- kLoongson
-};
-
-#ifdef _MIPS_ARCH_MIPS32R2
- static const ArchVariants kArchVariant = kMips32r2;
-#elif _MIPS_ARCH_LOONGSON
-// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
-// which predates (and is a subset of) the mips32r2 and r1 architectures.
- static const ArchVariants kArchVariant = kLoongson;
-#else
- static const ArchVariants kArchVariant = kMips32r1;
-#endif
-
-
-#if(defined(__mips_hard_float) && __mips_hard_float != 0)
-// Use floating-point coprocessor instructions. This flag is raised when
-// -mhard-float is passed to the compiler.
-const bool IsMipsSoftFloatABI = false;
-#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
-// Not using floating-point coprocessor instructions. This flag is raised when
-// -msoft-float is passed to the compiler.
-const bool IsMipsSoftFloatABI = true;
-#else
-const bool IsMipsSoftFloatABI = true;
-#endif
-
-
-// Defines constants and accessor classes to assemble, disassemble and
-// simulate MIPS32 instructions.
-//
-// See: MIPS32 Architecture For Programmers
-// Volume II: The MIPS32 Instruction Set
-// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Registers and FPURegisters.
-
-// Number of general purpose registers.
-const int kNumRegisters = 32;
-const int kInvalidRegister = -1;
-
-// Number of registers with HI, LO, and pc.
-const int kNumSimuRegisters = 35;
-
-// In the simulator, the PC register is simulated as the 34th register.
-const int kPCRegister = 34;
-
-// Number coprocessor registers.
-const int kNumFPURegisters = 32;
-const int kInvalidFPURegister = -1;
-
-// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
-const int kFCSRRegister = 31;
-const int kInvalidFPUControlRegister = -1;
-const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
-
-// FCSR constants.
-const uint32_t kFCSRInexactFlagBit = 2;
-const uint32_t kFCSRUnderflowFlagBit = 3;
-const uint32_t kFCSROverflowFlagBit = 4;
-const uint32_t kFCSRDivideByZeroFlagBit = 5;
-const uint32_t kFCSRInvalidOpFlagBit = 6;
-
-const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
-const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
-const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
-const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
-const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
-
-const uint32_t kFCSRFlagMask =
- kFCSRInexactFlagMask |
- kFCSRUnderflowFlagMask |
- kFCSROverflowFlagMask |
- kFCSRDivideByZeroFlagMask |
- kFCSRInvalidOpFlagMask;
-
-const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
-
-// Helper functions for converting between register numbers and names.
-class Registers {
- public:
- // Return the name of the register.
- static const char* Name(int reg);
-
- // Lookup the register number for the name provided.
- static int Number(const char* name);
-
- struct RegisterAlias {
- int reg;
- const char* name;
- };
-
- static const int32_t kMaxValue = 0x7fffffff;
- static const int32_t kMinValue = 0x80000000;
-
- private:
- static const char* names_[kNumSimuRegisters];
- static const RegisterAlias aliases_[];
-};
-
-// Helper functions for converting between register numbers and names.
-class FPURegisters {
- public:
- // Return the name of the register.
- static const char* Name(int reg);
-
- // Lookup the register number for the name provided.
- static int Number(const char* name);
-
- struct RegisterAlias {
- int creg;
- const char* name;
- };
-
- private:
- static const char* names_[kNumFPURegisters];
- static const RegisterAlias aliases_[];
-};
-
-
-// -----------------------------------------------------------------------------
-// Instructions encoding constants.
-
-// On MIPS all instructions are 32 bits.
-typedef int32_t Instr;
-
-// Special Software Interrupt codes when used in the presence of the MIPS
-// simulator.
-enum SoftwareInterruptCodes {
- // Transition to C code.
- call_rt_redirected = 0xfffff
-};
-
-// On MIPS Simulator breakpoints can have different codes:
-// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
-// the simulator will run through them and print the registers.
-// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
-// instructions (see Assembler::stop()).
-// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
-// debugger.
-const uint32_t kMaxWatchpointCode = 31;
-const uint32_t kMaxStopCode = 127;
-STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
-
-
-// ----- Fields offset and length.
-const int kOpcodeShift = 26;
-const int kOpcodeBits = 6;
-const int kRsShift = 21;
-const int kRsBits = 5;
-const int kRtShift = 16;
-const int kRtBits = 5;
-const int kRdShift = 11;
-const int kRdBits = 5;
-const int kSaShift = 6;
-const int kSaBits = 5;
-const int kFunctionShift = 0;
-const int kFunctionBits = 6;
-const int kLuiShift = 16;
-
-const int kImm16Shift = 0;
-const int kImm16Bits = 16;
-const int kImm26Shift = 0;
-const int kImm26Bits = 26;
-const int kImm28Shift = 0;
-const int kImm28Bits = 28;
-
-// In branches and jumps immediate fields point to words, not bytes,
-// and are therefore shifted by 2.
-const int kImmFieldShift = 2;
-
-const int kFrBits = 5;
-const int kFrShift = 21;
-const int kFsShift = 11;
-const int kFsBits = 5;
-const int kFtShift = 16;
-const int kFtBits = 5;
-const int kFdShift = 6;
-const int kFdBits = 5;
-const int kFCccShift = 8;
-const int kFCccBits = 3;
-const int kFBccShift = 18;
-const int kFBccBits = 3;
-const int kFBtrueShift = 16;
-const int kFBtrueBits = 1;
-
-// ----- Miscellaneous useful masks.
-// Instruction bit masks.
-const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
-const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
-const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
-const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
-const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
-const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
-const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
-const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
-const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
-// Misc masks.
-const int kHiMask = 0xffff << 16;
-const int kLoMask = 0xffff;
-const int kSignMask = 0x80000000;
-const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
-
-// ----- MIPS Opcodes and Function Fields.
-// We use this presentation to stay close to the table representation in
-// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
-enum Opcode {
- SPECIAL = 0 << kOpcodeShift,
- REGIMM = 1 << kOpcodeShift,
-
- J = ((0 << 3) + 2) << kOpcodeShift,
- JAL = ((0 << 3) + 3) << kOpcodeShift,
- BEQ = ((0 << 3) + 4) << kOpcodeShift,
- BNE = ((0 << 3) + 5) << kOpcodeShift,
- BLEZ = ((0 << 3) + 6) << kOpcodeShift,
- BGTZ = ((0 << 3) + 7) << kOpcodeShift,
-
- ADDI = ((1 << 3) + 0) << kOpcodeShift,
- ADDIU = ((1 << 3) + 1) << kOpcodeShift,
- SLTI = ((1 << 3) + 2) << kOpcodeShift,
- SLTIU = ((1 << 3) + 3) << kOpcodeShift,
- ANDI = ((1 << 3) + 4) << kOpcodeShift,
- ORI = ((1 << 3) + 5) << kOpcodeShift,
- XORI = ((1 << 3) + 6) << kOpcodeShift,
- LUI = ((1 << 3) + 7) << kOpcodeShift,
-
- COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
- BEQL = ((2 << 3) + 4) << kOpcodeShift,
- BNEL = ((2 << 3) + 5) << kOpcodeShift,
- BLEZL = ((2 << 3) + 6) << kOpcodeShift,
- BGTZL = ((2 << 3) + 7) << kOpcodeShift,
-
- SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
- SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
-
- LB = ((4 << 3) + 0) << kOpcodeShift,
- LH = ((4 << 3) + 1) << kOpcodeShift,
- LWL = ((4 << 3) + 2) << kOpcodeShift,
- LW = ((4 << 3) + 3) << kOpcodeShift,
- LBU = ((4 << 3) + 4) << kOpcodeShift,
- LHU = ((4 << 3) + 5) << kOpcodeShift,
- LWR = ((4 << 3) + 6) << kOpcodeShift,
- SB = ((5 << 3) + 0) << kOpcodeShift,
- SH = ((5 << 3) + 1) << kOpcodeShift,
- SWL = ((5 << 3) + 2) << kOpcodeShift,
- SW = ((5 << 3) + 3) << kOpcodeShift,
- SWR = ((5 << 3) + 6) << kOpcodeShift,
-
- LWC1 = ((6 << 3) + 1) << kOpcodeShift,
- LDC1 = ((6 << 3) + 5) << kOpcodeShift,
-
- SWC1 = ((7 << 3) + 1) << kOpcodeShift,
- SDC1 = ((7 << 3) + 5) << kOpcodeShift,
-
- COP1X = ((1 << 4) + 3) << kOpcodeShift
-};
-
-enum SecondaryField {
- // SPECIAL Encoding of Function Field.
- SLL = ((0 << 3) + 0),
- MOVCI = ((0 << 3) + 1),
- SRL = ((0 << 3) + 2),
- SRA = ((0 << 3) + 3),
- SLLV = ((0 << 3) + 4),
- SRLV = ((0 << 3) + 6),
- SRAV = ((0 << 3) + 7),
-
- JR = ((1 << 3) + 0),
- JALR = ((1 << 3) + 1),
- MOVZ = ((1 << 3) + 2),
- MOVN = ((1 << 3) + 3),
- BREAK = ((1 << 3) + 5),
-
- MFHI = ((2 << 3) + 0),
- MFLO = ((2 << 3) + 2),
-
- MULT = ((3 << 3) + 0),
- MULTU = ((3 << 3) + 1),
- DIV = ((3 << 3) + 2),
- DIVU = ((3 << 3) + 3),
-
- ADD = ((4 << 3) + 0),
- ADDU = ((4 << 3) + 1),
- SUB = ((4 << 3) + 2),
- SUBU = ((4 << 3) + 3),
- AND = ((4 << 3) + 4),
- OR = ((4 << 3) + 5),
- XOR = ((4 << 3) + 6),
- NOR = ((4 << 3) + 7),
-
- SLT = ((5 << 3) + 2),
- SLTU = ((5 << 3) + 3),
-
- TGE = ((6 << 3) + 0),
- TGEU = ((6 << 3) + 1),
- TLT = ((6 << 3) + 2),
- TLTU = ((6 << 3) + 3),
- TEQ = ((6 << 3) + 4),
- TNE = ((6 << 3) + 6),
-
- // SPECIAL2 Encoding of Function Field.
- MUL = ((0 << 3) + 2),
- CLZ = ((4 << 3) + 0),
- CLO = ((4 << 3) + 1),
-
- // SPECIAL3 Encoding of Function Field.
- EXT = ((0 << 3) + 0),
- INS = ((0 << 3) + 4),
-
- // REGIMM encoding of rt Field.
- BLTZ = ((0 << 3) + 0) << 16,
- BGEZ = ((0 << 3) + 1) << 16,
- BLTZAL = ((2 << 3) + 0) << 16,
- BGEZAL = ((2 << 3) + 1) << 16,
-
- // COP1 Encoding of rs Field.
- MFC1 = ((0 << 3) + 0) << 21,
- CFC1 = ((0 << 3) + 2) << 21,
- MFHC1 = ((0 << 3) + 3) << 21,
- MTC1 = ((0 << 3) + 4) << 21,
- CTC1 = ((0 << 3) + 6) << 21,
- MTHC1 = ((0 << 3) + 7) << 21,
- BC1 = ((1 << 3) + 0) << 21,
- S = ((2 << 3) + 0) << 21,
- D = ((2 << 3) + 1) << 21,
- W = ((2 << 3) + 4) << 21,
- L = ((2 << 3) + 5) << 21,
- PS = ((2 << 3) + 6) << 21,
- // COP1 Encoding of Function Field When rs=S.
- ROUND_L_S = ((1 << 3) + 0),
- TRUNC_L_S = ((1 << 3) + 1),
- CEIL_L_S = ((1 << 3) + 2),
- FLOOR_L_S = ((1 << 3) + 3),
- ROUND_W_S = ((1 << 3) + 4),
- TRUNC_W_S = ((1 << 3) + 5),
- CEIL_W_S = ((1 << 3) + 6),
- FLOOR_W_S = ((1 << 3) + 7),
- CVT_D_S = ((4 << 3) + 1),
- CVT_W_S = ((4 << 3) + 4),
- CVT_L_S = ((4 << 3) + 5),
- CVT_PS_S = ((4 << 3) + 6),
- // COP1 Encoding of Function Field When rs=D.
- ADD_D = ((0 << 3) + 0),
- SUB_D = ((0 << 3) + 1),
- MUL_D = ((0 << 3) + 2),
- DIV_D = ((0 << 3) + 3),
- SQRT_D = ((0 << 3) + 4),
- ABS_D = ((0 << 3) + 5),
- MOV_D = ((0 << 3) + 6),
- NEG_D = ((0 << 3) + 7),
- ROUND_L_D = ((1 << 3) + 0),
- TRUNC_L_D = ((1 << 3) + 1),
- CEIL_L_D = ((1 << 3) + 2),
- FLOOR_L_D = ((1 << 3) + 3),
- ROUND_W_D = ((1 << 3) + 4),
- TRUNC_W_D = ((1 << 3) + 5),
- CEIL_W_D = ((1 << 3) + 6),
- FLOOR_W_D = ((1 << 3) + 7),
- CVT_S_D = ((4 << 3) + 0),
- CVT_W_D = ((4 << 3) + 4),
- CVT_L_D = ((4 << 3) + 5),
- C_F_D = ((6 << 3) + 0),
- C_UN_D = ((6 << 3) + 1),
- C_EQ_D = ((6 << 3) + 2),
- C_UEQ_D = ((6 << 3) + 3),
- C_OLT_D = ((6 << 3) + 4),
- C_ULT_D = ((6 << 3) + 5),
- C_OLE_D = ((6 << 3) + 6),
- C_ULE_D = ((6 << 3) + 7),
- // COP1 Encoding of Function Field When rs=W or L.
- CVT_S_W = ((4 << 3) + 0),
- CVT_D_W = ((4 << 3) + 1),
- CVT_S_L = ((4 << 3) + 0),
- CVT_D_L = ((4 << 3) + 1),
- // COP1 Encoding of Function Field When rs=PS.
- // COP1X Encoding of Function Field.
- MADD_D = ((4 << 3) + 1),
-
- NULLSF = 0
-};
-
-
-// ----- Emulated conditions.
-// On MIPS we use this enum to abstract from conditionnal branch instructions.
-// the 'U' prefix is used to specify unsigned comparisons.
-enum Condition {
- // Any value < 0 is considered no_condition.
- kNoCondition = -1,
-
- overflow = 0,
- no_overflow = 1,
- Uless = 2,
- Ugreater_equal= 3,
- equal = 4,
- not_equal = 5,
- Uless_equal = 6,
- Ugreater = 7,
- negative = 8,
- positive = 9,
- parity_even = 10,
- parity_odd = 11,
- less = 12,
- greater_equal = 13,
- less_equal = 14,
- greater = 15,
-
- cc_always = 16,
-
- // Aliases.
- carry = Uless,
- not_carry = Ugreater_equal,
- zero = equal,
- eq = equal,
- not_zero = not_equal,
- ne = not_equal,
- nz = not_equal,
- sign = negative,
- not_sign = positive,
- mi = negative,
- pl = positive,
- hi = Ugreater,
- ls = Uless_equal,
- ge = greater_equal,
- lt = less,
- gt = greater,
- le = less_equal,
- hs = Ugreater_equal,
- lo = Uless,
- al = cc_always,
-
- cc_default = kNoCondition
-};
-
-
-// Returns the equivalent of !cc.
-// Negation of the default kNoCondition (-1) results in a non-default
-// no_condition value (-2). As long as tests for no_condition check
-// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc) {
- ASSERT(cc != cc_always);
- return static_cast<Condition>(cc ^ 1);
-}
-
-
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case Uless:
- return Ugreater;
- case Ugreater:
- return Uless;
- case Ugreater_equal:
- return Uless_equal;
- case Uless_equal:
- return Ugreater_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- };
-}
-
-
-// ----- Coprocessor conditions.
-enum FPUCondition {
- kNoFPUCondition = -1,
-
- F = 0, // False.
- UN = 1, // Unordered.
- EQ = 2, // Equal.
- UEQ = 3, // Unordered or Equal.
- OLT = 4, // Ordered or Less Than.
- ULT = 5, // Unordered or Less Than.
- OLE = 6, // Ordered or Less Than or Equal.
- ULE = 7 // Unordered or Less Than or Equal.
-};
-
-
-// FPU rounding modes.
-enum FPURoundingMode {
- RN = 0 << 0, // Round to Nearest.
- RZ = 1 << 0, // Round towards zero.
- RP = 2 << 0, // Round towards Plus Infinity.
- RM = 3 << 0, // Round towards Minus Infinity.
-
- // Aliases.
- kRoundToNearest = RN,
- kRoundToZero = RZ,
- kRoundToPlusInf = RP,
- kRoundToMinusInf = RM
-};
-
-const uint32_t kFPURoundingModeMask = 3 << 0;
-
-enum CheckForInexactConversion {
- kCheckForInexactConversion,
- kDontCheckForInexactConversion
-};
-
-
-// -----------------------------------------------------------------------------
-// Hints.
-
-// Branch hints are not used on the MIPS. They are defined so that they can
-// appear in shared function signatures, but will be ignored in MIPS
-// implementations.
-enum Hint {
- no_hint = 0
-};
-
-
-inline Hint NegateHint(Hint hint) {
- return no_hint;
-}
-
-
-// -----------------------------------------------------------------------------
-// Specific instructions, constants, and masks.
-// These constants are declared in assembler-mips.cc, as they use named
-// registers and other constants.
-
-// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
-// operations as post-increment of sp.
-extern const Instr kPopInstruction;
-// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
-extern const Instr kPushInstruction;
-// sw(r, MemOperand(sp, 0))
-extern const Instr kPushRegPattern;
-// lw(r, MemOperand(sp, 0))
-extern const Instr kPopRegPattern;
-extern const Instr kLwRegFpOffsetPattern;
-extern const Instr kSwRegFpOffsetPattern;
-extern const Instr kLwRegFpNegOffsetPattern;
-extern const Instr kSwRegFpNegOffsetPattern;
-// A mask for the Rt register for push, pop, lw, sw instructions.
-extern const Instr kRtMask;
-extern const Instr kLwSwInstrTypeMask;
-extern const Instr kLwSwInstrArgumentMask;
-extern const Instr kLwSwOffsetMask;
-
-// Break 0xfffff, reserved for redirected real time call.
-const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
-// A nop instruction. (Encoding of sll 0 0 0).
-const Instr nopInstr = 0;
-
-class Instruction {
- public:
- enum {
- kInstrSize = 4,
- kInstrSizeLog2 = 2,
- // On MIPS PC cannot actually be directly accessed. We behave as if PC was
- // always the value of the current instruction being executed.
- kPCReadOffset = 0
- };
-
- // Get the raw instruction bits.
- inline Instr InstructionBits() const {
- return *reinterpret_cast<const Instr*>(this);
- }
-
- // Set the raw instruction bits to value.
- inline void SetInstructionBits(Instr value) {
- *reinterpret_cast<Instr*>(this) = value;
- }
-
- // Read one particular bit out of the instruction bits.
- inline int Bit(int nr) const {
- return (InstructionBits() >> nr) & 1;
- }
-
- // Read a bit field out of the instruction bits.
- inline int Bits(int hi, int lo) const {
- return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
- }
-
- // Instruction type.
- enum Type {
- kRegisterType,
- kImmediateType,
- kJumpType,
- kUnsupported = -1
- };
-
- // Get the encoding type of the instruction.
- Type InstructionType() const;
-
-
- // Accessors for the different named fields used in the MIPS encoding.
- inline Opcode OpcodeValue() const {
- return static_cast<Opcode>(
- Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
- }
-
- inline int RsValue() const {
- ASSERT(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return Bits(kRsShift + kRsBits - 1, kRsShift);
- }
-
- inline int RtValue() const {
- ASSERT(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return Bits(kRtShift + kRtBits - 1, kRtShift);
- }
-
- inline int RdValue() const {
- ASSERT(InstructionType() == kRegisterType);
- return Bits(kRdShift + kRdBits - 1, kRdShift);
- }
-
- inline int SaValue() const {
- ASSERT(InstructionType() == kRegisterType);
- return Bits(kSaShift + kSaBits - 1, kSaShift);
- }
-
- inline int FunctionValue() const {
- ASSERT(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
- }
-
- inline int FdValue() const {
- return Bits(kFdShift + kFdBits - 1, kFdShift);
- }
-
- inline int FsValue() const {
- return Bits(kFsShift + kFsBits - 1, kFsShift);
- }
-
- inline int FtValue() const {
- return Bits(kFtShift + kFtBits - 1, kFtShift);
- }
-
- inline int FrValue() const {
- return Bits(kFrShift + kFrBits -1, kFrShift);
- }
-
- // Float Compare condition code instruction bits.
- inline int FCccValue() const {
- return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
- }
-
- // Float Branch condition code instruction bits.
- inline int FBccValue() const {
- return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
- }
-
- // Float Branch true/false instruction bit.
- inline int FBtrueValue() const {
- return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
- }
-
- // Return the fields at their original place in the instruction encoding.
- inline Opcode OpcodeFieldRaw() const {
- return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
- }
-
- inline int RsFieldRaw() const {
- ASSERT(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return InstructionBits() & kRsFieldMask;
- }
-
- // Same as above function, but safe to call within InstructionType().
- inline int RsFieldRawNoAssert() const {
- return InstructionBits() & kRsFieldMask;
- }
-
- inline int RtFieldRaw() const {
- ASSERT(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return InstructionBits() & kRtFieldMask;
- }
-
- inline int RdFieldRaw() const {
- ASSERT(InstructionType() == kRegisterType);
- return InstructionBits() & kRdFieldMask;
- }
-
- inline int SaFieldRaw() const {
- ASSERT(InstructionType() == kRegisterType);
- return InstructionBits() & kSaFieldMask;
- }
-
- inline int FunctionFieldRaw() const {
- return InstructionBits() & kFunctionFieldMask;
- }
-
- // Get the secondary field according to the opcode.
- inline int SecondaryValue() const {
- Opcode op = OpcodeFieldRaw();
- switch (op) {
- case SPECIAL:
- case SPECIAL2:
- return FunctionValue();
- case COP1:
- return RsValue();
- case REGIMM:
- return RtValue();
- default:
- return NULLSF;
- }
- }
-
- inline int32_t Imm16Value() const {
- ASSERT(InstructionType() == kImmediateType);
- return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
- }
-
- inline int32_t Imm26Value() const {
- ASSERT(InstructionType() == kJumpType);
- return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
- }
-
- // Say if the instruction should not be used in a branch delay slot.
- bool IsForbiddenInBranchDelay() const;
- // Say if the instruction 'links'. e.g. jal, bal.
- bool IsLinkingInstruction() const;
- // Say if the instruction is a break or a trap.
- bool IsTrap() const;
-
- // Instructions are read of out a code stream. The only way to get a
- // reference to an instruction is to convert a pointer. There is no way
- // to allocate or create instances of class Instruction.
- // Use the At(pc) function to create references to Instruction.
- static Instruction* At(byte* pc) {
- return reinterpret_cast<Instruction*>(pc);
- }
-
- private:
- // We need to prevent the creation of instances of class Instruction.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
-};
-
-
-// -----------------------------------------------------------------------------
-// MIPS assembly various constants.
-
-// C/C++ argument slots size.
-const int kCArgSlotCount = 4;
-const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize;
-// JS argument slots size.
-const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
-// Assembly builtins argument slots size.
-const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
-
-const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
-
-} } // namespace v8::internal
-
-#endif // #ifndef V8_MIPS_CONSTANTS_H_
diff --git a/src/3rdparty/v8/src/mips/cpu-mips.cc b/src/3rdparty/v8/src/mips/cpu-mips.cc
deleted file mode 100644
index 93ebeda..0000000
--- a/src/3rdparty/v8/src/mips/cpu-mips.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// CPU specific code for arm independent of OS goes here.
-
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#ifdef __mips
-#include <asm/cachectl.h>
-#endif // #ifdef __mips
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "cpu.h"
-#include "macro-assembler.h"
-
-#include "simulator.h" // For cache flushing.
-
-namespace v8 {
-namespace internal {
-
-
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return CpuFeatures::IsSupported(FPU);
-}
-
-
-void CPU::FlushICache(void* start, size_t size) {
- // Nothing to do, flushing no instructions.
- if (size == 0) {
- return;
- }
-
-#if !defined (USE_SIMULATOR)
-#if defined(ANDROID)
- // Bionic cacheflush can typically run in userland, avoiding kernel call.
- char *end = reinterpret_cast<char *>(start) + size;
- cacheflush(
- reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
-#else // ANDROID
- int res;
- // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
- res = syscall(__NR_cacheflush, start, size, ICACHE);
- if (res) {
- V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
- }
-#endif // ANDROID
-#else // USE_SIMULATOR.
- // Not generating mips instructions for C-code. This means that we are
- // building a mips emulator based target. We should notify the simulator
- // that the Icache was flushed.
- // None of this code ends up in the snapshot so there are no issues
- // around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
-#endif // USE_SIMULATOR.
-}
-
-
-void CPU::DebugBreak() {
-#ifdef __mips
- asm volatile("break");
-#endif // #ifdef __mips
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/debug-mips.cc b/src/3rdparty/v8/src/mips/debug-mips.cc
deleted file mode 100644
index 3be1e4d..0000000
--- a/src/3rdparty/v8/src/mips/debug-mips.cc
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen.h"
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- // Mips return sequence:
- // mov sp, fp
- // lw fp, sp(0)
- // lw ra, sp(4)
- // addiu sp, sp, 8
- // addiu sp, sp, N
- // jr ra
- // nop (in branch delay slot)
-
- // Make sure this constant matches the number if instrucntions we emit.
- ASSERT(Assembler::kJSReturnSequenceInstructions == 7);
- CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
- // li and Call pseudo-instructions emit two instructions each.
- patcher.masm()->li(v8::internal::t9,
- Operand(reinterpret_cast<int32_t>(
- Isolate::Current()->debug()->debug_break_return()->entry())));
- patcher.masm()->Call(v8::internal::t9);
- patcher.masm()->nop();
- patcher.masm()->nop();
- patcher.masm()->nop();
-
- // TODO(mips): Open issue about using breakpoint instruction instead of nops.
- // patcher.masm()->bkpt(0);
-}
-
-
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceInstructions);
-}
-
-
-// A debug break in the exit code is identified by the JS frame exit code
-// having been patched with li/call psuedo-instrunction (liu/ori/jalr).
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Patch the code changing the debug break slot code from:
- // nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
- // nop(DEBUG_BREAK_NOP)
- // nop(DEBUG_BREAK_NOP)
- // nop(DEBUG_BREAK_NOP)
- // to a call to the debug break slot code.
- // li t9, address (lui t9 / ori t9 instruction pair)
- // call t9 (jalr t9 / nop instruction pair)
- CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
- patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
- Isolate::Current()->debug()->debug_break_slot()->entry())));
- patcher.masm()->Call(v8::internal::t9);
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kDebugBreakSlotInstructions);
-}
-
-const bool Debug::FramePaddingLayout::kIsSupported = false;
-
-
-#define __ ACCESS_MASM(masm)
-
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ And(at, reg, 0xc0000000);
- __ Assert(
- eq, "Unable to encode value as smi", at, Operand(zero_reg));
- }
- __ sll(reg, reg, kSmiTagSize);
- }
- }
- __ MultiPush(object_regs | non_object_regs);
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ PrepareCEntryArgs(0); // No arguments.
- __ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ MultiPop(object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ srl(reg, reg, kSmiTagSize);
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ li(reg, kDebugZapValue);
- }
- }
- }
-
- // Leave the internal frame.
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- __ li(t9, Operand(
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate())));
- __ lw(t9, MemOperand(t9));
- __ Jump(t9);
-}
-
-
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC load (from ic-mips.cc).
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- [sp] : receiver
- // -----------------------------------
- // Registers a0 and a2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, a0.bit() | a2.bit(), 0);
-}
-
-
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC store (from ic-mips.cc).
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- // Registers a0, a1, and a2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
-}
-
-
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit(), 0);
-}
-
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
-}
-
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC call (from ic-mips.cc).
- // ----------- S t a t e -------------
- // -- a2: name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a2.bit(), 0);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // In places other than IC call sites it is expected that v0 is TOS which
- // is an object - this is not generally the case so this should be used with
- // care.
- Generate_DebugBreakCallHelper(masm, v0.bit(), 0);
-}
-
-
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-mips.cc).
- // ----------- S t a t e -------------
- // -- a1 : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit(), 0);
-}
-
-
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-mips.cc).
- // ----------- S t a t e -------------
- // -- a1 : function
- // -- a2 : cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), 0);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-mips.cc).
- // ----------- S t a t e -------------
- // -- a0 : number of arguments (not smi)
- // -- a1 : constructor function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() , a0.bit());
-}
-
-
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-mips.cc).
- // ----------- S t a t e -------------
- // -- a0 : number of arguments (not smi)
- // -- a1 : constructor function
- // -- a2 : cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), a0.bit());
-}
-
-
-void Debug::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction. Avoid emitting
- // the trampoline pool in the debug break slot code.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
- __ nop(MacroAssembler::DEBUG_BREAK_NOP);
- }
- ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
- masm->InstructionsGeneratedSince(&check_codesize));
-}
-
-
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0);
-}
-
-
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on mips");
-}
-
-
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on mips");
-}
-
-
-const bool Debug::kFrameDropperSupported = false;
-
-#undef __
-
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/deoptimizer-mips.cc b/src/3rdparty/v8/src/mips/deoptimizer-mips.cc
deleted file mode 100644
index 8e96cd5..0000000
--- a/src/3rdparty/v8/src/mips/deoptimizer-mips.cc
+++ /dev/null
@@ -1,1120 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-
-int Deoptimizer::patch_size() {
- const int kCallInstructionSizeInWords = 4;
- return kCallInstructionSizeInWords * Assembler::kInstrSize;
-}
-
-
-void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction* function) {
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
-
- ASSERT(function->IsOptimized());
- ASSERT(function->FunctionsInFunctionListShareSameCode());
-
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
- // Get the optimized code.
- Code* code = function->code();
- Address code_start_address = code->instruction_start();
-
- // Invalidate the relocation information, as it will become invalid by the
- // code patching below, and is not needed any more.
- code->InvalidateRelocation();
-
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
-#ifdef DEBUG
- Address prev_call_address = NULL;
-#endif
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- if (deopt_data->Pc(i)->value() == -1) continue;
- Address call_address = code_start_address + deopt_data->Pc(i)->value();
- Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
- int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
- RelocInfo::NONE32);
- int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
- ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
- ASSERT(call_size_in_bytes <= patch_size());
- CodePatcher patcher(call_address, call_size_in_words);
- patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
- ASSERT(prev_call_address == NULL ||
- call_address >= prev_call_address + patch_size());
- ASSERT(call_address + patch_size() <= code->instruction_end());
-
-#ifdef DEBUG
- prev_call_address = call_address;
-#endif
- }
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
-#ifdef DEBUG
- if (FLAG_print_code) {
- code->PrintLn();
- }
-#endif
- }
-}
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- // This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
- // The call of the stack guard check has the following form:
- // sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
- // beq at, zero_reg, ok
- // lui t9, <stack guard address> upper
- // ori t9, <stack guard address> lower
- // jalr t9
- // nop
- // ----- pc_after points here
-
- ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
-
- // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
- CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- patcher.masm()->addiu(at, zero_reg, 1);
-
- // Replace the stack check address in the load-immediate (lui/ori pair)
- // with the entry address of the replacement code.
- ASSERT(reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(check_code->entry()));
- Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
- replacement_code->entry());
-
- // We patched the code to the following form:
- // addiu at, zero_reg, 1
- // beq at, zero_reg, ok ;; Not changed
- // lui t9, <on-stack replacement address> upper
- // ori t9, <on-stack replacement address> lower
- // jalr t9 ;; Not changed
- // nop ;; Not changed
- // ----- pc_after points here
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- // Exact opposite of the function above.
- const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Assembler::IsAddImmediate(
- Assembler::instr_at(pc_after - 6 * kInstrSize)));
- ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
-
- // Restore the sltu instruction so beq can be taken again.
- CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- patcher.masm()->slt(at, a3, zero_reg);
-
- // Replace the on-stack replacement address in the load-immediate (lui/ori
- // pair) with the entry address of the normal stack-check code.
- ASSERT(reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(replacement_code->entry()));
- Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
- check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 4 * kInstrSize, check_code);
-}
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
-
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
- output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
-void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
- int frame_index) {
- //
- // FROM TO
- // | .... | | .... |
- // +-------------------------+ +-------------------------+
- // | JSFunction continuation | | JSFunction continuation |
- // +-------------------------+ +-------------------------+
- // | | saved frame (fp) | | saved frame (fp) |
- // | +=========================+<-fp +=========================+<-fp
- // | | JSFunction context | | JSFunction context |
- // v +-------------------------+ +-------------------------|
- // | COMPILED_STUB marker | | STUB_FAILURE marker |
- // +-------------------------+ +-------------------------+
- // | | | caller args.arguments_ |
- // | ... | +-------------------------+
- // | | | caller args.length_ |
- // |-------------------------|<-sp +-------------------------+
- // | caller args pointer |
- // +-------------------------+
- // | caller stack param 1 |
- // parameters in registers +-------------------------+
- // and spilled to stack | .... |
- // +-------------------------+
- // | caller stack param n |
- // +-------------------------+<-sp
- // s0-s1 = number of parameters
- // s2 = failure handler address
- // fp = saved frame
- // cp = JSFunction context
- //
-
- ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
- int major_key = compiled_code_->major_key();
- CodeStubInterfaceDescriptor* descriptor =
- isolate_->code_stub_interface_descriptor(major_key);
-
- // The output frame must have room for all pushed register parameters
- // and the standard stack frame slots. Include space for an argument
- // object to the callee and optionally the space to pass the argument
- // object to the stub failure handler.
- int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
- sizeof(Arguments) + kPointerSize;
- int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
- int input_frame_size = input_->GetFrameSize();
- int output_frame_size = height_in_bytes + fixed_frame_size;
- if (trace_) {
- PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
- CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
- height_in_bytes);
- }
-
- // The stub failure trampoline is a single frame.
-
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, NULL);
- output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ASSERT(frame_index == 0);
- output_[frame_index] = output_frame;
- // The top address for the output frame can be computed from the input
- // frame pointer and the output frame's height. Subtract space for the
- // context and function slots.
- intptr_t top_address = input_->GetRegister(fp.code()) - (2 * kPointerSize) -
- height_in_bytes;
- output_frame->SetTop(top_address);
-
- // Read caller's PC (JSFunction continuation) from the input frame.
- intptr_t input_frame_offset = input_frame_size - kPointerSize;
- intptr_t output_frame_offset = output_frame_size - kPointerSize;
- intptr_t value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Read caller's FP from the input frame, and set this frame's FP.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- intptr_t frame_ptr = input_->GetRegister(fp.code());
- output_frame->SetRegister(fp.code(), frame_ptr);
- output_frame->SetFp(frame_ptr);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // The context can be gotten from the input frame.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetRegister(cp.code(), value);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_frame_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (stub fail sentinel)\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- int caller_arg_count = 0;
- if (descriptor->stack_parameter_count_ != NULL) {
- caller_arg_count =
- input_->GetRegister(descriptor->stack_parameter_count_->code());
- }
-
- // Build the Arguments object for the caller's parameters and a pointer to it.
- output_frame_offset -= kPointerSize;
- value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
- (caller_arg_count - 1) * kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.arguments\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = caller_arg_count;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.length\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = frame_ptr - (output_frame_size - output_frame_offset) -
- StandardFrameConstants::kMarkerOffset + kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args*\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Copy the register parameters to the failure frame.
- for (int i = 0; i < descriptor->register_param_count_; ++i) {
- output_frame_offset -= kPointerSize;
- DoTranslateCommand(iterator, 0, output_frame_offset);
- }
-
- ASSERT(0 == output_frame_offset);
-
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-
- ApiFunction function(descriptor->deoptimization_handler_);
- ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
- intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
- output_frame->SetRegister(s0.code(), params);
- output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
- output_frame->SetRegister(s2.code(), handler);
-
- // Compute this frame's PC, state, and continuation.
- Code* trampoline = NULL;
- int extra = descriptor->extra_expression_stack_count_;
- StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
- ASSERT(trampoline != NULL);
- output_frame->SetPc(reinterpret_cast<intptr_t>(
- trampoline->instruction_start()));
- output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
- Code* notify_failure =
- isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(notify_failure->entry()));
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = 8 * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
- // Construct stub can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The output frame reflects a JSConstructStubGeneric frame.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // Constructor function being invoked by the stub.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
-
- ASSERT(0 == output_offset);
-
- uint32_t pc = reinterpret_cast<uint32_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
-
-
-// This code is very similar to ia32/arm code, but relies on register names
-// (fp, sp) and how the frame is laid out.
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) {
- output_frame->SetRegister(fp.code(), fp_value);
- }
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(cp.code(), value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
-}
-
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers fp and sp are set to the correct values though.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
- }
-}
-
-
-#define __ masm()->
-
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::EntryGenerator::Generate() {
- GeneratePrologue();
-
- Isolate* isolate = masm()->isolate();
-
- // Unlike on ARM we don't save all the registers, just the useful ones.
- // For the rest, there are gaps on the stack, so the offsets remain the same.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- RegList restored_regs = kJSCallerSaved | kCalleeSaved;
- RegList saved_regs = restored_regs | sp.bit() | ra.bit();
-
- const int kDoubleRegsSize =
- kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Save all FPU registers before messing with them.
- __ Subu(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ sdc1(fpu_reg, MemOperand(sp, offset));
- }
- } else {
- __ Subu(sp, sp, Operand(kDoubleRegsSize));
- }
-
- // Push saved_regs (needed to populate FrameDescription::registers_).
- // Leave gaps for other registers.
- __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
- for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
- if ((saved_regs & (1 << i)) != 0) {
- __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
- }
- }
-
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
-
- // Get the bailout id from the stack.
- __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
-
- // Get the address of the location in the code object if possible (a3) (return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register t0.
- if (type() == EAGER) {
- __ mov(a3, zero_reg);
- // Correct one word for bailout id.
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else if (type() == OSR) {
- __ mov(a3, ra);
- // Correct one word for bailout id.
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ mov(a3, ra);
- // Correct two words for bailout id and return address.
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
-
- __ Subu(t0, fp, t0);
-
- // Allocate a new deoptimizer object.
- // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
- __ PrepareCallCFunction(6, t1);
- __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ li(a1, Operand(type())); // bailout type,
- // a2: bailout id already loaded.
- // a3: code address or 0 already loaded.
- __ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
- __ li(t1, Operand(ExternalReference::isolate_address()));
- __ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
- // Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
- }
-
- // Preserve "deoptimizer" object in register v0 and get the input
- // frame descriptor pointer to a1 (deoptimizer->input_);
- // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
- __ mov(a0, v0);
- __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- ASSERT(Register::kNumRegisters == kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((saved_regs & (1 << i)) != 0) {
- __ lw(a2, MemOperand(sp, i * kPointerSize));
- __ sw(a2, MemOperand(a1, offset));
- } else if (FLAG_debug_code) {
- __ li(a2, kDebugZapValue);
- __ sw(a2, MemOperand(a1, offset));
- }
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Copy FPU registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ ldc1(f0, MemOperand(sp, src_offset));
- __ sdc1(f0, MemOperand(a1, dst_offset));
- }
- }
-
- // Remove the bailout id, eventually return address, and the saved registers
- // from the stack.
- if (type() == EAGER || type() == OSR) {
- __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
-
- // Compute a pointer to the unwinding limit in register a2; that is
- // the first stack slot not part of the input frame.
- __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
- __ Addu(a2, a2, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
- Label pop_loop;
- Label pop_loop_header;
- __ Branch(&pop_loop_header);
- __ bind(&pop_loop);
- __ pop(t0);
- __ sw(t0, MemOperand(a3, 0));
- __ addiu(a3, a3, sizeof(uint32_t));
- __ bind(&pop_loop_header);
- __ Branch(&pop_loop, ne, a2, Operand(sp));
-
- // Compute the output frame in the deoptimizer.
- __ push(a0); // Preserve deoptimizer object across call.
- // a0: deoptimizer object; a1: scratch.
- __ PrepareCallCFunction(1, a1);
- // Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
- }
- __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
- // Outer loop state: t0 = current "FrameDescription** output_",
- // a1 = one past the last FrameDescription**.
- __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
- __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
- __ sll(a1, a1, kPointerSizeLog2); // Count to offset.
- __ addu(a1, t0, a1); // a1 = one past the last FrameDescription**.
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
- __ lw(a2, MemOperand(t0, 0)); // output_[ix]
- __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ Subu(a3, a3, Operand(sizeof(uint32_t)));
- __ Addu(t2, a2, Operand(a3));
- __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
- __ push(t3);
- __ bind(&inner_loop_header);
- __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
-
- __ Addu(t0, t0, Operand(kPointerSize));
- __ bind(&outer_loop_header);
- __ Branch(&outer_push_loop, lt, t0, Operand(a1));
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
-
- __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ ldc1(fpu_reg, MemOperand(a1, src_offset));
- }
- }
-
- // Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
- __ push(t2);
- }
-
- __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
- __ push(t2);
- __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
- __ push(t2);
-
-
- // Technically restoring 'at' should work unless zero_reg is also restored
- // but it's safer to check for this.
- ASSERT(!(at.bit() & restored_regs));
- // Restore the registers from the last output frame.
- __ mov(at, a2);
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
- __ lw(ToRegister(i), MemOperand(at, offset));
- }
- }
-
- __ InitializeRootRegister();
-
- __ pop(at); // Get continuation, leave pc on stack.
- __ pop(ra);
- __ Jump(at);
- __ stop("Unreachable.");
-}
-
-
-// Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 9 * Assembler::kInstrSize;
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
-
- // Create a sequence of deoptimization entries. Note that any
- // registers may be still live.
- Label table_start;
- __ bind(&table_start);
- for (int i = 0; i < count(); i++) {
- Label start;
- __ bind(&start);
- if (type() != EAGER) {
- // Emulate ia32 like call by pushing return address to stack.
- __ addiu(sp, sp, -2 * kPointerSize);
- __ sw(ra, MemOperand(sp, 1 * kPointerSize));
- } else {
- __ addiu(sp, sp, -1 * kPointerSize);
- }
- // Jump over the remaining deopt entries (including this one).
- // This code is always reached by calling Jump, which puts the target (label
- // start) into t9.
- const int remaining_entries = (count() - i) * table_entry_size_;
- __ Addu(t9, t9, remaining_entries);
- // 'at' was clobbered so we can only load the current entry value here.
- __ li(at, i);
- __ jr(t9); // Expose delay slot.
- __ sw(at, MemOperand(sp, 0 * kPointerSize)); // In the delay slot.
-
- // Pad the rest of the code.
- while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {
- __ nop();
- }
-
- ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
- }
-
- ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
- count() * table_entry_size_);
-}
-
-#undef __
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mips/disasm-mips.cc b/src/3rdparty/v8/src/mips/disasm-mips.cc
deleted file mode 100644
index 0eca71f..0000000
--- a/src/3rdparty/v8/src/mips/disasm-mips.cc
+++ /dev/null
@@ -1,1064 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// A Disassembler object is used to disassemble a block of code instruction by
-// instruction. The default implementation of the NameConverter object can be
-// overriden to modify register names or to do symbol lookup on addresses.
-//
-// The example below will disassemble a block of code and print it to stdout.
-//
-// NameConverter converter;
-// Disassembler d(converter);
-// for (byte* pc = begin; pc < end;) {
-// v8::internal::EmbeddedVector<char, 256> buffer;
-// byte* prev_pc = pc;
-// pc += d.InstructionDecode(buffer, pc);
-// printf("%p %08x %s\n",
-// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
-// }
-//
-// The Disassembler class also has a convenience method to disassemble a block
-// of code into a FILE*, meaning that the above functionality could also be
-// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
-
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-#ifndef WIN32
-#include <stdint.h>
-#endif
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "mips/constants-mips.h"
-#include "disasm.h"
-#include "macro-assembler.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-//------------------------------------------------------------------------------
-
-// Decoder decodes and disassembles instructions into an output buffer.
-// It uses the converter to convert register names and call destinations into
-// more informative description.
-class Decoder {
- public:
- Decoder(const disasm::NameConverter& converter,
- v8::internal::Vector<char> out_buffer)
- : converter_(converter),
- out_buffer_(out_buffer),
- out_buffer_pos_(0) {
- out_buffer_[out_buffer_pos_] = '\0';
- }
-
- ~Decoder() {}
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(byte* instruction);
-
- private:
- // Bottleneck functions to print into the out_buffer.
- void PrintChar(const char ch);
- void Print(const char* str);
-
- // Printing of common values.
- void PrintRegister(int reg);
- void PrintFPURegister(int freg);
- void PrintRs(Instruction* instr);
- void PrintRt(Instruction* instr);
- void PrintRd(Instruction* instr);
- void PrintFs(Instruction* instr);
- void PrintFt(Instruction* instr);
- void PrintFd(Instruction* instr);
- void PrintSa(Instruction* instr);
- void PrintSd(Instruction* instr);
- void PrintSs1(Instruction* instr);
- void PrintSs2(Instruction* instr);
- void PrintBc(Instruction* instr);
- void PrintCc(Instruction* instr);
- void PrintFunction(Instruction* instr);
- void PrintSecondaryField(Instruction* instr);
- void PrintUImm16(Instruction* instr);
- void PrintSImm16(Instruction* instr);
- void PrintXImm16(Instruction* instr);
- void PrintXImm26(Instruction* instr);
- void PrintCode(Instruction* instr); // For break and trap instructions.
- // Printing of instruction name.
- void PrintInstructionName(Instruction* instr);
-
- // Handle formatting of instructions and their options.
- int FormatRegister(Instruction* instr, const char* option);
- int FormatFPURegister(Instruction* instr, const char* option);
- int FormatOption(Instruction* instr, const char* option);
- void Format(Instruction* instr, const char* format);
- void Unknown(Instruction* instr);
-
- // Each of these functions decodes one particular instruction type.
- void DecodeTypeRegister(Instruction* instr);
- void DecodeTypeImmediate(Instruction* instr);
- void DecodeTypeJump(Instruction* instr);
-
- const disasm::NameConverter& converter_;
- v8::internal::Vector<char> out_buffer_;
- int out_buffer_pos_;
-
- DISALLOW_COPY_AND_ASSIGN(Decoder);
-};
-
-
-// Support for assertions in the Decoder formatting functions.
-#define STRING_STARTS_WITH(string, compare_string) \
- (strncmp(string, compare_string, strlen(compare_string)) == 0)
-
-
-// Append the ch to the output buffer.
-void Decoder::PrintChar(const char ch) {
- out_buffer_[out_buffer_pos_++] = ch;
-}
-
-
-// Append the str to the output buffer.
-void Decoder::Print(const char* str) {
- char cur = *str++;
- while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
- PrintChar(cur);
- cur = *str++;
- }
- out_buffer_[out_buffer_pos_] = 0;
-}
-
-
-// Print the register name according to the active name converter.
-void Decoder::PrintRegister(int reg) {
- Print(converter_.NameOfCPURegister(reg));
-}
-
-
-void Decoder::PrintRs(Instruction* instr) {
- int reg = instr->RsValue();
- PrintRegister(reg);
-}
-
-
-void Decoder::PrintRt(Instruction* instr) {
- int reg = instr->RtValue();
- PrintRegister(reg);
-}
-
-
-void Decoder::PrintRd(Instruction* instr) {
- int reg = instr->RdValue();
- PrintRegister(reg);
-}
-
-
-// Print the FPUregister name according to the active name converter.
-void Decoder::PrintFPURegister(int freg) {
- Print(converter_.NameOfXMMRegister(freg));
-}
-
-
-void Decoder::PrintFs(Instruction* instr) {
- int freg = instr->RsValue();
- PrintFPURegister(freg);
-}
-
-
-void Decoder::PrintFt(Instruction* instr) {
- int freg = instr->RtValue();
- PrintFPURegister(freg);
-}
-
-
-void Decoder::PrintFd(Instruction* instr) {
- int freg = instr->RdValue();
- PrintFPURegister(freg);
-}
-
-
-// Print the integer value of the sa field.
-void Decoder::PrintSa(Instruction* instr) {
- int sa = instr->SaValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
-}
-
-
-// Print the integer value of the rd field, when it is not used as reg.
-void Decoder::PrintSd(Instruction* instr) {
- int sd = instr->RdValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
-}
-
-
-// Print the integer value of the rd field, when used as 'ext' size.
-void Decoder::PrintSs1(Instruction* instr) {
- int ss = instr->RdValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
-}
-
-
-// Print the integer value of the rd field, when used as 'ins' size.
-void Decoder::PrintSs2(Instruction* instr) {
- int ss = instr->RdValue();
- int pos = instr->SaValue();
- out_buffer_pos_ +=
- OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
-}
-
-
-// Print the integer value of the cc field for the bc1t/f instructions.
-void Decoder::PrintBc(Instruction* instr) {
- int cc = instr->FBccValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
-}
-
-
-// Print the integer value of the cc field for the FP compare instructions.
-void Decoder::PrintCc(Instruction* instr) {
- int cc = instr->FCccValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
-}
-
-
-// Print 16-bit unsigned immediate value.
-void Decoder::PrintUImm16(Instruction* instr) {
- int32_t imm = instr->Imm16Value();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
-}
-
-
-// Print 16-bit signed immediate value.
-void Decoder::PrintSImm16(Instruction* instr) {
- int32_t imm = ((instr->Imm16Value()) << 16) >> 16;
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
-}
-
-
-// Print 16-bit hexa immediate value.
-void Decoder::PrintXImm16(Instruction* instr) {
- int32_t imm = instr->Imm16Value();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
-}
-
-
-// Print 26-bit immediate value.
-void Decoder::PrintXImm26(Instruction* instr) {
- uint32_t imm = instr->Imm26Value() << kImmFieldShift;
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
-}
-
-
-// Print 26-bit immediate value.
-void Decoder::PrintCode(Instruction* instr) {
- if (instr->OpcodeFieldRaw() != SPECIAL)
- return; // Not a break or trap instruction.
- switch (instr->FunctionFieldRaw()) {
- case BREAK: {
- int32_t code = instr->Bits(25, 6);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "0x%05x (%d)", code, code);
- break;
- }
- case TGE:
- case TGEU:
- case TLT:
- case TLTU:
- case TEQ:
- case TNE: {
- int32_t code = instr->Bits(15, 6);
- out_buffer_pos_ +=
- OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
- break;
- }
- default: // Not a break or trap instruction.
- break;
- };
-}
-
-
-// Printing of instruction name.
-void Decoder::PrintInstructionName(Instruction* instr) {
-}
-
-
-// Handle all register based formatting in this function to reduce the
-// complexity of FormatOption.
-int Decoder::FormatRegister(Instruction* instr, const char* format) {
- ASSERT(format[0] == 'r');
- if (format[1] == 's') { // 'rs: Rs register.
- int reg = instr->RsValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 't') { // 'rt: rt register.
- int reg = instr->RtValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 'd') { // 'rd: rd register.
- int reg = instr->RdValue();
- PrintRegister(reg);
- return 2;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-// Handle all FPUregister based formatting in this function to reduce the
-// complexity of FormatOption.
-int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
- ASSERT(format[0] == 'f');
- if (format[1] == 's') { // 'fs: fs register.
- int reg = instr->FsValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 't') { // 'ft: ft register.
- int reg = instr->FtValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 'd') { // 'fd: fd register.
- int reg = instr->FdValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 'r') { // 'fr: fr register.
- int reg = instr->FrValue();
- PrintFPURegister(reg);
- return 2;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-// FormatOption takes a formatting string and interprets it based on
-// the current instructions. The format string points to the first
-// character of the option string (the option escape has already been
-// consumed by the caller.) FormatOption returns the number of
-// characters that were consumed from the formatting string.
-int Decoder::FormatOption(Instruction* instr, const char* format) {
- switch (format[0]) {
- case 'c': { // 'code for break or trap instructions.
- ASSERT(STRING_STARTS_WITH(format, "code"));
- PrintCode(instr);
- return 4;
- }
- case 'i': { // 'imm16u or 'imm26.
- if (format[3] == '1') {
- ASSERT(STRING_STARTS_WITH(format, "imm16"));
- if (format[5] == 's') {
- ASSERT(STRING_STARTS_WITH(format, "imm16s"));
- PrintSImm16(instr);
- } else if (format[5] == 'u') {
- ASSERT(STRING_STARTS_WITH(format, "imm16u"));
- PrintSImm16(instr);
- } else {
- ASSERT(STRING_STARTS_WITH(format, "imm16x"));
- PrintXImm16(instr);
- }
- return 6;
- } else {
- ASSERT(STRING_STARTS_WITH(format, "imm26x"));
- PrintXImm26(instr);
- return 6;
- }
- }
- case 'r': { // 'r: registers.
- return FormatRegister(instr, format);
- }
- case 'f': { // 'f: FPUregisters.
- return FormatFPURegister(instr, format);
- }
- case 's': { // 'sa.
- switch (format[1]) {
- case 'a': {
- ASSERT(STRING_STARTS_WITH(format, "sa"));
- PrintSa(instr);
- return 2;
- }
- case 'd': {
- ASSERT(STRING_STARTS_WITH(format, "sd"));
- PrintSd(instr);
- return 2;
- }
- case 's': {
- if (format[2] == '1') {
- ASSERT(STRING_STARTS_WITH(format, "ss1")); /* ext size */
- PrintSs1(instr);
- return 3;
- } else {
- ASSERT(STRING_STARTS_WITH(format, "ss2")); /* ins size */
- PrintSs2(instr);
- return 3;
- }
- }
- }
- }
- case 'b': { // 'bc - Special for bc1 cc field.
- ASSERT(STRING_STARTS_WITH(format, "bc"));
- PrintBc(instr);
- return 2;
- }
- case 'C': { // 'Cc - Special for c.xx.d cc field.
- ASSERT(STRING_STARTS_WITH(format, "Cc"));
- PrintCc(instr);
- return 2;
- }
- };
- UNREACHABLE();
- return -1;
-}
-
-
-// Format takes a formatting string for a whole instruction and prints it into
-// the output buffer. All escaped options are handed to FormatOption to be
-// parsed further.
-void Decoder::Format(Instruction* instr, const char* format) {
- char cur = *format++;
- while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
- if (cur == '\'') { // Single quote is used as the formatting escape.
- format += FormatOption(instr, format);
- } else {
- out_buffer_[out_buffer_pos_++] = cur;
- }
- cur = *format++;
- }
- out_buffer_[out_buffer_pos_] = '\0';
-}
-
-
-// For currently unimplemented decodings the disassembler calls Unknown(instr)
-// which will just print "unknown" of the instruction bits.
-void Decoder::Unknown(Instruction* instr) {
- Format(instr, "unknown");
-}
-
-
-void Decoder::DecodeTypeRegister(Instruction* instr) {
- switch (instr->OpcodeFieldRaw()) {
- case COP1: // Coprocessor instructions.
- switch (instr->RsFieldRaw()) {
- case BC1: // bc1 handled in DecodeTypeImmediate.
- UNREACHABLE();
- break;
- case MFC1:
- Format(instr, "mfc1 'rt, 'fs");
- break;
- case MFHC1:
- Format(instr, "mfhc1 'rt, 'fs");
- break;
- case MTC1:
- Format(instr, "mtc1 'rt, 'fs");
- break;
- // These are called "fs" too, although they are not FPU registers.
- case CTC1:
- Format(instr, "ctc1 'rt, 'fs");
- break;
- case CFC1:
- Format(instr, "cfc1 'rt, 'fs");
- break;
- case MTHC1:
- Format(instr, "mthc1 'rt, 'fs");
- break;
- case D:
- switch (instr->FunctionFieldRaw()) {
- case ADD_D:
- Format(instr, "add.d 'fd, 'fs, 'ft");
- break;
- case SUB_D:
- Format(instr, "sub.d 'fd, 'fs, 'ft");
- break;
- case MUL_D:
- Format(instr, "mul.d 'fd, 'fs, 'ft");
- break;
- case DIV_D:
- Format(instr, "div.d 'fd, 'fs, 'ft");
- break;
- case ABS_D:
- Format(instr, "abs.d 'fd, 'fs");
- break;
- case MOV_D:
- Format(instr, "mov.d 'fd, 'fs");
- break;
- case NEG_D:
- Format(instr, "neg.d 'fd, 'fs");
- break;
- case SQRT_D:
- Format(instr, "sqrt.d 'fd, 'fs");
- break;
- case CVT_W_D:
- Format(instr, "cvt.w.d 'fd, 'fs");
- break;
- case CVT_L_D: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "cvt.l.d 'fd, 'fs");
- } else {
- Unknown(instr);
- }
- break;
- }
- case TRUNC_W_D:
- Format(instr, "trunc.w.d 'fd, 'fs");
- break;
- case TRUNC_L_D: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "trunc.l.d 'fd, 'fs");
- } else {
- Unknown(instr);
- }
- break;
- }
- case ROUND_W_D:
- Format(instr, "round.w.d 'fd, 'fs");
- break;
- case FLOOR_W_D:
- Format(instr, "floor.w.d 'fd, 'fs");
- break;
- case CEIL_W_D:
- Format(instr, "ceil.w.d 'fd, 'fs");
- break;
- case CVT_S_D:
- Format(instr, "cvt.s.d 'fd, 'fs");
- break;
- case C_F_D:
- Format(instr, "c.f.d 'fs, 'ft, 'Cc");
- break;
- case C_UN_D:
- Format(instr, "c.un.d 'fs, 'ft, 'Cc");
- break;
- case C_EQ_D:
- Format(instr, "c.eq.d 'fs, 'ft, 'Cc");
- break;
- case C_UEQ_D:
- Format(instr, "c.ueq.d 'fs, 'ft, 'Cc");
- break;
- case C_OLT_D:
- Format(instr, "c.olt.d 'fs, 'ft, 'Cc");
- break;
- case C_ULT_D:
- Format(instr, "c.ult.d 'fs, 'ft, 'Cc");
- break;
- case C_OLE_D:
- Format(instr, "c.ole.d 'fs, 'ft, 'Cc");
- break;
- case C_ULE_D:
- Format(instr, "c.ule.d 'fs, 'ft, 'Cc");
- break;
- default:
- Format(instr, "unknown.cop1.d");
- break;
- }
- break;
- case S:
- UNIMPLEMENTED_MIPS();
- break;
- case W:
- switch (instr->FunctionFieldRaw()) {
- case CVT_S_W: // Convert word to float (single).
- Format(instr, "cvt.s.w 'fd, 'fs");
- break;
- case CVT_D_W: // Convert word to double.
- Format(instr, "cvt.d.w 'fd, 'fs");
- break;
- default:
- UNREACHABLE();
- }
- break;
- case L:
- switch (instr->FunctionFieldRaw()) {
- case CVT_D_L: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "cvt.d.l 'fd, 'fs");
- } else {
- Unknown(instr);
- }
- break;
- }
- case CVT_S_L: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "cvt.s.l 'fd, 'fs");
- } else {
- Unknown(instr);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- break;
- case PS:
- UNIMPLEMENTED_MIPS();
- break;
- default:
- UNREACHABLE();
- }
- break;
- case COP1X:
- switch (instr->FunctionFieldRaw()) {
- case MADD_D:
- Format(instr, "madd.d 'fd, 'fr, 'fs, 'ft");
- break;
- default:
- UNREACHABLE();
- };
- break;
- case SPECIAL:
- switch (instr->FunctionFieldRaw()) {
- case JR:
- Format(instr, "jr 'rs");
- break;
- case JALR:
- Format(instr, "jalr 'rs");
- break;
- case SLL:
- if ( 0x0 == static_cast<int>(instr->InstructionBits()))
- Format(instr, "nop");
- else
- Format(instr, "sll 'rd, 'rt, 'sa");
- break;
- case SRL:
- if (instr->RsValue() == 0) {
- Format(instr, "srl 'rd, 'rt, 'sa");
- } else {
- if (kArchVariant == kMips32r2) {
- Format(instr, "rotr 'rd, 'rt, 'sa");
- } else {
- Unknown(instr);
- }
- }
- break;
- case SRA:
- Format(instr, "sra 'rd, 'rt, 'sa");
- break;
- case SLLV:
- Format(instr, "sllv 'rd, 'rt, 'rs");
- break;
- case SRLV:
- if (instr->SaValue() == 0) {
- Format(instr, "srlv 'rd, 'rt, 'rs");
- } else {
- if (kArchVariant == kMips32r2) {
- Format(instr, "rotrv 'rd, 'rt, 'rs");
- } else {
- Unknown(instr);
- }
- }
- break;
- case SRAV:
- Format(instr, "srav 'rd, 'rt, 'rs");
- break;
- case MFHI:
- Format(instr, "mfhi 'rd");
- break;
- case MFLO:
- Format(instr, "mflo 'rd");
- break;
- case MULT:
- Format(instr, "mult 'rs, 'rt");
- break;
- case MULTU:
- Format(instr, "multu 'rs, 'rt");
- break;
- case DIV:
- Format(instr, "div 'rs, 'rt");
- break;
- case DIVU:
- Format(instr, "divu 'rs, 'rt");
- break;
- case ADD:
- Format(instr, "add 'rd, 'rs, 'rt");
- break;
- case ADDU:
- Format(instr, "addu 'rd, 'rs, 'rt");
- break;
- case SUB:
- Format(instr, "sub 'rd, 'rs, 'rt");
- break;
- case SUBU:
- Format(instr, "subu 'rd, 'rs, 'rt");
- break;
- case AND:
- Format(instr, "and 'rd, 'rs, 'rt");
- break;
- case OR:
- if (0 == instr->RsValue()) {
- Format(instr, "mov 'rd, 'rt");
- } else if (0 == instr->RtValue()) {
- Format(instr, "mov 'rd, 'rs");
- } else {
- Format(instr, "or 'rd, 'rs, 'rt");
- }
- break;
- case XOR:
- Format(instr, "xor 'rd, 'rs, 'rt");
- break;
- case NOR:
- Format(instr, "nor 'rd, 'rs, 'rt");
- break;
- case SLT:
- Format(instr, "slt 'rd, 'rs, 'rt");
- break;
- case SLTU:
- Format(instr, "sltu 'rd, 'rs, 'rt");
- break;
- case BREAK:
- Format(instr, "break, code: 'code");
- break;
- case TGE:
- Format(instr, "tge 'rs, 'rt, code: 'code");
- break;
- case TGEU:
- Format(instr, "tgeu 'rs, 'rt, code: 'code");
- break;
- case TLT:
- Format(instr, "tlt 'rs, 'rt, code: 'code");
- break;
- case TLTU:
- Format(instr, "tltu 'rs, 'rt, code: 'code");
- break;
- case TEQ:
- Format(instr, "teq 'rs, 'rt, code: 'code");
- break;
- case TNE:
- Format(instr, "tne 'rs, 'rt, code: 'code");
- break;
- case MOVZ:
- Format(instr, "movz 'rd, 'rs, 'rt");
- break;
- case MOVN:
- Format(instr, "movn 'rd, 'rs, 'rt");
- break;
- case MOVCI:
- if (instr->Bit(16)) {
- Format(instr, "movt 'rd, 'rs, 'bc");
- } else {
- Format(instr, "movf 'rd, 'rs, 'bc");
- }
- break;
- default:
- UNREACHABLE();
- }
- break;
- case SPECIAL2:
- switch (instr->FunctionFieldRaw()) {
- case MUL:
- Format(instr, "mul 'rd, 'rs, 'rt");
- break;
- case CLZ:
- Format(instr, "clz 'rd, 'rs");
- break;
- default:
- UNREACHABLE();
- }
- break;
- case SPECIAL3:
- switch (instr->FunctionFieldRaw()) {
- case INS: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
- } else {
- Unknown(instr);
- }
- break;
- }
- case EXT: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
- } else {
- Unknown(instr);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void Decoder::DecodeTypeImmediate(Instruction* instr) {
- switch (instr->OpcodeFieldRaw()) {
- // ------------- REGIMM class.
- case COP1:
- switch (instr->RsFieldRaw()) {
- case BC1:
- if (instr->FBtrueValue()) {
- Format(instr, "bc1t 'bc, 'imm16u");
- } else {
- Format(instr, "bc1f 'bc, 'imm16u");
- }
- break;
- default:
- UNREACHABLE();
- };
- break; // Case COP1.
- case REGIMM:
- switch (instr->RtFieldRaw()) {
- case BLTZ:
- Format(instr, "bltz 'rs, 'imm16u");
- break;
- case BLTZAL:
- Format(instr, "bltzal 'rs, 'imm16u");
- break;
- case BGEZ:
- Format(instr, "bgez 'rs, 'imm16u");
- break;
- case BGEZAL:
- Format(instr, "bgezal 'rs, 'imm16u");
- break;
- default:
- UNREACHABLE();
- }
- break; // Case REGIMM.
- // ------------- Branch instructions.
- case BEQ:
- Format(instr, "beq 'rs, 'rt, 'imm16u");
- break;
- case BNE:
- Format(instr, "bne 'rs, 'rt, 'imm16u");
- break;
- case BLEZ:
- Format(instr, "blez 'rs, 'imm16u");
- break;
- case BGTZ:
- Format(instr, "bgtz 'rs, 'imm16u");
- break;
- // ------------- Arithmetic instructions.
- case ADDI:
- Format(instr, "addi 'rt, 'rs, 'imm16s");
- break;
- case ADDIU:
- Format(instr, "addiu 'rt, 'rs, 'imm16s");
- break;
- case SLTI:
- Format(instr, "slti 'rt, 'rs, 'imm16s");
- break;
- case SLTIU:
- Format(instr, "sltiu 'rt, 'rs, 'imm16u");
- break;
- case ANDI:
- Format(instr, "andi 'rt, 'rs, 'imm16x");
- break;
- case ORI:
- Format(instr, "ori 'rt, 'rs, 'imm16x");
- break;
- case XORI:
- Format(instr, "xori 'rt, 'rs, 'imm16x");
- break;
- case LUI:
- Format(instr, "lui 'rt, 'imm16x");
- break;
- // ------------- Memory instructions.
- case LB:
- Format(instr, "lb 'rt, 'imm16s('rs)");
- break;
- case LH:
- Format(instr, "lh 'rt, 'imm16s('rs)");
- break;
- case LWL:
- Format(instr, "lwl 'rt, 'imm16s('rs)");
- break;
- case LW:
- Format(instr, "lw 'rt, 'imm16s('rs)");
- break;
- case LBU:
- Format(instr, "lbu 'rt, 'imm16s('rs)");
- break;
- case LHU:
- Format(instr, "lhu 'rt, 'imm16s('rs)");
- break;
- case LWR:
- Format(instr, "lwr 'rt, 'imm16s('rs)");
- break;
- case SB:
- Format(instr, "sb 'rt, 'imm16s('rs)");
- break;
- case SH:
- Format(instr, "sh 'rt, 'imm16s('rs)");
- break;
- case SWL:
- Format(instr, "swl 'rt, 'imm16s('rs)");
- break;
- case SW:
- Format(instr, "sw 'rt, 'imm16s('rs)");
- break;
- case SWR:
- Format(instr, "swr 'rt, 'imm16s('rs)");
- break;
- case LWC1:
- Format(instr, "lwc1 'ft, 'imm16s('rs)");
- break;
- case LDC1:
- Format(instr, "ldc1 'ft, 'imm16s('rs)");
- break;
- case SWC1:
- Format(instr, "swc1 'ft, 'imm16s('rs)");
- break;
- case SDC1:
- Format(instr, "sdc1 'ft, 'imm16s('rs)");
- break;
- default:
- UNREACHABLE();
- break;
- };
-}
-
-
-void Decoder::DecodeTypeJump(Instruction* instr) {
- switch (instr->OpcodeFieldRaw()) {
- case J:
- Format(instr, "j 'imm26x");
- break;
- case JAL:
- Format(instr, "jal 'imm26x");
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-// Disassemble the instruction at *instr_ptr into the output buffer.
-int Decoder::InstructionDecode(byte* instr_ptr) {
- Instruction* instr = Instruction::At(instr_ptr);
- // Print raw instruction bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
- instr->InstructionBits());
- switch (instr->InstructionType()) {
- case Instruction::kRegisterType: {
- DecodeTypeRegister(instr);
- break;
- }
- case Instruction::kImmediateType: {
- DecodeTypeImmediate(instr);
- break;
- }
- case Instruction::kJumpType: {
- DecodeTypeJump(instr);
- break;
- }
- default: {
- Format(instr, "UNSUPPORTED");
- UNSUPPORTED_MIPS();
- }
- }
- return Instruction::kInstrSize;
-}
-
-
-} } // namespace v8::internal
-
-
-
-//------------------------------------------------------------------------------
-
-namespace disasm {
-
-const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfConstant(byte* addr) const {
- return NameOfAddress(addr);
-}
-
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::Registers::Name(reg);
-}
-
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
- return v8::internal::FPURegisters::Name(reg);
-}
-
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
- UNREACHABLE(); // MIPS does not have the concept of a byte register.
- return "nobytereg";
-}
-
-
-const char* NameConverter::NameInCode(byte* addr) const {
- // The default name converter is called for unknown code. So we will not try
- // to access any memory.
- return "";
-}
-
-
-//------------------------------------------------------------------------------
-
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte* instruction) {
- v8::internal::Decoder d(converter_, buffer);
- return d.InstructionDecode(instruction);
-}
-
-
-// The MIPS assembler does not currently use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte* instruction) {
- return -1;
-}
-
-
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
- NameConverter converter;
- Disassembler d(converter);
- for (byte* pc = begin; pc < end;) {
- v8::internal::EmbeddedVector<char, 128> buffer;
- buffer[0] = '\0';
- byte* prev_pc = pc;
- pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p %08x %s\n",
- prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
- }
-}
-
-
-#undef UNSUPPORTED
-
-} // namespace disasm
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/frames-mips.cc b/src/3rdparty/v8/src/mips/frames-mips.cc
deleted file mode 100644
index faaa0e0..0000000
--- a/src/3rdparty/v8/src/mips/frames-mips.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "frames-inl.h"
-#include "mips/assembler-mips-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/frames-mips.h b/src/3rdparty/v8/src/mips/frames-mips.h
deleted file mode 100644
index 188e7d1..0000000
--- a/src/3rdparty/v8/src/mips/frames-mips.h
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#ifndef V8_MIPS_FRAMES_MIPS_H_
-#define V8_MIPS_FRAMES_MIPS_H_
-
-namespace v8 {
-namespace internal {
-
-// Register lists.
-// Note that the bit values must match those used in actual instruction
-// encoding.
-const int kNumRegs = 32;
-
-const RegList kJSCallerSaved =
- 1 << 2 | // v0
- 1 << 3 | // v1
- 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7 | // a3
- 1 << 8 | // t0
- 1 << 9 | // t1
- 1 << 10 | // t2
- 1 << 11 | // t3
- 1 << 12 | // t4
- 1 << 13 | // t5
- 1 << 14 | // t6
- 1 << 15; // t7
-
-const int kNumJSCallerSaved = 14;
-
-
-// Return the code of the n-th caller-saved register available to JavaScript
-// e.g. JSCallerSavedReg(0) returns a0.code() == 4.
-int JSCallerSavedCode(int n);
-
-
-// Callee-saved registers preserved when switching from C to JavaScript.
-const RegList kCalleeSaved =
- 1 << 16 | // s0
- 1 << 17 | // s1
- 1 << 18 | // s2
- 1 << 19 | // s3
- 1 << 20 | // s4
- 1 << 21 | // s5
- 1 << 22 | // s6 (roots in Javascript code)
- 1 << 23 | // s7 (cp in Javascript code)
- 1 << 30; // fp/s8
-
-const int kNumCalleeSaved = 9;
-
-const RegList kCalleeSavedFPU =
- 1 << 20 | // f20
- 1 << 22 | // f22
- 1 << 24 | // f24
- 1 << 26 | // f26
- 1 << 28 | // f28
- 1 << 30; // f30
-
-const int kNumCalleeSavedFPU = 6;
-
-const RegList kCallerSavedFPU =
- 1 << 0 | // f0
- 1 << 2 | // f2
- 1 << 4 | // f4
- 1 << 6 | // f6
- 1 << 8 | // f8
- 1 << 10 | // f10
- 1 << 12 | // f12
- 1 << 14 | // f14
- 1 << 16 | // f16
- 1 << 18; // f18
-
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-const int kNumSafepointRegisters = 24;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters =
- kNumJSCallerSaved + kNumCalleeSaved;
-
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
-const int kUndefIndex = -1;
-// Map with indexes on stack that corresponds to codes of saved registers.
-const int kSafepointRegisterStackIndexMap[kNumRegs] = {
- kUndefIndex, // zero_reg
- kUndefIndex, // at
- 0, // v0
- 1, // v1
- 2, // a0
- 3, // a1
- 4, // a2
- 5, // a3
- 6, // t0
- 7, // t1
- 8, // t2
- 9, // t3
- 10, // t4
- 11, // t5
- 12, // t6
- 13, // t7
- 14, // s0
- 15, // s1
- 16, // s2
- 17, // s3
- 18, // s4
- 19, // s5
- 20, // s6
- 21, // s7
- kUndefIndex, // t8
- kUndefIndex, // t9
- kUndefIndex, // k0
- kUndefIndex, // k1
- kUndefIndex, // gp
- kUndefIndex, // sp
- 22, // fp
- kUndefIndex
-};
-
-
-// ----------------------------------------------------
-
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
-class EntryFrameConstants : public AllStatic {
- public:
- static const int kCallerFPOffset = -3 * kPointerSize;
-};
-
-
-class ExitFrameConstants : public AllStatic {
- public:
- // See some explanation in MacroAssembler::EnterExitFrame.
- // This marks the top of the extra allocated stack space.
- static const int kStackSpaceOffset = -3 * kPointerSize;
-
- static const int kCodeOffset = -2 * kPointerSize;
-
- static const int kSPOffset = -1 * kPointerSize;
-
- // The caller fields are below the frame pointer on the stack.
- static const int kCallerFPOffset = +0 * kPointerSize;
- // The calling JS function is between FP and PC.
- static const int kCallerPCOffset = +1 * kPointerSize;
-
- // MIPS-specific: a pointer to the old sp to avoid unnecessary calculations.
- static const int kCallerSPOffset = +2 * kPointerSize;
-
- // FP-relative displacement of the caller's SP.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
-};
-
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
-
- // Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
-};
-
-
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-} } // namespace v8::internal
-
-#endif
diff --git a/src/3rdparty/v8/src/mips/full-codegen-mips.cc b/src/3rdparty/v8/src/mips/full-codegen-mips.cc
deleted file mode 100644
index 9173422..0000000
--- a/src/3rdparty/v8/src/mips/full-codegen-mips.cc
+++ /dev/null
@@ -1,4645 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-// Note on Mips implementation:
-//
-// The result_register() for mips is the 'v0' register, which is defined
-// by the ABI to contain function return values. However, the first
-// parameter to a function is defined to be 'a0'. So there are many
-// places where we have to move a previous result in v0 to a0 for the
-// next call: mov(a0, v0). This is not needed on the other architectures.
-
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-#include "mips/code-stubs-mips.h"
-#include "mips/macro-assembler-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-
-// A patch site is a location in the code which it is possible to patch. This
-// class has a number of methods to emit the code which is patchable and the
-// method EmitPatchInfo to record a marker back to the patchable code. This
-// marker is a andi zero_reg, rx, #yyyy instruction, and rx * 0x0000ffff + yyyy
-// (raw 16 bit immediate value is used) is the delta from the pc to the first
-// instruction of the patchable code.
-// The marker instruction is effectively a NOP (dest is zero_reg) and will
-// never be emitted by normal code.
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- ASSERT(patch_site_.is_bound() == info_emitted_);
- }
-
- // When initially emitting this ensure that a jump is always generated to skip
- // the inlined smi code.
- void EmitJumpIfNotSmi(Register reg, Label* target) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- __ bind(&patch_site_);
- __ andi(at, reg, 0);
- // Always taken before patched.
- __ Branch(target, eq, at, Operand(zero_reg));
- }
-
- // When initially emitting this ensure that a jump is never generated to skip
- // the inlined smi code.
- void EmitJumpIfSmi(Register reg, Label* target) {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- __ bind(&patch_site_);
- __ andi(at, reg, 0);
- // Never taken before patched.
- __ Branch(target, ne, at, Operand(zero_reg));
- }
-
- void EmitPatchInfo() {
- if (patch_site_.is_bound()) {
- int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
- Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
- __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- } else {
- __ nop(); // Signals no inlined code.
- }
- }
-
- private:
- MacroAssembler* masm_;
- Label patch_site_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
-
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right. The actual
-// argument count matches the formal parameter count expected by the
-// function.
-//
-// The live registers are:
-// o a1: the JS function object being called (i.e. ourselves)
-// o cp: our context
-// o fp: our caller's frame pointer
-// o sp: stack pointer
-// o ra: return address
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-mips.h for its layout.
-void FullCodeGenerator::Generate() {
- CompilationInfo* info = info_;
- handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
- Comment cmnt(masm_, "[ function compiled by full code generator");
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop-at");
- }
-#endif
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). t1 is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
- Label ok;
- __ Branch(&ok, eq, t1, Operand(zero_reg));
- int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ sw(a2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
- }
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- int locals_count = info->scope()->num_stack_slots();
-
- info->set_prologue_offset(masm_->pc_offset());
- // The following three instructions must remain together and unmodified for
- // code aging to work properly.
- __ Push(ra, fp, cp, a1);
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- // Adjust fp to point to caller's fp.
- __ Addu(fp, sp, Operand(2 * kPointerSize));
-
- { Comment cmnt(masm_, "[ Allocate locals");
- for (int i = 0; i < locals_count; i++) {
- __ push(at);
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment cmnt(masm_, "[ Allocate context");
- // Argument to NewContext is the function, which is still in a1.
- __ push(a1);
- if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
- __ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- function_in_register = false;
- // Context is returned in both v0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ lw(a0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
- __ sw(a0, target);
-
- // Update the write barrier.
- __ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
- }
- }
- }
-
- Variable* arguments = scope()->arguments();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
- // Load this again, if it's used by the local context below.
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(a3, a1);
- }
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ Addu(a2, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ li(a1, Operand(Smi::FromInt(num_parameters)));
- __ Push(a3, a2, a1);
-
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
- }
- ArgumentsAccessStub stub(type);
- __ CallStub(&stub);
-
- SetVar(arguments, v0, a1, a2);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ Declarations");
- scope()->VisitIllegalRedeclaration(this);
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
- ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
- VisitVariableDeclaration(function);
- }
- VisitDeclarations(scope()->declarations());
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(t0));
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
- }
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- }
- EmitReturnSequence();
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- ASSERT(Smi::FromInt(0) == 0);
- __ mov(v0, zero_reg);
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ li(a2, Operand(profiling_counter_));
- __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
- __ Subu(a3, a3, Operand(Smi::FromInt(delta)));
- __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
- int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing: if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- if (isolate()->IsDebuggerActive()) {
- // Detect debug break requests as soon as possible.
- reset_value = FLAG_interrupt_budget >> 4;
- }
- __ li(a2, Operand(profiling_counter_));
- __ li(a3, Operand(Smi::FromInt(reset_value)));
- __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-}
-
-
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
- // to make sure it is constant. Branch may emit a skip-or-jump sequence
- // instead of the normal Branch. It seems that the "skip" part of that
- // sequence is about as long as this Branch would be so it is safe to ignore
- // that.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Comment cmnt(masm_, "[ Back edge bookkeeping");
- Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ slt(at, a3, zero_reg);
- __ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- InterruptStub stub;
- __ CallStub(&stub);
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
- EmitProfilingCounterReset();
-
- __ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ Branch(&return_label_);
- } else {
- __ bind(&return_label_);
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in v0.
- __ push(v0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ Branch(&ok, ge, a3, Operand(zero_reg));
- __ push(v0);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ lw(a2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(a2);
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- InterruptStub stub;
- __ CallStub(&stub);
- }
- __ pop(v0);
- EmitProfilingCounterReset();
- __ bind(&ok);
- }
-
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- // Make sure that the constant pool is not emitted inside of the return
- // sequence.
- { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
- int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
- __ RecordJSReturn();
- masm_->mov(sp, fp);
- masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
- masm_->Addu(sp, sp, Operand(sp_delta));
- masm_->Jump(ra);
- }
-
-#ifdef DEBUG
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceInstructions <=
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (index == Heap::kUndefinedValueRootIndex ||
- index == Heap::kNullValueRootIndex ||
- index == Heap::kFalseValueRootIndex) {
- if (false_label_ != fall_through_) __ Branch(false_label_);
- } else if (index == Heap::kTrueValueRootIndex) {
- if (true_label_ != fall_through_) __ Branch(true_label_);
- } else {
- __ LoadRoot(result_register(), index);
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- __ li(result_register(), Operand(lit));
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- // Immediates cannot be pushed directly.
- __ li(result_register(), Operand(lit));
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
- if (false_label_ != fall_through_) __ Branch(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ Branch(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ Branch(false_label_);
- } else {
- if (true_label_ != fall_through_) __ Branch(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ Branch(false_label_);
- } else {
- if (true_label_ != fall_through_) __ Branch(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ li(result_register(), Operand(lit));
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- if (count > 1) __ Drop(count - 1);
- __ sw(reg, MemOperand(sp, 0));
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == materialize_false);
- __ bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- __ Branch(&done);
- __ bind(materialize_false);
- __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ push(at);
- __ Branch(&done);
- __ bind(materialize_false);
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- __ push(at);
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == true_label_);
- ASSERT(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(result_register(), value_root_index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(at, value_root_index);
- __ push(at);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) __ Branch(true_label_);
- } else {
- if (false_label_ != fall_through_) __ Branch(false_label_);
- }
-}
-
-
-void FullCodeGenerator::DoTest(Expression* condition,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (CpuFeatures::IsSupported(FPU)) {
- ToBooleanStub stub(result_register());
- __ CallStub(&stub, condition->test_id());
- __ mov(at, zero_reg);
- } else {
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(result_register());
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- }
- Split(ne, v0, Operand(at), if_true, if_false, fall_through);
-}
-
-
-void FullCodeGenerator::Split(Condition cc,
- Register lhs,
- const Operand& rhs,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ Branch(if_true, cc, lhs, rhs);
- } else if (if_true == fall_through) {
- __ Branch(if_false, NegateCondition(cc), lhs, rhs);
- } else {
- __ Branch(if_true, cc, lhs, rhs);
- __ Branch(if_false);
- }
-}
-
-
-MemOperand FullCodeGenerator::StackOperand(Variable* var) {
- ASSERT(var->IsStackAllocated());
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -var->index() * kPointerSize;
- // Adjust by a (parameter or local) base offset.
- if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
- } else {
- offset += JavaScriptFrameConstants::kLocal0Offset;
- }
- return MemOperand(fp, offset);
-}
-
-
-MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- if (var->IsContextSlot()) {
- int context_chain_length = scope()->ContextChainLength(var->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
- } else {
- return StackOperand(var);
- }
-}
-
-
-void FullCodeGenerator::GetVar(Register dest, Variable* var) {
- // Use destination as scratch.
- MemOperand location = VarOperand(var, dest);
- __ lw(dest, location);
-}
-
-
-void FullCodeGenerator::SetVar(Variable* var,
- Register src,
- Register scratch0,
- Register scratch1) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- ASSERT(!scratch0.is(src));
- ASSERT(!scratch0.is(scratch1));
- ASSERT(!scratch1.is(src));
- MemOperand location = VarOperand(var, scratch0);
- __ sw(src, location);
- // Emit the write barrier code if the location is in the heap.
- if (var->IsContextSlot()) {
- __ RecordWriteContextSlot(scratch0,
- location.offset(),
- src,
- scratch1,
- kRAHasBeenSaved,
- kDontSaveFPRegs);
- }
-}
-
-
// Records a bailout point for |expr| before control flow is split into
// if_true/if_false. When |should_normalize| is set, the value in a0 is
// re-tested against the true root after a deopt so the split sees a
// normalized boolean; the &skip label bypasses that code in the normal
// (non-deopt) path.
void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
                                                     bool should_normalize,
                                                     Label* if_true,
                                                     Label* if_false) {
  // Only prepare for bailouts before splits if we're in a test
  // context. Otherwise, we let the Visit function deal with the
  // preparation to avoid preparing with the same AST id twice.
  if (!context()->IsTest() || !info_->IsOptimizable()) return;

  Label skip;
  if (should_normalize) __ Branch(&skip);
  PrepareForBailout(expr, TOS_REG);
  if (should_normalize) {
    __ LoadRoot(t0, Heap::kTrueValueRootIndex);
    Split(eq, a0, Operand(t0), if_true, if_false, NULL);
    __ bind(&skip);
  }
}
-
-
// Debug-mode sanity check used by the declaration visitors: asserts at
// compile time that |variable| lives in the current function context, and
// (when generating debug code) emits runtime checks that cp is not a
// with- or catch-context, whose slots must not receive declarations.
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
  // The variable in the declaration always resides in the current function
  // context.
  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
  if (generate_debug_code_) {
    // Check that we're not inside a with or catch context by comparing
    // the context's map against the two special context maps.
    __ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
    __ LoadRoot(t0, Heap::kWithContextMapRootIndex);
    __ Check(ne, "Declaration in with context.",
        a1, Operand(t0));
    __ LoadRoot(t0, Heap::kCatchContextMapRootIndex);
    __ Check(ne, "Declaration in catch context.",
        a1, Operand(t0));
  }
}
-
-
// Emits code (or records global data) for a variable declaration,
// dispatching on where the variable was allocated:
//  - UNALLOCATED: appended to globals_ (name, initial value, and the QML
//    global flag — a Qt-specific third entry) for a later DeclareGlobals.
//  - PARAMETER/LOCAL: hole-initialized in the stack slot when the mode
//    requires it (const/let).
//  - CONTEXT: hole-initialized in the context slot; no write barrier
//    needed since the hole lives in old space.
//  - LOOKUP: declared dynamically via Runtime::kDeclareContextSlot.
void FullCodeGenerator::VisitVariableDeclaration(
    VariableDeclaration* declaration) {
  // If it was not possible to allocate the variable at compile time, we
  // need to "declare" it at runtime to make sure it actually exists in the
  // local context.
  VariableProxy* proxy = declaration->proxy();
  VariableMode mode = declaration->mode();
  Variable* variable = proxy->var();
  // const/let bindings start out holding the hole value.
  bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
  switch (variable->location()) {
    case Variable::UNALLOCATED:
      globals_->Add(variable->name(), zone());
      globals_->Add(variable->binding_needs_init()
                        ? isolate()->factory()->the_hole_value()
                        : isolate()->factory()->undefined_value(),
                    zone());
      globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
                    zone());
      break;

    case Variable::PARAMETER:
    case Variable::LOCAL:
      if (hole_init) {
        Comment cmnt(masm_, "[ VariableDeclaration");
        __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
        __ sw(t0, StackOperand(variable));
      }
      break;

    case Variable::CONTEXT:
      if (hole_init) {
        Comment cmnt(masm_, "[ VariableDeclaration");
        EmitDebugCheckDeclarationContext(variable);
        __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
        __ sw(at, ContextOperand(cp, variable->index()));
        // No write barrier since the_hole_value is in old space.
        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
      }
      break;

    case Variable::LOOKUP: {
      Comment cmnt(masm_, "[ VariableDeclaration");
      __ li(a2, Operand(variable->name()));
      // Declaration nodes are always introduced in one of four modes.
      ASSERT(IsDeclaredVariableMode(mode));
      PropertyAttributes attr =
          IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
      __ li(a1, Operand(Smi::FromInt(attr)));
      // Push initial value, if any.
      // Note: For variables we must not push an initial value (such as
      // 'undefined') because we may have a (legal) redeclaration and we
      // must not destroy the current value.
      if (hole_init) {
        __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
        __ Push(cp, a2, a1, a0);
      } else {
        ASSERT(Smi::FromInt(0) == 0);
        __ mov(a0, zero_reg);  // Smi::FromInt(0) indicates no initial value.
        __ Push(cp, a2, a1, a0);
      }
      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
      break;
    }
  }
}
-
-
// Emits code (or records global data) for a function declaration. Unlike
// plain variable declarations, the function value is always materialized
// and stored immediately; the CONTEXT case needs a full write barrier
// because a freshly allocated closure may live in new space.
void FullCodeGenerator::VisitFunctionDeclaration(
    FunctionDeclaration* declaration) {
  VariableProxy* proxy = declaration->proxy();
  Variable* variable = proxy->var();
  switch (variable->location()) {
    case Variable::UNALLOCATED: {
      // Globals: record name, SharedFunctionInfo, and the QML global flag
      // (Qt-specific third entry) for a later DeclareGlobals call.
      globals_->Add(variable->name(), zone());
      Handle<SharedFunctionInfo> function =
          Compiler::BuildFunctionInfo(declaration->fun(), script());
      // Check for stack-overflow exception.
      if (function.is_null()) return SetStackOverflow();
      globals_->Add(function, zone());
      globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
                    zone());
      break;
    }

    case Variable::PARAMETER:
    case Variable::LOCAL: {
      Comment cmnt(masm_, "[ FunctionDeclaration");
      VisitForAccumulatorValue(declaration->fun());
      __ sw(result_register(), StackOperand(variable));
      break;
    }

    case Variable::CONTEXT: {
      Comment cmnt(masm_, "[ FunctionDeclaration");
      EmitDebugCheckDeclarationContext(variable);
      VisitForAccumulatorValue(declaration->fun());
      __ sw(result_register(), ContextOperand(cp, variable->index()));
      int offset = Context::SlotOffset(variable->index());
      // We know that we have written a function, which is not a smi.
      __ RecordWriteContextSlot(cp,
                                offset,
                                result_register(),
                                a2,
                                kRAHasBeenSaved,
                                kDontSaveFPRegs,
                                EMIT_REMEMBERED_SET,
                                OMIT_SMI_CHECK);
      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
      break;
    }

    case Variable::LOOKUP: {
      Comment cmnt(masm_, "[ FunctionDeclaration");
      __ li(a2, Operand(variable->name()));
      __ li(a1, Operand(Smi::FromInt(NONE)));
      __ Push(cp, a2, a1);
      // Push initial value for function declaration.
      VisitForStackValue(declaration->fun());
      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
      break;
    }
  }
}
-
-
// Emits code for a (harmony) module declaration: loads the module's
// instance object from the global scope's context via the interface
// index, stores it into this declaration's context slot (with a write
// barrier — the instance is a heap object, never a smi), then compiles
// the module body.
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
  Variable* variable = declaration->proxy()->var();
  ASSERT(variable->location() == Variable::CONTEXT);
  ASSERT(variable->interface()->IsFrozen());

  Comment cmnt(masm_, "[ ModuleDeclaration");
  EmitDebugCheckDeclarationContext(variable);

  // Load instance object.
  __ LoadContext(a1, scope_->ContextChainLength(scope_->GlobalScope()));
  __ lw(a1, ContextOperand(a1, variable->interface()->Index()));
  __ lw(a1, ContextOperand(a1, Context::EXTENSION_INDEX));

  // Assign it.
  __ sw(a1, ContextOperand(cp, variable->index()));
  // We know that we have written a module, which is not a smi.
  __ RecordWriteContextSlot(cp,
                            Context::SlotOffset(variable->index()),
                            a1,
                            a3,
                            kRAHasBeenSaved,
                            kDontSaveFPRegs,
                            EMIT_REMEMBERED_SET,
                            OMIT_SMI_CHECK);
  PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);

  // Traverse into body.
  Visit(declaration->module());
}
-
-
// Placeholder for (harmony) import declarations — codegen was not yet
// implemented at this V8 revision (see the TODOs). Only validates the
// declaration context; PARAMETER/LOCAL/LOOKUP locations cannot occur for
// imports.
void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
  VariableProxy* proxy = declaration->proxy();
  Variable* variable = proxy->var();
  switch (variable->location()) {
    case Variable::UNALLOCATED:
      // TODO(rossberg)
      break;

    case Variable::CONTEXT: {
      Comment cmnt(masm_, "[ ImportDeclaration");
      EmitDebugCheckDeclarationContext(variable);
      // TODO(rossberg)
      break;
    }

    case Variable::PARAMETER:
    case Variable::LOCAL:
    case Variable::LOOKUP:
      UNREACHABLE();
  }
}
-
-
// Placeholder for (harmony) export declarations — intentionally emits
// nothing; codegen was not yet implemented at this V8 revision.
void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
  // TODO(rossberg)
}
-
-
// Declares all deferred global variables/functions in one runtime call.
// |pairs| is the flat array accumulated in globals_ by the declaration
// visitors above (name/value entries plus the QML global flags).
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  // Call the runtime to declare the globals.
  // The context is the first argument.
  __ li(a1, Operand(pairs));
  __ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
  __ Push(cp, a1, a0);
  __ CallRuntime(Runtime::kDeclareGlobals, 3);
  // Return value is ignored.
}
-
-
// Declares all modules described in |descriptions| with a single call to
// Runtime::kDeclareModules.
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
  // Call the runtime to declare the modules.
  __ Push(descriptions);
  __ CallRuntime(Runtime::kDeclareModules, 1);
  // Return value is ignored.
}
-
-
// Compiles a switch statement. The tag value stays on the stack while a
// chain of per-clause comparisons is emitted (smi fast path inline,
// otherwise a patched CompareIC for '===' feedback); matching jumps to the
// clause's body target after dropping the tag. Bodies are emitted in a
// second pass so clauses can fall through in source order.
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
  Comment cmnt(masm_, "[ SwitchStatement");
  Breakable nested_statement(this, stmt);
  SetStatementPosition(stmt);

  // Keep the switch value on the stack until a case matches.
  VisitForStackValue(stmt->tag());
  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);

  ZoneList<CaseClause*>* clauses = stmt->cases();
  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.

  Label next_test;  // Recycled for each test.
  // Compile all the tests with branches to their bodies.
  for (int i = 0; i < clauses->length(); i++) {
    CaseClause* clause = clauses->at(i);
    clause->body_target()->Unuse();

    // The default is not a test, but remember it as final fall through.
    if (clause->is_default()) {
      default_clause = clause;
      continue;
    }

    Comment cmnt(masm_, "[ Case comparison");
    __ bind(&next_test);
    next_test.Unuse();

    // Compile the label expression.
    VisitForAccumulatorValue(clause->label());
    __ mov(a0, result_register());  // CompareStub requires args in a0, a1.

    // Perform the comparison as if via '==='.
    __ lw(a1, MemOperand(sp, 0));  // Switch value.
    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
    JumpPatchSite patch_site(masm_);
    if (inline_smi_code) {
      // Fast path: if both operands are smis, compare them directly and
      // skip the IC entirely.
      Label slow_case;
      __ or_(a2, a1, a0);
      patch_site.EmitJumpIfNotSmi(a2, &slow_case);

      __ Branch(&next_test, ne, a1, Operand(a0));
      __ Drop(1);  // Switch value is no longer needed.
      __ Branch(clause->body_target());

      __ bind(&slow_case);
    }

    // Record position before stub call for type feedback.
    SetSourcePosition(clause->position());
    Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
    CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
    patch_site.EmitPatchInfo();

    // IC returns zero in v0 on equality.
    __ Branch(&next_test, ne, v0, Operand(zero_reg));
    __ Drop(1);  // Switch value is no longer needed.
    __ Branch(clause->body_target());
  }

  // Discard the test value and jump to the default if present, otherwise to
  // the end of the statement.
  __ bind(&next_test);
  __ Drop(1);  // Switch value is no longer needed.
  if (default_clause == NULL) {
    __ Branch(nested_statement.break_label());
  } else {
    __ Branch(default_clause->body_target());
  }

  // Compile all the case bodies.
  for (int i = 0; i < clauses->length(); i++) {
    Comment cmnt(masm_, "[ Case body");
    CaseClause* clause = clauses->at(i);
    __ bind(clause->body_target());
    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
    VisitStatements(clause->statements());
  }

  __ bind(nested_statement.break_label());
  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
}
-
-
// Compiles a for-in loop. The generated code keeps five slots on the
// stack for the duration of the loop (from bottom to top): the enumerable
// object, the map or smi(0/1) marking the fast/slow/proxy case, the
// FixedArray of keys (or enum cache), its length, and the current index.
// Fast path: a valid enum cache on the receiver's map. Slow path: the
// runtime returns either a map (fast modification check) or a fixed array
// of names, with each key re-validated via FILTER_KEY while iterating.
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
  Comment cmnt(masm_, "[ ForInStatement");
  SetStatementPosition(stmt);

  Label loop, exit;
  ForIn loop_statement(this, stmt);
  increment_loop_depth();

  // Get the object to enumerate over. Both SpiderMonkey and JSC
  // ignore null and undefined in contrast to the specification; see
  // ECMA-262 section 12.6.4.
  VisitForAccumulatorValue(stmt->enumerable());
  __ mov(a0, result_register());  // Result as param to InvokeBuiltin below.
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&exit, eq, a0, Operand(at));
  Register null_value = t1;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ Branch(&exit, eq, a0, Operand(null_value));
  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
  __ mov(a0, v0);
  // Convert the object to a JS object.
  Label convert, done_convert;
  __ JumpIfSmi(a0, &convert);
  __ GetObjectType(a0, a1, a1);
  __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ bind(&convert);
  __ push(a0);
  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
  __ mov(a0, v0);
  __ bind(&done_convert);
  __ push(a0);

  // Check for proxies.
  Label call_runtime;
  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ GetObjectType(a0, a1, a1);
  __ Branch(&call_runtime, le, a1, Operand(LAST_JS_PROXY_TYPE));

  // Check cache validity in generated code. This is a fast case for
  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
  // guarantee cache validity, call the runtime system to check cache
  // validity or get the property names in a fixed array.
  __ CheckEnumCache(null_value, &call_runtime);

  // The enum cache is valid. Load the map of the object being
  // iterated over and use the cache for the iteration.
  Label use_cache;
  __ lw(v0, FieldMemOperand(a0, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(a0);  // Duplicate the enumerable object on the stack.
  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);

  // If we got a map from the runtime call, we can do a fast
  // modification check. Otherwise, we got a fixed array, and we have
  // to do a slow check.
  Label fixed_array;
  __ lw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  __ Branch(&fixed_array, ne, a2, Operand(at));

  // We got a map in register v0. Get the enumeration cache from it.
  Label no_descriptors;
  __ bind(&use_cache);

  __ EnumLength(a1, v0);
  __ Branch(&no_descriptors, eq, a1, Operand(Smi::FromInt(0)));

  __ LoadInstanceDescriptors(v0, a2);
  __ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset));
  __ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));

  // Set up the four remaining stack slots.
  __ push(v0);  // Map.
  __ li(a0, Operand(Smi::FromInt(0)));
  // Push enumeration cache, enumeration cache length (as smi) and zero.
  __ Push(a2, a1, a0);
  __ jmp(&loop);

  __ bind(&no_descriptors);
  __ Drop(1);
  __ jmp(&exit);

  // We got a fixed array in register v0. Iterate through that.
  Label non_proxy;
  __ bind(&fixed_array);

  // Record that this for-in took the slow path in the type feedback cell
  // so the optimizing compiler does not assume the fast case.
  Handle<JSGlobalPropertyCell> cell =
      isolate()->factory()->NewJSGlobalPropertyCell(
          Handle<Object>(
              Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
              isolate()));
  RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
  __ LoadHeapObject(a1, cell);
  __ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
  __ sw(a2, FieldMemOperand(a1, JSGlobalPropertyCell::kValueOffset));

  __ li(a1, Operand(Smi::FromInt(1)));  // Smi indicates slow check
  __ lw(a2, MemOperand(sp, 0 * kPointerSize));  // Get enumerated object
  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ GetObjectType(a2, a3, a3);
  __ Branch(&non_proxy, gt, a3, Operand(LAST_JS_PROXY_TYPE));
  __ li(a1, Operand(Smi::FromInt(0)));  // Zero indicates proxy
  __ bind(&non_proxy);
  __ Push(a1, v0);  // Smi and array
  __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
  __ li(a0, Operand(Smi::FromInt(0)));
  __ Push(a1, a0);  // Fixed array length (as smi) and initial index.

  // Generate code for doing the condition check.
  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
  __ bind(&loop);
  // Load the current count to a0, load the length to a1.
  __ lw(a0, MemOperand(sp, 0 * kPointerSize));
  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
  // Unsigned compare: exit the loop once index >= length.
  __ Branch(loop_statement.break_label(), hs, a0, Operand(a1));

  // Get the current entry of the array into register a3.
  __ lw(a2, MemOperand(sp, 2 * kPointerSize));
  __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
  __ addu(t0, a2, t0);  // Array base + scaled (smi) index.
  __ lw(a3, MemOperand(t0));  // Current entry.

  // Get the expected map from the stack or a smi in the
  // permanent slow case into register a2.
  __ lw(a2, MemOperand(sp, 3 * kPointerSize));

  // Check if the expected map still matches that of the enumerable.
  // If not, we may have to filter the key.
  Label update_each;
  __ lw(a1, MemOperand(sp, 4 * kPointerSize));
  __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
  __ Branch(&update_each, eq, t0, Operand(a2));

  // For proxies, no filtering is done.
  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
  ASSERT_EQ(Smi::FromInt(0), 0);
  __ Branch(&update_each, eq, a2, Operand(zero_reg));

  // Convert the entry to a string or (smi) 0 if it isn't a property
  // any more. If the property has been removed while iterating, we
  // just skip it.
  __ push(a1);  // Enumerable.
  __ push(a3);  // Current entry.
  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
  __ mov(a3, result_register());
  __ Branch(loop_statement.continue_label(), eq, a3, Operand(zero_reg));

  // Update the 'each' property or variable from the possibly filtered
  // entry in register a3.
  __ bind(&update_each);
  __ mov(result_register(), a3);
  // Perform the assignment as if via '='.
  { EffectContext context(this);
    EmitAssignment(stmt->each());
  }

  // Generate code for the body of the loop.
  Visit(stmt->body());

  // Generate code for the going to the next element by incrementing
  // the index (smi) stored on top of the stack.
  __ bind(loop_statement.continue_label());
  __ pop(a0);
  __ Addu(a0, a0, Operand(Smi::FromInt(1)));
  __ push(a0);

  EmitBackEdgeBookkeeping(stmt, &loop);
  __ Branch(&loop);

  // Remove the pointers stored on the stack.
  __ bind(loop_statement.break_label());
  __ Drop(5);

  // Exit and decrement the loop depth.
  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
  __ bind(&exit);
  decrement_loop_depth();
}
-
-
// Creates a closure for |info| and plugs it into the current expression
// context (result in v0). Simple nested function-scope closures without
// literals use the fast allocation stub; everything else (pretenured
// closures, --always-opt builds) goes through Runtime::kNewClosure.
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                       bool pretenure) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning. If
  // we're running with the --always-opt or the --prepare-always-opt
  // flag, we need to use the runtime function so that the new function
  // we are creating here gets a chance to have its code optimized and
  // doesn't just get a copy of the existing unoptimized code.
  if (!FLAG_always_opt &&
      !FLAG_prepare_always_opt &&
      !pretenure &&
      scope()->is_function_scope() &&
      info->num_literals() == 0) {
    FastNewClosureStub stub(info->language_mode());
    __ li(a0, Operand(info));
    __ push(a0);
    __ CallStub(&stub);
  } else {
    __ li(a0, Operand(info));
    __ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
                              : Heap::kFalseValueRootIndex);
    __ Push(cp, a0, a1);
    __ CallRuntime(Runtime::kNewClosure, 3);
  }
  context()->Plug(v0);
}
-
-
// A variable reference compiles to a plain variable load.
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
  Comment cmnt(masm_, "[ VariableProxy");
  EmitVariableLoad(expr);
}
-
-
// Fast path for loading a DYNAMIC_GLOBAL variable: walks the context
// chain verifying that no enclosing context has an extension object
// (which an eval could have introduced, shadowing the global), then loads
// the variable with a global LoadIC. Jumps to |slow| on any non-NULL
// extension. The final context is carried in |current|/|next| so cp is
// never clobbered.
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
                                                      TypeofState typeof_state,
                                                      Label* slow) {
  Register current = cp;
  Register next = a1;
  Register temp = a2;

  Scope* s = scope();
  while (s != NULL) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_non_strict_eval()) {
        // Check that extension is NULL.
        __ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
        __ Branch(slow, ne, temp, Operand(zero_reg));
      }
      // Load next context in chain.
      __ lw(next, ContextOperand(current, Context::PREVIOUS_INDEX));
      // Walk the rest of the chain without clobbering cp.
      current = next;
    }
    // If no outer scope calls eval, we do not need to check more
    // context extensions.
    if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
    s = s->outer_scope();
  }

  if (s->is_eval_scope()) {
    // The remaining chain is only known at runtime: loop up to the native
    // context, checking each extension slot along the way.
    Label loop, fast;
    if (!current.is(next)) {
      __ Move(next, current);
    }
    __ bind(&loop);
    // Terminate at native context.
    __ lw(temp, FieldMemOperand(next, HeapObject::kMapOffset));
    __ LoadRoot(t0, Heap::kNativeContextMapRootIndex);
    __ Branch(&fast, eq, temp, Operand(t0));
    // Check that extension is NULL.
    __ lw(temp, ContextOperand(next, Context::EXTENSION_INDEX));
    __ Branch(slow, ne, temp, Operand(zero_reg));
    // Load next context in chain.
    __ lw(next, ContextOperand(next, Context::PREVIOUS_INDEX));
    __ Branch(&loop);
    __ bind(&fast);
  }

  // Load via a global LoadIC; QML globals use their own receiver object.
  __ lw(a0, var->is_qml_global()
      ? QmlGlobalObjectOperand()
      : GlobalObjectOperand());
  __ li(a2, Operand(var->name()));
  // Inside typeof, an uninitialized global must not throw, hence the
  // different reloc mode.
  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
      ? RelocInfo::CODE_TARGET
      : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallIC(ic, mode);
}
-
-
// Returns a memory operand for |var|'s context slot, emitting extension
// checks (jump to |slow| on a non-NULL extension) for every context on
// the way from the current scope to the variable's scope. Used for the
// DYNAMIC_LOCAL fast path.
MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
                                                                Label* slow) {
  ASSERT(var->IsContextSlot());
  Register context = cp;
  Register next = a3;
  Register temp = t0;

  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_non_strict_eval()) {
        // Check that extension is NULL.
        __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
        __ Branch(slow, ne, temp, Operand(zero_reg));
      }
      __ lw(next, ContextOperand(context, Context::PREVIOUS_INDEX));
      // Walk the rest of the chain without clobbering cp.
      context = next;
    }
  }
  // Check that last extension is NULL.
  __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
  __ Branch(slow, ne, temp, Operand(zero_reg));

  // This function is used only for loads, not stores, so it's safe to
  // return a cp-based operand (the write barrier cannot be allowed to
  // destroy the cp register).
  return ContextOperand(context, var->index());
}
-
-
// Emits the fast-case load for a dynamically-looked-up variable and jumps
// to |done| on success or |slow| when an eval-introduced extension may
// shadow it. DYNAMIC_GLOBAL falls through to a global IC load;
// DYNAMIC_LOCAL loads the unshadowed local and applies the hole check
// required for let/const bindings.
void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
                                                  TypeofState typeof_state,
                                                  Label* slow,
                                                  Label* done) {
  // Generate fast-case code for variables that might be shadowed by
  // eval-introduced variables. Eval is used a lot without
  // introducing variables. In those cases, we do not want to
  // perform a runtime call for all variables in the scope
  // containing the eval.
  if (var->mode() == DYNAMIC_GLOBAL) {
    EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
    __ Branch(done);
  } else if (var->mode() == DYNAMIC_LOCAL) {
    Variable* local = var->local_if_not_shadowed();
    __ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
    if (local->mode() == LET ||
        local->mode() == CONST ||
        local->mode() == CONST_HARMONY) {
      __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
      __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
      if (local->mode() == CONST) {
        // Classic-mode const: an uninitialized value reads as undefined.
        __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
        __ Movz(v0, a0, at);  // Conditional move: return Undefined if TheHole.
      } else {  // LET || CONST_HARMONY
        // Harmony let/const: reading before initialization throws.
        __ Branch(done, ne, at, Operand(zero_reg));
        __ li(a0, Operand(var->name()));
        __ push(a0);
        __ CallRuntime(Runtime::kThrowReferenceError, 1);
      }
    }
    __ Branch(done);
  }
}
-
-
// Loads the value of the variable referenced by |proxy| into the current
// expression context. Globals go through a LoadIC; stack/context
// variables load directly (with a hole check for let/const bindings that
// may be read before initialization); LOOKUP variables try the dynamic
// fast case before falling back to Runtime::kLoadContextSlot.
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
  // Record position before possible IC call.
  SetSourcePosition(proxy->position());
  Variable* var = proxy->var();

  // Three cases: global variables, lookup variables, and all other types of
  // variables.
  switch (var->location()) {
    case Variable::UNALLOCATED: {
      Comment cmnt(masm_, "Global variable");
      // Use inline caching. Variable name is passed in a2 and the global
      // object (receiver) in a0.
      __ lw(a0, var->is_qml_global()
          ? QmlGlobalObjectOperand()
          : GlobalObjectOperand());
      __ li(a2, Operand(var->name()));
      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
      CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
      context()->Plug(v0);
      break;
    }

    case Variable::PARAMETER:
    case Variable::LOCAL:
    case Variable::CONTEXT: {
      Comment cmnt(masm_, var->IsContextSlot()
                              ? "Context variable"
                              : "Stack variable");
      if (var->binding_needs_init()) {
        // var->scope() may be NULL when the proxy is located in eval code and
        // refers to a potential outside binding. Currently those bindings are
        // always looked up dynamically, i.e. in that case
        //     var->location() == LOOKUP.
        // always holds.
        ASSERT(var->scope() != NULL);

        // Check if the binding really needs an initialization check. The check
        // can be skipped in the following situation: we have a LET or CONST
        // binding in harmony mode, both the Variable and the VariableProxy have
        // the same declaration scope (i.e. they are both in global code, in the
        // same function or in the same eval code) and the VariableProxy is in
        // the source physically located after the initializer of the variable.
        //
        // We cannot skip any initialization checks for CONST in non-harmony
        // mode because const variables may be declared but never initialized:
        //   if (false) { const x; }; var y = x;
        //
        // The condition on the declaration scopes is a conservative check for
        // nested functions that access a binding and are called before the
        // binding is initialized:
        //   function() { f(); let x = 1; function f() { x = 2; } }
        //
        bool skip_init_check;
        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
          skip_init_check = false;
        } else {
          // Check that we always have valid source position.
          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
          ASSERT(proxy->position() != RelocInfo::kNoPosition);
          skip_init_check = var->mode() != CONST &&
              var->initializer_position() < proxy->position();
        }

        if (!skip_init_check) {
          // Let and const need a read barrier.
          GetVar(v0, var);
          __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
          __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
            // Throw a reference error when using an uninitialized let/const
            // binding in harmony mode.
            Label done;
            __ Branch(&done, ne, at, Operand(zero_reg));
            __ li(a0, Operand(var->name()));
            __ push(a0);
            __ CallRuntime(Runtime::kThrowReferenceError, 1);
            __ bind(&done);
          } else {
            // Uninitialized const bindings outside of harmony mode are unholed.
            ASSERT(var->mode() == CONST);
            __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
            __ Movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
          }
          context()->Plug(v0);
          break;
        }
      }
      context()->Plug(var);
      break;
    }

    case Variable::LOOKUP: {
      Label done, slow;
      // Generate code for loading from variables potentially shadowed
      // by eval-introduced variables.
      EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
      __ bind(&slow);
      Comment cmnt(masm_, "Lookup variable");
      __ li(a1, Operand(var->name()));
      __ Push(cp, a1);  // Context and name.
      __ CallRuntime(Runtime::kLoadContextSlot, 2);
      __ bind(&done);
      context()->Plug(v0);
    }
  }
}
-
-
// Compiles a regexp literal. The materialized RegExp object is cached in
// the function's literals array; on first execution it is created via the
// runtime. Each evaluation then produces a fresh shallow copy of the
// cached object (allocated inline, with a runtime fallback).
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
  Comment cmnt(masm_, "[ RegExpLiteral");
  Label materialized;
  // Registers will be used as follows:
  // t1 = materialized value (RegExp literal)
  // t0 = JS function, literals array
  // a3 = literal index
  // a2 = RegExp pattern
  // a1 = RegExp flags
  // a0 = RegExp literal clone
  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(t0, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
  int literal_offset =
      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
  __ lw(t1, FieldMemOperand(t0, literal_offset));
  // An undefined literal slot means the RegExp has not been created yet.
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&materialized, ne, t1, Operand(at));

  // Create regexp literal using runtime function.
  // Result will be in v0.
  __ li(a3, Operand(Smi::FromInt(expr->literal_index())));
  __ li(a2, Operand(expr->pattern()));
  __ li(a1, Operand(expr->flags()));
  __ Push(t0, a3, a2, a1);
  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
  __ mov(t1, v0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  // Inline allocation failed; ask the runtime, preserving t1 across it.
  __ push(t1);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ push(a0);
  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
  __ pop(t1);

  __ bind(&allocated);

  // After this, registers are used as follows:
  // v0: Newly allocated regexp.
  // t1: Materialized regexp.
  // a2: temp.
  __ CopyFields(v0, t1, a2.bit(), size / kPointerSize);
  context()->Plug(v0);
}
-
-
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
- if (expression == NULL) {
- __ LoadRoot(a1, Heap::kNullValueRootIndex);
- __ push(a1);
- } else {
- VisitForStackValue(expression);
- }
-}
-
-
// Compiles an object literal: clones the boilerplate (fast stub, shallow
// runtime clone, or full runtime create depending on depth/size), then
// emits stores for each non-compile-time property. Getter/setter pairs
// are collected in an AccessorTable so each name needs only one
// DefineOrRedefineAccessorProperty call.
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
  Comment cmnt(masm_, "[ ObjectLiteral");
  Handle<FixedArray> constant_properties = expr->constant_properties();
  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
  __ li(a1, Operand(constant_properties));
  int flags = expr->fast_elements()
      ? ObjectLiteral::kFastElements
      : ObjectLiteral::kNoFlags;
  flags |= expr->has_function()
      ? ObjectLiteral::kHasFunction
      : ObjectLiteral::kNoFlags;
  __ li(a0, Operand(Smi::FromInt(flags)));
  int properties_count = constant_properties->length() / 2;
  if (expr->depth() > 1) {
    // Nested literals need a deep copy by the runtime.
    __ Push(a3, a2, a1, a0);
    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
  } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
    __ Push(a3, a2, a1, a0);
    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
  } else {
    // Small, flat, fast-elements literal: inline clone stub.
    FastCloneShallowObjectStub stub(properties_count);
    __ CallStub(&stub);
  }

  // If result_saved is true the result is on top of the stack. If
  // result_saved is false the result is in v0.
  bool result_saved = false;

  // Mark all computed expressions that are bound to a key that
  // is shadowed by a later occurrence of the same key. For the
  // marked expressions, no store code is emitted.
  expr->CalculateEmitStore(zone());

  AccessorTable accessor_table(zone());
  for (int i = 0; i < expr->properties()->length(); i++) {
    ObjectLiteral::Property* property = expr->properties()->at(i);
    if (property->IsCompileTimeValue()) continue;

    Literal* key = property->key();
    Expression* value = property->value();
    if (!result_saved) {
      __ push(v0);  // Save result on stack.
      result_saved = true;
    }
    switch (property->kind()) {
      case ObjectLiteral::Property::CONSTANT:
        UNREACHABLE();
      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
        ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
        // Fall through.
      case ObjectLiteral::Property::COMPUTED:
        if (key->handle()->IsInternalizedString()) {
          if (property->emit_store()) {
            // Named property: store through a StoreIC with the object
            // (receiver) in a1, value in a0, and name in a2.
            VisitForAccumulatorValue(value);
            __ mov(a0, result_register());
            __ li(a2, Operand(key->handle()));
            __ lw(a1, MemOperand(sp));
            Handle<Code> ic = is_classic_mode()
                ? isolate()->builtins()->StoreIC_Initialize()
                : isolate()->builtins()->StoreIC_Initialize_Strict();
            CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
            PrepareForBailoutForId(key->id(), NO_REGISTERS);
          } else {
            // Shadowed key: evaluate the value for side effects only.
            VisitForEffect(value);
          }
          break;
        }
        // Fall through.
      case ObjectLiteral::Property::PROTOTYPE:
        // Duplicate receiver on stack.
        __ lw(a0, MemOperand(sp));
        __ push(a0);
        VisitForStackValue(key);
        VisitForStackValue(value);
        if (property->emit_store()) {
          __ li(a0, Operand(Smi::FromInt(NONE)));  // PropertyAttributes.
          __ push(a0);
          __ CallRuntime(Runtime::kSetProperty, 4);
        } else {
          __ Drop(3);
        }
        break;
      case ObjectLiteral::Property::GETTER:
        accessor_table.lookup(key)->second->getter = value;
        break;
      case ObjectLiteral::Property::SETTER:
        accessor_table.lookup(key)->second->setter = value;
        break;
    }
  }

  // Emit code to define accessors, using only a single call to the runtime for
  // each pair of corresponding getters and setters.
  for (AccessorTable::Iterator it = accessor_table.begin();
       it != accessor_table.end();
       ++it) {
    __ lw(a0, MemOperand(sp));  // Duplicate receiver.
    __ push(a0);
    VisitForStackValue(it->first);
    EmitAccessor(it->second->getter);
    EmitAccessor(it->second->setter);
    __ li(a0, Operand(Smi::FromInt(NONE)));
    __ push(a0);
    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
  }

  if (expr->has_function()) {
    ASSERT(result_saved);
    __ lw(a0, MemOperand(sp));
    __ push(a0);
    __ CallRuntime(Runtime::kToFastProperties, 1);
  }

  if (result_saved) {
    context()->PlugTOS();
  } else {
    context()->Plug(v0);
  }
}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
-
- Handle<FixedArray> constant_elements = expr->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_fast_elements =
- IsFastObjectElementsKind(constant_elements_kind);
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(constant_elements->get(1)));
-
- __ mov(a0, result_register());
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
- __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
- __ li(a1, Operand(constant_elements));
- __ Push(a3, a2, a1);
- if (has_fast_elements && constant_elements_values->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
- length);
- __ CallStub(&stub);
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
- 1, a1, a2);
- } else if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
-
- if (has_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
-
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- __ CallStub(&stub);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(v0);
- result_saved = true;
- }
-
- VisitForAccumulatorValue(subexpr);
-
- if (IsFastObjectElementsKind(constant_elements_kind)) {
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ lw(t2, MemOperand(sp)); // Copy of array literal.
- __ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
- __ sw(result_register(), FieldMemOperand(a1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(a1, offset, result_register(), a2,
- kRAHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ lw(a1, MemOperand(sp)); // Copy of array literal.
- __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
- __ li(a3, Operand(Smi::FromInt(i)));
- __ li(t0, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(a0, result_register());
- StoreArrayLiteralElementStub stub;
- __ CallStub(&stub);
- }
-
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
- }
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(v0);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* property = expr->target()->AsProperty();
- if (property != NULL) {
- assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in the accumulator.
- VisitForAccumulatorValue(property->obj());
- __ push(result_register());
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case KEYED_PROPERTY:
- // We need the key and receiver on both the stack and in v0 and a1.
- if (expr->is_compound()) {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- __ lw(a1, MemOperand(sp, 0));
- __ push(v0);
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- break;
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- { AccumulatorValueContext context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- __ push(v0); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
- SetSourcePosition(expr->position() + 1);
- AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- mode,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(v0);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- __ mov(a0, result_register());
- __ li(a2, Operand(key->handle()));
- // Call load IC. It has arguments receiver and property name a0 and a2.
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- __ mov(a0, result_register());
- // Call keyed load IC. It has arguments key and receiver in a0 and a1.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left_expr,
- Expression* right_expr) {
- Label done, smi_case, stub_call;
-
- Register scratch1 = a2;
- Register scratch2 = a3;
-
- // Get the arguments.
- Register left = a1;
- Register right = a0;
- __ pop(left);
- __ mov(a0, result_register());
-
- // Perform combined smi check on both operands.
- __ Or(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(scratch1, &smi_case);
-
- __ bind(&stub_call);
- BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done);
-
- __ bind(&smi_case);
- // Smi case. This code works the same way as the smi-smi case in the type
- // recording binary operation stub, see
- // BinaryOpStub::GenerateSmiSmiOperation for comments.
- switch (op) {
- case Token::SAR:
- __ Branch(&stub_call);
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ srav(right, left, scratch1);
- __ And(v0, right, Operand(~kSmiTagMask));
- break;
- case Token::SHL: {
- __ Branch(&stub_call);
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ sllv(scratch1, scratch1, scratch2);
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
- __ SmiTag(v0, scratch1);
- break;
- }
- case Token::SHR: {
- __ Branch(&stub_call);
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ srlv(scratch1, scratch1, scratch2);
- __ And(scratch2, scratch1, 0xc0000000);
- __ Branch(&stub_call, ne, scratch2, Operand(zero_reg));
- __ SmiTag(v0, scratch1);
- break;
- }
- case Token::ADD:
- __ AdduAndCheckForOverflow(v0, left, right, scratch1);
- __ BranchOnOverflow(&stub_call, scratch1);
- break;
- case Token::SUB:
- __ SubuAndCheckForOverflow(v0, left, right, scratch1);
- __ BranchOnOverflow(&stub_call, scratch1);
- break;
- case Token::MUL: {
- __ SmiUntag(scratch1, right);
- __ Mult(left, scratch1);
- __ mflo(scratch1);
- __ mfhi(scratch2);
- __ sra(scratch1, scratch1, 31);
- __ Branch(&stub_call, ne, scratch1, Operand(scratch2));
- __ mflo(v0);
- __ Branch(&done, ne, v0, Operand(zero_reg));
- __ Addu(scratch2, right, left);
- __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
- ASSERT(Smi::FromInt(0) == 0);
- __ mov(v0, zero_reg);
- break;
- }
- case Token::BIT_OR:
- __ Or(v0, left, Operand(right));
- break;
- case Token::BIT_AND:
- __ And(v0, left, Operand(right));
- break;
- case Token::BIT_XOR:
- __ Xor(v0, left, Operand(right));
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
- __ mov(a0, result_register());
- __ pop(a1);
- BinaryOpStub stub(op, mode);
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
- break;
- }
- case NAMED_PROPERTY: {
- __ push(result_register()); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- __ mov(a1, result_register());
- __ pop(a0); // Restore value.
- __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- case KEYED_PROPERTY: {
- __ push(result_register()); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(a1, result_register());
- __ pop(a2);
- __ pop(a0); // Restore value.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- }
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- if (var->IsUnallocated()) {
- // Global var, const, or let.
- __ mov(a0, result_register());
- __ li(a2, Operand(var->name()));
- __ lw(a1, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
-
- } else if (op == Token::INIT_CONST) {
- // Const initializers need a write barrier.
- ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ lw(a1, StackOperand(var));
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Branch(&skip, ne, a1, Operand(t0));
- __ sw(result_register(), StackOperand(var));
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(v0);
- __ li(a0, Operand(var->name()));
- __ Push(cp, a0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- }
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
- // Non-initializing assignment to let variable needs a write barrier.
- if (var->IsLookupSlot()) {
- __ push(v0); // Value.
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, a1, a0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, a1);
- __ lw(a3, location);
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Branch(&assign, ne, a3, Operand(t0));
- __ li(a3, Operand(var->name()));
- __ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- // Perform the assignment.
- __ bind(&assign);
- __ sw(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(a3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
- }
- }
-
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
- // Assignment to var or initializing assignment to let/const
- // in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
- MemOperand location = VarOperand(var, a1);
- if (generate_debug_code_ && op == Token::INIT_LET) {
- // Check for an uninitialized let binding.
- __ lw(a2, location);
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Check(eq, "Let binding re-initialization.", a2, Operand(t0));
- }
- // Perform the assignment.
- __ sw(v0, location);
- if (var->IsContextSlot()) {
- __ mov(a3, v0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(v0); // Value.
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, a1, a0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- }
- // Non-initializing assignments to consts are ignored.
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ mov(a0, result_register()); // Load the value.
- __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
- __ pop(a1);
-
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- // Call keyed store IC.
- // The arguments are:
- // - a0 is the value,
- // - a1 is the key,
- // - a2 is the receiver.
- __ mov(a0, result_register());
- __ pop(a1); // Key.
- __ pop(a2);
-
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- EmitNamedPropertyLoad(expr);
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(v0);
- } else {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ pop(a1);
- EmitKeyedPropertyLoad(expr);
- context()->Plug(v0);
- }
-}
-
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId id) {
- ic_total_count_++;
- __ Call(code, rmode, id);
-}
-
-
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- __ li(a2, Operand(name));
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(a1);
- __ push(v0);
- __ push(a1);
-
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, v0); // Drop the key still on the stack.
-}
-
-
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
-
- // Record call targets.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ li(a2, Operand(cell));
-
- CallFunctionStub stub(arg_count, flags);
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, v0);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
- } else {
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- }
- __ push(a1);
-
- // Push the receiver of the enclosing function.
- int receiver_offset = 2 + info_->scope()->num_parameters();
- __ lw(a1, MemOperand(fp, receiver_offset * kPointerSize));
- __ push(a1);
- // Push the language mode.
- __ li(a1, Operand(Smi::FromInt(language_mode())));
- __ push(a1);
-
- // Push the start position of the scope the calls resides in.
- __ li(a1, Operand(Smi::FromInt(scope()->start_position())));
- __ push(a1);
-
- // Push the qml mode flag.
- __ li(a1, Operand(Smi::FromInt(is_qml_mode())));
- __ push(a1);
-
- // Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
-
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ push(a2); // Reserved receiver slot.
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(a1);
- EmitResolvePossiblyDirectEval(arg_count);
-
- // The runtime call returns a pair of values in v0 (function) and
- // v1 (receiver). Touch up the stack with the right values.
- __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ sw(v1, MemOperand(sp, arg_count * kPointerSize));
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, v0);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Push global object as receiver for the call IC.
- __ lw(a0, proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ push(a0);
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- // Call to a lookup slot (dynamically introduced variable).
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
- }
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in v0)
- // and the object holding it (returned in v1).
- __ push(context_register());
- __ li(a2, Operand(proxy->name()));
- __ push(a2);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ Push(v0, v1); // Function, receiver.
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ Branch(&call);
- __ bind(&done);
- // Push function.
- __ push(v0);
- // The receiver is implicitly the global receiver. Indicate this
- // by passing the hole to the call function stub.
- __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
- __ push(a1);
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found
- // by LoadContextSlot. That object could be the hole if the
- // receiver is implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
- RelocInfo::CODE_TARGET);
- } else {
- EmitKeyedCallWithIC(expr, property->key());
- }
- } else {
- // Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- }
- // Load global receiver object.
- __ lw(a1, GlobalObjectOperand());
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
- __ push(a1);
- // Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
- }
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- ASSERT(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function and argument count into a1 and a0.
- __ li(a0, Operand(arg_count));
- __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
-
- // Record call targets in unoptimized code.
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ li(a2, Operand(cell));
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ And(t0, v0, Operand(kSmiTagMask));
- Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ And(at, v0, Operand(kSmiTagMask | 0x80000000));
- Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(if_true, eq, v0, Operand(at));
- __ lw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ lbu(a1, FieldMemOperand(a2, Map::kBitFieldOffset));
- __ And(at, a1, Operand(1 << Map::kIsUndetectable));
- __ Branch(if_false, ne, at, Operand(zero_reg));
- __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
- if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
- if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
- __ And(at, a1, Operand(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(v0);
-
- __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
- __ And(t0, t0, 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ Branch(if_true, ne, t0, Operand(zero_reg));
-
- // Check for fast case object. Generate false result for slow case object.
- __ lw(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ LoadRoot(t0, Heap::kHashTableMapRootIndex);
- __ Branch(if_false, eq, a2, Operand(t0));
-
- // Look for valueOf name in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(a3, a1);
- __ Branch(&done, eq, a3, Operand(zero_reg));
-
- __ LoadInstanceDescriptors(a1, t0);
- // t0: descriptor array.
- // a3: valid entries in the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ li(at, Operand(DescriptorArray::kDescriptorSize));
- __ Mul(a3, a3, at);
- // Calculate location of the first key name.
- __ Addu(t0, t0, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
- // Calculate the end of the descriptor array.
- __ mov(a2, t0);
- __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a2, a2, t1);
-
- // Loop through all the keys in the descriptor array. If one of these is the
- // string "valueOf" the result is false.
- // The use of t2 to store the valueOf string assumes that it is not otherwise
- // used in the loop below.
- __ li(t2, Operand(FACTORY->value_of_string()));
- __ jmp(&entry);
- __ bind(&loop);
- __ lw(a3, MemOperand(t0, 0));
- __ Branch(if_false, eq, a3, Operand(t2));
- __ Addu(t0, t0, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ Branch(&loop, ne, t0, Operand(a2));
-
- __ bind(&done);
- // If a valueOf property is not found on the object check that its
- // prototype is the un-modified String prototype. If not result is false.
- __ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
- __ JumpIfSmi(a2, if_false);
- __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
- __ lw(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ Branch(if_false, ne, a2, Operand(a3));
-
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
- __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
- __ jmp(if_true);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, a2);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a2, Operand(SYMBOL_TYPE), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, a2);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
- __ Branch(if_false);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a1, Operand(JS_ARRAY_TYPE),
- if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ lw(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne,
- a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
- if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in a1 and the formal
- // parameter count in a0.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a1, v0);
- __ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
- Label exit;
- // Get the number of formal parameters.
- __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&exit, ne, a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ lw(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is a smi, we return null.
- __ JumpIfSmi(v0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ GetObjectType(v0, v0, a1); // Map is now in v0.
- __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE));
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
- // Check if the constructor in the map is a JS function.
- __ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
- __ GetObjectType(v0, a1, a1);
- __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
-
- // v0 now contains the constructor function. Grab the
- // instance class name from there.
- __ lw(v0, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(v0, FieldMemOperand(v0, SharedFunctionInfo::kInstanceClassNameOffset));
- __ Branch(&done);
-
- // Functions have class 'Function'.
- __ bind(&function);
- __ LoadRoot(v0, Heap::kfunction_class_stringRootIndex);
- __ jmp(&done);
-
- // Objects with a non-function constructor have class 'Object'.
- __ bind(&non_function_constructor);
- __ LoadRoot(v0, Heap::kObject_stringRootIndex);
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ LoadRoot(v0, Heap::kNullValueRootIndex);
-
- // All done.
- __ bind(&done);
-
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
-
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- // Save the new heap number in callee-saved register s0, since
- // we call out to external C code below.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(s0, a1, a2, t6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
-
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(s0, v0); // Save result in s0, so it is saved thru CFunc call.
-
- __ bind(&heapnumber_allocated);
-
- // Convert 32 random bits in v0 to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- if (CpuFeatures::IsSupported(FPU)) {
- __ PrepareCallCFunction(1, a0);
- __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- CpuFeatures::Scope scope(FPU);
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- __ li(a1, Operand(0x41300000));
- // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
- __ Move(f12, v0, a1);
- // Move 0x4130000000000000 to FPU.
- __ Move(f14, zero_reg, a1);
- // Subtract and store the result in the heap number.
- __ sub_d(f0, f12, f14);
- __ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
- __ mov(v0, s0);
- } else {
- __ PrepareCallCFunction(2, a0);
- __ mov(a0, s0);
- __ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
- __ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
- }
-
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(v0, &done);
- // If the object is not a value type, return the object.
- __ GetObjectType(v0, a1, a1);
- __ Branch(&done, ne, a1, Operand(JS_VALUE_TYPE));
-
- __ lw(v0, FieldMemOperand(v0, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label runtime, done, not_date_object;
- Register object = v0;
- Register result = v0;
- Register scratch0 = t5;
- Register scratch1 = a1;
-
- __ JumpIfSmi(object, &not_date_object);
- __ GetObjectType(object, scratch1, scratch1);
- __ Branch(&not_date_object, ne, scratch1, Operand(JS_DATE_TYPE));
-
- if (index->value() == 0) {
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ li(scratch1, Operand(stamp));
- __ lw(scratch1, MemOperand(scratch1));
- __ lw(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Branch(&runtime, ne, scratch1, Operand(scratch0));
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch1);
- __ li(a1, Operand(index));
- __ Move(a0, object);
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ jmp(&done);
- }
-
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(a2);
- __ pop(a1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(a2);
- __ pop(a1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- if (CpuFeatures::IsSupported(FPU)) {
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kMath_pow, 2);
- }
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(a1); // v0 = value. a1 = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(a1, &done);
-
- // If the object is not a value type, return the value.
- __ GetObjectType(a1, a2, a2);
- __ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE));
-
- // Store the value.
- __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ mov(a2, v0);
- __ RecordWriteField(
- a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
-
- NumberToStringStub stub;
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(v0, a1);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(a1);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ mov(a0, result_register());
-
- Register object = a1;
- Register index = a0;
- Register result = v0;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result, Heap::kNanValueRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Load the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ mov(a0, result_register());
-
- Register object = a1;
- Register index = a0;
- Register scratch = a3;
- Register result = v0;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ li(result, Operand(Smi::FromInt(0)));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(v0, &runtime);
- __ GetObjectType(v0, a1, a1);
- __ Branch(&runtime, ne, a1, Operand(JS_FUNCTION_TYPE));
-
- // InvokeFunction requires the function in a1. Move it in there.
- __ mov(a1, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(a1, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(v0);
- __ CallRuntime(Runtime::kCall, args->length());
- __ bind(&done);
-
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- context()->Plug(v0);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = v0;
- Register cache = a1;
- __ lw(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
- __ lw(cache,
- ContextOperand(
- cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ lw(cache,
- FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
-
- Label done, not_found;
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ lw(a2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
- // a2 now holds finger offset as a smi.
- __ Addu(a3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // a3 now points to the start of fixed array elements.
- __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize);
- __ addu(a3, a3, at);
- // a3 now points to key of indexed element of cache.
- __ lw(a2, MemOperand(a3));
- __ Branch(&not_found, ne, key, Operand(a2));
-
- __ lw(v0, MemOperand(a3, kPointerSize));
- __ Branch(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = v0;
- Register left = a1;
- Register tmp = a2;
- Register tmp2 = a3;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1)); // Result (right) in v0.
- __ pop(left);
-
- Label done, fail, ok;
- __ Branch(&ok, eq, left, Operand(right));
- // Fail if either is a non-HeapObject.
- __ And(tmp, left, Operand(right));
- __ JumpIfSmi(tmp, &fail);
- __ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
- __ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
- __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ Branch(&fail, ne, tmp, Operand(tmp2));
- __ lw(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
- __ lw(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
- __ Branch(&ok, eq, tmp, Operand(tmp2));
- __ bind(&fail);
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&ok);
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- __ bind(&done);
-
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
- __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(v0);
-
- __ lw(v0, FieldMemOperand(v0, String::kHashFieldOffset));
- __ IndexFromHash(v0, v0);
-
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
- one_char_separator_loop_entry, long_separator_loop;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(0));
-
- // All aliases of the same register have disjoint lifetimes.
- Register array = v0;
- Register elements = no_reg; // Will be v0.
- Register result = no_reg; // Will be v0.
- Register separator = a1;
- Register array_length = a2;
- Register result_pos = no_reg; // Will be a2.
- Register string_length = a3;
- Register string = t0;
- Register element = t1;
- Register elements_end = t2;
- Register scratch1 = t3;
- Register scratch2 = t5;
- Register scratch3 = t4;
-
- // Separator operand is on the stack.
- __ pop(separator);
-
- // Check that the array is a JSArray.
- __ JumpIfSmi(array, &bailout);
- __ GetObjectType(array, scratch1, scratch2);
- __ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE));
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch1, scratch2, &bailout);
-
- // If the array has length zero, return the empty string.
- __ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length);
- __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
- __ LoadRoot(v0, Heap::kempty_stringRootIndex);
- __ Branch(&done);
-
- __ bind(&non_trivial_array);
-
- // Get the FixedArray containing array's elements.
- elements = array;
- __ lw(elements, FieldMemOperand(array, JSArray::kElementsOffset));
- array = no_reg; // End of array's live range.
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ mov(string_length, zero_reg);
- __ Addu(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(elements_end, array_length, kPointerSizeLog2);
- __ Addu(elements_end, element, elements_end);
- // Loop condition: while (element < elements_end).
- // Live values in registers:
- // elements: Fixed array of strings.
- // array_length: Length of the fixed array of strings (not smi)
- // separator: Separator string
- // string_length: Accumulated sum of string lengths (smi).
- // element: Current array element.
- // elements_end: Array end.
- if (generate_debug_code_) {
- __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin",
- array_length, Operand(zero_reg));
- }
- __ bind(&loop);
- __ lw(string, MemOperand(element));
- __ Addu(element, element, kPointerSize);
- __ JumpIfSmi(string, &bailout);
- __ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
- __ BranchOnOverflow(&bailout, scratch3);
- __ Branch(&loop, lt, element, Operand(elements_end));
-
- // If array_length is 1, return elements[0], a string.
- __ Branch(&not_size_one_array, ne, array_length, Operand(1));
- __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Branch(&done);
-
- __ bind(&not_size_one_array);
-
- // Live values in registers:
- // separator: Separator string
- // array_length: Length of the array.
- // string_length: Sum of string lengths (smi).
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat ASCII string.
- __ JumpIfSmi(separator, &bailout);
- __ lw(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
-
- // Add (separator length times array_length) - separator length to the
- // string_length to get the length of the result string. array_length is not
- // smi but the other values are, so the result is a smi.
- __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ Subu(string_length, string_length, Operand(scratch1));
- __ Mult(array_length, scratch1);
- // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
- // zero.
- __ mfhi(scratch2);
- __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
- __ mflo(scratch2);
- __ And(scratch3, scratch2, Operand(0x80000000));
- __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
- __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
- __ BranchOnOverflow(&bailout, scratch3);
- __ SmiUntag(string_length);
-
- // Get first element in the array to free up the elements register to be used
- // for the result.
- __ Addu(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- result = elements; // End of live range for elements.
- elements = no_reg;
- // Live values in registers:
- // element: First array element
- // separator: Separator string
- // string_length: Length of result string (not smi)
- // array_length: Length of the array.
- __ AllocateAsciiString(result,
- string_length,
- scratch1,
- scratch2,
- elements_end,
- &bailout);
- // Prepare for looping. Set up elements_end to end of the array. Set
- // result_pos to the position of the result where to write the first
- // character.
- __ sll(elements_end, array_length, kPointerSizeLog2);
- __ Addu(elements_end, element, elements_end);
- result_pos = array_length; // End of live range for array_length.
- array_length = no_reg;
- __ Addu(result_pos,
- result,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // Check the length of the separator.
- __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ li(at, Operand(Smi::FromInt(1)));
- __ Branch(&one_char_separator, eq, scratch1, Operand(at));
- __ Branch(&long_separator, gt, scratch1, Operand(at));
-
- // Empty separator case.
- __ bind(&empty_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
-
- // Copy next array element to the result.
- __ lw(string, MemOperand(element));
- __ Addu(element, element, kPointerSize);
- __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(string, result_pos, string_length, scratch1);
- // End while (element < elements_end).
- __ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
- ASSERT(result.is(v0));
- __ Branch(&done);
-
- // One-character separator case.
- __ bind(&one_char_separator);
- // Replace separator with its ASCII character value.
- __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator.
- __ jmp(&one_char_separator_loop_entry);
-
- __ bind(&one_char_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Single separator ASCII char (in lower byte).
-
- // Copy the separator character to the result.
- __ sb(separator, MemOperand(result_pos));
- __ Addu(result_pos, result_pos, 1);
-
- // Copy next array element to the result.
- __ bind(&one_char_separator_loop_entry);
- __ lw(string, MemOperand(element));
- __ Addu(element, element, kPointerSize);
- __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(string, result_pos, string_length, scratch1);
- // End while (element < elements_end).
- __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
- ASSERT(result.is(v0));
- __ Branch(&done);
-
- // Long separator case (separator is more than one character). Entry is at the
- // label long_separator below.
- __ bind(&long_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Separator string.
-
- // Copy the separator to the result.
- __ lw(string_length, FieldMemOperand(separator, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ Addu(string,
- separator,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
-
- __ bind(&long_separator);
- __ lw(string, MemOperand(element));
- __ Addu(element, element, kPointerSize);
- __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(string, result_pos, string_length, scratch1);
- // End while (element < elements_end).
- __ Branch(&long_separator_loop, lt, element, Operand(elements_end));
- ASSERT(result.is(v0));
- __ Branch(&done);
-
- __ bind(&bailout);
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ lw(a0, GlobalObjectOperand());
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kBuiltinsOffset));
- __ push(a0);
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function.
- __ li(a2, Operand(expr->name()));
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- } else {
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- }
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* property = expr->expression()->AsProperty();
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
-
- if (property != NULL) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ li(a1, Operand(Smi::FromInt(strict_mode_flag)));
- __ push(a1);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(v0);
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
- if (var->IsUnallocated()) {
- __ lw(a2, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(kNonStrictMode)));
- __ Push(a2, a1, a0);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(v0);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(var->is_this());
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ li(a2, Operand(var->name()));
- __ push(a2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(v0);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
- break;
- }
-
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(Heap::kUndefinedValueRootIndex);
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else if (context()->IsTest()) {
- const TestContext* test = TestContext::cast(context());
- // The labels are swapped for the recursive call.
- VisitForControl(expr->expression(),
- test->false_label(),
- test->true_label(),
- test->fall_through());
- context()->Plug(test->true_label(), test->false_label());
- } else {
- // We handle value contexts explicitly rather than simply visiting
- // for control and plugging the control flow into the context,
- // because we need to prepare a pair of extra administrative AST ids
- // for the optimizing compiler.
- ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
- Label materialize_true, materialize_false, done;
- VisitForControl(expr->expression(),
- &materialize_false,
- &materialize_true,
- &materialize_true);
- __ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- if (context()->IsStackValue()) __ push(v0);
- __ jmp(&done);
- __ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- if (context()->IsStackValue()) __ push(v0);
- __ bind(&done);
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- { StackValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ CallRuntime(Runtime::kTypeof, 1);
- context()->Plug(v0);
- break;
- }
-
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ JumpIfSmi(result_register(), &no_conversion);
- __ mov(a0, result_register());
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- // TODO(svenpanne): Allowing format strings in Comment would be nice here...
- Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
- // GenericUnaryOpStub expects the argument to be in a0.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- __ mov(a0, result_register());
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
-
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
- // Expression can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- __ li(at, Operand(Smi::FromInt(0)));
- __ push(at);
- }
- if (assign_type == NAMED_PROPERTY) {
- // Put the object both on the stack and in the accumulator.
- VisitForAccumulatorValue(prop->obj());
- __ push(v0);
- EmitNamedPropertyLoad(prop);
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ lw(a1, MemOperand(sp, 0));
- __ push(v0);
- EmitKeyedPropertyLoad(prop);
- }
- }
-
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load my have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
- } else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
- }
-
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
- __ JumpIfSmi(v0, &no_conversion);
- __ mov(a0, v0);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(v0);
- break;
- case NAMED_PROPERTY:
- __ sw(v0, MemOperand(sp, kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ sw(v0, MemOperand(sp, 2 * kPointerSize));
- break;
- }
- }
- }
- __ mov(a0, result_register());
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- __ li(a1, Operand(Smi::FromInt(count_value)));
- __ AdduAndCheckForOverflow(v0, a0, a1, t0);
- __ BranchOnOverflow(&stub_call, t0); // Do stub on overflow.
-
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(v0, &done);
- __ bind(&stub_call);
- }
- __ mov(a1, a0);
- __ li(a0, Operand(Smi::FromInt(count_value)));
-
- // Record position before stub call.
- SetSourcePosition(expr->position());
-
- BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
-
- // Store the value returned in v0.
- switch (assign_type) {
- case VARIABLE:
- if (expr->is_postfix()) {
- { EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(v0);
- }
- // For all contexts except EffectConstant we have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(v0);
- }
- break;
- case NAMED_PROPERTY: {
- __ mov(a0, result_register()); // Value.
- __ li(a2, Operand(prop->key()->AsLiteral()->handle())); // Name.
- __ pop(a1); // Receiver.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(v0);
- }
- break;
- }
- case KEYED_PROPERTY: {
- __ mov(a0, result_register()); // Value.
- __ pop(a1); // Key.
- __ pop(a2); // Receiver.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(v0);
- }
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- ASSERT(!context()->IsEffect());
- ASSERT(!context()->IsTest());
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
- __ lw(a0, proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ li(a2, Operand(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallIC(ic);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(v0);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ li(a0, Operand(proxy->name()));
- __ Push(cp, a0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(v0);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Expression* sub_expr,
- Handle<String> check) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(sub_expr);
- }
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
- if (check->Equals(isolate()->heap()->number_string())) {
- __ JumpIfSmi(v0, if_true);
- __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
- __ JumpIfSmi(v0, if_false);
- // Check for undetectable objects => false.
- __ GetObjectType(v0, v0, a1);
- __ Branch(if_false, ge, a1, Operand(FIRST_NONSTRING_TYPE));
- __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
- __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
- Split(eq, a1, Operand(zero_reg),
- if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(if_true, eq, v0, Operand(at));
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- } else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(if_true, eq, v0, Operand(at));
- __ JumpIfSmi(v0, if_false);
- // Check for undetectable objects => true.
- __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
- __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
- Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_string())) {
- __ JumpIfSmi(v0, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ GetObjectType(v0, v0, a1);
- __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
- Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
- if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
- __ JumpIfSmi(v0, if_false);
- if (!FLAG_harmony_typeof) {
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(if_true, eq, v0, Operand(at));
- }
- if (FLAG_harmony_symbols) {
- __ GetObjectType(v0, v0, a1);
- __ Branch(if_true, eq, a1, Operand(SYMBOL_TYPE));
- }
- // Check for JS objects => true.
- __ GetObjectType(v0, v0, a1);
- __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ lbu(a1, FieldMemOperand(v0, Map::kInstanceTypeOffset));
- __ Branch(if_false, gt, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- // Check for undetectable objects => false.
- __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
- __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
- Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
- } else {
- if (if_false != fall_through) __ jmp(if_false);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
-
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr)) return;
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Token::Value op = expr->op();
- VisitForStackValue(expr->left());
- switch (op) {
- case Token::IN:
- VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ LoadRoot(t0, Heap::kTrueValueRootIndex);
- Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- // The stub returns 0 for true.
- Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- Condition cc = CompareIC::ComputeCondition(op);
- __ mov(a0, result_register());
- __ pop(a1);
-
- bool inline_smi_code = ShouldInlineSmiCase(op);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ Or(a2, a0, Operand(a1));
- patch_site.EmitJumpIfNotSmi(a2, &slow_case);
- Split(cc, a1, Operand(a0), if_true, if_false, NULL);
- __ bind(&slow_case);
- }
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
- patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ mov(a0, result_register());
- __ LoadRoot(a1, nil_value);
- if (expr->op() == Token::EQ_STRICT) {
- Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
- } else {
- Heap::RootListIndex other_nil_value = nil == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- __ Branch(if_true, eq, a0, Operand(a1));
- __ LoadRoot(a1, other_nil_value);
- __ Branch(if_true, eq, a0, Operand(a1));
- __ JumpIfSmi(a0, if_false);
- // It can be an undetectable object.
- __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
- __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
- Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ lw(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(v0);
-}
-
-
-Register FullCodeGenerator::result_register() {
- return v0;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return cp;
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ sw(value, MemOperand(fp, frame_offset));
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ lw(dst, ContextOperand(cp, context_index));
-}
-
-
-void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_global_scope() ||
- declaration_scope->is_module_scope()) {
- // Contexts nested in the native context have a canonical empty function
- // as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ li(at, Operand(Smi::FromInt(0)));
- } else if (declaration_scope->is_eval_scope()) {
- // Contexts created by a call to eval have the same closure as the
- // context calling eval, not the anonymous closure containing the eval
- // code. Fetch it from the context.
- __ lw(at, ContextOperand(cp, Context::CLOSURE_INDEX));
- } else {
- ASSERT(declaration_scope->is_function_scope());
- __ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
- __ push(at);
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- ASSERT(!result_register().is(a1));
- // Store result register while executing finally block.
- __ push(result_register());
- // Cook return address in link register to stack (smi encoded Code* delta).
- __ Subu(a1, ra, Operand(masm_->CodeObject()));
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- STATIC_ASSERT(0 == kSmiTag);
- __ Addu(a1, a1, Operand(a1)); // Convert to smi.
-
- // Store result register while executing finally block.
- __ push(a1);
-
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ li(at, Operand(pending_message_obj));
- __ lw(a1, MemOperand(at));
- __ push(a1);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ li(at, Operand(has_pending_message));
- __ lw(a1, MemOperand(at));
- __ SmiTag(a1);
- __ push(a1);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ li(at, Operand(pending_message_script));
- __ lw(a1, MemOperand(at));
- __ push(a1);
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(a1));
- // Restore pending message from stack.
- __ pop(a1);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ li(at, Operand(pending_message_script));
- __ sw(a1, MemOperand(at));
-
- __ pop(a1);
- __ SmiUntag(a1);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ li(at, Operand(has_pending_message));
- __ sw(a1, MemOperand(at));
-
- __ pop(a1);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ li(at, Operand(pending_message_obj));
- __ sw(a1, MemOperand(at));
-
- // Restore result register from stack.
- __ pop(a1);
-
- // Uncook return address and return.
- __ pop(result_register());
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- __ sra(a1, a1, 1); // Un-smi-tag value.
- __ Addu(at, a1, Operand(masm_->CodeObject()));
- __ Jump(at);
-}
-
-
-#undef __
-
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ lw(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
- __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ PopTryHandler();
- __ Call(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/ic-mips.cc b/src/3rdparty/v8/src/mips/ic-mips.cc
deleted file mode 100644
index 24b1b0f..0000000
--- a/src/3rdparty/v8/src/mips/ic-mips.cc
+++ /dev/null
@@ -1,1682 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen.h"
-#include "code-stubs.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register elements,
- Register scratch0,
- Register scratch1,
- Label* miss) {
- // Register usage:
- // receiver: holds the receiver on entry and is unchanged.
- // elements: holds the property dictionary on fall through.
- // Scratch registers:
- // scratch0: used to holds the receiver map.
- // scratch1: used to holds the receiver instance type, receiver bit mask
- // and elements map.
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the receiver is a valid JS object.
- __ GetObjectType(receiver, scratch0, scratch1);
- __ Branch(miss, lt, scratch1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // If this assert fails, we have to check upper bound too.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
- GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);
-
- // Check that the global object does not require access checks.
- __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
- __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasNamedInterceptor)));
- __ Branch(miss, ne, scratch1, Operand(zero_reg));
-
- __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
- __ Branch(miss, ne, scratch1, Operand(scratch0));
-}
-
-
-// Helper function used from LoadIC/CallIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done
-// result: Register for the result. It is only updated if a jump to the miss
-// label is not done. Can be the same as elements or name clobbering
-// one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The address returned from GenerateStringDictionaryProbes() in scratch2
-// is used.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry check that the value is a normal
- // property.
- __ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ And(at,
- scratch1,
- Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
- __ Branch(miss, ne, at, Operand(zero_reg));
-
- // Get the value at the masked, scaled index and return.
- __ lw(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done
-// value: The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The address returned from GenerateStringDictionaryProbes() in scratch2
-// is used.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register value,
- Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
- __ Branch(miss, ne, at, Operand(zero_reg));
-
- // Store the value at the masked, scaled index and return.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
- __ sw(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- Register scratch,
- int interceptor_bit,
- Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
- __ Branch(slow, ne, at, Operand(zero_reg));
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects work as intended.
- ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // elements - holds the elements of the receiver on exit.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // scratch1 - used to hold elements map and elements length.
- // Holds the elements map if not_fast_array branch is taken.
- //
- // scratch2 - used to hold the loaded value.
-
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode (not dictionary).
- __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(not_fast_array, ne, scratch1, Operand(at));
- } else {
- __ AssertFastElements(elements);
- }
-
- // Check that the key (index) is within bounds.
- __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(out_of_range, hs, key, Operand(scratch1));
-
- // Fast case: Do the load.
- __ Addu(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(at, at, scratch1);
- __ lw(scratch2, MemOperand(at));
-
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ Branch(out_of_range, eq, scratch2, Operand(at));
- __ mov(result, scratch2);
-}
-
-
-// Checks whether a key is an array index string or an internalized string.
-// Falls through if a key is an internalized string.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_internalized) {
- // The key is not a smi.
- // Is it a string?
- __ GetObjectType(key, map, hash);
- __ Branch(not_internalized, ge, hash, Operand(FIRST_NONSTRING_TYPE));
-
- // Is the string an array index, with cached numeric value?
- __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
- __ And(at, hash, Operand(String::kContainsCachedArrayIndexMask));
- __ Branch(index_string, eq, at, Operand(zero_reg));
-
- // Is the string internalized?
- // map: key map
- __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ And(at, hash, Operand(kIsInternalizedMask));
- __ Branch(not_internalized, eq, at, Operand(zero_reg));
-}
-
-
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- a1 : receiver
- // -- a2 : name
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, a1, a2, a3, t0, t1, t2);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(a1, &number, t1);
- __ GetObjectType(a1, a3, a3);
- __ Branch(&non_number, ne, a3, Operand(HEAP_NUMBER_TYPE));
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, a1);
- __ Branch(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ Branch(&non_string, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, a1);
- __ Branch(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ LoadRoot(t0, Heap::kTrueValueRootIndex);
- __ Branch(&boolean, eq, a1, Operand(t0));
- __ LoadRoot(t1, Heap::kFalseValueRootIndex);
- __ Branch(&miss, ne, a1, Operand(t1));
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, a1);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, a1, a2, a3, t0, t1, t2);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss,
- Register scratch) {
- // a1: function
-
- // Check that the value isn't a smi.
- __ JumpIfSmi(a1, miss);
-
- // Check that the value is a JSFunction.
- __ GetObjectType(a1, scratch, scratch);
- __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(a1, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack into a1.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- GenerateStringDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
-
- // a0: elements
- // Search the dictionary - put result in register a1.
- GenerateDictionaryLoad(masm, &miss, a0, a2, a1, a3, t0);
-
- GenerateFunctionTailCall(masm, argc, &miss, t0);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(isolate->counters()->call_miss(), 1, a3, t0);
- } else {
- __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, a3, t0);
- }
-
- // Get the receiver of the function from the stack.
- __ lw(a3, MemOperand(sp, argc*kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ Push(a3, a2);
-
- // Call the entry.
- __ PrepareCEntryArgs(2);
- __ PrepareCEntryFunction(ExternalReference(IC_Utility(id), isolate));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-
- // Move result to a1 and leave the internal frame.
- __ mov(a1, v0);
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ lw(a2, MemOperand(sp, argc * kPointerSize));
- __ JumpIfSmi(a2, &invoke);
- __ GetObjectType(a2, a3, a3);
- __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
- __ sw(a2, MemOperand(sp, argc * kPointerSize));
- __ bind(&invoke);
- }
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into a1.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into a1.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(a2, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, a1, a0, a3, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, a1, a2, t0, a3, a0, a1, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, a0, a3);
-
- __ bind(&do_call);
- // receiver in a1 is not used after this point.
- // a2: key
- // a1: function
-
- GenerateFunctionTailCall(masm, argc, &slow_call, a0);
-
- __ bind(&check_number_dictionary);
- // a2: key
- // a3: elements map
- // t0: elements pointer
- // Check whether the elements is a number dictionary.
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&slow_load, ne, a3, Operand(at));
- __ sra(a0, a2, kSmiTagSize);
- // a0: untagged index
- __ LoadFromNumberDictionary(&slow_load, t0, a2, a1, a0, a3, t1);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a2); // Save the key.
- __ Push(a1, a2); // Pass the receiver and the key.
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(a2); // Restore the key.
- }
- __ mov(a1, v0);
- __ jmp(&do_call);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, a2, a0, a3, &index_string, &slow_call);
-
- // The key is known to be internalized.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, a1, a0, a3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ lw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
- __ lw(a3, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&lookup_monomorphic_cache, ne, a3, Operand(at));
-
- GenerateDictionaryLoad(masm, &slow_load, a0, a2, a1, a3, t0);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, a0, a3);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, a0, a3);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor an internalized string,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub,
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
- GenerateMiss(masm, argc);
-
- __ bind(&index_string);
- __ IndexFromHash(a3, a2);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- // Check if the name is a string.
- Label miss;
- __ JumpIfSmi(a2, &miss);
- __ IsObjectJSStringType(a2, a0, &miss);
-
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
-// Defined in ic.cc.
-Object* LoadIC_Miss(Arguments args);
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::LOAD_IC, MONOMORPHIC, Code::HANDLER_FRAGMENT);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, a0, a2, a3, t0, t1, t2);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- lr : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
-
- // a1: elements
- GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
- __ Ret();
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
-
- __ mov(a3, a0);
- __ Push(a3, a2);
-
- // Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* unmapped_case,
- Label* slow_case) {
- // Check that the receiver is a JSObject. Because of the map check
- // later, we do not need to check for interceptors or whether it
- // requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ GetObjectType(object, scratch1, scratch2);
- __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
-
- // Check that the key is a positive smi.
- __ And(scratch1, key, Operand(0x80000001));
- __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
-
- // Load the elements into scratch1 and check its map.
- __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1,
- scratch2,
- Heap::kNonStrictArgumentsElementsMapRootIndex,
- slow_case,
- DONT_DO_SMI_CHECK);
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
- __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
-
- // Load element index and check whether it is the hole.
- const int kOffset =
- FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
- __ li(scratch3, Operand(kPointerSize >> 1));
- __ Mul(scratch3, key, scratch3);
- __ Addu(scratch3, scratch3, Operand(kOffset));
-
- __ Addu(scratch2, scratch1, scratch3);
- __ lw(scratch2, MemOperand(scratch2));
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ li(scratch3, Operand(kPointerSize >> 1));
- __ Mul(scratch3, scratch2, scratch3);
- __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
- __ Addu(scratch2, scratch1, scratch3);
- return MemOperand(scratch2);
-}
-
-
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map. The parameter_map register
- // must be loaded with the parameter map of the arguments object and is
- // overwritten.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- __ CheckMap(backing_store,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- slow_case,
- DONT_DO_SMI_CHECK);
- __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
- __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
- __ li(scratch, Operand(kPointerSize >> 1));
- __ Mul(scratch, key, scratch);
- __ Addu(scratch,
- scratch,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Addu(scratch, backing_store, scratch);
- return MemOperand(scratch);
-}
-
-
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label slow, notin;
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, mapped_location);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in a2.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
- __ lw(a2, unmapped_location);
- __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
- __ Branch(&slow, eq, a2, Operand(a3));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a2);
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, notin;
- // Store address is returned in register (of MemOperand) mapped_location.
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
- __ sw(a0, mapped_location);
- __ mov(t5, a0);
- ASSERT_EQ(mapped_location.offset(), 0);
- __ RecordWrite(a3, mapped_location.rm(), t5,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0); // (In delay slot) return the value stored in v0.
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in a3.
- // Store address is returned in register (of MemOperand) unmapped_location.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
- __ sw(a0, unmapped_location);
- __ mov(t5, a0);
- ASSERT_EQ(unmapped_location.offset(), 0);
- __ RecordWrite(a3, unmapped_location.rm(), t5,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0); // (In delay slot) return the value stored in v0.
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- lr : return address
- // -----------------------------------
- Label slow, notin;
- // Load receiver.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, a1, a2, a3, t0, t1, &notin, &slow);
- __ lw(a1, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow, a3);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in a3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, a2, a3, t0, &slow);
- __ lw(a1, unmapped_location);
- __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
- __ Branch(&slow, eq, a1, Operand(a3));
- GenerateFunctionTailCall(masm, argc, &slow, a3);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
-
- __ Push(a1, a0);
-
- // Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
-
- __ Push(a1, a0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = a0;
- Register receiver = a1;
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(a2, a3, &check_number_dictionary);
-
- GenerateFastArrayLoad(
- masm, receiver, key, t0, a3, a2, v0, NULL, &slow);
-
- __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
-
- // Check whether the elements is a number dictionary.
- // a0: key
- // a3: elements map
- // t0: elements
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&slow, ne, a3, Operand(at));
- __ sra(a2, a0, kSmiTagSize);
- __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
- __ Ret();
-
- // Slow case, key and receiver still in a0 and a1.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
- 1,
- a2,
- a3);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, key, a2, a3, &index_string, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
-
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
- __ lw(a3, FieldMemOperand(a1, JSObject::kPropertiesOffset));
- __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&probe_dictionary, eq, t0, Operand(at));
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ sra(a3, a2, KeyedLookupCache::kMapHashShift);
- __ lw(t0, FieldMemOperand(a0, String::kHashFieldOffset));
- __ sra(at, t0, String::kHashShift);
- __ xor_(a3, a3, at);
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ And(a3, a3, Operand(mask));
-
- // Load the key (consisting of map and internalized string) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
- __ li(t0, Operand(cache_keys));
- __ sll(at, a3, kPointerSizeLog2 + 1);
- __ addu(t0, t0, at);
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
- __ Branch(&try_next_entry, ne, a2, Operand(t1));
- __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
- __ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1));
- __ bind(&try_next_entry);
- }
-
- __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
- __ Branch(&slow, ne, a2, Operand(t1));
- __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
- __ Branch(&slow, ne, a0, Operand(t1));
-
- // Get field offset.
- // a0 : key
- // a1 : receiver
- // a2 : receiver's map
- // a3 : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- __ li(t0, Operand(cache_field_offsets));
- __ sll(at, a3, kPointerSizeLog2);
- __ addu(at, t0, at);
- __ lw(t1, MemOperand(at, kPointerSize * i));
- __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
- __ Subu(t1, t1, t2);
- __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
- if (i != 0) {
- __ Branch(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ addu(t2, t2, t1); // Index from start of object.
- __ Subu(a1, a1, Operand(kHeapObjectTag)); // Remove the heap tag.
- __ sll(at, t2, kPointerSizeLog2);
- __ addu(at, a1, at);
- __ lw(v0, MemOperand(at));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1,
- a2,
- a3);
- __ Ret();
-
- // Load property array property.
- __ bind(&property_array_property);
- __ lw(a1, FieldMemOperand(a1, JSObject::kPropertiesOffset));
- __ Addu(a1, a1, FixedArray::kHeaderSize - kHeapObjectTag);
- __ sll(t0, t1, kPointerSizeLog2);
- __ Addu(t0, t0, a1);
- __ lw(v0, MemOperand(t0));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1,
- a2,
- a3);
- __ Ret();
-
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // a1: receiver
- // a0: key
- // a3: elements
- __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(a2, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, a2, &slow);
- // Load the property to v0.
- GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
- 1,
- a2,
- a3);
- __ Ret();
-
- __ bind(&index_string);
- __ IndexFromHash(a3, key);
- // Now jump to the place where smi keys are handled.
- __ Branch(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key (index)
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- Register receiver = a1;
- Register index = a0;
- Register scratch = a3;
- Register result = v0;
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ---------- S t a t e --------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(a2, a1, a0);
- __ li(a1, Operand(Smi::FromInt(NONE))); // PropertyAttributes.
- __ li(a0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
- __ Push(a1, a0);
-
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length,
- Register value,
- Register key,
- Register receiver,
- Register receiver_map,
- Register elements_map,
- Register elements) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
-
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- Register scratch_value = t0;
- Register address = t1;
- if (check_map == kCheckMap) {
- __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(fast_double, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- }
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
-
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
- __ sw(value, MemOperand(address));
- __ Ret();
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
- __ sw(value, MemOperand(address));
- // Update write barrier for the elements array address.
- __ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- scratch_value,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
- __ Branch(slow, ne, elements_map, Operand(at));
- }
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- elements, // Overwritten.
- a3, // Scratch regs...
- t0,
- t1,
- t2,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Ret();
-
- __ bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&non_double_value, ne, t0, Operand(at));
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- t0,
- slow);
- ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- t0,
- slow);
- ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
- slow);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
- // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- t0,
- slow);
- ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ---------- S t a t e --------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array;
-
- // Register usage.
- Register value = a0;
- Register key = a1;
- Register receiver = a2;
- Register receiver_map = a3;
- Register elements_map = t2;
- Register elements = t3; // Elements array of the receiver.
- // t0 and t1 are used as general scratch registers.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &slow);
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
- // Get the map of the object.
- __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
- __ Branch(&slow, ne, t0, Operand(zero_reg));
- // Check if the object is a JS array or not.
- __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
- __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
- // Check that the object is some kind of JSObject.
- __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
-
- // Object case: Check key against length in the elements array.
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&fast_object, lo, key, Operand(t0));
-
- // Slow case, handle jump to runtime.
- __ bind(&slow);
- // Entry registers are intact.
- // a0: value.
- // a1: key.
- // a2: receiver.
- GenerateRuntimeSetProperty(masm, strict_mode);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // Condition code from comparing key and array length is still available.
- // Only support writing to array[array.length].
- __ Branch(&slow, ne, key, Operand(t0));
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&slow, hs, key, Operand(t0));
- __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(
- &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
-
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is the length is always a smi.
- __ bind(&array);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Branch(&extra, hs, key, Operand(t0));
-
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label slow;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(a1, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ And(t0, a0, Operand(kSmiTagMask | kSmiSignMask));
- __ Branch(&slow, ne, t0, Operand(zero_reg));
-
- // Get the map of the receiver.
- __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ lbu(a3, FieldMemOperand(a2, Map::kBitFieldOffset));
- __ And(a3, a3, Operand(kSlowCaseBitFieldMask));
- __ Branch(&slow, ne, a3, Operand(1 << Map::kHasIndexedInterceptor));
- // Everything is fine, call runtime.
- __ Push(a1, a0); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(ExternalReference(
- IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
-
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ---------- S t a t e --------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(a2, a1, a0);
-
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- // We can't use MultiPush as the order of the registers is important.
- __ Push(a2, a1, a0);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
-
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- a2 : receiver
- // -- a3 : target map
- // -- ra : return address
- // -----------------------------------
- // Must return the modified receiver in v0.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a2);
- __ bind(&fail);
- }
-
- __ push(a2);
- __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
- MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- a2 : receiver
- // -- a3 : target map
- // -- ra : return address
- // -----------------------------------
- // Must return the modified receiver in v0.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a2);
- __ bind(&fail);
- }
-
- __ push(a2);
- __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, a1, a2, a3, t0, t1, t2);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- __ Push(a1, a2, a0);
- // Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
- masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
-
- GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
- __ Ret();
-
- __ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- __ Push(a1, a2, a0);
-
- __ li(a1, Operand(Smi::FromInt(NONE))); // PropertyAttributes.
- __ li(a0, Operand(Smi::FromInt(strict_mode)));
- __ Push(a1, a0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address andi_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a andi at, rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(andi_instruction_address);
- return Assembler::IsAndImmediate(instr) &&
- Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
- Address andi_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a andi at, rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(andi_instruction_address);
- if (!(Assembler::IsAndImmediate(instr) &&
- Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
- return;
- }
-
- // The delta to the start of the map check instruction and the
- // condition code uses at the patched jump.
- int delta = Assembler::GetImmediate16(instr);
- delta += Assembler::GetRs(instr) * kImm16Mask;
- // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
- // signals that nothing was inlined.
- if (delta == 0) {
- return;
- }
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
- address, andi_instruction_address, delta);
- }
-#endif
-
- Address patch_address =
- andi_instruction_address - delta * Instruction::kInstrSize;
- Instr instr_at_patch = Assembler::instr_at(patch_address);
- Instr branch_instr =
- Assembler::instr_at(patch_address + Instruction::kInstrSize);
- // This is patching a conditional "jump if not smi/jump if smi" site.
- // Enabling by changing from
- // andi at, rx, 0
- // Branch <target>, eq, at, Operand(zero_reg)
- // to:
- // andi at, rx, #kSmiTagMask
- // Branch <target>, ne, at, Operand(zero_reg)
- // and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
- Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
- if (check == ENABLE_INLINED_SMI_CHECK) {
- ASSERT(Assembler::IsAndImmediate(instr_at_patch));
- ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
- patcher.masm()->andi(at, reg, kSmiTagMask);
- } else {
- ASSERT(check == DISABLE_INLINED_SMI_CHECK);
- ASSERT(Assembler::IsAndImmediate(instr_at_patch));
- patcher.masm()->andi(at, reg, 0);
- }
- ASSERT(Assembler::IsBranch(branch_instr));
- if (Assembler::IsBeq(branch_instr)) {
- patcher.ChangeBranchCondition(ne);
- } else {
- ASSERT(Assembler::IsBne(branch_instr));
- patcher.ChangeBranchCondition(eq);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc b/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc
deleted file mode 100644
index 16d7c26..0000000
--- a/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc
+++ /dev/null
@@ -1,6106 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "mips/lithium-codegen-mips.h"
-#include "mips/lithium-gap-resolver-mips.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-class SafepointGenerator : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
-
- virtual void BeforeCall(int call_size) const { }
-
- virtual void AfterCall() const {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
- ASSERT(is_unused());
- status_ = GENERATING;
- CpuFeatures::Scope scope(FPU);
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // NONE indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::NONE);
-
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateDeoptJumpTable() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
- code->set_stack_slots(GetStackSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
- PopulateDeoptimizationData(code);
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
-}
-
-
-void LChunkBuilder::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
-#endif
-
- // a1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ Branch(&ok, eq, t1, Operand(zero_reg));
-
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ sw(a2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
- }
- }
-
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- if (info()->IsStub()) {
- __ Push(ra, fp, cp);
- __ Push(Smi::FromInt(StackFrame::STUB));
- // Adjust FP to point to saved FP.
- __ Addu(fp, sp, Operand(2 * kPointerSize));
- } else {
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ Push(ra, fp, cp, a1);
- // Add unused load of ip to ensure prologue sequence is identical for
- // full-codegen and lithium-codegen.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- // Adj. FP to point to saved FP.
- __ Addu(fp, sp, Operand(2 * kPointerSize));
- }
- frame_is_built_ = true;
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- if (FLAG_debug_code) {
- __ Subu(sp, sp, Operand(slots * kPointerSize));
- __ push(a0);
- __ push(a1);
- __ Addu(a0, sp, Operand(slots * kPointerSize));
- __ li(a1, Operand(kSlotsZapValue));
- Label loop;
- __ bind(&loop);
- __ Subu(a0, a0, Operand(kPointerSize));
- __ sw(a1, MemOperand(a0, 2 * kPointerSize));
- __ Branch(&loop, ne, a0, Operand(sp));
- __ pop(a1);
- __ pop(a0);
- } else {
- __ Subu(sp, sp, Operand(slots * kPointerSize));
- }
- }
-
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
- }
-
- // Possibly allocate a local context.
- int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope() != NULL && scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment(";;; Allocate local context");
- // Argument to NewContext is the function, which is in a1.
- __ push(a1);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both v0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ lw(a0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
- __ sw(a0, target);
- // Update the write barrier. This clobbers a3 and a0.
- __ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
- }
- }
- Comment(";;; End allocate local context");
- }
-
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- EnsureSpaceForLazyDeopt();
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
-
- if (emit_instructions) {
- if (FLAG_code_comments) {
- HValue* hydrogen = instr->hydrogen_value();
- if (hydrogen != NULL) {
- if (hydrogen->IsChange()) {
- HValue* changed_value = HChange::cast(hydrogen)->value();
- int use_id = 0;
- const char* use_mnemo = "dead";
- if (hydrogen->UseCount() >= 1) {
- HValue* use_value = hydrogen->uses().value();
- use_id = use_value->id();
- use_mnemo = use_value->Mnemonic();
- }
- Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
- current_instruction_, instr->Mnemonic(),
- changed_value->id(), changed_value->Mnemonic(),
- use_id, use_mnemo);
- } else {
- Comment(";;; @%d: %s. <#%d>", current_instruction_,
- instr->Mnemonic(), hydrogen->id());
- }
- } else {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- }
- }
- instr->CompileToNative(this);
- }
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred build frame",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(!frame_is_built_);
- ASSERT(info()->IsStub());
- frame_is_built_ = true;
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
- __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ Addu(fp, sp, Operand(2 * kPointerSize));
- }
- Comment(";;; Deferred code @%d: %s.",
- code->instruction_index(),
- code->instr()->Mnemonic());
- code->Generate();
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred destroy frame",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(frame_is_built_);
- __ pop(at);
- __ MultiPop(cp.bit() | fp.bit() | ra.bit());
- frame_is_built_ = false;
- }
- __ jmp(code->exit());
- }
- }
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeoptJumpTable() {
- // Check that the jump table is accessible from everywhere in the function
- // code, i.e. that offsets to the table can be encoded in the 16bit signed
- // immediate of a branch instruction.
- // To simplify we consider the code size from the first instruction to the
- // end of the jump table.
- if (!is_int16((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 12)) {
- Abort("Generated code is too large");
- }
-
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- __ RecordComment("[ Deoptimization jump table");
- Label table_start;
- __ bind(&table_start);
- Label needs_frame_not_call;
- Label needs_frame_is_call;
- for (int i = 0; i < deopt_jump_table_.length(); i++) {
- __ bind(&deopt_jump_table_[i].label);
- Address entry = deopt_jump_table_[i].address;
- bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt;
- Deoptimizer::BailoutType type =
- is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- int id = Deoptimizer::GetDeoptimizationId(entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
- if (deopt_jump_table_[i].needs_frame) {
- if (is_lazy_deopt) {
- if (needs_frame_is_call.is_bound()) {
- __ Branch(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ Addu(fp, sp, Operand(2 * kPointerSize));
- __ Call(t9);
- }
- } else {
- if (needs_frame_not_call.is_bound()) {
- __ Branch(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ Addu(fp, sp, Operand(2 * kPointerSize));
- __ Jump(t9);
- }
- }
- } else {
- if (is_lazy_deopt) {
- __ Call(t9);
- } else {
- __ Jump(t9);
- }
- }
- }
- __ RecordComment("]");
-
- // The deoptimization jump table is the last part of the instruction
- // sequence. Mark the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
- safepoints_.Emit(masm(), GetStackSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::FromAllocationIndex(index);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- ASSERT(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
- if (op->IsRegister()) {
- return ToRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
- } else if (r.IsDouble()) {
- Abort("EmitLoadRegister: Unsupported double immediate.");
- } else {
- ASSERT(r.IsTagged());
- if (literal->IsSmi()) {
- __ li(scratch, Operand(literal));
- } else {
- __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
- }
- }
- return scratch;
- } else if (op->IsStackSlot() || op->IsArgument()) {
- __ lw(scratch, ToMemOperand(op));
- return scratch;
- }
- UNREACHABLE();
- return scratch;
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
- FloatRegister flt_scratch,
- DoubleRegister dbl_scratch) {
- if (op->IsDoubleRegister()) {
- return ToDoubleRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- __ li(at, Operand(static_cast<int32_t>(literal->Number())));
- __ mtc1(at, flt_scratch);
- __ cvt_d_w(dbl_scratch, flt_scratch);
- return dbl_scratch;
- } else if (r.IsDouble()) {
- Abort("unsupported double immediate");
- } else if (r.IsTagged()) {
- Abort("unsupported tagged immediate");
- }
- } else if (op->IsStackSlot() || op->IsArgument()) {
- MemOperand mem_op = ToMemOperand(op);
- __ ldc1(dbl_scratch, mem_op);
- return dbl_scratch;
- }
- UNREACHABLE();
- return dbl_scratch;
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return constant->handle();
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsInteger32();
-}
-
-
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) {
- if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk()->LookupConstant(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(constant->HasInteger32Value());
- return Operand(constant->Integer32Value());
- } else if (r.IsDouble()) {
- Abort("ToOperand Unsupported double immediate.");
- }
- ASSERT(r.IsTagged());
- return Operand(constant->handle());
- } else if (op->IsRegister()) {
- return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
- Abort("ToOperand IsDoubleRegister unimplemented");
- return Operand(0);
- }
- // Stack slots not implemented, use ToMemOperand instead.
- UNREACHABLE();
- return Operand(0);
-}
-
-
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- ASSERT(!op->IsRegister());
- ASSERT(!op->IsDoubleRegister());
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()));
-}
-
-
-MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
- ASSERT(op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* pushed_arguments_index,
- int* pushed_arguments_count) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
-
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *pushed_arguments_index = -environment->parameter_count();
- *pushed_arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- pushed_arguments_index,
- pushed_arguments_count);
- bool has_closure_id = !info()->closure().is_null() &&
- *info()->closure() != *environment->closure();
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
-
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- ASSERT(translation_size == 1);
- ASSERT(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- ASSERT(translation_size == 2);
- ASSERT(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- }
-
- // Inlined frames which push their arguments cause the index to be
- // bumped and another stack area to be used for materialization,
- // otherwise actual argument values are unknown for inlined frames.
- bool arguments_known = true;
- int arguments_index = *pushed_arguments_index;
- int arguments_count = *pushed_arguments_count;
- if (environment->entry() != NULL) {
- arguments_known = environment->entry()->arguments_pushed();
- arguments_index = arguments_index < 0
- ? GetStackSlotCount() : arguments_index + arguments_count;
- arguments_count = environment->entry()->arguments_count() + 1;
- if (environment->entry()->arguments_pushed()) {
- *pushed_arguments_index = arguments_index;
- *pushed_arguments_count = arguments_count;
- }
- }
-
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- arguments_known,
- arguments_index,
- arguments_count);
- }
- }
-
- AddToTranslation(translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- }
-}
-
-
-void LCodeGen::AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(
- arguments_known, arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
- if (is_tagged) {
- translation->StoreStackSlot(op->index());
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
- } else {
- translation->StoreInt32StackSlot(op->index());
- }
- } else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- DoubleRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode) {
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ Call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr) {
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- ASSERT(pointers != NULL);
- RecordPosition(pointers->position());
-
- __ CallRuntime(function, num_arguments);
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr) {
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode) {
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment,
- Register src1,
- const Operand& src2) {
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- ASSERT(info()->IsOptimizing() || info()->IsStub());
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort("bailout was not prepared");
- return;
- }
-
- ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
- if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- return;
- }
-
- if (FLAG_trap_on_deopt) {
- Label skip;
- if (cc != al) {
- __ Branch(&skip, NegateCondition(cc), src1, src2);
- }
- __ stop("trap_on_deopt");
- __ bind(&skip);
- }
-
- ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
- if (cc == al && frame_is_built_) {
- if (needs_lazy_deopt) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
- }
- } else {
- // We often have several deopts to the same entry, reuse the last
- // jump entry if this is the case.
- if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry) ||
- (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
- (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
- JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
- deopt_jump_table_.Add(table_entry, zone());
- }
- __ Branch(&deopt_jump_table_.last().label, cc, src1, src2);
- }
-}
-
-
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
- maps.Add(map, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
-}
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
-
- Handle<ByteArray> translations = translations_.CreateByteArray();
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
- }
-
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
- LInstruction* instr, SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- ASSERT(expected_safepoint_kind_ == kind);
-
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp, zone());
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
- RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(
- pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegistersAndDoubles(
- LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(
- pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
-}
-
-
-void LCodeGen::RecordPosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- __ lw(a0, MemOperand(sp, 0));
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- Register scratch = scratch0();
- const Register left = ToRegister(instr->left());
- const Register result = ToRegister(instr->result());
-
- Label done;
-
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register scratch = scratch0();
- ASSERT(!left.is(scratch));
- __ mov(scratch, left);
- int32_t p2constant = HConstant::cast(
- instr->hydrogen()->right())->Integer32Value();
- ASSERT(p2constant != 0);
- // Result always takes the sign of the dividend (left).
- p2constant = abs(p2constant);
-
- Label positive_dividend;
- __ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
- __ subu(result, zero_reg, left);
- __ And(result, result, p2constant - 1);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
- }
- __ Branch(USE_DELAY_SLOT, &done);
- __ subu(result, zero_reg, result);
- __ bind(&positive_dividend);
- __ And(result, scratch, p2constant - 1);
- } else {
- // div runs in the background while we check for special cases.
- Register right = EmitLoadRegister(instr->right(), scratch);
- __ div(left, right);
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
- }
-
- // Check for (kMinInt % -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
- __ bind(&left_not_min_int);
- }
-
- __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
- __ mfhi(result);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDivI(LDivI* instr) {
- const Register left = ToRegister(instr->left());
- const Register right = ToRegister(instr->right());
- const Register result = ToRegister(instr->result());
-
- // On MIPS div is asynchronous - it will run in the background while we
- // check for special cases.
- __ div(left, right);
-
- // Check for x / 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
- __ bind(&left_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
- __ bind(&left_not_min_int);
- }
-
- __ mfhi(result);
- DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
- __ mflo(result);
-}
-
-
-void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
- DoubleRegister addend = ToDoubleRegister(instr->addend());
- DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
- DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
-
- // This is computed in-place.
- ASSERT(addend.is(ToDoubleRegister(instr->result())));
-
- __ madd_d(addend, addend, multiplier, multiplicand);
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register scratch = scratch0();
- Register result = ToRegister(instr->result());
- // Note that result may alias left.
- Register left = ToRegister(instr->left());
- LOperand* right_op = instr->right();
-
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
-
- if (right_op->IsConstantOperand() && !can_overflow) {
- // Use optimized code for specific constants.
- int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
-
- if (bailout_on_minus_zero && (constant < 0)) {
- // The case of a null constant will be handled separately.
- // If constant is negative and left is null, the result should be -0.
- DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
- }
-
- switch (constant) {
- case -1:
- __ Subu(result, zero_reg, left);
- break;
- case 0:
- if (bailout_on_minus_zero) {
- // If left is strictly negative and the constant is null, the
- // result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
- }
- __ mov(result, zero_reg);
- break;
- case 1:
- // Nothing to do.
- __ Move(result, left);
- break;
- default:
- // Multiplying by powers of two and powers of two plus or minus
- // one can be done faster with shifted operands.
- // For other constants we emit standard code.
- int32_t mask = constant >> 31;
- uint32_t constant_abs = (constant + mask) ^ mask;
-
- if (IsPowerOf2(constant_abs) ||
- IsPowerOf2(constant_abs - 1) ||
- IsPowerOf2(constant_abs + 1)) {
- if (IsPowerOf2(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ sll(result, left, shift);
- } else if (IsPowerOf2(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ sll(result, left, shift);
- __ Addu(result, result, left);
- } else if (IsPowerOf2(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ sll(result, left, shift);
- __ Subu(result, result, left);
- }
-
- // Correct the sign of the result is the constant is negative.
- if (constant < 0) {
- __ Subu(result, zero_reg, result);
- }
-
- } else {
- // Generate standard code.
- __ li(at, constant);
- __ Mul(result, left, at);
- }
- }
-
- } else {
- Register right = EmitLoadRegister(right_op, scratch);
- if (bailout_on_minus_zero) {
- __ Or(ToRegister(instr->temp()), left, right);
- }
-
- if (can_overflow) {
- // hi:lo = left * right.
- __ mult(left, right);
- __ mfhi(scratch);
- __ mflo(result);
- __ sra(at, result, 31);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
- } else {
- __ Mul(result, left, right);
- }
-
- if (bailout_on_minus_zero) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- __ Branch(&done, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt,
- instr->environment(),
- ToRegister(instr->temp()),
- Operand(zero_reg));
- __ bind(&done);
- }
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left_op = instr->left();
- LOperand* right_op = instr->right();
- ASSERT(left_op->IsRegister());
- Register left = ToRegister(left_op);
- Register result = ToRegister(instr->result());
- Operand right(no_reg);
-
- if (right_op->IsStackSlot() || right_op->IsArgument()) {
- right = Operand(EmitLoadRegister(right_op, at));
- } else {
- ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
- right = ToOperand(right_op);
- }
-
- switch (instr->op()) {
- case Token::BIT_AND:
- __ And(result, left, right);
- break;
- case Token::BIT_OR:
- __ Or(result, left, right);
- break;
- case Token::BIT_XOR:
- __ Xor(result, left, right);
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
- // result may alias either of them.
- LOperand* right_op = instr->right();
- Register left = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
-
- if (right_op->IsRegister()) {
- // No need to mask the right operand on MIPS, it is built into the variable
- // shift instructions.
- switch (instr->op()) {
- case Token::ROR:
- __ Ror(result, left, Operand(ToRegister(right_op)));
- break;
- case Token::SAR:
- __ srav(result, left, ToRegister(right_op));
- break;
- case Token::SHR:
- __ srlv(result, left, ToRegister(right_op));
- if (instr->can_deopt()) {
- DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
- }
- break;
- case Token::SHL:
- __ sllv(result, left, ToRegister(right_op));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- // Mask the right_op operand.
- int value = ToInteger32(LConstantOperand::cast(right_op));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ Ror(result, left, Operand(shift_count));
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ sra(result, left, shift_count);
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SHR:
- if (shift_count != 0) {
- __ srl(result, left, shift_count);
- } else {
- if (instr->can_deopt()) {
- __ And(at, left, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
- }
- __ Move(result, left);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- __ sll(result, left, shift_count);
- } else {
- __ Move(result, left);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (!can_overflow) {
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, at);
- __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
- } else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
- }
- } else { // can_overflow.
- Register overflow = scratch0();
- Register scratch = scratch1();
- if (right->IsStackSlot() ||
- right->IsArgument() ||
- right->IsConstantOperand()) {
- Register right_reg = EmitLoadRegister(right, scratch);
- __ SubuAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- right_reg,
- overflow); // Reg at also used as scratch.
- } else {
- ASSERT(right->IsRegister());
- // Due to overflow check macros not supporting constant operands,
- // handling the IsConstantOperand case was moved to prev if clause.
- __ SubuAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- ToRegister(right),
- overflow); // Reg at also used as scratch.
- }
- DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
- __ li(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
- DoubleRegister result = ToDoubleRegister(instr->result());
- CpuFeatures::Scope scope(FPU);
- double v = instr->value();
- __ Move(result, v);
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
- if (value->IsSmi()) {
- __ li(ToRegister(instr->result()), Operand(value));
- } else {
- __ LoadHeapObject(ToRegister(instr->result()),
- Handle<HeapObject>::cast(value));
- }
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
-}
-
-
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temp());
- Label done;
-
- // If the object is a smi return the object.
- __ Move(result, input);
- __ JumpIfSmi(input, &done);
-
- // If the object is not a value type, return the object.
- __ GetObjectType(input, map, map);
- __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
- __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- Label runtime, done;
- ASSERT(object.is(a0));
- ASSERT(result.is(v0));
- ASSERT(!scratch.is(scratch0()));
- ASSERT(!scratch.is(object));
-
- __ And(at, object, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
- __ GetObjectType(object, scratch, scratch);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
-
- if (index->value() == 0) {
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ li(scratch, Operand(stamp));
- __ lw(scratch, MemOperand(scratch));
- __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Branch(&runtime, ne, scratch, Operand(scratch0()));
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ li(a1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
-}
-
-
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ Nor(result, zero_reg, Operand(input));
-}
-
-
-void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->value(), at);
- __ push(input_reg);
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (!can_overflow) {
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, at);
- __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
- } else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
- }
- } else { // can_overflow.
- Register overflow = scratch0();
- Register scratch = scratch1();
- if (right->IsStackSlot() ||
- right->IsArgument() ||
- right->IsConstantOperand()) {
- Register right_reg = EmitLoadRegister(right, scratch);
- __ AdduAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- right_reg,
- overflow); // Reg at also used as scratch.
- } else {
- ASSERT(right->IsRegister());
- // Due to overflow check macros not supporting constant operands,
- // handling the IsConstantOperand case was moved to prev if clause.
- __ AdduAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- ToRegister(right),
- overflow); // Reg at also used as scratch.
- }
- DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
- if (instr->hydrogen()->representation().IsInteger32()) {
- Register left_reg = ToRegister(left);
- Operand right_op = (right->IsRegister() || right->IsConstantOperand())
- ? ToOperand(right)
- : Operand(EmitLoadRegister(right, at));
- Register result_reg = ToRegister(instr->result());
- Label return_right, done;
- if (!result_reg.is(left_reg)) {
- __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
- __ mov(result_reg, left_reg);
- __ Branch(&done);
- }
- __ Branch(&done, condition, left_reg, right_op);
- __ bind(&return_right);
- __ Addu(result_reg, zero_reg, right_op);
- __ bind(&done);
- } else {
- ASSERT(instr->hydrogen()->representation().IsDouble());
- CpuFeatures::Scope scope(FPU);
- FPURegister left_reg = ToDoubleRegister(left);
- FPURegister right_reg = ToDoubleRegister(right);
- FPURegister result_reg = ToDoubleRegister(instr->result());
- Label check_nan_left, check_zero, return_left, return_right, done;
- __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
- __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
- __ Branch(&return_right);
-
- __ bind(&check_zero);
- // left == right != 0.
- __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
- // At this point, both left and right are either 0 or -0.
- if (operation == HMathMinMax::kMathMin) {
- __ neg_d(left_reg, left_reg);
- __ sub_d(result_reg, left_reg, right_reg);
- __ neg_d(result_reg, result_reg);
- } else {
- __ add_d(result_reg, left_reg, right_reg);
- }
- __ Branch(&done);
-
- __ bind(&check_nan_left);
- // left == NaN.
- __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
- __ bind(&return_right);
- if (!right_reg.is(result_reg)) {
- __ mov_d(result_reg, right_reg);
- }
- __ Branch(&done);
-
- __ bind(&return_left);
- if (!left_reg.is(result_reg)) {
- __ mov_d(result_reg, left_reg);
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- CpuFeatures::Scope scope(FPU);
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
- DoubleRegister result = ToDoubleRegister(instr->result());
- switch (instr->op()) {
- case Token::ADD:
- __ add_d(result, left, right);
- break;
- case Token::SUB:
- __ sub_d(result, left, right);
- break;
- case Token::MUL:
- __ mul_d(result, left, right);
- break;
- case Token::DIV:
- __ div_d(result, left, right);
- break;
- case Token::MOD: {
- // Save a0-a3 on the stack.
- RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
- __ MultiPush(saved_regs);
-
- __ PrepareCallCFunction(0, 2, scratch0());
- __ SetCallCDoubleArguments(left, right);
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
- 0, 2);
- // Move the result in the double result register.
- __ GetCFunctionDoubleResult(result);
-
- // Restore saved register.
- __ MultiPop(saved_regs);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->left()).is(a1));
- ASSERT(ToRegister(instr->right()).is(a0));
- ASSERT(ToRegister(instr->result()).is(v0));
-
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- // Other arch use a nop here, to signal that there is no inlined
- // patchable code. Mips does not need the nop, since our marker
- // instruction (andi zero_reg) will never be used in normal code.
-}
-
-
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
-
-
-void LCodeGen::EmitBranch(int left_block, int right_block,
- Condition cc, Register src1, const Operand& src2) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ Branch(chunk_->GetAssemblyLabel(right_block),
- NegateCondition(cc), src1, src2);
- } else if (right_block == next_block) {
- __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
- } else {
- __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
- __ Branch(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-void LCodeGen::EmitBranchF(int left_block, int right_block,
- Condition cc, FPURegister src1, FPURegister src2) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
- NegateCondition(cc), src1, src2);
- } else if (right_block == next_block) {
- __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
- } else {
- __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
- __ Branch(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
- Register reg = ToRegister(instr->value());
- EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
- } else if (r.IsDouble()) {
- CpuFeatures::Scope scope(FPU);
- DoubleRegister reg = ToDoubleRegister(instr->value());
- // Test the double value. Zero and NaN are false.
- EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- EmitBranch(true_block, false_block, eq, reg, Operand(at));
- } else if (type.IsSmi()) {
- EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
- } else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
-
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
- // undefined -> false.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(false_label, eq, reg, Operand(at));
- }
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
- // Boolean -> its value.
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(true_label, eq, reg, Operand(at));
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- __ Branch(false_label, eq, reg, Operand(at));
- }
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
- // 'null' -> false.
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(false_label, eq, reg, Operand(at));
- }
-
- if (expected.Contains(ToBooleanStub::SMI)) {
- // Smis: 0 -> false, all other -> true.
- __ Branch(false_label, eq, reg, Operand(zero_reg));
- __ JumpIfSmi(reg, true_label);
- } else if (expected.NeedsMap()) {
- // If we need a map later and have a Smi -> deopt.
- __ And(at, reg, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
- }
-
- const Register map = scratch0();
- if (expected.NeedsMap()) {
- __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
- if (expected.CanBeUndetectable()) {
- // Undetectable -> false.
- __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, at, Operand(1 << Map::kIsUndetectable));
- __ Branch(false_label, ne, at, Operand(zero_reg));
- }
- }
-
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
- // spec object -> true.
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
- }
-
- if (expected.Contains(ToBooleanStub::STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(&not_string, ge , at, Operand(FIRST_NONSTRING_TYPE));
- __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
- __ Branch(true_label, ne, at, Operand(zero_reg));
- __ Branch(false_label);
- __ bind(&not_string);
- }
-
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
- CpuFeatures::Scope scope(FPU);
- // heap number -> false iff +0, -0, or NaN.
- DoubleRegister dbl_scratch = double_scratch0();
- Label not_heap_number;
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&not_heap_number, ne, map, Operand(at));
- __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
- // Falls through if dbl_scratch == 0.
- __ Branch(false_label);
- __ bind(&not_heap_number);
- }
-
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = kNoCondition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = eq;
- break;
- case Token::LT:
- cond = is_unsigned ? lo : lt;
- break;
- case Token::GT:
- cond = is_unsigned ? hi : gt;
- break;
- case Token::LTE:
- cond = is_unsigned ? ls : le;
- break;
- case Token::GTE:
- cond = is_unsigned ? hs : ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- Condition cond = TokenToCondition(instr->op(), false);
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- CpuFeatures::Scope scope(FPU);
- // Compare left and right as doubles and load the
- // resulting flags into the normal status register.
- FPURegister left_reg = ToDoubleRegister(left);
- FPURegister right_reg = ToDoubleRegister(right);
-
- // If a NaN is involved, i.e. the result is unordered,
- // jump to false block label.
- __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
- left_reg, right_reg);
-
- EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
- } else {
- Register cmp_left;
- Operand cmp_right = Operand(0);
-
- if (right->IsConstantOperand()) {
- cmp_left = ToRegister(left);
- cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
- } else if (left->IsConstantOperand()) {
- cmp_left = ToRegister(right);
- cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
- // We transposed the operands. Reverse the condition.
- cond = ReverseCondition(cond);
- } else {
- cmp_left = ToRegister(left);
- cmp_right = Operand(ToRegister(right));
- }
-
- EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
- }
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- EmitBranch(true_block, false_block, eq, left, Operand(right));
-}
-
-
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- EmitBranch(true_block, false_block, eq, left,
- Operand(instr->hydrogen()->right()));
-}
-
-
-
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register scratch = scratch0();
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
- return;
- }
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(at, nil_value);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, eq, reg, Operand(at));
- } else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
- __ LoadRoot(at, other_nil_value); // In the delay slot.
- __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
- __ JumpIfSmi(reg, false_label); // In the delay slot.
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ And(scratch, scratch, 1 << Map::kIsUndetectable);
- EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
- }
-}
-
-
-Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Register temp2,
- Label* is_not_object,
- Label* is_object) {
- __ JumpIfSmi(input, is_not_object);
-
- __ LoadRoot(temp2, Heap::kNullValueRootIndex);
- __ Branch(is_object, eq, input, Operand(temp2));
-
- // Load map.
- __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
- __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
- __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
-
- // Load instance type and check that it is in object type range.
- __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
- __ Branch(is_not_object,
- lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-
- return le;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = scratch0();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond =
- EmitIsObject(reg, temp1, temp2, false_label, true_label);
-
- EmitBranch(true_block, false_block, true_cond, temp2,
- Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
- __ GetObjectType(input, temp1, temp1);
-
- return lt;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond =
- EmitIsString(reg, temp1, false_label);
-
- EmitBranch(true_block, false_block, true_cond, temp1,
- Operand(FIRST_NONSTRING_TYPE));
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Register input_reg = EmitLoadRegister(instr->value(), at);
- __ And(at, input_reg, kSmiTagMask);
- EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
- __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
- __ And(at, temp, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- Condition condition = ComputeCompareCondition(op);
-
- EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- ASSERT(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return eq;
- if (to == LAST_TYPE) return hs;
- if (from == FIRST_TYPE) return ls;
- UNREACHABLE();
- return eq;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register scratch = scratch0();
- Register input = ToRegister(instr->value());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
-
- __ GetObjectType(input, scratch, scratch);
- EmitBranch(true_block,
- false_block,
- BranchCondition(instr->hydrogen()),
- scratch,
- Operand(TestType(instr->hydrogen())));
-}
-
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ lw(scratch,
- FieldMemOperand(input, String::kHashFieldOffset));
- __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
-}
-
-
-// Branches to a label or falls through with the answer in flags. Trashes
-// the temp registers, but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true,
- Label* is_false,
- Handle<String>class_name,
- Register input,
- Register temp,
- Register temp2) {
- ASSERT(!input.is(temp));
- ASSERT(!input.is(temp2));
- ASSERT(!temp.is(temp2));
-
- __ JumpIfSmi(input, is_false);
-
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-
- __ GetObjectType(input, temp, temp2);
- __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
- } else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ GetObjectType(input, temp, temp2);
- __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- }
-
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
- // Check if the constructor in the map is a function.
- __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
-
- // Objects with a non-function constructor have class 'Object'.
- __ GetObjectType(temp, temp2, temp2);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
- __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
- } else {
- __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ lw(temp, FieldMemOperand(temp,
- SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
-
- // End with the address of this class_name instance in temp register.
- // On MIPS, the caller must do the comparison with Handle<String>class_name.
-}
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = scratch0();
- Register temp2 = ToRegister(instr->temp());
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
-
- __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
- EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- Label true_label, done;
- ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0.
- ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1.
- Register result = ToRegister(instr->result());
- ASSERT(result.is(v0));
-
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-
- __ Branch(&true_label, eq, result, Operand(zero_reg));
- __ li(result, Operand(factory()->false_value()));
- __ Branch(&done);
- __ bind(&true_label);
- __ li(result, Operand(factory()->true_value()));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
- }
- virtual LInstruction* instr() { return instr_; }
- Label* map_check() { return &map_check_; }
-
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
-
- ASSERT(object.is(a0));
- ASSERT(result.is(v0));
-
- // A Smi is not instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- Register map = temp;
- __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
-
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- __ bind(deferred->map_check()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch with
- // the cached map.
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ li(at, Operand(Handle<Object>(cell)));
- __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
- __ Branch(&cache_miss, ne, map, Operand(at));
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch
- // with true or false.
- __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
- __ Branch(&done);
-
- // The inlined call site cache did not match. Check null and string before
- // calling the deferred code.
- __ bind(&cache_miss);
- // Null is not instance of anything.
- __ LoadRoot(temp, Heap::kNullValueRootIndex);
- __ Branch(&false_result, eq, object, Operand(temp));
-
- // String values is not instance of anything.
- Condition cc = __ IsObjectStringType(object, temp, temp);
- __ Branch(&false_result, cc, temp, Operand(zero_reg));
-
- // Go to the deferred code.
- __ Branch(deferred->entry());
-
- __ bind(&false_result);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
-
- // Here result has either true or false. Deferred code also produces true or
- // false object.
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- Register result = ToRegister(instr->result());
- ASSERT(result.is(v0));
-
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(flags);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- // Get the temp register reserved by the instruction. This needs to be t0 as
- // its slot of the pushing of safepoint registers is used to communicate the
- // offset to the location of the map check.
- Register temp = ToRegister(instr->temp());
- ASSERT(temp.is(t0));
- __ LoadHeapObject(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 7;
- int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
- Label before_push_delta;
- __ bind(&before_push_delta);
- {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
- __ StoreToSafepointRegisterSlot(temp, temp);
- }
- CallCodeGeneric(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Put the result value into the result register slot and
- // restore all registers.
- __ StoreToSafepointRegisterSlot(result, result);
-}
-
-
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // On MIPS there is no need for a "no inlined smi code" marker (nop).
-
- Condition condition = ComputeCompareCondition(op);
- // A minor optimization that relies on LoadRoot always emitting one
- // instruction.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
- Label done;
- __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in v0.
- __ push(v0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- ASSERT(NeedsEagerFrame());
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
- }
- if (NeedsEagerFrame()) {
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
- __ mov(sp, fp);
- __ Pop(ra, fp);
- if (!info()->IsStub()) {
- __ Addu(sp, sp, Operand(sp_delta));
- }
- }
- __ Jump(ra);
-}
-
-
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
- __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
- }
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(a0));
- ASSERT(ToRegister(instr->result()).is(v0));
-
- __ li(a2, Operand(instr->name()));
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Register cell = scratch0();
-
- // Load the cell.
- __ li(cell, Operand(instr->hydrogen()->cell()));
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- // We use a temp to check the payload.
- Register payload = ToRegister(instr->temp());
- __ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
- }
-
- // Store the value.
- __ sw(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-}
-
-
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(a1));
- ASSERT(ToRegister(instr->value()).is(a0));
-
- __ li(a2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
-
- __ lw(result, ContextOperand(context, instr->slot_index()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
- } else {
- Label is_not_hole;
- __ Branch(&is_not_hole, ne, result, Operand(at));
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ bind(&is_not_hole);
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
- Register scratch = scratch0();
- MemOperand target = ContextOperand(context, instr->slot_index());
-
- Label skip_assignment;
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, target);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
- } else {
- __ Branch(&skip_assignment, ne, scratch, Operand(at));
- }
- }
-
- __ sw(value, target);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- __ RecordWriteContextSlot(context,
- target.offset(),
- value,
- scratch0(),
- kRAHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
- } else {
- __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ lw(result, FieldMemOperand(result, HeapObject::kMapOffset));
- DeoptimizeIf(ne, env, result, Operand(Handle<Map>(current->map())));
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- }
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- Register object_map = scratch0();
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(al, instr->environment());
- return;
- }
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMapAndBranch(
- object_map, map, &check_passed,
- eq, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
- if (last && !need_generic) {
- DeoptimizeIf(al, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- __ Branch(&next);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ Branch(&done);
- __ bind(&next);
- }
- }
- if (need_generic) {
- __ li(a2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(a0));
- ASSERT(ToRegister(instr->result()).is(v0));
-
- // Name is always in a2.
- __ li(a2, Operand(instr->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register scratch = scratch0();
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // Check that the function really is a function. Load map into the
- // result register.
- __ GetObjectType(function, result, scratch);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
- __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
-
- // Get the prototype or initial map from the function.
- __ lw(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ GetObjectType(result, scratch, scratch);
- __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
-
- // Get the prototype from the initial map.
- __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
- __ Branch(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- __ bind(&non_instance);
- __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- Register scratch = scratch0();
-
- __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, fail;
- __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
- __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); // In the delay slot.
- __ Branch(&done, eq, scratch, Operand(at));
- // |scratch| still contains |input|'s map.
- __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
- __ Ext(scratch, scratch, Map::kElementsKindShift,
- Map::kElementsKindBitCount);
- __ Branch(&fail, lt, scratch,
- Operand(GetInitialFastElementsKind()));
- __ Branch(&done, le, scratch,
- Operand(TERMINAL_FAST_ELEMENTS_KIND));
- __ Branch(&fail, lt, scratch,
- Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ Branch(&done, le, scratch,
- Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register to_reg = ToRegister(instr->result());
- Register from_reg = ToRegister(instr->object());
- __ lw(to_reg, FieldMemOperand(from_reg,
- ExternalArray::kExternalPointerOffset));
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them, add one more.
- __ subu(length, length, index);
- __ Addu(length, length, Operand(1));
- __ sll(length, length, kPointerSizeLog2);
- __ Addu(at, arguments, Operand(length));
- __ lw(result, MemOperand(at, 0));
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- FPURegister result = ToDoubleRegister(instr->result());
- if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
- } else {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), scratch0(), external_pointer);
- }
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), additional_offset));
- __ cvt_d_s(result, result);
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ ldc1(result, MemOperand(scratch0(), additional_offset));
- }
- } else {
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Register value = external_pointer;
- __ lw(value, MemOperand(scratch0(), additional_offset));
- __ And(sfpd_lo, value, Operand(kBinary32MantissaMask));
-
- __ srl(scratch0(), value, kBinary32MantissaBits);
- __ And(scratch0(), scratch0(),
- Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ Xor(at, scratch0(), Operand(0x00));
- __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
-
- __ Xor(at, scratch0(), Operand(0xff));
- Label skip;
- __ Branch(&skip, ne, at, Operand(zero_reg));
- __ li(scratch0(), Operand(0x7ff));
- __ bind(&skip);
- __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
-
- // Rebias exponent.
- __ Addu(scratch0(),
- scratch0(),
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ And(sfpd_hi, value, Operand(kBinary32SignMask));
- __ sll(at, scratch0(), HeapNumber::kMantissaBitsInTopWord);
- __ Or(sfpd_hi, sfpd_hi, at);
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ srl(at, sfpd_lo, kMantissaShiftForHiWord);
- __ Or(sfpd_hi, sfpd_hi, at);
- __ sll(sfpd_lo, sfpd_lo, kMantissaShiftForLoWord);
-
- } else {
- __ lw(sfpd_lo, MemOperand(scratch0(), additional_offset));
- __ lw(sfpd_hi, MemOperand(scratch0(),
- additional_offset + kPointerSize));
- }
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ lb(result, mem_operand);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ lbu(result, mem_operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ lh(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ lhu(result, mem_operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ lw(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ lw(result, mem_operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr->environment(),
- result, Operand(0x80000000));
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DoubleRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
-
- int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- ((constant_key + instr->additional_index()) << element_size_shift);
- if (!key_is_constant) {
- __ sll(scratch, key, shift_size);
- __ Addu(elements, elements, scratch);
- }
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ Addu(elements, elements, Operand(base_offset));
- __ ldc1(result, MemOperand(elements));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
- }
- } else {
- __ lw(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
- __ lw(sfpd_lo, MemOperand(elements, base_offset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- ASSERT(kPointerSize == sizeof(kHoleNanLower32));
- DeoptimizeIf(eq, instr->environment(), sfpd_hi, Operand(kHoleNanUpper32));
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- if (instr->key()->IsConstantOperand()) {
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- Register key = EmitLoadRegister(instr->key(), scratch0());
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(scratch, elements, scratch);
- } else {
- __ sll(scratch, key, kPointerSizeLog2);
- __ addu(scratch, elements, scratch);
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ lw(result, FieldMemOperand(store_base, offset));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ And(scratch, result, Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
- } else {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
-MemOperand LCodeGen::PrepareKeyedOperand(Register key,
- Register base,
- bool key_is_constant,
- int constant_key,
- int element_size,
- int shift_size,
- int additional_index,
- int additional_offset) {
- if (additional_index != 0 && !key_is_constant) {
- additional_index *= 1 << (element_size - shift_size);
- __ Addu(scratch0(), key, Operand(additional_index));
- }
-
- if (key_is_constant) {
- return MemOperand(base,
- (constant_key << element_size) + additional_offset);
- }
-
- if (additional_index == 0) {
- if (shift_size >= 0) {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), base, scratch0());
- return MemOperand(scratch0());
- } else {
- ASSERT_EQ(-1, shift_size);
- __ srl(scratch0(), key, 1);
- __ Addu(scratch0(), base, scratch0());
- return MemOperand(scratch0());
- }
- }
-
- if (shift_size >= 0) {
- __ sll(scratch0(), scratch0(), shift_size);
- __ Addu(scratch0(), base, scratch0());
- return MemOperand(scratch0());
- } else {
- ASSERT_EQ(-1, shift_size);
- __ srl(scratch0(), scratch0(), 1);
- __ Addu(scratch0(), base, scratch0());
- return MemOperand(scratch0());
- }
-}
-
-
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(a1));
- ASSERT(ToRegister(instr->key()).is(a0));
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register scratch = scratch0();
- Register temp = scratch1();
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ Subu(result, sp, 2 * kPointerSize);
- } else {
- // Check if the calling frame is an arguments adaptor frame.
- Label done, adapted;
- __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
- __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
- __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
-
- Label done;
-
- // If no arguments adaptor frame the number of arguments is fixed.
- __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
- __ Branch(&done, eq, fp, Operand(elem));
-
- // Arguments adaptor frame present. Get argument length from there.
- __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(result,
- MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register scratch = scratch0();
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
-
- // Do not transform the receiver to object for strict mode
- // functions.
- __ lw(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ lw(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
-
- // Do not transform the receiver to object for builtins.
- int32_t strict_mode_function_mask =
- 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
- int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
- __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
- __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
-
- // Normal function. Replace undefined or null with global receiver.
- __ LoadRoot(scratch, Heap::kNullValueRootIndex);
- __ Branch(&global_object, eq, receiver, Operand(scratch));
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ Branch(&global_object, eq, receiver, Operand(scratch));
-
- // Deoptimize if the receiver is not a JS object.
- __ And(scratch, receiver, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
-
- __ GetObjectType(receiver, scratch, scratch);
- DeoptimizeIf(lt, instr->environment(),
- scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(&receiver_ok);
-
- __ bind(&global_object);
- __ lw(receiver, GlobalObjectOperand());
- __ lw(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
- __ bind(&receiver_ok);
-}
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- Register scratch = scratch0();
- ASSERT(receiver.is(a0)); // Used for parameter count.
- ASSERT(function.is(a1)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(v0));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
-
- // Push the receiver and use the register to keep the original
- // number of arguments.
- __ push(receiver);
- __ Move(receiver, length);
- // The arguments are at a one pointer size offset from elements.
- __ Addu(elements, elements, Operand(1 * kPointerSize));
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
- __ sll(scratch, length, 2);
- __ bind(&loop);
- __ Addu(scratch, elements, scratch);
- __ lw(scratch, MemOperand(scratch));
- __ push(scratch);
- __ Subu(length, length, Operand(1));
- __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
- __ sll(scratch, length, 2);
-
- __ bind(&invoke);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- // The number of arguments is stored in receiver which is a0, as expected
- // by InvokeFunction.
- ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort("DoPushArgument not implemented for double type.");
- } else {
- Register argument_reg = EmitLoadRegister(argument, at);
- __ push(argument_reg);
- }
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- // If there is a non-return use, the context must be moved to a register.
- Register result = ToRegister(instr->result());
- for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- __ mov(result, cp);
- return;
- }
- }
-}
-
-
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ lw(result,
- MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
- __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
- // The context is the first argument.
- __ Push(cp, scratch0(), scratch1());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register result = ToRegister(instr->result());
- __ lw(result, ContextOperand(cp, instr->qml_global()
- ? Context::QML_GLOBAL_OBJECT_INDEX
- : Context::GLOBAL_OBJECT_INDEX));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global_object());
- Register result = ToRegister(instr->result());
- __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr,
- CallKind call_kind,
- A1State a1_state) {
- bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
- function->shared()->formal_parameter_count() == arity;
-
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- if (can_invoke_directly) {
- if (a1_state == A1_UNINITIALIZED) {
- __ LoadHeapObject(a1, function);
- }
-
- // Change context.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Set r0 to arguments count if adaption is not needed. Assumes that r0
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ li(a0, Operand(arity));
- }
-
- // Invoke function.
- __ SetCallKind(t1, call_kind);
- __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ Call(at);
-
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- } else {
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
- __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
- }
-
- // Restore context.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
- __ mov(a0, v0);
- CallKnownFunction(instr->function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- A1_UNINITIALIZED);
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // Deoptimize if not a heap number.
- __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
-
- Label done;
- Register exponent = scratch0();
- scratch = no_reg;
- __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it.
- __ Move(result, input);
- __ And(at, exponent, Operand(HeapNumber::kSignMask));
- __ Branch(&done, eq, at, Operand(zero_reg));
-
- // Input is negative. Reverse its sign.
- // Preserve the value of all registers.
- {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- // Registers were saved at the safepoint, so we can use
- // many scratch registers.
- Register tmp1 = input.is(a1) ? a0 : a1;
- Register tmp2 = input.is(a2) ? a0 : a2;
- Register tmp3 = input.is(a3) ? a0 : a3;
- Register tmp4 = input.is(t0) ? a0 : t0;
-
- // exponent: floating point exponent value.
-
- Label allocated, slow;
- __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
- __ Branch(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- // Set the pointer to the new heap number in tmp.
- if (!tmp1.is(v0))
- __ mov(tmp1, v0);
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input, input);
- __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
-
- __ bind(&allocated);
- // exponent: floating point exponent value.
- // tmp1: allocated heap number.
- __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
- __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
- __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
- __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
-
- __ StoreToSafepointRegisterSlot(tmp1, result);
- }
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Label done;
- __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
- __ mov(result, input);
- ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
- __ subu(result, zero_reg, input);
- // Overflow if result is still negative, i.e. 0x80000000.
- DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(FPU);
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LUnaryMathOperation* instr_;
- };
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsDouble()) {
- FPURegister input = ToDoubleRegister(instr->value());
- FPURegister result = ToDoubleRegister(instr->result());
- __ abs_d(result, input);
- } else if (r.IsInteger32()) {
- EmitIntegerMathAbs(instr);
- } else {
- // Representation is tagged.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input, deferred->entry());
- // If smi, handle it directly.
- EmitIntegerMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(FPU);
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register scratch1 = scratch0();
- Register except_flag = ToRegister(instr->temp());
-
- __ EmitFPUTruncate(kRoundToMinusInf,
- result,
- input,
- scratch1,
- double_scratch0(),
- except_flag);
-
- // Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- Label done;
- __ Branch(&done, ne, result, Operand(zero_reg));
- __ mfc1(scratch1, input.high());
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(FPU);
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
- Register scratch = scratch0();
- Label done, check_sign_on_zero;
-
- // Extract exponent bits.
- __ mfc1(result, input.high());
- __ Ext(scratch,
- result,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // If the number is in ]-0.5, +0.5[, the result is +/- 0.
- Label skip1;
- __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
- __ mov(result, zero_reg);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Branch(&check_sign_on_zero);
- } else {
- __ Branch(&done);
- }
- __ bind(&skip1);
-
- // The following conversion will not work with numbers
- // outside of ]-2^32, 2^32[.
- DeoptimizeIf(ge, instr->environment(), scratch,
- Operand(HeapNumber::kExponentBias + 32));
-
- // Save the original sign for later comparison.
- __ And(scratch, result, Operand(HeapNumber::kSignMask));
-
- __ Move(double_scratch0(), 0.5);
- __ add_d(double_scratch0(), input, double_scratch0());
-
- // Check sign of the result: if the sign changed, the input
- // value was in ]0.5, 0[ and the result should be -0.
- __ mfc1(result, double_scratch0().high());
- __ Xor(result, result, Operand(scratch));
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // ARM uses 'mi' here, which is 'lt'
- DeoptimizeIf(lt, instr->environment(), result,
- Operand(zero_reg));
- } else {
- Label skip2;
- // ARM uses 'mi' here, which is 'lt'
- // Negating it results in 'ge'
- __ Branch(&skip2, ge, result, Operand(zero_reg));
- __ mov(result, zero_reg);
- __ Branch(&done);
- __ bind(&skip2);
- }
-
- Register except_flag = scratch;
- __ EmitFPUTruncate(kRoundToMinusInf,
- result,
- double_scratch0(),
- at,
- double_scratch1,
- except_flag);
-
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- __ Branch(&done, ne, result, Operand(zero_reg));
- __ bind(&check_sign_on_zero);
- __ mfc1(scratch, input.high());
- __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(FPU);
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ sqrt_d(result, input);
-}
-
-
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(FPU);
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister temp = ToDoubleRegister(instr->temp());
-
- ASSERT(!input.is(result));
-
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label done;
- __ Move(temp, -V8_INFINITY);
- __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
- // Set up Infinity in the delay slot.
- // result is overwritten if the branch is not taken.
- __ neg_d(result, temp);
-
- // Add +0 to convert -0 to +0.
- __ add_d(result, input, kDoubleRegZero);
- __ sqrt_d(result, result);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoPower(LPower* instr) {
- CpuFeatures::Scope scope(FPU);
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
- ASSERT(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(f4));
- ASSERT(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(a2));
- ASSERT(ToDoubleRegister(instr->left()).is(f2));
- ASSERT(ToDoubleRegister(instr->result()).is(f0));
-
- if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(a2, &no_deopt);
- __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
- __ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-
-void LCodeGen::DoRandom(LRandom* instr) {
- CpuFeatures::Scope scope(FPU);
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(f0));
- ASSERT(ToRegister(instr->global_object()).is(a0));
-
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == kSeedSize);
-
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
- // a2: FixedArray of the native context's random seeds
-
- // Load state[0].
- __ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
- __ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
- // Load state[1].
- __ lw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
- // a1: state[0].
- // a0: state[1].
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ And(a3, a1, Operand(0xFFFF));
- __ li(t0, Operand(18273));
- __ Mul(a3, a3, t0);
- __ srl(a1, a1, 16);
- __ Addu(a1, a3, a1);
- // Save state[0].
- __ sw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ And(a3, a0, Operand(0xFFFF));
- __ li(t0, Operand(36969));
- __ Mul(a3, a3, t0);
- __ srl(a0, a0, 16),
- __ Addu(a0, a3, a0);
- // Save state[1].
- __ sw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ And(a0, a0, Operand(0x3FFFF));
- __ sll(a1, a1, 14);
- __ Addu(v0, a0, a1);
-
- __ bind(deferred->exit());
-
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- __ li(a2, Operand(0x41300000));
- // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
- __ Move(f12, v0, a2);
- // Move 0x4130000000000000 to FPU.
- __ Move(f14, zero_reg, a2);
- // Subtract to get the result.
- __ sub_d(f0, f12, f14);
-}
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, scratch0());
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in v0.
-}
-
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- CpuFeatures::Scope scope(FPU);
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
- DoubleRegister double_scratch2 = double_scratch0();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(
- masm(), input, result, double_scratch1, double_scratch2,
- temp1, temp2, scratch0());
-}
-
-
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
- default:
- Abort("Unimplemented type of LUnaryMathOperation.");
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- ASSERT(ToRegister(instr->function()).is(a1));
- ASSERT(instr->HasPointerMap());
-
- if (instr->known_function().is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- } else {
- CallKnownFunction(instr->known_function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- A1_CONTAINS_TARGET);
- }
-}
-
-
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
-
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ li(a2, Operand(instr->name()));
- CallCode(ic, mode, instr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->function()).is(a1));
- ASSERT(ToRegister(instr->result()).is(v0));
-
- int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ li(a2, Operand(instr->name()));
- CallCode(ic, mode, instr);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
- CallKnownFunction(instr->target(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- A1_UNINITIALIZED);
-}
-
-
-void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->constructor()).is(a1));
- ASSERT(ToRegister(instr->result()).is(v0));
-
- __ li(a0, Operand(instr->arity()));
- if (FLAG_optimize_constructed_arrays) {
- // No cell in a2 for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
- isolate());
- __ li(a2, Operand(undefined_value));
- }
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- ASSERT(ToRegister(instr->constructor()).is(a1));
- ASSERT(ToRegister(instr->result()).is(v0));
- ASSERT(FLAG_optimize_constructed_arrays);
-
- __ li(a0, Operand(instr->arity()));
- __ li(a2, Operand(instr->hydrogen()->property_cell()));
- Handle<Code> array_construct_code =
- isolate()->builtins()->ArrayConstructCode();
-
- CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
- Register scratch = scratch0();
- int offset = instr->offset();
-
- ASSERT(!object.is(value));
-
- if (!instr->transition().is_null()) {
- __ li(scratch, Operand(instr->transition()));
- __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
- Register temp = ToRegister(instr->temp());
- // Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- scratch,
- temp,
- kRAHasBeenSaved,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
- }
-
- // Do the store.
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
- __ sw(value, FieldMemOperand(object, offset));
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(object,
- offset,
- value,
- scratch,
- kRAHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
- } else {
- __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ sw(value, FieldMemOperand(scratch, offset));
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWriteField(scratch,
- offset,
- value,
- object,
- kRAHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
- }
-}
-
-
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(a1));
- ASSERT(ToRegister(instr->value()).is(a0));
-
- // Name is always in a2.
- __ li(a2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check()) return;
-
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
- __ li(at, Operand(Smi::FromInt(constant_index)));
- } else {
- __ li(at, Operand(constant_index));
- }
- DeoptimizeIf(hs,
- instr->environment(),
- at,
- Operand(ToRegister(instr->length())));
- } else {
- DeoptimizeIf(hs,
- instr->environment(),
- ToRegister(instr->index()),
- Operand(ToRegister(instr->length())));
- }
-}
-
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- CpuFeatures::Scope scope(FPU);
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- FPURegister value(ToDoubleRegister(instr->value()));
- if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key <<
- element_size_shift);
- } else {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), scratch0(), external_pointer);
- }
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvt_s_d(double_scratch0(), value);
- __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ sdc1(value, MemOperand(scratch0(), additional_offset));
- }
- } else {
- Register value(ToRegister(instr->value()));
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ sb(value, mem_operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ sh(value, mem_operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sw(value, mem_operand);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- CpuFeatures::Scope scope(FPU);
- DoubleRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- Label not_nan;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- if (key_is_constant) {
- __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ sll(scratch, key, shift_size);
- __ Addu(scratch, elements, Operand(scratch));
- __ Addu(scratch, scratch,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- if (instr->NeedsCanonicalization()) {
- Label is_nan;
- // Check for NaN. All NaNs must be canonicalized.
- __ BranchF(NULL, &is_nan, eq, value, value);
- __ Branch(&not_nan);
-
- // Only load canonical NaN if the comparison above set the overflow.
- __ bind(&is_nan);
- __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- }
-
- __ bind(&not_nan);
- __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
- element_size_shift));
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
- : no_reg;
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(scratch, elements, scratch);
- } else {
- __ sll(scratch, key, kPointerSizeLog2);
- __ addu(scratch, elements, scratch);
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ sw(value, FieldMemOperand(store_base, offset));
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
- __ RecordWrite(elements,
- key,
- value,
- kRAHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- // By cases: external, fast double
- if (instr->is_external()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(a2));
- ASSERT(ToRegister(instr->key()).is(a1));
- ASSERT(ToRegister(instr->value()).is(a0));
-
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
- Register scratch = scratch0();
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- __ Branch(&not_applicable, ne, scratch, Operand(from_map));
-
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ li(new_map_reg, Operand(to_map));
- __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- // Write barrier.
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- scratch, kRAHasBeenSaved, kDontSaveFPRegs);
- } else if (FLAG_compiled_transitions) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ mov(a0, object_reg);
- __ li(a1, Operand(to_map));
- TransitionElementsKindStub stub(from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(a2));
- Register new_map_reg = ToRegister(instr->new_map_temp());
- ASSERT(new_map_reg.is(a3));
- __ li(new_map_reg, Operand(to_map));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
- RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(a2));
- Register new_map_reg = ToRegister(instr->new_map_temp());
- ASSERT(new_map_reg.is(a3));
- __ li(new_map_reg, Operand(to_map));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
- RelocInfo::CODE_TARGET, instr);
- } else {
- UNREACHABLE();
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
- Label fail;
- __ TestJSArrayForAllocationSiteInfo(object, temp, ne, &fail);
- DeoptimizeIf(al, instr->environment());
- __ bind(&fail);
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
- StringCharLoadGenerator::Generate(masm(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, zero_reg);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
- __ push(scratch);
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
- __ AssertSmi(v0);
- __ SmiUntag(v0);
- __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- ASSERT(!char_code.is(result));
-
- __ Branch(deferred->entry(), hi,
- char_code, Operand(String::kMaxOneByteCharCode));
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ sll(scratch, char_code, kPointerSizeLog2);
- __ Addu(result, result, scratch);
- __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ Branch(deferred->entry(), eq, result, Operand(scratch));
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, zero_reg);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ SmiTag(char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
- __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- __ lw(result, FieldMemOperand(string, String::kLengthOffset));
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- CpuFeatures::Scope scope(FPU);
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
- FPURegister single_scratch = double_scratch0().low();
- if (input->IsStackSlot()) {
- Register scratch = scratch0();
- __ lw(scratch, ToMemOperand(input));
- __ mtc1(scratch, single_scratch);
- } else {
- __ mtc1(ToRegister(input), single_scratch);
- }
- __ cvt_d_w(ToDoubleRegister(output), single_scratch);
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- CpuFeatures::Scope scope(FPU);
- LOperand* input = instr->value();
- LOperand* output = instr->result();
-
- FPURegister dbl_scratch = double_scratch0();
- __ mtc1(ToRegister(input), dbl_scratch);
- __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- SIGNED_INT32);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagI* instr_;
- };
-
- Register src = ToRegister(instr->value());
- Register dst = ToRegister(instr->result());
- Register overflow = scratch0();
-
- DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
- __ SmiTagCheckOverflow(dst, src, overflow);
- __ BranchOnOverflow(deferred->entry(), overflow);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- UNSIGNED_INT32);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagU* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ Branch(deferred->entry(), hi, reg, Operand(Smi::kMaxValue));
- __ SmiTag(reg, reg);
- __ bind(deferred->exit());
-}
-
-
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
- masm->li(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- masm->sll(loword, hiword, mantissa_shift_for_lo_word);
- masm->srl(hiword, hiword, mantissa_shift_for_hi_word);
- masm->Or(hiword, scratch, hiword);
- } else {
- masm->mov(loword, zero_reg);
- masm->sll(hiword, hiword, mantissa_shift_for_hi_word);
- masm->Or(hiword, scratch, hiword);
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- masm->li(scratch, 1 << HeapNumber::kExponentShift);
- masm->nor(scratch, scratch, scratch);
- masm->and_(hiword, hiword, scratch);
- }
-}
-
-
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
- Register src = ToRegister(value);
- Register dst = ToRegister(instr->result());
- DoubleRegister dbl_scratch = double_scratch0();
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- Label done;
- if (signedness == SIGNED_INT32) {
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- if (dst.is(src)) {
- __ SmiUntag(src, dst);
- __ Xor(src, src, Operand(0x80000000));
- }
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ mtc1(src, dbl_scratch);
- __ cvt_d_w(dbl_scratch, dbl_scratch);
- } else {
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, f0,
- sfpd_lo, sfpd_hi,
- scratch0(), f2);
- }
- } else {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ mtc1(src, dbl_scratch);
- __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
- } else {
- Label no_leading_zero, done;
- __ And(at, src, Operand(0x80000000));
- __ Branch(&no_leading_zero, ne, at, Operand(zero_reg));
-
- // Integer has one leading zeros.
- GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 1);
- __ Branch(&done);
-
- __ bind(&no_leading_zero);
- GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 0);
- __ Branch(&done);
- }
- }
-
- if (FLAG_inline_new) {
- __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
- __ Move(dst, t1);
- __ Branch(&done);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(zero_reg, dst);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- __ Move(dst, v0);
- __ Subu(dst, dst, kHeapObjectTag);
-
- // Done. Put the value in dbl_scratch into the value of the allocated heap
- // number.
- __ bind(&done);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
- } else {
- __ sw(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
- __ sw(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
- }
- __ Addu(dst, dst, kHeapObjectTag);
- __ StoreToSafepointRegisterSlot(dst, dst);
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagD* instr_;
- };
-
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
- Register scratch = scratch0();
- Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- bool convert_hole = false;
- HValue* change_input = instr->hydrogen()->value();
- if (change_input->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(change_input);
- convert_hole = load->UsesMustHandleHole();
- }
-
- Label no_special_nan_handling;
- Label done;
- if (convert_hole) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
- __ BranchF(&no_special_nan_handling, NULL, eq, input_reg, input_reg);
- __ Move(reg, scratch0(), input_reg);
- Label canonicalize;
- __ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32));
- __ li(reg, factory()->the_hole_value());
- __ Branch(&done);
- __ bind(&canonicalize);
- __ Move(input_reg,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- } else {
- Label not_hole;
- __ Branch(&not_hole, ne, sfpd_hi, Operand(kHoleNanUpper32));
- __ li(reg, factory()->the_hole_value());
- __ Branch(&done);
- __ bind(&not_hole);
- __ And(scratch, sfpd_hi, Operand(0x7ff00000));
- __ Branch(&no_special_nan_handling, ne, scratch, Operand(0x7ff00000));
- Label special_nan_handling;
- __ And(at, sfpd_hi, Operand(0x000FFFFF));
- __ Branch(&special_nan_handling, ne, at, Operand(zero_reg));
- __ Branch(&no_special_nan_handling, eq, sfpd_lo, Operand(zero_reg));
- __ bind(&special_nan_handling);
- double canonical_nan =
- FixedDoubleArray::canonical_not_the_hole_nan_as_double();
- uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
- __ li(sfpd_lo,
- Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
- __ li(sfpd_hi,
- Operand(static_cast<uint32_t>(casted_nan >> 32)));
- }
- }
-
- __ bind(&no_special_nan_handling);
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- // We want the untagged address first for performance
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
- DONT_TAG_RESULT);
- } else {
- __ Branch(deferred->entry());
- }
- __ bind(deferred->exit());
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
- } else {
- __ sw(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
- __ sw(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
- }
- // Now that we have finished with the object's real address tag it
- __ Addu(reg, reg, kHeapObjectTag);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ mov(reg, zero_reg);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- __ Subu(v0, v0, kHeapObjectTag);
- __ StoreToSafepointRegisterSlot(v0, reg);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Register scratch = scratch0();
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- if (instr->needs_check()) {
- STATIC_ASSERT(kHeapObjectTag == 1);
- // If the input is a HeapObject, value of scratch won't be zero.
- __ And(scratch, input, Operand(kHeapObjectTag));
- __ SmiUntag(result, input);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
- } else {
- __ SmiUntag(result, input);
- }
-}
-
-
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- DoubleRegister result_reg,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode) {
- Register scratch = scratch0();
- CpuFeatures::Scope scope(FPU);
-
- Label load_smi, heap_number, done;
-
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
- // Heap number map check.
- __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- if (deoptimize_on_undefined) {
- DeoptimizeIf(ne, env, scratch, Operand(at));
- } else {
- Label heap_number;
- __ Branch(&heap_number, eq, scratch, Operand(at));
-
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, env, input_reg, Operand(at));
-
- // Convert undefined to NaN.
- __ LoadRoot(at, Heap::kNanValueRootIndex);
- __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
- __ Branch(&done);
-
- __ bind(&heap_number);
- }
- // Heap number to double register conversion.
- __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- __ mfc1(at, result_reg.low());
- __ Branch(&done, ne, at, Operand(zero_reg));
- __ mfc1(scratch, result_reg.high());
- DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
- }
- __ Branch(&done);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
- __ SmiUntag(scratch, input_reg);
- DeoptimizeIf(Ugreater_equal, env, scratch, Operand(zero_reg));
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
- __ Move(result_reg,
- FixedDoubleArray::hole_nan_as_double());
- __ Branch(&done);
- } else {
- __ SmiUntag(scratch, input_reg);
- ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
- }
-
- // Smi to double register conversion
- __ bind(&load_smi);
- // scratch: untagged value of input_reg
- __ mtc1(scratch, result_reg);
- __ cvt_d_w(result_reg, result_reg);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Register input_reg = ToRegister(instr->value());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DoubleRegister double_scratch = double_scratch0();
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
-
- ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
- ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
-
- Label done;
-
- // The input is a tagged HeapObject.
- // Heap number map check.
- __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- // This 'at' value and scratch1 map value are used for tests in both clauses
- // of the if.
-
- if (instr->truncating()) {
- CpuFeatures::Scope scope(FPU);
- Register scratch3 = ToRegister(instr->temp2());
- FPURegister single_scratch = double_scratch.low();
- ASSERT(!scratch3.is(input_reg) &&
- !scratch3.is(scratch1) &&
- !scratch3.is(scratch2));
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- Label heap_number;
- __ Branch(&heap_number, eq, scratch1, Operand(at)); // HeapNumber map?
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
- ASSERT(ToRegister(instr->result()).is(input_reg));
- __ mov(input_reg, zero_reg);
- __ Branch(&done);
-
- __ bind(&heap_number);
- __ ldc1(double_scratch2,
- FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- __ EmitECMATruncate(input_reg,
- double_scratch2,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
- } else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
-
- // Load the double value.
- __ ldc1(double_scratch,
- FieldMemOperand(input_reg, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- input_reg,
- double_scratch,
- scratch1,
- double_scratch2,
- except_flag,
- kCheckForInexactConversion);
-
- // Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Branch(&done, ne, input_reg, Operand(zero_reg));
-
- __ mfc1(scratch1, double_scratch.high());
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
-
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
-
- // Let the deferred code handle the HeapObject case.
- __ JumpIfNotSmi(input_reg, deferred->entry());
-
- // Smi to int32 conversion.
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- DoubleRegister result_reg = ToDoubleRegister(result);
-
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
- HValue* value = instr->hydrogen()->value();
- if (value->type().IsSmi()) {
- if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
- }
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
- }
- }
- }
-
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DoubleRegister double_input = ToDoubleRegister(instr->value());
-
- if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- FPURegister single_scratch = double_scratch0().low();
- __ EmitECMATruncate(result_reg,
- double_input,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
- } else {
- Register except_flag = scratch2;
-
- __ EmitFPUTruncate(kRoundToMinusInf,
- result_reg,
- double_input,
- scratch1,
- double_scratch0(),
- except_flag,
- kCheckForInexactConversion);
-
- // Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
- }
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- __ And(at, ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- __ And(at, ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- __ GetObjectType(input, scratch, scratch);
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- // If there is only one type in the interval check for equality.
- if (first == last) {
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
- } else {
- DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (IsPowerOf2(mask)) {
- ASSERT(tag == 0 || IsPowerOf2(tag));
- __ And(at, scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
- at, Operand(zero_reg));
- } else {
- __ And(scratch, scratch, Operand(mask));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
- }
- }
-}
-
-
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (isolate()->heap()->InNewSpace(*target)) {
- Register reg = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
- __ li(at, Operand(Handle<Object>(cell)));
- __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
- DeoptimizeIf(ne, instr->environment(), reg,
- Operand(at));
- } else {
- DeoptimizeIf(ne, instr->environment(), reg,
- Operand(target));
- }
-}
-
-
-void LCodeGen::DoCheckMapCommon(Register map_reg,
- Handle<Map> map,
- CompareMapMode mode,
- LEnvironment* env) {
- Label success;
- __ CompareMapAndBranch(map_reg, map, &success, eq, &success, mode);
- DeoptimizeIf(al, env);
- __ bind(&success);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- Register map_reg = scratch0();
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- Register reg = ToRegister(input);
- Label success;
- SmallMapList* map_set = instr->hydrogen()->map_set();
- __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMapAndBranch(
- map_reg, map, &success, eq, &success, REQUIRE_EXACT_MAP);
- }
- Handle<Map> map = map_set->last();
- DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment());
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- CpuFeatures::Scope vfp_scope(FPU);
- DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
- __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- CpuFeatures::Scope vfp_scope(FPU);
- Register unclamped_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampUint8(result_reg, unclamped_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- CpuFeatures::Scope vfp_scope(FPU);
- Register scratch = scratch0();
- Register input_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
- Label is_smi, done, heap_number;
-
- // Both smi and heap number cases are handled.
- __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
-
- // Check for heap number
- __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- DeoptimizeIf(ne, instr->environment(), input_reg,
- Operand(factory()->undefined_value()));
- __ mov(result_reg, zero_reg);
- __ jmp(&done);
-
- // Heap number
- __ bind(&heap_number);
- __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
- HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
- __ jmp(&done);
-
- __ bind(&is_smi);
- __ ClampUint8(result_reg, scratch);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
- Register prototype_reg = ToRegister(instr->temp());
- Register map_reg = ToRegister(instr->temp2());
-
- ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
- ZoneList<Handle<Map> >* maps = instr->maps();
-
- ASSERT(prototypes->length() == maps->length());
-
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- __ LoadHeapObject(prototype_reg,
- prototypes->at(prototypes->length() - 1));
- } else {
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(prototype_reg, prototypes->at(i));
- __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
- DoCheckMapCommon(map_reg,
- maps->at(i),
- ALLOW_ELEMENT_TRANSITION_MAPS,
- instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Register scratch2 = ToRegister(instr->temp2());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- // Allocate memory for the object. The initial map might change when
- // the constructor's prototype changes, but instance size and property
- // counts remain unchanged (if slack tracking finished).
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(map, constructor);
- __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
- __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ sw(scratch, FieldMemOperand(result, property_offset));
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, zero_reg);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ li(a0, Operand(Smi::FromInt(instance_size)));
- __ push(a0);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- // Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ AllocateInNewSpace(size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- flags);
- } else {
- Register size = ToRegister(instr->size());
- __ AllocateInNewSpace(size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- flags);
- }
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, zero_reg);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ SmiTag(size, size);
- __ push(size);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
- AllocationSiteMode allocation_site_mode =
- instr->hydrogen()->allocation_site_mode();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
- // Load map into a2.
- __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- DeoptimizeIf(ne,
- instr->environment(),
- a2,
- Operand(boilerplate_elements_kind));
- }
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(a3, literals);
- __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ li(a1, Operand(isolate()->factory()->empty_fixed_array()));
- __ Push(a3, a2, a1);
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode) {
- ASSERT(!source.is(a2));
- ASSERT(!result.is(a2));
-
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- object->map()->CanTrackAllocationSite();
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_size = has_elements ? elements->Size() : 0;
- int elements_offset = *offset + object_size;
- if (create_allocation_site_info) {
- elements_offset += AllocationSiteInfo::kSize;
- *offset += AllocationSiteInfo::kSize;
- }
-
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ Addu(a2, result, Operand(elements_offset));
- } else {
- __ lw(a2, FieldMemOperand(source, i));
- }
- __ sw(a2, FieldMemOperand(result, object_offset + i));
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
- isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ Addu(a2, result, Operand(*offset));
- __ sw(a2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
- __ sw(a2, FieldMemOperand(result, total_offset));
- } else {
- __ li(a2, Operand(value));
- __ sw(a2, FieldMemOperand(result, total_offset));
- }
- }
-
- // Build Allocation Site Info if desired
- if (create_allocation_site_info) {
- __ li(a2, Operand(Handle<Map>(isolate()->heap()->
- allocation_site_info_map())));
- __ sw(a2, FieldMemOperand(result, object_size));
- __ sw(source, FieldMemOperand(result, object_size + kPointerSize));
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ lw(a2, FieldMemOperand(source, i));
- __ sw(a2, FieldMemOperand(result, elements_offset + i));
- }
-
- // Copy elements backing store content.
- int elements_length = has_elements ? elements->length() : 0;
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- // We only support little endian mode...
- int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
- int32_t value_high = static_cast<int32_t>(value >> 32);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ li(a2, Operand(value_low));
- __ sw(a2, FieldMemOperand(result, total_offset));
- __ li(a2, Operand(value_high));
- __ sw(a2, FieldMemOperand(result, total_offset + 4));
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i), isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ Addu(a2, result, Operand(*offset));
- __ sw(a2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
- __ sw(a2, FieldMemOperand(result, total_offset));
- } else {
- __ li(a2, Operand(value));
- __ sw(a2, FieldMemOperand(result, total_offset));
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
- // Load map into a2.
- __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- DeoptimizeIf(ne, instr->environment(), a2,
- Operand(boilerplate_elements_kind));
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ li(a0, Operand(Smi::FromInt(size)));
- __ push(a0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset,
- instr->hydrogen()->allocation_site_mode());
- ASSERT_EQ(size, offset);
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(a3, literals);
- __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(a1, Operand(constant_properties));
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- __ li(a0, Operand(Smi::FromInt(flags)));
-
- // Pick the right runtime function or stub to call.
- int properties_count = constant_properties->length() / 2;
- if (instr->hydrogen()->depth() > 1) {
- __ Push(a3, a2, a1, a0);
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ Push(a3, a2, a1, a0);
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- } else {
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->value()).is(a0));
- ASSERT(ToRegister(instr->result()).is(v0));
- __ push(a0);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- Label materialized;
- // Registers will be used as follows:
- // t3 = literals array.
- // a1 = regexp literal.
- // a0 = regexp literal clone.
- // a2 and t0-t2 are used as temporaries.
- int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(t3, instr->hydrogen()->literals());
- __ lw(a1, FieldMemOperand(t3, literal_offset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&materialized, ne, a1, Operand(at));
-
- // Create regexp literal using runtime function
- // Result will be in v0.
- __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(t1, Operand(instr->hydrogen()->pattern()));
- __ li(t0, Operand(instr->hydrogen()->flags()));
- __ Push(t3, t2, t1, t0);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(a1, v0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ li(a0, Operand(Smi::FromInt(size)));
- __ Push(a1, a0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(a1);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ lw(a3, FieldMemOperand(a1, i));
- __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
- __ sw(a3, FieldMemOperand(v0, i));
- __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
- __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
- }
-}
-
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
- __ li(a1, Operand(shared_info));
- __ push(a1);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- __ li(a2, Operand(shared_info));
- __ li(a1, Operand(pretenure
- ? factory()->true_value()
- : factory()->false_value()));
- __ Push(cp, a2, a1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
- Register input = ToRegister(instr->value());
- __ push(input);
- CallRuntime(Runtime::kTypeof, 1, instr);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Register cmp1 = no_reg;
- Operand cmp2 = Operand(no_reg);
-
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
- input,
- instr->type_literal(),
- cmp1,
- cmp2);
-
- ASSERT(cmp1.is_valid());
- ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
-
- if (final_branch_condition != kNoCondition) {
- EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name,
- Register& cmp1,
- Operand& cmp2) {
- // This function utilizes the delay slot heavily. This is used to load
- // values that are always usable without depending on the type of the input
- // register.
- Condition final_branch_condition = kNoCondition;
- Register scratch = scratch0();
- if (type_name->Equals(heap()->number_string())) {
- __ JumpIfSmi(input, true_label);
- __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- cmp1 = input;
- cmp2 = Operand(at);
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->string_string())) {
- __ JumpIfSmi(input, false_label);
- __ GetObjectType(input, input, scratch);
- __ Branch(USE_DELAY_SLOT, false_label,
- ge, scratch, Operand(FIRST_NONSTRING_TYPE));
- // input is an object so we can load the BitFieldOffset even if we take the
- // other branch.
- __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
- __ And(at, at, 1 << Map::kIsUndetectable);
- cmp1 = at;
- cmp2 = Operand(zero_reg);
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->boolean_string())) {
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- cmp1 = at;
- cmp2 = Operand(input);
- final_branch_condition = eq;
-
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- cmp1 = at;
- cmp2 = Operand(input);
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->undefined_string())) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- // The first instruction of JumpIfSmi is an And - it is safe in the delay
- // slot.
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
- __ And(at, at, 1 << Map::kIsUndetectable);
- cmp1 = at;
- cmp2 = Operand(zero_reg);
- final_branch_condition = ne;
-
- } else if (type_name->Equals(heap()->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfSmi(input, false_label);
- __ GetObjectType(input, scratch, input);
- __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
- cmp1 = input;
- cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->object_string())) {
- __ JumpIfSmi(input, false_label);
- if (!FLAG_harmony_typeof) {
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- }
- if (FLAG_harmony_symbols) {
- // input is an object, it is safe to use GetObjectType in the delay slot.
- __ GetObjectType(input, input, scratch);
- __ Branch(USE_DELAY_SLOT, true_label, eq, scratch, Operand(SYMBOL_TYPE));
- // Still an object, so the InstanceType can be loaded.
- __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
- __ Branch(USE_DELAY_SLOT, false_label,
- lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- } else {
- // input is an object, it is safe to use GetObjectType in the delay slot.
- __ GetObjectType(input, input, scratch);
- __ Branch(USE_DELAY_SLOT, false_label,
- lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- }
- // Still an object, so the InstanceType can be loaded.
- __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
- __ Branch(USE_DELAY_SLOT, false_label,
- gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- // Still an object, so the BitField can be loaded.
- // Check for undetectable objects => false.
- __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
- __ And(at, at, 1 << Map::kIsUndetectable);
- cmp1 = at;
- cmp2 = Operand(zero_reg);
- final_branch_condition = eq;
-
- } else {
- cmp1 = at;
- cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
- __ Branch(false_label);
- }
-
- return final_branch_condition;
-}
-
-
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- EmitIsConstructCall(temp1, scratch0());
-
- EmitBranch(true_block, false_block, eq, temp1,
- Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- ASSERT(!temp1.is(temp2));
- // Get the frame pointer for the calling frame.
- __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne, temp2,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt() {
- if (info()->IsStub()) return;
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= Assembler::kInstrSize;
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- Register object = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- Register strict = scratch0();
- __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
- __ Push(object, key, strict);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoIn(LIn* instr) {
- Register obj = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- __ Push(key, obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStackCheck* instr_;
- };
-
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(&done, hs, sp, Operand(at));
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- EnsureSpaceForLazyDeopt();
- __ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- } else {
- ASSERT(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
- EnsureSpaceForLazyDeopt();
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- Register result = ToRegister(instr->result());
- Register object = ToRegister(instr->object());
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), object, Operand(at));
-
- Register null_value = t1;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
-
- __ And(at, object, kSmiTagMask);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
-
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ GetObjectType(object, a1, a1);
- DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
-
- Label use_cache, call_runtime;
- ASSERT(object.is(a0));
- __ CheckEnumCache(null_value, &call_runtime);
-
- __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ Branch(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(object);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
-
- __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- ASSERT(result.is(v0));
- __ LoadRoot(at, Heap::kMetaMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
- __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
- __ jmp(&done);
-
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ lw(result,
- FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
- __ lw(result,
- FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- Register map = ToRegister(instr->map());
- __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- Label out_of_object, done;
- __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
- __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
-
- STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
- __ Addu(scratch, object, scratch);
- __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
-
- __ Branch(&done);
-
- __ bind(&out_of_object);
- __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- // Index is equal to negated out of object property index plus 1.
- __ Subu(scratch, result, scratch);
- __ lw(result, FieldMemOperand(scratch,
- FixedArray::kHeaderSize - kPointerSize));
- __ bind(&done);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mips/lithium-codegen-mips.h b/src/3rdparty/v8/src/mips/lithium-codegen-mips.h
deleted file mode 100644
index b4476c4..0000000
--- a/src/3rdparty/v8/src/mips/lithium-codegen-mips.h
+++ /dev/null
@@ -1,512 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
-#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
-
-#include "mips/lithium-mips.h"
-#include "mips/lithium-gap-resolver-mips.h"
-#include "deoptimizer.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-class LCodeGen BASE_EMBEDDED {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
- deoptimizations_(4, info->zone()),
- deopt_jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
- inlined_function_count_(0),
- scope_(info->scope()),
- status_(UNUSED),
- translations_(info->zone()),
- deferred_(8, info->zone()),
- osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
-
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
- bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- // Support for converting LOperands to assembler types.
- // LOperand must be a register.
- Register ToRegister(LOperand* op) const;
-
- // LOperand is loaded into scratch, unless already a register.
- Register EmitLoadRegister(LOperand* op, Register scratch);
-
- // LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
-
- // LOperand is loaded into dbl_scratch, unless already a double register.
- DoubleRegister EmitLoadDoubleRegister(LOperand* op,
- FloatRegister flt_scratch,
- DoubleRegister dbl_scratch);
- int ToInteger32(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
- // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
- MemOperand ToHighMemOperand(LOperand* op) const;
-
- bool IsInteger32(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
-
- void DoCheckMapCommon(Register map_reg, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- MemOperand PrepareKeyedOperand(Register key,
- Register base,
- bool key_is_constant,
- int constant_key,
- int element_size,
- int shift_size,
- int additional_index,
- int additional_offset);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
-
- LPlatformChunk* chunk() const { return chunk_; }
- Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
-
- Register scratch0() { return kLithiumScratchReg; }
- Register scratch1() { return kLithiumScratchReg2; }
- DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
-
- int GetNextEmittedBlock(int block);
- LInstruction* GetNextInstruction();
-
- void EmitClassOfTest(Label* if_true,
- Label* if_false,
- Handle<String> class_name,
- Register input,
- Register temporary,
- Register temporary2);
-
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return info()->num_parameters(); }
-
- void Abort(const char* reason);
- void Comment(const char* format, ...);
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- // Code generation passes. Returns true if code generation should
- // continue.
- bool GeneratePrologue();
- bool GenerateBody();
- bool GenerateDeferredCode();
- bool GenerateDeoptJumpTable();
- bool GenerateSafepointTable();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
-
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr);
-
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr);
-
- enum A1State {
- A1_UNINITIALIZED,
- A1_CONTAINS_TARGET
- };
-
- // Generate a direct call to a known function. Expects the function
- // to be in a1.
- void CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr,
- CallKind call_kind,
- A1State a1_state);
-
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc,
- LEnvironment* environment,
- Register src1 = zero_reg,
- const Operand& src2 = Operand(zero_reg));
-
- void AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
- void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
- Register ToRegister(int index) const;
- DoubleRegister ToDoubleRegister(int index) const;
-
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
-
- // Support for recording safepoint and position information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordPosition(int position);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
- void EmitBranch(int left_block,
- int right_block,
- Condition cc,
- Register src1,
- const Operand& src2);
- void EmitBranchF(int left_block,
- int right_block,
- Condition cc,
- FPURegister src1,
- FPURegister src2);
- void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(Register input,
- DoubleRegister result,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- // Returns two registers in cmp1 and cmp2 that can be used in the
- // Branch instruction after EmitTypeofIs.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name,
- Register& cmp1,
- Operand& cmp2);
-
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Register temp1,
- Register temp2,
- Label* is_not_object,
- Label* is_object);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string);
-
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp1, Register temp2);
-
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
-
- struct JumpTableEntry {
- inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
- : label(),
- address(entry),
- needs_frame(frame),
- is_lazy_deopt(is_lazy) { }
- Label label;
- Address address;
- bool needs_frame;
- bool is_lazy_deopt;
- };
-
- void EnsureSpaceForLazyDeopt();
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
- ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> deopt_jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
- int inlined_function_count_;
- Scope* const scope_;
- Status status_;
- TranslationBuffer translations_;
- ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
- int last_lazy_deopt_pc_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiler from a set of parallel moves to a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope BASE_EMBEDDED {
- public:
- PushSafepointRegistersScope(LCodeGen* codegen,
- Safepoint::Kind kind)
- : codegen_(codegen) {
- ASSERT(codegen_->info()->is_calling());
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = kind;
-
- switch (codegen_->expected_safepoint_kind_) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PushSafepointRegisters();
- break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PushSafepointRegistersAndDoubles();
- break;
- default:
- UNREACHABLE();
- }
- }
-
- ~PushSafepointRegistersScope() {
- Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
- ASSERT((kind & Safepoint::kWithRegisters) != 0);
- switch (kind) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PopSafepointRegisters();
- break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PopSafepointRegistersAndDoubles();
- break;
- default:
- UNREACHABLE();
- }
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() { }
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc b/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc
deleted file mode 100644
index a4a4411..0000000
--- a/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "mips/lithium-gap-resolver-mips.h"
-#include "mips/lithium-codegen-mips.h"
-
-namespace v8 {
-namespace internal {
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner),
- moves_(32, owner->zone()),
- root_index_(0),
- in_cycle_(false),
- saved_destination_(NULL) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(moves_.is_empty());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- root_index_ = i; // Any cycle is found when by reaching this move again.
- PerformMove(i);
- if (in_cycle_) {
- RestoreValue();
- }
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- ASSERT(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph.
-
- // We can only find a cycle, when doing a depth-first traversal of moves,
- // be encountering the starting move again. So by spilling the source of
- // the starting move, we break the cycle. All moves are then unblocked,
- // and the starting move is completed by writing the spilled value to
- // its destination. All other moves from the spilled source have been
- // completed prior to breaking the cycle.
- // An additional complication is that moves to MemOperands with large
- // offsets (more than 1K or 4K) require us to spill this spilled value to
- // the stack, to free up the register.
- ASSERT(!moves_[index].IsPending());
- ASSERT(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack allocated local. Multiple moves can
- // be pending because this function is recursive.
- ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- PerformMove(i);
- // If there is a blocking, pending move it must be moves_[root_index_]
- // and all other moves with the same source as moves_[root_index_] are
- // sucessfully executed (because they are cycle-free) by this loop.
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // The move may be blocked on a pending move, which must be the starting move.
- // In this case, we have a cycle, and we save the source of this move to
- // a scratch register to break it.
- LMoveOperands other_move = moves_[root_index_];
- if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
- BreakCycle(index);
- return;
- }
-
- // This move is no longer blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::BreakCycle(int index) {
- // We save in a register the value that should end up in the source of
- // moves_[root_index]. After performing all moves in the tree rooted
- // in that move, we save the value to that source.
- ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
- ASSERT(!in_cycle_);
- in_cycle_ = true;
- LOperand* source = moves_[index].source();
- saved_destination_ = moves_[index].destination();
- if (source->IsRegister()) {
- __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
- } else if (source->IsStackSlot()) {
- __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
- } else if (source->IsDoubleRegister()) {
- CpuFeatures::Scope scope(FPU);
- __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
- } else if (source->IsDoubleStackSlot()) {
- CpuFeatures::Scope scope(FPU);
- __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
- } else {
- UNREACHABLE();
- }
- // This move will be done by restoring the saved value to the destination.
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::RestoreValue() {
- ASSERT(in_cycle_);
- ASSERT(saved_destination_ != NULL);
-
- // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
- if (saved_destination_->IsRegister()) {
- __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
- } else if (saved_destination_->IsStackSlot()) {
- __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
- } else if (saved_destination_->IsDoubleRegister()) {
- CpuFeatures::Scope scope(FPU);
- __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
- kLithiumScratchDouble);
- } else if (saved_destination_->IsDoubleStackSlot()) {
- CpuFeatures::Scope scope(FPU);
- __ sdc1(kLithiumScratchDouble,
- cgen_->ToMemOperand(saved_destination_));
- } else {
- UNREACHABLE();
- }
-
- in_cycle_ = false;
- saved_destination_ = NULL;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
-
- if (source->IsRegister()) {
- Register source_register = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- __ mov(cgen_->ToRegister(destination), source_register);
- } else {
- ASSERT(destination->IsStackSlot());
- __ sw(source_register, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsRegister()) {
- __ lw(cgen_->ToRegister(destination), source_operand);
- } else {
- ASSERT(destination->IsStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- if (!destination_operand.OffsetIsInt16Encodable()) {
- CpuFeatures::Scope scope(FPU);
- // 'at' is overwritten while saving the value to the destination.
- // Therefore we can't use 'at'. It is OK if the read from the source
- // destroys 'at', since that happens before the value is read.
- // This uses only a single reg of the double reg-pair.
- __ lwc1(kLithiumScratchDouble, source_operand);
- __ swc1(kLithiumScratchDouble, destination_operand);
- } else {
- __ lw(at, source_operand);
- __ sw(at, destination_operand);
- }
- } else {
- __ lw(kLithiumScratchReg, source_operand);
- __ sw(kLithiumScratchReg, destination_operand);
- }
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32(constant_source)) {
- __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
- } else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
- }
- } else {
- ASSERT(destination->IsStackSlot());
- ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
- if (cgen_->IsInteger32(constant_source)) {
- __ li(kLithiumScratchReg,
- Operand(cgen_->ToInteger32(constant_source)));
- } else {
- __ LoadObject(kLithiumScratchReg,
- cgen_->ToHandle(constant_source));
- }
- __ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleRegister()) {
- CpuFeatures::Scope scope(FPU);
- DoubleRegister source_register = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- __ sdc1(source_register, destination_operand);
- }
-
- } else if (source->IsDoubleStackSlot()) {
- CpuFeatures::Scope scope(FPU);
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- // kLithiumScratchDouble was used to break the cycle,
- // but kLithiumScratchReg is free.
- MemOperand source_high_operand =
- cgen_->ToHighMemOperand(source);
- MemOperand destination_high_operand =
- cgen_->ToHighMemOperand(destination);
- __ lw(kLithiumScratchReg, source_operand);
- __ sw(kLithiumScratchReg, destination_operand);
- __ lw(kLithiumScratchReg, source_high_operand);
- __ sw(kLithiumScratchReg, destination_high_operand);
- } else {
- __ ldc1(kLithiumScratchDouble, source_operand);
- __ sdc1(kLithiumScratchDouble, destination_operand);
- }
- }
- } else {
- UNREACHABLE();
- }
-
- moves_[index].Eliminate();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.h b/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.h
deleted file mode 100644
index 2506e38..0000000
--- a/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-
-#include "v8.h"
-
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // If a cycle is found in the series of moves, save the blocking value to
- // a scratch register. The cycle must be found by hitting the root of the
- // depth-first search.
- void BreakCycle(int index);
-
- // After a cycle has been resolved, restore the value from the scratch
- // register to its proper destination.
- void RestoreValue();
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- int root_index_;
- bool in_cycle_;
- LOperand* saved_destination_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/lithium-mips.cc b/src/3rdparty/v8/src/mips/lithium-mips.cc
deleted file mode 100644
index 6170eb9..0000000
--- a/src/3rdparty/v8/src/mips/lithium-mips.cc
+++ /dev/null
@@ -1,2398 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "lithium-allocator-inl.h"
-#include "mips/lithium-mips.h"
-#include "mips/lithium-codegen-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Inputs operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- ASSERT(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "sll-t";
- case Token::SAR: return "sra-t";
- case Token::SHR: return "srl-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d",
- *hydrogen()->class_name(),
- true_block_id(),
- false_block_id());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
- true_block_id(), false_block_id());
-}
-
-
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
-}
-
-
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[a2] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ASSERT(hydrogen()->property_cell()->value()->IsSmi());
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(hydrogen()->property_cell()->value())->value());
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
- stream->Add(" length ");
- length()->PrintTo(stream);
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
- } else {
- stream->Add("] <- ");
- }
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
- // Skip a slot if for a double-width slot.
- if (is_double) spill_slot_count_++;
- return spill_slot_count_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- ASSERT(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-void LCodeGen::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr, int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(
- LTemplateInstruction<1, I, T>* instr, Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- if (hinstr->HasObservableSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_.IsNone());
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = sim->ast_id();
- }
-
- // If instruction does not have side-effects lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach environment to this call even if
- // call sequence can not deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- operand->set_virtual_register(allocator_->GetVirtualRegister());
- if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseRegisterAtStart(right_value);
- }
-
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
- }
- }
-
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
- HValue* left = instr->left();
- HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- LOperand* left_operand = UseFixed(left, a1);
- LOperand* right_operand = UseFixed(right, a0);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- ASSERT(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- ASSERT(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- ASSERT(last_environment != NULL);
- // Only copy the environment, if it is later used again.
- if (pred->end()->SecondSuccessor() == NULL) {
- ASSERT(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- ASSERT(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment, it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
-
- if (instr != NULL) {
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- instr->set_hydrogen_value(current);
- chunk_->AddInstruction(instr, current_block_);
- }
- current_instruction_ = old_current;
-}
-
-
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
- }
-
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
-
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment.
- Representation rep = value->representation();
- HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
- return AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LCmpMapAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- return DefineAsRegister(
- new(zone()) LArgumentsLength(UseRegister(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LInstanceOf* result =
- new(zone()) LInstanceOf(UseFixed(instr->left(), a0),
- UseFixed(instr->right(), a1));
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), a0),
- FixedTemp(t0));
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegisterAtStart(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
- LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), a1);
- LOperand* receiver = UseFixed(instr->receiver(), a0);
- LOperand* length = UseFixed(instr->length(), a2);
- LOperand* elements = UseFixed(instr->elements(), a3);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
- LOperand* argument = Use(instr->argument());
- return new(zone()) LPushArgument(argument);
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses()
- ? NULL
- : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- // If there is a non-return use, the context must be allocated in a register.
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- return DefineAsRegister(new(zone()) LContext);
- }
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context,
- instr->qml_global()));
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
-}
-
-
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* function = UseFixed(instr->function(), a1);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
- return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* input = UseFixedDouble(instr->value(), f4);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll.
- LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
- return DefineAsRegister(result);
- } else if (op == kMathPowHalf) {
- // Input cannot be the same as the result.
- // See lithium-codegen-mips.cc::DoMathPowHalf.
- LOperand* input = UseFixedDouble(instr->value(), f8);
- LOperand* temp = FixedTemp(f6);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- return DefineFixedDouble(result, f4);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
-
- LOperand* temp = (op == kMathRound) ? FixedTemp(f6) :
- (op == kMathFloor) ? TempRegister() : NULL;
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathFloor:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathSqrt:
- return DefineAsRegister(result);
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- argument_count_ -= instr->argument_count();
- LOperand* key = UseFixed(instr->key(), a2);
- return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- LCallGlobal* result = new(zone()) LCallGlobal(instr->qml_global());
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* constructor = UseFixed(instr->constructor(), a1);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(constructor);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- LOperand* constructor = UseFixed(instr->constructor(), a1);
- argument_count_ -= instr->argument_count();
- LCallNewArray* result = new(zone()) LCallNewArray(constructor);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* function = UseFixed(instr->function(), a1);
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), v0),
- instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineAsRegister(new(zone()) LBitI(left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LBitNotI(value));
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
- // TODO(1042) The fixed register allocation
- // is needed because we call TypeRecordingBinaryOpStub from
- // the generated code, which requires registers a0
- // and a1 to be used. We should remove that
- // when we provide a native implementation.
- LOperand* dividend = UseFixed(instr->left(), a0);
- LOperand* divisor = UseFixed(instr->right(), a1);
- return AssignEnvironment(AssignPointerMap(
- DefineFixed(new(zone()) LDivI(dividend, divisor), v0)));
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LModI* mod;
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
- } else {
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- mod = new(zone()) LModI(dividend,
- divisor,
- TempRegister(),
- FixedTemp(f20),
- FixedTemp(f22));
- }
-
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero)) {
- return AssignEnvironment(DefineAsRegister(mod));
- } else {
- return DefineAsRegister(mod);
- }
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
- } else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), f2);
- LOperand* right = UseFixedDouble(instr->right(), f4);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, f2), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left;
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
- (instr->CheckFlag(HValue::kCanOverflow) ||
- !right->IsConstantOperand())) {
- left = UseRegister(instr->LeastConstantOperand());
- temp = TempRegister();
- } else {
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- }
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- AssignEnvironment(mul);
- }
- return DefineAsRegister(mul);
-
- } else if (instr->representation().IsDouble()) {
- if (kArchVariant == kMips32r2) {
- if (instr->UseCount() == 1 && instr->uses().value()->IsAdd()) {
- HAdd* add = HAdd::cast(instr->uses().value());
- if (instr == add->left()) {
- // This mul is the lhs of an add. The add and mul will be folded
- // into a multiply-add.
- return NULL;
- }
- if (instr == add->right() && !add->left()->IsMul()) {
- // This mul is the rhs of an add, where the lhs is not another mul.
- // The add and mul will be folded into a multiply-add.
- return NULL;
- }
- }
- }
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new(zone()) LSubI(left, right);
- LInstruction* result = DefineAsRegister(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
- LOperand* multiplier_op = UseRegisterAtStart(mul->left());
- LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
- LOperand* addend_op = UseRegisterAtStart(addend);
- return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
- multiplicand_op));
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineAsRegister(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- if (kArchVariant == kMips32r2) {
- if (instr->left()->IsMul())
- return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
-
- if (instr->right()->IsMul()) {
- ASSERT(!instr->left()->IsMul());
- return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
- }
- }
- return DoArithmeticD(Token::ADD, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
- } else {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return DefineAsRegister(new(zone()) LMathMinMax(left, right));
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), f2);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), f4) :
- UseFixed(instr->right(), a2);
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, f0),
- instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), a0);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, f0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LCmpT* result = new(zone()) LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
- } else {
- ASSERT(r.IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- return new(zone()) LCmpConstantEqAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()),
- temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()),
- temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsUndetectableAndBranch(
- UseRegisterAtStart(instr->value()), TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasInstanceTypeAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
- TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), a0);
- LDateField* result =
- new(zone()) LDateField(object, FixedTemp(a1), instr->index());
- return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- LOperand* value = UseRegister(instr->value());
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
- HInductionVariableAnnotation* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = UseRegister(instr->length());
- return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* value = UseFixed(instr->value(), a0);
- return MarkAsCall(new(zone()) LThrow(value), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* res = NULL;
- if (instr->value()->type().IsSmi()) {
- res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
- } else {
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
- : NULL;
- LOperand* temp3 = FixedTemp(f22);
- res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
- temp1,
- temp2,
- temp3));
- res = AssignEnvironment(res);
- }
- return res;
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
-
- // Make sure that the temp and result_temp registers are
- // different.
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
- Define(result, result_temp);
- return AssignPointerMap(result);
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
- LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
- return AssignEnvironment(DefineAsRegister(res));
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegisterAtStart(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineAsRegister(new(zone()) LSmiTag(value));
- } else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- }
- } else {
- ASSERT(to.IsDouble());
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value())));
- } else {
- return DefineAsRegister(
- new(zone()) LInteger32ToDouble(Use(instr->value())));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(Define(result, temp1));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckMaps(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- // Revisit this decision, here and 8 lines below.
- return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(f22)));
- } else if (input_rep.IsInteger32()) {
- return DefineAsRegister(new(zone()) LClampIToUint8(reg));
- } else {
- ASSERT(input_rep.IsTagged());
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve f22 explicitly.
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(f22));
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new(zone()) LReturn(UseFixed(instr->value(), v0));
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- return DefineAsRegister(new(zone()) LConstantD);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), a0);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* value = UseRegister(instr->value());
- // Use a temp to check the value in the cell in the case where we perform
- // a hole check.
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
- : new(zone()) LStoreGlobalCell(value, NULL);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), a1);
- LOperand* value = UseFixed(instr->value(), a0);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
- value = UseTempRegister(instr->value());
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- return DefineAsRegister(
- new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), a0);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, v0), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), a0);
- LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), v0);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
-
- if (!instr->is_external()) {
- LOperand* obj = NULL;
- if (instr->representation().IsDouble()) {
- obj = UseTempRegister(instr->elements());
- } else {
- ASSERT(instr->representation().IsTagged());
- obj = UseRegisterAtStart(instr->elements());
- }
- result = new(zone()) LLoadKeyed(obj, key);
- } else {
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- // float->double conversion on non-VFP2 requires an extra scratch
- // register. For convenience, just mark the elements register as "UseTemp"
- // so that it can be used as a temp during the float->double conversion
- // after it's no longer needed after the float load.
- bool needs_temp =
- !CpuFeatures::IsSupported(FPU) &&
- (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
- LOperand* external_pointer = needs_temp
- ? UseTempRegister(instr->elements())
- : UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
- }
-
- DefineAsRegister(result);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- return can_deoptimize ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), a1);
- LOperand* key = UseFixed(instr->key(), a0);
-
- LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), v0);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
-
- if (!instr->is_external()) {
- ASSERT(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* val = NULL;
- LOperand* key = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- key = UseRegisterOrConstantAtStart(instr->key());
- val = UseTempRegister(instr->value());
- } else {
- ASSERT(instr->value()->representation().IsTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = needs_write_barrier ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- }
-
- return new(zone()) LStoreKeyed(object, key, val);
- }
-
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
-
- return new(zone()) LStoreKeyed(external_pointer, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* obj = UseFixed(instr->object(), a2);
- LOperand* key = UseFixed(instr->key(), a1);
- LOperand* val = UseFixed(instr->value(), a0);
-
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsTagged());
-
- return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* new_map_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
- return DefineSameAsFirst(result);
- } else if (FLAG_compiled_transitions) {
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL, NULL);
- return AssignPointerMap(result);
- } else {
- LOperand* object = UseFixed(instr->object(), a0);
- LOperand* fixed_object_reg = FixedTemp(a2);
- LOperand* new_map_reg = FixedTemp(a3);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object,
- new_map_reg,
- fixed_object_reg);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
- instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = instr->is_in_object()
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else {
- obj = needs_write_barrier_for_map
- ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
-
- // We need a temporary register for write barrier of the map field.
- LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
-
- return new(zone()) LStoreNamedField(obj, val, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* obj = UseFixed(instr->object(), a1);
- LOperand* val = UseFixed(instr->value(), a0);
-
- LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), v0),
- instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LAllocateObject* result =
- new(zone()) LAllocateObject(TempRegister(), TempRegister());
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* size = UseTempRegister(instr->size());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseFixed(instr->object(), a0);
- LOperand* key = UseFixed(instr->key(), a1);
- LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- ASSERT(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- ASSERT(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
- Register reg = descriptor->register_params_[instr->index()];
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = UseRegister(instr->index());
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), a0);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), a0));
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
-
- // If there is an instruction pending deoptimization environment create a
- // lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LInstruction* result = new(zone()) LLazyBailout;
- result = AssignEnvironment(result);
- // Store the lazy deopt environment with the instruction if needed. Right
- // now it is only used for LInstanceOfKnownGlobal.
- instruction_pending_deoptimization_environment_->
- SetDeferredLazyDeoptimizationEnvironment(result->environment());
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = BailoutId::None();
- return result;
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
- } else {
- ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
- }
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
- }
-
- HEnvironment* outer = current_block_->last_environment()->
- DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
-
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* key = UseRegisterAtStart(instr->key());
- LOperand* object = UseRegisterAtStart(instr->object());
- LIn* result = new(zone()) LIn(key, object);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* object = UseFixed(instr->enumerable(), a0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
- return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseRegister(instr->index());
- return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mips/lithium-mips.h b/src/3rdparty/v8/src/mips/lithium-mips.h
deleted file mode 100644
index 26340de..0000000
--- a/src/3rdparty/v8/src/mips/lithium-mips.h
+++ /dev/null
@@ -1,2683 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_LITHIUM_MIPS_H_
-#define V8_MIPS_LITHIUM_MIPS_H_
-
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(AllocateObject) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(ArrayLiteral) \
- V(BitI) \
- V(BitNotI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallConstantFunction) \
- V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
- V(CallNew) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CallStub) \
- V(CheckFunction) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CmpConstantEqAndBranch) \
- V(CmpIDAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantI) \
- V(ConstantT) \
- V(Context) \
- V(DeclareGlobals) \
- V(DeleteProperty) \
- V(Deoptimize) \
- V(DivI) \
- V(DoubleToI) \
- V(DummyUse) \
- V(ElementsKind) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
- V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(In) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(Uint32ToDouble) \
- V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
- V(IsObjectAndBranch) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(JSArrayLength) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadElements) \
- V(LoadExternalArrayPointer) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyed) \
- V(LoadKeyedGeneric) \
- V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
- V(LoadNamedGeneric) \
- V(MapEnumLength) \
- V(MathExp) \
- V(MathMinMax) \
- V(ModI) \
- V(MulI) \
- V(MultiplyAddD) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(ObjectLiteral) \
- V(OsrEntry) \
- V(OuterContext) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(Random) \
- V(RegExpLiteral) \
- V(Return) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
- V(StoreKeyed) \
- V(StoreKeyedGeneric) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(StringLength) \
- V(SubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(Throw) \
- V(ToFastProperties) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop)
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction: public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false) { }
- virtual ~LInstruction() { }
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
- void MarkAsCall() { is_call_ = true; }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return is_call_; }
- bool ClobbersRegisters() const { return is_call_; }
- bool ClobbersDoubleRegisters() const { return is_call_; }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- private:
- // Iterator interface.
- friend class InputIterator;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- bool is_call_;
- bool is_save_doubles_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
-
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
-};
-
-
-class LGap: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
- static LGap* cast(LInstruction* instr) {
- ASSERT(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap: public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
-
- int block_id() const { return block_id_; }
-
- private:
- int block_id_;
-};
-
-
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-};
-
-
-class LLabel: public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- virtual void PrintDataTo(StringStream* stream);
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
-};
-
-
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
- virtual bool IsControl() const { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-};
-
-
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
- public:
- LWrapReceiver(LOperand* receiver, LOperand* function) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-};
-
-
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-};
-
-
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LModI: public LTemplateInstruction<1, 2, 3> {
- public:
- // Used when the right hand is a constant power of 2.
- LModI(LOperand* left,
- LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = NULL;
- temps_[1] = NULL;
- temps_[2] = NULL;
- }
-
- // Used for the standard case.
- LModI(LOperand* left,
- LOperand* right,
- LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivI: public LTemplateInstruction<1, 2, 0> {
- public:
- LDivI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-};
-
-
-class LMulI: public LTemplateInstruction<1, 2, 1> {
- public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplyAddD(LOperand* addend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = addend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* addend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
-};
-
-
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
- public:
- LUnaryMathOperation(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
-};
-
-
-class LMathExp: public LTemplateInstruction<1, 1, 3> {
- public:
- LMathExp(LOperand* value,
- LOperand* double_temp,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = double_temp;
- ExternalReference::InitializeMathExpData();
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* double_temp() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
-};
-
-
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
- }
-
- LOperand* left() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
-class LIsNilAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsNilAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
-
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
- public:
- explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
- public:
- LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasInstanceTypeAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
- "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
- public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
- public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
- lazy_deopt_env_ = env;
- }
-
- private:
- LEnvironment* lazy_deopt_env_;
-};
-
-
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI: public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI: public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantD: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value() const { return hydrogen()->handle(); }
-};
-
-
-class LBranch: public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
- public:
- LCmpMapAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
-};
-
-
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
-class LDateField: public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-
- private:
- Smi* index_;
-};
-
-
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
- public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
- inputs_[0] = string;
- inputs_[1] = index;
- inputs_[2] = value;
- }
-
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
-class LThrow: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower: public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LRandom: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRandom(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- Token::Value op() const { return op_; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LReturn(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadFunctionPrototype(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
- }
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- bool is_external() const {
- return hydrogen()->is_external();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedGeneric(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-};
-
-
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
-};
-
-
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
- public:
- LStoreGlobalCell(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
- }
-
- LOperand* global_object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalObject(LOperand* context, bool qml_global) {
- inputs_[0] = context;
- qml_global_ = qml_global;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
- bool qml_global() { return qml_global_; }
-
- private:
- bool qml_global_;
-};
-
-
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
-
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> function() { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
-};
-
-
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
- }
-
- LOperand* key() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- explicit LCallGlobal(bool qml_global) : qml_global_(qml_global) {}
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
-};
-
-
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> target() const { return hydrogen()->target(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
- }
-
- LOperand* constructor() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNewArray(LOperand* constructor) {
- inputs_[0] = constructor;
- }
-
- LOperand* constructor() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
-};
-
-
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagU(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
- public:
- LDoubleToI(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
- public:
- LTaggedToI(LOperand* value,
- LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
-};
-
-
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
- bool needs_check() const { return needs_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
-};
-
-
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- bool is_external() const { return hydrogen()->is_external(); }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* new_map_temp,
- LOperand* fixed_object_temp) {
- inputs_[0] = object;
- temps_[0] = new_map_temp;
- temps_[1] = fixed_object_temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
- LOperand* temp() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
- public:
- LTrapAllocationMemento(LOperand* object,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
- "trap-allocation-memento")
-};
-
-
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
- }
-
- LOperand* char_code() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
- }
-
- LOperand* string() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckFunction(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
-};
-
-
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMaps(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
- public:
- LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- ZoneList<Handle<JSObject> >* prototypes() const {
- return hydrogen()->prototypes();
- }
- ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-};
-
-
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
- public:
- LClampDToUint8(LOperand* unclamped, LOperand* temp) {
- inputs_[0] = unclamped;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* unclamped, LOperand* temp) {
- inputs_[0] = unclamped;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
- public:
- LAllocateObject(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
-class LAllocate: public LTemplateInstruction<1, 2, 2> {
- public:
- LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
- inputs_[1] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* size() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
-};
-
-
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
- public:
- LDeleteProperty(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry();
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
- LOperand* double_register_spills_[
- DoubleRegister::kMaxNumAllocatableRegisters];
-};
-
-
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LIn: public LTemplateInstruction<1, 2, 0> {
- public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
- inputs_[1] = object;
- }
-
- LOperand* key() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk: public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
-
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
-};
-
-
-class LChunkBuilder BASE_EMBEDDED {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
- info_(info),
- graph_(graph),
- zone_(graph->zone()),
- status_(UNUSED),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
-
- private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(const char* reason);
-
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- DoubleRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // Operand created by UseRegister is guaranteed to be live until the end of
- // instruction. This means that register allocator will not reuse it's
- // register for any other operand inside instruction.
- // Operand created by UseRegisterAtStart is guaranteed to be live only at
- // instruction start. Register allocator is free to assign the same register
- // to some other operand used inside instruction (i.e. temporary or
- // output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- DoubleRegister reg);
- LInstruction* AssignEnvironment(LInstruction* instr);
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // By default we assume that instruction sequences generated for calls
- // cannot deoptimize eagerly and we do not attach environment to this
- // instruction.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
-
- void VisitInstruction(HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
-
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Zone* zone_;
- Status status_;
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- int argument_count_;
- LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_LITHIUM_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/macro-assembler-mips.cc b/src/3rdparty/v8/src/mips/macro-assembler-mips.cc
deleted file mode 100644
index b8eb084..0000000
--- a/src/3rdparty/v8/src/mips/macro-assembler-mips.cc
+++ /dev/null
@@ -1,5553 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <limits.h> // For LONG_MIN, LONG_MAX.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "debug.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
- : Assembler(arg_isolate, buffer, size),
- generating_stub_(false),
- allow_stub_calls_(true),
- has_frame_(false) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
- }
-}
-
-
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index) {
- lw(destination, MemOperand(s6, index << kPointerSizeLog2));
-}
-
-
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond,
- Register src1, const Operand& src2) {
- Branch(2, NegateCondition(cond), src1, src2);
- lw(destination, MemOperand(s6, index << kPointerSizeLog2));
-}
-
-
-void MacroAssembler::StoreRoot(Register source,
- Heap::RootListIndex index) {
- sw(source, MemOperand(s6, index << kPointerSizeLog2));
-}
-
-
-void MacroAssembler::StoreRoot(Register source,
- Heap::RootListIndex index,
- Condition cond,
- Register src1, const Operand& src2) {
- Branch(2, NegateCondition(cond), src1, src2);
- sw(source, MemOperand(s6, index << kPointerSizeLog2));
-}
-
-
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- li(result, Operand(cell));
- lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
- } else {
- li(result, Operand(object));
- }
-}
-
-
-// Push and pop all registers that can hold pointers.
-void MacroAssembler::PushSafepointRegisters() {
- // Safepoints expect a block of kNumSafepointRegisters values on the
- // stack, so adjust the stack for unsaved registers.
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ASSERT(num_unsaved >= 0);
- if (num_unsaved > 0) {
- Subu(sp, sp, Operand(num_unsaved * kPointerSize));
- }
- MultiPush(kSafepointSavedRegisters);
-}
-
-
-void MacroAssembler::PopSafepointRegisters() {
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- MultiPop(kSafepointSavedRegisters);
- if (num_unsaved > 0) {
- Addu(sp, sp, Operand(num_unsaved * kPointerSize));
- }
-}
-
-
-void MacroAssembler::PushSafepointRegistersAndDoubles() {
- PushSafepointRegisters();
- Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
- for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
- FPURegister reg = FPURegister::FromAllocationIndex(i);
- sdc1(reg, MemOperand(sp, i * kDoubleSize));
- }
-}
-
-
-void MacroAssembler::PopSafepointRegistersAndDoubles() {
- for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
- FPURegister reg = FPURegister::FromAllocationIndex(i);
- ldc1(reg, MemOperand(sp, i * kDoubleSize));
- }
- Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
- PopSafepointRegisters();
-}
-
-
-void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
- Register dst) {
- sw(src, SafepointRegistersAndDoublesSlot(dst));
-}
-
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
- sw(src, SafepointRegisterSlot(dst));
-}
-
-
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- lw(dst, SafepointRegisterSlot(src));
-}
-
-
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the highest encoding,
- // which means that lowest encodings are closest to the stack pointer.
- return kSafepointRegisterStackIndexMap[reg_code];
-}
-
-
-MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
- return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
-}
-
-
-MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
- UNIMPLEMENTED_MIPS();
- // General purpose registers are pushed last on the stack.
- int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
- int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
- return MemOperand(sp, doubles_size + register_offset);
-}
-
-
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch) {
- ASSERT(cc == eq || cc == ne);
- And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
- Branch(branch, cc, scratch,
- Operand(ExternalReference::new_space_start(isolate())));
-}
-
-
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- ASSERT(!AreAliased(value, dst, t8, object));
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done);
- }
-
- // Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
-
- Addu(dst, object, Operand(offset - kHeapObjectTag));
- if (emit_debug_code()) {
- Label ok;
- And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
- Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Unaligned cell in write barrier");
- bind(&ok);
- }
-
- RecordWrite(object,
- dst,
- value,
- ra_status,
- save_fp,
- remembered_set_action,
- OMIT_SMI_CHECK);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
- li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
- }
-}
-
-
-// Will clobber 4 registers: object, address, scratch, ip. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- RAStatus ra_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- ASSERT(!AreAliased(object, address, value, t8));
- ASSERT(!AreAliased(object, address, value, t9));
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!address.is(cp) && !value.is(cp));
-
- if (emit_debug_code()) {
- lw(at, MemOperand(address));
- Assert(
- eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
- }
-
- Label done;
-
- if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
- JumpIfSmi(value, &done);
- }
-
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- eq,
- &done);
-
- // Record the actual write.
- if (ra_status == kRAHasNotBeenSaved) {
- push(ra);
- }
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
- CallStub(&stub);
- if (ra_status == kRAHasNotBeenSaved) {
- pop(ra);
- }
-
- bind(&done);
-
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
- li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
- }
-}
-
-
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address,
- Register scratch,
- SaveFPRegsMode fp_mode,
- RememberedSetFinalAction and_then) {
- Label done;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok);
- stop("Remembered set pointer is in new space");
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- li(t8, Operand(store_buffer));
- lw(scratch, MemOperand(t8));
- // Store pointer to buffer and increment buffer top.
- sw(address, MemOperand(scratch));
- Addu(scratch, scratch, kPointerSize);
- // Write back new top of buffer.
- sw(scratch, MemOperand(t8));
- // Call stub on end of buffer.
- // Check for end of buffer.
- And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
- if (and_then == kFallThroughAtEnd) {
- Branch(&done, eq, t8, Operand(zero_reg));
- } else {
- ASSERT(and_then == kReturnAtEnd);
- Ret(eq, t8, Operand(zero_reg));
- }
- push(ra);
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(fp_mode);
- CallStub(&store_buffer_overflow);
- pop(ra);
- bind(&done);
- if (and_then == kReturnAtEnd) {
- Ret();
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Allocation support.
-
-
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
-
- ASSERT(!holder_reg.is(scratch));
- ASSERT(!holder_reg.is(at));
- ASSERT(!scratch.is(at));
-
- // Load current lexical context from the stack frame.
- lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
- Check(ne, "we should not have an empty lexical context",
- scratch, Operand(zero_reg));
-#endif
-
- // Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- lw(scratch, FieldMemOperand(scratch, offset));
- lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
- push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the native_context_map.
- lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kNativeContextMapRootIndex);
- Check(eq, "JSGlobalObject::native_context should be a native context.",
- holder_reg, Operand(at));
- pop(holder_reg); // Restore holder.
- }
-
- // Check if both contexts are the same.
- lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- Branch(&same_contexts, eq, scratch, Operand(at));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
- push(holder_reg); // Temporarily save holder on the stack.
- mov(holder_reg, at); // Move at to its holding place.
- LoadRoot(at, Heap::kNullValueRootIndex);
- Check(ne, "JSGlobalProxy::context() should not be null.",
- holder_reg, Operand(at));
-
- lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kNativeContextMapRootIndex);
- Check(eq, "JSGlobalObject::native_context should be a native context.",
- holder_reg, Operand(at));
- // Restore at is not needed. at is reloaded below.
- pop(holder_reg); // Restore holder.
- // Restore at to holder's context.
- lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- }
-
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
- lw(scratch, FieldMemOperand(scratch, token_offset));
- lw(at, FieldMemOperand(at, token_offset));
- Branch(miss, ne, scratch, Operand(at));
-
- bind(&same_contexts);
-}
-
-
-void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
- // First of all we assign the hash seed to scratch.
- LoadRoot(scratch, Heap::kHashSeedRootIndex);
- SmiUntag(scratch);
-
- // Xor original key with a seed.
- xor_(reg0, reg0, scratch);
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- nor(scratch, reg0, zero_reg);
- sll(at, reg0, 15);
- addu(reg0, scratch, at);
-
- // hash = hash ^ (hash >> 12);
- srl(at, reg0, 12);
- xor_(reg0, reg0, at);
-
- // hash = hash + (hash << 2);
- sll(at, reg0, 2);
- addu(reg0, reg0, at);
-
- // hash = hash ^ (hash >> 4);
- srl(at, reg0, 4);
- xor_(reg0, reg0, at);
-
- // hash = hash * 2057;
- sll(scratch, reg0, 11);
- sll(at, reg0, 3);
- addu(reg0, reg0, at);
- addu(reg0, reg0, scratch);
-
- // hash = hash ^ (hash >> 16);
- srl(at, reg0, 16);
- xor_(reg0, reg0, at);
-}
-
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register reg0,
- Register reg1,
- Register reg2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // reg0 - holds the untagged key on entry and holds the hash once computed.
- //
- // reg1 - Used to hold the capacity mask of the dictionary.
- //
- // reg2 - Used for the index into the dictionary.
- // at - Temporary (avoid MacroAssembler instructions also using 'at').
- Label done;
-
- GetNumberHash(reg0, reg1);
-
- // Compute the capacity mask.
- lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
- sra(reg1, reg1, kSmiTagSize);
- Subu(reg1, reg1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use reg2 for index calculations and keep the hash intact in reg0.
- mov(reg2, reg0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(reg2, reg2, reg1);
-
- // Scale the index by multiplying by the element size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
- sll(at, reg2, 1); // 2x.
- addu(reg2, reg2, at); // reg2 = reg2 * 3.
-
- // Check if the key is identical to the name.
- sll(at, reg2, kPointerSizeLog2);
- addu(reg2, elements, at);
-
- lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
- if (i != kProbes - 1) {
- Branch(&done, eq, key, Operand(at));
- } else {
- Branch(miss, ne, key, Operand(at));
- }
- }
-
- bind(&done);
- // Check that the value is a normal property.
- // reg2: elements + (index * kPointerSize).
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
- And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
- Branch(miss, ne, at, Operand(zero_reg));
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- lw(result, FieldMemOperand(reg2, kValueOffset));
-}
-
-
-// ---------------------------------------------------------------------------
-// Instruction macros.
-
-void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- addu(rd, rs, rt.rm());
- } else {
- if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- addiu(rd, rs, rt.imm32_);
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- addu(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- subu(rd, rs, rt.rm());
- } else {
- if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- subu(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- if (kArchVariant == kLoongson) {
- mult(rs, rt.rm());
- mflo(rd);
- } else {
- mul(rd, rs, rt.rm());
- }
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- if (kArchVariant == kLoongson) {
- mult(rs, at);
- mflo(rd);
- } else {
- mul(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Mult(Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- mult(rs, rt.rm());
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- mult(rs, at);
- }
-}
-
-
-void MacroAssembler::Multu(Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- multu(rs, rt.rm());
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- multu(rs, at);
- }
-}
-
-
-void MacroAssembler::Div(Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- div(rs, rt.rm());
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- div(rs, at);
- }
-}
-
-
-void MacroAssembler::Divu(Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- divu(rs, rt.rm());
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- divu(rs, at);
- }
-}
-
-
-void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- and_(rd, rs, rt.rm());
- } else {
- if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- andi(rd, rs, rt.imm32_);
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- and_(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- or_(rd, rs, rt.rm());
- } else {
- if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- ori(rd, rs, rt.imm32_);
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- or_(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- xor_(rd, rs, rt.rm());
- } else {
- if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- xori(rd, rs, rt.imm32_);
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- xor_(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- nor(rd, rs, rt.rm());
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- nor(rd, rs, at);
- }
-}
-
-
-void MacroAssembler::Neg(Register rs, const Operand& rt) {
- ASSERT(rt.is_reg());
- ASSERT(!at.is(rs));
- ASSERT(!at.is(rt.rm()));
- li(at, -1);
- xor_(rs, rt.rm(), at);
-}
-
-
-void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- slt(rd, rs, rt.rm());
- } else {
- if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- slti(rd, rs, rt.imm32_);
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- slt(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- sltu(rd, rs, rt.rm());
- } else {
- if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- sltiu(rd, rs, rt.imm32_);
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- sltu(rd, rs, at);
- }
- }
-}
-
-
-void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
- if (kArchVariant == kMips32r2) {
- if (rt.is_reg()) {
- rotrv(rd, rs, rt.rm());
- } else {
- rotr(rd, rs, rt.imm32_);
- }
- } else {
- if (rt.is_reg()) {
- subu(at, zero_reg, rt.rm());
- sllv(at, rs, at);
- srlv(rd, rs, rt.rm());
- or_(rd, rd, at);
- } else {
- if (rt.imm32_ == 0) {
- srl(rd, rs, 0);
- } else {
- srl(at, rs, rt.imm32_);
- sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
- or_(rd, rd, at);
- }
- }
- }
-}
-
-//------------Pseudo-instructions-------------
-
-void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
- ASSERT(!j.is_reg());
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
- // Normal load of an immediate value which does not need Relocation Info.
- if (is_int16(j.imm32_)) {
- addiu(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & kHiMask)) {
- ori(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & kImm16Mask)) {
- lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
- } else {
- lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
- ori(rd, rd, (j.imm32_ & kImm16Mask));
- }
- } else {
- if (MustUseReg(j.rmode_)) {
- RecordRelocInfo(j.rmode_, j.imm32_);
- }
- // We always need the same number of instructions as we may need to patch
- // this code to load another value which may need 2 instructions to load.
- lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
- ori(rd, rd, (j.imm32_ & kImm16Mask));
- }
-}
-
-
-void MacroAssembler::MultiPush(RegList regs) {
- int16_t num_to_push = NumberOfBitsSet(regs);
- int16_t stack_offset = num_to_push * kPointerSize;
-
- Subu(sp, sp, Operand(stack_offset));
- for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
- stack_offset -= kPointerSize;
- sw(ToRegister(i), MemOperand(sp, stack_offset));
- }
- }
-}
-
-
-void MacroAssembler::MultiPushReversed(RegList regs) {
- int16_t num_to_push = NumberOfBitsSet(regs);
- int16_t stack_offset = num_to_push * kPointerSize;
-
- Subu(sp, sp, Operand(stack_offset));
- for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
- stack_offset -= kPointerSize;
- sw(ToRegister(i), MemOperand(sp, stack_offset));
- }
- }
-}
-
-
-void MacroAssembler::MultiPop(RegList regs) {
- int16_t stack_offset = 0;
-
- for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
- lw(ToRegister(i), MemOperand(sp, stack_offset));
- stack_offset += kPointerSize;
- }
- }
- addiu(sp, sp, stack_offset);
-}
-
-
-void MacroAssembler::MultiPopReversed(RegList regs) {
- int16_t stack_offset = 0;
-
- for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
- lw(ToRegister(i), MemOperand(sp, stack_offset));
- stack_offset += kPointerSize;
- }
- }
- addiu(sp, sp, stack_offset);
-}
-
-
-void MacroAssembler::MultiPushFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
- int16_t num_to_push = NumberOfBitsSet(regs);
- int16_t stack_offset = num_to_push * kDoubleSize;
-
- Subu(sp, sp, Operand(stack_offset));
- for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
- stack_offset -= kDoubleSize;
- sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
- }
- }
-}
-
-
-void MacroAssembler::MultiPushReversedFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
- int16_t num_to_push = NumberOfBitsSet(regs);
- int16_t stack_offset = num_to_push * kDoubleSize;
-
- Subu(sp, sp, Operand(stack_offset));
- for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
- stack_offset -= kDoubleSize;
- sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
- }
- }
-}
-
-
-void MacroAssembler::MultiPopFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
- int16_t stack_offset = 0;
-
- for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
- ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
- stack_offset += kDoubleSize;
- }
- }
- addiu(sp, sp, stack_offset);
-}
-
-
-void MacroAssembler::MultiPopReversedFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
- int16_t stack_offset = 0;
-
- for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
- if ((regs & (1 << i)) != 0) {
- ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
- stack_offset += kDoubleSize;
- }
- }
- addiu(sp, sp, stack_offset);
-}
-
-
-void MacroAssembler::FlushICache(Register address, unsigned instructions) {
- RegList saved_regs = kJSCallerSaved | ra.bit();
- MultiPush(saved_regs);
- AllowExternalCallThatCantCauseGC scope(this);
-
- // Save to a0 in case address == t0.
- Move(a0, address);
- PrepareCallCFunction(2, t0);
-
- li(a1, instructions * kInstrSize);
- CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
- MultiPop(saved_regs);
-}
-
-
-void MacroAssembler::Ext(Register rt,
- Register rs,
- uint16_t pos,
- uint16_t size) {
- ASSERT(pos < 32);
- ASSERT(pos + size < 33);
-
- if (kArchVariant == kMips32r2) {
- ext_(rt, rs, pos, size);
- } else {
- // Move rs to rt and shift it left then right to get the
- // desired bitfield on the right side and zeroes on the left.
- int shift_left = 32 - (pos + size);
- sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
-
- int shift_right = 32 - size;
- if (shift_right > 0) {
- srl(rt, rt, shift_right);
- }
- }
-}
-
-
-void MacroAssembler::Ins(Register rt,
- Register rs,
- uint16_t pos,
- uint16_t size) {
- ASSERT(pos < 32);
- ASSERT(pos + size <= 32);
- ASSERT(size != 0);
-
- if (kArchVariant == kMips32r2) {
- ins_(rt, rs, pos, size);
- } else {
- ASSERT(!rt.is(t8) && !rs.is(t8));
- Subu(at, zero_reg, Operand(1));
- srl(at, at, 32 - size);
- and_(t8, rs, at);
- sll(t8, t8, pos);
- sll(at, at, pos);
- nor(at, at, zero_reg);
- and_(at, rt, at);
- or_(rt, t8, at);
- }
-}
-
-
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- FPURegister fs,
- FPURegister scratch) {
- // Move the data from fs to t8.
- mfc1(t8, fs);
- Cvt_d_uw(fd, t8, scratch);
-}
-
-
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- Register rs,
- FPURegister scratch) {
- // Convert rs to a FP value in fd (and fd + 1).
- // We do this by converting rs minus the MSB to avoid sign conversion,
- // then adding 2^31 to the result (if needed).
-
- ASSERT(!fd.is(scratch));
- ASSERT(!rs.is(t9));
- ASSERT(!rs.is(at));
-
- // Save rs's MSB to t9.
- Ext(t9, rs, 31, 1);
- // Remove rs's MSB.
- Ext(at, rs, 0, 31);
- // Move the result to fd.
- mtc1(at, fd);
-
- // Convert fd to a real FP value.
- cvt_d_w(fd, fd);
-
- Label conversion_done;
-
- // If rs's MSB was 0, it's done.
- // Otherwise we need to add that to the FP register.
- Branch(&conversion_done, eq, t9, Operand(zero_reg));
-
- // Load 2^31 into f20 as its float representation.
- li(at, 0x41E00000);
- mtc1(at, FPURegister::from_code(scratch.code() + 1));
- mtc1(zero_reg, scratch);
- // Add it to fd.
- add_d(fd, fd, scratch);
-
- bind(&conversion_done);
-}
-
-
-void MacroAssembler::Trunc_uw_d(FPURegister fd,
- FPURegister fs,
- FPURegister scratch) {
- Trunc_uw_d(fs, t8, scratch);
- mtc1(t8, fd);
-}
-
-void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
- trunc_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
- } else {
- trunc_w_d(fd, fs);
- }
-}
-
-void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
- round_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
- } else {
- round_w_d(fd, fs);
- }
-}
-
-
-void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
- floor_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
- } else {
- floor_w_d(fd, fs);
- }
-}
-
-
-void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
- ceil_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
- } else {
- ceil_w_d(fd, fs);
- }
-}
-
-
-void MacroAssembler::Trunc_uw_d(FPURegister fd,
- Register rs,
- FPURegister scratch) {
- ASSERT(!fd.is(scratch));
- ASSERT(!rs.is(at));
-
- // Load 2^31 into scratch as its float representation.
- li(at, 0x41E00000);
- mtc1(at, FPURegister::from_code(scratch.code() + 1));
- mtc1(zero_reg, scratch);
- // Test if scratch > fd.
- // If fd < 2^31 we can convert it normally.
- Label simple_convert;
- BranchF(&simple_convert, NULL, lt, fd, scratch);
-
- // First we subtract 2^31 from fd, then trunc it to rs
- // and add 2^31 to rs.
- sub_d(scratch, fd, scratch);
- trunc_w_d(scratch, scratch);
- mfc1(rs, scratch);
- Or(rs, rs, 1 << 31);
-
- Label done;
- Branch(&done);
- // Simple conversion.
- bind(&simple_convert);
- trunc_w_d(scratch, fd);
- mfc1(rs, scratch);
-
- bind(&done);
-}
-
-
-void MacroAssembler::BranchF(Label* target,
- Label* nan,
- Condition cc,
- FPURegister cmp1,
- FPURegister cmp2,
- BranchDelaySlot bd) {
- if (cc == al) {
- Branch(bd, target);
- return;
- }
-
- ASSERT(nan || target);
- // Check for unordered (NaN) cases.
- if (nan) {
- c(UN, D, cmp1, cmp2);
- bc1t(nan);
- }
-
- if (target) {
- // Here NaN cases were either handled by this function or are assumed to
- // have been handled by the caller.
- // Unsigned conditions are treated as their signed counterpart.
- switch (cc) {
- case Uless:
- case less:
- c(OLT, D, cmp1, cmp2);
- bc1t(target);
- break;
- case Ugreater:
- case greater:
- c(ULE, D, cmp1, cmp2);
- bc1f(target);
- break;
- case Ugreater_equal:
- case greater_equal:
- c(ULT, D, cmp1, cmp2);
- bc1f(target);
- break;
- case Uless_equal:
- case less_equal:
- c(OLE, D, cmp1, cmp2);
- bc1t(target);
- break;
- case eq:
- c(EQ, D, cmp1, cmp2);
- bc1t(target);
- break;
- case ne:
- c(EQ, D, cmp1, cmp2);
- bc1f(target);
- break;
- default:
- CHECK(0);
- };
- }
-
- if (bd == PROTECT) {
- nop();
- }
-}
-
-
-void MacroAssembler::Move(FPURegister dst, double imm) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
- static const DoubleRepresentation minus_zero(-0.0);
- static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value(imm);
- // Handle special values first.
- bool force_load = dst.is(kDoubleRegZero);
- if (value.bits == zero.bits && !force_load) {
- mov_d(dst, kDoubleRegZero);
- } else if (value.bits == minus_zero.bits && !force_load) {
- neg_d(dst, kDoubleRegZero);
- } else {
- uint32_t lo, hi;
- DoubleAsTwoUInt32(imm, &lo, &hi);
- // Move the low part of the double into the lower of the corresponding FPU
- // register of FPU register pair.
- if (lo != 0) {
- li(at, Operand(lo));
- mtc1(at, dst);
- } else {
- mtc1(zero_reg, dst);
- }
- // Move the high part of the double into the higher of the corresponding FPU
- // register of FPU register pair.
- if (hi != 0) {
- li(at, Operand(hi));
- mtc1(at, dst.high());
- } else {
- mtc1(zero_reg, dst.high());
- }
- }
-}
-
-
-void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
- if (kArchVariant == kLoongson) {
- Label done;
- Branch(&done, ne, rt, Operand(zero_reg));
- mov(rd, rs);
- bind(&done);
- } else {
- movz(rd, rs, rt);
- }
-}
-
-
-void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
- if (kArchVariant == kLoongson) {
- Label done;
- Branch(&done, eq, rt, Operand(zero_reg));
- mov(rd, rs);
- bind(&done);
- } else {
- movn(rd, rs, rt);
- }
-}
-
-
-void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
- if (kArchVariant == kLoongson) {
- // Tests an FP condition code and then conditionally move rs to rd.
- // We do not currently use any FPU cc bit other than bit 0.
- ASSERT(cc == 0);
- ASSERT(!(rs.is(t8) || rd.is(t8)));
- Label done;
- Register scratch = t8;
- // For testing purposes we need to fetch content of the FCSR register and
- // than test its cc (floating point condition code) bit (for cc = 0, it is
- // 24. bit of the FCSR).
- cfc1(scratch, FCSR);
- // For the MIPS I, II and III architectures, the contents of scratch is
- // UNPREDICTABLE for the instruction immediately following CFC1.
- nop();
- srl(scratch, scratch, 16);
- andi(scratch, scratch, 0x0080);
- Branch(&done, eq, scratch, Operand(zero_reg));
- mov(rd, rs);
- bind(&done);
- } else {
- movt(rd, rs, cc);
- }
-}
-
-
-void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
- if (kArchVariant == kLoongson) {
- // Tests an FP condition code and then conditionally move rs to rd.
- // We do not currently use any FPU cc bit other than bit 0.
- ASSERT(cc == 0);
- ASSERT(!(rs.is(t8) || rd.is(t8)));
- Label done;
- Register scratch = t8;
- // For testing purposes we need to fetch content of the FCSR register and
- // than test its cc (floating point condition code) bit (for cc = 0, it is
- // 24. bit of the FCSR).
- cfc1(scratch, FCSR);
- // For the MIPS I, II and III architectures, the contents of scratch is
- // UNPREDICTABLE for the instruction immediately following CFC1.
- nop();
- srl(scratch, scratch, 16);
- andi(scratch, scratch, 0x0080);
- Branch(&done, ne, scratch, Operand(zero_reg));
- mov(rd, rs);
- bind(&done);
- } else {
- movf(rd, rs, cc);
- }
-}
-
-
-void MacroAssembler::Clz(Register rd, Register rs) {
- if (kArchVariant == kLoongson) {
- ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
- Register mask = t8;
- Register scratch = t9;
- Label loop, end;
- mov(at, rs);
- mov(rd, zero_reg);
- lui(mask, 0x8000);
- bind(&loop);
- and_(scratch, at, mask);
- Branch(&end, ne, scratch, Operand(zero_reg));
- addiu(rd, rd, 1);
- Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
- srl(mask, mask, 1);
- bind(&end);
- } else {
- clz(rd, rs);
- }
-}
-
-
-// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
-// 32bits signed integer range.
-// This method implementation differs from the ARM version for performance
-// reasons.
-void MacroAssembler::ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- FPURegister double_scratch,
- Label *not_int32) {
- Label right_exponent, done;
- // Get exponent word (ENDIAN issues).
- lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
- // Load dest with zero. We use this either for the final shift or
- // for the answer.
- mov(dest, zero_reg);
- // Check whether the exponent matches a 32 bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
- // the exponent that we are fastest at and also the highest exponent we can
- // handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- // If we have a match of the int32-but-not-Smi exponent then skip some logic.
- Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
- // If the exponent is higher than that then go to not_int32 case. This
- // catches numbers that don't fit in a signed int32, infinities and NaNs.
- Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
-
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- Subu(scratch2, scratch2, Operand(zero_exponent));
- // Dest already has a Smi zero.
- Branch(&done, lt, scratch2, Operand(zero_reg));
- if (!CpuFeatures::IsSupported(FPU)) {
- // We have a shifted exponent between 0 and 30 in scratch2.
- srl(dest, scratch2, HeapNumber::kExponentShift);
- // We now have the exponent in dest. Subtract from 30 to get
- // how much to shift down.
- li(at, Operand(30));
- subu(dest, at, dest);
- }
- bind(&right_exponent);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // MIPS FPU instructions implementing double precision to integer
- // conversion using round to zero. Since the FP value was qualified
- // above, the resulting integer should be a legal int32.
- // The original 'Exponent' word is still in scratch.
- lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
- trunc_w_d(double_scratch, double_scratch);
- mfc1(dest, double_scratch);
- } else {
- // On entry, dest has final downshift, scratch has original sign/exp/mant.
- // Save sign bit in top bit of dest.
- And(scratch2, scratch, Operand(0x80000000));
- Or(dest, dest, Operand(scratch2));
- // Put back the implicit 1, just above mantissa field.
- Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
-
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to leave the sign bit 0 so we subtract 2 bits from the shift
- // distance. But we want to clear the sign-bit so shift one more bit
- // left, then shift right one bit.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- sll(scratch, scratch, shift_distance + 1);
- srl(scratch, scratch, 1);
-
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Extract the top 10 bits, and insert those bottom 10 bits of scratch.
- // The width of the field here is the same as the shift amount above.
- const int field_width = shift_distance;
- Ext(scratch2, scratch2, 32-shift_distance, field_width);
- Ins(scratch, scratch2, 0, field_width);
- // Move down according to the exponent.
- srlv(scratch, scratch, dest);
- // Prepare the negative version of our integer.
- subu(scratch2, zero_reg, scratch);
- // Trick to check sign bit (msb) held in dest, count leading zero.
- // 0 indicates negative, save negative version with conditional move.
- Clz(dest, dest);
- Movz(scratch, scratch2, dest);
- mov(dest, scratch);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
- Register result,
- DoubleRegister double_input,
- Register scratch,
- DoubleRegister double_scratch,
- Register except_flag,
- CheckForInexactConversion check_inexact) {
- ASSERT(!result.is(scratch));
- ASSERT(!double_input.is(double_scratch));
- ASSERT(!except_flag.is(scratch));
-
- ASSERT(CpuFeatures::IsSupported(FPU));
- CpuFeatures::Scope scope(FPU);
- Label done;
-
- // Clear the except flag (0 = no exception)
- mov(except_flag, zero_reg);
-
- // Test for values that can be exactly represented as a signed 32-bit integer.
- cvt_w_d(double_scratch, double_input);
- mfc1(result, double_scratch);
- cvt_d_w(double_scratch, double_scratch);
- BranchF(&done, NULL, eq, double_input, double_scratch);
-
- int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
-
- if (check_inexact == kDontCheckForInexactConversion) {
- // Ignore inexact exceptions.
- except_mask &= ~kFCSRInexactFlagMask;
- }
-
- // Save FCSR.
- cfc1(scratch, FCSR);
- // Disable FPU exceptions.
- ctc1(zero_reg, FCSR);
-
- // Do operation based on rounding mode.
- switch (rounding_mode) {
- case kRoundToNearest:
- Round_w_d(double_scratch, double_input);
- break;
- case kRoundToZero:
- Trunc_w_d(double_scratch, double_input);
- break;
- case kRoundToPlusInf:
- Ceil_w_d(double_scratch, double_input);
- break;
- case kRoundToMinusInf:
- Floor_w_d(double_scratch, double_input);
- break;
- } // End of switch-statement.
-
- // Retrieve FCSR.
- cfc1(except_flag, FCSR);
- // Restore FCSR.
- ctc1(scratch, FCSR);
- // Move the converted value into the result register.
- mfc1(result, double_scratch);
-
- // Check for fpu exceptions.
- And(except_flag, except_flag, Operand(except_mask));
-
- bind(&done);
-}
-
-
-void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch) {
- Label done, normal_exponent, restore_sign;
- // Extract the biased exponent in result.
- Ext(result,
- input_high,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Check for Infinity and NaNs, which should return 0.
- Subu(scratch, result, HeapNumber::kExponentMask);
- Movz(result, zero_reg, scratch);
- Branch(&done, eq, scratch, Operand(zero_reg));
-
- // Express exponent as delta to (number of mantissa bits + 31).
- Subu(result,
- result,
- Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
-
- // If the delta is strictly positive, all bits would be shifted away,
- // which means that we can return 0.
- Branch(&normal_exponent, le, result, Operand(zero_reg));
- mov(result, zero_reg);
- Branch(&done);
-
- bind(&normal_exponent);
- const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
- // Calculate shift.
- Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
-
- // Save the sign.
- Register sign = result;
- result = no_reg;
- And(sign, input_high, Operand(HeapNumber::kSignMask));
-
- // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
- // to check for this specific case.
- Label high_shift_needed, high_shift_done;
- Branch(&high_shift_needed, lt, scratch, Operand(32));
- mov(input_high, zero_reg);
- Branch(&high_shift_done);
- bind(&high_shift_needed);
-
- // Set the implicit 1 before the mantissa part in input_high.
- Or(input_high,
- input_high,
- Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- // Shift the mantissa bits to the correct position.
- // We don't need to clear non-mantissa bits as they will be shifted away.
- // If they weren't, it would mean that the answer is in the 32bit range.
- sllv(input_high, input_high, scratch);
-
- bind(&high_shift_done);
-
- // Replace the shifted bits with bits from the lower mantissa word.
- Label pos_shift, shift_done;
- li(at, 32);
- subu(scratch, at, scratch);
- Branch(&pos_shift, ge, scratch, Operand(zero_reg));
-
- // Negate scratch.
- Subu(scratch, zero_reg, scratch);
- sllv(input_low, input_low, scratch);
- Branch(&shift_done);
-
- bind(&pos_shift);
- srlv(input_low, input_low, scratch);
-
- bind(&shift_done);
- Or(input_high, input_high, Operand(input_low));
- // Restore sign if necessary.
- mov(scratch, sign);
- result = sign;
- sign = no_reg;
- Subu(result, zero_reg, input_high);
- Movz(result, input_high, scratch);
- bind(&done);
-}
-
-
// Truncate 'double_input' to a signed 32-bit integer in 'result'.
// Fast path: a hardware trunc.w.d with the FCSR exception flags cleared
// first; if the conversion sets any of the overflow/underflow/invalid-op
// flags (value out of int32 range or NaN), fall back to a manual bit-level
// truncation via EmitOutOfInt32RangeTruncate.
// Clobbers single_scratch, scratch, scratch2 and scratch3.
void MacroAssembler::EmitECMATruncate(Register result,
                                      FPURegister double_input,
                                      FPURegister single_scratch,
                                      Register scratch,
                                      Register scratch2,
                                      Register scratch3) {
  CpuFeatures::Scope scope(FPU);
  // All integer scratch registers and the result must be pairwise distinct.
  ASSERT(!scratch2.is(result));
  ASSERT(!scratch3.is(result));
  ASSERT(!scratch3.is(scratch2));
  ASSERT(!scratch.is(result) &&
         !scratch.is(scratch2) &&
         !scratch.is(scratch3));
  ASSERT(!single_scratch.is(double_input));

  Label done;
  Label manual;

  // Clear cumulative exception flags and save the FCSR.
  cfc1(scratch2, FCSR);
  ctc1(zero_reg, FCSR);
  // Try a conversion to a signed integer.
  trunc_w_d(single_scratch, double_input);
  mfc1(result, single_scratch);
  // Retrieve and restore the FCSR.
  cfc1(scratch, FCSR);
  ctc1(scratch2, FCSR);
  // Check for overflow and NaNs.
  And(scratch,
      scratch,
      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
  // If we had no exceptions we are done.
  Branch(&done, eq, scratch, Operand(zero_reg));

  // Load the double value and perform a manual truncation.
  // scratch2 (the saved FCSR) is dead from here on, so it can be reused.
  Register input_high = scratch2;
  Register input_low = scratch3;
  Move(input_low, input_high, double_input);
  EmitOutOfInt32RangeTruncate(result,
                              input_high,
                              input_low,
                              scratch);
  bind(&done);
}
-
-
// Extract the 'num_least_bits' least-significant bits of the integer value
// held in the smi 'src' into 'dst', skipping over the smi tag bit.
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  Ext(dst, src, kSmiTagSize, num_least_bits);
}
-
-
// Mask the raw 32-bit integer in 'src' down to its 'num_least_bits'
// least-significant bits and place the result in 'dst'.
void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  And(dst, src, Operand((1 << num_least_bits) - 1));
}
-
-
// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct:
// an unconditional branch must be passed (zero_reg, zero_reg) and a
// conditional branch must compare at least one non-zero register.
#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-
-
// Unconditional branch by a fixed 16-bit instruction offset.
// Delegates to BranchShort, which handles the delay-slot policy.
void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
  BranchShort(offset, bdslot);
}
-
-
// Conditional branch by a fixed 16-bit instruction offset.
// Delegates to BranchShort, which handles the delay-slot policy.
void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  BranchShort(offset, cond, rs, rt, bdslot);
}
-
-
// Unconditional branch to label L. A short (16-bit offset) branch is used
// when L is bound and within range; otherwise an absolute jump via Jr is
// emitted. For unbound labels, a trampoline pool having been emitted means
// the label may end up out of short-branch range, so Jr is used.
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchShort(L, bdslot);
    } else {
      Jr(L, bdslot);
    }
  } else {
    if (is_trampoline_emitted()) {
      Jr(L, bdslot);
    } else {
      BranchShort(L, bdslot);
    }
  }
}
-
-
// Conditional branch to label L. Far targets are synthesized by a short
// branch over an absolute jump using the negated condition, because jr has
// no conditional form. See the unconditional overload for the
// bound/trampoline rationale.
void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchShort(L, cond, rs, rt, bdslot);
    } else {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jr(L, bdslot);
      bind(&skip);
    }
  } else {
    if (is_trampoline_emitted()) {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jr(L, bdslot);
      bind(&skip);
    } else {
      BranchShort(L, cond, rs, rt, bdslot);
    }
  }
}
-
-
// Conditional branch comparing 'rs' against a root-list constant.
// Loads the root value into 'at' first, so 'at' is clobbered.
void MacroAssembler::Branch(Label* L,
                            Condition cond,
                            Register rs,
                            Heap::RootListIndex index,
                            BranchDelaySlot bdslot) {
  LoadRoot(at, index);
  Branch(L, cond, rs, Operand(at), bdslot);
}
-
-
// Emit a single unconditional short branch with the given 16-bit offset.
void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
  b(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
-
-
// Emit a conditional short branch with the given 16-bit offset.
// Conditions without a native MIPS branch instruction are synthesized with
// slt/sltu (or their immediate forms) into 'at' followed by beq/bne, so 'at'
// may be clobbered. Comparisons against zero use the dedicated
// bgtz/bgez/bltz/blez encodings.
void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  ASSERT(!rs.is(zero_reg));
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
    // rt.
    r2 = rt.rm_;
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        beq(rs, r2, offset);
        break;
      case ne:
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          b(offset);
        } else {
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        // We don't want any other register but scratch clobbered.
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        beq(rs, r2, offset);
        break;
      case ne:
        // We don't want any other register but scratch clobbered.
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (rt.imm32_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm32_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          // Immediate fits slti: avoid materializing the operand.
          slti(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm32_ == 0) {
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm32_ == 0) {
          blez(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm32_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm32_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm32_ == 0) {
          // No code needs to be emitted.
          return;
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm32_ == 0) {
          b(offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
-
-
// Emit an unconditional short branch to label L.
void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
  // We use branch_offset as an argument for the branch instructions to be sure
  // it is called just before generating the branch instruction, as needed.

  b(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
-
-
// Emit a conditional short branch to label L. This mirrors the fixed-offset
// overload, except the offset is computed from the label immediately before
// each branch instruction (required so the recorded location is patchable).
// 'at' may be clobbered for synthesized comparisons.
void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
    r2 = rt.rm_;
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          // Immediate fits slti: avoid materializing the operand.
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm32_ == 0) {
          // No code needs to be emitted.
          return;
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
  // Check that offset could actually hold in an int16_t.
  ASSERT(is_int16(offset));
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
-
-
// Unconditional branch-and-link by a fixed 16-bit offset (sets ra).
void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
  BranchAndLinkShort(offset, bdslot);
}
-
-
// Conditional branch-and-link by a fixed 16-bit offset (sets ra).
void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
                                   const Operand& rt,
                                   BranchDelaySlot bdslot) {
  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
}
-
-
// Unconditional branch-and-link to label L. Mirrors Branch(Label*): short
// form when bound and near, otherwise an absolute jalr via Jalr; unbound
// labels use Jalr once a trampoline has been emitted.
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchAndLinkShort(L, bdslot);
    } else {
      Jalr(L, bdslot);
    }
  } else {
    if (is_trampoline_emitted()) {
      Jalr(L, bdslot);
    } else {
      BranchAndLinkShort(L, bdslot);
    }
  }
}
-
-
// Conditional branch-and-link to label L. Far targets are synthesized by a
// short branch over the Jalr using the negated condition.
void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
                                   const Operand& rt,
                                   BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchAndLinkShort(L, cond, rs, rt, bdslot);
    } else {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jalr(L, bdslot);
      bind(&skip);
    }
  } else {
    if (is_trampoline_emitted()) {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jalr(L, bdslot);
      bind(&skip);
    } else {
      BranchAndLinkShort(L, cond, rs, rt, bdslot);
    }
  }
}
-
-
// We need to use a bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead but we would miss overflow
// cases, so we keep slt and add an intermediate third instruction.
void MacroAssembler::BranchAndLinkShort(int16_t offset,
                                        BranchDelaySlot bdslot) {
  bal(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
-
-
// Conditional branch-and-link with a fixed 16-bit offset. eq/ne are built
// from a branch that skips the bal; the other conditions map the slt/sltu
// result {0,1} to {-1,0} via addiu -1 so bgezal/bltzal can test its sign.
// Clobbers 'at'.
void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
                                        Register rs, const Operand& rt,
                                        BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      bal(offset);
      break;
    case eq:
      // Skip the bal (and its delay-slot nop) when rs != r2.
      bne(rs, r2, 2);
      nop();
      bal(offset);
      break;
    case ne:
      beq(rs, r2, 2);
      nop();
      bal(offset);
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;
    case less:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case less_equal:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;
    case Uless:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;

    default:
      UNREACHABLE();
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
-
-
// Unconditional branch-and-link to label L using a short bal.
void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
  bal(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
-
-
// Conditional branch-and-link to label L. Same condition synthesis as the
// fixed-offset overload; the label offset is computed immediately before
// each linking branch so the location is patchable. Clobbers 'at'.
void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
                                        const Operand& rt,
                                        BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      offset = shifted_branch_offset(L, false);
      bal(offset);
      break;
    case eq:
      // Skip the bal (and its delay-slot nop) when rs != r2.
      bne(rs, r2, 2);
      nop();
      offset = shifted_branch_offset(L, false);
      bal(offset);
      break;
    case ne:
      beq(rs, r2, 2);
      nop();
      offset = shifted_branch_offset(L, false);
      bal(offset);
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;
    case less:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case less_equal:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;
    case Uless:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;

    default:
      UNREACHABLE();
  }

  // Check that offset could actually hold in an int16_t.
  ASSERT(is_int16(offset));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
-
-
// Jump to the address in 'target', optionally guarded by (cond, rs, rt):
// a conditional jump is built by branching over the jr with the negated
// condition. The trampoline pool is blocked so the pair stays contiguous.
void MacroAssembler::Jump(Register target,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (cond == cc_always) {
    jr(target);
  } else {
    BRANCH_ARGS_CHECK(cond, rs, rt);
    Branch(2, NegateCondition(cond), rs, rt);
    jr(target);
  }
  // Emit a nop in the branch delay slot if required.
  if (bd == PROTECT)
    nop();
}
-
-
// Jump to an absolute address, materialized into t9 with the given reloc
// mode. t9 is clobbered (it is the designated call/jump register on MIPS).
void MacroAssembler::Jump(intptr_t target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  Label skip;
  if (cond != cc_always) {
    Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
  }
  // The first instruction of 'li' may be placed in the delay slot.
  // This is not an issue, t9 is expected to be clobbered anyway.
  li(t9, Operand(target, rmode));
  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
  bind(&skip);
}
-
-
// Jump to a non-code-target address (code targets must go through the
// Handle<Code> overload so the target can be relocated).
void MacroAssembler::Jump(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
}
-
-
// Jump to a code object. The handle's raw location is embedded and patched
// by the relocation machinery (hence the code-target mode requirement).
void MacroAssembler::Jump(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}
-
-
// Size in bytes of the code Call(Register, ...) will emit with the same
// arguments. Must be kept in sync with that function: 1 instruction for the
// unconditional jalr, 3 for the conditional form (branch + delay nop +
// jalr), plus 1 for the PROTECT delay-slot nop.
int MacroAssembler::CallSize(Register target,
                             Condition cond,
                             Register rs,
                             const Operand& rt,
                             BranchDelaySlot bd) {
  int size = 0;

  if (cond == cc_always) {
    size += 1;
  } else {
    size += 3;
  }

  if (bd == PROTECT)
    size += 1;

  return size * kInstrSize;
}
-
-
// Note: To call gcc-compiled C code on mips, you must call through t9.
// Call the address in 'target' (jalr), optionally guarded by (cond, rs, rt).
// The emitted size is asserted against CallSize so the two stay in sync.
void MacroAssembler::Call(Register target,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  if (cond == cc_always) {
    jalr(target);
  } else {
    BRANCH_ARGS_CHECK(cond, rs, rt);
    Branch(2, NegateCondition(cond), rs, rt);
    jalr(target);
  }
  // Emit a nop in the branch delay slot if required.
  if (bd == PROTECT)
    nop();

  ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
}
-
-
// Size in bytes of Call(Address, ...): the register-call sequence plus the
// two instructions of the fixed-size li that loads the target into t9.
int MacroAssembler::CallSize(Address target,
                             RelocInfo::Mode rmode,
                             Condition cond,
                             Register rs,
                             const Operand& rt,
                             BranchDelaySlot bd) {
  int size = CallSize(t9, cond, rs, rt, bd);
  return size + 2 * kInstrSize;
}
-
-
// Call an absolute address: load it into t9 with a fixed-size (patchable)
// li, then call through t9. Clobbers t9.
void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  int32_t target_int = reinterpret_cast<int32_t>(target);
  // Must record previous source positions before the
  // li() generates a new code target.
  positions_recorder()->WriteRecordedPositions();
  li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
  Call(t9, cond, rs, rt, bd);
  ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
}
-
-
// Size in bytes of Call(Handle<Code>, ...); the ast_id does not change the
// emitted size, so this simply forwards to the address overload.
int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id,
                             Condition cond,
                             Register rs,
                             const Operand& rt,
                             BranchDelaySlot bd) {
  return CallSize(reinterpret_cast<Address>(code.location()),
      rmode, cond, rs, rt, bd);
}
-
-
// Call a code object. If a type-feedback id is supplied with a plain
// CODE_TARGET mode, the id is recorded and the mode upgraded to
// CODE_TARGET_WITH_ID so the feedback can be associated with the call site.
void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
}
-
-
// Return from the current function: an (optionally conditional) jump to ra.
void MacroAssembler::Ret(Condition cond,
                         Register rs,
                         const Operand& rt,
                         BranchDelaySlot bd) {
  Jump(ra, cond, rs, rt, bd);
}
-
-
// Absolute jump to label L using the 26-bit j instruction (same 256MB
// region only). The jump target is recorded as an internal reference so it
// can be patched; buffer growth is blocked until the instruction is out.
void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);

  uint32_t imm28;
  imm28 = jump_address(L);
  imm28 &= kImm28Mask;
  { BlockGrowBufferScope block_buf_growth(this);
    // Buffer growth (and relocation) must be blocked for internal references
    // until associated instructions are emitted and available to be patched.
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
    j(imm28);
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
-
-
// Far jump to label L: materialize the full 32-bit address into 'at' with
// lui/ori (recorded as a patchable internal reference) and jr through it.
// Clobbers 'at'.
void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);

  uint32_t imm32;
  imm32 = jump_address(L);
  { BlockGrowBufferScope block_buf_growth(this);
    // Buffer growth (and relocation) must be blocked for internal references
    // until associated instructions are emitted and available to be patched.
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
    lui(at, (imm32 & kHiMask) >> kLuiShift);
    ori(at, at, (imm32 & kImm16Mask));
  }
  jr(at);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
-
-
// Far call to label L: same lui/ori address materialization as Jr, but
// using jalr so the return address is stored in ra. Clobbers 'at'.
void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);

  uint32_t imm32;
  imm32 = jump_address(L);
  { BlockGrowBufferScope block_buf_growth(this);
    // Buffer growth (and relocation) must be blocked for internal references
    // until associated instructions are emitted and available to be patched.
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
    lui(at, (imm32 & kHiMask) >> kLuiShift);
    ori(at, at, (imm32 & kImm16Mask));
  }
  jalr(at);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
-
// Pop 'drop' pointer-sized stack slots and return. The stack adjustment is
// placed in the jr's branch delay slot, so it executes before the return
// takes effect.
void MacroAssembler::DropAndRet(int drop) {
  Ret(USE_DELAY_SLOT);
  addiu(sp, sp, drop * kPointerSize);
}
-
// Conditionally pop 'drop' stack slots and return; when the condition does
// not hold, execution falls through past the drop/return pair.
void MacroAssembler::DropAndRet(int drop,
                                Condition cond,
                                Register r1,
                                const Operand& r2) {
  // Both Drop and Ret need to be conditional.
  Label skip;
  if (cond != cc_always) {
    Branch(&skip, NegateCondition(cond), r1, r2);
  }

  Drop(drop);
  Ret();

  if (cond != cc_always) {
    bind(&skip);
  }
}
-
-
-void MacroAssembler::Drop(int count,
- Condition cond,
- Register reg,
- const Operand& op) {
- if (count <= 0) {
- return;
- }
-
- Label skip;
-
- if (cond != al) {
- Branch(&skip, NegateCondition(cond), reg, op);
- }
-
- addiu(sp, sp, count * kPointerSize);
-
- if (cond != al) {
- bind(&skip);
- }
-}
-
-
-
-void MacroAssembler::Swap(Register reg1,
- Register reg2,
- Register scratch) {
- if (scratch.is(no_reg)) {
- Xor(reg1, reg1, Operand(reg2));
- Xor(reg2, reg2, Operand(reg1));
- Xor(reg1, reg1, Operand(reg2));
- } else {
- mov(scratch, reg1);
- mov(reg1, reg2);
- mov(reg2, scratch);
- }
-}
-
-
// Call a label within the current code object (sets ra).
void MacroAssembler::Call(Label* target) {
  BranchAndLink(target);
}
-
-
// Push a heap object handle onto the stack. Clobbers 'at'.
void MacroAssembler::Push(Handle<Object> handle) {
  li(at, Operand(handle));
  push(at);
}
-
-
#ifdef ENABLE_DEBUGGER_SUPPORT

// Emit a call into the runtime's kDebugBreak handler via the C-entry stub,
// marked with DEBUG_BREAK relocation so the debugger can find the site.
void MacroAssembler::DebugBreak() {
  PrepareCEntryArgs(0);
  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
  CEntryStub ces(1);
  ASSERT(AllowThisStubCall(&ces));
  Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
}

#endif  // ENABLE_DEBUGGER_SUPPORT
-
-
-// ---------------------------------------------------------------------------
-// Exception handling.
-
// Push a new try-handler frame (next, code, state, context, fp — see the
// STATIC_ASSERTs) onto the stack and link it as the isolate's current
// handler. Clobbers t1 and t2.
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // For the JSEntry handler, we must preserve a0-a3 and s0.
  // t1-t3 are available. We will build up the handler from the bottom by
  // pushing on the stack.
  // Set up the code object (t1) and the state (t2) for pushing.
  unsigned state =
      StackHandler::IndexField::encode(handler_index) |
      StackHandler::KindField::encode(kind);
  li(t1, Operand(CodeObject()), CONSTANT_SIZE);
  li(t2, Operand(state));

  // Push the frame pointer, context, state, and code object.
  if (kind == StackHandler::JS_ENTRY) {
    ASSERT_EQ(Smi::FromInt(0), 0);
    // The second zero_reg indicates no context.
    // The first zero_reg is the NULL frame pointer.
    // The operands are reversed to match the order of MultiPush/Pop.
    Push(zero_reg, zero_reg, t2, t1);
  } else {
    MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
  }

  // Link the current handler as the next handler.
  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  lw(t1, MemOperand(t2));
  push(t1);
  // Set this new handler as the current one.
  sw(sp, MemOperand(t2));
}
-
-
// Unlink the topmost try-handler: restore the saved 'next' pointer as the
// isolate's current handler and discard the rest of the handler frame.
// Clobbers a1 and 'at'.
void MacroAssembler::PopTryHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(a1);
  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  sw(a1, MemOperand(at));
}
-
-
// Jump to the exception handler entry: index into the code object's handler
// table (a fixed array of smi-tagged code offsets) with the handler index
// decoded from the state word, then jump to code start + offset.
void MacroAssembler::JumpToHandlerEntry() {
  // Compute the handler entry address and jump to it. The handler table is
  // a fixed array of (smi-tagged) code offsets.
  // v0 = exception, a1 = code object, a2 = state.
  lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));  // Handler table.
  Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  srl(a2, a2, StackHandler::kKindWidth);  // Handler index.
  sll(a2, a2, kPointerSizeLog2);
  Addu(a2, a3, a2);
  lw(a2, MemOperand(a2));  // Smi-tagged offset.
  Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
  sra(t9, a2, kSmiTagSize);
  Addu(t9, t9, a1);
  Jump(t9);  // Jump.
}
-
-
// Throw 'value': unwind to the topmost handler, restore its saved next
// pointer, context and frame pointer, and jump to the handler entry.
void MacroAssembler::Throw(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in v0.
  Move(v0, value);

  // Drop the stack pointer to the top of the top handler.
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
                                   isolate())));
  lw(sp, MemOperand(a3));

  // Restore the next handler.
  pop(a2);
  sw(a2, MemOperand(a3));

  // Get the code object (a1) and state (a2). Restore the context and frame
  // pointer.
  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());

  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
  // or cp.
  Label done;
  Branch(&done, eq, cp, Operand(zero_reg));
  sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  bind(&done);

  JumpToHandlerEntry();
}
-
-
// Throw 'value' past all catchable handlers: walk the handler chain until
// the JS_ENTRY handler is found, unlink it, and jump to its entry.
void MacroAssembler::ThrowUncatchable(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in v0.
  if (!value.is(v0)) {
    mov(v0, value);
  }
  // Drop the stack pointer to the top of the top stack handler.
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  lw(sp, MemOperand(a3));

  // Unwind the handlers until the ENTRY handler is found.
  Label fetch_next, check_kind;
  jmp(&check_kind);
  bind(&fetch_next);
  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));

  bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
  And(a2, a2, Operand(StackHandler::KindField::kMask));
  Branch(&fetch_next, ne, a2, Operand(zero_reg));

  // Set the top handler address to next handler past the top ENTRY handler.
  pop(a2);
  sw(a2, MemOperand(a3));

  // Get the code object (a1) and state (a2). Clear the context and frame
  // pointer (0 was saved in the handler).
  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());

  JumpToHandlerEntry();
}
-
-
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- li(result, 0x7091);
- li(scratch1, 0x7191);
- li(scratch2, 0x7291);
- }
- jmp(gc_required);
- return;
- }
-
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!scratch1.is(t9));
- ASSERT(!scratch2.is(t9));
- ASSERT(!result.is(t9));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- ASSERT_EQ(0, object_size & kObjectAlignmentMask);
-
- // Check relative positions of allocation top and limit addresses.
- // ARM adds additional checks to make sure the ldm instruction can be
- // used. On MIPS we don't have ldm so we don't need additional checks either.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
-
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- Register obj_size_reg = scratch2;
- li(topaddr, Operand(new_space_allocation_top));
- li(obj_size_reg, Operand(object_size));
-
- // This code stores a temporary value in t9.
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- lw(result, MemOperand(topaddr));
- lw(t9, MemOperand(topaddr, kPointerSize));
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- lw(t9, MemOperand(topaddr));
- Check(eq, "Unexpected allocation top", result, Operand(t9));
- }
- // Load allocation limit into t9. Result already contains allocation top.
- lw(t9, MemOperand(topaddr, limit - top));
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- Addu(scratch2, result, Operand(obj_size_reg));
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
- sw(scratch2, MemOperand(topaddr));
-
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- Addu(result, result, Operand(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- li(result, 0x7091);
- li(scratch1, 0x7191);
- li(scratch2, 0x7291);
- }
- jmp(gc_required);
- return;
- }
-
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!object_size.is(t9));
- ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
-
- // Check relative positions of allocation top and limit addresses.
- // ARM adds additional checks to make sure the ldm instruction can be
- // used. On MIPS we don't have ldm so we don't need additional checks either.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
-
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(new_space_allocation_top));
-
- // This code stores a temporary value in t9.
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- lw(result, MemOperand(topaddr));
- lw(t9, MemOperand(topaddr, kPointerSize));
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- lw(t9, MemOperand(topaddr));
- Check(eq, "Unexpected allocation top", result, Operand(t9));
- }
- // Load allocation limit into t9. Result already contains allocation top.
- lw(t9, MemOperand(topaddr, limit - top));
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top. Object size may be in words so a shift is
- // required to get the number of bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- sll(scratch2, object_size, kPointerSizeLog2);
- Addu(scratch2, result, scratch2);
- } else {
- Addu(scratch2, result, Operand(object_size));
- }
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
-
- // Update allocation top. result temporarily holds the new top.
- if (emit_debug_code()) {
- And(t9, scratch2, Operand(kObjectAlignmentMask));
- Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
- }
- sw(scratch2, MemOperand(topaddr));
-
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- Addu(result, result, Operand(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::UndoAllocationInNewSpace(Register object,
- Register scratch) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- And(object, object, Operand(~kHeapObjectTagMask));
-#ifdef DEBUG
- // Check that the object un-allocated is below the current top.
- li(scratch, Operand(new_space_allocation_top));
- lw(scratch, MemOperand(scratch));
- Check(less, "Undo allocation of non allocated memory",
- object, Operand(scratch));
-#endif
- // Write the address of the object to un-allocate as the current top.
- li(scratch, Operand(new_space_allocation_top));
- sw(object, MemOperand(scratch));
-}
-
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- sll(scratch1, length, 1); // Length in bytes, not chars.
- addiu(scratch1, scratch1,
- kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
- And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate two-byte string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string
- // while observing object alignment.
- ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- ASSERT(kCharSize == 1);
- addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
- And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate ASCII string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kConsStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-// Allocates a heap number or jumps to the label if the young space is full and
-// a scavenge is needed.
-void MacroAssembler::AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* need_gc,
- TaggingMode tagging_mode) {
- // Allocate an object in the heap for the heap number and tag it as a heap
- // object.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- tagging_mode == TAG_RESULT ? TAG_OBJECT :
- NO_ALLOCATION_FLAGS);
-
- // Store heap number map in the allocated object.
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- if (tagging_mode == TAG_RESULT) {
- sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
- } else {
- sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
-}
-
-
-void MacroAssembler::AllocateHeapNumberWithValue(Register result,
- FPURegister value,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
- sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
-}
-
-
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst,
- Register src,
- RegList temps,
- int field_count) {
- ASSERT((temps & dst.bit()) == 0);
- ASSERT((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
-
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < kNumRegisters; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.code_ = i;
- break;
- }
- }
- ASSERT(!tmp.is(no_reg));
-
- for (int i = 0; i < field_count; i++) {
- lw(tmp, FieldMemOperand(src, i * kPointerSize));
- sw(tmp, FieldMemOperand(dst, i * kPointerSize));
- }
-}
-
-
-void MacroAssembler::CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
-
- // Align src before copying in word size chunks.
- bind(&align_loop);
- Branch(&done, eq, length, Operand(zero_reg));
- bind(&align_loop_1);
- And(scratch, src, kPointerSize - 1);
- Branch(&word_loop, eq, scratch, Operand(zero_reg));
- lbu(scratch, MemOperand(src));
- Addu(src, src, 1);
- sb(scratch, MemOperand(dst));
- Addu(dst, dst, 1);
- Subu(length, length, Operand(1));
- Branch(&byte_loop_1, ne, length, Operand(zero_reg));
-
- // Copy bytes in word size chunks.
- bind(&word_loop);
- if (emit_debug_code()) {
- And(scratch, src, kPointerSize - 1);
- Assert(eq, "Expecting alignment for CopyBytes",
- scratch, Operand(zero_reg));
- }
- Branch(&byte_loop, lt, length, Operand(kPointerSize));
- lw(scratch, MemOperand(src));
- Addu(src, src, kPointerSize);
-
- // TODO(kalmard) check if this can be optimized to use sw in most cases.
- // Can't use unaligned access - copy byte by byte.
- sb(scratch, MemOperand(dst, 0));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 1));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 2));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 3));
- Addu(dst, dst, 4);
-
- Subu(length, length, Operand(kPointerSize));
- Branch(&word_loop);
-
- // Copy the last bytes if any left.
- bind(&byte_loop);
- Branch(&done, eq, length, Operand(zero_reg));
- bind(&byte_loop_1);
- lbu(scratch, MemOperand(src));
- Addu(src, src, 1);
- sb(scratch, MemOperand(dst));
- Addu(dst, dst, 1);
- Subu(length, length, Operand(1));
- Branch(&byte_loop_1, ne, length, Operand(zero_reg));
- bind(&done);
-}
-
-
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler) {
- Label loop, entry;
- Branch(&entry);
- bind(&loop);
- sw(filler, MemOperand(start_offset));
- Addu(start_offset, start_offset, kPointerSize);
- bind(&entry);
- Branch(&loop, lt, start_offset, Operand(end_offset));
-}
-
-
-void MacroAssembler::CheckFastElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleyElementValue));
-}
-
-
-void MacroAssembler::CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, ls, scratch,
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleyElementValue));
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail,
- int elements_offset) {
- Label smi_value, maybe_nan, have_double_value, is_nan, done;
- Register mantissa_reg = scratch2;
- Register exponent_reg = scratch3;
-
- // Handle smi values specially.
- JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- CheckMap(value_reg,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
-
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
- lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
-
- lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- bind(&have_double_value);
- sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- Addu(scratch1, scratch1, elements_reg);
- sw(mantissa_reg, FieldMemOperand(
- scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
- uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
- sizeof(kHoleNanLower32);
- sw(exponent_reg, FieldMemOperand(scratch1, offset));
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
- lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
- bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
- jmp(&have_double_value);
-
- bind(&smi_value);
- Addu(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
- elements_offset));
- sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- Addu(scratch1, scratch1, scratch2);
- // scratch1 is now effective address of the double element
-
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(FPU)) {
- destination = FloatingPointHelper::kFPURegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
- Register untagged_value = elements_reg;
- SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(this,
- untagged_value,
- destination,
- f0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- f2);
- if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
- sdc1(f0, MemOperand(scratch1, 0));
- } else {
- sw(mantissa_reg, MemOperand(scratch1, 0));
- sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
- }
- bind(&done);
-}
-
-
-void MacroAssembler::CompareMapAndBranch(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success,
- Condition cond,
- Label* branch_to,
- CompareMapMode mode) {
- lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMapAndBranch(scratch, map, early_success, cond, branch_to, mode);
-}
-
-
-void MacroAssembler::CompareMapAndBranch(Register obj_map,
- Handle<Map> map,
- Label* early_success,
- Condition cond,
- Label* branch_to,
- CompareMapMode mode) {
- Operand right = Operand(map);
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- Branch(early_success, eq, obj_map, right);
- right = Operand(Handle<Map>(current_map));
- }
- }
- }
-
- Branch(branch_to, cond, obj_map, right);
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- Label success;
- CompareMapAndBranch(obj, scratch, map, &success, ne, fail, mode);
- bind(&success);
-}
-
-
-void MacroAssembler::DispatchMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type) {
- Label fail;
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, &fail);
- }
- lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
- bind(&fail);
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- LoadRoot(at, index);
- Branch(fail, ne, scratch, Operand(at));
-}
-
-
-void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
- CpuFeatures::Scope scope(FPU);
- if (IsMipsSoftFloatABI) {
- Move(dst, v0, v1);
- } else {
- Move(dst, f0); // Reg f0 is o32 ABI FP return value.
- }
-}
-
-
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
- CpuFeatures::Scope scope(FPU);
- if (!IsMipsSoftFloatABI) {
- Move(f12, dreg);
- } else {
- Move(a0, a1, dreg);
- }
-}
-
-
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
- DoubleRegister dreg2) {
- CpuFeatures::Scope scope(FPU);
- if (!IsMipsSoftFloatABI) {
- if (dreg2.is(f12)) {
- ASSERT(!dreg1.is(f14));
- Move(f14, dreg2);
- Move(f12, dreg1);
- } else {
- Move(f12, dreg1);
- Move(f14, dreg2);
- }
- } else {
- Move(a0, a1, dreg1);
- Move(a2, a3, dreg2);
- }
-}
-
-
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
- Register reg) {
- CpuFeatures::Scope scope(FPU);
- if (!IsMipsSoftFloatABI) {
- Move(f12, dreg);
- Move(a2, reg);
- } else {
- Move(a2, reg);
- Move(a0, a1, dreg);
- }
-}
-
-
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be t1 to
- // follow the calling convention which requires the call type to be
- // in t1.
- ASSERT(dst.is(t1));
- if (call_kind == CALL_AS_FUNCTION) {
- li(dst, Operand(Smi::FromInt(1)));
- } else {
- li(dst, Operand(Smi::FromInt(0)));
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// JavaScript invokes.
-
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- bool definitely_matches = false;
- *definitely_mismatches = false;
- Label regular_invoke;
-
- // Check whether the expected and actual arguments count match. If not,
- // setup registers according to contract with ArgumentsAdaptorTrampoline:
- // a0: actual arguments count
- // a1: function (passed through to callee)
- // a2: expected arguments count
- // a3: callee code entry
-
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract if values are
- // passed in registers.
- ASSERT(actual.is_immediate() || actual.reg().is(a0));
- ASSERT(expected.is_immediate() || expected.reg().is(a2));
- ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
-
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- li(a0, Operand(actual.immediate()));
- const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- if (expected.immediate() == sentinel) {
- // Don't worry about adapting arguments for builtins that
- // don't want that done. Skip adaption code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- *definitely_mismatches = true;
- li(a2, Operand(expected.immediate()));
- }
- }
- } else if (actual.is_immediate()) {
- Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
- li(a0, Operand(actual.immediate()));
- } else {
- Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
- }
-
- if (!definitely_matches) {
- if (!code_constant.is_null()) {
- li(a3, Operand(code_constant));
- addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
- }
-
- Handle<Code> adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(t1, call_kind);
- Call(adaptor);
- call_wrapper.AfterCall();
- if (!*definitely_mismatches) {
- Branch(done);
- }
- } else {
- SetCallKind(t1, call_kind);
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&regular_invoke);
- }
-}
-
-
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
-
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag,
- call_wrapper, call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(t1, call_kind);
- Call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(t1, call_kind);
- Jump(code);
- }
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
-
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, no_reg,
- &done, &definitely_mismatches, flag,
- NullCallWrapper(), call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- SetCallKind(t1, call_kind);
- Call(code, rmode);
- } else {
- SetCallKind(t1, call_kind);
- Jump(code, rmode);
- }
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Contract with called JS functions requires that function is passed in a1.
- ASSERT(function.is(a1));
- Register expected_reg = a2;
- Register code_reg = a3;
-
- lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- lw(expected_reg,
- FieldMemOperand(code_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
- sra(expected_reg, expected_reg, kSmiTagSize);
- lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
-
- ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Get the function and setup the context.
- LoadHeapObject(a1, function);
- lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- ParameterCount expected(function->shared()->formal_parameter_count());
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-}
-
-
-void MacroAssembler::IsObjectJSStringType(Register object,
- Register scratch,
- Label* fail) {
- ASSERT(kNotStringTag != 0);
-
- lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- And(scratch, scratch, Operand(kIsNotStringMask));
- Branch(fail, ne, scratch, Operand(zero_reg));
-}
-
-
-// ---------------------------------------------------------------------------
-// Support functions.
-
-
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- GetObjectType(function, result, scratch);
- Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- if (miss_on_bound_function) {
- lw(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- lw(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- And(scratch, scratch,
- Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
- Branch(miss, ne, scratch, Operand(zero_reg));
- }
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
- Branch(&non_instance, ne, scratch, Operand(zero_reg));
-
- // Get the prototype or initial map from the function.
- lw(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- LoadRoot(t8, Heap::kTheHoleValueRootIndex);
- Branch(miss, eq, result, Operand(t8));
-
- // If the function does not have an initial map, we're done.
- Label done;
- GetObjectType(result, scratch, scratch);
- Branch(&done, ne, scratch, Operand(MAP_TYPE));
-
- // Get the prototype from the initial map.
- lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- lw(result, FieldMemOperand(result, Map::kConstructorOffset));
-
- // All done.
- bind(&done);
-}
-
-
-void MacroAssembler::GetObjectType(Register object,
- Register map,
- Register type_reg) {
- lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
-}
-
-
-// -----------------------------------------------------------------------------
-// Runtime calls.
-
-void MacroAssembler::CallStub(CodeStub* stub,
- TypeFeedbackId ast_id,
- Condition cond,
- Register r1,
- const Operand& r2,
- BranchDelaySlot bd) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id,
- cond, r1, r2, bd);
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- int stack_space) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate()),
- next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate()),
- next_address);
-
- // Allocate HandleScope in callee-save registers.
- li(s3, Operand(next_address));
- lw(s0, MemOperand(s3, kNextOffset));
- lw(s1, MemOperand(s3, kLimitOffset));
- lw(s2, MemOperand(s3, kLevelOffset));
- Addu(s2, s2, Operand(1));
- sw(s2, MemOperand(s3, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, a0);
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
- // The O32 ABI requires us to pass a pointer in a0 where the returned struct
- // (4 bytes) will be placed. This is also built into the Simulator.
- // Set up the pointer to the returned value (a0). It was allocated in
- // EnterExitFrame.
- addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub;
- stub.GenerateCall(this, function);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, a0);
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
- // As mentioned above, on MIPS a pointer is returned - we need to dereference
- // it to get the actual return value (which is also a pointer).
- lw(v0, MemOperand(v0));
-
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
-
- // If result is non-zero, dereference to get the result value
- // otherwise set it to undefined.
- Label skip;
- LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- Branch(&skip, eq, v0, Operand(zero_reg));
- lw(a0, MemOperand(v0));
- bind(&skip);
- mov(v0, a0);
-
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- sw(s0, MemOperand(s3, kNextOffset));
- if (emit_debug_code()) {
- lw(a1, MemOperand(s3, kLevelOffset));
- Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
- }
- Subu(s2, s2, Operand(1));
- sw(s2, MemOperand(s3, kLevelOffset));
- lw(at, MemOperand(s3, kLimitOffset));
- Branch(&delete_allocated_handles, ne, s1, Operand(at));
-
- // Check if the function scheduled an exception.
- bind(&leave_exit_frame);
- LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
- lw(t1, MemOperand(at));
- Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
- li(s0, Operand(stack_space));
- LeaveExitFrame(false, s0, true);
-
- bind(&promote_scheduled_exception);
- TailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
-
- // HandleScope limit has changed. Delete allocated extensions.
- bind(&delete_allocated_handles);
- sw(s1, MemOperand(s3, kLimitOffset));
- mov(s0, v0);
- mov(a0, v0);
- PrepareCallCFunction(1, s1);
- li(a0, Operand(ExternalReference::isolate_address()));
- CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
- 1);
- mov(v0, s0);
- jmp(&leave_exit_frame);
-}
-
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addiu(sp, sp, num_arguments * kPointerSize);
- }
- LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash,
- Register index) {
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
- Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- sll(index, hash, kSmiTagSize);
-}
-
-
-void MacroAssembler::ObjectToDoubleFPURegister(Register object,
- FPURegister result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* not_number,
- ObjectToDoubleFlags flags) {
- Label done;
- if ((flags & OBJECT_NOT_SMI) == 0) {
- Label not_smi;
- JumpIfNotSmi(object, &not_smi);
- // Remove smi tag and convert to double.
- sra(scratch1, object, kSmiTagSize);
- mtc1(scratch1, result);
- cvt_d_w(result, result);
- Branch(&done);
- bind(&not_smi);
- }
- // Check for heap number and load double value from it.
- lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
- Branch(not_number, ne, scratch1, Operand(heap_number_map));
-
- if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
- // If exponent is all ones the number is either a NaN or +/-Infinity.
- Register exponent = scratch1;
- Register mask_reg = scratch2;
- lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- li(mask_reg, HeapNumber::kExponentMask);
-
- And(exponent, exponent, mask_reg);
- Branch(not_number, eq, exponent, Operand(mask_reg));
- }
- ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
- bind(&done);
-}
-
-
-void MacroAssembler::SmiToDoubleFPURegister(Register smi,
- FPURegister value,
- Register scratch1) {
- sra(scratch1, smi, kSmiTagSize);
- mtc1(scratch1, value);
- cvt_d_w(value, value);
-}
-
-
-void MacroAssembler::AdduAndCheckForOverflow(Register dst,
- Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- ASSERT(!dst.is(overflow_dst));
- ASSERT(!dst.is(scratch));
- ASSERT(!overflow_dst.is(scratch));
- ASSERT(!overflow_dst.is(left));
- ASSERT(!overflow_dst.is(right));
-
- if (left.is(right) && dst.is(left)) {
- ASSERT(!dst.is(t9));
- ASSERT(!scratch.is(t9));
- ASSERT(!left.is(t9));
- ASSERT(!right.is(t9));
- ASSERT(!overflow_dst.is(t9));
- mov(t9, right);
- right = t9;
- }
-
- if (dst.is(left)) {
- mov(scratch, left); // Preserve left.
- addu(dst, left, right); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, right);
- and_(overflow_dst, overflow_dst, scratch);
- } else if (dst.is(right)) {
- mov(scratch, right); // Preserve right.
- addu(dst, left, right); // Right is overwritten.
- xor_(scratch, dst, scratch); // Original right.
- xor_(overflow_dst, dst, left);
- and_(overflow_dst, overflow_dst, scratch);
- } else {
- addu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, dst, right);
- and_(overflow_dst, scratch, overflow_dst);
- }
-}
-
-
-void MacroAssembler::SubuAndCheckForOverflow(Register dst,
- Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- ASSERT(!dst.is(overflow_dst));
- ASSERT(!dst.is(scratch));
- ASSERT(!overflow_dst.is(scratch));
- ASSERT(!overflow_dst.is(left));
- ASSERT(!overflow_dst.is(right));
- ASSERT(!scratch.is(left));
- ASSERT(!scratch.is(right));
-
- // This happens with some crankshaft code. Since Subu works fine if
- // left == right, let's not make that restriction here.
- if (left.is(right)) {
- mov(dst, zero_reg);
- mov(overflow_dst, zero_reg);
- return;
- }
-
- if (dst.is(left)) {
- mov(scratch, left); // Preserve left.
- subu(dst, left, right); // Left is overwritten.
- xor_(overflow_dst, dst, scratch); // scratch is original left.
- xor_(scratch, scratch, right); // scratch is original left.
- and_(overflow_dst, scratch, overflow_dst);
- } else if (dst.is(right)) {
- mov(scratch, right); // Preserve right.
- subu(dst, left, right); // Right is overwritten.
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, scratch); // Original right.
- and_(overflow_dst, scratch, overflow_dst);
- } else {
- subu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, right);
- and_(overflow_dst, scratch, overflow_dst);
- }
-}
-
-
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
- // All parameters are on the stack. v0 has the return value after call.
-
- // If the expected number of arguments of the runtime function is
- // constant, we check that the actual number of arguments match the
- // expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- PrepareCEntryArgs(num_arguments);
- PrepareCEntryFunction(ExternalReference(f, isolate()));
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- PrepareCEntryArgs(function->nargs);
- PrepareCEntryFunction(ExternalReference(function, isolate()));
- SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
- ? kSaveFPRegs
- : kDontSaveFPRegs;
- CEntryStub stub(1, mode);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
-void MacroAssembler::CallExternalReference(const ExternalReference& ext,
- int num_arguments,
- BranchDelaySlot bd) {
- PrepareCEntryArgs(num_arguments);
- PrepareCEntryFunction(ext);
-
- CEntryStub stub(1);
- CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- PrepareCEntryArgs(num_arguments);
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
- BranchDelaySlot bd) {
- PrepareCEntryFunction(builtin);
- CEntryStub stub(1);
- Jump(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- al,
- zero_reg,
- Operand(zero_reg),
- bd);
-}
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- GetBuiltinEntry(t9, id);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(t9));
- SetCallKind(t1, CALL_AS_METHOD);
- Call(t9);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(t1, CALL_AS_METHOD);
- Jump(t9);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
- // Load the builtins object into target register.
- lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
- // Load the JavaScript builtin function from the builtins object.
- lw(target, FieldMemOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(a1));
- GetBuiltinFunction(a1, id);
- // Load the code entry point from the builtins object.
- lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
-}
-
-
-void MacroAssembler::SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- li(scratch1, Operand(value));
- li(scratch2, Operand(ExternalReference(counter)));
- sw(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- li(scratch2, Operand(ExternalReference(counter)));
- lw(scratch1, MemOperand(scratch2));
- Addu(scratch1, scratch1, Operand(value));
- sw(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- li(scratch2, Operand(ExternalReference(counter)));
- lw(scratch1, MemOperand(scratch2));
- Subu(scratch1, scratch1, Operand(value));
- sw(scratch1, MemOperand(scratch2));
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Debugging.
-
-void MacroAssembler::Assert(Condition cc, const char* msg,
- Register rs, Operand rt) {
- if (emit_debug_code())
- Check(cc, msg, rs, rt);
-}
-
-
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index) {
- if (emit_debug_code()) {
- LoadRoot(at, index);
- Check(eq, "Register did not match expected root", reg, Operand(at));
- }
-}
-
-
-void MacroAssembler::AssertFastElements(Register elements) {
- if (emit_debug_code()) {
- ASSERT(!elements.is(at));
- Label ok;
- push(elements);
- lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- Branch(&ok, eq, elements, Operand(at));
- LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
- Branch(&ok, eq, elements, Operand(at));
- LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
- Branch(&ok, eq, elements, Operand(at));
- Abort("JSObject with fast elements map has slow elements");
- bind(&ok);
- pop(elements);
- }
-}
-
-
-void MacroAssembler::Check(Condition cc, const char* msg,
- Register rs, Operand rt) {
- Label L;
- Branch(&L, cc, rs, rt);
- Abort(msg);
- // Will not return here.
- bind(&L);
-}
-
-
-void MacroAssembler::Abort(const char* msg) {
- Label abort_start;
- bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
-#ifdef DEBUG
- if (msg != NULL) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
-#endif
-
- li(a0, Operand(p0));
- push(a0);
- li(a0, Operand(Smi::FromInt(p1 - p0)));
- push(a0);
- // Disable stub call restrictions to always allow calls to abort.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
- } else {
- CallRuntime(Runtime::kAbort, 2);
- }
- // Will not return here.
- if (is_trampoline_pool_blocked()) {
- // If the calling code cares about the exact number of
- // instructions generated, we insert padding here to keep the size
- // of the Abort macro constant.
- // Currently in debug mode with debug_code enabled the number of
- // generated instructions is 14, so we use this as a maximum value.
- static const int kExpectedAbortInstructions = 14;
- int abort_instructions = InstructionsGeneratedSince(&abort_start);
- ASSERT(abort_instructions <= kExpectedAbortInstructions);
- while (abort_instructions++ < kExpectedAbortInstructions) {
- nop();
- }
- }
-}
-
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- for (int i = 1; i < context_chain_length; i++) {
- lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- }
- } else {
- // Slot is in the current function context. Move it into the
- // destination register in case we store into it (the write barrier
- // cannot be allowed to destroy the context in esi).
- Move(dst, cp);
- }
-}
-
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- // Load the global or builtins object from the current context.
- lw(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check that the function's map is the same as the expected cached map.
- lw(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- lw(at, FieldMemOperand(scratch, offset));
- Branch(no_map_match, ne, map_in_out, Operand(at));
-
- // Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- lw(map_in_out, FieldMemOperand(scratch, offset));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- lw(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- lw(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- lw(function, FieldMemOperand(function,
- GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- lw(function, MemOperand(function, Context::SlotOffset(index)));
-}
-
-
-void MacroAssembler::LoadArrayFunction(Register function) {
- // Load the global or builtins object from the current context.
- lw(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the global context from the global or builtins object.
- lw(function,
- FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the array function from the native context.
- lw(function,
- MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch) {
- // Load the initial map. The global functions all have initial maps.
- lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
- Branch(&ok);
- bind(&fail);
- Abort("Global functions must have initial map");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
- addiu(sp, sp, -5 * kPointerSize);
- li(t8, Operand(Smi::FromInt(type)));
- li(t9, Operand(CodeObject()), CONSTANT_SIZE);
- sw(ra, MemOperand(sp, 4 * kPointerSize));
- sw(fp, MemOperand(sp, 3 * kPointerSize));
- sw(cp, MemOperand(sp, 2 * kPointerSize));
- sw(t8, MemOperand(sp, 1 * kPointerSize));
- sw(t9, MemOperand(sp, 0 * kPointerSize));
- addiu(fp, sp, 3 * kPointerSize);
-}
-
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- mov(sp, fp);
- lw(fp, MemOperand(sp, 0 * kPointerSize));
- lw(ra, MemOperand(sp, 1 * kPointerSize));
- addiu(sp, sp, 2 * kPointerSize);
-}
-
-
-void MacroAssembler::EnterExitFrame(bool save_doubles,
- int stack_space) {
- // Set up the frame structure on the stack.
- STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
- STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
- STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
-
- // This is how the stack will look:
- // fp + 2 (==kCallerSPDisplacement) - old stack's end
- // [fp + 1 (==kCallerPCOffset)] - saved old ra
- // [fp + 0 (==kCallerFPOffset)] - saved old fp
- // [fp - 1 (==kSPOffset)] - sp of the called function
- // [fp - 2 (==kCodeOffset)] - CodeObject
- // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
- // new stack (will contain saved ra)
-
- // Save registers.
- addiu(sp, sp, -4 * kPointerSize);
- sw(ra, MemOperand(sp, 3 * kPointerSize));
- sw(fp, MemOperand(sp, 2 * kPointerSize));
- addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
-
- if (emit_debug_code()) {
- sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
- }
-
- // Accessed from ExitFrame::code_slot.
- li(t8, Operand(CodeObject()), CONSTANT_SIZE);
- sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
-
- // Save the frame pointer and the context in top.
- li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- sw(fp, MemOperand(t8));
- li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- sw(cp, MemOperand(t8));
-
- const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- if (save_doubles) {
- CpuFeatures::Scope scope(FPU);
- // The stack must be allign to 0 modulo 8 for stores with sdc1.
- ASSERT(kDoubleSize == frame_alignment);
- if (frame_alignment > 0) {
- ASSERT(IsPowerOf2(frame_alignment));
- And(sp, sp, Operand(-frame_alignment)); // Align stack.
- }
- int space = FPURegister::kMaxNumRegisters * kDoubleSize;
- Subu(sp, sp, Operand(space));
- // Remember: we only need to save every 2nd double FPU value.
- for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
- FPURegister reg = FPURegister::from_code(i);
- sdc1(reg, MemOperand(sp, i * kDoubleSize));
- }
- }
-
- // Reserve place for the return address, stack space and an optional slot
- // (used by the DirectCEntryStub to hold the return value if a struct is
- // returned) and align the frame preparing for calling the runtime function.
- ASSERT(stack_space >= 0);
- Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
- if (frame_alignment > 0) {
- ASSERT(IsPowerOf2(frame_alignment));
- And(sp, sp, Operand(-frame_alignment)); // Align stack.
- }
-
- // Set the exit frame sp value to point just before the return address
- // location.
- addiu(at, sp, kPointerSize);
- sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
-}
-
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count,
- bool do_return) {
- // Optionally restore all double registers.
- if (save_doubles) {
- CpuFeatures::Scope scope(FPU);
- // Remember: we only need to restore every 2nd double FPU value.
- lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
- for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
- FPURegister reg = FPURegister::from_code(i);
- ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
- }
- }
-
- // Clear top frame.
- li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- sw(zero_reg, MemOperand(t8));
-
- // Restore current context from top and clear it in debug mode.
- li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- lw(cp, MemOperand(t8));
-#ifdef DEBUG
- sw(a3, MemOperand(t8));
-#endif
-
- // Pop the arguments, restore registers, and return.
- mov(sp, fp); // Respect ABI stack constraint.
- lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
- lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
-
- if (argument_count.is_valid()) {
- sll(t8, argument_count, kPointerSizeLog2);
- addu(sp, sp, t8);
- }
-
- if (do_return) {
- Ret(USE_DELAY_SLOT);
- // If returning, the instruction in the delay slot will be the addiu below.
- }
- addiu(sp, sp, 8);
-}
-
-
-void MacroAssembler::InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2) {
- sll(scratch1, length, kSmiTagSize);
- LoadRoot(scratch2, map_index);
- sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
- li(scratch1, Operand(String::kEmptyHashField));
- sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
- sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
-int MacroAssembler::ActivationFrameAlignment() {
-#if defined(V8_HOST_ARCH_MIPS)
- // Running on the real platform. Use the alignment as mandated by the local
- // environment.
- // Note: This will break if we ever start generating snapshots on one Mips
- // platform for another Mips platform with a different alignment.
- return OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_MIPS)
- // If we are using the simulator then we should always align to the expected
- // alignment. As the simulator is used to generate snapshots we do not know
- // if the target platform will need alignment, so this is controlled from a
- // flag.
- return FLAG_sim_stack_alignment;
-#endif // defined(V8_HOST_ARCH_MIPS)
-}
-
-
-void MacroAssembler::AssertStackIsAligned() {
- if (emit_debug_code()) {
- const int frame_alignment = ActivationFrameAlignment();
- const int frame_alignment_mask = frame_alignment - 1;
-
- if (frame_alignment > kPointerSize) {
- Label alignment_as_expected;
- ASSERT(IsPowerOf2(frame_alignment));
- andi(at, sp, frame_alignment_mask);
- Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
- // Don't use Check here, as it will call Runtime_Abort re-entering here.
- stop("Unexpected stack alignment");
- bind(&alignment_as_expected);
- }
- }
-}
-
-
-void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
- Register reg,
- Register scratch,
- Label* not_power_of_two_or_zero) {
- Subu(scratch, reg, Operand(1));
- Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
- scratch, Operand(zero_reg));
- and_(at, scratch, reg); // In the delay slot.
- Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
-}
-
-
-void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
- ASSERT(!reg.is(overflow));
- mov(overflow, reg); // Save original value.
- SmiTag(reg);
- xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
-}
-
-
-void MacroAssembler::SmiTagCheckOverflow(Register dst,
- Register src,
- Register overflow) {
- if (dst.is(src)) {
- // Fall back to slower case.
- SmiTagCheckOverflow(dst, overflow);
- } else {
- ASSERT(!dst.is(src));
- ASSERT(!dst.is(overflow));
- ASSERT(!src.is(overflow));
- SmiTag(dst, src);
- xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
- }
-}
-
-
-void MacroAssembler::UntagAndJumpIfSmi(Register dst,
- Register src,
- Label* smi_case) {
- JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
- SmiUntag(dst, src);
-}
-
-
-void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
- Register src,
- Label* non_smi_case) {
- JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
- SmiUntag(dst, src);
-}
-
-void MacroAssembler::JumpIfSmi(Register value,
- Label* smi_label,
- Register scratch,
- BranchDelaySlot bd) {
- ASSERT_EQ(0, kSmiTag);
- andi(scratch, value, kSmiTagMask);
- Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
-}
-
-void MacroAssembler::JumpIfNotSmi(Register value,
- Label* not_smi_label,
- Register scratch,
- BranchDelaySlot bd) {
- ASSERT_EQ(0, kSmiTag);
- andi(scratch, value, kSmiTagMask);
- Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
-}
-
-
-void MacroAssembler::JumpIfNotBothSmi(Register reg1,
- Register reg2,
- Label* on_not_both_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(1, kSmiTagMask);
- or_(at, reg1, reg2);
- JumpIfNotSmi(at, on_not_both_smi);
-}
-
-
-void MacroAssembler::JumpIfEitherSmi(Register reg1,
- Register reg2,
- Label* on_either_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(1, kSmiTagMask);
- // Both Smi tags must be 1 (not Smi).
- and_(at, reg1, reg2);
- JumpIfSmi(at, on_either_smi);
-}
-
-
-void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- andi(at, object, kSmiTagMask);
- Check(ne, "Operand is a smi", at, Operand(zero_reg));
- }
-}
-
-
-void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- andi(at, object, kSmiTagMask);
- Check(eq, "Operand is a smi", at, Operand(zero_reg));
- }
-}
-
-
-void MacroAssembler::AssertString(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- And(t0, object, Operand(kSmiTagMask));
- Check(ne, "Operand is a smi and not a string", t0, Operand(zero_reg));
- push(object);
- lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
- Check(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
- pop(object);
- }
-}
-
-
-void MacroAssembler::AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- if (emit_debug_code()) {
- ASSERT(!src.is(at));
- LoadRoot(at, root_value_index);
- Check(eq, message, src, Operand(at));
- }
-}
-
-
-void MacroAssembler::JumpIfNotHeapNumber(Register object,
- Register heap_number_map,
- Register scratch,
- Label* on_not_heap_number) {
- lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
-}
-
-
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Test that both first and second are sequential ASCII strings.
- // Assume that they are non-smis.
- lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
- JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
-}
-
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Check that neither is a smi.
- STATIC_ASSERT(kSmiTag == 0);
- And(scratch1, first, Operand(second));
- JumpIfSmi(scratch1, failure);
- JumpIfNonSmisNotBothSequentialAsciiStrings(first,
- second,
- scratch1,
- scratch2,
- failure);
-}
-
-
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
- andi(scratch1, first, kFlatAsciiStringMask);
- Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
- andi(scratch2, second, kFlatAsciiStringMask);
- Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- And(scratch, type, Operand(kFlatAsciiStringMask));
- Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
-}
-
-
-static const int kRegisterPassedArguments = 4;
-
-int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments) {
- int stack_passed_words = 0;
- num_reg_arguments += 2 * num_double_arguments;
-
- // Up to four simple arguments are passed in registers a0..a3.
- if (num_reg_arguments > kRegisterPassedArguments) {
- stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
- }
- stack_passed_words += kCArgSlotCount;
- return stack_passed_words;
-}
-
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
- int num_double_arguments,
- Register scratch) {
- int frame_alignment = ActivationFrameAlignment();
-
- // Up to four simple arguments are passed in registers a0..a3.
- // Those four arguments must have reserved argument slots on the stack for
- // mips, even though those argument slots are not normally used.
- // Remaining arguments are pushed on the stack, above (higher address than)
- // the argument slots.
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
- if (frame_alignment > kPointerSize) {
- // Make stack end at alignment and make room for num_arguments - 4 words
- // and the original value of sp.
- mov(scratch, sp);
- Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frame_alignment));
- And(sp, sp, Operand(-frame_alignment));
- sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
- } else {
- Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
- }
-}
-
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
- Register scratch) {
- PrepareCallCFunction(num_reg_arguments, 0, scratch);
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments) {
- li(t8, Operand(function));
- CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
-}
-
-
-void MacroAssembler::CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
- ASSERT(has_frame());
- // Make sure that the stack is aligned before calling a C function unless
- // running in the simulator. The simulator has its own alignment check which
- // provides more information.
- // The argument stots are presumed to have been set up by
- // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
-
-#if defined(V8_HOST_ARCH_MIPS)
- if (emit_debug_code()) {
- int frame_alignment = OS::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
- Label alignment_as_expected;
- And(at, sp, Operand(frame_alignment_mask));
- Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
- // Don't use Check here, as it will call Runtime_Abort possibly
- // re-entering here.
- stop("Unexpected alignment in CallCFunction");
- bind(&alignment_as_expected);
- }
- }
-#endif // V8_HOST_ARCH_MIPS
-
- // Just call directly. The function called cannot cause a GC, or
- // allow preemption, so the return address in the link register
- // stays correct.
-
- if (!function.is(t9)) {
- mov(t9, function);
- function = t9;
- }
-
- Call(function);
-
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
-
- if (OS::ActivationFrameAlignment() > kPointerSize) {
- lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
- } else {
- Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
- }
-}
-
-
-#undef BRANCH_ARGS_CHECK
-
-
-void MacroAssembler::PatchRelocatedValue(Register li_location,
- Register scratch,
- Register new_value) {
- lw(scratch, MemOperand(li_location));
- // At this point scratch is a lui(at, ...) instruction.
- if (emit_debug_code()) {
- And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction to patch should be a lui.",
- scratch, Operand(LUI));
- lw(scratch, MemOperand(li_location));
- }
- srl(t9, new_value, kImm16Bits);
- Ins(scratch, t9, 0, kImm16Bits);
- sw(scratch, MemOperand(li_location));
-
- lw(scratch, MemOperand(li_location, kInstrSize));
- // scratch is now ori(at, ...).
- if (emit_debug_code()) {
- And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction to patch should be an ori.",
- scratch, Operand(ORI));
- lw(scratch, MemOperand(li_location, kInstrSize));
- }
- Ins(scratch, new_value, 0, kImm16Bits);
- sw(scratch, MemOperand(li_location, kInstrSize));
-
- // Update the I-cache so the new lui and ori can be executed.
- FlushICache(li_location, 2);
-}
-
-void MacroAssembler::GetRelocatedValue(Register li_location,
- Register value,
- Register scratch) {
- lw(value, MemOperand(li_location));
- if (emit_debug_code()) {
- And(value, value, kOpcodeMask);
- Check(eq, "The instruction should be a lui.",
- value, Operand(LUI));
- lw(value, MemOperand(li_location));
- }
-
- // value now holds a lui instruction. Extract the immediate.
- sll(value, value, kImm16Bits);
-
- lw(scratch, MemOperand(li_location, kInstrSize));
- if (emit_debug_code()) {
- And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction should be an ori.",
- scratch, Operand(ORI));
- lw(scratch, MemOperand(li_location, kInstrSize));
- }
- // "scratch" now holds an ori instruction. Extract the immediate.
- andi(scratch, scratch, kImm16Mask);
-
- // Merge the results.
- or_(value, value, scratch);
-}
-
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met) {
- And(scratch, object, Operand(~Page::kPageAlignmentMask));
- lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
- And(scratch, scratch, Operand(mask));
- Branch(condition_met, cc, scratch, Operand(zero_reg));
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* has_color,
- int first_bit,
- int second_bit) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- And(t8, t9, Operand(mask_scratch));
- Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
- // Shift left 1 by adding.
- Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
- Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
- And(t8, t9, Operand(mask_scratch));
- Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
- jmp(&other_color);
-
- bind(&word_boundary);
- lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
- And(t9, t9, Operand(1));
- Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
- bind(&other_color);
-}
-
-
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object) {
- ASSERT(!AreAliased(value, scratch, t8, no_reg));
- Label is_data_object;
- lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- Branch(&is_data_object, eq, t8, Operand(scratch));
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(not_data_object, ne, t8, Operand(zero_reg));
- bind(&is_data_object);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
- And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
- Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
- const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
- sll(t8, t8, kPointerSizeLog2);
- Addu(bitmap_reg, bitmap_reg, t8);
- li(t8, Operand(1));
- sllv(mask_reg, t8, mask_reg);
-}
-
-
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- Label done;
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- And(t8, mask_scratch, load_scratch);
- Branch(&done, ne, t8, Operand(zero_reg));
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // sll may overflow, making the check conservative.
- sll(t8, mask_scratch, 1);
- And(t8, load_scratch, t8);
- Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object;
-
- // Check for heap-number
- lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- {
- Label skip;
- Branch(&skip, ne, t8, Operand(map));
- li(length, HeapNumber::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- And(t8, instance_type, Operand(kExternalStringTag));
- {
- Label skip;
- Branch(&skip, eq, t8, Operand(zero_reg));
- li(length, ExternalString::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- lw(t9, FieldMemOperand(value, String::kLengthOffset));
- And(t8, instance_type, Operand(kStringEncodingMask));
- {
- Label skip;
- Branch(&skip, eq, t8, Operand(zero_reg));
- srl(t9, t9, 1);
- bind(&skip);
- }
- Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- And(length, length, Operand(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- Or(t8, t8, Operand(mask_scratch));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- Addu(t8, t8, Operand(length));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
-}
-
-
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
-}
-
-
-void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
- DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
-}
-
-
-void MacroAssembler::EnumLength(Register dst, Register map) {
- STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
- And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
-}
-
-
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Register empty_fixed_array_value = t2;
- LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Label next, start;
- mov(a2, a0);
-
- // Check if the enum length field is properly initialized, indicating that
- // there is an enum cache.
- lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
-
- EnumLength(a3, a1);
- Branch(call_runtime, eq, a3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
-
- jmp(&start);
-
- bind(&next);
- lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
-
- // For all objects but the receiver, check that the cache is empty.
- EnumLength(a3, a1);
- Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
-
- bind(&start);
-
- // Check that there are no elements. Register r2 contains the current JS
- // object we've reached through the prototype chain.
- lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
- Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
-
- lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
- Branch(&next, ne, a2, Operand(null_value));
-}
-
-
-void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
- ASSERT(!output_reg.is(input_reg));
- Label done;
- li(output_reg, Operand(255));
- // Normal branch: nop in delay slot.
- Branch(&done, gt, input_reg, Operand(output_reg));
- // Use delay slot in this branch.
- Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
- mov(output_reg, zero_reg); // In delay slot.
- mov(output_reg, input_reg); // Value is in range 0..255.
- bind(&done);
-}
-
-
-void MacroAssembler::ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg) {
- Label above_zero;
- Label done;
- Label in_bounds;
-
- Move(temp_double_reg, 0.0);
- BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
-
- // Double value is less than zero, NaN or Inf, return 0.
- mov(result_reg, zero_reg);
- Branch(&done);
-
- // Double value is >= 255, return 255.
- bind(&above_zero);
- Move(temp_double_reg, 255.0);
- BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
- li(result_reg, Operand(255));
- Branch(&done);
-
- // In 0-255 range, round and truncate.
- bind(&in_bounds);
- cvt_w_d(temp_double_reg, input_reg);
- mfc1(result_reg, temp_double_reg);
- bind(&done);
-}
-
-
-void MacroAssembler::TestJSArrayForAllocationSiteInfo(
- Register receiver_reg,
- Register scratch_reg,
- Condition cond,
- Label* allocation_info_present) {
- Label no_info_available;
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- Addu(scratch_reg, receiver_reg,
- Operand(JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
- Branch(&no_info_available, lt, scratch_reg, Operand(new_space_start));
- li(at, Operand(new_space_allocation_top));
- lw(at, MemOperand(at));
- Branch(&no_info_available, gt, scratch_reg, Operand(at));
- lw(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
- Branch(allocation_info_present, cond, scratch_reg,
- Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
- bind(&no_info_available);
-}
-
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
-}
-
-
-CodePatcher::CodePatcher(byte* address, int instructions)
- : address_(address),
- instructions_(instructions),
- size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- CPU::FlushICache(address_, size_);
-
- // Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-void CodePatcher::Emit(Instr instr) {
- masm()->emit(instr);
-}
-
-
-void CodePatcher::Emit(Address addr) {
- masm()->emit(reinterpret_cast<Instr>(addr));
-}
-
-
-void CodePatcher::ChangeBranchCondition(Condition cond) {
- Instr instr = Assembler::instr_at(masm_.pc_);
- ASSERT(Assembler::IsBranch(instr));
- uint32_t opcode = Assembler::GetOpcodeField(instr);
- // Currently only the 'eq' and 'ne' cond values are supported and the simple
- // branch instructions (with opcode being the branch type).
- // There are some special cases (see Assembler::IsBranch()) so extending this
- // would be tricky.
- ASSERT(opcode == BEQ ||
- opcode == BNE ||
- opcode == BLEZ ||
- opcode == BGTZ ||
- opcode == BEQL ||
- opcode == BNEL ||
- opcode == BLEZL ||
- opcode == BGTZL);
- opcode = (cond == eq) ? BEQ : BNE;
- instr = (instr & ~kOpcodeMask) | opcode;
- masm_.emit(instr);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/macro-assembler-mips.h b/src/3rdparty/v8/src/mips/macro-assembler-mips.h
deleted file mode 100644
index 11ebc86..0000000
--- a/src/3rdparty/v8/src/mips/macro-assembler-mips.h
+++ /dev/null
@@ -1,1583 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-
-#include "assembler.h"
-#include "mips/assembler-mips.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declaration.
-class JumpTarget;
-
-// Reserved Register Usage Summary.
-//
-// Registers t8, t9, and at are reserved for use by the MacroAssembler.
-//
-// The programmer should know that the MacroAssembler may clobber these three,
-// but won't touch other registers except in special cases.
-//
-// Per the MIPS ABI, register t9 must be used for indirect function call
-// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
-// trying to update gp register for position-independent-code. Whenever
-// MIPS generated code calls C code, it must be via t9 register.
-
-
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1,
- // Specify that the requested size of the space to allocate is specified in
- // words instead of bytes.
- SIZE_IN_WORDS = 1 << 2
-};
-
-// Flags used for AllocateHeapNumber
-enum TaggingMode {
- // Tag the result.
- TAG_RESULT,
- // Don't tag
- DONT_TAG_RESULT
-};
-
-// Flags used for the ObjectToDoubleFPURegister function.
-enum ObjectToDoubleFlags {
- // No special flags.
- NO_OBJECT_TO_DOUBLE_FLAGS = 0,
- // Object is known to be a non smi.
- OBJECT_NOT_SMI = 1 << 0,
- // Don't load NaNs or infinities, branch to the non number case instead.
- AVOID_NANS_AND_INFINITIES = 1 << 1
-};
-
-// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
-enum BranchDelaySlot {
- USE_DELAY_SLOT,
- PROTECT
-};
-
-// Flags used for the li macro-assembler function.
-enum LiFlags {
- // If the constant value can be represented in just 16 bits, then
- // optimize the li to use a single instruction, rather than lui/ori pair.
- OPTIMIZE_SIZE = 0,
- // Always use 2 instructions (lui/ori pair), even if the constant could
- // be loaded with just one, so that this value is patchable later.
- CONSTANT_SIZE = 1
-};
-
-
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
-
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-inline MemOperand ContextOperand(Register context, int index) {
- return MemOperand(context, Context::SlotOffset(index));
-}
-
-
-inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
-}
-
-
-static inline MemOperand QmlGlobalObjectOperand() {
- return ContextOperand(cp, Context::QML_GLOBAL_OBJECT_INDEX);
-}
-
-
-// Generate a MemOperand for loading a field from an object.
-inline MemOperand FieldMemOperand(Register object, int offset) {
- return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate a MemOperand for storing arguments 5..N on the stack
-// when calling CallCFunction().
-inline MemOperand CFunctionArgumentOperand(int index) {
- ASSERT(index > kCArgSlotCount);
- // Argument 5 takes the slot just past the four Arg-slots.
- int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
- return MemOperand(sp, offset);
-}
-
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
- public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
-
- // Arguments macros.
-#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
-#define COND_ARGS cond, r1, r2
-
- // Cases when relocation is not needed.
-#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
- void Name(target_type target, BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, target_type target) { \
- Name(target, bd); \
- } \
- void Name(target_type target, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, \
- target_type target, \
- COND_TYPED_ARGS) { \
- Name(target, COND_ARGS, bd); \
- }
-
-#define DECLARE_BRANCH_PROTOTYPES(Name) \
- DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
- DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
-
- DECLARE_BRANCH_PROTOTYPES(Branch)
- DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
-
-#undef DECLARE_BRANCH_PROTOTYPES
-#undef COND_TYPED_ARGS
-#undef COND_ARGS
-
-
- // Jump, Call, and Ret pseudo instructions implementing inter-working.
-#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
- const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
-
- void Jump(Register target, COND_ARGS);
- void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
- void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
- void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
- static int CallSize(Register target, COND_ARGS);
- void Call(Register target, COND_ARGS);
- static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
- void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
- static int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- COND_ARGS);
- void Call(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- COND_ARGS);
- void Ret(COND_ARGS);
- inline void Ret(BranchDelaySlot bd, Condition cond = al,
- Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
- Ret(cond, rs, rt, bd);
- }
-
- void Branch(Label* L,
- Condition cond,
- Register rs,
- Heap::RootListIndex index,
- BranchDelaySlot bdslot = PROTECT);
-
-#undef COND_ARGS
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the sp register.
- void Drop(int count,
- Condition cond = cc_always,
- Register reg = no_reg,
- const Operand& op = Operand(no_reg));
-
- // Trivial case of DropAndRet that utilizes the delay slot and only emits
- // 2 instructions.
- void DropAndRet(int drop);
-
- void DropAndRet(int drop,
- Condition cond,
- Register reg,
- const Operand& op);
-
- // Swap two registers. If the scratch register is omitted then a slightly
- // less efficient form using xor instead of mov is emitted.
- void Swap(Register reg1, Register reg2, Register scratch = no_reg);
-
- void Call(Label* target);
-
- inline void Move(Register dst, Register src) {
- if (!dst.is(src)) {
- mov(dst, src);
- }
- }
-
- inline void Move(FPURegister dst, FPURegister src) {
- if (!dst.is(src)) {
- mov_d(dst, src);
- }
- }
-
- inline void Move(Register dst_low, Register dst_high, FPURegister src) {
- mfc1(dst_low, src);
- mfc1(dst_high, FPURegister::from_code(src.code() + 1));
- }
-
- inline void Move(FPURegister dst, Register src_low, Register src_high) {
- mtc1(src_low, dst);
- mtc1(src_high, FPURegister::from_code(dst.code() + 1));
- }
-
- // Conditional move.
- void Move(FPURegister dst, double imm);
- void Movz(Register rd, Register rs, Register rt);
- void Movn(Register rd, Register rs, Register rt);
- void Movt(Register rd, Register rs, uint16_t cc = 0);
- void Movf(Register rd, Register rs, uint16_t cc = 0);
-
- void Clz(Register rd, Register rs);
-
- // Jump unconditionally to given label.
- // We NEED a nop in the branch delay slot, as it used by v8, for example in
- // CodeGenerator::ProcessDeferred().
- // Currently the branch delay slot is filled by the MacroAssembler.
- // Use rather b(Label) for code generation.
- void jmp(Label* L) {
- Branch(L);
- }
-
- // Load an object from the root table.
- void LoadRoot(Register destination,
- Heap::RootListIndex index);
- void LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond, Register src1, const Operand& src2);
-
- // Store an object to the root table.
- void StoreRoot(Register source,
- Heap::RootListIndex index);
- void StoreRoot(Register source,
- Heap::RootListIndex index,
- Condition cond, Register src1, const Operand& src2);
-
- void LoadHeapObject(Register dst, Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- li(result, object);
- }
- }
-
- // ---------------------------------------------------------------------------
- // GC Support
-
- void IncrementalMarkingRecordWriteHelper(Register object,
- Register value,
- Register address);
-
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, ne, branch);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, eq, branch);
- }
-
- // Check if an object has a given incremental marking color.
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
- void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- ra_status,
- save_fp,
- remembered_set_action,
- smi_check);
- }
-
- // For a given |object| notify the garbage collector that the slot |address|
- // has been written. |value| is the object being stored. The value and
- // address registers are clobbered by the operation.
- void RecordWrite(
- Register object,
- Register address,
- Register value,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
-
- // ---------------------------------------------------------------------------
- // Inline caching support.
-
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, whereas both scratch registers are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
- void GetNumberHash(Register reg0, Register scratch);
-
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register reg0,
- Register reg1,
- Register reg2);
-
-
- inline void MarkCode(NopMarkerTypes type) {
- nop(type);
- }
-
- // Check if the given instruction is a 'type' marker.
- // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
- // nop(type)). These instructions are generated to mark special location in
- // the code, like some special IC code.
- static inline bool IsMarkedCode(Instr instr, int type) {
- ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
- return IsNop(instr, type);
- }
-
-
- static inline int GetCodeMarker(Instr instr) {
- uint32_t opcode = ((instr & kOpcodeMask));
- uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
- uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
- uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
-
- // Return <n> if we have a sll zero_reg, zero_reg, n
- // else return -1.
- bool sllzz = (opcode == SLL &&
- rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
- rs == static_cast<uint32_t>(ToNumber(zero_reg)));
- int type =
- (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
- ASSERT((type == -1) ||
- ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
- return type;
- }
-
-
-
- // ---------------------------------------------------------------------------
- // Allocation support.
-
- // Allocate an object in new space. The object_size is specified
- // either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the new space is exhausted control continues at the
- // gc_required label. The allocated object is returned in result. If
- // the flag tag_allocated_object is true the result is tagged as as
- // a heap object. All registers are clobbered also when control
- // continues at the gc_required label.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
-
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. The caller must make sure that no pointers
- // are left to the object(s) no longer allocated as they would be invalid when
- // allocation is undone.
- void UndoAllocationInNewSpace(Register object, Register scratch);
-
-
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocates a heap number or jumps to the gc_required label if the young
- // space is full and a scavenge is needed. All registers are clobbered also
- // when control continues at the gc_required label.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT);
- void AllocateHeapNumberWithValue(Register result,
- FPURegister value,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // ---------------------------------------------------------------------------
- // Instruction macros.
-
-#define DEFINE_INSTRUCTION(instr) \
- void instr(Register rd, Register rs, const Operand& rt); \
- void instr(Register rd, Register rs, Register rt) { \
- instr(rd, rs, Operand(rt)); \
- } \
- void instr(Register rs, Register rt, int32_t j) { \
- instr(rs, rt, Operand(j)); \
- }
-
-#define DEFINE_INSTRUCTION2(instr) \
- void instr(Register rs, const Operand& rt); \
- void instr(Register rs, Register rt) { \
- instr(rs, Operand(rt)); \
- } \
- void instr(Register rs, int32_t j) { \
- instr(rs, Operand(j)); \
- }
-
- DEFINE_INSTRUCTION(Addu);
- DEFINE_INSTRUCTION(Subu);
- DEFINE_INSTRUCTION(Mul);
- DEFINE_INSTRUCTION2(Mult);
- DEFINE_INSTRUCTION2(Multu);
- DEFINE_INSTRUCTION2(Div);
- DEFINE_INSTRUCTION2(Divu);
-
- DEFINE_INSTRUCTION(And);
- DEFINE_INSTRUCTION(Or);
- DEFINE_INSTRUCTION(Xor);
- DEFINE_INSTRUCTION(Nor);
- DEFINE_INSTRUCTION2(Neg);
-
- DEFINE_INSTRUCTION(Slt);
- DEFINE_INSTRUCTION(Sltu);
-
- // MIPS32 R2 instruction macro.
- DEFINE_INSTRUCTION(Ror);
-
-#undef DEFINE_INSTRUCTION
-#undef DEFINE_INSTRUCTION2
-
-
- // ---------------------------------------------------------------------------
- // Pseudo-instructions.
-
- void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
-
- // Load int32 in the rd register.
- void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
- inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
- li(rd, Operand(j), mode);
- }
- inline void li(Register dst, Handle<Object> value,
- LiFlags mode = OPTIMIZE_SIZE) {
- li(dst, Operand(value), mode);
- }
-
- // Push multiple registers on the stack.
- // Registers are saved in numerical order, with higher numbered registers
- // saved in higher memory addresses.
- void MultiPush(RegList regs);
- void MultiPushReversed(RegList regs);
-
- void MultiPushFPU(RegList regs);
- void MultiPushReversedFPU(RegList regs);
-
- // Lower case push() for compatibility with arch-independent code.
- void push(Register src) {
- Addu(sp, sp, Operand(-kPointerSize));
- sw(src, MemOperand(sp, 0));
- }
-
- // Push a handle.
- void Push(Handle<Object> handle);
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
-
- // Push two registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2) {
- Subu(sp, sp, Operand(2 * kPointerSize));
- sw(src1, MemOperand(sp, 1 * kPointerSize));
- sw(src2, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Push three registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3) {
- Subu(sp, sp, Operand(3 * kPointerSize));
- sw(src1, MemOperand(sp, 2 * kPointerSize));
- sw(src2, MemOperand(sp, 1 * kPointerSize));
- sw(src3, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Push four registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Register src4) {
- Subu(sp, sp, Operand(4 * kPointerSize));
- sw(src1, MemOperand(sp, 3 * kPointerSize));
- sw(src2, MemOperand(sp, 2 * kPointerSize));
- sw(src3, MemOperand(sp, 1 * kPointerSize));
- sw(src4, MemOperand(sp, 0 * kPointerSize));
- }
-
- void Push(Register src, Condition cond, Register tst1, Register tst2) {
- // Since we don't have conditional execution we use a Branch.
- Branch(3, cond, tst1, Operand(tst2));
- Subu(sp, sp, Operand(kPointerSize));
- sw(src, MemOperand(sp, 0));
- }
-
- // Pops multiple values from the stack and load them in the
- // registers specified in regs. Pop order is the opposite as in MultiPush.
- void MultiPop(RegList regs);
- void MultiPopReversed(RegList regs);
-
- void MultiPopFPU(RegList regs);
- void MultiPopReversedFPU(RegList regs);
-
- // Lower case pop() for compatibility with arch-independent code.
- void pop(Register dst) {
- lw(dst, MemOperand(sp, 0));
- Addu(sp, sp, Operand(kPointerSize));
- }
-
- // Pop two registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2) {
- ASSERT(!src1.is(src2));
- lw(src2, MemOperand(sp, 0 * kPointerSize));
- lw(src1, MemOperand(sp, 1 * kPointerSize));
- Addu(sp, sp, 2 * kPointerSize);
- }
-
- // Pop three registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2, Register src3) {
- lw(src3, MemOperand(sp, 0 * kPointerSize));
- lw(src2, MemOperand(sp, 1 * kPointerSize));
- lw(src1, MemOperand(sp, 2 * kPointerSize));
- Addu(sp, sp, 3 * kPointerSize);
- }
-
- void Pop(uint32_t count = 1) {
- Addu(sp, sp, Operand(count * kPointerSize));
- }
-
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters();
- void PopSafepointRegisters();
- void PushSafepointRegistersAndDoubles();
- void PopSafepointRegistersAndDoubles();
- // Store value in register src in the safepoint stack slot for
- // register dst.
- void StoreToSafepointRegisterSlot(Register src, Register dst);
- void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
- // Load the value of the src register from its safepoint stack slot
- // into register dst.
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
-
- // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
- // Does not handle errors.
- void FlushICache(Register address, unsigned instructions);
-
- // MIPS32 R2 instruction macro.
- void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
-
- // ---------------------------------------------------------------------------
- // FPU macros. These do not handle special cases like NaN or +- inf.
-
- // Convert unsigned word to double.
- void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
- void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
-
- // Convert double to unsigned word.
- void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
- void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
-
- void Trunc_w_d(FPURegister fd, FPURegister fs);
- void Round_w_d(FPURegister fd, FPURegister fs);
- void Floor_w_d(FPURegister fd, FPURegister fs);
- void Ceil_w_d(FPURegister fd, FPURegister fs);
- // Wrapper function for the different cmp/branch types.
- void BranchF(Label* target,
- Label* nan,
- Condition cc,
- FPURegister cmp1,
- FPURegister cmp2,
- BranchDelaySlot bd = PROTECT);
-
- // Alternate (inline) version for better readability with USE_DELAY_SLOT.
- inline void BranchF(BranchDelaySlot bd,
- Label* target,
- Label* nan,
- Condition cc,
- FPURegister cmp1,
- FPURegister cmp2) {
- BranchF(target, nan, cc, cmp1, cmp2, bd);
- };
-
- // Convert the HeapNumber pointed to by source to a 32bits signed integer
- // dest. If the HeapNumber does not fit into a 32bits signed integer branch
- // to not_int32 label. If FPU is available double_scratch is used but not
- // scratch2.
- void ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- FPURegister double_scratch,
- Label *not_int32);
-
- // Truncates a double using a specific rounding mode, and writes the value
- // to the result register.
- // The except_flag will contain any exceptions caused by the instruction.
- // If check_inexact is kDontCheckForInexactConversion, then the inexact
- // exception is masked.
- void EmitFPUTruncate(FPURoundingMode rounding_mode,
- Register result,
- DoubleRegister double_input,
- Register scratch,
- DoubleRegister double_scratch,
- Register except_flag,
- CheckForInexactConversion check_inexact
- = kDontCheckForInexactConversion);
-
- // Helper for EmitECMATruncate.
- // This will truncate a floating-point value outside of the singed 32bit
- // integer range to a 32bit signed integer.
- // Expects the double value loaded in input_high and input_low.
- // Exits with the answer in 'result'.
- // Note that this code does not work for values in the 32bit range!
- void EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer and all other registers clobbered.
- void EmitECMATruncate(Register result,
- FPURegister double_input,
- FPURegister single_scratch,
- Register scratch,
- Register scratch2,
- Register scratch3);
-
- // Enter exit frame.
- // argc - argument count to be dropped by LeaveExitFrame.
- // save_doubles - saves FPU registers on stack, currently disabled.
- // stack_space - extra stack space.
- void EnterExitFrame(bool save_doubles,
- int stack_space = 0);
-
- // Leave the current exit frame.
- void LeaveExitFrame(bool save_doubles,
- Register arg_count,
- bool do_return = false);
-
- // Get the actual activation frame alignment for target environment.
- static int ActivationFrameAlignment();
-
- // Make sure the stack is aligned. Only emits code in debug mode.
- void AssertStackIsAligned();
-
- void LoadContext(Register dst, int context_chain_length);
-
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
- void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
-
- // Load the initial map from the global function. The registers
- // function and map can be the same, function is then overwritten.
- void LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch);
-
- void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- li(kRootRegister, Operand(roots_array_start));
- }
-
- // -------------------------------------------------------------------------
- // JavaScript invokes.
-
- // Set up call kind marking in t1. The method takes t1 as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind);
-
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
-
- void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- void IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail);
-
- void IsObjectJSStringType(Register object,
- Register scratch,
- Label* fail);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // -------------------------------------------------------------------------
- // Debugger Support.
-
- void DebugBreak();
-#endif
-
-
- // -------------------------------------------------------------------------
- // Exception handling.
-
- // Push a new try handler and link into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
-
- // Unlink the stack handler on top of the stack from the try handler chain.
- // Must preserve the result register.
- void PopTryHandler();
-
- // Passes thrown value to the handler of top of the try handler chain.
- void Throw(Register value);
-
- // Propagates an uncatchable exception to the top of the current JS stack's
- // handler chain.
- void ThrowUncatchable(Register value);
-
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
-
- // Copies a number of bytes from src to dst. All registers are clobbered. On
- // exit src and dst will point to the place just after where the last byte was
- // read or written and length will be zero.
- void CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch);
-
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
-
- // -------------------------------------------------------------------------
- // Support functions.
-
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function = false);
-
- void GetObjectType(Register function,
- Register map,
- Register type_reg);
-
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail, in which
- // case scratch2, scratch3 and scratch4 are unmodified.
- void StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- // All regs below here overwritten.
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail,
- int elements_offset = 0);
-
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
- // "branch_to" if the result of the comparison is "cond". If multiple map
- // compares are required, the compare sequences branches to early_success.
- void CompareMapAndBranch(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success,
- Condition cond,
- Label* branch_to,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // As above, but the map of the object is already loaded into the register
- // which is preserved by the code generated.
- void CompareMapAndBranch(Register obj_map,
- Handle<Map> map,
- Label* early_success,
- Condition cond,
- Label* branch_to,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specificed map.
- void CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
-
- void CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type);
-
- // Check if the map of an object is equal to a specified map and branch to a
- // specified target if equal. Skip the smi check if not required (object is
- // known to be a heap object)
- void DispatchMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type);
-
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
-
- // Load and check the instance type of an object for being a string.
- // Loads the type into the second argument register.
- // Returns a condition that will be enabled if the object was a string.
- Condition IsObjectStringType(Register obj,
- Register type,
- Register result) {
- lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
- lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
- And(type, type, Operand(kIsNotStringMask));
- ASSERT_EQ(0, kStringTag);
- return eq;
- }
-
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
- // Get the number of least significant bits from a register.
- void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
- void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
-
- // Load the value of a number object into a FPU double register. If the
- // object is not a number a jump to the label not_number is performed
- // and the FPU double register is unchanged.
- void ObjectToDoubleFPURegister(
- Register object,
- FPURegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* not_number,
- ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
-
- // Load the value of a smi object into a FPU double register. The register
- // scratch1 can be the same register as smi in which case smi will hold the
- // untagged value afterwards.
- void SmiToDoubleFPURegister(Register smi,
- FPURegister value,
- Register scratch1);
-
- // -------------------------------------------------------------------------
- // Overflow handling functions.
- // Usage: first call the appropriate arithmetic function, then call one of the
- // jump functions with the overflow_dst register as the second parameter.
-
- void AdduAndCheckForOverflow(Register dst,
- Register left,
- Register right,
- Register overflow_dst,
- Register scratch = at);
-
- void SubuAndCheckForOverflow(Register dst,
- Register left,
- Register right,
- Register overflow_dst,
- Register scratch = at);
-
- void BranchOnOverflow(Label* label,
- Register overflow_check,
- BranchDelaySlot bd = PROTECT) {
- Branch(label, lt, overflow_check, Operand(zero_reg), bd);
- }
-
- void BranchOnNoOverflow(Label* label,
- Register overflow_check,
- BranchDelaySlot bd = PROTECT) {
- Branch(label, ge, overflow_check, Operand(zero_reg), bd);
- }
-
- void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
- Ret(lt, overflow_check, Operand(zero_reg), bd);
- }
-
- void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
- Ret(ge, overflow_check, Operand(zero_reg), bd);
- }
-
- // -------------------------------------------------------------------------
- // Runtime calls.
-
- // See comments at the beginning of CEntryStub::Generate.
- inline void PrepareCEntryArgs(int num_args) {
- li(s0, num_args);
- li(s1, (num_args - 1) * kPointerSize);
- }
-
- inline void PrepareCEntryFunction(const ExternalReference& ref) {
- li(s2, Operand(ref));
- }
-
- // Call a code stub.
- void CallStub(CodeStub* stub,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = cc_always,
- Register r1 = zero_reg,
- const Operand& r2 = Operand(zero_reg),
- BranchDelaySlot bd = PROTECT);
-
- // Tail call a code stub (jump).
- void TailCallStub(CodeStub* stub);
-
- void CallJSExitStub(CodeStub* stub);
-
- // Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments);
-
- // Convenience function: call an external reference.
- void CallExternalReference(const ExternalReference& ext,
- int num_arguments,
- BranchDelaySlot bd = PROTECT);
-
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- int CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments);
-
- // Before calling a C-function from generated code, align arguments on stack
- // and add space for the four mips argument slots.
- // After aligning the frame, non-register arguments must be stored on the
- // stack, after the argument-slots using helper: CFunctionArgumentOperand().
- // The argument count assumes all arguments are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_reg_arguments,
- int num_double_registers,
- Register scratch);
- void PrepareCallCFunction(int num_reg_arguments,
- Register scratch);
-
- // Arguments 1-4 are placed in registers a0 thru a3 respectively.
- // Arguments 5..n are stored to stack using following:
- // sw(t0, CFunctionArgumentOperand(5));
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments);
- void GetCFunctionDoubleResult(const DoubleRegister dst);
-
- // There are two ways of passing double arguments on MIPS, depending on
- // whether soft or hard floating point ABI is used. These functions
- // abstract parameter passing for the three different ways we call
- // C functions from generated code.
- void SetCallCDoubleArguments(DoubleRegister dreg);
- void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
- void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
-
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context. stack_space
- // - space to be unwound on exit (includes the call JS arguments space and
- // the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
-
- // Jump to the builtin routine.
- void JumpToExternalReference(const ExternalReference& builtin,
- BranchDelaySlot bd = PROTECT);
-
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
- // Store the code object for the given builtin in the target register and
- // setup the function in a1.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
- struct Unresolved {
- int pc;
- uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
- const char* name;
- };
-
- Handle<Object> CodeObject() {
- ASSERT(!code_object_.is_null());
- return code_object_;
- }
-
- // -------------------------------------------------------------------------
- // StatsCounter support.
-
- void SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
- void IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
- void DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
-
-
- // -------------------------------------------------------------------------
- // Debugging.
-
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, const char* msg, Register rs, Operand rt);
- void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
- void AssertFastElements(Register elements);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg, Register rs, Operand rt);
-
- // Print a message to stdout and abort execution.
- void Abort(const char* msg);
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
- // ---------------------------------------------------------------------------
- // Number utilities.
-
- // Check whether the value of reg is a power of two and not zero. If not
- // control continues at the label not_power_of_two. If reg is a power of two
- // the register scratch contains the value of (reg - 1) when control falls
- // through.
- void JumpIfNotPowerOfTwoOrZero(Register reg,
- Register scratch,
- Label* not_power_of_two_or_zero);
-
- // -------------------------------------------------------------------------
- // Smi utilities.
-
- void SmiTag(Register reg) {
- Addu(reg, reg, reg);
- }
-
- // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
- void SmiTagCheckOverflow(Register reg, Register overflow);
- void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
-
- void SmiTag(Register dst, Register src) {
- Addu(dst, src, src);
- }
-
- void SmiUntag(Register reg) {
- sra(reg, reg, kSmiTagSize);
- }
-
- void SmiUntag(Register dst, Register src) {
- sra(dst, src, kSmiTagSize);
- }
-
- // Untag the source value into destination and jump if source is a smi.
- // Souce and destination can be the same register.
- void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
-
- // Untag the source value into destination and jump if source is not a smi.
- // Souce and destination can be the same register.
- void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
- // Jump the register contains a smi.
- void JumpIfSmi(Register value,
- Label* smi_label,
- Register scratch = at,
- BranchDelaySlot bd = PROTECT);
-
- // Jump if the register contains a non-smi.
- void JumpIfNotSmi(Register value,
- Label* not_smi_label,
- Register scratch = at,
- BranchDelaySlot bd = PROTECT);
-
- // Jump if either of the registers contain a non-smi.
- void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
- // Jump if either of the registers contain a smi.
- void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
-
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
-
- // Abort execution if argument is not a string, enabled via --debug-code.
- void AssertString(Register object);
-
- // Abort execution if argument is not the root value with the given index,
- // enabled via --debug-code.
- void AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
-
- // ---------------------------------------------------------------------------
- // HeapNumber utilities.
-
- void JumpIfNotHeapNumber(Register object,
- Register heap_number_map,
- Register scratch,
- Label* on_not_heap_number);
-
- // -------------------------------------------------------------------------
- // String utilities.
-
- // Checks if both instance types are sequential ASCII strings and jumps to
- // label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Check if instance type is sequential ASCII string and jump to label if
- // it is not.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure);
-
- // Test that both first and second are sequential ASCII strings.
- // Assume that they are non-smis.
- void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Test that both first and second are sequential ASCII strings.
- // Check that they are non-smis.
- void JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- void ClampUint8(Register output_reg, Register input_reg);
-
- void ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg);
-
-
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void EnumLength(Register dst, Register map);
- void NumberOfOwnDescriptors(Register dst, Register map);
-
- template<typename Field>
- void DecodeField(Register reg) {
- static const int shift = Field::kShift;
- static const int mask = (Field::kMask >> shift) << kSmiTagSize;
- srl(reg, reg, shift);
- And(reg, reg, Operand(mask));
- }
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
- // Patch the relocated value (lui/ori pair).
- void PatchRelocatedValue(Register li_location,
- Register scratch,
- Register new_value);
- // Get the relocatad value (loaded data) from the lui/ori pair.
- void GetRelocatedValue(Register li_location,
- Register value,
- Register scratch);
-
- // Expects object in a0 and returns map with validated enum cache
- // in a0. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Register null_value, Label* call_runtime);
-
- // AllocationSiteInfo support. Arrays may have an associated
- // AllocationSiteInfo object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver_reg should point to the array object.
- // scratch_reg gets clobbered.
- // If allocation info is present, jump to allocation_info_present
- void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
- Register scratch_reg,
- Condition cond,
- Label* allocation_info_present);
-
- private:
- void CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments);
-
- void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
- void BranchShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
- void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
- void BranchShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
- void J(Label* L, BranchDelaySlot bdslot);
- void Jr(Label* L, BranchDelaySlot bdslot);
- void Jalr(Label* L, BranchDelaySlot bdslot);
-
- // Helper functions for generating invokes.
- void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
- Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
- void InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2);
-
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise.
- Label* branch);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Leaves addr_reg unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
-
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
- // Compute memory operands for safepoint stack slots.
- static int SafepointRegisterStackIndex(int reg_code);
- MemOperand SafepointRegisterSlot(Register reg);
- MemOperand SafepointRegistersAndDoublesSlot(Register reg);
-
- bool generating_stub_;
- bool allow_stub_calls_;
- bool has_frame_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class StandardFrame;
-};
-
-
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. It is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion to fail.
-class CodePatcher {
- public:
- CodePatcher(byte* address, int instructions);
- virtual ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- // Emit an instruction directly.
- void Emit(Instr instr);
-
- // Emit an address directly.
- void Emit(Address addr);
-
- // Change the condition part of an instruction leaving the rest of the current
- // instruction unchanged.
- void ChangeBranchCondition(Condition cond);
-
- private:
- byte* address_; // The address of the code being patched.
- int instructions_; // Number of instructions of the expected patch size.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
-
-
-
-#ifdef GENERATED_CODE_COVERAGE
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
-#else
-#define ACCESS_MASM(masm) masm->
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc b/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc
deleted file mode 100644
index 1ae2a7a..0000000
--- a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc
+++ /dev/null
@@ -1,1397 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "unicode.h"
-#include "log.h"
-#include "code-stubs.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "mips/regexp-macro-assembler-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-/*
- * This assembler uses the following register assignment convention
- * - t7 : Temporarily stores the index of capture start after a matching pass
- * for a global regexp.
- * - t1 : Pointer to current code object (Code*) including heap object tag.
- * - t2 : Current position in input, as negative offset from end of string.
- * Please notice that this is the byte offset, not the character offset!
- * - t3 : Currently loaded character. Must be loaded using
- * LoadCurrentCharacter before using any of the dispatch methods.
- * - t4 : Points to tip of backtrack stack
- * - t5 : Unused.
- * - t6 : End of input (points to byte after last character in input).
- * - fp : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - sp : Points to tip of C stack.
- *
- * The remaining registers are free for computations.
- * Each call to a public method should retain this convention.
- *
- * The stack will have the following structure:
- *
- * - fp[64] Isolate* isolate (address of the current isolate)
- * - fp[60] direct_call (if 1, direct call from JavaScript code,
- * if 0, call through the runtime system).
- * - fp[56] stack_area_base (High end of the memory area to use as
- * backtracking stack).
- * - fp[52] capture array size (may fit multiple sets of matches)
- * - fp[48] int* capture_array (int[num_saved_registers_], for output).
- * - fp[44] secondary link/return address used by native call.
- * --- sp when called ---
- * - fp[40] return address (lr).
- * - fp[36] old frame pointer (r11).
- * - fp[0..32] backup of registers s0..s7.
- * --- frame pointer ----
- * - fp[-4] end of input (address of end of string).
- * - fp[-8] start of input (address of first character in string).
- * - fp[-12] start index (character index of start).
- * - fp[-16] void* input_string (location of a handle containing the string).
- * - fp[-20] success counter (only for global regexps to count matches).
- * - fp[-24] Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a
- * non-position.
- * - fp[-28] At start (if 1, we are starting at the start of the
- * string, otherwise 0)
- * - fp[-32] register 0 (Only positions must be stored in the first
- * - register 1 num_saved_registers_ registers)
- * - ...
- * - register num_registers-1
- * --- sp ---
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers start out as garbage.
- *
- * The data up to the return address must be placed there by the calling
- * code and the remaining arguments are passed in registers, e.g. by calling the
- * code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
- * int start_index,
- * Address start,
- * Address end,
- * Address secondary_return_address, // Only used by native call.
- * int* capture_output_array,
- * byte* stack_area_base,
- * bool direct_call = false)
- * The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in mips/simulator-mips.h.
- * When calling as a non-direct call (i.e., from C++ code), the return address
- * area is overwritten with the ra register by the RegExp code. When doing a
- * direct call from generated code, the return address is placed there by
- * the calling code, as in a normal exit frame.
- */
-
-#define __ ACCESS_MASM(masm_)
-
-RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_(),
- internal_failure_label_() {
- ASSERT_EQ(0, registers_to_save % 2);
- __ jmp(&entry_label_); // We'll write the entry code later.
- // If the code gets too big or corrupted, an internal exception will be
- // raised, and we will exit right away.
- __ bind(&internal_failure_label_);
- __ li(v0, Operand(FAILURE));
- __ Ret();
- __ bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
- delete masm_;
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
- internal_failure_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerMIPS::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ Addu(current_input_offset(),
- current_input_offset(), Operand(by * char_size()));
- }
-}
-
-
-void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
- ASSERT(reg >= 0);
- ASSERT(reg < num_registers_);
- if (by != 0) {
- __ lw(a0, register_location(reg));
- __ Addu(a0, a0, Operand(by));
- __ sw(a0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerMIPS::Backtrack() {
- CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
- Pop(a0);
- __ Addu(a0, a0, code_pointer());
- __ Jump(a0);
-}
-
-
-void RegExpMacroAssemblerMIPS::Bind(Label* label) {
- __ bind(label);
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) {
- BranchOrBacktrack(on_equal, eq, current_character(), Operand(c));
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
- BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit));
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
- BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg));
-
- // If we did, are we still at the start of the input?
- __ lw(a1, MemOperand(frame_pointer(), kInputStart));
- __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
- BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
- __ bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
- BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
- // If we did, are we still at the start of the input?
- __ lw(a1, MemOperand(frame_pointer(), kInputStart));
- __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
- BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
- BranchOrBacktrack(on_less, lt, current_character(), Operand(limit));
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- if (on_failure == NULL) {
- // Instead of inlining a backtrack for each test, (re)use the global
- // backtrack target.
- on_failure = &backtrack_label_;
- }
-
- if (check_end_of_string) {
- // Is last character of required match inside string.
- CheckPosition(cp_offset + str.length() - 1, on_failure);
- }
-
- __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
- if (cp_offset != 0) {
- int byte_offset = cp_offset * char_size();
- __ Addu(a0, a0, Operand(byte_offset));
- }
-
- // a0 : Address of characters to match against str.
- int stored_high_byte = 0;
- for (int i = 0; i < str.length(); i++) {
- if (mode_ == ASCII) {
- __ lbu(a1, MemOperand(a0, 0));
- __ addiu(a0, a0, char_size());
- ASSERT(str[i] <= String::kMaxOneByteCharCode);
- BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
- } else {
- __ lhu(a1, MemOperand(a0, 0));
- __ addiu(a0, a0, char_size());
- uc16 match_char = str[i];
- int match_high_byte = (match_char >> 8);
- if (match_high_byte == 0) {
- BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
- } else {
- if (match_high_byte != stored_high_byte) {
- __ li(a2, Operand(match_high_byte));
- stored_high_byte = match_high_byte;
- }
- __ Addu(a3, a2, Operand(match_char & 0xff));
- BranchOrBacktrack(on_failure, ne, a1, Operand(a3));
- }
- }
- }
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
- Label backtrack_non_equal;
- __ lw(a0, MemOperand(backtrack_stackpointer(), 0));
- __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
- __ Addu(backtrack_stackpointer(),
- backtrack_stackpointer(),
- Operand(kPointerSize));
- __ bind(&backtrack_non_equal);
- BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- __ lw(a0, register_location(start_reg)); // Index of start of capture.
- __ lw(a1, register_location(start_reg + 1)); // Index of end of capture.
- __ Subu(a1, a1, a0); // Length of capture.
-
- // If length is zero, either the capture is empty or it is not participating.
- // In either case succeed immediately.
- __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
-
- __ Addu(t5, a1, current_input_offset());
- // Check that there are enough characters left in the input.
- BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
-
- if (mode_ == ASCII) {
- Label success;
- Label fail;
- Label loop_check;
-
- // a0 - offset of start of capture.
- // a1 - length of capture.
- __ Addu(a0, a0, Operand(end_of_input_address()));
- __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
- __ Addu(a1, a0, Operand(a1));
-
- // a0 - Address of start of capture.
- // a1 - Address of end of capture.
- // a2 - Address of current input position.
-
- Label loop;
- __ bind(&loop);
- __ lbu(a3, MemOperand(a0, 0));
- __ addiu(a0, a0, char_size());
- __ lbu(t0, MemOperand(a2, 0));
- __ addiu(a2, a2, char_size());
-
- __ Branch(&loop_check, eq, t0, Operand(a3));
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case.
- __ Or(t0, t0, Operand(0x20)); // Also convert input character.
- __ Branch(&fail, ne, t0, Operand(a3));
- __ Subu(a3, a3, Operand('a'));
-#ifndef ENABLE_LATIN_1
- __ Branch(&fail, hi, a3, Operand('z' - 'a')); // Is a3 a lowercase letter?
-#else
- __ Branch(&loop_check, ls, a3, Operand('z' - 'a'));
- // Latin-1: Check for values in range [224,254] but not 247.
- __ Subu(a3, a3, Operand(224 - 'a'));
- // Weren't Latin-1 letters.
- __ Branch(&fail, hi, a3, Operand(254 - 224));
- // Check for 247.
- __ Branch(&fail, eq, a3, Operand(247 - 224));
-#endif
-
- __ bind(&loop_check);
- __ Branch(&loop, lt, a0, Operand(a1));
- __ jmp(&success);
-
- __ bind(&fail);
- GoTo(on_no_match);
-
- __ bind(&success);
- // Compute new value of character position after the matched part.
- __ Subu(current_input_offset(), a2, end_of_input_address());
- } else {
- ASSERT(mode_ == UC16);
- // Put regexp engine registers on stack.
- RegList regexp_registers_to_retain = current_input_offset().bit() |
- current_character().bit() | backtrack_stackpointer().bit();
- __ MultiPush(regexp_registers_to_retain);
-
- int argument_count = 4;
- __ PrepareCallCFunction(argument_count, a2);
-
- // a0 - offset of start of capture.
- // a1 - length of capture.
-
- // Put arguments into arguments registers.
- // Parameters are
- // a0: Address byte_offset1 - Address captured substring's start.
- // a1: Address byte_offset2 - Address of current character position.
- // a2: size_t byte_length - length of capture in bytes(!).
- // a3: Isolate* isolate.
-
- // Address of start of capture.
- __ Addu(a0, a0, Operand(end_of_input_address()));
- // Length of capture.
- __ mov(a2, a1);
- // Save length in callee-save register for use on return.
- __ mov(s3, a1);
- // Address of current input position.
- __ Addu(a1, current_input_offset(), Operand(end_of_input_address()));
- // Isolate.
- __ li(a3, Operand(ExternalReference::isolate_address()));
-
- {
- AllowExternalCallThatCantCauseGC scope(masm_);
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(function, argument_count);
- }
-
- // Restore regexp engine registers.
- __ MultiPop(regexp_registers_to_retain);
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
-
- // Check if function returned non-zero for success or zero for failure.
- BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
- // On success, increment position by length of capture.
- __ Addu(current_input_offset(), current_input_offset(), Operand(s3));
- }
-
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- Label success;
-
- // Find length of back-referenced capture.
- __ lw(a0, register_location(start_reg));
- __ lw(a1, register_location(start_reg + 1));
- __ Subu(a1, a1, a0); // Length to check.
- // Succeed on empty capture (including no capture).
- __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
-
- __ Addu(t5, a1, current_input_offset());
- // Check that there are enough characters left in the input.
- BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
-
- // Compute pointers to match string and capture string.
- __ Addu(a0, a0, Operand(end_of_input_address()));
- __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
- __ Addu(a1, a1, Operand(a0));
-
- Label loop;
- __ bind(&loop);
- if (mode_ == ASCII) {
- __ lbu(a3, MemOperand(a0, 0));
- __ addiu(a0, a0, char_size());
- __ lbu(t0, MemOperand(a2, 0));
- __ addiu(a2, a2, char_size());
- } else {
- ASSERT(mode_ == UC16);
- __ lhu(a3, MemOperand(a0, 0));
- __ addiu(a0, a0, char_size());
- __ lhu(t0, MemOperand(a2, 0));
- __ addiu(a2, a2, char_size());
- }
- BranchOrBacktrack(on_no_match, ne, a3, Operand(t0));
- __ Branch(&loop, lt, a0, Operand(a1));
-
- // Move current character position to position after match.
- __ Subu(current_input_offset(), a2, end_of_input_address());
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
- Label* on_not_equal) {
- BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- __ And(a0, current_character(), Operand(mask));
- Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
- BranchOrBacktrack(on_equal, eq, a0, rhs);
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal) {
- __ And(a0, current_character(), Operand(mask));
- Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
- BranchOrBacktrack(on_not_equal, ne, a0, rhs);
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- ASSERT(minus < String::kMaxUtf16CodeUnit);
- __ Subu(a0, current_character(), Operand(minus));
- __ And(a0, a0, Operand(mask));
- BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- __ Subu(a0, current_character(), Operand(from));
- // Unsigned lower-or-same condition.
- BranchOrBacktrack(on_in_range, ls, a0, Operand(to - from));
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- __ Subu(a0, current_character(), Operand(from));
- // Unsigned higher condition.
- BranchOrBacktrack(on_not_in_range, hi, a0, Operand(to - from));
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckBitInTable(
- Handle<ByteArray> table,
- Label* on_bit_set) {
- __ li(a0, Operand(table));
- if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
- __ And(a1, current_character(), Operand(kTableSize - 1));
- __ Addu(a0, a0, a1);
- } else {
- __ Addu(a0, a0, current_character());
- }
-
- __ lbu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize));
- BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg));
-}
-
-
-bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check.
- switch (type) {
- case 's':
- // Match space-characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- Label success;
- __ Branch(&success, eq, current_character(), Operand(' '));
- // Check range 0x09..0x0d.
- __ Subu(a0, current_character(), Operand('\t'));
- BranchOrBacktrack(on_no_match, hi, a0, Operand('\r' - '\t'));
- __ bind(&success);
- return true;
- }
- return false;
- case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- BranchOrBacktrack(on_no_match, eq, current_character(), Operand(' '));
- __ Subu(a0, current_character(), Operand('\t'));
- BranchOrBacktrack(on_no_match, ls, a0, Operand('\r' - '\t'));
- return true;
- }
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9').
- __ Subu(a0, current_character(), Operand('0'));
- BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
- return true;
- case 'D':
- // Match non ASCII-digits.
- __ Subu(a0, current_character(), Operand('0'));
- BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
- __ Xor(a0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
- __ Subu(a0, a0, Operand(0x0b));
- BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0c - 0x0b));
- if (mode_ == UC16) {
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ Subu(a0, a0, Operand(0x2028 - 0x0b));
- BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
- }
- return true;
- }
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
- __ Xor(a0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
- __ Subu(a0, a0, Operand(0x0b));
- if (mode_ == ASCII) {
- BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
- } else {
- Label done;
- BranchOrBacktrack(&done, ls, a0, Operand(0x0c - 0x0b));
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ Subu(a0, a0, Operand(0x2028 - 0x0b));
- BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
- __ bind(&done);
- }
- return true;
- }
- case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ li(a0, Operand(map));
- __ Addu(a0, a0, current_character());
- __ lbu(a0, MemOperand(a0, 0));
- BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ Branch(&done, hi, current_character(), Operand('z'));
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ li(a0, Operand(map));
- __ Addu(a0, a0, current_character());
- __ lbu(a0, MemOperand(a0, 0));
- BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
- if (mode_ != ASCII) {
- __ bind(&done);
- }
- return true;
- }
- case '*':
- // Match any character.
- return true;
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
-
-
-void RegExpMacroAssemblerMIPS::Fail() {
- __ li(v0, Operand(FAILURE));
- __ jmp(&exit_label_);
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
- Label return_v0;
- if (masm_->has_exception()) {
- // If the code gets corrupted due to long regular expressions and lack of
- // space on trampolines, an internal exception flag is set. If this case
- // is detected, we will jump into exit sequence right away.
- __ bind_to(&entry_label_, internal_failure_label_.pos());
- } else {
- // Finalize code - write the entry point code now we know how many
- // registers we need.
-
- // Entry code:
- __ bind(&entry_label_);
-
- // Tell the system that we have a stack frame. Because the type is MANUAL,
- // no is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
-
- // Actually emit code to start a new stack frame.
- // Push arguments
- // Save callee-save registers.
- // Start new stack frame.
- // Store link register in existing stack-cell.
- // Order here should correspond to order of offset constants in header file.
- RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() |
- s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | fp.bit();
- RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
- __ MultiPush(argument_registers | registers_to_retain | ra.bit());
- // Set frame pointer in space for it if this is not a direct call
- // from generated code.
- __ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
- __ mov(a0, zero_reg);
- __ push(a0); // Make room for success counter and initialize it to 0.
- __ push(a0); // Make room for "position - 1" constant (value irrelevant).
-
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ li(a0, Operand(stack_limit));
- __ lw(a0, MemOperand(a0));
- __ Subu(a0, sp, a0);
- // Handle it if the stack pointer is already below the stack limit.
- __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ li(v0, Operand(EXCEPTION));
- __ jmp(&return_v0);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(a0);
- // If returned value is non-zero, we exit with the returned value as result.
- __ Branch(&return_v0, ne, v0, Operand(zero_reg));
-
- __ bind(&stack_ok);
- // Allocate space on stack for registers.
- __ Subu(sp, sp, Operand(num_registers_ * kPointerSize));
- // Load string end.
- __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- // Load input start.
- __ lw(a0, MemOperand(frame_pointer(), kInputStart));
- // Find negative length (offset of start relative to end).
- __ Subu(current_input_offset(), a0, end_of_input_address());
- // Set a0 to address of char before start of the input string
- // (effectively string position -1).
- __ lw(a1, MemOperand(frame_pointer(), kStartIndex));
- __ Subu(a0, current_input_offset(), Operand(char_size()));
- __ sll(t5, a1, (mode_ == UC16) ? 1 : 0);
- __ Subu(a0, a0, t5);
- // Store this value in a local variable, for use when clearing
- // position registers.
- __ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
-
- // Initialize code pointer register
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
- __ li(current_character(), Operand('\n'));
- __ jmp(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
-
- // Initialize on-stack registers.
- if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
- // Fill saved registers with initial value = start offset - 1.
- if (num_saved_registers_ > 8) {
- // Address of register 0.
- __ Addu(a1, frame_pointer(), Operand(kRegisterZero));
- __ li(a2, Operand(num_saved_registers_));
- Label init_loop;
- __ bind(&init_loop);
- __ sw(a0, MemOperand(a1));
- __ Addu(a1, a1, Operand(-kPointerSize));
- __ Subu(a2, a2, Operand(1));
- __ Branch(&init_loop, ne, a2, Operand(zero_reg));
- } else {
- for (int i = 0; i < num_saved_registers_; i++) {
- __ sw(a0, register_location(i));
- }
- }
- }
-
- // Initialize backtrack stack pointer.
- __ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
-
- __ jmp(&start_label_);
-
-
- // Exit code:
- if (success_label_.is_linked()) {
- // Save captures when successful.
- __ bind(&success_label_);
- if (num_saved_registers_ > 0) {
- // Copy captures to output.
- __ lw(a1, MemOperand(frame_pointer(), kInputStart));
- __ lw(a0, MemOperand(frame_pointer(), kRegisterOutput));
- __ lw(a2, MemOperand(frame_pointer(), kStartIndex));
- __ Subu(a1, end_of_input_address(), a1);
- // a1 is length of input in bytes.
- if (mode_ == UC16) {
- __ srl(a1, a1, 1);
- }
- // a1 is length of input in characters.
- __ Addu(a1, a1, Operand(a2));
- // a1 is length of string in characters.
-
- ASSERT_EQ(0, num_saved_registers_ % 2);
- // Always an even number of capture registers. This allows us to
- // unroll the loop once to add an operation between a load of a register
- // and the following use of that register.
- for (int i = 0; i < num_saved_registers_; i += 2) {
- __ lw(a2, register_location(i));
- __ lw(a3, register_location(i + 1));
- if (i == 0 && global_with_zero_length_check()) {
- // Keep capture start in a4 for the zero-length check later.
- __ mov(t7, a2);
- }
- if (mode_ == UC16) {
- __ sra(a2, a2, 1);
- __ Addu(a2, a2, a1);
- __ sra(a3, a3, 1);
- __ Addu(a3, a3, a1);
- } else {
- __ Addu(a2, a1, Operand(a2));
- __ Addu(a3, a1, Operand(a3));
- }
- __ sw(a2, MemOperand(a0));
- __ Addu(a0, a0, kPointerSize);
- __ sw(a3, MemOperand(a0));
- __ Addu(a0, a0, kPointerSize);
- }
- }
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- __ lw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ lw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ lw(a2, MemOperand(frame_pointer(), kRegisterOutput));
- // Increment success counter.
- __ Addu(a0, a0, 1);
- __ sw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ Subu(a1, a1, num_saved_registers_);
- // Check whether we have enough room for another set of capture results.
- __ mov(v0, a0);
- __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
-
- __ sw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
- // Advance the location for output.
- __ Addu(a2, a2, num_saved_registers_ * kPointerSize);
- __ sw(a2, MemOperand(frame_pointer(), kRegisterOutput));
-
- // Prepare a0 to initialize registers with its value in the next run.
- __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
-
- if (global_with_zero_length_check()) {
- // Special case for zero-length matches.
- // t7: capture start index
- // Not a zero-length match, restart.
- __ Branch(
- &load_char_start_regexp, ne, current_input_offset(), Operand(t7));
- // Offset from the end is zero if we already reached the end.
- __ Branch(&exit_label_, eq, current_input_offset(),
- Operand(zero_reg));
- // Advance current position after a zero-length match.
- __ Addu(current_input_offset(),
- current_input_offset(),
- Operand((mode_ == UC16) ? 2 : 1));
- }
-
- __ Branch(&load_char_start_regexp);
- } else {
- __ li(v0, Operand(SUCCESS));
- }
- }
- // Exit and return v0.
- __ bind(&exit_label_);
- if (global()) {
- __ lw(v0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- }
-
- __ bind(&return_v0);
- // Skip sp past regexp registers and local variables..
- __ mov(sp, frame_pointer());
- // Restore registers s0..s7 and return (restoring ra to pc).
- __ MultiPop(registers_to_retain | ra.bit());
- __ Ret();
-
- // Backtrack code (branch target for conditional backtracks).
- if (backtrack_label_.is_linked()) {
- __ bind(&backtrack_label_);
- Backtrack();
- }
-
- Label exit_with_exception;
-
- // Preempt-code.
- if (check_preempt_label_.is_linked()) {
- SafeCallTarget(&check_preempt_label_);
- // Put regexp engine registers on stack.
- RegList regexp_registers_to_retain = current_input_offset().bit() |
- current_character().bit() | backtrack_stackpointer().bit();
- __ MultiPush(regexp_registers_to_retain);
- CallCheckStackGuardState(a0);
- __ MultiPop(regexp_registers_to_retain);
- // If returning non-zero, we should end execution with the given
- // result as return value.
- __ Branch(&return_v0, ne, v0, Operand(zero_reg));
-
- // String might have moved: Reload end of string from frame.
- __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- SafeReturn();
- }
-
- // Backtrack stack overflow code.
- if (stack_overflow_label_.is_linked()) {
- SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
- // Put regexp engine registers on stack first.
- RegList regexp_registers = current_input_offset().bit() |
- current_character().bit();
- __ MultiPush(regexp_registers);
- Label grow_failed;
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, a0);
- __ mov(a0, backtrack_stackpointer());
- __ Addu(a1, frame_pointer(), Operand(kStackHighEnd));
- __ li(a2, Operand(ExternalReference::isolate_address()));
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // Restore regexp registers.
- __ MultiPop(regexp_registers);
- // If return NULL, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
- __ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
- // Otherwise use return value as new stack pointer.
- __ mov(backtrack_stackpointer(), v0);
- // Restore saved registers and continue.
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- SafeReturn();
- }
-
- if (exit_with_exception.is_linked()) {
- // If any of the code above needed to exit with an exception.
- __ bind(&exit_with_exception);
- // Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ li(v0, Operand(EXCEPTION));
- __ jmp(&return_v0);
- }
- }
-
- CodeDesc code_desc;
- masm_->GetCode(&code_desc);
- Handle<Code> code = FACTORY->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- LOG(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
- return Handle<HeapObject>::cast(code);
-}
-
-
-void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ jmp(to);
- return;
-}
-
-
-void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- __ lw(a0, register_location(reg));
- BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
-}
-
-
-void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- __ lw(a0, register_location(reg));
- BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
-}
-
-
-void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
- Label* if_eq) {
- __ lw(a0, register_location(reg));
- BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerMIPS::Implementation() {
- return kMIPSImplementation;
-}
-
-
-void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
- ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works).
- if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
- Pop(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
- Pop(a0);
- __ sw(a0, register_location(register_index));
-}
-
-
-void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
- if (label->is_bound()) {
- int target = label->pos();
- __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
- } else {
- Label after_constant;
- __ Branch(&after_constant);
- int offset = masm_->pc_offset();
- int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
- __ emit(0);
- masm_->label_at_put(label, offset);
- __ bind(&after_constant);
- if (is_int16(cp_offset)) {
- __ lw(a0, MemOperand(code_pointer(), cp_offset));
- } else {
- __ Addu(a0, code_pointer(), cp_offset);
- __ lw(a0, MemOperand(a0, 0));
- }
- }
- Push(a0);
- CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerMIPS::PushCurrentPosition() {
- Push(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- __ lw(a0, register_location(register_index));
- Push(a0);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
- __ lw(current_input_offset(), register_location(reg));
-}
-
-
-void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
- __ lw(backtrack_stackpointer(), register_location(reg));
- __ lw(a0, MemOperand(frame_pointer(), kStackHighEnd));
- __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
-}
-
-
-void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
- Label after_position;
- __ Branch(&after_position,
- ge,
- current_input_offset(),
- Operand(-by * char_size()));
- __ li(current_input_offset(), -by * char_size());
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&after_position);
-}
-
-
-void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
- ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ li(a0, Operand(to));
- __ sw(a0, register_location(register_index));
-}
-
-
-bool RegExpMacroAssemblerMIPS::Succeed() {
- __ jmp(&success_label_);
- return global();
-}
-
-
-void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- if (cp_offset == 0) {
- __ sw(current_input_offset(), register_location(reg));
- } else {
- __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
- __ sw(a0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
- __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
- for (int reg = reg_from; reg <= reg_to; reg++) {
- __ sw(a0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
- __ lw(a1, MemOperand(frame_pointer(), kStackHighEnd));
- __ Subu(a0, backtrack_stackpointer(), a1);
- __ sw(a0, register_location(reg));
-}
-
-
-bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
- return false;
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
- __ mov(a2, frame_pointer());
- // Code* of self.
- __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
- // a0 becomes return address pointer.
- ExternalReference stack_guard_check =
- ExternalReference::re_check_stack_guard_state(masm_->isolate());
- CallCFunctionUsingStub(stack_guard_check, num_arguments);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
-
-
-int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame) {
- Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
- if (isolate->stack_guard()->IsStackOverflow()) {
- isolate->StackOverflow();
- return EXCEPTION;
- }
-
- // If not real stack overflow the stack guard was used to interrupt
- // execution for another purpose.
-
- // If this is a direct call from JavaScript retry the RegExp forcing the call
- // through the runtime system. Currently the direct call cannot handle a GC.
- if (frame_entry<int>(re_frame, kDirectCall) == 1) {
- return RETRY;
- }
-
- // Prepare for possible GC.
- HandleScope handles(isolate);
- Handle<Code> code_handle(re_code);
-
- Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
- // Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
-
- ASSERT(re_code->instruction_start() <= *return_address);
- ASSERT(*return_address <=
- re_code->instruction_start() + re_code->instruction_size());
-
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
-
- if (*code_handle != re_code) { // Return address no longer valid.
- int delta = code_handle->address() - re_code->address();
- // Overwrite the return address on the stack.
- *return_address += delta;
- }
-
- if (result->IsException()) {
- return EXCEPTION;
- }
-
- Handle<String> subject_tmp = subject;
- int slice_offset = 0;
-
- // Extract the underlying string and the slice offset.
- if (StringShape(*subject_tmp).IsCons()) {
- subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
- } else if (StringShape(*subject_tmp).IsSliced()) {
- SlicedString* slice = SlicedString::cast(*subject_tmp);
- subject_tmp = Handle<String>(slice->parent());
- slice_offset = slice->offset();
- }
-
- // String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
- // code cannot be used, and we need to restart regexp matching from
- // scratch (including, potentially, compiling a new version of the code).
- return RETRY;
- }
-
- // Otherwise, the content of the string might have moved. It must still
- // be a sequential or external string with the same content.
- // Update the start and end pointers in the stack frame to the current
- // location (whether it has actually moved or not).
- ASSERT(StringShape(*subject_tmp).IsSequential() ||
- StringShape(*subject_tmp).IsExternal());
-
- // The original start address of the characters to match.
- const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
-
- // Find the current start address of the same character at the current string
- // position.
- int start_index = frame_entry<int>(re_frame, kStartIndex);
- const byte* new_address = StringCharacterPosition(*subject_tmp,
- start_index + slice_offset);
-
- if (start_address != new_address) {
- // If there is a difference, update the object pointer and start and end
- // addresses in the RegExp stack frame to match the new value.
- const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
- int byte_length = static_cast<int>(end_address - start_address);
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- frame_entry<const byte*>(re_frame, kInputStart) = new_address;
- frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
- } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
- // Subject string might have been a ConsString that underwent
- // short-circuiting during GC. That will not change start_address but
- // will change pointer inside the subject handle.
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- }
-
- return 0;
-}
-
-
-MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
- ASSERT(register_index < (1<<30));
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- return MemOperand(frame_pointer(),
- kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- BranchOrBacktrack(on_outside_input,
- ge,
- current_input_offset(),
- Operand(-cp_offset * char_size()));
-}
-
-
-void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
- Condition condition,
- Register rs,
- const Operand& rt) {
- if (condition == al) { // Unconditional.
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ jmp(to);
- return;
- }
- if (to == NULL) {
- __ Branch(&backtrack_label_, condition, rs, rt);
- return;
- }
- __ Branch(to, condition, rs, rt);
-}
-
-
-void RegExpMacroAssemblerMIPS::SafeCall(Label* to,
- Condition cond,
- Register rs,
- const Operand& rt) {
- __ BranchAndLink(to, cond, rs, rt);
-}
-
-
-void RegExpMacroAssemblerMIPS::SafeReturn() {
- __ pop(ra);
- __ Addu(t5, ra, Operand(masm_->CodeObject()));
- __ Jump(t5);
-}
-
-
-void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
- __ bind(name);
- __ Subu(ra, ra, Operand(masm_->CodeObject()));
- __ push(ra);
-}
-
-
-void RegExpMacroAssemblerMIPS::Push(Register source) {
- ASSERT(!source.is(backtrack_stackpointer()));
- __ Addu(backtrack_stackpointer(),
- backtrack_stackpointer(),
- Operand(-kPointerSize));
- __ sw(source, MemOperand(backtrack_stackpointer()));
-}
-
-
-void RegExpMacroAssemblerMIPS::Pop(Register target) {
- ASSERT(!target.is(backtrack_stackpointer()));
- __ lw(target, MemOperand(backtrack_stackpointer()));
- __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), kPointerSize);
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckPreemption() {
- // Check for preemption.
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ li(a0, Operand(stack_limit));
- __ lw(a0, MemOperand(a0));
- SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckStackLimit() {
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
-
- __ li(a0, Operand(stack_limit));
- __ lw(a0, MemOperand(a0));
- SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
-}
-
-
-void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
- ExternalReference function,
- int num_arguments) {
- // Must pass all arguments in registers. The stub pushes on the stack.
- ASSERT(num_arguments <= 4);
- __ li(code_pointer(), Operand(function));
- RegExpCEntryStub stub;
- __ CallStub(&stub);
- if (OS::ActivationFrameAlignment() != 0) {
- __ lw(sp, MemOperand(sp, 16));
- }
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
-}
-
-
-void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- Register offset = current_input_offset();
- if (cp_offset != 0) {
- // t7 is not being used to store the capture start index at this point.
- __ Addu(t7, current_input_offset(), Operand(cp_offset * char_size()));
- offset = t7;
- }
- // We assume that we cannot do unaligned loads on MIPS, so this function
- // must only be used to load a single character at a time.
- ASSERT(characters == 1);
- __ Addu(t5, end_of_input_address(), Operand(offset));
- if (mode_ == ASCII) {
- __ lbu(current_character(), MemOperand(t5, 0));
- } else {
- ASSERT(mode_ == UC16);
- __ lhu(current_character(), MemOperand(t5, 0));
- }
-}
-
-
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- int stack_alignment = OS::ActivationFrameAlignment();
- if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
- // Stack is already aligned for call, so decrement by alignment
- // to make room for storing the return address.
- __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
- const int return_address_offset = kCArgsSlotsSize;
- __ Addu(a0, sp, return_address_offset);
- __ sw(ra, MemOperand(a0, 0));
- __ mov(t9, t1);
- __ Call(t9);
- __ lw(ra, MemOperand(sp, return_address_offset));
- __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
- __ Jump(ra);
-}
-
-
-#undef __
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.h b/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.h
deleted file mode 100644
index 8dd52a4..0000000
--- a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.h
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-
-#include "mips/assembler-mips.h"
-#include "mips/assembler-mips-inl.h"
-#include "macro-assembler.h"
-#include "code.h"
-#include "mips/macro-assembler-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save, Zone* zone);
- virtual ~RegExpMacroAssemblerMIPS();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(uint32_t c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
- virtual bool CanReadUnaligned();
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
-
- private:
- // Offsets from frame_pointer() of function parameters and stored registers.
- static const int kFramePointer = 0;
-
- // Above the frame pointer - Stored registers and stack passed parameters.
- // Registers s0 to s7, fp, and ra.
- static const int kStoredRegisters = kFramePointer;
- // Return address (stored from link register, read into pc on return).
- static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
- static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
- // Stack frame header.
- static const int kStackFrameHeader = kReturnAddress + kPointerSize;
- // Stack parameters placed by caller.
- static const int kRegisterOutput = kStackFrameHeader + 20;
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
-
- // Below the frame pointer.
- // Register parameters stored by setup code.
- static const int kInputEnd = kFramePointer - kPointerSize;
- static const int kInputStart = kInputEnd - kPointerSize;
- static const int kStartIndex = kInputStart - kPointerSize;
- static const int kInputString = kStartIndex - kPointerSize;
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState(Register scratch);
-
- // The ebp-relative location of a regexp register.
- MemOperand register_location(int register_index);
-
- // Register holding the current input position as negative offset from
- // the end of the string.
- inline Register current_input_offset() { return t2; }
-
- // The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return t3; }
-
- // Register holding address of the end of the input string.
- inline Register end_of_input_address() { return t6; }
-
- // Register holding the frame address. Local variables, parameters and
- // regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- inline Register backtrack_stackpointer() { return t4; }
-
- // Register holding pointer to the current code object.
- inline Register code_pointer() { return t1; }
-
- // Byte size of chars in the string to match (decided by the Mode argument).
- inline int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Label* to,
- Condition condition,
- Register rs,
- const Operand& rt);
-
- // Call and return internally in the generated code in a way that
- // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
- inline void SafeCall(Label* to,
- Condition cond,
- Register rs,
- const Operand& rt);
- inline void SafeReturn();
- inline void SafeCallTarget(Label* name);
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // and increments it by a word size.
- inline void Pop(Register target);
-
- // Calls a C function and cleans up the frame alignment done by
- // by FrameAlign. The called function *is* allowed to trigger a garbage
- // collection, but may not take more than four arguments (no arguments
- // passed on the stack), and the first argument will be a pointer to the
- // return address.
- inline void CallCFunctionUsingStub(ExternalReference function,
- int num_arguments);
-
-
- MacroAssembler* masm_;
-
- // Which mode to generate code for (ASCII or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1).
- int num_saved_registers_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
- Label internal_failure_label_;
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-
-}} // namespace v8::internal
-
-#endif // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/simulator-mips.cc b/src/3rdparty/v8/src/mips/simulator-mips.cc
deleted file mode 100644
index be9f369..0000000
--- a/src/3rdparty/v8/src/mips/simulator-mips.cc
+++ /dev/null
@@ -1,2908 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-#include <math.h>
-#include <limits.h>
-#include <cstdarg>
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "cpu.h"
-#include "disasm.h"
-#include "assembler.h"
-#include "globals.h" // Need the BitCast.
-#include "mips/constants-mips.h"
-#include "mips/simulator-mips.h"
-
-
-// Only build the simulator if not compiling for real MIPS hardware.
-#if defined(USE_SIMULATOR)
-
-namespace v8 {
-namespace internal {
-
-// Utils functions.
-bool HaveSameSign(int32_t a, int32_t b) {
- return ((a ^ b) >= 0);
-}
-
-
-uint32_t get_fcsr_condition_bit(uint32_t cc) {
- if (cc == 0) {
- return 23;
- } else {
- return 24 + cc;
- }
-}
-
-
-// This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent was through
-// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
-// Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
-
-// The MipsDebugger class is used by the simulator while debugging simulated
-// code.
-class MipsDebugger {
- public:
- explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
- ~MipsDebugger();
-
- void Stop(Instruction* instr);
- void Debug();
- // Print all registers with a nice formatting.
- void PrintAllRegs();
- void PrintAllRegsIncludingFPU();
-
- private:
- // We set the breakpoint code to 0xfffff to easily recognize it.
- static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xfffff << 6;
- static const Instr kNopInstr = 0x0;
-
- Simulator* sim_;
-
- int32_t GetRegisterValue(int regnum);
- int32_t GetFPURegisterValueInt(int regnum);
- int64_t GetFPURegisterValueLong(int regnum);
- float GetFPURegisterValueFloat(int regnum);
- double GetFPURegisterValueDouble(int regnum);
- bool GetValue(const char* desc, int32_t* value);
-
- // Set or delete a breakpoint. Returns true if successful.
- bool SetBreakpoint(Instruction* breakpc);
- bool DeleteBreakpoint(Instruction* breakpc);
-
- // Undo and redo all breakpoints. This is needed to bracket disassembly and
- // execution to skip past breakpoints when run from the debugger.
- void UndoBreakpoints();
- void RedoBreakpoints();
-};
-
-
-MipsDebugger::~MipsDebugger() {
-}
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitializeCoverage() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "aw+");
- }
-}
-
-
-void MipsDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->Bits(25, 6);
- // Retrieve the encoded address, which comes just after this stop.
- char** msg_address =
- reinterpret_cast<char**>(sim_->get_pc() + Instr::kInstrSize);
- char* msg = *msg_address;
- ASSERT(msg != NULL);
-
- // Update this stop description.
- if (!watched_stops[code].desc) {
- watched_stops[code].desc = msg;
- }
-
- if (strlen(msg) > 0) {
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", str);
- fflush(coverage_log);
- }
- // Overwrite the instruction and address with nops.
- instr->SetInstructionBits(kNopInstr);
- reinterpret_cast<Instr*>(msg_address)->SetInstructionBits(kNopInstr);
- }
- sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstructionSize);
-}
-
-
-#else // GENERATED_CODE_COVERAGE
-
-#define UNSUPPORTED() printf("Unsupported instruction.\n");
-
-static void InitializeCoverage() {}
-
-
-void MipsDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->Bits(25, 6);
- // Retrieve the encoded address, which comes just after this stop.
- char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
- Instruction::kInstrSize);
- // Update this stop description.
- if (!sim_->watched_stops[code].desc) {
- sim_->watched_stops[code].desc = msg;
- }
- PrintF("Simulator hit %s (%u)\n", msg, code);
- sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
- Debug();
-}
-#endif // GENERATED_CODE_COVERAGE
-
-
-int32_t MipsDebugger::GetRegisterValue(int regnum) {
- if (regnum == kNumSimuRegisters) {
- return sim_->get_pc();
- } else {
- return sim_->get_register(regnum);
- }
-}
-
-
-int32_t MipsDebugger::GetFPURegisterValueInt(int regnum) {
- if (regnum == kNumFPURegisters) {
- return sim_->get_pc();
- } else {
- return sim_->get_fpu_register(regnum);
- }
-}
-
-
-int64_t MipsDebugger::GetFPURegisterValueLong(int regnum) {
- if (regnum == kNumFPURegisters) {
- return sim_->get_pc();
- } else {
- return sim_->get_fpu_register_long(regnum);
- }
-}
-
-
-float MipsDebugger::GetFPURegisterValueFloat(int regnum) {
- if (regnum == kNumFPURegisters) {
- return sim_->get_pc();
- } else {
- return sim_->get_fpu_register_float(regnum);
- }
-}
-
-
-double MipsDebugger::GetFPURegisterValueDouble(int regnum) {
- if (regnum == kNumFPURegisters) {
- return sim_->get_pc();
- } else {
- return sim_->get_fpu_register_double(regnum);
- }
-}
-
-
-bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
- int regnum = Registers::Number(desc);
- int fpuregnum = FPURegisters::Number(desc);
-
- if (regnum != kInvalidRegister) {
- *value = GetRegisterValue(regnum);
- return true;
- } else if (fpuregnum != kInvalidFPURegister) {
- *value = GetFPURegisterValueInt(fpuregnum);
- return true;
- } else if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
- } else {
- return SScanF(desc, "%i", value) == 1;
- }
- return false;
-}
-
-
-bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
- // Check if a breakpoint can be set. If not return without any side-effects.
- if (sim_->break_pc_ != NULL) {
- return false;
- }
-
- // Set the breakpoint.
- sim_->break_pc_ = breakpc;
- sim_->break_instr_ = breakpc->InstructionBits();
- // Not setting the breakpoint instruction in the code itself. It will be set
- // when the debugger shell continues.
- return true;
-}
-
-
-bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
-
- sim_->break_pc_ = NULL;
- sim_->break_instr_ = 0;
- return true;
-}
-
-
-void MipsDebugger::UndoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
-}
-
-
-void MipsDebugger::RedoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
- }
-}
-
-
-void MipsDebugger::PrintAllRegs() {
-#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
-
- PrintF("\n");
- // at, v0, a0.
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(1), REG_INFO(2), REG_INFO(4));
- // v1, a1.
- PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- "", REG_INFO(3), REG_INFO(5));
- // a2.
- PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6));
- // a3.
- PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(7));
- PrintF("\n");
- // t0-t7, s0-s7
- for (int i = 0; i < 8; i++) {
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(8+i), REG_INFO(16+i));
- }
- PrintF("\n");
- // t8, k0, LO.
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(24), REG_INFO(26), REG_INFO(32));
- // t9, k1, HI.
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(25), REG_INFO(27), REG_INFO(33));
- // sp, fp, gp.
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(29), REG_INFO(30), REG_INFO(28));
- // pc.
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(31), REG_INFO(34));
-
-#undef REG_INFO
-#undef FPU_REG_INFO
-}
-
-
-void MipsDebugger::PrintAllRegsIncludingFPU() {
-#define FPU_REG_INFO(n) FPURegisters::Name(n), FPURegisters::Name(n+1), \
- GetFPURegisterValueInt(n+1), \
- GetFPURegisterValueInt(n), \
- GetFPURegisterValueDouble(n)
-
- PrintAllRegs();
-
- PrintF("\n\n");
- // f0, f1, f2, ... f31.
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(0) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(2) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(4) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(6) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(8) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(10));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(12));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(14));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(16));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(18));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(20));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(22));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(24));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(26));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(28));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(30));
-
-#undef REG_INFO
-#undef FPU_REG_INFO
-}
-
-
-void MipsDebugger::Debug() {
- intptr_t last_pc = -1;
- bool done = false;
-
-#define COMMAND_SIZE 63
-#define ARG_SIZE 255
-
-#define STR(a) #a
-#define XSTR(a) STR(a)
-
- char cmd[COMMAND_SIZE + 1];
- char arg1[ARG_SIZE + 1];
- char arg2[ARG_SIZE + 1];
- char* argv[3] = { cmd, arg1, arg2 };
-
- // Make sure to have a proper terminating character if reaching the limit.
- cmd[COMMAND_SIZE] = 0;
- arg1[ARG_SIZE] = 0;
- arg2[ARG_SIZE] = 0;
-
- // Undo all set breakpoints while running in the debugger shell. This will
- // make them invisible to all commands.
- UndoBreakpoints();
-
- while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
- if (last_pc != sim_->get_pc()) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // Use a reasonably large buffer.
- v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(sim_->get_pc()));
- PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
- last_pc = sim_->get_pc();
- }
- char* line = ReadLine("sim> ");
- if (line == NULL) {
- break;
- } else {
- char* last_input = sim_->last_debugger_input();
- if (strcmp(line, "\n") == 0 && last_input != NULL) {
- line = last_input;
- } else {
- // Ownership is transferred to sim_;
- sim_->set_last_debugger_input(line);
- }
- // Use sscanf to parse the individual parts of the command line. At the
- // moment no command expects more than two parameters.
- int argc = SScanF(line,
- "%" XSTR(COMMAND_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s",
- cmd, arg1, arg2);
- if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc());
- if (!(instr->IsTrap()) ||
- instr->InstructionBits() == rtCallRedirInstr) {
- sim_->InstructionDecode(
- reinterpret_cast<Instruction*>(sim_->get_pc()));
- } else {
- // Allow si to jump over generated breakpoints.
- PrintF("/!\\ Jumping over generated breakpoint.\n");
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
- }
- } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
- // Execute the one instruction we broke at with breakpoints disabled.
- sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
- // Leave the debugger shell.
- done = true;
- } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
- if (argc == 2) {
- int32_t value;
- float fvalue;
- if (strcmp(arg1, "all") == 0) {
- PrintAllRegs();
- } else if (strcmp(arg1, "allf") == 0) {
- PrintAllRegsIncludingFPU();
- } else {
- int regnum = Registers::Number(arg1);
- int fpuregnum = FPURegisters::Number(arg1);
-
- if (regnum != kInvalidRegister) {
- value = GetRegisterValue(regnum);
- PrintF("%s: 0x%08x %d \n", arg1, value, value);
- } else if (fpuregnum != kInvalidFPURegister) {
- if (fpuregnum % 2 == 1) {
- value = GetFPURegisterValueInt(fpuregnum);
- fvalue = GetFPURegisterValueFloat(fpuregnum);
- PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
- } else {
- double dfvalue;
- int32_t lvalue1 = GetFPURegisterValueInt(fpuregnum);
- int32_t lvalue2 = GetFPURegisterValueInt(fpuregnum + 1);
- dfvalue = GetFPURegisterValueDouble(fpuregnum);
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
- FPURegisters::Name(fpuregnum+1),
- FPURegisters::Name(fpuregnum),
- lvalue1,
- lvalue2,
- dfvalue);
- }
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- }
- } else {
- if (argc == 3) {
- if (strcmp(arg2, "single") == 0) {
- int32_t value;
- float fvalue;
- int fpuregnum = FPURegisters::Number(arg1);
-
- if (fpuregnum != kInvalidFPURegister) {
- value = GetFPURegisterValueInt(fpuregnum);
- fvalue = GetFPURegisterValueFloat(fpuregnum);
- PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- PrintF("print <fpu register> single\n");
- }
- } else {
- PrintF("print <register> or print <fpu register> single\n");
- }
- }
- } else if ((strcmp(cmd, "po") == 0)
- || (strcmp(cmd, "printobject") == 0)) {
- if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- Object* obj = reinterpret_cast<Object*>(value);
- PrintF("%s: \n", arg1);
-#ifdef DEBUG
- obj->PrintLn();
-#else
- obj->ShortPrint();
- PrintF("\n");
-#endif
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- PrintF("printobject <value>\n");
- }
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
- int32_t* cur = NULL;
- int32_t* end = NULL;
- int next_arg = 1;
-
- if (strcmp(cmd, "stack") == 0) {
- cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
- } else { // Command "mem".
- int32_t value;
- if (!GetValue(arg1, &value)) {
- PrintF("%s unrecognized\n", arg1);
- continue;
- }
- cur = reinterpret_cast<int32_t*>(value);
- next_arg++;
- }
-
- int32_t words;
- if (argc == next_arg) {
- words = 10;
- } else if (argc == next_arg + 1) {
- if (!GetValue(argv[next_arg], &words)) {
- words = 10;
- }
- }
- end = cur + words;
-
- while (cur < end) {
- PrintF(" 0x%08x: 0x%08x %10d",
- reinterpret_cast<intptr_t>(cur), *cur, *cur);
- HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
- int value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
- if (current_heap->Contains(obj) || ((value & 1) == 0)) {
- PrintF(" (");
- if ((value & 1) == 0) {
- PrintF("smi %d", value / 2);
- } else {
- obj->ShortPrint();
- }
- PrintF(")");
- }
- PrintF("\n");
- cur++;
- }
-
- } else if ((strcmp(cmd, "disasm") == 0) ||
- (strcmp(cmd, "dpc") == 0) ||
- (strcmp(cmd, "di") == 0)) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // Use a reasonably large buffer.
- v8::internal::EmbeddedVector<char, 256> buffer;
-
- byte* cur = NULL;
- byte* end = NULL;
-
- if (argc == 1) {
- cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstrSize);
- } else if (argc == 2) {
- int regnum = Registers::Number(arg1);
- if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
- // The argument is an address or a register name.
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte*>(value);
- // Disassemble 10 instructions at <arg1>.
- end = cur + (10 * Instruction::kInstrSize);
- }
- } else {
- // The argument is the number of instructions.
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte*>(sim_->get_pc());
- // Disassemble <arg1> instructions.
- end = cur + (value * Instruction::kInstrSize);
- }
- }
- } else {
- int32_t value1;
- int32_t value2;
- if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * Instruction::kInstrSize);
- }
- }
-
- while (cur < end) {
- dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n",
- reinterpret_cast<intptr_t>(cur), buffer.start());
- cur += Instruction::kInstrSize;
- }
- } else if (strcmp(cmd, "gdb") == 0) {
- PrintF("relinquishing control to gdb\n");
- v8::internal::OS::DebugBreak();
- PrintF("regaining control from gdb\n");
- } else if (strcmp(cmd, "break") == 0) {
- if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
- PrintF("setting breakpoint failed\n");
- }
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- PrintF("break <address>\n");
- }
- } else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(NULL)) {
- PrintF("deleting breakpoint failed\n");
- }
- } else if (strcmp(cmd, "flags") == 0) {
- PrintF("No flags on MIPS !\n");
- } else if (strcmp(cmd, "stop") == 0) {
- int32_t value;
- intptr_t stop_pc = sim_->get_pc() -
- 2 * Instruction::kInstrSize;
- Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
- Instruction* msg_address =
- reinterpret_cast<Instruction*>(stop_pc +
- Instruction::kInstrSize);
- if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
- // Remove the current stop.
- if (sim_->IsStopInstruction(stop_instr)) {
- stop_instr->SetInstructionBits(kNopInstr);
- msg_address->SetInstructionBits(kNopInstr);
- } else {
- PrintF("Not at debugger stop.\n");
- }
- } else if (argc == 3) {
- // Print information about all/the specified breakpoint(s).
- if (strcmp(arg1, "info") == 0) {
- if (strcmp(arg2, "all") == 0) {
- PrintF("Stop information:\n");
- for (uint32_t i = kMaxWatchpointCode + 1;
- i <= kMaxStopCode;
- i++) {
- sim_->PrintStopInfo(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->PrintStopInfo(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- } else if (strcmp(arg1, "enable") == 0) {
- // Enable all/the specified breakpoint(s).
- if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = kMaxWatchpointCode + 1;
- i <= kMaxStopCode;
- i++) {
- sim_->EnableStop(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->EnableStop(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- } else if (strcmp(arg1, "disable") == 0) {
- // Disable all/the specified breakpoint(s).
- if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = kMaxWatchpointCode + 1;
- i <= kMaxStopCode;
- i++) {
- sim_->DisableStop(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->DisableStop(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- }
- } else {
- PrintF("Wrong usage. Use help command for more information.\n");
- }
- } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
- // Print registers and disassemble.
- PrintAllRegs();
- PrintF("\n");
-
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // Use a reasonably large buffer.
- v8::internal::EmbeddedVector<char, 256> buffer;
-
- byte* cur = NULL;
- byte* end = NULL;
-
- if (argc == 1) {
- cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstrSize);
- } else if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte*>(value);
- // no length parameter passed, assume 10 instructions
- end = cur + (10 * Instruction::kInstrSize);
- }
- } else {
- int32_t value1;
- int32_t value2;
- if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * Instruction::kInstrSize);
- }
- }
-
- while (cur < end) {
- dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n",
- reinterpret_cast<intptr_t>(cur), buffer.start());
- cur += Instruction::kInstrSize;
- }
- } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
- PrintF("cont\n");
- PrintF(" continue execution (alias 'c')\n");
- PrintF("stepi\n");
- PrintF(" step one instruction (alias 'si')\n");
- PrintF("print <register>\n");
- PrintF(" print register content (alias 'p')\n");
- PrintF(" use register name 'all' to print all registers\n");
- PrintF("printobject <register>\n");
- PrintF(" print an object from a register (alias 'po')\n");
- PrintF("stack [<words>]\n");
- PrintF(" dump stack content, default dump 10 words)\n");
- PrintF("mem <address> [<words>]\n");
- PrintF(" dump memory content, default dump 10 words)\n");
- PrintF("flags\n");
- PrintF(" print flags\n");
- PrintF("disasm [<instructions>]\n");
- PrintF("disasm [<address/register>]\n");
- PrintF("disasm [[<address/register>] <instructions>]\n");
- PrintF(" disassemble code, default is 10 instructions\n");
- PrintF(" from pc (alias 'di')\n");
- PrintF("gdb\n");
- PrintF(" enter gdb\n");
- PrintF("break <address>\n");
- PrintF(" set a break point on the address\n");
- PrintF("del\n");
- PrintF(" delete the breakpoint\n");
- PrintF("stop feature:\n");
- PrintF(" Description:\n");
- PrintF(" Stops are debug instructions inserted by\n");
- PrintF(" the Assembler::stop() function.\n");
- PrintF(" When hitting a stop, the Simulator will\n");
- PrintF(" stop and and give control to the Debugger.\n");
- PrintF(" All stop codes are watched:\n");
- PrintF(" - They can be enabled / disabled: the Simulator\n");
- PrintF(" will / won't stop when hitting them.\n");
- PrintF(" - The Simulator keeps track of how many times they \n");
- PrintF(" are met. (See the info command.) Going over a\n");
- PrintF(" disabled stop still increases its counter. \n");
- PrintF(" Commands:\n");
- PrintF(" stop info all/<code> : print infos about number <code>\n");
- PrintF(" or all stop(s).\n");
- PrintF(" stop enable/disable all/<code> : enables / disables\n");
- PrintF(" all or number <code> stop(s)\n");
- PrintF(" stop unstop\n");
- PrintF(" ignore the stop instruction at the current location\n");
- PrintF(" from now on\n");
- } else {
- PrintF("Unknown command: %s\n", cmd);
- }
- }
- }
-
- // Add all the breakpoints back to stop execution and enter the debugger
- // shell when hit.
- RedoBreakpoints();
-
-#undef COMMAND_SIZE
-#undef ARG_SIZE
-
-#undef STR
-#undef XSTR
-}
-
-
-static bool ICacheMatch(void* one, void* two) {
- ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
- ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
- return one == two;
-}
-
-
-static uint32_t ICacheHash(void* key) {
- return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
-}
-
-
-static bool AllOnOnePage(uintptr_t start, int size) {
- intptr_t start_page = (start & ~CachePage::kPageMask);
- intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
- return start_page == end_page;
-}
-
-
-void Simulator::set_last_debugger_input(char* input) {
- DeleteArray(last_debugger_input_);
- last_debugger_input_ = input;
-}
-
-
-void Simulator::FlushICache(v8::internal::HashMap* i_cache,
- void* start_addr,
- size_t size) {
- intptr_t start = reinterpret_cast<intptr_t>(start_addr);
- int intra_line = (start & CachePage::kLineMask);
- start -= intra_line;
- size += intra_line;
- size = ((size - 1) | CachePage::kLineMask) + 1;
- int offset = (start & CachePage::kPageMask);
- while (!AllOnOnePage(start, size - 1)) {
- int bytes_to_flush = CachePage::kPageSize - offset;
- FlushOnePage(i_cache, start, bytes_to_flush);
- start += bytes_to_flush;
- size -= bytes_to_flush;
- ASSERT_EQ(0, start & CachePage::kPageMask);
- offset = 0;
- }
- if (size != 0) {
- FlushOnePage(i_cache, start, size);
- }
-}
-
-
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
- v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
- ICacheHash(page),
- true);
- if (entry->value == NULL) {
- CachePage* new_page = new CachePage();
- entry->value = new_page;
- }
- return reinterpret_cast<CachePage*>(entry->value);
-}
-
-
-// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
- intptr_t start,
- int size) {
- ASSERT(size <= CachePage::kPageSize);
- ASSERT(AllOnOnePage(start, size - 1));
- ASSERT((start & CachePage::kLineMask) == 0);
- ASSERT((size & CachePage::kLineMask) == 0);
- void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
- int offset = (start & CachePage::kPageMask);
- CachePage* cache_page = GetCachePage(i_cache, page);
- char* valid_bytemap = cache_page->ValidityByte(offset);
- memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
-}
-
-
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
- Instruction* instr) {
- intptr_t address = reinterpret_cast<intptr_t>(instr);
- void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
- void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
- int offset = (address & CachePage::kPageMask);
- CachePage* cache_page = GetCachePage(i_cache, page);
- char* cache_valid_byte = cache_page->ValidityByte(offset);
- bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
- char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
- if (cache_hit) {
- // Check that the data in memory matches the contents of the I-cache.
- CHECK(memcmp(reinterpret_cast<void*>(instr),
- cache_page->CachedData(offset),
- Instruction::kInstrSize) == 0);
- } else {
- // Cache miss. Load memory into the cache.
- memcpy(cached_line, line, CachePage::kLineLength);
- *cache_valid_byte = CachePage::LINE_VALID;
- }
-}
-
-
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(isolate,
- &RedirectExternalReference);
-}
-
-
-Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == NULL) {
- i_cache_ = new v8::internal::HashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
- Initialize(isolate);
- // Set up simulator support first. Some of this information is needed to
- // setup the architecture state.
- stack_ = reinterpret_cast<char*>(malloc(stack_size_));
- pc_modified_ = false;
- icount_ = 0;
- break_count_ = 0;
- break_pc_ = NULL;
- break_instr_ = 0;
-
- // Set up architecture state.
- // All registers are initialized to zero to start with.
- for (int i = 0; i < kNumSimuRegisters; i++) {
- registers_[i] = 0;
- }
- for (int i = 0; i < kNumFPURegisters; i++) {
- FPUregisters_[i] = 0;
- }
- FCSR_ = 0;
-
- // The sp is initialized to point to the bottom (high address) of the
- // allocated stack area. To be safe in potential stack underflows we leave
- // some buffer below.
- registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size_ - 64;
- // The ra and pc are initialized to a known bad value that will cause an
- // access violation if the simulator ever tries to execute it.
- registers_[pc] = bad_ra;
- registers_[ra] = bad_ra;
- InitializeCoverage();
- for (int i = 0; i < kNumExceptions; i++) {
- exceptions[i] = 0;
- }
-
- last_debugger_input_ = NULL;
-}
-
-
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a swi (software-interrupt) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the swi instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(void* external_function, ExternalReference::Type type)
- : external_function_(external_function),
- swi_instruction_(rtCallRedirInstr),
- type_(type),
- next_(NULL) {
- Isolate* isolate = Isolate::Current();
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->
- FlushICache(isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_),
- Instruction::kInstrSize);
- isolate->set_simulator_redirection(this);
- }
-
- void* address_of_swi_instruction() {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(void* external_function,
- ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
- Redirection* current = isolate->simulator_redirection();
- for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) return current;
- }
- return new Redirection(external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
-};
-
-
-void* Simulator::RedirectExternalReference(void* external_function,
- ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
- return redirection->address_of_swi_instruction();
-}
-
-
-// Get the active Simulator for the current thread.
-Simulator* Simulator::current(Isolate* isolate) {
- v8::internal::Isolate::PerIsolateThreadData* isolate_data =
- isolate->FindOrAllocatePerThreadDataForThisThread();
- ASSERT(isolate_data != NULL);
- ASSERT(isolate_data != NULL);
-
- Simulator* sim = isolate_data->simulator();
- if (sim == NULL) {
- // TODO(146): delete the simulator object when a thread/isolate goes away.
- sim = new Simulator(isolate);
- isolate_data->set_simulator(sim);
- }
- return sim;
-}
-
-
-// Sets the register in the architecture state. It will also deal with updating
-// Simulator internal state for special registers such as PC.
-void Simulator::set_register(int reg, int32_t value) {
- ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
- if (reg == pc) {
- pc_modified_ = true;
- }
-
- // Zero register always holds 0.
- registers_[reg] = (reg == 0) ? 0 : value;
-}
-
-
-void Simulator::set_dw_register(int reg, const int* dbl) {
- ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
- registers_[reg] = dbl[0];
- registers_[reg + 1] = dbl[1];
-}
-
-
-void Simulator::set_fpu_register(int fpureg, int32_t value) {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
- FPUregisters_[fpureg] = value;
-}
-
-
-void Simulator::set_fpu_register_float(int fpureg, float value) {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
- *BitCast<float*>(&FPUregisters_[fpureg]) = value;
-}
-
-
-void Simulator::set_fpu_register_double(int fpureg, double value) {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- *BitCast<double*>(&FPUregisters_[fpureg]) = value;
-}
-
-
-// Get the register from the architecture state. This function does handle
-// the special case of accessing the PC register.
-int32_t Simulator::get_register(int reg) const {
- ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
- if (reg == 0)
- return 0;
- else
- return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
-}
-
-
-double Simulator::get_double_from_register_pair(int reg) {
- ASSERT((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
-
- double dm_val = 0.0;
- // Read the bits from the unsigned integer register_[] array
- // into the double precision floating point value and return it.
- char buffer[2 * sizeof(registers_[0])];
- memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
- memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
- return(dm_val);
-}
-
-
-int32_t Simulator::get_fpu_register(int fpureg) const {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return FPUregisters_[fpureg];
-}
-
-
-int64_t Simulator::get_fpu_register_long(int fpureg) const {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- return *BitCast<int64_t*>(
- const_cast<int32_t*>(&FPUregisters_[fpureg]));
-}
-
-
-float Simulator::get_fpu_register_float(int fpureg) const {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return *BitCast<float*>(
- const_cast<int32_t*>(&FPUregisters_[fpureg]));
-}
-
-
-double Simulator::get_fpu_register_double(int fpureg) const {
- ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- return *BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
-}
-
-
-// For use in calls that take two double values, constructed either
-// from a0-a3 or f12 and f14.
-void Simulator::GetFpArgs(double* x, double* y) {
- if (!IsMipsSoftFloatABI) {
- *x = get_fpu_register_double(12);
- *y = get_fpu_register_double(14);
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
-
- // Registers a0 and a1 -> x.
- reg_buffer[0] = get_register(a0);
- reg_buffer[1] = get_register(a1);
- memcpy(x, buffer, sizeof(buffer));
-
- // Registers a2 and a3 -> y.
- reg_buffer[0] = get_register(a2);
- reg_buffer[1] = get_register(a3);
- memcpy(y, buffer, sizeof(buffer));
- }
-}
-
-
-// For use in calls that take one double value, constructed either
-// from a0 and a1 or f12.
-void Simulator::GetFpArgs(double* x) {
- if (!IsMipsSoftFloatABI) {
- *x = get_fpu_register_double(12);
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
- // Registers a0 and a1 -> x.
- reg_buffer[0] = get_register(a0);
- reg_buffer[1] = get_register(a1);
- memcpy(x, buffer, sizeof(buffer));
- }
-}
-
-
-// For use in calls that take one double value constructed either
-// from a0 and a1 or f12 and one integer value.
-void Simulator::GetFpArgs(double* x, int32_t* y) {
- if (!IsMipsSoftFloatABI) {
- *x = get_fpu_register_double(12);
- *y = get_register(a2);
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
- // Registers 0 and 1 -> x.
- reg_buffer[0] = get_register(a0);
- reg_buffer[1] = get_register(a1);
- memcpy(x, buffer, sizeof(buffer));
-
- // Register 2 -> y.
- reg_buffer[0] = get_register(a2);
- memcpy(y, buffer, sizeof(*y));
- }
-}
-
-
-// The return value is either in v0/v1 or f0.
-void Simulator::SetFpResult(const double& result) {
- if (!IsMipsSoftFloatABI) {
- set_fpu_register_double(0, result);
- } else {
- char buffer[2 * sizeof(registers_[0])];
- int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
- memcpy(buffer, &result, sizeof(buffer));
- // Copy result to v0 and v1.
- set_register(v0, reg_buffer[0]);
- set_register(v1, reg_buffer[1]);
- }
-}
-
-
-// Helper functions for setting and testing the FCSR register's bits.
-void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
- if (value) {
- FCSR_ |= (1 << cc);
- } else {
- FCSR_ &= ~(1 << cc);
- }
-}
-
-
-bool Simulator::test_fcsr_bit(uint32_t cc) {
- return FCSR_ & (1 << cc);
-}
-
-
-// Sets the rounding error codes in FCSR based on the result of the rounding.
-// Returns true if the operation was invalid.
-bool Simulator::set_fcsr_round_error(double original, double rounded) {
- bool ret = false;
-
- if (!isfinite(original) || !isfinite(rounded)) {
- set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
- ret = true;
- }
-
- if (original != rounded) {
- set_fcsr_bit(kFCSRInexactFlagBit, true);
- }
-
- if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
- set_fcsr_bit(kFCSRUnderflowFlagBit, true);
- ret = true;
- }
-
- if (rounded > INT_MAX || rounded < INT_MIN) {
- set_fcsr_bit(kFCSROverflowFlagBit, true);
- // The reference is not really clear but it seems this is required:
- set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
- ret = true;
- }
-
- return ret;
-}
-
-
-// Raw access to the PC register.
-void Simulator::set_pc(int32_t value) {
- pc_modified_ = true;
- registers_[pc] = value;
-}
-
-
-bool Simulator::has_bad_pc() const {
- return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
-}
-
-
-// Raw access to the PC register without the special adjustment when reading.
-int32_t Simulator::get_pc() const {
- return registers_[pc];
-}
-
-
-// The MIPS cannot do unaligned reads and writes. On some MIPS platforms an
-// interrupt is caused. On others it does a funky rotation thing. For now we
-// simply disallow unaligned reads, but at some point we may want to move to
-// emulating the rotate behaviour. Note that simulator runs have the runtime
-// system running directly on the host system and only generated code is
-// executed in the simulator. Since the host is typically IA32 we will not
-// get the correct MIPS-like behaviour on unaligned accesses.
-
-int Simulator::ReadW(int32_t addr, Instruction* instr) {
- if (addr >=0 && addr < 0x400) {
- // This has to be a NULL-dereference, drop into debugger.
- PrintF("Memory read from bad address: 0x%08x, pc=0x%08x\n",
- addr, reinterpret_cast<intptr_t>(instr));
- MipsDebugger dbg(this);
- dbg.Debug();
- }
- if ((addr & kPointerAlignmentMask) == 0) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return *ptr;
- }
- PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- MipsDebugger dbg(this);
- dbg.Debug();
- return 0;
-}
-
-
-void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
- if (addr >= 0 && addr < 0x400) {
- // This has to be a NULL-dereference, drop into debugger.
- PrintF("Memory write to bad address: 0x%08x, pc=0x%08x\n",
- addr, reinterpret_cast<intptr_t>(instr));
- MipsDebugger dbg(this);
- dbg.Debug();
- }
- if ((addr & kPointerAlignmentMask) == 0) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr = value;
- return;
- }
- PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- MipsDebugger dbg(this);
- dbg.Debug();
-}
-
-
-double Simulator::ReadD(int32_t addr, Instruction* instr) {
- if ((addr & kDoubleAlignmentMask) == 0) {
- double* ptr = reinterpret_cast<double*>(addr);
- return *ptr;
- }
- PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- OS::Abort();
- return 0;
-}
-
-
-void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
- if ((addr & kDoubleAlignmentMask) == 0) {
- double* ptr = reinterpret_cast<double*>(addr);
- *ptr = value;
- return;
- }
- PrintF("Unaligned (double) write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- OS::Abort();
-}
-
-
-uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
- if ((addr & 1) == 0) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
- }
- PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- OS::Abort();
- return 0;
-}
-
-
-int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
- if ((addr & 1) == 0) {
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
- }
- PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- OS::Abort();
- return 0;
-}
-
-
-void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
- if ((addr & 1) == 0) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
- return;
- }
- PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- OS::Abort();
-}
-
-
-void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
- if ((addr & 1) == 0) {
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- *ptr = value;
- return;
- }
- PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- OS::Abort();
-}
-
-
-uint32_t Simulator::ReadBU(int32_t addr) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- return *ptr & 0xff;
-}
-
-
-int32_t Simulator::ReadB(int32_t addr) {
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- return *ptr;
-}
-
-
-void Simulator::WriteB(int32_t addr, uint8_t value) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- *ptr = value;
-}
-
-
-void Simulator::WriteB(int32_t addr, int8_t value) {
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- *ptr = value;
-}
-
-
-// Returns the limit of the stack area to enable checking for stack overflows.
-uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
- // pushing values.
- return reinterpret_cast<uintptr_t>(stack_) + 1024;
-}
-
-
-// Unsupported instructions use Format to print an error and stop execution.
-void Simulator::Format(Instruction* instr, const char* format) {
- PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
- reinterpret_cast<intptr_t>(instr), format);
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Calls into the V8 runtime are based on this very simple interface.
-// Note: To be able to return two values from some calls the code in runtime.cc
-// uses the ObjectPair which is essentially two 32-bit values stuffed into a
-// 64-bit value. With the code below we assume that all runtime calls return
-// 64 bits of result. If they don't, the v1 result register contains a bogus
-// value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3,
- int32_t arg4,
- int32_t arg5);
-typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3);
-
-// This signature supports direct call in to API function native callback
-// (refer to InvocationCallback in v8.h).
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-
-// This signature supports direct call to accessor getter callback.
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
- int32_t arg1);
-
-// Software interrupt instructions are used by the simulator to call into the
-// C-based V8 runtime. They are also used for debugging with simulator.
-void Simulator::SoftwareInterrupt(Instruction* instr) {
- // There are several instructions that could get us here,
- // the break_ instruction, or several variants of traps. All
- // Are "SPECIAL" class opcode, and are distinuished by function.
- int32_t func = instr->FunctionFieldRaw();
- uint32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
-
- // We first check if we met a call_rt_redirected.
- if (instr->InstructionBits() == rtCallRedirInstr) {
- Redirection* redirection = Redirection::FromSwiInstruction(instr);
- int32_t arg0 = get_register(a0);
- int32_t arg1 = get_register(a1);
- int32_t arg2 = get_register(a2);
- int32_t arg3 = get_register(a3);
-
- int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
- // Args 4 and 5 are on the stack after the reserved space for args 0..3.
- int32_t arg4 = stack_pointer[4];
- int32_t arg5 = stack_pointer[5];
-
- bool fp_call =
- (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
-
- if (!IsMipsSoftFloatABI) {
- // With the hard floating point calling convention, double
- // arguments are passed in FPU registers. Fetch the arguments
- // from there and call the builtin using soft floating point
- // convention.
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- arg0 = get_fpu_register(f12);
- arg1 = get_fpu_register(f13);
- arg2 = get_fpu_register(f14);
- arg3 = get_fpu_register(f15);
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- arg0 = get_fpu_register(f12);
- arg1 = get_fpu_register(f13);
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- arg0 = get_fpu_register(f12);
- arg1 = get_fpu_register(f13);
- arg2 = get_register(a2);
- break;
- default:
- break;
- }
- }
-
- // This is dodgy but it works because the C entry stubs are never moved.
- // See comment in codegen-arm.cc and bug 1242173.
- int32_t saved_ra = get_register(ra);
-
- intptr_t external =
- reinterpret_cast<intptr_t>(redirection->external_function());
-
- // Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
- // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
- // simulator. Soft-float has additional abstraction of ExternalReference,
- // to support serialization.
- if (fp_call) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- if (::v8::internal::FLAG_trace_sim) {
- double dval0, dval1;
- int32_t ival;
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- GetFpArgs(&dval0, &dval1);
- PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(target), dval0, dval1);
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- GetFpArgs(&dval0);
- PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(target), dval0);
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- GetFpArgs(&dval0, &ival);
- PrintF("Call to host function at %p with args %f, %d",
- FUNCTION_ADDR(target), dval0, ival);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double result = target(arg0, arg1, arg2, arg3);
- SetFpResult(result);
- } else {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- uint64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- int32_t gpreg_pair[2];
- memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
- set_register(v0, gpreg_pair[0]);
- set_register(v1, gpreg_pair[1]);
- }
- } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
- // See DirectCEntryStub::GenerateCall for explanation of register usage.
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x\n",
- FUNCTION_ADDR(target), arg1);
- }
- v8::Handle<v8::Value> result = target(arg1);
- *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
- set_register(v0, arg0);
- } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
- // See DirectCEntryStub::GenerateCall for explanation of register usage.
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x\n",
- FUNCTION_ADDR(target), arg1, arg2);
- }
- v8::Handle<v8::Value> result = target(arg1, arg2);
- *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
- set_register(v0, arg0);
- } else {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF(
- "Call to host function at %p "
- "args %08x, %08x, %08x, %08x, %08x, %08x\n",
- FUNCTION_ADDR(target),
- arg0,
- arg1,
- arg2,
- arg3,
- arg4,
- arg5);
- }
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- set_register(v0, static_cast<int32_t>(result));
- set_register(v1, static_cast<int32_t>(result >> 32));
- }
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x : %08x\n", get_register(v1), get_register(v0));
- }
- set_register(ra, saved_ra);
- set_pc(get_register(ra));
-
- } else if (func == BREAK && code <= kMaxStopCode) {
- if (IsWatchpoint(code)) {
- PrintWatchpoint(code);
- } else {
- IncreaseStopCounter(code);
- HandleStop(code, instr);
- }
- } else {
- // All remaining break_ codes, and all traps are handled here.
- MipsDebugger dbg(this);
- dbg.Debug();
- }
-}
-
-
-// Stop helper functions.
-bool Simulator::IsWatchpoint(uint32_t code) {
- return (code <= kMaxWatchpointCode);
-}
-
-
-void Simulator::PrintWatchpoint(uint32_t code) {
- MipsDebugger dbg(this);
- ++break_count_;
- PrintF("\n---- break %d marker: %3d (instr count: %8d) ----------"
- "----------------------------------",
- code, break_count_, icount_);
- dbg.PrintAllRegs(); // Print registers and continue running.
-}
-
-
-void Simulator::HandleStop(uint32_t code, Instruction* instr) {
- // Stop if it is enabled, otherwise go on jumping over the stop
- // and the message address.
- if (IsEnabledStop(code)) {
- MipsDebugger dbg(this);
- dbg.Stop(instr);
- } else {
- set_pc(get_pc() + 2 * Instruction::kInstrSize);
- }
-}
-
-
-bool Simulator::IsStopInstruction(Instruction* instr) {
- int32_t func = instr->FunctionFieldRaw();
- uint32_t code = static_cast<uint32_t>(instr->Bits(25, 6));
- return (func == BREAK) && code > kMaxWatchpointCode && code <= kMaxStopCode;
-}
-
-
-bool Simulator::IsEnabledStop(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- ASSERT(code > kMaxWatchpointCode);
- return !(watched_stops[code].count & kStopDisabledBit);
-}
-
-
-void Simulator::EnableStop(uint32_t code) {
- if (!IsEnabledStop(code)) {
- watched_stops[code].count &= ~kStopDisabledBit;
- }
-}
-
-
-void Simulator::DisableStop(uint32_t code) {
- if (IsEnabledStop(code)) {
- watched_stops[code].count |= kStopDisabledBit;
- }
-}
-
-
-void Simulator::IncreaseStopCounter(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- if ((watched_stops[code].count & ~(1 << 31)) == 0x7fffffff) {
- PrintF("Stop counter for code %i has overflowed.\n"
- "Enabling this code and reseting the counter to 0.\n", code);
- watched_stops[code].count = 0;
- EnableStop(code);
- } else {
- watched_stops[code].count++;
- }
-}
-
-
-// Print a stop status.
-void Simulator::PrintStopInfo(uint32_t code) {
- if (code <= kMaxWatchpointCode) {
- PrintF("That is a watchpoint, not a stop.\n");
- return;
- } else if (code > kMaxStopCode) {
- PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
- return;
- }
- const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
- int32_t count = watched_stops[code].count & ~kStopDisabledBit;
- // Don't print the state of unused breakpoints.
- if (count != 0) {
- if (watched_stops[code].desc) {
- PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
- code, code, state, count, watched_stops[code].desc);
- } else {
- PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
- code, code, state, count);
- }
- }
-}
-
-
-void Simulator::SignalExceptions() {
- for (int i = 1; i < kNumExceptions; i++) {
- if (exceptions[i] != 0) {
- V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.", i);
- }
- }
-}
-
-
-// Handle execution based on instruction types.
-
-void Simulator::ConfigureTypeRegister(Instruction* instr,
- int32_t& alu_out,
- int64_t& i64hilo,
- uint64_t& u64hilo,
- int32_t& next_pc,
- bool& do_interrupt) {
- // Every local variable declared here needs to be const.
- // This is to make sure that changed values are sent back to
- // DecodeTypeRegister correctly.
-
- // Instruction fields.
- const Opcode op = instr->OpcodeFieldRaw();
- const int32_t rs_reg = instr->RsValue();
- const int32_t rs = get_register(rs_reg);
- const uint32_t rs_u = static_cast<uint32_t>(rs);
- const int32_t rt_reg = instr->RtValue();
- const int32_t rt = get_register(rt_reg);
- const uint32_t rt_u = static_cast<uint32_t>(rt);
- const int32_t rd_reg = instr->RdValue();
- const uint32_t sa = instr->SaValue();
-
- const int32_t fs_reg = instr->FsValue();
-
-
- // ---------- Configuration.
- switch (op) {
- case COP1: // Coprocessor instructions.
- switch (instr->RsFieldRaw()) {
- case BC1: // Handled in DecodeTypeImmed, should never come here.
- UNREACHABLE();
- break;
- case CFC1:
- // At the moment only FCSR is supported.
- ASSERT(fs_reg == kFCSRRegister);
- alu_out = FCSR_;
- break;
- case MFC1:
- alu_out = get_fpu_register(fs_reg);
- break;
- case MFHC1:
- UNIMPLEMENTED_MIPS();
- break;
- case CTC1:
- case MTC1:
- case MTHC1:
- // Do the store in the execution step.
- break;
- case S:
- case D:
- case W:
- case L:
- case PS:
- // Do everything in the execution step.
- break;
- default:
- UNIMPLEMENTED_MIPS();
- };
- break;
- case COP1X:
- break;
- case SPECIAL:
- switch (instr->FunctionFieldRaw()) {
- case JR:
- case JALR:
- next_pc = get_register(instr->RsValue());
- break;
- case SLL:
- alu_out = rt << sa;
- break;
- case SRL:
- if (rs_reg == 0) {
- // Regular logical right shift of a word by a fixed number of
- // bits instruction. RS field is always equal to 0.
- alu_out = rt_u >> sa;
- } else {
- // Logical right-rotate of a word by a fixed number of bits. This
- // is special case of SRL instruction, added in MIPS32 Release 2.
- // RS field is equal to 00001.
- alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
- }
- break;
- case SRA:
- alu_out = rt >> sa;
- break;
- case SLLV:
- alu_out = rt << rs;
- break;
- case SRLV:
- if (sa == 0) {
- // Regular logical right-shift of a word by a variable number of
- // bits instruction. SA field is always equal to 0.
- alu_out = rt_u >> rs;
- } else {
- // Logical right-rotate of a word by a variable number of bits.
- // This is special case od SRLV instruction, added in MIPS32
- // Release 2. SA field is equal to 00001.
- alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
- }
- break;
- case SRAV:
- alu_out = rt >> rs;
- break;
- case MFHI:
- alu_out = get_register(HI);
- break;
- case MFLO:
- alu_out = get_register(LO);
- break;
- case MULT:
- i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
- break;
- case MULTU:
- u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
- break;
- case ADD:
- if (HaveSameSign(rs, rt)) {
- if (rs > 0) {
- exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - rt);
- } else if (rs < 0) {
- exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt);
- }
- }
- alu_out = rs + rt;
- break;
- case ADDU:
- alu_out = rs + rt;
- break;
- case SUB:
- if (!HaveSameSign(rs, rt)) {
- if (rs > 0) {
- exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue + rt);
- } else if (rs < 0) {
- exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt);
- }
- }
- alu_out = rs - rt;
- break;
- case SUBU:
- alu_out = rs - rt;
- break;
- case AND:
- alu_out = rs & rt;
- break;
- case OR:
- alu_out = rs | rt;
- break;
- case XOR:
- alu_out = rs ^ rt;
- break;
- case NOR:
- alu_out = ~(rs | rt);
- break;
- case SLT:
- alu_out = rs < rt ? 1 : 0;
- break;
- case SLTU:
- alu_out = rs_u < rt_u ? 1 : 0;
- break;
- // Break and trap instructions.
- case BREAK:
-
- do_interrupt = true;
- break;
- case TGE:
- do_interrupt = rs >= rt;
- break;
- case TGEU:
- do_interrupt = rs_u >= rt_u;
- break;
- case TLT:
- do_interrupt = rs < rt;
- break;
- case TLTU:
- do_interrupt = rs_u < rt_u;
- break;
- case TEQ:
- do_interrupt = rs == rt;
- break;
- case TNE:
- do_interrupt = rs != rt;
- break;
- case MOVN:
- case MOVZ:
- case MOVCI:
- // No action taken on decode.
- break;
- case DIV:
- case DIVU:
- // div and divu never raise exceptions.
- break;
- default:
- UNREACHABLE();
- };
- break;
- case SPECIAL2:
- switch (instr->FunctionFieldRaw()) {
- case MUL:
- alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
- break;
- case CLZ:
- alu_out = __builtin_clz(rs_u);
- break;
- default:
- UNREACHABLE();
- };
- break;
- case SPECIAL3:
- switch (instr->FunctionFieldRaw()) {
- case INS: { // Mips32r2 instruction.
- // Interpret rd field as 5-bit msb of insert.
- uint16_t msb = rd_reg;
- // Interpret sa field as 5-bit lsb of insert.
- uint16_t lsb = sa;
- uint16_t size = msb - lsb + 1;
- uint32_t mask = (1 << size) - 1;
- alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
- break;
- }
- case EXT: { // Mips32r2 instruction.
- // Interpret rd field as 5-bit msb of extract.
- uint16_t msb = rd_reg;
- // Interpret sa field as 5-bit lsb of extract.
- uint16_t lsb = sa;
- uint16_t size = msb + 1;
- uint32_t mask = (1 << size) - 1;
- alu_out = (rs_u & (mask << lsb)) >> lsb;
- break;
- }
- default:
- UNREACHABLE();
- };
- break;
- default:
- UNREACHABLE();
- };
-}
-
-
-void Simulator::DecodeTypeRegister(Instruction* instr) {
- // Instruction fields.
- const Opcode op = instr->OpcodeFieldRaw();
- const int32_t rs_reg = instr->RsValue();
- const int32_t rs = get_register(rs_reg);
- const uint32_t rs_u = static_cast<uint32_t>(rs);
- const int32_t rt_reg = instr->RtValue();
- const int32_t rt = get_register(rt_reg);
- const uint32_t rt_u = static_cast<uint32_t>(rt);
- const int32_t rd_reg = instr->RdValue();
-
- const int32_t fr_reg = instr->FrValue();
- const int32_t fs_reg = instr->FsValue();
- const int32_t ft_reg = instr->FtValue();
- const int32_t fd_reg = instr->FdValue();
- int64_t i64hilo = 0;
- uint64_t u64hilo = 0;
-
- // ALU output.
- // It should not be used as is. Instructions using it should always
- // initialize it first.
- int32_t alu_out = 0x12345678;
-
- // For break and trap instructions.
- bool do_interrupt = false;
-
- // For jr and jalr.
- // Get current pc.
- int32_t current_pc = get_pc();
- // Next pc
- int32_t next_pc = 0;
-
- // Set up the variables if needed before executing the instruction.
- ConfigureTypeRegister(instr,
- alu_out,
- i64hilo,
- u64hilo,
- next_pc,
- do_interrupt);
-
- // ---------- Raise exceptions triggered.
- SignalExceptions();
-
- // ---------- Execution.
- switch (op) {
- case COP1:
- switch (instr->RsFieldRaw()) {
- case BC1: // Branch on coprocessor condition.
- UNREACHABLE();
- break;
- case CFC1:
- set_register(rt_reg, alu_out);
- case MFC1:
- set_register(rt_reg, alu_out);
- break;
- case MFHC1:
- UNIMPLEMENTED_MIPS();
- break;
- case CTC1:
- // At the moment only FCSR is supported.
- ASSERT(fs_reg == kFCSRRegister);
- FCSR_ = registers_[rt_reg];
- break;
- case MTC1:
- FPUregisters_[fs_reg] = registers_[rt_reg];
- break;
- case MTHC1:
- UNIMPLEMENTED_MIPS();
- break;
- case S:
- float f;
- switch (instr->FunctionFieldRaw()) {
- case CVT_D_S:
- f = get_fpu_register_float(fs_reg);
- set_fpu_register_double(fd_reg, static_cast<double>(f));
- break;
- case CVT_W_S:
- case CVT_L_S:
- case TRUNC_W_S:
- case TRUNC_L_S:
- case ROUND_W_S:
- case ROUND_L_S:
- case FLOOR_W_S:
- case FLOOR_L_S:
- case CEIL_W_S:
- case CEIL_L_S:
- case CVT_PS_S:
- UNIMPLEMENTED_MIPS();
- break;
- default:
- UNREACHABLE();
- }
- break;
- case D:
- double ft, fs;
- uint32_t cc, fcsr_cc;
- int64_t i64;
- fs = get_fpu_register_double(fs_reg);
- ft = get_fpu_register_double(ft_reg);
- cc = instr->FCccValue();
- fcsr_cc = get_fcsr_condition_bit(cc);
- switch (instr->FunctionFieldRaw()) {
- case ADD_D:
- set_fpu_register_double(fd_reg, fs + ft);
- break;
- case SUB_D:
- set_fpu_register_double(fd_reg, fs - ft);
- break;
- case MUL_D:
- set_fpu_register_double(fd_reg, fs * ft);
- break;
- case DIV_D:
- set_fpu_register_double(fd_reg, fs / ft);
- break;
- case ABS_D:
- set_fpu_register_double(fd_reg, fs < 0 ? -fs : fs);
- break;
- case MOV_D:
- set_fpu_register_double(fd_reg, fs);
- break;
- case NEG_D:
- set_fpu_register_double(fd_reg, -fs);
- break;
- case SQRT_D:
- set_fpu_register_double(fd_reg, sqrt(fs));
- break;
- case C_UN_D:
- set_fcsr_bit(fcsr_cc, isnan(fs) || isnan(ft));
- break;
- case C_EQ_D:
- set_fcsr_bit(fcsr_cc, (fs == ft));
- break;
- case C_UEQ_D:
- set_fcsr_bit(fcsr_cc, (fs == ft) || (isnan(fs) || isnan(ft)));
- break;
- case C_OLT_D:
- set_fcsr_bit(fcsr_cc, (fs < ft));
- break;
- case C_ULT_D:
- set_fcsr_bit(fcsr_cc, (fs < ft) || (isnan(fs) || isnan(ft)));
- break;
- case C_OLE_D:
- set_fcsr_bit(fcsr_cc, (fs <= ft));
- break;
- case C_ULE_D:
- set_fcsr_bit(fcsr_cc, (fs <= ft) || (isnan(fs) || isnan(ft)));
- break;
- case CVT_W_D: // Convert double to word.
- // Rounding modes are not yet supported.
- ASSERT((FCSR_ & 3) == 0);
- // In rounding mode 0 it should behave like ROUND.
- case ROUND_W_D: // Round double to word (round half to even).
- {
- double rounded = floor(fs + 0.5);
- int32_t result = static_cast<int32_t>(rounded);
- if ((result & 1) != 0 && result - fs == 0.5) {
- // If the number is halfway between two integers,
- // round to the even one.
- result--;
- }
- set_fpu_register(fd_reg, result);
- if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
- }
- }
- break;
- case TRUNC_W_D: // Truncate double to word (round towards 0).
- {
- double rounded = trunc(fs);
- int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register(fd_reg, result);
- if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
- }
- }
- break;
- case FLOOR_W_D: // Round double to word towards negative infinity.
- {
- double rounded = floor(fs);
- int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register(fd_reg, result);
- if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
- }
- }
- break;
- case CEIL_W_D: // Round double to word towards positive infinity.
- {
- double rounded = ceil(fs);
- int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register(fd_reg, result);
- if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
- }
- }
- break;
- case CVT_S_D: // Convert double to float (single).
- set_fpu_register_float(fd_reg, static_cast<float>(fs));
- break;
- case CVT_L_D: { // Mips32r2: Truncate double to 64-bit long-word.
- double rounded = trunc(fs);
- i64 = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
- break;
- }
- case TRUNC_L_D: { // Mips32r2 instruction.
- double rounded = trunc(fs);
- i64 = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
- break;
- }
- case ROUND_L_D: { // Mips32r2 instruction.
- double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
- i64 = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
- break;
- }
- case FLOOR_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(floor(fs));
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
- break;
- case CEIL_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(ceil(fs));
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
- break;
- case C_F_D:
- UNIMPLEMENTED_MIPS();
- break;
- default:
- UNREACHABLE();
- }
- break;
- case W:
- switch (instr->FunctionFieldRaw()) {
- case CVT_S_W: // Convert word to float (single).
- alu_out = get_fpu_register(fs_reg);
- set_fpu_register_float(fd_reg, static_cast<float>(alu_out));
- break;
- case CVT_D_W: // Convert word to double.
- alu_out = get_fpu_register(fs_reg);
- set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
- break;
- default:
- UNREACHABLE();
- };
- break;
- case L:
- switch (instr->FunctionFieldRaw()) {
- case CVT_D_L: // Mips32r2 instruction.
- // Watch the signs here, we want 2 32-bit vals
- // to make a sign-64.
- i64 = static_cast<uint32_t>(get_fpu_register(fs_reg));
- i64 |= static_cast<int64_t>(get_fpu_register(fs_reg + 1)) << 32;
- set_fpu_register_double(fd_reg, static_cast<double>(i64));
- break;
- case CVT_S_L:
- UNIMPLEMENTED_MIPS();
- break;
- default:
- UNREACHABLE();
- }
- break;
- case PS:
- break;
- default:
- UNREACHABLE();
- };
- break;
- case COP1X:
- switch (instr->FunctionFieldRaw()) {
- case MADD_D:
- double fr, ft, fs;
- fr = get_fpu_register_double(fr_reg);
- fs = get_fpu_register_double(fs_reg);
- ft = get_fpu_register_double(ft_reg);
- set_fpu_register_double(fd_reg, fs * ft + fr);
- break;
- default:
- UNREACHABLE();
- };
- break;
- case SPECIAL:
- switch (instr->FunctionFieldRaw()) {
- case JR: {
- Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
- current_pc+Instruction::kInstrSize);
- BranchDelayInstructionDecode(branch_delay_instr);
- set_pc(next_pc);
- pc_modified_ = true;
- break;
- }
- case JALR: {
- Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
- current_pc+Instruction::kInstrSize);
- BranchDelayInstructionDecode(branch_delay_instr);
- set_register(31, current_pc + 2 * Instruction::kInstrSize);
- set_pc(next_pc);
- pc_modified_ = true;
- break;
- }
- // Instructions using HI and LO registers.
- case MULT:
- set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
- set_register(HI, static_cast<int32_t>(i64hilo >> 32));
- break;
- case MULTU:
- set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
- set_register(HI, static_cast<int32_t>(u64hilo >> 32));
- break;
- case DIV:
- // Divide by zero and overflow was not checked in the configuration
- // step - div and divu do not raise exceptions. On division by 0 and
- // on overflow (INT_MIN/-1), the result will be UNPREDICTABLE.
- if (rt != 0 && !(rs == INT_MIN && rt == -1)) {
- set_register(LO, rs / rt);
- set_register(HI, rs % rt);
- }
- break;
- case DIVU:
- if (rt_u != 0) {
- set_register(LO, rs_u / rt_u);
- set_register(HI, rs_u % rt_u);
- }
- break;
- // Break and trap instructions.
- case BREAK:
- case TGE:
- case TGEU:
- case TLT:
- case TLTU:
- case TEQ:
- case TNE:
- if (do_interrupt) {
- SoftwareInterrupt(instr);
- }
- break;
- // Conditional moves.
- case MOVN:
- if (rt) set_register(rd_reg, rs);
- break;
- case MOVCI: {
- uint32_t cc = instr->FBccValue();
- uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
- if (instr->Bit(16)) { // Read Tf bit.
- if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
- } else {
- if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
- }
- break;
- }
- case MOVZ:
- if (!rt) set_register(rd_reg, rs);
- break;
- default: // For other special opcodes we do the default operation.
- set_register(rd_reg, alu_out);
- };
- break;
- case SPECIAL2:
- switch (instr->FunctionFieldRaw()) {
- case MUL:
- set_register(rd_reg, alu_out);
- // HI and LO are UNPREDICTABLE after the operation.
- set_register(LO, Unpredictable);
- set_register(HI, Unpredictable);
- break;
- default: // For other special2 opcodes we do the default operation.
- set_register(rd_reg, alu_out);
- }
- break;
- case SPECIAL3:
- switch (instr->FunctionFieldRaw()) {
- case INS:
- // Ins instr leaves result in Rt, rather than Rd.
- set_register(rt_reg, alu_out);
- break;
- case EXT:
- // Ext instr leaves result in Rt, rather than Rd.
- set_register(rt_reg, alu_out);
- break;
- default:
- UNREACHABLE();
- };
- break;
- // Unimplemented opcodes raised an error in the configuration step before,
- // so we can use the default here to set the destination register in common
- // cases.
- default:
- set_register(rd_reg, alu_out);
- };
-}
-
-
-// Type 2: instructions using a 16 bytes immediate. (e.g. addi, beq).
-void Simulator::DecodeTypeImmediate(Instruction* instr) {
- // Instruction fields.
- Opcode op = instr->OpcodeFieldRaw();
- int32_t rs = get_register(instr->RsValue());
- uint32_t rs_u = static_cast<uint32_t>(rs);
- int32_t rt_reg = instr->RtValue(); // Destination register.
- int32_t rt = get_register(rt_reg);
- int16_t imm16 = instr->Imm16Value();
-
- int32_t ft_reg = instr->FtValue(); // Destination register.
-
- // Zero extended immediate.
- uint32_t oe_imm16 = 0xffff & imm16;
- // Sign extended immediate.
- int32_t se_imm16 = imm16;
-
- // Get current pc.
- int32_t current_pc = get_pc();
- // Next pc.
- int32_t next_pc = bad_ra;
-
- // Used for conditional branch instructions.
- bool do_branch = false;
- bool execute_branch_delay_instruction = false;
-
- // Used for arithmetic instructions.
- int32_t alu_out = 0;
- // Floating point.
- double fp_out = 0.0;
- uint32_t cc, cc_value, fcsr_cc;
-
- // Used for memory instructions.
- int32_t addr = 0x0;
- // Value to be written in memory.
- uint32_t mem_value = 0x0;
-
- // ---------- Configuration (and execution for REGIMM).
- switch (op) {
- // ------------- COP1. Coprocessor instructions.
- case COP1:
- switch (instr->RsFieldRaw()) {
- case BC1: // Branch on coprocessor condition.
- cc = instr->FBccValue();
- fcsr_cc = get_fcsr_condition_bit(cc);
- cc_value = test_fcsr_bit(fcsr_cc);
- do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
- break;
- default:
- UNREACHABLE();
- };
- break;
- // ------------- REGIMM class.
- case REGIMM:
- switch (instr->RtFieldRaw()) {
- case BLTZ:
- do_branch = (rs < 0);
- break;
- case BLTZAL:
- do_branch = rs < 0;
- break;
- case BGEZ:
- do_branch = rs >= 0;
- break;
- case BGEZAL:
- do_branch = rs >= 0;
- break;
- default:
- UNREACHABLE();
- };
- switch (instr->RtFieldRaw()) {
- case BLTZ:
- case BLTZAL:
- case BGEZ:
- case BGEZAL:
- // Branch instructions common part.
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + kBranchReturnOffset);
- }
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
- default:
- break;
- };
- break; // case REGIMM.
- // ------------- Branch instructions.
- // When comparing to zero, the encoding of rt field is always 0, so we don't
- // need to replace rt with zero.
- case BEQ:
- do_branch = (rs == rt);
- break;
- case BNE:
- do_branch = rs != rt;
- break;
- case BLEZ:
- do_branch = rs <= 0;
- break;
- case BGTZ:
- do_branch = rs > 0;
- break;
- // ------------- Arithmetic instructions.
- case ADDI:
- if (HaveSameSign(rs, se_imm16)) {
- if (rs > 0) {
- exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - se_imm16);
- } else if (rs < 0) {
- exceptions[kIntegerUnderflow] =
- rs < (Registers::kMinValue - se_imm16);
- }
- }
- alu_out = rs + se_imm16;
- break;
- case ADDIU:
- alu_out = rs + se_imm16;
- break;
- case SLTI:
- alu_out = (rs < se_imm16) ? 1 : 0;
- break;
- case SLTIU:
- alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
- break;
- case ANDI:
- alu_out = rs & oe_imm16;
- break;
- case ORI:
- alu_out = rs | oe_imm16;
- break;
- case XORI:
- alu_out = rs ^ oe_imm16;
- break;
- case LUI:
- alu_out = (oe_imm16 << 16);
- break;
- // ------------- Memory instructions.
- case LB:
- addr = rs + se_imm16;
- alu_out = ReadB(addr);
- break;
- case LH:
- addr = rs + se_imm16;
- alu_out = ReadH(addr, instr);
- break;
- case LWL: {
- // al_offset is offset of the effective address within an aligned word.
- uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
- uint8_t byte_shift = kPointerAlignmentMask - al_offset;
- uint32_t mask = (1 << byte_shift * 8) - 1;
- addr = rs + se_imm16 - al_offset;
- alu_out = ReadW(addr, instr);
- alu_out <<= byte_shift * 8;
- alu_out |= rt & mask;
- break;
- }
- case LW:
- addr = rs + se_imm16;
- alu_out = ReadW(addr, instr);
- break;
- case LBU:
- addr = rs + se_imm16;
- alu_out = ReadBU(addr);
- break;
- case LHU:
- addr = rs + se_imm16;
- alu_out = ReadHU(addr, instr);
- break;
- case LWR: {
- // al_offset is offset of the effective address within an aligned word.
- uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
- uint8_t byte_shift = kPointerAlignmentMask - al_offset;
- uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
- addr = rs + se_imm16 - al_offset;
- alu_out = ReadW(addr, instr);
- alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
- alu_out |= rt & mask;
- break;
- }
- case SB:
- addr = rs + se_imm16;
- break;
- case SH:
- addr = rs + se_imm16;
- break;
- case SWL: {
- uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
- uint8_t byte_shift = kPointerAlignmentMask - al_offset;
- uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
- addr = rs + se_imm16 - al_offset;
- mem_value = ReadW(addr, instr) & mask;
- mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
- break;
- }
- case SW:
- addr = rs + se_imm16;
- break;
- case SWR: {
- uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
- uint32_t mask = (1 << al_offset * 8) - 1;
- addr = rs + se_imm16 - al_offset;
- mem_value = ReadW(addr, instr);
- mem_value = (rt << al_offset * 8) | (mem_value & mask);
- break;
- }
- case LWC1:
- addr = rs + se_imm16;
- alu_out = ReadW(addr, instr);
- break;
- case LDC1:
- addr = rs + se_imm16;
- fp_out = ReadD(addr, instr);
- break;
- case SWC1:
- case SDC1:
- addr = rs + se_imm16;
- break;
- default:
- UNREACHABLE();
- };
-
- // ---------- Raise exceptions triggered.
- SignalExceptions();
-
- // ---------- Execution.
- switch (op) {
- // ------------- Branch instructions.
- case BEQ:
- case BNE:
- case BLEZ:
- case BGTZ:
- // Branch instructions common part.
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + 2* Instruction::kInstrSize);
- }
- } else {
- next_pc = current_pc + 2 * Instruction::kInstrSize;
- }
- break;
- // ------------- Arithmetic instructions.
- case ADDI:
- case ADDIU:
- case SLTI:
- case SLTIU:
- case ANDI:
- case ORI:
- case XORI:
- case LUI:
- set_register(rt_reg, alu_out);
- break;
- // ------------- Memory instructions.
- case LB:
- case LH:
- case LWL:
- case LW:
- case LBU:
- case LHU:
- case LWR:
- set_register(rt_reg, alu_out);
- break;
- case SB:
- WriteB(addr, static_cast<int8_t>(rt));
- break;
- case SH:
- WriteH(addr, static_cast<uint16_t>(rt), instr);
- break;
- case SWL:
- WriteW(addr, mem_value, instr);
- break;
- case SW:
- WriteW(addr, rt, instr);
- break;
- case SWR:
- WriteW(addr, mem_value, instr);
- break;
- case LWC1:
- set_fpu_register(ft_reg, alu_out);
- break;
- case LDC1:
- set_fpu_register_double(ft_reg, fp_out);
- break;
- case SWC1:
- addr = rs + se_imm16;
- WriteW(addr, get_fpu_register(ft_reg), instr);
- break;
- case SDC1:
- addr = rs + se_imm16;
- WriteD(addr, get_fpu_register_double(ft_reg), instr);
- break;
- default:
- break;
- };
-
-
- if (execute_branch_delay_instruction) {
- // Execute branch delay slot
- // We don't check for end_sim_pc. First it should not be met as the current
- // pc is valid. Secondly a jump should always execute its branch delay slot.
- Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
- BranchDelayInstructionDecode(branch_delay_instr);
- }
-
- // If needed update pc after the branch delay execution.
- if (next_pc != bad_ra) {
- set_pc(next_pc);
- }
-}
-
-
-// Type 3: instructions using a 26 bytes immediate. (e.g. j, jal).
-void Simulator::DecodeTypeJump(Instruction* instr) {
- // Get current pc.
- int32_t current_pc = get_pc();
- // Get unchanged bits of pc.
- int32_t pc_high_bits = current_pc & 0xf0000000;
- // Next pc.
- int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
-
- // Execute branch delay slot.
- // We don't check for end_sim_pc. First it should not be met as the current pc
- // is valid. Secondly a jump should always execute its branch delay slot.
- Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
- BranchDelayInstructionDecode(branch_delay_instr);
-
- // Update pc and ra if necessary.
- // Do this after the branch delay execution.
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + 2 * Instruction::kInstrSize);
- }
- set_pc(next_pc);
- pc_modified_ = true;
-}
-
-
-// Executes the current instruction.
-void Simulator::InstructionDecode(Instruction* instr) {
- if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
- }
- pc_modified_ = false;
- if (::v8::internal::FLAG_trace_sim) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // Use a reasonably large buffer.
- v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
- PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr),
- buffer.start());
- }
-
- switch (instr->InstructionType()) {
- case Instruction::kRegisterType:
- DecodeTypeRegister(instr);
- break;
- case Instruction::kImmediateType:
- DecodeTypeImmediate(instr);
- break;
- case Instruction::kJumpType:
- DecodeTypeJump(instr);
- break;
- default:
- UNSUPPORTED();
- }
- if (!pc_modified_) {
- set_register(pc, reinterpret_cast<int32_t>(instr) +
- Instruction::kInstrSize);
- }
-}
-
-
-
-void Simulator::Execute() {
- // Get the PC to simulate. Cannot use the accessor here as we need the
- // raw PC value and not the one used as input to arithmetic instructions.
- int program_counter = get_pc();
- if (::v8::internal::FLAG_stop_sim_at == 0) {
- // Fast version of the dispatch loop without checking whether the simulator
- // should be stopping at a particular executed instruction.
- while (program_counter != end_sim_pc) {
- Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
- InstructionDecode(instr);
- program_counter = get_pc();
- }
- } else {
- // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
- // we reach the particular instuction count.
- while (program_counter != end_sim_pc) {
- Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
- if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
- MipsDebugger dbg(this);
- dbg.Debug();
- } else {
- InstructionDecode(instr);
- }
- program_counter = get_pc();
- }
- }
-}
-
-
-void Simulator::CallInternal(byte* entry) {
- // Prepare to execute the code at entry.
- set_register(pc, reinterpret_cast<int32_t>(entry));
- // Put down marker for end of simulation. The simulator will stop simulation
- // when the PC reaches this value. By saving the "end simulation" value into
- // the LR the simulation stops when returning to this call point.
- set_register(ra, end_sim_pc);
-
- // Remember the values of callee-saved registers.
- // The code below assumes that r9 is not used as sb (static base) in
- // simulator code and therefore is regarded as a callee-saved register.
- int32_t s0_val = get_register(s0);
- int32_t s1_val = get_register(s1);
- int32_t s2_val = get_register(s2);
- int32_t s3_val = get_register(s3);
- int32_t s4_val = get_register(s4);
- int32_t s5_val = get_register(s5);
- int32_t s6_val = get_register(s6);
- int32_t s7_val = get_register(s7);
- int32_t gp_val = get_register(gp);
- int32_t sp_val = get_register(sp);
- int32_t fp_val = get_register(fp);
-
- // Set up the callee-saved registers with a known value. To be able to check
- // that they are preserved properly across JS execution.
- int32_t callee_saved_value = icount_;
- set_register(s0, callee_saved_value);
- set_register(s1, callee_saved_value);
- set_register(s2, callee_saved_value);
- set_register(s3, callee_saved_value);
- set_register(s4, callee_saved_value);
- set_register(s5, callee_saved_value);
- set_register(s6, callee_saved_value);
- set_register(s7, callee_saved_value);
- set_register(gp, callee_saved_value);
- set_register(fp, callee_saved_value);
-
- // Start the simulation.
- Execute();
-
- // Check that the callee-saved registers have been preserved.
- CHECK_EQ(callee_saved_value, get_register(s0));
- CHECK_EQ(callee_saved_value, get_register(s1));
- CHECK_EQ(callee_saved_value, get_register(s2));
- CHECK_EQ(callee_saved_value, get_register(s3));
- CHECK_EQ(callee_saved_value, get_register(s4));
- CHECK_EQ(callee_saved_value, get_register(s5));
- CHECK_EQ(callee_saved_value, get_register(s6));
- CHECK_EQ(callee_saved_value, get_register(s7));
- CHECK_EQ(callee_saved_value, get_register(gp));
- CHECK_EQ(callee_saved_value, get_register(fp));
-
- // Restore callee-saved registers with the original value.
- set_register(s0, s0_val);
- set_register(s1, s1_val);
- set_register(s2, s2_val);
- set_register(s3, s3_val);
- set_register(s4, s4_val);
- set_register(s5, s5_val);
- set_register(s6, s6_val);
- set_register(s7, s7_val);
- set_register(gp, gp_val);
- set_register(sp, sp_val);
- set_register(fp, fp_val);
-}
-
-
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Set up arguments.
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(a0, va_arg(parameters, int32_t));
- set_register(a1, va_arg(parameters, int32_t));
- set_register(a2, va_arg(parameters, int32_t));
- set_register(a3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
- - kCArgsSlotsSize);
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
- CallInternal(entry);
-
- // Pop stack passed arguments.
- CHECK_EQ(entry_stack, get_register(sp));
- set_register(sp, original_stack);
-
- int32_t result = get_register(v0);
- return result;
-}
-
-
-double Simulator::CallFP(byte* entry, double d0, double d1) {
- if (!IsMipsSoftFloatABI) {
- set_fpu_register_double(f12, d0);
- set_fpu_register_double(f14, d1);
- } else {
- int buffer[2];
- ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- memcpy(buffer, &d0, sizeof(d0));
- set_dw_register(a0, buffer);
- memcpy(buffer, &d1, sizeof(d1));
- set_dw_register(a2, buffer);
- }
- CallInternal(entry);
- if (!IsMipsSoftFloatABI) {
- return get_fpu_register_double(f0);
- } else {
- return get_double_from_register_pair(v0);
- }
-}
-
-
-uintptr_t Simulator::PushAddress(uintptr_t address) {
- int new_sp = get_register(sp) - sizeof(uintptr_t);
- uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
- *stack_slot = address;
- set_register(sp, new_sp);
- return new_sp;
-}
-
-
-uintptr_t Simulator::PopAddress() {
- int current_sp = get_register(sp);
- uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
- uintptr_t address = *stack_slot;
- set_register(sp, current_sp + sizeof(uintptr_t));
- return address;
-}
-
-
-#undef UNSUPPORTED
-
-} } // namespace v8::internal
-
-#endif // USE_SIMULATOR
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mips/simulator-mips.h b/src/3rdparty/v8/src/mips/simulator-mips.h
deleted file mode 100644
index 67f5953..0000000
--- a/src/3rdparty/v8/src/mips/simulator-mips.h
+++ /dev/null
@@ -1,445 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Declares a Simulator for MIPS instructions if we are not generating a native
-// MIPS binary. This Simulator allows us to run and debug MIPS code generation
-// on regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
-// which will start execution in the Simulator or forwards to the real entry
-// on a MIPS HW platform.
-
-#ifndef V8_MIPS_SIMULATOR_MIPS_H_
-#define V8_MIPS_SIMULATOR_MIPS_H_
-
-#include "allocation.h"
-#include "constants-mips.h"
-
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native mips platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- entry(p0, p1, p2, p3, p4)
-
-typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, int, Address, int, Isolate*);
-
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm_regexp_matcher.
-// The fifth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on mips uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch() { }
-};
-
-} } // namespace v8::internal
-
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) >= limit ? \
- reinterpret_cast<uintptr_t>(this) - limit : 0)
-
-#else // !defined(USE_SIMULATOR)
-// Running with a simulator.
-
-#include "hashmap.h"
-#include "assembler.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Utility functions
-
-class CachePage {
- public:
- static const int LINE_VALID = 0;
- static const int LINE_INVALID = 1;
-
- static const int kPageShift = 12;
- static const int kPageSize = 1 << kPageShift;
- static const int kPageMask = kPageSize - 1;
- static const int kLineShift = 2; // The cache line is only 4 bytes right now.
- static const int kLineLength = 1 << kLineShift;
- static const int kLineMask = kLineLength - 1;
-
- CachePage() {
- memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
- }
-
- char* ValidityByte(int offset) {
- return &validity_map_[offset >> kLineShift];
- }
-
- char* CachedData(int offset) {
- return &data_[offset];
- }
-
- private:
- char data_[kPageSize]; // The cached data.
- static const int kValidityMapSize = kPageSize >> kLineShift;
- char validity_map_[kValidityMapSize]; // One byte per line.
-};
-
-class Simulator {
- public:
- friend class MipsDebugger;
-
- // Registers are declared in order. See SMRL chapter 2.
- enum Register {
- no_reg = -1,
- zero_reg = 0,
- at,
- v0, v1,
- a0, a1, a2, a3,
- t0, t1, t2, t3, t4, t5, t6, t7,
- s0, s1, s2, s3, s4, s5, s6, s7,
- t8, t9,
- k0, k1,
- gp,
- sp,
- s8,
- ra,
- // LO, HI, and pc.
- LO,
- HI,
- pc, // pc must be the last register.
- kNumSimuRegisters,
- // aliases
- fp = s8
- };
-
- // Coprocessor registers.
- // Generated code will always use doubles. So we will only use even registers.
- enum FPURegister {
- f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
- f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters.
- f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
- f26, f27, f28, f29, f30, f31,
- kNumFPURegisters
- };
-
- explicit Simulator(Isolate* isolate);
- ~Simulator();
-
- // The currently executing Simulator instance. Potentially there can be one
- // for each native thread.
- static Simulator* current(v8::internal::Isolate* isolate);
-
- // Accessors for register state. Reading the pc value adheres to the MIPS
- // architecture specification and is off by a 8 from the currently executing
- // instruction.
- void set_register(int reg, int32_t value);
- void set_dw_register(int dreg, const int* dbl);
- int32_t get_register(int reg) const;
- double get_double_from_register_pair(int reg);
- // Same for FPURegisters.
- void set_fpu_register(int fpureg, int32_t value);
- void set_fpu_register_float(int fpureg, float value);
- void set_fpu_register_double(int fpureg, double value);
- int32_t get_fpu_register(int fpureg) const;
- int64_t get_fpu_register_long(int fpureg) const;
- float get_fpu_register_float(int fpureg) const;
- double get_fpu_register_double(int fpureg) const;
- void set_fcsr_bit(uint32_t cc, bool value);
- bool test_fcsr_bit(uint32_t cc);
- bool set_fcsr_round_error(double original, double rounded);
-
- // Special case of set_register and get_register to access the raw PC value.
- void set_pc(int32_t value);
- int32_t get_pc() const;
-
- // Accessor to the internal simulator stack area.
- uintptr_t StackLimit() const;
-
- // Executes MIPS instructions until the PC reaches end_sim_pc.
- void Execute();
-
- // Call on program start.
- static void Initialize(Isolate* isolate);
-
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- int32_t Call(byte* entry, int argument_count, ...);
- // Alternative: call a 2-argument double function.
- double CallFP(byte* entry, double d0, double d1);
-
- // Push an address onto the JS stack.
- uintptr_t PushAddress(uintptr_t address);
-
- // Pop an address from the JS stack.
- uintptr_t PopAddress();
-
- // Debugger input.
- void set_last_debugger_input(char* input);
- char* last_debugger_input() { return last_debugger_input_; }
-
- // ICache checking.
- static void FlushICache(v8::internal::HashMap* i_cache, void* start,
- size_t size);
-
- // Returns true if pc register contains one of the 'special_values' defined
- // below (bad_ra, end_sim_pc).
- bool has_bad_pc() const;
-
- private:
- enum special_values {
- // Known bad pc value to ensure that the simulator does not execute
- // without being properly setup.
- bad_ra = -1,
- // A pc value used to signal the simulator to stop execution. Generally
- // the ra is set to this value on transition from native C code to
- // simulated execution, so that the simulator can "return" to the native
- // C code.
- end_sim_pc = -2,
- // Unpredictable value.
- Unpredictable = 0xbadbeaf
- };
-
- // Unsupported instructions use Format to print an error and stop execution.
- void Format(Instruction* instr, const char* format);
-
- // Read and write memory.
- inline uint32_t ReadBU(int32_t addr);
- inline int32_t ReadB(int32_t addr);
- inline void WriteB(int32_t addr, uint8_t value);
- inline void WriteB(int32_t addr, int8_t value);
-
- inline uint16_t ReadHU(int32_t addr, Instruction* instr);
- inline int16_t ReadH(int32_t addr, Instruction* instr);
- // Note: Overloaded on the sign of the value.
- inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
- inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
-
- inline int ReadW(int32_t addr, Instruction* instr);
- inline void WriteW(int32_t addr, int value, Instruction* instr);
-
- inline double ReadD(int32_t addr, Instruction* instr);
- inline void WriteD(int32_t addr, double value, Instruction* instr);
-
- // Operations depending on endianness.
- // Get Double Higher / Lower word.
- inline int32_t GetDoubleHIW(double* addr);
- inline int32_t GetDoubleLOW(double* addr);
- // Set Double Higher / Lower word.
- inline int32_t SetDoubleHIW(double* addr);
- inline int32_t SetDoubleLOW(double* addr);
-
- // Executing is handled based on the instruction type.
- void DecodeTypeRegister(Instruction* instr);
-
- // Helper function for DecodeTypeRegister.
- void ConfigureTypeRegister(Instruction* instr,
- int32_t& alu_out,
- int64_t& i64hilo,
- uint64_t& u64hilo,
- int32_t& next_pc,
- bool& do_interrupt);
-
- void DecodeTypeImmediate(Instruction* instr);
- void DecodeTypeJump(Instruction* instr);
-
- // Used for breakpoints and traps.
- void SoftwareInterrupt(Instruction* instr);
-
- // Stop helper functions.
- bool IsWatchpoint(uint32_t code);
- void PrintWatchpoint(uint32_t code);
- void HandleStop(uint32_t code, Instruction* instr);
- bool IsStopInstruction(Instruction* instr);
- bool IsEnabledStop(uint32_t code);
- void EnableStop(uint32_t code);
- void DisableStop(uint32_t code);
- void IncreaseStopCounter(uint32_t code);
- void PrintStopInfo(uint32_t code);
-
-
- // Executes one instruction.
- void InstructionDecode(Instruction* instr);
- // Execute one instruction placed in a branch delay slot.
- void BranchDelayInstructionDecode(Instruction* instr) {
- if (instr->InstructionBits() == nopInstr) {
- // Short-cut generic nop instructions. They are always valid and they
- // never change the simulator state.
- set_register(pc, reinterpret_cast<int32_t>(instr) +
- Instruction::kInstrSize);
- return;
- }
-
- if (instr->IsForbiddenInBranchDelay()) {
- V8_Fatal(__FILE__, __LINE__,
- "Eror:Unexpected %i opcode in a branch delay slot.",
- instr->OpcodeValue());
- }
- InstructionDecode(instr);
- }
-
- // ICache.
- static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
- int size);
- static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
-
- enum Exception {
- none,
- kIntegerOverflow,
- kIntegerUnderflow,
- kDivideByZero,
- kNumExceptions
- };
- int16_t exceptions[kNumExceptions];
-
- // Exceptions.
- void SignalExceptions();
-
- // Runtime call support.
- static void* RedirectExternalReference(void* external_function,
- ExternalReference::Type type);
-
- // For use in calls that take double value arguments.
- void GetFpArgs(double* x, double* y);
- void GetFpArgs(double* x);
- void GetFpArgs(double* x, int32_t* y);
- void SetFpResult(const double& result);
-
- void CallInternal(byte* entry);
-
- // Architecture state.
- // Registers.
- int32_t registers_[kNumSimuRegisters];
- // Coprocessor Registers.
- int32_t FPUregisters_[kNumFPURegisters];
- // FPU control register.
- uint32_t FCSR_;
-
- // Simulator support.
- // Allocate 1MB for stack.
- static const size_t stack_size_ = 1 * 1024*1024;
- char* stack_;
- bool pc_modified_;
- int icount_;
- int break_count_;
-
- // Debugger input.
- char* last_debugger_input_;
-
- // Icache simulation.
- v8::internal::HashMap* i_cache_;
-
- v8::internal::Isolate* isolate_;
-
- // Registered breakpoints.
- Instruction* break_pc_;
- Instr break_instr_;
-
- // Stop is disabled if bit 31 is set.
- static const uint32_t kStopDisabledBit = 1 << 31;
-
- // A stop is enabled, meaning the simulator will stop when meeting the
- // instruction, if bit 31 of watched_stops[code].count is unset.
- // The value watched_stops[code].count & ~(1 << 31) indicates how many times
- // the breakpoint was hit or gone through.
- struct StopCountAndDesc {
- uint32_t count;
- char* desc;
- };
- StopCountAndDesc watched_stops[kMaxStopCode + 1];
-};
-
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
- FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current())->Call( \
- entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
-
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. Setting the c_limit to indicate a very small
-// stack cause stack overflow errors, since the simulator ignores the input.
-// This is unlikely to be an issue in practice, though it might cause testing
-// trouble down the line.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit();
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
- }
-};
-
-} } // namespace v8::internal
-
-#endif // !defined(USE_SIMULATOR)
-#endif // V8_MIPS_SIMULATOR_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/stub-cache-mips.cc b/src/3rdparty/v8/src/mips/stub-cache-mips.cc
deleted file mode 100644
index fd467fa..0000000
--- a/src/3rdparty/v8/src/mips/stub-cache-mips.cc
+++ /dev/null
@@ -1,4149 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- // Number of the cache entry, not scaled.
- Register offset,
- Register scratch,
- Register scratch2,
- Register offset_scratch) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
- uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
- uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
- // Check the relative positions of the address fields.
- ASSERT(value_off_addr > key_off_addr);
- ASSERT((value_off_addr - key_off_addr) % 4 == 0);
- ASSERT((value_off_addr - key_off_addr) < (256 * 4));
- ASSERT(map_off_addr > key_off_addr);
- ASSERT((map_off_addr - key_off_addr) % 4 == 0);
- ASSERT((map_off_addr - key_off_addr) < (256 * 4));
-
- Label miss;
- Register base_addr = scratch;
- scratch = no_reg;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ sll(offset_scratch, offset, 1);
- __ Addu(offset_scratch, offset_scratch, offset);
-
- // Calculate the base address of the entry.
- __ li(base_addr, Operand(key_offset));
- __ sll(at, offset_scratch, kPointerSizeLog2);
- __ Addu(base_addr, base_addr, at);
-
- // Check that the key in the entry matches the name.
- __ lw(at, MemOperand(base_addr, 0));
- __ Branch(&miss, ne, name, Operand(at));
-
- // Check the map matches.
- __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
- __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Branch(&miss, ne, at, Operand(scratch2));
-
- // Get the code entry from the cache.
- Register code = scratch2;
- scratch2 = no_reg;
- __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
- // Check that the flags match what we're looking for.
- Register flags_reg = base_addr;
- base_addr = no_reg;
- __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
- __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
- __ Branch(&miss, ne, flags_reg, Operand(flags));
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
-
- // Miss: fall through.
- __ bind(&miss);
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be internalized and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<String> name,
- Register scratch0,
- Register scratch1) {
- ASSERT(name->IsInternalizedString());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ Branch(miss_label, ne, scratch0, Operand(zero_reg));
-
- // Check that receiver is a JSObject.
- __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Load properties array.
- Register properties = scratch0;
- __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- // Check that the properties array is a dictionary.
- __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- Register tmp = properties;
- __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
- __ Branch(miss_label, ne, map, Operand(tmp));
-
- // Restore the temporarily used register.
- __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
-
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 12.
- ASSERT(sizeof(Entry) == 12);
-
- // Make sure the flags does not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- ASSERT(!scratch.is(receiver));
- ASSERT(!scratch.is(name));
- ASSERT(!extra.is(receiver));
- ASSERT(!extra.is(name));
- ASSERT(!extra.is(scratch));
- ASSERT(!extra2.is(receiver));
- ASSERT(!extra2.is(name));
- ASSERT(!extra2.is(scratch));
- ASSERT(!extra2.is(extra));
-
- // Check register validity.
- ASSERT(!scratch.is(no_reg));
- ASSERT(!extra.is(no_reg));
- ASSERT(!extra2.is(no_reg));
- ASSERT(!extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
- extra2, extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
- __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Addu(scratch, scratch, at);
- uint32_t mask = kPrimaryTableSize - 1;
- // We shift out the last two bits because they are not part of the hash and
- // they are always 01 for maps.
- __ srl(scratch, scratch, kHeapObjectTagSize);
- __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
- __ And(scratch, scratch, Operand(mask));
-
- // Probe the primary table.
- ProbeTable(isolate,
- masm,
- flags,
- kPrimary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary probe.
- __ srl(at, name, kHeapObjectTagSize);
- __ Subu(scratch, scratch, at);
- uint32_t mask2 = kSecondaryTableSize - 1;
- __ Addu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
- __ And(scratch, scratch, Operand(mask2));
-
- // Probe the secondary table.
- ProbeTable(isolate,
- masm,
- flags,
- kSecondary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
- extra2, extra3);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- // Load the global or builtins object from the current context.
- __ lw(prototype,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- __ lw(prototype,
- FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- __ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
- // Load the initial map. The global functions all have initial maps.
- __ lw(prototype,
- FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss) {
- Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ lw(prototype,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ASSERT(!prototype.is(at));
- __ li(at, isolate->global_object());
- __ Branch(miss, ne, prototype, Operand(at));
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
- // Load its initial map. The global functions all have initial maps.
- __ li(prototype, Handle<Map>(function->initial_map()));
- // Load the prototype from the initial map.
- __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- Handle<JSObject> holder,
- PropertyIndex index) {
- DoGenerateFastPropertyLoad(
- masm, dst, src, index.is_inobject(holder), index.translate(holder));
-}
-
-
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
- int offset = index * kPointerSize;
- if (!inobject) {
- // Calculate the offset into the properties array.
- offset = offset + FixedArray::kHeaderSize;
- __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- src = dst;
- }
- __ lw(dst, FieldMemOperand(src, offset));
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss_label);
-
- // Check that the object is a JS array.
- __ GetObjectType(receiver, scratch, scratch);
- __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Load length directly from the JS array.
- __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Ret();
-}
-
-
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-// If this is not needed, scratch1 and scratch2 may be the same register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, smi, t0);
-
- // Check that the object is a string.
- __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ And(scratch2, scratch1, Operand(kIsNotStringMask));
- // The cast is to resolve the overload for the argument of 0x0.
- __ Branch(non_string_object,
- ne,
- scratch2,
- Operand(static_cast<int32_t>(kStringTag)));
-}
-
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss,
- bool support_wrappers) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
- support_wrappers ? &check_wrapper : miss);
-
- // Load length directly from the string.
- __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
- __ Ret();
-
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
-
- // Unwrap the value and check if the wrapped value is a string.
- __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
- }
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(v0, scratch1);
- __ Ret();
-}
-
-
-// Generate StoreField code, value is passed in a0 register.
-// After executing generated code, the receiver_reg and name_reg
-// may be clobbered.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name,
- Register receiver_reg,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- // a0 : value.
- Label exit;
-
- LookupResult lookup(masm->isolate());
- object->Lookup(*name, &lookup);
- if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
- // In sloppy mode, we could just return the value and be done. However, we
- // might be in strict mode, where we have to throw. Since we cannot tell,
- // go into slow case unconditionally.
- __ jmp(miss_label);
- return;
- }
-
- // Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, mode);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
- // Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- if (lookup.IsFound()) {
- holder = lookup.holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- // We need an extra register, push
- __ push(name_reg);
- Label miss_pop, done_check;
- CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, &miss_pop);
- __ jmp(&done_check);
- __ bind(&miss_pop);
- __ pop(name_reg);
- __ jmp(miss_label);
- __ bind(&done_check);
- __ pop(name_reg);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ push(receiver_reg);
- __ li(a2, Operand(transition));
- __ Push(a2, a0);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- masm->isolate()),
- 3, 1);
- return;
- }
-
- if (!transition.is_null()) {
- // Update the map of the object.
- __ li(scratch1, Operand(transition));
- __ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- name_reg,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
-
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ sw(a0, FieldMemOperand(receiver_reg, offset));
-
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(a0, &exit, scratch1);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, a0);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array.
- __ lw(scratch1,
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ sw(a0, FieldMemOperand(scratch1, offset));
-
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(a0, &exit);
-
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, a0);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- }
-
- // Return the value (register v0).
- __ bind(&exit);
- __ mov(v0, a0);
- __ Ret();
-}
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Handle<Code> code = (kind == Code::LOAD_IC)
- ? masm->isolate()->builtins()->LoadIC_Miss()
- : masm->isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateStoreMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
- Handle<Code> code = (kind == Code::STORE_IC)
- ? masm->isolate()->builtins()->StoreIC_Miss()
- : masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-static void GenerateCallFunction(MacroAssembler* masm,
- Handle<Object> object,
- const ParameterCount& arguments,
- Label* miss,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- a0: receiver
- // -- a1: function to call
- // -----------------------------------
- // Check that the function really is a function.
- __ JumpIfSmi(a1, miss);
- __ GetObjectType(a1, a3, a3);
- __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, arguments.immediate() * kPointerSize));
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(a1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ li(scratch, Operand(interceptor));
- __ Push(scratch, receiver, holder);
- __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(scratch);
- __ li(scratch, Operand(ExternalReference::isolate_address()));
- __ push(scratch);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ PrepareCEntryArgs(6);
- __ PrepareCEntryFunction(ref);
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-}
-
-
-static const int kFastApiCallArguments = 4;
-
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
- Register scratch) {
- ASSERT(Smi::FromInt(0) == 0);
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(zero_reg);
- }
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
- __ Drop(kFastApiCallArguments);
-}
-
-
-static void GenerateFastApiDirectCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- // ----------- S t a t e -------------
- // -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee JS function
- // -- sp[8] : call data
- // -- sp[12] : isolate
- // -- sp[16] : last JS argument
- // -- ...
- // -- sp[(argc + 3) * 4] : first JS argument
- // -- sp[(argc + 4) * 4] : receiver
- // -----------------------------------
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(t1, function);
- __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
-
- // Pass the additional arguments.
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ li(a0, api_call_info);
- __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
- } else {
- __ li(t2, call_data);
- }
-
- __ li(t3, Operand(ExternalReference::isolate_address()));
- // Store JS function, call data and isolate.
- __ sw(t1, MemOperand(sp, 1 * kPointerSize));
- __ sw(t2, MemOperand(sp, 2 * kPointerSize));
- __ sw(t3, MemOperand(sp, 3 * kPointerSize));
-
- // Prepare arguments.
- __ Addu(a2, sp, Operand(3 * kPointerSize));
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
- // struct from the function (which is currently the case). This means we pass
- // the first argument in a1 instead of a0. TryCallApiFunctionAndReturn
- // will handle setting up a0.
-
- // a1 = v8::Arguments&
- // Arguments is built at sp + 1 (sp is a reserved spot for ra).
- __ Addu(a1, sp, kPointerSize);
-
- // v8::Arguments::implicit_args_
- __ sw(a2, MemOperand(a1, 0 * kPointerSize));
- // v8::Arguments::values_
- __ Addu(t0, a2, Operand(argc * kPointerSize));
- __ sw(t0, MemOperand(a1, 1 * kPointerSize));
- // v8::Arguments::length_ = argc
- __ li(t0, Operand(argc));
- __ sw(t0, MemOperand(a1, 2 * kPointerSize));
- // v8::Arguments::is_construct_call = 0
- __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
-
- const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference ref =
- ExternalReference(&fun,
- ExternalReference::DIRECT_API_CALL,
- masm->isolate());
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
-}
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- Code::ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
- Counters* counters = masm->isolate()->counters();
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- __ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
- scratch1, scratch2);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
- } else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm);
- __ Branch(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
-
- // Call a runtime function to load the interceptor property.
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- 6);
- // Restore the name_ register.
- __ pop(name_);
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Register scratch,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- __ Push(holder, name_);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- }
- // If interceptor returns no-result sentinel, call the constant function.
- __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
- __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- Code::ExtraICState extra_ic_state_;
-};
-
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- __ li(scratch, Operand(cell));
- __ lw(scratch,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(miss, ne, scratch, Operand(at));
-}
-
-
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-
-// Convert and store int passed in register ival to IEEE 754 single precision
-// floating point value at memory location (dst + 4 * wordoffset)
-// If FPU is available use it for conversion.
-static void StoreIntAsFloat(MacroAssembler* masm,
- Register dst,
- Register wordoffset,
- Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ mtc1(ival, f0);
- __ cvt_s_w(f0, f0);
- __ sll(scratch1, wordoffset, 2);
- __ addu(scratch1, dst, scratch1);
- __ swc1(f0, MemOperand(scratch1, 0));
- } else {
- // FPU is not available, do manual conversions.
-
- Label not_special, done;
- // Move sign bit from source to destination. This works because the sign
- // bit in the exponent word of the double has the same position and polarity
- // as the 2's complement sign bit in a Smi.
- ASSERT(kBinary32SignMask == 0x80000000u);
-
- __ And(fval, ival, Operand(kBinary32SignMask));
- // Negate value if it is negative.
- __ subu(scratch1, zero_reg, ival);
- __ Movn(ival, scratch1, fval);
-
- // We have -1, 0 or 1, which we treat specially. Register ival contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ Branch(&not_special, gt, ival, Operand(1));
-
- // For 1 or -1 we need to or in the 0 exponent (biased).
- static const uint32_t exponent_word_for_1 =
- kBinary32ExponentBias << kBinary32ExponentShift;
-
- __ Xor(scratch1, ival, Operand(1));
- __ li(scratch2, exponent_word_for_1);
- __ or_(scratch2, fval, scratch2);
- __ Movz(fval, scratch2, scratch1); // Only if ival is equal to 1.
- __ Branch(&done);
-
- __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- Register zeros = scratch2;
- __ Clz(zeros, ival);
-
- // Compute exponent and or it into the exponent register.
- __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
- __ subu(scratch1, scratch1, zeros);
-
- __ sll(scratch1, scratch1, kBinary32ExponentShift);
- __ or_(fval, fval, scratch1);
-
- // Shift up the source chopping the top bit off.
- __ Addu(zeros, zeros, Operand(1));
- // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
- __ sllv(ival, ival, zeros);
- // And the top (top 20 bits).
- __ srl(scratch1, ival, kBitsPerInt - kBinary32MantissaBits);
- __ or_(fval, fval, scratch1);
-
- __ bind(&done);
-
- __ sll(scratch1, wordoffset, 2);
- __ addu(scratch1, dst, scratch1);
- __ sw(fval, MemOperand(scratch1, 0));
- }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void StubCompiler::GenerateTailCall(Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<String> name,
- int save_at_depth,
- Label* miss,
- PrototypeCheckType check) {
- Handle<JSObject> first = object;
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp));
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsInternalizedString()) {
- name = factory()->InternalizeString(name);
- }
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
- StringDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- Register map_reg = scratch1;
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- Handle<Map> current_map(current->map());
- // CheckMap implicitly loads the map of |reg| into |map_reg|.
- __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
- } else {
- __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (heap()->InNewSpace(*prototype)) {
- // The prototype is in new space; we cannot store a reference to it
- // in the code. Load it from the map.
- __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- } else {
- // The prototype is in old space; load it directly.
- __ li(reg, Operand(prototype));
- }
- }
-
- if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp));
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
-
- // Log the check depth.
- LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- // Check the holder map.
- __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
- DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
- }
-
- // Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
- Label* miss) {
- if (!miss->is_unused()) {
- __ Branch(success);
- __ bind(miss);
- GenerateLoadMiss(masm(), kind());
- }
-}
-
-
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* success,
- Handle<ExecutableAccessorInfo> callback) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
-
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- ASSERT(!reg.is(scratch2()));
- ASSERT(!reg.is(scratch3()));
- ASSERT(!reg.is(scratch4()));
-
- // Load the properties dictionary.
- Register dictionary = scratch4();
- __ lw(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- &miss,
- &probe_done,
- dictionary,
- this->name(),
- scratch2(),
- scratch3());
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // pointer into the dictionary. Check that the value is the callback.
- Register pointer = scratch3();
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lw(scratch2(), FieldMemOperand(pointer, kValueOffset));
- __ Branch(&miss, ne, scratch2(), Operand(callback));
- }
-
- HandlerFrontendFooter(success, &miss);
- return reg;
-}
-
-
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Label* success,
- Handle<GlobalObject> global) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (!global.is_null()) {
- GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
- }
-
- if (!last->HasFastProperties()) {
- __ lw(scratch2(), FieldMemOperand(reg, HeapObject::kMapOffset));
- __ lw(scratch2(), FieldMemOperand(scratch2(), Map::kPrototypeOffset));
- __ Branch(&miss, ne, scratch2(),
- Operand(isolate()->factory()->null_value()));
- }
-
- HandlerFrontendFooter(success, &miss);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex index) {
- GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
- __ Ret();
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
- // Return the constant value.
- __ LoadHeapObject(v0, value);
- __ Ret();
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadCallback(
- Register reg,
- Handle<ExecutableAccessorInfo> callback) {
- // Build AccessorInfo::args_ list on the stack and push property name below
- // the exit frame to make GC aware of them and store pointers to them.
- __ push(receiver());
- __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
- if (heap()->InNewSpace(callback->data())) {
- __ li(scratch3(), callback);
- __ lw(scratch3(), FieldMemOperand(scratch3(),
- ExecutableAccessorInfo::kDataOffset));
- } else {
- __ li(scratch3(), Handle<Object>(callback->data(),
- callback->GetIsolate()));
- }
- __ Subu(sp, sp, 4 * kPointerSize);
- __ sw(reg, MemOperand(sp, 3 * kPointerSize));
- __ sw(scratch3(), MemOperand(sp, 2 * kPointerSize));
- __ li(scratch3(), Operand(ExternalReference::isolate_address()));
- __ sw(scratch3(), MemOperand(sp, 1 * kPointerSize));
- __ sw(name(), MemOperand(sp, 0 * kPointerSize));
-
- __ mov(a2, scratch2()); // Saved in case scratch2 == a1.
- __ mov(a1, sp); // a1 (first argument - see note below) = Handle<String>
-
- // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
- // struct from the function (which is currently the case). This means we pass
- // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
- // will handle setting up a0.
-
- const int kApiStackSpace = 1;
- FrameScope frame_scope(masm(), StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // Create AccessorInfo instance on the stack above the exit frame with
- // scratch2 (internal::Object** args_) as the data.
- __ sw(a2, MemOperand(sp, kPointerSize));
- // a2 (second argument - see note above) = AccessorInfo&
- __ Addu(a2, sp, kPointerSize);
-
- const int kStackUnwindSpace = 5;
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference ref =
- ExternalReference(&fun,
- ExternalReference::DIRECT_GETTER_CALL,
- masm()->isolate());
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
- Register holder_reg,
- Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name) {
- ASSERT(interceptor_holder->HasNamedInterceptor());
- ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- ExecutableAccessorInfo* callback =
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
- compile_followup_inline = callback->getter() != NULL &&
- callback->IsCompatibleReceiver(*object);
- }
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code,
- // the FIELD case might cause a miss during the prototype check.
- bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perfrom_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (must_preserve_receiver_reg) {
- __ Push(receiver(), holder_reg, this->name());
- } else {
- __ Push(holder_reg, this->name());
- }
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method).
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
- __ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
- frame_scope.GenerateLeaveFrame();
- __ Ret();
-
- __ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
- // Leave the internal frame.
- }
- GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- PushInterceptorArguments(masm(), receiver(), holder_reg,
- this->name(), interceptor_holder);
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
-}
-
-
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ Branch(miss, ne, a2, Operand(name));
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ lw(a0, MemOperand(sp, argc * kPointerSize));
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(a0, miss);
- CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ li(a3, Operand(cell));
- __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(a1, miss);
- __ GetObjectType(a1, a3, a3);
- __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
-
- // Check the shared function info. Make sure it hasn't changed.
- __ li(a3, Handle<SharedFunctionInfo>(function->shared()));
- __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Branch(miss, ne, t0, Operand(a3));
- } else {
- __ Branch(miss, ne, a1, Operand(function));
- }
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- const int argc = arguments().immediate();
-
- // Get the receiver of the function from the stack into a0.
- __ lw(a0, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(a0, &miss, t0);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
- GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
-
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- Register receiver = a1;
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, v0, t0,
- name, &miss);
-
- if (argc == 0) {
- // Nothing to do, just return the length.
- __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Drop(argc + 1);
- __ Ret();
- } else {
- Label call_builtin;
- if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- Register elements = t2;
- Register end_elements = t1;
- // Get the elements array of the object.
- __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- v0,
- Heap::kFixedArrayMapRootIndex,
- &check_double,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into v0 and calculate new length.
- __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
-
- // Check if value is a smi.
- __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
- __ JumpIfNotSmi(t0, &with_write_barrier);
-
- // Save new length.
- __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(end_elements, elements, end_elements);
- const int kEndElementsOffset =
- FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ Addu(end_elements, end_elements, kEndElementsOffset);
- __ sw(t0, MemOperand(end_elements));
-
- // Check for a smi.
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&check_double);
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- a0,
- Heap::kFixedDoubleArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r0 and calculate new length.
- __ lw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(a0, a0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ Branch(&call_builtin, gt, a0, Operand(t0));
-
- __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(
- t0, a0, elements, a3, t1, a2, t5,
- &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ sw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Check for a smi.
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&with_write_barrier);
-
- __ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(a3, t3, &not_fast_object);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(a3, t3, &call_builtin);
-
- __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&call_builtin, eq, t3, Operand(at));
- // edx: receiver
- // a3: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- a3,
- t3,
- &try_holey_map);
- __ mov(a2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- a3,
- t3,
- &call_builtin);
- __ mov(a2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(a3, a3, &call_builtin);
- }
-
- // Save new length.
- __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(end_elements, elements, end_elements);
- __ Addu(end_elements, end_elements, kEndElementsOffset);
- __ sw(t0, MemOperand(end_elements));
-
- __ RecordWrite(elements,
- end_elements,
- t0,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&attempt_to_grow_elements);
- // v0: array's length + 1.
- // t0: elements' length.
-
- if (!FLAG_inline_new) {
- __ Branch(&call_builtin);
- }
-
- __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(a2, &no_fast_elements_check);
- __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(t3, t3, &call_builtin);
- __ bind(&no_fast_elements_check);
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(
- masm()->isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(
- masm()->isolate());
-
- const int kAllocationDelta = 4;
- // Load top and check if it is the end of elements.
- __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(end_elements, elements, end_elements);
- __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
- __ li(t3, Operand(new_space_allocation_top));
- __ lw(a3, MemOperand(t3));
- __ Branch(&call_builtin, ne, end_elements, Operand(a3));
-
- __ li(t5, Operand(new_space_allocation_limit));
- __ lw(t5, MemOperand(t5));
- __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
- __ Branch(&call_builtin, hi, a3, Operand(t5));
-
- // We fit and could grow elements.
- // Update new_space_allocation_top.
- __ sw(a3, MemOperand(t3));
- // Push the argument.
- __ sw(a2, MemOperand(end_elements));
- // Fill the rest with holes.
- __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ sw(a3, MemOperand(end_elements, i * kPointerSize));
- }
-
- // Update elements' and array's sizes.
- __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
- __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Elements are in new space, so write barrier is not required.
- __ Drop(argc + 1);
- __ Ret();
- }
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- masm()->isolate()),
- argc + 1,
- 1);
- }
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss, return_undefined, call_builtin;
- Register receiver = a1;
- Register elements = a3;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
- t0, v0, name, &miss);
-
- // Get the elements array of the object.
- __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- v0,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into t0 and calculate new length.
- __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Subu(t0, t0, Operand(Smi::FromInt(1)));
- __ Branch(&return_undefined, lt, t0, Operand(zero_reg));
-
- // Get the last element.
- __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- // We can't address the last element in one operation. Compute the more
- // expensive shift first, and use an offset later on.
- __ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(elements, elements, t1);
- __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Branch(&call_builtin, eq, v0, Operand(t2));
-
- // Set the array's length.
- __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Fill with the hole.
- __ sw(t2, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&return_undefined);
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
- masm()->isolate()),
- argc + 1,
- 1);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
-
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- v0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- v0, holder, a1, a3, t0, name, &miss);
-
- Register receiver = a1;
- Register index = t1;
- Register result = v0;
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(v0, Heap::kNanValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in a2.
- __ li(a2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- v0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- v0, holder, a1, a3, t0, name, &miss);
-
- Register receiver = v0;
- Register index = t1;
- Register scratch = a3;
- Register result = v0;
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(v0, Heap::kempty_stringRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in a2.
- __ li(a2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(a1, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = a1;
- __ lw(code, MemOperand(sp, 0 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ And(code, code, Operand(Smi::FromInt(0xffff)));
-
- StringCharFromCodeGenerator generator(code, v0);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // a2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- if (!CpuFeatures::IsSupported(FPU)) {
- return Handle<Code>::null();
- }
-
- CpuFeatures::Scope scope_fpu(FPU);
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss, slow;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(a1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into v0.
- __ lw(v0, MemOperand(sp, 0 * kPointerSize));
-
- // If the argument is a smi, just return.
- STATIC_ASSERT(kSmiTag == 0);
- __ And(t0, v0, Operand(kSmiTagMask));
- __ Drop(argc + 1, eq, t0, Operand(zero_reg));
- __ Ret(eq, t0, Operand(zero_reg));
-
- __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
-
- Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
-
- // If fpu is enabled, we use the floor instruction.
-
- // Load the HeapNumber value.
- __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
-
- // Backup FCSR.
- __ cfc1(a3, FCSR);
- // Clearing FCSR clears the exception mask with no side-effects.
- __ ctc1(zero_reg, FCSR);
- // Convert the argument to an integer.
- __ floor_w_d(f0, f0);
-
- // Start checking for special cases.
- // Get the argument exponent and clear the sign bit.
- __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
- __ And(t2, t1, Operand(~HeapNumber::kSignMask));
- __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
-
- // Retrieve FCSR and check for fpu errors.
- __ cfc1(t5, FCSR);
- __ And(t5, t5, Operand(kFCSRExceptionFlagMask));
- __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
-
- // Check for NaN, Infinity, and -Infinity.
- // They are invariant through a Math.Floor call, so just
- // return the original argument.
- __ Subu(t3, t2, Operand(HeapNumber::kExponentMask
- >> HeapNumber::kMantissaBitsInTopWord));
- __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
- // We had an overflow or underflow in the conversion. Check if we
- // have a big exponent.
- // If greater or equal, the argument is already round and in v0.
- __ Branch(&restore_fcsr_and_return, ge, t3,
- Operand(HeapNumber::kMantissaBits));
- __ Branch(&wont_fit_smi);
-
- __ bind(&no_fpu_error);
- // Move the result back to v0.
- __ mfc1(v0, f0);
- // Check if the result fits into a smi.
- __ Addu(a1, v0, Operand(0x40000000));
- __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
- // Tag the result.
- STATIC_ASSERT(kSmiTag == 0);
- __ sll(v0, v0, kSmiTagSize);
-
- // Check for -0.
- __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
- // t1 already holds the HeapNumber exponent.
- __ And(t0, t1, Operand(HeapNumber::kSignMask));
- // If our HeapNumber is negative it was -0, so load its address and return.
- // Else v0 is loaded with 0, so we can also just return.
- __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
- __ lw(v0, MemOperand(sp, 0 * kPointerSize));
-
- __ bind(&restore_fcsr_and_return);
- // Restore FCSR and return.
- __ ctc1(a3, FCSR);
-
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&wont_fit_smi);
- // Restore FCSR and fall to slow case.
- __ ctc1(a3, FCSR);
-
- __ bind(&slow);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // a2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
- if (cell.is_null()) {
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(a1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into v0.
- __ lw(v0, MemOperand(sp, 0 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(v0, &not_smi);
-
- // Do bitwise not or do nothing depending on the sign of the
- // argument.
- __ sra(t0, v0, kBitsPerInt - 1);
- __ Xor(a1, v0, t0);
-
- // Add 1 or do nothing depending on the sign of the argument.
- __ Subu(v0, a1, t0);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ Branch(&slow, lt, v0, Operand(zero_reg));
-
- // Smi case done.
- __ Drop(argc + 1);
- __ Ret();
-
- // Check if the argument is a heap number and load its exponent and
- // sign.
- __ bind(&not_smi);
- __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
- __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ And(t0, a1, Operand(HeapNumber::kSignMask));
- __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
- __ Drop(argc + 1);
- __ Ret();
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ Xor(a1, a1, Operand(HeapNumber::kSignMask));
- __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
- __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ Drop(argc + 1);
- __ Ret();
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // a2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
-
- Counters* counters = isolate()->counters();
-
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
-
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(a1, &miss_before_stack_reserved);
-
- __ IncrementCounter(counters->call_const(), 1, a0, a3);
- __ IncrementCounter(counters->call_const_fast_api(), 1, a0, a3);
-
- ReserveSpaceForFastApiCall(masm(), a0);
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
- depth, &miss);
-
- GenerateFastApiDirectCall(masm(), optimization, argc);
-
- __ bind(&miss);
- FreeSpaceForFastApiCall(masm());
-
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(a1, &miss);
- }
-
- // Make sure that it's okay not to patch the on stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(masm()->isolate()->counters()->call_const(),
- 1, a0, a3);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
- name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ lw(a3, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, argc * kPointerSize));
- }
- break;
-
- case STRING_CHECK:
- // Check that the object is a string.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- a0, holder, a3, a1, t0, name, &miss);
- break;
-
- case SYMBOL_CHECK:
- // Check that the object is a symbol.
- __ GetObjectType(a1, a1, a3);
- __ Branch(&miss, ne, a3, Operand(SYMBOL_TYPE));
- break;
-
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(a1, &fast);
- __ GetObjectType(a1, a0, a0);
- __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- a0, holder, a3, a1, t0, name, &miss);
- break;
- }
- case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(t0, Heap::kTrueValueRootIndex);
- __ Branch(&fast, eq, a1, Operand(t0));
- __ LoadRoot(t0, Heap::kFalseValueRootIndex);
- __ Branch(&miss, ne, a1, Operand(t0));
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- a0, holder, a3, a1, t0, name, &miss);
- break;
- }
- }
-
- __ jmp(success);
-
- // Handle call cache miss.
- __ bind(&miss);
-
- GenerateMissBranch();
-}
-
-
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Handle<JSFunction> function) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label success;
-
- CompileHandlerFrontend(object, holder, name, check, &success);
- __ bind(&success);
- CompileHandlerBackend(function);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), a2, extra_state_);
- compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
- &miss);
-
- // Move returned value, the function to call, to a1.
- __ mov(a1, v0);
- // Restore receiver.
- __ lw(a0, MemOperand(sp, argc * kPointerSize));
-
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, argc * kPointerSize));
- }
-
- // Set up the context (function already in r1).
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Name register might be clobbered.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- a1, a2, a3, t0,
- &miss);
- __ bind(&miss);
- __ li(a2, Operand(Handle<String>(name))); // Restore name.
- Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
- // Check that the maps haven't changed.
- __ JumpIfSmi(a1, &miss, a3);
- CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss);
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-
- __ push(a1); // Receiver.
- __ li(a3, Operand(callback)); // Callback info.
- __ Push(a3, a2, a0);
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(a0);
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- __ push(a1);
- __ push(a0);
- ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(v0);
-
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(a1, &miss);
- CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss);
-
- GenerateStoreViaSetter(masm(), setter);
-
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> receiver,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(a1, a3, Handle<Map>(receiver->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(a1, a3, &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-
- __ Push(a1, a2, a0); // Receiver, name, value.
-
- __ li(a0, Operand(Smi::FromInt(strict_mode_)));
- __ push(a0); // Strict mode.
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
- masm()->isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ li(t0, Operand(cell));
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
- __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
- __ Branch(&miss, eq, t1, Operand(t2));
-
- // Store the value in the cell.
- __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
- __ mov(v0, a0); // Stored value must be returned in v0.
- // Cells are always rescanned, so no write barrier here.
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
- __ Ret();
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1, a1, a3);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Handle<GlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
-
- __ bind(&success);
- // Return undefined if maps of the full prototype chain is still the same.
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ Ret();
-
- // Return the generated code.
- return GetCode(Code::HANDLER_FRAGMENT, Code::NONEXISTENT, name);
-}
-
-
-Register* LoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { a0, a2, a3, a1, t0, t1 };
- return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { a1, a0, a2, a3, t0, t1 };
- return registers;
-}
-
-
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<String> name,
- Register name_reg,
- Label* miss) {
- __ Branch(miss, ne, name_reg, Operand(name));
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- __ push(a0);
- ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> global,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
- bool is_dont_delete) {
- Label success, miss;
-
- __ CheckMap(
- receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
-
- // Get the value from the cell.
- __ li(a3, Operand(cell));
- __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
-
- // Check for deleted property if property can actually be deleted.
- if (!is_dont_delete) {
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&miss, eq, t0, Operand(at));
- }
-
- HandlerFrontendFooter(&success, &miss);
- __ bind(&success);
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
- __ mov(v0, t0);
- __ Ret();
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements()) {
- Handle<Code> stub = KeyedLoadFastElementStub(
- receiver_map->instance_type() == JS_ARRAY_TYPE,
- elements_kind).GetCode(isolate());
- __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
- } else {
- Handle<Code> stub =
- KeyedLoadDictionaryElementStub().GetCode(isolate());
- __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
- }
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- Handle<String> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
- }
-
- __ JumpIfSmi(receiver(), &miss);
- Register map_reg = scratch1();
-
- int receiver_count = receiver_maps->length();
- __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
- eq, map_reg, Operand(receiver_maps->at(current)));
- }
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), kind());
-
- // Return the generated code.
- InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(Code::IC_FRAGMENT, type, name, state);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
-
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
-
- // Check that the name has not changed.
- __ Branch(&miss, ne, a1, Operand(name));
-
- // a3 is used as scratch register. a1 and a2 keep their values if a jump to
- // the miss label is generated.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- a2, a1, a3, t0,
- &miss);
- __ bind(&miss);
-
- __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
- Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub =
- KeyedStoreElementStub(is_js_array,
- elements_kind,
- grow_mode_).GetCode(isolate());
-
- __ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch
- // -----------------------------------
- Label miss;
- __ JumpIfSmi(a2, &miss);
-
- int receiver_count = receiver_maps->length();
- __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
- a3, Operand(receiver_maps->at(i)));
- } else {
- Label next_map;
- __ Branch(&next_map, ne, a3, Operand(receiver_maps->at(i)));
- __ li(a3, Operand(transitioned_maps->at(i)));
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // a0 : argc
- // a1 : constructor
- // ra : return address
- // [sp] : last argument
- Label generic_stub_call;
-
- // Use t7 for holding undefined which is used in several places below.
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are jump to the generic constructor stub which calls the actual
- // code for the function thereby hitting the break points.
- __ lw(t5, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(t5, SharedFunctionInfo::kDebugInfoOffset));
- __ Branch(&generic_stub_call, ne, a2, Operand(t7));
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // a1: constructor function
- // t7: undefined
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &generic_stub_call);
- __ GetObjectType(a2, a3, t0);
- __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // a0: argc
- // a1: constructor function
- // a2: initial map
- // t7: undefined
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Check(ne, "Function constructed by construct stub.",
- a3, Operand(JS_FUNCTION_TYPE));
-#endif
-
- // Now allocate the JSObject in new space.
- // a0: argc
- // a1: constructor function
- // a2: initial map
- // t7: undefined
- ASSERT(function->has_initial_map());
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-#ifdef DEBUG
- int instance_size = function->initial_map()->instance_size();
- __ Check(eq, "Instance size of initial map changed.",
- a3, Operand(instance_size >> kPointerSizeLog2));
-#endif
- __ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // a0: argc
- // a1: constructor function
- // a2: initial map
- // a3: object size (in words)
- // t4: JSObject (not tagged)
- // t7: undefined
- __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t5, t4);
- __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
- __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
- __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
- __ Addu(t5, t5, Operand(3 * kPointerSize));
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
-
- // Calculate the location of the first argument. The stack contains only the
- // argc arguments.
- __ sll(a1, a0, kPointerSizeLog2);
- __ Addu(a1, a1, sp);
-
- // Fill all the in-object properties with undefined.
- // a0: argc
- // a1: first argument
- // a3: object size (in words)
- // t4: JSObject (not tagged)
- // t5: First in-object property of JSObject (not tagged)
- // t7: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- Label not_passed, next;
- // Check if the argument assigned to the property is actually passed.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ Branch(&not_passed, less_equal, a0, Operand(arg_number));
- // Argument passed - find it on the stack.
- __ lw(a2, MemOperand(a1, (arg_number + 1) * -kPointerSize));
- __ sw(a2, MemOperand(t5));
- __ Addu(t5, t5, kPointerSize);
- __ jmp(&next);
- __ bind(&not_passed);
- // Set the property to undefined.
- __ sw(t7, MemOperand(t5));
- __ Addu(t5, t5, Operand(kPointerSize));
- __ bind(&next);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
- masm()->isolate());
- __ li(a2, Operand(constant));
- __ sw(a2, MemOperand(t5));
- __ Addu(t5, t5, kPointerSize);
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ sw(t7, MemOperand(t5));
- __ Addu(t5, t5, kPointerSize);
- }
-
- // a0: argc
- // t4: JSObject (not tagged)
- // Move argc to a1 and the JSObject to return to v0 and tag it.
- __ mov(a1, a0);
- __ mov(v0, t4);
- __ Or(v0, v0, Operand(kHeapObjectTag));
-
- // v0: JSObject
- // a1: argc
- // Remove caller arguments and receiver from the stack and return.
- __ sll(t0, a1, kPointerSizeLog2);
- __ Addu(sp, sp, t0);
- __ Addu(sp, sp, Operand(kPointerSize));
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1, a1, a2);
- __ IncrementCounter(counters->constructed_objects_stub(), 1, a1, a2);
- __ Ret();
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> generic_construct_stub =
- masm()->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label slow, miss_force_generic;
-
- Register key = a0;
- Register receiver = a1;
-
- __ JumpIfNotSmi(key, &miss_force_generic);
- __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ sra(a2, a0, kSmiTagSize);
- __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
- __ Ret();
-
- // Slow case, key and receiver still in a0 and a1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, a2, a3);
- // Entry registers are intact.
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-static bool IsElementTypeSigned(ElementsKind elements_kind) {
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- return true;
-
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- return false;
-
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- return false;
- }
- return false;
-}
-
-
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch0,
- Register scratch1,
- FPURegister double_scratch0,
- FPURegister double_scratch1,
- Label* fail) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- Label key_ok;
- // Check for smi or a smi inside a heap number. We convert the heap
- // number and check if the conversion is exact and fits into the smi
- // range.
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- scratch0,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
- __ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
- __ EmitFPUTruncate(kRoundToZero,
- scratch0,
- double_scratch0,
- at,
- double_scratch1,
- scratch1,
- kCheckForInexactConversion);
-
- __ Branch(fail, ne, scratch1, Operand(zero_reg));
-
- __ SmiTagCheckOverflow(key, scratch0, scratch1);
- __ BranchOnOverflow(fail, scratch1);
- __ bind(&key_ok);
- } else {
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, fail);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
-
- Label slow, check_heap_number, miss_force_generic;
-
- // Register usage.
- Register value = a0;
- Register key = a1;
- Register receiver = a2;
- // a3 mostly holds the elements array or the destination external array.
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, t0, t1, f2, f4, &miss_force_generic);
-
- __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check that the index is in range.
- __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // a3: external array.
-
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Double to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(value, &slow);
- } else {
- __ JumpIfNotSmi(value, &check_heap_number);
- }
- __ SmiUntag(t1, value);
- __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
-
- // a3: base pointer of external storage.
- // t1: value (integer).
-
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- // Clamp the value to [0..255].
- // v0 is used as a scratch register here.
- Label done;
- __ li(v0, Operand(255));
- // Normal branch: nop in delay slot.
- __ Branch(&done, gt, t1, Operand(v0));
- // Use delay slot in this branch.
- __ Branch(USE_DELAY_SLOT, &done, lt, t1, Operand(zero_reg));
- __ mov(v0, zero_reg); // In delay slot.
- __ mov(v0, t1); // Value is in range 0..255.
- __ bind(&done);
- __ mov(t1, v0);
-
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t1, MemOperand(t8, 0));
- }
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t1, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t1, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t1, MemOperand(t8, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Perform int-to-float conversion and store to memory.
- __ SmiUntag(t0, key);
- StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ sll(t8, key, 2);
- __ addu(a3, a3, t8);
- // a3: effective address of the double element
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(FPU)) {
- destination = FloatingPointHelper::kFPURegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
- FloatingPointHelper::ConvertIntToDouble(
- masm, t1, destination,
- f0, t2, t3, // These are: double_dst, dst_mantissa, dst_exponent.
- t0, f2); // These are: scratch2, single_scratch.
- if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
- __ sdc1(f0, MemOperand(a3, 0));
- } else {
- __ sw(t2, MemOperand(a3, 0));
- __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // Entry registers are intact, a0 holds the value which is the return value.
- __ mov(v0, a0);
- __ Ret();
-
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- // a3: external array.
- __ bind(&check_heap_number);
- __ GetObjectType(value, t1, t2);
- __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
-
- __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
-
- // a3: base pointer of external storage.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
-
- __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvt_s_d(f0, f0);
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ swc1(f0, MemOperand(t8, 0));
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, key, 2);
- __ addu(t8, a3, t8);
- __ sdc1(f0, MemOperand(t8, 0));
- } else {
- __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);
-
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, a0 holds the value
- // which is the return value.
- __ mov(v0, a0);
- __ Ret();
- } else {
- // FPU is not available, do manual conversions.
-
- __ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ li(t5, HeapNumber::kExponentMask);
- __ and_(t6, t3, t5);
- __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg));
-
- __ xor_(t1, t6, t5);
- __ li(t2, kBinary32ExponentMask);
- __ Movz(t6, t2, t1); // Only if t6 is equal to t5.
- __ Branch(&nan_or_infinity_or_zero, eq, t1, Operand(zero_reg));
-
- // Rebias exponent.
- __ srl(t6, t6, HeapNumber::kExponentShift);
- __ Addu(t6,
- t6,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ li(t1, Operand(kBinary32MaxExponent));
- __ Slt(t1, t1, t6);
- __ And(t2, t3, Operand(HeapNumber::kSignMask));
- __ Or(t2, t2, Operand(kBinary32ExponentMask));
- __ Movn(t3, t2, t1); // Only if t6 is gt kBinary32MaxExponent.
- __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
-
- __ Slt(t1, t6, Operand(kBinary32MinExponent));
- __ And(t2, t3, Operand(HeapNumber::kSignMask));
- __ Movn(t3, t2, t1); // Only if t6 is lt kBinary32MinExponent.
- __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
-
- __ And(t7, t3, Operand(HeapNumber::kSignMask));
- __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
- __ sll(t3, t3, kMantissaInHiWordShift);
- __ or_(t7, t7, t3);
- __ srl(t4, t4, kMantissaInLoWordShift);
- __ or_(t7, t7, t4);
- __ sll(t6, t6, kBinary32ExponentShift);
- __ or_(t3, t7, t6);
-
- __ bind(&done);
- __ sll(t9, key, 1);
- __ addu(t9, a3, t9);
- __ sw(t3, MemOperand(t9, 0));
-
- // Entry registers are intact, a0 holds the value which is the return
- // value.
- __ mov(v0, a0);
- __ Ret();
-
- __ bind(&nan_or_infinity_or_zero);
- __ And(t7, t3, Operand(HeapNumber::kSignMask));
- __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
- __ or_(t6, t6, t7);
- __ sll(t3, t3, kMantissaInHiWordShift);
- __ or_(t6, t6, t3);
- __ srl(t4, t4, kMantissaInLoWordShift);
- __ or_(t3, t6, t4);
- __ Branch(&done);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, key, 2);
- __ addu(t8, a3, t8);
- // t8: effective address of destination element.
- __ sw(t4, MemOperand(t8, 0));
- __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
- __ mov(v0, a0);
- __ Ret();
- } else {
- bool is_signed_type = IsElementTypeSigned(elements_kind);
- int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ li(t5, HeapNumber::kExponentMask);
- __ and_(t6, t3, t5);
- __ Movz(t3, zero_reg, t6); // Only if t6 is equal to zero.
- __ Branch(&done, eq, t6, Operand(zero_reg));
-
- __ xor_(t2, t6, t5);
- __ Movz(t3, zero_reg, t2); // Only if t6 is equal to t5.
- __ Branch(&done, eq, t6, Operand(t5));
-
- // Unbias exponent.
- __ srl(t6, t6, HeapNumber::kExponentShift);
- __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
- // If exponent is negative then result is 0.
- __ slt(t2, t6, zero_reg);
- __ Movn(t3, zero_reg, t2); // Only if exponent is negative.
- __ Branch(&done, lt, t6, Operand(zero_reg));
-
- // If exponent is too big then result is minimal value.
- __ slti(t1, t6, meaningfull_bits - 1);
- __ li(t2, min_value);
- __ Movz(t3, t2, t1); // Only if t6 is ge meaningfull_bits - 1.
- __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1));
-
- __ And(t5, t3, Operand(HeapNumber::kSignMask));
- __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
- __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ li(t9, HeapNumber::kMantissaBitsInTopWord);
- __ subu(t6, t9, t6);
- __ slt(t1, t6, zero_reg);
- __ srlv(t2, t3, t6);
- __ Movz(t3, t2, t1); // Only if t6 is positive.
- __ Branch(&sign, ge, t6, Operand(zero_reg));
-
- __ subu(t6, zero_reg, t6);
- __ sllv(t3, t3, t6);
- __ li(t9, meaningfull_bits);
- __ subu(t6, t9, t6);
- __ srlv(t4, t4, t6);
- __ or_(t3, t3, t4);
-
- __ bind(&sign);
- __ subu(t2, t3, zero_reg);
- __ Movz(t3, t2, t5); // Only if t5 is zero.
-
- __ bind(&done);
-
- // Result is in t3.
- // This switch block should be exactly the same as above (FPU mode).
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
- }
- }
-
- // Slow case, key and receiver still in a0 and a1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, a2, a3);
- // Entry registers are intact.
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch
- // -- a4 : scratch (elements)
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = a0;
- Register key_reg = a1;
- Register receiver_reg = a2;
- Register scratch = t0;
- Register elements_reg = a3;
- Register length_reg = t1;
- Register scratch2 = t2;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
- }
-
- // Check that the key is within bounds.
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- if (is_js_array) {
- __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis.
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ Branch(&grow, hs, key_reg, Operand(scratch));
- } else {
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
- }
-
- // Make sure elements is a fast element array, not 'cow'.
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ bind(&finish_store);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ Addu(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(scratch, scratch, scratch2);
- __ sw(value_reg, MemOperand(scratch));
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ Addu(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(scratch, scratch, scratch2);
- __ sw(value_reg, MemOperand(scratch));
- __ mov(receiver_reg, value_reg);
- __ RecordWrite(elements_reg, // Object.
- scratch, // Address.
- receiver_reg, // Value.
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- }
- // value_reg (a0) is preserved.
- // Done.
- __ Ret();
-
- __ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime.
- __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch));
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ lw(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&check_capacity, ne, elements_reg, Operand(at));
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
- TAG_OBJECT);
-
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ sw(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ li(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
- }
-
- // Store the element at index zero.
- __ sw(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
-
- // Install the new backing store in the JSArray.
- __ sw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ li(length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ Ret();
-
- __ bind(&check_capacity);
- // Check for cow elements, in general they are not handled by this stub
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedCOWArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ Branch(&slow, hs, length_reg, Operand(scratch));
-
- // Grow the array and finish the store.
- __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch (elements backing store)
- // -- t0 : scratch (elements_reg)
- // -- t1 : scratch (mantissa_reg)
- // -- t2 : scratch (exponent_reg)
- // -- t3 : scratch4
- // -- t4 : scratch
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = a0;
- Register key_reg = a1;
- Register receiver_reg = a2;
- Register elements_reg = a3;
- Register scratch1 = t0;
- Register scratch2 = t1;
- Register scratch3 = t2;
- Register scratch4 = t3;
- Register scratch5 = t4;
- Register length_reg = t3;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
-
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ lw(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ lw(scratch1,
- FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis, unsigned compare catches both negative and out-of-bound
- // indexes.
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ Branch(&grow, hs, key_reg, Operand(scratch1));
- } else {
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
- }
-
- __ bind(&finish_store);
-
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- // All registers after this are overwritten.
- elements_reg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- &transition_elements_kind);
-
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value_reg); // In delay slot.
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime.
- __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch1));
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(value_reg, &value_is_smi);
- __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&transition_elements_kind, ne, scratch1, Operand(at));
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ lw(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&check_capacity, ne, elements_reg, Operand(at));
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
- TAG_OBJECT);
-
- // Initialize the new FixedDoubleArray.
- __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
- __ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ sw(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-
- __ mov(scratch1, elements_reg);
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- // All registers after this are overwritten.
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- scratch5,
- &transition_elements_kind);
-
- __ li(scratch1, Operand(kHoleNanLower32));
- __ li(scratch2, Operand(kHoleNanUpper32));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ sw(scratch1, FieldMemOperand(elements_reg, offset));
- __ sw(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
- }
-
- // Install the new backing store in the JSArray.
- __ sw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ li(length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ Ret();
-
- __ bind(&check_capacity);
- // Make sure that the backing store can hold additional elements.
- __ lw(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
- __ Branch(&slow, hs, length_reg, Operand(scratch1));
-
- // Grow the array and finish the store.
- __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/3rdparty/v8/src/mirror-debugger.js b/src/3rdparty/v8/src/mirror-debugger.js
deleted file mode 100644
index 7f1a05a..0000000
--- a/src/3rdparty/v8/src/mirror-debugger.js
+++ /dev/null
@@ -1,2626 +0,0 @@
-// Copyright 2006-2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Handle id counters.
-var next_handle_ = 0;
-var next_transient_handle_ = -1;
-
-// Mirror cache.
-var mirror_cache_ = [];
-
-
-/**
- * Clear the mirror handle cache.
- */
-function ClearMirrorCache() {
- next_handle_ = 0;
- mirror_cache_ = [];
-}
-
-
-/**
- * Returns the mirror for a specified value or object.
- *
- * @param {value or Object} value the value or object to retreive the mirror for
- * @param {boolean} transient indicate whether this object is transient and
- * should not be added to the mirror cache. The default is not transient.
- * @returns {Mirror} the mirror reflects the passed value or object
- */
-function MakeMirror(value, opt_transient) {
- var mirror;
-
- // Look for non transient mirrors in the mirror cache.
- if (!opt_transient) {
- for (id in mirror_cache_) {
- mirror = mirror_cache_[id];
- if (mirror.value() === value) {
- return mirror;
- }
- // Special check for NaN as NaN == NaN is false.
- if (mirror.isNumber() && isNaN(mirror.value()) &&
- typeof value == 'number' && isNaN(value)) {
- return mirror;
- }
- }
- }
-
- if (IS_UNDEFINED(value)) {
- mirror = new UndefinedMirror();
- } else if (IS_NULL(value)) {
- mirror = new NullMirror();
- } else if (IS_BOOLEAN(value)) {
- mirror = new BooleanMirror(value);
- } else if (IS_NUMBER(value)) {
- mirror = new NumberMirror(value);
- } else if (IS_STRING(value)) {
- mirror = new StringMirror(value);
- } else if (IS_ARRAY(value)) {
- mirror = new ArrayMirror(value);
- } else if (IS_DATE(value)) {
- mirror = new DateMirror(value);
- } else if (IS_FUNCTION(value)) {
- mirror = new FunctionMirror(value);
- } else if (IS_REGEXP(value)) {
- mirror = new RegExpMirror(value);
- } else if (IS_ERROR(value)) {
- mirror = new ErrorMirror(value);
- } else if (IS_SCRIPT(value)) {
- mirror = new ScriptMirror(value);
- } else {
- mirror = new ObjectMirror(value, OBJECT_TYPE, opt_transient);
- }
-
- mirror_cache_[mirror.handle()] = mirror;
- return mirror;
-}
-
-
-/**
- * Returns the mirror for a specified mirror handle.
- *
- * @param {number} handle the handle to find the mirror for
- * @returns {Mirror or undefiend} the mirror with the requested handle or
- * undefined if no mirror with the requested handle was found
- */
-function LookupMirror(handle) {
- return mirror_cache_[handle];
-}
-
-
-/**
- * Returns the mirror for the undefined value.
- *
- * @returns {Mirror} the mirror reflects the undefined value
- */
-function GetUndefinedMirror() {
- return MakeMirror(void 0);
-}
-
-
-/**
- * Inherit the prototype methods from one constructor into another.
- *
- * The Function.prototype.inherits from lang.js rewritten as a standalone
- * function (not on Function.prototype). NOTE: If this file is to be loaded
- * during bootstrapping this function needs to be revritten using some native
- * functions as prototype setup using normal JavaScript does not work as
- * expected during bootstrapping (see mirror.js in r114903).
- *
- * @param {function} ctor Constructor function which needs to inherit the
- * prototype
- * @param {function} superCtor Constructor function to inherit prototype from
- */
-function inherits(ctor, superCtor) {
- var tempCtor = function(){};
- tempCtor.prototype = superCtor.prototype;
- ctor.super_ = superCtor.prototype;
- ctor.prototype = new tempCtor();
- ctor.prototype.constructor = ctor;
-}
-
-
-// Type names of the different mirrors.
-var UNDEFINED_TYPE = 'undefined';
-var NULL_TYPE = 'null';
-var BOOLEAN_TYPE = 'boolean';
-var NUMBER_TYPE = 'number';
-var STRING_TYPE = 'string';
-var OBJECT_TYPE = 'object';
-var FUNCTION_TYPE = 'function';
-var REGEXP_TYPE = 'regexp';
-var ERROR_TYPE = 'error';
-var PROPERTY_TYPE = 'property';
-var INTERNAL_PROPERTY_TYPE = 'internalProperty';
-var FRAME_TYPE = 'frame';
-var SCRIPT_TYPE = 'script';
-var CONTEXT_TYPE = 'context';
-var SCOPE_TYPE = 'scope';
-
-// Maximum length when sending strings through the JSON protocol.
-var kMaxProtocolStringLength = 80;
-
-// Different kind of properties.
-var PropertyKind = {};
-PropertyKind.Named = 1;
-PropertyKind.Indexed = 2;
-
-
-// A copy of the PropertyType enum from global.h
-var PropertyType = {};
-PropertyType.Normal = 0;
-PropertyType.Field = 1;
-PropertyType.ConstantFunction = 2;
-PropertyType.Callbacks = 3;
-PropertyType.Handler = 4;
-PropertyType.Interceptor = 5;
-PropertyType.Transition = 6;
-PropertyType.Nonexistent = 7;
-
-
-// Different attributes for a property.
-var PropertyAttribute = {};
-PropertyAttribute.None = NONE;
-PropertyAttribute.ReadOnly = READ_ONLY;
-PropertyAttribute.DontEnum = DONT_ENUM;
-PropertyAttribute.DontDelete = DONT_DELETE;
-
-
-// A copy of the scope types from runtime.cc.
-var ScopeType = { Global: 0,
- Local: 1,
- With: 2,
- Closure: 3,
- Catch: 4,
- Block: 5 };
-
-
-// Mirror hierarchy:
-// - Mirror
-// - ValueMirror
-// - UndefinedMirror
-// - NullMirror
-// - NumberMirror
-// - StringMirror
-// - ObjectMirror
-// - FunctionMirror
-// - UnresolvedFunctionMirror
-// - ArrayMirror
-// - DateMirror
-// - RegExpMirror
-// - ErrorMirror
-// - PropertyMirror
-// - InternalPropertyMirror
-// - FrameMirror
-// - ScriptMirror
-
-
-/**
- * Base class for all mirror objects.
- * @param {string} type The type of the mirror
- * @constructor
- */
-function Mirror(type) {
- this.type_ = type;
-}
-
-
-Mirror.prototype.type = function() {
- return this.type_;
-};
-
-
-/**
- * Check whether the mirror reflects a value.
- * @returns {boolean} True if the mirror reflects a value.
- */
-Mirror.prototype.isValue = function() {
- return this instanceof ValueMirror;
-};
-
-
-/**
- * Check whether the mirror reflects the undefined value.
- * @returns {boolean} True if the mirror reflects the undefined value.
- */
-Mirror.prototype.isUndefined = function() {
- return this instanceof UndefinedMirror;
-};
-
-
-/**
- * Check whether the mirror reflects the null value.
- * @returns {boolean} True if the mirror reflects the null value
- */
-Mirror.prototype.isNull = function() {
- return this instanceof NullMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a boolean value.
- * @returns {boolean} True if the mirror reflects a boolean value
- */
-Mirror.prototype.isBoolean = function() {
- return this instanceof BooleanMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a number value.
- * @returns {boolean} True if the mirror reflects a number value
- */
-Mirror.prototype.isNumber = function() {
- return this instanceof NumberMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a string value.
- * @returns {boolean} True if the mirror reflects a string value
- */
-Mirror.prototype.isString = function() {
- return this instanceof StringMirror;
-};
-
-
-/**
- * Check whether the mirror reflects an object.
- * @returns {boolean} True if the mirror reflects an object
- */
-Mirror.prototype.isObject = function() {
- return this instanceof ObjectMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a function.
- * @returns {boolean} True if the mirror reflects a function
- */
-Mirror.prototype.isFunction = function() {
- return this instanceof FunctionMirror;
-};
-
-
-/**
- * Check whether the mirror reflects an unresolved function.
- * @returns {boolean} True if the mirror reflects an unresolved function
- */
-Mirror.prototype.isUnresolvedFunction = function() {
- return this instanceof UnresolvedFunctionMirror;
-};
-
-
-/**
- * Check whether the mirror reflects an array.
- * @returns {boolean} True if the mirror reflects an array
- */
-Mirror.prototype.isArray = function() {
- return this instanceof ArrayMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a date.
- * @returns {boolean} True if the mirror reflects a date
- */
-Mirror.prototype.isDate = function() {
- return this instanceof DateMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a regular expression.
- * @returns {boolean} True if the mirror reflects a regular expression
- */
-Mirror.prototype.isRegExp = function() {
- return this instanceof RegExpMirror;
-};
-
-
-/**
- * Check whether the mirror reflects an error.
- * @returns {boolean} True if the mirror reflects an error
- */
-Mirror.prototype.isError = function() {
- return this instanceof ErrorMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a property.
- * @returns {boolean} True if the mirror reflects a property
- */
-Mirror.prototype.isProperty = function() {
- return this instanceof PropertyMirror;
-};
-
-
-/**
- * Check whether the mirror reflects an internal property.
- * @returns {boolean} True if the mirror reflects an internal property
- */
-Mirror.prototype.isInternalProperty = function() {
- return this instanceof InternalPropertyMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a stack frame.
- * @returns {boolean} True if the mirror reflects a stack frame
- */
-Mirror.prototype.isFrame = function() {
- return this instanceof FrameMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a script.
- * @returns {boolean} True if the mirror reflects a script
- */
-Mirror.prototype.isScript = function() {
- return this instanceof ScriptMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a context.
- * @returns {boolean} True if the mirror reflects a context
- */
-Mirror.prototype.isContext = function() {
- return this instanceof ContextMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a scope.
- * @returns {boolean} True if the mirror reflects a scope
- */
-Mirror.prototype.isScope = function() {
- return this instanceof ScopeMirror;
-};
-
-
-/**
- * Allocate a handle id for this object.
- */
-Mirror.prototype.allocateHandle_ = function() {
- this.handle_ = next_handle_++;
-};
-
-
-/**
- * Allocate a transient handle id for this object. Transient handles are
- * negative.
- */
-Mirror.prototype.allocateTransientHandle_ = function() {
- this.handle_ = next_transient_handle_--;
-};
-
-
-Mirror.prototype.toText = function() {
- // Simpel to text which is used when on specialization in subclass.
- return "#<" + this.constructor.name + ">";
-};
-
-
-/**
- * Base class for all value mirror objects.
- * @param {string} type The type of the mirror
- * @param {value} value The value reflected by this mirror
- * @param {boolean} transient indicate whether this object is transient with a
- * transient handle
- * @constructor
- * @extends Mirror
- */
-function ValueMirror(type, value, transient) {
- %_CallFunction(this, type, Mirror);
- this.value_ = value;
- if (!transient) {
- this.allocateHandle_();
- } else {
- this.allocateTransientHandle_();
- }
-}
-inherits(ValueMirror, Mirror);
-
-
-Mirror.prototype.handle = function() {
- return this.handle_;
-};
-
-
-/**
- * Check whether this is a primitive value.
- * @return {boolean} True if the mirror reflects a primitive value
- */
-ValueMirror.prototype.isPrimitive = function() {
- var type = this.type();
- return type === 'undefined' ||
- type === 'null' ||
- type === 'boolean' ||
- type === 'number' ||
- type === 'string';
-};
-
-
-/**
- * Get the actual value reflected by this mirror.
- * @return {value} The value reflected by this mirror
- */
-ValueMirror.prototype.value = function() {
- return this.value_;
-};
-
-
-/**
- * Mirror object for Undefined.
- * @constructor
- * @extends ValueMirror
- */
-function UndefinedMirror() {
- %_CallFunction(this, UNDEFINED_TYPE, void 0, ValueMirror);
-}
-inherits(UndefinedMirror, ValueMirror);
-
-
-UndefinedMirror.prototype.toText = function() {
- return 'undefined';
-};
-
-
-/**
- * Mirror object for null.
- * @constructor
- * @extends ValueMirror
- */
-function NullMirror() {
- %_CallFunction(this, NULL_TYPE, null, ValueMirror);
-}
-inherits(NullMirror, ValueMirror);
-
-
-NullMirror.prototype.toText = function() {
- return 'null';
-};
-
-
-/**
- * Mirror object for boolean values.
- * @param {boolean} value The boolean value reflected by this mirror
- * @constructor
- * @extends ValueMirror
- */
-function BooleanMirror(value) {
- %_CallFunction(this, BOOLEAN_TYPE, value, ValueMirror);
-}
-inherits(BooleanMirror, ValueMirror);
-
-
-BooleanMirror.prototype.toText = function() {
- return this.value_ ? 'true' : 'false';
-};
-
-
-/**
- * Mirror object for number values.
- * @param {number} value The number value reflected by this mirror
- * @constructor
- * @extends ValueMirror
- */
-function NumberMirror(value) {
- %_CallFunction(this, NUMBER_TYPE, value, ValueMirror);
-}
-inherits(NumberMirror, ValueMirror);
-
-
-NumberMirror.prototype.toText = function() {
- return %NumberToString(this.value_);
-};
-
-
-/**
- * Mirror object for string values.
- * @param {string} value The string value reflected by this mirror
- * @constructor
- * @extends ValueMirror
- */
-function StringMirror(value) {
- %_CallFunction(this, STRING_TYPE, value, ValueMirror);
-}
-inherits(StringMirror, ValueMirror);
-
-
-StringMirror.prototype.length = function() {
- return this.value_.length;
-};
-
-StringMirror.prototype.getTruncatedValue = function(maxLength) {
- if (maxLength != -1 && this.length() > maxLength) {
- return this.value_.substring(0, maxLength) +
- '... (length: ' + this.length() + ')';
- }
- return this.value_;
-};
-
-StringMirror.prototype.toText = function() {
- return this.getTruncatedValue(kMaxProtocolStringLength);
-};
-
-
-/**
- * Mirror object for objects.
- * @param {object} value The object reflected by this mirror
- * @param {boolean} transient indicate whether this object is transient with a
- * transient handle
- * @constructor
- * @extends ValueMirror
- */
-function ObjectMirror(value, type, transient) {
- %_CallFunction(this, type || OBJECT_TYPE, value, transient, ValueMirror);
-}
-inherits(ObjectMirror, ValueMirror);
-
-
-ObjectMirror.prototype.className = function() {
- return %_ClassOf(this.value_);
-};
-
-
-ObjectMirror.prototype.constructorFunction = function() {
- return MakeMirror(%DebugGetProperty(this.value_, 'constructor'));
-};
-
-
-ObjectMirror.prototype.prototypeObject = function() {
- return MakeMirror(%DebugGetProperty(this.value_, 'prototype'));
-};
-
-
-ObjectMirror.prototype.protoObject = function() {
- return MakeMirror(%DebugGetPrototype(this.value_));
-};
-
-
-ObjectMirror.prototype.hasNamedInterceptor = function() {
- // Get information on interceptors for this object.
- var x = %GetInterceptorInfo(this.value_);
- return (x & 2) != 0;
-};
-
-
-ObjectMirror.prototype.hasIndexedInterceptor = function() {
- // Get information on interceptors for this object.
- var x = %GetInterceptorInfo(this.value_);
- return (x & 1) != 0;
-};
-
-
-/**
- * Return the property names for this object.
- * @param {number} kind Indicate whether named, indexed or both kinds of
- * properties are requested
- * @param {number} limit Limit the number of names returend to the specified
- value
- * @return {Array} Property names for this object
- */
-ObjectMirror.prototype.propertyNames = function(kind, limit) {
- // Find kind and limit and allocate array for the result
- kind = kind || PropertyKind.Named | PropertyKind.Indexed;
-
- var propertyNames;
- var elementNames;
- var total = 0;
-
- // Find all the named properties.
- if (kind & PropertyKind.Named) {
- // Get the local property names.
- propertyNames = %GetLocalPropertyNames(this.value_);
- total += propertyNames.length;
-
- // Get names for named interceptor properties if any.
- if (this.hasNamedInterceptor() && (kind & PropertyKind.Named)) {
- var namedInterceptorNames =
- %GetNamedInterceptorPropertyNames(this.value_);
- if (namedInterceptorNames) {
- propertyNames = propertyNames.concat(namedInterceptorNames);
- total += namedInterceptorNames.length;
- }
- }
- }
-
- // Find all the indexed properties.
- if (kind & PropertyKind.Indexed) {
- // Get the local element names.
- elementNames = %GetLocalElementNames(this.value_);
- total += elementNames.length;
-
- // Get names for indexed interceptor properties.
- if (this.hasIndexedInterceptor() && (kind & PropertyKind.Indexed)) {
- var indexedInterceptorNames =
- %GetIndexedInterceptorElementNames(this.value_);
- if (indexedInterceptorNames) {
- elementNames = elementNames.concat(indexedInterceptorNames);
- total += indexedInterceptorNames.length;
- }
- }
- }
- limit = Math.min(limit || total, total);
-
- var names = new Array(limit);
- var index = 0;
-
- // Copy names for named properties.
- if (kind & PropertyKind.Named) {
- for (var i = 0; index < limit && i < propertyNames.length; i++) {
- names[index++] = propertyNames[i];
- }
- }
-
- // Copy names for indexed properties.
- if (kind & PropertyKind.Indexed) {
- for (var i = 0; index < limit && i < elementNames.length; i++) {
- names[index++] = elementNames[i];
- }
- }
-
- return names;
-};
-
-
-/**
- * Return the properties for this object as an array of PropertyMirror objects.
- * @param {number} kind Indicate whether named, indexed or both kinds of
- * properties are requested
- * @param {number} limit Limit the number of properties returned to the
- specified value
- * @return {Array} Property mirrors for this object
- */
-ObjectMirror.prototype.properties = function(kind, limit) {
- var names = this.propertyNames(kind, limit);
- var properties = new Array(names.length);
- for (var i = 0; i < names.length; i++) {
- properties[i] = this.property(names[i]);
- }
-
- return properties;
-};
-
-
-/**
- * Return the internal properties for this object as an array of
- * InternalPropertyMirror objects.
- * @return {Array} Property mirrors for this object
- */
-ObjectMirror.prototype.internalProperties = function() {
- return ObjectMirror.GetInternalProperties(this.value_);
-}
-
-
-ObjectMirror.prototype.property = function(name) {
- var details = %DebugGetPropertyDetails(this.value_, %ToString(name));
- if (details) {
- return new PropertyMirror(this, name, details);
- }
-
- // Nothing found.
- return GetUndefinedMirror();
-};
-
-
-
-/**
- * Try to find a property from its value.
- * @param {Mirror} value The property value to look for
- * @return {PropertyMirror} The property with the specified value. If no
- * property was found with the specified value UndefinedMirror is returned
- */
-ObjectMirror.prototype.lookupProperty = function(value) {
- var properties = this.properties();
-
- // Look for property value in properties.
- for (var i = 0; i < properties.length; i++) {
-
- // Skip properties which are defined through assessors.
- var property = properties[i];
- if (property.propertyType() != PropertyType.Callbacks) {
- if (%_ObjectEquals(property.value_, value.value_)) {
- return property;
- }
- }
- }
-
- // Nothing found.
- return GetUndefinedMirror();
-};
-
-
-/**
- * Returns objects which has direct references to this object
- * @param {number} opt_max_objects Optional parameter specifying the maximum
- * number of referencing objects to return.
- * @return {Array} The objects which has direct references to this object.
- */
-ObjectMirror.prototype.referencedBy = function(opt_max_objects) {
- // Find all objects with direct references to this object.
- var result = %DebugReferencedBy(this.value_,
- Mirror.prototype, opt_max_objects || 0);
-
- // Make mirrors for all the references found.
- for (var i = 0; i < result.length; i++) {
- result[i] = MakeMirror(result[i]);
- }
-
- return result;
-};
-
-
-ObjectMirror.prototype.toText = function() {
- var name;
- var ctor = this.constructorFunction();
- if (!ctor.isFunction()) {
- name = this.className();
- } else {
- name = ctor.name();
- if (!name) {
- name = this.className();
- }
- }
- return '#<' + name + '>';
-};
-
-
-/**
- * Return the internal properties of the value, such as [[PrimitiveValue]] of
- * scalar wrapper objects and properties of the bound function.
- * This method is done static to be accessible from Debug API with the bare
- * values without mirrors.
- * @return {Array} array (possibly empty) of InternalProperty instances
- */
-ObjectMirror.GetInternalProperties = function(value) {
- if (IS_STRING_WRAPPER(value) || IS_NUMBER_WRAPPER(value) ||
- IS_BOOLEAN_WRAPPER(value)) {
- var primitiveValue = %_ValueOf(value);
- return [new InternalPropertyMirror("[[PrimitiveValue]]", primitiveValue)];
- } else if (IS_FUNCTION(value)) {
- var bindings = %BoundFunctionGetBindings(value);
- var result = [];
- if (bindings && IS_ARRAY(bindings)) {
- result.push(new InternalPropertyMirror("[[TargetFunction]]",
- bindings[0]));
- result.push(new InternalPropertyMirror("[[BoundThis]]", bindings[1]));
- var boundArgs = [];
- for (var i = 2; i < bindings.length; i++) {
- boundArgs.push(bindings[i]);
- }
- result.push(new InternalPropertyMirror("[[BoundArgs]]", boundArgs));
- }
- return result;
- }
- return [];
-}
-
-
-/**
- * Mirror object for functions.
- * @param {function} value The function object reflected by this mirror.
- * @constructor
- * @extends ObjectMirror
- */
-function FunctionMirror(value) {
- %_CallFunction(this, value, FUNCTION_TYPE, ObjectMirror);
- this.resolved_ = true;
-}
-inherits(FunctionMirror, ObjectMirror);
-
-
-/**
- * Returns whether the function is resolved.
- * @return {boolean} True if the function is resolved. Unresolved functions can
- * only originate as functions from stack frames
- */
-FunctionMirror.prototype.resolved = function() {
- return this.resolved_;
-};
-
-
-/**
- * Returns the name of the function.
- * @return {string} Name of the function
- */
-FunctionMirror.prototype.name = function() {
- return %FunctionGetName(this.value_);
-};
-
-
-/**
- * Returns the inferred name of the function.
- * @return {string} Name of the function
- */
-FunctionMirror.prototype.inferredName = function() {
- return %FunctionGetInferredName(this.value_);
-};
-
-
-/**
- * Returns the source code for the function.
- * @return {string or undefined} The source code for the function. If the
- * function is not resolved undefined will be returned.
- */
-FunctionMirror.prototype.source = function() {
- // Return source if function is resolved. Otherwise just fall through to
- // return undefined.
- if (this.resolved()) {
- return builtins.FunctionSourceString(this.value_);
- }
-};
-
-
-/**
- * Returns the script object for the function.
- * @return {ScriptMirror or undefined} Script object for the function or
- * undefined if the function has no script
- */
-FunctionMirror.prototype.script = function() {
- // Return script if function is resolved. Otherwise just fall through
- // to return undefined.
- if (this.resolved()) {
- var script = %FunctionGetScript(this.value_);
- if (script) {
- return MakeMirror(script);
- }
- }
-};
-
-
-/**
- * Returns the script source position for the function. Only makes sense
- * for functions which has a script defined.
- * @return {Number or undefined} in-script position for the function
- */
-FunctionMirror.prototype.sourcePosition_ = function() {
- // Return script if function is resolved. Otherwise just fall through
- // to return undefined.
- if (this.resolved()) {
- return %FunctionGetScriptSourcePosition(this.value_);
- }
-};
-
-
-/**
- * Returns the script source location object for the function. Only makes sense
- * for functions which has a script defined.
- * @return {Location or undefined} in-script location for the function begin
- */
-FunctionMirror.prototype.sourceLocation = function() {
- if (this.resolved() && this.script()) {
- return this.script().locationFromPosition(this.sourcePosition_(),
- true);
- }
-};
-
-
-/**
- * Returns objects constructed by this function.
- * @param {number} opt_max_instances Optional parameter specifying the maximum
- * number of instances to return.
- * @return {Array or undefined} The objects constructed by this function.
- */
-FunctionMirror.prototype.constructedBy = function(opt_max_instances) {
- if (this.resolved()) {
- // Find all objects constructed from this function.
- var result = %DebugConstructedBy(this.value_, opt_max_instances || 0);
-
- // Make mirrors for all the instances found.
- for (var i = 0; i < result.length; i++) {
- result[i] = MakeMirror(result[i]);
- }
-
- return result;
- } else {
- return [];
- }
-};
-
-
-FunctionMirror.prototype.scopeCount = function() {
- if (this.resolved()) {
- return %GetFunctionScopeCount(this.value());
- } else {
- return 0;
- }
-};
-
-
-FunctionMirror.prototype.scope = function(index) {
- if (this.resolved()) {
- return new ScopeMirror(void 0, this, index);
- }
-};
-
-
-FunctionMirror.prototype.toText = function() {
- return this.source();
-};
-
-
-/**
- * Mirror object for unresolved functions.
- * @param {string} value The name for the unresolved function reflected by this
- * mirror.
- * @constructor
- * @extends ObjectMirror
- */
-function UnresolvedFunctionMirror(value) {
- // Construct this using the ValueMirror as an unresolved function is not a
- // real object but just a string.
- %_CallFunction(this, FUNCTION_TYPE, value, ValueMirror);
- this.propertyCount_ = 0;
- this.elementCount_ = 0;
- this.resolved_ = false;
-}
-inherits(UnresolvedFunctionMirror, FunctionMirror);
-
-
-UnresolvedFunctionMirror.prototype.className = function() {
- return 'Function';
-};
-
-
-UnresolvedFunctionMirror.prototype.constructorFunction = function() {
- return GetUndefinedMirror();
-};
-
-
-UnresolvedFunctionMirror.prototype.prototypeObject = function() {
- return GetUndefinedMirror();
-};
-
-
-UnresolvedFunctionMirror.prototype.protoObject = function() {
- return GetUndefinedMirror();
-};
-
-
-UnresolvedFunctionMirror.prototype.name = function() {
- return this.value_;
-};
-
-
-UnresolvedFunctionMirror.prototype.inferredName = function() {
- return undefined;
-};
-
-
-UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) {
- return [];
-};
-
-
-/**
- * Mirror object for arrays.
- * @param {Array} value The Array object reflected by this mirror
- * @constructor
- * @extends ObjectMirror
- */
-function ArrayMirror(value) {
- %_CallFunction(this, value, ObjectMirror);
-}
-inherits(ArrayMirror, ObjectMirror);
-
-
-ArrayMirror.prototype.length = function() {
- return this.value_.length;
-};
-
-
-ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index,
- opt_to_index) {
- var from_index = opt_from_index || 0;
- var to_index = opt_to_index || this.length() - 1;
- if (from_index > to_index) return new Array();
- var values = new Array(to_index - from_index + 1);
- for (var i = from_index; i <= to_index; i++) {
- var details = %DebugGetPropertyDetails(this.value_, %ToString(i));
- var value;
- if (details) {
- value = new PropertyMirror(this, i, details);
- } else {
- value = GetUndefinedMirror();
- }
- values[i - from_index] = value;
- }
- return values;
-};
-
-
-/**
- * Mirror object for dates.
- * @param {Date} value The Date object reflected by this mirror
- * @constructor
- * @extends ObjectMirror
- */
-function DateMirror(value) {
- %_CallFunction(this, value, ObjectMirror);
-}
-inherits(DateMirror, ObjectMirror);
-
-
-DateMirror.prototype.toText = function() {
- var s = JSON.stringify(this.value_);
- return s.substring(1, s.length - 1); // cut quotes
-};
-
-
-/**
- * Mirror object for regular expressions.
- * @param {RegExp} value The RegExp object reflected by this mirror
- * @constructor
- * @extends ObjectMirror
- */
-function RegExpMirror(value) {
- %_CallFunction(this, value, REGEXP_TYPE, ObjectMirror);
-}
-inherits(RegExpMirror, ObjectMirror);
-
-
-/**
- * Returns the source to the regular expression.
- * @return {string or undefined} The source to the regular expression
- */
-RegExpMirror.prototype.source = function() {
- return this.value_.source;
-};
-
-
-/**
- * Returns whether this regular expression has the global (g) flag set.
- * @return {boolean} Value of the global flag
- */
-RegExpMirror.prototype.global = function() {
- return this.value_.global;
-};
-
-
-/**
- * Returns whether this regular expression has the ignore case (i) flag set.
- * @return {boolean} Value of the ignore case flag
- */
-RegExpMirror.prototype.ignoreCase = function() {
- return this.value_.ignoreCase;
-};
-
-
-/**
- * Returns whether this regular expression has the multiline (m) flag set.
- * @return {boolean} Value of the multiline flag
- */
-RegExpMirror.prototype.multiline = function() {
- return this.value_.multiline;
-};
-
-
-RegExpMirror.prototype.toText = function() {
- // Simpel to text which is used when on specialization in subclass.
- return "/" + this.source() + "/";
-};
-
-
-/**
- * Mirror object for error objects.
- * @param {Error} value The error object reflected by this mirror
- * @constructor
- * @extends ObjectMirror
- */
-function ErrorMirror(value) {
- %_CallFunction(this, value, ERROR_TYPE, ObjectMirror);
-}
-inherits(ErrorMirror, ObjectMirror);
-
-
-/**
- * Returns the message for this eror object.
- * @return {string or undefined} The message for this eror object
- */
-ErrorMirror.prototype.message = function() {
- return this.value_.message;
-};
-
-
-ErrorMirror.prototype.toText = function() {
- // Use the same text representation as in messages.js.
- var text;
- try {
- str = %_CallFunction(this.value_, builtins.ErrorToString);
- } catch (e) {
- str = '#<Error>';
- }
- return str;
-};
-
-
-/**
- * Base mirror object for properties.
- * @param {ObjectMirror} mirror The mirror object having this property
- * @param {string} name The name of the property
- * @param {Array} details Details about the property
- * @constructor
- * @extends Mirror
- */
-function PropertyMirror(mirror, name, details) {
- %_CallFunction(this, PROPERTY_TYPE, Mirror);
- this.mirror_ = mirror;
- this.name_ = name;
- this.value_ = details[0];
- this.details_ = details[1];
- if (details.length > 2) {
- this.exception_ = details[2];
- this.getter_ = details[3];
- this.setter_ = details[4];
- }
-}
-inherits(PropertyMirror, Mirror);
-
-
-PropertyMirror.prototype.isReadOnly = function() {
- return (this.attributes() & PropertyAttribute.ReadOnly) != 0;
-};
-
-
-PropertyMirror.prototype.isEnum = function() {
- return (this.attributes() & PropertyAttribute.DontEnum) == 0;
-};
-
-
-PropertyMirror.prototype.canDelete = function() {
- return (this.attributes() & PropertyAttribute.DontDelete) == 0;
-};
-
-
-PropertyMirror.prototype.name = function() {
- return this.name_;
-};
-
-
-PropertyMirror.prototype.isIndexed = function() {
- for (var i = 0; i < this.name_.length; i++) {
- if (this.name_[i] < '0' || '9' < this.name_[i]) {
- return false;
- }
- }
- return true;
-};
-
-
-PropertyMirror.prototype.value = function() {
- return MakeMirror(this.value_, false);
-};
-
-
-/**
- * Returns whether this property value is an exception.
- * @return {booolean} True if this property value is an exception
- */
-PropertyMirror.prototype.isException = function() {
- return this.exception_ ? true : false;
-};
-
-
-PropertyMirror.prototype.attributes = function() {
- return %DebugPropertyAttributesFromDetails(this.details_);
-};
-
-
-PropertyMirror.prototype.propertyType = function() {
- return %DebugPropertyTypeFromDetails(this.details_);
-};
-
-
-PropertyMirror.prototype.insertionIndex = function() {
- return %DebugPropertyIndexFromDetails(this.details_);
-};
-
-
-/**
- * Returns whether this property has a getter defined through __defineGetter__.
- * @return {booolean} True if this property has a getter
- */
-PropertyMirror.prototype.hasGetter = function() {
- return this.getter_ ? true : false;
-};
-
-
-/**
- * Returns whether this property has a setter defined through __defineSetter__.
- * @return {booolean} True if this property has a setter
- */
-PropertyMirror.prototype.hasSetter = function() {
- return this.setter_ ? true : false;
-};
-
-
-/**
- * Returns the getter for this property defined through __defineGetter__.
- * @return {Mirror} FunctionMirror reflecting the getter function or
- * UndefinedMirror if there is no getter for this property
- */
-PropertyMirror.prototype.getter = function() {
- if (this.hasGetter()) {
- return MakeMirror(this.getter_);
- } else {
- return GetUndefinedMirror();
- }
-};
-
-
-/**
- * Returns the setter for this property defined through __defineSetter__.
- * @return {Mirror} FunctionMirror reflecting the setter function or
- * UndefinedMirror if there is no setter for this property
- */
-PropertyMirror.prototype.setter = function() {
- if (this.hasSetter()) {
- return MakeMirror(this.setter_);
- } else {
- return GetUndefinedMirror();
- }
-};
-
-
-/**
- * Returns whether this property is natively implemented by the host or a set
- * through JavaScript code.
- * @return {boolean} True if the property is
- * UndefinedMirror if there is no setter for this property
- */
-PropertyMirror.prototype.isNative = function() {
- return (this.propertyType() == PropertyType.Interceptor) ||
- ((this.propertyType() == PropertyType.Callbacks) &&
- !this.hasGetter() && !this.hasSetter());
-};
-
-
-/**
- * Mirror object for internal properties. Internal property reflects properties
- * not accessible from user code such as [[BoundThis]] in bound function.
- * Their names are merely symbolic.
- * @param {string} name The name of the property
- * @param {value} property value
- * @constructor
- * @extends Mirror
- */
-function InternalPropertyMirror(name, value) {
- %_CallFunction(this, INTERNAL_PROPERTY_TYPE, Mirror);
- this.name_ = name;
- this.value_ = value;
-}
-inherits(InternalPropertyMirror, Mirror);
-
-
-InternalPropertyMirror.prototype.name = function() {
- return this.name_;
-};
-
-
-InternalPropertyMirror.prototype.value = function() {
- return MakeMirror(this.value_, false);
-};
-
-
-var kFrameDetailsFrameIdIndex = 0;
-var kFrameDetailsReceiverIndex = 1;
-var kFrameDetailsFunctionIndex = 2;
-var kFrameDetailsArgumentCountIndex = 3;
-var kFrameDetailsLocalCountIndex = 4;
-var kFrameDetailsSourcePositionIndex = 5;
-var kFrameDetailsConstructCallIndex = 6;
-var kFrameDetailsAtReturnIndex = 7;
-var kFrameDetailsFlagsIndex = 8;
-var kFrameDetailsFirstDynamicIndex = 9;
-
-var kFrameDetailsNameIndex = 0;
-var kFrameDetailsValueIndex = 1;
-var kFrameDetailsNameValueSize = 2;
-
-var kFrameDetailsFlagDebuggerFrameMask = 1 << 0;
-var kFrameDetailsFlagOptimizedFrameMask = 1 << 1;
-var kFrameDetailsFlagInlinedFrameIndexMask = 7 << 2;
-
-/**
- * Wrapper for the frame details information retreived from the VM. The frame
- * details from the VM is an array with the following content. See runtime.cc
- * Runtime_GetFrameDetails.
- * 0: Id
- * 1: Receiver
- * 2: Function
- * 3: Argument count
- * 4: Local count
- * 5: Source position
- * 6: Construct call
- * 7: Is at return
- * 8: Flags (debugger frame, optimized frame, inlined frame index)
- * Arguments name, value
- * Locals name, value
- * Return value if any
- * @param {number} break_id Current break id
- * @param {number} index Frame number
- * @constructor
- */
-function FrameDetails(break_id, index) {
- this.break_id_ = break_id;
- this.details_ = %GetFrameDetails(break_id, index);
-}
-
-
-FrameDetails.prototype.frameId = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsFrameIdIndex];
-};
-
-
-FrameDetails.prototype.receiver = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsReceiverIndex];
-};
-
-
-FrameDetails.prototype.func = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsFunctionIndex];
-};
-
-
-FrameDetails.prototype.isConstructCall = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsConstructCallIndex];
-};
-
-
-FrameDetails.prototype.isAtReturn = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsAtReturnIndex];
-};
-
-
-FrameDetails.prototype.isDebuggerFrame = function() {
- %CheckExecutionState(this.break_id_);
- var f = kFrameDetailsFlagDebuggerFrameMask;
- return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
-};
-
-
-FrameDetails.prototype.isOptimizedFrame = function() {
- %CheckExecutionState(this.break_id_);
- var f = kFrameDetailsFlagOptimizedFrameMask;
- return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
-};
-
-
-FrameDetails.prototype.isInlinedFrame = function() {
- return this.inlinedFrameIndex() > 0;
-};
-
-
-FrameDetails.prototype.inlinedFrameIndex = function() {
- %CheckExecutionState(this.break_id_);
- var f = kFrameDetailsFlagInlinedFrameIndexMask;
- return (this.details_[kFrameDetailsFlagsIndex] & f) >> 2;
-};
-
-
-FrameDetails.prototype.argumentCount = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsArgumentCountIndex];
-};
-
-
-FrameDetails.prototype.argumentName = function(index) {
- %CheckExecutionState(this.break_id_);
- if (index >= 0 && index < this.argumentCount()) {
- return this.details_[kFrameDetailsFirstDynamicIndex +
- index * kFrameDetailsNameValueSize +
- kFrameDetailsNameIndex];
- }
-};
-
-
-FrameDetails.prototype.argumentValue = function(index) {
- %CheckExecutionState(this.break_id_);
- if (index >= 0 && index < this.argumentCount()) {
- return this.details_[kFrameDetailsFirstDynamicIndex +
- index * kFrameDetailsNameValueSize +
- kFrameDetailsValueIndex];
- }
-};
-
-
-FrameDetails.prototype.localCount = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsLocalCountIndex];
-};
-
-
-FrameDetails.prototype.sourcePosition = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsSourcePositionIndex];
-};
-
-
-FrameDetails.prototype.localName = function(index) {
- %CheckExecutionState(this.break_id_);
- if (index >= 0 && index < this.localCount()) {
- var locals_offset = kFrameDetailsFirstDynamicIndex +
- this.argumentCount() * kFrameDetailsNameValueSize;
- return this.details_[locals_offset +
- index * kFrameDetailsNameValueSize +
- kFrameDetailsNameIndex];
- }
-};
-
-
-FrameDetails.prototype.localValue = function(index) {
- %CheckExecutionState(this.break_id_);
- if (index >= 0 && index < this.localCount()) {
- var locals_offset = kFrameDetailsFirstDynamicIndex +
- this.argumentCount() * kFrameDetailsNameValueSize;
- return this.details_[locals_offset +
- index * kFrameDetailsNameValueSize +
- kFrameDetailsValueIndex];
- }
-};
-
-
-FrameDetails.prototype.returnValue = function() {
- %CheckExecutionState(this.break_id_);
- var return_value_offset =
- kFrameDetailsFirstDynamicIndex +
- (this.argumentCount() + this.localCount()) * kFrameDetailsNameValueSize;
- if (this.details_[kFrameDetailsAtReturnIndex]) {
- return this.details_[return_value_offset];
- }
-};
-
-
-FrameDetails.prototype.scopeCount = function() {
- return %GetScopeCount(this.break_id_, this.frameId());
-};
-
-
-/**
- * Mirror object for stack frames.
- * @param {number} break_id The break id in the VM for which this frame is
- valid
- * @param {number} index The frame index (top frame is index 0)
- * @constructor
- * @extends Mirror
- */
-function FrameMirror(break_id, index) {
- %_CallFunction(this, FRAME_TYPE, Mirror);
- this.break_id_ = break_id;
- this.index_ = index;
- this.details_ = new FrameDetails(break_id, index);
-}
-inherits(FrameMirror, Mirror);
-
-
-FrameMirror.prototype.index = function() {
- return this.index_;
-};
-
-
-FrameMirror.prototype.func = function() {
- // Get the function for this frame from the VM.
- var f = this.details_.func();
-
- // Create a function mirror. NOTE: MakeMirror cannot be used here as the
- // value returned from the VM might be a string if the function for the
- // frame is unresolved.
- if (IS_FUNCTION(f)) {
- return MakeMirror(f);
- } else {
- return new UnresolvedFunctionMirror(f);
- }
-};
-
-
-FrameMirror.prototype.receiver = function() {
- return MakeMirror(this.details_.receiver());
-};
-
-
-FrameMirror.prototype.isConstructCall = function() {
- return this.details_.isConstructCall();
-};
-
-
-FrameMirror.prototype.isAtReturn = function() {
- return this.details_.isAtReturn();
-};
-
-
-FrameMirror.prototype.isDebuggerFrame = function() {
- return this.details_.isDebuggerFrame();
-};
-
-
-FrameMirror.prototype.isOptimizedFrame = function() {
- return this.details_.isOptimizedFrame();
-};
-
-
-FrameMirror.prototype.isInlinedFrame = function() {
- return this.details_.isInlinedFrame();
-};
-
-
-FrameMirror.prototype.inlinedFrameIndex = function() {
- return this.details_.inlinedFrameIndex();
-};
-
-
-FrameMirror.prototype.argumentCount = function() {
- return this.details_.argumentCount();
-};
-
-
-FrameMirror.prototype.argumentName = function(index) {
- return this.details_.argumentName(index);
-};
-
-
-FrameMirror.prototype.argumentValue = function(index) {
- return MakeMirror(this.details_.argumentValue(index));
-};
-
-
-FrameMirror.prototype.localCount = function() {
- return this.details_.localCount();
-};
-
-
-FrameMirror.prototype.localName = function(index) {
- return this.details_.localName(index);
-};
-
-
-FrameMirror.prototype.localValue = function(index) {
- return MakeMirror(this.details_.localValue(index));
-};
-
-
-FrameMirror.prototype.returnValue = function() {
- return MakeMirror(this.details_.returnValue());
-};
-
-
-FrameMirror.prototype.sourcePosition = function() {
- return this.details_.sourcePosition();
-};
-
-
-FrameMirror.prototype.sourceLocation = function() {
- if (this.func().resolved() && this.func().script()) {
- return this.func().script().locationFromPosition(this.sourcePosition(),
- true);
- }
-};
-
-
-FrameMirror.prototype.sourceLine = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.line;
- }
- }
-};
-
-
-FrameMirror.prototype.sourceColumn = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.column;
- }
- }
-};
-
-
-FrameMirror.prototype.sourceLineText = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.sourceText();
- }
- }
-};
-
-
-FrameMirror.prototype.scopeCount = function() {
- return this.details_.scopeCount();
-};
-
-
-FrameMirror.prototype.scope = function(index) {
- return new ScopeMirror(this, void 0, index);
-};
-
-
-FrameMirror.prototype.evaluate = function(source, disable_break,
- opt_context_object) {
- var result = %DebugEvaluate(this.break_id_,
- this.details_.frameId(),
- this.details_.inlinedFrameIndex(),
- source,
- Boolean(disable_break),
- opt_context_object);
- return MakeMirror(result);
-};
-
-
-FrameMirror.prototype.invocationText = function() {
- // Format frame invoaction (receiver, function and arguments).
- var result = '';
- var func = this.func();
- var receiver = this.receiver();
- if (this.isConstructCall()) {
- // For constructor frames display new followed by the function name.
- result += 'new ';
- result += func.name() ? func.name() : '[anonymous]';
- } else if (this.isDebuggerFrame()) {
- result += '[debugger]';
- } else {
- // If the receiver has a className which is 'global' don't display it.
- var display_receiver =
- !receiver.className || (receiver.className() != 'global');
- if (display_receiver) {
- result += receiver.toText();
- }
- // Try to find the function as a property in the receiver. Include the
- // prototype chain in the lookup.
- var property = GetUndefinedMirror();
- if (receiver.isObject()) {
- for (var r = receiver;
- !r.isNull() && property.isUndefined();
- r = r.protoObject()) {
- property = r.lookupProperty(func);
- }
- }
- if (!property.isUndefined()) {
- // The function invoked was found on the receiver. Use the property name
- // for the backtrace.
- if (!property.isIndexed()) {
- if (display_receiver) {
- result += '.';
- }
- result += property.name();
- } else {
- result += '[';
- result += property.name();
- result += ']';
- }
- // Also known as - if the name in the function doesn't match the name
- // under which it was looked up.
- if (func.name() && func.name() != property.name()) {
- result += '(aka ' + func.name() + ')';
- }
- } else {
- // The function invoked was not found on the receiver. Use the function
- // name if available for the backtrace.
- if (display_receiver) {
- result += '.';
- }
- result += func.name() ? func.name() : '[anonymous]';
- }
- }
-
- // Render arguments for normal frames.
- if (!this.isDebuggerFrame()) {
- result += '(';
- for (var i = 0; i < this.argumentCount(); i++) {
- if (i != 0) result += ', ';
- if (this.argumentName(i)) {
- result += this.argumentName(i);
- result += '=';
- }
- result += this.argumentValue(i).toText();
- }
- result += ')';
- }
-
- if (this.isAtReturn()) {
- result += ' returning ';
- result += this.returnValue().toText();
- }
-
- return result;
-};
-
-
-FrameMirror.prototype.sourceAndPositionText = function() {
- // Format source and position.
- var result = '';
- var func = this.func();
- if (func.resolved()) {
- if (func.script()) {
- if (func.script().name()) {
- result += func.script().name();
- } else {
- result += '[unnamed]';
- }
- if (!this.isDebuggerFrame()) {
- var location = this.sourceLocation();
- result += ' line ';
- result += !IS_UNDEFINED(location) ? (location.line + 1) : '?';
- result += ' column ';
- result += !IS_UNDEFINED(location) ? (location.column + 1) : '?';
- if (!IS_UNDEFINED(this.sourcePosition())) {
- result += ' (position ' + (this.sourcePosition() + 1) + ')';
- }
- }
- } else {
- result += '[no source]';
- }
- } else {
- result += '[unresolved]';
- }
-
- return result;
-};
-
-
-FrameMirror.prototype.localsText = function() {
- // Format local variables.
- var result = '';
- var locals_count = this.localCount();
- if (locals_count > 0) {
- for (var i = 0; i < locals_count; ++i) {
- result += ' var ';
- result += this.localName(i);
- result += ' = ';
- result += this.localValue(i).toText();
- if (i < locals_count - 1) result += '\n';
- }
- }
-
- return result;
-};
-
-
-FrameMirror.prototype.restart = function() {
- var result = %LiveEditRestartFrame(this.break_id_, this.index_);
- if (IS_UNDEFINED(result)) {
- result = "Failed to find requested frame";
- }
- return result;
-};
-
-
-FrameMirror.prototype.toText = function(opt_locals) {
- var result = '';
- result += '#' + (this.index() <= 9 ? '0' : '') + this.index();
- result += ' ';
- result += this.invocationText();
- result += ' ';
- result += this.sourceAndPositionText();
- if (opt_locals) {
- result += '\n';
- result += this.localsText();
- }
- return result;
-};
-
-
-var kScopeDetailsTypeIndex = 0;
-var kScopeDetailsObjectIndex = 1;
-
-function ScopeDetails(frame, fun, index) {
- if (frame) {
- this.break_id_ = frame.break_id_;
- this.details_ = %GetScopeDetails(frame.break_id_,
- frame.details_.frameId(),
- frame.details_.inlinedFrameIndex(),
- index);
- this.frame_id_ = frame.details_.frameId();
- this.inlined_frame_id_ = frame.details_.inlinedFrameIndex();
- } else {
- this.details_ = %GetFunctionScopeDetails(fun.value(), index);
- this.fun_value_ = fun.value();
- this.break_id_ = undefined;
- }
- this.index_ = index;
-}
-
-
-ScopeDetails.prototype.type = function() {
- if (!IS_UNDEFINED(this.break_id_)) {
- %CheckExecutionState(this.break_id_);
- }
- return this.details_[kScopeDetailsTypeIndex];
-};
-
-
-ScopeDetails.prototype.object = function() {
- if (!IS_UNDEFINED(this.break_id_)) {
- %CheckExecutionState(this.break_id_);
- }
- return this.details_[kScopeDetailsObjectIndex];
-};
-
-
-ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
- var raw_res;
- if (!IS_UNDEFINED(this.break_id_)) {
- %CheckExecutionState(this.break_id_);
- raw_res = %SetScopeVariableValue(this.break_id_, this.frame_id_,
- this.inlined_frame_id_, this.index_, name, new_value);
- } else {
- raw_res = %SetScopeVariableValue(this.fun_value_, null, null, this.index_,
- name, new_value);
- }
- if (!raw_res) {
- throw new Error("Failed to set variable value");
- }
-};
-
-
-/**
- * Mirror object for scope of frame or function. Either frame or function must
- * be specified.
- * @param {FrameMirror} frame The frame this scope is a part of
- * @param {FunctionMirror} function The function this scope is a part of
- * @param {number} index The scope index in the frame
- * @constructor
- * @extends Mirror
- */
-function ScopeMirror(frame, function, index) {
- %_CallFunction(this, SCOPE_TYPE, Mirror);
- if (frame) {
- this.frame_index_ = frame.index_;
- } else {
- this.frame_index_ = undefined;
- }
- this.scope_index_ = index;
- this.details_ = new ScopeDetails(frame, function, index);
-}
-inherits(ScopeMirror, Mirror);
-
-
-ScopeMirror.prototype.frameIndex = function() {
- return this.frame_index_;
-};
-
-
-ScopeMirror.prototype.scopeIndex = function() {
- return this.scope_index_;
-};
-
-
-ScopeMirror.prototype.scopeType = function() {
- return this.details_.type();
-};
-
-
-ScopeMirror.prototype.scopeObject = function() {
- // For local and closure scopes create a transient mirror as these objects are
- // created on the fly materializing the local or closure scopes and
- // therefore will not preserve identity.
- var transient = this.scopeType() == ScopeType.Local ||
- this.scopeType() == ScopeType.Closure;
- return MakeMirror(this.details_.object(), transient);
-};
-
-
-ScopeMirror.prototype.setVariableValue = function(name, new_value) {
- this.details_.setVariableValueImpl(name, new_value);
-};
-
-
-/**
- * Mirror object for script source.
- * @param {Script} script The script object
- * @constructor
- * @extends Mirror
- */
-function ScriptMirror(script) {
- %_CallFunction(this, SCRIPT_TYPE, Mirror);
- this.script_ = script;
- this.context_ = new ContextMirror(script.context_data);
- this.allocateHandle_();
-}
-inherits(ScriptMirror, Mirror);
-
-
-ScriptMirror.prototype.value = function() {
- return this.script_;
-};
-
-
-ScriptMirror.prototype.name = function() {
- return this.script_.name || this.script_.nameOrSourceURL();
-};
-
-
-ScriptMirror.prototype.id = function() {
- return this.script_.id;
-};
-
-
-ScriptMirror.prototype.source = function() {
- return this.script_.source;
-};
-
-
-ScriptMirror.prototype.setSource = function(source) {
- %DebugSetScriptSource(this.script_, source);
-};
-
-
-ScriptMirror.prototype.lineOffset = function() {
- return this.script_.line_offset;
-};
-
-
-ScriptMirror.prototype.columnOffset = function() {
- return this.script_.column_offset;
-};
-
-
-ScriptMirror.prototype.data = function() {
- return this.script_.data;
-};
-
-
-ScriptMirror.prototype.scriptType = function() {
- return this.script_.type;
-};
-
-
-ScriptMirror.prototype.compilationType = function() {
- return this.script_.compilation_type;
-};
-
-
-ScriptMirror.prototype.lineCount = function() {
- return this.script_.lineCount();
-};
-
-
-ScriptMirror.prototype.locationFromPosition = function(
- position, include_resource_offset) {
- return this.script_.locationFromPosition(position, include_resource_offset);
-};
-
-
-ScriptMirror.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
- return this.script_.sourceSlice(opt_from_line, opt_to_line);
-};
-
-
-ScriptMirror.prototype.context = function() {
- return this.context_;
-};
-
-
-ScriptMirror.prototype.evalFromScript = function() {
- return MakeMirror(this.script_.eval_from_script);
-};
-
-
-ScriptMirror.prototype.evalFromFunctionName = function() {
- return MakeMirror(this.script_.eval_from_function_name);
-};
-
-
-ScriptMirror.prototype.evalFromLocation = function() {
- var eval_from_script = this.evalFromScript();
- if (!eval_from_script.isUndefined()) {
- var position = this.script_.eval_from_script_position;
- return eval_from_script.locationFromPosition(position, true);
- }
-};
-
-
-ScriptMirror.prototype.toText = function() {
- var result = '';
- result += this.name();
- result += ' (lines: ';
- if (this.lineOffset() > 0) {
- result += this.lineOffset();
- result += '-';
- result += this.lineOffset() + this.lineCount() - 1;
- } else {
- result += this.lineCount();
- }
- result += ')';
- return result;
-};
-
-
-/**
- * Mirror object for context.
- * @param {Object} data The context data
- * @constructor
- * @extends Mirror
- */
-function ContextMirror(data) {
- %_CallFunction(this, CONTEXT_TYPE, Mirror);
- this.data_ = data;
- this.allocateHandle_();
-}
-inherits(ContextMirror, Mirror);
-
-
-ContextMirror.prototype.data = function() {
- return this.data_;
-};
-
-
-/**
- * Returns a mirror serializer
- *
- * @param {boolean} details Set to true to include details
- * @param {Object} options Options comtrolling the serialization
- * The following options can be set:
- * includeSource: include ths full source of scripts
- * @returns {MirrorSerializer} mirror serializer
- */
-function MakeMirrorSerializer(details, options) {
- return new JSONProtocolSerializer(details, options);
-}
-
-
-/**
- * Object for serializing a mirror objects and its direct references.
- * @param {boolean} details Indicates whether to include details for the mirror
- * serialized
- * @constructor
- */
-function JSONProtocolSerializer(details, options) {
- this.details_ = details;
- this.options_ = options;
- this.mirrors_ = [ ];
-}
-
-
-/**
- * Returns a serialization of an object reference. The referenced object are
- * added to the serialization state.
- *
- * @param {Mirror} mirror The mirror to serialize
- * @returns {String} JSON serialization
- */
-JSONProtocolSerializer.prototype.serializeReference = function(mirror) {
- return this.serialize_(mirror, true, true);
-};
-
-
-/**
- * Returns a serialization of an object value. The referenced objects are
- * added to the serialization state.
- *
- * @param {Mirror} mirror The mirror to serialize
- * @returns {String} JSON serialization
- */
-JSONProtocolSerializer.prototype.serializeValue = function(mirror) {
- var json = this.serialize_(mirror, false, true);
- return json;
-};
-
-
-/**
- * Returns a serialization of all the objects referenced.
- *
- * @param {Mirror} mirror The mirror to serialize.
- * @returns {Array.<Object>} Array of the referenced objects converted to
- * protcol objects.
- */
-JSONProtocolSerializer.prototype.serializeReferencedObjects = function() {
- // Collect the protocol representation of the referenced objects in an array.
- var content = [];
-
- // Get the number of referenced objects.
- var count = this.mirrors_.length;
-
- for (var i = 0; i < count; i++) {
- content.push(this.serialize_(this.mirrors_[i], false, false));
- }
-
- return content;
-};
-
-
-JSONProtocolSerializer.prototype.includeSource_ = function() {
- return this.options_ && this.options_.includeSource;
-};
-
-
-JSONProtocolSerializer.prototype.inlineRefs_ = function() {
- return this.options_ && this.options_.inlineRefs;
-};
-
-
-JSONProtocolSerializer.prototype.maxStringLength_ = function() {
- if (IS_UNDEFINED(this.options_) ||
- IS_UNDEFINED(this.options_.maxStringLength)) {
- return kMaxProtocolStringLength;
- }
- return this.options_.maxStringLength;
-};
-
-
-JSONProtocolSerializer.prototype.add_ = function(mirror) {
- // If this mirror is already in the list just return.
- for (var i = 0; i < this.mirrors_.length; i++) {
- if (this.mirrors_[i] === mirror) {
- return;
- }
- }
-
- // Add the mirror to the list of mirrors to be serialized.
- this.mirrors_.push(mirror);
-};
-
-
-/**
- * Formats mirror object to protocol reference object with some data that can
- * be used to display the value in debugger.
- * @param {Mirror} mirror Mirror to serialize.
- * @return {Object} Protocol reference object.
- */
-JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ =
- function(mirror) {
- var o = {};
- o.ref = mirror.handle();
- o.type = mirror.type();
- switch (mirror.type()) {
- case UNDEFINED_TYPE:
- case NULL_TYPE:
- case BOOLEAN_TYPE:
- case NUMBER_TYPE:
- o.value = mirror.value();
- break;
- case STRING_TYPE:
- o.value = mirror.getTruncatedValue(this.maxStringLength_());
- break;
- case FUNCTION_TYPE:
- o.name = mirror.name();
- o.inferredName = mirror.inferredName();
- if (mirror.script()) {
- o.scriptId = mirror.script().id();
- }
- break;
- case ERROR_TYPE:
- case REGEXP_TYPE:
- o.value = mirror.toText();
- break;
- case OBJECT_TYPE:
- o.className = mirror.className();
- break;
- }
- return o;
-};
-
-
-JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
- details) {
- // If serializing a reference to a mirror just return the reference and add
- // the mirror to the referenced mirrors.
- if (reference &&
- (mirror.isValue() || mirror.isScript() || mirror.isContext())) {
- if (this.inlineRefs_() && mirror.isValue()) {
- return this.serializeReferenceWithDisplayData_(mirror);
- } else {
- this.add_(mirror);
- return {'ref' : mirror.handle()};
- }
- }
-
- // Collect the JSON property/value pairs.
- var content = {};
-
- // Add the mirror handle.
- if (mirror.isValue() || mirror.isScript() || mirror.isContext()) {
- content.handle = mirror.handle();
- }
-
- // Always add the type.
- content.type = mirror.type();
-
- switch (mirror.type()) {
- case UNDEFINED_TYPE:
- case NULL_TYPE:
- // Undefined and null are represented just by their type.
- break;
-
- case BOOLEAN_TYPE:
- // Boolean values are simply represented by their value.
- content.value = mirror.value();
- break;
-
- case NUMBER_TYPE:
- // Number values are simply represented by their value.
- content.value = NumberToJSON_(mirror.value());
- break;
-
- case STRING_TYPE:
- // String values might have their value cropped to keep down size.
- if (this.maxStringLength_() != -1 &&
- mirror.length() > this.maxStringLength_()) {
- var str = mirror.getTruncatedValue(this.maxStringLength_());
- content.value = str;
- content.fromIndex = 0;
- content.toIndex = this.maxStringLength_();
- } else {
- content.value = mirror.value();
- }
- content.length = mirror.length();
- break;
-
- case OBJECT_TYPE:
- case FUNCTION_TYPE:
- case ERROR_TYPE:
- case REGEXP_TYPE:
- // Add object representation.
- this.serializeObject_(mirror, content, details);
- break;
-
- case PROPERTY_TYPE:
- case INTERNAL_PROPERTY_TYPE:
- throw new Error('PropertyMirror cannot be serialized independently');
- break;
-
- case FRAME_TYPE:
- // Add object representation.
- this.serializeFrame_(mirror, content);
- break;
-
- case SCOPE_TYPE:
- // Add object representation.
- this.serializeScope_(mirror, content);
- break;
-
- case SCRIPT_TYPE:
- // Script is represented by id, name and source attributes.
- if (mirror.name()) {
- content.name = mirror.name();
- }
- content.id = mirror.id();
- content.lineOffset = mirror.lineOffset();
- content.columnOffset = mirror.columnOffset();
- content.lineCount = mirror.lineCount();
- if (mirror.data()) {
- content.data = mirror.data();
- }
- if (this.includeSource_()) {
- content.source = mirror.source();
- } else {
- var sourceStart = mirror.source().substring(0, 80);
- content.sourceStart = sourceStart;
- }
- content.sourceLength = mirror.source().length;
- content.scriptType = mirror.scriptType();
- content.compilationType = mirror.compilationType();
- // For compilation type eval emit information on the script from which
- // eval was called if a script is present.
- if (mirror.compilationType() == 1 &&
- mirror.evalFromScript()) {
- content.evalFromScript =
- this.serializeReference(mirror.evalFromScript());
- var evalFromLocation = mirror.evalFromLocation();
- if (evalFromLocation) {
- content.evalFromLocation = { line: evalFromLocation.line,
- column: evalFromLocation.column };
- }
- if (mirror.evalFromFunctionName()) {
- content.evalFromFunctionName = mirror.evalFromFunctionName();
- }
- }
- if (mirror.context()) {
- content.context = this.serializeReference(mirror.context());
- }
- break;
-
- case CONTEXT_TYPE:
- content.data = mirror.data();
- break;
- }
-
- // Always add the text representation.
- content.text = mirror.toText();
-
- // Create and return the JSON string.
- return content;
-};
-
-
-/**
- * Serialize object information to the following JSON format.
- *
- * {"className":"<class name>",
- * "constructorFunction":{"ref":<number>},
- * "protoObject":{"ref":<number>},
- * "prototypeObject":{"ref":<number>},
- * "namedInterceptor":<boolean>,
- * "indexedInterceptor":<boolean>,
- * "properties":[<properties>],
- * "internalProperties":[<internal properties>]}
- */
-JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
- details) {
- // Add general object properties.
- content.className = mirror.className();
- content.constructorFunction =
- this.serializeReference(mirror.constructorFunction());
- content.protoObject = this.serializeReference(mirror.protoObject());
- content.prototypeObject = this.serializeReference(mirror.prototypeObject());
-
- // Add flags to indicate whether there are interceptors.
- if (mirror.hasNamedInterceptor()) {
- content.namedInterceptor = true;
- }
- if (mirror.hasIndexedInterceptor()) {
- content.indexedInterceptor = true;
- }
-
- // Add function specific properties.
- if (mirror.isFunction()) {
- // Add function specific properties.
- content.name = mirror.name();
- if (!IS_UNDEFINED(mirror.inferredName())) {
- content.inferredName = mirror.inferredName();
- }
- content.resolved = mirror.resolved();
- if (mirror.resolved()) {
- content.source = mirror.source();
- }
- if (mirror.script()) {
- content.script = this.serializeReference(mirror.script());
- content.scriptId = mirror.script().id();
-
- serializeLocationFields(mirror.sourceLocation(), content);
- }
-
- content.scopes = [];
- for (var i = 0; i < mirror.scopeCount(); i++) {
- var scope = mirror.scope(i);
- content.scopes.push({
- type: scope.scopeType(),
- index: i
- });
- }
- }
-
- // Add date specific properties.
- if (mirror.isDate()) {
- // Add date specific properties.
- content.value = mirror.value();
- }
-
- // Add actual properties - named properties followed by indexed properties.
- var propertyNames = mirror.propertyNames(PropertyKind.Named);
- var propertyIndexes = mirror.propertyNames(PropertyKind.Indexed);
- var p = new Array(propertyNames.length + propertyIndexes.length);
- for (var i = 0; i < propertyNames.length; i++) {
- var propertyMirror = mirror.property(propertyNames[i]);
- p[i] = this.serializeProperty_(propertyMirror);
- if (details) {
- this.add_(propertyMirror.value());
- }
- }
- for (var i = 0; i < propertyIndexes.length; i++) {
- var propertyMirror = mirror.property(propertyIndexes[i]);
- p[propertyNames.length + i] = this.serializeProperty_(propertyMirror);
- if (details) {
- this.add_(propertyMirror.value());
- }
- }
- content.properties = p;
-
- var internalProperties = mirror.internalProperties();
- if (internalProperties.length > 0) {
- var ip = [];
- for (var i = 0; i < internalProperties.length; i++) {
- ip.push(this.serializeInternalProperty_(internalProperties[i]));
- }
- content.internalProperties = ip;
- }
-};
-
-
-/**
- * Serialize location information to the following JSON format:
- *
- * "position":"<position>",
- * "line":"<line>",
- * "column":"<column>",
- *
- * @param {SourceLocation} location The location to serialize, may be undefined.
- */
-function serializeLocationFields (location, content) {
- if (!location) {
- return;
- }
- content.position = location.position;
- var line = location.line;
- if (!IS_UNDEFINED(line)) {
- content.line = line;
- }
- var column = location.column;
- if (!IS_UNDEFINED(column)) {
- content.column = column;
- }
-}
-
-
-/**
- * Serialize property information to the following JSON format for building the
- * array of properties.
- *
- * {"name":"<property name>",
- * "attributes":<number>,
- * "propertyType":<number>,
- * "ref":<number>}
- *
- * If the attribute for the property is PropertyAttribute.None it is not added.
- * If the propertyType for the property is PropertyType.Normal it is not added.
- * Here are a couple of examples.
- *
- * {"name":"hello","ref":1}
- * {"name":"length","attributes":7,"propertyType":3,"ref":2}
- *
- * @param {PropertyMirror} propertyMirror The property to serialize.
- * @returns {Object} Protocol object representing the property.
- */
-JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
- var result = {};
-
- result.name = propertyMirror.name();
- var propertyValue = propertyMirror.value();
- if (this.inlineRefs_() && propertyValue.isValue()) {
- result.value = this.serializeReferenceWithDisplayData_(propertyValue);
- } else {
- if (propertyMirror.attributes() != PropertyAttribute.None) {
- result.attributes = propertyMirror.attributes();
- }
- if (propertyMirror.propertyType() != PropertyType.Normal) {
- result.propertyType = propertyMirror.propertyType();
- }
- result.ref = propertyValue.handle();
- }
- return result;
-};
-
-
-/**
- * Serialize internal property information to the following JSON format for
- * building the array of properties.
- *
- * {"name":"<property name>",
- * "ref":<number>}
- *
- * {"name":"[[BoundThis]]","ref":117}
- *
- * @param {InternalPropertyMirror} propertyMirror The property to serialize.
- * @returns {Object} Protocol object representing the property.
- */
-JSONProtocolSerializer.prototype.serializeInternalProperty_ =
- function(propertyMirror) {
- var result = {};
-
- result.name = propertyMirror.name();
- var propertyValue = propertyMirror.value();
- if (this.inlineRefs_() && propertyValue.isValue()) {
- result.value = this.serializeReferenceWithDisplayData_(propertyValue);
- } else {
- result.ref = propertyValue.handle();
- }
- return result;
-};
-
-
-JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
- content.index = mirror.index();
- content.receiver = this.serializeReference(mirror.receiver());
- var func = mirror.func();
- content.func = this.serializeReference(func);
- if (func.script()) {
- content.script = this.serializeReference(func.script());
- }
- content.constructCall = mirror.isConstructCall();
- content.atReturn = mirror.isAtReturn();
- if (mirror.isAtReturn()) {
- content.returnValue = this.serializeReference(mirror.returnValue());
- }
- content.debuggerFrame = mirror.isDebuggerFrame();
- var x = new Array(mirror.argumentCount());
- for (var i = 0; i < mirror.argumentCount(); i++) {
- var arg = {};
- var argument_name = mirror.argumentName(i);
- if (argument_name) {
- arg.name = argument_name;
- }
- arg.value = this.serializeReference(mirror.argumentValue(i));
- x[i] = arg;
- }
- content.arguments = x;
- var x = new Array(mirror.localCount());
- for (var i = 0; i < mirror.localCount(); i++) {
- var local = {};
- local.name = mirror.localName(i);
- local.value = this.serializeReference(mirror.localValue(i));
- x[i] = local;
- }
- content.locals = x;
- serializeLocationFields(mirror.sourceLocation(), content);
- var source_line_text = mirror.sourceLineText();
- if (!IS_UNDEFINED(source_line_text)) {
- content.sourceLineText = source_line_text;
- }
-
- content.scopes = [];
- for (var i = 0; i < mirror.scopeCount(); i++) {
- var scope = mirror.scope(i);
- content.scopes.push({
- type: scope.scopeType(),
- index: i
- });
- }
-};
-
-
-JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
- content.index = mirror.scopeIndex();
- content.frameIndex = mirror.frameIndex();
- content.type = mirror.scopeType();
- content.object = this.inlineRefs_() ?
- this.serializeValue(mirror.scopeObject()) :
- this.serializeReference(mirror.scopeObject());
-};
-
-
-/**
- * Convert a number to a protocol value. For all finite numbers the number
- * itself is returned. For non finite numbers NaN, Infinite and
- * -Infinite the string representation "NaN", "Infinite" or "-Infinite"
- * (not including the quotes) is returned.
- *
- * @param {number} value The number value to convert to a protocol value.
- * @returns {number|string} Protocol value.
- */
-function NumberToJSON_(value) {
- if (isNaN(value)) {
- return 'NaN';
- }
- if (!NUMBER_IS_FINITE(value)) {
- if (value > 0) {
- return 'Infinity';
- } else {
- return '-Infinity';
- }
- }
- return value;
-}
diff --git a/src/3rdparty/v8/src/misc-intrinsics.h b/src/3rdparty/v8/src/misc-intrinsics.h
deleted file mode 100644
index c1da8a9..0000000
--- a/src/3rdparty/v8/src/misc-intrinsics.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MISC_INTRINSICS_H_
-#define V8_MISC_INTRINSICS_H_
-
-#include "../include/v8.h"
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-// Returns the index of the leading 1 bit, counting the least significant bit at
-// index 0. (1 << IntegerLog2(x)) is a mask for the most significant bit of x.
-// Result is undefined if input is zero.
-int IntegerLog2(uint32_t value);
-
-#if defined(__GNUC__)
-
-inline int IntegerLog2(uint32_t value) {
- return 31 - __builtin_clz(value);
-}
-
-#elif defined(_MSC_VER) && !defined(_WIN32_WCE)
-
-#pragma intrinsic(_BitScanReverse)
-
-inline int IntegerLog2(uint32_t value) {
- unsigned long result; // NOLINT: MSVC intrinsic demands this type.
- _BitScanReverse(&result, value);
- return result;
-}
-
-#else
-
-// Default version using regular operations. Code taken from:
-// http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog
-inline int IntegerLog2(uint32_t value) {
- int result, shift;
-
- shift = (value > 0xFFFF) << 4;
- value >>= shift;
- result = shift;
-
- shift = (value > 0xFF) << 3;
- value >>= shift;
- result |= shift;
-
- shift = (value > 0xF) << 2;
- value >>= shift;
- result |= shift;
-
- shift = (value > 0x3) << 1;
- value >>= shift;
- result |= shift;
-
- result |= (value >> 1);
-
- return result;
-}
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_MISC_INTRINSICS_H_
diff --git a/src/3rdparty/v8/src/mksnapshot.cc b/src/3rdparty/v8/src/mksnapshot.cc
deleted file mode 100644
index a3665e9..0000000
--- a/src/3rdparty/v8/src/mksnapshot.cc
+++ /dev/null
@@ -1,426 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <errno.h>
-#include <stdio.h>
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-#include <bzlib.h>
-#endif
-#include <signal.h>
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "flags.h"
-#include "natives.h"
-#include "platform.h"
-#include "serialize.h"
-#include "list.h"
-
-using namespace v8;
-
-static const unsigned int kMaxCounters = 256;
-
-// A single counter in a counter collection.
-class Counter {
- public:
- static const int kMaxNameSize = 64;
- int32_t* Bind(const char* name) {
- int i;
- for (i = 0; i < kMaxNameSize - 1 && name[i]; i++) {
- name_[i] = name[i];
- }
- name_[i] = '\0';
- return &counter_;
- }
- private:
- int32_t counter_;
- uint8_t name_[kMaxNameSize];
-};
-
-
-// A set of counters and associated information. An instance of this
-// class is stored directly in the memory-mapped counters file if
-// the --save-counters options is used
-class CounterCollection {
- public:
- CounterCollection() {
- magic_number_ = 0xDEADFACE;
- max_counters_ = kMaxCounters;
- max_name_size_ = Counter::kMaxNameSize;
- counters_in_use_ = 0;
- }
- Counter* GetNextCounter() {
- if (counters_in_use_ == kMaxCounters) return NULL;
- return &counters_[counters_in_use_++];
- }
- private:
- uint32_t magic_number_;
- uint32_t max_counters_;
- uint32_t max_name_size_;
- uint32_t counters_in_use_;
- Counter counters_[kMaxCounters];
-};
-
-
-class Compressor {
- public:
- virtual ~Compressor() {}
- virtual bool Compress(i::Vector<char> input) = 0;
- virtual i::Vector<char>* output() = 0;
-};
-
-
-class PartialSnapshotSink : public i::SnapshotByteSink {
- public:
- PartialSnapshotSink() : data_(), raw_size_(-1) { }
- virtual ~PartialSnapshotSink() { data_.Free(); }
- virtual void Put(int byte, const char* description) {
- data_.Add(byte);
- }
- virtual int Position() { return data_.length(); }
- void Print(FILE* fp) {
- int length = Position();
- for (int j = 0; j < length; j++) {
- if ((j & 0x1f) == 0x1f) {
- fprintf(fp, "\n");
- }
- if (j != 0) {
- fprintf(fp, ",");
- }
- fprintf(fp, "%u", static_cast<unsigned char>(at(j)));
- }
- }
- char at(int i) { return data_[i]; }
- bool Compress(Compressor* compressor) {
- ASSERT_EQ(-1, raw_size_);
- raw_size_ = data_.length();
- if (!compressor->Compress(data_.ToVector())) return false;
- data_.Clear();
- data_.AddAll(*compressor->output());
- return true;
- }
- int raw_size() { return raw_size_; }
-
- private:
- i::List<char> data_;
- int raw_size_;
-};
-
-
-class CppByteSink : public PartialSnapshotSink {
- public:
- explicit CppByteSink(const char* snapshot_file) {
- fp_ = i::OS::FOpen(snapshot_file, "wb");
- if (fp_ == NULL) {
- i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
- exit(1);
- }
- fprintf(fp_, "// Autogenerated snapshot file. Do not edit.\n\n");
- fprintf(fp_, "#include \"v8.h\"\n");
- fprintf(fp_, "#include \"platform.h\"\n\n");
- fprintf(fp_, "#include \"snapshot.h\"\n\n");
- fprintf(fp_, "namespace v8 {\nnamespace internal {\n\n");
- fprintf(fp_, "const byte Snapshot::data_[] = {");
- }
-
- virtual ~CppByteSink() {
- fprintf(fp_, "const int Snapshot::size_ = %d;\n", Position());
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- fprintf(fp_, "const byte* Snapshot::raw_data_ = NULL;\n");
- fprintf(fp_,
- "const int Snapshot::raw_size_ = %d;\n\n",
- raw_size());
-#else
- fprintf(fp_,
- "const byte* Snapshot::raw_data_ = Snapshot::data_;\n");
- fprintf(fp_,
- "const int Snapshot::raw_size_ = Snapshot::size_;\n\n");
-#endif
- fprintf(fp_, "} } // namespace v8::internal\n");
- fclose(fp_);
- }
-
- void WriteSpaceUsed(
- const char* prefix,
- int new_space_used,
- int pointer_space_used,
- int data_space_used,
- int code_space_used,
- int map_space_used,
- int cell_space_used) {
- fprintf(fp_,
- "const int Snapshot::%snew_space_used_ = %d;\n",
- prefix,
- new_space_used);
- fprintf(fp_,
- "const int Snapshot::%spointer_space_used_ = %d;\n",
- prefix,
- pointer_space_used);
- fprintf(fp_,
- "const int Snapshot::%sdata_space_used_ = %d;\n",
- prefix,
- data_space_used);
- fprintf(fp_,
- "const int Snapshot::%scode_space_used_ = %d;\n",
- prefix,
- code_space_used);
- fprintf(fp_,
- "const int Snapshot::%smap_space_used_ = %d;\n",
- prefix,
- map_space_used);
- fprintf(fp_,
- "const int Snapshot::%scell_space_used_ = %d;\n",
- prefix,
- cell_space_used);
- }
-
- void WritePartialSnapshot() {
- int length = partial_sink_.Position();
- fprintf(fp_, "};\n\n");
- fprintf(fp_, "const int Snapshot::context_size_ = %d;\n", length);
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- fprintf(fp_,
- "const int Snapshot::context_raw_size_ = %d;\n",
- partial_sink_.raw_size());
-#else
- fprintf(fp_,
- "const int Snapshot::context_raw_size_ = "
- "Snapshot::context_size_;\n");
-#endif
- fprintf(fp_, "const byte Snapshot::context_data_[] = {\n");
- partial_sink_.Print(fp_);
- fprintf(fp_, "};\n\n");
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- fprintf(fp_, "const byte* Snapshot::context_raw_data_ = NULL;\n");
-#else
- fprintf(fp_, "const byte* Snapshot::context_raw_data_ ="
- " Snapshot::context_data_;\n");
-#endif
- }
-
- void WriteSnapshot() {
- Print(fp_);
- }
-
- PartialSnapshotSink* partial_sink() { return &partial_sink_; }
-
- private:
- FILE* fp_;
- PartialSnapshotSink partial_sink_;
-};
-
-
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-class BZip2Compressor : public Compressor {
- public:
- BZip2Compressor() : output_(NULL) {}
- virtual ~BZip2Compressor() {
- delete output_;
- }
- virtual bool Compress(i::Vector<char> input) {
- delete output_;
- output_ = new i::ScopedVector<char>((input.length() * 101) / 100 + 1000);
- unsigned int output_length_ = output_->length();
- int result = BZ2_bzBuffToBuffCompress(output_->start(), &output_length_,
- input.start(), input.length(),
- 9, 1, 0);
- if (result == BZ_OK) {
- output_->Truncate(output_length_);
- return true;
- } else {
- fprintf(stderr, "bzlib error code: %d\n", result);
- return false;
- }
- }
- virtual i::Vector<char>* output() { return output_; }
-
- private:
- i::ScopedVector<char>* output_;
-};
-
-
-class BZip2Decompressor : public StartupDataDecompressor {
- public:
- virtual ~BZip2Decompressor() { }
-
- protected:
- virtual int DecompressData(char* raw_data,
- int* raw_data_size,
- const char* compressed_data,
- int compressed_data_size) {
- ASSERT_EQ(StartupData::kBZip2,
- V8::GetCompressedStartupDataAlgorithm());
- unsigned int decompressed_size = *raw_data_size;
- int result =
- BZ2_bzBuffToBuffDecompress(raw_data,
- &decompressed_size,
- const_cast<char*>(compressed_data),
- compressed_data_size,
- 0, 1);
- if (result == BZ_OK) {
- *raw_data_size = decompressed_size;
- }
- return result;
- }
-};
-#endif
-
-
-int main(int argc, char** argv) {
- // By default, log code create information in the snapshot.
- i::FLAG_log_code = true;
-
- // Print the usage if an error occurs when parsing the command line
- // flags or if the help flag is set.
- int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
- if (result > 0 || argc != 2 || i::FLAG_help) {
- ::printf("Usage: %s [flag] ... outfile\n", argv[0]);
- i::FlagList::PrintHelp();
- return !i::FLAG_help;
- }
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- BZip2Decompressor natives_decompressor;
- int bz2_result = natives_decompressor.Decompress();
- if (bz2_result != BZ_OK) {
- fprintf(stderr, "bzip error code: %d\n", bz2_result);
- exit(1);
- }
-#endif
- i::Serializer::Enable();
- Persistent<Context> context = v8::Context::New();
- if (context.IsEmpty()) {
- fprintf(stderr,
- "\nException thrown while compiling natives - see above.\n\n");
- exit(1);
- }
- if (i::FLAG_extra_code != NULL) {
- context->Enter();
- // Capture 100 frames if anything happens.
- V8::SetCaptureStackTraceForUncaughtExceptions(true, 100);
- HandleScope scope;
- const char* name = i::FLAG_extra_code;
- FILE* file = i::OS::FOpen(name, "rb");
- if (file == NULL) {
- fprintf(stderr, "Failed to open '%s': errno %d\n", name, errno);
- exit(1);
- }
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
- rewind(file);
-
- char* chars = new char[size + 1];
- chars[size] = '\0';
- for (int i = 0; i < size;) {
- int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
- if (read < 0) {
- fprintf(stderr, "Failed to read '%s': errno %d\n", name, errno);
- exit(1);
- }
- i += read;
- }
- fclose(file);
- Local<String> source = String::New(chars);
- TryCatch try_catch;
- Local<Script> script = Script::Compile(source);
- if (try_catch.HasCaught()) {
- fprintf(stderr, "Failure compiling '%s' (see above)\n", name);
- exit(1);
- }
- script->Run();
- if (try_catch.HasCaught()) {
- fprintf(stderr, "Failure running '%s'\n", name);
- Local<Message> message = try_catch.Message();
- Local<String> message_string = message->Get();
- Local<String> message_line = message->GetSourceLine();
- int len = 2 + message_string->Utf8Length() + message_line->Utf8Length();
- char* buf = new char(len);
- message_string->WriteUtf8(buf);
- fprintf(stderr, "%s at line %d\n", buf, message->GetLineNumber());
- message_line->WriteUtf8(buf);
- fprintf(stderr, "%s\n", buf);
- int from = message->GetStartColumn();
- int to = message->GetEndColumn();
- int i;
- for (i = 0; i < from; i++) fprintf(stderr, " ");
- for ( ; i <= to; i++) fprintf(stderr, "^");
- fprintf(stderr, "\n");
- exit(1);
- }
- context->Exit();
- }
- // Make sure all builtin scripts are cached.
- { HandleScope scope;
- for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
- i::Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
- }
- }
- // If we don't do this then we end up with a stray root pointing at the
- // context even after we have disposed of the context.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags, "mksnapshot");
- i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
- context.Dispose(context->GetIsolate());
- CppByteSink sink(argv[1]);
- // This results in a somewhat smaller snapshot, probably because it gets rid
- // of some things that are cached between garbage collections.
- i::StartupSerializer ser(&sink);
- ser.SerializeStrongReferences();
-
- i::PartialSerializer partial_ser(&ser, sink.partial_sink());
- partial_ser.Serialize(&raw_context);
-
- ser.SerializeWeakReferences();
-
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- BZip2Compressor compressor;
- if (!sink.Compress(&compressor))
- return 1;
- if (!sink.partial_sink()->Compress(&compressor))
- return 1;
-#endif
- sink.WriteSnapshot();
- sink.WritePartialSnapshot();
-
- sink.WriteSpaceUsed(
- "context_",
- partial_ser.CurrentAllocationAddress(i::NEW_SPACE),
- partial_ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
- partial_ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
- partial_ser.CurrentAllocationAddress(i::CODE_SPACE),
- partial_ser.CurrentAllocationAddress(i::MAP_SPACE),
- partial_ser.CurrentAllocationAddress(i::CELL_SPACE));
- sink.WriteSpaceUsed(
- "",
- ser.CurrentAllocationAddress(i::NEW_SPACE),
- ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
- ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
- ser.CurrentAllocationAddress(i::CODE_SPACE),
- ser.CurrentAllocationAddress(i::MAP_SPACE),
- ser.CurrentAllocationAddress(i::CELL_SPACE));
- return 0;
-}
diff --git a/src/3rdparty/v8/src/natives.h b/src/3rdparty/v8/src/natives.h
deleted file mode 100644
index 5f34420..0000000
--- a/src/3rdparty/v8/src/natives.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_NATIVES_H_
-#define V8_NATIVES_H_
-
-namespace v8 {
-namespace internal {
-
-typedef bool (*NativeSourceCallback)(Vector<const char> name,
- Vector<const char> source,
- int index);
-
-enum NativeType {
- CORE, EXPERIMENTAL, D8, TEST
-};
-
-template <NativeType type>
-class NativesCollection {
- public:
- // Number of built-in scripts.
- static int GetBuiltinsCount();
- // Number of debugger implementation scripts.
- static int GetDebuggerCount();
-
- // These are used to access built-in scripts. The debugger implementation
- // scripts have an index in the interval [0, GetDebuggerCount()). The
- // non-debugger scripts have an index in the interval [GetDebuggerCount(),
- // GetNativesCount()).
- static int GetIndex(const char* name);
- static int GetRawScriptsSize();
- static Vector<const char> GetRawScriptSource(int index);
- static Vector<const char> GetScriptName(int index);
- static Vector<const byte> GetScriptsSource();
- static void SetRawScriptsSource(Vector<const char> raw_source);
-};
-
-typedef NativesCollection<CORE> Natives;
-typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
-
-} } // namespace v8::internal
-
-#endif // V8_NATIVES_H_
diff --git a/src/3rdparty/v8/src/object-observe.js b/src/3rdparty/v8/src/object-observe.js
deleted file mode 100644
index b35f547..0000000
--- a/src/3rdparty/v8/src/object-observe.js
+++ /dev/null
@@ -1,235 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"use strict";
-
-var observationState = %GetObservationState();
-if (IS_UNDEFINED(observationState.observerInfoMap)) {
- observationState.observerInfoMap = %ObservationWeakMapCreate();
- observationState.objectInfoMap = %ObservationWeakMapCreate();
- observationState.notifierTargetMap = %ObservationWeakMapCreate();
- observationState.pendingObservers = new InternalArray;
- observationState.observerPriority = 0;
-}
-
-function ObservationWeakMap(map) {
- this.map_ = map;
-}
-
-ObservationWeakMap.prototype = {
- get: function(key) {
- key = %UnwrapGlobalProxy(key);
- if (!IS_SPEC_OBJECT(key)) return void 0;
- return %WeakMapGet(this.map_, key);
- },
- set: function(key, value) {
- key = %UnwrapGlobalProxy(key);
- if (!IS_SPEC_OBJECT(key)) return void 0;
- %WeakMapSet(this.map_, key, value);
- },
- has: function(key) {
- return !IS_UNDEFINED(this.get(key));
- }
-};
-
-var observerInfoMap =
- new ObservationWeakMap(observationState.observerInfoMap);
-var objectInfoMap = new ObservationWeakMap(observationState.objectInfoMap);
-var notifierTargetMap =
- new ObservationWeakMap(observationState.notifierTargetMap);
-
-function CreateObjectInfo(object) {
- var info = {
- changeObservers: new InternalArray,
- notifier: null,
- };
- objectInfoMap.set(object, info);
- return info;
-}
-
-function ObjectObserve(object, callback) {
- if (!IS_SPEC_OBJECT(object))
- throw MakeTypeError("observe_non_object", ["observe"]);
- if (!IS_SPEC_FUNCTION(callback))
- throw MakeTypeError("observe_non_function", ["observe"]);
- if (ObjectIsFrozen(callback))
- throw MakeTypeError("observe_callback_frozen");
-
- if (!observerInfoMap.has(callback)) {
- observerInfoMap.set(callback, {
- pendingChangeRecords: null,
- priority: observationState.observerPriority++,
- });
- }
-
- var objectInfo = objectInfoMap.get(object);
- if (IS_UNDEFINED(objectInfo)) objectInfo = CreateObjectInfo(object);
- %SetIsObserved(object, true);
-
- var changeObservers = objectInfo.changeObservers;
- if (changeObservers.indexOf(callback) < 0) changeObservers.push(callback);
-
- return object;
-}
-
-function ObjectUnobserve(object, callback) {
- if (!IS_SPEC_OBJECT(object))
- throw MakeTypeError("observe_non_object", ["unobserve"]);
- if (!IS_SPEC_FUNCTION(callback))
- throw MakeTypeError("observe_non_function", ["unobserve"]);
-
- var objectInfo = objectInfoMap.get(object);
- if (IS_UNDEFINED(objectInfo))
- return object;
-
- var changeObservers = objectInfo.changeObservers;
- var index = changeObservers.indexOf(callback);
- if (index >= 0) {
- changeObservers.splice(index, 1);
- if (changeObservers.length === 0) %SetIsObserved(object, false);
- }
-
- return object;
-}
-
-function EnqueueChangeRecord(changeRecord, observers) {
- for (var i = 0; i < observers.length; i++) {
- var observer = observers[i];
- var observerInfo = observerInfoMap.get(observer);
- observationState.pendingObservers[observerInfo.priority] = observer;
- %SetObserverDeliveryPending();
- if (IS_NULL(observerInfo.pendingChangeRecords)) {
- observerInfo.pendingChangeRecords = new InternalArray(changeRecord);
- } else {
- observerInfo.pendingChangeRecords.push(changeRecord);
- }
- }
-}
-
-function NotifyChange(type, object, name, oldValue) {
- var objectInfo = objectInfoMap.get(object);
- var changeRecord = (arguments.length < 4) ?
- { type: type, object: object, name: name } :
- { type: type, object: object, name: name, oldValue: oldValue };
- ObjectFreeze(changeRecord);
- EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
-}
-
-var notifierPrototype = {};
-
-function ObjectNotifierNotify(changeRecord) {
- if (!IS_SPEC_OBJECT(this))
- throw MakeTypeError("called_on_non_object", ["notify"]);
-
- var target = notifierTargetMap.get(this);
- if (IS_UNDEFINED(target))
- throw MakeTypeError("observe_notify_non_notifier");
- if (!IS_STRING(changeRecord.type))
- throw MakeTypeError("observe_type_non_string");
-
- var objectInfo = objectInfoMap.get(target);
- if (IS_UNDEFINED(objectInfo) || objectInfo.changeObservers.length === 0)
- return;
-
- var newRecord = { object: target };
- for (var prop in changeRecord) {
- if (prop === 'object') continue;
- %DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
- READ_ONLY + DONT_DELETE);
- }
- ObjectFreeze(newRecord);
-
- EnqueueChangeRecord(newRecord, objectInfo.changeObservers);
-}
-
-function ObjectGetNotifier(object) {
- if (!IS_SPEC_OBJECT(object))
- throw MakeTypeError("observe_non_object", ["getNotifier"]);
-
- if (ObjectIsFrozen(object)) return null;
-
- var objectInfo = objectInfoMap.get(object);
- if (IS_UNDEFINED(objectInfo)) objectInfo = CreateObjectInfo(object);
-
- if (IS_NULL(objectInfo.notifier)) {
- objectInfo.notifier = { __proto__: notifierPrototype };
- notifierTargetMap.set(objectInfo.notifier, object);
- }
-
- return objectInfo.notifier;
-}
-
-function DeliverChangeRecordsForObserver(observer) {
- var observerInfo = observerInfoMap.get(observer);
- if (IS_UNDEFINED(observerInfo))
- return false;
-
- var pendingChangeRecords = observerInfo.pendingChangeRecords;
- if (IS_NULL(pendingChangeRecords))
- return false;
-
- observerInfo.pendingChangeRecords = null;
- delete observationState.pendingObservers[observerInfo.priority];
- var delivered = [];
- %MoveArrayContents(pendingChangeRecords, delivered);
- try {
- %Call(void 0, delivered, observer);
- } catch (ex) {}
- return true;
-}
-
-function ObjectDeliverChangeRecords(callback) {
- if (!IS_SPEC_FUNCTION(callback))
- throw MakeTypeError("observe_non_function", ["deliverChangeRecords"]);
-
- while (DeliverChangeRecordsForObserver(callback)) {}
-}
-
-function DeliverChangeRecords() {
- while (observationState.pendingObservers.length) {
- var pendingObservers = observationState.pendingObservers;
- observationState.pendingObservers = new InternalArray;
- for (var i in pendingObservers) {
- DeliverChangeRecordsForObserver(pendingObservers[i]);
- }
- }
-}
-
-function SetupObjectObserve() {
- %CheckIsBootstrapping();
- InstallFunctions($Object, DONT_ENUM, $Array(
- "deliverChangeRecords", ObjectDeliverChangeRecords,
- "getNotifier", ObjectGetNotifier,
- "observe", ObjectObserve,
- "unobserve", ObjectUnobserve
- ));
- InstallFunctions(notifierPrototype, DONT_ENUM, $Array(
- "notify", ObjectNotifierNotify
- ));
-}
-
-SetupObjectObserve();
diff --git a/src/3rdparty/v8/src/objects-debug.cc b/src/3rdparty/v8/src/objects-debug.cc
deleted file mode 100644
index e583016..0000000
--- a/src/3rdparty/v8/src/objects-debug.cc
+++ /dev/null
@@ -1,1077 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "disassembler.h"
-#include "disasm.h"
-#include "jsregexp.h"
-#include "macro-assembler.h"
-#include "objects-visiting.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef VERIFY_HEAP
-
-void MaybeObject::Verify() {
- Object* this_as_object;
- if (ToObject(&this_as_object)) {
- if (this_as_object->IsSmi()) {
- Smi::cast(this_as_object)->SmiVerify();
- } else {
- HeapObject::cast(this_as_object)->HeapObjectVerify();
- }
- } else {
- Failure::cast(this)->FailureVerify();
- }
-}
-
-
-void Object::VerifyPointer(Object* p) {
- if (p->IsHeapObject()) {
- HeapObject::VerifyHeapPointer(p);
- } else {
- CHECK(p->IsSmi());
- }
-}
-
-
-void Smi::SmiVerify() {
- CHECK(IsSmi());
-}
-
-
-void Failure::FailureVerify() {
- CHECK(IsFailure());
-}
-
-
-void HeapObject::HeapObjectVerify() {
- InstanceType instance_type = map()->instance_type();
-
- if (instance_type < FIRST_NONSTRING_TYPE) {
- String::cast(this)->StringVerify();
- return;
- }
-
- switch (instance_type) {
- case SYMBOL_TYPE:
- Symbol::cast(this)->SymbolVerify();
- break;
- case MAP_TYPE:
- Map::cast(this)->MapVerify();
- break;
- case HEAP_NUMBER_TYPE:
- HeapNumber::cast(this)->HeapNumberVerify();
- break;
- case FIXED_ARRAY_TYPE:
- FixedArray::cast(this)->FixedArrayVerify();
- break;
- case FIXED_DOUBLE_ARRAY_TYPE:
- FixedDoubleArray::cast(this)->FixedDoubleArrayVerify();
- break;
- case BYTE_ARRAY_TYPE:
- ByteArray::cast(this)->ByteArrayVerify();
- break;
- case FREE_SPACE_TYPE:
- FreeSpace::cast(this)->FreeSpaceVerify();
- break;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- ExternalPixelArray::cast(this)->ExternalPixelArrayVerify();
- break;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- ExternalByteArray::cast(this)->ExternalByteArrayVerify();
- break;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- ExternalUnsignedByteArray::cast(this)->ExternalUnsignedByteArrayVerify();
- break;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- ExternalShortArray::cast(this)->ExternalShortArrayVerify();
- break;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- ExternalUnsignedShortArray::cast(this)->
- ExternalUnsignedShortArrayVerify();
- break;
- case EXTERNAL_INT_ARRAY_TYPE:
- ExternalIntArray::cast(this)->ExternalIntArrayVerify();
- break;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayVerify();
- break;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- ExternalFloatArray::cast(this)->ExternalFloatArrayVerify();
- break;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- ExternalDoubleArray::cast(this)->ExternalDoubleArrayVerify();
- break;
- case CODE_TYPE:
- Code::cast(this)->CodeVerify();
- break;
- case ODDBALL_TYPE:
- Oddball::cast(this)->OddballVerify();
- break;
- case JS_OBJECT_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- JSObject::cast(this)->JSObjectVerify();
- break;
- case JS_MODULE_TYPE:
- JSModule::cast(this)->JSModuleVerify();
- break;
- case JS_VALUE_TYPE:
- JSValue::cast(this)->JSValueVerify();
- break;
- case JS_DATE_TYPE:
- JSDate::cast(this)->JSDateVerify();
- break;
- case JS_FUNCTION_TYPE:
- JSFunction::cast(this)->JSFunctionVerify();
- break;
- case JS_GLOBAL_PROXY_TYPE:
- JSGlobalProxy::cast(this)->JSGlobalProxyVerify();
- break;
- case JS_GLOBAL_OBJECT_TYPE:
- JSGlobalObject::cast(this)->JSGlobalObjectVerify();
- break;
- case JS_BUILTINS_OBJECT_TYPE:
- JSBuiltinsObject::cast(this)->JSBuiltinsObjectVerify();
- break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellVerify();
- break;
- case JS_ARRAY_TYPE:
- JSArray::cast(this)->JSArrayVerify();
- break;
- case JS_SET_TYPE:
- JSSet::cast(this)->JSSetVerify();
- break;
- case JS_MAP_TYPE:
- JSMap::cast(this)->JSMapVerify();
- break;
- case JS_WEAK_MAP_TYPE:
- JSWeakMap::cast(this)->JSWeakMapVerify();
- break;
- case JS_REGEXP_TYPE:
- JSRegExp::cast(this)->JSRegExpVerify();
- break;
- case FILLER_TYPE:
- break;
- case JS_PROXY_TYPE:
- JSProxy::cast(this)->JSProxyVerify();
- break;
- case JS_FUNCTION_PROXY_TYPE:
- JSFunctionProxy::cast(this)->JSFunctionProxyVerify();
- break;
- case FOREIGN_TYPE:
- Foreign::cast(this)->ForeignVerify();
- break;
- case SHARED_FUNCTION_INFO_TYPE:
- SharedFunctionInfo::cast(this)->SharedFunctionInfoVerify();
- break;
- case JS_MESSAGE_OBJECT_TYPE:
- JSMessageObject::cast(this)->JSMessageObjectVerify();
- break;
-
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
- Name::cast(this)->Name##Verify(); \
- break;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
-
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void HeapObject::VerifyHeapPointer(Object* p) {
- CHECK(p->IsHeapObject());
- CHECK(HEAP->Contains(HeapObject::cast(p)));
-}
-
-
-void Symbol::SymbolVerify() {
- CHECK(IsSymbol());
- CHECK(HasHashCode());
- CHECK_GT(Hash(), 0);
-}
-
-
-void HeapNumber::HeapNumberVerify() {
- CHECK(IsHeapNumber());
-}
-
-
-void ByteArray::ByteArrayVerify() {
- CHECK(IsByteArray());
-}
-
-
-void FreeSpace::FreeSpaceVerify() {
- CHECK(IsFreeSpace());
-}
-
-
-void ExternalPixelArray::ExternalPixelArrayVerify() {
- CHECK(IsExternalPixelArray());
-}
-
-
-void ExternalByteArray::ExternalByteArrayVerify() {
- CHECK(IsExternalByteArray());
-}
-
-
-void ExternalUnsignedByteArray::ExternalUnsignedByteArrayVerify() {
- CHECK(IsExternalUnsignedByteArray());
-}
-
-
-void ExternalShortArray::ExternalShortArrayVerify() {
- CHECK(IsExternalShortArray());
-}
-
-
-void ExternalUnsignedShortArray::ExternalUnsignedShortArrayVerify() {
- CHECK(IsExternalUnsignedShortArray());
-}
-
-
-void ExternalIntArray::ExternalIntArrayVerify() {
- CHECK(IsExternalIntArray());
-}
-
-
-void ExternalUnsignedIntArray::ExternalUnsignedIntArrayVerify() {
- CHECK(IsExternalUnsignedIntArray());
-}
-
-
-void ExternalFloatArray::ExternalFloatArrayVerify() {
- CHECK(IsExternalFloatArray());
-}
-
-
-void ExternalDoubleArray::ExternalDoubleArrayVerify() {
- CHECK(IsExternalDoubleArray());
-}
-
-
-void JSObject::JSObjectVerify() {
- VerifyHeapPointer(properties());
- VerifyHeapPointer(elements());
-
- if (GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
- CHECK(this->elements()->IsFixedArray());
- CHECK_GE(this->elements()->length(), 2);
- }
-
- if (HasFastProperties()) {
- CHECK_EQ(map()->unused_property_fields(),
- (map()->inobject_properties() + properties()->length() -
- map()->NextFreePropertyIndex()));
- }
- CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
- (elements() == GetHeap()->empty_fixed_array())),
- (elements()->map() == GetHeap()->fixed_array_map() ||
- elements()->map() == GetHeap()->fixed_cow_array_map()));
- CHECK(map()->has_fast_object_elements() == HasFastObjectElements());
-}
-
-
-void Map::MapVerify() {
- CHECK(!HEAP->InNewSpace(this));
- CHECK(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
- CHECK(instance_size() == kVariableSizeSentinel ||
- (kPointerSize <= instance_size() &&
- instance_size() < HEAP->Capacity()));
- VerifyHeapPointer(prototype());
- VerifyHeapPointer(instance_descriptors());
- DescriptorArray* descriptors = instance_descriptors();
- for (int i = 0; i < NumberOfOwnDescriptors(); ++i) {
- CHECK_EQ(i, descriptors->GetDetails(i).descriptor_index() - 1);
- }
- SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
- if (HasTransitionArray()) {
- SLOW_ASSERT(transitions()->IsSortedNoDuplicates());
- SLOW_ASSERT(transitions()->IsConsistentWithBackPointers(this));
- }
- ASSERT(!is_observed() || instance_type() < FIRST_JS_OBJECT_TYPE ||
- instance_type() > LAST_JS_OBJECT_TYPE ||
- has_slow_elements_kind() || has_external_array_elements());
-}
-
-
-void Map::SharedMapVerify() {
- MapVerify();
- CHECK(is_shared());
- CHECK(instance_descriptors()->IsEmpty());
- CHECK_EQ(0, pre_allocated_property_fields());
- CHECK_EQ(0, unused_property_fields());
- CHECK_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
- visitor_id());
-}
-
-
-void Map::VerifyOmittedPrototypeChecks() {
- if (!FLAG_omit_prototype_checks_for_leaf_maps) return;
- if (HasTransitionArray() || is_dictionary_map()) {
- CHECK_EQ(0, dependent_code()->number_of_entries(
- DependentCode::kPrototypeCheckGroup));
- }
-}
-
-
-void CodeCache::CodeCacheVerify() {
- VerifyHeapPointer(default_cache());
- VerifyHeapPointer(normal_type_cache());
- CHECK(default_cache()->IsFixedArray());
- CHECK(normal_type_cache()->IsUndefined()
- || normal_type_cache()->IsCodeCacheHashTable());
-}
-
-
-void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
- VerifyHeapPointer(cache());
- CHECK(cache()->IsUndefined() || cache()->IsPolymorphicCodeCacheHashTable());
-}
-
-
-void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
- VerifyObjectField(kStorage1Offset);
- VerifyObjectField(kStorage2Offset);
- VerifyHeapPointer(type_feedback_cells());
-}
-
-
-void AliasedArgumentsEntry::AliasedArgumentsEntryVerify() {
- VerifySmiField(kAliasedContextSlot);
-}
-
-
-void FixedArray::FixedArrayVerify() {
- for (int i = 0; i < length(); i++) {
- Object* e = get(i);
- if (e->IsHeapObject()) {
- VerifyHeapPointer(e);
- } else {
- e->Verify();
- }
- }
-}
-
-
-void FixedDoubleArray::FixedDoubleArrayVerify() {
- for (int i = 0; i < length(); i++) {
- if (!is_the_hole(i)) {
- double value = get_scalar(i);
- CHECK(!isnan(value) ||
- (BitCast<uint64_t>(value) ==
- BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
- ((BitCast<uint64_t>(value) & Double::kSignMask) != 0));
- }
- }
-}
-
-
-void JSModule::JSModuleVerify() {
- VerifyObjectField(kContextOffset);
- VerifyObjectField(kScopeInfoOffset);
- CHECK(context()->IsUndefined() ||
- Context::cast(context())->IsModuleContext());
-}
-
-
-void JSValue::JSValueVerify() {
- Object* v = value();
- if (v->IsHeapObject()) {
- VerifyHeapPointer(v);
- }
-}
-
-
-void JSDate::JSDateVerify() {
- if (value()->IsHeapObject()) {
- VerifyHeapPointer(value());
- }
- CHECK(value()->IsUndefined() || value()->IsSmi() || value()->IsHeapNumber());
- CHECK(year()->IsUndefined() || year()->IsSmi() || year()->IsNaN());
- CHECK(month()->IsUndefined() || month()->IsSmi() || month()->IsNaN());
- CHECK(day()->IsUndefined() || day()->IsSmi() || day()->IsNaN());
- CHECK(weekday()->IsUndefined() || weekday()->IsSmi() || weekday()->IsNaN());
- CHECK(hour()->IsUndefined() || hour()->IsSmi() || hour()->IsNaN());
- CHECK(min()->IsUndefined() || min()->IsSmi() || min()->IsNaN());
- CHECK(sec()->IsUndefined() || sec()->IsSmi() || sec()->IsNaN());
- CHECK(cache_stamp()->IsUndefined() ||
- cache_stamp()->IsSmi() ||
- cache_stamp()->IsNaN());
-
- if (month()->IsSmi()) {
- int month = Smi::cast(this->month())->value();
- CHECK(0 <= month && month <= 11);
- }
- if (day()->IsSmi()) {
- int day = Smi::cast(this->day())->value();
- CHECK(1 <= day && day <= 31);
- }
- if (hour()->IsSmi()) {
- int hour = Smi::cast(this->hour())->value();
- CHECK(0 <= hour && hour <= 23);
- }
- if (min()->IsSmi()) {
- int min = Smi::cast(this->min())->value();
- CHECK(0 <= min && min <= 59);
- }
- if (sec()->IsSmi()) {
- int sec = Smi::cast(this->sec())->value();
- CHECK(0 <= sec && sec <= 59);
- }
- if (weekday()->IsSmi()) {
- int weekday = Smi::cast(this->weekday())->value();
- CHECK(0 <= weekday && weekday <= 6);
- }
- if (cache_stamp()->IsSmi()) {
- CHECK(Smi::cast(cache_stamp())->value() <=
- Smi::cast(Isolate::Current()->date_cache()->stamp())->value());
- }
-}
-
-
-void JSMessageObject::JSMessageObjectVerify() {
- CHECK(IsJSMessageObject());
- CHECK(type()->IsString());
- CHECK(arguments()->IsJSArray());
- VerifyObjectField(kStartPositionOffset);
- VerifyObjectField(kEndPositionOffset);
- VerifyObjectField(kArgumentsOffset);
- VerifyObjectField(kScriptOffset);
- VerifyObjectField(kStackTraceOffset);
- VerifyObjectField(kStackFramesOffset);
-}
-
-
-void String::StringVerify() {
- CHECK(IsString());
- CHECK(length() >= 0 && length() <= Smi::kMaxValue);
- if (IsInternalizedString()) {
- CHECK(!HEAP->InNewSpace(this));
- }
- if (IsConsString()) {
- ConsString::cast(this)->ConsStringVerify();
- } else if (IsSlicedString()) {
- SlicedString::cast(this)->SlicedStringVerify();
- } else if (IsSeqOneByteString()) {
- SeqOneByteString::cast(this)->SeqOneByteStringVerify();
- }
-}
-
-
-void SeqOneByteString::SeqOneByteStringVerify() {
-#ifndef ENABLE_LATIN_1
- CHECK(String::IsAscii(GetChars(), length()));
-#endif
-}
-
-
-void ConsString::ConsStringVerify() {
- CHECK(this->first()->IsString());
- CHECK(this->second() == GetHeap()->empty_string() ||
- this->second()->IsString());
- CHECK(this->length() >= ConsString::kMinLength);
- if (this->IsFlat()) {
- // A flat cons can only be created by String::SlowTryFlatten.
- // Afterwards, the first part may be externalized.
- CHECK(this->first()->IsSeqString() || this->first()->IsExternalString());
- }
-}
-
-
-void SlicedString::SlicedStringVerify() {
- CHECK(!this->parent()->IsConsString());
- CHECK(!this->parent()->IsSlicedString());
- CHECK(this->length() >= SlicedString::kMinLength);
-}
-
-
-void JSFunction::JSFunctionVerify() {
- CHECK(IsJSFunction());
- VerifyObjectField(kPrototypeOrInitialMapOffset);
- VerifyObjectField(kNextFunctionLinkOffset);
- CHECK(code()->IsCode());
- CHECK(next_function_link() == NULL ||
- next_function_link()->IsUndefined() ||
- next_function_link()->IsJSFunction());
-}
-
-
-void SharedFunctionInfo::SharedFunctionInfoVerify() {
- CHECK(IsSharedFunctionInfo());
- VerifyObjectField(kNameOffset);
- VerifyObjectField(kCodeOffset);
- VerifyObjectField(kOptimizedCodeMapOffset);
- VerifyObjectField(kScopeInfoOffset);
- VerifyObjectField(kInstanceClassNameOffset);
- VerifyObjectField(kFunctionDataOffset);
- VerifyObjectField(kScriptOffset);
- VerifyObjectField(kDebugInfoOffset);
-}
-
-
-void JSGlobalProxy::JSGlobalProxyVerify() {
- CHECK(IsJSGlobalProxy());
- JSObjectVerify();
- VerifyObjectField(JSGlobalProxy::kNativeContextOffset);
- // Make sure that this object has no properties, elements.
- CHECK_EQ(0, properties()->length());
- CHECK(HasFastObjectElements());
- CHECK_EQ(0, FixedArray::cast(elements())->length());
-}
-
-
-void JSGlobalObject::JSGlobalObjectVerify() {
- CHECK(IsJSGlobalObject());
- JSObjectVerify();
- for (int i = GlobalObject::kBuiltinsOffset;
- i < JSGlobalObject::kSize;
- i += kPointerSize) {
- VerifyObjectField(i);
- }
-}
-
-
-void JSBuiltinsObject::JSBuiltinsObjectVerify() {
- CHECK(IsJSBuiltinsObject());
- JSObjectVerify();
- for (int i = GlobalObject::kBuiltinsOffset;
- i < JSBuiltinsObject::kSize;
- i += kPointerSize) {
- VerifyObjectField(i);
- }
-}
-
-
-void Oddball::OddballVerify() {
- CHECK(IsOddball());
- VerifyHeapPointer(to_string());
- Object* number = to_number();
- if (number->IsHeapObject()) {
- CHECK(number == HEAP->nan_value());
- } else {
- CHECK(number->IsSmi());
- int value = Smi::cast(number)->value();
- // Hidden oddballs have negative smis.
- const int kLeastHiddenOddballNumber = -4;
- CHECK_LE(value, 1);
- CHECK(value >= kLeastHiddenOddballNumber);
- }
-}
-
-
-void JSGlobalPropertyCell::JSGlobalPropertyCellVerify() {
- CHECK(IsJSGlobalPropertyCell());
- VerifyObjectField(kValueOffset);
-}
-
-
-void Code::CodeVerify() {
- CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
- kCodeAlignment));
- relocation_info()->Verify();
- Address last_gc_pc = NULL;
- for (RelocIterator it(this); !it.done(); it.next()) {
- it.rinfo()->Verify();
- // Ensure that GC will not iterate twice over the same pointer.
- if (RelocInfo::IsGCRelocMode(it.rinfo()->rmode())) {
- CHECK(it.rinfo()->pc() != last_gc_pc);
- last_gc_pc = it.rinfo()->pc();
- }
- }
-}
-
-
-void Code::VerifyEmbeddedMapsDependency() {
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Map* map = Map::cast(it.rinfo()->target_object());
- if (map->CanTransition()) {
- CHECK(map->dependent_code()->Contains(
- DependentCode::kWeaklyEmbeddedGroup, this));
- }
- }
- }
-}
-
-
-void JSArray::JSArrayVerify() {
- JSObjectVerify();
- CHECK(length()->IsNumber() || length()->IsUndefined());
- CHECK(elements()->IsUndefined() ||
- elements()->IsFixedArray() ||
- elements()->IsFixedDoubleArray());
-}
-
-
-void JSSet::JSSetVerify() {
- CHECK(IsJSSet());
- JSObjectVerify();
- VerifyHeapPointer(table());
- CHECK(table()->IsHashTable() || table()->IsUndefined());
-}
-
-
-void JSMap::JSMapVerify() {
- CHECK(IsJSMap());
- JSObjectVerify();
- VerifyHeapPointer(table());
- CHECK(table()->IsHashTable() || table()->IsUndefined());
-}
-
-
-void JSWeakMap::JSWeakMapVerify() {
- CHECK(IsJSWeakMap());
- JSObjectVerify();
- VerifyHeapPointer(table());
- CHECK(table()->IsHashTable() || table()->IsUndefined());
-}
-
-
-void JSRegExp::JSRegExpVerify() {
- JSObjectVerify();
- CHECK(data()->IsUndefined() || data()->IsFixedArray());
- switch (TypeTag()) {
- case JSRegExp::ATOM: {
- FixedArray* arr = FixedArray::cast(data());
- CHECK(arr->get(JSRegExp::kAtomPatternIndex)->IsString());
- break;
- }
- case JSRegExp::IRREGEXP: {
- bool is_native = RegExpImpl::UsesNativeRegExp();
-
- FixedArray* arr = FixedArray::cast(data());
- Object* ascii_data = arr->get(JSRegExp::kIrregexpASCIICodeIndex);
- // Smi : Not compiled yet (-1) or code prepared for flushing.
- // JSObject: Compilation error.
- // Code/ByteArray: Compiled code.
- CHECK(ascii_data->IsSmi() ||
- (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
- Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
- CHECK(uc16_data->IsSmi() ||
- (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
-
- Object* ascii_saved = arr->get(JSRegExp::kIrregexpASCIICodeSavedIndex);
- CHECK(ascii_saved->IsSmi() || ascii_saved->IsString() ||
- ascii_saved->IsCode());
- Object* uc16_saved = arr->get(JSRegExp::kIrregexpUC16CodeSavedIndex);
- CHECK(uc16_saved->IsSmi() || uc16_saved->IsString() ||
- uc16_saved->IsCode());
-
- CHECK(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
- CHECK(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());
- break;
- }
- default:
- CHECK_EQ(JSRegExp::NOT_COMPILED, TypeTag());
- CHECK(data()->IsUndefined());
- break;
- }
-}
-
-
-void JSProxy::JSProxyVerify() {
- CHECK(IsJSProxy());
- VerifyPointer(handler());
- CHECK(hash()->IsSmi() || hash()->IsUndefined());
-}
-
-
-void JSFunctionProxy::JSFunctionProxyVerify() {
- CHECK(IsJSFunctionProxy());
- JSProxyVerify();
- VerifyPointer(call_trap());
- VerifyPointer(construct_trap());
-}
-
-
-void Foreign::ForeignVerify() {
- CHECK(IsForeign());
-}
-
-
-void AccessorInfo::AccessorInfoVerify() {
- VerifyPointer(name());
- VerifyPointer(flag());
- VerifyPointer(expected_receiver_type());
-}
-
-
-void ExecutableAccessorInfo::ExecutableAccessorInfoVerify() {
- CHECK(IsExecutableAccessorInfo());
- AccessorInfoVerify();
- VerifyPointer(getter());
- VerifyPointer(setter());
- VerifyPointer(data());
-}
-
-
-void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorVerify() {
- CHECK(IsDeclaredAccessorDescriptor());
- VerifySmiField(kInternalFieldOffset);
-}
-
-
-void DeclaredAccessorInfo::DeclaredAccessorInfoVerify() {
- CHECK(IsDeclaredAccessorInfo());
- AccessorInfoVerify();
- VerifyPointer(descriptor());
-}
-
-
-void AccessorPair::AccessorPairVerify() {
- CHECK(IsAccessorPair());
- VerifyPointer(getter());
- VerifyPointer(setter());
-}
-
-
-void AccessCheckInfo::AccessCheckInfoVerify() {
- CHECK(IsAccessCheckInfo());
- VerifyPointer(named_callback());
- VerifyPointer(indexed_callback());
- VerifyPointer(data());
-}
-
-
-void InterceptorInfo::InterceptorInfoVerify() {
- CHECK(IsInterceptorInfo());
- VerifyPointer(getter());
- VerifyPointer(setter());
- VerifyPointer(query());
- VerifyPointer(deleter());
- VerifyPointer(enumerator());
- VerifyPointer(data());
-}
-
-
-void CallHandlerInfo::CallHandlerInfoVerify() {
- CHECK(IsCallHandlerInfo());
- VerifyPointer(callback());
- VerifyPointer(data());
-}
-
-
-void TemplateInfo::TemplateInfoVerify() {
- VerifyPointer(tag());
- VerifyPointer(property_list());
-}
-
-void FunctionTemplateInfo::FunctionTemplateInfoVerify() {
- CHECK(IsFunctionTemplateInfo());
- TemplateInfoVerify();
- VerifyPointer(serial_number());
- VerifyPointer(call_code());
- VerifyPointer(property_accessors());
- VerifyPointer(prototype_template());
- VerifyPointer(parent_template());
- VerifyPointer(named_property_handler());
- VerifyPointer(indexed_property_handler());
- VerifyPointer(instance_template());
- VerifyPointer(signature());
- VerifyPointer(access_check_info());
-}
-
-
-void ObjectTemplateInfo::ObjectTemplateInfoVerify() {
- CHECK(IsObjectTemplateInfo());
- TemplateInfoVerify();
- VerifyPointer(constructor());
- VerifyPointer(internal_field_count());
-}
-
-
-void SignatureInfo::SignatureInfoVerify() {
- CHECK(IsSignatureInfo());
- VerifyPointer(receiver());
- VerifyPointer(args());
-}
-
-
-void TypeSwitchInfo::TypeSwitchInfoVerify() {
- CHECK(IsTypeSwitchInfo());
- VerifyPointer(types());
-}
-
-
-void AllocationSiteInfo::AllocationSiteInfoVerify() {
- CHECK(IsAllocationSiteInfo());
- VerifyHeapPointer(payload());
- CHECK(payload()->IsObject());
-}
-
-
-void Script::ScriptVerify() {
- CHECK(IsScript());
- VerifyPointer(source());
- VerifyPointer(name());
- line_offset()->SmiVerify();
- column_offset()->SmiVerify();
- VerifyPointer(data());
- VerifyPointer(wrapper());
- type()->SmiVerify();
- VerifyPointer(line_ends());
- VerifyPointer(id());
-}
-
-
-void JSFunctionResultCache::JSFunctionResultCacheVerify() {
- JSFunction::cast(get(kFactoryIndex))->Verify();
-
- int size = Smi::cast(get(kCacheSizeIndex))->value();
- CHECK(kEntriesIndex <= size);
- CHECK(size <= length());
- CHECK_EQ(0, size % kEntrySize);
-
- int finger = Smi::cast(get(kFingerIndex))->value();
- CHECK(kEntriesIndex <= finger);
- CHECK((finger < size) || (finger == kEntriesIndex && finger == size));
- CHECK_EQ(0, finger % kEntrySize);
-
- if (FLAG_enable_slow_asserts) {
- for (int i = kEntriesIndex; i < size; i++) {
- CHECK(!get(i)->IsTheHole());
- get(i)->Verify();
- }
- for (int i = size; i < length(); i++) {
- CHECK(get(i)->IsTheHole());
- get(i)->Verify();
- }
- }
-}
-
-
-void NormalizedMapCache::NormalizedMapCacheVerify() {
- FixedArray::cast(this)->Verify();
- if (FLAG_enable_slow_asserts) {
- for (int i = 0; i < length(); i++) {
- Object* e = get(i);
- if (e->IsMap()) {
- Map::cast(e)->SharedMapVerify();
- } else {
- CHECK(e->IsUndefined());
- }
- }
- }
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void DebugInfo::DebugInfoVerify() {
- CHECK(IsDebugInfo());
- VerifyPointer(shared());
- VerifyPointer(original_code());
- VerifyPointer(code());
- VerifyPointer(break_points());
-}
-
-
-void BreakPointInfo::BreakPointInfoVerify() {
- CHECK(IsBreakPointInfo());
- code_position()->SmiVerify();
- source_position()->SmiVerify();
- statement_position()->SmiVerify();
- VerifyPointer(break_point_objects());
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-#endif // VERIFY_HEAP
-
-#ifdef DEBUG
-
-void JSObject::IncrementSpillStatistics(SpillInformation* info) {
- info->number_of_objects_++;
- // Named properties
- if (HasFastProperties()) {
- info->number_of_objects_with_fast_properties_++;
- info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex();
- info->number_of_fast_unused_fields_ += map()->unused_property_fields();
- } else {
- StringDictionary* dict = property_dictionary();
- info->number_of_slow_used_properties_ += dict->NumberOfElements();
- info->number_of_slow_unused_properties_ +=
- dict->Capacity() - dict->NumberOfElements();
- }
- // Indexed properties
- switch (GetElementsKind()) {
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS: {
- info->number_of_objects_with_fast_elements_++;
- int holes = 0;
- FixedArray* e = FixedArray::cast(elements());
- int len = e->length();
- Heap* heap = HEAP;
- for (int i = 0; i < len; i++) {
- if (e->get(i) == heap->the_hole_value()) holes++;
- }
- info->number_of_fast_used_elements_ += len - holes;
- info->number_of_fast_unused_elements_ += holes;
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS: {
- info->number_of_objects_with_fast_elements_++;
- ExternalPixelArray* e = ExternalPixelArray::cast(elements());
- info->number_of_fast_used_elements_ += e->length();
- break;
- }
- case DICTIONARY_ELEMENTS: {
- SeededNumberDictionary* dict = element_dictionary();
- info->number_of_slow_used_elements_ += dict->NumberOfElements();
- info->number_of_slow_unused_elements_ +=
- dict->Capacity() - dict->NumberOfElements();
- break;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- break;
- }
-}
-
-
-void JSObject::SpillInformation::Clear() {
- number_of_objects_ = 0;
- number_of_objects_with_fast_properties_ = 0;
- number_of_objects_with_fast_elements_ = 0;
- number_of_fast_used_fields_ = 0;
- number_of_fast_unused_fields_ = 0;
- number_of_slow_used_properties_ = 0;
- number_of_slow_unused_properties_ = 0;
- number_of_fast_used_elements_ = 0;
- number_of_fast_unused_elements_ = 0;
- number_of_slow_used_elements_ = 0;
- number_of_slow_unused_elements_ = 0;
-}
-
-void JSObject::SpillInformation::Print() {
- PrintF("\n JSObject Spill Statistics (#%d):\n", number_of_objects_);
-
- PrintF(" - fast properties (#%d): %d (used) %d (unused)\n",
- number_of_objects_with_fast_properties_,
- number_of_fast_used_fields_, number_of_fast_unused_fields_);
-
- PrintF(" - slow properties (#%d): %d (used) %d (unused)\n",
- number_of_objects_ - number_of_objects_with_fast_properties_,
- number_of_slow_used_properties_, number_of_slow_unused_properties_);
-
- PrintF(" - fast elements (#%d): %d (used) %d (unused)\n",
- number_of_objects_with_fast_elements_,
- number_of_fast_used_elements_, number_of_fast_unused_elements_);
-
- PrintF(" - slow elements (#%d): %d (used) %d (unused)\n",
- number_of_objects_ - number_of_objects_with_fast_elements_,
- number_of_slow_used_elements_, number_of_slow_unused_elements_);
-
- PrintF("\n");
-}
-
-
-bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
- if (valid_entries == -1) valid_entries = number_of_descriptors();
- String* current_key = NULL;
- uint32_t current = 0;
- for (int i = 0; i < number_of_descriptors(); i++) {
- String* key = GetSortedKey(i);
- if (key == current_key) {
- PrintDescriptors();
- return false;
- }
- current_key = key;
- uint32_t hash = GetSortedKey(i)->Hash();
- if (hash < current) {
- PrintDescriptors();
- return false;
- }
- current = hash;
- }
- return true;
-}
-
-
-bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
- ASSERT(valid_entries == -1);
- String* current_key = NULL;
- uint32_t current = 0;
- for (int i = 0; i < number_of_transitions(); i++) {
- String* key = GetSortedKey(i);
- if (key == current_key) {
- PrintTransitions();
- return false;
- }
- current_key = key;
- uint32_t hash = GetSortedKey(i)->Hash();
- if (hash < current) {
- PrintTransitions();
- return false;
- }
- current = hash;
- }
- return true;
-}
-
-
-static bool CheckOneBackPointer(Map* current_map, Object* target) {
- return !target->IsMap() || Map::cast(target)->GetBackPointer() == current_map;
-}
-
-
-bool TransitionArray::IsConsistentWithBackPointers(Map* current_map) {
- if (HasElementsTransition() &&
- !CheckOneBackPointer(current_map, elements_transition())) {
- return false;
- }
- for (int i = 0; i < number_of_transitions(); ++i) {
- if (!CheckOneBackPointer(current_map, GetTarget(i))) return false;
- }
- return true;
-}
-
-
-#endif // DEBUG
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/objects-inl.h b/src/3rdparty/v8/src/objects-inl.h
deleted file mode 100644
index 8e7d4cd..0000000
--- a/src/3rdparty/v8/src/objects-inl.h
+++ /dev/null
@@ -1,6007 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Review notes:
-//
-// - The use of macros in these inline functions may seem superfluous
-// but it is absolutely needed to make sure gcc generates optimal
-// code. gcc is not happy when attempting to inline too deep.
-//
-
-#ifndef V8_OBJECTS_INL_H_
-#define V8_OBJECTS_INL_H_
-
-#include "elements.h"
-#include "objects.h"
-#include "contexts.h"
-#include "conversions-inl.h"
-#include "heap.h"
-#include "isolate.h"
-#include "property.h"
-#include "spaces.h"
-#include "store-buffer.h"
-#include "v8memory.h"
-#include "factory.h"
-#include "incremental-marking.h"
-#include "transitions-inl.h"
-
-namespace v8 {
-namespace internal {
-
-PropertyDetails::PropertyDetails(Smi* smi) {
- value_ = smi->value();
-}
-
-
-Smi* PropertyDetails::AsSmi() {
- return Smi::FromInt(value_);
-}
-
-
-PropertyDetails PropertyDetails::AsDeleted() {
- Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
- return PropertyDetails(smi);
-}
-
-
-#define TYPE_CHECKER(type, instancetype) \
- bool Object::Is##type() { \
- return Object::IsHeapObject() && \
- HeapObject::cast(this)->map()->instance_type() == instancetype; \
- }
-
-
-#define CAST_ACCESSOR(type) \
- type* type::cast(Object* object) { \
- ASSERT(object->Is##type()); \
- return reinterpret_cast<type*>(object); \
- }
-
-
-#define INT_ACCESSORS(holder, name, offset) \
- int holder::name() { return READ_INT_FIELD(this, offset); } \
- void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
-
-
-#define ACCESSORS(holder, name, type, offset) \
- type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
- void holder::set_##name(type* value, WriteBarrierMode mode) { \
- WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
- }
-
-
-// Getter that returns a tagged Smi and setter that writes a tagged Smi.
-#define ACCESSORS_TO_SMI(holder, name, offset) \
- Smi* holder::name() { return Smi::cast(READ_FIELD(this, offset)); } \
- void holder::set_##name(Smi* value, WriteBarrierMode mode) { \
- WRITE_FIELD(this, offset, value); \
- }
-
-
-// Getter that returns a Smi as an int and writes an int as a Smi.
-#define SMI_ACCESSORS(holder, name, offset) \
- int holder::name() { \
- Object* value = READ_FIELD(this, offset); \
- return Smi::cast(value)->value(); \
- } \
- void holder::set_##name(int value) { \
- WRITE_FIELD(this, offset, Smi::FromInt(value)); \
- }
-
-
-#define BOOL_GETTER(holder, field, name, offset) \
- bool holder::name() { \
- return BooleanBit::get(field(), offset); \
- } \
-
-
-#define BOOL_ACCESSORS(holder, field, name, offset) \
- bool holder::name() { \
- return BooleanBit::get(field(), offset); \
- } \
- void holder::set_##name(bool value) { \
- set_##field(BooleanBit::set(field(), offset, value)); \
- }
-
-
-bool Object::IsFixedArrayBase() {
- return IsFixedArray() || IsFixedDoubleArray();
-}
-
-
-// External objects are not extensible, so the map check is enough.
-bool Object::IsExternal() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->external_map();
-}
-
-
-bool Object::IsAccessorInfo() {
- return IsExecutableAccessorInfo() || IsDeclaredAccessorInfo();
-}
-
-
-bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
- // There is a constraint on the object; check.
- if (!this->IsJSObject()) return false;
- // Fetch the constructor function of the object.
- Object* cons_obj = JSObject::cast(this)->map()->constructor();
- if (!cons_obj->IsJSFunction()) return false;
- JSFunction* fun = JSFunction::cast(cons_obj);
- // Iterate through the chain of inheriting function templates to
- // see if the required one occurs.
- for (Object* type = fun->shared()->function_data();
- type->IsFunctionTemplateInfo();
- type = FunctionTemplateInfo::cast(type)->parent_template()) {
- if (type == expected) return true;
- }
- // Didn't find the required type in the inheritance chain.
- return false;
-}
-
-
-bool Object::IsSmi() {
- return HAS_SMI_TAG(this);
-}
-
-
-bool Object::IsHeapObject() {
- return Internals::HasHeapObjectTag(this);
-}
-
-
-bool Object::NonFailureIsHeapObject() {
- ASSERT(!this->IsFailure());
- return (reinterpret_cast<intptr_t>(this) & kSmiTagMask) != 0;
-}
-
-
-TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
-TYPE_CHECKER(Symbol, SYMBOL_TYPE)
-
-
-bool Object::IsString() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
-}
-
-
-bool Object::IsName() {
- return IsString() || IsSymbol();
-}
-
-
-bool Object::IsUniqueName() {
- return IsInternalizedString() || IsSymbol();
-}
-
-
-bool Object::IsSpecObject() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE;
-}
-
-
-bool Object::IsSpecFunction() {
- if (!Object::IsHeapObject()) return false;
- InstanceType type = HeapObject::cast(this)->map()->instance_type();
- return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
-}
-
-
-bool Object::IsInternalizedString() {
- if (!this->IsHeapObject()) return false;
- uint32_t type = HeapObject::cast(this)->map()->instance_type();
- // Because the internalized tag is non-zero and no non-string types have the
- // internalized bit set we can test for internalized strings with a very
- // simple test operation.
- STATIC_ASSERT(kInternalizedTag != 0);
- ASSERT(kNotStringTag + kIsInternalizedMask > LAST_TYPE);
- return (type & kIsInternalizedMask) != 0;
-}
-
-
-bool Object::IsConsString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsCons();
-}
-
-
-bool Object::IsSlicedString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsSliced();
-}
-
-
-bool Object::IsSeqString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsSequential();
-}
-
-
-bool Object::IsSeqOneByteString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsSequential() &&
- String::cast(this)->IsOneByteRepresentation();
-}
-
-
-bool Object::IsSeqTwoByteString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsSequential() &&
- String::cast(this)->IsTwoByteRepresentation();
-}
-
-
-bool Object::IsExternalString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsExternal();
-}
-
-
-bool Object::IsExternalAsciiString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsExternal() &&
- String::cast(this)->IsOneByteRepresentation();
-}
-
-
-bool Object::IsExternalTwoByteString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsExternal() &&
- String::cast(this)->IsTwoByteRepresentation();
-}
-
-bool Object::HasValidElements() {
- // Dictionary is covered under FixedArray.
- return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray();
-}
-
-StringShape::StringShape(String* str)
- : type_(str->map()->instance_type()) {
- set_valid();
- ASSERT((type_ & kIsNotStringMask) == kStringTag);
-}
-
-
-StringShape::StringShape(Map* map)
- : type_(map->instance_type()) {
- set_valid();
- ASSERT((type_ & kIsNotStringMask) == kStringTag);
-}
-
-
-StringShape::StringShape(InstanceType t)
- : type_(static_cast<uint32_t>(t)) {
- set_valid();
- ASSERT((type_ & kIsNotStringMask) == kStringTag);
-}
-
-
-bool StringShape::IsInternalized() {
- ASSERT(valid());
- STATIC_ASSERT(kInternalizedTag != 0);
- return (type_ & kIsInternalizedMask) != 0;
-}
-
-
-bool String::IsOneByteRepresentation() {
- uint32_t type = map()->instance_type();
- return (type & kStringEncodingMask) == kOneByteStringTag;
-}
-
-
-bool String::IsTwoByteRepresentation() {
- uint32_t type = map()->instance_type();
- return (type & kStringEncodingMask) == kTwoByteStringTag;
-}
-
-
-bool String::IsOneByteRepresentationUnderneath() {
- uint32_t type = map()->instance_type();
- STATIC_ASSERT(kIsIndirectStringTag != 0);
- STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
- ASSERT(IsFlat());
- switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
- case kOneByteStringTag:
- return true;
- case kTwoByteStringTag:
- return false;
- default: // Cons or sliced string. Need to go deeper.
- return GetUnderlying()->IsOneByteRepresentation();
- }
-}
-
-
-bool String::IsTwoByteRepresentationUnderneath() {
- uint32_t type = map()->instance_type();
- STATIC_ASSERT(kIsIndirectStringTag != 0);
- STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
- ASSERT(IsFlat());
- switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
- case kOneByteStringTag:
- return false;
- case kTwoByteStringTag:
- return true;
- default: // Cons or sliced string. Need to go deeper.
- return GetUnderlying()->IsTwoByteRepresentation();
- }
-}
-
-
-bool String::HasOnlyAsciiChars() {
- uint32_t type = map()->instance_type();
-#ifndef ENABLE_LATIN_1
- return (type & kStringEncodingMask) == kOneByteStringTag ||
- (type & kAsciiDataHintMask) == kAsciiDataHintTag;
-#else
- return (type & kAsciiDataHintMask) == kAsciiDataHintTag;
-#endif
-}
-
-
-bool String::IsOneByteConvertible() {
- return HasOnlyAsciiChars() || IsOneByteRepresentation();
-}
-
-
-bool StringShape::IsCons() {
- return (type_ & kStringRepresentationMask) == kConsStringTag;
-}
-
-
-bool StringShape::IsSliced() {
- return (type_ & kStringRepresentationMask) == kSlicedStringTag;
-}
-
-
-bool StringShape::IsIndirect() {
- return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
-}
-
-
-bool StringShape::IsExternal() {
- return (type_ & kStringRepresentationMask) == kExternalStringTag;
-}
-
-
-bool StringShape::IsSequential() {
- return (type_ & kStringRepresentationMask) == kSeqStringTag;
-}
-
-
-StringRepresentationTag StringShape::representation_tag() {
- uint32_t tag = (type_ & kStringRepresentationMask);
- return static_cast<StringRepresentationTag>(tag);
-}
-
-
-uint32_t StringShape::encoding_tag() {
- return type_ & kStringEncodingMask;
-}
-
-
-uint32_t StringShape::full_representation_tag() {
- return (type_ & (kStringRepresentationMask | kStringEncodingMask));
-}
-
-
-STATIC_CHECK((kStringRepresentationMask | kStringEncodingMask) ==
- Internals::kFullStringRepresentationMask);
-
-STATIC_CHECK(static_cast<uint32_t>(kStringEncodingMask) ==
- Internals::kStringEncodingMask);
-
-
-bool StringShape::IsSequentialAscii() {
- return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
-}
-
-
-bool StringShape::IsSequentialTwoByte() {
- return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
-}
-
-
-bool StringShape::IsExternalAscii() {
- return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
-}
-
-
-STATIC_CHECK((kExternalStringTag | kOneByteStringTag) ==
- Internals::kExternalAsciiRepresentationTag);
-
-STATIC_CHECK(v8::String::ASCII_ENCODING == kOneByteStringTag);
-
-
-bool StringShape::IsExternalTwoByte() {
- return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
-}
-
-
-STATIC_CHECK((kExternalStringTag | kTwoByteStringTag) ==
- Internals::kExternalTwoByteRepresentationTag);
-
-STATIC_CHECK(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
-
-uc32 FlatStringReader::Get(int index) {
- ASSERT(0 <= index && index <= length_);
- if (is_ascii_) {
- return static_cast<const byte*>(start_)[index];
- } else {
- return static_cast<const uc16*>(start_)[index];
- }
-}
-
-
-bool Object::IsNumber() {
- return IsSmi() || IsHeapNumber();
-}
-
-
-TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
-TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
-
-
-bool Object::IsFiller() {
- if (!Object::IsHeapObject()) return false;
- InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
- return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
-}
-
-
-TYPE_CHECKER(ExternalPixelArray, EXTERNAL_PIXEL_ARRAY_TYPE)
-
-
-bool Object::IsExternalArray() {
- if (!Object::IsHeapObject())
- return false;
- InstanceType instance_type =
- HeapObject::cast(this)->map()->instance_type();
- return (instance_type >= FIRST_EXTERNAL_ARRAY_TYPE &&
- instance_type <= LAST_EXTERNAL_ARRAY_TYPE);
-}
-
-
-TYPE_CHECKER(ExternalByteArray, EXTERNAL_BYTE_ARRAY_TYPE)
-TYPE_CHECKER(ExternalUnsignedByteArray, EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)
-TYPE_CHECKER(ExternalShortArray, EXTERNAL_SHORT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalUnsignedShortArray, EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalIntArray, EXTERNAL_INT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalUnsignedIntArray, EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalFloatArray, EXTERNAL_FLOAT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalDoubleArray, EXTERNAL_DOUBLE_ARRAY_TYPE)
-
-
-bool MaybeObject::IsFailure() {
- return HAS_FAILURE_TAG(this);
-}
-
-
-bool MaybeObject::IsRetryAfterGC() {
- return HAS_FAILURE_TAG(this)
- && Failure::cast(this)->type() == Failure::RETRY_AFTER_GC;
-}
-
-
-bool MaybeObject::IsOutOfMemory() {
- return HAS_FAILURE_TAG(this)
- && Failure::cast(this)->IsOutOfMemoryException();
-}
-
-
-bool MaybeObject::IsException() {
- return this == Failure::Exception();
-}
-
-
-bool MaybeObject::IsTheHole() {
- return !IsFailure() && ToObjectUnchecked()->IsTheHole();
-}
-
-
-Failure* Failure::cast(MaybeObject* obj) {
- ASSERT(HAS_FAILURE_TAG(obj));
- return reinterpret_cast<Failure*>(obj);
-}
-
-
-bool Object::IsJSReceiver() {
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- return IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
-}
-
-
-bool Object::IsJSObject() {
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- return IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
-}
-
-
-bool Object::IsJSProxy() {
- if (!Object::IsHeapObject()) return false;
- InstanceType type = HeapObject::cast(this)->map()->instance_type();
- return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
-}
-
-
-TYPE_CHECKER(JSFunctionProxy, JS_FUNCTION_PROXY_TYPE)
-TYPE_CHECKER(JSSet, JS_SET_TYPE)
-TYPE_CHECKER(JSMap, JS_MAP_TYPE)
-TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
-TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
-TYPE_CHECKER(Map, MAP_TYPE)
-TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
-TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
-
-
-bool Object::IsDescriptorArray() {
- return IsFixedArray();
-}
-
-
-bool Object::IsTransitionArray() {
- return IsFixedArray();
-}
-
-
-bool Object::IsDeoptimizationInputData() {
- // Must be a fixed array.
- if (!IsFixedArray()) return false;
-
- // There's no sure way to detect the difference between a fixed array and
- // a deoptimization data array. Since this is used for asserts we can
- // check that the length is zero or else the fixed size plus a multiple of
- // the entry size.
- int length = FixedArray::cast(this)->length();
- if (length == 0) return true;
-
- length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
- return length >= 0 &&
- length % DeoptimizationInputData::kDeoptEntrySize == 0;
-}
-
-
-bool Object::IsDeoptimizationOutputData() {
- if (!IsFixedArray()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a deoptimization data array. Since this is used for asserts we can check
- // that the length is plausible though.
- if (FixedArray::cast(this)->length() % 2 != 0) return false;
- return true;
-}
-
-
-bool Object::IsDependentCode() {
- if (!IsFixedArray()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a dependent codes array.
- return true;
-}
-
-
-bool Object::IsTypeFeedbackCells() {
- if (!IsFixedArray()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a cache cells array. Since this is used for asserts we can check that
- // the length is plausible though.
- if (FixedArray::cast(this)->length() % 2 != 0) return false;
- return true;
-}
-
-
-bool Object::IsContext() {
- if (!Object::IsHeapObject()) return false;
- Map* map = HeapObject::cast(this)->map();
- Heap* heap = map->GetHeap();
- return (map == heap->function_context_map() ||
- map == heap->catch_context_map() ||
- map == heap->with_context_map() ||
- map == heap->native_context_map() ||
- map == heap->block_context_map() ||
- map == heap->module_context_map() ||
- map == heap->global_context_map());
-}
-
-
-bool Object::IsNativeContext() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->native_context_map();
-}
-
-
-bool Object::IsScopeInfo() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->scope_info_map();
-}
-
-
-TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
-
-
-template <> inline bool Is<JSFunction>(Object* obj) {
- return obj->IsJSFunction();
-}
-
-
-TYPE_CHECKER(Code, CODE_TYPE)
-TYPE_CHECKER(Oddball, ODDBALL_TYPE)
-TYPE_CHECKER(JSGlobalPropertyCell, JS_GLOBAL_PROPERTY_CELL_TYPE)
-TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
-TYPE_CHECKER(JSModule, JS_MODULE_TYPE)
-TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
-TYPE_CHECKER(JSDate, JS_DATE_TYPE)
-TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
-
-
-bool Object::IsStringWrapper() {
- return IsJSValue() && JSValue::cast(this)->value()->IsString();
-}
-
-
-TYPE_CHECKER(Foreign, FOREIGN_TYPE)
-
-
-bool Object::IsBoolean() {
- return IsOddball() &&
- ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
-}
-
-
-TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
-TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
-
-
-template <> inline bool Is<JSArray>(Object* obj) {
- return obj->IsJSArray();
-}
-
-
-bool Object::IsHashTable() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->hash_table_map();
-}
-
-
-bool Object::IsDictionary() {
- return IsHashTable() &&
- this != HeapObject::cast(this)->GetHeap()->string_table();
-}
-
-
-bool Object::IsStringTable() {
- return IsHashTable() &&
- this == HeapObject::cast(this)->GetHeap()->raw_unchecked_string_table();
-}
-
-
-bool Object::IsJSFunctionResultCache() {
- if (!IsFixedArray()) return false;
- FixedArray* self = FixedArray::cast(this);
- int length = self->length();
- if (length < JSFunctionResultCache::kEntriesIndex) return false;
- if ((length - JSFunctionResultCache::kEntriesIndex)
- % JSFunctionResultCache::kEntrySize != 0) {
- return false;
- }
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- reinterpret_cast<JSFunctionResultCache*>(this)->
- JSFunctionResultCacheVerify();
- }
-#endif
- return true;
-}
-
-
-bool Object::IsNormalizedMapCache() {
- if (!IsFixedArray()) return false;
- if (FixedArray::cast(this)->length() != NormalizedMapCache::kEntries) {
- return false;
- }
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
- }
-#endif
- return true;
-}
-
-
-bool Object::IsCompilationCacheTable() {
- return IsHashTable();
-}
-
-
-bool Object::IsCodeCacheHashTable() {
- return IsHashTable();
-}
-
-
-bool Object::IsPolymorphicCodeCacheHashTable() {
- return IsHashTable();
-}
-
-
-bool Object::IsMapCache() {
- return IsHashTable();
-}
-
-
-bool Object::IsObjectHashTable() {
- return IsHashTable();
-}
-
-
-bool Object::IsPrimitive() {
- return IsOddball() || IsNumber() || IsString();
-}
-
-
-bool Object::IsJSGlobalProxy() {
- bool result = IsHeapObject() &&
- (HeapObject::cast(this)->map()->instance_type() ==
- JS_GLOBAL_PROXY_TYPE);
- ASSERT(!result || IsAccessCheckNeeded());
- return result;
-}
-
-
-bool Object::IsGlobalObject() {
- if (!IsHeapObject()) return false;
-
- InstanceType type = HeapObject::cast(this)->map()->instance_type();
- return type == JS_GLOBAL_OBJECT_TYPE ||
- type == JS_BUILTINS_OBJECT_TYPE;
-}
-
-
-TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
-TYPE_CHECKER(JSBuiltinsObject, JS_BUILTINS_OBJECT_TYPE)
-
-
-bool Object::IsUndetectableObject() {
- return IsHeapObject()
- && HeapObject::cast(this)->map()->is_undetectable();
-}
-
-
-bool Object::IsAccessCheckNeeded() {
- return IsHeapObject()
- && HeapObject::cast(this)->map()->is_access_check_needed();
-}
-
-
-bool Object::IsStruct() {
- if (!IsHeapObject()) return false;
- switch (HeapObject::cast(this)->map()->instance_type()) {
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return true;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- default: return false;
- }
-}
-
-
-#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
- bool Object::Is##Name() { \
- return Object::IsHeapObject() \
- && HeapObject::cast(this)->map()->instance_type() == NAME##_TYPE; \
- }
- STRUCT_LIST(MAKE_STRUCT_PREDICATE)
-#undef MAKE_STRUCT_PREDICATE
-
-
-bool Object::IsUndefined() {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUndefined;
-}
-
-
-bool Object::IsNull() {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kNull;
-}
-
-
-bool Object::IsTheHole() {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTheHole;
-}
-
-
-bool Object::IsTrue() {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue;
-}
-
-
-bool Object::IsFalse() {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kFalse;
-}
-
-
-bool Object::IsArgumentsMarker() {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kArgumentMarker;
-}
-
-
-double Object::Number() {
- ASSERT(IsNumber());
- return IsSmi()
- ? static_cast<double>(reinterpret_cast<Smi*>(this)->value())
- : reinterpret_cast<HeapNumber*>(this)->value();
-}
-
-
-bool Object::IsNaN() {
- return this->IsHeapNumber() && isnan(HeapNumber::cast(this)->value());
-}
-
-
-MaybeObject* Object::ToSmi() {
- if (IsSmi()) return this;
- if (IsHeapNumber()) {
- double value = HeapNumber::cast(this)->value();
- int int_value = FastD2I(value);
- if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
- return Smi::FromInt(int_value);
- }
- }
- return Failure::Exception();
-}
-
-
-bool Object::HasSpecificClassOf(String* name) {
- return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
-}
-
-
-MaybeObject* Object::GetElement(uint32_t index) {
- // GetElement can trigger a getter which can cause allocation.
- // This was not always the case. This ASSERT is here to catch
- // leftover incorrect uses.
- ASSERT(HEAP->IsAllocationAllowed());
- return GetElementWithReceiver(this, index);
-}
-
-
-Object* Object::GetElementNoExceptionThrown(uint32_t index) {
- MaybeObject* maybe = GetElementWithReceiver(this, index);
- ASSERT(!maybe->IsFailure());
- Object* result = NULL; // Initialization to please compiler.
- maybe->ToObject(&result);
- return result;
-}
-
-
-MaybeObject* Object::GetProperty(String* key) {
- PropertyAttributes attributes;
- return GetPropertyWithReceiver(this, key, &attributes);
-}
-
-
-MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) {
- return GetPropertyWithReceiver(this, key, attributes);
-}
-
-
-#define FIELD_ADDR(p, offset) \
- (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
-
-#define READ_FIELD(p, offset) \
- (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)))
-
-#define WRITE_FIELD(p, offset, value) \
- (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
-
-#define WRITE_BARRIER(heap, object, offset, value) \
- heap->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- if (heap->InNewSpace(value)) { \
- heap->RecordWrite(object->address(), offset); \
- }
-
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
- if (mode == UPDATE_WRITE_BARRIER) { \
- heap->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- if (heap->InNewSpace(value)) { \
- heap->RecordWrite(object->address(), offset); \
- } \
- }
-
-#ifndef V8_TARGET_ARCH_MIPS
- #define READ_DOUBLE_FIELD(p, offset) \
- (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
-#else // V8_TARGET_ARCH_MIPS
- // Prevent gcc from using load-double (mips ldc1) on (possibly)
- // non-64-bit aligned HeapNumber::value.
- static inline double read_double_field(void* p, int offset) {
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- c.u[0] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)));
- c.u[1] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4)));
- return c.d;
- }
- #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
-#endif // V8_TARGET_ARCH_MIPS
-
-#ifndef V8_TARGET_ARCH_MIPS
- #define WRITE_DOUBLE_FIELD(p, offset, value) \
- (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
-#else // V8_TARGET_ARCH_MIPS
- // Prevent gcc from using store-double (mips sdc1) on (possibly)
- // non-64-bit aligned HeapNumber::value.
- static inline void write_double_field(void* p, int offset,
- double value) {
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- c.d = value;
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) = c.u[0];
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4))) = c.u[1];
- }
- #define WRITE_DOUBLE_FIELD(p, offset, value) \
- write_double_field(p, offset, value)
-#endif // V8_TARGET_ARCH_MIPS
-
-
-#define READ_INT_FIELD(p, offset) \
- (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_INT_FIELD(p, offset, value) \
- (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_INTPTR_FIELD(p, offset) \
- (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_INTPTR_FIELD(p, offset, value) \
- (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_UINT32_FIELD(p, offset) \
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_UINT32_FIELD(p, offset, value) \
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_INT64_FIELD(p, offset) \
- (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_INT64_FIELD(p, offset, value) \
- (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_SHORT_FIELD(p, offset) \
- (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_SHORT_FIELD(p, offset, value) \
- (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_BYTE_FIELD(p, offset) \
- (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_BYTE_FIELD(p, offset, value) \
- (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
-
-
-Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
- return &READ_FIELD(obj, byte_offset);
-}
-
-
-int Smi::value() {
- return Internals::SmiValue(this);
-}
-
-
-Smi* Smi::FromInt(int value) {
- ASSERT(Smi::IsValid(value));
- int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
- intptr_t tagged_value =
- (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
- return reinterpret_cast<Smi*>(tagged_value);
-}
-
-
-Smi* Smi::FromIntptr(intptr_t value) {
- ASSERT(Smi::IsValid(value));
- int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
- return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
-}
-
-
-Failure::Type Failure::type() const {
- return static_cast<Type>(value() & kFailureTypeTagMask);
-}
-
-
-bool Failure::IsInternalError() const {
- return type() == INTERNAL_ERROR;
-}
-
-
-bool Failure::IsOutOfMemoryException() const {
- return type() == OUT_OF_MEMORY_EXCEPTION;
-}
-
-
-AllocationSpace Failure::allocation_space() const {
- ASSERT_EQ(RETRY_AFTER_GC, type());
- return static_cast<AllocationSpace>((value() >> kFailureTypeTagSize)
- & kSpaceTagMask);
-}
-
-
-Failure* Failure::InternalError() {
- return Construct(INTERNAL_ERROR);
-}
-
-
-Failure* Failure::Exception() {
- return Construct(EXCEPTION);
-}
-
-
-Failure* Failure::OutOfMemoryException(intptr_t value) {
- return Construct(OUT_OF_MEMORY_EXCEPTION, value);
-}
-
-
-intptr_t Failure::value() const {
- return static_cast<intptr_t>(
- reinterpret_cast<uintptr_t>(this) >> kFailureTagSize);
-}
-
-
-Failure* Failure::RetryAfterGC() {
- return RetryAfterGC(NEW_SPACE);
-}
-
-
-Failure* Failure::RetryAfterGC(AllocationSpace space) {
- ASSERT((space & ~kSpaceTagMask) == 0);
- return Construct(RETRY_AFTER_GC, space);
-}
-
-
-Failure* Failure::Construct(Type type, intptr_t value) {
- uintptr_t info =
- (static_cast<uintptr_t>(value) << kFailureTypeTagSize) | type;
- ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info);
- // Fill the unused bits with a pattern that's easy to recognize in crash
- // dumps.
- static const int kFailureMagicPattern = 0x0BAD0000;
- return reinterpret_cast<Failure*>(
- (info << kFailureTagSize) | kFailureTag | kFailureMagicPattern);
-}
-
-
-bool Smi::IsValid(intptr_t value) {
-#ifdef DEBUG
- bool in_range = (value >= kMinValue) && (value <= kMaxValue);
-#endif
-
-#ifdef V8_TARGET_ARCH_X64
- // To be representable as a long smi, the value must be a 32-bit integer.
- bool result = (value == static_cast<int32_t>(value));
-#else
- // To be representable as an tagged small integer, the two
- // most-significant bits of 'value' must be either 00 or 11 due to
- // sign-extension. To check this we add 01 to the two
- // most-significant bits, and check if the most-significant bit is 0
- //
- // CAUTION: The original code below:
- // bool result = ((value + 0x40000000) & 0x80000000) == 0;
- // may lead to incorrect results according to the C language spec, and
- // in fact doesn't work correctly with gcc4.1.1 in some cases: The
- // compiler may produce undefined results in case of signed integer
- // overflow. The computation must be done w/ unsigned ints.
- bool result = (static_cast<uintptr_t>(value + 0x40000000U) < 0x80000000U);
-#endif
- ASSERT(result == in_range);
- return result;
-}
-
-
-MapWord MapWord::FromMap(Map* map) {
- return MapWord(reinterpret_cast<uintptr_t>(map));
-}
-
-
-Map* MapWord::ToMap() {
- return reinterpret_cast<Map*>(value_);
-}
-
-
-bool MapWord::IsForwardingAddress() {
- return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
-}
-
-
-MapWord MapWord::FromForwardingAddress(HeapObject* object) {
- Address raw = reinterpret_cast<Address>(object) - kHeapObjectTag;
- return MapWord(reinterpret_cast<uintptr_t>(raw));
-}
-
-
-HeapObject* MapWord::ToForwardingAddress() {
- ASSERT(IsForwardingAddress());
- return HeapObject::FromAddress(reinterpret_cast<Address>(value_));
-}
-
-
-#ifdef VERIFY_HEAP
-void HeapObject::VerifyObjectField(int offset) {
- VerifyPointer(READ_FIELD(this, offset));
-}
-
-void HeapObject::VerifySmiField(int offset) {
- CHECK(READ_FIELD(this, offset)->IsSmi());
-}
-#endif
-
-
-Heap* HeapObject::GetHeap() {
- Heap* heap =
- MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
- ASSERT(heap != NULL);
- ASSERT(heap->isolate() == Isolate::Current());
- return heap;
-}
-
-
-Isolate* HeapObject::GetIsolate() {
- return GetHeap()->isolate();
-}
-
-
-Map* HeapObject::map() {
- return map_word().ToMap();
-}
-
-
-void HeapObject::set_map(Map* value) {
- set_map_word(MapWord::FromMap(value));
- if (value != NULL) {
- // TODO(1600) We are passing NULL as a slot because maps can never be on
- // evacuation candidate.
- value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
- }
-}
-
-
-// Unsafe accessor omitting write barrier.
-void HeapObject::set_map_no_write_barrier(Map* value) {
- set_map_word(MapWord::FromMap(value));
-}
-
-
-MapWord HeapObject::map_word() {
- return MapWord(reinterpret_cast<uintptr_t>(READ_FIELD(this, kMapOffset)));
-}
-
-
-void HeapObject::set_map_word(MapWord map_word) {
- // WRITE_FIELD does not invoke write barrier, but there is no need
- // here.
- WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
-}
-
-
-HeapObject* HeapObject::FromAddress(Address address) {
- ASSERT_TAG_ALIGNED(address);
- return reinterpret_cast<HeapObject*>(address + kHeapObjectTag);
-}
-
-
-Address HeapObject::address() {
- return reinterpret_cast<Address>(this) - kHeapObjectTag;
-}
-
-
-int HeapObject::Size() {
- return SizeFromMap(map());
-}
-
-
-void HeapObject::IteratePointers(ObjectVisitor* v, int start, int end) {
- v->VisitPointers(reinterpret_cast<Object**>(FIELD_ADDR(this, start)),
- reinterpret_cast<Object**>(FIELD_ADDR(this, end)));
-}
-
-
-void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
- v->VisitPointer(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
-}
-
-
-double HeapNumber::value() {
- return READ_DOUBLE_FIELD(this, kValueOffset);
-}
-
-
-void HeapNumber::set_value(double value) {
- WRITE_DOUBLE_FIELD(this, kValueOffset, value);
-}
-
-
-int HeapNumber::get_exponent() {
- return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
- kExponentShift) - kExponentBias;
-}
-
-
-int HeapNumber::get_sign() {
- return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
-}
-
-
-ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
-
-
-Object** FixedArray::GetFirstElementAddress() {
- return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
-}
-
-
-bool FixedArray::ContainsOnlySmisOrHoles() {
- Object* the_hole = GetHeap()->the_hole_value();
- Object** current = GetFirstElementAddress();
- for (int i = 0; i < length(); ++i) {
- Object* candidate = *current++;
- if (!candidate->IsSmi() && candidate != the_hole) return false;
- }
- return true;
-}
-
-
-FixedArrayBase* JSObject::elements() {
- Object* array = READ_FIELD(this, kElementsOffset);
- return static_cast<FixedArrayBase*>(array);
-}
-
-
-void JSObject::ValidateElements() {
-#if DEBUG
- if (FLAG_enable_slow_asserts) {
- ElementsAccessor* accessor = GetElementsAccessor();
- accessor->Validate(this);
- }
-#endif
-}
-
-
-MaybeObject* JSObject::EnsureCanContainHeapObjectElements() {
- ValidateElements();
- ElementsKind elements_kind = map()->elements_kind();
- if (!IsFastObjectElementsKind(elements_kind)) {
- if (IsFastHoleyElementsKind(elements_kind)) {
- return TransitionElementsKind(FAST_HOLEY_ELEMENTS);
- } else {
- return TransitionElementsKind(FAST_ELEMENTS);
- }
- }
- return this;
-}
-
-
-MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
- uint32_t count,
- EnsureElementsMode mode) {
- ElementsKind current_kind = map()->elements_kind();
- ElementsKind target_kind = current_kind;
- ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
- bool is_holey = IsFastHoleyElementsKind(current_kind);
- if (current_kind == FAST_HOLEY_ELEMENTS) return this;
- Heap* heap = GetHeap();
- Object* the_hole = heap->the_hole_value();
- for (uint32_t i = 0; i < count; ++i) {
- Object* current = *objects++;
- if (current == the_hole) {
- is_holey = true;
- target_kind = GetHoleyElementsKind(target_kind);
- } else if (!current->IsSmi()) {
- if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
- if (IsFastSmiElementsKind(target_kind)) {
- if (is_holey) {
- target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
- } else {
- target_kind = FAST_DOUBLE_ELEMENTS;
- }
- }
- } else if (is_holey) {
- target_kind = FAST_HOLEY_ELEMENTS;
- break;
- } else {
- target_kind = FAST_ELEMENTS;
- }
- }
- }
-
- if (target_kind != current_kind) {
- return TransitionElementsKind(target_kind);
- }
- return this;
-}
-
-
-MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
- uint32_t length,
- EnsureElementsMode mode) {
- if (elements->map() != GetHeap()->fixed_double_array_map()) {
- ASSERT(elements->map() == GetHeap()->fixed_array_map() ||
- elements->map() == GetHeap()->fixed_cow_array_map());
- if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
- mode = DONT_ALLOW_DOUBLE_ELEMENTS;
- }
- Object** objects = FixedArray::cast(elements)->GetFirstElementAddress();
- return EnsureCanContainElements(objects, length, mode);
- }
-
- ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
- if (GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
- return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
- } else if (GetElementsKind() == FAST_SMI_ELEMENTS) {
- FixedDoubleArray* double_array = FixedDoubleArray::cast(elements);
- for (uint32_t i = 0; i < length; ++i) {
- if (double_array->is_the_hole(i)) {
- return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
- }
- }
- return TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
- }
-
- return this;
-}
-
-
-MaybeObject* JSObject::GetElementsTransitionMap(Isolate* isolate,
- ElementsKind to_kind) {
- Map* current_map = map();
- ElementsKind from_kind = current_map->elements_kind();
- if (from_kind == to_kind) return current_map;
-
- Context* native_context = isolate->context()->native_context();
- Object* maybe_array_maps = native_context->js_array_maps();
- if (maybe_array_maps->IsFixedArray()) {
- FixedArray* array_maps = FixedArray::cast(maybe_array_maps);
- if (array_maps->get(from_kind) == current_map) {
- Object* maybe_transitioned_map = array_maps->get(to_kind);
- if (maybe_transitioned_map->IsMap()) {
- return Map::cast(maybe_transitioned_map);
- }
- }
- }
-
- return GetElementsTransitionMapSlow(to_kind);
-}
-
-
-void JSObject::set_map_and_elements(Map* new_map,
- FixedArrayBase* value,
- WriteBarrierMode mode) {
- ASSERT(value->HasValidElements());
- if (new_map != NULL) {
- if (mode == UPDATE_WRITE_BARRIER) {
- set_map(new_map);
- } else {
- ASSERT(mode == SKIP_WRITE_BARRIER);
- set_map_no_write_barrier(new_map);
- }
- }
- ASSERT((map()->has_fast_smi_or_object_elements() ||
- (value == GetHeap()->empty_fixed_array())) ==
- (value->map() == GetHeap()->fixed_array_map() ||
- value->map() == GetHeap()->fixed_cow_array_map()));
- ASSERT((value == GetHeap()->empty_fixed_array()) ||
- (map()->has_fast_double_elements() == value->IsFixedDoubleArray()));
- WRITE_FIELD(this, kElementsOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
-}
-
-
-void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
- set_map_and_elements(NULL, value, mode);
-}
-
-
-void JSObject::initialize_properties() {
- ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
- WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
-}
-
-
-void JSObject::initialize_elements() {
- ASSERT(map()->has_fast_smi_or_object_elements() ||
- map()->has_fast_double_elements());
- ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
- WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
-}
-
-
-MaybeObject* JSObject::ResetElements() {
- if (map()->is_observed()) {
- // Maintain invariant that observed elements are always in dictionary mode.
- SeededNumberDictionary* dictionary;
- MaybeObject* maybe = SeededNumberDictionary::Allocate(0);
- if (!maybe->To(&dictionary)) return maybe;
- if (map() == GetHeap()->non_strict_arguments_elements_map()) {
- FixedArray::cast(elements())->set(1, dictionary);
- } else {
- set_elements(dictionary);
- }
- return this;
- }
-
- ElementsKind elements_kind = GetInitialFastElementsKind();
- if (!FLAG_smi_only_arrays) {
- elements_kind = FastSmiToObjectElementsKind(elements_kind);
- }
- MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind);
- Map* map;
- if (!maybe->To(&map)) return maybe;
- set_map(map);
- initialize_elements();
-
- return this;
-}
-
-
-MaybeObject* JSObject::AddFastPropertyUsingMap(Map* map) {
- ASSERT(this->map()->NumberOfOwnDescriptors() + 1 ==
- map->NumberOfOwnDescriptors());
- if (this->map()->unused_property_fields() == 0) {
- int new_size = properties()->length() + map->unused_property_fields() + 1;
- FixedArray* new_properties;
- MaybeObject* maybe_properties = properties()->CopySize(new_size);
- if (!maybe_properties->To(&new_properties)) return maybe_properties;
- set_properties(new_properties);
- }
- set_map(map);
- return this;
-}
-
-
-bool JSObject::TryTransitionToField(Handle<JSObject> object,
- Handle<String> key) {
- if (!object->map()->HasTransitionArray()) return false;
- Handle<TransitionArray> transitions(object->map()->transitions());
- int transition = transitions->Search(*key);
- if (transition == TransitionArray::kNotFound) return false;
- PropertyDetails target_details = transitions->GetTargetDetails(transition);
- if (target_details.type() != FIELD) return false;
- if (target_details.attributes() != NONE) return false;
- Handle<Map> target(transitions->GetTarget(transition));
- JSObject::AddFastPropertyUsingMap(object, target);
- return true;
-}
-
-
-int JSObject::LastAddedFieldIndex() {
- Map* map = this->map();
- int last_added = map->LastAdded();
- return map->instance_descriptors()->GetFieldIndex(last_added);
-}
-
-
-ACCESSORS(Oddball, to_string, String, kToStringOffset)
-ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
-
-
-byte Oddball::kind() {
- return Smi::cast(READ_FIELD(this, kKindOffset))->value();
-}
-
-
-void Oddball::set_kind(byte value) {
- WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
-}
-
-
-Object* JSGlobalPropertyCell::value() {
- return READ_FIELD(this, kValueOffset);
-}
-
-
-void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) {
- // The write barrier is not used for global property cells.
- ASSERT(!val->IsJSGlobalPropertyCell());
- WRITE_FIELD(this, kValueOffset, val);
-}
-
-
-int JSObject::GetHeaderSize() {
- InstanceType type = map()->instance_type();
- // Check for the most common kind of JavaScript object before
- // falling into the generic switch. This speeds up the internal
- // field operations considerably on average.
- if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
- switch (type) {
- case JS_MODULE_TYPE:
- return JSModule::kSize;
- case JS_GLOBAL_PROXY_TYPE:
- return JSGlobalProxy::kSize;
- case JS_GLOBAL_OBJECT_TYPE:
- return JSGlobalObject::kSize;
- case JS_BUILTINS_OBJECT_TYPE:
- return JSBuiltinsObject::kSize;
- case JS_FUNCTION_TYPE:
- return JSFunction::kSize;
- case JS_VALUE_TYPE:
- return JSValue::kSize;
- case JS_DATE_TYPE:
- return JSDate::kSize;
- case JS_ARRAY_TYPE:
- return JSArray::kSize;
- case JS_WEAK_MAP_TYPE:
- return JSWeakMap::kSize;
- case JS_REGEXP_TYPE:
- return JSRegExp::kSize;
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- return JSObject::kHeaderSize;
- case JS_MESSAGE_OBJECT_TYPE:
- return JSMessageObject::kSize;
- default:
- UNREACHABLE();
- return 0;
- }
-}
-
-
-int JSObject::GetInternalFieldCount() {
- ASSERT(1 << kPointerSizeLog2 == kPointerSize);
- // Make sure to adjust for the number of in-object properties. These
- // properties do contribute to the size, but are not internal fields.
- return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) -
- map()->inobject_properties() - (map()->has_external_resource()?1:0);
-}
-
-
-int JSObject::GetInternalFieldOffset(int index) {
- ASSERT(index < GetInternalFieldCount() && index >= 0);
- return GetHeaderSize() + (kPointerSize * index);
-}
-
-
-Object* JSObject::GetInternalField(int index) {
- ASSERT(index < GetInternalFieldCount() && index >= 0);
- // Internal objects do follow immediately after the header, whereas in-object
- // properties are at the end of the object. Therefore there is no need
- // to adjust the index here.
- return READ_FIELD(this, GetHeaderSize() + (kPointerSize * index));
-}
-
-
-void JSObject::SetInternalField(int index, Object* value) {
- ASSERT(index < GetInternalFieldCount() && index >= 0);
- // Internal objects do follow immediately after the header, whereas in-object
- // properties are at the end of the object. Therefore there is no need
- // to adjust the index here.
- int offset = GetHeaderSize() + (kPointerSize * index);
- WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
-}
-
-
-void JSObject::SetInternalField(int index, Smi* value) {
- ASSERT(index < GetInternalFieldCount() && index >= 0);
- // Internal objects do follow immediately after the header, whereas in-object
- // properties are at the end of the object. Therefore there is no need
- // to adjust the index here.
- int offset = GetHeaderSize() + (kPointerSize * index);
- WRITE_FIELD(this, offset, value);
-}
-
-
-void JSObject::SetExternalResourceObject(Object* value) {
- ASSERT(map()->has_external_resource());
- int offset = GetHeaderSize() + kPointerSize * GetInternalFieldCount();
- WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
-}
-
-
-Object *JSObject::GetExternalResourceObject() {
- if (map()->has_external_resource()) {
- int offset = GetHeaderSize() + kPointerSize * GetInternalFieldCount();
- return READ_FIELD(this, offset);
- } else {
- return GetHeap()->undefined_value();
- }
-}
-
-
-// Access fast-case object properties at index. The use of these routines
-// is needed to correctly distinguish between properties stored in-object and
-// properties stored in the properties array.
-Object* JSObject::FastPropertyAt(int index) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- if (index < 0) {
- int offset = map()->instance_size() + (index * kPointerSize);
- return READ_FIELD(this, offset);
- } else {
- ASSERT(index < properties()->length());
- return properties()->get(index);
- }
-}
-
-
-Object* JSObject::FastPropertyAtPut(int index, Object* value) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- if (index < 0) {
- int offset = map()->instance_size() + (index * kPointerSize);
- WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
- } else {
- ASSERT(index < properties()->length());
- properties()->set(index, value);
- }
- return value;
-}
-
-
-int JSObject::GetInObjectPropertyOffset(int index) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- ASSERT(index < 0);
- return map()->instance_size() + (index * kPointerSize);
-}
-
-
-Object* JSObject::InObjectPropertyAt(int index) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- ASSERT(index < 0);
- int offset = map()->instance_size() + (index * kPointerSize);
- return READ_FIELD(this, offset);
-}
-
-
-Object* JSObject::InObjectPropertyAtPut(int index,
- Object* value,
- WriteBarrierMode mode) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- ASSERT(index < 0);
- int offset = map()->instance_size() + (index * kPointerSize);
- WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
- return value;
-}
-
-
-
-void JSObject::InitializeBody(Map* map,
- Object* pre_allocated_value,
- Object* filler_value) {
- ASSERT(!filler_value->IsHeapObject() ||
- !GetHeap()->InNewSpace(filler_value));
- ASSERT(!pre_allocated_value->IsHeapObject() ||
- !GetHeap()->InNewSpace(pre_allocated_value));
- int size = map->instance_size();
- int offset = kHeaderSize;
- if (filler_value != pre_allocated_value) {
- int pre_allocated = map->pre_allocated_property_fields();
- ASSERT(pre_allocated * kPointerSize + kHeaderSize <= size);
- for (int i = 0; i < pre_allocated; i++) {
- WRITE_FIELD(this, offset, pre_allocated_value);
- offset += kPointerSize;
- }
- }
- while (offset < size) {
- WRITE_FIELD(this, offset, filler_value);
- offset += kPointerSize;
- }
-}
-
-
-bool JSObject::HasFastProperties() {
- ASSERT(properties()->IsDictionary() == map()->is_dictionary_map());
- return !properties()->IsDictionary();
-}
-
-
-bool JSObject::TooManyFastProperties(int properties,
- JSObject::StoreFromKeyed store_mode) {
- // Allow extra fast properties if the object has more than
- // kFastPropertiesSoftLimit in-object properties. When this is the case,
- // it is very unlikely that the object is being used as a dictionary
- // and there is a good chance that allowing more map transitions
- // will be worth it.
- int inobject = map()->inobject_properties();
-
- int limit;
- if (store_mode == CERTAINLY_NOT_STORE_FROM_KEYED) {
- limit = Max(inobject, kMaxFastProperties);
- } else {
- limit = Max(inobject, kFastPropertiesSoftLimit);
- }
- return properties > limit;
-}
-
-
-void Struct::InitializeBody(int object_size) {
- Object* value = GetHeap()->undefined_value();
- for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
- WRITE_FIELD(this, offset, value);
- }
-}
-
-
-bool Object::ToArrayIndex(uint32_t* index) {
- if (IsSmi()) {
- int value = Smi::cast(this)->value();
- if (value < 0) return false;
- *index = value;
- return true;
- }
- if (IsHeapNumber()) {
- double value = HeapNumber::cast(this)->value();
- uint32_t uint_value = static_cast<uint32_t>(value);
- if (value == static_cast<double>(uint_value)) {
- *index = uint_value;
- return true;
- }
- }
- return false;
-}
-
-
-bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
- if (!this->IsJSValue()) return false;
-
- JSValue* js_value = JSValue::cast(this);
- if (!js_value->value()->IsString()) return false;
-
- String* str = String::cast(js_value->value());
- if (index >= static_cast<uint32_t>(str->length())) return false;
-
- return true;
-}
-
-
-
-void Object::VerifyApiCallResultType() {
-#if ENABLE_EXTRA_CHECKS
- if (!(IsSmi() ||
- IsString() ||
- IsSpecObject() ||
- IsHeapNumber() ||
- IsUndefined() ||
- IsTrue() ||
- IsFalse() ||
- IsNull())) {
- FATAL("API call returned invalid object");
- }
-#endif // ENABLE_EXTRA_CHECKS
-}
-
-
-FixedArrayBase* FixedArrayBase::cast(Object* object) {
- ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray());
- return reinterpret_cast<FixedArrayBase*>(object);
-}
-
-
-Object* FixedArray::get(int index) {
- ASSERT(index >= 0 && index < this->length());
- return READ_FIELD(this, kHeaderSize + index * kPointerSize);
-}
-
-
-bool FixedArray::is_the_hole(int index) {
- return get(index) == GetHeap()->the_hole_value();
-}
-
-
-void FixedArray::set(int index, Smi* value) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- ASSERT(index >= 0 && index < this->length());
- ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
-}
-
-
-void FixedArray::set(int index, Object* value) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- ASSERT(index >= 0 && index < this->length());
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
-}
-
-
-inline bool FixedDoubleArray::is_the_hole_nan(double value) {
- return BitCast<uint64_t, double>(value) == kHoleNanInt64;
-}
-
-
-inline double FixedDoubleArray::hole_nan_as_double() {
- return BitCast<double, uint64_t>(kHoleNanInt64);
-}
-
-
-inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
- ASSERT(BitCast<uint64_t>(OS::nan_value()) != kHoleNanInt64);
- ASSERT((BitCast<uint64_t>(OS::nan_value()) >> 32) != kHoleNanUpper32);
- return OS::nan_value();
-}
-
-
-double FixedDoubleArray::get_scalar(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
- ASSERT(index >= 0 && index < this->length());
- double result = READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
- ASSERT(!is_the_hole_nan(result));
- return result;
-}
-
-int64_t FixedDoubleArray::get_representation(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
- ASSERT(index >= 0 && index < this->length());
- return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
-}
-
-MaybeObject* FixedDoubleArray::get(int index) {
- if (is_the_hole(index)) {
- return GetHeap()->the_hole_value();
- } else {
- return GetHeap()->NumberFromDouble(get_scalar(index));
- }
-}
-
-
-void FixedDoubleArray::set(int index, double value) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
- int offset = kHeaderSize + index * kDoubleSize;
- if (isnan(value)) value = canonical_not_the_hole_nan_as_double();
- WRITE_DOUBLE_FIELD(this, offset, value);
-}
-
-
-void FixedDoubleArray::set_the_hole(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
- int offset = kHeaderSize + index * kDoubleSize;
- WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
-}
-
-
-bool FixedDoubleArray::is_the_hole(int index) {
- int offset = kHeaderSize + index * kDoubleSize;
- return is_the_hole_nan(READ_DOUBLE_FIELD(this, offset));
-}
-
-
-WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
- Heap* heap = GetHeap();
- if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
- if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
- return UPDATE_WRITE_BARRIER;
-}
-
-
-void FixedArray::set(int index,
- Object* value,
- WriteBarrierMode mode) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- ASSERT(index >= 0 && index < this->length());
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
-}
-
-
-void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
- int index,
- Object* value) {
- ASSERT(array->map() != HEAP->fixed_cow_array_map());
- ASSERT(index >= 0 && index < array->length());
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(array, offset, value);
- Heap* heap = array->GetHeap();
- if (heap->InNewSpace(value)) {
- heap->RecordWrite(array->address(), offset);
- }
-}
-
-
-void FixedArray::NoWriteBarrierSet(FixedArray* array,
- int index,
- Object* value) {
- ASSERT(array->map() != HEAP->fixed_cow_array_map());
- ASSERT(index >= 0 && index < array->length());
- ASSERT(!HEAP->InNewSpace(value));
- WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
-}
-
-
-void FixedArray::set_undefined(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- set_undefined(GetHeap(), index);
-}
-
-
-void FixedArray::set_undefined(Heap* heap, int index) {
- ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->undefined_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
- heap->undefined_value());
-}
-
-
-void FixedArray::set_null(int index) {
- set_null(GetHeap(), index);
-}
-
-
-void FixedArray::set_null(Heap* heap, int index) {
- ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
-}
-
-
-void FixedArray::set_the_hole(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- ASSERT(index >= 0 && index < this->length());
- ASSERT(!HEAP->InNewSpace(HEAP->the_hole_value()));
- WRITE_FIELD(this,
- kHeaderSize + index * kPointerSize,
- GetHeap()->the_hole_value());
-}
-
-
-void FixedArray::set_unchecked(int index, Smi* value) {
- ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
-}
-
-
-void FixedArray::set_unchecked(Heap* heap,
- int index,
- Object* value,
- WriteBarrierMode mode) {
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(heap, this, offset, value, mode);
-}
-
-
-void FixedArray::set_null_unchecked(Heap* heap, int index) {
- ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
-}
-
-
-double* FixedDoubleArray::data_start() {
- return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
-}
-
-
-Object** FixedArray::data_start() {
- return HeapObject::RawField(this, kHeaderSize);
-}
-
-
-bool DescriptorArray::IsEmpty() {
- ASSERT(length() >= kFirstIndex ||
- this == HEAP->empty_descriptor_array());
- return length() < kFirstIndex;
-}
-
-
-void DescriptorArray::SetNumberOfDescriptors(int number_of_descriptors) {
- WRITE_FIELD(
- this, kDescriptorLengthOffset, Smi::FromInt(number_of_descriptors));
-}
-
-
-// Perform a binary search in a fixed array. Low and high are entry indices. If
-// there are three entries in this array it should be called with low=0 and
-// high=2.
-template<SearchMode search_mode, typename T>
-int BinarySearch(T* array, String* name, int low, int high, int valid_entries) {
- uint32_t hash = name->Hash();
- int limit = high;
-
- ASSERT(low <= high);
-
- while (low != high) {
- int mid = (low + high) / 2;
- String* mid_name = array->GetSortedKey(mid);
- uint32_t mid_hash = mid_name->Hash();
-
- if (mid_hash >= hash) {
- high = mid;
- } else {
- low = mid + 1;
- }
- }
-
- for (; low <= limit; ++low) {
- int sort_index = array->GetSortedKeyIndex(low);
- String* entry = array->GetKey(sort_index);
- if (entry->Hash() != hash) break;
- if (entry->Equals(name)) {
- if (search_mode == ALL_ENTRIES || sort_index < valid_entries) {
- return sort_index;
- }
- return T::kNotFound;
- }
- }
-
- return T::kNotFound;
-}
-
-
-// Perform a linear search in this fixed array. len is the number of entry
-// indices that are valid.
-template<SearchMode search_mode, typename T>
-int LinearSearch(T* array, String* name, int len, int valid_entries) {
- uint32_t hash = name->Hash();
- if (search_mode == ALL_ENTRIES) {
- for (int number = 0; number < len; number++) {
- int sorted_index = array->GetSortedKeyIndex(number);
- String* entry = array->GetKey(sorted_index);
- uint32_t current_hash = entry->Hash();
- if (current_hash > hash) break;
- if (current_hash == hash && entry->Equals(name)) return sorted_index;
- }
- } else {
- ASSERT(len >= valid_entries);
- for (int number = 0; number < valid_entries; number++) {
- String* entry = array->GetKey(number);
- uint32_t current_hash = entry->Hash();
- if (current_hash == hash && entry->Equals(name)) return number;
- }
- }
- return T::kNotFound;
-}
-
-
-template<SearchMode search_mode, typename T>
-int Search(T* array, String* name, int valid_entries) {
- if (search_mode == VALID_ENTRIES) {
- SLOW_ASSERT(array->IsSortedNoDuplicates(valid_entries));
- } else {
- SLOW_ASSERT(array->IsSortedNoDuplicates());
- }
-
- int nof = array->number_of_entries();
- if (nof == 0) return T::kNotFound;
-
- // Fast case: do linear search for small arrays.
- const int kMaxElementsForLinearSearch = 8;
- if ((search_mode == ALL_ENTRIES &&
- nof <= kMaxElementsForLinearSearch) ||
- (search_mode == VALID_ENTRIES &&
- valid_entries <= (kMaxElementsForLinearSearch * 3))) {
- return LinearSearch<search_mode>(array, name, nof, valid_entries);
- }
-
- // Slow case: perform binary search.
- return BinarySearch<search_mode>(array, name, 0, nof - 1, valid_entries);
-}
-
-
-int DescriptorArray::Search(String* name, int valid_descriptors) {
- return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors);
-}
-
-
-int DescriptorArray::SearchWithCache(String* name, Map* map) {
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- if (number_of_own_descriptors == 0) return kNotFound;
-
- DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache();
- int number = cache->Lookup(map, name);
-
- if (number == DescriptorLookupCache::kAbsent) {
- number = Search(name, number_of_own_descriptors);
- cache->Update(map, name, number);
- }
-
- return number;
-}
-
-
-void Map::LookupDescriptor(JSObject* holder,
- String* name,
- LookupResult* result) {
- DescriptorArray* descriptors = this->instance_descriptors();
- int number = descriptors->SearchWithCache(name, this);
- if (number == DescriptorArray::kNotFound) return result->NotFound();
- result->DescriptorResult(holder, descriptors->GetDetails(number), number);
-}
-
-
-void Map::LookupTransition(JSObject* holder,
- String* name,
- LookupResult* result) {
- if (HasTransitionArray()) {
- TransitionArray* transition_array = transitions();
- int number = transition_array->Search(name);
- if (number != TransitionArray::kNotFound) {
- return result->TransitionResult(holder, number);
- }
- }
- result->NotFound();
-}
-
-
-Object** DescriptorArray::GetKeySlot(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
- return HeapObject::RawField(
- reinterpret_cast<HeapObject*>(this),
- OffsetOfElementAt(ToKeyIndex(descriptor_number)));
-}
-
-
-Object** DescriptorArray::GetDescriptorStartSlot(int descriptor_number) {
- return GetKeySlot(descriptor_number);
-}
-
-
-Object** DescriptorArray::GetDescriptorEndSlot(int descriptor_number) {
- return GetValueSlot(descriptor_number - 1) + 1;
-}
-
-
-String* DescriptorArray::GetKey(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
- return String::cast(get(ToKeyIndex(descriptor_number)));
-}
-
-
-int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
- return GetDetails(descriptor_number).pointer();
-}
-
-
-String* DescriptorArray::GetSortedKey(int descriptor_number) {
- return GetKey(GetSortedKeyIndex(descriptor_number));
-}
-
-
-void DescriptorArray::SetSortedKey(int descriptor_index, int pointer) {
- PropertyDetails details = GetDetails(descriptor_index);
- set(ToDetailsIndex(descriptor_index), details.set_pointer(pointer).AsSmi());
-}
-
-
-Object** DescriptorArray::GetValueSlot(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
- return HeapObject::RawField(
- reinterpret_cast<HeapObject*>(this),
- OffsetOfElementAt(ToValueIndex(descriptor_number)));
-}
-
-
-Object* DescriptorArray::GetValue(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
- return get(ToValueIndex(descriptor_number));
-}
-
-
-PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
- Object* details = get(ToDetailsIndex(descriptor_number));
- return PropertyDetails(Smi::cast(details));
-}
-
-
-PropertyType DescriptorArray::GetType(int descriptor_number) {
- return GetDetails(descriptor_number).type();
-}
-
-
-int DescriptorArray::GetFieldIndex(int descriptor_number) {
- return Descriptor::IndexFromValue(GetValue(descriptor_number));
-}
-
-
-JSFunction* DescriptorArray::GetConstantFunction(int descriptor_number) {
- return JSFunction::cast(GetValue(descriptor_number));
-}
-
-
-Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
- ASSERT(GetType(descriptor_number) == CALLBACKS);
- return GetValue(descriptor_number);
-}
-
-
-AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
- ASSERT(GetType(descriptor_number) == CALLBACKS);
- Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
- return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
-}
-
-
-void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
- desc->Init(GetKey(descriptor_number),
- GetValue(descriptor_number),
- GetDetails(descriptor_number));
-}
-
-
-void DescriptorArray::Set(int descriptor_number,
- Descriptor* desc,
- const WhitenessWitness&) {
- // Range check.
- ASSERT(descriptor_number < number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() <=
- number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() > 0);
-
- NoIncrementalWriteBarrierSet(this,
- ToKeyIndex(descriptor_number),
- desc->GetKey());
- NoIncrementalWriteBarrierSet(this,
- ToValueIndex(descriptor_number),
- desc->GetValue());
- NoIncrementalWriteBarrierSet(this,
- ToDetailsIndex(descriptor_number),
- desc->GetDetails().AsSmi());
-}
-
-
-void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
- // Range check.
- ASSERT(descriptor_number < number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() <=
- number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() > 0);
-
- set(ToKeyIndex(descriptor_number), desc->GetKey());
- set(ToValueIndex(descriptor_number), desc->GetValue());
- set(ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi());
-}
-
-
-void DescriptorArray::Append(Descriptor* desc,
- const WhitenessWitness& witness) {
- int descriptor_number = number_of_descriptors();
- int enumeration_index = descriptor_number + 1;
- SetNumberOfDescriptors(descriptor_number + 1);
- desc->SetEnumerationIndex(enumeration_index);
- Set(descriptor_number, desc, witness);
-
- uint32_t hash = desc->GetKey()->Hash();
-
- int insertion;
-
- for (insertion = descriptor_number; insertion > 0; --insertion) {
- String* key = GetSortedKey(insertion - 1);
- if (key->Hash() <= hash) break;
- SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
- }
-
- SetSortedKey(insertion, descriptor_number);
-}
-
-
-void DescriptorArray::Append(Descriptor* desc) {
- int descriptor_number = number_of_descriptors();
- int enumeration_index = descriptor_number + 1;
- SetNumberOfDescriptors(descriptor_number + 1);
- desc->SetEnumerationIndex(enumeration_index);
- Set(descriptor_number, desc);
-
- uint32_t hash = desc->GetKey()->Hash();
-
- int insertion;
-
- for (insertion = descriptor_number; insertion > 0; --insertion) {
- String* key = GetSortedKey(insertion - 1);
- if (key->Hash() <= hash) break;
- SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
- }
-
- SetSortedKey(insertion, descriptor_number);
-}
-
-
-void DescriptorArray::SwapSortedKeys(int first, int second) {
- int first_key = GetSortedKeyIndex(first);
- SetSortedKey(first, GetSortedKeyIndex(second));
- SetSortedKey(second, first_key);
-}
-
-
-DescriptorArray::WhitenessWitness::WhitenessWitness(FixedArray* array)
- : marking_(array->GetHeap()->incremental_marking()) {
- marking_->EnterNoMarkingScope();
- ASSERT(Marking::Color(array) == Marking::WHITE_OBJECT);
-}
-
-
-DescriptorArray::WhitenessWitness::~WhitenessWitness() {
- marking_->LeaveNoMarkingScope();
-}
-
-
-template<typename Shape, typename Key>
-int HashTable<Shape, Key>::ComputeCapacity(int at_least_space_for) {
- const int kMinCapacity = 32;
- int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
- if (capacity < kMinCapacity) {
- capacity = kMinCapacity; // Guarantee min capacity.
- }
- return capacity;
-}
-
-
-template<typename Shape, typename Key>
-int HashTable<Shape, Key>::FindEntry(Key key) {
- return FindEntry(GetIsolate(), key);
-}
-
-
-// Find entry for key otherwise return kNotFound.
-template<typename Shape, typename Key>
-int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
- uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(HashTable<Shape, Key>::Hash(key), capacity);
- uint32_t count = 1;
- // EnsureCapacity will guarantee the hash table is never full.
- while (true) {
- Object* element = KeyAt(entry);
- // Empty entry. Uses raw unchecked accessors because it is called by the
- // string table during bootstrapping.
- if (element == isolate->heap()->raw_unchecked_undefined_value()) break;
- if (element != isolate->heap()->raw_unchecked_the_hole_value() &&
- Shape::IsMatch(key, element)) return entry;
- entry = NextProbe(entry, count++, capacity);
- }
- return kNotFound;
-}
-
-
-bool SeededNumberDictionary::requires_slow_elements() {
- Object* max_index_object = get(kMaxNumberKeyIndex);
- if (!max_index_object->IsSmi()) return false;
- return 0 !=
- (Smi::cast(max_index_object)->value() & kRequiresSlowElementsMask);
-}
-
-uint32_t SeededNumberDictionary::max_number_key() {
- ASSERT(!requires_slow_elements());
- Object* max_index_object = get(kMaxNumberKeyIndex);
- if (!max_index_object->IsSmi()) return 0;
- uint32_t value = static_cast<uint32_t>(Smi::cast(max_index_object)->value());
- return value >> kRequiresSlowElementsTagSize;
-}
-
-void SeededNumberDictionary::set_requires_slow_elements() {
- set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
-}
-
-
-// ------------------------------------
-// Cast operations
-
-
-CAST_ACCESSOR(FixedArray)
-CAST_ACCESSOR(FixedDoubleArray)
-CAST_ACCESSOR(DescriptorArray)
-CAST_ACCESSOR(DeoptimizationInputData)
-CAST_ACCESSOR(DeoptimizationOutputData)
-CAST_ACCESSOR(DependentCode)
-CAST_ACCESSOR(TypeFeedbackCells)
-CAST_ACCESSOR(StringTable)
-CAST_ACCESSOR(JSFunctionResultCache)
-CAST_ACCESSOR(NormalizedMapCache)
-CAST_ACCESSOR(ScopeInfo)
-CAST_ACCESSOR(CompilationCacheTable)
-CAST_ACCESSOR(CodeCacheHashTable)
-CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
-CAST_ACCESSOR(MapCache)
-CAST_ACCESSOR(String)
-CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(SeqOneByteString)
-CAST_ACCESSOR(SeqTwoByteString)
-CAST_ACCESSOR(SlicedString)
-CAST_ACCESSOR(ConsString)
-CAST_ACCESSOR(ExternalString)
-CAST_ACCESSOR(ExternalAsciiString)
-CAST_ACCESSOR(ExternalTwoByteString)
-CAST_ACCESSOR(Symbol)
-CAST_ACCESSOR(JSReceiver)
-CAST_ACCESSOR(JSObject)
-CAST_ACCESSOR(Smi)
-CAST_ACCESSOR(HeapObject)
-CAST_ACCESSOR(HeapNumber)
-CAST_ACCESSOR(Name)
-CAST_ACCESSOR(Oddball)
-CAST_ACCESSOR(JSGlobalPropertyCell)
-CAST_ACCESSOR(SharedFunctionInfo)
-CAST_ACCESSOR(Map)
-CAST_ACCESSOR(JSFunction)
-CAST_ACCESSOR(GlobalObject)
-CAST_ACCESSOR(JSGlobalProxy)
-CAST_ACCESSOR(JSGlobalObject)
-CAST_ACCESSOR(JSBuiltinsObject)
-CAST_ACCESSOR(Code)
-CAST_ACCESSOR(JSArray)
-CAST_ACCESSOR(JSRegExp)
-CAST_ACCESSOR(JSProxy)
-CAST_ACCESSOR(JSFunctionProxy)
-CAST_ACCESSOR(JSSet)
-CAST_ACCESSOR(JSMap)
-CAST_ACCESSOR(JSWeakMap)
-CAST_ACCESSOR(Foreign)
-CAST_ACCESSOR(ByteArray)
-CAST_ACCESSOR(FreeSpace)
-CAST_ACCESSOR(ExternalArray)
-CAST_ACCESSOR(ExternalByteArray)
-CAST_ACCESSOR(ExternalUnsignedByteArray)
-CAST_ACCESSOR(ExternalShortArray)
-CAST_ACCESSOR(ExternalUnsignedShortArray)
-CAST_ACCESSOR(ExternalIntArray)
-CAST_ACCESSOR(ExternalUnsignedIntArray)
-CAST_ACCESSOR(ExternalFloatArray)
-CAST_ACCESSOR(ExternalDoubleArray)
-CAST_ACCESSOR(ExternalPixelArray)
-CAST_ACCESSOR(Struct)
-CAST_ACCESSOR(AccessorInfo)
-
-
-#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
- STRUCT_LIST(MAKE_STRUCT_CAST)
-#undef MAKE_STRUCT_CAST
-
-
-template <typename Shape, typename Key>
-HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
- ASSERT(obj->IsHashTable());
- return reinterpret_cast<HashTable*>(obj);
-}
-
-
-SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
-SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
-
-SMI_ACCESSORS(String, length, kLengthOffset)
-SMI_ACCESSORS(SeqString, symbol_id, kSymbolIdOffset)
-
-
-uint32_t Name::hash_field() {
- return READ_UINT32_FIELD(this, kHashFieldOffset);
-}
-
-
-void Name::set_hash_field(uint32_t value) {
- WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
-#if V8_HOST_ARCH_64_BIT
- WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0);
-#endif
-}
-
-
-bool String::Equals(String* other) {
- if (other == this) return true;
- if (StringShape(this).IsInternalized() &&
- StringShape(other).IsInternalized()) {
- return false;
- }
- return SlowEquals(other);
-}
-
-
-MaybeObject* String::TryFlatten(PretenureFlag pretenure) {
- if (!StringShape(this).IsCons()) return this;
- ConsString* cons = ConsString::cast(this);
- if (cons->IsFlat()) return cons->first();
- return SlowTryFlatten(pretenure);
-}
-
-
-String* String::TryFlattenGetString(PretenureFlag pretenure) {
- MaybeObject* flat = TryFlatten(pretenure);
- Object* successfully_flattened;
- if (!flat->ToObject(&successfully_flattened)) return this;
- return String::cast(successfully_flattened);
-}
-
-
-uint16_t String::Get(int index) {
- ASSERT(index >= 0 && index < length());
- switch (StringShape(this).full_representation_tag()) {
- case kSeqStringTag | kOneByteStringTag:
- return SeqOneByteString::cast(this)->SeqOneByteStringGet(index);
- case kSeqStringTag | kTwoByteStringTag:
- return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
- case kConsStringTag | kOneByteStringTag:
- case kConsStringTag | kTwoByteStringTag:
- return ConsString::cast(this)->ConsStringGet(index);
- case kExternalStringTag | kOneByteStringTag:
- return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
- case kExternalStringTag | kTwoByteStringTag:
- return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
- case kSlicedStringTag | kOneByteStringTag:
- case kSlicedStringTag | kTwoByteStringTag:
- return SlicedString::cast(this)->SlicedStringGet(index);
- default:
- break;
- }
-
- UNREACHABLE();
- return 0;
-}
-
-
-void String::Set(int index, uint16_t value) {
- ASSERT(index >= 0 && index < length());
- ASSERT(StringShape(this).IsSequential());
-
- return this->IsOneByteRepresentation()
- ? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value)
- : SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value);
-}
-
-
-bool String::IsFlat() {
- if (!StringShape(this).IsCons()) return true;
- return ConsString::cast(this)->second()->length() == 0;
-}
-
-
-String* String::GetUnderlying() {
- // Giving direct access to underlying string only makes sense if the
- // wrapping string is already flattened.
- ASSERT(this->IsFlat());
- ASSERT(StringShape(this).IsIndirect());
- STATIC_ASSERT(ConsString::kFirstOffset == SlicedString::kParentOffset);
- const int kUnderlyingOffset = SlicedString::kParentOffset;
- return String::cast(READ_FIELD(this, kUnderlyingOffset));
-}
-
-
-template<class Visitor, class ConsOp>
-void String::Visit(
- String* string,
- unsigned offset,
- Visitor& visitor,
- ConsOp& cons_op,
- int32_t type,
- unsigned length) {
- ASSERT(length == static_cast<unsigned>(string->length()));
- ASSERT(offset <= length);
- unsigned slice_offset = offset;
- while (true) {
- ASSERT(type == string->map()->instance_type());
-
- switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
- case kSeqStringTag | kOneByteStringTag:
- visitor.VisitOneByteString(
- SeqOneByteString::cast(string)->GetChars() + slice_offset,
- length - offset);
- return;
-
- case kSeqStringTag | kTwoByteStringTag:
- visitor.VisitTwoByteString(
- SeqTwoByteString::cast(string)->GetChars() + slice_offset,
- length - offset);
- return;
-
- case kExternalStringTag | kOneByteStringTag:
- visitor.VisitOneByteString(
- ExternalAsciiString::cast(string)->GetChars() + slice_offset,
- length - offset);
- return;
-
- case kExternalStringTag | kTwoByteStringTag:
- visitor.VisitTwoByteString(
- ExternalTwoByteString::cast(string)->GetChars() + slice_offset,
- length - offset);
- return;
-
- case kSlicedStringTag | kOneByteStringTag:
- case kSlicedStringTag | kTwoByteStringTag: {
- SlicedString* slicedString = SlicedString::cast(string);
- slice_offset += slicedString->offset();
- string = slicedString->parent();
- type = string->map()->instance_type();
- continue;
- }
-
- case kConsStringTag | kOneByteStringTag:
- case kConsStringTag | kTwoByteStringTag:
- string = cons_op.Operate(string, &offset, &type, &length);
- if (string == NULL) return;
- slice_offset = offset;
- ASSERT(length == static_cast<unsigned>(string->length()));
- continue;
-
- default:
- UNREACHABLE();
- return;
- }
- }
-}
-
-
-uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
- ASSERT(index >= 0 && index < length());
- return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
-}
-
-
-void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
- ASSERT(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
- WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
- static_cast<byte>(value));
-}
-
-
-Address SeqOneByteString::GetCharsAddress() {
- return FIELD_ADDR(this, kHeaderSize);
-}
-
-
-uint8_t* SeqOneByteString::GetChars() {
- return reinterpret_cast<uint8_t*>(GetCharsAddress());
-}
-
-
-Address SeqTwoByteString::GetCharsAddress() {
- return FIELD_ADDR(this, kHeaderSize);
-}
-
-
-uc16* SeqTwoByteString::GetChars() {
- return reinterpret_cast<uc16*>(FIELD_ADDR(this, kHeaderSize));
-}
-
-
-uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
- ASSERT(index >= 0 && index < length());
- return READ_SHORT_FIELD(this, kHeaderSize + index * kShortSize);
-}
-
-
-void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
- ASSERT(index >= 0 && index < length());
- WRITE_SHORT_FIELD(this, kHeaderSize + index * kShortSize, value);
-}
-
-
-int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
- return SizeFor(length());
-}
-
-
-int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) {
- return SizeFor(length());
-}
-
-
-String* SlicedString::parent() {
- return String::cast(READ_FIELD(this, kParentOffset));
-}
-
-
-void SlicedString::set_parent(String* parent, WriteBarrierMode mode) {
- ASSERT(parent->IsSeqString() || parent->IsExternalString());
- WRITE_FIELD(this, kParentOffset, parent);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kParentOffset, parent, mode);
-}
-
-
-SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)
-
-
-String* ConsString::first() {
- return String::cast(READ_FIELD(this, kFirstOffset));
-}
-
-
-Object* ConsString::unchecked_first() {
- return READ_FIELD(this, kFirstOffset);
-}
-
-
-void ConsString::set_first(String* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kFirstOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, value, mode);
-}
-
-
-String* ConsString::second() {
- return String::cast(READ_FIELD(this, kSecondOffset));
-}
-
-
-Object* ConsString::unchecked_second() {
- return READ_FIELD(this, kSecondOffset);
-}
-
-
-void ConsString::set_second(String* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kSecondOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
-}
-
-
-bool ExternalString::is_short() {
- InstanceType type = map()->instance_type();
- return (type & kShortExternalStringMask) == kShortExternalStringTag;
-}
-
-
-const ExternalAsciiString::Resource* ExternalAsciiString::resource() {
- return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
-}
-
-
-void ExternalAsciiString::update_data_cache() {
- if (is_short()) return;
- const char** data_field =
- reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
- *data_field = resource()->data();
-}
-
-
-void ExternalAsciiString::set_resource(
- const ExternalAsciiString::Resource* resource) {
- *reinterpret_cast<const Resource**>(
- FIELD_ADDR(this, kResourceOffset)) = resource;
- if (resource != NULL) update_data_cache();
-}
-
-
-const uint8_t* ExternalAsciiString::GetChars() {
- return reinterpret_cast<const uint8_t*>(resource()->data());
-}
-
-
-uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
- ASSERT(index >= 0 && index < length());
- return GetChars()[index];
-}
-
-
-const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
- return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
-}
-
-
-void ExternalTwoByteString::update_data_cache() {
- if (is_short()) return;
- const uint16_t** data_field =
- reinterpret_cast<const uint16_t**>(FIELD_ADDR(this, kResourceDataOffset));
- *data_field = resource()->data();
-}
-
-
-void ExternalTwoByteString::set_resource(
- const ExternalTwoByteString::Resource* resource) {
- *reinterpret_cast<const Resource**>(
- FIELD_ADDR(this, kResourceOffset)) = resource;
- if (resource != NULL) update_data_cache();
-}
-
-
-const uint16_t* ExternalTwoByteString::GetChars() {
- return resource()->data();
-}
-
-
-uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
- ASSERT(index >= 0 && index < length());
- return GetChars()[index];
-}
-
-
-const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
- unsigned start) {
- return GetChars() + start;
-}
-
-
-String* ConsStringNullOp::Operate(String*, unsigned*, int32_t*, unsigned*) {
- return NULL;
-}
-
-
-unsigned ConsStringIteratorOp::OffsetForDepth(unsigned depth) {
- return depth & kDepthMask;
-}
-
-
-void ConsStringIteratorOp::PushLeft(ConsString* string) {
- frames_[depth_++ & kDepthMask] = string;
-}
-
-
-void ConsStringIteratorOp::PushRight(ConsString* string) {
- // Inplace update.
- frames_[(depth_-1) & kDepthMask] = string;
-}
-
-
-void ConsStringIteratorOp::AdjustMaximumDepth() {
- if (depth_ > maximum_depth_) maximum_depth_ = depth_;
-}
-
-
-void ConsStringIteratorOp::Pop() {
- ASSERT(depth_ > 0);
- ASSERT(depth_ <= maximum_depth_);
- depth_--;
-}
-
-
-bool ConsStringIteratorOp::HasMore() {
- return depth_ != 0;
-}
-
-
-void ConsStringIteratorOp::Reset() {
- depth_ = 0;
-}
-
-
-String* ConsStringIteratorOp::ContinueOperation(int32_t* type_out,
- unsigned* length_out) {
- bool blew_stack = false;
- String* string = NextLeaf(&blew_stack, type_out, length_out);
- // String found.
- if (string != NULL) {
- // Verify output.
- ASSERT(*length_out == static_cast<unsigned>(string->length()));
- ASSERT(*type_out == string->map()->instance_type());
- return string;
- }
- // Traversal complete.
- if (!blew_stack) return NULL;
- // Restart search from root.
- unsigned offset_out;
- string = Search(&offset_out, type_out, length_out);
- // Verify output.
- ASSERT(string == NULL || offset_out == 0);
- ASSERT(string == NULL ||
- *length_out == static_cast<unsigned>(string->length()));
- ASSERT(string == NULL || *type_out == string->map()->instance_type());
- return string;
-}
-
-
-uint16_t StringCharacterStream::GetNext() {
- ASSERT(buffer8_ != NULL && end_ != NULL);
- // Advance cursor if needed.
- // TODO(dcarney): Ensure uses of the api call HasMore first and avoid this.
- if (buffer8_ == end_) HasMore();
- ASSERT(buffer8_ < end_);
- return is_one_byte_ ? *buffer8_++ : *buffer16_++;
-}
-
-
-StringCharacterStream::StringCharacterStream(String* string,
- ConsStringIteratorOp* op,
- unsigned offset)
- : is_one_byte_(false),
- op_(op) {
- Reset(string, offset);
-}
-
-
-void StringCharacterStream::Reset(String* string, unsigned offset) {
- op_->Reset();
- buffer8_ = NULL;
- end_ = NULL;
- int32_t type = string->map()->instance_type();
- unsigned length = string->length();
- String::Visit(string, offset, *this, *op_, type, length);
-}
-
-
-bool StringCharacterStream::HasMore() {
- if (buffer8_ != end_) return true;
- if (!op_->HasMore()) return false;
- unsigned length;
- int32_t type;
- String* string = op_->ContinueOperation(&type, &length);
- if (string == NULL) return false;
- ASSERT(!string->IsConsString());
- ASSERT(string->length() != 0);
- ConsStringNullOp null_op;
- String::Visit(string, 0, *this, null_op, type, length);
- ASSERT(buffer8_ != end_);
- return true;
-}
-
-
-void StringCharacterStream::VisitOneByteString(
- const uint8_t* chars, unsigned length) {
- is_one_byte_ = true;
- buffer8_ = chars;
- end_ = chars + length;
-}
-
-
-void StringCharacterStream::VisitTwoByteString(
- const uint16_t* chars, unsigned length) {
- is_one_byte_ = false;
- buffer16_ = chars;
- end_ = reinterpret_cast<const uint8_t*>(chars + length);
-}
-
-
-void JSFunctionResultCache::MakeZeroSize() {
- set_finger_index(kEntriesIndex);
- set_size(kEntriesIndex);
-}
-
-
-void JSFunctionResultCache::Clear() {
- int cache_size = size();
- Object** entries_start = RawField(this, OffsetOfElementAt(kEntriesIndex));
- MemsetPointer(entries_start,
- GetHeap()->the_hole_value(),
- cache_size - kEntriesIndex);
- MakeZeroSize();
-}
-
-
-int JSFunctionResultCache::size() {
- return Smi::cast(get(kCacheSizeIndex))->value();
-}
-
-
-void JSFunctionResultCache::set_size(int size) {
- set(kCacheSizeIndex, Smi::FromInt(size));
-}
-
-
-int JSFunctionResultCache::finger_index() {
- return Smi::cast(get(kFingerIndex))->value();
-}
-
-
-void JSFunctionResultCache::set_finger_index(int finger_index) {
- set(kFingerIndex, Smi::FromInt(finger_index));
-}
-
-
-byte ByteArray::get(int index) {
- ASSERT(index >= 0 && index < this->length());
- return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
-}
-
-
-void ByteArray::set(int index, byte value) {
- ASSERT(index >= 0 && index < this->length());
- WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
-}
-
-
-int ByteArray::get_int(int index) {
- ASSERT(index >= 0 && (index * kIntSize) < this->length());
- return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
-}
-
-
-ByteArray* ByteArray::FromDataStartAddress(Address address) {
- ASSERT_TAG_ALIGNED(address);
- return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag);
-}
-
-
-Address ByteArray::GetDataStartAddress() {
- return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
-}
-
-
-uint8_t* ExternalPixelArray::external_pixel_pointer() {
- return reinterpret_cast<uint8_t*>(external_pointer());
-}
-
-
-uint8_t ExternalPixelArray::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = external_pixel_pointer();
- return ptr[index];
-}
-
-
-MaybeObject* ExternalPixelArray::get(int index) {
- return Smi::FromInt(static_cast<int>(get_scalar(index)));
-}
-
-
-void ExternalPixelArray::set(int index, uint8_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = external_pixel_pointer();
- ptr[index] = value;
-}
-
-
-void* ExternalArray::external_pointer() {
- intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
- return reinterpret_cast<void*>(ptr);
-}
-
-
-void ExternalArray::set_external_pointer(void* value, WriteBarrierMode mode) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
-}
-
-
-int8_t ExternalByteArray::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- int8_t* ptr = static_cast<int8_t*>(external_pointer());
- return ptr[index];
-}
-
-
-MaybeObject* ExternalByteArray::get(int index) {
- return Smi::FromInt(static_cast<int>(get_scalar(index)));
-}
-
-
-void ExternalByteArray::set(int index, int8_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- int8_t* ptr = static_cast<int8_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-uint8_t ExternalUnsignedByteArray::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
- return ptr[index];
-}
-
-
-MaybeObject* ExternalUnsignedByteArray::get(int index) {
- return Smi::FromInt(static_cast<int>(get_scalar(index)));
-}
-
-
-void ExternalUnsignedByteArray::set(int index, uint8_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-int16_t ExternalShortArray::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- int16_t* ptr = static_cast<int16_t*>(external_pointer());
- return ptr[index];
-}
-
-
-MaybeObject* ExternalShortArray::get(int index) {
- return Smi::FromInt(static_cast<int>(get_scalar(index)));
-}
-
-
-void ExternalShortArray::set(int index, int16_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- int16_t* ptr = static_cast<int16_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-uint16_t ExternalUnsignedShortArray::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
- return ptr[index];
-}
-
-
-MaybeObject* ExternalUnsignedShortArray::get(int index) {
- return Smi::FromInt(static_cast<int>(get_scalar(index)));
-}
-
-
-void ExternalUnsignedShortArray::set(int index, uint16_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-int32_t ExternalIntArray::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- int32_t* ptr = static_cast<int32_t*>(external_pointer());
- return ptr[index];
-}
-
-
-MaybeObject* ExternalIntArray::get(int index) {
- return GetHeap()->NumberFromInt32(get_scalar(index));
-}
-
-
-void ExternalIntArray::set(int index, int32_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- int32_t* ptr = static_cast<int32_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-uint32_t ExternalUnsignedIntArray::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
- return ptr[index];
-}
-
-
-MaybeObject* ExternalUnsignedIntArray::get(int index) {
- return GetHeap()->NumberFromUint32(get_scalar(index));
-}
-
-
-void ExternalUnsignedIntArray::set(int index, uint32_t value) {
- ASSERT((index >= 0) && (index < this->length()));
- uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-float ExternalFloatArray::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- float* ptr = static_cast<float*>(external_pointer());
- return ptr[index];
-}
-
-
-MaybeObject* ExternalFloatArray::get(int index) {
- return GetHeap()->NumberFromDouble(get_scalar(index));
-}
-
-
-void ExternalFloatArray::set(int index, float value) {
- ASSERT((index >= 0) && (index < this->length()));
- float* ptr = static_cast<float*>(external_pointer());
- ptr[index] = value;
-}
-
-
-double ExternalDoubleArray::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- double* ptr = static_cast<double*>(external_pointer());
- return ptr[index];
-}
-
-
-MaybeObject* ExternalDoubleArray::get(int index) {
- return GetHeap()->NumberFromDouble(get_scalar(index));
-}
-
-
-void ExternalDoubleArray::set(int index, double value) {
- ASSERT((index >= 0) && (index < this->length()));
- double* ptr = static_cast<double*>(external_pointer());
- ptr[index] = value;
-}
-
-
-int Map::visitor_id() {
- return READ_BYTE_FIELD(this, kVisitorIdOffset);
-}
-
-
-void Map::set_visitor_id(int id) {
- ASSERT(0 <= id && id < 256);
- WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast<byte>(id));
-}
-
-
-int Map::instance_size() {
- return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
-}
-
-
-int Map::inobject_properties() {
- return READ_BYTE_FIELD(this, kInObjectPropertiesOffset);
-}
-
-
-int Map::pre_allocated_property_fields() {
- return READ_BYTE_FIELD(this, kPreAllocatedPropertyFieldsOffset);
-}
-
-
-int HeapObject::SizeFromMap(Map* map) {
- int instance_size = map->instance_size();
- if (instance_size != kVariableSizeSentinel) return instance_size;
- // We can ignore the "internalized" bit becase it is only set for strings
- // and thus implies a string type.
- int instance_type =
- static_cast<int>(map->instance_type()) & ~kIsInternalizedMask;
- // Only inline the most frequent cases.
- if (instance_type == FIXED_ARRAY_TYPE) {
- return FixedArray::BodyDescriptor::SizeOf(map, this);
- }
- if (instance_type == ASCII_STRING_TYPE) {
- return SeqOneByteString::SizeFor(
- reinterpret_cast<SeqOneByteString*>(this)->length());
- }
- if (instance_type == BYTE_ARRAY_TYPE) {
- return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
- }
- if (instance_type == FREE_SPACE_TYPE) {
- return reinterpret_cast<FreeSpace*>(this)->size();
- }
- if (instance_type == STRING_TYPE) {
- return SeqTwoByteString::SizeFor(
- reinterpret_cast<SeqTwoByteString*>(this)->length());
- }
- if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
- return FixedDoubleArray::SizeFor(
- reinterpret_cast<FixedDoubleArray*>(this)->length());
- }
- ASSERT(instance_type == CODE_TYPE);
- return reinterpret_cast<Code*>(this)->CodeSize();
-}
-
-
-void Map::set_instance_size(int value) {
- ASSERT_EQ(0, value & (kPointerSize - 1));
- value >>= kPointerSizeLog2;
- ASSERT(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this, kInstanceSizeOffset, static_cast<byte>(value));
-}
-
-
-void Map::set_inobject_properties(int value) {
- ASSERT(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this, kInObjectPropertiesOffset, static_cast<byte>(value));
-}
-
-
-void Map::set_pre_allocated_property_fields(int value) {
- ASSERT(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this,
- kPreAllocatedPropertyFieldsOffset,
- static_cast<byte>(value));
-}
-
-
-InstanceType Map::instance_type() {
- return static_cast<InstanceType>(READ_BYTE_FIELD(this, kInstanceTypeOffset));
-}
-
-
-void Map::set_instance_type(InstanceType value) {
- WRITE_BYTE_FIELD(this, kInstanceTypeOffset, value);
-}
-
-
-int Map::unused_property_fields() {
- return READ_BYTE_FIELD(this, kUnusedPropertyFieldsOffset);
-}
-
-
-void Map::set_unused_property_fields(int value) {
- WRITE_BYTE_FIELD(this, kUnusedPropertyFieldsOffset, Min(value, 255));
-}
-
-
-byte Map::bit_field() {
- return READ_BYTE_FIELD(this, kBitFieldOffset);
-}
-
-
-void Map::set_bit_field(byte value) {
- WRITE_BYTE_FIELD(this, kBitFieldOffset, value);
-}
-
-
-byte Map::bit_field2() {
- return READ_BYTE_FIELD(this, kBitField2Offset);
-}
-
-
-void Map::set_bit_field2(byte value) {
- WRITE_BYTE_FIELD(this, kBitField2Offset, value);
-}
-
-
-void Map::set_non_instance_prototype(bool value) {
- if (value) {
- set_bit_field(bit_field() | (1 << kHasNonInstancePrototype));
- } else {
- set_bit_field(bit_field() & ~(1 << kHasNonInstancePrototype));
- }
-}
-
-
-bool Map::has_non_instance_prototype() {
- return ((1 << kHasNonInstancePrototype) & bit_field()) != 0;
-}
-
-
-void Map::set_function_with_prototype(bool value) {
- set_bit_field3(FunctionWithPrototype::update(bit_field3(), value));
-}
-
-
-bool Map::function_with_prototype() {
- return FunctionWithPrototype::decode(bit_field3());
-}
-
-
-void Map::set_is_access_check_needed(bool access_check_needed) {
- if (access_check_needed) {
- set_bit_field(bit_field() | (1 << kIsAccessCheckNeeded));
- } else {
- set_bit_field(bit_field() & ~(1 << kIsAccessCheckNeeded));
- }
-}
-
-
-bool Map::is_access_check_needed() {
- return ((1 << kIsAccessCheckNeeded) & bit_field()) != 0;
-}
-
-
-void Map::set_is_extensible(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kIsExtensible));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kIsExtensible));
- }
-}
-
-bool Map::is_extensible() {
- return ((1 << kIsExtensible) & bit_field2()) != 0;
-}
-
-
-void Map::set_attached_to_shared_function_info(bool value) {
- set_bit_field3(AttachedToSharedFunctionInfo::update(bit_field3(), value));
-}
-
-bool Map::attached_to_shared_function_info() {
- return AttachedToSharedFunctionInfo::decode(bit_field3());
-}
-
-
-void Map::set_is_shared(bool value) {
- set_bit_field3(IsShared::update(bit_field3(), value));
-}
-
-
-bool Map::is_shared() {
- return IsShared::decode(bit_field3());
-}
-
-void Map::set_has_external_resource(bool value) {
- if (value) {
- set_bit_field(bit_field() | (1 << kHasExternalResource));
- } else {
- set_bit_field(bit_field() & ~(1 << kHasExternalResource));
- }
-}
-
-bool Map::has_external_resource() {
- return ((1 << kHasExternalResource) & bit_field()) != 0;
-}
-
-
-void Map::set_use_user_object_comparison(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kUseUserObjectComparison));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kUseUserObjectComparison));
- }
-}
-
-
-bool Map::use_user_object_comparison() {
- return ((1 << kUseUserObjectComparison) & bit_field2()) != 0;
-}
-
-
-void Map::set_named_interceptor_is_fallback(bool value) {
- set_bit_field3(NamedInterceptorIsFallback::update(bit_field3(), value));
-}
-
-bool Map::named_interceptor_is_fallback() {
- return NamedInterceptorIsFallback::decode(bit_field3());
-}
-
-
-void Map::set_dictionary_map(bool value) {
- set_bit_field3(DictionaryMap::update(bit_field3(), value));
-}
-
-
-bool Map::is_dictionary_map() {
- return DictionaryMap::decode(bit_field3());
-}
-
-
-JSFunction* Map::unchecked_constructor() {
- return reinterpret_cast<JSFunction*>(READ_FIELD(this, kConstructorOffset));
-}
-
-
-Code::Flags Code::flags() {
- return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
-}
-
-
-inline bool Map::CanTrackAllocationSite() {
- return instance_type() == JS_ARRAY_TYPE;
-}
-
-
-void Map::set_owns_descriptors(bool is_shared) {
- set_bit_field3(OwnsDescriptors::update(bit_field3(), is_shared));
-}
-
-
-bool Map::owns_descriptors() {
- return OwnsDescriptors::decode(bit_field3());
-}
-
-
-void Map::set_is_observed(bool is_observed) {
- ASSERT(instance_type() < FIRST_JS_OBJECT_TYPE ||
- instance_type() > LAST_JS_OBJECT_TYPE ||
- has_slow_elements_kind() || has_external_array_elements());
- set_bit_field3(IsObserved::update(bit_field3(), is_observed));
-}
-
-
-bool Map::is_observed() {
- return IsObserved::decode(bit_field3());
-}
-
-
-void Map::NotifyLeafMapLayoutChange() {
- dependent_code()->DeoptimizeDependentCodeGroup(
- DependentCode::kPrototypeCheckGroup);
-}
-
-
-bool Map::CanOmitPrototypeChecks() {
- return !HasTransitionArray() && !is_dictionary_map() &&
- FLAG_omit_prototype_checks_for_leaf_maps;
-}
-
-
-void Map::AddDependentCode(DependentCode::DependencyGroup group,
- Handle<Code> code) {
- Handle<DependentCode> codes =
- DependentCode::Insert(Handle<DependentCode>(dependent_code()),
- group, code);
- if (*codes != dependent_code()) {
- set_dependent_code(*codes);
- }
-}
-
-
-int DependentCode::number_of_entries(DependencyGroup group) {
- if (length() == 0) return 0;
- return Smi::cast(get(group))->value();
-}
-
-
-void DependentCode::set_number_of_entries(DependencyGroup group, int value) {
- set(group, Smi::FromInt(value));
-}
-
-
-Code* DependentCode::code_at(int i) {
- return Code::cast(get(kCodesStartIndex + i));
-}
-
-
-void DependentCode::set_code_at(int i, Code* value) {
- set(kCodesStartIndex + i, value);
-}
-
-
-Object** DependentCode::code_slot_at(int i) {
- return HeapObject::RawField(
- this, FixedArray::OffsetOfElementAt(kCodesStartIndex + i));
-}
-
-
-void DependentCode::clear_code_at(int i) {
- set_undefined(kCodesStartIndex + i);
-}
-
-
-void DependentCode::ExtendGroup(DependencyGroup group) {
- GroupStartIndexes starts(this);
- for (int g = kGroupCount - 1; g > group; g--) {
- if (starts.at(g) < starts.at(g + 1)) {
- set_code_at(starts.at(g + 1), code_at(starts.at(g)));
- }
- }
-}
-
-
-void Code::set_flags(Code::Flags flags) {
- STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
- // Make sure that all call stubs have an arguments count.
- ASSERT((ExtractKindFromFlags(flags) != CALL_IC &&
- ExtractKindFromFlags(flags) != KEYED_CALL_IC) ||
- ExtractArgumentsCountFromFlags(flags) >= 0);
- WRITE_INT_FIELD(this, kFlagsOffset, flags);
-}
-
-
-Code::Kind Code::kind() {
- return ExtractKindFromFlags(flags());
-}
-
-
-InlineCacheState Code::ic_state() {
- InlineCacheState result = ExtractICStateFromFlags(flags());
- // Only allow uninitialized or debugger states for non-IC code
- // objects. This is used in the debugger to determine whether or not
- // a call to code object has been replaced with a debug break call.
- ASSERT(is_inline_cache_stub() ||
- result == UNINITIALIZED ||
- result == DEBUG_STUB);
- return result;
-}
-
-
-Code::ExtraICState Code::extra_ic_state() {
- ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
- return ExtractExtraICStateFromFlags(flags());
-}
-
-
-Code::StubType Code::type() {
- return ExtractTypeFromFlags(flags());
-}
-
-
-int Code::arguments_count() {
- ASSERT(is_call_stub() || is_keyed_call_stub() || kind() == STUB);
- return ExtractArgumentsCountFromFlags(flags());
-}
-
-
-int Code::major_key() {
- ASSERT(kind() == STUB ||
- kind() == COMPILED_STUB ||
- kind() == UNARY_OP_IC ||
- kind() == BINARY_OP_IC ||
- kind() == COMPARE_IC ||
- kind() == LOAD_IC ||
- kind() == KEYED_LOAD_IC ||
- kind() == TO_BOOLEAN_IC);
- return StubMajorKeyField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
-}
-
-
-void Code::set_major_key(int major) {
- ASSERT(kind() == STUB ||
- kind() == COMPILED_STUB ||
- kind() == UNARY_OP_IC ||
- kind() == BINARY_OP_IC ||
- kind() == COMPARE_IC ||
- kind() == LOAD_IC ||
- kind() == KEYED_LOAD_IC ||
- kind() == STORE_IC ||
- kind() == KEYED_STORE_IC ||
- kind() == TO_BOOLEAN_IC);
- ASSERT(0 <= major && major < 256);
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- int updated = StubMajorKeyField::update(previous, major);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
-}
-
-
-bool Code::is_pregenerated() {
- return kind() == STUB && IsPregeneratedField::decode(flags());
-}
-
-
-void Code::set_is_pregenerated(bool value) {
- ASSERT(kind() == STUB);
- Flags f = flags();
- f = static_cast<Flags>(IsPregeneratedField::update(f, value));
- set_flags(f);
-}
-
-
-bool Code::optimizable() {
- ASSERT_EQ(FUNCTION, kind());
- return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
-}
-
-
-void Code::set_optimizable(bool value) {
- ASSERT_EQ(FUNCTION, kind());
- WRITE_BYTE_FIELD(this, kOptimizableOffset, value ? 1 : 0);
-}
-
-
-bool Code::has_deoptimization_support() {
- ASSERT_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
- return FullCodeFlagsHasDeoptimizationSupportField::decode(flags);
-}
-
-
-void Code::set_has_deoptimization_support(bool value) {
- ASSERT_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
- flags = FullCodeFlagsHasDeoptimizationSupportField::update(flags, value);
- WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
-}
-
-
-bool Code::has_debug_break_slots() {
- ASSERT_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
- return FullCodeFlagsHasDebugBreakSlotsField::decode(flags);
-}
-
-
-void Code::set_has_debug_break_slots(bool value) {
- ASSERT_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
- flags = FullCodeFlagsHasDebugBreakSlotsField::update(flags, value);
- WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
-}
-
-
-bool Code::is_compiled_optimizable() {
- ASSERT_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
- return FullCodeFlagsIsCompiledOptimizable::decode(flags);
-}
-
-
-void Code::set_compiled_optimizable(bool value) {
- ASSERT_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
- flags = FullCodeFlagsIsCompiledOptimizable::update(flags, value);
- WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
-}
-
-
-int Code::allow_osr_at_loop_nesting_level() {
- ASSERT_EQ(FUNCTION, kind());
- return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
-}
-
-
-void Code::set_allow_osr_at_loop_nesting_level(int level) {
- ASSERT_EQ(FUNCTION, kind());
- ASSERT(level >= 0 && level <= kMaxLoopNestingMarker);
- WRITE_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset, level);
-}
-
-
-int Code::profiler_ticks() {
- ASSERT_EQ(FUNCTION, kind());
- return READ_BYTE_FIELD(this, kProfilerTicksOffset);
-}
-
-
-void Code::set_profiler_ticks(int ticks) {
- ASSERT_EQ(FUNCTION, kind());
- ASSERT(ticks < 256);
- WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks);
-}
-
-
-unsigned Code::stack_slots() {
- ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
- return StackSlotsField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_stack_slots(unsigned slots) {
- CHECK(slots <= (1 << kStackSlotsBitCount));
- ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = StackSlotsField::update(previous, slots);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-unsigned Code::safepoint_table_offset() {
- ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
- return SafepointTableOffsetField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
-}
-
-
-void Code::set_safepoint_table_offset(unsigned offset) {
- CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
- ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
- ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- int updated = SafepointTableOffsetField::update(previous, offset);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
-}
-
-
-unsigned Code::stack_check_table_offset() {
- ASSERT_EQ(FUNCTION, kind());
- return StackCheckTableOffsetField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
-}
-
-
-void Code::set_stack_check_table_offset(unsigned offset) {
- ASSERT_EQ(FUNCTION, kind());
- ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- int updated = StackCheckTableOffsetField::update(previous, offset);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
-}
-
-
-CheckType Code::check_type() {
- ASSERT(is_call_stub() || is_keyed_call_stub());
- byte type = READ_BYTE_FIELD(this, kCheckTypeOffset);
- return static_cast<CheckType>(type);
-}
-
-
-void Code::set_check_type(CheckType value) {
- ASSERT(is_call_stub() || is_keyed_call_stub());
- WRITE_BYTE_FIELD(this, kCheckTypeOffset, value);
-}
-
-
-byte Code::unary_op_type() {
- ASSERT(is_unary_op_stub());
- return UnaryOpTypeField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_unary_op_type(byte value) {
- ASSERT(is_unary_op_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = UnaryOpTypeField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-byte Code::to_boolean_state() {
- ASSERT(is_to_boolean_ic_stub());
- return ToBooleanStateField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_to_boolean_state(byte value) {
- ASSERT(is_to_boolean_ic_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = ToBooleanStateField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-bool Code::has_function_cache() {
- ASSERT(kind() == STUB);
- return HasFunctionCacheField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_has_function_cache(bool flag) {
- ASSERT(kind() == STUB);
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = HasFunctionCacheField::update(previous, flag);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-bool Code::marked_for_deoptimization() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- return MarkedForDeoptimizationField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_marked_for_deoptimization(bool flag) {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = MarkedForDeoptimizationField::update(previous, flag);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-bool Code::is_inline_cache_stub() {
- Kind kind = this->kind();
- return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
-}
-
-
-bool Code::is_debug_break() {
- return ic_state() == DEBUG_STUB && extra_ic_state() == DEBUG_BREAK;
-}
-
-
-Code::Flags Code::ComputeFlags(Kind kind,
- InlineCacheState ic_state,
- ExtraICState extra_ic_state,
- StubType type,
- int argc,
- InlineCacheHolderFlag holder) {
- // Compute the bit mask.
- int bits = KindField::encode(kind)
- | ICStateField::encode(ic_state)
- | TypeField::encode(type)
- | ExtraICStateField::encode(extra_ic_state)
- | (argc << kArgumentsCountShift)
- | CacheHolderField::encode(holder);
- return static_cast<Flags>(bits);
-}
-
-
-Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
- ExtraICState extra_ic_state,
- StubType type,
- int argc,
- InlineCacheHolderFlag holder) {
- return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, argc, holder);
-}
-
-
-Code::Kind Code::ExtractKindFromFlags(Flags flags) {
- return KindField::decode(flags);
-}
-
-
-InlineCacheState Code::ExtractICStateFromFlags(Flags flags) {
- return ICStateField::decode(flags);
-}
-
-
-Code::ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
- return ExtraICStateField::decode(flags);
-}
-
-
-Code::StubType Code::ExtractTypeFromFlags(Flags flags) {
- return TypeField::decode(flags);
-}
-
-
-int Code::ExtractArgumentsCountFromFlags(Flags flags) {
- return (flags & kArgumentsCountMask) >> kArgumentsCountShift;
-}
-
-
-InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
- return CacheHolderField::decode(flags);
-}
-
-
-Code::Flags Code::RemoveTypeFromFlags(Flags flags) {
- int bits = flags & ~TypeField::kMask;
- return static_cast<Flags>(bits);
-}
-
-
-Code* Code::GetCodeFromTargetAddress(Address address) {
- HeapObject* code = HeapObject::FromAddress(address - Code::kHeaderSize);
- // GetCodeFromTargetAddress might be called when marking objects during mark
- // sweep. reinterpret_cast is therefore used instead of the more appropriate
- // Code::cast. Code::cast does not work when the object's map is
- // marked.
- Code* result = reinterpret_cast<Code*>(code);
- return result;
-}
-
-
-Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
- return HeapObject::
- FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
-}
-
-
-Object* Map::prototype() {
- return READ_FIELD(this, kPrototypeOffset);
-}
-
-
-void Map::set_prototype(Object* value, WriteBarrierMode mode) {
- ASSERT(value->IsNull() || value->IsJSReceiver());
- WRITE_FIELD(this, kPrototypeOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
-}
-
-
-// If the descriptor is using the empty transition array, install a new empty
-// transition array that will have place for an element transition.
-static MaybeObject* EnsureHasTransitionArray(Map* map) {
- TransitionArray* transitions;
- MaybeObject* maybe_transitions;
- if (!map->HasTransitionArray()) {
- maybe_transitions = TransitionArray::Allocate(0);
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
- transitions->set_back_pointer_storage(map->GetBackPointer());
- } else if (!map->transitions()->IsFullTransitionArray()) {
- maybe_transitions = map->transitions()->ExtendToFullTransitionArray();
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
- } else {
- return map;
- }
- map->set_transitions(transitions);
- return transitions;
-}
-
-
-void Map::InitializeDescriptors(DescriptorArray* descriptors) {
- int len = descriptors->number_of_descriptors();
-#ifdef DEBUG
- ASSERT(len <= DescriptorArray::kMaxNumberOfDescriptors);
-
- bool used_indices[DescriptorArray::kMaxNumberOfDescriptors];
- for (int i = 0; i < len; ++i) used_indices[i] = false;
-
- // Ensure that all enumeration indexes between 1 and length occur uniquely in
- // the descriptor array.
- for (int i = 0; i < len; ++i) {
- int enum_index = descriptors->GetDetails(i).descriptor_index() -
- PropertyDetails::kInitialIndex;
- ASSERT(0 <= enum_index && enum_index < len);
- ASSERT(!used_indices[enum_index]);
- used_indices[enum_index] = true;
- }
-#endif
-
- set_instance_descriptors(descriptors);
- SetNumberOfOwnDescriptors(len);
-}
-
-
-ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
-SMI_ACCESSORS(Map, bit_field3, kBitField3Offset)
-
-
-void Map::ClearTransitions(Heap* heap, WriteBarrierMode mode) {
- Object* back_pointer = GetBackPointer();
-
- if (Heap::ShouldZapGarbage() && HasTransitionArray()) {
- ZapTransitions();
- }
-
- WRITE_FIELD(this, kTransitionsOrBackPointerOffset, back_pointer);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kTransitionsOrBackPointerOffset, back_pointer, mode);
-}
-
-
-void Map::AppendDescriptor(Descriptor* desc,
- const DescriptorArray::WhitenessWitness& witness) {
- DescriptorArray* descriptors = instance_descriptors();
- int number_of_own_descriptors = NumberOfOwnDescriptors();
- ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
- descriptors->Append(desc, witness);
- SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
-}
-
-
-Object* Map::GetBackPointer() {
- Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
- if (object->IsDescriptorArray()) {
- return TransitionArray::cast(object)->back_pointer_storage();
- } else {
- ASSERT(object->IsMap() || object->IsUndefined());
- return object;
- }
-}
-
-
-bool Map::HasElementsTransition() {
- return HasTransitionArray() && transitions()->HasElementsTransition();
-}
-
-
-bool Map::HasTransitionArray() {
- Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
- return object->IsTransitionArray();
-}
-
-
-Map* Map::elements_transition_map() {
- return transitions()->elements_transition();
-}
-
-
-bool Map::CanHaveMoreTransitions() {
- if (!HasTransitionArray()) return true;
- return FixedArray::SizeFor(transitions()->length() +
- TransitionArray::kTransitionSize)
- <= Page::kMaxNonCodeHeapObjectSize;
-}
-
-
-MaybeObject* Map::AddTransition(String* key,
- Map* target,
- SimpleTransitionFlag flag) {
- if (HasTransitionArray()) return transitions()->CopyInsert(key, target);
- return TransitionArray::NewWith(flag, key, target, GetBackPointer());
-}
-
-
-void Map::SetTransition(int transition_index, Map* target) {
- transitions()->SetTarget(transition_index, target);
-}
-
-
-Map* Map::GetTransition(int transition_index) {
- return transitions()->GetTarget(transition_index);
-}
-
-
-MaybeObject* Map::set_elements_transition_map(Map* transitioned_map) {
- MaybeObject* allow_elements = EnsureHasTransitionArray(this);
- if (allow_elements->IsFailure()) return allow_elements;
- transitions()->set_elements_transition(transitioned_map);
- return this;
-}
-
-
-FixedArray* Map::GetPrototypeTransitions() {
- if (!HasTransitionArray()) return GetHeap()->empty_fixed_array();
- if (!transitions()->HasPrototypeTransitions()) {
- return GetHeap()->empty_fixed_array();
- }
- return transitions()->GetPrototypeTransitions();
-}
-
-
-MaybeObject* Map::SetPrototypeTransitions(FixedArray* proto_transitions) {
- MaybeObject* allow_prototype = EnsureHasTransitionArray(this);
- if (allow_prototype->IsFailure()) return allow_prototype;
-#ifdef DEBUG
- if (HasPrototypeTransitions()) {
- ASSERT(GetPrototypeTransitions() != proto_transitions);
- ZapPrototypeTransitions();
- }
-#endif
- transitions()->SetPrototypeTransitions(proto_transitions);
- return this;
-}
-
-
-bool Map::HasPrototypeTransitions() {
- return HasTransitionArray() && transitions()->HasPrototypeTransitions();
-}
-
-
-TransitionArray* Map::transitions() {
- ASSERT(HasTransitionArray());
- Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
- return TransitionArray::cast(object);
-}
-
-
-void Map::set_transitions(TransitionArray* transition_array,
- WriteBarrierMode mode) {
- // In release mode, only run this code if verify_heap is on.
- if (Heap::ShouldZapGarbage() && HasTransitionArray()) {
- CHECK(transitions() != transition_array);
- ZapTransitions();
- }
-
- WRITE_FIELD(this, kTransitionsOrBackPointerOffset, transition_array);
- CONDITIONAL_WRITE_BARRIER(
- GetHeap(), this, kTransitionsOrBackPointerOffset, transition_array, mode);
-}
-
-
-void Map::init_back_pointer(Object* undefined) {
- ASSERT(undefined->IsUndefined());
- WRITE_FIELD(this, kTransitionsOrBackPointerOffset, undefined);
-}
-
-
-void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
- ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE);
- ASSERT((value->IsUndefined() && GetBackPointer()->IsMap()) ||
- (value->IsMap() && GetBackPointer()->IsUndefined()));
- Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
- if (object->IsTransitionArray()) {
- TransitionArray::cast(object)->set_back_pointer_storage(value);
- } else {
- WRITE_FIELD(this, kTransitionsOrBackPointerOffset, value);
- CONDITIONAL_WRITE_BARRIER(
- GetHeap(), this, kTransitionsOrBackPointerOffset, value, mode);
- }
-}
-
-
-// Can either be Smi (no transitions), normal transition array, or a transition
-// array with the header overwritten as a Smi (thus iterating).
-TransitionArray* Map::unchecked_transition_array() {
- Object* object = *HeapObject::RawField(this,
- Map::kTransitionsOrBackPointerOffset);
- TransitionArray* transition_array = static_cast<TransitionArray*>(object);
- return transition_array;
-}
-
-
-HeapObject* Map::UncheckedPrototypeTransitions() {
- ASSERT(HasTransitionArray());
- ASSERT(unchecked_transition_array()->HasPrototypeTransitions());
- return unchecked_transition_array()->UncheckedPrototypeTransitions();
-}
-
-
-ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
-ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
-ACCESSORS(Map, constructor, Object, kConstructorOffset)
-
-ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, literals_or_bindings, FixedArray, kLiteralsOffset)
-ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
-
-ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
-ACCESSORS(GlobalObject, native_context, Context, kNativeContextOffset)
-ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
-ACCESSORS(GlobalObject, global_receiver, JSObject, kGlobalReceiverOffset)
-
-ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
-
-ACCESSORS(AccessorInfo, name, Object, kNameOffset)
-ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
-ACCESSORS(AccessorInfo, expected_receiver_type, Object,
- kExpectedReceiverTypeOffset)
-
-ACCESSORS(DeclaredAccessorDescriptor, internal_field, Smi, kInternalFieldOffset)
-
-ACCESSORS(DeclaredAccessorInfo, descriptor, DeclaredAccessorDescriptor,
- kDescriptorOffset)
-
-ACCESSORS(ExecutableAccessorInfo, getter, Object, kGetterOffset)
-ACCESSORS(ExecutableAccessorInfo, setter, Object, kSetterOffset)
-ACCESSORS(ExecutableAccessorInfo, data, Object, kDataOffset)
-
-ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
-ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
-
-ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
-ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
-ACCESSORS(AccessCheckInfo, data, Object, kDataOffset)
-
-ACCESSORS(InterceptorInfo, getter, Object, kGetterOffset)
-ACCESSORS(InterceptorInfo, setter, Object, kSetterOffset)
-ACCESSORS(InterceptorInfo, query, Object, kQueryOffset)
-ACCESSORS(InterceptorInfo, deleter, Object, kDeleterOffset)
-ACCESSORS(InterceptorInfo, enumerator, Object, kEnumeratorOffset)
-ACCESSORS(InterceptorInfo, data, Object, kDataOffset)
-ACCESSORS(InterceptorInfo, is_fallback, Smi, kFallbackOffset)
-
-ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
-ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
-
-ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
-ACCESSORS(TemplateInfo, property_list, Object, kPropertyListOffset)
-
-ACCESSORS(FunctionTemplateInfo, serial_number, Object, kSerialNumberOffset)
-ACCESSORS(FunctionTemplateInfo, call_code, Object, kCallCodeOffset)
-ACCESSORS(FunctionTemplateInfo, property_accessors, Object,
- kPropertyAccessorsOffset)
-ACCESSORS(FunctionTemplateInfo, prototype_template, Object,
- kPrototypeTemplateOffset)
-ACCESSORS(FunctionTemplateInfo, parent_template, Object, kParentTemplateOffset)
-ACCESSORS(FunctionTemplateInfo, named_property_handler, Object,
- kNamedPropertyHandlerOffset)
-ACCESSORS(FunctionTemplateInfo, indexed_property_handler, Object,
- kIndexedPropertyHandlerOffset)
-ACCESSORS(FunctionTemplateInfo, instance_template, Object,
- kInstanceTemplateOffset)
-ACCESSORS(FunctionTemplateInfo, class_name, Object, kClassNameOffset)
-ACCESSORS(FunctionTemplateInfo, signature, Object, kSignatureOffset)
-ACCESSORS(FunctionTemplateInfo, instance_call_handler, Object,
- kInstanceCallHandlerOffset)
-ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
- kAccessCheckInfoOffset)
-ACCESSORS_TO_SMI(FunctionTemplateInfo, flag, kFlagOffset)
-
-ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
-ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
- kInternalFieldCountOffset)
-ACCESSORS(ObjectTemplateInfo, has_external_resource, Object,
- kHasExternalResourceOffset)
-ACCESSORS(ObjectTemplateInfo, use_user_object_comparison, Object,
- kUseUserObjectComparisonOffset)
-
-ACCESSORS(SignatureInfo, receiver, Object, kReceiverOffset)
-ACCESSORS(SignatureInfo, args, Object, kArgsOffset)
-
-ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
-
-ACCESSORS(AllocationSiteInfo, payload, Object, kPayloadOffset)
-
-ACCESSORS(Script, source, Object, kSourceOffset)
-ACCESSORS(Script, name, Object, kNameOffset)
-ACCESSORS(Script, id, Object, kIdOffset)
-ACCESSORS_TO_SMI(Script, line_offset, kLineOffsetOffset)
-ACCESSORS_TO_SMI(Script, column_offset, kColumnOffsetOffset)
-ACCESSORS(Script, data, Object, kDataOffset)
-ACCESSORS(Script, context_data, Object, kContextOffset)
-ACCESSORS(Script, wrapper, Foreign, kWrapperOffset)
-ACCESSORS_TO_SMI(Script, type, kTypeOffset)
-ACCESSORS_TO_SMI(Script, compilation_type, kCompilationTypeOffset)
-ACCESSORS_TO_SMI(Script, compilation_state, kCompilationStateOffset)
-ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
-ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
-ACCESSORS_TO_SMI(Script, eval_from_instructions_offset,
- kEvalFrominstructionsOffsetOffset)
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
-ACCESSORS(DebugInfo, original_code, Code, kOriginalCodeIndex)
-ACCESSORS(DebugInfo, code, Code, kPatchedCodeIndex)
-ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
-
-ACCESSORS_TO_SMI(BreakPointInfo, code_position, kCodePositionIndex)
-ACCESSORS_TO_SMI(BreakPointInfo, source_position, kSourcePositionIndex)
-ACCESSORS_TO_SMI(BreakPointInfo, statement_position, kStatementPositionIndex)
-ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
-#endif
-
-ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS(SharedFunctionInfo, optimized_code_map, Object,
- kOptimizedCodeMapOffset)
-ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
-ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
- kInstanceClassNameOffset)
-ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
-ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
-ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
-ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
-ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
- kThisPropertyAssignmentsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
-
-
-SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
- kHiddenPrototypeBit)
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
- kNeedsAccessCheckBit)
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, read_only_prototype,
- kReadOnlyPrototypeBit)
-BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
- kIsExpressionBit)
-BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
- kIsTopLevelBit)
-BOOL_GETTER(SharedFunctionInfo,
- compiler_hints,
- has_only_simple_this_property_assignments,
- kHasOnlySimpleThisPropertyAssignments)
-BOOL_ACCESSORS(SharedFunctionInfo,
- compiler_hints,
- allows_lazy_compilation,
- kAllowLazyCompilation)
-BOOL_ACCESSORS(SharedFunctionInfo,
- compiler_hints,
- allows_lazy_compilation_without_context,
- kAllowLazyCompilationWithoutContext)
-BOOL_ACCESSORS(SharedFunctionInfo,
- compiler_hints,
- uses_arguments,
- kUsesArguments)
-BOOL_ACCESSORS(SharedFunctionInfo,
- compiler_hints,
- has_duplicate_parameters,
- kHasDuplicateParameters)
-
-
-#if V8_HOST_ARCH_32_BIT
-SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
-SMI_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
- kFormalParameterCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
- kExpectedNofPropertiesOffset)
-SMI_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, start_position_and_type,
- kStartPositionAndTypeOffset)
-SMI_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
-SMI_ACCESSORS(SharedFunctionInfo, function_token_position,
- kFunctionTokenPositionOffset)
-SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
- kCompilerHintsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
- kThisPropertyAssignmentsCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
-SMI_ACCESSORS(SharedFunctionInfo,
- stress_deopt_counter,
- kStressDeoptCounterOffset)
-#else
-
-#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
- STATIC_ASSERT(holder::offset % kPointerSize == 0); \
- int holder::name() { \
- int value = READ_INT_FIELD(this, offset); \
- ASSERT(kHeapObjectTag == 1); \
- ASSERT((value & kHeapObjectTag) == 0); \
- return value >> 1; \
- } \
- void holder::set_##name(int value) { \
- ASSERT(kHeapObjectTag == 1); \
- ASSERT((value & 0xC0000000) == 0xC0000000 || \
- (value & 0xC0000000) == 0x000000000); \
- WRITE_INT_FIELD(this, \
- offset, \
- (value << 1) & ~kHeapObjectTag); \
- }
-
-#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \
- STATIC_ASSERT(holder::offset % kPointerSize == kIntSize); \
- INT_ACCESSORS(holder, name, offset)
-
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, length, kLengthOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
- formal_parameter_count,
- kFormalParameterCountOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
- expected_nof_properties,
- kExpectedNofPropertiesOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, end_position, kEndPositionOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
- start_position_and_type,
- kStartPositionAndTypeOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
- function_token_position,
- kFunctionTokenPositionOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
- compiler_hints,
- kCompilerHintsOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
- this_property_assignments_count,
- kThisPropertyAssignmentsCountOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, opt_count, kOptCountOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, counters, kCountersOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
- stress_deopt_counter,
- kStressDeoptCounterOffset)
-#endif
-
-
-int SharedFunctionInfo::construction_count() {
- return READ_BYTE_FIELD(this, kConstructionCountOffset);
-}
-
-
-void SharedFunctionInfo::set_construction_count(int value) {
- ASSERT(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this, kConstructionCountOffset, static_cast<byte>(value));
-}
-
-
-BOOL_ACCESSORS(SharedFunctionInfo,
- compiler_hints,
- live_objects_may_exist,
- kLiveObjectsMayExist)
-
-
-bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
- return initial_map() != GetHeap()->undefined_value();
-}
-
-
-BOOL_GETTER(SharedFunctionInfo,
- compiler_hints,
- optimization_disabled,
- kOptimizationDisabled)
-
-
-void SharedFunctionInfo::set_optimization_disabled(bool disable) {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kOptimizationDisabled,
- disable));
- // If disabling optimizations we reflect that in the code object so
- // it will not be counted as optimizable code.
- if ((code()->kind() == Code::FUNCTION) && disable) {
- code()->set_optimizable(false);
- }
-}
-
-
-int SharedFunctionInfo::profiler_ticks() {
- if (code()->kind() != Code::FUNCTION) return 0;
- return code()->profiler_ticks();
-}
-
-
-LanguageMode SharedFunctionInfo::language_mode() {
- int hints = compiler_hints();
- if (BooleanBit::get(hints, kExtendedModeFunction)) {
- ASSERT(BooleanBit::get(hints, kStrictModeFunction));
- return EXTENDED_MODE;
- }
- return BooleanBit::get(hints, kStrictModeFunction)
- ? STRICT_MODE : CLASSIC_MODE;
-}
-
-
-void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
- // We only allow language mode transitions that go set the same language mode
- // again or go up in the chain:
- // CLASSIC_MODE -> STRICT_MODE -> EXTENDED_MODE.
- ASSERT(this->language_mode() == CLASSIC_MODE ||
- this->language_mode() == language_mode ||
- language_mode == EXTENDED_MODE);
- int hints = compiler_hints();
- hints = BooleanBit::set(
- hints, kStrictModeFunction, language_mode != CLASSIC_MODE);
- hints = BooleanBit::set(
- hints, kExtendedModeFunction, language_mode == EXTENDED_MODE);
- set_compiler_hints(hints);
-}
-
-
-bool SharedFunctionInfo::is_classic_mode() {
- return !BooleanBit::get(compiler_hints(), kStrictModeFunction);
-}
-
-BOOL_GETTER(SharedFunctionInfo, compiler_hints, is_extended_mode,
- kExtendedModeFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, qml_mode,
- kQmlModeFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
- name_should_print_as_anonymous,
- kNameShouldPrintAsAnonymous)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, bound, kBoundFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_optimize,
- kDontOptimize)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)
-
-void SharedFunctionInfo::BeforeVisitingPointers() {
- if (IsInobjectSlackTrackingInProgress()) DetachInitialMap();
-}
-
-
-void SharedFunctionInfo::ClearOptimizedCodeMap() {
- set_optimized_code_map(Smi::FromInt(0));
-}
-
-
-ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
-ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
-
-ACCESSORS(PolymorphicCodeCache, cache, Object, kCacheOffset)
-
-bool Script::HasValidSource() {
- Object* src = this->source();
- if (!src->IsString()) return true;
- String* src_str = String::cast(src);
- if (!StringShape(src_str).IsExternal()) return true;
- if (src_str->IsOneByteRepresentation()) {
- return ExternalAsciiString::cast(src)->resource() != NULL;
- } else if (src_str->IsTwoByteRepresentation()) {
- return ExternalTwoByteString::cast(src)->resource() != NULL;
- }
- return true;
-}
-
-
-void SharedFunctionInfo::DontAdaptArguments() {
- ASSERT(code()->kind() == Code::BUILTIN);
- set_formal_parameter_count(kDontAdaptArgumentsSentinel);
-}
-
-
-int SharedFunctionInfo::start_position() {
- return start_position_and_type() >> kStartPositionShift;
-}
-
-
-void SharedFunctionInfo::set_start_position(int start_position) {
- set_start_position_and_type((start_position << kStartPositionShift)
- | (start_position_and_type() & ~kStartPositionMask));
-}
-
-
-Code* SharedFunctionInfo::code() {
- return Code::cast(READ_FIELD(this, kCodeOffset));
-}
-
-
-Code* SharedFunctionInfo::unchecked_code() {
- return reinterpret_cast<Code*>(READ_FIELD(this, kCodeOffset));
-}
-
-
-void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kCodeOffset, value);
- CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
-}
-
-
-void SharedFunctionInfo::ReplaceCode(Code* value) {
- // If the GC metadata field is already used then the function was
- // enqueued as a code flushing candidate and we remove it now.
- if (code()->gc_metadata() != NULL) {
- CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
- flusher->EvictCandidate(this);
- }
-
- ASSERT(code()->gc_metadata() == NULL && value->gc_metadata() == NULL);
- set_code(value);
-}
-
-
-ScopeInfo* SharedFunctionInfo::scope_info() {
- return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
-}
-
-
-void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
- WriteBarrierMode mode) {
- WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
- CONDITIONAL_WRITE_BARRIER(GetHeap(),
- this,
- kScopeInfoOffset,
- reinterpret_cast<Object*>(value),
- mode);
-}
-
-
-bool SharedFunctionInfo::is_compiled() {
- return code() !=
- Isolate::Current()->builtins()->builtin(Builtins::kLazyCompile);
-}
-
-
-bool SharedFunctionInfo::IsApiFunction() {
- return function_data()->IsFunctionTemplateInfo();
-}
-
-
-FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
- ASSERT(IsApiFunction());
- return FunctionTemplateInfo::cast(function_data());
-}
-
-
-bool SharedFunctionInfo::HasBuiltinFunctionId() {
- return function_data()->IsSmi();
-}
-
-
-BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
- ASSERT(HasBuiltinFunctionId());
- return static_cast<BuiltinFunctionId>(Smi::cast(function_data())->value());
-}
-
-
-int SharedFunctionInfo::code_age() {
- return (compiler_hints() >> kCodeAgeShift) & kCodeAgeMask;
-}
-
-
-void SharedFunctionInfo::set_code_age(int code_age) {
- int hints = compiler_hints() & ~(kCodeAgeMask << kCodeAgeShift);
- set_compiler_hints(hints | ((code_age & kCodeAgeMask) << kCodeAgeShift));
-}
-
-
-int SharedFunctionInfo::ic_age() {
- return ICAgeBits::decode(counters());
-}
-
-
-void SharedFunctionInfo::set_ic_age(int ic_age) {
- set_counters(ICAgeBits::update(counters(), ic_age));
-}
-
-
-int SharedFunctionInfo::deopt_count() {
- return DeoptCountBits::decode(counters());
-}
-
-
-void SharedFunctionInfo::set_deopt_count(int deopt_count) {
- set_counters(DeoptCountBits::update(counters(), deopt_count));
-}
-
-
-void SharedFunctionInfo::increment_deopt_count() {
- int value = counters();
- int deopt_count = DeoptCountBits::decode(value);
- deopt_count = (deopt_count + 1) & DeoptCountBits::kMax;
- set_counters(DeoptCountBits::update(value, deopt_count));
-}
-
-
-int SharedFunctionInfo::opt_reenable_tries() {
- return OptReenableTriesBits::decode(counters());
-}
-
-
-void SharedFunctionInfo::set_opt_reenable_tries(int tries) {
- set_counters(OptReenableTriesBits::update(counters(), tries));
-}
-
-
-bool SharedFunctionInfo::has_deoptimization_support() {
- Code* code = this->code();
- return code->kind() == Code::FUNCTION && code->has_deoptimization_support();
-}
-
-
-void SharedFunctionInfo::TryReenableOptimization() {
- int tries = opt_reenable_tries();
- set_opt_reenable_tries((tries + 1) & OptReenableTriesBits::kMax);
- // We reenable optimization whenever the number of tries is a large
- // enough power of 2.
- if (tries >= 16 && (((tries - 1) & tries) == 0)) {
- set_optimization_disabled(false);
- set_opt_count(0);
- set_deopt_count(0);
- code()->set_optimizable(true);
- }
-}
-
-
-bool JSFunction::IsBuiltin() {
- return context()->global_object()->IsJSBuiltinsObject();
-}
-
-
-bool JSFunction::NeedsArgumentsAdaption() {
- return shared()->formal_parameter_count() !=
- SharedFunctionInfo::kDontAdaptArgumentsSentinel;
-}
-
-
-bool JSFunction::IsOptimized() {
- return code()->kind() == Code::OPTIMIZED_FUNCTION;
-}
-
-
-bool JSFunction::IsOptimizable() {
- return code()->kind() == Code::FUNCTION && code()->optimizable();
-}
-
-
-bool JSFunction::IsMarkedForLazyRecompilation() {
- return code() == GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile);
-}
-
-
-bool JSFunction::IsMarkedForParallelRecompilation() {
- return code() ==
- GetIsolate()->builtins()->builtin(Builtins::kParallelRecompile);
-}
-
-
-bool JSFunction::IsInRecompileQueue() {
- return code() == GetIsolate()->builtins()->builtin(
- Builtins::kInRecompileQueue);
-}
-
-
-Code* JSFunction::code() {
- return Code::cast(unchecked_code());
-}
-
-
-Code* JSFunction::unchecked_code() {
- return reinterpret_cast<Code*>(
- Code::GetObjectFromEntryAddress(FIELD_ADDR(this, kCodeEntryOffset)));
-}
-
-
-void JSFunction::set_code(Code* value) {
- ASSERT(!HEAP->InNewSpace(value));
- Address entry = value->entry();
- WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
- GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
- this,
- HeapObject::RawField(this, kCodeEntryOffset),
- value);
-}
-
-
-void JSFunction::ReplaceCode(Code* code) {
- bool was_optimized = IsOptimized();
- bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
-
- set_code(code);
-
- // Add/remove the function from the list of optimized functions for this
- // context based on the state change.
- if (!was_optimized && is_optimized) {
- context()->native_context()->AddOptimizedFunction(this);
- }
- if (was_optimized && !is_optimized) {
- context()->native_context()->RemoveOptimizedFunction(this);
- }
-}
-
-
-Context* JSFunction::context() {
- return Context::cast(READ_FIELD(this, kContextOffset));
-}
-
-
-Object* JSFunction::unchecked_context() {
- return READ_FIELD(this, kContextOffset);
-}
-
-
-SharedFunctionInfo* JSFunction::unchecked_shared() {
- return reinterpret_cast<SharedFunctionInfo*>(
- READ_FIELD(this, kSharedFunctionInfoOffset));
-}
-
-
-void JSFunction::set_context(Object* value) {
- ASSERT(value->IsUndefined() || value->IsContext());
- WRITE_FIELD(this, kContextOffset, value);
- WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
-}
-
-ACCESSORS(JSFunction, prototype_or_initial_map, Object,
- kPrototypeOrInitialMapOffset)
-
-
-Map* JSFunction::initial_map() {
- return Map::cast(prototype_or_initial_map());
-}
-
-
-void JSFunction::set_initial_map(Map* value) {
- set_prototype_or_initial_map(value);
-}
-
-
-bool JSFunction::has_initial_map() {
- return prototype_or_initial_map()->IsMap();
-}
-
-
-bool JSFunction::has_instance_prototype() {
- return has_initial_map() || !prototype_or_initial_map()->IsTheHole();
-}
-
-
-bool JSFunction::has_prototype() {
- return map()->has_non_instance_prototype() || has_instance_prototype();
-}
-
-
-Object* JSFunction::instance_prototype() {
- ASSERT(has_instance_prototype());
- if (has_initial_map()) return initial_map()->prototype();
- // When there is no initial map and the prototype is a JSObject, the
- // initial map field is used for the prototype field.
- return prototype_or_initial_map();
-}
-
-
-Object* JSFunction::prototype() {
- ASSERT(has_prototype());
- // If the function's prototype property has been set to a non-JSObject
- // value, that value is stored in the constructor field of the map.
- if (map()->has_non_instance_prototype()) return map()->constructor();
- return instance_prototype();
-}
-
-
-bool JSFunction::should_have_prototype() {
- return map()->function_with_prototype();
-}
-
-
-bool JSFunction::is_compiled() {
- return code() != GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
-}
-
-
-FixedArray* JSFunction::literals() {
- ASSERT(!shared()->bound());
- return literals_or_bindings();
-}
-
-
-void JSFunction::set_literals(FixedArray* literals) {
- ASSERT(!shared()->bound());
- set_literals_or_bindings(literals);
-}
-
-
-FixedArray* JSFunction::function_bindings() {
- ASSERT(shared()->bound());
- return literals_or_bindings();
-}
-
-
-void JSFunction::set_function_bindings(FixedArray* bindings) {
- ASSERT(shared()->bound());
- // Bound function literal may be initialized to the empty fixed array
- // before the bindings are set.
- ASSERT(bindings == GetHeap()->empty_fixed_array() ||
- bindings->map() == GetHeap()->fixed_cow_array_map());
- set_literals_or_bindings(bindings);
-}
-
-
-int JSFunction::NumberOfLiterals() {
- ASSERT(!shared()->bound());
- return literals()->length();
-}
-
-
-Object* JSBuiltinsObject::javascript_builtin(Builtins::JavaScript id) {
- ASSERT(id < kJSBuiltinsCount); // id is unsigned.
- return READ_FIELD(this, OffsetOfFunctionWithId(id));
-}
-
-
-void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
- Object* value) {
- ASSERT(id < kJSBuiltinsCount); // id is unsigned.
- WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
- WRITE_BARRIER(GetHeap(), this, OffsetOfFunctionWithId(id), value);
-}
-
-
-Code* JSBuiltinsObject::javascript_builtin_code(Builtins::JavaScript id) {
- ASSERT(id < kJSBuiltinsCount); // id is unsigned.
- return Code::cast(READ_FIELD(this, OffsetOfCodeWithId(id)));
-}
-
-
-void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
- Code* value) {
- ASSERT(id < kJSBuiltinsCount); // id is unsigned.
- WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
- ASSERT(!HEAP->InNewSpace(value));
-}
-
-
-ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
-ACCESSORS(JSProxy, hash, Object, kHashOffset)
-ACCESSORS(JSFunctionProxy, call_trap, Object, kCallTrapOffset)
-ACCESSORS(JSFunctionProxy, construct_trap, Object, kConstructTrapOffset)
-
-
-void JSProxy::InitializeBody(int object_size, Object* value) {
- ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
- for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
- WRITE_FIELD(this, offset, value);
- }
-}
-
-
-ACCESSORS(JSSet, table, Object, kTableOffset)
-ACCESSORS(JSMap, table, Object, kTableOffset)
-ACCESSORS(JSWeakMap, table, Object, kTableOffset)
-ACCESSORS(JSWeakMap, next, Object, kNextOffset)
-
-
-Address Foreign::foreign_address() {
- return AddressFrom<Address>(READ_INTPTR_FIELD(this, kForeignAddressOffset));
-}
-
-
-void Foreign::set_foreign_address(Address value) {
- WRITE_INTPTR_FIELD(this, kForeignAddressOffset, OffsetFrom(value));
-}
-
-
-ACCESSORS(JSModule, context, Object, kContextOffset)
-ACCESSORS(JSModule, scope_info, ScopeInfo, kScopeInfoOffset)
-
-
-JSModule* JSModule::cast(Object* obj) {
- ASSERT(obj->IsJSModule());
- ASSERT(HeapObject::cast(obj)->Size() == JSModule::kSize);
- return reinterpret_cast<JSModule*>(obj);
-}
-
-
-ACCESSORS(JSValue, value, Object, kValueOffset)
-
-
-JSValue* JSValue::cast(Object* obj) {
- ASSERT(obj->IsJSValue());
- ASSERT(HeapObject::cast(obj)->Size() == JSValue::kSize);
- return reinterpret_cast<JSValue*>(obj);
-}
-
-
-ACCESSORS(JSDate, value, Object, kValueOffset)
-ACCESSORS(JSDate, cache_stamp, Object, kCacheStampOffset)
-ACCESSORS(JSDate, year, Object, kYearOffset)
-ACCESSORS(JSDate, month, Object, kMonthOffset)
-ACCESSORS(JSDate, day, Object, kDayOffset)
-ACCESSORS(JSDate, weekday, Object, kWeekdayOffset)
-ACCESSORS(JSDate, hour, Object, kHourOffset)
-ACCESSORS(JSDate, min, Object, kMinOffset)
-ACCESSORS(JSDate, sec, Object, kSecOffset)
-
-
-JSDate* JSDate::cast(Object* obj) {
- ASSERT(obj->IsJSDate());
- ASSERT(HeapObject::cast(obj)->Size() == JSDate::kSize);
- return reinterpret_cast<JSDate*>(obj);
-}
-
-
-ACCESSORS(JSMessageObject, type, String, kTypeOffset)
-ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
-ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
-ACCESSORS(JSMessageObject, stack_trace, Object, kStackTraceOffset)
-ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
-SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
-SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
-
-
-JSMessageObject* JSMessageObject::cast(Object* obj) {
- ASSERT(obj->IsJSMessageObject());
- ASSERT(HeapObject::cast(obj)->Size() == JSMessageObject::kSize);
- return reinterpret_cast<JSMessageObject*>(obj);
-}
-
-
-INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
-INT_ACCESSORS(Code, prologue_offset, kPrologueOffset)
-ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
-ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
-ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-
-
-// Type feedback slot: type_feedback_info for FUNCTIONs, stub_info for STUBs.
-void Code::InitializeTypeFeedbackInfoNoWriteBarrier(Object* value) {
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
-}
-
-
-Object* Code::type_feedback_info() {
- ASSERT(kind() == FUNCTION);
- return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
-}
-
-
-void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
- ASSERT(kind() == FUNCTION);
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
- value, mode);
-}
-
-
-int Code::stub_info() {
- ASSERT(kind() == COMPARE_IC || kind() == BINARY_OP_IC || kind() == LOAD_IC);
- Object* value = READ_FIELD(this, kTypeFeedbackInfoOffset);
- return Smi::cast(value)->value();
-}
-
-
-void Code::set_stub_info(int value) {
- ASSERT(kind() == COMPARE_IC ||
- kind() == BINARY_OP_IC ||
- kind() == LOAD_IC ||
- kind() == KEYED_LOAD_IC ||
- kind() == STORE_IC ||
- kind() == KEYED_STORE_IC);
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, Smi::FromInt(value));
-}
-
-
-void Code::set_deoptimizing_functions(Object* value) {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
-}
-
-
-Object* Code::deoptimizing_functions() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
-}
-
-
-ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
-INT_ACCESSORS(Code, ic_age, kICAgeOffset)
-
-
-byte* Code::instruction_start() {
- return FIELD_ADDR(this, kHeaderSize);
-}
-
-
-byte* Code::instruction_end() {
- return instruction_start() + instruction_size();
-}
-
-
-int Code::body_size() {
- return RoundUp(instruction_size(), kObjectAlignment);
-}
-
-
-FixedArray* Code::unchecked_deoptimization_data() {
- return reinterpret_cast<FixedArray*>(
- READ_FIELD(this, kDeoptimizationDataOffset));
-}
-
-
-ByteArray* Code::unchecked_relocation_info() {
- return reinterpret_cast<ByteArray*>(READ_FIELD(this, kRelocationInfoOffset));
-}
-
-
-byte* Code::relocation_start() {
- return unchecked_relocation_info()->GetDataStartAddress();
-}
-
-
-int Code::relocation_size() {
- return unchecked_relocation_info()->length();
-}
-
-
-byte* Code::entry() {
- return instruction_start();
-}
-
-
-bool Code::contains(byte* inner_pointer) {
- return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
-}
-
-
-ACCESSORS(JSArray, length, Object, kLengthOffset)
-
-
-ACCESSORS(JSRegExp, data, Object, kDataOffset)
-
-
-JSRegExp::Type JSRegExp::TypeTag() {
- Object* data = this->data();
- if (data->IsUndefined()) return JSRegExp::NOT_COMPILED;
- Smi* smi = Smi::cast(FixedArray::cast(data)->get(kTagIndex));
- return static_cast<JSRegExp::Type>(smi->value());
-}
-
-
-JSRegExp::Type JSRegExp::TypeTagUnchecked() {
- Smi* smi = Smi::cast(DataAtUnchecked(kTagIndex));
- return static_cast<JSRegExp::Type>(smi->value());
-}
-
-
-int JSRegExp::CaptureCount() {
- switch (TypeTag()) {
- case ATOM:
- return 0;
- case IRREGEXP:
- return Smi::cast(DataAt(kIrregexpCaptureCountIndex))->value();
- default:
- UNREACHABLE();
- return -1;
- }
-}
-
-
-JSRegExp::Flags JSRegExp::GetFlags() {
- ASSERT(this->data()->IsFixedArray());
- Object* data = this->data();
- Smi* smi = Smi::cast(FixedArray::cast(data)->get(kFlagsIndex));
- return Flags(smi->value());
-}
-
-
-String* JSRegExp::Pattern() {
- ASSERT(this->data()->IsFixedArray());
- Object* data = this->data();
- String* pattern= String::cast(FixedArray::cast(data)->get(kSourceIndex));
- return pattern;
-}
-
-
-Object* JSRegExp::DataAt(int index) {
- ASSERT(TypeTag() != NOT_COMPILED);
- return FixedArray::cast(data())->get(index);
-}
-
-
-Object* JSRegExp::DataAtUnchecked(int index) {
- FixedArray* fa = reinterpret_cast<FixedArray*>(data());
- int offset = FixedArray::kHeaderSize + index * kPointerSize;
- return READ_FIELD(fa, offset);
-}
-
-
-void JSRegExp::SetDataAt(int index, Object* value) {
- ASSERT(TypeTag() != NOT_COMPILED);
- ASSERT(index >= kDataIndex); // Only implementation data can be set this way.
- FixedArray::cast(data())->set(index, value);
-}
-
-
-void JSRegExp::SetDataAtUnchecked(int index, Object* value, Heap* heap) {
- ASSERT(index >= kDataIndex); // Only implementation data can be set this way.
- FixedArray* fa = reinterpret_cast<FixedArray*>(data());
- if (value->IsSmi()) {
- fa->set_unchecked(index, Smi::cast(value));
- } else {
- // We only do this during GC, so we don't need to notify the write barrier.
- fa->set_unchecked(heap, index, value, SKIP_WRITE_BARRIER);
- }
-}
-
-
-ElementsKind JSObject::GetElementsKind() {
- ElementsKind kind = map()->elements_kind();
-#if DEBUG
- FixedArrayBase* fixed_array =
- reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
- Map* map = fixed_array->map();
- ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
- (map == GetHeap()->fixed_array_map() ||
- map == GetHeap()->fixed_cow_array_map())) ||
- (IsFastDoubleElementsKind(kind) &&
- (fixed_array->IsFixedDoubleArray() ||
- fixed_array == GetHeap()->empty_fixed_array())) ||
- (kind == DICTIONARY_ELEMENTS &&
- fixed_array->IsFixedArray() &&
- fixed_array->IsDictionary()) ||
- (kind > DICTIONARY_ELEMENTS));
- ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
- (elements()->IsFixedArray() && elements()->length() >= 2));
-#endif
- return kind;
-}
-
-
-ElementsAccessor* JSObject::GetElementsAccessor() {
- return ElementsAccessor::ForKind(GetElementsKind());
-}
-
-
-bool JSObject::HasFastObjectElements() {
- return IsFastObjectElementsKind(GetElementsKind());
-}
-
-
-bool JSObject::HasFastSmiElements() {
- return IsFastSmiElementsKind(GetElementsKind());
-}
-
-
-bool JSObject::HasFastSmiOrObjectElements() {
- return IsFastSmiOrObjectElementsKind(GetElementsKind());
-}
-
-
-bool JSObject::HasFastDoubleElements() {
- return IsFastDoubleElementsKind(GetElementsKind());
-}
-
-
-bool JSObject::HasFastHoleyElements() {
- return IsFastHoleyElementsKind(GetElementsKind());
-}
-
-
-bool JSObject::HasFastElements() {
- return IsFastElementsKind(GetElementsKind());
-}
-
-
-bool JSObject::HasDictionaryElements() {
- return GetElementsKind() == DICTIONARY_ELEMENTS;
-}
-
-
-bool JSObject::HasNonStrictArgumentsElements() {
- return GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS;
-}
-
-
-bool JSObject::HasExternalArrayElements() {
- HeapObject* array = elements();
- ASSERT(array != NULL);
- return array->IsExternalArray();
-}
-
-
-#define EXTERNAL_ELEMENTS_CHECK(name, type) \
-bool JSObject::HasExternal##name##Elements() { \
- HeapObject* array = elements(); \
- ASSERT(array != NULL); \
- if (!array->IsHeapObject()) \
- return false; \
- return array->map()->instance_type() == type; \
-}
-
-
-EXTERNAL_ELEMENTS_CHECK(Byte, EXTERNAL_BYTE_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(UnsignedByte, EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Short, EXTERNAL_SHORT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(UnsignedShort,
- EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Int, EXTERNAL_INT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(UnsignedInt,
- EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Float,
- EXTERNAL_FLOAT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Double,
- EXTERNAL_DOUBLE_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Pixel, EXTERNAL_PIXEL_ARRAY_TYPE)
-
-
-bool JSObject::HasNamedInterceptor() {
- return map()->has_named_interceptor();
-}
-
-
-bool JSObject::HasIndexedInterceptor() {
- return map()->has_indexed_interceptor();
-}
-
-
-MaybeObject* JSObject::EnsureWritableFastElements() {
- ASSERT(HasFastSmiOrObjectElements());
- FixedArray* elems = FixedArray::cast(elements());
- Isolate* isolate = GetIsolate();
- if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
- Object* writable_elems;
- { MaybeObject* maybe_writable_elems = isolate->heap()->CopyFixedArrayWithMap(
- elems, isolate->heap()->fixed_array_map());
- if (!maybe_writable_elems->ToObject(&writable_elems)) {
- return maybe_writable_elems;
- }
- }
- set_elements(FixedArray::cast(writable_elems));
- isolate->counters()->cow_arrays_converted()->Increment();
- return writable_elems;
-}
-
-
-StringDictionary* JSObject::property_dictionary() {
- ASSERT(!HasFastProperties());
- return StringDictionary::cast(properties());
-}
-
-
-SeededNumberDictionary* JSObject::element_dictionary() {
- ASSERT(HasDictionaryElements());
- return SeededNumberDictionary::cast(elements());
-}
-
-
-bool Name::IsHashFieldComputed(uint32_t field) {
- return (field & kHashNotComputedMask) == 0;
-}
-
-
-bool Name::HasHashCode() {
- return IsHashFieldComputed(hash_field());
-}
-
-
-uint32_t Name::Hash() {
- // Fast case: has hash code already been computed?
- uint32_t field = hash_field();
- if (IsHashFieldComputed(field)) return field >> kHashShift;
- // Slow case: compute hash code and set it. Has to be a string.
- return String::cast(this)->ComputeAndSetHash();
-}
-
-
-StringHasher::StringHasher(int length, uint32_t seed)
- : length_(length),
- raw_running_hash_(seed),
- array_index_(0),
- is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
- is_first_char_(true) {
- ASSERT(FLAG_randomize_hashes || raw_running_hash_ == 0);
-}
-
-
-bool StringHasher::has_trivial_hash() {
- return length_ > String::kMaxHashCalcLength;
-}
-
-
-uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint16_t c) {
- running_hash += c;
- running_hash += (running_hash << 10);
- running_hash ^= (running_hash >> 6);
- return running_hash;
-}
-
-
-uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
- running_hash += (running_hash << 3);
- running_hash ^= (running_hash >> 11);
- running_hash += (running_hash << 15);
- if ((running_hash & String::kHashBitMask) == 0) {
- return kZeroHash;
- }
- return running_hash;
-}
-
-
-void StringHasher::AddCharacter(uint16_t c) {
- // Use the Jenkins one-at-a-time hash function to update the hash
- // for the given character.
- raw_running_hash_ = AddCharacterCore(raw_running_hash_, c);
-}
-
-
-bool StringHasher::UpdateIndex(uint16_t c) {
- ASSERT(is_array_index_);
- if (c < '0' || c > '9') {
- is_array_index_ = false;
- return false;
- }
- int d = c - '0';
- if (is_first_char_) {
- is_first_char_ = false;
- if (c == '0' && length_ > 1) {
- is_array_index_ = false;
- return false;
- }
- }
- if (array_index_ > 429496729U - ((d + 2) >> 3)) {
- is_array_index_ = false;
- return false;
- }
- array_index_ = array_index_ * 10 + d;
- return true;
-}
-
-
-template<typename Char>
-inline void StringHasher::AddCharacters(const Char* chars, int length) {
- ASSERT(sizeof(Char) == 1 || sizeof(Char) == 2);
- int i = 0;
- if (is_array_index_) {
- for (; i < length; i++) {
- AddCharacter(chars[i]);
- if (!UpdateIndex(chars[i])) {
- i++;
- break;
- }
- }
- }
- for (; i < length; i++) {
- ASSERT(!is_array_index_);
- AddCharacter(chars[i]);
- }
-}
-
-
-template <typename schar>
-uint32_t StringHasher::HashSequentialString(const schar* chars,
- int length,
- uint32_t seed) {
- StringHasher hasher(length, seed);
- if (!hasher.has_trivial_hash()) hasher.AddCharacters(chars, length);
- return hasher.GetHashField();
-}
-
-
-bool String::AsArrayIndex(uint32_t* index) {
- uint32_t field = hash_field();
- if (IsHashFieldComputed(field) && (field & kIsNotArrayIndexMask)) {
- return false;
- }
- return SlowAsArrayIndex(index);
-}
-
-
-Object* JSReceiver::GetPrototype() {
- return map()->prototype();
-}
-
-
-Object* JSReceiver::GetConstructor() {
- return map()->constructor();
-}
-
-
-bool JSReceiver::HasProperty(String* name) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasPropertyWithHandler(name);
- }
- return GetPropertyAttribute(name) != ABSENT;
-}
-
-
-bool JSReceiver::HasLocalProperty(String* name) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasPropertyWithHandler(name);
- }
- return GetLocalPropertyAttribute(name) != ABSENT;
-}
-
-
-PropertyAttributes JSReceiver::GetPropertyAttribute(String* key) {
- uint32_t index;
- if (IsJSObject() && key->AsArrayIndex(&index)) {
- return GetElementAttribute(index);
- }
- return GetPropertyAttributeWithReceiver(this, key);
-}
-
-
-PropertyAttributes JSReceiver::GetElementAttribute(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
- }
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, true);
-}
-
-
-// TODO(504): this may be useful in other places too where JSGlobalProxy
-// is used.
-Object* JSObject::BypassGlobalProxy() {
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return GetHeap()->undefined_value();
- ASSERT(proto->IsJSGlobalObject());
- return proto;
- }
- return this;
-}
-
-
-MaybeObject* JSReceiver::GetIdentityHash(CreationFlag flag) {
- return IsJSProxy()
- ? JSProxy::cast(this)->GetIdentityHash(flag)
- : JSObject::cast(this)->GetIdentityHash(flag);
-}
-
-
-bool JSReceiver::HasElement(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasElementWithHandler(index);
- }
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, true) != ABSENT;
-}
-
-
-bool JSReceiver::HasLocalElement(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasElementWithHandler(index);
- }
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, false) != ABSENT;
-}
-
-
-PropertyAttributes JSReceiver::GetLocalElementAttribute(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
- }
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, false);
-}
-
-
-bool AccessorInfo::all_can_read() {
- return BooleanBit::get(flag(), kAllCanReadBit);
-}
-
-
-void AccessorInfo::set_all_can_read(bool value) {
- set_flag(BooleanBit::set(flag(), kAllCanReadBit, value));
-}
-
-
-bool AccessorInfo::all_can_write() {
- return BooleanBit::get(flag(), kAllCanWriteBit);
-}
-
-
-void AccessorInfo::set_all_can_write(bool value) {
- set_flag(BooleanBit::set(flag(), kAllCanWriteBit, value));
-}
-
-
-bool AccessorInfo::prohibits_overwriting() {
- return BooleanBit::get(flag(), kProhibitsOverwritingBit);
-}
-
-
-void AccessorInfo::set_prohibits_overwriting(bool value) {
- set_flag(BooleanBit::set(flag(), kProhibitsOverwritingBit, value));
-}
-
-
-PropertyAttributes AccessorInfo::property_attributes() {
- return AttributesField::decode(static_cast<uint32_t>(flag()->value()));
-}
-
-
-void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
- set_flag(Smi::FromInt(AttributesField::update(flag()->value(), attributes)));
-}
-
-
-bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
- Object* function_template = expected_receiver_type();
- if (!function_template->IsFunctionTemplateInfo()) return true;
- return receiver->IsInstanceOf(FunctionTemplateInfo::cast(function_template));
-}
-
-
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::SetEntry(int entry,
- Object* key,
- Object* value) {
- SetEntry(entry, key, value, PropertyDetails(Smi::FromInt(0)));
-}
-
-
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::SetEntry(int entry,
- Object* key,
- Object* value,
- PropertyDetails details) {
- ASSERT(!key->IsString() ||
- details.IsDeleted() ||
- details.dictionary_index() > 0);
- int index = HashTable<Shape, Key>::EntryToIndex(entry);
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
- FixedArray::set(index, key, mode);
- FixedArray::set(index+1, value, mode);
- FixedArray::set(index+2, details.AsSmi());
-}
-
-
-bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
- ASSERT(other->IsNumber());
- return key == static_cast<uint32_t>(other->Number());
-}
-
-
-uint32_t UnseededNumberDictionaryShape::Hash(uint32_t key) {
- return ComputeIntegerHash(key, 0);
-}
-
-
-uint32_t UnseededNumberDictionaryShape::HashForObject(uint32_t key,
- Object* other) {
- ASSERT(other->IsNumber());
- return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), 0);
-}
-
-uint32_t SeededNumberDictionaryShape::SeededHash(uint32_t key, uint32_t seed) {
- return ComputeIntegerHash(key, seed);
-}
-
-uint32_t SeededNumberDictionaryShape::SeededHashForObject(uint32_t key,
- uint32_t seed,
- Object* other) {
- ASSERT(other->IsNumber());
- return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), seed);
-}
-
-MaybeObject* NumberDictionaryShape::AsObject(uint32_t key) {
- return Isolate::Current()->heap()->NumberFromUint32(key);
-}
-
-
-bool StringDictionaryShape::IsMatch(String* key, Object* other) {
- // We know that all entries in a hash table had their hash keys created.
- // Use that knowledge to have fast failure.
- if (key->Hash() != String::cast(other)->Hash()) return false;
- return key->Equals(String::cast(other));
-}
-
-
-uint32_t StringDictionaryShape::Hash(String* key) {
- return key->Hash();
-}
-
-
-uint32_t StringDictionaryShape::HashForObject(String* key, Object* other) {
- return String::cast(other)->Hash();
-}
-
-
-MaybeObject* StringDictionaryShape::AsObject(String* key) {
- return key;
-}
-
-
-template <int entrysize>
-bool ObjectHashTableShape<entrysize>::IsMatch(Object* key, Object* other) {
- return key->SameValue(other);
-}
-
-
-template <int entrysize>
-uint32_t ObjectHashTableShape<entrysize>::Hash(Object* key) {
- MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
- return Smi::cast(maybe_hash->ToObjectChecked())->value();
-}
-
-
-template <int entrysize>
-uint32_t ObjectHashTableShape<entrysize>::HashForObject(Object* key,
- Object* other) {
- MaybeObject* maybe_hash = other->GetHash(OMIT_CREATION);
- return Smi::cast(maybe_hash->ToObjectChecked())->value();
-}
-
-
-template <int entrysize>
-MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Object* key) {
- return key;
-}
-
-
-void Map::ClearCodeCache(Heap* heap) {
- // No write barrier is needed since empty_fixed_array is not in new space.
- // Please note this function is used during marking:
- // - MarkCompactCollector::MarkUnmarkedObject
- // - IncrementalMarking::Step
- ASSERT(!heap->InNewSpace(heap->empty_fixed_array()));
- WRITE_FIELD(this, kCodeCacheOffset, heap->empty_fixed_array());
-}
-
-
-void JSArray::EnsureSize(int required_size) {
- ASSERT(HasFastSmiOrObjectElements());
- FixedArray* elts = FixedArray::cast(elements());
- const int kArraySizeThatFitsComfortablyInNewSpace = 128;
- if (elts->length() < required_size) {
- // Doubling in size would be overkill, but leave some slack to avoid
- // constantly growing.
- Expand(required_size + (required_size >> 3));
- // It's a performance benefit to keep a frequently used array in new-space.
- } else if (!GetHeap()->new_space()->Contains(elts) &&
- required_size < kArraySizeThatFitsComfortablyInNewSpace) {
- // Expand will allocate a new backing store in new space even if the size
- // we asked for isn't larger than what we had before.
- Expand(required_size);
- }
-}
-
-
-void JSArray::set_length(Smi* length) {
- // Don't need a write barrier for a Smi.
- set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
-}
-
-
-bool JSArray::AllowsSetElementsLength() {
- bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray();
- ASSERT(result == !HasExternalArrayElements());
- return result;
-}
-
-
-MaybeObject* JSArray::SetContent(FixedArrayBase* storage) {
- MaybeObject* maybe_result = EnsureCanContainElements(
- storage, storage->length(), ALLOW_COPIED_DOUBLE_ELEMENTS);
- if (maybe_result->IsFailure()) return maybe_result;
- ASSERT((storage->map() == GetHeap()->fixed_double_array_map() &&
- IsFastDoubleElementsKind(GetElementsKind())) ||
- ((storage->map() != GetHeap()->fixed_double_array_map()) &&
- (IsFastObjectElementsKind(GetElementsKind()) ||
- (IsFastSmiElementsKind(GetElementsKind()) &&
- FixedArray::cast(storage)->ContainsOnlySmisOrHoles()))));
- set_elements(storage);
- set_length(Smi::FromInt(storage->length()));
- return this;
-}
-
-
-MaybeObject* FixedArray::Copy() {
- if (length() == 0) return this;
- return GetHeap()->CopyFixedArray(this);
-}
-
-
-MaybeObject* FixedDoubleArray::Copy() {
- if (length() == 0) return this;
- return GetHeap()->CopyFixedDoubleArray(this);
-}
-
-
-void TypeFeedbackCells::SetAstId(int index, TypeFeedbackId id) {
- set(1 + index * 2, Smi::FromInt(id.ToInt()));
-}
-
-
-TypeFeedbackId TypeFeedbackCells::AstId(int index) {
- return TypeFeedbackId(Smi::cast(get(1 + index * 2))->value());
-}
-
-
-void TypeFeedbackCells::SetCell(int index, JSGlobalPropertyCell* cell) {
- set(index * 2, cell);
-}
-
-
-JSGlobalPropertyCell* TypeFeedbackCells::Cell(int index) {
- return JSGlobalPropertyCell::cast(get(index * 2));
-}
-
-
-Handle<Object> TypeFeedbackCells::UninitializedSentinel(Isolate* isolate) {
- return isolate->factory()->the_hole_value();
-}
-
-
-Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) {
- return isolate->factory()->undefined_value();
-}
-
-
-Handle<Object> TypeFeedbackCells::MonomorphicArraySentinel(Isolate* isolate,
- ElementsKind elements_kind) {
- return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
-}
-
-
-Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
- return heap->the_hole_value();
-}
-
-
-int TypeFeedbackInfo::ic_total_count() {
- int current = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
- return ICTotalCountField::decode(current);
-}
-
-
-void TypeFeedbackInfo::set_ic_total_count(int count) {
- int value = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
- value = ICTotalCountField::update(value,
- ICTotalCountField::decode(count));
- WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(value));
-}
-
-
-int TypeFeedbackInfo::ic_with_type_info_count() {
- int current = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
- return ICsWithTypeInfoCountField::decode(current);
-}
-
-
-void TypeFeedbackInfo::change_ic_with_type_info_count(int delta) {
- int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
- int new_count = ICsWithTypeInfoCountField::decode(value) + delta;
- // We can get negative count here when the type-feedback info is
- // shared between two code objects. The can only happen when
- // the debugger made a shallow copy of code object (see Heap::CopyCode).
- // Since we do not optimize when the debugger is active, we can skip
- // this counter update.
- if (new_count >= 0) {
- new_count &= ICsWithTypeInfoCountField::kMask;
- value = ICsWithTypeInfoCountField::update(value, new_count);
- WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(value));
- }
-}
-
-
-void TypeFeedbackInfo::initialize_storage() {
- WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(0));
- WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(0));
-}
-
-
-void TypeFeedbackInfo::change_own_type_change_checksum() {
- int value = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
- int checksum = OwnTypeChangeChecksum::decode(value);
- checksum = (checksum + 1) % (1 << kTypeChangeChecksumBits);
- value = OwnTypeChangeChecksum::update(value, checksum);
- // Ensure packed bit field is in Smi range.
- if (value > Smi::kMaxValue) value |= Smi::kMinValue;
- if (value < Smi::kMinValue) value &= ~Smi::kMinValue;
- WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(value));
-}
-
-
-void TypeFeedbackInfo::set_inlined_type_change_checksum(int checksum) {
- int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
- int mask = (1 << kTypeChangeChecksumBits) - 1;
- value = InlinedTypeChangeChecksum::update(value, checksum & mask);
- // Ensure packed bit field is in Smi range.
- if (value > Smi::kMaxValue) value |= Smi::kMinValue;
- if (value < Smi::kMinValue) value &= ~Smi::kMinValue;
- WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(value));
-}
-
-
-int TypeFeedbackInfo::own_type_change_checksum() {
- int value = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
- return OwnTypeChangeChecksum::decode(value);
-}
-
-
-bool TypeFeedbackInfo::matches_inlined_type_change_checksum(int checksum) {
- int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
- int mask = (1 << kTypeChangeChecksumBits) - 1;
- return InlinedTypeChangeChecksum::decode(value) == (checksum & mask);
-}
-
-
-ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
- kTypeFeedbackCellsOffset)
-
-
-SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
-
-
-Relocatable::Relocatable(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- isolate_ = isolate;
- prev_ = isolate->relocatable_top();
- isolate->set_relocatable_top(this);
-}
-
-
-Relocatable::~Relocatable() {
- ASSERT(isolate_ == Isolate::Current());
- ASSERT_EQ(isolate_->relocatable_top(), this);
- isolate_->set_relocatable_top(prev_);
-}
-
-
-int JSObject::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
- return map->instance_size();
-}
-
-
-void Foreign::ForeignIterateBody(ObjectVisitor* v) {
- v->VisitExternalReference(
- reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
-}
-
-
-template<typename StaticVisitor>
-void Foreign::ForeignIterateBody() {
- StaticVisitor::VisitExternalReference(
- reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
-}
-
-
-void ExternalAsciiString::ExternalAsciiStringIterateBody(ObjectVisitor* v) {
- typedef v8::String::ExternalAsciiStringResource Resource;
- v->VisitExternalAsciiString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-template<typename StaticVisitor>
-void ExternalAsciiString::ExternalAsciiStringIterateBody() {
- typedef v8::String::ExternalAsciiStringResource Resource;
- StaticVisitor::VisitExternalAsciiString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-void ExternalTwoByteString::ExternalTwoByteStringIterateBody(ObjectVisitor* v) {
- typedef v8::String::ExternalStringResource Resource;
- v->VisitExternalTwoByteString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-template<typename StaticVisitor>
-void ExternalTwoByteString::ExternalTwoByteStringIterateBody() {
- typedef v8::String::ExternalStringResource Resource;
- StaticVisitor::VisitExternalTwoByteString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-template<int start_offset, int end_offset, int size>
-void FixedBodyDescriptor<start_offset, end_offset, size>::IterateBody(
- HeapObject* obj,
- ObjectVisitor* v) {
- v->VisitPointers(HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, end_offset));
-}
-
-
-template<int start_offset>
-void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
- int object_size,
- ObjectVisitor* v) {
- v->VisitPointers(HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, object_size));
-}
-
-
-#undef TYPE_CHECKER
-#undef CAST_ACCESSOR
-#undef INT_ACCESSORS
-#undef ACCESSORS
-#undef ACCESSORS_TO_SMI
-#undef SMI_ACCESSORS
-#undef BOOL_GETTER
-#undef BOOL_ACCESSORS
-#undef FIELD_ADDR
-#undef READ_FIELD
-#undef WRITE_FIELD
-#undef WRITE_BARRIER
-#undef CONDITIONAL_WRITE_BARRIER
-#undef READ_DOUBLE_FIELD
-#undef WRITE_DOUBLE_FIELD
-#undef READ_INT_FIELD
-#undef WRITE_INT_FIELD
-#undef READ_INTPTR_FIELD
-#undef WRITE_INTPTR_FIELD
-#undef READ_UINT32_FIELD
-#undef WRITE_UINT32_FIELD
-#undef READ_SHORT_FIELD
-#undef WRITE_SHORT_FIELD
-#undef READ_BYTE_FIELD
-#undef WRITE_BYTE_FIELD
-
-
-} } // namespace v8::internal
-
-#endif // V8_OBJECTS_INL_H_
diff --git a/src/3rdparty/v8/src/objects-printer.cc b/src/3rdparty/v8/src/objects-printer.cc
deleted file mode 100644
index b4cf9a9..0000000
--- a/src/3rdparty/v8/src/objects-printer.cc
+++ /dev/null
@@ -1,1169 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "disassembler.h"
-#include "disasm.h"
-#include "jsregexp.h"
-#include "objects-visiting.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef OBJECT_PRINT
-
-static const char* TypeToString(InstanceType type);
-
-
-void MaybeObject::Print(FILE* out) {
- Object* this_as_object;
- if (ToObject(&this_as_object)) {
- if (this_as_object->IsSmi()) {
- Smi::cast(this_as_object)->SmiPrint(out);
- } else {
- HeapObject::cast(this_as_object)->HeapObjectPrint(out);
- }
- } else {
- Failure::cast(this)->FailurePrint(out);
- }
- Flush(out);
-}
-
-
-void MaybeObject::PrintLn(FILE* out) {
- Print(out);
- FPrintF(out, "\n");
-}
-
-
-void HeapObject::PrintHeader(FILE* out, const char* id) {
- FPrintF(out, "%p: [%s]\n", reinterpret_cast<void*>(this), id);
-}
-
-
-void HeapObject::HeapObjectPrint(FILE* out) {
- InstanceType instance_type = map()->instance_type();
-
- HandleScope scope(GetIsolate());
- if (instance_type < FIRST_NONSTRING_TYPE) {
- String::cast(this)->StringPrint(out);
- return;
- }
-
- switch (instance_type) {
- case SYMBOL_TYPE:
- Symbol::cast(this)->SymbolPrint(out);
- break;
- case MAP_TYPE:
- Map::cast(this)->MapPrint(out);
- break;
- case HEAP_NUMBER_TYPE:
- HeapNumber::cast(this)->HeapNumberPrint(out);
- break;
- case FIXED_DOUBLE_ARRAY_TYPE:
- FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out);
- break;
- case FIXED_ARRAY_TYPE:
- FixedArray::cast(this)->FixedArrayPrint(out);
- break;
- case BYTE_ARRAY_TYPE:
- ByteArray::cast(this)->ByteArrayPrint(out);
- break;
- case FREE_SPACE_TYPE:
- FreeSpace::cast(this)->FreeSpacePrint(out);
- break;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- ExternalPixelArray::cast(this)->ExternalPixelArrayPrint(out);
- break;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- ExternalByteArray::cast(this)->ExternalByteArrayPrint(out);
- break;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- ExternalUnsignedByteArray::cast(this)
- ->ExternalUnsignedByteArrayPrint(out);
- break;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- ExternalShortArray::cast(this)->ExternalShortArrayPrint(out);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- ExternalUnsignedShortArray::cast(this)
- ->ExternalUnsignedShortArrayPrint(out);
- break;
- case EXTERNAL_INT_ARRAY_TYPE:
- ExternalIntArray::cast(this)->ExternalIntArrayPrint(out);
- break;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayPrint(out);
- break;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- ExternalFloatArray::cast(this)->ExternalFloatArrayPrint(out);
- break;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- ExternalDoubleArray::cast(this)->ExternalDoubleArrayPrint(out);
- break;
- case FILLER_TYPE:
- FPrintF(out, "filler");
- break;
- case JS_OBJECT_TYPE: // fall through
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_ARRAY_TYPE:
- case JS_REGEXP_TYPE:
- JSObject::cast(this)->JSObjectPrint(out);
- break;
- case ODDBALL_TYPE:
- Oddball::cast(this)->to_string()->Print(out);
- break;
- case JS_MODULE_TYPE:
- JSModule::cast(this)->JSModulePrint(out);
- break;
- case JS_FUNCTION_TYPE:
- JSFunction::cast(this)->JSFunctionPrint(out);
- break;
- case JS_GLOBAL_PROXY_TYPE:
- JSGlobalProxy::cast(this)->JSGlobalProxyPrint(out);
- break;
- case JS_GLOBAL_OBJECT_TYPE:
- JSGlobalObject::cast(this)->JSGlobalObjectPrint(out);
- break;
- case JS_BUILTINS_OBJECT_TYPE:
- JSBuiltinsObject::cast(this)->JSBuiltinsObjectPrint(out);
- break;
- case JS_VALUE_TYPE:
- FPrintF(out, "Value wrapper around:");
- JSValue::cast(this)->value()->Print(out);
- break;
- case JS_DATE_TYPE:
- JSDate::cast(this)->JSDatePrint(out);
- break;
- case CODE_TYPE:
- Code::cast(this)->CodePrint(out);
- break;
- case JS_PROXY_TYPE:
- JSProxy::cast(this)->JSProxyPrint(out);
- break;
- case JS_FUNCTION_PROXY_TYPE:
- JSFunctionProxy::cast(this)->JSFunctionProxyPrint(out);
- break;
- case JS_WEAK_MAP_TYPE:
- JSWeakMap::cast(this)->JSWeakMapPrint(out);
- break;
- case FOREIGN_TYPE:
- Foreign::cast(this)->ForeignPrint(out);
- break;
- case SHARED_FUNCTION_INFO_TYPE:
- SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(out);
- break;
- case JS_MESSAGE_OBJECT_TYPE:
- JSMessageObject::cast(this)->JSMessageObjectPrint(out);
- break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellPrint(out);
- break;
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
- Name::cast(this)->Name##Print(out); \
- break;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
-
- default:
- FPrintF(out, "UNKNOWN TYPE %d", map()->instance_type());
- UNREACHABLE();
- break;
- }
-}
-
-
-void ByteArray::ByteArrayPrint(FILE* out) {
- FPrintF(out, "byte array, data starts at %p", GetDataStartAddress());
-}
-
-
-void FreeSpace::FreeSpacePrint(FILE* out) {
- FPrintF(out, "free space, size %d", Size());
-}
-
-
-void ExternalPixelArray::ExternalPixelArrayPrint(FILE* out) {
- FPrintF(out, "external pixel array");
-}
-
-
-void ExternalByteArray::ExternalByteArrayPrint(FILE* out) {
- FPrintF(out, "external byte array");
-}
-
-
-void ExternalUnsignedByteArray::ExternalUnsignedByteArrayPrint(FILE* out) {
- FPrintF(out, "external unsigned byte array");
-}
-
-
-void ExternalShortArray::ExternalShortArrayPrint(FILE* out) {
- FPrintF(out, "external short array");
-}
-
-
-void ExternalUnsignedShortArray::ExternalUnsignedShortArrayPrint(FILE* out) {
- FPrintF(out, "external unsigned short array");
-}
-
-
-void ExternalIntArray::ExternalIntArrayPrint(FILE* out) {
- FPrintF(out, "external int array");
-}
-
-
-void ExternalUnsignedIntArray::ExternalUnsignedIntArrayPrint(FILE* out) {
- FPrintF(out, "external unsigned int array");
-}
-
-
-void ExternalFloatArray::ExternalFloatArrayPrint(FILE* out) {
- FPrintF(out, "external float array");
-}
-
-
-void ExternalDoubleArray::ExternalDoubleArrayPrint(FILE* out) {
- FPrintF(out, "external double array");
-}
-
-
-void JSObject::PrintProperties(FILE* out) {
- if (HasFastProperties()) {
- DescriptorArray* descs = map()->instance_descriptors();
- for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
- FPrintF(out, " ");
- descs->GetKey(i)->StringPrint(out);
- FPrintF(out, ": ");
- switch (descs->GetType(i)) {
- case FIELD: {
- int index = descs->GetFieldIndex(i);
- FastPropertyAt(index)->ShortPrint(out);
- FPrintF(out, " (field at offset %d)\n", index);
- break;
- }
- case CONSTANT_FUNCTION:
- descs->GetConstantFunction(i)->ShortPrint(out);
- FPrintF(out, " (constant function)\n");
- break;
- case CALLBACKS:
- descs->GetCallbacksObject(i)->ShortPrint(out);
- FPrintF(out, " (callback)\n");
- break;
- case NORMAL: // only in slow mode
- case HANDLER: // only in lookup results, not in descriptors
- case INTERCEPTOR: // only in lookup results, not in descriptors
- // There are no transitions in the descriptor array.
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- }
- } else {
- property_dictionary()->Print(out);
- }
-}
-
-
-void JSObject::PrintElements(FILE* out) {
- // Don't call GetElementsKind, its validation code can cause the printer to
- // fail when debugging.
- switch (map()->elements_kind()) {
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS: {
- // Print in array notation for non-sparse arrays.
- FixedArray* p = FixedArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- FPrintF(out, " %d: ", i);
- p->get(i)->ShortPrint(out);
- FPrintF(out, "\n");
- }
- break;
- }
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- // Print in array notation for non-sparse arrays.
- if (elements()->length() > 0) {
- FixedDoubleArray* p = FixedDoubleArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- if (p->is_the_hole(i)) {
- FPrintF(out, " %d: <the hole>", i);
- } else {
- FPrintF(out, " %d: %g", i, p->get_scalar(i));
- }
- FPrintF(out, "\n");
- }
- }
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* p = ExternalPixelArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- FPrintF(out, " %d: %d\n", i, p->get_scalar(i));
- }
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS: {
- ExternalByteArray* p = ExternalByteArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- FPrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- ExternalUnsignedByteArray* p =
- ExternalUnsignedByteArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- FPrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_SHORT_ELEMENTS: {
- ExternalShortArray* p = ExternalShortArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- FPrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- ExternalUnsignedShortArray* p =
- ExternalUnsignedShortArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- FPrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_INT_ELEMENTS: {
- ExternalIntArray* p = ExternalIntArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- FPrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- ExternalUnsignedIntArray* p =
- ExternalUnsignedIntArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- FPrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS: {
- ExternalFloatArray* p = ExternalFloatArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- FPrintF(out, " %d: %f\n", i, p->get_scalar(i));
- }
- break;
- }
- case EXTERNAL_DOUBLE_ELEMENTS: {
- ExternalDoubleArray* p = ExternalDoubleArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- FPrintF(out, " %d: %f\n", i, p->get_scalar(i));
- }
- break;
- }
- case DICTIONARY_ELEMENTS:
- elements()->Print(out);
- break;
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
- FixedArray* p = FixedArray::cast(elements());
- FPrintF(out, " parameter map:");
- for (int i = 2; i < p->length(); i++) {
- FPrintF(out, " %d:", i - 2);
- p->get(i)->ShortPrint(out);
- }
- FPrintF(out, "\n context: ");
- p->get(0)->ShortPrint(out);
- FPrintF(out, "\n arguments: ");
- p->get(1)->ShortPrint(out);
- FPrintF(out, "\n");
- break;
- }
- }
-}
-
-
-void JSObject::PrintTransitions(FILE* out) {
- if (!map()->HasTransitionArray()) return;
- TransitionArray* transitions = map()->transitions();
- for (int i = 0; i < transitions->number_of_transitions(); i++) {
- FPrintF(out, " ");
- transitions->GetKey(i)->StringPrint(out);
- FPrintF(out, ": ");
- switch (transitions->GetTargetDetails(i).type()) {
- case FIELD: {
- FPrintF(out, " (transition to field)\n");
- break;
- }
- case CONSTANT_FUNCTION:
- FPrintF(out, " (transition to constant function)\n");
- break;
- case CALLBACKS:
- FPrintF(out, " (transition to callback)\n");
- break;
- // Values below are never in the target descriptor array.
- case NORMAL:
- case HANDLER:
- case INTERCEPTOR:
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void JSObject::JSObjectPrint(FILE* out) {
- FPrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
- FPrintF(out, " - map = %p [", reinterpret_cast<void*>(map()));
- // Don't call GetElementsKind, its validation code can cause the printer to
- // fail when debugging.
- PrintElementsKind(out, this->map()->elements_kind());
- FPrintF(out,
- "]\n - prototype = %p\n",
- reinterpret_cast<void*>(GetPrototype()));
- FPrintF(out, " {\n");
- PrintProperties(out);
- PrintTransitions(out);
- PrintElements(out);
- FPrintF(out, " }\n");
-}
-
-
-void JSModule::JSModulePrint(FILE* out) {
- HeapObject::PrintHeader(out, "JSModule");
- FPrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- FPrintF(out, " - context = ");
- context()->Print(out);
- FPrintF(out, " - scope_info = ");
- scope_info()->ShortPrint(out);
- PrintElementsKind(out, this->map()->elements_kind());
- FPrintF(out, " {\n");
- PrintProperties(out);
- PrintElements(out);
- FPrintF(out, " }\n");
-}
-
-
-static const char* TypeToString(InstanceType type) {
- switch (type) {
- case INVALID_TYPE: return "INVALID";
- case MAP_TYPE: return "MAP";
- case HEAP_NUMBER_TYPE: return "HEAP_NUMBER";
- case SYMBOL_TYPE: return "SYMBOL";
- case STRING_TYPE: return "TWO_BYTE_STRING";
- case ASCII_STRING_TYPE: return "ASCII_STRING";
- case CONS_STRING_TYPE:
- case CONS_ASCII_STRING_TYPE:
- return "CONS_STRING";
- case EXTERNAL_STRING_TYPE:
- case EXTERNAL_ASCII_STRING_TYPE:
- case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
- return "EXTERNAL_STRING";
- case SHORT_EXTERNAL_STRING_TYPE:
- case SHORT_EXTERNAL_ASCII_STRING_TYPE:
- case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
- return "SHORT_EXTERNAL_STRING";
- case INTERNALIZED_STRING_TYPE: return "INTERNALIZED_STRING";
- case ASCII_INTERNALIZED_STRING_TYPE: return "ASCII_INTERNALIZED_STRING";
- case CONS_INTERNALIZED_STRING_TYPE: return "CONS_INTERNALIZED_STRING";
- case CONS_ASCII_INTERNALIZED_STRING_TYPE:
- return "CONS_ASCII_INTERNALIZED_STRING";
- case EXTERNAL_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE:
- return "EXTERNAL_INTERNALIZED_STRING";
- case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE:
- return "SHORT_EXTERNAL_INTERNALIZED_STRING";
- case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
- case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
- case FREE_SPACE_TYPE: return "FREE_SPACE";
- case EXTERNAL_PIXEL_ARRAY_TYPE: return "EXTERNAL_PIXEL_ARRAY";
- case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return "EXTERNAL_UNSIGNED_BYTE_ARRAY";
- case EXTERNAL_SHORT_ARRAY_TYPE: return "EXTERNAL_SHORT_ARRAY";
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return "EXTERNAL_UNSIGNED_SHORT_ARRAY";
- case EXTERNAL_INT_ARRAY_TYPE: return "EXTERNAL_INT_ARRAY";
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return "EXTERNAL_UNSIGNED_INT_ARRAY";
- case EXTERNAL_FLOAT_ARRAY_TYPE: return "EXTERNAL_FLOAT_ARRAY";
- case EXTERNAL_DOUBLE_ARRAY_TYPE: return "EXTERNAL_DOUBLE_ARRAY";
- case FILLER_TYPE: return "FILLER";
- case JS_OBJECT_TYPE: return "JS_OBJECT";
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
- case ODDBALL_TYPE: return "ODDBALL";
- case JS_GLOBAL_PROPERTY_CELL_TYPE: return "JS_GLOBAL_PROPERTY_CELL";
- case SHARED_FUNCTION_INFO_TYPE: return "SHARED_FUNCTION_INFO";
- case JS_MODULE_TYPE: return "JS_MODULE";
- case JS_FUNCTION_TYPE: return "JS_FUNCTION";
- case CODE_TYPE: return "CODE";
- case JS_ARRAY_TYPE: return "JS_ARRAY";
- case JS_PROXY_TYPE: return "JS_PROXY";
- case JS_WEAK_MAP_TYPE: return "JS_WEAK_MAP";
- case JS_REGEXP_TYPE: return "JS_REGEXP";
- case JS_VALUE_TYPE: return "JS_VALUE";
- case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
- case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
- case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
- case FOREIGN_TYPE: return "FOREIGN";
- case JS_MESSAGE_OBJECT_TYPE: return "JS_MESSAGE_OBJECT_TYPE";
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- default: return "UNKNOWN";
- }
-}
-
-
-void Symbol::SymbolPrint(FILE* out) {
- HeapObject::PrintHeader(out, "Symbol");
- FPrintF(out, " - hash: %d\n", Hash());
-}
-
-
-void Map::MapPrint(FILE* out) {
- HeapObject::PrintHeader(out, "Map");
- FPrintF(out, " - type: %s\n", TypeToString(instance_type()));
- FPrintF(out, " - instance size: %d\n", instance_size());
- FPrintF(out, " - inobject properties: %d\n", inobject_properties());
- FPrintF(out, " - elements kind: ");
- PrintElementsKind(out, elements_kind());
- FPrintF(out, "\n - pre-allocated property fields: %d\n",
- pre_allocated_property_fields());
- FPrintF(out, " - unused property fields: %d\n", unused_property_fields());
- if (is_hidden_prototype()) {
- FPrintF(out, " - hidden_prototype\n");
- }
- if (has_named_interceptor()) {
- FPrintF(out, " - named_interceptor\n");
- }
- if (has_indexed_interceptor()) {
- FPrintF(out, " - indexed_interceptor\n");
- }
- if (is_undetectable()) {
- FPrintF(out, " - undetectable\n");
- }
- if (has_instance_call_handler()) {
- FPrintF(out, " - instance_call_handler\n");
- }
- if (is_access_check_needed()) {
- FPrintF(out, " - access_check_needed\n");
- }
- FPrintF(out, " - back pointer: ");
- GetBackPointer()->ShortPrint(out);
- FPrintF(out, "\n - instance descriptors %i #%i: ",
- owns_descriptors(),
- NumberOfOwnDescriptors());
- instance_descriptors()->ShortPrint(out);
- if (HasTransitionArray()) {
- FPrintF(out, "\n - transitions: ");
- transitions()->ShortPrint(out);
- }
- FPrintF(out, "\n - prototype: ");
- prototype()->ShortPrint(out);
- FPrintF(out, "\n - constructor: ");
- constructor()->ShortPrint(out);
- FPrintF(out, "\n - code cache: ");
- code_cache()->ShortPrint(out);
- FPrintF(out, "\n");
-}
-
-
-void CodeCache::CodeCachePrint(FILE* out) {
- HeapObject::PrintHeader(out, "CodeCache");
- FPrintF(out, "\n - default_cache: ");
- default_cache()->ShortPrint(out);
- FPrintF(out, "\n - normal_type_cache: ");
- normal_type_cache()->ShortPrint(out);
-}
-
-
-void PolymorphicCodeCache::PolymorphicCodeCachePrint(FILE* out) {
- HeapObject::PrintHeader(out, "PolymorphicCodeCache");
- FPrintF(out, "\n - cache: ");
- cache()->ShortPrint(out);
-}
-
-
-void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "TypeFeedbackInfo");
- FPrintF(out, " - ic_total_count: %d, ic_with_type_info_count: %d\n",
- ic_total_count(), ic_with_type_info_count());
- FPrintF(out, " - type_feedback_cells: ");
- type_feedback_cells()->FixedArrayPrint(out);
-}
-
-
-void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(FILE* out) {
- HeapObject::PrintHeader(out, "AliasedArgumentsEntry");
- FPrintF(out, "\n - aliased_context_slot: %d", aliased_context_slot());
-}
-
-
-void FixedArray::FixedArrayPrint(FILE* out) {
- HeapObject::PrintHeader(out, "FixedArray");
- FPrintF(out, " - length: %d", length());
- for (int i = 0; i < length(); i++) {
- FPrintF(out, "\n [%d]: ", i);
- get(i)->ShortPrint(out);
- }
- FPrintF(out, "\n");
-}
-
-
-void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
- HeapObject::PrintHeader(out, "FixedDoubleArray");
- FPrintF(out, " - length: %d", length());
- for (int i = 0; i < length(); i++) {
- if (is_the_hole(i)) {
- FPrintF(out, "\n [%d]: <the hole>", i);
- } else {
- FPrintF(out, "\n [%d]: %g", i, get_scalar(i));
- }
- }
- FPrintF(out, "\n");
-}
-
-
-void JSValue::JSValuePrint(FILE* out) {
- HeapObject::PrintHeader(out, "ValueObject");
- value()->Print(out);
-}
-
-
-void JSMessageObject::JSMessageObjectPrint(FILE* out) {
- HeapObject::PrintHeader(out, "JSMessageObject");
- FPrintF(out, " - type: ");
- type()->ShortPrint(out);
- FPrintF(out, "\n - arguments: ");
- arguments()->ShortPrint(out);
- FPrintF(out, "\n - start_position: %d", start_position());
- FPrintF(out, "\n - end_position: %d", end_position());
- FPrintF(out, "\n - script: ");
- script()->ShortPrint(out);
- FPrintF(out, "\n - stack_trace: ");
- stack_trace()->ShortPrint(out);
- FPrintF(out, "\n - stack_frames: ");
- stack_frames()->ShortPrint(out);
- FPrintF(out, "\n");
-}
-
-
-void String::StringPrint(FILE* out) {
- if (StringShape(this).IsInternalized()) {
- FPrintF(out, "#");
- } else if (StringShape(this).IsCons()) {
- FPrintF(out, "c\"");
- } else {
- FPrintF(out, "\"");
- }
-
- const char truncated_epilogue[] = "...<truncated>";
- int len = length();
- if (!FLAG_use_verbose_printer) {
- if (len > 100) {
- len = 100 - sizeof(truncated_epilogue);
- }
- }
- for (int i = 0; i < len; i++) {
- FPrintF(out, "%c", Get(i));
- }
- if (len != length()) {
- FPrintF(out, "%s", truncated_epilogue);
- }
-
- if (!StringShape(this).IsInternalized()) FPrintF(out, "\"");
-}
-
-
-// This method is only meant to be called from gdb for debugging purposes.
-// Since the string can also be in two-byte encoding, non-ASCII characters
-// will be ignored in the output.
-char* String::ToAsciiArray() {
- // Static so that subsequent calls frees previously allocated space.
- // This also means that previous results will be overwritten.
- static char* buffer = NULL;
- if (buffer != NULL) free(buffer);
- buffer = new char[length()+1];
- WriteToFlat(this, reinterpret_cast<uint8_t*>(buffer), 0, length());
- buffer[length()] = 0;
- return buffer;
-}
-
-
-static const char* const weekdays[] = {
- "???", "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
-};
-
-void JSDate::JSDatePrint(FILE* out) {
- HeapObject::PrintHeader(out, "JSDate");
- FPrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- FPrintF(out, " - value = ");
- value()->Print(out);
- if (!year()->IsSmi()) {
- FPrintF(out, " - time = NaN\n");
- } else {
- FPrintF(out, " - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
- weekdays[weekday()->IsSmi() ? Smi::cast(weekday())->value() + 1 : 0],
- year()->IsSmi() ? Smi::cast(year())->value() : -1,
- month()->IsSmi() ? Smi::cast(month())->value() : -1,
- day()->IsSmi() ? Smi::cast(day())->value() : -1,
- hour()->IsSmi() ? Smi::cast(hour())->value() : -1,
- min()->IsSmi() ? Smi::cast(min())->value() : -1,
- sec()->IsSmi() ? Smi::cast(sec())->value() : -1);
- }
-}
-
-
-void JSProxy::JSProxyPrint(FILE* out) {
- HeapObject::PrintHeader(out, "JSProxy");
- FPrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- FPrintF(out, " - handler = ");
- handler()->Print(out);
- FPrintF(out, " - hash = ");
- hash()->Print(out);
- FPrintF(out, "\n");
-}
-
-
-void JSFunctionProxy::JSFunctionProxyPrint(FILE* out) {
- HeapObject::PrintHeader(out, "JSFunctionProxy");
- FPrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- FPrintF(out, " - handler = ");
- handler()->Print(out);
- FPrintF(out, " - call_trap = ");
- call_trap()->Print(out);
- FPrintF(out, " - construct_trap = ");
- construct_trap()->Print(out);
- FPrintF(out, "\n");
-}
-
-
-void JSWeakMap::JSWeakMapPrint(FILE* out) {
- HeapObject::PrintHeader(out, "JSWeakMap");
- FPrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- FPrintF(out, " - table = ");
- table()->ShortPrint(out);
- FPrintF(out, "\n");
-}
-
-
-void JSFunction::JSFunctionPrint(FILE* out) {
- HeapObject::PrintHeader(out, "Function");
- FPrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- FPrintF(out, " - initial_map = ");
- if (has_initial_map()) {
- initial_map()->ShortPrint(out);
- }
- FPrintF(out, "\n - shared_info = ");
- shared()->ShortPrint(out);
- FPrintF(out, "\n - name = ");
- shared()->name()->Print(out);
- FPrintF(out, "\n - context = ");
- unchecked_context()->ShortPrint(out);
- FPrintF(out, "\n - literals = ");
- literals()->ShortPrint(out);
- FPrintF(out, "\n - code = ");
- code()->ShortPrint(out);
- FPrintF(out, "\n");
-
- PrintProperties(out);
- PrintElements(out);
-
- FPrintF(out, "\n");
-}
-
-
-void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "SharedFunctionInfo");
- FPrintF(out, " - name: ");
- name()->ShortPrint(out);
- FPrintF(out, "\n - expected_nof_properties: %d", expected_nof_properties());
- FPrintF(out, "\n - instance class name = ");
- instance_class_name()->Print(out);
- FPrintF(out, "\n - code = ");
- code()->ShortPrint(out);
- if (HasSourceCode()) {
- FPrintF(out, "\n - source code = ");
- String* source = String::cast(Script::cast(script())->source());
- int start = start_position();
- int length = end_position() - start;
- SmartArrayPointer<char> source_string =
- source->ToCString(DISALLOW_NULLS,
- FAST_STRING_TRAVERSAL,
- start, length, NULL);
- FPrintF(out, "%s", *source_string);
- }
- // Script files are often large, hard to read.
- // FPrintF(out, "\n - script =");
- // script()->Print(out);
- FPrintF(out, "\n - function token position = %d", function_token_position());
- FPrintF(out, "\n - start position = %d", start_position());
- FPrintF(out, "\n - end position = %d", end_position());
- FPrintF(out, "\n - is expression = %d", is_expression());
- FPrintF(out, "\n - debug info = ");
- debug_info()->ShortPrint(out);
- FPrintF(out, "\n - length = %d", length());
- FPrintF(out, "\n - has_only_simple_this_property_assignments = %d",
- has_only_simple_this_property_assignments());
- FPrintF(out, "\n - this_property_assignments = ");
- this_property_assignments()->ShortPrint(out);
- FPrintF(out, "\n");
-}
-
-
-void JSGlobalProxy::JSGlobalProxyPrint(FILE* out) {
- FPrintF(out, "global_proxy ");
- JSObjectPrint(out);
- FPrintF(out, "native context : ");
- native_context()->ShortPrint(out);
- FPrintF(out, "\n");
-}
-
-
-void JSGlobalObject::JSGlobalObjectPrint(FILE* out) {
- FPrintF(out, "global ");
- JSObjectPrint(out);
- FPrintF(out, "native context : ");
- native_context()->ShortPrint(out);
- FPrintF(out, "\n");
-}
-
-
-void JSBuiltinsObject::JSBuiltinsObjectPrint(FILE* out) {
- FPrintF(out, "builtins ");
- JSObjectPrint(out);
-}
-
-
-void JSGlobalPropertyCell::JSGlobalPropertyCellPrint(FILE* out) {
- HeapObject::PrintHeader(out, "JSGlobalPropertyCell");
-}
-
-
-void Code::CodePrint(FILE* out) {
- HeapObject::PrintHeader(out, "Code");
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_use_verbose_printer) {
- Disassemble(NULL, out);
- }
-#endif
-}
-
-
-void Foreign::ForeignPrint(FILE* out) {
- FPrintF(out, "foreign address : %p", foreign_address());
-}
-
-
-void ExecutableAccessorInfo::ExecutableAccessorInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "ExecutableAccessorInfo");
- FPrintF(out, "\n - name: ");
- name()->ShortPrint(out);
- FPrintF(out, "\n - flag: ");
- flag()->ShortPrint(out);
- FPrintF(out, "\n - getter: ");
- getter()->ShortPrint(out);
- FPrintF(out, "\n - setter: ");
- setter()->ShortPrint(out);
- FPrintF(out, "\n - data: ");
- data()->ShortPrint(out);
-}
-
-
-void DeclaredAccessorInfo::DeclaredAccessorInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "DeclaredAccessorInfo");
- FPrintF(out, "\n - name: ");
- name()->ShortPrint(out);
- FPrintF(out, "\n - flag: ");
- flag()->ShortPrint(out);
- FPrintF(out, "\n - descriptor: ");
- descriptor()->ShortPrint(out);
-}
-
-
-void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorPrint(FILE* out) {
- HeapObject::PrintHeader(out, "DeclaredAccessorDescriptor");
- FPrintF(out, "\n - internal field: ");
- internal_field()->ShortPrint(out);
-}
-
-
-void AccessorPair::AccessorPairPrint(FILE* out) {
- HeapObject::PrintHeader(out, "AccessorPair");
- FPrintF(out, "\n - getter: ");
- getter()->ShortPrint(out);
- FPrintF(out, "\n - setter: ");
- setter()->ShortPrint(out);
-}
-
-
-void AccessCheckInfo::AccessCheckInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "AccessCheckInfo");
- FPrintF(out, "\n - named_callback: ");
- named_callback()->ShortPrint(out);
- FPrintF(out, "\n - indexed_callback: ");
- indexed_callback()->ShortPrint(out);
- FPrintF(out, "\n - data: ");
- data()->ShortPrint(out);
-}
-
-
-void InterceptorInfo::InterceptorInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "InterceptorInfo");
- FPrintF(out, "\n - getter: ");
- getter()->ShortPrint(out);
- FPrintF(out, "\n - setter: ");
- setter()->ShortPrint(out);
- FPrintF(out, "\n - query: ");
- query()->ShortPrint(out);
- FPrintF(out, "\n - deleter: ");
- deleter()->ShortPrint(out);
- FPrintF(out, "\n - enumerator: ");
- enumerator()->ShortPrint(out);
- FPrintF(out, "\n - data: ");
- data()->ShortPrint(out);
-}
-
-
-void CallHandlerInfo::CallHandlerInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "CallHandlerInfo");
- FPrintF(out, "\n - callback: ");
- callback()->ShortPrint(out);
- FPrintF(out, "\n - data: ");
- data()->ShortPrint(out);
- FPrintF(out, "\n - call_stub_cache: ");
-}
-
-
-void FunctionTemplateInfo::FunctionTemplateInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "FunctionTemplateInfo");
- FPrintF(out, "\n - class name: ");
- class_name()->ShortPrint(out);
- FPrintF(out, "\n - tag: ");
- tag()->ShortPrint(out);
- FPrintF(out, "\n - property_list: ");
- property_list()->ShortPrint(out);
- FPrintF(out, "\n - serial_number: ");
- serial_number()->ShortPrint(out);
- FPrintF(out, "\n - call_code: ");
- call_code()->ShortPrint(out);
- FPrintF(out, "\n - property_accessors: ");
- property_accessors()->ShortPrint(out);
- FPrintF(out, "\n - prototype_template: ");
- prototype_template()->ShortPrint(out);
- FPrintF(out, "\n - parent_template: ");
- parent_template()->ShortPrint(out);
- FPrintF(out, "\n - named_property_handler: ");
- named_property_handler()->ShortPrint(out);
- FPrintF(out, "\n - indexed_property_handler: ");
- indexed_property_handler()->ShortPrint(out);
- FPrintF(out, "\n - instance_template: ");
- instance_template()->ShortPrint(out);
- FPrintF(out, "\n - signature: ");
- signature()->ShortPrint(out);
- FPrintF(out, "\n - access_check_info: ");
- access_check_info()->ShortPrint(out);
- FPrintF(out, "\n - hidden_prototype: %s",
- hidden_prototype() ? "true" : "false");
- FPrintF(out, "\n - undetectable: %s", undetectable() ? "true" : "false");
- FPrintF(out, "\n - need_access_check: %s",
- needs_access_check() ? "true" : "false");
-}
-
-
-void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "ObjectTemplateInfo");
- FPrintF(out, " - tag: ");
- tag()->ShortPrint(out);
- FPrintF(out, "\n - property_list: ");
- property_list()->ShortPrint(out);
- FPrintF(out, "\n - constructor: ");
- constructor()->ShortPrint(out);
- FPrintF(out, "\n - internal_field_count: ");
- internal_field_count()->ShortPrint(out);
- FPrintF(out, "\n");
-}
-
-
-void SignatureInfo::SignatureInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "SignatureInfo");
- FPrintF(out, "\n - receiver: ");
- receiver()->ShortPrint(out);
- FPrintF(out, "\n - args: ");
- args()->ShortPrint(out);
-}
-
-
-void TypeSwitchInfo::TypeSwitchInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "TypeSwitchInfo");
- FPrintF(out, "\n - types: ");
- types()->ShortPrint(out);
-}
-
-
-void AllocationSiteInfo::AllocationSiteInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "AllocationSiteInfo");
- FPrintF(out, " - payload: ");
- if (payload()->IsJSGlobalPropertyCell()) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(payload());
- Object* cell_contents = cell->value();
- if (cell_contents->IsSmi()) {
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(cell_contents)->value());
- FPrintF(out, "Array allocation with ElementsKind ");
- PrintElementsKind(out, kind);
- FPrintF(out, "\n");
- return;
- }
- } else if (payload()->IsJSArray()) {
- FPrintF(out, "Array literal ");
- payload()->ShortPrint(out);
- FPrintF(out, "\n");
- return;
- }
-
- FPrintF(out, "unknown payload ");
- payload()->ShortPrint(out);
- FPrintF(out, "\n");
-}
-
-
-void Script::ScriptPrint(FILE* out) {
- HeapObject::PrintHeader(out, "Script");
- FPrintF(out, "\n - source: ");
- source()->ShortPrint(out);
- FPrintF(out, "\n - name: ");
- name()->ShortPrint(out);
- FPrintF(out, "\n - line_offset: ");
- line_offset()->ShortPrint(out);
- FPrintF(out, "\n - column_offset: ");
- column_offset()->ShortPrint(out);
- FPrintF(out, "\n - type: ");
- type()->ShortPrint(out);
- FPrintF(out, "\n - id: ");
- id()->ShortPrint(out);
- FPrintF(out, "\n - data: ");
- data()->ShortPrint(out);
- FPrintF(out, "\n - context data: ");
- context_data()->ShortPrint(out);
- FPrintF(out, "\n - wrapper: ");
- wrapper()->ShortPrint(out);
- FPrintF(out, "\n - compilation type: ");
- compilation_type()->ShortPrint(out);
- FPrintF(out, "\n - line ends: ");
- line_ends()->ShortPrint(out);
- FPrintF(out, "\n - eval from shared: ");
- eval_from_shared()->ShortPrint(out);
- FPrintF(out, "\n - eval from instructions offset: ");
- eval_from_instructions_offset()->ShortPrint(out);
- FPrintF(out, "\n");
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void DebugInfo::DebugInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "DebugInfo");
- FPrintF(out, "\n - shared: ");
- shared()->ShortPrint(out);
- FPrintF(out, "\n - original_code: ");
- original_code()->ShortPrint(out);
- FPrintF(out, "\n - code: ");
- code()->ShortPrint(out);
- FPrintF(out, "\n - break_points: ");
- break_points()->Print(out);
-}
-
-
-void BreakPointInfo::BreakPointInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "BreakPointInfo");
- FPrintF(out, "\n - code_position: %d", code_position()->value());
- FPrintF(out, "\n - source_position: %d", source_position()->value());
- FPrintF(out, "\n - statement_position: %d", statement_position()->value());
- FPrintF(out, "\n - break_point_objects: ");
- break_point_objects()->ShortPrint(out);
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-void DescriptorArray::PrintDescriptors(FILE* out) {
- FPrintF(out, "Descriptor array %d\n", number_of_descriptors());
- for (int i = 0; i < number_of_descriptors(); i++) {
- FPrintF(out, " %d: ", i);
- Descriptor desc;
- Get(i, &desc);
- desc.Print(out);
- }
- FPrintF(out, "\n");
-}
-
-
-void TransitionArray::PrintTransitions(FILE* out) {
- FPrintF(out, "Transition array %d\n", number_of_transitions());
- for (int i = 0; i < number_of_transitions(); i++) {
- FPrintF(out, " %d: ", i);
- GetKey(i)->StringPrint(out);
- FPrintF(out, ": ");
- switch (GetTargetDetails(i).type()) {
- case FIELD: {
- FPrintF(out, " (transition to field)\n");
- break;
- }
- case CONSTANT_FUNCTION:
- FPrintF(out, " (transition to constant function)\n");
- break;
- case CALLBACKS:
- FPrintF(out, " (transition to callback)\n");
- break;
- // Values below are never in the target descriptor array.
- case NORMAL:
- case HANDLER:
- case INTERCEPTOR:
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- }
- FPrintF(out, "\n");
-}
-
-
-#endif // OBJECT_PRINT
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/objects-visiting-inl.h b/src/3rdparty/v8/src/objects-visiting-inl.h
deleted file mode 100644
index beb07b5..0000000
--- a/src/3rdparty/v8/src/objects-visiting-inl.h
+++ /dev/null
@@ -1,725 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_OBJECTS_VISITING_INL_H_
-#define V8_OBJECTS_VISITING_INL_H_
-
-
-namespace v8 {
-namespace internal {
-
-template<typename StaticVisitor>
-void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
- table_.Register(kVisitShortcutCandidate,
- &FixedBodyVisitor<StaticVisitor,
- ConsString::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitConsString,
- &FixedBodyVisitor<StaticVisitor,
- ConsString::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitSlicedString,
- &FixedBodyVisitor<StaticVisitor,
- SlicedString::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitFixedArray,
- &FlexibleBodyVisitor<StaticVisitor,
- FixedArray::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
-
- table_.Register(kVisitNativeContext,
- &FixedBodyVisitor<StaticVisitor,
- Context::ScavengeBodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitByteArray, &VisitByteArray);
-
- table_.Register(kVisitSharedFunctionInfo,
- &FixedBodyVisitor<StaticVisitor,
- SharedFunctionInfo::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitSeqOneByteString, &VisitSeqOneByteString);
-
- table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
-
- table_.Register(kVisitJSFunction, &VisitJSFunction);
-
- table_.Register(kVisitFreeSpace, &VisitFreeSpace);
-
- table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
-
- table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
-
- table_.template RegisterSpecializations<DataObjectVisitor,
- kVisitDataObject,
- kVisitDataObjectGeneric>();
-
- table_.template RegisterSpecializations<JSObjectVisitor,
- kVisitJSObject,
- kVisitJSObjectGeneric>();
- table_.template RegisterSpecializations<StructVisitor,
- kVisitStruct,
- kVisitStructGeneric>();
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::Initialize() {
- table_.Register(kVisitShortcutCandidate,
- &FixedBodyVisitor<StaticVisitor,
- ConsString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitConsString,
- &FixedBodyVisitor<StaticVisitor,
- ConsString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitSlicedString,
- &FixedBodyVisitor<StaticVisitor,
- SlicedString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitFixedArray, &FixedArrayVisitor::Visit);
-
- table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitNativeContext, &VisitNativeContext);
-
- table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitJSWeakMap, &StaticVisitor::VisitJSWeakMap);
-
- table_.Register(kVisitOddball,
- &FixedBodyVisitor<StaticVisitor,
- Oddball::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitMap, &VisitMap);
-
- table_.Register(kVisitCode, &VisitCode);
-
- table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
-
- table_.Register(kVisitJSFunction, &VisitJSFunction);
-
- // Registration for kVisitJSRegExp is done by StaticVisitor.
-
- table_.Register(kVisitPropertyCell,
- &FixedBodyVisitor<StaticVisitor,
- JSGlobalPropertyCell::BodyDescriptor,
- void>::Visit);
-
- table_.template RegisterSpecializations<DataObjectVisitor,
- kVisitDataObject,
- kVisitDataObjectGeneric>();
-
- table_.template RegisterSpecializations<JSObjectVisitor,
- kVisitJSObject,
- kVisitJSObjectGeneric>();
-
- table_.template RegisterSpecializations<StructObjectVisitor,
- kVisitStruct,
- kVisitStructGeneric>();
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCodeEntry(
- Heap* heap, Address entry_address) {
- Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
- heap->mark_compact_collector()->RecordCodeEntrySlot(entry_address, code);
- StaticVisitor::MarkObject(heap, code);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
- Heap* heap, RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- ASSERT(!rinfo->target_object()->IsConsString());
- HeapObject* object = HeapObject::cast(rinfo->target_object());
- if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps ||
- rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
- !object->IsMap() || !Map::cast(object)->CanTransition()) {
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
- StaticVisitor::MarkObject(heap, object);
- }
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitGlobalPropertyCell(
- Heap* heap, RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
- JSGlobalPropertyCell* cell = rinfo->target_cell();
- StaticVisitor::MarkObject(heap, cell);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(
- Heap* heap, RelocInfo* rinfo) {
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
- StaticVisitor::MarkObject(heap, target);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(
- Heap* heap, RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- // Monomorphic ICs are preserved when possible, but need to be flushed
- // when they might be keeping a Context alive, or when the heap is about
- // to be serialized.
- if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
- && (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
- target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() ||
- Serializer::enabled() || target->ic_age() != heap->global_ic_age())) {
- IC::Clear(rinfo->pc());
- target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- }
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
- StaticVisitor::MarkObject(heap, target);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
- Heap* heap, RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
- Code* target = rinfo->code_age_stub();
- ASSERT(target != NULL);
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
- StaticVisitor::MarkObject(heap, target);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
- Map* map, HeapObject* object) {
- FixedBodyVisitor<StaticVisitor,
- Context::MarkCompactBodyDescriptor,
- void>::Visit(map, object);
-
- MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
- for (int idx = Context::FIRST_WEAK_SLOT;
- idx < Context::NATIVE_CONTEXT_SLOTS;
- ++idx) {
- Object** slot =
- HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
- collector->RecordSlot(slot, slot, *slot);
- }
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitMap(
- Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- Map* map_object = Map::cast(object);
-
- // Clears the cache of ICs related to this map.
- if (FLAG_cleanup_code_caches_at_gc) {
- map_object->ClearCodeCache(heap);
- }
-
- // When map collection is enabled we have to mark through map's transitions
- // and back pointers in a special way to make these links weak.
- if (FLAG_collect_maps && map_object->CanTransition()) {
- MarkMapContents(heap, map_object);
- } else {
- StaticVisitor::VisitPointers(heap,
- HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
- }
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCode(
- Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- Code* code = Code::cast(object);
- if (FLAG_cleanup_code_caches_at_gc) {
- code->ClearTypeFeedbackCells(heap);
- }
- if (FLAG_age_code && !Serializer::enabled()) {
- code->MakeOlder(heap->mark_compact_collector()->marking_parity());
- }
- code->CodeIterateBody<StaticVisitor>(heap);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
- Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
- if (shared->ic_age() != heap->global_ic_age()) {
- shared->ResetForNewContext(heap->global_ic_age());
- }
- if (FLAG_cache_optimized_code) {
- // Flush optimized code map on major GC.
- // TODO(mstarzinger): We may experiment with rebuilding it or with
- // retaining entries which should survive as we iterate through
- // optimized functions anyway.
- shared->ClearOptimizedCodeMap();
- }
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (collector->is_code_flushing_enabled()) {
- if (IsFlushable(heap, shared)) {
- // This function's code looks flushable. But we have to postpone
- // the decision until we see all functions that point to the same
- // SharedFunctionInfo because some of them might be optimized.
- // That would also make the non-optimized version of the code
- // non-flushable, because it is required for bailing out from
- // optimized code.
- collector->code_flusher()->AddCandidate(shared);
- // Treat the reference to the code object weakly.
- VisitSharedFunctionInfoWeakCode(heap, object);
- return;
- }
- }
- VisitSharedFunctionInfoStrongCode(heap, object);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(
- Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- JSFunction* function = JSFunction::cast(object);
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (collector->is_code_flushing_enabled()) {
- if (IsFlushable(heap, function)) {
- // This function's code looks flushable. But we have to postpone
- // the decision until we see all functions that point to the same
- // SharedFunctionInfo because some of them might be optimized.
- // That would also make the non-optimized version of the code
- // non-flushable, because it is required for bailing out from
- // optimized code.
- collector->code_flusher()->AddCandidate(function);
- // Visit shared function info immediately to avoid double checking
- // of its flushability later. This is just an optimization because
- // the shared function info would eventually be visited.
- SharedFunctionInfo* shared = function->unchecked_shared();
- if (StaticVisitor::MarkObjectWithoutPush(heap, shared)) {
- StaticVisitor::MarkObject(heap, shared->map());
- VisitSharedFunctionInfoWeakCode(heap, shared);
- }
- // Treat the reference to the code object weakly.
- VisitJSFunctionWeakCode(heap, object);
- return;
- } else {
- // Visit all unoptimized code objects to prevent flushing them.
- StaticVisitor::MarkObject(heap, function->shared()->code());
- if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
- MarkInlinedFunctionsCode(heap, function->code());
- }
- }
- }
- VisitJSFunctionStrongCode(heap, object);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(
- Map* map, HeapObject* object) {
- int last_property_offset =
- JSRegExp::kSize + kPointerSize * map->inobject_properties();
- StaticVisitor::VisitPointers(map->GetHeap(),
- HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
- HeapObject::RawField(object, last_property_offset));
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
- Heap* heap, Map* map) {
- // Make sure that the back pointer stored either in the map itself or
- // inside its transitions array is marked. Skip recording the back
- // pointer slot since map space is not compacted.
- StaticVisitor::MarkObject(heap, HeapObject::cast(map->GetBackPointer()));
-
- // Treat pointers in the transitions array as weak and also mark that
- // array to prevent visiting it later. Skip recording the transition
- // array slot, since it will be implicitly recorded when the pointer
- // fields of this map are visited.
- TransitionArray* transitions = map->unchecked_transition_array();
- if (transitions->IsTransitionArray()) {
- MarkTransitionArray(heap, transitions);
- } else {
- // Already marked by marking map->GetBackPointer() above.
- ASSERT(transitions->IsMap() || transitions->IsUndefined());
- }
-
- // Since descriptor arrays are potentially shared, ensure that only the
- // descriptors that appeared for this map are marked. The first time a
- // non-empty descriptor array is marked, its header is also visited. The slot
- // holding the descriptor array will be implicitly recorded when the pointer
- // fields of this map are visited.
- DescriptorArray* descriptors = map->instance_descriptors();
- if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
- descriptors->length() > 0) {
- StaticVisitor::VisitPointers(heap,
- descriptors->GetFirstElementAddress(),
- descriptors->GetDescriptorEndSlot(0));
- }
- int start = 0;
- int end = map->NumberOfOwnDescriptors();
- Object* back_pointer = map->GetBackPointer();
- if (!back_pointer->IsUndefined()) {
- Map* parent_map = Map::cast(back_pointer);
- if (descriptors == parent_map->instance_descriptors()) {
- start = parent_map->NumberOfOwnDescriptors();
- }
- }
- if (start < end) {
- StaticVisitor::VisitPointers(heap,
- descriptors->GetDescriptorStartSlot(start),
- descriptors->GetDescriptorEndSlot(end));
- }
-
- // Mark prototype dependent codes array but do not push it onto marking
- // stack, this will make references from it weak. We will clean dead
- // codes when we iterate over maps in ClearNonLiveTransitions.
- Object** slot = HeapObject::RawField(map, Map::kDependentCodeOffset);
- HeapObject* obj = HeapObject::cast(*slot);
- heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
- StaticVisitor::MarkObjectWithoutPush(heap, obj);
-
- // Mark the pointer fields of the Map. Since the transitions array has
- // been marked already, it is fine that one of these fields contains a
- // pointer to it.
- StaticVisitor::VisitPointers(heap,
- HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray(
- Heap* heap, TransitionArray* transitions) {
- if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return;
-
- // Simple transitions do not have keys nor prototype transitions.
- if (transitions->IsSimpleTransition()) return;
-
- if (transitions->HasPrototypeTransitions()) {
- // Mark prototype transitions array but do not push it onto marking
- // stack, this will make references from it weak. We will clean dead
- // prototype transitions in ClearNonLiveTransitions.
- Object** slot = transitions->GetPrototypeTransitionsSlot();
- HeapObject* obj = HeapObject::cast(*slot);
- heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
- StaticVisitor::MarkObjectWithoutPush(heap, obj);
- }
-
- for (int i = 0; i < transitions->number_of_transitions(); ++i) {
- StaticVisitor::VisitPointer(heap, transitions->GetKeySlot(i));
- }
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode(
- Heap* heap, Code* code) {
- // For optimized functions we should retain both non-optimized version
- // of its code and non-optimized version of all inlined functions.
- // This is required to support bailing out from inlined code.
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- FixedArray* literals = data->LiteralArray();
- for (int i = 0, count = data->InlinedFunctionCount()->value();
- i < count;
- i++) {
- JSFunction* inlined = JSFunction::cast(literals->get(i));
- StaticVisitor::MarkObject(heap, inlined->shared()->code());
- }
-}
-
-
-inline static bool IsValidNonBuiltinContext(Object* context) {
- return context->IsContext() &&
- !Context::cast(context)->global_object()->IsJSBuiltinsObject();
-}
-
-
-inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
- Object* undefined = heap->undefined_value();
- return (info->script() != undefined) &&
- (reinterpret_cast<Script*>(info->script())->source() != undefined);
-}
-
-
-template<typename StaticVisitor>
-bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
- Heap* heap, JSFunction* function) {
- SharedFunctionInfo* shared_info = function->unchecked_shared();
-
- // Code is either on stack, in compilation cache or referenced
- // by optimized version of function.
- MarkBit code_mark = Marking::MarkBitFrom(function->code());
- if (code_mark.Get()) {
- if (!FLAG_age_code) {
- if (!Marking::MarkBitFrom(shared_info).Get()) {
- shared_info->set_code_age(0);
- }
- }
- return false;
- }
-
- // The function must have a valid context and not be a builtin.
- if (!IsValidNonBuiltinContext(function->unchecked_context())) {
- return false;
- }
-
- // We do not (yet) flush code for optimized functions.
- if (function->code() != shared_info->code()) {
- return false;
- }
-
- // Check age of optimized code.
- if (FLAG_age_code && !function->code()->IsOld()) {
- return false;
- }
-
- return IsFlushable(heap, shared_info);
-}
-
-
-template<typename StaticVisitor>
-bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
- Heap* heap, SharedFunctionInfo* shared_info) {
- // Code is either on stack, in compilation cache or referenced
- // by optimized version of function.
- MarkBit code_mark = Marking::MarkBitFrom(shared_info->code());
- if (code_mark.Get()) {
- return false;
- }
-
- // The function must be compiled and have the source code available,
- // to be able to recompile it in case we need the function again.
- if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
- return false;
- }
-
- // We never flush code for API functions.
- Object* function_data = shared_info->function_data();
- if (function_data->IsFunctionTemplateInfo()) {
- return false;
- }
-
- // Only flush code for functions.
- if (shared_info->code()->kind() != Code::FUNCTION) {
- return false;
- }
-
- // Function must be lazy compilable.
- if (!shared_info->allows_lazy_compilation()) {
- return false;
- }
-
- // If this is a full script wrapped in a function we do no flush the code.
- if (shared_info->is_toplevel()) {
- return false;
- }
-
- if (FLAG_age_code) {
- return shared_info->code()->IsOld();
- } else {
- // How many collections newly compiled code object will survive before being
- // flushed.
- static const int kCodeAgeThreshold = 5;
-
- // Age this shared function info.
- if (shared_info->code_age() < kCodeAgeThreshold) {
- shared_info->set_code_age(shared_info->code_age() + 1);
- return false;
- }
- return true;
- }
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
- Heap* heap, HeapObject* object) {
- StaticVisitor::BeforeVisitingSharedFunctionInfo(object);
- Object** start_slot =
- HeapObject::RawField(object,
- SharedFunctionInfo::BodyDescriptor::kStartOffset);
- Object** end_slot =
- HeapObject::RawField(object,
- SharedFunctionInfo::BodyDescriptor::kEndOffset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
- Heap* heap, HeapObject* object) {
- StaticVisitor::BeforeVisitingSharedFunctionInfo(object);
- Object** name_slot =
- HeapObject::RawField(object, SharedFunctionInfo::kNameOffset);
- StaticVisitor::VisitPointer(heap, name_slot);
-
- // Skip visiting kCodeOffset as it is treated weakly here.
- STATIC_ASSERT(SharedFunctionInfo::kNameOffset + kPointerSize ==
- SharedFunctionInfo::kCodeOffset);
- STATIC_ASSERT(SharedFunctionInfo::kCodeOffset + kPointerSize ==
- SharedFunctionInfo::kOptimizedCodeMapOffset);
-
- Object** start_slot =
- HeapObject::RawField(object,
- SharedFunctionInfo::kOptimizedCodeMapOffset);
- Object** end_slot =
- HeapObject::RawField(object,
- SharedFunctionInfo::BodyDescriptor::kEndOffset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
- Heap* heap, HeapObject* object) {
- Object** start_slot =
- HeapObject::RawField(object, JSFunction::kPropertiesOffset);
- Object** end_slot =
- HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-
- VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
- STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
- JSFunction::kPrototypeOrInitialMapOffset);
-
- start_slot =
- HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
- end_slot =
- HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
- Heap* heap, HeapObject* object) {
- Object** start_slot =
- HeapObject::RawField(object, JSFunction::kPropertiesOffset);
- Object** end_slot =
- HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-
- // Skip visiting kCodeEntryOffset as it is treated weakly here.
- STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
- JSFunction::kPrototypeOrInitialMapOffset);
-
- start_slot =
- HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
- end_slot =
- HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-}
-
-
-void Code::CodeIterateBody(ObjectVisitor* v) {
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
- RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
- // There are two places where we iterate code bodies: here and the
- // templated CodeIterateBody (below). They should be kept in sync.
- IteratePointer(v, kRelocationInfoOffset);
- IteratePointer(v, kHandlerTableOffset);
- IteratePointer(v, kDeoptimizationDataOffset);
- IteratePointer(v, kTypeFeedbackInfoOffset);
-
- RelocIterator it(this, mode_mask);
- for (; !it.done(); it.next()) {
- it.rinfo()->Visit(v);
- }
-}
-
-
-template<typename StaticVisitor>
-void Code::CodeIterateBody(Heap* heap) {
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
- RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
- // There are two places where we iterate code bodies: here and the non-
- // templated CodeIterateBody (above). They should be kept in sync.
- StaticVisitor::VisitPointer(
- heap,
- reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
- StaticVisitor::VisitPointer(
- heap,
- reinterpret_cast<Object**>(this->address() + kHandlerTableOffset));
- StaticVisitor::VisitPointer(
- heap,
- reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
- StaticVisitor::VisitPointer(
- heap,
- reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
-
- RelocIterator it(this, mode_mask);
- for (; !it.done(); it.next()) {
- it.rinfo()->template Visit<StaticVisitor>(heap);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_OBJECTS_VISITING_INL_H_
diff --git a/src/3rdparty/v8/src/objects-visiting.cc b/src/3rdparty/v8/src/objects-visiting.cc
deleted file mode 100644
index 088f5eb..0000000
--- a/src/3rdparty/v8/src/objects-visiting.cc
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ic-inl.h"
-#include "objects-visiting.h"
-
-namespace v8 {
-namespace internal {
-
-
-static inline bool IsShortcutCandidate(int type) {
- return ((type & kShortcutTypeMask) == kShortcutTypeTag);
-}
-
-
-StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
- int instance_type,
- int instance_size) {
- if (instance_type < FIRST_NONSTRING_TYPE) {
- switch (instance_type & kStringRepresentationMask) {
- case kSeqStringTag:
- if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
- return kVisitSeqOneByteString;
- } else {
- return kVisitSeqTwoByteString;
- }
-
- case kConsStringTag:
- if (IsShortcutCandidate(instance_type)) {
- return kVisitShortcutCandidate;
- } else {
- return kVisitConsString;
- }
-
- case kSlicedStringTag:
- return kVisitSlicedString;
-
- case kExternalStringTag:
- return GetVisitorIdForSize(kVisitDataObject,
- kVisitDataObjectGeneric,
- instance_size);
- }
- UNREACHABLE();
- }
-
- switch (instance_type) {
- case BYTE_ARRAY_TYPE:
- return kVisitByteArray;
-
- case FREE_SPACE_TYPE:
- return kVisitFreeSpace;
-
- case FIXED_ARRAY_TYPE:
- return kVisitFixedArray;
-
- case FIXED_DOUBLE_ARRAY_TYPE:
- return kVisitFixedDoubleArray;
-
- case ODDBALL_TYPE:
- return kVisitOddball;
-
- case MAP_TYPE:
- return kVisitMap;
-
- case CODE_TYPE:
- return kVisitCode;
-
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- return kVisitPropertyCell;
-
- case JS_SET_TYPE:
- return GetVisitorIdForSize(kVisitStruct,
- kVisitStructGeneric,
- JSSet::kSize);
-
- case JS_MAP_TYPE:
- return GetVisitorIdForSize(kVisitStruct,
- kVisitStructGeneric,
- JSMap::kSize);
-
- case JS_WEAK_MAP_TYPE:
- return kVisitJSWeakMap;
-
- case JS_REGEXP_TYPE:
- return kVisitJSRegExp;
-
- case SHARED_FUNCTION_INFO_TYPE:
- return kVisitSharedFunctionInfo;
-
- case JS_PROXY_TYPE:
- return GetVisitorIdForSize(kVisitStruct,
- kVisitStructGeneric,
- JSProxy::kSize);
-
- case JS_FUNCTION_PROXY_TYPE:
- return GetVisitorIdForSize(kVisitStruct,
- kVisitStructGeneric,
- JSFunctionProxy::kSize);
-
- case FOREIGN_TYPE:
- return GetVisitorIdForSize(kVisitDataObject,
- kVisitDataObjectGeneric,
- Foreign::kSize);
-
- case SYMBOL_TYPE:
- return GetVisitorIdForSize(kVisitDataObject,
- kVisitDataObjectGeneric,
- Symbol::kSize);
-
- case FILLER_TYPE:
- return kVisitDataObjectGeneric;
-
- case JS_OBJECT_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_MODULE_TYPE:
- case JS_VALUE_TYPE:
- case JS_DATE_TYPE:
- case JS_ARRAY_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- return GetVisitorIdForSize(kVisitJSObject,
- kVisitJSObjectGeneric,
- instance_size);
-
- case JS_FUNCTION_TYPE:
- return kVisitJSFunction;
-
- case HEAP_NUMBER_TYPE:
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- case EXTERNAL_BYTE_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- case EXTERNAL_SHORT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- case EXTERNAL_INT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- return GetVisitorIdForSize(kVisitDataObject,
- kVisitDataObjectGeneric,
- instance_size);
-
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE:
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- return GetVisitorIdForSize(kVisitStruct,
- kVisitStructGeneric,
- instance_size);
-
- default:
- UNREACHABLE();
- return kVisitorIdCount;
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/objects-visiting.h b/src/3rdparty/v8/src/objects-visiting.h
deleted file mode 100644
index 9b2422c..0000000
--- a/src/3rdparty/v8/src/objects-visiting.h
+++ /dev/null
@@ -1,462 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_OBJECTS_VISITING_H_
-#define V8_OBJECTS_VISITING_H_
-
-#include "allocation.h"
-
-// This file provides base classes and auxiliary methods for defining
-// static object visitors used during GC.
-// Visiting HeapObject body with a normal ObjectVisitor requires performing
-// two switches on object's instance type to determine object size and layout
-// and one or more virtual method calls on visitor itself.
-// Static visitor is different: it provides a dispatch table which contains
-// pointers to specialized visit functions. Each map has the visitor_id
-// field which contains an index of specialized visitor to use.
-
-namespace v8 {
-namespace internal {
-
-
-// Base class for all static visitors.
-class StaticVisitorBase : public AllStatic {
- public:
-#define VISITOR_ID_LIST(V) \
- V(SeqOneByteString) \
- V(SeqTwoByteString) \
- V(ShortcutCandidate) \
- V(ByteArray) \
- V(FreeSpace) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(NativeContext) \
- V(DataObject2) \
- V(DataObject3) \
- V(DataObject4) \
- V(DataObject5) \
- V(DataObject6) \
- V(DataObject7) \
- V(DataObject8) \
- V(DataObject9) \
- V(DataObjectGeneric) \
- V(JSObject2) \
- V(JSObject3) \
- V(JSObject4) \
- V(JSObject5) \
- V(JSObject6) \
- V(JSObject7) \
- V(JSObject8) \
- V(JSObject9) \
- V(JSObjectGeneric) \
- V(Struct2) \
- V(Struct3) \
- V(Struct4) \
- V(Struct5) \
- V(Struct6) \
- V(Struct7) \
- V(Struct8) \
- V(Struct9) \
- V(StructGeneric) \
- V(ConsString) \
- V(SlicedString) \
- V(Oddball) \
- V(Code) \
- V(Map) \
- V(PropertyCell) \
- V(SharedFunctionInfo) \
- V(JSFunction) \
- V(JSWeakMap) \
- V(JSRegExp)
-
- // For data objects, JS objects and structs along with generic visitor which
- // can visit object of any size we provide visitors specialized by
- // object size in words.
- // Ids of specialized visitors are declared in a linear order (without
- // holes) starting from the id of visitor specialized for 2 words objects
- // (base visitor id) and ending with the id of generic visitor.
- // Method GetVisitorIdForSize depends on this ordering to calculate visitor
- // id of specialized visitor from given instance size, base visitor id and
- // generic visitor's id.
- enum VisitorId {
-#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
- VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
-#undef VISITOR_ID_ENUM_DECL
- kVisitorIdCount,
- kVisitDataObject = kVisitDataObject2,
- kVisitJSObject = kVisitJSObject2,
- kVisitStruct = kVisitStruct2,
- kMinObjectSizeInWords = 2
- };
-
- // Visitor ID should fit in one byte.
- STATIC_ASSERT(kVisitorIdCount <= 256);
-
- // Determine which specialized visitor should be used for given instance type
- // and instance type.
- static VisitorId GetVisitorId(int instance_type, int instance_size);
-
- static VisitorId GetVisitorId(Map* map) {
- return GetVisitorId(map->instance_type(), map->instance_size());
- }
-
- // For visitors that allow specialization by size calculate VisitorId based
- // on size, base visitor id and generic visitor id.
- static VisitorId GetVisitorIdForSize(VisitorId base,
- VisitorId generic,
- int object_size) {
- ASSERT((base == kVisitDataObject) ||
- (base == kVisitStruct) ||
- (base == kVisitJSObject));
- ASSERT(IsAligned(object_size, kPointerSize));
- ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
- ASSERT(object_size < Page::kMaxNonCodeHeapObjectSize);
-
- const VisitorId specialization = static_cast<VisitorId>(
- base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
-
- return Min(specialization, generic);
- }
-};
-
-
-template<typename Callback>
-class VisitorDispatchTable {
- public:
- void CopyFrom(VisitorDispatchTable* other) {
- // We are not using memcpy to guarantee that during update
- // every element of callbacks_ array will remain correct
- // pointer (memcpy might be implemented as a byte copying loop).
- for (int i = 0; i < StaticVisitorBase::kVisitorIdCount; i++) {
- NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
- }
- }
-
- inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) {
- return reinterpret_cast<Callback>(callbacks_[id]);
- }
-
- inline Callback GetVisitor(Map* map) {
- return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
- }
-
- void Register(StaticVisitorBase::VisitorId id, Callback callback) {
- ASSERT(id < StaticVisitorBase::kVisitorIdCount); // id is unsigned.
- callbacks_[id] = reinterpret_cast<AtomicWord>(callback);
- }
-
- template<typename Visitor,
- StaticVisitorBase::VisitorId base,
- StaticVisitorBase::VisitorId generic,
- int object_size_in_words>
- void RegisterSpecialization() {
- static const int size = object_size_in_words * kPointerSize;
- Register(StaticVisitorBase::GetVisitorIdForSize(base, generic, size),
- &Visitor::template VisitSpecialized<size>);
- }
-
-
- template<typename Visitor,
- StaticVisitorBase::VisitorId base,
- StaticVisitorBase::VisitorId generic>
- void RegisterSpecializations() {
- STATIC_ASSERT(
- (generic - base + StaticVisitorBase::kMinObjectSizeInWords) == 10);
- RegisterSpecialization<Visitor, base, generic, 2>();
- RegisterSpecialization<Visitor, base, generic, 3>();
- RegisterSpecialization<Visitor, base, generic, 4>();
- RegisterSpecialization<Visitor, base, generic, 5>();
- RegisterSpecialization<Visitor, base, generic, 6>();
- RegisterSpecialization<Visitor, base, generic, 7>();
- RegisterSpecialization<Visitor, base, generic, 8>();
- RegisterSpecialization<Visitor, base, generic, 9>();
- Register(generic, &Visitor::Visit);
- }
-
- private:
- AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
-};
-
-
-template<typename StaticVisitor>
-class BodyVisitorBase : public AllStatic {
- public:
- INLINE(static void IteratePointers(Heap* heap,
- HeapObject* object,
- int start_offset,
- int end_offset)) {
- Object** start_slot = reinterpret_cast<Object**>(object->address() +
- start_offset);
- Object** end_slot = reinterpret_cast<Object**>(object->address() +
- end_offset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
- }
-};
-
-
-template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
- public:
- INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
- int object_size = BodyDescriptor::SizeOf(map, object);
- BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->GetHeap(),
- object,
- BodyDescriptor::kStartOffset,
- object_size);
- return static_cast<ReturnType>(object_size);
- }
-
- template<int object_size>
- static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
- ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
- BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->GetHeap(),
- object,
- BodyDescriptor::kStartOffset,
- object_size);
- return static_cast<ReturnType>(object_size);
- }
-};
-
-
-template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
- public:
- INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
- BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->GetHeap(),
- object,
- BodyDescriptor::kStartOffset,
- BodyDescriptor::kEndOffset);
- return static_cast<ReturnType>(BodyDescriptor::kSize);
- }
-};
-
-
-// Base class for visitors used for a linear new space iteration.
-// IterateBody returns size of visited object.
-// Certain types of objects (i.e. Code objects) are not handled
-// by dispatch table of this visitor because they cannot appear
-// in the new space.
-//
-// This class is intended to be used in the following way:
-//
-// class SomeVisitor : public StaticNewSpaceVisitor<SomeVisitor> {
-// ...
-// }
-//
-// This is an example of Curiously recurring template pattern
-// (see http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern).
-// We use CRTP to guarantee aggressive compile time optimizations (i.e.
-// inlining and specialization of StaticVisitor::VisitPointers methods).
-template<typename StaticVisitor>
-class StaticNewSpaceVisitor : public StaticVisitorBase {
- public:
- static void Initialize();
-
- INLINE(static int IterateBody(Map* map, HeapObject* obj)) {
- return table_.GetVisitor(map)(map, obj);
- }
-
- INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
- for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
- }
-
- private:
- INLINE(static int VisitJSFunction(Map* map, HeapObject* object)) {
- Heap* heap = map->GetHeap();
- VisitPointers(heap,
- HeapObject::RawField(object, JSFunction::kPropertiesOffset),
- HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
-
- // Don't visit code entry. We are using this visitor only during scavenges.
-
- VisitPointers(
- heap,
- HeapObject::RawField(object,
- JSFunction::kCodeEntryOffset + kPointerSize),
- HeapObject::RawField(object,
- JSFunction::kNonWeakFieldsEndOffset));
- return JSFunction::kSize;
- }
-
- INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
- return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- }
-
- INLINE(static int VisitFixedDoubleArray(Map* map, HeapObject* object)) {
- int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
- return FixedDoubleArray::SizeFor(length);
- }
-
- INLINE(static int VisitJSObject(Map* map, HeapObject* object)) {
- return JSObjectVisitor::Visit(map, object);
- }
-
- INLINE(static int VisitSeqOneByteString(Map* map, HeapObject* object)) {
- return SeqOneByteString::cast(object)->
- SeqOneByteStringSize(map->instance_type());
- }
-
- INLINE(static int VisitSeqTwoByteString(Map* map, HeapObject* object)) {
- return SeqTwoByteString::cast(object)->
- SeqTwoByteStringSize(map->instance_type());
- }
-
- INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) {
- return FreeSpace::cast(object)->Size();
- }
-
- class DataObjectVisitor {
- public:
- template<int object_size>
- static inline int VisitSpecialized(Map* map, HeapObject* object) {
- return object_size;
- }
-
- INLINE(static int Visit(Map* map, HeapObject* object)) {
- return map->instance_size();
- }
- };
-
- typedef FlexibleBodyVisitor<StaticVisitor,
- StructBodyDescriptor,
- int> StructVisitor;
-
- typedef FlexibleBodyVisitor<StaticVisitor,
- JSObject::BodyDescriptor,
- int> JSObjectVisitor;
-
- typedef int (*Callback)(Map* map, HeapObject* object);
-
- static VisitorDispatchTable<Callback> table_;
-};
-
-
-template<typename StaticVisitor>
-VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
- StaticNewSpaceVisitor<StaticVisitor>::table_;
-
-
-// Base class for visitors used to transitively mark the entire heap.
-// IterateBody returns nothing.
-// Certain types of objects might not be handled by this base class and
-// no visitor function is registered by the generic initialization. A
-// specialized visitor function needs to be provided by the inheriting
-// class itself for those cases.
-//
-// This class is intended to be used in the following way:
-//
-// class SomeVisitor : public StaticMarkingVisitor<SomeVisitor> {
-// ...
-// }
-//
-// This is an example of Curiously recurring template pattern.
-template<typename StaticVisitor>
-class StaticMarkingVisitor : public StaticVisitorBase {
- public:
- static void Initialize();
-
- INLINE(static void IterateBody(Map* map, HeapObject* obj)) {
- table_.GetVisitor(map)(map, obj);
- }
-
- INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
- INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitExternalReference(RelocInfo* rinfo)) { }
- INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) { }
-
- // TODO(mstarzinger): This should be made protected once refactoring is done.
- // Mark non-optimize code for functions inlined into the given optimized
- // code. This will prevent it from being flushed.
- static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
-
- protected:
- INLINE(static void VisitMap(Map* map, HeapObject* object));
- INLINE(static void VisitCode(Map* map, HeapObject* object));
- INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
- INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
- INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
- INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
-
- // Mark pointers in a Map and its TransitionArray together, possibly
- // treating transitions or back pointers weak.
- static void MarkMapContents(Heap* heap, Map* map);
- static void MarkTransitionArray(Heap* heap, TransitionArray* transitions);
-
- // Code flushing support.
- INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
- INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info));
-
- // Helpers used by code flushing support that visit pointer fields and treat
- // references to code objects either strongly or weakly.
- static void VisitSharedFunctionInfoStrongCode(Heap* heap, HeapObject* object);
- static void VisitSharedFunctionInfoWeakCode(Heap* heap, HeapObject* object);
- static void VisitJSFunctionStrongCode(Heap* heap, HeapObject* object);
- static void VisitJSFunctionWeakCode(Heap* heap, HeapObject* object);
-
- class DataObjectVisitor {
- public:
- template<int size>
- static inline void VisitSpecialized(Map* map, HeapObject* object) {
- }
-
- INLINE(static void Visit(Map* map, HeapObject* object)) {
- }
- };
-
- typedef FlexibleBodyVisitor<StaticVisitor,
- FixedArray::BodyDescriptor,
- void> FixedArrayVisitor;
-
- typedef FlexibleBodyVisitor<StaticVisitor,
- JSObject::BodyDescriptor,
- void> JSObjectVisitor;
-
- typedef FlexibleBodyVisitor<StaticVisitor,
- StructBodyDescriptor,
- void> StructObjectVisitor;
-
- typedef void (*Callback)(Map* map, HeapObject* object);
-
- static VisitorDispatchTable<Callback> table_;
-};
-
-
-template<typename StaticVisitor>
-VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
- StaticMarkingVisitor<StaticVisitor>::table_;
-
-
-} } // namespace v8::internal
-
-#endif // V8_OBJECTS_VISITING_H_
diff --git a/src/3rdparty/v8/src/objects.cc b/src/3rdparty/v8/src/objects.cc
deleted file mode 100644
index bb185a5..0000000
--- a/src/3rdparty/v8/src/objects.cc
+++ /dev/null
@@ -1,14119 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "date.h"
-#include "elements.h"
-#include "execution.h"
-#include "full-codegen.h"
-#include "hydrogen.h"
-#include "objects-inl.h"
-#include "objects-visiting.h"
-#include "objects-visiting-inl.h"
-#include "macro-assembler.h"
-#include "mark-compact.h"
-#include "safepoint-table.h"
-#include "string-stream.h"
-#include "utils.h"
-#include "vm-state-inl.h"
-
-#ifdef ENABLE_DISASSEMBLER
-#include "disasm.h"
-#include "disassembler.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
- Object* value) {
- Object* result;
- { MaybeObject* maybe_result =
- constructor->GetHeap()->AllocateJSObject(constructor);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSValue::cast(result)->set_value(value);
- return result;
-}
-
-
-MaybeObject* Object::ToObject(Context* native_context) {
- if (IsNumber()) {
- return CreateJSValue(native_context->number_function(), this);
- } else if (IsBoolean()) {
- return CreateJSValue(native_context->boolean_function(), this);
- } else if (IsString()) {
- return CreateJSValue(native_context->string_function(), this);
- }
- ASSERT(IsJSObject());
- return this;
-}
-
-
-MaybeObject* Object::ToObject() {
- if (IsJSReceiver()) {
- return this;
- } else if (IsNumber()) {
- Isolate* isolate = Isolate::Current();
- Context* native_context = isolate->context()->native_context();
- return CreateJSValue(native_context->number_function(), this);
- } else if (IsBoolean()) {
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
- Context* native_context = isolate->context()->native_context();
- return CreateJSValue(native_context->boolean_function(), this);
- } else if (IsString()) {
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
- Context* native_context = isolate->context()->native_context();
- return CreateJSValue(native_context->string_function(), this);
- }
-
- // Throw a type error.
- return Failure::InternalError();
-}
-
-
-Object* Object::ToBoolean() {
- if (IsTrue()) return this;
- if (IsFalse()) return this;
- if (IsSmi()) {
- return Isolate::Current()->heap()->ToBoolean(Smi::cast(this)->value() != 0);
- }
- HeapObject* heap_object = HeapObject::cast(this);
- if (heap_object->IsUndefined() || heap_object->IsNull()) {
- return heap_object->GetHeap()->false_value();
- }
- // Undetectable object is false
- if (heap_object->IsUndetectableObject()) {
- return heap_object->GetHeap()->false_value();
- }
- if (heap_object->IsString()) {
- return heap_object->GetHeap()->ToBoolean(
- String::cast(this)->length() != 0);
- }
- if (heap_object->IsHeapNumber()) {
- return HeapNumber::cast(this)->HeapNumberToBoolean();
- }
- return heap_object->GetHeap()->true_value();
-}
-
-
-void Object::Lookup(String* name, LookupResult* result) {
- Object* holder = NULL;
- if (IsJSReceiver()) {
- holder = this;
- } else {
- Context* native_context = result->isolate()->context()->native_context();
- if (IsNumber()) {
- holder = native_context->number_function()->instance_prototype();
- } else if (IsString()) {
- holder = native_context->string_function()->instance_prototype();
- } else if (IsBoolean()) {
- holder = native_context->boolean_function()->instance_prototype();
- } else if (IsSymbol()) {
- holder = native_context->symbol_delegate();
- } else {
- Isolate::Current()->PushStackTraceAndDie(
- 0xDEAD0000, this, JSReceiver::cast(this)->map(), 0xDEAD0001);
- }
- }
- ASSERT(holder != NULL); // Cannot handle null or undefined.
- JSReceiver::cast(holder)->Lookup(name, result);
-}
-
-
-MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
- String* name,
- PropertyAttributes* attributes) {
- LookupResult result(name->GetIsolate());
- Lookup(name, &result);
- MaybeObject* value = GetProperty(receiver, &result, name, attributes);
- ASSERT(*attributes <= ABSENT);
- return value;
-}
-
-
-MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
- Object* structure,
- String* name) {
- Isolate* isolate = name->GetIsolate();
- // To accommodate both the old and the new api we switch on the
- // data structure used to store the callbacks. Eventually foreign
- // callbacks should be phased out.
- if (structure->IsForeign()) {
- AccessorDescriptor* callback =
- reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->foreign_address());
- MaybeObject* value = (callback->getter)(receiver, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return value;
- }
-
- // api style callbacks.
- if (structure->IsExecutableAccessorInfo()) {
- ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
- if (!data->IsCompatibleReceiver(receiver)) {
- Handle<Object> name_handle(name, isolate);
- Handle<Object> receiver_handle(receiver, isolate);
- Handle<Object> args[2] = { name_handle, receiver_handle };
- Handle<Object> error =
- isolate->factory()->NewTypeError("incompatible_method_receiver",
- HandleVector(args,
- ARRAY_SIZE(args)));
- return isolate->Throw(*error);
- }
- Object* fun_obj = data->getter();
- v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
- if (call_fun == NULL) return isolate->heap()->undefined_value();
- HandleScope scope(isolate);
- JSObject* self = JSObject::cast(receiver);
- Handle<String> key(name);
- LOG(isolate, ApiNamedPropertyAccess("load", self, name));
- CustomArguments args(isolate, data->data(), self, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = call_fun(v8::Utils::ToLocal(key), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (result.IsEmpty()) {
- return isolate->heap()->undefined_value();
- }
- Object* return_value = *v8::Utils::OpenHandle(*result);
- return_value->VerifyApiCallResultType();
- return return_value;
- }
-
- // __defineGetter__ callback
- if (structure->IsAccessorPair()) {
- Object* getter = AccessorPair::cast(structure)->getter();
- if (getter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
- }
- // Getter is not a function.
- return isolate->heap()->undefined_value();
- }
-
- // TODO(dcarney): Handle correctly.
- if (structure->IsDeclaredAccessorInfo()) {
- return isolate->heap()->undefined_value();
- }
-
- UNREACHABLE();
- return NULL;
-}
-
-
-MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
- String* name_raw) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> receiver(receiver_raw, isolate);
- Handle<Object> name(name_raw, isolate);
-
- Handle<Object> args[] = { receiver, name };
- Handle<Object> result = CallTrap(
- "get", isolate->derived_get_trap(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
-
- return *result;
-}
-
-
-Handle<Object> Object::GetProperty(Handle<Object> object, Handle<String> name) {
- // TODO(rossberg): The index test should not be here but in the GetProperty
- // method (or somewhere else entirely). Needs more global clean-up.
- uint32_t index;
- if (name->AsArrayIndex(&index)) return GetElement(object, index);
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
- CALL_HEAP_FUNCTION(isolate, object->GetProperty(*name), Object);
-}
-
-
-Handle<Object> Object::GetElement(Handle<Object> object, uint32_t index) {
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
- CALL_HEAP_FUNCTION(isolate, object->GetElement(index), Object);
-}
-
-
-MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
- uint32_t index) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return GetPropertyWithHandler(receiver, name);
-}
-
-
-MaybeObject* JSProxy::SetElementWithHandler(JSReceiver* receiver,
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return SetPropertyWithHandler(receiver, name, value, NONE, strict_mode);
-}
-
-
-bool JSProxy::HasElementWithHandler(uint32_t index) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return HasPropertyWithHandler(name);
-}
-
-
-MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
- JSReceiver* getter) {
- Isolate* isolate = getter->GetIsolate();
- HandleScope scope(isolate);
- Handle<JSReceiver> fun(getter);
- Handle<Object> self(receiver, isolate);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = isolate->debug();
- // Handle stepping into a getter if step into is active.
- // TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->StepInActive() && fun->IsJSFunction()) {
- debug->HandleStepIn(
- Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
- }
-#endif
-
- bool has_pending_exception;
- Handle<Object> result =
- Execution::Call(fun, self, 0, NULL, &has_pending_exception, true);
- // Check for pending exception and return the result.
- if (has_pending_exception) return Failure::Exception();
- return *result;
-}
-
-
-// Only deal with CALLBACKS and INTERCEPTOR
-MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- String* name,
- PropertyAttributes* attributes) {
- if (result->IsProperty()) {
- switch (result->type()) {
- case CALLBACKS: {
- // Only allow API accessors.
- Object* obj = result->GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
- if (info->all_can_read()) {
- *attributes = result->GetAttributes();
- return result->holder()->GetPropertyWithCallback(
- receiver, result->GetCallbackObject(), name);
- }
- }
- break;
- }
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION: {
- // Search ALL_CAN_READ accessors in prototype chain.
- LookupResult r(GetIsolate());
- result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
- if (r.IsProperty()) {
- return GetPropertyWithFailedAccessCheck(receiver,
- &r,
- name,
- attributes);
- }
- break;
- }
- case INTERCEPTOR: {
- // If the object has an interceptor, try real named properties.
- // No access check in GetPropertyAttributeWithInterceptor.
- LookupResult r(GetIsolate());
- result->holder()->LookupRealNamedProperty(name, &r);
- if (r.IsProperty()) {
- return GetPropertyWithFailedAccessCheck(receiver,
- &r,
- name,
- attributes);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- }
-
- // No accessible property found.
- *attributes = ABSENT;
- Heap* heap = name->GetHeap();
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
- return heap->undefined_value();
-}
-
-
-PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- String* name,
- bool continue_search) {
- if (result->IsProperty()) {
- switch (result->type()) {
- case CALLBACKS: {
- // Only allow API accessors.
- Object* obj = result->GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
- if (info->all_can_read()) {
- return result->GetAttributes();
- }
- }
- break;
- }
-
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION: {
- if (!continue_search) break;
- // Search ALL_CAN_READ accessors in prototype chain.
- LookupResult r(GetIsolate());
- result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
- if (r.IsProperty()) {
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- &r,
- name,
- continue_search);
- }
- break;
- }
-
- case INTERCEPTOR: {
- // If the object has an interceptor, try real named properties.
- // No access check in GetPropertyAttributeWithInterceptor.
- LookupResult r(GetIsolate());
- if (continue_search) {
- result->holder()->LookupRealNamedProperty(name, &r);
- } else {
- result->holder()->LocalLookupRealNamedProperty(name, &r);
- }
- if (!r.IsFound()) break;
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- &r,
- name,
- continue_search);
- }
-
- case HANDLER:
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- }
- }
-
- GetIsolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return ABSENT;
-}
-
-
-Object* JSObject::GetNormalizedProperty(LookupResult* result) {
- ASSERT(!HasFastProperties());
- Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
- if (IsGlobalObject()) {
- value = JSGlobalPropertyCell::cast(value)->value();
- }
- ASSERT(!value->IsJSGlobalPropertyCell());
- return value;
-}
-
-
-Object* JSObject::SetNormalizedProperty(LookupResult* result, Object* value) {
- ASSERT(!HasFastProperties());
- if (IsGlobalObject()) {
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(
- property_dictionary()->ValueAt(result->GetDictionaryEntry()));
- cell->set_value(value);
- } else {
- property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
- }
- return value;
-}
-
-
-Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetNormalizedProperty(*key, *value, details),
- Object);
-}
-
-
-MaybeObject* JSObject::SetNormalizedProperty(String* name,
- Object* value,
- PropertyDetails details) {
- ASSERT(!HasFastProperties());
- int entry = property_dictionary()->FindEntry(name);
- if (entry == StringDictionary::kNotFound) {
- Object* store_value = value;
- if (IsGlobalObject()) {
- Heap* heap = name->GetHeap();
- MaybeObject* maybe_store_value =
- heap->AllocateJSGlobalPropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
- }
- Object* dict;
- { MaybeObject* maybe_dict =
- property_dictionary()->Add(name, store_value, details);
- if (!maybe_dict->ToObject(&dict)) return maybe_dict;
- }
- set_properties(StringDictionary::cast(dict));
- return value;
- }
-
- PropertyDetails original_details = property_dictionary()->DetailsAt(entry);
- int enumeration_index;
- // Preserve the enumeration index unless the property was deleted.
- if (original_details.IsDeleted()) {
- enumeration_index = property_dictionary()->NextEnumerationIndex();
- property_dictionary()->SetNextEnumerationIndex(enumeration_index + 1);
- } else {
- enumeration_index = original_details.dictionary_index();
- ASSERT(enumeration_index > 0);
- }
-
- details = PropertyDetails(
- details.attributes(), details.type(), enumeration_index);
-
- if (IsGlobalObject()) {
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(property_dictionary()->ValueAt(entry));
- cell->set_value(value);
- // Please note we have to update the property details.
- property_dictionary()->DetailsAtPut(entry, details);
- } else {
- property_dictionary()->SetEntry(entry, name, value, details);
- }
- return value;
-}
-
-
-MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
- ASSERT(!HasFastProperties());
- StringDictionary* dictionary = property_dictionary();
- int entry = dictionary->FindEntry(name);
- if (entry != StringDictionary::kNotFound) {
- // If we have a global object set the cell to the hole.
- if (IsGlobalObject()) {
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.IsDontDelete()) {
- if (mode != FORCE_DELETION) return GetHeap()->false_value();
- // When forced to delete global properties, we have to make a
- // map change to invalidate any ICs that think they can load
- // from the DontDelete cell without checking if it contains
- // the hole value.
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- ASSERT(new_map->is_dictionary_map());
- set_map(new_map);
- }
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
- cell->set_value(cell->GetHeap()->the_hole_value());
- dictionary->DetailsAtPut(entry, details.AsDeleted());
- } else {
- Object* deleted = dictionary->DeleteProperty(entry, mode);
- if (deleted == GetHeap()->true_value()) {
- FixedArray* new_properties = NULL;
- MaybeObject* maybe_properties = dictionary->Shrink(name);
- if (!maybe_properties->To(&new_properties)) {
- return maybe_properties;
- }
- set_properties(new_properties);
- }
- return deleted;
- }
- }
- return GetHeap()->true_value();
-}
-
-
-bool JSObject::IsDirty() {
- Object* cons_obj = map()->constructor();
- if (!cons_obj->IsJSFunction())
- return true;
- JSFunction* fun = JSFunction::cast(cons_obj);
- if (!fun->shared()->IsApiFunction())
- return true;
- // If the object is fully fast case and has the same map it was
- // created with then no changes can have been made to it.
- return map() != fun->initial_map()
- || !HasFastObjectElements()
- || !HasFastProperties();
-}
-
-
-Handle<Object> Object::GetProperty(Handle<Object> object,
- Handle<Object> receiver,
- LookupResult* result,
- Handle<String> key,
- PropertyAttributes* attributes) {
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
- CALL_HEAP_FUNCTION(
- isolate,
- object->GetProperty(*receiver, result, *key, attributes),
- Object);
-}
-
-
-MaybeObject* Object::GetProperty(Object* receiver,
- LookupResult* result,
- String* name,
- PropertyAttributes* attributes) {
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- Isolate* isolate = name->GetIsolate();
- Heap* heap = isolate->heap();
-
- // Traverse the prototype chain from the current object (this) to
- // the holder and check for access rights. This avoids traversing the
- // objects more than once in case of interceptors, because the
- // holder will always be the interceptor holder and the search may
- // only continue with a current object just after the interceptor
- // holder in the prototype chain.
- // Proxy handlers do not use the proxy's prototype, so we can skip this.
- if (!result->IsHandler()) {
- Object* last = result->IsProperty() && !receiver->IsSymbol()
- ? result->holder()
- : Object::cast(heap->null_value());
- ASSERT(this != this->GetPrototype(isolate));
- for (Object* current = this;
- true;
- current = current->GetPrototype(isolate)) {
- if (current->IsAccessCheckNeeded()) {
- // Check if we're allowed to read from the current object. Note
- // that even though we may not actually end up loading the named
- // property from the current object, we still check that we have
- // access to it.
- // TODO(dcarney): revert.
- CHECK(current->IsJSObject());
- JSObject* checked = JSObject::cast(current);
- if (!heap->isolate()->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
- return checked->GetPropertyWithFailedAccessCheck(receiver,
- result,
- name,
- attributes);
- }
- }
- // Stop traversing the chain once we reach the last object in the
- // chain; either the holder of the result or null in case of an
- // absent property.
- if (current == last) break;
- }
- }
-
- if (!result->IsProperty()) {
- *attributes = ABSENT;
- return heap->undefined_value();
- }
- *attributes = result->GetAttributes();
- Object* value;
- switch (result->type()) {
- case NORMAL:
- value = result->holder()->GetNormalizedProperty(result);
- ASSERT(!value->IsTheHole() || result->IsReadOnly());
- return value->IsTheHole() ? heap->undefined_value() : value;
- case FIELD:
- value = result->holder()->FastPropertyAt(
- result->GetFieldIndex().field_index());
- ASSERT(!value->IsTheHole() || result->IsReadOnly());
- return value->IsTheHole() ? heap->undefined_value() : value;
- case CONSTANT_FUNCTION:
- return result->GetConstantFunction();
- case CALLBACKS:
- return result->holder()->GetPropertyWithCallback(
- receiver, result->GetCallbackObject(), name);
- case HANDLER:
- return result->proxy()->GetPropertyWithHandler(receiver, name);
- case INTERCEPTOR:
- return result->holder()->GetPropertyWithInterceptor(
- receiver, name, attributes);
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
- Isolate* isolate = IsSmi()
- ? Isolate::Current()
- : HeapObject::cast(this)->GetIsolate();
- Heap* heap = isolate->heap();
- Object* holder = this;
-
- // Iterate up the prototype chain until an element is found or the null
- // prototype is encountered.
- for (holder = this;
- holder != heap->null_value();
- holder = holder->GetPrototype(isolate)) {
- if (!holder->IsJSObject()) {
- Context* native_context = isolate->context()->native_context();
- if (holder->IsNumber()) {
- holder = native_context->number_function()->instance_prototype();
- } else if (holder->IsString()) {
- holder = native_context->string_function()->instance_prototype();
- } else if (holder->IsBoolean()) {
- holder = native_context->boolean_function()->instance_prototype();
- } else if (holder->IsSymbol()) {
- holder = native_context->symbol_delegate();
- } else if (holder->IsJSProxy()) {
- return JSProxy::cast(holder)->GetElementWithHandler(receiver, index);
- } else {
- // Undefined and null have no indexed properties.
- ASSERT(holder->IsUndefined() || holder->IsNull());
- return heap->undefined_value();
- }
- }
-
- // Inline the case for JSObjects. Doing so significantly improves the
- // performance of fetching elements where checking the prototype chain is
- // necessary.
- JSObject* js_object = JSObject::cast(holder);
-
- // Check access rights if needed.
- if (js_object->IsAccessCheckNeeded()) {
- Isolate* isolate = heap->isolate();
- if (!isolate->MayIndexedAccess(js_object, index, v8::ACCESS_GET)) {
- isolate->ReportFailedAccessCheck(js_object, v8::ACCESS_GET);
- return heap->undefined_value();
- }
- }
-
- if (js_object->HasIndexedInterceptor()) {
- return js_object->GetElementWithInterceptor(receiver, index);
- }
-
- if (js_object->elements() != heap->empty_fixed_array()) {
- MaybeObject* result = js_object->GetElementsAccessor()->Get(
- receiver, js_object, index);
- if (result != heap->the_hole_value()) return result;
- }
- }
-
- return heap->undefined_value();
-}
-
-
-Object* Object::GetPrototype(Isolate* isolate) {
- if (IsSmi()) {
- Context* context = isolate->context()->native_context();
- return context->number_function()->instance_prototype();
- }
-
- HeapObject* heap_object = HeapObject::cast(this);
-
- // The object is either a number, a string, a boolean,
- // a real JS object, or a Harmony proxy.
- if (heap_object->IsJSReceiver()) {
- return heap_object->map()->prototype();
- }
- Context* context = isolate->context()->native_context();
-
- if (heap_object->IsHeapNumber()) {
- return context->number_function()->instance_prototype();
- }
- if (heap_object->IsString()) {
- return context->string_function()->instance_prototype();
- }
- if (heap_object->IsBoolean()) {
- return context->boolean_function()->instance_prototype();
- } else {
- return isolate->heap()->null_value();
- }
-}
-
-
-Object* Object::GetDelegate(Isolate* isolate) {
- if (IsSymbol()) {
- Heap* heap = Symbol::cast(this)->GetHeap();
- Context* context = heap->isolate()->context()->native_context();
- return context->symbol_delegate();
- }
- return GetPrototype(isolate);
-}
-
-
-MaybeObject* Object::GetHash(CreationFlag flag) {
- // The object is either a number, a string, an odd-ball,
- // a real JS object, or a Harmony proxy.
- if (IsNumber()) {
- uint32_t hash = ComputeLongHash(double_to_uint64(Number()));
- return Smi::FromInt(hash & Smi::kMaxValue);
- }
- if (IsName()) {
- uint32_t hash = Name::cast(this)->Hash();
- return Smi::FromInt(hash);
- }
- if (IsOddball()) {
- uint32_t hash = Oddball::cast(this)->to_string()->Hash();
- return Smi::FromInt(hash);
- }
- if (IsJSReceiver()) {
- return JSReceiver::cast(this)->GetIdentityHash(flag);
- }
-
- UNREACHABLE();
- return Smi::FromInt(0);
-}
-
-
-bool Object::SameValue(Object* other) {
- if (other == this) return true;
-
- // The object is either a number, a string, an odd-ball,
- // a real JS object, or a Harmony proxy.
- if (IsNumber() && other->IsNumber()) {
- double this_value = Number();
- double other_value = other->Number();
- return (this_value == other_value) ||
- (isnan(this_value) && isnan(other_value));
- }
- if (IsString() && other->IsString()) {
- return String::cast(this)->Equals(String::cast(other));
- }
- return false;
-}
-
-
-void Object::ShortPrint(FILE* out) {
- HeapStringAllocator allocator;
- StringStream accumulator(&allocator);
- ShortPrint(&accumulator);
- accumulator.OutputToFile(out);
-}
-
-
-void Object::ShortPrint(StringStream* accumulator) {
- if (IsSmi()) {
- Smi::cast(this)->SmiPrint(accumulator);
- } else if (IsFailure()) {
- Failure::cast(this)->FailurePrint(accumulator);
- } else {
- HeapObject::cast(this)->HeapObjectShortPrint(accumulator);
- }
-}
-
-
-void Smi::SmiPrint(FILE* out) {
- FPrintF(out, "%d", value());
-}
-
-
-void Smi::SmiPrint(StringStream* accumulator) {
- accumulator->Add("%d", value());
-}
-
-
-void Failure::FailurePrint(StringStream* accumulator) {
- accumulator->Add("Failure(%p)", reinterpret_cast<void*>(value()));
-}
-
-
-void Failure::FailurePrint(FILE* out) {
- FPrintF(out, "Failure(%p)", reinterpret_cast<void*>(value()));
-}
-
-
-// Should a word be prefixed by 'a' or 'an' in order to read naturally in
-// English? Returns false for non-ASCII or words that don't start with
-// a capital letter. The a/an rule follows pronunciation in English.
-// We don't use the BBC's overcorrect "an historic occasion" though if
-// you speak a dialect you may well say "an 'istoric occasion".
-static bool AnWord(String* str) {
- if (str->length() == 0) return false; // A nothing.
- int c0 = str->Get(0);
- int c1 = str->length() > 1 ? str->Get(1) : 0;
- if (c0 == 'U') {
- if (c1 > 'Z') {
- return true; // An Umpire, but a UTF8String, a U.
- }
- } else if (c0 == 'A' || c0 == 'E' || c0 == 'I' || c0 == 'O') {
- return true; // An Ape, an ABCBook.
- } else if ((c1 == 0 || (c1 >= 'A' && c1 <= 'Z')) &&
- (c0 == 'F' || c0 == 'H' || c0 == 'M' || c0 == 'N' || c0 == 'R' ||
- c0 == 'S' || c0 == 'X')) {
- return true; // An MP3File, an M.
- }
- return false;
-}
-
-
-MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) {
-#ifdef DEBUG
- // Do not attempt to flatten in debug mode when allocation is not
- // allowed. This is to avoid an assertion failure when allocating.
- // Flattening strings is the only case where we always allow
- // allocation because no GC is performed if the allocation fails.
- if (!HEAP->IsAllocationAllowed()) return this;
-#endif
-
- Heap* heap = GetHeap();
- switch (StringShape(this).representation_tag()) {
- case kConsStringTag: {
- ConsString* cs = ConsString::cast(this);
- if (cs->second()->length() == 0) {
- return cs->first();
- }
- // There's little point in putting the flat string in new space if the
- // cons string is in old space. It can never get GCed until there is
- // an old space GC.
- PretenureFlag tenure = heap->InNewSpace(this) ? pretenure : TENURED;
- int len = length();
- Object* object;
- String* result;
- if (IsOneByteRepresentation()) {
- { MaybeObject* maybe_object =
- heap->AllocateRawOneByteString(len, tenure);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- result = String::cast(object);
- String* first = cs->first();
- int first_length = first->length();
- uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
- WriteToFlat(first, dest, 0, first_length);
- String* second = cs->second();
- WriteToFlat(second,
- dest + first_length,
- 0,
- len - first_length);
- } else {
- { MaybeObject* maybe_object =
- heap->AllocateRawTwoByteString(len, tenure);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- result = String::cast(object);
- uc16* dest = SeqTwoByteString::cast(result)->GetChars();
- String* first = cs->first();
- int first_length = first->length();
- WriteToFlat(first, dest, 0, first_length);
- String* second = cs->second();
- WriteToFlat(second,
- dest + first_length,
- 0,
- len - first_length);
- }
- cs->set_first(result);
- cs->set_second(heap->empty_string(), SKIP_WRITE_BARRIER);
- return result;
- }
- default:
- return this;
- }
-}
-
-
-bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
- // Externalizing twice leaks the external resource, so it's
- // prohibited by the API.
- ASSERT(!this->IsExternalString());
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- // Assert that the resource and the string are equivalent.
- ASSERT(static_cast<size_t>(this->length()) == resource->length());
- ScopedVector<uc16> smart_chars(this->length());
- String::WriteToFlat(this, smart_chars.start(), 0, this->length());
- ASSERT(memcmp(smart_chars.start(),
- resource->data(),
- resource->length() * sizeof(smart_chars[0])) == 0);
- }
-#endif // DEBUG
- Heap* heap = GetHeap();
- int size = this->Size(); // Byte size of the original string.
- if (size < ExternalString::kShortSize) {
- return false;
- }
- bool is_ascii = this->IsOneByteRepresentation();
- bool is_internalized = this->IsInternalizedString();
-
- // Morph the object to an external string by adjusting the map and
- // reinitializing the fields.
- if (size >= ExternalString::kSize) {
- this->set_map_no_write_barrier(
- is_internalized
- ? (is_ascii
- ? heap->external_internalized_string_with_ascii_data_map()
- : heap->external_internalized_string_map())
- : (is_ascii
- ? heap->external_string_with_ascii_data_map()
- : heap->external_string_map()));
- } else {
- this->set_map_no_write_barrier(
- is_internalized
- ? (is_ascii
- ? heap->
- short_external_internalized_string_with_ascii_data_map()
- : heap->short_external_internalized_string_map())
- : (is_ascii
- ? heap->short_external_string_with_ascii_data_map()
- : heap->short_external_string_map()));
- }
- ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
- self->set_resource(resource);
- if (is_internalized) self->Hash(); // Force regeneration of the hash value.
-
- // Fill the remainder of the string with dead wood.
- int new_size = this->Size(); // Byte size of the external String object.
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
- new_size - size);
- }
- return true;
-}
-
-
-bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- // Assert that the resource and the string are equivalent.
- ASSERT(static_cast<size_t>(this->length()) == resource->length());
- ScopedVector<char> smart_chars(this->length());
- String::WriteToFlat(this, smart_chars.start(), 0, this->length());
- ASSERT(memcmp(smart_chars.start(),
- resource->data(),
- resource->length() * sizeof(smart_chars[0])) == 0);
- }
-#endif // DEBUG
- Heap* heap = GetHeap();
- int size = this->Size(); // Byte size of the original string.
- if (size < ExternalString::kShortSize) {
- return false;
- }
- bool is_internalized = this->IsInternalizedString();
-
- // Morph the object to an external string by adjusting the map and
- // reinitializing the fields. Use short version if space is limited.
- if (size >= ExternalString::kSize) {
- this->set_map_no_write_barrier(
- is_internalized ? heap->external_ascii_internalized_string_map()
- : heap->external_ascii_string_map());
- } else {
- this->set_map_no_write_barrier(
- is_internalized ? heap->short_external_ascii_internalized_string_map()
- : heap->short_external_ascii_string_map());
- }
- ExternalAsciiString* self = ExternalAsciiString::cast(this);
- self->set_resource(resource);
- if (is_internalized) self->Hash(); // Force regeneration of the hash value.
-
- // Fill the remainder of the string with dead wood.
- int new_size = this->Size(); // Byte size of the external String object.
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
- new_size - size);
- }
- return true;
-}
-
-
-void String::StringShortPrint(StringStream* accumulator) {
- int len = length();
- if (len > kMaxShortPrintLength) {
- accumulator->Add("<Very long string[%u]>", len);
- return;
- }
-
- if (!LooksValid()) {
- accumulator->Add("<Invalid String>");
- return;
- }
-
- ConsStringIteratorOp op;
- StringCharacterStream stream(this, &op);
-
- bool truncated = false;
- if (len > kMaxShortPrintLength) {
- len = kMaxShortPrintLength;
- truncated = true;
- }
- bool ascii = true;
- for (int i = 0; i < len; i++) {
- uint16_t c = stream.GetNext();
-
- if (c < 32 || c >= 127) {
- ascii = false;
- }
- }
- stream.Reset(this);
- if (ascii) {
- accumulator->Add("<String[%u]: ", length());
- for (int i = 0; i < len; i++) {
- accumulator->Put(static_cast<char>(stream.GetNext()));
- }
- accumulator->Put('>');
- } else {
- // Backslash indicates that the string contains control
- // characters and that backslashes are therefore escaped.
- accumulator->Add("<String[%u]\\: ", length());
- for (int i = 0; i < len; i++) {
- uint16_t c = stream.GetNext();
- if (c == '\n') {
- accumulator->Add("\\n");
- } else if (c == '\r') {
- accumulator->Add("\\r");
- } else if (c == '\\') {
- accumulator->Add("\\\\");
- } else if (c < 32 || c > 126) {
- accumulator->Add("\\x%02x", c);
- } else {
- accumulator->Put(static_cast<char>(c));
- }
- }
- if (truncated) {
- accumulator->Put('.');
- accumulator->Put('.');
- accumulator->Put('.');
- }
- accumulator->Put('>');
- }
- return;
-}
-
-
-void JSObject::JSObjectShortPrint(StringStream* accumulator) {
- switch (map()->instance_type()) {
- case JS_ARRAY_TYPE: {
- double length = JSArray::cast(this)->length()->IsUndefined()
- ? 0
- : JSArray::cast(this)->length()->Number();
- accumulator->Add("<JS Array[%u]>", static_cast<uint32_t>(length));
- break;
- }
- case JS_WEAK_MAP_TYPE: {
- accumulator->Add("<JS WeakMap>");
- break;
- }
- case JS_REGEXP_TYPE: {
- accumulator->Add("<JS RegExp>");
- break;
- }
- case JS_FUNCTION_TYPE: {
- Object* fun_name = JSFunction::cast(this)->shared()->name();
- bool printed = false;
- if (fun_name->IsString()) {
- String* str = String::cast(fun_name);
- if (str->length() > 0) {
- accumulator->Add("<JS Function ");
- accumulator->Put(str);
- accumulator->Put('>');
- printed = true;
- }
- }
- if (!printed) {
- accumulator->Add("<JS Function>");
- }
- break;
- }
- case JS_MODULE_TYPE: {
- accumulator->Add("<JS Module>");
- break;
- }
- // All other JSObjects are rather similar to each other (JSObject,
- // JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
- default: {
- Map* map_of_this = map();
- Heap* heap = GetHeap();
- Object* constructor = map_of_this->constructor();
- bool printed = false;
- if (constructor->IsHeapObject() &&
- !heap->Contains(HeapObject::cast(constructor))) {
- accumulator->Add("!!!INVALID CONSTRUCTOR!!!");
- } else {
- bool global_object = IsJSGlobalProxy();
- if (constructor->IsJSFunction()) {
- if (!heap->Contains(JSFunction::cast(constructor)->shared())) {
- accumulator->Add("!!!INVALID SHARED ON CONSTRUCTOR!!!");
- } else {
- Object* constructor_name =
- JSFunction::cast(constructor)->shared()->name();
- if (constructor_name->IsString()) {
- String* str = String::cast(constructor_name);
- if (str->length() > 0) {
- bool vowel = AnWord(str);
- accumulator->Add("<%sa%s ",
- global_object ? "Global Object: " : "",
- vowel ? "n" : "");
- accumulator->Put(str);
- printed = true;
- }
- }
- }
- }
- if (!printed) {
- accumulator->Add("<JS %sObject", global_object ? "Global " : "");
- }
- }
- if (IsJSValue()) {
- accumulator->Add(" value = ");
- JSValue::cast(this)->value()->ShortPrint(accumulator);
- }
- accumulator->Put('>');
- break;
- }
- }
-}
-
-
-void JSObject::PrintElementsTransition(
- FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
- ElementsKind to_kind, FixedArrayBase* to_elements) {
- if (from_kind != to_kind) {
- FPrintF(file, "elements transition [");
- PrintElementsKind(file, from_kind);
- FPrintF(file, " -> ");
- PrintElementsKind(file, to_kind);
- FPrintF(file, "] in ");
- JavaScriptFrame::PrintTop(GetIsolate(), file, false, true);
- FPrintF(file, " for ");
- ShortPrint(file);
- FPrintF(file, " from ");
- from_elements->ShortPrint(file);
- FPrintF(file, " to ");
- to_elements->ShortPrint(file);
- FPrintF(file, "\n");
- }
-}
-
-
-void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
- Heap* heap = GetHeap();
- if (!heap->Contains(this)) {
- accumulator->Add("!!!INVALID POINTER!!!");
- return;
- }
- if (!heap->Contains(map())) {
- accumulator->Add("!!!INVALID MAP!!!");
- return;
- }
-
- accumulator->Add("%p ", this);
-
- if (IsString()) {
- String::cast(this)->StringShortPrint(accumulator);
- return;
- }
- if (IsJSObject()) {
- JSObject::cast(this)->JSObjectShortPrint(accumulator);
- return;
- }
- switch (map()->instance_type()) {
- case MAP_TYPE:
- accumulator->Add("<Map(elements=%u)>", Map::cast(this)->elements_kind());
- break;
- case FIXED_ARRAY_TYPE:
- accumulator->Add("<FixedArray[%u]>", FixedArray::cast(this)->length());
- break;
- case FIXED_DOUBLE_ARRAY_TYPE:
- accumulator->Add("<FixedDoubleArray[%u]>",
- FixedDoubleArray::cast(this)->length());
- break;
- case BYTE_ARRAY_TYPE:
- accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length());
- break;
- case FREE_SPACE_TYPE:
- accumulator->Add("<FreeSpace[%u]>", FreeSpace::cast(this)->Size());
- break;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- accumulator->Add("<ExternalPixelArray[%u]>",
- ExternalPixelArray::cast(this)->length());
- break;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- accumulator->Add("<ExternalByteArray[%u]>",
- ExternalByteArray::cast(this)->length());
- break;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- accumulator->Add("<ExternalUnsignedByteArray[%u]>",
- ExternalUnsignedByteArray::cast(this)->length());
- break;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- accumulator->Add("<ExternalShortArray[%u]>",
- ExternalShortArray::cast(this)->length());
- break;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- accumulator->Add("<ExternalUnsignedShortArray[%u]>",
- ExternalUnsignedShortArray::cast(this)->length());
- break;
- case EXTERNAL_INT_ARRAY_TYPE:
- accumulator->Add("<ExternalIntArray[%u]>",
- ExternalIntArray::cast(this)->length());
- break;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- accumulator->Add("<ExternalUnsignedIntArray[%u]>",
- ExternalUnsignedIntArray::cast(this)->length());
- break;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- accumulator->Add("<ExternalFloatArray[%u]>",
- ExternalFloatArray::cast(this)->length());
- break;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- accumulator->Add("<ExternalDoubleArray[%u]>",
- ExternalDoubleArray::cast(this)->length());
- break;
- case SHARED_FUNCTION_INFO_TYPE:
- accumulator->Add("<SharedFunctionInfo>");
- break;
- case JS_MESSAGE_OBJECT_TYPE:
- accumulator->Add("<JSMessageObject>");
- break;
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
- accumulator->Put('<'); \
- accumulator->Add(#Name); \
- accumulator->Put('>'); \
- break;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- case CODE_TYPE:
- accumulator->Add("<Code>");
- break;
- case ODDBALL_TYPE: {
- if (IsUndefined())
- accumulator->Add("<undefined>");
- else if (IsTheHole())
- accumulator->Add("<the hole>");
- else if (IsNull())
- accumulator->Add("<null>");
- else if (IsTrue())
- accumulator->Add("<true>");
- else if (IsFalse())
- accumulator->Add("<false>");
- else
- accumulator->Add("<Odd Oddball>");
- break;
- }
- case SYMBOL_TYPE:
- accumulator->Add("<Symbol: %d>", Symbol::cast(this)->Hash());
- break;
- case HEAP_NUMBER_TYPE:
- accumulator->Add("<Number: ");
- HeapNumber::cast(this)->HeapNumberPrint(accumulator);
- accumulator->Put('>');
- break;
- case JS_PROXY_TYPE:
- accumulator->Add("<JSProxy>");
- break;
- case JS_FUNCTION_PROXY_TYPE:
- accumulator->Add("<JSFunctionProxy>");
- break;
- case FOREIGN_TYPE:
- accumulator->Add("<Foreign>");
- break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- accumulator->Add("Cell for ");
- JSGlobalPropertyCell::cast(this)->value()->ShortPrint(accumulator);
- break;
- default:
- accumulator->Add("<Other heap object (%d)>", map()->instance_type());
- break;
- }
-}
-
-
-void HeapObject::Iterate(ObjectVisitor* v) {
- // Handle header
- IteratePointer(v, kMapOffset);
- // Handle object body
- Map* m = map();
- IterateBody(m->instance_type(), SizeFromMap(m), v);
-}
-
-
-void HeapObject::IterateBody(InstanceType type, int object_size,
- ObjectVisitor* v) {
- // Avoiding <Type>::cast(this) because it accesses the map pointer field.
- // During GC, the map pointer field is encoded.
- if (type < FIRST_NONSTRING_TYPE) {
- switch (type & kStringRepresentationMask) {
- case kSeqStringTag:
- break;
- case kConsStringTag:
- ConsString::BodyDescriptor::IterateBody(this, v);
- break;
- case kSlicedStringTag:
- SlicedString::BodyDescriptor::IterateBody(this, v);
- break;
- case kExternalStringTag:
- if ((type & kStringEncodingMask) == kOneByteStringTag) {
- reinterpret_cast<ExternalAsciiString*>(this)->
- ExternalAsciiStringIterateBody(v);
- } else {
- reinterpret_cast<ExternalTwoByteString*>(this)->
- ExternalTwoByteStringIterateBody(v);
- }
- break;
- }
- return;
- }
-
- switch (type) {
- case FIXED_ARRAY_TYPE:
- FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
- break;
- case FIXED_DOUBLE_ARRAY_TYPE:
- break;
- case JS_OBJECT_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_MODULE_TYPE:
- case JS_VALUE_TYPE:
- case JS_DATE_TYPE:
- case JS_ARRAY_TYPE:
- case JS_SET_TYPE:
- case JS_MAP_TYPE:
- case JS_WEAK_MAP_TYPE:
- case JS_REGEXP_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- JSObject::BodyDescriptor::IterateBody(this, object_size, v);
- break;
- case JS_FUNCTION_TYPE:
- reinterpret_cast<JSFunction*>(this)
- ->JSFunctionIterateBody(object_size, v);
- break;
- case ODDBALL_TYPE:
- Oddball::BodyDescriptor::IterateBody(this, v);
- break;
- case JS_PROXY_TYPE:
- JSProxy::BodyDescriptor::IterateBody(this, v);
- break;
- case JS_FUNCTION_PROXY_TYPE:
- JSFunctionProxy::BodyDescriptor::IterateBody(this, v);
- break;
- case FOREIGN_TYPE:
- reinterpret_cast<Foreign*>(this)->ForeignIterateBody(v);
- break;
- case MAP_TYPE:
- Map::BodyDescriptor::IterateBody(this, v);
- break;
- case CODE_TYPE:
- reinterpret_cast<Code*>(this)->CodeIterateBody(v);
- break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- JSGlobalPropertyCell::BodyDescriptor::IterateBody(this, v);
- break;
- case SYMBOL_TYPE:
- case HEAP_NUMBER_TYPE:
- case FILLER_TYPE:
- case BYTE_ARRAY_TYPE:
- case FREE_SPACE_TYPE:
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- case EXTERNAL_BYTE_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- case EXTERNAL_SHORT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- case EXTERNAL_INT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- break;
- case SHARED_FUNCTION_INFO_TYPE: {
- SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
- break;
- }
-
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE:
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- StructBodyDescriptor::IterateBody(this, object_size, v);
- break;
- default:
- PrintF("Unknown type: %d\n", type);
- UNREACHABLE();
- }
-}
-
-
-Object* HeapNumber::HeapNumberToBoolean() {
- // NaN, +0, and -0 should return the false object
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- union IeeeDoubleLittleEndianArchType u;
-#elif __BYTE_ORDER == __BIG_ENDIAN
- union IeeeDoubleBigEndianArchType u;
-#endif
- u.d = value();
- if (u.bits.exp == 2047) {
- // Detect NaN for IEEE double precision floating point.
- if ((u.bits.man_low | u.bits.man_high) != 0)
- return GetHeap()->false_value();
- }
- if (u.bits.exp == 0) {
- // Detect +0, and -0 for IEEE double precision floating point.
- if ((u.bits.man_low | u.bits.man_high) == 0)
- return GetHeap()->false_value();
- }
- return GetHeap()->true_value();
-}
-
-
-void HeapNumber::HeapNumberPrint(FILE* out) {
- FPrintF(out, "%.16g", Number());
-}
-
-
-void HeapNumber::HeapNumberPrint(StringStream* accumulator) {
- // The Windows version of vsnprintf can allocate when printing a %g string
- // into a buffer that may not be big enough. We don't want random memory
- // allocation when producing post-crash stack traces, so we print into a
- // buffer that is plenty big enough for any floating point number, then
- // print that using vsnprintf (which may truncate but never allocate if
- // there is no more space in the buffer).
- EmbeddedVector<char, 100> buffer;
- OS::SNPrintF(buffer, "%.16g", Number());
- accumulator->Add("%s", buffer.start());
-}
-
-
-String* JSReceiver::class_name() {
- if (IsJSFunction() && IsJSFunctionProxy()) {
- return GetHeap()->function_class_string();
- }
- if (map()->constructor()->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(map()->constructor());
- return String::cast(constructor->shared()->instance_class_name());
- }
- // If the constructor is not present, return "Object".
- return GetHeap()->Object_string();
-}
-
-
-String* JSReceiver::constructor_name() {
- if (map()->constructor()->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(map()->constructor());
- String* name = String::cast(constructor->shared()->name());
- if (name->length() > 0) return name;
- String* inferred_name = constructor->shared()->inferred_name();
- if (inferred_name->length() > 0) return inferred_name;
- Object* proto = GetPrototype();
- if (proto->IsJSObject()) return JSObject::cast(proto)->constructor_name();
- }
- // TODO(rossberg): what about proxies?
- // If the constructor is not present, return "Object".
- return GetHeap()->Object_string();
-}
-
-
-MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
- String* name,
- Object* value,
- int field_index) {
- if (map()->unused_property_fields() == 0) {
- int new_unused = new_map->unused_property_fields();
- FixedArray* values;
- { MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + new_unused + 1);
- if (!maybe_values->To(&values)) return maybe_values;
- }
- set_properties(values);
- }
- set_map(new_map);
- return FastPropertyAtPut(field_index, value);
-}
-
-
-static bool IsIdentifier(UnicodeCache* cache, String* string) {
- // Checks whether the buffer contains an identifier (no escape).
- if (string->length() == 0) return false;
- ConsStringIteratorOp op;
- StringCharacterStream stream(string, &op);
- if (!cache->IsIdentifierStart(stream.GetNext())) {
- return false;
- }
- while (stream.HasMore()) {
- if (!cache->IsIdentifierPart(stream.GetNext())) {
- return false;
- }
- }
- return true;
-}
-
-
-MaybeObject* JSObject::AddFastProperty(String* name,
- Object* value,
- PropertyAttributes attributes,
- StoreFromKeyed store_mode) {
- ASSERT(!IsJSGlobalProxy());
- ASSERT(DescriptorArray::kNotFound ==
- map()->instance_descriptors()->Search(
- name, map()->NumberOfOwnDescriptors()));
-
- // Normalize the object if the name is an actual string (not the
- // hidden strings) and is not a real identifier.
- // Normalize the object if it will have too many fast properties.
- Isolate* isolate = GetHeap()->isolate();
- if ((!IsIdentifier(isolate->unicode_cache(), name)
- && name != isolate->heap()->hidden_string()) ||
- (map()->unused_property_fields() == 0 &&
- TooManyFastProperties(properties()->length(), store_mode))) {
- Object* obj;
- MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-
- return AddSlowProperty(name, value, attributes);
- }
-
- // Compute the new index for new field.
- int index = map()->NextFreePropertyIndex();
-
- // Allocate new instance descriptors with (name, index) added
- FieldDescriptor new_field(name, index, attributes, 0);
-
- ASSERT(index < map()->inobject_properties() ||
- (index - map()->inobject_properties()) < properties()->length() ||
- map()->unused_property_fields() == 0);
-
- FixedArray* values = NULL;
-
- if (map()->unused_property_fields() == 0) {
- // Make room for the new value
- MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_values->To(&values)) return maybe_values;
- }
-
- TransitionFlag flag = INSERT_TRANSITION;
-
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- if (map()->unused_property_fields() == 0) {
- ASSERT(values != NULL);
- set_properties(values);
- new_map->set_unused_property_fields(kFieldsAdded - 1);
- } else {
- new_map->set_unused_property_fields(map()->unused_property_fields() - 1);
- }
-
- set_map(new_map);
- return FastPropertyAtPut(index, value);
-}
-
-
-MaybeObject* JSObject::AddConstantFunctionProperty(
- String* name,
- JSFunction* function,
- PropertyAttributes attributes) {
- // Allocate new instance descriptors with (name, function) added
- ConstantFunctionDescriptor d(name, function, attributes, 0);
-
- TransitionFlag flag =
- // Do not add transitions to global objects.
- (IsGlobalObject() ||
- // Don't add transitions to special properties with non-trivial
- // attributes.
- attributes != NONE)
- ? OMIT_TRANSITION
- : INSERT_TRANSITION;
-
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&d, flag);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- set_map(new_map);
- return function;
-}
-
-
-// Add property in slow mode
-MaybeObject* JSObject::AddSlowProperty(String* name,
- Object* value,
- PropertyAttributes attributes) {
- ASSERT(!HasFastProperties());
- StringDictionary* dict = property_dictionary();
- Object* store_value = value;
- if (IsGlobalObject()) {
- // In case name is an orphaned property reuse the cell.
- int entry = dict->FindEntry(name);
- if (entry != StringDictionary::kNotFound) {
- store_value = dict->ValueAt(entry);
- JSGlobalPropertyCell::cast(store_value)->set_value(value);
- // Assign an enumeration index to the property and update
- // SetNextEnumerationIndex.
- int index = dict->NextEnumerationIndex();
- PropertyDetails details = PropertyDetails(attributes, NORMAL, index);
- dict->SetNextEnumerationIndex(index + 1);
- dict->SetEntry(entry, name, store_value, details);
- return value;
- }
- Heap* heap = GetHeap();
- { MaybeObject* maybe_store_value =
- heap->AllocateJSGlobalPropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
- }
- JSGlobalPropertyCell::cast(store_value)->set_value(value);
- }
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
- Object* result;
- { MaybeObject* maybe_result = dict->Add(name, store_value, details);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- if (dict != result) set_properties(StringDictionary::cast(result));
- return value;
-}
-
-
-MaybeObject* JSObject::AddProperty(String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode,
- ExtensibilityCheck extensibility_check) {
- ASSERT(!IsJSGlobalProxy());
- Map* map_of_this = map();
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
- MaybeObject* result;
- if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
- !map_of_this->is_extensible()) {
- if (strict_mode == kNonStrictMode) {
- return value;
- } else {
- Handle<Object> args[1] = {Handle<String>(name)};
- return isolate->Throw(
- *FACTORY->NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
- }
- }
-
- if (HasFastProperties()) {
- // Ensure the descriptor array does not get too big.
- if (map_of_this->NumberOfOwnDescriptors() <
- DescriptorArray::kMaxNumberOfDescriptors) {
- if (value->IsJSFunction()) {
- result = AddConstantFunctionProperty(name,
- JSFunction::cast(value),
- attributes);
- } else {
- result = AddFastProperty(name, value, attributes, store_mode);
- }
- } else {
- // Normalize the object to prevent very large instance descriptors.
- // This eliminates unwanted N^2 allocation and lookup behavior.
- Object* obj;
- MaybeObject* maybe = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe->To(&obj)) return maybe;
- result = AddSlowProperty(name, value, attributes);
- }
- } else {
- result = AddSlowProperty(name, value, attributes);
- }
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (FLAG_harmony_observation && map()->is_observed()) {
- EnqueueChangeRecord(handle(this, isolate),
- "new",
- handle(name, isolate),
- handle(heap->the_hole_value(), isolate));
- }
-
- return *hresult;
-}
-
-
-void JSObject::EnqueueChangeRecord(Handle<JSObject> object,
- const char* type_str,
- Handle<String> name,
- Handle<Object> old_value) {
- Isolate* isolate = object->GetIsolate();
- HandleScope scope(isolate);
- Handle<String> type = isolate->factory()->InternalizeUtf8String(type_str);
- if (object->IsJSGlobalObject()) {
- object = handle(JSGlobalObject::cast(*object)->global_receiver(), isolate);
- }
- Handle<Object> args[] = { type, object, name, old_value };
- bool threw;
- Execution::Call(Handle<JSFunction>(isolate->observers_notify_change()),
- isolate->factory()->undefined_value(),
- old_value->IsTheHole() ? 3 : 4, args,
- &threw);
- ASSERT(!threw);
-}
-
-
-void JSObject::DeliverChangeRecords(Isolate* isolate) {
- ASSERT(isolate->observer_delivery_pending());
- bool threw = false;
- Execution::Call(
- isolate->observers_deliver_changes(),
- isolate->factory()->undefined_value(),
- 0,
- NULL,
- &threw);
- ASSERT(!threw);
- isolate->set_observer_delivery_pending(false);
-}
-
-
-MaybeObject* JSObject::SetPropertyPostInterceptor(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- ExtensibilityCheck extensibility_check) {
- // Check local property, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (!result.IsFound()) map()->LookupTransition(this, name, &result);
- if (result.IsFound()) {
- // An existing property or a map transition was found. Use set property to
- // handle all these cases.
- return SetProperty(&result, name, value, attributes, strict_mode);
- }
- bool done = false;
- MaybeObject* result_object;
- result_object =
- SetPropertyViaPrototypes(name, value, attributes, strict_mode, &done);
- if (done) return result_object;
- // Add a new real property.
- return AddProperty(name, value, attributes, strict_mode,
- MAY_BE_STORE_FROM_KEYED, extensibility_check);
-}
-
-
-MaybeObject* JSObject::ReplaceSlowProperty(String* name,
- Object* value,
- PropertyAttributes attributes) {
- StringDictionary* dictionary = property_dictionary();
- int old_index = dictionary->FindEntry(name);
- int new_enumeration_index = 0; // 0 means "Use the next available index."
- if (old_index != -1) {
- // All calls to ReplaceSlowProperty have had all transitions removed.
- new_enumeration_index = dictionary->DetailsAt(old_index).dictionary_index();
- }
-
- PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
- return SetNormalizedProperty(name, value, new_details);
-}
-
-
-MaybeObject* JSObject::ConvertTransitionToMapTransition(
- int transition_index,
- String* name,
- Object* new_value,
- PropertyAttributes attributes) {
- Map* old_map = map();
- Map* old_target = old_map->GetTransition(transition_index);
- Object* result;
-
- MaybeObject* maybe_result =
- ConvertDescriptorToField(name, new_value, attributes);
- if (!maybe_result->To(&result)) return maybe_result;
-
- if (!HasFastProperties()) return result;
-
- // This method should only be used to convert existing transitions.
- Map* new_map = map();
-
- // TODO(verwaest): From here on we lose existing map transitions, causing
- // invalid back pointers. This will change once we can store multiple
- // transitions with the same key.
-
- bool owned_descriptors = old_map->owns_descriptors();
- if (owned_descriptors ||
- old_target->instance_descriptors() == old_map->instance_descriptors()) {
- // Since the conversion above generated a new fast map with an additional
- // property which can be shared as well, install this descriptor pointer
- // along the entire chain of smaller maps.
- Map* map;
- DescriptorArray* new_descriptors = new_map->instance_descriptors();
- DescriptorArray* old_descriptors = old_map->instance_descriptors();
- for (Object* current = old_map;
- !current->IsUndefined();
- current = map->GetBackPointer()) {
- map = Map::cast(current);
- if (map->instance_descriptors() != old_descriptors) break;
- map->SetEnumLength(Map::kInvalidEnumCache);
- map->set_instance_descriptors(new_descriptors);
- }
- old_map->set_owns_descriptors(false);
- }
-
- old_map->SetTransition(transition_index, new_map);
- new_map->SetBackPointer(old_map);
- return result;
-}
-
-
-MaybeObject* JSObject::ConvertDescriptorToField(String* name,
- Object* new_value,
- PropertyAttributes attributes) {
- if (map()->unused_property_fields() == 0 &&
- TooManyFastProperties(properties()->length(), MAY_BE_STORE_FROM_KEYED)) {
- Object* obj;
- MaybeObject* maybe_obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- return ReplaceSlowProperty(name, new_value, attributes);
- }
-
- int index = map()->NextFreePropertyIndex();
- FieldDescriptor new_field(name, index, attributes, 0);
-
- // Make a new map for the object.
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyInsertDescriptor(&new_field,
- OMIT_TRANSITION);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- // Make new properties array if necessary.
- FixedArray* new_properties = NULL;
- int new_unused_property_fields = map()->unused_property_fields() - 1;
- if (map()->unused_property_fields() == 0) {
- new_unused_property_fields = kFieldsAdded - 1;
- MaybeObject* maybe_new_properties =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_new_properties->To(&new_properties)) return maybe_new_properties;
- }
-
- // Update pointers to commit changes.
- // Object points to the new map.
- new_map->set_unused_property_fields(new_unused_property_fields);
- set_map(new_map);
- if (new_properties != NULL) {
- set_properties(new_properties);
- }
- return FastPropertyAtPut(index, new_value);
-}
-
-
-
-MaybeObject* JSObject::SetPropertyWithInterceptor(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSObject> this_handle(this);
- Handle<String> name_handle(name);
- Handle<Object> value_handle(value, isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
- if (!interceptor->setter()->IsUndefined()) {
- LOG(isolate, ApiNamedPropertyAccess("interceptor-named-set", this, name));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
- v8::NamedPropertySetter setter =
- v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- Handle<Object> value_unhole(value->IsTheHole() ?
- isolate->heap()->undefined_value() :
- value,
- isolate);
- result = setter(v8::Utils::ToLocal(name_handle),
- v8::Utils::ToLocal(value_unhole),
- info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) return *value_handle;
- }
- MaybeObject* raw_result =
- this_handle->SetPropertyPostInterceptor(*name_handle,
- *value_handle,
- attributes,
- strict_mode,
- PERFORM_EXTENSIBILITY_CHECK);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
-}
-
-
-Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool skip_fallback_interceptor) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetProperty(*key, *value, attributes, strict_mode,
- MAY_BE_STORE_FROM_KEYED,
- skip_fallback_interceptor),
- Object);
-}
-
-
-MaybeObject* JSReceiver::SetProperty(String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode,
- bool skip_fallback_interceptor) {
- LookupResult result(GetIsolate());
- LocalLookup(name, &result, true, skip_fallback_interceptor);
- if (!result.IsFound()) {
- map()->LookupTransition(JSObject::cast(this), name, &result);
- }
- return SetProperty(&result, name, value, attributes, strict_mode, store_mode);
-}
-
-
-MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
- String* name,
- Object* value,
- JSObject* holder,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
-
- // We should never get here to initialize a const with the hole
- // value since a const declaration would conflict with the setter.
- ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value, isolate);
-
- // To accommodate both the old and the new api we switch on the
- // data structure used to store the callbacks. Eventually foreign
- // callbacks should be phased out.
- if (structure->IsForeign()) {
- AccessorDescriptor* callback =
- reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->foreign_address());
- MaybeObject* obj = (callback->setter)(this, value, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (obj->IsFailure()) return obj;
- return *value_handle;
- }
-
- if (structure->IsExecutableAccessorInfo()) {
- // api style callbacks
- ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
- if (!data->IsCompatibleReceiver(this)) {
- Handle<Object> name_handle(name, isolate);
- Handle<Object> receiver_handle(this, isolate);
- Handle<Object> args[2] = { name_handle, receiver_handle };
- Handle<Object> error =
- isolate->factory()->NewTypeError("incompatible_method_receiver",
- HandleVector(args,
- ARRAY_SIZE(args)));
- return isolate->Throw(*error);
- }
- Object* call_obj = data->setter();
- v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
- if (call_fun == NULL) return value;
- Handle<String> key(name);
- LOG(isolate, ApiNamedPropertyAccess("store", this, name));
- CustomArguments args(isolate, data->data(), this, JSObject::cast(holder));
- v8::AccessorInfo info(args.end());
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- call_fun(v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle),
- info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
- }
-
- if (structure->IsAccessorPair()) {
- Object* setter = AccessorPair::cast(structure)->setter();
- if (setter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value);
- } else {
- if (strict_mode == kNonStrictMode) {
- return value;
- }
- Handle<String> key(name);
- Handle<Object> holder_handle(holder, isolate);
- Handle<Object> args[2] = { key, holder_handle };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
- }
- }
-
- // TODO(dcarney): Handle correctly.
- if (structure->IsDeclaredAccessorInfo()) {
- return value;
- }
-
- UNREACHABLE();
- return NULL;
-}
-
-
-MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
- Object* value) {
- Isolate* isolate = GetIsolate();
- Handle<Object> value_handle(value, isolate);
- Handle<JSReceiver> fun(setter, isolate);
- Handle<JSReceiver> self(this, isolate);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = isolate->debug();
- // Handle stepping into a setter if step into is active.
- // TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->StepInActive() && fun->IsJSFunction()) {
- debug->HandleStepIn(
- Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
- }
-#endif
- bool has_pending_exception;
- Handle<Object> argv[] = { value_handle };
- Execution::Call(fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
- // Check for pending exception and return the result.
- if (has_pending_exception) return Failure::Exception();
- return *value_handle;
-}
-
-
-MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
- uint32_t index,
- Object* value,
- bool* found,
- StrictModeFlag strict_mode) {
- Heap* heap = GetHeap();
- for (Object* pt = GetPrototype();
- pt != heap->null_value();
- pt = pt->GetPrototype(GetIsolate())) {
- if (pt->IsJSProxy()) {
- String* name;
- MaybeObject* maybe = heap->Uint32ToString(index);
- if (!maybe->To<String>(&name)) {
- *found = true; // Force abort
- return maybe;
- }
- return JSProxy::cast(pt)->SetPropertyViaPrototypesWithHandler(
- this, name, value, NONE, strict_mode, found);
- }
- if (!JSObject::cast(pt)->HasDictionaryElements()) {
- continue;
- }
- SeededNumberDictionary* dictionary =
- JSObject::cast(pt)->element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != SeededNumberDictionary::kNotFound) {
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
- *found = true;
- return SetElementWithCallback(dictionary->ValueAt(entry),
- index,
- value,
- JSObject::cast(pt),
- strict_mode);
- }
- }
- }
- *found = false;
- return heap->the_hole_value();
-}
-
-MaybeObject* JSObject::SetPropertyViaPrototypes(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool* done) {
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
-
- *done = false;
- // We could not find a local property so let's check whether there is an
- // accessor that wants to handle the property, or whether the property is
- // read-only on the prototype chain.
- LookupResult result(isolate);
- LookupRealNamedPropertyInPrototypes(name, &result);
- if (result.IsFound()) {
- switch (result.type()) {
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- *done = result.IsReadOnly();
- break;
- case INTERCEPTOR: {
- PropertyAttributes attr =
- result.holder()->GetPropertyAttributeWithInterceptor(
- this, name, true);
- *done = !!(attr & READ_ONLY);
- break;
- }
- case CALLBACKS: {
- if (!FLAG_es5_readonly && result.IsReadOnly()) break;
- *done = true;
- return SetPropertyWithCallback(result.GetCallbackObject(),
- name, value, result.holder(), strict_mode);
- }
- case HANDLER: {
- return result.proxy()->SetPropertyViaPrototypesWithHandler(
- this, name, value, attributes, strict_mode, done);
- }
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- }
-
- // If we get here with *done true, we have encountered a read-only property.
- if (!FLAG_es5_readonly) *done = false;
- if (*done) {
- if (strict_mode == kNonStrictMode) return value;
- Handle<Object> args[] = { Handle<Object>(name, isolate),
- Handle<Object>(this, isolate)};
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
- }
- return heap->the_hole_value();
-}
-
-
-enum RightTrimMode { FROM_GC, FROM_MUTATOR };
-
-
-static void ZapEndOfFixedArray(Address new_end, int to_trim) {
- // If we are doing a big trim in old space then we zap the space.
- Object** zap = reinterpret_cast<Object**>(new_end);
- zap++; // Header of filler must be at least one word so skip that.
- for (int i = 1; i < to_trim; i++) {
- *zap++ = Smi::FromInt(0);
- }
-}
-
-
-template<RightTrimMode trim_mode>
-static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
- ASSERT(elms->map() != HEAP->fixed_cow_array_map());
- // For now this trick is only applied to fixed arrays in new and paged space.
- ASSERT(!HEAP->lo_space()->Contains(elms));
-
- const int len = elms->length();
-
- ASSERT(to_trim < len);
-
- Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
-
- if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
- ZapEndOfFixedArray(new_end, to_trim);
- }
-
- int size_delta = to_trim * kPointerSize;
-
- // Technically in new space this write might be omitted (except for
- // debug mode which iterates through the heap), but to play safer
- // we still do it.
- heap->CreateFillerObjectAt(new_end, size_delta);
-
- elms->set_length(len - to_trim);
-
- // Maintain marking consistency for IncrementalMarking.
- if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
- if (trim_mode == FROM_GC) {
- MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
- } else {
- MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
- }
- }
-}
-
-
-void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
- Handle<DescriptorArray> descriptors(map->instance_descriptors());
- if (slack <= descriptors->NumberOfSlackDescriptors()) return;
- int number_of_descriptors = descriptors->number_of_descriptors();
- Isolate* isolate = map->GetIsolate();
- Handle<DescriptorArray> new_descriptors =
- isolate->factory()->NewDescriptorArray(number_of_descriptors, slack);
- DescriptorArray::WhitenessWitness witness(*new_descriptors);
-
- for (int i = 0; i < number_of_descriptors; ++i) {
- new_descriptors->CopyFrom(i, *descriptors, i, witness);
- }
-
- map->set_instance_descriptors(*new_descriptors);
-}
-
-
-void Map::AppendCallbackDescriptors(Handle<Map> map,
- Handle<Object> descriptors) {
- Isolate* isolate = map->GetIsolate();
- Handle<DescriptorArray> array(map->instance_descriptors());
- NeanderArray callbacks(descriptors);
- int nof_callbacks = callbacks.length();
-
- ASSERT(array->NumberOfSlackDescriptors() >= nof_callbacks);
-
- // Ensure the keys are internalized strings before writing them into the
- // instance descriptor. Since it may cause a GC, it has to be done before we
- // temporarily put the heap in an invalid state while appending descriptors.
- for (int i = 0; i < nof_callbacks; ++i) {
- Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks.get(i)));
- Handle<String> key =
- isolate->factory()->InternalizedStringFromString(
- Handle<String>(String::cast(entry->name())));
- entry->set_name(*key);
- }
-
- int nof = map->NumberOfOwnDescriptors();
-
- // Fill in new callback descriptors. Process the callbacks from
- // back to front so that the last callback with a given name takes
- // precedence over previously added callbacks with that name.
- for (int i = nof_callbacks - 1; i >= 0; i--) {
- AccessorInfo* entry = AccessorInfo::cast(callbacks.get(i));
- String* key = String::cast(entry->name());
- // Check if a descriptor with this name already exists before writing.
- if (array->Search(key, nof) == DescriptorArray::kNotFound) {
- CallbacksDescriptor desc(key, entry, entry->property_attributes());
- array->Append(&desc);
- nof += 1;
- }
- }
-
- map->SetNumberOfOwnDescriptors(nof);
-}
-
-
-static bool ContainsMap(MapHandleList* maps, Handle<Map> map) {
- ASSERT(!map.is_null());
- for (int i = 0; i < maps->length(); ++i) {
- if (!maps->at(i).is_null() && maps->at(i).is_identical_to(map)) return true;
- }
- return false;
-}
-
-
-template <class T>
-static Handle<T> MaybeNull(T* p) {
- if (p == NULL) return Handle<T>::null();
- return Handle<T>(p);
-}
-
-
-Handle<Map> Map::FindTransitionedMap(MapHandleList* candidates) {
- ElementsKind kind = elements_kind();
- Handle<Map> transitioned_map = Handle<Map>::null();
- Handle<Map> current_map(this);
- bool packed = IsFastPackedElementsKind(kind);
- if (IsTransitionableFastElementsKind(kind)) {
- while (CanTransitionToMoreGeneralFastElementsKind(kind, false)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, false);
- Handle<Map> maybe_transitioned_map =
- MaybeNull(current_map->LookupElementsTransitionMap(kind));
- if (maybe_transitioned_map.is_null()) break;
- if (ContainsMap(candidates, maybe_transitioned_map) &&
- (packed || !IsFastPackedElementsKind(kind))) {
- transitioned_map = maybe_transitioned_map;
- if (!IsFastPackedElementsKind(kind)) packed = false;
- }
- current_map = maybe_transitioned_map;
- }
- }
- return transitioned_map;
-}
-
-
-static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
- Map* current_map = map;
- int index = GetSequenceIndexFromFastElementsKind(map->elements_kind());
- int to_index = IsFastElementsKind(to_kind)
- ? GetSequenceIndexFromFastElementsKind(to_kind)
- : GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
-
- ASSERT(index <= to_index);
-
- for (; index < to_index; ++index) {
- if (!current_map->HasElementsTransition()) return current_map;
- current_map = current_map->elements_transition_map();
- }
- if (!IsFastElementsKind(to_kind) && current_map->HasElementsTransition()) {
- Map* next_map = current_map->elements_transition_map();
- if (next_map->elements_kind() == to_kind) return next_map;
- }
- ASSERT(IsFastElementsKind(to_kind)
- ? current_map->elements_kind() == to_kind
- : current_map->elements_kind() == TERMINAL_FAST_ELEMENTS_KIND);
- return current_map;
-}
-
-
-Map* Map::LookupElementsTransitionMap(ElementsKind to_kind) {
- Map* to_map = FindClosestElementsTransition(this, to_kind);
- if (to_map->elements_kind() == to_kind) return to_map;
- return NULL;
-}
-
-
-static MaybeObject* AddMissingElementsTransitions(Map* map,
- ElementsKind to_kind) {
- ASSERT(IsFastElementsKind(map->elements_kind()));
- int index = GetSequenceIndexFromFastElementsKind(map->elements_kind());
- int to_index = IsFastElementsKind(to_kind)
- ? GetSequenceIndexFromFastElementsKind(to_kind)
- : GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
-
- ASSERT(index <= to_index);
-
- Map* current_map = map;
-
- for (; index < to_index; ++index) {
- ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(index + 1);
- MaybeObject* maybe_next_map =
- current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
- if (!maybe_next_map->To(&current_map)) return maybe_next_map;
- }
-
- // In case we are exiting the fast elements kind system, just add the map in
- // the end.
- if (!IsFastElementsKind(to_kind)) {
- MaybeObject* maybe_next_map =
- current_map->CopyAsElementsKind(to_kind, INSERT_TRANSITION);
- if (!maybe_next_map->To(&current_map)) return maybe_next_map;
- }
-
- ASSERT(current_map->elements_kind() == to_kind);
- return current_map;
-}
-
-
-Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
- ElementsKind to_kind) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- object->GetElementsTransitionMap(isolate, to_kind),
- Map);
-}
-
-
-MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
- Map* start_map = map();
- ElementsKind from_kind = start_map->elements_kind();
-
- if (from_kind == to_kind) {
- return start_map;
- }
-
- bool allow_store_transition =
- // Only remember the map transition if there is not an already existing
- // non-matching element transition.
- !start_map->IsUndefined() && !start_map->is_shared() &&
- IsFastElementsKind(from_kind);
-
- // Only store fast element maps in ascending generality.
- if (IsFastElementsKind(to_kind)) {
- allow_store_transition &=
- IsTransitionableFastElementsKind(from_kind) &&
- IsMoreGeneralElementsKindTransition(from_kind, to_kind);
- }
-
- if (!allow_store_transition) {
- return start_map->CopyAsElementsKind(to_kind, OMIT_TRANSITION);
- }
-
- Map* closest_map = FindClosestElementsTransition(start_map, to_kind);
-
- if (closest_map->elements_kind() == to_kind) {
- return closest_map;
- }
-
- return AddMissingElementsTransitions(closest_map, to_kind);
-}
-
-
-void JSObject::LocalLookupRealNamedProperty(String* name,
- LookupResult* result) {
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return result->NotFound();
- ASSERT(proto->IsJSGlobalObject());
- // A GlobalProxy's prototype should always be a proper JSObject.
- return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result);
- }
-
- if (HasFastProperties()) {
- map()->LookupDescriptor(this, name, result);
- // A property or a map transition was found. We return all of these result
- // types because LocalLookupRealNamedProperty is used when setting
- // properties where map transitions are handled.
- ASSERT(!result->IsFound() ||
- (result->holder() == this && result->IsFastPropertyType()));
- // Disallow caching for uninitialized constants. These can only
- // occur as fields.
- if (result->IsField() &&
- result->IsReadOnly() &&
- FastPropertyAt(result->GetFieldIndex().field_index())->IsTheHole()) {
- result->DisallowCaching();
- }
- return;
- }
-
- int entry = property_dictionary()->FindEntry(name);
- if (entry != StringDictionary::kNotFound) {
- Object* value = property_dictionary()->ValueAt(entry);
- if (IsGlobalObject()) {
- PropertyDetails d = property_dictionary()->DetailsAt(entry);
- if (d.IsDeleted()) {
- result->NotFound();
- return;
- }
- value = JSGlobalPropertyCell::cast(value)->value();
- }
- // Make sure to disallow caching for uninitialized constants
- // found in the dictionary-mode objects.
- if (value->IsTheHole()) result->DisallowCaching();
- result->DictionaryResult(this, entry);
- return;
- }
-
- result->NotFound();
-}
-
-
-void JSObject::LookupRealNamedProperty(String* name, LookupResult* result) {
- LocalLookupRealNamedProperty(name, result);
- if (result->IsFound()) return;
-
- LookupRealNamedPropertyInPrototypes(name, result);
-}
-
-
-void JSObject::LookupRealNamedPropertyInPrototypes(String* name,
- LookupResult* result) {
- Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
- for (Object* pt = GetPrototype();
- pt != heap->null_value();
- pt = pt->GetPrototype(isolate)) {
- if (pt->IsJSProxy()) {
- return result->HandlerResult(JSProxy::cast(pt));
- }
- JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
- ASSERT(!(result->IsFound() && result->type() == INTERCEPTOR));
- if (result->IsFound()) return;
- }
- result->NotFound();
-}
-
-
-// We only need to deal with CALLBACKS and INTERCEPTORS
-MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
- LookupResult* result,
- String* name,
- Object* value,
- bool check_prototype,
- StrictModeFlag strict_mode) {
- if (check_prototype && !result->IsProperty()) {
- LookupRealNamedPropertyInPrototypes(name, result);
- }
-
- if (result->IsProperty()) {
- if (!result->IsReadOnly()) {
- switch (result->type()) {
- case CALLBACKS: {
- Object* obj = result->GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
- if (info->all_can_write()) {
- return SetPropertyWithCallback(result->GetCallbackObject(),
- name,
- value,
- result->holder(),
- strict_mode);
- }
- }
- break;
- }
- case INTERCEPTOR: {
- // Try lookup real named properties. Note that only property can be
- // set is callbacks marked as ALL_CAN_WRITE on the prototype chain.
- LookupResult r(GetIsolate());
- LookupRealNamedProperty(name, &r);
- if (r.IsProperty()) {
- return SetPropertyWithFailedAccessCheck(&r,
- name,
- value,
- check_prototype,
- strict_mode);
- }
- break;
- }
- default: {
- break;
- }
- }
- }
- }
-
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> value_handle(value, isolate);
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return *value_handle;
-}
-
-
-MaybeObject* JSReceiver::SetProperty(LookupResult* result,
- String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode) {
- if (result->IsHandler()) {
- return result->proxy()->SetPropertyWithHandler(
- this, key, value, attributes, strict_mode);
- } else {
- return JSObject::cast(this)->SetPropertyForResult(
- result, key, value, attributes, strict_mode, store_mode);
- }
-}
-
-
-bool JSProxy::HasPropertyWithHandler(String* name_raw) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> receiver(this, isolate);
- Handle<Object> name(name_raw, isolate);
-
- Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
- "has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return false;
-
- return result->ToBoolean()->IsTrue();
-}
-
-
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler(
- JSReceiver* receiver_raw,
- String* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<Object> name(name_raw, isolate);
- Handle<Object> value(value_raw, isolate);
-
- Handle<Object> args[] = { receiver, name, value };
- CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
-
- return *value;
-}
-
-
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
- JSReceiver* receiver_raw,
- String* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool* done) {
- Isolate* isolate = GetIsolate();
- Handle<JSProxy> proxy(this);
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<String> name(name_raw);
- Handle<Object> value(value_raw, isolate);
- Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy.
-
- *done = true; // except where redefined...
- Handle<Object> args[] = { name };
- Handle<Object> result = proxy->CallTrap(
- "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
-
- if (result->IsUndefined()) {
- *done = false;
- return GetHeap()->the_hole_value();
- }
-
- // Emulate [[GetProperty]] semantics for proxies.
- bool has_pending_exception;
- Handle<Object> argv[] = { result };
- Handle<Object> desc =
- Execution::Call(isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
-
- // [[GetProperty]] requires to check that all properties are configurable.
- Handle<String> configurable_name =
- isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("configurable_"));
- Handle<Object> configurable(
- v8::internal::GetProperty(isolate, desc, configurable_name));
- ASSERT(!isolate->has_pending_exception());
- ASSERT(configurable->IsTrue() || configurable->IsFalse());
- if (configurable->IsFalse()) {
- Handle<String> trap =
- isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("getPropertyDescriptor"));
- Handle<Object> args[] = { handler, trap, name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
- }
- ASSERT(configurable->IsTrue());
-
- // Check for DataDescriptor.
- Handle<String> hasWritable_name =
- isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("hasWritable_"));
- Handle<Object> hasWritable(
- v8::internal::GetProperty(isolate, desc, hasWritable_name));
- ASSERT(!isolate->has_pending_exception());
- ASSERT(hasWritable->IsTrue() || hasWritable->IsFalse());
- if (hasWritable->IsTrue()) {
- Handle<String> writable_name =
- isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("writable_"));
- Handle<Object> writable(
- v8::internal::GetProperty(isolate, desc, writable_name));
- ASSERT(!isolate->has_pending_exception());
- ASSERT(writable->IsTrue() || writable->IsFalse());
- *done = writable->IsFalse();
- if (!*done) return GetHeap()->the_hole_value();
- if (strict_mode == kNonStrictMode) return *value;
- Handle<Object> args[] = { name, receiver };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
- }
-
- // We have an AccessorDescriptor.
- Handle<String> set_name = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("set_"));
- Handle<Object> setter(v8::internal::GetProperty(isolate, desc, set_name));
- ASSERT(!isolate->has_pending_exception());
- if (!setter->IsUndefined()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return receiver->SetPropertyWithDefinedSetter(
- JSReceiver::cast(*setter), *value);
- }
-
- if (strict_mode == kNonStrictMode) return *value;
- Handle<Object> args2[] = { name, proxy };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2)));
- return isolate->Throw(*error);
-}
-
-
-MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
- String* name_raw, DeleteMode mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSProxy> receiver(this);
- Handle<Object> name(name_raw, isolate);
-
- Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
- "delete", Handle<Object>(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
-
- Object* bool_result = result->ToBoolean();
- if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) {
- Handle<Object> handler(receiver->handler(), isolate);
- Handle<String> trap_name = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("delete"));
- Handle<Object> args[] = { handler, trap_name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "handler_failed", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Failure::Exception();
- }
- return bool_result;
-}
-
-
-MUST_USE_RESULT MaybeObject* JSProxy::DeleteElementWithHandler(
- uint32_t index,
- DeleteMode mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- return JSProxy::DeletePropertyWithHandler(*name, mode);
-}
-
-
-MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
- JSReceiver* receiver_raw,
- String* name_raw) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSProxy> proxy(this);
- Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy.
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<Object> name(name_raw, isolate);
-
- Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
- "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return NONE;
-
- if (result->IsUndefined()) return ABSENT;
-
- bool has_pending_exception;
- Handle<Object> argv[] = { result };
- Handle<Object> desc =
- Execution::Call(isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
- if (has_pending_exception) return NONE;
-
- // Convert result to PropertyAttributes.
- Handle<String> enum_n = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("enumerable"));
- Handle<Object> enumerable(v8::internal::GetProperty(isolate, desc, enum_n));
- if (isolate->has_pending_exception()) return NONE;
- Handle<String> conf_n = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("configurable"));
- Handle<Object> configurable(v8::internal::GetProperty(isolate, desc, conf_n));
- if (isolate->has_pending_exception()) return NONE;
- Handle<String> writ_n = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("writable"));
- Handle<Object> writable(v8::internal::GetProperty(isolate, desc, writ_n));
- if (isolate->has_pending_exception()) return NONE;
-
- if (configurable->IsFalse()) {
- Handle<String> trap = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("getPropertyDescriptor"));
- Handle<Object> args[] = { handler, trap, name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return NONE;
- }
-
- int attributes = NONE;
- if (enumerable->ToBoolean()->IsFalse()) attributes |= DONT_ENUM;
- if (configurable->ToBoolean()->IsFalse()) attributes |= DONT_DELETE;
- if (writable->ToBoolean()->IsFalse()) attributes |= READ_ONLY;
- return static_cast<PropertyAttributes>(attributes);
-}
-
-
-MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
- JSReceiver* receiver_raw,
- uint32_t index) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSProxy> proxy(this);
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- return proxy->GetPropertyAttributeWithHandler(*receiver, *name);
-}
-
-
-void JSProxy::Fix() {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSProxy> self(this);
-
- // Save identity hash.
- MaybeObject* maybe_hash = GetIdentityHash(OMIT_CREATION);
-
- if (IsJSFunctionProxy()) {
- isolate->factory()->BecomeJSFunction(self);
- // Code will be set on the JavaScript side.
- } else {
- isolate->factory()->BecomeJSObject(self);
- }
- ASSERT(self->IsJSObject());
-
- // Inherit identity, if it was present.
- Object* hash;
- if (maybe_hash->To<Object>(&hash) && hash->IsSmi()) {
- Handle<JSObject> new_self(JSObject::cast(*self));
- isolate->factory()->SetIdentityHash(new_self, Smi::cast(hash));
- }
-}
-
-
-MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
- Handle<Object> derived,
- int argc,
- Handle<Object> argv[]) {
- Isolate* isolate = GetIsolate();
- Handle<Object> handler(this->handler(), isolate);
-
- Handle<String> trap_name = isolate->factory()->InternalizeUtf8String(name);
- Handle<Object> trap(v8::internal::GetProperty(isolate, handler, trap_name));
- if (isolate->has_pending_exception()) return trap;
-
- if (trap->IsUndefined()) {
- if (derived.is_null()) {
- Handle<Object> args[] = { handler, trap_name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>();
- }
- trap = Handle<Object>(derived);
- }
-
- bool threw;
- return Execution::Call(trap, handler, argc, argv, &threw);
-}
-
-
-void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
- Handle<Map> map) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->AddFastPropertyUsingMap(*map));
-}
-
-
-MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
- String* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode) {
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc;
-
- // Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. We internalize these short keys to avoid constantly
- // reallocating them.
- if (!name_raw->IsInternalizedString() && name_raw->length() <= 2) {
- Object* internalized_version;
- { MaybeObject* maybe_string_version = heap->InternalizeString(name_raw);
- if (maybe_string_version->ToObject(&internalized_version)) {
- name_raw = String::cast(internalized_version);
- }
- }
- }
-
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(
- lookup, name_raw, value_raw, true, strict_mode);
- }
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetPropertyForResult(
- lookup, name_raw, value_raw, attributes, strict_mode, store_mode);
- }
-
- ASSERT(!lookup->IsFound() || lookup->holder() == this ||
- lookup->holder()->map()->is_hidden_prototype());
-
- // From this point on everything needs to be handlified, because
- // SetPropertyViaPrototypes might call back into JavaScript.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<String> name(name_raw);
- Handle<Object> value(value_raw, isolate);
-
- if (!lookup->IsProperty() && !self->IsJSContextExtensionObject()) {
- bool done = false;
- MaybeObject* result_object = self->SetPropertyViaPrototypes(
- *name, *value, attributes, strict_mode, &done);
- if (done) return result_object;
- }
-
- if (!lookup->IsFound()) {
- // Neither properties nor transitions found.
- return self->AddProperty(
- *name, *value, attributes, strict_mode, store_mode);
- }
-
- if (lookup->IsProperty() && lookup->IsReadOnly()) {
- if (strict_mode == kStrictMode) {
- Handle<Object> args[] = { name, self };
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
- } else {
- return *value;
- }
- }
-
- Handle<Object> old_value(heap->the_hole_value(), isolate);
- if (FLAG_harmony_observation &&
- map()->is_observed() && lookup->IsDataProperty()) {
- old_value = Object::GetProperty(self, name);
- }
-
- // This is a real property that is not read-only, or it is a
- // transition or null descriptor and there are no setters in the prototypes.
- MaybeObject* result = *value;
- switch (lookup->type()) {
- case NORMAL:
- result = lookup->holder()->SetNormalizedProperty(lookup, *value);
- break;
- case FIELD:
- result = lookup->holder()->FastPropertyAtPut(
- lookup->GetFieldIndex().field_index(), *value);
- break;
- case CONSTANT_FUNCTION:
- // Only replace the function if necessary.
- if (*value == lookup->GetConstantFunction()) return *value;
- // Preserve the attributes of this existing property.
- attributes = lookup->GetAttributes();
- result =
- lookup->holder()->ConvertDescriptorToField(*name, *value, attributes);
- break;
- case CALLBACKS: {
- Object* callback_object = lookup->GetCallbackObject();
- return self->SetPropertyWithCallback(
- callback_object, *name, *value, lookup->holder(), strict_mode);
- }
- case INTERCEPTOR:
- result = lookup->holder()->SetPropertyWithInterceptor(
- *name, *value, attributes, strict_mode);
- break;
- case TRANSITION: {
- Map* transition_map = lookup->GetTransitionTarget();
- int descriptor = transition_map->LastAdded();
-
- DescriptorArray* descriptors = transition_map->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
-
- if (details.type() == FIELD) {
- if (attributes == details.attributes()) {
- int field_index = descriptors->GetFieldIndex(descriptor);
- result = lookup->holder()->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index);
- } else {
- result = lookup->holder()->ConvertDescriptorToField(
- *name, *value, attributes);
- }
- } else if (details.type() == CALLBACKS) {
- result = lookup->holder()->ConvertDescriptorToField(
- *name, *value, attributes);
- } else {
- ASSERT(details.type() == CONSTANT_FUNCTION);
-
- Object* constant_function = descriptors->GetValue(descriptor);
- if (constant_function == *value) {
- // If the same constant function is being added we can simply
- // transition to the target map.
- lookup->holder()->set_map(transition_map);
- result = constant_function;
- } else {
- // Otherwise, replace with a map transition to a new map with a FIELD,
- // even if the value is a constant function.
- result = lookup->holder()->ConvertTransitionToMapTransition(
- lookup->GetTransitionIndex(), *name, *value, attributes);
- }
- }
- break;
- }
- case HANDLER:
- case NONEXISTENT:
- UNREACHABLE();
- }
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (FLAG_harmony_observation && map()->is_observed()) {
- if (lookup->IsTransition()) {
- EnqueueChangeRecord(self, "new", name, old_value);
- } else {
- LookupResult new_lookup(isolate);
- self->LocalLookup(*name, &new_lookup, true);
- if (new_lookup.IsDataProperty() &&
- !Object::GetProperty(self, name)->SameValue(*old_value)) {
- EnqueueChangeRecord(self, "updated", name, old_value);
- }
- }
- }
-
- return *hresult;
-}
-
-
-// Set a real local property, even if it is READ_ONLY. If the property is not
-// present, add it with attributes NONE. This code is an exact clone of
-// SetProperty, with the check for IsReadOnly and the check for a
-// callback setter removed. The two lines looking up the LookupResult
-// result are also added. If one of the functions is changed, the other
-// should be.
-// Note that this method cannot be used to set the prototype of a function
-// because ConvertDescriptorToField() which is called in "case CALLBACKS:"
-// doesn't handle function prototypes correctly.
-Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
- Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
- Object);
-}
-
-
-MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
- String* name_raw,
- Object* value_raw,
- PropertyAttributes attributes) {
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc;
- Isolate* isolate = GetIsolate();
- LookupResult lookup(isolate);
- LocalLookup(name_raw, &lookup, true);
- if (!lookup.IsFound()) map()->LookupTransition(this, name_raw, &lookup);
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(&lookup,
- name_raw,
- value_raw,
- false,
- kNonStrictMode);
- }
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetLocalPropertyIgnoreAttributes(
- name_raw,
- value_raw,
- attributes);
- }
-
- // Check for accessor in prototype chain removed here in clone.
- if (!lookup.IsFound()) {
- // Neither properties nor transitions found.
- return AddProperty(name_raw, value_raw, attributes, kNonStrictMode);
- }
-
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<String> name(name_raw);
- Handle<Object> value(value_raw, isolate);
-
- Handle<Object> old_value(isolate->heap()->the_hole_value(), isolate);
- PropertyAttributes old_attributes = ABSENT;
- bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
- if (is_observed) {
- if (lookup.IsDataProperty()) old_value = Object::GetProperty(self, name);
- old_attributes = lookup.GetAttributes();
- }
-
- // Check of IsReadOnly removed from here in clone.
- MaybeObject* result = *value;
- switch (lookup.type()) {
- case NORMAL: {
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
- result = self->SetNormalizedProperty(*name, *value, details);
- break;
- }
- case FIELD:
- result = self->FastPropertyAtPut(
- lookup.GetFieldIndex().field_index(), *value);
- break;
- case CONSTANT_FUNCTION:
- // Only replace the function if necessary.
- if (*value != lookup.GetConstantFunction()) {
- // Preserve the attributes of this existing property.
- attributes = lookup.GetAttributes();
- result = self->ConvertDescriptorToField(*name, *value, attributes);
- }
- break;
- case CALLBACKS:
- case INTERCEPTOR:
- // Override callback in clone
- result = self->ConvertDescriptorToField(*name, *value, attributes);
- break;
- case TRANSITION: {
- Map* transition_map = lookup.GetTransitionTarget();
- int descriptor = transition_map->LastAdded();
-
- DescriptorArray* descriptors = transition_map->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
-
- if (details.type() == FIELD) {
- if (attributes == details.attributes()) {
- int field_index = descriptors->GetFieldIndex(descriptor);
- result = self->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index);
- } else {
- result = self->ConvertDescriptorToField(*name, *value, attributes);
- }
- } else if (details.type() == CALLBACKS) {
- result = self->ConvertDescriptorToField(*name, *value, attributes);
- } else {
- ASSERT(details.type() == CONSTANT_FUNCTION);
-
- // Replace transition to CONSTANT FUNCTION with a map transition to a
- // new map with a FIELD, even if the value is a function.
- result = self->ConvertTransitionToMapTransition(
- lookup.GetTransitionIndex(), *name, *value, attributes);
- }
- break;
- }
- case HANDLER:
- case NONEXISTENT:
- UNREACHABLE();
- }
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (is_observed) {
- if (lookup.IsTransition()) {
- EnqueueChangeRecord(self, "new", name, old_value);
- } else if (old_value->IsTheHole()) {
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
- } else {
- LookupResult new_lookup(isolate);
- self->LocalLookup(*name, &new_lookup, true);
- bool value_changed = new_lookup.IsDataProperty() &&
- !old_value->SameValue(*Object::GetProperty(self, name));
- if (new_lookup.GetAttributes() != old_attributes) {
- if (!value_changed) old_value = isolate->factory()->the_hole_value();
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
- } else if (value_changed) {
- EnqueueChangeRecord(self, "updated", name, old_value);
- }
- }
- }
-
- return *hresult;
-}
-
-
-PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
- JSObject* receiver,
- String* name,
- bool continue_search) {
- // Check local property, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (result.IsFound()) return result.GetAttributes();
-
- if (continue_search) {
- // Continue searching via the prototype chain.
- Object* pt = GetPrototype();
- if (!pt->IsNull()) {
- return JSObject::cast(pt)->
- GetPropertyAttributeWithReceiver(receiver, name);
- }
- }
- return ABSENT;
-}
-
-
-PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
- JSObject* receiver,
- String* name,
- bool continue_search) {
- Isolate* isolate = GetIsolate();
-
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
-
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
- Handle<JSObject> receiver_handle(receiver);
- Handle<JSObject> holder_handle(this);
- Handle<String> name_handle(name);
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
- if (!interceptor->query()->IsUndefined()) {
- v8::NamedPropertyQuery query =
- v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
- v8::Handle<v8::Integer> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = query(v8::Utils::ToLocal(name_handle), info);
- }
- if (!result.IsEmpty()) {
- ASSERT(result->IsInt32());
- return static_cast<PropertyAttributes>(result->Int32Value());
- }
- } else if (!interceptor->getter()->IsUndefined()) {
- v8::NamedPropertyGetter getter =
- v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(v8::Utils::ToLocal(name_handle), info);
- }
- if (!result.IsEmpty()) return DONT_ENUM;
- }
- return holder_handle->GetPropertyAttributePostInterceptor(*receiver_handle,
- *name_handle,
- continue_search);
-}
-
-
-PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver(
- JSReceiver* receiver,
- String* key) {
- uint32_t index = 0;
- if (IsJSObject() && key->AsArrayIndex(&index)) {
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- receiver, index, true);
- }
- // Named property.
- LookupResult lookup(GetIsolate());
- Lookup(key, &lookup);
- return GetPropertyAttributeForResult(receiver, &lookup, key, true);
-}
-
-
-PropertyAttributes JSReceiver::GetPropertyAttributeForResult(
- JSReceiver* receiver,
- LookupResult* lookup,
- String* name,
- bool continue_search) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- JSObject* this_obj = JSObject::cast(this);
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayNamedAccess(this_obj, name, v8::ACCESS_HAS)) {
- return this_obj->GetPropertyAttributeWithFailedAccessCheck(
- receiver, lookup, name, continue_search);
- }
- }
- if (lookup->IsFound()) {
- switch (lookup->type()) {
- case NORMAL: // fall through
- case FIELD:
- case CONSTANT_FUNCTION:
- case CALLBACKS:
- return lookup->GetAttributes();
- case HANDLER: {
- return JSProxy::cast(lookup->proxy())->GetPropertyAttributeWithHandler(
- receiver, name);
- }
- case INTERCEPTOR:
- return lookup->holder()->GetPropertyAttributeWithInterceptor(
- JSObject::cast(receiver), name, continue_search);
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- }
- }
- return ABSENT;
-}
-
-
-PropertyAttributes JSReceiver::GetLocalPropertyAttribute(String* name) {
- // Check whether the name is an array index.
- uint32_t index = 0;
- if (IsJSObject() && name->AsArrayIndex(&index)) {
- return GetLocalElementAttribute(index);
- }
- // Named property.
- LookupResult lookup(GetIsolate());
- LocalLookup(name, &lookup, true);
- return GetPropertyAttributeForResult(this, &lookup, name, false);
-}
-
-
-PropertyAttributes JSObject::GetElementAttributeWithReceiver(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- Isolate* isolate = GetIsolate();
-
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return ABSENT;
- }
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return ABSENT;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->GetElementAttributeWithReceiver(
- receiver, index, continue_search);
- }
-
- // Check for lookup interceptor except when bootstrapping.
- if (HasIndexedInterceptor() && !isolate->bootstrapper()->IsActive()) {
- return GetElementAttributeWithInterceptor(receiver, index, continue_search);
- }
-
- return GetElementAttributeWithoutInterceptor(
- receiver, index, continue_search);
-}
-
-
-PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- Isolate* isolate = GetIsolate();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSReceiver> hreceiver(receiver);
- Handle<JSObject> holder(this);
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
- if (!interceptor->query()->IsUndefined()) {
- v8::IndexedPropertyQuery query =
- v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
- v8::Handle<v8::Integer> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = query(index, info);
- }
- if (!result.IsEmpty())
- return static_cast<PropertyAttributes>(result->Int32Value());
- } else if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetter getter =
- v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-get-has", this, index));
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(index, info);
- }
- if (!result.IsEmpty()) return NONE;
- }
-
- return holder->GetElementAttributeWithoutInterceptor(
- *hreceiver, index, continue_search);
-}
-
-
-PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- PropertyAttributes attr = GetElementsAccessor()->GetAttributes(
- receiver, this, index);
- if (attr != ABSENT) return attr;
-
- // Handle [] on String objects.
- if (IsStringObjectWithCharacterAt(index)) {
- return static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- }
-
- if (!continue_search) return ABSENT;
-
- Object* pt = GetPrototype();
- if (pt->IsJSProxy()) {
- // We need to follow the spec and simulate a call to [[GetOwnProperty]].
- return JSProxy::cast(pt)->GetElementAttributeWithHandler(receiver, index);
- }
- if (pt->IsNull()) return ABSENT;
- return JSObject::cast(pt)->GetElementAttributeWithReceiver(
- receiver, index, true);
-}
-
-
-MaybeObject* NormalizedMapCache::Get(JSObject* obj,
- PropertyNormalizationMode mode) {
- Isolate* isolate = obj->GetIsolate();
- Map* fast = obj->map();
- int index = fast->Hash() % kEntries;
- Object* result = get(index);
- if (result->IsMap() &&
- Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Map::cast(result)->SharedMapVerify();
- }
-#endif
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- // The cached map should match newly created normalized map bit-by-bit,
- // except for the code cache, which can contain some ics which can be
- // applied to the shared map.
- Object* fresh;
- MaybeObject* maybe_fresh =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (maybe_fresh->ToObject(&fresh)) {
- ASSERT(memcmp(Map::cast(fresh)->address(),
- Map::cast(result)->address(),
- Map::kCodeCacheOffset) == 0);
- int offset = Map::kCodeCacheOffset + kPointerSize;
- ASSERT(memcmp(Map::cast(fresh)->address() + offset,
- Map::cast(result)->address() + offset,
- Map::kSize - offset) == 0);
- }
- }
-#endif
- return result;
- }
-
- { MaybeObject* maybe_result =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- ASSERT(Map::cast(result)->is_dictionary_map());
- set(index, result);
- isolate->counters()->normalized_maps()->Increment();
-
- return result;
-}
-
-
-void NormalizedMapCache::Clear() {
- int entries = length();
- for (int i = 0; i != entries; i++) {
- set_undefined(i);
- }
-}
-
-
-void JSObject::UpdateMapCodeCache(Handle<JSObject> object,
- Handle<String> name,
- Handle<Code> code) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION_VOID(isolate,
- object->UpdateMapCodeCache(*name, *code));
-}
-
-
-MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) {
- if (map()->is_shared()) {
- // Fast case maps are never marked as shared.
- ASSERT(!HasFastProperties());
- // Replace the map with an identical copy that can be safely modified.
- Object* obj;
- { MaybeObject* maybe_obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES,
- UNIQUE_NORMALIZED_MAP);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- GetIsolate()->counters()->normalized_maps()->Increment();
-
- set_map(Map::cast(obj));
- }
- return map()->UpdateCodeCache(name, code);
-}
-
-
-void JSObject::NormalizeProperties(Handle<JSObject> object,
- PropertyNormalizationMode mode,
- int expected_additional_properties) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->NormalizeProperties(
- mode, expected_additional_properties));
-}
-
-
-MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
- int expected_additional_properties) {
- if (!HasFastProperties()) return this;
-
- // The global object is always normalized.
- ASSERT(!IsGlobalObject());
- // JSGlobalProxy must never be normalized
- ASSERT(!IsJSGlobalProxy());
-
- Map* map_of_this = map();
-
- // Allocate new content.
- int real_size = map_of_this->NumberOfOwnDescriptors();
- int property_count = real_size;
- if (expected_additional_properties > 0) {
- property_count += expected_additional_properties;
- } else {
- property_count += 2; // Make space for two more properties.
- }
- StringDictionary* dictionary;
- MaybeObject* maybe_dictionary = StringDictionary::Allocate(property_count);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
-
- DescriptorArray* descs = map_of_this->instance_descriptors();
- for (int i = 0; i < real_size; i++) {
- PropertyDetails details = descs->GetDetails(i);
- switch (details.type()) {
- case CONSTANT_FUNCTION: {
- PropertyDetails d = PropertyDetails(details.attributes(),
- NORMAL,
- details.descriptor_index());
- Object* value = descs->GetConstantFunction(i);
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- break;
- }
- case FIELD: {
- PropertyDetails d = PropertyDetails(details.attributes(),
- NORMAL,
- details.descriptor_index());
- Object* value = FastPropertyAt(descs->GetFieldIndex(i));
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- break;
- }
- case CALLBACKS: {
- Object* value = descs->GetCallbacksObject(i);
- details = details.set_pointer(0);
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, details);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- break;
- }
- case INTERCEPTOR:
- break;
- case HANDLER:
- case NORMAL:
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- }
-
- Heap* current_heap = GetHeap();
-
- // Copy the next enumeration index from instance descriptor.
- dictionary->SetNextEnumerationIndex(real_size + 1);
-
- Map* new_map;
- MaybeObject* maybe_map =
- current_heap->isolate()->context()->native_context()->
- normalized_map_cache()->Get(this, mode);
- if (!maybe_map->To(&new_map)) return maybe_map;
- ASSERT(new_map->is_dictionary_map());
-
- // We have now successfully allocated all the necessary objects.
- // Changes can now be made with the guarantee that all of them take effect.
-
- // Resize the object in the heap if necessary.
- int new_instance_size = new_map->instance_size();
- int instance_size_delta = map_of_this->instance_size() - new_instance_size;
- ASSERT(instance_size_delta >= 0);
- current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
- instance_size_delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
- -instance_size_delta);
- }
-
- set_map(new_map);
- map_of_this->NotifyLeafMapLayoutChange();
-
- set_properties(dictionary);
-
- current_heap->isolate()->counters()->props_to_dictionary()->Increment();
-
-#ifdef DEBUG
- if (FLAG_trace_normalization) {
- PrintF("Object properties have been normalized:\n");
- Print();
- }
-#endif
- return this;
-}
-
-
-void JSObject::TransformToFastProperties(Handle<JSObject> object,
- int unused_property_fields) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->TransformToFastProperties(unused_property_fields));
-}
-
-
-MaybeObject* JSObject::TransformToFastProperties(int unused_property_fields) {
- if (HasFastProperties()) return this;
- ASSERT(!IsGlobalObject());
- return property_dictionary()->
- TransformPropertiesToFastFor(this, unused_property_fields);
-}
-
-
-Handle<SeededNumberDictionary> JSObject::NormalizeElements(
- Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->NormalizeElements(),
- SeededNumberDictionary);
-}
-
-
-MaybeObject* JSObject::NormalizeElements() {
- ASSERT(!HasExternalArrayElements());
-
- // Find the backing store.
- FixedArrayBase* array = FixedArrayBase::cast(elements());
- Map* old_map = array->map();
- bool is_arguments =
- (old_map == old_map->GetHeap()->non_strict_arguments_elements_map());
- if (is_arguments) {
- array = FixedArrayBase::cast(FixedArray::cast(array)->get(1));
- }
- if (array->IsDictionary()) return array;
-
- ASSERT(HasFastSmiOrObjectElements() ||
- HasFastDoubleElements() ||
- HasFastArgumentsElements());
- // Compute the effective length and allocate a new backing store.
- int length = IsJSArray()
- ? Smi::cast(JSArray::cast(this)->length())->value()
- : array->length();
- int old_capacity = 0;
- int used_elements = 0;
- GetElementsCapacityAndUsage(&old_capacity, &used_elements);
- SeededNumberDictionary* dictionary = NULL;
- { Object* object;
- MaybeObject* maybe = SeededNumberDictionary::Allocate(used_elements);
- if (!maybe->ToObject(&object)) return maybe;
- dictionary = SeededNumberDictionary::cast(object);
- }
-
- // Copy the elements to the new backing store.
- bool has_double_elements = array->IsFixedDoubleArray();
- for (int i = 0; i < length; i++) {
- Object* value = NULL;
- if (has_double_elements) {
- FixedDoubleArray* double_array = FixedDoubleArray::cast(array);
- if (double_array->is_the_hole(i)) {
- value = GetIsolate()->heap()->the_hole_value();
- } else {
- // Objects must be allocated in the old object space, since the
- // overall number of HeapNumbers needed for the conversion might
- // exceed the capacity of new space, and we would fail repeatedly
- // trying to convert the FixedDoubleArray.
- MaybeObject* maybe_value_object =
- GetHeap()->AllocateHeapNumber(double_array->get_scalar(i), TENURED);
- if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
- }
- } else {
- ASSERT(old_map->has_fast_smi_or_object_elements());
- value = FixedArray::cast(array)->get(i);
- }
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
- if (!value->IsTheHole()) {
- Object* result;
- MaybeObject* maybe_result =
- dictionary->AddNumberEntry(i, value, details);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- dictionary = SeededNumberDictionary::cast(result);
- }
- }
-
- // Switch to using the dictionary as the backing storage for elements.
- if (is_arguments) {
- FixedArray::cast(elements())->set(1, dictionary);
- } else {
- // Set the new map first to satify the elements type assert in
- // set_elements().
- Object* new_map;
- MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(),
- DICTIONARY_ELEMENTS);
- if (!maybe->ToObject(&new_map)) return maybe;
- set_map(Map::cast(new_map));
- set_elements(dictionary);
- }
-
- old_map->GetHeap()->isolate()->counters()->elements_to_dictionary()->
- Increment();
-
-#ifdef DEBUG
- if (FLAG_trace_normalization) {
- PrintF("Object elements have been normalized:\n");
- Print();
- }
-#endif
-
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
- return dictionary;
-}
-
-
-Smi* JSReceiver::GenerateIdentityHash() {
- Isolate* isolate = GetIsolate();
-
- int hash_value;
- int attempts = 0;
- do {
- // Generate a random 32-bit hash value but limit range to fit
- // within a smi.
- hash_value = V8::RandomPrivate(isolate) & Smi::kMaxValue;
- attempts++;
- } while (hash_value == 0 && attempts < 30);
- hash_value = hash_value != 0 ? hash_value : 1; // never return 0
-
- return Smi::FromInt(hash_value);
-}
-
-
-MaybeObject* JSObject::SetIdentityHash(Smi* hash, CreationFlag flag) {
- MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_string(),
- hash);
- if (maybe->IsFailure()) return maybe;
- return this;
-}
-
-
-int JSObject::GetIdentityHash(Handle<JSObject> obj) {
- CALL_AND_RETRY(obj->GetIsolate(),
- obj->GetIdentityHash(ALLOW_CREATION),
- return Smi::cast(__object__)->value(),
- return 0);
-}
-
-
-MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
- Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_string());
- if (stored_value->IsSmi()) return stored_value;
-
- // Do not generate permanent identity hash code if not requested.
- if (flag == OMIT_CREATION) return GetHeap()->undefined_value();
-
- Smi* hash = GenerateIdentityHash();
- MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_string(),
- hash);
- if (result->IsFailure()) return result;
- if (result->ToObjectUnchecked()->IsUndefined()) {
- // Trying to get hash of detached proxy.
- return Smi::FromInt(0);
- }
- return hash;
-}
-
-
-MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
- Object* hash = this->hash();
- if (!hash->IsSmi() && flag == ALLOW_CREATION) {
- hash = GenerateIdentityHash();
- set_hash(hash);
- }
- return hash;
-}
-
-
-Object* JSObject::GetHiddenProperty(String* key) {
- ASSERT(key->IsInternalizedString());
- if (IsJSGlobalProxy()) {
- // For a proxy, use the prototype as target object.
- Object* proxy_parent = GetPrototype();
- // If the proxy is detached, return undefined.
- if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
- ASSERT(proxy_parent->IsJSGlobalObject());
- return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
- }
- ASSERT(!IsJSGlobalProxy());
- MaybeObject* hidden_lookup =
- GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
- Object* inline_value = hidden_lookup->ToObjectUnchecked();
-
- if (inline_value->IsSmi()) {
- // Handle inline-stored identity hash.
- if (key == GetHeap()->identity_hash_string()) {
- return inline_value;
- } else {
- return GetHeap()->undefined_value();
- }
- }
-
- if (inline_value->IsUndefined()) return GetHeap()->undefined_value();
-
- ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
- Object* entry = hashtable->Lookup(key);
- if (entry->IsTheHole()) return GetHeap()->undefined_value();
- return entry;
-}
-
-
-Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj,
- Handle<String> key,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->SetHiddenProperty(*key, *value),
- Object);
-}
-
-
-MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
- ASSERT(key->IsInternalizedString());
- if (IsJSGlobalProxy()) {
- // For a proxy, use the prototype as target object.
- Object* proxy_parent = GetPrototype();
- // If the proxy is detached, return undefined.
- if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
- ASSERT(proxy_parent->IsJSGlobalObject());
- return JSObject::cast(proxy_parent)->SetHiddenProperty(key, value);
- }
- ASSERT(!IsJSGlobalProxy());
- MaybeObject* hidden_lookup =
- GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
- Object* inline_value = hidden_lookup->ToObjectUnchecked();
-
- // If there is no backing store yet, store the identity hash inline.
- if (value->IsSmi() &&
- key == GetHeap()->identity_hash_string() &&
- (inline_value->IsUndefined() || inline_value->IsSmi())) {
- return SetHiddenPropertiesHashTable(value);
- }
-
- hidden_lookup = GetHiddenPropertiesHashTable(CREATE_NEW_IF_ABSENT);
- ObjectHashTable* hashtable;
- if (!hidden_lookup->To(&hashtable)) return hidden_lookup;
-
- // If it was found, check if the key is already in the dictionary.
- MaybeObject* insert_result = hashtable->Put(key, value);
- ObjectHashTable* new_table;
- if (!insert_result->To(&new_table)) return insert_result;
- if (new_table != hashtable) {
- // If adding the key expanded the dictionary (i.e., Add returned a new
- // dictionary), store it back to the object.
- MaybeObject* store_result = SetHiddenPropertiesHashTable(new_table);
- if (store_result->IsFailure()) return store_result;
- }
- // Return this to mark success.
- return this;
-}
-
-
-void JSObject::DeleteHiddenProperty(String* key) {
- ASSERT(key->IsInternalizedString());
- if (IsJSGlobalProxy()) {
- // For a proxy, use the prototype as target object.
- Object* proxy_parent = GetPrototype();
- // If the proxy is detached, return immediately.
- if (proxy_parent->IsNull()) return;
- ASSERT(proxy_parent->IsJSGlobalObject());
- JSObject::cast(proxy_parent)->DeleteHiddenProperty(key);
- return;
- }
- ASSERT(!IsJSGlobalProxy());
- MaybeObject* hidden_lookup =
- GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
- Object* inline_value = hidden_lookup->ToObjectUnchecked();
-
- // We never delete (inline-stored) identity hashes.
- ASSERT(key != GetHeap()->identity_hash_string());
- if (inline_value->IsUndefined() || inline_value->IsSmi()) return;
-
- ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
- MaybeObject* delete_result = hashtable->Put(key, GetHeap()->the_hole_value());
- USE(delete_result);
- ASSERT(!delete_result->IsFailure()); // Delete does not cause GC.
-}
-
-
-bool JSObject::HasHiddenProperties() {
- return GetPropertyAttributePostInterceptor(this,
- GetHeap()->hidden_string(),
- false) != ABSENT;
-}
-
-
-MaybeObject* JSObject::GetHiddenPropertiesHashTable(
- InitializeHiddenProperties init_option) {
- ASSERT(!IsJSGlobalProxy());
- Object* inline_value;
- if (HasFastProperties()) {
- // If the object has fast properties, check whether the first slot
- // in the descriptor array matches the hidden string. Since the
- // hidden strings hash code is zero (and no other string has hash
- // code zero) it will always occupy the first entry if present.
- DescriptorArray* descriptors = this->map()->instance_descriptors();
- if (descriptors->number_of_descriptors() > 0) {
- int sorted_index = descriptors->GetSortedKeyIndex(0);
- if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
- sorted_index < map()->NumberOfOwnDescriptors()) {
- ASSERT(descriptors->GetType(sorted_index) == FIELD);
- inline_value =
- this->FastPropertyAt(descriptors->GetFieldIndex(sorted_index));
- } else {
- inline_value = GetHeap()->undefined_value();
- }
- } else {
- inline_value = GetHeap()->undefined_value();
- }
- } else {
- PropertyAttributes attributes;
- // You can't install a getter on a property indexed by the hidden string,
- // so we can be sure that GetLocalPropertyPostInterceptor returns a real
- // object.
- inline_value =
- GetLocalPropertyPostInterceptor(this,
- GetHeap()->hidden_string(),
- &attributes)->ToObjectUnchecked();
- }
-
- if (init_option == ONLY_RETURN_INLINE_VALUE ||
- inline_value->IsHashTable()) {
- return inline_value;
- }
-
- ObjectHashTable* hashtable;
- static const int kInitialCapacity = 4;
- MaybeObject* maybe_obj =
- ObjectHashTable::Allocate(kInitialCapacity,
- ObjectHashTable::USE_CUSTOM_MINIMUM_CAPACITY);
- if (!maybe_obj->To<ObjectHashTable>(&hashtable)) return maybe_obj;
-
- if (inline_value->IsSmi()) {
- // We were storing the identity hash inline and now allocated an actual
- // dictionary. Put the identity hash into the new dictionary.
- MaybeObject* insert_result =
- hashtable->Put(GetHeap()->identity_hash_string(), inline_value);
- ObjectHashTable* new_table;
- if (!insert_result->To(&new_table)) return insert_result;
- // We expect no resizing for the first insert.
- ASSERT_EQ(hashtable, new_table);
- }
-
- MaybeObject* store_result =
- SetPropertyPostInterceptor(GetHeap()->hidden_string(),
- hashtable,
- DONT_ENUM,
- kNonStrictMode,
- OMIT_EXTENSIBILITY_CHECK);
- if (store_result->IsFailure()) return store_result;
- return hashtable;
-}
-
-
-MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) {
- ASSERT(!IsJSGlobalProxy());
- // We can store the identity hash inline iff there is no backing store
- // for hidden properties yet.
- ASSERT(HasHiddenProperties() != value->IsSmi());
- if (HasFastProperties()) {
- // If the object has fast properties, check whether the first slot
- // in the descriptor array matches the hidden string. Since the
- // hidden strings hash code is zero (and no other string has hash
- // code zero) it will always occupy the first entry if present.
- DescriptorArray* descriptors = this->map()->instance_descriptors();
- if (descriptors->number_of_descriptors() > 0) {
- int sorted_index = descriptors->GetSortedKeyIndex(0);
- if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
- sorted_index < map()->NumberOfOwnDescriptors()) {
- ASSERT(descriptors->GetType(sorted_index) == FIELD);
- this->FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index),
- value);
- return this;
- }
- }
- }
- MaybeObject* store_result =
- SetPropertyPostInterceptor(GetHeap()->hidden_string(),
- value,
- DONT_ENUM,
- kNonStrictMode,
- OMIT_EXTENSIBILITY_CHECK);
- if (store_result->IsFailure()) return store_result;
- return this;
-}
-
-
-MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
- DeleteMode mode) {
- // Check local property, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (!result.IsFound()) return GetHeap()->true_value();
-
- // Normalize object if needed.
- Object* obj;
- { MaybeObject* maybe_obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- return DeleteNormalizedProperty(name, mode);
-}
-
-
-MaybeObject* JSObject::DeletePropertyWithInterceptor(String* name) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
- Handle<String> name_handle(name);
- Handle<JSObject> this_handle(this);
- if (!interceptor->deleter()->IsUndefined()) {
- v8::NamedPropertyDeleter deleter =
- v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Boolean> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = deleter(v8::Utils::ToLocal(name_handle), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) {
- ASSERT(result->IsBoolean());
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
- return *result_internal;
- }
- }
- MaybeObject* raw_result =
- this_handle->DeletePropertyPostInterceptor(*name_handle, NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
-}
-
-
-MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
- Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- if (interceptor->deleter()->IsUndefined()) return heap->false_value();
- v8::IndexedPropertyDeleter deleter =
- v8::ToCData<v8::IndexedPropertyDeleter>(interceptor->deleter());
- Handle<JSObject> this_handle(this);
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Boolean> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = deleter(index, info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) {
- ASSERT(result->IsBoolean());
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
- return *result_internal;
- }
- MaybeObject* raw_result = this_handle->GetElementsAccessor()->Delete(
- *this_handle,
- index,
- NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
-}
-
-
-Handle<Object> JSObject::DeleteElement(Handle<JSObject> obj,
- uint32_t index) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->DeleteElement(index, JSObject::NORMAL_DELETION),
- Object);
-}
-
-
-MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
- Isolate* isolate = GetIsolate();
- // Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- return isolate->heap()->false_value();
- }
-
- if (IsStringObjectWithCharacterAt(index)) {
- if (mode == STRICT_DELETION) {
- // Deleting a non-configurable property in strict mode.
- HandleScope scope(isolate);
- Handle<Object> holder(this, isolate);
- Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[2] = { name, holder };
- Handle<Object> error =
- isolate->factory()->NewTypeError("strict_delete_property",
- HandleVector(args, 2));
- return isolate->Throw(*error);
- }
- return isolate->heap()->false_value();
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
- ASSERT(proto->IsJSGlobalObject());
- return JSGlobalObject::cast(proto)->DeleteElement(index, mode);
- }
-
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
-
- Handle<Object> old_value;
- bool should_enqueue_change_record = false;
- if (FLAG_harmony_observation && self->map()->is_observed()) {
- should_enqueue_change_record = self->HasLocalElement(index);
- if (should_enqueue_change_record) {
- old_value = self->GetLocalElementAccessorPair(index) != NULL
- ? Handle<Object>::cast(isolate->factory()->the_hole_value())
- : Object::GetElement(self, index);
- }
- }
-
- MaybeObject* result;
- // Skip interceptor if forcing deletion.
- if (self->HasIndexedInterceptor() && mode != FORCE_DELETION) {
- result = self->DeleteElementWithInterceptor(index);
- } else {
- result = self->GetElementsAccessor()->Delete(*self, index, mode);
- }
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (should_enqueue_change_record && !self->HasLocalElement(index)) {
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- EnqueueChangeRecord(self, "deleted", name, old_value);
- }
-
- return *hresult;
-}
-
-
-Handle<Object> JSObject::DeleteProperty(Handle<JSObject> obj,
- Handle<String> prop) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
- Object);
-}
-
-
-MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
- Isolate* isolate = GetIsolate();
- // ECMA-262, 3rd, 8.6.2.5
- ASSERT(name->IsString());
-
- // Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- return isolate->heap()->false_value();
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
- ASSERT(proto->IsJSGlobalObject());
- return JSGlobalObject::cast(proto)->DeleteProperty(name, mode);
- }
-
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- return DeleteElement(index, mode);
- }
-
- LookupResult lookup(isolate);
- LocalLookup(name, &lookup, true);
- if (!lookup.IsFound()) return isolate->heap()->true_value();
- // Ignore attributes if forcing a deletion.
- if (lookup.IsDontDelete() && mode != FORCE_DELETION) {
- if (mode == STRICT_DELETION) {
- // Deleting a non-configurable property in strict mode.
- HandleScope scope(isolate);
- Handle<Object> args[2] = { Handle<Object>(name, isolate),
- Handle<Object>(this, isolate) };
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_delete_property", HandleVector(args, 2)));
- }
- return isolate->heap()->false_value();
- }
-
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<String> hname(name);
-
- Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
- if (is_observed && lookup.IsDataProperty()) {
- old_value = Object::GetProperty(self, hname);
- }
- MaybeObject* result;
-
- // Check for interceptor.
- if (lookup.IsInterceptor()) {
- // Skip interceptor if forcing a deletion.
- if (mode == FORCE_DELETION) {
- result = self->DeletePropertyPostInterceptor(*hname, mode);
- } else {
- result = self->DeletePropertyWithInterceptor(*hname);
- }
- } else {
- // Normalize object if needed.
- Object* obj;
- result = self->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!result->To(&obj)) return result;
- // Make sure the properties are normalized before removing the entry.
- result = self->DeleteNormalizedProperty(*hname, mode);
- }
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (is_observed && !self->HasLocalProperty(*hname)) {
- EnqueueChangeRecord(self, "deleted", hname, old_value);
- }
-
- return *hresult;
-}
-
-
-MaybeObject* JSReceiver::DeleteElement(uint32_t index, DeleteMode mode) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->DeleteElementWithHandler(index, mode);
- }
- return JSObject::cast(this)->DeleteElement(index, mode);
-}
-
-
-MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
- }
- return JSObject::cast(this)->DeleteProperty(name, mode);
-}
-
-
-bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
- ElementsKind kind,
- Object* object) {
- ASSERT(IsFastObjectElementsKind(kind) ||
- kind == DICTIONARY_ELEMENTS);
- if (IsFastObjectElementsKind(kind)) {
- int length = IsJSArray()
- ? Smi::cast(JSArray::cast(this)->length())->value()
- : elements->length();
- for (int i = 0; i < length; ++i) {
- Object* element = elements->get(i);
- if (!element->IsTheHole() && element == object) return true;
- }
- } else {
- Object* key =
- SeededNumberDictionary::cast(elements)->SlowReverseLookup(object);
- if (!key->IsUndefined()) return true;
- }
- return false;
-}
-
-
-// Check whether this object references another object.
-bool JSObject::ReferencesObject(Object* obj) {
- Map* map_of_this = map();
- Heap* heap = GetHeap();
- AssertNoAllocation no_alloc;
-
- // Is the object the constructor for this object?
- if (map_of_this->constructor() == obj) {
- return true;
- }
-
- // Is the object the prototype for this object?
- if (map_of_this->prototype() == obj) {
- return true;
- }
-
- // Check if the object is among the named properties.
- Object* key = SlowReverseLookup(obj);
- if (!key->IsUndefined()) {
- return true;
- }
-
- // Check if the object is among the indexed properties.
- ElementsKind kind = GetElementsKind();
- switch (kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- // Raw pixels and external arrays do not reference other
- // objects.
- break;
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- break;
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case DICTIONARY_ELEMENTS: {
- FixedArray* elements = FixedArray::cast(this->elements());
- if (ReferencesObjectFromElements(elements, kind, obj)) return true;
- break;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
- FixedArray* parameter_map = FixedArray::cast(elements());
- // Check the mapped parameters.
- int length = parameter_map->length();
- for (int i = 2; i < length; ++i) {
- Object* value = parameter_map->get(i);
- if (!value->IsTheHole() && value == obj) return true;
- }
- // Check the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS :
- FAST_HOLEY_ELEMENTS;
- if (ReferencesObjectFromElements(arguments, kind, obj)) return true;
- break;
- }
- }
-
- // For functions check the context.
- if (IsJSFunction()) {
- // Get the constructor function for arguments array.
- JSObject* arguments_boilerplate =
- heap->isolate()->context()->native_context()->
- arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
-
- // Get the context and don't check if it is the native context.
- JSFunction* f = JSFunction::cast(this);
- Context* context = f->context();
- if (context->IsNativeContext()) {
- return false;
- }
-
- // Check the non-special context slots.
- for (int i = Context::MIN_CONTEXT_SLOTS; i < context->length(); i++) {
- // Only check JS objects.
- if (context->get(i)->IsJSObject()) {
- JSObject* ctxobj = JSObject::cast(context->get(i));
- // If it is an arguments array check the content.
- if (ctxobj->map()->constructor() == arguments_function) {
- if (ctxobj->ReferencesObject(obj)) {
- return true;
- }
- } else if (ctxobj == obj) {
- return true;
- }
- }
- }
-
- // Check the context extension (if any) if it can have references.
- if (context->has_extension() && !context->IsCatchContext()) {
- return JSObject::cast(context->extension())->ReferencesObject(obj);
- }
- }
-
- // No references to object.
- return false;
-}
-
-
-Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
-}
-
-
-MaybeObject* JSObject::PreventExtensions() {
- Isolate* isolate = GetIsolate();
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
- return isolate->heap()->false_value();
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->PreventExtensions();
- }
-
- // It's not possible to seal objects with external array elements
- if (HasExternalArrayElements()) {
- HandleScope scope(isolate);
- Handle<Object> object(this, isolate);
- Handle<Object> error =
- isolate->factory()->NewTypeError(
- "cant_prevent_ext_external_array_elements",
- HandleVector(&object, 1));
- return isolate->Throw(*error);
- }
-
- // If there are fast elements we normalize.
- SeededNumberDictionary* dictionary = NULL;
- { MaybeObject* maybe = NormalizeElements();
- if (!maybe->To<SeededNumberDictionary>(&dictionary)) return maybe;
- }
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
- // Make sure that we never go back to fast case.
- dictionary->set_requires_slow_elements();
-
- // Do a map transition, other objects with this map may still
- // be extensible.
- Map* new_map;
- MaybeObject* maybe = map()->Copy();
- if (!maybe->To(&new_map)) return maybe;
-
- new_map->set_is_extensible(false);
- set_map(new_map);
- ASSERT(!map()->is_extensible());
- return new_map;
-}
-
-
-// Tests for the fast common case for property enumeration:
-// - This object and all prototypes has an enum cache (which means that
-// it is no proxy, has no interceptors and needs no access checks).
-// - This object has no elements.
-// - No prototype has enumerable properties/elements.
-bool JSReceiver::IsSimpleEnum() {
- Heap* heap = GetHeap();
- for (Object* o = this;
- o != heap->null_value();
- o = JSObject::cast(o)->GetPrototype()) {
- if (!o->IsJSObject()) return false;
- JSObject* curr = JSObject::cast(o);
- int enum_length = curr->map()->EnumLength();
- if (enum_length == Map::kInvalidEnumCache) return false;
- ASSERT(!curr->HasNamedInterceptor());
- ASSERT(!curr->HasIndexedInterceptor());
- ASSERT(!curr->IsAccessCheckNeeded());
- if (curr->NumberOfEnumElements() > 0) return false;
- if (curr != this && enum_length != 0) return false;
- }
- return true;
-}
-
-
-int Map::NumberOfDescribedProperties(DescriptorFlag which,
- PropertyAttributes filter) {
- int result = 0;
- DescriptorArray* descs = instance_descriptors();
- int limit = which == ALL_DESCRIPTORS
- ? descs->number_of_descriptors()
- : NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- if ((descs->GetDetails(i).attributes() & filter) == 0) result++;
- }
- return result;
-}
-
-
-int Map::PropertyIndexFor(String* name) {
- DescriptorArray* descs = instance_descriptors();
- int limit = NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- if (name->Equals(descs->GetKey(i))) return descs->GetFieldIndex(i);
- }
- return -1;
-}
-
-
-int Map::NextFreePropertyIndex() {
- int max_index = -1;
- int number_of_own_descriptors = NumberOfOwnDescriptors();
- DescriptorArray* descs = instance_descriptors();
- for (int i = 0; i < number_of_own_descriptors; i++) {
- if (descs->GetType(i) == FIELD) {
- int current_index = descs->GetFieldIndex(i);
- if (current_index > max_index) max_index = current_index;
- }
- }
- return max_index + 1;
-}
-
-
-AccessorDescriptor* Map::FindAccessor(String* name) {
- DescriptorArray* descs = instance_descriptors();
- int number_of_own_descriptors = NumberOfOwnDescriptors();
- for (int i = 0; i < number_of_own_descriptors; i++) {
- if (descs->GetType(i) == CALLBACKS && name->Equals(descs->GetKey(i))) {
- return descs->GetCallbacks(i);
- }
- }
- return NULL;
-}
-
-
-void JSReceiver::LocalLookup(String* name, LookupResult* result,
- bool search_hidden_prototypes,
- bool skip_fallback_interceptor) {
- ASSERT(name->IsString());
-
- Heap* heap = GetHeap();
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return result->NotFound();
- ASSERT(proto->IsJSGlobalObject());
- return JSReceiver::cast(proto)->LocalLookup(
- name, result, search_hidden_prototypes);
- }
-
- if (IsJSProxy()) {
- result->HandlerResult(JSProxy::cast(this));
- return;
- }
-
- // Do not use inline caching if the object is a non-global object
- // that requires access checks.
- if (IsAccessCheckNeeded()) {
- result->DisallowCaching();
- }
-
- JSObject* js_object = JSObject::cast(this);
-
- // Check for lookup interceptor except when bootstrapping.
- bool wouldIntercept = js_object->HasNamedInterceptor() &&
- !heap->isolate()->bootstrapper()->IsActive();
- if (wouldIntercept && !map()->named_interceptor_is_fallback()) {
- result->InterceptorResult(js_object);
- return;
- }
-
- js_object->LocalLookupRealNamedProperty(name, result);
-
- if (result->IsFound()) return;
-
- if (search_hidden_prototypes) {
- Object* proto = js_object->GetPrototype();
-
- if (proto->IsJSReceiver()) {
- JSReceiver* receiver = JSReceiver::cast(proto);
- if (receiver->map()->is_hidden_prototype()) {
- receiver->LocalLookup(name, result, search_hidden_prototypes);
- return;
- }
- }
- }
-
- if (wouldIntercept && !skip_fallback_interceptor && !result->IsProperty() &&
- map()->named_interceptor_is_fallback()) {
- result->InterceptorResult(js_object);
- }
-}
-
-
-void JSReceiver::Lookup(String* name,
- LookupResult* result,
- bool skip_fallback_interceptor) {
- // Ecma-262 3rd 8.6.2.4
- Heap* heap = GetHeap();
- for (Object* current = this;
- current != heap->null_value();
- current = JSObject::cast(current)->GetPrototype()) {
- JSReceiver::cast(current)->LocalLookup(name, result, false,
- skip_fallback_interceptor);
- if (result->IsFound()) return;
- }
- result->NotFound();
-}
-
-
-// Search object and its prototype chain for callback properties.
-void JSObject::LookupCallbackProperty(String* name, LookupResult* result) {
- Heap* heap = GetHeap();
- for (Object* current = this;
- current != heap->null_value() && current->IsJSObject();
- current = JSObject::cast(current)->GetPrototype()) {
- JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
- if (result->IsPropertyCallbacks()) return;
- }
- result->NotFound();
-}
-
-
-// Try to update an accessor in an elements dictionary. Return true if the
-// update succeeded, and false otherwise.
-static bool UpdateGetterSetterInDictionary(
- SeededNumberDictionary* dictionary,
- uint32_t index,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes) {
- int entry = dictionary->FindEntry(index);
- if (entry != SeededNumberDictionary::kNotFound) {
- Object* result = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS && result->IsAccessorPair()) {
- ASSERT(!details.IsDontDelete());
- if (details.attributes() != attributes) {
- dictionary->DetailsAtPut(entry,
- PropertyDetails(attributes, CALLBACKS, index));
- }
- AccessorPair::cast(result)->SetComponents(getter, setter);
- return true;
- }
- }
- return false;
-}
-
-
-MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes) {
- switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- // Ignore getters and setters on pixel and external array elements.
- return GetHeap()->undefined_value();
- case DICTIONARY_ELEMENTS:
- if (UpdateGetterSetterInDictionary(element_dictionary(),
- index,
- getter,
- setter,
- attributes)) {
- return GetHeap()->undefined_value();
- }
- break;
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
- // Ascertain whether we have read-only properties or an existing
- // getter/setter pair in an arguments elements dictionary backing
- // store.
- FixedArray* parameter_map = FixedArray::cast(elements());
- uint32_t length = parameter_map->length();
- Object* probe =
- index < (length - 2) ? parameter_map->get(index + 2) : NULL;
- if (probe == NULL || probe->IsTheHole()) {
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- if (arguments->IsDictionary()) {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(arguments);
- if (UpdateGetterSetterInDictionary(dictionary,
- index,
- getter,
- setter,
- attributes)) {
- return GetHeap()->undefined_value();
- }
- }
- }
- break;
- }
- }
-
- AccessorPair* accessors;
- { MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair();
- if (!maybe_accessors->To(&accessors)) return maybe_accessors;
- }
- accessors->SetComponents(getter, setter);
-
- return SetElementCallback(index, accessors, attributes);
-}
-
-
-MaybeObject* JSObject::CreateAccessorPairFor(String* name) {
- LookupResult result(GetHeap()->isolate());
- LocalLookupRealNamedProperty(name, &result);
- if (result.IsPropertyCallbacks()) {
- // Note that the result can actually have IsDontDelete() == true when we
- // e.g. have to fall back to the slow case while adding a setter after
- // successfully reusing a map transition for a getter. Nevertheless, this is
- // OK, because the assertion only holds for the whole addition of both
- // accessors, not for the addition of each part. See first comment in
- // DefinePropertyAccessor below.
- Object* obj = result.GetCallbackObject();
- if (obj->IsAccessorPair()) {
- return AccessorPair::cast(obj)->Copy();
- }
- }
- return GetHeap()->AllocateAccessorPair();
-}
-
-
-MaybeObject* JSObject::DefinePropertyAccessor(String* name,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes) {
- // We could assert that the property is configurable here, but we would need
- // to do a lookup, which seems to be a bit of overkill.
- Heap* heap = GetHeap();
- bool only_attribute_changes = getter->IsNull() && setter->IsNull();
- if (HasFastProperties() && !only_attribute_changes &&
- (map()->NumberOfOwnDescriptors() <
- DescriptorArray::kMaxNumberOfDescriptors)) {
- MaybeObject* getterOk = heap->undefined_value();
- if (!getter->IsNull()) {
- getterOk = DefineFastAccessor(name, ACCESSOR_GETTER, getter, attributes);
- if (getterOk->IsFailure()) return getterOk;
- }
-
- MaybeObject* setterOk = heap->undefined_value();
- if (getterOk != heap->null_value() && !setter->IsNull()) {
- setterOk = DefineFastAccessor(name, ACCESSOR_SETTER, setter, attributes);
- if (setterOk->IsFailure()) return setterOk;
- }
-
- if (getterOk != heap->null_value() && setterOk != heap->null_value()) {
- return heap->undefined_value();
- }
- }
-
- AccessorPair* accessors;
- MaybeObject* maybe_accessors = CreateAccessorPairFor(name);
- if (!maybe_accessors->To(&accessors)) return maybe_accessors;
-
- accessors->SetComponents(getter, setter);
- return SetPropertyCallback(name, accessors, attributes);
-}
-
-
-bool JSObject::CanSetCallback(String* name) {
- ASSERT(!IsAccessCheckNeeded() ||
- GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET));
-
- // Check if there is an API defined callback object which prohibits
- // callback overwriting in this object or its prototype chain.
- // This mechanism is needed for instance in a browser setting, where
- // certain accessors such as window.location should not be allowed
- // to be overwritten because allowing overwriting could potentially
- // cause security problems.
- LookupResult callback_result(GetIsolate());
- LookupCallbackProperty(name, &callback_result);
- if (callback_result.IsFound()) {
- Object* obj = callback_result.GetCallbackObject();
- if (obj->IsAccessorInfo() &&
- AccessorInfo::cast(obj)->prohibits_overwriting()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-MaybeObject* JSObject::SetElementCallback(uint32_t index,
- Object* structure,
- PropertyAttributes attributes) {
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
-
- // Normalize elements to make this operation simple.
- SeededNumberDictionary* dictionary;
- { MaybeObject* maybe_dictionary = NormalizeElements();
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- }
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
-
- // Update the dictionary with the new CALLBACKS property.
- { MaybeObject* maybe_dictionary = dictionary->Set(index, structure, details);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- }
-
- dictionary->set_requires_slow_elements();
- // Update the dictionary backing store on the object.
- if (elements()->map() == GetHeap()->non_strict_arguments_elements_map()) {
- // Also delete any parameter alias.
- //
- // TODO(kmillikin): when deleting the last parameter alias we could
- // switch to a direct backing store without the parameter map. This
- // would allow GC of the context.
- FixedArray* parameter_map = FixedArray::cast(elements());
- if (index < static_cast<uint32_t>(parameter_map->length()) - 2) {
- parameter_map->set(index + 2, GetHeap()->the_hole_value());
- }
- parameter_map->set(1, dictionary);
- } else {
- set_elements(dictionary);
- }
-
- return GetHeap()->undefined_value();
-}
-
-
-MaybeObject* JSObject::SetPropertyCallback(String* name,
- Object* structure,
- PropertyAttributes attributes) {
- // Normalize object to make this operation simple.
- MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe_ok->IsFailure()) return maybe_ok;
-
- // For the global object allocate a new map to invalidate the global inline
- // caches which have a global property cell reference directly in the code.
- if (IsGlobalObject()) {
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- ASSERT(new_map->is_dictionary_map());
-
- set_map(new_map);
- // When running crankshaft, changing the map is not enough. We
- // need to deoptimize all functions that rely on this global
- // object.
- Deoptimizer::DeoptimizeGlobalObject(this);
- }
-
- // Update the dictionary with the new CALLBACKS property.
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
- maybe_ok = SetNormalizedProperty(name, structure, details);
- if (maybe_ok->IsFailure()) return maybe_ok;
-
- return GetHeap()->undefined_value();
-}
-
-
-void JSObject::DefineAccessor(Handle<JSObject> object,
- Handle<String> name,
- Handle<Object> getter,
- Handle<Object> setter,
- PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->DefineAccessor(*name, *getter, *setter, attributes));
-}
-
-MaybeObject* JSObject::DefineAccessor(String* name_raw,
- Object* getter_raw,
- Object* setter_raw,
- PropertyAttributes attributes) {
- Isolate* isolate = GetIsolate();
- // Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return isolate->heap()->undefined_value();
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->DefineAccessor(
- name_raw, getter_raw, setter_raw, attributes);
- }
-
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc;
-
- // Try to flatten before operating on the string.
- name_raw->TryFlatten();
-
- if (!CanSetCallback(name_raw)) return isolate->heap()->undefined_value();
-
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<String> name(name_raw);
- Handle<Object> getter(getter_raw, isolate);
- Handle<Object> setter(setter_raw, isolate);
-
- uint32_t index = 0;
- bool is_element = name->AsArrayIndex(&index);
-
- Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
- bool preexists = false;
- if (is_observed) {
- if (is_element) {
- preexists = HasLocalElement(index);
- if (preexists && self->GetLocalElementAccessorPair(index) == NULL) {
- old_value = Object::GetElement(self, index);
- }
- } else {
- LookupResult lookup(isolate);
- LocalLookup(*name, &lookup, true);
- preexists = lookup.IsProperty();
- if (preexists && lookup.IsDataProperty()) {
- old_value = Object::GetProperty(self, name);
- }
- }
- }
-
- MaybeObject* result = is_element ?
- self->DefineElementAccessor(index, *getter, *setter, attributes) :
- self->DefinePropertyAccessor(*name, *getter, *setter, attributes);
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (is_observed) {
- const char* type = preexists ? "reconfigured" : "new";
- EnqueueChangeRecord(self, type, name, old_value);
- }
-
- return *hresult;
-}
-
-
-static MaybeObject* TryAccessorTransition(JSObject* self,
- Map* transitioned_map,
- int target_descriptor,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes) {
- DescriptorArray* descs = transitioned_map->instance_descriptors();
- PropertyDetails details = descs->GetDetails(target_descriptor);
-
- // If the transition target was not callbacks, fall back to the slow case.
- if (details.type() != CALLBACKS) return self->GetHeap()->null_value();
- Object* descriptor = descs->GetCallbacksObject(target_descriptor);
- if (!descriptor->IsAccessorPair()) return self->GetHeap()->null_value();
-
- Object* target_accessor = AccessorPair::cast(descriptor)->get(component);
- PropertyAttributes target_attributes = details.attributes();
-
- // Reuse transition if adding same accessor with same attributes.
- if (target_accessor == accessor && target_attributes == attributes) {
- self->set_map(transitioned_map);
- return self;
- }
-
- // If either not the same accessor, or not the same attributes, fall back to
- // the slow case.
- return self->GetHeap()->null_value();
-}
-
-
-MaybeObject* JSObject::DefineFastAccessor(String* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes) {
- ASSERT(accessor->IsSpecFunction() || accessor->IsUndefined());
- LookupResult result(GetIsolate());
- LocalLookup(name, &result);
-
- if (result.IsFound()
- && !result.IsPropertyCallbacks()
- && !result.IsTransition()) return GetHeap()->null_value();
-
- // Return success if the same accessor with the same attributes already exist.
- AccessorPair* source_accessors = NULL;
- if (result.IsPropertyCallbacks()) {
- Object* callback_value = result.GetCallbackObject();
- if (callback_value->IsAccessorPair()) {
- source_accessors = AccessorPair::cast(callback_value);
- Object* entry = source_accessors->get(component);
- if (entry == accessor && result.GetAttributes() == attributes) {
- return this;
- }
- } else {
- return GetHeap()->null_value();
- }
-
- int descriptor_number = result.GetDescriptorIndex();
-
- map()->LookupTransition(this, name, &result);
-
- if (result.IsFound()) {
- Map* target = result.GetTransitionTarget();
- ASSERT(target->NumberOfOwnDescriptors() ==
- map()->NumberOfOwnDescriptors());
- // This works since descriptors are sorted in order of addition.
- ASSERT(map()->instance_descriptors()->GetKey(descriptor_number) == name);
- return TryAccessorTransition(
- this, target, descriptor_number, component, accessor, attributes);
- }
- } else {
- // If not, lookup a transition.
- map()->LookupTransition(this, name, &result);
-
- // If there is a transition, try to follow it.
- if (result.IsFound()) {
- Map* target = result.GetTransitionTarget();
- int descriptor_number = target->LastAdded();
- ASSERT(target->instance_descriptors()->GetKey(descriptor_number)
- ->Equals(name));
- return TryAccessorTransition(
- this, target, descriptor_number, component, accessor, attributes);
- }
- }
-
- // If there is no transition yet, add a transition to the a new accessor pair
- // containing the accessor.
- AccessorPair* accessors;
- MaybeObject* maybe_accessors;
-
- // Allocate a new pair if there were no source accessors. Otherwise, copy the
- // pair and modify the accessor.
- if (source_accessors != NULL) {
- maybe_accessors = source_accessors->Copy();
- } else {
- maybe_accessors = GetHeap()->AllocateAccessorPair();
- }
- if (!maybe_accessors->To(&accessors)) return maybe_accessors;
- accessors->set(component, accessor);
-
- CallbacksDescriptor new_accessors_desc(name, accessors, attributes);
-
- Map* new_map;
- MaybeObject* maybe_new_map =
- map()->CopyInsertDescriptor(&new_accessors_desc, INSERT_TRANSITION);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- set_map(new_map);
- return this;
-}
-
-
-MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
- Isolate* isolate = GetIsolate();
- String* name = String::cast(info->name());
- // Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return isolate->heap()->undefined_value();
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->DefineAccessor(info);
- }
-
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc;
-
- // Try to flatten before operating on the string.
- name->TryFlatten();
-
- if (!CanSetCallback(name)) return isolate->heap()->undefined_value();
-
- uint32_t index = 0;
- bool is_element = name->AsArrayIndex(&index);
-
- if (is_element) {
- if (IsJSArray()) return isolate->heap()->undefined_value();
-
- // Accessors overwrite previous callbacks (cf. with getters/setters).
- switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- // Ignore getters and setters on pixel and external array
- // elements.
- return isolate->heap()->undefined_value();
- case DICTIONARY_ELEMENTS:
- break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNIMPLEMENTED();
- break;
- }
-
- MaybeObject* maybe_ok =
- SetElementCallback(index, info, info->property_attributes());
- if (maybe_ok->IsFailure()) return maybe_ok;
- } else {
- // Lookup the name.
- LookupResult result(isolate);
- LocalLookup(name, &result, true);
- // ES5 forbids turning a property into an accessor if it's not
- // configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
- if (result.IsFound() && (result.IsReadOnly() || result.IsDontDelete())) {
- return isolate->heap()->undefined_value();
- }
-
- MaybeObject* maybe_ok =
- SetPropertyCallback(name, info, info->property_attributes());
- if (maybe_ok->IsFailure()) return maybe_ok;
- }
-
- return this;
-}
-
-
-Object* JSObject::LookupAccessor(String* name, AccessorComponent component) {
- Heap* heap = GetHeap();
-
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc;
-
- // Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return heap->undefined_value();
- }
-
- // Make the lookup and include prototypes.
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- for (Object* obj = this;
- obj != heap->null_value();
- obj = JSReceiver::cast(obj)->GetPrototype()) {
- if (obj->IsJSObject() && JSObject::cast(obj)->HasDictionaryElements()) {
- JSObject* js_object = JSObject::cast(obj);
- SeededNumberDictionary* dictionary = js_object->element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != SeededNumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
- if (dictionary->DetailsAt(entry).type() == CALLBACKS &&
- element->IsAccessorPair()) {
- return AccessorPair::cast(element)->GetComponent(component);
- }
- }
- }
- }
- } else {
- for (Object* obj = this;
- obj != heap->null_value();
- obj = JSReceiver::cast(obj)->GetPrototype()) {
- LookupResult result(heap->isolate());
- JSReceiver::cast(obj)->LocalLookup(name, &result);
- if (result.IsFound()) {
- if (result.IsReadOnly()) return heap->undefined_value();
- if (result.IsPropertyCallbacks()) {
- Object* obj = result.GetCallbackObject();
- if (obj->IsAccessorPair()) {
- return AccessorPair::cast(obj)->GetComponent(component);
- }
- }
- }
- }
- }
- return heap->undefined_value();
-}
-
-
-Object* JSObject::SlowReverseLookup(Object* value) {
- if (HasFastProperties()) {
- int number_of_own_descriptors = map()->NumberOfOwnDescriptors();
- DescriptorArray* descs = map()->instance_descriptors();
- for (int i = 0; i < number_of_own_descriptors; i++) {
- if (descs->GetType(i) == FIELD) {
- if (FastPropertyAt(descs->GetFieldIndex(i)) == value) {
- return descs->GetKey(i);
- }
- } else if (descs->GetType(i) == CONSTANT_FUNCTION) {
- if (descs->GetConstantFunction(i) == value) {
- return descs->GetKey(i);
- }
- }
- }
- return GetHeap()->undefined_value();
- } else {
- return property_dictionary()->SlowReverseLookup(value);
- }
-}
-
-
-MaybeObject* Map::RawCopy(int instance_size) {
- Map* result;
- MaybeObject* maybe_result =
- GetHeap()->AllocateMap(instance_type(), instance_size);
- if (!maybe_result->To(&result)) return maybe_result;
-
- result->set_prototype(prototype());
- result->set_constructor(constructor());
- result->set_bit_field(bit_field());
- result->set_bit_field2(bit_field2());
- int new_bit_field3 = bit_field3();
- new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true);
- new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
- new_bit_field3 = EnumLengthBits::update(new_bit_field3, kInvalidEnumCache);
- result->set_bit_field3(new_bit_field3);
- return result;
-}
-
-
-MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
- NormalizedMapSharingMode sharing) {
- int new_instance_size = instance_size();
- if (mode == CLEAR_INOBJECT_PROPERTIES) {
- new_instance_size -= inobject_properties() * kPointerSize;
- }
-
- Map* result;
- MaybeObject* maybe_result = RawCopy(new_instance_size);
- if (!maybe_result->To(&result)) return maybe_result;
-
- if (mode != CLEAR_INOBJECT_PROPERTIES) {
- result->set_inobject_properties(inobject_properties());
- }
-
- result->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
- result->set_dictionary_map(true);
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap && result->is_shared()) {
- result->SharedMapVerify();
- }
-#endif
-
- return result;
-}
-
-
-MaybeObject* Map::CopyDropDescriptors() {
- Map* result;
- MaybeObject* maybe_result = RawCopy(instance_size());
- if (!maybe_result->To(&result)) return maybe_result;
-
- // Please note instance_type and instance_size are set when allocated.
- result->set_inobject_properties(inobject_properties());
- result->set_unused_property_fields(unused_property_fields());
-
- result->set_pre_allocated_property_fields(pre_allocated_property_fields());
- result->set_is_shared(false);
- result->ClearCodeCache(GetHeap());
- NotifyLeafMapLayoutChange();
- return result;
-}
-
-
-MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
- Descriptor* descriptor) {
- // Sanity check. This path is only to be taken if the map owns its descriptor
- // array, implying that its NumberOfOwnDescriptors equals the number of
- // descriptors in the descriptor array.
- ASSERT(NumberOfOwnDescriptors() ==
- instance_descriptors()->number_of_descriptors());
- Map* result;
- MaybeObject* maybe_result = CopyDropDescriptors();
- if (!maybe_result->To(&result)) return maybe_result;
-
- String* name = descriptor->GetKey();
-
- TransitionArray* transitions;
- MaybeObject* maybe_transitions =
- AddTransition(name, result, SIMPLE_TRANSITION);
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
-
- int old_size = descriptors->number_of_descriptors();
-
- DescriptorArray* new_descriptors;
-
- if (descriptors->NumberOfSlackDescriptors() > 0) {
- new_descriptors = descriptors;
- new_descriptors->Append(descriptor);
- } else {
- // Descriptor arrays grow by 50%.
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
- old_size, old_size < 4 ? 1 : old_size / 2);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
-
- DescriptorArray::WhitenessWitness witness(new_descriptors);
-
- // Copy the descriptors, inserting a descriptor.
- for (int i = 0; i < old_size; ++i) {
- new_descriptors->CopyFrom(i, descriptors, i, witness);
- }
-
- new_descriptors->Append(descriptor, witness);
-
- if (old_size > 0) {
- // If the source descriptors had an enum cache we copy it. This ensures
- // that the maps to which we push the new descriptor array back can rely
- // on a cache always being available once it is set. If the map has more
- // enumerated descriptors than available in the original cache, the cache
- // will be lazily replaced by the extended cache when needed.
- if (descriptors->HasEnumCache()) {
- new_descriptors->CopyEnumCacheFrom(descriptors);
- }
-
- Map* map;
- // Replace descriptors by new_descriptors in all maps that share it.
- for (Object* current = GetBackPointer();
- !current->IsUndefined();
- current = map->GetBackPointer()) {
- map = Map::cast(current);
- if (map->instance_descriptors() != descriptors) break;
- map->set_instance_descriptors(new_descriptors);
- }
-
- set_instance_descriptors(new_descriptors);
- }
- }
-
- result->SetBackPointer(this);
- result->InitializeDescriptors(new_descriptors);
- ASSERT(result->NumberOfOwnDescriptors() == NumberOfOwnDescriptors() + 1);
-
- set_transitions(transitions);
- set_owns_descriptors(false);
-
- return result;
-}
-
-
-MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
- String* name,
- TransitionFlag flag,
- int descriptor_index) {
- ASSERT(descriptors->IsSortedNoDuplicates());
-
- Map* result;
- MaybeObject* maybe_result = CopyDropDescriptors();
- if (!maybe_result->To(&result)) return maybe_result;
-
- result->InitializeDescriptors(descriptors);
-
- if (flag == INSERT_TRANSITION && CanHaveMoreTransitions()) {
- TransitionArray* transitions;
- SimpleTransitionFlag simple_flag =
- (descriptor_index == descriptors->number_of_descriptors() - 1)
- ? SIMPLE_TRANSITION
- : FULL_TRANSITION;
- MaybeObject* maybe_transitions = AddTransition(name, result, simple_flag);
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
-
- set_transitions(transitions);
- result->SetBackPointer(this);
- }
-
- return result;
-}
-
-
-MaybeObject* Map::CopyAsElementsKind(ElementsKind kind, TransitionFlag flag) {
- if (flag == INSERT_TRANSITION) {
- ASSERT(!HasElementsTransition() ||
- ((elements_transition_map()->elements_kind() == DICTIONARY_ELEMENTS ||
- IsExternalArrayElementsKind(
- elements_transition_map()->elements_kind())) &&
- (kind == DICTIONARY_ELEMENTS ||
- IsExternalArrayElementsKind(kind))));
- ASSERT(!IsFastElementsKind(kind) ||
- IsMoreGeneralElementsKindTransition(elements_kind(), kind));
- ASSERT(kind != elements_kind());
- }
-
- bool insert_transition =
- flag == INSERT_TRANSITION && !HasElementsTransition();
-
- if (insert_transition && owns_descriptors()) {
- // In case the map owned its own descriptors, share the descriptors and
- // transfer ownership to the new map.
- Map* new_map;
- MaybeObject* maybe_new_map = CopyDropDescriptors();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- MaybeObject* added_elements = set_elements_transition_map(new_map);
- if (added_elements->IsFailure()) return added_elements;
-
- new_map->set_elements_kind(kind);
- new_map->InitializeDescriptors(instance_descriptors());
- new_map->SetBackPointer(this);
- set_owns_descriptors(false);
- return new_map;
- }
-
- // In case the map did not own its own descriptors, a split is forced by
- // copying the map; creating a new descriptor array cell.
- // Create a new free-floating map only if we are not allowed to store it.
- Map* new_map;
- MaybeObject* maybe_new_map = Copy();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- new_map->set_elements_kind(kind);
-
- if (insert_transition) {
- MaybeObject* added_elements = set_elements_transition_map(new_map);
- if (added_elements->IsFailure()) return added_elements;
- new_map->SetBackPointer(this);
- }
-
- return new_map;
-}
-
-
-MaybeObject* Map::CopyWithPreallocatedFieldDescriptors() {
- if (pre_allocated_property_fields() == 0) return CopyDropDescriptors();
-
- // If the map has pre-allocated properties always start out with a descriptor
- // array describing these properties.
- ASSERT(constructor()->IsJSFunction());
- JSFunction* ctor = JSFunction::cast(constructor());
- Map* map = ctor->initial_map();
- DescriptorArray* descriptors = map->instance_descriptors();
-
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors =
- descriptors->CopyUpTo(number_of_own_descriptors);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
-
- return CopyReplaceDescriptors(new_descriptors, NULL, OMIT_TRANSITION, 0);
-}
-
-
-MaybeObject* Map::Copy() {
- DescriptorArray* descriptors = instance_descriptors();
- DescriptorArray* new_descriptors;
- int number_of_own_descriptors = NumberOfOwnDescriptors();
- MaybeObject* maybe_descriptors =
- descriptors->CopyUpTo(number_of_own_descriptors);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
-
- return CopyReplaceDescriptors(new_descriptors, NULL, OMIT_TRANSITION, 0);
-}
-
-
-MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor,
- TransitionFlag flag) {
- DescriptorArray* descriptors = instance_descriptors();
-
- // Ensure the key is an internalized string.
- MaybeObject* maybe_failure = descriptor->KeyToInternalizedString();
- if (maybe_failure->IsFailure()) return maybe_failure;
-
- int old_size = NumberOfOwnDescriptors();
- int new_size = old_size + 1;
- descriptor->SetEnumerationIndex(new_size);
-
- if (flag == INSERT_TRANSITION &&
- owns_descriptors() &&
- CanHaveMoreTransitions()) {
- return ShareDescriptor(descriptors, descriptor);
- }
-
- DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(old_size, 1);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
-
- DescriptorArray::WhitenessWitness witness(new_descriptors);
-
- // Copy the descriptors, inserting a descriptor.
- for (int i = 0; i < old_size; ++i) {
- new_descriptors->CopyFrom(i, descriptors, i, witness);
- }
-
- if (old_size != descriptors->number_of_descriptors()) {
- new_descriptors->SetNumberOfDescriptors(new_size);
- new_descriptors->Set(old_size, descriptor, witness);
- new_descriptors->Sort();
- } else {
- new_descriptors->Append(descriptor, witness);
- }
-
- String* key = descriptor->GetKey();
- int insertion_index = new_descriptors->number_of_descriptors() - 1;
-
- return CopyReplaceDescriptors(new_descriptors, key, flag, insertion_index);
-}
-
-
-MaybeObject* Map::CopyInsertDescriptor(Descriptor* descriptor,
- TransitionFlag flag) {
- DescriptorArray* old_descriptors = instance_descriptors();
-
- // Ensure the key is an internalized string.
- MaybeObject* maybe_result = descriptor->KeyToInternalizedString();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // We replace the key if it is already present.
- int index = old_descriptors->SearchWithCache(descriptor->GetKey(), this);
- if (index != DescriptorArray::kNotFound) {
- return CopyReplaceDescriptor(old_descriptors, descriptor, index, flag);
- }
- return CopyAddDescriptor(descriptor, flag);
-}
-
-
-MaybeObject* DescriptorArray::CopyUpTo(int enumeration_index) {
- if (enumeration_index == 0) return GetHeap()->empty_descriptor_array();
-
- int size = enumeration_index;
-
- DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors = Allocate(size);
- if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
- DescriptorArray::WhitenessWitness witness(descriptors);
-
- for (int i = 0; i < size; ++i) {
- descriptors->CopyFrom(i, this, i, witness);
- }
-
- if (number_of_descriptors() != enumeration_index) descriptors->Sort();
-
- return descriptors;
-}
-
-
-MaybeObject* Map::CopyReplaceDescriptor(DescriptorArray* descriptors,
- Descriptor* descriptor,
- int insertion_index,
- TransitionFlag flag) {
- // Ensure the key is an internalized string.
- MaybeObject* maybe_failure = descriptor->KeyToInternalizedString();
- if (maybe_failure->IsFailure()) return maybe_failure;
-
- String* key = descriptor->GetKey();
- ASSERT(key == descriptors->GetKey(insertion_index));
-
- int new_size = NumberOfOwnDescriptors();
- ASSERT(0 <= insertion_index && insertion_index < new_size);
-
- PropertyDetails details = descriptors->GetDetails(insertion_index);
- ASSERT_LE(details.descriptor_index(), new_size);
- descriptor->SetEnumerationIndex(details.descriptor_index());
-
- DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(new_size);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
- DescriptorArray::WhitenessWitness witness(new_descriptors);
-
- for (int i = 0; i < new_size; ++i) {
- if (i == insertion_index) {
- new_descriptors->Set(i, descriptor, witness);
- } else {
- new_descriptors->CopyFrom(i, descriptors, i, witness);
- }
- }
-
- // Re-sort if descriptors were removed.
- if (new_size != descriptors->length()) new_descriptors->Sort();
-
- return CopyReplaceDescriptors(new_descriptors, key, flag, insertion_index);
-}
-
-
-void Map::UpdateCodeCache(Handle<Map> map,
- Handle<String> name,
- Handle<Code> code) {
- Isolate* isolate = map->GetIsolate();
- CALL_HEAP_FUNCTION_VOID(isolate,
- map->UpdateCodeCache(*name, *code));
-}
-
-
-MaybeObject* Map::UpdateCodeCache(String* name, Code* code) {
- ASSERT(!is_shared() || code->allowed_in_shared_map_code_cache());
-
- // Allocate the code cache if not present.
- if (code_cache()->IsFixedArray()) {
- Object* result;
- { MaybeObject* maybe_result = GetHeap()->AllocateCodeCache();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- set_code_cache(result);
- }
-
- // Update the code cache.
- return CodeCache::cast(code_cache())->Update(name, code);
-}
-
-
-Object* Map::FindInCodeCache(String* name, Code::Flags flags) {
- // Do a lookup if a code cache exists.
- if (!code_cache()->IsFixedArray()) {
- return CodeCache::cast(code_cache())->Lookup(name, flags);
- } else {
- return GetHeap()->undefined_value();
- }
-}
-
-
-int Map::IndexInCodeCache(Object* name, Code* code) {
- // Get the internal index if a code cache exists.
- if (!code_cache()->IsFixedArray()) {
- return CodeCache::cast(code_cache())->GetIndex(name, code);
- }
- return -1;
-}
-
-
-void Map::RemoveFromCodeCache(String* name, Code* code, int index) {
- // No GC is supposed to happen between a call to IndexInCodeCache and
- // RemoveFromCodeCache so the code cache must be there.
- ASSERT(!code_cache()->IsFixedArray());
- CodeCache::cast(code_cache())->RemoveByIndex(name, code, index);
-}
-
-
-// An iterator over all map transitions in an descriptor array, reusing the map
-// field of the contens array while it is running.
-class IntrusiveMapTransitionIterator {
- public:
- explicit IntrusiveMapTransitionIterator(TransitionArray* transition_array)
- : transition_array_(transition_array) { }
-
- void Start() {
- ASSERT(!IsIterating());
- *TransitionArrayHeader() = Smi::FromInt(0);
- }
-
- bool IsIterating() {
- return (*TransitionArrayHeader())->IsSmi();
- }
-
- Map* Next() {
- ASSERT(IsIterating());
- int index = Smi::cast(*TransitionArrayHeader())->value();
- int number_of_transitions = transition_array_->number_of_transitions();
- while (index < number_of_transitions) {
- *TransitionArrayHeader() = Smi::FromInt(index + 1);
- return transition_array_->GetTarget(index);
- }
-
- if (index == number_of_transitions &&
- transition_array_->HasElementsTransition()) {
- Map* elements_transition = transition_array_->elements_transition();
- *TransitionArrayHeader() = Smi::FromInt(index + 1);
- return elements_transition;
- }
- *TransitionArrayHeader() = transition_array_->GetHeap()->fixed_array_map();
- return NULL;
- }
-
- private:
- Object** TransitionArrayHeader() {
- return HeapObject::RawField(transition_array_, TransitionArray::kMapOffset);
- }
-
- TransitionArray* transition_array_;
-};
-
-
-// An iterator over all prototype transitions, reusing the map field of the
-// underlying array while it is running.
-class IntrusivePrototypeTransitionIterator {
- public:
- explicit IntrusivePrototypeTransitionIterator(HeapObject* proto_trans)
- : proto_trans_(proto_trans) { }
-
- void Start() {
- ASSERT(!IsIterating());
- *Header() = Smi::FromInt(0);
- }
-
- bool IsIterating() {
- return (*Header())->IsSmi();
- }
-
- Map* Next() {
- ASSERT(IsIterating());
- int transitionNumber = Smi::cast(*Header())->value();
- if (transitionNumber < NumberOfTransitions()) {
- *Header() = Smi::FromInt(transitionNumber + 1);
- return GetTransition(transitionNumber);
- }
- *Header() = proto_trans_->GetHeap()->fixed_array_map();
- return NULL;
- }
-
- private:
- Object** Header() {
- return HeapObject::RawField(proto_trans_, FixedArray::kMapOffset);
- }
-
- int NumberOfTransitions() {
- FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
- Object* num = proto_trans->get(Map::kProtoTransitionNumberOfEntriesOffset);
- return Smi::cast(num)->value();
- }
-
- Map* GetTransition(int transitionNumber) {
- FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
- return Map::cast(proto_trans->get(IndexFor(transitionNumber)));
- }
-
- int IndexFor(int transitionNumber) {
- return Map::kProtoTransitionHeaderSize +
- Map::kProtoTransitionMapOffset +
- transitionNumber * Map::kProtoTransitionElementsPerEntry;
- }
-
- HeapObject* proto_trans_;
-};
-
-
-// To traverse the transition tree iteratively, we have to store two kinds of
-// information in a map: The parent map in the traversal and which children of a
-// node have already been visited. To do this without additional memory, we
-// temporarily reuse two maps with known values:
-//
-// (1) The map of the map temporarily holds the parent, and is restored to the
-// meta map afterwards.
-//
-// (2) The info which children have already been visited depends on which part
-// of the map we currently iterate:
-//
-// (a) If we currently follow normal map transitions, we temporarily store
-// the current index in the map of the FixedArray of the desciptor
-// array's contents, and restore it to the fixed array map afterwards.
-// Note that a single descriptor can have 0, 1, or 2 transitions.
-//
-// (b) If we currently follow prototype transitions, we temporarily store
-// the current index in the map of the FixedArray holding the prototype
-// transitions, and restore it to the fixed array map afterwards.
-//
-// Note that the child iterator is just a concatenation of two iterators: One
-// iterating over map transitions and one iterating over prototype transisitons.
-class TraversableMap : public Map {
- public:
- // Record the parent in the traversal within this map. Note that this destroys
- // this map's map!
- void SetParent(TraversableMap* parent) { set_map_no_write_barrier(parent); }
-
- // Reset the current map's map, returning the parent previously stored in it.
- TraversableMap* GetAndResetParent() {
- TraversableMap* old_parent = static_cast<TraversableMap*>(map());
- set_map_no_write_barrier(GetHeap()->meta_map());
- return old_parent;
- }
-
- // Start iterating over this map's children, possibly destroying a FixedArray
- // map (see explanation above).
- void ChildIteratorStart() {
- if (HasTransitionArray()) {
- if (HasPrototypeTransitions()) {
- IntrusivePrototypeTransitionIterator(GetPrototypeTransitions()).Start();
- }
-
- IntrusiveMapTransitionIterator(transitions()).Start();
- }
- }
-
- // If we have an unvisited child map, return that one and advance. If we have
- // none, return NULL and reset any destroyed FixedArray maps.
- TraversableMap* ChildIteratorNext() {
- TransitionArray* transition_array = unchecked_transition_array();
- if (!transition_array->map()->IsSmi() &&
- !transition_array->IsTransitionArray()) {
- return NULL;
- }
-
- if (transition_array->HasPrototypeTransitions()) {
- HeapObject* proto_transitions =
- transition_array->UncheckedPrototypeTransitions();
- IntrusivePrototypeTransitionIterator proto_iterator(proto_transitions);
- if (proto_iterator.IsIterating()) {
- Map* next = proto_iterator.Next();
- if (next != NULL) return static_cast<TraversableMap*>(next);
- }
- }
-
- IntrusiveMapTransitionIterator transition_iterator(transition_array);
- if (transition_iterator.IsIterating()) {
- Map* next = transition_iterator.Next();
- if (next != NULL) return static_cast<TraversableMap*>(next);
- }
-
- return NULL;
- }
-};
-
-
-// Traverse the transition tree in postorder without using the C++ stack by
-// doing pointer reversal.
-void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
- TraversableMap* current = static_cast<TraversableMap*>(this);
- current->ChildIteratorStart();
- while (true) {
- TraversableMap* child = current->ChildIteratorNext();
- if (child != NULL) {
- child->ChildIteratorStart();
- child->SetParent(current);
- current = child;
- } else {
- TraversableMap* parent = current->GetAndResetParent();
- callback(current, data);
- if (current == this) break;
- current = parent;
- }
- }
-}
-
-
-MaybeObject* CodeCache::Update(String* name, Code* code) {
- // The number of monomorphic stubs for normal load/store/call IC's can grow to
- // a large number and therefore they need to go into a hash table. They are
- // used to load global properties from cells.
- if (code->type() == Code::NORMAL) {
- // Make sure that a hash table is allocated for the normal load code cache.
- if (normal_type_cache()->IsUndefined()) {
- Object* result;
- { MaybeObject* maybe_result =
- CodeCacheHashTable::Allocate(CodeCacheHashTable::kInitialSize);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- set_normal_type_cache(result);
- }
- return UpdateNormalTypeCache(name, code);
- } else {
- ASSERT(default_cache()->IsFixedArray());
- return UpdateDefaultCache(name, code);
- }
-}
-
-
-MaybeObject* CodeCache::UpdateDefaultCache(String* name, Code* code) {
- // When updating the default code cache we disregard the type encoded in the
- // flags. This allows call constant stubs to overwrite call field
- // stubs, etc.
- Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());
-
- // First check whether we can update existing code cache without
- // extending it.
- FixedArray* cache = default_cache();
- int length = cache->length();
- int deleted_index = -1;
- for (int i = 0; i < length; i += kCodeCacheEntrySize) {
- Object* key = cache->get(i);
- if (key->IsNull()) {
- if (deleted_index < 0) deleted_index = i;
- continue;
- }
- if (key->IsUndefined()) {
- if (deleted_index >= 0) i = deleted_index;
- cache->set(i + kCodeCacheEntryNameOffset, name);
- cache->set(i + kCodeCacheEntryCodeOffset, code);
- return this;
- }
- if (name->Equals(String::cast(key))) {
- Code::Flags found =
- Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags();
- if (Code::RemoveTypeFromFlags(found) == flags) {
- cache->set(i + kCodeCacheEntryCodeOffset, code);
- return this;
- }
- }
- }
-
- // Reached the end of the code cache. If there were deleted
- // elements, reuse the space for the first of them.
- if (deleted_index >= 0) {
- cache->set(deleted_index + kCodeCacheEntryNameOffset, name);
- cache->set(deleted_index + kCodeCacheEntryCodeOffset, code);
- return this;
- }
-
- // Extend the code cache with some new entries (at least one). Must be a
- // multiple of the entry size.
- int new_length = length + ((length >> 1)) + kCodeCacheEntrySize;
- new_length = new_length - new_length % kCodeCacheEntrySize;
- ASSERT((new_length % kCodeCacheEntrySize) == 0);
- Object* result;
- { MaybeObject* maybe_result = cache->CopySize(new_length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Add the (name, code) pair to the new cache.
- cache = FixedArray::cast(result);
- cache->set(length + kCodeCacheEntryNameOffset, name);
- cache->set(length + kCodeCacheEntryCodeOffset, code);
- set_default_cache(cache);
- return this;
-}
-
-
-MaybeObject* CodeCache::UpdateNormalTypeCache(String* name, Code* code) {
- // Adding a new entry can cause a new cache to be allocated.
- CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- Object* new_cache;
- { MaybeObject* maybe_new_cache = cache->Put(name, code);
- if (!maybe_new_cache->ToObject(&new_cache)) return maybe_new_cache;
- }
- set_normal_type_cache(new_cache);
- return this;
-}
-
-
-Object* CodeCache::Lookup(String* name, Code::Flags flags) {
- if (Code::ExtractTypeFromFlags(flags) == Code::NORMAL) {
- return LookupNormalTypeCache(name, flags);
- } else {
- return LookupDefaultCache(name, flags);
- }
-}
-
-
-Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) {
- FixedArray* cache = default_cache();
- int length = cache->length();
- for (int i = 0; i < length; i += kCodeCacheEntrySize) {
- Object* key = cache->get(i + kCodeCacheEntryNameOffset);
- // Skip deleted elements.
- if (key->IsNull()) continue;
- if (key->IsUndefined()) return key;
- if (name->Equals(String::cast(key))) {
- Code* code = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset));
- if (code->flags() == flags) {
- return code;
- }
- }
- }
- return GetHeap()->undefined_value();
-}
-
-
-Object* CodeCache::LookupNormalTypeCache(String* name, Code::Flags flags) {
- if (!normal_type_cache()->IsUndefined()) {
- CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- return cache->Lookup(name, flags);
- } else {
- return GetHeap()->undefined_value();
- }
-}
-
-
-int CodeCache::GetIndex(Object* name, Code* code) {
- if (code->type() == Code::NORMAL) {
- if (normal_type_cache()->IsUndefined()) return -1;
- CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- return cache->GetIndex(String::cast(name), code->flags());
- }
-
- FixedArray* array = default_cache();
- int len = array->length();
- for (int i = 0; i < len; i += kCodeCacheEntrySize) {
- if (array->get(i + kCodeCacheEntryCodeOffset) == code) return i + 1;
- }
- return -1;
-}
-
-
-void CodeCache::RemoveByIndex(Object* name, Code* code, int index) {
- if (code->type() == Code::NORMAL) {
- ASSERT(!normal_type_cache()->IsUndefined());
- CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- ASSERT(cache->GetIndex(String::cast(name), code->flags()) == index);
- cache->RemoveByIndex(index);
- } else {
- FixedArray* array = default_cache();
- ASSERT(array->length() >= index && array->get(index)->IsCode());
- // Use null instead of undefined for deleted elements to distinguish
- // deleted elements from unused elements. This distinction is used
- // when looking up in the cache and when updating the cache.
- ASSERT_EQ(1, kCodeCacheEntryCodeOffset - kCodeCacheEntryNameOffset);
- array->set_null(index - 1); // Name.
- array->set_null(index); // Code.
- }
-}
-
-
-// The key in the code cache hash table consists of the property name and the
-// code object. The actual match is on the name and the code flags. If a key
-// is created using the flags and not a code object it can only be used for
-// lookup not to create a new entry.
-class CodeCacheHashTableKey : public HashTableKey {
- public:
- CodeCacheHashTableKey(String* name, Code::Flags flags)
- : name_(name), flags_(flags), code_(NULL) { }
-
- CodeCacheHashTableKey(String* name, Code* code)
- : name_(name),
- flags_(code->flags()),
- code_(code) { }
-
-
- bool IsMatch(Object* other) {
- if (!other->IsFixedArray()) return false;
- FixedArray* pair = FixedArray::cast(other);
- String* name = String::cast(pair->get(0));
- Code::Flags flags = Code::cast(pair->get(1))->flags();
- if (flags != flags_) {
- return false;
- }
- return name_->Equals(name);
- }
-
- static uint32_t NameFlagsHashHelper(String* name, Code::Flags flags) {
- return name->Hash() ^ flags;
- }
-
- uint32_t Hash() { return NameFlagsHashHelper(name_, flags_); }
-
- uint32_t HashForObject(Object* obj) {
- FixedArray* pair = FixedArray::cast(obj);
- String* name = String::cast(pair->get(0));
- Code* code = Code::cast(pair->get(1));
- return NameFlagsHashHelper(name, code->flags());
- }
-
- MUST_USE_RESULT MaybeObject* AsObject() {
- ASSERT(code_ != NULL);
- Object* obj;
- { MaybeObject* maybe_obj = code_->GetHeap()->AllocateFixedArray(2);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* pair = FixedArray::cast(obj);
- pair->set(0, name_);
- pair->set(1, code_);
- return pair;
- }
-
- private:
- String* name_;
- Code::Flags flags_;
- // TODO(jkummerow): We should be able to get by without this.
- Code* code_;
-};
-
-
-Object* CodeCacheHashTable::Lookup(String* name, Code::Flags flags) {
- CodeCacheHashTableKey key(name, flags);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-MaybeObject* CodeCacheHashTable::Put(String* name, Code* code) {
- CodeCacheHashTableKey key(name, code);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- // Don't use |this|, as the table might have grown.
- CodeCacheHashTable* cache = reinterpret_cast<CodeCacheHashTable*>(obj);
-
- int entry = cache->FindInsertionEntry(key.Hash());
- Object* k;
- { MaybeObject* maybe_k = key.AsObject();
- if (!maybe_k->ToObject(&k)) return maybe_k;
- }
-
- cache->set(EntryToIndex(entry), k);
- cache->set(EntryToIndex(entry) + 1, code);
- cache->ElementAdded();
- return cache;
-}
-
-
-int CodeCacheHashTable::GetIndex(String* name, Code::Flags flags) {
- CodeCacheHashTableKey key(name, flags);
- int entry = FindEntry(&key);
- return (entry == kNotFound) ? -1 : entry;
-}
-
-
-void CodeCacheHashTable::RemoveByIndex(int index) {
- ASSERT(index >= 0);
- Heap* heap = GetHeap();
- set(EntryToIndex(index), heap->the_hole_value());
- set(EntryToIndex(index) + 1, heap->the_hole_value());
- ElementRemoved();
-}
-
-
-void PolymorphicCodeCache::Update(Handle<PolymorphicCodeCache> cache,
- MapHandleList* maps,
- Code::Flags flags,
- Handle<Code> code) {
- Isolate* isolate = cache->GetIsolate();
- CALL_HEAP_FUNCTION_VOID(isolate, cache->Update(maps, flags, *code));
-}
-
-
-MaybeObject* PolymorphicCodeCache::Update(MapHandleList* maps,
- Code::Flags flags,
- Code* code) {
- // Initialize cache if necessary.
- if (cache()->IsUndefined()) {
- Object* result;
- { MaybeObject* maybe_result =
- PolymorphicCodeCacheHashTable::Allocate(
- PolymorphicCodeCacheHashTable::kInitialSize);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- set_cache(result);
- } else {
- // This entry shouldn't be contained in the cache yet.
- ASSERT(PolymorphicCodeCacheHashTable::cast(cache())
- ->Lookup(maps, flags)->IsUndefined());
- }
- PolymorphicCodeCacheHashTable* hash_table =
- PolymorphicCodeCacheHashTable::cast(cache());
- Object* new_cache;
- { MaybeObject* maybe_new_cache = hash_table->Put(maps, flags, code);
- if (!maybe_new_cache->ToObject(&new_cache)) return maybe_new_cache;
- }
- set_cache(new_cache);
- return this;
-}
-
-
-Handle<Object> PolymorphicCodeCache::Lookup(MapHandleList* maps,
- Code::Flags flags) {
- if (!cache()->IsUndefined()) {
- PolymorphicCodeCacheHashTable* hash_table =
- PolymorphicCodeCacheHashTable::cast(cache());
- return Handle<Object>(hash_table->Lookup(maps, flags), GetIsolate());
- } else {
- return GetIsolate()->factory()->undefined_value();
- }
-}
-
-
-// Despite their name, object of this class are not stored in the actual
-// hash table; instead they're temporarily used for lookups. It is therefore
-// safe to have a weak (non-owning) pointer to a MapList as a member field.
-class PolymorphicCodeCacheHashTableKey : public HashTableKey {
- public:
- // Callers must ensure that |maps| outlives the newly constructed object.
- PolymorphicCodeCacheHashTableKey(MapHandleList* maps, int code_flags)
- : maps_(maps),
- code_flags_(code_flags) {}
-
- bool IsMatch(Object* other) {
- MapHandleList other_maps(kDefaultListAllocationSize);
- int other_flags;
- FromObject(other, &other_flags, &other_maps);
- if (code_flags_ != other_flags) return false;
- if (maps_->length() != other_maps.length()) return false;
- // Compare just the hashes first because it's faster.
- int this_hash = MapsHashHelper(maps_, code_flags_);
- int other_hash = MapsHashHelper(&other_maps, other_flags);
- if (this_hash != other_hash) return false;
-
- // Full comparison: for each map in maps_, look for an equivalent map in
- // other_maps. This implementation is slow, but probably good enough for
- // now because the lists are short (<= 4 elements currently).
- for (int i = 0; i < maps_->length(); ++i) {
- bool match_found = false;
- for (int j = 0; j < other_maps.length(); ++j) {
- if (*(maps_->at(i)) == *(other_maps.at(j))) {
- match_found = true;
- break;
- }
- }
- if (!match_found) return false;
- }
- return true;
- }
-
- static uint32_t MapsHashHelper(MapHandleList* maps, int code_flags) {
- uint32_t hash = code_flags;
- for (int i = 0; i < maps->length(); ++i) {
- hash ^= maps->at(i)->Hash();
- }
- return hash;
- }
-
- uint32_t Hash() {
- return MapsHashHelper(maps_, code_flags_);
- }
-
- uint32_t HashForObject(Object* obj) {
- MapHandleList other_maps(kDefaultListAllocationSize);
- int other_flags;
- FromObject(obj, &other_flags, &other_maps);
- return MapsHashHelper(&other_maps, other_flags);
- }
-
- MUST_USE_RESULT MaybeObject* AsObject() {
- Object* obj;
- // The maps in |maps_| must be copied to a newly allocated FixedArray,
- // both because the referenced MapList is short-lived, and because C++
- // objects can't be stored in the heap anyway.
- { MaybeObject* maybe_obj =
- HEAP->AllocateUninitializedFixedArray(maps_->length() + 1);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* list = FixedArray::cast(obj);
- list->set(0, Smi::FromInt(code_flags_));
- for (int i = 0; i < maps_->length(); ++i) {
- list->set(i + 1, *maps_->at(i));
- }
- return list;
- }
-
- private:
- static MapHandleList* FromObject(Object* obj,
- int* code_flags,
- MapHandleList* maps) {
- FixedArray* list = FixedArray::cast(obj);
- maps->Rewind(0);
- *code_flags = Smi::cast(list->get(0))->value();
- for (int i = 1; i < list->length(); ++i) {
- maps->Add(Handle<Map>(Map::cast(list->get(i))));
- }
- return maps;
- }
-
- MapHandleList* maps_; // weak.
- int code_flags_;
- static const int kDefaultListAllocationSize = kMaxKeyedPolymorphism + 1;
-};
-
-
-Object* PolymorphicCodeCacheHashTable::Lookup(MapHandleList* maps,
- int code_flags) {
- PolymorphicCodeCacheHashTableKey key(maps, code_flags);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-MaybeObject* PolymorphicCodeCacheHashTable::Put(MapHandleList* maps,
- int code_flags,
- Code* code) {
- PolymorphicCodeCacheHashTableKey key(maps, code_flags);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- PolymorphicCodeCacheHashTable* cache =
- reinterpret_cast<PolymorphicCodeCacheHashTable*>(obj);
- int entry = cache->FindInsertionEntry(key.Hash());
- { MaybeObject* maybe_obj = key.AsObject();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- cache->set(EntryToIndex(entry), obj);
- cache->set(EntryToIndex(entry) + 1, code);
- cache->ElementAdded();
- return cache;
-}
-
-
-MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
- ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_result =
- accessor->AddElementsToFixedArray(array, array, this);
- FixedArray* result;
- if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- for (int i = 0; i < result->length(); i++) {
- Object* current = result->get(i);
- ASSERT(current->IsNumber() || current->IsString());
- }
- }
-#endif
- return result;
-}
-
-
-MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
- ElementsAccessor* accessor = ElementsAccessor::ForArray(other);
- MaybeObject* maybe_result =
- accessor->AddElementsToFixedArray(NULL, NULL, this, other);
- FixedArray* result;
- if (!maybe_result->To(&result)) return maybe_result;
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- for (int i = 0; i < result->length(); i++) {
- Object* current = result->get(i);
- ASSERT(current->IsNumber() || current->IsString());
- }
- }
-#endif
- return result;
-}
-
-
-MaybeObject* FixedArray::CopySize(int new_length) {
- Heap* heap = GetHeap();
- if (new_length == 0) return heap->empty_fixed_array();
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* result = FixedArray::cast(obj);
- // Copy the content
- AssertNoAllocation no_gc;
- int len = length();
- if (new_length < len) len = new_length;
- // We are taking the map from the old fixed array so the map is sure to
- // be an immortal immutable object.
- result->set_map_no_write_barrier(map());
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < len; i++) {
- result->set(i, get(i), mode);
- }
- return result;
-}
-
-
-void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos, int len) {
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
- for (int index = 0; index < len; index++) {
- dest->set(dest_pos+index, get(pos+index), mode);
- }
-}
-
-
-#ifdef DEBUG
-bool FixedArray::IsEqualTo(FixedArray* other) {
- if (length() != other->length()) return false;
- for (int i = 0 ; i < length(); ++i) {
- if (get(i) != other->get(i)) return false;
- }
- return true;
-}
-#endif
-
-
-MaybeObject* DescriptorArray::Allocate(int number_of_descriptors, int slack) {
- Heap* heap = Isolate::Current()->heap();
- // Do not use DescriptorArray::cast on incomplete object.
- int size = number_of_descriptors + slack;
- if (size == 0) return heap->empty_descriptor_array();
- FixedArray* result;
- // Allocate the array of keys.
- MaybeObject* maybe_array = heap->AllocateFixedArray(LengthFor(size));
- if (!maybe_array->To(&result)) return maybe_array;
-
- result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
- result->set(kEnumCacheIndex, Smi::FromInt(0));
- return result;
-}
-
-
-void DescriptorArray::ClearEnumCache() {
- set(kEnumCacheIndex, Smi::FromInt(0));
-}
-
-
-void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
- FixedArray* new_cache,
- Object* new_index_cache) {
- ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength);
- ASSERT(new_index_cache->IsSmi() || new_index_cache->IsFixedArray());
- ASSERT(!IsEmpty());
- ASSERT(!HasEnumCache() || new_cache->length() > GetEnumCache()->length());
- FixedArray::cast(bridge_storage)->
- set(kEnumCacheBridgeCacheIndex, new_cache);
- FixedArray::cast(bridge_storage)->
- set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
- set(kEnumCacheIndex, bridge_storage);
-}
-
-
-void DescriptorArray::CopyFrom(int dst_index,
- DescriptorArray* src,
- int src_index,
- const WhitenessWitness& witness) {
- Object* value = src->GetValue(src_index);
- PropertyDetails details = src->GetDetails(src_index);
- Descriptor desc(src->GetKey(src_index), value, details);
- Set(dst_index, &desc, witness);
-}
-
-
-// We need the whiteness witness since sort will reshuffle the entries in the
-// descriptor array. If the descriptor array were to be black, the shuffling
-// would move a slot that was already recorded as pointing into an evacuation
-// candidate. This would result in missing updates upon evacuation.
-void DescriptorArray::Sort() {
- // In-place heap sort.
- int len = number_of_descriptors();
- // Reset sorting since the descriptor array might contain invalid pointers.
- for (int i = 0; i < len; ++i) SetSortedKey(i, i);
- // Bottom-up max-heap construction.
- // Index of the last node with children
- const int max_parent_index = (len / 2) - 1;
- for (int i = max_parent_index; i >= 0; --i) {
- int parent_index = i;
- const uint32_t parent_hash = GetSortedKey(i)->Hash();
- while (parent_index <= max_parent_index) {
- int child_index = 2 * parent_index + 1;
- uint32_t child_hash = GetSortedKey(child_index)->Hash();
- if (child_index + 1 < len) {
- uint32_t right_child_hash = GetSortedKey(child_index + 1)->Hash();
- if (right_child_hash > child_hash) {
- child_index++;
- child_hash = right_child_hash;
- }
- }
- if (child_hash <= parent_hash) break;
- SwapSortedKeys(parent_index, child_index);
- // Now element at child_index could be < its children.
- parent_index = child_index; // parent_hash remains correct.
- }
- }
-
- // Extract elements and create sorted array.
- for (int i = len - 1; i > 0; --i) {
- // Put max element at the back of the array.
- SwapSortedKeys(0, i);
- // Shift down the new top element.
- int parent_index = 0;
- const uint32_t parent_hash = GetSortedKey(parent_index)->Hash();
- const int max_parent_index = (i / 2) - 1;
- while (parent_index <= max_parent_index) {
- int child_index = parent_index * 2 + 1;
- uint32_t child_hash = GetSortedKey(child_index)->Hash();
- if (child_index + 1 < i) {
- uint32_t right_child_hash = GetSortedKey(child_index + 1)->Hash();
- if (right_child_hash > child_hash) {
- child_index++;
- child_hash = right_child_hash;
- }
- }
- if (child_hash <= parent_hash) break;
- SwapSortedKeys(parent_index, child_index);
- parent_index = child_index;
- }
- }
- ASSERT(IsSortedNoDuplicates());
-}
-
-
-MaybeObject* AccessorPair::Copy() {
- Heap* heap = GetHeap();
- AccessorPair* copy;
- MaybeObject* maybe_copy = heap->AllocateAccessorPair();
- if (!maybe_copy->To(&copy)) return maybe_copy;
-
- copy->set_getter(getter());
- copy->set_setter(setter());
- return copy;
-}
-
-
-Object* AccessorPair::GetComponent(AccessorComponent component) {
- Object* accessor = get(component);
- return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
-}
-
-
-MaybeObject* DeoptimizationInputData::Allocate(int deopt_entry_count,
- PretenureFlag pretenure) {
- ASSERT(deopt_entry_count > 0);
- return HEAP->AllocateFixedArray(LengthFor(deopt_entry_count),
- pretenure);
-}
-
-
-MaybeObject* DeoptimizationOutputData::Allocate(int number_of_deopt_points,
- PretenureFlag pretenure) {
- if (number_of_deopt_points == 0) return HEAP->empty_fixed_array();
- return HEAP->AllocateFixedArray(LengthOfFixedArray(number_of_deopt_points),
- pretenure);
-}
-
-
-#ifdef DEBUG
-bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
- if (IsEmpty()) return other->IsEmpty();
- if (other->IsEmpty()) return false;
- if (length() != other->length()) return false;
- for (int i = 0; i < length(); ++i) {
- if (get(i) != other->get(i)) return false;
- }
- return true;
-}
-#endif
-
-
-bool String::LooksValid() {
- if (!Isolate::Current()->heap()->Contains(this)) return false;
- return true;
-}
-
-
-String::FlatContent String::GetFlatContent() {
- int length = this->length();
- StringShape shape(this);
- String* string = this;
- int offset = 0;
- if (shape.representation_tag() == kConsStringTag) {
- ConsString* cons = ConsString::cast(string);
- if (cons->second()->length() != 0) {
- return FlatContent();
- }
- string = cons->first();
- shape = StringShape(string);
- }
- if (shape.representation_tag() == kSlicedStringTag) {
- SlicedString* slice = SlicedString::cast(string);
- offset = slice->offset();
- string = slice->parent();
- shape = StringShape(string);
- ASSERT(shape.representation_tag() != kConsStringTag &&
- shape.representation_tag() != kSlicedStringTag);
- }
- if (shape.encoding_tag() == kOneByteStringTag) {
- const uint8_t* start;
- if (shape.representation_tag() == kSeqStringTag) {
- start = SeqOneByteString::cast(string)->GetChars();
- } else {
- start = ExternalAsciiString::cast(string)->GetChars();
- }
- return FlatContent(Vector<const uint8_t>(start + offset, length));
- } else {
- ASSERT(shape.encoding_tag() == kTwoByteStringTag);
- const uc16* start;
- if (shape.representation_tag() == kSeqStringTag) {
- start = SeqTwoByteString::cast(string)->GetChars();
- } else {
- start = ExternalTwoByteString::cast(string)->GetChars();
- }
- return FlatContent(Vector<const uc16>(start + offset, length));
- }
-}
-
-
-SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robust_flag,
- int offset,
- int length,
- int* length_return) {
- if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
- return SmartArrayPointer<char>(NULL);
- }
- Heap* heap = GetHeap();
-
- // Negative length means the to the end of the string.
- if (length < 0) length = kMaxInt - offset;
-
- // Compute the size of the UTF-8 string. Start at the specified offset.
- Access<ConsStringIteratorOp> op(
- heap->isolate()->objects_string_iterator());
- StringCharacterStream stream(this, op.value(), offset);
- int character_position = offset;
- int utf8_bytes = 0;
- int last = unibrow::Utf16::kNoPreviousCharacter;
- while (stream.HasMore() && character_position++ < offset + length) {
- uint16_t character = stream.GetNext();
- utf8_bytes += unibrow::Utf8::Length(character, last);
- last = character;
- }
-
- if (length_return) {
- *length_return = utf8_bytes;
- }
-
- char* result = NewArray<char>(utf8_bytes + 1);
-
- // Convert the UTF-16 string to a UTF-8 buffer. Start at the specified offset.
- stream.Reset(this, offset);
- character_position = offset;
- int utf8_byte_position = 0;
- last = unibrow::Utf16::kNoPreviousCharacter;
- while (stream.HasMore() && character_position++ < offset + length) {
- uint16_t character = stream.GetNext();
- if (allow_nulls == DISALLOW_NULLS && character == 0) {
- character = ' ';
- }
- utf8_byte_position +=
- unibrow::Utf8::Encode(result + utf8_byte_position, character, last);
- last = character;
- }
- result[utf8_byte_position] = 0;
- return SmartArrayPointer<char>(result);
-}
-
-
-SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robust_flag,
- int* length_return) {
- return ToCString(allow_nulls, robust_flag, 0, -1, length_return);
-}
-
-
-const uc16* String::GetTwoByteData() {
- return GetTwoByteData(0);
-}
-
-
-const uc16* String::GetTwoByteData(unsigned start) {
- ASSERT(!IsOneByteRepresentationUnderneath());
- switch (StringShape(this).representation_tag()) {
- case kSeqStringTag:
- return SeqTwoByteString::cast(this)->SeqTwoByteStringGetData(start);
- case kExternalStringTag:
- return ExternalTwoByteString::cast(this)->
- ExternalTwoByteStringGetData(start);
- case kSlicedStringTag: {
- SlicedString* slice = SlicedString::cast(this);
- return slice->parent()->GetTwoByteData(start + slice->offset());
- }
- case kConsStringTag:
- UNREACHABLE();
- return NULL;
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-SmartArrayPointer<uc16> String::ToWideCString(RobustnessFlag robust_flag) {
- if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
- return SmartArrayPointer<uc16>();
- }
- Heap* heap = GetHeap();
-
- Access<ConsStringIteratorOp> op(
- heap->isolate()->objects_string_iterator());
- StringCharacterStream stream(this, op.value());
-
- uc16* result = NewArray<uc16>(length() + 1);
-
- int i = 0;
- while (stream.HasMore()) {
- uint16_t character = stream.GetNext();
- result[i++] = character;
- }
- result[i] = 0;
- return SmartArrayPointer<uc16>(result);
-}
-
-
-const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) {
- return reinterpret_cast<uc16*>(
- reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize) + start;
-}
-
-
-void Relocatable::PostGarbageCollectionProcessing() {
- Isolate* isolate = Isolate::Current();
- Relocatable* current = isolate->relocatable_top();
- while (current != NULL) {
- current->PostGarbageCollection();
- current = current->prev_;
- }
-}
-
-
-// Reserve space for statics needing saving and restoring.
-int Relocatable::ArchiveSpacePerThread() {
- return sizeof(Isolate::Current()->relocatable_top());
-}
-
-
-// Archive statics that are thread local.
-char* Relocatable::ArchiveState(Isolate* isolate, char* to) {
- *reinterpret_cast<Relocatable**>(to) = isolate->relocatable_top();
- isolate->set_relocatable_top(NULL);
- return to + ArchiveSpacePerThread();
-}
-
-
-// Restore statics that are thread local.
-char* Relocatable::RestoreState(Isolate* isolate, char* from) {
- isolate->set_relocatable_top(*reinterpret_cast<Relocatable**>(from));
- return from + ArchiveSpacePerThread();
-}
-
-
-char* Relocatable::Iterate(ObjectVisitor* v, char* thread_storage) {
- Relocatable* top = *reinterpret_cast<Relocatable**>(thread_storage);
- Iterate(v, top);
- return thread_storage + ArchiveSpacePerThread();
-}
-
-
-void Relocatable::Iterate(ObjectVisitor* v) {
- Isolate* isolate = Isolate::Current();
- Iterate(v, isolate->relocatable_top());
-}
-
-
-void Relocatable::Iterate(ObjectVisitor* v, Relocatable* top) {
- Relocatable* current = top;
- while (current != NULL) {
- current->IterateInstance(v);
- current = current->prev_;
- }
-}
-
-
-FlatStringReader::FlatStringReader(Isolate* isolate, Handle<String> str)
- : Relocatable(isolate),
- str_(str.location()),
- length_(str->length()) {
- PostGarbageCollection();
-}
-
-
-FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
- : Relocatable(isolate),
- str_(0),
- is_ascii_(true),
- length_(input.length()),
- start_(input.start()) { }
-
-
-void FlatStringReader::PostGarbageCollection() {
- if (str_ == NULL) return;
- Handle<String> str(str_);
- ASSERT(str->IsFlat());
- String::FlatContent content = str->GetFlatContent();
- ASSERT(content.IsFlat());
- is_ascii_ = content.IsAscii();
- if (is_ascii_) {
- start_ = content.ToOneByteVector().start();
- } else {
- start_ = content.ToUC16Vector().start();
- }
-}
-
-
-String* ConsStringIteratorOp::Operate(String* string,
- unsigned* offset_out,
- int32_t* type_out,
- unsigned* length_out) {
- ASSERT(string->IsConsString());
- ConsString* cons_string = ConsString::cast(string);
- // Set up search data.
- root_ = cons_string;
- consumed_ = *offset_out;
- // Now search.
- return Search(offset_out, type_out, length_out);
-}
-
-
-String* ConsStringIteratorOp::Search(unsigned* offset_out,
- int32_t* type_out,
- unsigned* length_out) {
- ConsString* cons_string = root_;
- // Reset the stack, pushing the root string.
- depth_ = 1;
- maximum_depth_ = 1;
- frames_[0] = cons_string;
- const unsigned consumed = consumed_;
- unsigned offset = 0;
- while (true) {
- // Loop until the string is found which contains the target offset.
- String* string = cons_string->first();
- unsigned length = string->length();
- int32_t type;
- if (consumed < offset + length) {
- // Target offset is in the left branch.
- // Keep going if we're still in a ConString.
- type = string->map()->instance_type();
- if ((type & kStringRepresentationMask) == kConsStringTag) {
- cons_string = ConsString::cast(string);
- PushLeft(cons_string);
- continue;
- }
- // Tell the stack we're done decending.
- AdjustMaximumDepth();
- } else {
- // Descend right.
- // Update progress through the string.
- offset += length;
- // Keep going if we're still in a ConString.
- string = cons_string->second();
- type = string->map()->instance_type();
- if ((type & kStringRepresentationMask) == kConsStringTag) {
- cons_string = ConsString::cast(string);
- PushRight(cons_string);
- // TODO(dcarney) Add back root optimization.
- continue;
- }
- // Need this to be updated for the current string.
- length = string->length();
- // Account for the possibility of an empty right leaf.
- // This happens only if we have asked for an offset outside the string.
- if (length == 0) {
- // Reset depth so future operations will return null immediately.
- Reset();
- return NULL;
- }
- // Tell the stack we're done decending.
- AdjustMaximumDepth();
- // Pop stack so next iteration is in correct place.
- Pop();
- }
- ASSERT(length != 0);
- // Adjust return values and exit.
- consumed_ = offset + length;
- *offset_out = consumed - offset;
- *type_out = type;
- *length_out = length;
- return string;
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-String* ConsStringIteratorOp::NextLeaf(bool* blew_stack,
- int32_t* type_out,
- unsigned* length_out) {
- while (true) {
- // Tree traversal complete.
- if (depth_ == 0) {
- *blew_stack = false;
- return NULL;
- }
- // We've lost track of higher nodes.
- if (maximum_depth_ - depth_ == kStackSize) {
- *blew_stack = true;
- return NULL;
- }
- // Go right.
- ConsString* cons_string = frames_[OffsetForDepth(depth_ - 1)];
- String* string = cons_string->second();
- int32_t type = string->map()->instance_type();
- if ((type & kStringRepresentationMask) != kConsStringTag) {
- // Pop stack so next iteration is in correct place.
- Pop();
- unsigned length = static_cast<unsigned>(string->length());
- // Could be a flattened ConsString.
- if (length == 0) continue;
- *length_out = length;
- *type_out = type;
- consumed_ += length;
- return string;
- }
- cons_string = ConsString::cast(string);
- // TODO(dcarney) Add back root optimization.
- PushRight(cons_string);
- // Need to traverse all the way left.
- while (true) {
- // Continue left.
- string = cons_string->first();
- type = string->map()->instance_type();
- if ((type & kStringRepresentationMask) != kConsStringTag) {
- AdjustMaximumDepth();
- unsigned length = static_cast<unsigned>(string->length());
- ASSERT(length != 0);
- *length_out = length;
- *type_out = type;
- consumed_ += length;
- return string;
- }
- cons_string = ConsString::cast(string);
- PushLeft(cons_string);
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-uint16_t ConsString::ConsStringGet(int index) {
- ASSERT(index >= 0 && index < this->length());
-
- // Check for a flattened cons string
- if (second()->length() == 0) {
- String* left = first();
- return left->Get(index);
- }
-
- String* string = String::cast(this);
-
- while (true) {
- if (StringShape(string).IsCons()) {
- ConsString* cons_string = ConsString::cast(string);
- String* left = cons_string->first();
- if (left->length() > index) {
- string = left;
- } else {
- index -= left->length();
- string = cons_string->second();
- }
- } else {
- return string->Get(index);
- }
- }
-
- UNREACHABLE();
- return 0;
-}
-
-
-uint16_t SlicedString::SlicedStringGet(int index) {
- return parent()->Get(offset() + index);
-}
-
-
-template <typename sinkchar>
-void String::WriteToFlat(String* src,
- sinkchar* sink,
- int f,
- int t) {
- String* source = src;
- int from = f;
- int to = t;
- while (true) {
- ASSERT(0 <= from && from <= to && to <= source->length());
- switch (StringShape(source).full_representation_tag()) {
- case kOneByteStringTag | kExternalStringTag: {
- CopyChars(sink,
- ExternalAsciiString::cast(source)->GetChars() + from,
- to - from);
- return;
- }
- case kTwoByteStringTag | kExternalStringTag: {
- const uc16* data =
- ExternalTwoByteString::cast(source)->GetChars();
- CopyChars(sink,
- data + from,
- to - from);
- return;
- }
- case kOneByteStringTag | kSeqStringTag: {
- CopyChars(sink,
- SeqOneByteString::cast(source)->GetChars() + from,
- to - from);
- return;
- }
- case kTwoByteStringTag | kSeqStringTag: {
- CopyChars(sink,
- SeqTwoByteString::cast(source)->GetChars() + from,
- to - from);
- return;
- }
- case kOneByteStringTag | kConsStringTag:
- case kTwoByteStringTag | kConsStringTag: {
- ConsString* cons_string = ConsString::cast(source);
- String* first = cons_string->first();
- int boundary = first->length();
- if (to - boundary >= boundary - from) {
- // Right hand side is longer. Recurse over left.
- if (from < boundary) {
- WriteToFlat(first, sink, from, boundary);
- sink += boundary - from;
- from = 0;
- } else {
- from -= boundary;
- }
- to -= boundary;
- source = cons_string->second();
- } else {
- // Left hand side is longer. Recurse over right.
- if (to > boundary) {
- String* second = cons_string->second();
- // When repeatedly appending to a string, we get a cons string that
- // is unbalanced to the left, a list, essentially. We inline the
- // common case of sequential ascii right child.
- if (to - boundary == 1) {
- sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
- } else if (second->IsSeqOneByteString()) {
- CopyChars(sink + boundary - from,
- SeqOneByteString::cast(second)->GetChars(),
- to - boundary);
- } else {
- WriteToFlat(second,
- sink + boundary - from,
- 0,
- to - boundary);
- }
- to = boundary;
- }
- source = first;
- }
- break;
- }
- case kOneByteStringTag | kSlicedStringTag:
- case kTwoByteStringTag | kSlicedStringTag: {
- SlicedString* slice = SlicedString::cast(source);
- unsigned offset = slice->offset();
- WriteToFlat(slice->parent(), sink, from + offset, to + offset);
- return;
- }
- }
- }
-}
-
-
// Compares the contents of two equal-width strings by reading and
// comparing 32-bit blocks of characters, falling back to a per-character
// tail loop (and to a pure per-character loop on architectures that
// cannot read unaligned words when the buffers are misaligned).
template <typename Char>
static inline bool CompareRawStringContents(const Char* const a,
                                            const Char* const b,
                                            int length) {
  int i = 0;
#ifndef V8_HOST_CAN_READ_UNALIGNED
  // If this architecture isn't comfortable reading unaligned ints
  // then we have to check that the strings are aligned before
  // comparing them blockwise.
  const int kAlignmentMask = sizeof(uint32_t) - 1;  // NOLINT
  // Use uintptr_t for the address arithmetic: casting a pointer to
  // uint32_t truncates (or fails to compile) on 64-bit hosts.
  uintptr_t pa_addr = reinterpret_cast<uintptr_t>(a);
  uintptr_t pb_addr = reinterpret_cast<uintptr_t>(b);
  if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) {
#endif
    // Step size matches the uint32_t loads performed below (the original
    // used sizeof(int), which only coincidentally equals 4).
    const int kStepSize = sizeof(uint32_t) / sizeof(Char);  // NOLINT
    int endpoint = length - kStepSize;
    // Compare blocks until we reach near the end of the string.
    for (; i <= endpoint; i += kStepSize) {
      uint32_t wa = *reinterpret_cast<const uint32_t*>(a + i);
      uint32_t wb = *reinterpret_cast<const uint32_t*>(b + i);
      if (wa != wb) {
        return false;
      }
    }
#ifndef V8_HOST_CAN_READ_UNALIGNED
  }
#endif
  // Compare the remaining characters that didn't fit into a block.
  for (; i < length; i++) {
    if (a[i] != b[i]) {
      return false;
    }
  }
  return true;
}
-
-
-template<typename Chars1, typename Chars2>
-class RawStringComparator : public AllStatic {
- public:
- static inline bool compare(const Chars1* a, const Chars2* b, int len) {
- ASSERT(sizeof(Chars1) != sizeof(Chars2));
- for (int i = 0; i < len; i++) {
- if (a[i] != b[i]) {
- return false;
- }
- }
- return true;
- }
-};
-
-
// Specialization for two two-byte buffers: identical element width allows
// the blockwise CompareRawStringContents fast path.
template<>
class RawStringComparator<uint16_t, uint16_t> {
 public:
  static inline bool compare(const uint16_t* a, const uint16_t* b, int len) {
    return CompareRawStringContents(a, b, len);
  }
};
-
-
// Specialization for two one-byte buffers: identical element width allows
// the blockwise CompareRawStringContents fast path.
template<>
class RawStringComparator<uint8_t, uint8_t> {
 public:
  static inline bool compare(const uint8_t* a, const uint8_t* b, int len) {
    return CompareRawStringContents(a, b, len);
  }
};
-
-
// Compares two (possibly cons) strings segment by segment. Each State
// tracks the current flat chunk of one string via a ConsStringIteratorOp;
// Equals() repeatedly compares the overlapping prefix of the two current
// chunks and advances both sides.
class StringComparator {
  class State {
   public:
    explicit inline State(ConsStringIteratorOp* op)
        : op_(op), is_one_byte_(true), length_(0), buffer8_(NULL) {}

    // Positions the state at the first flat segment of |string|.
    inline void Init(String* string, unsigned len) {
      op_->Reset();
      int32_t type = string->map()->instance_type();
      String::Visit(string, 0, *this, *op_, type, len);
    }

    // Visitor callback: the current segment is one-byte character data.
    inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
      is_one_byte_ = true;
      buffer8_ = chars;
      length_ = length;
    }

    // Visitor callback: the current segment is two-byte character data.
    inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
      is_one_byte_ = false;
      buffer16_ = chars;
      length_ = length;
    }

    // Consumes |consumed| characters: advances within the current segment
    // when possible, otherwise fetches the next flat segment from the
    // cons-string iterator.
    void Advance(unsigned consumed) {
      ASSERT(consumed <= length_);
      // Still in buffer.
      if (length_ != consumed) {
        if (is_one_byte_) {
          buffer8_ += consumed;
        } else {
          buffer16_ += consumed;
        }
        length_ -= consumed;
        return;
      }
      // Advance state.
      ASSERT(op_->HasMore());
      int32_t type = 0;
      unsigned length = 0;
      String* next = op_->ContinueOperation(&type, &length);
      ASSERT(next != NULL);
      ConsStringNullOp null_op;
      String::Visit(next, 0, *this, null_op, type, length);
    }

    ConsStringIteratorOp* const op_;
    bool is_one_byte_;   // Selects which member of the union below is active.
    unsigned length_;    // Characters remaining in the current segment.
    union {
      const uint8_t* buffer8_;
      const uint16_t* buffer16_;
    };
    DISALLOW_IMPLICIT_CONSTRUCTORS(State);
  };

 public:
  inline StringComparator(ConsStringIteratorOp* op_1,
                          ConsStringIteratorOp* op_2)
      : state_1_(op_1),
        state_2_(op_2) {
  }

  // Compares |to_check| characters of both states' current segments.
  // Reading through buffer8_ is safe for either width because buffer8_ and
  // buffer16_ share a union; the casts restore the actual element type.
  template<typename Chars1, typename Chars2>
  static inline bool Equals(State* state_1, State* state_2, unsigned to_check) {
    const Chars1* a = reinterpret_cast<const Chars1*>(state_1->buffer8_);
    const Chars2* b = reinterpret_cast<const Chars2*>(state_2->buffer8_);
    return RawStringComparator<Chars1, Chars2>::compare(a, b, to_check);
  }

  // Returns true if |string_1| and |string_2| (both of length |length|)
  // have identical contents.
  bool Equals(unsigned length, String* string_1, String* string_2) {
    ASSERT(length != 0);
    state_1_.Init(string_1, length);
    state_2_.Init(string_2, length);
    while (true) {
      // Compare only as far as both current segments extend.
      unsigned to_check = Min(state_1_.length_, state_2_.length_);
      ASSERT(to_check > 0 && to_check <= length);
      bool is_equal;
      // Dispatch on the width combination of the two current segments.
      if (state_1_.is_one_byte_) {
        if (state_2_.is_one_byte_) {
          is_equal = Equals<uint8_t, uint8_t>(&state_1_, &state_2_, to_check);
        } else {
          is_equal = Equals<uint8_t, uint16_t>(&state_1_, &state_2_, to_check);
        }
      } else {
        if (state_2_.is_one_byte_) {
          is_equal = Equals<uint16_t, uint8_t>(&state_1_, &state_2_, to_check);
        } else {
          is_equal = Equals<uint16_t, uint16_t>(&state_1_, &state_2_, to_check);
        }
      }
      // Looping done.
      if (!is_equal) return false;
      length -= to_check;
      // Exit condition. Strings are equal.
      if (length == 0) return true;
      state_1_.Advance(to_check);
      state_2_.Advance(to_check);
    }
  }

 private:
  State state_1_;
  State state_2_;
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringComparator);
};
-
-
// Content-based string equality, used once the trivial identity check has
// failed. Tries progressively cheaper negative checks (length, hash, first
// character) before comparing characters.
bool String::SlowEquals(String* other) {
  // Fast check: negative check with lengths.
  int len = length();
  if (len != other->length()) return false;
  if (len == 0) return true;

  // Fast check: if hash code is computed for both strings
  // a fast negative check can be performed.
  if (HasHashCode() && other->HasHashCode()) {
#ifdef DEBUG
    // Slow-assert builds verify that differing hashes really imply a
    // character-level difference.
    if (FLAG_enable_slow_asserts) {
      if (Hash() != other->Hash()) {
        bool found_difference = false;
        for (int i = 0; i < len; i++) {
          if (Get(i) != other->Get(i)) {
            found_difference = true;
            break;
          }
        }
        ASSERT(found_difference);
      }
    }
#endif
    if (Hash() != other->Hash()) return false;
  }

  // We know the strings are both non-empty. Compare the first chars
  // before we try to flatten the strings.
  if (this->Get(0) != other->Get(0)) return false;

  // Flattening (when it succeeds) yields a flat representation that the
  // fast path below can compare directly.
  String* lhs = this->TryFlattenGetString();
  String* rhs = other->TryFlattenGetString();

  // TODO(dcarney): Compare all types of flat strings with a Visitor.
  if (StringShape(lhs).IsSequentialAscii() &&
      StringShape(rhs).IsSequentialAscii()) {
    const uint8_t* str1 = SeqOneByteString::cast(lhs)->GetChars();
    const uint8_t* str2 = SeqOneByteString::cast(rhs)->GetChars();
    return CompareRawStringContents(str1, str2, len);
  }

  // General case: segment-wise comparison using the isolate's two
  // pre-allocated cons-string iterators.
  Isolate* isolate = GetIsolate();
  StringComparator comparator(isolate->objects_string_compare_iterator_a(),
                              isolate->objects_string_compare_iterator_b());

  return comparator.Equals(static_cast<unsigned>(len), lhs, rhs);
}
-
-
-bool String::MarkAsUndetectable() {
- if (StringShape(this).IsInternalized()) return false;
-
- Map* map = this->map();
- Heap* heap = GetHeap();
- if (map == heap->string_map()) {
- this->set_map(heap->undetectable_string_map());
- return true;
- } else if (map == heap->ascii_string_map()) {
- this->set_map(heap->undetectable_ascii_string_map());
- return true;
- }
- // Rest cannot be marked as undetectable
- return false;
-}
-
-
// Returns true if this (UTF-16) string equals the given UTF-8 byte
// sequence once that sequence is decoded.
bool String::IsUtf8EqualTo(Vector<const char> str) {
  int slen = length();
  // Can't check exact length equality, but we can check bounds.
  int str_len = str.length();
  if (str_len < slen ||
      str_len > slen*static_cast<int>(unibrow::Utf8::kMaxEncodedSize)) {
    return false;
  }
  int i;
  unsigned remaining_in_str = static_cast<unsigned>(str_len);
  const uint8_t* utf8_data = reinterpret_cast<const uint8_t*>(str.start());
  for (i = 0; i < slen && remaining_in_str > 0; i++) {
    unsigned cursor = 0;
    uint32_t r = unibrow::Utf8::ValueOf(utf8_data, remaining_in_str, &cursor);
    ASSERT(cursor > 0 && cursor <= remaining_in_str);
    if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
      // Supplementary-plane character: occupies two UTF-16 code units (a
      // surrogate pair), so it must match two characters of this string.
      if (i > slen - 1) return false;
      if (Get(i++) != unibrow::Utf16::LeadSurrogate(r)) return false;
      if (Get(i) != unibrow::Utf16::TrailSurrogate(r)) return false;
    } else {
      if (Get(i) != r) return false;
    }
    utf8_data += cursor;
    remaining_in_str -= cursor;
  }
  // Equal only if both this string and the UTF-8 data are fully consumed.
  return i == slen && remaining_in_str == 0;
}
-
-
// Returns true if this string's characters equal the one-byte vector |str|.
bool String::IsOneByteEqualTo(Vector<const uint8_t> str) {
  int slen = length();
  if (str.length() != slen) return false;
  FlatContent content = GetFlatContent();
  if (content.IsAscii()) {
    // Flat one-byte content: compare the raw bytes directly.
    return CompareChars(content.ToOneByteVector().start(),
                        str.start(), slen) == 0;
  }
  // Fallback: compare via the generic character accessor.
  for (int i = 0; i < slen; i++) {
    if (Get(i) != static_cast<uint16_t>(str[i])) return false;
  }
  return true;
}
-
-
-bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
- int slen = length();
- if (str.length() != slen) return false;
- FlatContent content = GetFlatContent();
- if (content.IsTwoByte()) {
- return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
- }
- for (int i = 0; i < slen; i++) {
- if (Get(i) != str[i]) return false;
- }
- return true;
-}
-
-
// Hashes a string by visiting its flat segments in order, so cons strings
// can be hashed without flattening them first.
class IteratingStringHasher: public StringHasher {
 public:
  static inline uint32_t Hash(String* string, uint32_t seed) {
    const unsigned len = static_cast<unsigned>(string->length());
    IteratingStringHasher hasher(len, seed);
    // Nothing to do if the hash is determined without reading characters.
    if (hasher.has_trivial_hash()) {
      return hasher.GetHashField();
    }
    int32_t type = string->map()->instance_type();
    ConsStringNullOp null_op;
    String::Visit(string, 0, hasher, null_op, type, len);
    // Flat strings terminate immediately.
    if (hasher.consumed_ == len) {
      ASSERT(!string->IsConsString());
      return hasher.GetHashField();
    }
    ASSERT(string->IsConsString());
    // This is a ConsString, iterate across it.
    ConsStringIteratorOp op;
    unsigned offset = 0;
    unsigned leaf_length = len;
    string = op.Operate(string, &offset, &type, &leaf_length);
    while (true) {
      ASSERT(hasher.consumed_ < len);
      String::Visit(string, 0, hasher, null_op, type, leaf_length);
      if (hasher.consumed_ == len) break;
      string = op.ContinueOperation(&type, &leaf_length);
      // This should be taken care of by the length check.
      ASSERT(string != NULL);
    }
    return hasher.GetHashField();
  }
  // Visitor callbacks: feed segment characters into the running hash and
  // track how much of the string has been consumed.
  inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
    AddCharacters(chars, static_cast<int>(length));
    consumed_ += length;
  }
  inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
    AddCharacters(chars, static_cast<int>(length));
    consumed_ += length;
  }

 private:
  inline IteratingStringHasher(int len, uint32_t seed)
      : StringHasher(len, seed),
        consumed_(0) {}
  unsigned consumed_;  // Number of characters hashed so far.
  DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
};
-
-
// Computes this string's hash with the heap's seed and caches it in the
// hash field. Should only be called if hash code has not yet been computed.
uint32_t String::ComputeAndSetHash() {
  ASSERT(!HasHashCode());

  // Store the hash code in the object.
  uint32_t field = IteratingStringHasher::Hash(this, GetHeap()->HashSeed());
  set_hash_field(field);

  // Check the hash code is there.
  ASSERT(HasHashCode());
  uint32_t result = field >> kHashShift;
  ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
  return result;
}
-
-
// Attempts to interpret this string as a canonical decimal array index.
// Rejects empty/overlong strings, non-digits, non-canonical leading zeros,
// and values that do not fit in 32 bits.
bool String::ComputeArrayIndex(uint32_t* index) {
  int length = this->length();
  if (length == 0 || length > kMaxArrayIndexSize) return false;
  ConsStringIteratorOp op;
  StringCharacterStream stream(this, &op);
  uint16_t ch = stream.GetNext();

  // If the string begins with a '0' character, it must only consist
  // of it to be a legal array index.
  if (ch == '0') {
    *index = 0;
    return length == 1;
  }

  // Convert string to uint32 array index; character by character.
  int d = ch - '0';
  if (d < 0 || d > 9) return false;
  uint32_t result = d;
  while (stream.HasMore()) {
    d = stream.GetNext() - '0';
    if (d < 0 || d > 9) return false;
    // Check that the new result is below the 32 bit limit.
    // 429496729 == floor(2^32 / 10); for result == 429496729 the product
    // is 4294967290, so appending a digit greater than 5 would overflow —
    // hence the conditional -1.
    if (result > 429496729U - ((d > 5) ? 1 : 0)) return false;
    result = (result * 10) + d;
  }

  *index = result;
  return true;
}
-
-
// Slow path for converting this string to an array index: short strings
// reuse the array-index bits cached in the hash field, longer ones are
// parsed digit by digit.
bool String::SlowAsArrayIndex(uint32_t* index) {
  if (length() <= kMaxCachedArrayIndexLength) {
    Hash();  // force computation of hash code
    uint32_t field = hash_field();
    if ((field & kIsNotArrayIndexMask) != 0) return false;
    // Isolate the array index from the full hash field.
    *index = (kArrayIndexHashMask & field) >> kHashShift;
    return true;
  } else {
    // Too long for the cached-index encoding; parse the digits directly.
    return ComputeArrayIndex(index);
  }
}
-
-
// Shrinks this sequential string in place to |new_length| characters,
// filling the freed tail with a filler object so the heap stays iterable.
// Returns the (possibly unchanged) string, or the empty string for
// non-positive lengths.
String* SeqString::Truncate(int new_length) {
  Heap* heap = GetHeap();
  if (new_length <= 0) return heap->empty_string();

  int string_size, allocated_string_size;
  int old_length = length();
  // Nothing to do if we are not actually shrinking.
  if (old_length <= new_length) return this;

  if (IsSeqOneByteString()) {
    allocated_string_size = SeqOneByteString::SizeFor(old_length);
    string_size = SeqOneByteString::SizeFor(new_length);
  } else {
    allocated_string_size = SeqTwoByteString::SizeFor(old_length);
    string_size = SeqTwoByteString::SizeFor(new_length);
  }

  int delta = allocated_string_size - string_size;
  set_length(new_length);

  // String sizes are pointer size aligned, so that we can use filler objects
  // that are a multiple of pointer size.
  Address end_of_string = address() + string_size;
  heap->CreateFillerObjectAt(end_of_string, delta);
  // Keep the collector's live-byte accounting in sync when this object is
  // already marked black.
  if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
    MemoryChunk::IncrementLiveBytesFromMutator(address(), -delta);
  }
  return this;
}
-
-
// Returns the AllocationSiteInfo trailing |object| in new space, if any.
AllocationSiteInfo* AllocationSiteInfo::FindForJSObject(JSObject* object) {
  // Currently, AllocationSiteInfo objects are only allocated immediately
  // after JSArrays in NewSpace, and detecting whether a JSArray has one
  // involves carefully checking the object immediately after the JSArray
  // (if there is one) to see if it's an AllocationSiteInfo.
  if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) {
    // Untagged address of the first byte past the object.
    Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) +
        object->Size();
    if ((ptr_end + AllocationSiteInfo::kSize) <=
        object->GetHeap()->NewSpaceTop()) {
      // There is room in newspace for allocation info. Do we have some?
      Map** possible_allocation_site_info_map =
          reinterpret_cast<Map**>(ptr_end);
      if (*possible_allocation_site_info_map ==
          object->GetHeap()->allocation_site_info_map()) {
        // The +1 re-applies the heap object tag subtracted above.
        AllocationSiteInfo* info = AllocationSiteInfo::cast(
            reinterpret_cast<Object*>(ptr_end + 1));
        return info;
      }
    }
  }
  return NULL;
}
-
-
// Extracts the ElementsKind recorded in this info's payload cell, if the
// payload is a global property cell holding a Smi. Returns true and sets
// *kind on success.
bool AllocationSiteInfo::GetElementsKindPayload(ElementsKind* kind) {
  ASSERT(kind != NULL);
  if (payload()->IsJSGlobalPropertyCell()) {
    JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(payload());
    Object* cell_contents = cell->value();
    if (cell_contents->IsSmi()) {
      // The Smi value encodes the elements kind directly.
      *kind = static_cast<ElementsKind>(
          Smi::cast(cell_contents)->value());
      return true;
    }
  }
  return false;
}
-
-
-// Heuristic: We only need to create allocation site info if the boilerplate
-// elements kind is the initial elements kind.
-AllocationSiteMode AllocationSiteInfo::GetMode(
- ElementsKind boilerplate_elements_kind) {
- if (FLAG_track_allocation_sites &&
- IsFastSmiElementsKind(boilerplate_elements_kind)) {
- return TRACK_ALLOCATION_SITE;
- }
-
- return DONT_TRACK_ALLOCATION_SITE;
-}
-
-
-AllocationSiteMode AllocationSiteInfo::GetMode(ElementsKind from,
- ElementsKind to) {
- if (FLAG_track_allocation_sites &&
- IsFastSmiElementsKind(from) &&
- (IsFastObjectElementsKind(to) || IsFastDoubleElementsKind(to))) {
- return TRACK_ALLOCATION_SITE;
- }
-
- return DONT_TRACK_ALLOCATION_SITE;
-}
-
-
-uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
- // For array indexes mix the length into the hash as an array index could
- // be zero.
- ASSERT(length > 0);
- ASSERT(length <= String::kMaxArrayIndexSize);
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
-
- value <<= String::kHashShift;
- value |= length << String::kArrayIndexHashLengthShift;
-
- ASSERT((value & String::kIsNotArrayIndexMask) == 0);
- ASSERT((length > String::kMaxCachedArrayIndexLength) ||
- (value & String::kContainsCachedArrayIndexMask) == 0);
- return value;
-}
-
-
// Composes the final hash field from the accumulated state: an array-index
// hash, a regular content hash, or — for overlong strings — a hash based
// only on the length.
uint32_t StringHasher::GetHashField() {
  if (length_ <= String::kMaxHashCalcLength) {
    if (is_array_index_) {
      return MakeArrayIndexHash(array_index_, length_);
    }
    return (GetHashCore(raw_running_hash_) << String::kHashShift) |
           String::kIsNotArrayIndexMask;
  } else {
    // Too long to hash by content: the length itself serves as the hash.
    return (length_ << String::kHashShift) | String::kIsNotArrayIndexMask;
  }
}
-
-
// Hashes UTF-8 input as if it had been transcoded to UTF-16, and reports
// the resulting UTF-16 length through |utf16_length_out|.
uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars,
                                       uint32_t seed,
                                       int* utf16_length_out) {
  int vector_length = chars.length();
  // Handle some edge cases
  if (vector_length <= 1) {
    // A single byte must be ASCII, i.e. one UTF-16 code unit.
    ASSERT(vector_length == 0 ||
           static_cast<uint8_t>(chars.start()[0]) <=
               unibrow::Utf8::kMaxOneByteChar);
    *utf16_length_out = vector_length;
    return HashSequentialString(chars.start(), vector_length, seed);
  }
  // Start with a fake length which won't affect computation.
  // It will be updated later.
  StringHasher hasher(String::kMaxArrayIndexSize, seed);
  unsigned remaining = static_cast<unsigned>(vector_length);
  const uint8_t* stream = reinterpret_cast<const uint8_t*>(chars.start());
  int utf16_length = 0;
  bool is_index = true;
  ASSERT(hasher.is_array_index_);
  while (remaining > 0) {
    unsigned consumed = 0;
    uint32_t c = unibrow::Utf8::ValueOf(stream, remaining, &consumed);
    ASSERT(consumed > 0 && consumed <= remaining);
    stream += consumed;
    remaining -= consumed;
    // Supplementary-plane characters become a surrogate pair in UTF-16.
    bool is_two_characters = c > unibrow::Utf16::kMaxNonSurrogateCharCode;
    utf16_length += is_two_characters ? 2 : 1;
    // No need to keep hashing. But we do need to calculate utf16_length.
    if (utf16_length > String::kMaxHashCalcLength) continue;
    if (is_two_characters) {
      uint16_t c1 = unibrow::Utf16::LeadSurrogate(c);
      uint16_t c2 = unibrow::Utf16::TrailSurrogate(c);
      hasher.AddCharacter(c1);
      hasher.AddCharacter(c2);
      if (is_index) is_index = hasher.UpdateIndex(c1);
      if (is_index) is_index = hasher.UpdateIndex(c2);
    } else {
      hasher.AddCharacter(c);
      if (is_index) is_index = hasher.UpdateIndex(c);
    }
  }
  *utf16_length_out = static_cast<int>(utf16_length);
  // Must set length here so that hash computation is correct.
  hasher.length_ = utf16_length;
  return hasher.GetHashField();
}
-
-
-MaybeObject* String::SubString(int start, int end, PretenureFlag pretenure) {
- Heap* heap = GetHeap();
- if (start == 0 && end == length()) return this;
- MaybeObject* result = heap->AllocateSubString(this, start, end, pretenure);
- return result;
-}
-
-
-void String::PrintOn(FILE* file) {
- int length = this->length();
- for (int i = 0; i < length; i++) {
- fprintf(file, "%c", Get(i));
- }
-}
-
-
// Shrinks a descriptor array's enum cache (and its parallel indices cache,
// if present) to the number of live enumerable own properties of |map|.
static void TrimEnumCache(Heap* heap, Map* map, DescriptorArray* descriptors) {
  int live_enum = map->EnumLength();
  if (live_enum == Map::kInvalidEnumCache) {
    // No cached length; count the enumerable own descriptors directly.
    live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
  }
  if (live_enum == 0) return descriptors->ClearEnumCache();

  FixedArray* enum_cache = descriptors->GetEnumCache();

  int to_trim = enum_cache->length() - live_enum;
  if (to_trim <= 0) return;
  RightTrimFixedArray<FROM_GC>(heap, descriptors->GetEnumCache(), to_trim);

  // Keep the indices cache the same length as the enum cache.
  if (!descriptors->HasEnumIndicesCache()) return;
  FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
  RightTrimFixedArray<FROM_GC>(heap, enum_indices_cache, to_trim);
}
-
-
// Shrinks |descriptors| to hold exactly |number_of_own_descriptors|
// entries, trimming the enum cache to match and restoring sorted order.
static void TrimDescriptorArray(Heap* heap,
                                Map* map,
                                DescriptorArray* descriptors,
                                int number_of_own_descriptors) {
  int number_of_descriptors = descriptors->number_of_descriptors_storage();
  int to_trim = number_of_descriptors - number_of_own_descriptors;
  if (to_trim == 0) return;

  // Each descriptor occupies kDescriptorSize slots in the backing array.
  RightTrimFixedArray<FROM_GC>(
      heap, descriptors, to_trim * DescriptorArray::kDescriptorSize);
  descriptors->SetNumberOfDescriptors(number_of_own_descriptors);

  if (descriptors->HasEnumCache()) TrimEnumCache(heap, map, descriptors);
  descriptors->Sort();
}
-
-
// Clear a possible back pointer in case the transition leads to a dead map.
// Return true in case a back pointer has been cleared and false otherwise.
static bool ClearBackPointer(Heap* heap, Map* target) {
  // A set mark bit means the target map is still live: leave it alone.
  if (Marking::MarkBitFrom(target).Get()) return false;
  target->SetBackPointer(heap->undefined_value(), SKIP_WRITE_BARRIER);
  return true;
}
-
-
// TODO(mstarzinger): This method should be moved into MarkCompactCollector,
// because it cannot be called from outside the GC and we already have methods
// depending on the transitions layout in the GC anyways.
//
// Removes transitions to dead maps from this map's transition array,
// compacting live entries to the left and trimming the array (and, if a
// dead map owned our descriptor array, the descriptors) afterwards.
void Map::ClearNonLiveTransitions(Heap* heap) {
  // If there are no transitions to be cleared, return.
  // TODO(verwaest) Should be an assert, otherwise back pointers are not
  // properly cleared.
  if (!HasTransitionArray()) return;

  TransitionArray* t = transitions();
  MarkCompactCollector* collector = heap->mark_compact_collector();

  int transition_index = 0;

  DescriptorArray* descriptors = instance_descriptors();
  bool descriptors_owner_died = false;

  // Compact all live descriptors to the left.
  for (int i = 0; i < t->number_of_transitions(); ++i) {
    Map* target = t->GetTarget(i);
    if (ClearBackPointer(heap, target)) {
      // Dead target: if it owned our descriptor array, remember to trim it.
      if (target->instance_descriptors() == descriptors) {
        descriptors_owner_died = true;
      }
    } else {
      if (i != transition_index) {
        String* key = t->GetKey(i);
        t->SetKey(transition_index, key);
        Object** key_slot = t->GetKeySlot(transition_index);
        collector->RecordSlot(key_slot, key_slot, key);
        // Target slots do not need to be recorded since maps are not compacted.
        t->SetTarget(transition_index, t->GetTarget(i));
      }
      transition_index++;
    }
  }

  if (t->HasElementsTransition() &&
      ClearBackPointer(heap, t->elements_transition())) {
    if (t->elements_transition()->instance_descriptors() == descriptors) {
      descriptors_owner_died = true;
    }
    t->ClearElementsTransition();
  } else {
    // If there are no transitions to be cleared, return.
    // TODO(verwaest) Should be an assert, otherwise back pointers are not
    // properly cleared.
    if (transition_index == t->number_of_transitions()) return;
  }

  int number_of_own_descriptors = NumberOfOwnDescriptors();

  if (descriptors_owner_died) {
    if (number_of_own_descriptors > 0) {
      TrimDescriptorArray(heap, this, descriptors, number_of_own_descriptors);
      ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
    } else {
      ASSERT(descriptors == GetHeap()->empty_descriptor_array());
    }
  }

  // Shrink the transition array to the surviving entries.
  int trim = t->number_of_transitions() - transition_index;
  if (trim > 0) {
    RightTrimFixedArray<FROM_GC>(heap, t, t->IsSimpleTransition()
        ? trim : trim * TransitionArray::kTransitionSize);
  }
}
-
-
// Computes a hash for this map from its most frequently differing fields.
int Map::Hash() {
  // For performance reasons we only hash the 3 most variable fields of a map:
  // constructor, prototype and bit_field2.

  // Shift away the tag.
  int hash = (static_cast<uint32_t>(
      reinterpret_cast<uintptr_t>(constructor())) >> 2);

  // XOR-ing the prototype and constructor directly yields too many zero bits
  // when the two pointers are close (which is fairly common).
  // To avoid this we shift the prototype 4 bits relatively to the constructor.
  hash ^= (static_cast<uint32_t>(
      reinterpret_cast<uintptr_t>(prototype())) << 2);

  // Fold in the upper bits and the bit field.
  return hash ^ (hash >> 16) ^ bit_field2();
}
-
-
-bool Map::EquivalentToForNormalization(Map* other,
- PropertyNormalizationMode mode) {
- return
- constructor() == other->constructor() &&
- prototype() == other->prototype() &&
- inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
- 0 :
- other->inobject_properties()) &&
- instance_type() == other->instance_type() &&
- bit_field() == other->bit_field() &&
- bit_field2() == other->bit_field2() &&
- is_observed() == other->is_observed() &&
- function_with_prototype() == other->function_with_prototype();
-}
-
-
// Visits all pointer fields in this function's body, routing the code
// entry slot through VisitCodeEntry since it is not an ordinary tagged
// pointer.
void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
  // Iterate over all fields in the body but take care in dealing with
  // the code entry.
  IteratePointers(v, kPropertiesOffset, kCodeEntryOffset);
  v->VisitCodeEntry(this->address() + kCodeEntryOffset);
  IteratePointers(v, kCodeEntryOffset + kPointerSize, object_size);
}
-
-
// Requests (non-parallel) recompilation by installing the kLazyRecompile
// builtin as this function's code.
void JSFunction::MarkForLazyRecompilation() {
  ASSERT(is_compiled() && !IsOptimized());
  ASSERT(shared()->allows_lazy_compilation() ||
         code()->optimizable());
  Builtins* builtins = GetIsolate()->builtins();
  ReplaceCode(builtins->builtin(Builtins::kLazyRecompile));
}
-
// Requests recompilation on the compiler thread by installing the
// kParallelRecompile builtin as this function's code.
void JSFunction::MarkForParallelRecompilation() {
  ASSERT(is_compiled() && !IsOptimized());
  ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
  Builtins* builtins = GetIsolate()->builtins();
  ReplaceCode(builtins->builtin(Builtins::kParallelRecompile));

  // Unlike MarkForLazyRecompilation, after queuing a function for
  // recompilation on the compiler thread, we actually tail-call into
  // the full code. We reset the profiler ticks here so that the
  // function doesn't bother the runtime profiler too much.
  shared()->code()->set_profiler_ticks(0);
}
-
// Shared driver for lazy compilation: invokes the compiler and, when
// requested, clears any pending exception left by a failed compile.
static bool CompileLazyHelper(CompilationInfo* info,
                              ClearExceptionFlag flag) {
  // Compile the source information to a code object.
  ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
  ASSERT(!info->isolate()->has_pending_exception());
  bool result = Compiler::CompileLazy(info);
  // Failure must coincide with a pending exception (and success without).
  ASSERT(result != Isolate::Current()->has_pending_exception());
  if (!result && flag == CLEAR_EXCEPTION) {
    info->isolate()->clear_pending_exception();
  }
  return result;
}
-
-
// Lazily compiles |shared| without a target function or context.
bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
                                     ClearExceptionFlag flag) {
  ASSERT(shared->allows_lazy_compilation_without_context());
  CompilationInfoWithZone info(shared);
  return CompileLazyHelper(&info, flag);
}
-
-
// Adds one (native_context, code, literals) triple to |shared|'s optimized
// code map, creating the map on first use.
void SharedFunctionInfo::AddToOptimizedCodeMap(
    Handle<SharedFunctionInfo> shared,
    Handle<Context> native_context,
    Handle<Code> code,
    Handle<FixedArray> literals) {
  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
  ASSERT(native_context->IsNativeContext());
  STATIC_ASSERT(kEntryLength == 3);
  Object* value = shared->optimized_code_map();
  Handle<FixedArray> new_code_map;
  if (value->IsSmi()) {
    // No optimized code map.
    ASSERT_EQ(0, Smi::cast(value)->value());
    // Create 3 entries per context {context, code, literals}.
    new_code_map = FACTORY->NewFixedArray(kEntryLength);
    new_code_map->set(0, *native_context);
    new_code_map->set(1, *code);
    new_code_map->set(2, *literals);
  } else {
    // Copy old map and append one new entry.
    Handle<FixedArray> old_code_map(FixedArray::cast(value));
    // Each native context may appear at most once in the map.
    ASSERT_EQ(-1, shared->SearchOptimizedCodeMap(*native_context));
    int old_length = old_code_map->length();
    int new_length = old_length + kEntryLength;
    new_code_map = FACTORY->NewFixedArray(new_length);
    old_code_map->CopyTo(0, *new_code_map, 0, old_length);
    new_code_map->set(old_length, *native_context);
    new_code_map->set(old_length + 1, *code);
    new_code_map->set(old_length + 2, *literals);
  }
#ifdef DEBUG
  // Verify the layout of every entry in the resulting map.
  for (int i = 0; i < new_code_map->length(); i += kEntryLength) {
    ASSERT(new_code_map->get(i)->IsNativeContext());
    ASSERT(new_code_map->get(i + 1)->IsCode());
    ASSERT(Code::cast(new_code_map->get(i + 1))->kind() ==
           Code::OPTIMIZED_FUNCTION);
    ASSERT(new_code_map->get(i + 2)->IsFixedArray());
  }
#endif
  shared->set_optimized_code_map(*new_code_map);
}
-
-
// Installs the cached optimized code (and, for non-bound functions, the
// cached literals) found at |index| of the optimized code map onto
// |function|.
void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
                                                     int index) {
  // |index| points at the code slot; the context lives at index - 1 and
  // the literals at index + 1 (see AddToOptimizedCodeMap's entry layout).
  ASSERT(index > 0);
  ASSERT(optimized_code_map()->IsFixedArray());
  FixedArray* code_map = FixedArray::cast(optimized_code_map());
  if (!bound()) {
    FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
    ASSERT(cached_literals != NULL);
    function->set_literals(cached_literals);
  }
  Code* code = Code::cast(code_map->get(index));
  ASSERT(code != NULL);
  // The entry's context must match the function's native context.
  ASSERT(function->context()->native_context() == code_map->get(index - 1));
  function->ReplaceCode(code);
}
-
-
// Ensures |function| has code: reuses already-compiled shared code when
// available, otherwise compiles lazily. Returns whether code is installed.
bool JSFunction::CompileLazy(Handle<JSFunction> function,
                             ClearExceptionFlag flag) {
  bool result = true;
  if (function->shared()->is_compiled()) {
    function->ReplaceCode(function->shared()->code());
    // Reset the code age on reuse — NOTE(review): assumed to keep the
    // shared code from being flushed as stale; confirm against Code aging.
    function->shared()->set_code_age(0);
  } else {
    ASSERT(function->shared()->allows_lazy_compilation());
    CompilationInfoWithZone info(function);
    result = CompileLazyHelper(&info, flag);
    ASSERT(!result || function->is_compiled());
  }
  return result;
}
-
-
// Compiles an optimized version of |function|, targeting |osr_ast_id| for
// on-stack replacement.
bool JSFunction::CompileOptimized(Handle<JSFunction> function,
                                  BailoutId osr_ast_id,
                                  ClearExceptionFlag flag) {
  CompilationInfoWithZone info(function);
  info.SetOptimizing(osr_ast_id);
  return CompileLazyHelper(&info, flag);
}
-
-
-bool JSFunction::EnsureCompiled(Handle<JSFunction> function,
- ClearExceptionFlag flag) {
- return function->is_compiled() || CompileLazy(function, flag);
-}
-
-
// Heuristic used to decide whether this function may be inlined: it must
// not be a builtin, must come from a script, and must not have had
// optimization disabled.
bool JSFunction::IsInlineable() {
  if (IsBuiltin()) return false;
  SharedFunctionInfo* shared_info = shared();
  // Check that the function has a script associated with it.
  if (!shared_info->script()->IsScript()) return false;
  if (shared_info->optimization_disabled()) return false;
  Code* code = shared_info->code();
  if (code->kind() == Code::OPTIMIZED_FUNCTION) return true;
  // If we never ran this (unlikely) then lets try to optimize it.
  if (code->kind() != Code::FUNCTION) return true;
  return code->optimizable();
}
-
-
// Prepares this object for use as a prototype: global objects are left
// alone, other objects are switched to fast properties.
MaybeObject* JSObject::OptimizeAsPrototype() {
  if (IsGlobalObject()) return this;

  // Make sure prototypes are fast objects and their maps have the bit set
  // so they remain fast.
  if (!HasFastProperties()) {
    MaybeObject* new_proto = TransformToFastProperties(0);
    if (new_proto->IsFailure()) return new_proto;
    // The transformation happens in place; the object identity is stable.
    ASSERT(new_proto == this);
  }
  return this;
}
-
-
// Builds and caches the full set of initial JSArray maps — one per fast
// ElementsKind, chained by elements-kind transitions — in |native_context|.
MUST_USE_RESULT static MaybeObject* CacheInitialJSArrayMaps(
    Context* native_context, Map* initial_map) {
  // Replace all of the cached initial array maps in the native context with
  // the appropriate transitioned elements kind maps.
  Heap* heap = native_context->GetHeap();
  MaybeObject* maybe_maps =
      heap->AllocateFixedArrayWithHoles(kElementsKindCount);
  FixedArray* maps;
  if (!maybe_maps->To(&maps)) return maybe_maps;

  Map* current_map = initial_map;
  ElementsKind kind = current_map->elements_kind();
  ASSERT(kind == GetInitialFastElementsKind());
  maps->set(kind, current_map);
  // Derive each subsequent fast kind's map from its predecessor so the
  // transition chain is recorded on the maps themselves.
  for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
       i < kFastElementsKindCount; ++i) {
    Map* new_map;
    ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
    MaybeObject* maybe_new_map =
        current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
    if (!maybe_new_map->To(&new_map)) return maybe_new_map;
    maps->set(next_kind, new_map);
    current_map = new_map;
  }
  native_context->set_js_array_maps(maps);
  return initial_map;
}
-
-
// Sets the prototype that instances constructed by this function will
// receive, updating or deferring the initial map as appropriate.
MaybeObject* JSFunction::SetInstancePrototype(Object* value) {
  ASSERT(value->IsJSReceiver());
  Heap* heap = GetHeap();

  // First some logic for the map of the prototype to make sure it is in fast
  // mode.
  if (value->IsJSObject()) {
    MaybeObject* ok = JSObject::cast(value)->OptimizeAsPrototype();
    if (ok->IsFailure()) return ok;
  }

  // Now some logic for the maps of the objects that are created by using this
  // function as a constructor.
  if (has_initial_map()) {
    // If the function has allocated the initial map replace it with a
    // copy containing the new prototype. Also complete any in-object
    // slack tracking that is in progress at this point because it is
    // still tracking the old copy.
    if (shared()->IsInobjectSlackTrackingInProgress()) {
      shared()->CompleteInobjectSlackTracking();
    }
    Map* new_map;
    MaybeObject* maybe_object = initial_map()->Copy();
    if (!maybe_object->To(&new_map)) return maybe_object;
    new_map->set_prototype(value);

    // If the function is used as the global Array function, cache the
    // initial map (and transitioned versions) in the native context.
    Context* native_context = context()->native_context();
    Object* array_function = native_context->get(Context::ARRAY_FUNCTION_INDEX);
    if (array_function->IsJSFunction() &&
        this == JSFunction::cast(array_function)) {
      MaybeObject* ok = CacheInitialJSArrayMaps(native_context, new_map);
      if (ok->IsFailure()) return ok;
    }

    set_initial_map(new_map);
  } else {
    // Put the value in the initial map field until an initial map is
    // needed. At that point, a new initial map is created and the
    // prototype is put into the initial map where it belongs.
    set_prototype_or_initial_map(value);
  }
  // Cached instanceof results may reference the old prototype chain.
  heap->ClearInstanceofCache();
  return value;
}
-
-
-MaybeObject* JSFunction::SetPrototype(Object* value) {
- ASSERT(should_have_prototype());
- Object* construct_prototype = value;
-
- // If the value is not a JSReceiver, store the value in the map's
- // constructor field so it can be accessed. Also, set the prototype
- // used for constructing objects to the original object prototype.
- // See ECMA-262 13.2.2.
- if (!value->IsJSReceiver()) {
- // Copy the map so this does not affect unrelated functions.
- // Remove map transitions because they point to maps with a
- // different prototype.
- Map* new_map;
- MaybeObject* maybe_new_map = map()->Copy();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- Heap* heap = new_map->GetHeap();
- set_map(new_map);
- new_map->set_constructor(value);
- new_map->set_non_instance_prototype(true);
- construct_prototype =
- heap->isolate()->context()->native_context()->
- initial_object_prototype();
- } else {
- map()->set_non_instance_prototype(false);
- }
-
- return SetInstancePrototype(construct_prototype);
-}
-
-
-void JSFunction::RemovePrototype() {
- Context* native_context = context()->native_context();
- Map* no_prototype_map = shared()->is_classic_mode()
- ? native_context->function_without_prototype_map()
- : native_context->strict_mode_function_without_prototype_map();
-
- if (map() == no_prototype_map) return;
-
- ASSERT(map() == (shared()->is_classic_mode()
- ? native_context->function_map()
- : native_context->strict_mode_function_map()));
-
- set_map(no_prototype_map);
- set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value());
-}
-
-
-void JSFunction::SetInstanceClassName(String* name) {
- shared()->set_instance_class_name(name);
-}
-
-
-void JSFunction::PrintName(FILE* out) {
- SmartArrayPointer<char> name = shared()->DebugName()->ToCString();
- FPrintF(out, "%s", *name);
-}
-
-
-Context* JSFunction::NativeContextFromLiterals(FixedArray* literals) {
- return Context::cast(literals->get(JSFunction::kLiteralNativeContextIndex));
-}
-
-
-MaybeObject* Oddball::Initialize(const char* to_string,
- Object* to_number,
- byte kind) {
- String* internalized_to_string;
- { MaybeObject* maybe_string =
- Isolate::Current()->heap()->InternalizeUtf8String(
- CStrVector(to_string));
- if (!maybe_string->To(&internalized_to_string)) return maybe_string;
- }
- set_to_string(internalized_to_string);
- set_to_number(to_number);
- set_kind(kind);
- return this;
-}
-
-
-String* SharedFunctionInfo::DebugName() {
- Object* n = name();
- if (!n->IsString() || String::cast(n)->length() == 0) return inferred_name();
- return String::cast(n);
-}
-
-
-bool SharedFunctionInfo::HasSourceCode() {
- return !script()->IsUndefined() &&
- !reinterpret_cast<Script*>(script())->source()->IsUndefined();
-}
-
-
-Handle<Object> SharedFunctionInfo::GetSourceCode() {
- if (!HasSourceCode()) return GetIsolate()->factory()->undefined_value();
- Handle<String> source(String::cast(Script::cast(script())->source()));
- return SubString(source, start_position(), end_position());
-}
-
-
-int SharedFunctionInfo::SourceSize() {
- return end_position() - start_position();
-}
-
-
-int SharedFunctionInfo::CalculateInstanceSize() {
- int instance_size =
- JSObject::kHeaderSize +
- expected_nof_properties() * kPointerSize;
- if (instance_size > JSObject::kMaxInstanceSize) {
- instance_size = JSObject::kMaxInstanceSize;
- }
- return instance_size;
-}
-
-
-int SharedFunctionInfo::CalculateInObjectProperties() {
- return (CalculateInstanceSize() - JSObject::kHeaderSize) / kPointerSize;
-}
-
-
-bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
- // Check the basic conditions for generating inline constructor code.
- if (!FLAG_inline_new
- || !has_only_simple_this_property_assignments()
- || this_property_assignments_count() == 0) {
- return false;
- }
-
- Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
-
- // Traverse the proposed prototype chain looking for properties of the
- // same names as are set by the inline constructor.
- for (Object* obj = prototype;
- obj != heap->null_value();
- obj = obj->GetPrototype(isolate)) {
- JSReceiver* receiver = JSReceiver::cast(obj);
- for (int i = 0; i < this_property_assignments_count(); i++) {
- LookupResult result(heap->isolate());
- String* name = GetThisPropertyAssignmentName(i);
- receiver->LocalLookup(name, &result);
- if (result.IsFound()) {
- switch (result.type()) {
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- break;
- case INTERCEPTOR:
- case CALLBACKS:
- case HANDLER:
- return false;
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- }
- }
- }
-
- return true;
-}
-
-
-void SharedFunctionInfo::ForbidInlineConstructor() {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlySimpleThisPropertyAssignments,
- false));
-}
-
-
-void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
- bool only_simple_this_property_assignments,
- FixedArray* assignments) {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlySimpleThisPropertyAssignments,
- only_simple_this_property_assignments));
- set_this_property_assignments(assignments);
- set_this_property_assignments_count(assignments->length() / 3);
-}
-
-
-void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() {
- Heap* heap = GetHeap();
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlySimpleThisPropertyAssignments,
- false));
- set_this_property_assignments(heap->undefined_value());
- set_this_property_assignments_count(0);
-}
-
-
-String* SharedFunctionInfo::GetThisPropertyAssignmentName(int index) {
- Object* obj = this_property_assignments();
- ASSERT(obj->IsFixedArray());
- ASSERT(index < this_property_assignments_count());
- obj = FixedArray::cast(obj)->get(index * 3);
- ASSERT(obj->IsString());
- return String::cast(obj);
-}
-
-
-bool SharedFunctionInfo::IsThisPropertyAssignmentArgument(int index) {
- Object* obj = this_property_assignments();
- ASSERT(obj->IsFixedArray());
- ASSERT(index < this_property_assignments_count());
- obj = FixedArray::cast(obj)->get(index * 3 + 1);
- return Smi::cast(obj)->value() != -1;
-}
-
-
-int SharedFunctionInfo::GetThisPropertyAssignmentArgument(int index) {
- ASSERT(IsThisPropertyAssignmentArgument(index));
- Object* obj =
- FixedArray::cast(this_property_assignments())->get(index * 3 + 1);
- return Smi::cast(obj)->value();
-}
-
-
-Object* SharedFunctionInfo::GetThisPropertyAssignmentConstant(int index) {
- ASSERT(!IsThisPropertyAssignmentArgument(index));
- Object* obj =
- FixedArray::cast(this_property_assignments())->get(index * 3 + 2);
- return obj;
-}
-
-
-// Support function for printing the source code to a StringStream
-// without any allocation in the heap.
-void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
- int max_length) {
- // For some native functions there is no source.
- if (!HasSourceCode()) {
- accumulator->Add("<No Source>");
- return;
- }
-
- // Get the source for the script which this function came from.
- // Don't use String::cast because we don't want more assertion errors while
- // we are already creating a stack dump.
- String* script_source =
- reinterpret_cast<String*>(Script::cast(script())->source());
-
- if (!script_source->LooksValid()) {
- accumulator->Add("<Invalid Source>");
- return;
- }
-
- if (!is_toplevel()) {
- accumulator->Add("function ");
- Object* name = this->name();
- if (name->IsString() && String::cast(name)->length() > 0) {
- accumulator->PrintName(name);
- }
- }
-
- int len = end_position() - start_position();
- if (len <= max_length || max_length < 0) {
- accumulator->Put(script_source, start_position(), end_position());
- } else {
- accumulator->Put(script_source,
- start_position(),
- start_position() + max_length);
- accumulator->Add("...\n");
- }
-}
-
-
-static bool IsCodeEquivalent(Code* code, Code* recompiled) {
- if (code->instruction_size() != recompiled->instruction_size()) return false;
- ByteArray* code_relocation = code->relocation_info();
- ByteArray* recompiled_relocation = recompiled->relocation_info();
- int length = code_relocation->length();
- if (length != recompiled_relocation->length()) return false;
- int compare = memcmp(code_relocation->GetDataStartAddress(),
- recompiled_relocation->GetDataStartAddress(),
- length);
- return compare == 0;
-}
-
-
-void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
- ASSERT(!has_deoptimization_support());
- AssertNoAllocation no_allocation;
- Code* code = this->code();
- if (IsCodeEquivalent(code, recompiled)) {
- // Copy the deoptimization data from the recompiled code.
- code->set_deoptimization_data(recompiled->deoptimization_data());
- code->set_has_deoptimization_support(true);
- } else {
- // TODO(3025757): In case the recompiled isn't equivalent to the
- // old code, we have to replace it. We should try to avoid this
- // altogether because it flushes valuable type feedback by
- // effectively resetting all IC state.
- ReplaceCode(recompiled);
- }
- ASSERT(has_deoptimization_support());
-}
-
-
-void SharedFunctionInfo::DisableOptimization(const char* reason) {
- // Disable optimization for the shared function info and mark the
- // code as non-optimizable. The marker on the shared function info
- // is there because we flush non-optimized code thereby loosing the
- // non-optimizable information for the code. When the code is
- // regenerated and set on the shared function info it is marked as
- // non-optimizable if optimization is disabled for the shared
- // function info.
- set_optimization_disabled(true);
- // Code should be the lazy compilation stub or else unoptimized. If the
- // latter, disable optimization for the code too.
- ASSERT(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
- if (code()->kind() == Code::FUNCTION) {
- code()->set_optimizable(false);
- }
- if (FLAG_trace_opt) {
- PrintF("[disabled optimization for %s, reason: %s]\n",
- *DebugName()->ToCString(), reason);
- }
-}
-
-
-bool SharedFunctionInfo::VerifyBailoutId(BailoutId id) {
- ASSERT(!id.IsNone());
- Code* unoptimized = code();
- DeoptimizationOutputData* data =
- DeoptimizationOutputData::cast(unoptimized->deoptimization_data());
- unsigned ignore = Deoptimizer::GetOutputInfo(data, id, this);
- USE(ignore);
- return true; // Return true if there was no ASSERT.
-}
-
-
-void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
- ASSERT(!IsInobjectSlackTrackingInProgress());
-
- if (!FLAG_clever_optimizations) return;
-
- // Only initiate the tracking the first time.
- if (live_objects_may_exist()) return;
- set_live_objects_may_exist(true);
-
- // No tracking during the snapshot construction phase.
- if (Serializer::enabled()) return;
-
- if (map->unused_property_fields() == 0) return;
-
- // Nonzero counter is a leftover from the previous attempt interrupted
- // by GC, keep it.
- if (construction_count() == 0) {
- set_construction_count(kGenerousAllocationCount);
- }
- set_initial_map(map);
- Builtins* builtins = map->GetHeap()->isolate()->builtins();
- ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
- construct_stub());
- set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
-}
-
-
-// Called from GC, hence reinterpret_cast and unchecked accessors.
-void SharedFunctionInfo::DetachInitialMap() {
- Map* map = reinterpret_cast<Map*>(initial_map());
-
- // Make the map remember to restore the link if it survives the GC.
- map->set_attached_to_shared_function_info(true);
-
- // Undo state changes made by StartInobjectTracking (except the
- // construction_count). This way if the initial map does not survive the GC
- // then StartInobjectTracking will be called again the next time the
- // constructor is called. The countdown will continue and (possibly after
- // several more GCs) CompleteInobjectSlackTracking will eventually be called.
- Heap* heap = map->GetHeap();
- set_initial_map(heap->undefined_value());
- Builtins* builtins = heap->isolate()->builtins();
- ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
- *RawField(this, kConstructStubOffset));
- set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
- // It is safe to clear the flag: it will be set again if the map is live.
- set_live_objects_may_exist(false);
-}
-
-
-// Called from GC, hence reinterpret_cast and unchecked accessors.
-void SharedFunctionInfo::AttachInitialMap(Map* map) {
- map->set_attached_to_shared_function_info(false);
-
- // Resume inobject slack tracking.
- set_initial_map(map);
- Builtins* builtins = map->GetHeap()->isolate()->builtins();
- ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
- *RawField(this, kConstructStubOffset));
- set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
- // The map survived the gc, so there may be objects referencing it.
- set_live_objects_may_exist(true);
-}
-
-
-void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
- code()->ClearInlineCaches();
- set_ic_age(new_ic_age);
- if (code()->kind() == Code::FUNCTION) {
- code()->set_profiler_ticks(0);
- if (optimization_disabled() &&
- opt_count() >= FLAG_max_opt_count) {
- // Re-enable optimizations if they were disabled due to opt_count limit.
- set_optimization_disabled(false);
- code()->set_optimizable(true);
- }
- set_opt_count(0);
- set_deopt_count(0);
- }
-}
-
-
-static void GetMinInobjectSlack(Map* map, void* data) {
- int slack = map->unused_property_fields();
- if (*reinterpret_cast<int*>(data) > slack) {
- *reinterpret_cast<int*>(data) = slack;
- }
-}
-
-
-static void ShrinkInstanceSize(Map* map, void* data) {
- int slack = *reinterpret_cast<int*>(data);
- map->set_inobject_properties(map->inobject_properties() - slack);
- map->set_unused_property_fields(map->unused_property_fields() - slack);
- map->set_instance_size(map->instance_size() - slack * kPointerSize);
-
- // Visitor id might depend on the instance size, recalculate it.
- map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
-}
-
-
-void SharedFunctionInfo::CompleteInobjectSlackTracking() {
- ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
- Map* map = Map::cast(initial_map());
-
- Heap* heap = map->GetHeap();
- set_initial_map(heap->undefined_value());
- Builtins* builtins = heap->isolate()->builtins();
- ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
- construct_stub());
- set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
-
- int slack = map->unused_property_fields();
- map->TraverseTransitionTree(&GetMinInobjectSlack, &slack);
- if (slack != 0) {
- // Resize the initial map and all maps in its transition tree.
- map->TraverseTransitionTree(&ShrinkInstanceSize, &slack);
-
- // Give the correct expected_nof_properties to initial maps created later.
- ASSERT(expected_nof_properties() >= slack);
- set_expected_nof_properties(expected_nof_properties() - slack);
- }
-}
-
-
-int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context) {
- ASSERT(native_context->IsNativeContext());
- if (!FLAG_cache_optimized_code) return -1;
- Object* value = optimized_code_map();
- if (!value->IsSmi()) {
- FixedArray* optimized_code_map = FixedArray::cast(value);
- int length = optimized_code_map->length();
- for (int i = 0; i < length; i += 3) {
- if (optimized_code_map->get(i) == native_context) {
- return i + 1;
- }
- }
- }
- return -1;
-}
-
-
-#define DECLARE_TAG(ignore1, name, ignore2) name,
-const char* const VisitorSynchronization::kTags[
- VisitorSynchronization::kNumberOfSyncTags] = {
- VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
-};
-#undef DECLARE_TAG
-
-
-#define DECLARE_TAG(ignore1, ignore2, name) name,
-const char* const VisitorSynchronization::kTagNames[
- VisitorSynchronization::kNumberOfSyncTags] = {
- VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
-};
-#undef DECLARE_TAG
-
-
-void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Object* old_target = target;
- VisitPointer(&target);
- CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
-}
-
-
-void ObjectVisitor::VisitCodeAgeSequence(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
- Object* stub = rinfo->code_age_stub();
- if (stub) {
- VisitPointer(&stub);
- }
-}
-
-
-void ObjectVisitor::VisitCodeEntry(Address entry_address) {
- Object* code = Code::GetObjectFromEntryAddress(entry_address);
- Object* old_code = code;
- VisitPointer(&code);
- if (code != old_code) {
- Memory::Address_at(entry_address) = reinterpret_cast<Code*>(code)->entry();
- }
-}
-
-
-void ObjectVisitor::VisitGlobalPropertyCell(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
- Object* cell = rinfo->target_cell();
- Object* old_cell = cell;
- VisitPointer(&cell);
- if (cell != old_cell) {
- rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
- }
-}
-
-
-void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
- Object* old_target = target;
- VisitPointer(&target);
- CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
-}
-
-void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- VisitPointer(rinfo->target_object_address());
-}
-
-void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
- Address* p = rinfo->target_reference_address();
- VisitExternalReferences(p, p + 1);
-}
-
-void Code::InvalidateRelocation() {
- set_relocation_info(GetHeap()->empty_byte_array());
-}
-
-
-void Code::Relocate(intptr_t delta) {
- for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
- it.rinfo()->apply(delta);
- }
- CPU::FlushICache(instruction_start(), instruction_size());
-}
-
-
-void Code::CopyFrom(const CodeDesc& desc) {
- ASSERT(Marking::Color(this) == Marking::WHITE_OBJECT);
-
- // copy code
- memmove(instruction_start(), desc.buffer, desc.instr_size);
-
- // copy reloc info
- memmove(relocation_start(),
- desc.buffer + desc.buffer_size - desc.reloc_size,
- desc.reloc_size);
-
- // unbox handles and relocate
- intptr_t delta = instruction_start() - desc.buffer;
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
- RelocInfo::kApplyMask;
- Assembler* origin = desc.origin; // Needed to find target_object on X64.
- for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- Handle<Object> p = it.rinfo()->target_object_handle(origin);
- it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
- it.rinfo()->set_target_cell(*cell, SKIP_WRITE_BARRIER);
- } else if (RelocInfo::IsCodeTarget(mode)) {
- // rewrite code handles in inline cache targets to direct
- // pointers to the first instruction in the code object
- Handle<Object> p = it.rinfo()->target_object_handle(origin);
- Code* code = Code::cast(*p);
- it.rinfo()->set_target_address(code->instruction_start(),
- SKIP_WRITE_BARRIER);
- } else {
- it.rinfo()->apply(delta);
- }
- }
- CPU::FlushICache(instruction_start(), instruction_size());
-}
-
-
-// Locate the source position which is closest to the address in the code. This
-// is using the source position information embedded in the relocation info.
-// The position returned is relative to the beginning of the script where the
-// source for this function is found.
-int Code::SourcePosition(Address pc) {
- int distance = kMaxInt;
- int position = RelocInfo::kNoPosition; // Initially no position found.
- // Run through all the relocation info to find the best matching source
- // position. All the code needs to be considered as the sequence of the
- // instructions in the code does not necessarily follow the same order as the
- // source.
- RelocIterator it(this, RelocInfo::kPositionMask);
- while (!it.done()) {
- // Only look at positions after the current pc.
- if (it.rinfo()->pc() < pc) {
- // Get position and distance.
-
- int dist = static_cast<int>(pc - it.rinfo()->pc());
- int pos = static_cast<int>(it.rinfo()->data());
- // If this position is closer than the current candidate or if it has the
- // same distance as the current candidate and the position is higher then
- // this position is the new candidate.
- if ((dist < distance) ||
- (dist == distance && pos > position)) {
- position = pos;
- distance = dist;
- }
- }
- it.next();
- }
- return position;
-}
-
-
-// Same as Code::SourcePosition above except it only looks for statement
-// positions.
-int Code::SourceStatementPosition(Address pc) {
- // First find the position as close as possible using all position
- // information.
- int position = SourcePosition(pc);
- // Now find the closest statement position before the position.
- int statement_position = 0;
- RelocIterator it(this, RelocInfo::kPositionMask);
- while (!it.done()) {
- if (RelocInfo::IsStatementPosition(it.rinfo()->rmode())) {
- int p = static_cast<int>(it.rinfo()->data());
- if (statement_position < p && p <= position) {
- statement_position = p;
- }
- }
- it.next();
- }
- return statement_position;
-}
-
-
-SafepointEntry Code::GetSafepointEntry(Address pc) {
- SafepointTable table(this);
- return table.FindEntry(pc);
-}
-
-
-void Code::SetNoStackCheckTable() {
- // Indicate the absence of a stack-check table by a table start after the
- // end of the instructions. Table start must be aligned, so round up.
- set_stack_check_table_offset(RoundUp(instruction_size(), kIntSize));
-}
-
-
-Map* Code::FindFirstMap() {
- ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Object* object = info->target_object();
- if (object->IsMap()) return Map::cast(object);
- }
- return NULL;
-}
-
-
-void Code::FindAllMaps(MapHandleList* maps) {
- ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Object* object = info->target_object();
- if (object->IsMap()) maps->Add(Handle<Map>(Map::cast(object)));
- }
-}
-
-
-Code* Code::FindFirstCode() {
- ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- return Code::GetCodeFromTargetAddress(info->target_address());
- }
- return NULL;
-}
-
-
-void Code::FindAllCode(CodeHandleList* code_list, int length) {
- ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
- int i = 0;
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- if (i++ == length) return;
- RelocInfo* info = it.rinfo();
- Code* code = Code::GetCodeFromTargetAddress(info->target_address());
- ASSERT(code->is_load_stub());
- code_list->Add(Handle<Code>(code));
- }
- UNREACHABLE();
-}
-
-
-void Code::ClearInlineCaches() {
- int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID) |
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET_CONTEXT);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
- if (target->is_inline_cache_stub()) {
- IC::Clear(info->pc());
- }
- }
-}
-
-
-void Code::ClearTypeFeedbackCells(Heap* heap) {
- if (kind() != FUNCTION) return;
- Object* raw_info = type_feedback_info();
- if (raw_info->IsTypeFeedbackInfo()) {
- TypeFeedbackCells* type_feedback_cells =
- TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
- for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
- JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
- cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
- }
- }
-}
-
-
-bool Code::allowed_in_shared_map_code_cache() {
- return is_keyed_load_stub() || is_keyed_store_stub() ||
- (is_compare_ic_stub() &&
- ICCompareStub::CompareState(stub_info()) == CompareIC::KNOWN_OBJECT);
-}
-
-
-void Code::MakeCodeAgeSequenceYoung(byte* sequence) {
- PatchPlatformCodeAge(sequence, kNoAge, NO_MARKING_PARITY);
-}
-
-
-void Code::MakeOlder(MarkingParity current_parity) {
- byte* sequence = FindCodeAgeSequence();
- if (sequence != NULL) {
- Age age;
- MarkingParity code_parity;
- GetCodeAgeAndParity(sequence, &age, &code_parity);
- if (age != kLastCodeAge && code_parity != current_parity) {
- PatchPlatformCodeAge(sequence, static_cast<Age>(age + 1),
- current_parity);
- }
- }
-}
-
-
-bool Code::IsOld() {
- byte* sequence = FindCodeAgeSequence();
- if (sequence == NULL) return false;
- Age age;
- MarkingParity parity;
- GetCodeAgeAndParity(sequence, &age, &parity);
- return age >= kSexagenarianCodeAge;
-}
-
-
-byte* Code::FindCodeAgeSequence() {
- return FLAG_age_code &&
- prologue_offset() != kPrologueOffsetNotSet &&
- (kind() == OPTIMIZED_FUNCTION ||
- (kind() == FUNCTION && !has_debug_break_slots()))
- ? instruction_start() + prologue_offset()
- : NULL;
-}
-
-
-void Code::GetCodeAgeAndParity(Code* code, Age* age,
- MarkingParity* parity) {
- Isolate* isolate = Isolate::Current();
- Builtins* builtins = isolate->builtins();
- Code* stub = NULL;
-#define HANDLE_CODE_AGE(AGE) \
- stub = *builtins->Make##AGE##CodeYoungAgainEvenMarking(); \
- if (code == stub) { \
- *age = k##AGE##CodeAge; \
- *parity = EVEN_MARKING_PARITY; \
- return; \
- } \
- stub = *builtins->Make##AGE##CodeYoungAgainOddMarking(); \
- if (code == stub) { \
- *age = k##AGE##CodeAge; \
- *parity = ODD_MARKING_PARITY; \
- return; \
- }
- CODE_AGE_LIST(HANDLE_CODE_AGE)
-#undef HANDLE_CODE_AGE
- UNREACHABLE();
-}
-
-
-Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) {
- Isolate* isolate = Isolate::Current();
- Builtins* builtins = isolate->builtins();
- switch (age) {
-#define HANDLE_CODE_AGE(AGE) \
- case k##AGE##CodeAge: { \
- Code* stub = parity == EVEN_MARKING_PARITY \
- ? *builtins->Make##AGE##CodeYoungAgainEvenMarking() \
- : *builtins->Make##AGE##CodeYoungAgainOddMarking(); \
- return stub; \
- }
- CODE_AGE_LIST(HANDLE_CODE_AGE)
-#undef HANDLE_CODE_AGE
- default:
- UNREACHABLE();
- break;
- }
- return NULL;
-}
-
-
-void Code::PrintDeoptLocation(int bailout_id) {
- const char* last_comment = NULL;
- int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
- | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->rmode() == RelocInfo::COMMENT) {
- last_comment = reinterpret_cast<const char*>(info->data());
- } else if (last_comment != NULL &&
- bailout_id == Deoptimizer::GetDeoptimizationId(
- info->target_address(), Deoptimizer::EAGER)) {
- CHECK(info->rmode() == RelocInfo::RUNTIME_ENTRY);
- PrintF(" %s\n", last_comment);
- return;
- }
- }
-}
-
-
-// Identify kind of code.
-const char* Code::Kind2String(Kind kind) {
- switch (kind) {
- case FUNCTION: return "FUNCTION";
- case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
- case COMPILED_STUB: return "COMPILED_STUB";
- case STUB: return "STUB";
- case BUILTIN: return "BUILTIN";
- case LOAD_IC: return "LOAD_IC";
- case KEYED_LOAD_IC: return "KEYED_LOAD_IC";
- case STORE_IC: return "STORE_IC";
- case KEYED_STORE_IC: return "KEYED_STORE_IC";
- case CALL_IC: return "CALL_IC";
- case KEYED_CALL_IC: return "KEYED_CALL_IC";
- case UNARY_OP_IC: return "UNARY_OP_IC";
- case BINARY_OP_IC: return "BINARY_OP_IC";
- case COMPARE_IC: return "COMPARE_IC";
- case TO_BOOLEAN_IC: return "TO_BOOLEAN_IC";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-#ifdef ENABLE_DISASSEMBLER
-
-void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
- disasm::NameConverter converter;
- int deopt_count = DeoptCount();
- FPrintF(out, "Deoptimization Input Data (deopt points = %d)\n", deopt_count);
- if (0 == deopt_count) return;
-
- FPrintF(out, "%6s %6s %6s %6s %12s\n", "index", "ast id", "argc", "pc",
- FLAG_print_code_verbose ? "commands" : "");
- for (int i = 0; i < deopt_count; i++) {
- FPrintF(out, "%6d %6d %6d %6d",
- i,
- AstId(i).ToInt(),
- ArgumentsStackHeight(i)->value(),
- Pc(i)->value());
-
- if (!FLAG_print_code_verbose) {
- FPrintF(out, "\n");
- continue;
- }
- // Print details of the frame translation.
- int translation_index = TranslationIndex(i)->value();
- TranslationIterator iterator(TranslationByteArray(), translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- int frame_count = iterator.Next();
- int jsframe_count = iterator.Next();
- FPrintF(out, " %s {frame count=%d, js frame count=%d}\n",
- Translation::StringFor(opcode),
- frame_count,
- jsframe_count);
-
- while (iterator.HasNext() &&
- Translation::BEGIN !=
- (opcode = static_cast<Translation::Opcode>(iterator.Next()))) {
- FPrintF(out, "%24s %s ", "", Translation::StringFor(opcode));
-
- switch (opcode) {
- case Translation::BEGIN:
- UNREACHABLE();
- break;
-
- case Translation::JS_FRAME: {
- int ast_id = iterator.Next();
- int function_id = iterator.Next();
- unsigned height = iterator.Next();
- FPrintF(out, "{ast_id=%d, function=", ast_id);
- if (function_id != Translation::kSelfLiteralId) {
- Object* function = LiteralArray()->get(function_id);
- JSFunction::cast(function)->PrintName(out);
- } else {
- FPrintF(out, "<self>");
- }
- FPrintF(out, ", height=%u}", height);
- break;
- }
-
- case Translation::COMPILED_STUB_FRAME: {
- Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
- FPrintF(out, "{kind=%d}", stub_kind);
- break;
- }
-
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- case Translation::CONSTRUCT_STUB_FRAME: {
- int function_id = iterator.Next();
- JSFunction* function =
- JSFunction::cast(LiteralArray()->get(function_id));
- unsigned height = iterator.Next();
- FPrintF(out, "{function=");
- function->PrintName(out);
- FPrintF(out, ", height=%u}", height);
- break;
- }
-
- case Translation::GETTER_STUB_FRAME:
- case Translation::SETTER_STUB_FRAME: {
- int function_id = iterator.Next();
- JSFunction* function =
- JSFunction::cast(LiteralArray()->get(function_id));
- FPrintF(out, "{function=");
- function->PrintName(out);
- FPrintF(out, "}");
- break;
- }
-
- case Translation::DUPLICATE:
- break;
-
- case Translation::REGISTER: {
- int reg_code = iterator.Next();
- FPrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
- break;
- }
-
- case Translation::INT32_REGISTER: {
- int reg_code = iterator.Next();
- FPrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
- break;
- }
-
- case Translation::UINT32_REGISTER: {
- int reg_code = iterator.Next();
- FPrintF(out, "{input=%s (unsigned)}",
- converter.NameOfCPURegister(reg_code));
- break;
- }
-
- case Translation::DOUBLE_REGISTER: {
- int reg_code = iterator.Next();
- FPrintF(out, "{input=%s}",
- DoubleRegister::AllocationIndexToString(reg_code));
- break;
- }
-
- case Translation::STACK_SLOT: {
- int input_slot_index = iterator.Next();
- FPrintF(out, "{input=%d}", input_slot_index);
- break;
- }
-
- case Translation::INT32_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- FPrintF(out, "{input=%d}", input_slot_index);
- break;
- }
-
- case Translation::UINT32_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- FPrintF(out, "{input=%d (unsigned)}", input_slot_index);
- break;
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- FPrintF(out, "{input=%d}", input_slot_index);
- break;
- }
-
- case Translation::LITERAL: {
- unsigned literal_index = iterator.Next();
- FPrintF(out, "{literal_id=%u}", literal_index);
- break;
- }
-
- case Translation::ARGUMENTS_OBJECT: {
- bool args_known = iterator.Next();
- int args_index = iterator.Next();
- int args_length = iterator.Next();
- FPrintF(out, "{index=%d, length=%d, known=%d}",
- args_index, args_length, args_known);
- break;
- }
- }
- FPrintF(out, "\n");
- }
- }
-}
-
-
-void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
- FPrintF(out, "Deoptimization Output Data (deopt points = %d)\n",
- this->DeoptPoints());
- if (this->DeoptPoints() == 0) return;
-
- PrintF("%6s %8s %s\n", "ast id", "pc", "state");
- for (int i = 0; i < this->DeoptPoints(); i++) {
- int pc_and_state = this->PcAndState(i)->value();
- PrintF("%6d %8d %s\n",
- this->AstId(i).ToInt(),
- FullCodeGenerator::PcField::decode(pc_and_state),
- FullCodeGenerator::State2String(
- FullCodeGenerator::StateField::decode(pc_and_state)));
- }
-}
-
-
-const char* Code::ICState2String(InlineCacheState state) {
- switch (state) {
- case UNINITIALIZED: return "UNINITIALIZED";
- case PREMONOMORPHIC: return "PREMONOMORPHIC";
- case MONOMORPHIC: return "MONOMORPHIC";
- case MONOMORPHIC_PROTOTYPE_FAILURE: return "MONOMORPHIC_PROTOTYPE_FAILURE";
- case POLYMORPHIC: return "POLYMORPHIC";
- case MEGAMORPHIC: return "MEGAMORPHIC";
- case GENERIC: return "GENERIC";
- case DEBUG_STUB: return "DEBUG_STUB";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-const char* Code::StubType2String(StubType type) {
- switch (type) {
- case NORMAL: return "NORMAL";
- case FIELD: return "FIELD";
- case CONSTANT_FUNCTION: return "CONSTANT_FUNCTION";
- case CALLBACKS: return "CALLBACKS";
- case INTERCEPTOR: return "INTERCEPTOR";
- case MAP_TRANSITION: return "MAP_TRANSITION";
- case NONEXISTENT: return "NONEXISTENT";
- }
- UNREACHABLE(); // keep the compiler happy
- return NULL;
-}
-
-
-void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
- const char* name = NULL;
- switch (kind) {
- case CALL_IC:
- if (extra == STRING_INDEX_OUT_OF_BOUNDS) {
- name = "STRING_INDEX_OUT_OF_BOUNDS";
- }
- break;
- case STORE_IC:
- case KEYED_STORE_IC:
- if (extra == kStrictMode) {
- name = "STRICT";
- }
- break;
- default:
- break;
- }
- if (name != NULL) {
- FPrintF(out, "extra_ic_state = %s\n", name);
- } else {
- FPrintF(out, "extra_ic_state = %d\n", extra);
- }
-}
-
-
-void Code::Disassemble(const char* name, FILE* out) {
- FPrintF(out, "kind = %s\n", Kind2String(kind()));
- if (is_inline_cache_stub()) {
- FPrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
- PrintExtraICState(out, kind(), extra_ic_state());
- if (ic_state() == MONOMORPHIC) {
- FPrintF(out, "type = %s\n", StubType2String(type()));
- }
- if (is_call_stub() || is_keyed_call_stub()) {
- FPrintF(out, "argc = %d\n", arguments_count());
- }
- if (is_compare_ic_stub()) {
- ASSERT(major_key() == CodeStub::CompareIC);
- CompareIC::State left_state, right_state, handler_state;
- Token::Value op;
- ICCompareStub::DecodeMinorKey(stub_info(), &left_state, &right_state,
- &handler_state, &op);
- FPrintF(out, "compare_state = %s*%s -> %s\n",
- CompareIC::GetStateName(left_state),
- CompareIC::GetStateName(right_state),
- CompareIC::GetStateName(handler_state));
- FPrintF(out, "compare_operation = %s\n", Token::Name(op));
- }
- }
- if ((name != NULL) && (name[0] != '\0')) {
- FPrintF(out, "name = %s\n", name);
- }
- if (kind() == OPTIMIZED_FUNCTION) {
- FPrintF(out, "stack_slots = %d\n", stack_slots());
- }
-
- FPrintF(out, "Instructions (size = %d)\n", instruction_size());
- Disassembler::Decode(out, this);
- FPrintF(out, "\n");
-
- if (kind() == FUNCTION) {
- DeoptimizationOutputData* data =
- DeoptimizationOutputData::cast(this->deoptimization_data());
- data->DeoptimizationOutputDataPrint(out);
- } else if (kind() == OPTIMIZED_FUNCTION) {
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(this->deoptimization_data());
- data->DeoptimizationInputDataPrint(out);
- }
- PrintF("\n");
-
- if (kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB) {
- SafepointTable table(this);
- FPrintF(out, "Safepoints (size = %u)\n", table.size());
- for (unsigned i = 0; i < table.length(); i++) {
- unsigned pc_offset = table.GetPcOffset(i);
- FPrintF(out, "%p %4d ", (instruction_start() + pc_offset), pc_offset);
- table.PrintEntry(i);
- FPrintF(out, " (sp -> fp)");
- SafepointEntry entry = table.GetEntry(i);
- if (entry.deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
- FPrintF(out, " %6d", entry.deoptimization_index());
- } else {
- FPrintF(out, " <none>");
- }
- if (entry.argument_count() > 0) {
- FPrintF(out, " argc: %d", entry.argument_count());
- }
- FPrintF(out, "\n");
- }
- FPrintF(out, "\n");
- } else if (kind() == FUNCTION) {
- unsigned offset = stack_check_table_offset();
- // If there is no stack check table, the "table start" will at or after
- // (due to alignment) the end of the instruction stream.
- if (static_cast<int>(offset) < instruction_size()) {
- unsigned* address =
- reinterpret_cast<unsigned*>(instruction_start() + offset);
- unsigned length = address[0];
- FPrintF(out, "Stack checks (size = %u)\n", length);
- FPrintF(out, "ast_id pc_offset\n");
- for (unsigned i = 0; i < length; ++i) {
- unsigned index = (2 * i) + 1;
- FPrintF(out, "%6u %9u\n", address[index], address[index + 1]);
- }
- FPrintF(out, "\n");
- }
-#ifdef OBJECT_PRINT
- if (!type_feedback_info()->IsUndefined()) {
- TypeFeedbackInfo::cast(type_feedback_info())->TypeFeedbackInfoPrint(out);
- FPrintF(out, "\n");
- }
-#endif
- }
-
- PrintF("RelocInfo (size = %d)\n", relocation_size());
- for (RelocIterator it(this); !it.done(); it.next()) it.rinfo()->Print(out);
- FPrintF(out, "\n");
-}
-#endif // ENABLE_DISASSEMBLER
-
-
-MaybeObject* JSObject::SetFastElementsCapacityAndLength(
- int capacity,
- int length,
- SetFastElementsCapacitySmiMode smi_mode) {
- Heap* heap = GetHeap();
- // We should never end in here with a pixel or external array.
- ASSERT(!HasExternalArrayElements());
- ASSERT(!map()->is_observed());
-
- // Allocate a new fast elements backing store.
- FixedArray* new_elements;
- MaybeObject* maybe = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe->To(&new_elements)) return maybe;
-
- ElementsKind elements_kind = GetElementsKind();
- ElementsKind new_elements_kind;
- // The resized array has FAST_*_SMI_ELEMENTS if the capacity mode forces it,
- // or if it's allowed and the old elements array contained only SMIs.
- bool has_fast_smi_elements =
- (smi_mode == kForceSmiElements) ||
- ((smi_mode == kAllowSmiElements) && HasFastSmiElements());
- if (has_fast_smi_elements) {
- if (IsHoleyElementsKind(elements_kind)) {
- new_elements_kind = FAST_HOLEY_SMI_ELEMENTS;
- } else {
- new_elements_kind = FAST_SMI_ELEMENTS;
- }
- } else {
- if (IsHoleyElementsKind(elements_kind)) {
- new_elements_kind = FAST_HOLEY_ELEMENTS;
- } else {
- new_elements_kind = FAST_ELEMENTS;
- }
- }
- FixedArrayBase* old_elements = elements();
- ElementsAccessor* accessor = ElementsAccessor::ForKind(new_elements_kind);
- MaybeObject* maybe_obj =
- accessor->CopyElements(this, new_elements, elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
-
- if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
- Map* new_map = map();
- if (new_elements_kind != elements_kind) {
- MaybeObject* maybe =
- GetElementsTransitionMap(GetIsolate(), new_elements_kind);
- if (!maybe->To(&new_map)) return maybe;
- }
- ValidateElements();
- set_map_and_elements(new_map, new_elements);
- } else {
- FixedArray* parameter_map = FixedArray::cast(old_elements);
- parameter_map->set(1, new_elements);
- }
-
- if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, elements_kind, old_elements,
- GetElementsKind(), new_elements);
- }
-
- if (IsJSArray()) {
- JSArray::cast(this)->set_length(Smi::FromInt(length));
- }
- return new_elements;
-}
-
-
-MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
- int capacity,
- int length) {
- Heap* heap = GetHeap();
- // We should never end in here with a pixel or external array.
- ASSERT(!HasExternalArrayElements());
- ASSERT(!map()->is_observed());
-
- FixedArrayBase* elems;
- { MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedDoubleArray(capacity);
- if (!maybe_obj->To(&elems)) return maybe_obj;
- }
-
- ElementsKind elements_kind = GetElementsKind();
- ElementsKind new_elements_kind = elements_kind;
- if (IsHoleyElementsKind(elements_kind)) {
- new_elements_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
- } else {
- new_elements_kind = FAST_DOUBLE_ELEMENTS;
- }
-
- Map* new_map;
- { MaybeObject* maybe_obj =
- GetElementsTransitionMap(heap->isolate(), new_elements_kind);
- if (!maybe_obj->To(&new_map)) return maybe_obj;
- }
-
- FixedArrayBase* old_elements = elements();
- ElementsAccessor* accessor = ElementsAccessor::ForKind(FAST_DOUBLE_ELEMENTS);
- { MaybeObject* maybe_obj =
- accessor->CopyElements(this, elems, elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
- }
- if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
- ValidateElements();
- set_map_and_elements(new_map, elems);
- } else {
- FixedArray* parameter_map = FixedArray::cast(old_elements);
- parameter_map->set(1, elems);
- }
-
- if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, elements_kind, old_elements,
- GetElementsKind(), elems);
- }
-
- if (IsJSArray()) {
- JSArray::cast(this)->set_length(Smi::FromInt(length));
- }
-
- return this;
-}
-
-
-MaybeObject* JSArray::Initialize(int capacity, int length) {
- ASSERT(capacity >= 0);
- return GetHeap()->AllocateJSArrayStorage(this, length, capacity,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
-}
-
-
-void JSArray::Expand(int required_size) {
- GetIsolate()->factory()->SetElementsCapacityAndLength(
- Handle<JSArray>(this), required_size, required_size);
-}
-
-
-// Returns false if the passed-in index is marked non-configurable,
-// which will cause the ES5 truncation operation to halt, and thus
-// no further old values need be collected.
-static bool GetOldValue(Isolate* isolate,
- Handle<JSObject> object,
- uint32_t index,
- List<Handle<Object> >* old_values,
- List<Handle<String> >* indices) {
- PropertyAttributes attributes = object->GetLocalElementAttribute(index);
- ASSERT(attributes != ABSENT);
- if (attributes == DONT_DELETE) return false;
- old_values->Add(object->GetLocalElementAccessorPair(index) == NULL
- ? Object::GetElement(object, index)
- : Handle<Object>::cast(isolate->factory()->the_hole_value()));
- indices->Add(isolate->factory()->Uint32ToString(index));
- return true;
-}
-
-
-MaybeObject* JSArray::SetElementsLength(Object* len) {
- // We should never end in here with a pixel or external array.
- ASSERT(AllowsSetElementsLength());
- if (!(FLAG_harmony_observation && map()->is_observed()))
- return GetElementsAccessor()->SetLength(this, len);
-
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSArray> self(this);
- List<Handle<String> > indices;
- List<Handle<Object> > old_values;
- Handle<Object> old_length_handle(self->length(), isolate);
- Handle<Object> new_length_handle(len, isolate);
- uint32_t old_length = 0;
- CHECK(old_length_handle->ToArrayIndex(&old_length));
- uint32_t new_length = 0;
- if (!new_length_handle->ToArrayIndex(&new_length))
- return Failure::InternalError();
-
- // Observed arrays should always be in dictionary mode;
- // if they were in fast mode, the below is slower than necessary
- // as it iterates over the array backing store multiple times.
- ASSERT(self->HasDictionaryElements());
- static const PropertyAttributes kNoAttrFilter = NONE;
- int num_elements = self->NumberOfLocalElements(kNoAttrFilter);
- if (num_elements > 0) {
- if (old_length == static_cast<uint32_t>(num_elements)) {
- // Simple case for arrays without holes.
- for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
- if (!GetOldValue(isolate, self, i, &old_values, &indices)) break;
- }
- } else {
- // For sparse arrays, only iterate over existing elements.
- Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements);
- self->GetLocalElementKeys(*keys, kNoAttrFilter);
- while (num_elements-- > 0) {
- uint32_t index = NumberToUint32(keys->get(num_elements));
- if (index < new_length) break;
- if (!GetOldValue(isolate, self, index, &old_values, &indices)) break;
- }
- }
- }
-
- MaybeObject* result =
- self->GetElementsAccessor()->SetLength(*self, *new_length_handle);
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- CHECK(self->length()->ToArrayIndex(&new_length));
- if (old_length != new_length) {
- for (int i = 0; i < indices.length(); ++i) {
- JSObject::EnqueueChangeRecord(
- self, "deleted", indices[i], old_values[i]);
- }
- JSObject::EnqueueChangeRecord(
- self, "updated", isolate->factory()->length_string(),
- old_length_handle);
- }
- return *hresult;
-}
-
-
-Map* Map::GetPrototypeTransition(Object* prototype) {
- FixedArray* cache = GetPrototypeTransitions();
- int number_of_transitions = NumberOfProtoTransitions();
- const int proto_offset =
- kProtoTransitionHeaderSize + kProtoTransitionPrototypeOffset;
- const int map_offset = kProtoTransitionHeaderSize + kProtoTransitionMapOffset;
- const int step = kProtoTransitionElementsPerEntry;
- for (int i = 0; i < number_of_transitions; i++) {
- if (cache->get(proto_offset + i * step) == prototype) {
- Object* map = cache->get(map_offset + i * step);
- return Map::cast(map);
- }
- }
- return NULL;
-}
-
-
-MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) {
- ASSERT(map->IsMap());
- ASSERT(HeapObject::cast(prototype)->map()->IsMap());
- // Don't cache prototype transition if this map is shared.
- if (is_shared() || !FLAG_cache_prototype_transitions) return this;
-
- FixedArray* cache = GetPrototypeTransitions();
-
- const int step = kProtoTransitionElementsPerEntry;
- const int header = kProtoTransitionHeaderSize;
-
- int capacity = (cache->length() - header) / step;
-
- int transitions = NumberOfProtoTransitions() + 1;
-
- if (transitions > capacity) {
- if (capacity > kMaxCachedPrototypeTransitions) return this;
-
- FixedArray* new_cache;
- // Grow array by factor 2 over and above what we need.
- { MaybeObject* maybe_cache =
- GetHeap()->AllocateFixedArray(transitions * 2 * step + header);
- if (!maybe_cache->To(&new_cache)) return maybe_cache;
- }
-
- for (int i = 0; i < capacity * step; i++) {
- new_cache->set(i + header, cache->get(i + header));
- }
- cache = new_cache;
- MaybeObject* set_result = SetPrototypeTransitions(cache);
- if (set_result->IsFailure()) return set_result;
- }
-
- int last = transitions - 1;
-
- cache->set(header + last * step + kProtoTransitionPrototypeOffset, prototype);
- cache->set(header + last * step + kProtoTransitionMapOffset, map);
- SetNumberOfProtoTransitions(transitions);
-
- return cache;
-}
-
-
-void Map::ZapTransitions() {
- TransitionArray* transition_array = transitions();
- MemsetPointer(transition_array->data_start(),
- GetHeap()->the_hole_value(),
- transition_array->length());
-}
-
-
-void Map::ZapPrototypeTransitions() {
- FixedArray* proto_transitions = GetPrototypeTransitions();
- MemsetPointer(proto_transitions->data_start(),
- GetHeap()->the_hole_value(),
- proto_transitions->length());
-}
-
-
-DependentCode::GroupStartIndexes::GroupStartIndexes(DependentCode* entries) {
- Recompute(entries);
-}
-
-
-void DependentCode::GroupStartIndexes::Recompute(DependentCode* entries) {
- start_indexes_[0] = 0;
- for (int g = 1; g <= kGroupCount; g++) {
- int count = entries->number_of_entries(static_cast<DependencyGroup>(g - 1));
- start_indexes_[g] = start_indexes_[g - 1] + count;
- }
-}
-
-
-Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
- DependencyGroup group,
- Handle<Code> value) {
- GroupStartIndexes starts(*entries);
- int start = starts.at(group);
- int end = starts.at(group + 1);
- int number_of_entries = starts.number_of_entries();
- if (start < end && entries->code_at(end - 1) == *value) {
- // Do not append the code if it is already in the array.
- // It is sufficient to just check only the last element because
- // we process embedded maps of an optimized code in one batch.
- return entries;
- }
- if (entries->length() < kCodesStartIndex + number_of_entries + 1) {
- Factory* factory = entries->GetIsolate()->factory();
- int capacity = kCodesStartIndex + number_of_entries + 1;
- if (capacity > 5) capacity = capacity * 5 / 4;
- Handle<DependentCode> new_entries = Handle<DependentCode>::cast(
- factory->CopySizeFixedArray(entries, capacity));
- // The number of codes can change after GC.
- starts.Recompute(*entries);
- start = starts.at(group);
- end = starts.at(group + 1);
- number_of_entries = starts.number_of_entries();
- for (int i = 0; i < number_of_entries; i++) {
- entries->clear_code_at(i);
- }
- // If the old fixed array was empty, we need to reset counters of the
- // new array.
- if (number_of_entries == 0) {
- for (int g = 0; g < kGroupCount; g++) {
- new_entries->set_number_of_entries(static_cast<DependencyGroup>(g), 0);
- }
- }
- entries = new_entries;
- }
- entries->ExtendGroup(group);
- entries->set_code_at(end, *value);
- entries->set_number_of_entries(group, end + 1 - start);
- return entries;
-}
-
-
-bool DependentCode::Contains(DependencyGroup group, Code* code) {
- GroupStartIndexes starts(this);
- int number_of_entries = starts.at(kGroupCount);
- for (int i = 0; i < number_of_entries; i++) {
- if (code_at(i) == code) return true;
- }
- return false;
-}
-
-
-class DeoptimizeDependentCodeFilter : public OptimizedFunctionFilter {
- public:
- virtual bool TakeFunction(JSFunction* function) {
- return function->code()->marked_for_deoptimization();
- }
-};
-
-
-void DependentCode::DeoptimizeDependentCodeGroup(
- DependentCode::DependencyGroup group) {
- AssertNoAllocation no_allocation_scope;
- DependentCode::GroupStartIndexes starts(this);
- int start = starts.at(group);
- int end = starts.at(group + 1);
- int number_of_entries = starts.at(DependentCode::kGroupCount);
- if (start == end) return;
- for (int i = start; i < end; i++) {
- Code* code = code_at(i);
- code->set_marked_for_deoptimization(true);
- }
- // Compact the array by moving all subsequent groups to fill in the new holes.
- for (int src = end, dst = start; src < number_of_entries; src++, dst++) {
- set_code_at(dst, code_at(src));
- }
- // Now the holes are at the end of the array, zap them for heap-verifier.
- int removed = end - start;
- for (int i = number_of_entries - removed; i < number_of_entries; i++) {
- clear_code_at(i);
- }
- set_number_of_entries(group, 0);
- DeoptimizeDependentCodeFilter filter;
- Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
-}
-
-
-MaybeObject* JSReceiver::SetPrototype(Object* value,
- bool skip_hidden_prototypes) {
-#ifdef DEBUG
- int size = Size();
-#endif
-
- Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
- // Silently ignore the change if value is not a JSObject or null.
- // SpiderMonkey behaves this way.
- if (!value->IsJSReceiver() && !value->IsNull()) return value;
-
- // From 8.6.2 Object Internal Methods
- // ...
- // In addition, if [[Extensible]] is false the value of the [[Class]] and
- // [[Prototype]] internal properties of the object may not be modified.
- // ...
- // Implementation specific extensions that modify [[Class]], [[Prototype]]
- // or [[Extensible]] must not violate the invariants defined in the preceding
- // paragraph.
- if (!this->map()->is_extensible()) {
- HandleScope scope(isolate);
- Handle<Object> handle(this, isolate);
- return isolate->Throw(
- *isolate->factory()->NewTypeError("non_extensible_proto",
- HandleVector<Object>(&handle, 1)));
- }
-
- // Before we can set the prototype we need to be sure
- // prototype cycles are prevented.
- // It is sufficient to validate that the receiver is not in the new prototype
- // chain.
- for (Object* pt = value;
- pt != heap->null_value();
- pt = pt->GetPrototype(isolate)) {
- if (JSReceiver::cast(pt) == this) {
- // Cycle detected.
- HandleScope scope(isolate);
- return isolate->Throw(
- *FACTORY->NewError("cyclic_proto", HandleVector<Object>(NULL, 0)));
- }
- }
-
- JSReceiver* real_receiver = this;
-
- if (skip_hidden_prototypes) {
- // Find the first object in the chain whose prototype object is not
- // hidden and set the new prototype on that object.
- Object* current_proto = real_receiver->GetPrototype();
- while (current_proto->IsJSObject() &&
- JSReceiver::cast(current_proto)->map()->is_hidden_prototype()) {
- real_receiver = JSReceiver::cast(current_proto);
- current_proto = current_proto->GetPrototype(isolate);
- }
- }
-
- // Set the new prototype of the object.
- Map* map = real_receiver->map();
-
- // Nothing to do if prototype is already set.
- if (map->prototype() == value) return value;
-
- if (value->IsJSObject()) {
- MaybeObject* ok = JSObject::cast(value)->OptimizeAsPrototype();
- if (ok->IsFailure()) return ok;
- }
-
- Map* new_map = map->GetPrototypeTransition(value);
- if (new_map == NULL) {
- MaybeObject* maybe_new_map = map->Copy();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- MaybeObject* maybe_new_cache =
- map->PutPrototypeTransition(value, new_map);
- if (maybe_new_cache->IsFailure()) return maybe_new_cache;
-
- new_map->set_prototype(value);
- }
- ASSERT(new_map->prototype() == value);
- real_receiver->set_map(new_map);
-
- heap->ClearInstanceofCache();
- ASSERT(size == Size());
- return value;
-}
-
-
-MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
- uint32_t first_arg,
- uint32_t arg_count,
- EnsureElementsMode mode) {
- // Elements in |Arguments| are ordered backwards (because they're on the
- // stack), but the method that's called here iterates over them in forward
- // direction.
- return EnsureCanContainElements(
- args->arguments() - first_arg - (arg_count - 1),
- arg_count, mode);
-}
-
-
-PropertyType JSObject::GetLocalPropertyType(String* name) {
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- return GetLocalElementType(index);
- }
- LookupResult lookup(GetIsolate());
- LocalLookup(name, &lookup, true);
- return lookup.type();
-}
-
-
-PropertyType JSObject::GetLocalElementType(uint32_t index) {
- return GetElementsAccessor()->GetType(this, this, index);
-}
-
-
-AccessorPair* JSObject::GetLocalPropertyAccessorPair(String* name) {
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- return GetLocalElementAccessorPair(index);
- }
-
- LookupResult lookup(GetIsolate());
- LocalLookupRealNamedProperty(name, &lookup);
-
- if (lookup.IsPropertyCallbacks() &&
- lookup.GetCallbackObject()->IsAccessorPair()) {
- return AccessorPair::cast(lookup.GetCallbackObject());
- }
- return NULL;
-}
-
-
-AccessorPair* JSObject::GetLocalElementAccessorPair(uint32_t index) {
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return NULL;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->GetLocalElementAccessorPair(index);
- }
-
- // Check for lookup interceptor.
- if (HasIndexedInterceptor()) return NULL;
-
- return GetElementsAccessor()->GetAccessorPair(this, this, index);
-}
-
-
-MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
- Isolate* isolate = GetIsolate();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSObject> this_handle(this);
- Handle<Object> value_handle(value, isolate);
- if (!interceptor->setter()->IsUndefined()) {
- v8::IndexedPropertySetter setter =
- v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = setter(index, v8::Utils::ToLocal(value_handle), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) return *value_handle;
- }
- MaybeObject* raw_result =
- this_handle->SetElementWithoutInterceptor(index,
- *value_handle,
- attributes,
- strict_mode,
- check_prototype,
- set_mode);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
-}
-
-
-MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
- Object* structure,
- uint32_t index,
- Object* holder) {
- Isolate* isolate = GetIsolate();
- ASSERT(!structure->IsForeign());
-
- // api style callbacks.
- if (structure->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> data(
- ExecutableAccessorInfo::cast(structure));
- Object* fun_obj = data->getter();
- v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
- if (call_fun == NULL) return isolate->heap()->undefined_value();
- HandleScope scope(isolate);
- Handle<JSObject> self(JSObject::cast(receiver));
- Handle<JSObject> holder_handle(JSObject::cast(holder));
- Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<String> key = isolate->factory()->NumberToString(number);
- LOG(isolate, ApiNamedPropertyAccess("load", *self, *key));
- CustomArguments args(isolate, data->data(), *self, *holder_handle);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = call_fun(v8::Utils::ToLocal(key), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (result.IsEmpty()) return isolate->heap()->undefined_value();
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
- return *result_internal;
- }
-
- // __defineGetter__ callback
- if (structure->IsAccessorPair()) {
- Object* getter = AccessorPair::cast(structure)->getter();
- if (getter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
- }
- // Getter is not a function.
- return isolate->heap()->undefined_value();
- }
-
- if (structure->IsDeclaredAccessorInfo()) {
- // TODO(dcarney): Handle correctly.
- return isolate->heap()->undefined_value();
- }
-
- UNREACHABLE();
- return NULL;
-}
-
-
-MaybeObject* JSObject::SetElementWithCallback(Object* structure,
- uint32_t index,
- Object* value,
- JSObject* holder,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
-
- // We should never get here to initialize a const with the hole
- // value since a const declaration would conflict with the setter.
- ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value, isolate);
-
- // To accommodate both the old and the new api we switch on the
- // data structure used to store the callbacks. Eventually foreign
- // callbacks should be phased out.
- ASSERT(!structure->IsForeign());
-
- if (structure->IsExecutableAccessorInfo()) {
- // api style callbacks
- Handle<JSObject> self(this);
- Handle<JSObject> holder_handle(JSObject::cast(holder));
- Handle<ExecutableAccessorInfo> data(
- ExecutableAccessorInfo::cast(structure));
- Object* call_obj = data->setter();
- v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
- if (call_fun == NULL) return value;
- Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<String> key(isolate->factory()->NumberToString(number));
- LOG(isolate, ApiNamedPropertyAccess("store", *self, *key));
- CustomArguments args(isolate, data->data(), *self, *holder_handle);
- v8::AccessorInfo info(args.end());
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- call_fun(v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle),
- info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
- }
-
- if (structure->IsAccessorPair()) {
- Handle<Object> setter(AccessorPair::cast(structure)->setter(), isolate);
- if (setter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value);
- } else {
- if (strict_mode == kNonStrictMode) {
- return value;
- }
- Handle<Object> holder_handle(holder, isolate);
- Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
- Handle<Object> args[2] = { key, holder_handle };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
- }
- }
-
- // TODO(dcarney): Handle correctly.
- if (structure->IsDeclaredAccessorInfo()) return value;
-
- UNREACHABLE();
- return NULL;
-}
-
-
-bool JSObject::HasFastArgumentsElements() {
- Heap* heap = GetHeap();
- if (!elements()->IsFixedArray()) return false;
- FixedArray* elements = FixedArray::cast(this->elements());
- if (elements->map() != heap->non_strict_arguments_elements_map()) {
- return false;
- }
- FixedArray* arguments = FixedArray::cast(elements->get(1));
- return !arguments->IsDictionary();
-}
-
-
-bool JSObject::HasDictionaryArgumentsElements() {
- Heap* heap = GetHeap();
- if (!elements()->IsFixedArray()) return false;
- FixedArray* elements = FixedArray::cast(this->elements());
- if (elements->map() != heap->non_strict_arguments_elements_map()) {
- return false;
- }
- FixedArray* arguments = FixedArray::cast(elements->get(1));
- return arguments->IsDictionary();
-}
-
-
-// Adding n elements in fast case is O(n*n).
-// Note: revisit design to have dual undefined values to capture absent
-// elements.
-MaybeObject* JSObject::SetFastElement(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype) {
- ASSERT(HasFastSmiOrObjectElements() ||
- HasFastArgumentsElements());
-
- FixedArray* backing_store = FixedArray::cast(elements());
- if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
- backing_store = FixedArray::cast(backing_store->get(1));
- } else {
- MaybeObject* maybe = EnsureWritableFastElements();
- if (!maybe->To(&backing_store)) return maybe;
- }
- uint32_t capacity = static_cast<uint32_t>(backing_store->length());
-
- if (check_prototype &&
- (index >= capacity || backing_store->get(index)->IsTheHole())) {
- bool found;
- MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
- value,
- &found,
- strict_mode);
- if (found) return result;
- }
-
- uint32_t new_capacity = capacity;
- // Check if the length property of this object needs to be updated.
- uint32_t array_length = 0;
- bool must_update_array_length = false;
- bool introduces_holes = true;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
- introduces_holes = index > array_length;
- if (index >= array_length) {
- must_update_array_length = true;
- array_length = index + 1;
- }
- } else {
- introduces_holes = index >= capacity;
- }
-
- // If the array is growing, and it's not growth by a single element at the
- // end, make sure that the ElementsKind is HOLEY.
- ElementsKind elements_kind = GetElementsKind();
- if (introduces_holes &&
- IsFastElementsKind(elements_kind) &&
- !IsFastHoleyElementsKind(elements_kind)) {
- ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
- MaybeObject* maybe = TransitionElementsKind(transitioned_kind);
- if (maybe->IsFailure()) return maybe;
- }
-
- // Check if the capacity of the backing store needs to be increased, or if
- // a transition to slow elements is necessary.
- if (index >= capacity) {
- bool convert_to_slow = true;
- if ((index - capacity) < kMaxGap) {
- new_capacity = NewElementsCapacity(index + 1);
- ASSERT(new_capacity > index);
- if (!ShouldConvertToSlowElements(new_capacity)) {
- convert_to_slow = false;
- }
- }
- if (convert_to_slow) {
- MaybeObject* result = NormalizeElements();
- if (result->IsFailure()) return result;
- return SetDictionaryElement(index, value, NONE, strict_mode,
- check_prototype);
- }
- }
- // Convert to fast double elements if appropriate.
- if (HasFastSmiElements() && !value->IsSmi() && value->IsNumber()) {
- // Consider fixing the boilerplate as well if we have one.
- ElementsKind to_kind = IsHoleyElementsKind(elements_kind)
- ? FAST_HOLEY_DOUBLE_ELEMENTS
- : FAST_DOUBLE_ELEMENTS;
-
- MaybeObject* maybe_failure = UpdateAllocationSiteInfo(to_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
-
- MaybeObject* maybe =
- SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
- if (maybe->IsFailure()) return maybe;
- FixedDoubleArray::cast(elements())->set(index, value->Number());
- ValidateElements();
- return value;
- }
- // Change elements kind from Smi-only to generic FAST if necessary.
- if (HasFastSmiElements() && !value->IsSmi()) {
- Map* new_map;
- ElementsKind kind = HasFastHoleyElements()
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
-
- MaybeObject* maybe_failure = UpdateAllocationSiteInfo(kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
-
- MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
- kind);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- set_map(new_map);
- }
- // Increase backing store capacity if that's been decided previously.
- if (new_capacity != capacity) {
- FixedArray* new_elements;
- SetFastElementsCapacitySmiMode smi_mode =
- value->IsSmi() && HasFastSmiElements()
- ? kAllowSmiElements
- : kDontAllowSmiElements;
- { MaybeObject* maybe =
- SetFastElementsCapacityAndLength(new_capacity,
- array_length,
- smi_mode);
- if (!maybe->To(&new_elements)) return maybe;
- }
- new_elements->set(index, value);
- ValidateElements();
- return value;
- }
-
- // Finally, set the new element and length.
- ASSERT(elements()->IsFixedArray());
- backing_store->set(index, value);
- if (must_update_array_length) {
- JSArray::cast(this)->set_length(Smi::FromInt(array_length));
- }
- return value;
-}
-
-
-MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
- Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
- Handle<JSObject> self(this);
- Handle<Object> value(value_raw, isolate);
-
- // Insert element in the dictionary.
- Handle<FixedArray> elements(FixedArray::cast(this->elements()));
- bool is_arguments =
- (elements->map() == heap->non_strict_arguments_elements_map());
- Handle<SeededNumberDictionary> dictionary(is_arguments
- ? SeededNumberDictionary::cast(elements->get(1))
- : SeededNumberDictionary::cast(*elements));
-
- int entry = dictionary->FindEntry(index);
- if (entry != SeededNumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) {
- return SetElementWithCallback(element, index, *value, this, strict_mode);
- } else {
- dictionary->UpdateMaxNumberKey(index);
- // If a value has not been initialized we allow writing to it even if it
- // is read-only (a declared const that has not been initialized). If a
- // value is being defined we skip attribute checks completely.
- if (set_mode == DEFINE_PROPERTY) {
- details = PropertyDetails(
- attributes, NORMAL, details.dictionary_index());
- dictionary->DetailsAtPut(entry, details);
- } else if (details.IsReadOnly() && !element->IsTheHole()) {
- if (strict_mode == kNonStrictMode) {
- return isolate->heap()->undefined_value();
- } else {
- Handle<Object> holder(this, isolate);
- Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[2] = { number, holder };
- Handle<Object> error =
- isolate->factory()->NewTypeError("strict_read_only_property",
- HandleVector(args, 2));
- return isolate->Throw(*error);
- }
- }
- // Elements of the arguments object in slow mode might be slow aliases.
- if (is_arguments && element->IsAliasedArgumentsEntry()) {
- AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(element);
- Context* context = Context::cast(elements->get(0));
- int context_index = entry->aliased_context_slot();
- ASSERT(!context->get(context_index)->IsTheHole());
- context->set(context_index, *value);
- // For elements that are still writable we keep slow aliasing.
- if (!details.IsReadOnly()) value = handle(element, isolate);
- }
- dictionary->ValueAtPut(entry, *value);
- }
- } else {
- // Index not already used. Look for an accessor in the prototype chain.
- // Can cause GC!
- if (check_prototype) {
- bool found;
- MaybeObject* result = SetElementWithCallbackSetterInPrototypes(
- index, *value, &found, strict_mode);
- if (found) return result;
- }
- // When we set the is_extensible flag to false we always force the
- // element into dictionary mode (and force them to stay there).
- if (!self->map()->is_extensible()) {
- if (strict_mode == kNonStrictMode) {
- return isolate->heap()->undefined_value();
- } else {
- Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<String> name = isolate->factory()->NumberToString(number);
- Handle<Object> args[1] = { name };
- Handle<Object> error =
- isolate->factory()->NewTypeError("object_not_extensible",
- HandleVector(args, 1));
- return isolate->Throw(*error);
- }
- }
- FixedArrayBase* new_dictionary;
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
- MaybeObject* maybe = dictionary->AddNumberEntry(index, *value, details);
- if (!maybe->To(&new_dictionary)) return maybe;
- if (*dictionary != SeededNumberDictionary::cast(new_dictionary)) {
- if (is_arguments) {
- elements->set(1, new_dictionary);
- } else {
- self->set_elements(new_dictionary);
- }
- dictionary =
- handle(SeededNumberDictionary::cast(new_dictionary), isolate);
- }
- }
-
- // Update the array length if this JSObject is an array.
- if (self->IsJSArray()) {
- MaybeObject* result =
- JSArray::cast(*self)->JSArrayUpdateLengthFromIndex(index, *value);
- if (result->IsFailure()) return result;
- }
-
- // Attempt to put this object back in fast case.
- if (self->ShouldConvertToFastElements()) {
- uint32_t new_length = 0;
- if (self->IsJSArray()) {
- CHECK(JSArray::cast(*self)->length()->ToArrayIndex(&new_length));
- } else {
- new_length = dictionary->max_number_key() + 1;
- }
- SetFastElementsCapacitySmiMode smi_mode = FLAG_smi_only_arrays
- ? kAllowSmiElements
- : kDontAllowSmiElements;
- bool has_smi_only_elements = false;
- bool should_convert_to_fast_double_elements =
- self->ShouldConvertToFastDoubleElements(&has_smi_only_elements);
- if (has_smi_only_elements) {
- smi_mode = kForceSmiElements;
- }
- MaybeObject* result = should_convert_to_fast_double_elements
- ? self->SetFastDoubleElementsCapacityAndLength(new_length, new_length)
- : self->SetFastElementsCapacityAndLength(
- new_length, new_length, smi_mode);
- self->ValidateElements();
- if (result->IsFailure()) return result;
-#ifdef DEBUG
- if (FLAG_trace_normalization) {
- PrintF("Object elements are fast case again:\n");
- Print();
- }
-#endif
- }
- return *value;
-}
-
-
-MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype) {
- ASSERT(HasFastDoubleElements());
-
- FixedArrayBase* base_elms = FixedArrayBase::cast(elements());
- uint32_t elms_length = static_cast<uint32_t>(base_elms->length());
-
- // If storing to an element that isn't in the array, pass the store request
- // up the prototype chain before storing in the receiver's elements.
- if (check_prototype &&
- (index >= elms_length ||
- FixedDoubleArray::cast(base_elms)->is_the_hole(index))) {
- bool found;
- MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
- value,
- &found,
- strict_mode);
- if (found) return result;
- }
-
- // If the value object is not a heap number, switch to fast elements and try
- // again.
- bool value_is_smi = value->IsSmi();
- bool introduces_holes = true;
- uint32_t length = elms_length;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
- introduces_holes = index > length;
- } else {
- introduces_holes = index >= elms_length;
- }
-
- if (!value->IsNumber()) {
- MaybeObject* maybe_obj = SetFastElementsCapacityAndLength(
- elms_length,
- length,
- kDontAllowSmiElements);
- if (maybe_obj->IsFailure()) return maybe_obj;
- maybe_obj = SetFastElement(index, value, strict_mode, check_prototype);
- if (maybe_obj->IsFailure()) return maybe_obj;
- ValidateElements();
- return maybe_obj;
- }
-
- double double_value = value_is_smi
- ? static_cast<double>(Smi::cast(value)->value())
- : HeapNumber::cast(value)->value();
-
- // If the array is growing, and it's not growth by a single element at the
- // end, make sure that the ElementsKind is HOLEY.
- ElementsKind elements_kind = GetElementsKind();
- if (introduces_holes && !IsFastHoleyElementsKind(elements_kind)) {
- ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
- MaybeObject* maybe = TransitionElementsKind(transitioned_kind);
- if (maybe->IsFailure()) return maybe;
- }
-
- // Check whether there is extra space in the fixed array.
- if (index < elms_length) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
- elms->set(index, double_value);
- if (IsJSArray()) {
- // Update the length of the array if needed.
- uint32_t array_length = 0;
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
- if (index >= array_length) {
- JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
- }
- }
- return value;
- }
-
- // Allow gap in fast case.
- if ((index - elms_length) < kMaxGap) {
- // Try allocating extra space.
- int new_capacity = NewElementsCapacity(index+1);
- if (!ShouldConvertToSlowElements(new_capacity)) {
- ASSERT(static_cast<uint32_t>(new_capacity) > index);
- MaybeObject* maybe_obj =
- SetFastDoubleElementsCapacityAndLength(new_capacity, index + 1);
- if (maybe_obj->IsFailure()) return maybe_obj;
- FixedDoubleArray::cast(elements())->set(index, double_value);
- ValidateElements();
- return value;
- }
- }
-
- // Otherwise default to slow case.
- ASSERT(HasFastDoubleElements());
- ASSERT(map()->has_fast_double_elements());
- ASSERT(elements()->IsFixedDoubleArray());
- Object* obj;
- { MaybeObject* maybe_obj = NormalizeElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- ASSERT(HasDictionaryElements());
- return SetElement(index, value, NONE, strict_mode, check_prototype);
-}
-
-
-MaybeObject* JSReceiver::SetElement(uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_proto) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->SetElementWithHandler(
- this, index, value, strict_mode);
- } else {
- return JSObject::cast(this)->SetElement(
- index, value, attributes, strict_mode, check_proto);
- }
-}
-
-
-Handle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode) {
- ASSERT(!object->HasExternalArrayElements());
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetElement(index, *value, NONE, strict_mode, false),
- Object);
-}
-
-
-Handle<Object> JSObject::SetElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode,
- SetPropertyMode set_mode) {
- if (object->HasExternalArrayElements()) {
- if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
- bool has_exception;
- Handle<Object> number = Execution::ToNumber(value, &has_exception);
- if (has_exception) return Handle<Object>();
- value = number;
- }
- }
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetElement(index, *value, attr, strict_mode, true, set_mode),
- Object);
-}
-
-
-MaybeObject* JSObject::SetElement(uint32_t index,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
- Isolate* isolate = GetIsolate();
-
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return value_raw;
- }
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetElement(index,
- value_raw,
- attributes,
- strict_mode,
- check_prototype,
- set_mode);
- }
-
- // Don't allow element properties to be redefined for external arrays.
- if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
- Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[] = { handle(this, isolate), number };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
- }
-
- // Normalize the elements to enable attributes on the property.
- if ((attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) {
- SeededNumberDictionary* dictionary;
- MaybeObject* maybe_object = NormalizeElements();
- if (!maybe_object->To(&dictionary)) return maybe_object;
- // Make sure that we never go back to fast case.
- dictionary->set_requires_slow_elements();
- }
-
- if (!(FLAG_harmony_observation && map()->is_observed())) {
- return HasIndexedInterceptor()
- ? SetElementWithInterceptor(
- index, value_raw, attributes, strict_mode, check_prototype, set_mode)
- : SetElementWithoutInterceptor(
- index, value_raw, attributes, strict_mode, check_prototype, set_mode);
- }
-
- // From here on, everything has to be handlified.
- Handle<JSObject> self(this);
- Handle<Object> value(value_raw, isolate);
- PropertyAttributes old_attributes = self->GetLocalElementAttribute(index);
- Handle<Object> old_value = isolate->factory()->the_hole_value();
- Handle<Object> old_length;
-
- if (old_attributes != ABSENT) {
- if (self->GetLocalElementAccessorPair(index) == NULL)
- old_value = Object::GetElement(self, index);
- } else if (self->IsJSArray()) {
- // Store old array length in case adding an element grows the array.
- old_length = handle(Handle<JSArray>::cast(self)->length(), isolate);
- }
-
- // Check for lookup interceptor
- MaybeObject* result = self->HasIndexedInterceptor()
- ? self->SetElementWithInterceptor(
- index, *value, attributes, strict_mode, check_prototype, set_mode)
- : self->SetElementWithoutInterceptor(
- index, *value, attributes, strict_mode, check_prototype, set_mode);
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- PropertyAttributes new_attributes = self->GetLocalElementAttribute(index);
- if (old_attributes == ABSENT) {
- EnqueueChangeRecord(self, "new", name, old_value);
- if (self->IsJSArray() &&
- !old_length->SameValue(Handle<JSArray>::cast(self)->length())) {
- EnqueueChangeRecord(
- self, "updated", isolate->factory()->length_string(), old_length);
- }
- } else if (old_value->IsTheHole()) {
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
- } else {
- bool value_changed =
- !old_value->SameValue(*Object::GetElement(self, index));
- if (old_attributes != new_attributes) {
- if (!value_changed) old_value = isolate->factory()->the_hole_value();
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
- } else if (value_changed) {
- EnqueueChangeRecord(self, "updated", name, old_value);
- }
- }
-
- return *hresult;
-}
-
-
-MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
- Object* value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
- ASSERT(HasDictionaryElements() ||
- HasDictionaryArgumentsElements() ||
- (attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
- Isolate* isolate = GetIsolate();
- if (FLAG_trace_external_array_abuse &&
- IsExternalArrayElementsKind(GetElementsKind())) {
- CheckArrayAbuse(this, "external elements write", index);
- }
- if (FLAG_trace_js_array_abuse &&
- !IsExternalArrayElementsKind(GetElementsKind())) {
- if (IsJSArray()) {
- CheckArrayAbuse(this, "elements write", index, true);
- }
- }
- switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- return SetFastElement(index, value, strict_mode, check_prototype);
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return SetFastDoubleElement(index, value, strict_mode, check_prototype);
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- return pixels->SetValue(index, value);
- }
- case EXTERNAL_BYTE_ELEMENTS: {
- ExternalByteArray* array = ExternalByteArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- ExternalUnsignedByteArray* array =
- ExternalUnsignedByteArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_SHORT_ELEMENTS: {
- ExternalShortArray* array = ExternalShortArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- ExternalUnsignedShortArray* array =
- ExternalUnsignedShortArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_INT_ELEMENTS: {
- ExternalIntArray* array = ExternalIntArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- ExternalUnsignedIntArray* array =
- ExternalUnsignedIntArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_FLOAT_ELEMENTS: {
- ExternalFloatArray* array = ExternalFloatArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_DOUBLE_ELEMENTS: {
- ExternalDoubleArray* array = ExternalDoubleArray::cast(elements());
- return array->SetValue(index, value);
- }
- case DICTIONARY_ELEMENTS:
- return SetDictionaryElement(index, value, attr, strict_mode,
- check_prototype, set_mode);
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
- FixedArray* parameter_map = FixedArray::cast(elements());
- uint32_t length = parameter_map->length();
- Object* probe =
- (index < length - 2) ? parameter_map->get(index + 2) : NULL;
- if (probe != NULL && !probe->IsTheHole()) {
- Context* context = Context::cast(parameter_map->get(0));
- int context_index = Smi::cast(probe)->value();
- ASSERT(!context->get(context_index)->IsTheHole());
- context->set(context_index, value);
- // Redefining attributes of an aliased element destroys fast aliasing.
- if (set_mode == SET_PROPERTY || attr == NONE) return value;
- parameter_map->set_the_hole(index + 2);
- // For elements that are still writable we re-establish slow aliasing.
- if ((attr & READ_ONLY) == 0) {
- MaybeObject* maybe_entry =
- isolate->heap()->AllocateAliasedArgumentsEntry(context_index);
- if (!maybe_entry->ToObject(&value)) return maybe_entry;
- }
- }
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- if (arguments->IsDictionary()) {
- return SetDictionaryElement(index, value, attr, strict_mode,
- check_prototype, set_mode);
- } else {
- return SetFastElement(index, value, strict_mode, check_prototype);
- }
- }
- }
- // All possible cases have been handled above. Add a return to avoid the
- // complaints from the compiler.
- UNREACHABLE();
- return isolate->heap()->null_value();
-}
-
-
-Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->TransitionElementsKind(to_kind),
- Object);
-}
-
-
-MaybeObject* JSObject::UpdateAllocationSiteInfo(ElementsKind to_kind) {
- if (!FLAG_track_allocation_sites || !IsJSArray()) {
- return this;
- }
-
- AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(this);
- if (info == NULL) {
- return this;
- }
-
- if (info->payload()->IsJSArray()) {
- JSArray* payload = JSArray::cast(info->payload());
- ElementsKind kind = payload->GetElementsKind();
- if (AllocationSiteInfo::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
- // If the array is huge, it's not likely to be defined in a local
- // function, so we shouldn't make new instances of it very often.
- uint32_t length = 0;
- CHECK(payload->length()->ToArrayIndex(&length));
- if (length <= AllocationSiteInfo::kMaximumArrayBytesToPretransition) {
- if (FLAG_trace_track_allocation_sites) {
- PrintF(
- "AllocationSiteInfo: JSArray %p boilerplate updated %s->%s\n",
- reinterpret_cast<void*>(this),
- ElementsKindToString(kind),
- ElementsKindToString(to_kind));
- }
- return payload->TransitionElementsKind(to_kind);
- }
- }
- } else if (info->payload()->IsJSGlobalPropertyCell()) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(info->payload());
- Object* cell_contents = cell->value();
- if (cell_contents->IsSmi()) {
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(cell_contents)->value());
- if (AllocationSiteInfo::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
- if (FLAG_trace_track_allocation_sites) {
- PrintF("AllocationSiteInfo: JSArray %p info updated %s->%s\n",
- reinterpret_cast<void*>(this),
- ElementsKindToString(kind),
- ElementsKindToString(to_kind));
- }
- cell->set_value(Smi::FromInt(to_kind));
- }
- }
- }
- return this;
-}
-
-
-MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
- ASSERT(!map()->is_observed());
- ElementsKind from_kind = map()->elements_kind();
-
- if (IsFastHoleyElementsKind(from_kind)) {
- to_kind = GetHoleyElementsKind(to_kind);
- }
-
- if (from_kind == to_kind) return this;
-
- MaybeObject* maybe_failure = UpdateAllocationSiteInfo(to_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
-
- Isolate* isolate = GetIsolate();
- if (elements() == isolate->heap()->empty_fixed_array() ||
- (IsFastSmiOrObjectElementsKind(from_kind) &&
- IsFastSmiOrObjectElementsKind(to_kind)) ||
- (from_kind == FAST_DOUBLE_ELEMENTS &&
- to_kind == FAST_HOLEY_DOUBLE_ELEMENTS)) {
- ASSERT(from_kind != TERMINAL_FAST_ELEMENTS_KIND);
- // No change is needed to the elements() buffer, the transition
- // only requires a map change.
- MaybeObject* maybe_new_map = GetElementsTransitionMap(isolate, to_kind);
- Map* new_map;
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- set_map(new_map);
- if (FLAG_trace_elements_transitions) {
- FixedArrayBase* elms = FixedArrayBase::cast(elements());
- PrintElementsTransition(stdout, from_kind, elms, to_kind, elms);
- }
- return this;
- }
-
- FixedArrayBase* elms = FixedArrayBase::cast(elements());
- uint32_t capacity = static_cast<uint32_t>(elms->length());
- uint32_t length = capacity;
-
- if (IsJSArray()) {
- Object* raw_length = JSArray::cast(this)->length();
- if (raw_length->IsUndefined()) {
- // If length is undefined, then JSArray is being initialized and has no
- // elements, assume a length of zero.
- length = 0;
- } else {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
- }
- }
-
- if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- MaybeObject* maybe_result =
- SetFastDoubleElementsCapacityAndLength(capacity, length);
- if (maybe_result->IsFailure()) return maybe_result;
- ValidateElements();
- return this;
- }
-
- if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
- capacity, length, kDontAllowSmiElements);
- if (maybe_result->IsFailure()) return maybe_result;
- ValidateElements();
- return this;
- }
-
- // This method should never be called for any other case than the ones
- // handled above.
- UNREACHABLE();
- return GetIsolate()->heap()->null_value();
-}
-
-
-// static
-bool Map::IsValidElementsTransition(ElementsKind from_kind,
- ElementsKind to_kind) {
- // Transitions can't go backwards.
- if (!IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
- return false;
- }
-
- // Transitions from HOLEY -> PACKED are not allowed.
- return !IsFastHoleyElementsKind(from_kind) ||
- IsFastHoleyElementsKind(to_kind);
-}
-
-
-MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
- Object* value) {
- uint32_t old_len = 0;
- CHECK(length()->ToArrayIndex(&old_len));
- // Check to see if we need to update the length. For now, we make
- // sure that the length stays within 32-bits (unsigned).
- if (index >= old_len && index != 0xffffffff) {
- Object* len;
- { MaybeObject* maybe_len =
- GetHeap()->NumberFromDouble(static_cast<double>(index) + 1);
- if (!maybe_len->ToObject(&len)) return maybe_len;
- }
- set_length(len);
- }
- return value;
-}
-
-
-MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
- uint32_t index) {
- Isolate* isolate = GetIsolate();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate);
- Handle<Object> this_handle(receiver, isolate);
- Handle<JSObject> holder_handle(this, isolate);
- if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetter getter =
- v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(index, info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) {
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
- return *result_internal;
- }
- }
-
- Heap* heap = holder_handle->GetHeap();
- ElementsAccessor* handler = holder_handle->GetElementsAccessor();
- MaybeObject* raw_result = handler->Get(*this_handle,
- *holder_handle,
- index);
- if (raw_result != heap->the_hole_value()) return raw_result;
-
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
-
- Object* pt = holder_handle->GetPrototype();
- if (pt == heap->null_value()) return heap->undefined_value();
- return pt->GetElementWithReceiver(*this_handle, index);
-}
-
-
-bool JSObject::HasDenseElements() {
- int capacity = 0;
- int used = 0;
- GetElementsCapacityAndUsage(&capacity, &used);
- return (capacity == 0) || (used > (capacity / 2));
-}
-
-
-void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
- *capacity = 0;
- *used = 0;
-
- FixedArrayBase* backing_store_base = FixedArrayBase::cast(elements());
- FixedArray* backing_store = NULL;
- switch (GetElementsKind()) {
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- backing_store_base =
- FixedArray::cast(FixedArray::cast(backing_store_base)->get(1));
- backing_store = FixedArray::cast(backing_store_base);
- if (backing_store->IsDictionary()) {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(backing_store);
- *capacity = dictionary->Capacity();
- *used = dictionary->NumberOfElements();
- break;
- }
- // Fall through.
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- if (IsJSArray()) {
- *capacity = backing_store_base->length();
- *used = Smi::cast(JSArray::cast(this)->length())->value();
- break;
- }
- // Fall through if packing is not guaranteed.
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- backing_store = FixedArray::cast(backing_store_base);
- *capacity = backing_store->length();
- for (int i = 0; i < *capacity; ++i) {
- if (!backing_store->get(i)->IsTheHole()) ++(*used);
- }
- break;
- case DICTIONARY_ELEMENTS: {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(FixedArray::cast(elements()));
- *capacity = dictionary->Capacity();
- *used = dictionary->NumberOfElements();
- break;
- }
- case FAST_DOUBLE_ELEMENTS:
- if (IsJSArray()) {
- *capacity = backing_store_base->length();
- *used = Smi::cast(JSArray::cast(this)->length())->value();
- break;
- }
- // Fall through if packing is not guaranteed.
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
- *capacity = elms->length();
- for (int i = 0; i < *capacity; i++) {
- if (!elms->is_the_hole(i)) ++(*used);
- }
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- // External arrays are considered 100% used.
- ExternalArray* external_array = ExternalArray::cast(elements());
- *capacity = external_array->length();
- *used = external_array->length();
- break;
- }
-}
-
-
-bool JSObject::ShouldConvertToSlowElements(int new_capacity) {
- STATIC_ASSERT(kMaxUncheckedOldFastElementsLength <=
- kMaxUncheckedFastElementsLength);
- if (new_capacity <= kMaxUncheckedOldFastElementsLength ||
- (new_capacity <= kMaxUncheckedFastElementsLength &&
- GetHeap()->InNewSpace(this))) {
- return false;
- }
- // If the fast-case backing storage takes up roughly three times as
- // much space (in machine words) as a dictionary backing storage
- // would, the object should have slow elements.
- int old_capacity = 0;
- int used_elements = 0;
- GetElementsCapacityAndUsage(&old_capacity, &used_elements);
- int dictionary_size = SeededNumberDictionary::ComputeCapacity(used_elements) *
- SeededNumberDictionary::kEntrySize;
- return 3 * dictionary_size <= new_capacity;
-}
-
-
-bool JSObject::ShouldConvertToFastElements() {
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
- // If the elements are sparse, we should not go back to fast case.
- if (!HasDenseElements()) return false;
- // An object requiring access checks is never allowed to have fast
- // elements. If it had fast elements we would skip security checks.
- if (IsAccessCheckNeeded()) return false;
- // Observed objects may not go to fast mode because they rely on map checks,
- // and for fast element accesses we sometimes check element kinds only.
- if (FLAG_harmony_observation && map()->is_observed()) return false;
-
- FixedArray* elements = FixedArray::cast(this->elements());
- SeededNumberDictionary* dictionary = NULL;
- if (elements->map() == GetHeap()->non_strict_arguments_elements_map()) {
- dictionary = SeededNumberDictionary::cast(elements->get(1));
- } else {
- dictionary = SeededNumberDictionary::cast(elements);
- }
- // If an element has been added at a very high index in the elements
- // dictionary, we cannot go back to fast case.
- if (dictionary->requires_slow_elements()) return false;
- // If the dictionary backing storage takes up roughly half as much
- // space (in machine words) as a fast-case backing storage would,
- // the object should have fast elements.
- uint32_t array_size = 0;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_size));
- } else {
- array_size = dictionary->max_number_key();
- }
- uint32_t dictionary_size = static_cast<uint32_t>(dictionary->Capacity()) *
- SeededNumberDictionary::kEntrySize;
- return 2 * dictionary_size >= array_size;
-}
-
-
-bool JSObject::ShouldConvertToFastDoubleElements(
- bool* has_smi_only_elements) {
- *has_smi_only_elements = false;
- if (FLAG_unbox_double_arrays) {
- ASSERT(HasDictionaryElements());
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(elements());
- bool found_double = false;
- for (int i = 0; i < dictionary->Capacity(); i++) {
- Object* key = dictionary->KeyAt(i);
- if (key->IsNumber()) {
- Object* value = dictionary->ValueAt(i);
- if (!value->IsNumber()) return false;
- if (!value->IsSmi()) {
- found_double = true;
- }
- }
- }
- *has_smi_only_elements = !found_double;
- return found_double;
- } else {
- return false;
- }
-}
-
-
-// Certain compilers request function template instantiation when they
-// see the definition of the other template functions in the
-// class. This requires us to have the template functions put
-// together, so even though this function belongs in objects-debug.cc,
-// we keep it here instead to satisfy certain compilers.
-#ifdef OBJECT_PRINT
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::Print(FILE* out) {
- int capacity = HashTable<Shape, Key>::Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
- FPrintF(out, " ");
- if (k->IsString()) {
- String::cast(k)->StringPrint(out);
- } else {
- k->ShortPrint(out);
- }
- FPrintF(out, ": ");
- ValueAt(i)->ShortPrint(out);
- FPrintF(out, "\n");
- }
- }
-}
-#endif
-
-
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::CopyValuesTo(FixedArray* elements) {
- int pos = 0;
- int capacity = HashTable<Shape, Key>::Capacity();
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < capacity; i++) {
- Object* k = Dictionary<Shape, Key>::KeyAt(i);
- if (Dictionary<Shape, Key>::IsKey(k)) {
- elements->set(pos++, ValueAt(i), mode);
- }
- }
- ASSERT(pos == elements->length());
-}
-
-
-InterceptorInfo* JSObject::GetNamedInterceptor() {
- ASSERT(map()->has_named_interceptor());
- JSFunction* constructor = JSFunction::cast(map()->constructor());
- ASSERT(constructor->shared()->IsApiFunction());
- Object* result =
- constructor->shared()->get_api_func_data()->named_property_handler();
- return InterceptorInfo::cast(result);
-}
-
-
-InterceptorInfo* JSObject::GetIndexedInterceptor() {
- ASSERT(map()->has_indexed_interceptor());
- JSFunction* constructor = JSFunction::cast(map()->constructor());
- ASSERT(constructor->shared()->IsApiFunction());
- Object* result =
- constructor->shared()->get_api_func_data()->indexed_property_handler();
- return InterceptorInfo::cast(result);
-}
-
-
-MaybeObject* JSObject::GetPropertyPostInterceptor(
- Object* receiver,
- String* name,
- PropertyAttributes* attributes) {
- // Check local property in holder, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (result.IsFound()) {
- return GetProperty(receiver, &result, name, attributes);
- }
- // Continue searching via the prototype chain.
- Object* pt = GetPrototype();
- *attributes = ABSENT;
- if (pt->IsNull()) return GetHeap()->undefined_value();
- return pt->GetPropertyWithReceiver(receiver, name, attributes);
-}
-
-
-MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
- Object* receiver,
- String* name,
- PropertyAttributes* attributes) {
- // Check local property in holder, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (result.IsFound()) {
- return GetProperty(receiver, &result, name, attributes);
- }
- return GetHeap()->undefined_value();
-}
-
-
-MaybeObject* JSObject::GetPropertyWithInterceptor(
- Object* receiver,
- String* name,
- PropertyAttributes* attributes) {
- Isolate* isolate = GetIsolate();
- InterceptorInfo* interceptor = GetNamedInterceptor();
- HandleScope scope(isolate);
- Handle<Object> receiver_handle(receiver, isolate);
- Handle<JSObject> holder_handle(this);
- Handle<String> name_handle(name);
-
- if (!interceptor->getter()->IsUndefined()) {
- v8::NamedPropertyGetter getter =
- v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(v8::Utils::ToLocal(name_handle), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) {
- *attributes = NONE;
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
- return *result_internal;
- }
- }
-
- MaybeObject* result = holder_handle->GetPropertyPostInterceptor(
- *receiver_handle,
- *name_handle,
- attributes);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return result;
-}
-
-
-bool JSObject::HasRealNamedProperty(String* key) {
- // Check access rights if needed.
- Isolate* isolate = GetIsolate();
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
- }
- }
-
- LookupResult result(isolate);
- LocalLookupRealNamedProperty(key, &result);
- return result.IsFound() && !result.IsInterceptor();
-}
-
-
-bool JSObject::HasRealElementProperty(uint32_t index) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
- }
- }
-
- // Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) return true;
-
- switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- return (index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole();
- }
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
- return (index < length) &&
- !FixedDoubleArray::cast(elements())->is_the_hole(index);
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- return index < static_cast<uint32_t>(pixels->length());
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS: {
- ExternalArray* array = ExternalArray::cast(elements());
- return index < static_cast<uint32_t>(array->length());
- }
- case DICTIONARY_ELEMENTS: {
- return element_dictionary()->FindEntry(index)
- != SeededNumberDictionary::kNotFound;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNIMPLEMENTED();
- break;
- }
- // All possibilities have been handled above already.
- UNREACHABLE();
- return GetHeap()->null_value();
-}
-
-
-bool JSObject::HasRealNamedCallbackProperty(String* key) {
- // Check access rights if needed.
- Isolate* isolate = GetIsolate();
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
- }
- }
-
- LookupResult result(isolate);
- LocalLookupRealNamedProperty(key, &result);
- return result.IsPropertyCallbacks();
-}
-
-
-int JSObject::NumberOfLocalProperties(PropertyAttributes filter) {
- if (HasFastProperties()) {
- Map* map = this->map();
- if (filter == NONE) return map->NumberOfOwnDescriptors();
- if (filter == DONT_ENUM) {
- int result = map->EnumLength();
- if (result != Map::kInvalidEnumCache) return result;
- }
- return map->NumberOfDescribedProperties(OWN_DESCRIPTORS, filter);
- }
- return property_dictionary()->NumberOfElementsFilterAttributes(filter);
-}
-
-
-void FixedArray::SwapPairs(FixedArray* numbers, int i, int j) {
- Object* temp = get(i);
- set(i, get(j));
- set(j, temp);
- if (this != numbers) {
- temp = numbers->get(i);
- numbers->set(i, Smi::cast(numbers->get(j)));
- numbers->set(j, Smi::cast(temp));
- }
-}
-
-
-static void InsertionSortPairs(FixedArray* content,
- FixedArray* numbers,
- int len) {
- for (int i = 1; i < len; i++) {
- int j = i;
- while (j > 0 &&
- (NumberToUint32(numbers->get(j - 1)) >
- NumberToUint32(numbers->get(j)))) {
- content->SwapPairs(numbers, j - 1, j);
- j--;
- }
- }
-}
-
-
-void HeapSortPairs(FixedArray* content, FixedArray* numbers, int len) {
- // In-place heap sort.
- ASSERT(content->length() == numbers->length());
-
- // Bottom-up max-heap construction.
- for (int i = 1; i < len; ++i) {
- int child_index = i;
- while (child_index > 0) {
- int parent_index = ((child_index + 1) >> 1) - 1;
- uint32_t parent_value = NumberToUint32(numbers->get(parent_index));
- uint32_t child_value = NumberToUint32(numbers->get(child_index));
- if (parent_value < child_value) {
- content->SwapPairs(numbers, parent_index, child_index);
- } else {
- break;
- }
- child_index = parent_index;
- }
- }
-
- // Extract elements and create sorted array.
- for (int i = len - 1; i > 0; --i) {
- // Put max element at the back of the array.
- content->SwapPairs(numbers, 0, i);
- // Sift down the new top element.
- int parent_index = 0;
- while (true) {
- int child_index = ((parent_index + 1) << 1) - 1;
- if (child_index >= i) break;
- uint32_t child1_value = NumberToUint32(numbers->get(child_index));
- uint32_t child2_value = NumberToUint32(numbers->get(child_index + 1));
- uint32_t parent_value = NumberToUint32(numbers->get(parent_index));
- if (child_index + 1 >= i || child1_value > child2_value) {
- if (parent_value > child1_value) break;
- content->SwapPairs(numbers, parent_index, child_index);
- parent_index = child_index;
- } else {
- if (parent_value > child2_value) break;
- content->SwapPairs(numbers, parent_index, child_index + 1);
- parent_index = child_index + 1;
- }
- }
- }
-}
-
-
-// Sort this array and the numbers as pairs wrt. the (distinct) numbers.
-void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
- ASSERT(this->length() == numbers->length());
- // For small arrays, simply use insertion sort.
- if (len <= 10) {
- InsertionSortPairs(this, numbers, len);
- return;
- }
- // Check the range of indices.
- uint32_t min_index = NumberToUint32(numbers->get(0));
- uint32_t max_index = min_index;
- uint32_t i;
- for (i = 1; i < len; i++) {
- if (NumberToUint32(numbers->get(i)) < min_index) {
- min_index = NumberToUint32(numbers->get(i));
- } else if (NumberToUint32(numbers->get(i)) > max_index) {
- max_index = NumberToUint32(numbers->get(i));
- }
- }
- if (max_index - min_index + 1 == len) {
- // Indices form a contiguous range, unless there are duplicates.
- // Do an in-place linear time sort assuming distinct numbers, but
- // avoid hanging in case they are not.
- for (i = 0; i < len; i++) {
- uint32_t p;
- uint32_t j = 0;
- // While the current element at i is not at its correct position p,
- // swap the elements at these two positions.
- while ((p = NumberToUint32(numbers->get(i)) - min_index) != i &&
- j++ < len) {
- SwapPairs(numbers, i, p);
- }
- }
- } else {
- HeapSortPairs(this, numbers, len);
- return;
- }
-}
-
-
-// Fill in the names of local properties into the supplied storage. The main
-// purpose of this function is to provide reflection information for the object
-// mirrors.
-void JSObject::GetLocalPropertyNames(FixedArray* storage, int index) {
- ASSERT(storage->length() >= (NumberOfLocalProperties() - index));
- if (HasFastProperties()) {
- int real_size = map()->NumberOfOwnDescriptors();
- DescriptorArray* descs = map()->instance_descriptors();
- ASSERT(storage->length() >= index + real_size);
- for (int i = 0; i < real_size; i++) {
- storage->set(index + i, descs->GetKey(i));
- }
- } else {
- property_dictionary()->CopyKeysTo(storage,
- index,
- StringDictionary::UNSORTED);
- }
-}
-
-
-int JSObject::NumberOfLocalElements(PropertyAttributes filter) {
- return GetLocalElementKeys(NULL, filter);
-}
-
-
-int JSObject::NumberOfEnumElements() {
- // Fast case for objects with no elements.
- if (!IsJSValue() && HasFastObjectElements()) {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if (length == 0) return 0;
- }
- // Compute the number of enumerable elements.
- return NumberOfLocalElements(static_cast<PropertyAttributes>(DONT_ENUM));
-}
-
-
-int JSObject::GetLocalElementKeys(FixedArray* storage,
- PropertyAttributes filter) {
- int counter = 0;
- switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- int length = IsJSArray() ?
- Smi::cast(JSArray::cast(this)->length())->value() :
- FixedArray::cast(elements())->length();
- for (int i = 0; i < length; i++) {
- if (!FixedArray::cast(elements())->get(i)->IsTheHole()) {
- if (storage != NULL) {
- storage->set(counter, Smi::FromInt(i));
- }
- counter++;
- }
- }
- ASSERT(!storage || storage->length() >= counter);
- break;
- }
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- int length = IsJSArray() ?
- Smi::cast(JSArray::cast(this)->length())->value() :
- FixedDoubleArray::cast(elements())->length();
- for (int i = 0; i < length; i++) {
- if (!FixedDoubleArray::cast(elements())->is_the_hole(i)) {
- if (storage != NULL) {
- storage->set(counter, Smi::FromInt(i));
- }
- counter++;
- }
- }
- ASSERT(!storage || storage->length() >= counter);
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- int length = ExternalPixelArray::cast(elements())->length();
- while (counter < length) {
- if (storage != NULL) {
- storage->set(counter, Smi::FromInt(counter));
- }
- counter++;
- }
- ASSERT(!storage || storage->length() >= counter);
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS: {
- int length = ExternalArray::cast(elements())->length();
- while (counter < length) {
- if (storage != NULL) {
- storage->set(counter, Smi::FromInt(counter));
- }
- counter++;
- }
- ASSERT(!storage || storage->length() >= counter);
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (storage != NULL) {
- element_dictionary()->CopyKeysTo(storage,
- filter,
- SeededNumberDictionary::SORTED);
- }
- counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
- break;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
- FixedArray* parameter_map = FixedArray::cast(elements());
- int mapped_length = parameter_map->length() - 2;
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- if (arguments->IsDictionary()) {
- // Copy the keys from arguments first, because Dictionary::CopyKeysTo
- // will insert in storage starting at index 0.
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(arguments);
- if (storage != NULL) {
- dictionary->CopyKeysTo(
- storage, filter, SeededNumberDictionary::UNSORTED);
- }
- counter += dictionary->NumberOfElementsFilterAttributes(filter);
- for (int i = 0; i < mapped_length; ++i) {
- if (!parameter_map->get(i + 2)->IsTheHole()) {
- if (storage != NULL) storage->set(counter, Smi::FromInt(i));
- ++counter;
- }
- }
- if (storage != NULL) storage->SortPairs(storage, counter);
-
- } else {
- int backing_length = arguments->length();
- int i = 0;
- for (; i < mapped_length; ++i) {
- if (!parameter_map->get(i + 2)->IsTheHole()) {
- if (storage != NULL) storage->set(counter, Smi::FromInt(i));
- ++counter;
- } else if (i < backing_length && !arguments->get(i)->IsTheHole()) {
- if (storage != NULL) storage->set(counter, Smi::FromInt(i));
- ++counter;
- }
- }
- for (; i < backing_length; ++i) {
- if (storage != NULL) storage->set(counter, Smi::FromInt(i));
- ++counter;
- }
- }
- break;
- }
- }
-
- if (this->IsJSValue()) {
- Object* val = JSValue::cast(this)->value();
- if (val->IsString()) {
- String* str = String::cast(val);
- if (storage) {
- for (int i = 0; i < str->length(); i++) {
- storage->set(counter + i, Smi::FromInt(i));
- }
- }
- counter += str->length();
- }
- }
- ASSERT(!storage || storage->length() == counter);
- return counter;
-}
-
-
-int JSObject::GetEnumElementKeys(FixedArray* storage) {
- return GetLocalElementKeys(storage,
- static_cast<PropertyAttributes>(DONT_ENUM));
-}
-
-
-// StringKey simply carries a string object as key.
-class StringKey : public HashTableKey {
- public:
- explicit StringKey(String* string) :
- string_(string),
- hash_(HashForObject(string)) { }
-
- bool IsMatch(Object* string) {
- // We know that all entries in a hash table had their hash keys created.
- // Use that knowledge to have fast failure.
- if (hash_ != HashForObject(string)) {
- return false;
- }
- return string_->Equals(String::cast(string));
- }
-
- uint32_t Hash() { return hash_; }
-
- uint32_t HashForObject(Object* other) { return String::cast(other)->Hash(); }
-
- Object* AsObject() { return string_; }
-
- String* string_;
- uint32_t hash_;
-};
-
-
-// StringSharedKeys are used as keys in the eval cache.
-class StringSharedKey : public HashTableKey {
- public:
- StringSharedKey(String* source,
- SharedFunctionInfo* shared,
- LanguageMode language_mode,
- int scope_position)
- : source_(source),
- shared_(shared),
- language_mode_(language_mode),
- scope_position_(scope_position) { }
-
- bool IsMatch(Object* other) {
- if (!other->IsFixedArray()) return false;
- FixedArray* other_array = FixedArray::cast(other);
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
- if (shared != shared_) return false;
- int language_unchecked = Smi::cast(other_array->get(2))->value();
- ASSERT(language_unchecked == CLASSIC_MODE ||
- language_unchecked == STRICT_MODE ||
- language_unchecked == EXTENDED_MODE);
- LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
- if (language_mode != language_mode_) return false;
- int scope_position = Smi::cast(other_array->get(3))->value();
- if (scope_position != scope_position_) return false;
- String* source = String::cast(other_array->get(1));
- return source->Equals(source_);
- }
-
- static uint32_t StringSharedHashHelper(String* source,
- SharedFunctionInfo* shared,
- LanguageMode language_mode,
- int scope_position) {
- uint32_t hash = source->Hash();
- if (shared->HasSourceCode()) {
- // Instead of using the SharedFunctionInfo pointer in the hash
- // code computation, we use a combination of the hash of the
- // script source code and the start position of the calling scope.
- // We do this to ensure that the cache entries can survive garbage
- // collection.
- Script* script = Script::cast(shared->script());
- hash ^= String::cast(script->source())->Hash();
- if (language_mode == STRICT_MODE) hash ^= 0x8000;
- if (language_mode == EXTENDED_MODE) hash ^= 0x0080;
- hash += scope_position;
- }
- return hash;
- }
-
- uint32_t Hash() {
- return StringSharedHashHelper(
- source_, shared_, language_mode_, scope_position_);
- }
-
- uint32_t HashForObject(Object* obj) {
- FixedArray* other_array = FixedArray::cast(obj);
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
- String* source = String::cast(other_array->get(1));
- int language_unchecked = Smi::cast(other_array->get(2))->value();
- ASSERT(language_unchecked == CLASSIC_MODE ||
- language_unchecked == STRICT_MODE ||
- language_unchecked == EXTENDED_MODE);
- LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
- int scope_position = Smi::cast(other_array->get(3))->value();
- return StringSharedHashHelper(
- source, shared, language_mode, scope_position);
- }
-
- MUST_USE_RESULT MaybeObject* AsObject() {
- Object* obj;
- { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(4);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* other_array = FixedArray::cast(obj);
- other_array->set(0, shared_);
- other_array->set(1, source_);
- other_array->set(2, Smi::FromInt(language_mode_));
- other_array->set(3, Smi::FromInt(scope_position_));
- return other_array;
- }
-
- private:
- String* source_;
- SharedFunctionInfo* shared_;
- LanguageMode language_mode_;
- int scope_position_;
-};
-
-
-// RegExpKey carries the source and flags of a regular expression as key.
-class RegExpKey : public HashTableKey {
- public:
- RegExpKey(String* string, JSRegExp::Flags flags)
- : string_(string),
- flags_(Smi::FromInt(flags.value())) { }
-
- // Rather than storing the key in the hash table, a pointer to the
- // stored value is stored where the key should be. IsMatch then
- // compares the search key to the found object, rather than comparing
- // a key to a key.
- bool IsMatch(Object* obj) {
- FixedArray* val = FixedArray::cast(obj);
- return string_->Equals(String::cast(val->get(JSRegExp::kSourceIndex)))
- && (flags_ == val->get(JSRegExp::kFlagsIndex));
- }
-
- uint32_t Hash() { return RegExpHash(string_, flags_); }
-
- Object* AsObject() {
- // Plain hash maps, which is where regexp keys are used, don't
- // use this function.
- UNREACHABLE();
- return NULL;
- }
-
- uint32_t HashForObject(Object* obj) {
- FixedArray* val = FixedArray::cast(obj);
- return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)),
- Smi::cast(val->get(JSRegExp::kFlagsIndex)));
- }
-
- static uint32_t RegExpHash(String* string, Smi* flags) {
- return string->Hash() + flags->value();
- }
-
- String* string_;
- Smi* flags_;
-};
-
-// Utf8StringKey carries a vector of chars as key.
-class Utf8StringKey : public HashTableKey {
- public:
- explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
- : string_(string), hash_field_(0), seed_(seed) { }
-
- bool IsMatch(Object* string) {
- return String::cast(string)->IsUtf8EqualTo(string_);
- }
-
- uint32_t Hash() {
- if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
- hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
- uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
-
- uint32_t HashForObject(Object* other) {
- return String::cast(other)->Hash();
- }
-
- MaybeObject* AsObject() {
- if (hash_field_ == 0) Hash();
- return Isolate::Current()->heap()->AllocateInternalizedStringFromUtf8(
- string_, chars_, hash_field_);
- }
-
- Vector<const char> string_;
- uint32_t hash_field_;
- int chars_; // Caches the number of characters when computing the hash code.
- uint32_t seed_;
-};
-
-
-template <typename Char>
-class SequentialStringKey : public HashTableKey {
- public:
- explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
- : string_(string), hash_field_(0), seed_(seed) { }
-
- uint32_t Hash() {
- hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
- string_.length(),
- seed_);
-
- uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
-
-
- uint32_t HashForObject(Object* other) {
- return String::cast(other)->Hash();
- }
-
- Vector<const Char> string_;
- uint32_t hash_field_;
- uint32_t seed_;
-};
-
-
-
-class OneByteStringKey : public SequentialStringKey<uint8_t> {
- public:
- OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
- : SequentialStringKey<uint8_t>(str, seed) { }
-
- bool IsMatch(Object* string) {
- return String::cast(string)->IsOneByteEqualTo(string_);
- }
-
- MaybeObject* AsObject() {
- if (hash_field_ == 0) Hash();
- MaybeObject* result = HEAP->AllocateOneByteInternalizedString(string_, hash_field_);
- if (!result->IsFailure() && result->ToObjectUnchecked()->IsSeqString()) {
- while (true) {
- Atomic32 my_symbol_id = next_symbol_id;
- if (my_symbol_id > Smi::kMaxValue)
- break;
- if (my_symbol_id == NoBarrier_CompareAndSwap(&next_symbol_id,
- my_symbol_id,
- my_symbol_id + 1)) {
- SeqString::cast(result->ToObjectUnchecked())->
- set_symbol_id(my_symbol_id);
- break;
- }
- }
- }
- return result;
- }
-
- static Atomic32 next_symbol_id;
-};
-Atomic32 OneByteStringKey::next_symbol_id = 1;
-
-
-class SubStringOneByteStringKey : public HashTableKey {
- public:
- explicit SubStringOneByteStringKey(Handle<SeqOneByteString> string,
- int from,
- int length)
- : string_(string), from_(from), length_(length) { }
-
- uint32_t Hash() {
- ASSERT(length_ >= 0);
- ASSERT(from_ + length_ <= string_->length());
- uint8_t* chars = string_->GetChars() + from_;
- hash_field_ = StringHasher::HashSequentialString(
- chars, length_, string_->GetHeap()->HashSeed());
- uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
-
-
- uint32_t HashForObject(Object* other) {
- return String::cast(other)->Hash();
- }
-
- bool IsMatch(Object* string) {
- Vector<const uint8_t> chars(string_->GetChars() + from_, length_);
- return String::cast(string)->IsOneByteEqualTo(chars);
- }
-
- MaybeObject* AsObject() {
- if (hash_field_ == 0) Hash();
- Vector<const uint8_t> chars(string_->GetChars() + from_, length_);
- return HEAP->AllocateOneByteInternalizedString(chars, hash_field_);
- }
-
- private:
- Handle<SeqOneByteString> string_;
- int from_;
- int length_;
- uint32_t hash_field_;
-};
-
-
-class TwoByteStringKey : public SequentialStringKey<uc16> {
- public:
- explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
- : SequentialStringKey<uc16>(str, seed) { }
-
- bool IsMatch(Object* string) {
- return String::cast(string)->IsTwoByteEqualTo(string_);
- }
-
- MaybeObject* AsObject() {
- if (hash_field_ == 0) Hash();
- return HEAP->AllocateTwoByteInternalizedString(string_, hash_field_);
- }
-};
-
-
-// InternalizedStringKey carries a string/internalized-string object as key.
-class InternalizedStringKey : public HashTableKey {
- public:
- explicit InternalizedStringKey(String* string)
- : string_(string) { }
-
- bool IsMatch(Object* string) {
- return String::cast(string)->Equals(string_);
- }
-
- uint32_t Hash() { return string_->Hash(); }
-
- uint32_t HashForObject(Object* other) {
- return String::cast(other)->Hash();
- }
-
- MaybeObject* AsObject() {
- // Attempt to flatten the string, so that internalized strings will most
- // often be flat strings.
- string_ = string_->TryFlattenGetString();
- Heap* heap = string_->GetHeap();
- // Internalize the string if possible.
- Map* map = heap->InternalizedStringMapForString(string_);
- if (map != NULL) {
- string_->set_map_no_write_barrier(map);
- ASSERT(string_->IsInternalizedString());
- SeqString::cast(string_)->set_symbol_id(0);
- return string_;
- }
- // Otherwise allocate a new internalized string.
- return heap->AllocateInternalizedStringImpl(
- string_, string_->length(), string_->hash_field());
- }
-
- static uint32_t StringHash(Object* obj) {
- return String::cast(obj)->Hash();
- }
-
- String* string_;
-};
-
-
-template<typename Shape, typename Key>
-void HashTable<Shape, Key>::IteratePrefix(ObjectVisitor* v) {
- IteratePointers(v, 0, kElementsStartOffset);
-}
-
-
-template<typename Shape, typename Key>
-void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) {
- IteratePointers(v,
- kElementsStartOffset,
- kHeaderSize + length() * kPointerSize);
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for,
- MinimumCapacity capacity_option,
- PretenureFlag pretenure) {
- ASSERT(!capacity_option || IS_POWER_OF_TWO(at_least_space_for));
- int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY)
- ? at_least_space_for
- : ComputeCapacity(at_least_space_for);
- if (capacity > HashTable::kMaxCapacity) {
- return Failure::OutOfMemoryException(0x10);
- }
-
- Object* obj;
- { MaybeObject* maybe_obj = Isolate::Current()->heap()->
- AllocateHashTable(EntryToIndex(capacity), pretenure);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- HashTable::cast(obj)->SetNumberOfElements(0);
- HashTable::cast(obj)->SetNumberOfDeletedElements(0);
- HashTable::cast(obj)->SetCapacity(capacity);
- return obj;
-}
-
-
-// Find entry for key otherwise return kNotFound.
-int StringDictionary::FindEntry(String* key) {
- if (!key->IsInternalizedString()) {
- return HashTable<StringDictionaryShape, String*>::FindEntry(key);
- }
-
- // Optimized for internalized string key. Knowledge of the key type allows:
- // 1. Move the check if the key is internalized out of the loop.
- // 2. Avoid comparing hash codes in internalized-to-internalized comparison.
- // 3. Detect a case when a dictionary key is not internalized but the key is.
- // In case of positive result the dictionary key may be replaced by the
- // internalized string with minimal performance penalty. It gives a chance
- // to perform further lookups in code stubs (and significant performance
- // boost a certain style of code).
-
- // EnsureCapacity will guarantee the hash table is never full.
- uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(key->Hash(), capacity);
- uint32_t count = 1;
-
- while (true) {
- int index = EntryToIndex(entry);
- Object* element = get(index);
- if (element->IsUndefined()) break; // Empty entry.
- if (key == element) return entry;
- if (!element->IsInternalizedString() &&
- !element->IsTheHole() &&
- String::cast(element)->Equals(key)) {
- // Replace a key that is not an internalized string by the equivalent
- // internalized string for faster further lookups.
- set(index, key);
- return entry;
- }
- ASSERT(element->IsTheHole() || !String::cast(element)->Equals(key));
- entry = NextProbe(entry, count++, capacity);
- }
- return kNotFound;
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
- ASSERT(NumberOfElements() < new_table->Capacity());
-
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
-
- // Copy prefix to new array.
- for (int i = kPrefixStartIndex;
- i < kPrefixStartIndex + Shape::kPrefixSize;
- i++) {
- new_table->set(i, get(i), mode);
- }
-
- // Rehash the elements.
- int capacity = Capacity();
- for (int i = 0; i < capacity; i++) {
- uint32_t from_index = EntryToIndex(i);
- Object* k = get(from_index);
- if (IsKey(k)) {
- uint32_t hash = HashTable<Shape, Key>::HashForObject(key, k);
- uint32_t insertion_index =
- EntryToIndex(new_table->FindInsertionEntry(hash));
- for (int j = 0; j < Shape::kEntrySize; j++) {
- new_table->set(insertion_index + j, get(from_index + j), mode);
- }
- }
- }
- new_table->SetNumberOfElements(NumberOfElements());
- new_table->SetNumberOfDeletedElements(0);
- return new_table;
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
- int capacity = Capacity();
- int nof = NumberOfElements() + n;
- int nod = NumberOfDeletedElements();
- // Return if:
- // 50% is still free after adding n elements and
- // at most 50% of the free elements are deleted elements.
- if (nod <= (capacity - nof) >> 1) {
- int needed_free = nof >> 1;
- if (nof + needed_free <= capacity) return this;
- }
-
- const int kMinCapacityForPretenure = 256;
- bool pretenure =
- (capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this);
- Object* obj;
- { MaybeObject* maybe_obj =
- Allocate(nof * 2,
- USE_DEFAULT_MINIMUM_CAPACITY,
- pretenure ? TENURED : NOT_TENURED);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- return Rehash(HashTable::cast(obj), key);
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::Shrink(Key key) {
- int capacity = Capacity();
- int nof = NumberOfElements();
-
- // Shrink to fit the number of elements if only a quarter of the
- // capacity is filled with elements.
- if (nof > (capacity >> 2)) return this;
- // Allocate a new dictionary with room for at least the current
- // number of elements. The allocation method will make sure that
- // there is extra room in the dictionary for additions. Don't go
- // lower than room for 16 elements.
- int at_least_room_for = nof;
- if (at_least_room_for < 16) return this;
-
- const int kMinCapacityForPretenure = 256;
- bool pretenure =
- (at_least_room_for > kMinCapacityForPretenure) &&
- !GetHeap()->InNewSpace(this);
- Object* obj;
- { MaybeObject* maybe_obj =
- Allocate(at_least_room_for,
- USE_DEFAULT_MINIMUM_CAPACITY,
- pretenure ? TENURED : NOT_TENURED);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- return Rehash(HashTable::cast(obj), key);
-}
-
-
-template<typename Shape, typename Key>
-uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
- uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(hash, capacity);
- uint32_t count = 1;
- // EnsureCapacity will guarantee the hash table is never full.
- while (true) {
- Object* element = KeyAt(entry);
- if (element->IsUndefined() || element->IsTheHole()) break;
- entry = NextProbe(entry, count++, capacity);
- }
- return entry;
-}
-
-// Force instantiation of template instances class.
-// Please note this list is compiler dependent.
-
-template class HashTable<StringTableShape, HashTableKey*>;
-
-template class HashTable<CompilationCacheShape, HashTableKey*>;
-
-template class HashTable<MapCacheShape, HashTableKey*>;
-
-template class HashTable<ObjectHashTableShape<1>, Object*>;
-
-template class HashTable<ObjectHashTableShape<2>, Object*>;
-
-template class Dictionary<StringDictionaryShape, String*>;
-
-template class Dictionary<SeededNumberDictionaryShape, uint32_t>;
-
-template class Dictionary<UnseededNumberDictionaryShape, uint32_t>;
-
-#ifndef __INTEL_COMPILER
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
- Allocate(int at_least_space_for);
-
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
- Allocate(int at_least_space_for);
-
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::Allocate(
- int);
-
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::AtPut(
- uint32_t, Object*);
-
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
- AtPut(uint32_t, Object*);
-
-template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>::
- SlowReverseLookup(Object* value);
-
-template Object* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
- SlowReverseLookup(Object* value);
-
-template Object* Dictionary<StringDictionaryShape, String*>::SlowReverseLookup(
- Object*);
-
-template void Dictionary<SeededNumberDictionaryShape, uint32_t>::CopyKeysTo(
- FixedArray*,
- PropertyAttributes,
- Dictionary<SeededNumberDictionaryShape, uint32_t>::SortMode);
-
-template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty(
- int, JSObject::DeleteMode);
-
-template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>::
- DeleteProperty(int, JSObject::DeleteMode);
-
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::Shrink(
- String*);
-
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Shrink(
- uint32_t);
-
-template void Dictionary<StringDictionaryShape, String*>::CopyKeysTo(
- FixedArray*,
- int,
- Dictionary<StringDictionaryShape, String*>::SortMode);
-
-template int
-Dictionary<StringDictionaryShape, String*>::NumberOfElementsFilterAttributes(
- PropertyAttributes);
-
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::Add(
- String*, Object*, PropertyDetails);
-
-template MaybeObject*
-Dictionary<StringDictionaryShape, String*>::GenerateNewEnumerationIndices();
-
-template int
-Dictionary<SeededNumberDictionaryShape, uint32_t>::
- NumberOfElementsFilterAttributes(PropertyAttributes);
-
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Add(
- uint32_t, Object*, PropertyDetails);
-
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::Add(
- uint32_t, Object*, PropertyDetails);
-
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
- EnsureCapacity(int, uint32_t);
-
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
- EnsureCapacity(int, uint32_t);
-
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::
- EnsureCapacity(int, String*);
-
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
- AddEntry(uint32_t, Object*, PropertyDetails, uint32_t);
-
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
- AddEntry(uint32_t, Object*, PropertyDetails, uint32_t);
-
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::AddEntry(
- String*, Object*, PropertyDetails, uint32_t);
-
-template
-int Dictionary<SeededNumberDictionaryShape, uint32_t>::NumberOfEnumElements();
-
-template
-int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements();
-
-template
-int HashTable<SeededNumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
-#endif
-
-// Collates undefined and unexisting elements below limit from position
-// zero of the elements. The object stays in Dictionary mode.
-MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
- ASSERT(HasDictionaryElements());
- // Must stay in dictionary mode, either because of requires_slow_elements,
- // or because we are not going to sort (and therefore compact) all of the
- // elements.
- SeededNumberDictionary* dict = element_dictionary();
- HeapNumber* result_double = NULL;
- if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Allocate space for result before we start mutating the object.
- Object* new_double;
- { MaybeObject* maybe_new_double = GetHeap()->AllocateHeapNumber(0.0);
- if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
- }
- result_double = HeapNumber::cast(new_double);
- }
-
- Object* obj;
- { MaybeObject* maybe_obj =
- SeededNumberDictionary::Allocate(dict->NumberOfElements());
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- SeededNumberDictionary* new_dict = SeededNumberDictionary::cast(obj);
-
- AssertNoAllocation no_alloc;
-
- uint32_t pos = 0;
- uint32_t undefs = 0;
- int capacity = dict->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = dict->KeyAt(i);
- if (dict->IsKey(k)) {
- ASSERT(k->IsNumber());
- ASSERT(!k->IsSmi() || Smi::cast(k)->value() >= 0);
- ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0);
- ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32);
- Object* value = dict->ValueAt(i);
- PropertyDetails details = dict->DetailsAt(i);
- if (details.type() == CALLBACKS || details.IsReadOnly()) {
- // Bail out and do the sorting of undefineds and array holes in JS.
- // Also bail out if the element is not supposed to be moved.
- return Smi::FromInt(-1);
- }
- uint32_t key = NumberToUint32(k);
- // In the following we assert that adding the entry to the new dictionary
- // does not cause GC. This is the case because we made sure to allocate
- // the dictionary big enough above, so it need not grow.
- if (key < limit) {
- if (value->IsUndefined()) {
- undefs++;
- } else {
- if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Adding an entry with the key beyond smi-range requires
- // allocation. Bailout.
- return Smi::FromInt(-1);
- }
- new_dict->AddNumberEntry(pos, value, details)->ToObjectUnchecked();
- pos++;
- }
- } else {
- if (key > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Adding an entry with the key beyond smi-range requires
- // allocation. Bailout.
- return Smi::FromInt(-1);
- }
- new_dict->AddNumberEntry(key, value, details)->ToObjectUnchecked();
- }
- }
- }
-
- uint32_t result = pos;
- PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
- Heap* heap = GetHeap();
- while (undefs > 0) {
- if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Adding an entry with the key beyond smi-range requires
- // allocation. Bailout.
- return Smi::FromInt(-1);
- }
- new_dict->AddNumberEntry(pos, heap->undefined_value(), no_details)->
- ToObjectUnchecked();
- pos++;
- undefs--;
- }
-
- set_elements(new_dict);
-
- if (result <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Smi::FromInt(static_cast<int>(result));
- }
-
- ASSERT_NE(NULL, result_double);
- result_double->set_value(static_cast<double>(result));
- return result_double;
-}
-
-
-// Collects all defined (non-hole) and non-undefined (array) elements at
-// the start of the elements array.
-// If the object is in dictionary mode, it is converted to fast elements
-// mode.
-MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
- Heap* heap = GetHeap();
-
- if (HasDictionaryElements()) {
- // Convert to fast elements containing only the existing properties.
- // Ordering is irrelevant, since we are going to sort anyway.
- SeededNumberDictionary* dict = element_dictionary();
- if (IsJSArray() || dict->requires_slow_elements() ||
- dict->max_number_key() >= limit) {
- return PrepareSlowElementsForSort(limit);
- }
- // Convert to fast elements.
-
- Object* obj;
- MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
- FAST_HOLEY_ELEMENTS);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- Map* new_map = Map::cast(obj);
-
- PretenureFlag tenure = heap->InNewSpace(this) ? NOT_TENURED: TENURED;
- Object* new_array;
- { MaybeObject* maybe_new_array =
- heap->AllocateFixedArray(dict->NumberOfElements(), tenure);
- if (!maybe_new_array->ToObject(&new_array)) return maybe_new_array;
- }
- FixedArray* fast_elements = FixedArray::cast(new_array);
- dict->CopyValuesTo(fast_elements);
- ValidateElements();
-
- set_map_and_elements(new_map, fast_elements);
- } else if (HasExternalArrayElements()) {
- // External arrays cannot have holes or undefined elements.
- return Smi::FromInt(ExternalArray::cast(elements())->length());
- } else if (!HasFastDoubleElements()) {
- Object* obj;
- { MaybeObject* maybe_obj = EnsureWritableFastElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- }
- ASSERT(HasFastSmiOrObjectElements() || HasFastDoubleElements());
-
- // Collect holes at the end, undefined before that and the rest at the
- // start, and return the number of non-hole, non-undefined values.
-
- FixedArrayBase* elements_base = FixedArrayBase::cast(this->elements());
- uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
- if (limit > elements_length) {
- limit = elements_length ;
- }
- if (limit == 0) {
- return Smi::FromInt(0);
- }
-
- HeapNumber* result_double = NULL;
- if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Pessimistically allocate space for return value before
- // we start mutating the array.
- Object* new_double;
- { MaybeObject* maybe_new_double = heap->AllocateHeapNumber(0.0);
- if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
- }
- result_double = HeapNumber::cast(new_double);
- }
-
- uint32_t result = 0;
- if (elements_base->map() == heap->fixed_double_array_map()) {
- FixedDoubleArray* elements = FixedDoubleArray::cast(elements_base);
- // Split elements into defined and the_hole, in that order.
- unsigned int holes = limit;
- // Assume most arrays contain no holes and undefined values, so minimize the
- // number of stores of non-undefined, non-the-hole values.
- for (unsigned int i = 0; i < holes; i++) {
- if (elements->is_the_hole(i)) {
- holes--;
- } else {
- continue;
- }
- // Position i needs to be filled.
- while (holes > i) {
- if (elements->is_the_hole(holes)) {
- holes--;
- } else {
- elements->set(i, elements->get_scalar(holes));
- break;
- }
- }
- }
- result = holes;
- while (holes < limit) {
- elements->set_the_hole(holes);
- holes++;
- }
- } else {
- FixedArray* elements = FixedArray::cast(elements_base);
- AssertNoAllocation no_alloc;
-
- // Split elements into defined, undefined and the_hole, in that order. Only
- // count locations for undefined and the hole, and fill them afterwards.
- WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_alloc);
- unsigned int undefs = limit;
- unsigned int holes = limit;
- // Assume most arrays contain no holes and undefined values, so minimize the
- // number of stores of non-undefined, non-the-hole values.
- for (unsigned int i = 0; i < undefs; i++) {
- Object* current = elements->get(i);
- if (current->IsTheHole()) {
- holes--;
- undefs--;
- } else if (current->IsUndefined()) {
- undefs--;
- } else {
- continue;
- }
- // Position i needs to be filled.
- while (undefs > i) {
- current = elements->get(undefs);
- if (current->IsTheHole()) {
- holes--;
- undefs--;
- } else if (current->IsUndefined()) {
- undefs--;
- } else {
- elements->set(i, current, write_barrier);
- break;
- }
- }
- }
- result = undefs;
- while (undefs < holes) {
- elements->set_undefined(undefs);
- undefs++;
- }
- while (holes < limit) {
- elements->set_the_hole(holes);
- holes++;
- }
- }
-
- if (result <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Smi::FromInt(static_cast<int>(result));
- }
- ASSERT_NE(NULL, result_double);
- result_double->set_value(static_cast<double>(result));
- return result_double;
-}
-
-
-Object* ExternalPixelArray::SetValue(uint32_t index, Object* value) {
- uint8_t clamped_value = 0;
- if (index < static_cast<uint32_t>(length())) {
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- if (int_value < 0) {
- clamped_value = 0;
- } else if (int_value > 255) {
- clamped_value = 255;
- } else {
- clamped_value = static_cast<uint8_t>(int_value);
- }
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- if (!(double_value > 0)) {
- // NaN and less than zero clamp to zero.
- clamped_value = 0;
- } else if (double_value > 255) {
- // Greater than 255 clamp to 255.
- clamped_value = 255;
- } else {
- // Other doubles are rounded to the nearest integer.
- clamped_value = static_cast<uint8_t>(lrint(double_value));
- }
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- ASSERT(value->IsUndefined());
- }
- set(index, clamped_value);
- }
- return Smi::FromInt(clamped_value);
-}
-
-
-template<typename ExternalArrayClass, typename ValueType>
-static MaybeObject* ExternalArrayIntSetter(Heap* heap,
- ExternalArrayClass* receiver,
- uint32_t index,
- Object* value) {
- ValueType cast_value = 0;
- if (index < static_cast<uint32_t>(receiver->length())) {
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- cast_value = static_cast<ValueType>(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- cast_value = static_cast<ValueType>(DoubleToInt32(double_value));
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- ASSERT(value->IsUndefined());
- }
- receiver->set(index, cast_value);
- }
- return heap->NumberFromInt32(cast_value);
-}
-
-
-MaybeObject* ExternalByteArray::SetValue(uint32_t index, Object* value) {
- return ExternalArrayIntSetter<ExternalByteArray, int8_t>
- (GetHeap(), this, index, value);
-}
-
-
-MaybeObject* ExternalUnsignedByteArray::SetValue(uint32_t index,
- Object* value) {
- return ExternalArrayIntSetter<ExternalUnsignedByteArray, uint8_t>
- (GetHeap(), this, index, value);
-}
-
-
-MaybeObject* ExternalShortArray::SetValue(uint32_t index,
- Object* value) {
- return ExternalArrayIntSetter<ExternalShortArray, int16_t>
- (GetHeap(), this, index, value);
-}
-
-
-MaybeObject* ExternalUnsignedShortArray::SetValue(uint32_t index,
- Object* value) {
- return ExternalArrayIntSetter<ExternalUnsignedShortArray, uint16_t>
- (GetHeap(), this, index, value);
-}
-
-
-MaybeObject* ExternalIntArray::SetValue(uint32_t index, Object* value) {
- return ExternalArrayIntSetter<ExternalIntArray, int32_t>
- (GetHeap(), this, index, value);
-}
-
-
-MaybeObject* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) {
- uint32_t cast_value = 0;
- Heap* heap = GetHeap();
- if (index < static_cast<uint32_t>(length())) {
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- cast_value = static_cast<uint32_t>(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- cast_value = static_cast<uint32_t>(DoubleToUint32(double_value));
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- ASSERT(value->IsUndefined());
- }
- set(index, cast_value);
- }
- return heap->NumberFromUint32(cast_value);
-}
-
-
-MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
- float cast_value = static_cast<float>(OS::nan_value());
- Heap* heap = GetHeap();
- if (index < static_cast<uint32_t>(length())) {
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- cast_value = static_cast<float>(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- cast_value = static_cast<float>(double_value);
- } else {
- // Clamp undefined to NaN (default). All other types have been
- // converted to a number type further up in the call chain.
- ASSERT(value->IsUndefined());
- }
- set(index, cast_value);
- }
- return heap->AllocateHeapNumber(cast_value);
-}
-
-
-MaybeObject* ExternalDoubleArray::SetValue(uint32_t index, Object* value) {
- double double_value = OS::nan_value();
- Heap* heap = GetHeap();
- if (index < static_cast<uint32_t>(length())) {
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- double_value = static_cast<double>(int_value);
- } else if (value->IsHeapNumber()) {
- double_value = HeapNumber::cast(value)->value();
- } else {
- // Clamp undefined to NaN (default). All other types have been
- // converted to a number type further up in the call chain.
- ASSERT(value->IsUndefined());
- }
- set(index, double_value);
- }
- return heap->AllocateHeapNumber(double_value);
-}
-
-
-JSGlobalPropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
- ASSERT(!HasFastProperties());
- Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
- return JSGlobalPropertyCell::cast(value);
-}
-
-
-Handle<JSGlobalPropertyCell> GlobalObject::EnsurePropertyCell(
- Handle<GlobalObject> global,
- Handle<String> name) {
- Isolate* isolate = global->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- global->EnsurePropertyCell(*name),
- JSGlobalPropertyCell);
-}
-
-
-MaybeObject* GlobalObject::EnsurePropertyCell(String* name) {
- ASSERT(!HasFastProperties());
- int entry = property_dictionary()->FindEntry(name);
- if (entry == StringDictionary::kNotFound) {
- Heap* heap = GetHeap();
- Object* cell;
- { MaybeObject* maybe_cell =
- heap->AllocateJSGlobalPropertyCell(heap->the_hole_value());
- if (!maybe_cell->ToObject(&cell)) return maybe_cell;
- }
- PropertyDetails details(NONE, NORMAL);
- details = details.AsDeleted();
- Object* dictionary;
- { MaybeObject* maybe_dictionary =
- property_dictionary()->Add(name, cell, details);
- if (!maybe_dictionary->ToObject(&dictionary)) return maybe_dictionary;
- }
- set_properties(StringDictionary::cast(dictionary));
- return cell;
- } else {
- Object* value = property_dictionary()->ValueAt(entry);
- ASSERT(value->IsJSGlobalPropertyCell());
- return value;
- }
-}
-
-
-MaybeObject* StringTable::LookupString(String* string, Object** s) {
- InternalizedStringKey key(string);
- return LookupKey(&key, s);
-}
-
-
-// This class is used for looking up two character strings in the string table.
-// If we don't have a hit we don't want to waste much time so we unroll the
-// string hash calculation loop here for speed. Doesn't work if the two
-// characters form a decimal integer, since such strings have a different hash
-// algorithm.
-class TwoCharHashTableKey : public HashTableKey {
- public:
- TwoCharHashTableKey(uint16_t c1, uint16_t c2, uint32_t seed)
- : c1_(c1), c2_(c2) {
- // Char 1.
- uint32_t hash = seed;
- hash += c1;
- hash += hash << 10;
- hash ^= hash >> 6;
- // Char 2.
- hash += c2;
- hash += hash << 10;
- hash ^= hash >> 6;
- // GetHash.
- hash += hash << 3;
- hash ^= hash >> 11;
- hash += hash << 15;
- if ((hash & String::kHashBitMask) == 0) hash = StringHasher::kZeroHash;
- hash_ = hash;
-#ifdef DEBUG
- // If this assert fails then we failed to reproduce the two-character
- // version of the string hashing algorithm above. One reason could be
- // that we were passed two digits as characters, since the hash
- // algorithm is different in that case.
- uint16_t chars[2] = {c1, c2};
- uint32_t check_hash = StringHasher::HashSequentialString(chars, 2, seed);
- hash = (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
- ASSERT_EQ(static_cast<int32_t>(hash), static_cast<int32_t>(check_hash));
-#endif
- }
-
- bool IsMatch(Object* o) {
- if (!o->IsString()) return false;
- String* other = String::cast(o);
- if (other->length() != 2) return false;
- if (other->Get(0) != c1_) return false;
- return other->Get(1) == c2_;
- }
-
- uint32_t Hash() { return hash_; }
- uint32_t HashForObject(Object* key) {
- if (!key->IsString()) return 0;
- return String::cast(key)->Hash();
- }
-
- Object* AsObject() {
- // The TwoCharHashTableKey is only used for looking in the string
- // table, not for adding to it.
- UNREACHABLE();
- return NULL;
- }
-
- private:
- uint16_t c1_;
- uint16_t c2_;
- uint32_t hash_;
-};
-
-
-bool StringTable::LookupStringIfExists(String* string, String** result) {
- InternalizedStringKey key(string);
- int entry = FindEntry(&key);
- if (entry == kNotFound) {
- return false;
- } else {
- *result = String::cast(KeyAt(entry));
- ASSERT(StringShape(*result).IsInternalized());
- return true;
- }
-}
-
-
-bool StringTable::LookupTwoCharsStringIfExists(uint16_t c1,
- uint16_t c2,
- String** result) {
- TwoCharHashTableKey key(c1, c2, GetHeap()->HashSeed());
- int entry = FindEntry(&key);
- if (entry == kNotFound) {
- return false;
- } else {
- *result = String::cast(KeyAt(entry));
- ASSERT(StringShape(*result).IsInternalized());
- return true;
- }
-}
-
-
-MaybeObject* StringTable::LookupUtf8String(Vector<const char> str,
- Object** s) {
- Utf8StringKey key(str, GetHeap()->HashSeed());
- return LookupKey(&key, s);
-}
-
-
-MaybeObject* StringTable::LookupOneByteString(Vector<const uint8_t> str,
- Object** s) {
- OneByteStringKey key(str, GetHeap()->HashSeed());
- return LookupKey(&key, s);
-}
-
-
-MaybeObject* StringTable::LookupSubStringOneByteString(
- Handle<SeqOneByteString> str,
- int from,
- int length,
- Object** s) {
- SubStringOneByteStringKey key(str, from, length);
- return LookupKey(&key, s);
-}
-
-
-MaybeObject* StringTable::LookupTwoByteString(Vector<const uc16> str,
- Object** s) {
- TwoByteStringKey key(str, GetHeap()->HashSeed());
- return LookupKey(&key, s);
-}
-
-MaybeObject* StringTable::LookupKey(HashTableKey* key, Object** s) {
- int entry = FindEntry(key);
-
- // String already in table.
- if (entry != kNotFound) {
- *s = KeyAt(entry);
- return this;
- }
-
- // Adding new string. Grow table if needed.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- // Create string object.
- Object* string;
- { MaybeObject* maybe_string = key->AsObject();
- if (!maybe_string->ToObject(&string)) return maybe_string;
- }
-
- // If the string table grew as part of EnsureCapacity, obj is not
- // the current string table and therefore we cannot use
- // StringTable::cast here.
- StringTable* table = reinterpret_cast<StringTable*>(obj);
-
- // Add the new string and return it along with the string table.
- entry = table->FindInsertionEntry(key->Hash());
- table->set(EntryToIndex(entry), string);
- table->ElementAdded();
- *s = string;
- return table;
-}
-
-
-// The key for the script compilation cache is dependent on the mode flags,
-// because they change the global language mode and thus binding behaviour.
-// If flags change at some point, we must ensure that we do not hit the cache
-// for code compiled with different settings.
-static LanguageMode CurrentGlobalLanguageMode() {
- return FLAG_use_strict
- ? (FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE)
- : CLASSIC_MODE;
-}
-
-
-Object* CompilationCacheTable::Lookup(String* src, Context* context) {
- SharedFunctionInfo* shared = context->closure()->shared();
- StringSharedKey key(src,
- shared,
- CurrentGlobalLanguageMode(),
- RelocInfo::kNoPosition);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-Object* CompilationCacheTable::LookupEval(String* src,
- Context* context,
- LanguageMode language_mode,
- int scope_position) {
- StringSharedKey key(src,
- context->closure()->shared(),
- language_mode,
- scope_position);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-Object* CompilationCacheTable::LookupRegExp(String* src,
- JSRegExp::Flags flags) {
- RegExpKey key(src, flags);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-MaybeObject* CompilationCacheTable::Put(String* src,
- Context* context,
- Object* value) {
- SharedFunctionInfo* shared = context->closure()->shared();
- StringSharedKey key(src,
- shared,
- CurrentGlobalLanguageMode(),
- RelocInfo::kNoPosition);
- CompilationCacheTable* cache;
- MaybeObject* maybe_cache = EnsureCapacity(1, &key);
- if (!maybe_cache->To(&cache)) return maybe_cache;
-
- Object* k;
- MaybeObject* maybe_k = key.AsObject();
- if (!maybe_k->To(&k)) return maybe_k;
-
- int entry = cache->FindInsertionEntry(key.Hash());
- cache->set(EntryToIndex(entry), k);
- cache->set(EntryToIndex(entry) + 1, value);
- cache->ElementAdded();
- return cache;
-}
-
-
-MaybeObject* CompilationCacheTable::PutEval(String* src,
- Context* context,
- SharedFunctionInfo* value,
- int scope_position) {
- StringSharedKey key(src,
- context->closure()->shared(),
- value->language_mode(),
- scope_position);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- CompilationCacheTable* cache =
- reinterpret_cast<CompilationCacheTable*>(obj);
- int entry = cache->FindInsertionEntry(key.Hash());
-
- Object* k;
- { MaybeObject* maybe_k = key.AsObject();
- if (!maybe_k->ToObject(&k)) return maybe_k;
- }
-
- cache->set(EntryToIndex(entry), k);
- cache->set(EntryToIndex(entry) + 1, value);
- cache->ElementAdded();
- return cache;
-}
-
-
-MaybeObject* CompilationCacheTable::PutRegExp(String* src,
- JSRegExp::Flags flags,
- FixedArray* value) {
- RegExpKey key(src, flags);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- CompilationCacheTable* cache =
- reinterpret_cast<CompilationCacheTable*>(obj);
- int entry = cache->FindInsertionEntry(key.Hash());
- // We store the value in the key slot, and compare the search key
- // to the stored value with a custon IsMatch function during lookups.
- cache->set(EntryToIndex(entry), value);
- cache->set(EntryToIndex(entry) + 1, value);
- cache->ElementAdded();
- return cache;
-}
-
-
-void CompilationCacheTable::Remove(Object* value) {
- Object* the_hole_value = GetHeap()->the_hole_value();
- for (int entry = 0, size = Capacity(); entry < size; entry++) {
- int entry_index = EntryToIndex(entry);
- int value_index = entry_index + 1;
- if (get(value_index) == value) {
- NoWriteBarrierSet(this, entry_index, the_hole_value);
- NoWriteBarrierSet(this, value_index, the_hole_value);
- ElementRemoved();
- }
- }
- return;
-}
-
-
-// StringsKey used for HashTable where key is array of internalzied strings.
-class StringsKey : public HashTableKey {
- public:
- explicit StringsKey(FixedArray* strings) : strings_(strings) { }
-
- bool IsMatch(Object* strings) {
- FixedArray* o = FixedArray::cast(strings);
- int len = strings_->length();
- if (o->length() != len) return false;
- for (int i = 0; i < len; i++) {
- if (o->get(i) != strings_->get(i)) return false;
- }
- return true;
- }
-
- uint32_t Hash() { return HashForObject(strings_); }
-
- uint32_t HashForObject(Object* obj) {
- FixedArray* strings = FixedArray::cast(obj);
- int len = strings->length();
- uint32_t hash = 0;
- for (int i = 0; i < len; i++) {
- hash ^= String::cast(strings->get(i))->Hash();
- }
- return hash;
- }
-
- Object* AsObject() { return strings_; }
-
- private:
- FixedArray* strings_;
-};
-
-
-Object* MapCache::Lookup(FixedArray* array) {
- StringsKey key(array);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-MaybeObject* MapCache::Put(FixedArray* array, Map* value) {
- StringsKey key(array);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- MapCache* cache = reinterpret_cast<MapCache*>(obj);
- int entry = cache->FindInsertionEntry(key.Hash());
- cache->set(EntryToIndex(entry), array);
- cache->set(EntryToIndex(entry) + 1, value);
- cache->ElementAdded();
- return cache;
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::Allocate(int at_least_space_for) {
- Object* obj;
- { MaybeObject* maybe_obj =
- HashTable<Shape, Key>::Allocate(at_least_space_for);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- // Initialize the next enumeration index.
- Dictionary<Shape, Key>::cast(obj)->
- SetNextEnumerationIndex(PropertyDetails::kInitialIndex);
- return obj;
-}
-
-
-void StringDictionary::DoGenerateNewEnumerationIndices(
- Handle<StringDictionary> dictionary) {
- CALL_HEAP_FUNCTION_VOID(dictionary->GetIsolate(),
- dictionary->GenerateNewEnumerationIndices());
-}
-
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
- Heap* heap = Dictionary<Shape, Key>::GetHeap();
- int length = HashTable<Shape, Key>::NumberOfElements();
-
- // Allocate and initialize iteration order array.
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* iteration_order = FixedArray::cast(obj);
- for (int i = 0; i < length; i++) {
- iteration_order->set(i, Smi::FromInt(i));
- }
-
- // Allocate array with enumeration order.
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* enumeration_order = FixedArray::cast(obj);
-
- // Fill the enumeration order array with property details.
- int capacity = HashTable<Shape, Key>::Capacity();
- int pos = 0;
- for (int i = 0; i < capacity; i++) {
- if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
- int index = DetailsAt(i).dictionary_index();
- enumeration_order->set(pos++, Smi::FromInt(index));
- }
- }
-
- // Sort the arrays wrt. enumeration order.
- iteration_order->SortPairs(enumeration_order, enumeration_order->length());
-
- // Overwrite the enumeration_order with the enumeration indices.
- for (int i = 0; i < length; i++) {
- int index = Smi::cast(iteration_order->get(i))->value();
- int enum_index = PropertyDetails::kInitialIndex + i;
- enumeration_order->set(index, Smi::FromInt(enum_index));
- }
-
- // Update the dictionary with new indices.
- capacity = HashTable<Shape, Key>::Capacity();
- pos = 0;
- for (int i = 0; i < capacity; i++) {
- if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
- int enum_index = Smi::cast(enumeration_order->get(pos++))->value();
- PropertyDetails details = DetailsAt(i);
- PropertyDetails new_details =
- PropertyDetails(details.attributes(), details.type(), enum_index);
- DetailsAtPut(i, new_details);
- }
- }
-
- // Set the next enumeration index.
- SetNextEnumerationIndex(PropertyDetails::kInitialIndex+length);
- return this;
-}
-
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::EnsureCapacity(int n, Key key) {
- // Check whether there are enough enumeration indices to add n elements.
- if (Shape::kIsEnumerable &&
- !PropertyDetails::IsValidIndex(NextEnumerationIndex() + n)) {
- // If not, we generate new indices for the properties.
- Object* result;
- { MaybeObject* maybe_result = GenerateNewEnumerationIndices();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return HashTable<Shape, Key>::EnsureCapacity(n, key);
-}
-
-
-template<typename Shape, typename Key>
-Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
- JSReceiver::DeleteMode mode) {
- Heap* heap = Dictionary<Shape, Key>::GetHeap();
- PropertyDetails details = DetailsAt(entry);
- // Ignore attributes if forcing a deletion.
- if (details.IsDontDelete() && mode != JSReceiver::FORCE_DELETION) {
- return heap->false_value();
- }
- SetEntry(entry, heap->the_hole_value(), heap->the_hole_value());
- HashTable<Shape, Key>::ElementRemoved();
- return heap->true_value();
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::Shrink(Key key) {
- return HashTable<Shape, Key>::Shrink(key);
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::AtPut(Key key, Object* value) {
- int entry = this->FindEntry(key);
-
- // If the entry is present set the value;
- if (entry != Dictionary<Shape, Key>::kNotFound) {
- ValueAtPut(entry, value);
- return this;
- }
-
- // Check whether the dictionary should be extended.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- Object* k;
- { MaybeObject* maybe_k = Shape::AsObject(key);
- if (!maybe_k->ToObject(&k)) return maybe_k;
- }
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
-
- return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
- Dictionary<Shape, Key>::Hash(key));
-}
-
-
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::Add(Key key,
- Object* value,
- PropertyDetails details) {
- ASSERT(details.dictionary_index() == details.descriptor_index());
-
- // Valdate key is absent.
- SLOW_ASSERT((this->FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
- // Check whether the dictionary should be extended.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
- Dictionary<Shape, Key>::Hash(key));
-}
-
-
-// Add a key, value pair to the dictionary.
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::AddEntry(Key key,
- Object* value,
- PropertyDetails details,
- uint32_t hash) {
- // Compute the key object.
- Object* k;
- { MaybeObject* maybe_k = Shape::AsObject(key);
- if (!maybe_k->ToObject(&k)) return maybe_k;
- }
-
- uint32_t entry = Dictionary<Shape, Key>::FindInsertionEntry(hash);
- // Insert element at empty or deleted entry
- if (!details.IsDeleted() &&
- details.dictionary_index() == 0 &&
- Shape::kIsEnumerable) {
- // Assign an enumeration index to the property and update
- // SetNextEnumerationIndex.
- int index = NextEnumerationIndex();
- details = PropertyDetails(details.attributes(), details.type(), index);
- SetNextEnumerationIndex(index + 1);
- }
- SetEntry(entry, k, value, details);
- ASSERT((Dictionary<Shape, Key>::KeyAt(entry)->IsNumber()
- || Dictionary<Shape, Key>::KeyAt(entry)->IsString()));
- HashTable<Shape, Key>::ElementAdded();
- return this;
-}
-
-
-void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key) {
- // If the dictionary requires slow elements an element has already
- // been added at a high index.
- if (requires_slow_elements()) return;
- // Check if this index is high enough that we should require slow
- // elements.
- if (key > kRequiresSlowElementsLimit) {
- set_requires_slow_elements();
- return;
- }
- // Update max key value.
- Object* max_index_object = get(kMaxNumberKeyIndex);
- if (!max_index_object->IsSmi() || max_number_key() < key) {
- FixedArray::set(kMaxNumberKeyIndex,
- Smi::FromInt(key << kRequiresSlowElementsTagSize));
- }
-}
-
-
-MaybeObject* SeededNumberDictionary::AddNumberEntry(uint32_t key,
- Object* value,
- PropertyDetails details) {
- UpdateMaxNumberKey(key);
- SLOW_ASSERT(this->FindEntry(key) == kNotFound);
- return Add(key, value, details);
-}
-
-
-MaybeObject* UnseededNumberDictionary::AddNumberEntry(uint32_t key,
- Object* value) {
- SLOW_ASSERT(this->FindEntry(key) == kNotFound);
- return Add(key, value, PropertyDetails(NONE, NORMAL));
-}
-
-
-MaybeObject* SeededNumberDictionary::AtNumberPut(uint32_t key, Object* value) {
- UpdateMaxNumberKey(key);
- return AtPut(key, value);
-}
-
-
-MaybeObject* UnseededNumberDictionary::AtNumberPut(uint32_t key,
- Object* value) {
- return AtPut(key, value);
-}
-
-
-Handle<SeededNumberDictionary> SeededNumberDictionary::Set(
- Handle<SeededNumberDictionary> dictionary,
- uint32_t index,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
- dictionary->Set(index, *value, details),
- SeededNumberDictionary);
-}
-
-
-Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set(
- Handle<UnseededNumberDictionary> dictionary,
- uint32_t index,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
- dictionary->Set(index, *value),
- UnseededNumberDictionary);
-}
-
-
-MaybeObject* SeededNumberDictionary::Set(uint32_t key,
- Object* value,
- PropertyDetails details) {
- int entry = FindEntry(key);
- if (entry == kNotFound) return AddNumberEntry(key, value, details);
- // Preserve enumeration index.
- details = PropertyDetails(details.attributes(),
- details.type(),
- DetailsAt(entry).dictionary_index());
- MaybeObject* maybe_object_key = SeededNumberDictionaryShape::AsObject(key);
- Object* object_key;
- if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
- SetEntry(entry, object_key, value, details);
- return this;
-}
-
-
-MaybeObject* UnseededNumberDictionary::Set(uint32_t key,
- Object* value) {
- int entry = FindEntry(key);
- if (entry == kNotFound) return AddNumberEntry(key, value);
- MaybeObject* maybe_object_key = UnseededNumberDictionaryShape::AsObject(key);
- Object* object_key;
- if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
- SetEntry(entry, object_key, value);
- return this;
-}
-
-
-
-template<typename Shape, typename Key>
-int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
- PropertyAttributes filter) {
- int capacity = HashTable<Shape, Key>::Capacity();
- int result = 0;
- for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
- PropertyDetails details = DetailsAt(i);
- if (details.IsDeleted()) continue;
- PropertyAttributes attr = details.attributes();
- if ((attr & filter) == 0) result++;
- }
- }
- return result;
-}
-
-
-template<typename Shape, typename Key>
-int Dictionary<Shape, Key>::NumberOfEnumElements() {
- return NumberOfElementsFilterAttributes(
- static_cast<PropertyAttributes>(DONT_ENUM));
-}
-
-
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::CopyKeysTo(
- FixedArray* storage,
- PropertyAttributes filter,
- typename Dictionary<Shape, Key>::SortMode sort_mode) {
- ASSERT(storage->length() >= NumberOfEnumElements());
- int capacity = HashTable<Shape, Key>::Capacity();
- int index = 0;
- for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
- PropertyDetails details = DetailsAt(i);
- if (details.IsDeleted()) continue;
- PropertyAttributes attr = details.attributes();
- if ((attr & filter) == 0) storage->set(index++, k);
- }
- }
- if (sort_mode == Dictionary<Shape, Key>::SORTED) {
- storage->SortPairs(storage, index);
- }
- ASSERT(storage->length() >= index);
-}
-
-
-FixedArray* StringDictionary::CopyEnumKeysTo(FixedArray* storage) {
- int length = storage->length();
- ASSERT(length >= NumberOfEnumElements());
- Heap* heap = GetHeap();
- Object* undefined_value = heap->undefined_value();
- int capacity = Capacity();
- int properties = 0;
-
- // Fill in the enumeration array by assigning enumerable keys at their
- // enumeration index. This will leave holes in the array if there are keys
- // that are deleted or not enumerable.
- for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) {
- PropertyDetails details = DetailsAt(i);
- if (details.IsDeleted() || details.IsDontEnum()) continue;
- properties++;
- storage->set(details.dictionary_index() - 1, k);
- if (properties == length) break;
- }
- }
-
- // There are holes in the enumeration array if less properties were assigned
- // than the length of the array. If so, crunch all the existing properties
- // together by shifting them to the left (maintaining the enumeration order),
- // and trimming of the right side of the array.
- if (properties < length) {
- if (properties == 0) return heap->empty_fixed_array();
- properties = 0;
- for (int i = 0; i < length; ++i) {
- Object* value = storage->get(i);
- if (value != undefined_value) {
- storage->set(properties, value);
- ++properties;
- }
- }
- RightTrimFixedArray<FROM_MUTATOR>(heap, storage, length - properties);
- }
- return storage;
-}
-
-
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::CopyKeysTo(
- FixedArray* storage,
- int index,
- typename Dictionary<Shape, Key>::SortMode sort_mode) {
- ASSERT(storage->length() >= NumberOfElementsFilterAttributes(
- static_cast<PropertyAttributes>(NONE)));
- int capacity = HashTable<Shape, Key>::Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
- PropertyDetails details = DetailsAt(i);
- if (details.IsDeleted()) continue;
- storage->set(index++, k);
- }
- }
- if (sort_mode == Dictionary<Shape, Key>::SORTED) {
- storage->SortPairs(storage, index);
- }
- ASSERT(storage->length() >= index);
-}
-
-
-// Backwards lookup (slow).
-template<typename Shape, typename Key>
-Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) {
- int capacity = HashTable<Shape, Key>::Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (Dictionary<Shape, Key>::IsKey(k)) {
- Object* e = ValueAt(i);
- if (e->IsJSGlobalPropertyCell()) {
- e = JSGlobalPropertyCell::cast(e)->value();
- }
- if (e == value) return k;
- }
- }
- Heap* heap = Dictionary<Shape, Key>::GetHeap();
- return heap->undefined_value();
-}
-
-
-MaybeObject* StringDictionary::TransformPropertiesToFastFor(
- JSObject* obj, int unused_property_fields) {
- // Make sure we preserve dictionary representation if there are too many
- // descriptors.
- int number_of_elements = NumberOfElements();
- if (number_of_elements > DescriptorArray::kMaxNumberOfDescriptors) return obj;
-
- if (number_of_elements != NextEnumerationIndex()) {
- MaybeObject* maybe_result = GenerateNewEnumerationIndices();
- if (maybe_result->IsFailure()) return maybe_result;
- }
-
- int instance_descriptor_length = 0;
- int number_of_fields = 0;
-
- Heap* heap = GetHeap();
-
- // Compute the length of the instance descriptor.
- int capacity = Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) {
- Object* value = ValueAt(i);
- PropertyType type = DetailsAt(i).type();
- ASSERT(type != FIELD);
- instance_descriptor_length++;
- if (type == NORMAL && !value->IsJSFunction()) {
- number_of_fields += 1;
- }
- }
- }
-
- int inobject_props = obj->map()->inobject_properties();
-
- // Allocate new map.
- Map* new_map;
- MaybeObject* maybe_new_map = obj->map()->CopyDropDescriptors();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- new_map->set_dictionary_map(false);
-
- if (instance_descriptor_length == 0) {
- ASSERT_LE(unused_property_fields, inobject_props);
- // Transform the object.
- new_map->set_unused_property_fields(inobject_props);
- obj->set_map(new_map);
- obj->set_properties(heap->empty_fixed_array());
- // Check that it really works.
- ASSERT(obj->HasFastProperties());
- return obj;
- }
-
- // Allocate the instance descriptor.
- DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors =
- DescriptorArray::Allocate(instance_descriptor_length);
- if (!maybe_descriptors->To(&descriptors)) {
- return maybe_descriptors;
- }
-
- DescriptorArray::WhitenessWitness witness(descriptors);
-
- int number_of_allocated_fields =
- number_of_fields + unused_property_fields - inobject_props;
- if (number_of_allocated_fields < 0) {
- // There is enough inobject space for all fields (including unused).
- number_of_allocated_fields = 0;
- unused_property_fields = inobject_props - number_of_fields;
- }
-
- // Allocate the fixed array for the fields.
- FixedArray* fields;
- MaybeObject* maybe_fields =
- heap->AllocateFixedArray(number_of_allocated_fields);
- if (!maybe_fields->To(&fields)) return maybe_fields;
-
- // Fill in the instance descriptor and the fields.
- int current_offset = 0;
- for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) {
- Object* value = ValueAt(i);
- // Ensure the key is an internalized string before writing into the
- // instance descriptor.
- String* key;
- MaybeObject* maybe_key = heap->InternalizeString(String::cast(k));
- if (!maybe_key->To(&key)) return maybe_key;
-
- PropertyDetails details = DetailsAt(i);
- ASSERT(details.descriptor_index() == details.dictionary_index());
- int enumeration_index = details.descriptor_index();
- PropertyType type = details.type();
-
- if (value->IsJSFunction()) {
- ConstantFunctionDescriptor d(key,
- JSFunction::cast(value),
- details.attributes(),
- enumeration_index);
- descriptors->Set(enumeration_index - 1, &d, witness);
- } else if (type == NORMAL) {
- if (current_offset < inobject_props) {
- obj->InObjectPropertyAtPut(current_offset,
- value,
- UPDATE_WRITE_BARRIER);
- } else {
- int offset = current_offset - inobject_props;
- fields->set(offset, value);
- }
- FieldDescriptor d(key,
- current_offset++,
- details.attributes(),
- enumeration_index);
- descriptors->Set(enumeration_index - 1, &d, witness);
- } else if (type == CALLBACKS) {
- CallbacksDescriptor d(key,
- value,
- details.attributes(),
- enumeration_index);
- descriptors->Set(enumeration_index - 1, &d, witness);
- } else {
- UNREACHABLE();
- }
- }
- }
- ASSERT(current_offset == number_of_fields);
-
- descriptors->Sort();
-
- new_map->InitializeDescriptors(descriptors);
- new_map->set_unused_property_fields(unused_property_fields);
-
- // Transform the object.
- obj->set_map(new_map);
-
- obj->set_properties(fields);
- ASSERT(obj->IsJSObject());
-
- // Check that it really works.
- ASSERT(obj->HasFastProperties());
-
- return obj;
-}
-
-
-bool ObjectHashSet::Contains(Object* key) {
- ASSERT(IsKey(key));
-
- // If the object does not have an identity hash, it was never used as a key.
- { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
- if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return false;
- }
- return (FindEntry(key) != kNotFound);
-}
-
-
-MaybeObject* ObjectHashSet::Add(Object* key) {
- ASSERT(IsKey(key));
-
- // Make sure the key object has an identity hash code.
- int hash;
- { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
- if (maybe_hash->IsFailure()) return maybe_hash;
- hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
- }
- int entry = FindEntry(key);
-
- // Check whether key is already present.
- if (entry != kNotFound) return this;
-
- // Check whether the hash set should be extended and add entry.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- ObjectHashSet* table = ObjectHashSet::cast(obj);
- entry = table->FindInsertionEntry(hash);
- table->set(EntryToIndex(entry), key);
- table->ElementAdded();
- return table;
-}
-
-
-MaybeObject* ObjectHashSet::Remove(Object* key) {
- ASSERT(IsKey(key));
-
- // If the object does not have an identity hash, it was never used as a key.
- { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
- if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return this;
- }
- int entry = FindEntry(key);
-
- // Check whether key is actually present.
- if (entry == kNotFound) return this;
-
- // Remove entry and try to shrink this hash set.
- set_the_hole(EntryToIndex(entry));
- ElementRemoved();
- return Shrink(key);
-}
-
-
-Object* ObjectHashTable::Lookup(Object* key) {
- ASSERT(IsKey(key));
-
- // If the object does not have an identity hash, it was never used as a key.
- { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
- if (maybe_hash->ToObjectUnchecked()->IsUndefined()) {
- return GetHeap()->the_hole_value();
- }
- }
- int entry = FindEntry(key);
- if (entry == kNotFound) return GetHeap()->the_hole_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-MaybeObject* ObjectHashTable::Put(Object* key, Object* value) {
- ASSERT(IsKey(key));
-
- // Make sure the key object has an identity hash code.
- int hash;
- { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
- if (maybe_hash->IsFailure()) return maybe_hash;
- hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
- }
- int entry = FindEntry(key);
-
- // Check whether to perform removal operation.
- if (value->IsTheHole()) {
- if (entry == kNotFound) return this;
- RemoveEntry(entry);
- return Shrink(key);
- }
-
- // Key is already in table, just overwrite value.
- if (entry != kNotFound) {
- set(EntryToIndex(entry) + 1, value);
- return this;
- }
-
- // Check whether the hash table should be extended.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- ObjectHashTable* table = ObjectHashTable::cast(obj);
- table->AddEntry(table->FindInsertionEntry(hash), key, value);
- return table;
-}
-
-
-void ObjectHashTable::AddEntry(int entry, Object* key, Object* value) {
- set(EntryToIndex(entry), key);
- set(EntryToIndex(entry) + 1, value);
- ElementAdded();
-}
-
-
-void ObjectHashTable::RemoveEntry(int entry) {
- set_the_hole(EntryToIndex(entry));
- set_the_hole(EntryToIndex(entry) + 1);
- ElementRemoved();
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-// Check if there is a break point at this code position.
-bool DebugInfo::HasBreakPoint(int code_position) {
- // Get the break point info object for this code position.
- Object* break_point_info = GetBreakPointInfo(code_position);
-
- // If there is no break point info object or no break points in the break
- // point info object there is no break point at this code position.
- if (break_point_info->IsUndefined()) return false;
- return BreakPointInfo::cast(break_point_info)->GetBreakPointCount() > 0;
-}
-
-
-// Get the break point info object for this code position.
-Object* DebugInfo::GetBreakPointInfo(int code_position) {
- // Find the index of the break point info object for this code position.
- int index = GetBreakPointInfoIndex(code_position);
-
- // Return the break point info object if any.
- if (index == kNoBreakPointInfo) return GetHeap()->undefined_value();
- return BreakPointInfo::cast(break_points()->get(index));
-}
-
-
-// Clear a break point at the specified code position.
-void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
- int code_position,
- Handle<Object> break_point_object) {
- Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
- Isolate::Current());
- if (break_point_info->IsUndefined()) return;
- BreakPointInfo::ClearBreakPoint(
- Handle<BreakPointInfo>::cast(break_point_info),
- break_point_object);
-}
-
-
-void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
- int code_position,
- int source_position,
- int statement_position,
- Handle<Object> break_point_object) {
- Isolate* isolate = Isolate::Current();
- Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
- isolate);
- if (!break_point_info->IsUndefined()) {
- BreakPointInfo::SetBreakPoint(
- Handle<BreakPointInfo>::cast(break_point_info),
- break_point_object);
- return;
- }
-
- // Adding a new break point for a code position which did not have any
- // break points before. Try to find a free slot.
- int index = kNoBreakPointInfo;
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (debug_info->break_points()->get(i)->IsUndefined()) {
- index = i;
- break;
- }
- }
- if (index == kNoBreakPointInfo) {
- // No free slot - extend break point info array.
- Handle<FixedArray> old_break_points =
- Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
- Handle<FixedArray> new_break_points =
- isolate->factory()->NewFixedArray(
- old_break_points->length() +
- Debug::kEstimatedNofBreakPointsInFunction);
-
- debug_info->set_break_points(*new_break_points);
- for (int i = 0; i < old_break_points->length(); i++) {
- new_break_points->set(i, old_break_points->get(i));
- }
- index = old_break_points->length();
- }
- ASSERT(index != kNoBreakPointInfo);
-
- // Allocate new BreakPointInfo object and set the break point.
- Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
- isolate->factory()->NewStruct(BREAK_POINT_INFO_TYPE));
- new_break_point_info->set_code_position(Smi::FromInt(code_position));
- new_break_point_info->set_source_position(Smi::FromInt(source_position));
- new_break_point_info->
- set_statement_position(Smi::FromInt(statement_position));
- new_break_point_info->set_break_point_objects(
- isolate->heap()->undefined_value());
- BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
- debug_info->break_points()->set(index, *new_break_point_info);
-}
-
-
-// Get the break point objects for a code position.
-Object* DebugInfo::GetBreakPointObjects(int code_position) {
- Object* break_point_info = GetBreakPointInfo(code_position);
- if (break_point_info->IsUndefined()) {
- return GetHeap()->undefined_value();
- }
- return BreakPointInfo::cast(break_point_info)->break_point_objects();
-}
-
-
-// Get the total number of break points.
-int DebugInfo::GetBreakPointCount() {
- if (break_points()->IsUndefined()) return 0;
- int count = 0;
- for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined()) {
- BreakPointInfo* break_point_info =
- BreakPointInfo::cast(break_points()->get(i));
- count += break_point_info->GetBreakPointCount();
- }
- }
- return count;
-}
-
-
-Object* DebugInfo::FindBreakPointInfo(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object) {
- Heap* heap = debug_info->GetHeap();
- if (debug_info->break_points()->IsUndefined()) return heap->undefined_value();
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (!debug_info->break_points()->get(i)->IsUndefined()) {
- Handle<BreakPointInfo> break_point_info =
- Handle<BreakPointInfo>(BreakPointInfo::cast(
- debug_info->break_points()->get(i)));
- if (BreakPointInfo::HasBreakPointObject(break_point_info,
- break_point_object)) {
- return *break_point_info;
- }
- }
- }
- return heap->undefined_value();
-}
-
-
-// Find the index of the break point info object for the specified code
-// position.
-int DebugInfo::GetBreakPointInfoIndex(int code_position) {
- if (break_points()->IsUndefined()) return kNoBreakPointInfo;
- for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined()) {
- BreakPointInfo* break_point_info =
- BreakPointInfo::cast(break_points()->get(i));
- if (break_point_info->code_position()->value() == code_position) {
- return i;
- }
- }
- }
- return kNoBreakPointInfo;
-}
-
-
-// Remove the specified break point object.
-void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
- Isolate* isolate = Isolate::Current();
- // If there are no break points just ignore.
- if (break_point_info->break_point_objects()->IsUndefined()) return;
- // If there is a single break point clear it if it is the same.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
- if (break_point_info->break_point_objects() == *break_point_object) {
- break_point_info->set_break_point_objects(
- isolate->heap()->undefined_value());
- }
- return;
- }
- // If there are multiple break points shrink the array
- ASSERT(break_point_info->break_point_objects()->IsFixedArray());
- Handle<FixedArray> old_array =
- Handle<FixedArray>(
- FixedArray::cast(break_point_info->break_point_objects()));
- Handle<FixedArray> new_array =
- isolate->factory()->NewFixedArray(old_array->length() - 1);
- int found_count = 0;
- for (int i = 0; i < old_array->length(); i++) {
- if (old_array->get(i) == *break_point_object) {
- ASSERT(found_count == 0);
- found_count++;
- } else {
- new_array->set(i - found_count, old_array->get(i));
- }
- }
- // If the break point was found in the list change it.
- if (found_count > 0) break_point_info->set_break_point_objects(*new_array);
-}
-
-
-// Add the specified break point object.
-void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
- // If there was no break point objects before just set it.
- if (break_point_info->break_point_objects()->IsUndefined()) {
- break_point_info->set_break_point_objects(*break_point_object);
- return;
- }
- // If the break point object is the same as before just ignore.
- if (break_point_info->break_point_objects() == *break_point_object) return;
- // If there was one break point object before replace with array.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
- Handle<FixedArray> array = FACTORY->NewFixedArray(2);
- array->set(0, break_point_info->break_point_objects());
- array->set(1, *break_point_object);
- break_point_info->set_break_point_objects(*array);
- return;
- }
- // If there was more than one break point before extend array.
- Handle<FixedArray> old_array =
- Handle<FixedArray>(
- FixedArray::cast(break_point_info->break_point_objects()));
- Handle<FixedArray> new_array =
- FACTORY->NewFixedArray(old_array->length() + 1);
- for (int i = 0; i < old_array->length(); i++) {
- // If the break point was there before just ignore.
- if (old_array->get(i) == *break_point_object) return;
- new_array->set(i, old_array->get(i));
- }
- // Add the new break point.
- new_array->set(old_array->length(), *break_point_object);
- break_point_info->set_break_point_objects(*new_array);
-}
-
-
-bool BreakPointInfo::HasBreakPointObject(
- Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
- // No break point.
- if (break_point_info->break_point_objects()->IsUndefined()) return false;
- // Single break point.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
- return break_point_info->break_point_objects() == *break_point_object;
- }
- // Multiple break points.
- FixedArray* array = FixedArray::cast(break_point_info->break_point_objects());
- for (int i = 0; i < array->length(); i++) {
- if (array->get(i) == *break_point_object) {
- return true;
- }
- }
- return false;
-}
-
-
-// Get the number of break points.
-int BreakPointInfo::GetBreakPointCount() {
- // No break point.
- if (break_point_objects()->IsUndefined()) return 0;
- // Single break point.
- if (!break_point_objects()->IsFixedArray()) return 1;
- // Multiple break points.
- return FixedArray::cast(break_point_objects())->length();
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-Object* JSDate::GetField(Object* object, Smi* index) {
- return JSDate::cast(object)->DoGetField(
- static_cast<FieldIndex>(index->value()));
-}
-
-
-Object* JSDate::DoGetField(FieldIndex index) {
- ASSERT(index != kDateValue);
-
- DateCache* date_cache = GetIsolate()->date_cache();
-
- if (index < kFirstUncachedField) {
- Object* stamp = cache_stamp();
- if (stamp != date_cache->stamp() && stamp->IsSmi()) {
- // Since the stamp is not NaN, the value is also not NaN.
- int64_t local_time_ms =
- date_cache->ToLocal(static_cast<int64_t>(value()->Number()));
- SetLocalFields(local_time_ms, date_cache);
- }
- switch (index) {
- case kYear: return year();
- case kMonth: return month();
- case kDay: return day();
- case kWeekday: return weekday();
- case kHour: return hour();
- case kMinute: return min();
- case kSecond: return sec();
- default: UNREACHABLE();
- }
- }
-
- if (index >= kFirstUTCField) {
- return GetUTCField(index, value()->Number(), date_cache);
- }
-
- double time = value()->Number();
- if (isnan(time)) return GetIsolate()->heap()->nan_value();
-
- int64_t local_time_ms = date_cache->ToLocal(static_cast<int64_t>(time));
- int days = DateCache::DaysFromTime(local_time_ms);
-
- if (index == kDays) return Smi::FromInt(days);
-
- int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
- if (index == kMillisecond) return Smi::FromInt(time_in_day_ms % 1000);
- ASSERT(index == kTimeInDay);
- return Smi::FromInt(time_in_day_ms);
-}
-
-
-Object* JSDate::GetUTCField(FieldIndex index,
- double value,
- DateCache* date_cache) {
- ASSERT(index >= kFirstUTCField);
-
- if (isnan(value)) return GetIsolate()->heap()->nan_value();
-
- int64_t time_ms = static_cast<int64_t>(value);
-
- if (index == kTimezoneOffset) {
- return Smi::FromInt(date_cache->TimezoneOffset(time_ms));
- }
-
- int days = DateCache::DaysFromTime(time_ms);
-
- if (index == kWeekdayUTC) return Smi::FromInt(date_cache->Weekday(days));
-
- if (index <= kDayUTC) {
- int year, month, day;
- date_cache->YearMonthDayFromDays(days, &year, &month, &day);
- if (index == kYearUTC) return Smi::FromInt(year);
- if (index == kMonthUTC) return Smi::FromInt(month);
- ASSERT(index == kDayUTC);
- return Smi::FromInt(day);
- }
-
- int time_in_day_ms = DateCache::TimeInDay(time_ms, days);
- switch (index) {
- case kHourUTC: return Smi::FromInt(time_in_day_ms / (60 * 60 * 1000));
- case kMinuteUTC: return Smi::FromInt((time_in_day_ms / (60 * 1000)) % 60);
- case kSecondUTC: return Smi::FromInt((time_in_day_ms / 1000) % 60);
- case kMillisecondUTC: return Smi::FromInt(time_in_day_ms % 1000);
- case kDaysUTC: return Smi::FromInt(days);
- case kTimeInDayUTC: return Smi::FromInt(time_in_day_ms);
- default: UNREACHABLE();
- }
-
- UNREACHABLE();
- return NULL;
-}
-
-
-void JSDate::SetValue(Object* value, bool is_value_nan) {
- set_value(value);
- if (is_value_nan) {
- HeapNumber* nan = GetIsolate()->heap()->nan_value();
- set_cache_stamp(nan, SKIP_WRITE_BARRIER);
- set_year(nan, SKIP_WRITE_BARRIER);
- set_month(nan, SKIP_WRITE_BARRIER);
- set_day(nan, SKIP_WRITE_BARRIER);
- set_hour(nan, SKIP_WRITE_BARRIER);
- set_min(nan, SKIP_WRITE_BARRIER);
- set_sec(nan, SKIP_WRITE_BARRIER);
- set_weekday(nan, SKIP_WRITE_BARRIER);
- } else {
- set_cache_stamp(Smi::FromInt(DateCache::kInvalidStamp), SKIP_WRITE_BARRIER);
- }
-}
-
-
-void JSDate::SetLocalFields(int64_t local_time_ms, DateCache* date_cache) {
- int days = DateCache::DaysFromTime(local_time_ms);
- int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
- int year, month, day;
- date_cache->YearMonthDayFromDays(days, &year, &month, &day);
- int weekday = date_cache->Weekday(days);
- int hour = time_in_day_ms / (60 * 60 * 1000);
- int min = (time_in_day_ms / (60 * 1000)) % 60;
- int sec = (time_in_day_ms / 1000) % 60;
- set_cache_stamp(date_cache->stamp());
- set_year(Smi::FromInt(year), SKIP_WRITE_BARRIER);
- set_month(Smi::FromInt(month), SKIP_WRITE_BARRIER);
- set_day(Smi::FromInt(day), SKIP_WRITE_BARRIER);
- set_weekday(Smi::FromInt(weekday), SKIP_WRITE_BARRIER);
- set_hour(Smi::FromInt(hour), SKIP_WRITE_BARRIER);
- set_min(Smi::FromInt(min), SKIP_WRITE_BARRIER);
- set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/objects.h b/src/3rdparty/v8/src/objects.h
deleted file mode 100644
index 07bb288..0000000
--- a/src/3rdparty/v8/src/objects.h
+++ /dev/null
@@ -1,9150 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_OBJECTS_H_
-#define V8_OBJECTS_H_
-
-#include "allocation.h"
-#include "builtins.h"
-#include "elements-kind.h"
-#include "list.h"
-#include "property-details.h"
-#include "smart-pointers.h"
-#include "unicode-inl.h"
-#if V8_TARGET_ARCH_ARM
-#include "arm/constants-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/constants-mips.h"
-#endif
-#include "v8checks.h"
-#include "zone.h"
-
-
-//
-// Most object types in the V8 JavaScript are described in this file.
-//
-// Inheritance hierarchy:
-// - MaybeObject (an object or a failure)
-// - Failure (immediate for marking failed operation)
-// - Object
-// - Smi (immediate small integer)
-// - HeapObject (superclass for everything allocated in the heap)
-// - JSReceiver (suitable for property access)
-// - JSObject
-// - JSArray
-// - JSSet
-// - JSMap
-// - JSWeakMap
-// - JSRegExp
-// - JSFunction
-// - JSModule
-// - GlobalObject
-// - JSGlobalObject
-// - JSBuiltinsObject
-// - JSGlobalProxy
-// - JSValue
-// - JSDate
-// - JSMessageObject
-// - JSProxy
-// - JSFunctionProxy
-// - FixedArrayBase
-// - ByteArray
-// - FixedArray
-// - DescriptorArray
-// - HashTable
-// - Dictionary
-// - StringTable
-// - CompilationCacheTable
-// - CodeCacheHashTable
-// - MapCache
-// - Context
-// - JSFunctionResultCache
-// - ScopeInfo
-// - TransitionArray
-// - FixedDoubleArray
-// - ExternalArray
-// - ExternalPixelArray
-// - ExternalByteArray
-// - ExternalUnsignedByteArray
-// - ExternalShortArray
-// - ExternalUnsignedShortArray
-// - ExternalIntArray
-// - ExternalUnsignedIntArray
-// - ExternalFloatArray
-// - Name
-// - String
-// - SeqString
-// - SeqOneByteString
-// - SeqTwoByteString
-// - SlicedString
-// - ConsString
-// - ExternalString
-// - ExternalAsciiString
-// - ExternalTwoByteString
-// - InternalizedString
-// - SeqInternalizedString
-// - SeqOneByteInternalizedString
-// - SeqTwoByteInternalizedString
-// - ConsInternalizedString
-// - ExternalInternalizedString
-// - ExternalAsciiInternalizedString
-// - ExternalTwoByteInternalizedString
-// - Symbol
-// - HeapNumber
-// - Code
-// - Map
-// - Oddball
-// - Foreign
-// - SharedFunctionInfo
-// - Struct
-// - DeclaredAccessorDescriptor
-// - AccessorInfo
-// - DeclaredAccessorInfo
-// - ExecutableAccessorInfo
-// - AccessorPair
-// - AccessCheckInfo
-// - InterceptorInfo
-// - CallHandlerInfo
-// - TemplateInfo
-// - FunctionTemplateInfo
-// - ObjectTemplateInfo
-// - Script
-// - SignatureInfo
-// - TypeSwitchInfo
-// - DebugInfo
-// - BreakPointInfo
-// - CodeCache
-//
-// Formats of Object*:
-// Smi: [31 bit signed int] 0
-// HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
-// Failure: [30 bit signed int] 11
-
-namespace v8 {
-namespace internal {
-
-enum CompareMapMode {
- REQUIRE_EXACT_MAP,
- ALLOW_ELEMENT_TRANSITION_MAPS
-};
-
-enum KeyedAccessGrowMode {
- DO_NOT_ALLOW_JSARRAY_GROWTH,
- ALLOW_JSARRAY_GROWTH
-};
-
-// Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
-enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
-
-
-// PropertyNormalizationMode is used to specify whether to keep
-// inobject properties when normalizing properties of a JSObject.
-enum PropertyNormalizationMode {
- CLEAR_INOBJECT_PROPERTIES,
- KEEP_INOBJECT_PROPERTIES
-};
-
-
-// NormalizedMapSharingMode is used to specify whether a map may be shared
-// by different objects with normalized properties.
-enum NormalizedMapSharingMode {
- UNIQUE_NORMALIZED_MAP,
- SHARED_NORMALIZED_MAP
-};
-
-
-// Indicates whether a get method should implicitly create the object looked up.
-enum CreationFlag {
- ALLOW_CREATION,
- OMIT_CREATION
-};
-
-
-// Indicates whether transitions can be added to a source map or not.
-enum TransitionFlag {
- INSERT_TRANSITION,
- OMIT_TRANSITION
-};
-
-
-enum DebugExtraICState {
- DEBUG_BREAK,
- DEBUG_PREPARE_STEP_IN
-};
-
-
-// Indicates whether the transition is simple: the target map of the transition
-// either extends the current map with a new property, or it modifies the
-// property that was added last to the current map.
-enum SimpleTransitionFlag {
- SIMPLE_TRANSITION,
- FULL_TRANSITION
-};
-
-
-// Indicates whether we are only interested in the descriptors of a particular
-// map, or in all descriptors in the descriptor array.
-enum DescriptorFlag {
- ALL_DESCRIPTORS,
- OWN_DESCRIPTORS
-};
-
-// The GC maintains a bit of information, the MarkingParity, which toggles
-// from odd to even and back every time marking is completed. Incremental
-// marking can visit an object twice during a marking phase, so algorithms that
-// that piggy-back on marking can use the parity to ensure that they only
-// perform an operation on an object once per marking phase: they record the
-// MarkingParity when they visit an object, and only re-visit the object when it
-// is marked again and the MarkingParity changes.
-enum MarkingParity {
- NO_MARKING_PARITY,
- ODD_MARKING_PARITY,
- EVEN_MARKING_PARITY
-};
-
-// Instance size sentinel for objects of variable size.
-const int kVariableSizeSentinel = 0;
-
-const int kStubMajorKeyBits = 6;
-const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
-
-// All Maps have a field instance_type containing a InstanceType.
-// It describes the type of the instances.
-//
-// As an example, a JavaScript object is a heap object and its map
-// instance_type is JS_OBJECT_TYPE.
-//
-// The names of the string instance types are intended to systematically
-// mirror their encoding in the instance_type field of the map. The default
-// encoding is considered TWO_BYTE. It is not mentioned in the name. ASCII
-// encoding is mentioned explicitly in the name. Likewise, the default
-// representation is considered sequential. It is not mentioned in the
-// name. The other representations (e.g. CONS, EXTERNAL) are explicitly
-// mentioned. Finally, the string is either a STRING_TYPE (if it is a normal
-// string) or a INTERNALIZED_STRING_TYPE (if it is a internalized string).
-//
-// NOTE: The following things are some that depend on the string types having
-// instance_types that are less than those of all other types:
-// HeapObject::Size, HeapObject::IterateBody, the typeof operator, and
-// Object::IsString.
-//
-// NOTE: Everything following JS_VALUE_TYPE is considered a
-// JSObject for GC purposes. The first four entries here have typeof
-// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST_ALL(V) \
- V(STRING_TYPE) \
- V(ASCII_STRING_TYPE) \
- V(CONS_STRING_TYPE) \
- V(CONS_ASCII_STRING_TYPE) \
- V(SLICED_STRING_TYPE) \
- V(EXTERNAL_STRING_TYPE) \
- V(EXTERNAL_ASCII_STRING_TYPE) \
- V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \
- V(SHORT_EXTERNAL_STRING_TYPE) \
- V(SHORT_EXTERNAL_ASCII_STRING_TYPE) \
- V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \
- \
- V(INTERNALIZED_STRING_TYPE) \
- V(ASCII_INTERNALIZED_STRING_TYPE) \
- V(CONS_INTERNALIZED_STRING_TYPE) \
- V(CONS_ASCII_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \
- V(SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE) \
- \
- V(SYMBOL_TYPE) \
- V(MAP_TYPE) \
- V(CODE_TYPE) \
- V(ODDBALL_TYPE) \
- V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
- \
- V(HEAP_NUMBER_TYPE) \
- V(FOREIGN_TYPE) \
- V(BYTE_ARRAY_TYPE) \
- V(FREE_SPACE_TYPE) \
- /* Note: the order of these external array */ \
- /* types is relied upon in */ \
- /* Object::IsExternalArray(). */ \
- V(EXTERNAL_BYTE_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE) \
- V(EXTERNAL_SHORT_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE) \
- V(EXTERNAL_INT_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE) \
- V(EXTERNAL_FLOAT_ARRAY_TYPE) \
- V(EXTERNAL_PIXEL_ARRAY_TYPE) \
- V(FILLER_TYPE) \
- \
- V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE) \
- V(DECLARED_ACCESSOR_INFO_TYPE) \
- V(EXECUTABLE_ACCESSOR_INFO_TYPE) \
- V(ACCESSOR_PAIR_TYPE) \
- V(ACCESS_CHECK_INFO_TYPE) \
- V(INTERCEPTOR_INFO_TYPE) \
- V(CALL_HANDLER_INFO_TYPE) \
- V(FUNCTION_TEMPLATE_INFO_TYPE) \
- V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(SIGNATURE_INFO_TYPE) \
- V(TYPE_SWITCH_INFO_TYPE) \
- V(ALLOCATION_SITE_INFO_TYPE) \
- V(SCRIPT_TYPE) \
- V(CODE_CACHE_TYPE) \
- V(POLYMORPHIC_CODE_CACHE_TYPE) \
- V(TYPE_FEEDBACK_INFO_TYPE) \
- V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
- \
- V(FIXED_ARRAY_TYPE) \
- V(FIXED_DOUBLE_ARRAY_TYPE) \
- V(SHARED_FUNCTION_INFO_TYPE) \
- \
- V(JS_MESSAGE_OBJECT_TYPE) \
- \
- V(JS_VALUE_TYPE) \
- V(JS_DATE_TYPE) \
- V(JS_OBJECT_TYPE) \
- V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
- V(JS_MODULE_TYPE) \
- V(JS_GLOBAL_OBJECT_TYPE) \
- V(JS_BUILTINS_OBJECT_TYPE) \
- V(JS_GLOBAL_PROXY_TYPE) \
- V(JS_ARRAY_TYPE) \
- V(JS_PROXY_TYPE) \
- V(JS_WEAK_MAP_TYPE) \
- V(JS_REGEXP_TYPE) \
- \
- V(JS_FUNCTION_TYPE) \
- V(JS_FUNCTION_PROXY_TYPE) \
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
- V(DEBUG_INFO_TYPE) \
- V(BREAK_POINT_INFO_TYPE)
-#else
-#define INSTANCE_TYPE_LIST_DEBUGGER(V)
-#endif
-
-#define INSTANCE_TYPE_LIST(V) \
- INSTANCE_TYPE_LIST_ALL(V) \
- INSTANCE_TYPE_LIST_DEBUGGER(V)
-
-
-// Since string types are not consecutive, this macro is used to
-// iterate over them.
-#define STRING_TYPE_LIST(V) \
- V(STRING_TYPE, \
- kVariableSizeSentinel, \
- string, \
- String) \
- V(ASCII_STRING_TYPE, \
- kVariableSizeSentinel, \
- ascii_string, \
- AsciiString) \
- V(CONS_STRING_TYPE, \
- ConsString::kSize, \
- cons_string, \
- ConsString) \
- V(CONS_ASCII_STRING_TYPE, \
- ConsString::kSize, \
- cons_ascii_string, \
- ConsAsciiString) \
- V(SLICED_STRING_TYPE, \
- SlicedString::kSize, \
- sliced_string, \
- SlicedString) \
- V(SLICED_ASCII_STRING_TYPE, \
- SlicedString::kSize, \
- sliced_ascii_string, \
- SlicedAsciiString) \
- V(EXTERNAL_STRING_TYPE, \
- ExternalTwoByteString::kSize, \
- external_string, \
- ExternalString) \
- V(EXTERNAL_ASCII_STRING_TYPE, \
- ExternalAsciiString::kSize, \
- external_ascii_string, \
- ExternalAsciiString) \
- V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \
- ExternalTwoByteString::kSize, \
- external_string_with_ascii_data, \
- ExternalStringWithAsciiData) \
- V(SHORT_EXTERNAL_STRING_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_string, \
- ShortExternalString) \
- V(SHORT_EXTERNAL_ASCII_STRING_TYPE, \
- ExternalAsciiString::kShortSize, \
- short_external_ascii_string, \
- ShortExternalAsciiString) \
- V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_string_with_ascii_data, \
- ShortExternalStringWithAsciiData) \
- \
- V(INTERNALIZED_STRING_TYPE, \
- kVariableSizeSentinel, \
- internalized_string, \
- InternalizedString) \
- V(ASCII_INTERNALIZED_STRING_TYPE, \
- kVariableSizeSentinel, \
- ascii_internalized_string, \
- AsciiInternalizedString) \
- V(CONS_INTERNALIZED_STRING_TYPE, \
- ConsString::kSize, \
- cons_internalized_string, \
- ConsInternalizedString) \
- V(CONS_ASCII_INTERNALIZED_STRING_TYPE, \
- ConsString::kSize, \
- cons_ascii_internalized_string, \
- ConsAsciiInternalizedString) \
- V(EXTERNAL_INTERNALIZED_STRING_TYPE, \
- ExternalTwoByteString::kSize, \
- external_internalized_string, \
- ExternalInternalizedString) \
- V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE, \
- ExternalAsciiString::kSize, \
- external_ascii_internalized_string, \
- ExternalAsciiInternalizedString) \
- V(EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE, \
- ExternalTwoByteString::kSize, \
- external_internalized_string_with_ascii_data, \
- ExternalInternalizedStringWithAsciiData) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_internalized_string, \
- ShortExternalInternalizedString) \
- V(SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE, \
- ExternalAsciiString::kShortSize, \
- short_external_ascii_internalized_string, \
- ShortExternalAsciiInternalizedString) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_internalized_string_with_ascii_data, \
- ShortExternalInternalizedStringWithAsciiData) \
-
-// A struct is a simple object a set of object-valued fields. Including an
-// object type in this causes the compiler to generate most of the boilerplate
-// code for the class including allocation and garbage collection routines,
-// casts and predicates. All you need to define is the class, methods and
-// object verification routines. Easy, no?
-//
-// Note that for subtle reasons related to the ordering or numerical values of
-// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
-// manually.
-#define STRUCT_LIST_ALL(V) \
- V(DECLARED_ACCESSOR_DESCRIPTOR, \
- DeclaredAccessorDescriptor, \
- declared_accessor_descriptor) \
- V(DECLARED_ACCESSOR_INFO, DeclaredAccessorInfo, declared_accessor_info) \
- V(EXECUTABLE_ACCESSOR_INFO, ExecutableAccessorInfo, executable_accessor_info)\
- V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
- V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
- V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
- V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \
- V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
- V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(SIGNATURE_INFO, SignatureInfo, signature_info) \
- V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
- V(SCRIPT, Script, script) \
- V(ALLOCATION_SITE_INFO, AllocationSiteInfo, allocation_site_info) \
- V(CODE_CACHE, CodeCache, code_cache) \
- V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache) \
- V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) \
- V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry)
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#define STRUCT_LIST_DEBUGGER(V) \
- V(DEBUG_INFO, DebugInfo, debug_info) \
- V(BREAK_POINT_INFO, BreakPointInfo, break_point_info)
-#else
-#define STRUCT_LIST_DEBUGGER(V)
-#endif
-
-#define STRUCT_LIST(V) \
- STRUCT_LIST_ALL(V) \
- STRUCT_LIST_DEBUGGER(V)
-
-// We use the full 8 bits of the instance_type field to encode heap object
-// instance types. The high-order bit (bit 7) is set if the object is not a
-// string, and cleared if it is a string.
-const uint32_t kIsNotStringMask = 0x80;
-const uint32_t kStringTag = 0x0;
-const uint32_t kNotStringTag = 0x80;
-
-// Bit 6 indicates that the object is an internalized string (if set) or not.
-// There are not enough types that the non-string types (with bit 7 set) can
-// have bit 6 set too.
-const uint32_t kIsInternalizedMask = 0x40;
-const uint32_t kNotInternalizedTag = 0x0;
-const uint32_t kInternalizedTag = 0x40;
-
-// If bit 7 is clear then bit 2 indicates whether the string consists of
-// two-byte characters or one-byte characters.
-const uint32_t kStringEncodingMask = 0x4;
-const uint32_t kTwoByteStringTag = 0x0;
-const uint32_t kOneByteStringTag = 0x4;
-
-// If bit 7 is clear, the low-order 2 bits indicate the representation
-// of the string.
-const uint32_t kStringRepresentationMask = 0x03;
-enum StringRepresentationTag {
- kSeqStringTag = 0x0,
- kConsStringTag = 0x1,
- kExternalStringTag = 0x2,
- kSlicedStringTag = 0x3
-};
-const uint32_t kIsIndirectStringMask = 0x1;
-const uint32_t kIsIndirectStringTag = 0x1;
-STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0);
-STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0);
-STATIC_ASSERT(
- (kConsStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
-STATIC_ASSERT(
- (kSlicedStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
-
-// Use this mask to distinguish between cons and slice only after making
-// sure that the string is one of the two (an indirect string).
-const uint32_t kSlicedNotConsMask = kSlicedStringTag & ~kConsStringTag;
-STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask) && kSlicedNotConsMask != 0);
-
-// If bit 7 is clear, then bit 3 indicates whether this two-byte
-// string actually contains ASCII data.
-const uint32_t kAsciiDataHintMask = 0x08;
-const uint32_t kAsciiDataHintTag = 0x08;
-
-// If bit 7 is clear and string representation indicates an external string,
-// then bit 4 indicates whether the data pointer is cached.
-const uint32_t kShortExternalStringMask = 0x10;
-const uint32_t kShortExternalStringTag = 0x10;
-
-
-// A ConsString with an empty string as the right side is a candidate
-// for being shortcut by the garbage collector unless it is internalized.
-// It's not common to have non-flat internalized strings, so we do not
-// shortcut them thereby avoiding turning internalized strings into strings.
-// See heap.cc and mark-compact.cc.
-const uint32_t kShortcutTypeMask =
- kIsNotStringMask |
- kIsInternalizedMask |
- kStringRepresentationMask;
-const uint32_t kShortcutTypeTag = kConsStringTag;
-
-
-enum InstanceType {
- // String types.
- STRING_TYPE = kTwoByteStringTag | kSeqStringTag,
- ASCII_STRING_TYPE = kOneByteStringTag | kSeqStringTag,
- CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag,
- CONS_ASCII_STRING_TYPE = kOneByteStringTag | kConsStringTag,
- SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag,
- SLICED_ASCII_STRING_TYPE = kOneByteStringTag | kSlicedStringTag,
- EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
- EXTERNAL_ASCII_STRING_TYPE = kOneByteStringTag | kExternalStringTag,
- EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
- EXTERNAL_STRING_TYPE | kAsciiDataHintTag,
- SHORT_EXTERNAL_STRING_TYPE = EXTERNAL_STRING_TYPE | kShortExternalStringTag,
- SHORT_EXTERNAL_ASCII_STRING_TYPE =
- EXTERNAL_ASCII_STRING_TYPE | kShortExternalStringTag,
- SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
- EXTERNAL_STRING_WITH_ASCII_DATA_TYPE | kShortExternalStringTag,
-
- INTERNALIZED_STRING_TYPE = STRING_TYPE | kInternalizedTag,
- ASCII_INTERNALIZED_STRING_TYPE = ASCII_STRING_TYPE | kInternalizedTag,
- CONS_INTERNALIZED_STRING_TYPE = CONS_STRING_TYPE | kInternalizedTag,
- CONS_ASCII_INTERNALIZED_STRING_TYPE =
- CONS_ASCII_STRING_TYPE | kInternalizedTag,
- EXTERNAL_INTERNALIZED_STRING_TYPE = EXTERNAL_STRING_TYPE | kInternalizedTag,
- EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE =
- EXTERNAL_ASCII_STRING_TYPE | kInternalizedTag,
- EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE =
- EXTERNAL_STRING_WITH_ASCII_DATA_TYPE | kInternalizedTag,
- SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE =
- SHORT_EXTERNAL_STRING_TYPE | kInternalizedTag,
- SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE =
- SHORT_EXTERNAL_ASCII_STRING_TYPE | kInternalizedTag,
- SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE =
- SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE | kInternalizedTag,
-
- // Non-string names
- SYMBOL_TYPE = kNotStringTag, // LAST_NAME_TYPE, FIRST_NONSTRING_TYPE
-
- // Objects allocated in their own spaces (never in new space).
- MAP_TYPE,
- CODE_TYPE,
- ODDBALL_TYPE,
- JS_GLOBAL_PROPERTY_CELL_TYPE,
-
- // "Data", objects that cannot contain non-map-word pointers to heap
- // objects.
- HEAP_NUMBER_TYPE,
- FOREIGN_TYPE,
- BYTE_ARRAY_TYPE,
- FREE_SPACE_TYPE,
- EXTERNAL_BYTE_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
- EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
- EXTERNAL_SHORT_ARRAY_TYPE,
- EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
- EXTERNAL_INT_ARRAY_TYPE,
- EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
- EXTERNAL_FLOAT_ARRAY_TYPE,
- EXTERNAL_DOUBLE_ARRAY_TYPE,
- EXTERNAL_PIXEL_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
- FIXED_DOUBLE_ARRAY_TYPE,
- FILLER_TYPE, // LAST_DATA_TYPE
-
- // Structs.
- DECLARED_ACCESSOR_DESCRIPTOR_TYPE,
- DECLARED_ACCESSOR_INFO_TYPE,
- EXECUTABLE_ACCESSOR_INFO_TYPE,
- ACCESSOR_PAIR_TYPE,
- ACCESS_CHECK_INFO_TYPE,
- INTERCEPTOR_INFO_TYPE,
- CALL_HANDLER_INFO_TYPE,
- FUNCTION_TEMPLATE_INFO_TYPE,
- OBJECT_TEMPLATE_INFO_TYPE,
- SIGNATURE_INFO_TYPE,
- TYPE_SWITCH_INFO_TYPE,
- ALLOCATION_SITE_INFO_TYPE,
- SCRIPT_TYPE,
- CODE_CACHE_TYPE,
- POLYMORPHIC_CODE_CACHE_TYPE,
- TYPE_FEEDBACK_INFO_TYPE,
- ALIASED_ARGUMENTS_ENTRY_TYPE,
- // The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
- // is defined. However as include/v8.h contain some of the instance type
- // constants always having them avoids them getting different numbers
- // depending on whether ENABLE_DEBUGGER_SUPPORT is defined or not.
- DEBUG_INFO_TYPE,
- BREAK_POINT_INFO_TYPE,
-
- FIXED_ARRAY_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
-
- JS_MESSAGE_OBJECT_TYPE,
-
- // All the following types are subtypes of JSReceiver, which corresponds to
- // objects in the JS sense. The first and the last type in this range are
- // the two forms of function. This organization enables using the same
- // compares for checking the JS_RECEIVER/SPEC_OBJECT range and the
- // NONCALLABLE_JS_OBJECT range.
- JS_FUNCTION_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY_TYPE
- JS_PROXY_TYPE, // LAST_JS_PROXY_TYPE
-
- JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
- JS_DATE_TYPE,
- JS_OBJECT_TYPE,
- JS_CONTEXT_EXTENSION_OBJECT_TYPE,
- JS_MODULE_TYPE,
- JS_GLOBAL_OBJECT_TYPE,
- JS_BUILTINS_OBJECT_TYPE,
- JS_GLOBAL_PROXY_TYPE,
- JS_ARRAY_TYPE,
- JS_SET_TYPE,
- JS_MAP_TYPE,
- JS_WEAK_MAP_TYPE,
-
- JS_REGEXP_TYPE,
-
- JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
-
- // Pseudo-types
- FIRST_TYPE = 0x0,
- LAST_TYPE = JS_FUNCTION_TYPE,
- INVALID_TYPE = FIRST_TYPE - 1,
- FIRST_NAME_TYPE = FIRST_TYPE,
- LAST_NAME_TYPE = SYMBOL_TYPE,
- FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE,
- LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE,
- FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
- // Boundaries for testing for an external array.
- FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
- LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_PIXEL_ARRAY_TYPE,
- // Boundary for promotion to old data space/old pointer space.
- LAST_DATA_TYPE = FILLER_TYPE,
- // Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
- // Note that there is no range for JSObject or JSProxy, since their subtypes
- // are not continuous in this enum! The enum ranges instead reflect the
- // external class names, where proxies are treated as either ordinary objects,
- // or functions.
- FIRST_JS_RECEIVER_TYPE = JS_FUNCTION_PROXY_TYPE,
- LAST_JS_RECEIVER_TYPE = LAST_TYPE,
- // Boundaries for testing the types represented as JSObject
- FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
- LAST_JS_OBJECT_TYPE = LAST_TYPE,
- // Boundaries for testing the types represented as JSProxy
- FIRST_JS_PROXY_TYPE = JS_FUNCTION_PROXY_TYPE,
- LAST_JS_PROXY_TYPE = JS_PROXY_TYPE,
- // Boundaries for testing whether the type is a JavaScript object.
- FIRST_SPEC_OBJECT_TYPE = FIRST_JS_RECEIVER_TYPE,
- LAST_SPEC_OBJECT_TYPE = LAST_JS_RECEIVER_TYPE,
- // Boundaries for testing the types for which typeof is "object".
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_PROXY_TYPE,
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
- // Note that the types for which typeof is "function" are not continuous.
- // Define this so that we can put assertions on discrete checks.
- NUM_OF_CALLABLE_SPEC_OBJECT_TYPES = 2
-};
-
-const int kExternalArrayTypeCount =
- LAST_EXTERNAL_ARRAY_TYPE - FIRST_EXTERNAL_ARRAY_TYPE + 1;
-
-STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
-STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
-STATIC_CHECK(ODDBALL_TYPE == Internals::kOddballType);
-STATIC_CHECK(FOREIGN_TYPE == Internals::kForeignType);
-
-
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
- V(FAST_ELEMENTS_SUB_TYPE) \
- V(DICTIONARY_ELEMENTS_SUB_TYPE) \
- V(FAST_PROPERTIES_SUB_TYPE) \
- V(DICTIONARY_PROPERTIES_SUB_TYPE) \
- V(MAP_CODE_CACHE_SUB_TYPE) \
- V(SCOPE_INFO_SUB_TYPE) \
- V(STRING_TABLE_SUB_TYPE) \
- V(DESCRIPTOR_ARRAY_SUB_TYPE) \
- V(TRANSITION_ARRAY_SUB_TYPE)
-
-enum FixedArraySubInstanceType {
-#define DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE(name) name,
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE)
-#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
- LAST_FIXED_ARRAY_SUB_TYPE = TRANSITION_ARRAY_SUB_TYPE
-};
-
-
-enum CompareResult {
- LESS = -1,
- EQUAL = 0,
- GREATER = 1,
-
- NOT_EQUAL = GREATER
-};
-
-
-#define DECL_BOOLEAN_ACCESSORS(name) \
- inline bool name(); \
- inline void set_##name(bool value); \
-
-
-#define DECL_ACCESSORS(name, type) \
- inline type* name(); \
- inline void set_##name(type* value, \
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \
-
-
-class AccessorPair;
-class DictionaryElementsAccessor;
-class ElementsAccessor;
-class Failure;
-class FixedArrayBase;
-class ObjectVisitor;
-class StringStream;
-
-struct ValueInfo : public Malloced {
- ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
- InstanceType type;
- Object* ptr;
- const char* str;
- double number;
-};
-
-
-// A template-ized version of the IsXXX functions.
-template <class C> static inline bool Is(Object* obj);
-
-#ifdef VERIFY_HEAP
-#define DECLARE_VERIFIER(Name) void Name##Verify();
-#else
-#define DECLARE_VERIFIER(Name)
-#endif
-
-#ifdef OBJECT_PRINT
-#define DECLARE_PRINTER(Name) void Name##Print(FILE* out = stdout);
-#else
-#define DECLARE_PRINTER(Name)
-#endif
-
-class MaybeObject BASE_EMBEDDED {
- public:
- inline bool IsFailure();
- inline bool IsRetryAfterGC();
- inline bool IsOutOfMemory();
- inline bool IsException();
- INLINE(bool IsTheHole());
- inline bool ToObject(Object** obj) {
- if (IsFailure()) return false;
- *obj = reinterpret_cast<Object*>(this);
- return true;
- }
- inline Failure* ToFailureUnchecked() {
- ASSERT(IsFailure());
- return reinterpret_cast<Failure*>(this);
- }
- inline Object* ToObjectUnchecked() {
- // TODO(jkummerow): Turn this back into an ASSERT when we can be certain
- // that it never fires in Release mode in the wild.
- CHECK(!IsFailure());
- return reinterpret_cast<Object*>(this);
- }
- inline Object* ToObjectChecked() {
- CHECK(!IsFailure());
- return reinterpret_cast<Object*>(this);
- }
-
- template<typename T>
- inline bool To(T** obj) {
- if (IsFailure()) return false;
- *obj = T::cast(reinterpret_cast<Object*>(this));
- return true;
- }
-
- template<typename T>
- inline bool ToHandle(Handle<T>* obj, Isolate* isolate) {
- if (IsFailure()) return false;
- *obj = handle(T::cast(reinterpret_cast<Object*>(this)), isolate);
- return true;
- }
-
-#ifdef OBJECT_PRINT
- // Prints this object with details.
- inline void Print() {
- Print(stdout);
- }
- inline void PrintLn() {
- PrintLn(stdout);
- }
- void Print(FILE* out);
- void PrintLn(FILE* out);
-#endif
-#ifdef VERIFY_HEAP
- // Verifies the object.
- void Verify();
-#endif
-};
-
-
-#define OBJECT_TYPE_LIST(V) \
- V(Smi) \
- V(HeapObject) \
- V(Number) \
-
-#define HEAP_OBJECT_TYPE_LIST(V) \
- V(HeapNumber) \
- V(Name) \
- V(UniqueName) \
- V(String) \
- V(SeqString) \
- V(ExternalString) \
- V(ConsString) \
- V(SlicedString) \
- V(ExternalTwoByteString) \
- V(ExternalAsciiString) \
- V(SeqTwoByteString) \
- V(SeqOneByteString) \
- V(InternalizedString) \
- V(Symbol) \
- \
- V(ExternalArray) \
- V(ExternalByteArray) \
- V(ExternalUnsignedByteArray) \
- V(ExternalShortArray) \
- V(ExternalUnsignedShortArray) \
- V(ExternalIntArray) \
- V(ExternalUnsignedIntArray) \
- V(ExternalFloatArray) \
- V(ExternalDoubleArray) \
- V(ExternalPixelArray) \
- V(ByteArray) \
- V(FreeSpace) \
- V(JSReceiver) \
- V(JSObject) \
- V(JSContextExtensionObject) \
- V(JSModule) \
- V(Map) \
- V(DescriptorArray) \
- V(TransitionArray) \
- V(DeoptimizationInputData) \
- V(DeoptimizationOutputData) \
- V(DependentCode) \
- V(TypeFeedbackCells) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(Context) \
- V(NativeContext) \
- V(ScopeInfo) \
- V(JSFunction) \
- V(Code) \
- V(Oddball) \
- V(SharedFunctionInfo) \
- V(JSValue) \
- V(JSDate) \
- V(JSMessageObject) \
- V(StringWrapper) \
- V(Foreign) \
- V(Boolean) \
- V(JSArray) \
- V(JSProxy) \
- V(JSFunctionProxy) \
- V(JSSet) \
- V(JSMap) \
- V(JSWeakMap) \
- V(JSRegExp) \
- V(HashTable) \
- V(Dictionary) \
- V(StringTable) \
- V(JSFunctionResultCache) \
- V(NormalizedMapCache) \
- V(CompilationCacheTable) \
- V(CodeCacheHashTable) \
- V(PolymorphicCodeCacheHashTable) \
- V(MapCache) \
- V(Primitive) \
- V(GlobalObject) \
- V(JSGlobalObject) \
- V(JSBuiltinsObject) \
- V(JSGlobalProxy) \
- V(UndetectableObject) \
- V(AccessCheckNeeded) \
- V(JSGlobalPropertyCell) \
- V(ObjectHashTable) \
-
-
-class JSReceiver;
-
-// Object is the abstract superclass for all classes in the
-// object hierarchy.
-// Object does not use any virtual functions to avoid the
-// allocation of the C++ vtable.
-// Since Smi and Failure are subclasses of Object no
-// data members can be present in Object.
-class Object : public MaybeObject {
- public:
- // Type testing.
- bool IsObject() { return true; }
-
-#define IS_TYPE_FUNCTION_DECL(type_) inline bool Is##type_();
- OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
- HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
-#undef IS_TYPE_FUNCTION_DECL
-
- inline bool IsFixedArrayBase();
- inline bool IsExternal();
- inline bool IsAccessorInfo();
-
- // Returns true if this object is an instance of the specified
- // function template.
- inline bool IsInstanceOf(FunctionTemplateInfo* type);
-
- inline bool IsStruct();
-#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) inline bool Is##Name();
- STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
-#undef DECLARE_STRUCT_PREDICATE
-
- INLINE(bool IsSpecObject());
- INLINE(bool IsSpecFunction());
-
- // Oddball testing.
- INLINE(bool IsUndefined());
- INLINE(bool IsNull());
- INLINE(bool IsTheHole()); // Shadows MaybeObject's implementation.
- INLINE(bool IsTrue());
- INLINE(bool IsFalse());
- inline bool IsArgumentsMarker();
- inline bool NonFailureIsHeapObject();
-
- // Filler objects (fillers and free space objects).
- inline bool IsFiller();
-
- // Extract the number.
- inline double Number();
- inline bool IsNaN();
-
- // Returns true if the object is of the correct type to be used as a
- // implementation of a JSObject's elements.
- inline bool HasValidElements();
-
- inline bool HasSpecificClassOf(String* name);
-
- MUST_USE_RESULT MaybeObject* ToObject(); // ECMA-262 9.9.
- Object* ToBoolean(); // ECMA-262 9.2.
-
- // Convert to a JSObject if needed.
- // native_context is used when creating wrapper object.
- MUST_USE_RESULT MaybeObject* ToObject(Context* native_context);
-
- // Converts this to a Smi if possible.
- // Failure is returned otherwise.
- MUST_USE_RESULT inline MaybeObject* ToSmi();
-
- void Lookup(String* name, LookupResult* result);
-
- // Property access.
- MUST_USE_RESULT inline MaybeObject* GetProperty(String* key);
- MUST_USE_RESULT inline MaybeObject* GetProperty(
- String* key,
- PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetPropertyWithReceiver(
- Object* receiver,
- String* key,
- PropertyAttributes* attributes);
-
- static Handle<Object> GetProperty(Handle<Object> object, Handle<String> key);
- static Handle<Object> GetProperty(Handle<Object> object,
- Handle<Object> receiver,
- LookupResult* result,
- Handle<String> key,
- PropertyAttributes* attributes);
-
- MUST_USE_RESULT MaybeObject* GetProperty(Object* receiver,
- LookupResult* result,
- String* key,
- PropertyAttributes* attributes);
-
- MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
- JSReceiver* getter);
-
- static Handle<Object> GetElement(Handle<Object> object, uint32_t index);
- MUST_USE_RESULT inline MaybeObject* GetElement(uint32_t index);
- // For use when we know that no exception can be thrown.
- inline Object* GetElementNoExceptionThrown(uint32_t index);
- MUST_USE_RESULT MaybeObject* GetElementWithReceiver(Object* receiver,
- uint32_t index);
-
- // Return the object's prototype (might be Heap::null_value()).
- Object* GetPrototype(Isolate* isolate);
-
- // Return the prototype, or the method holder for a value-like object.
- Object* GetDelegate(Isolate* isolate);
-
- // Returns the permanent hash code associated with this object depending on
- // the actual object type. Might return a failure in case no hash was
- // created yet or GC was caused by creation.
- MUST_USE_RESULT MaybeObject* GetHash(CreationFlag flag);
-
- // Checks whether this object has the same value as the given one. This
- // function is implemented according to ES5, section 9.12 and can be used
- // to implement the Harmony "egal" function.
- bool SameValue(Object* other);
-
- // Tries to convert an object to an array index. Returns true and sets
- // the output parameter if it succeeds.
- inline bool ToArrayIndex(uint32_t* index);
-
- // Returns true if this is a JSValue containing a string and the index is
- // < the length of the string. Used to implement [] on strings.
- inline bool IsStringObjectWithCharacterAt(uint32_t index);
-
-#ifdef VERIFY_HEAP
- // Verify a pointer is a valid object pointer.
- static void VerifyPointer(Object* p);
-#endif
-
- inline void VerifyApiCallResultType();
-
- // Prints this object without details.
- inline void ShortPrint() {
- ShortPrint(stdout);
- }
- void ShortPrint(FILE* out);
-
- // Prints this object without details to a message accumulator.
- void ShortPrint(StringStream* accumulator);
-
- // Casting: This cast is only needed to satisfy macros in objects-inl.h.
- static Object* cast(Object* value) { return value; }
-
- // Layout description.
- static const int kHeaderSize = 0; // Object does not take up any space.
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
-};
-
-
-// Smi represents integer Numbers that can be stored in 31 bits.
-// Smis are immediate which means they are NOT allocated in the heap.
-// The this pointer has the following format: [31 bit signed int] 0
-// For long smis it has the following format:
-// [32 bit signed int] [31 bits zero padding] 0
-// Smi stands for small integer.
-class Smi: public Object {
- public:
- // Returns the integer value.
- inline int value();
-
- // Convert a value to a Smi object.
- static inline Smi* FromInt(int value);
-
- static inline Smi* FromIntptr(intptr_t value);
-
- // Returns whether value can be represented in a Smi.
- static inline bool IsValid(intptr_t value);
-
- // Casting.
- static inline Smi* cast(Object* object);
-
- // Dispatched behavior.
- inline void SmiPrint() {
- SmiPrint(stdout);
- }
- void SmiPrint(FILE* out);
- void SmiPrint(StringStream* accumulator);
-
- DECLARE_VERIFIER(Smi)
-
- static const int kMinValue =
- (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
- static const int kMaxValue = -(kMinValue + 1);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Smi);
-};
-
-
-// Failure is used for reporting out of memory situations and
-// propagating exceptions through the runtime system. Failure objects
-// are transient and cannot occur as part of the object graph.
-//
-// Failures are a single word, encoded as follows:
-// +-------------------------+---+--+--+
-// |.........unused..........|sss|tt|11|
-// +-------------------------+---+--+--+
-// 7 6 4 32 10
-//
-//
-// The low two bits, 0-1, are the failure tag, 11. The next two bits,
-// 2-3, are a failure type tag 'tt' with possible values:
-// 00 RETRY_AFTER_GC
-// 01 EXCEPTION
-// 10 INTERNAL_ERROR
-// 11 OUT_OF_MEMORY_EXCEPTION
-//
-// The next three bits, 4-6, are an allocation space tag 'sss'. The
-// allocation space tag is 000 for all failure types except
-// RETRY_AFTER_GC. For RETRY_AFTER_GC, the possible values are the
-// allocation spaces (the encoding is found in globals.h).
-
-// Failure type tag info.
-const int kFailureTypeTagSize = 2;
-const int kFailureTypeTagMask = (1 << kFailureTypeTagSize) - 1;
-
-class Failure: public MaybeObject {
- public:
- // RuntimeStubs assumes EXCEPTION = 1 in the compiler-generated code.
- enum Type {
- RETRY_AFTER_GC = 0,
- EXCEPTION = 1, // Returning this marker tells the real exception
- // is in Isolate::pending_exception.
- INTERNAL_ERROR = 2,
- OUT_OF_MEMORY_EXCEPTION = 3
- };
-
- inline Type type() const;
-
- // Returns the space that needs to be collected for RetryAfterGC failures.
- inline AllocationSpace allocation_space() const;
-
- inline bool IsInternalError() const;
- inline bool IsOutOfMemoryException() const;
-
- static inline Failure* RetryAfterGC(AllocationSpace space);
- static inline Failure* RetryAfterGC(); // NEW_SPACE
- static inline Failure* Exception();
- static inline Failure* InternalError();
- // TODO(jkummerow): The value is temporary instrumentation. Remove it
- // when it has served its purpose.
- static inline Failure* OutOfMemoryException(intptr_t value);
- // Casting.
- static inline Failure* cast(MaybeObject* object);
-
- // Dispatched behavior.
- inline void FailurePrint() {
- FailurePrint(stdout);
- }
- void FailurePrint(FILE* out);
- void FailurePrint(StringStream* accumulator);
-
- DECLARE_VERIFIER(Failure)
-
- private:
- inline intptr_t value() const;
- static inline Failure* Construct(Type type, intptr_t value = 0);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Failure);
-};
-
-
-// Heap objects typically have a map pointer in their first word. However,
-// during GC other data (e.g. mark bits, forwarding addresses) is sometimes
-// encoded in the first word. The class MapWord is an abstraction of the
-// value in a heap object's first word.
-class MapWord BASE_EMBEDDED {
- public:
- // Normal state: the map word contains a map pointer.
-
- // Create a map word from a map pointer.
- static inline MapWord FromMap(Map* map);
-
- // View this map word as a map pointer.
- inline Map* ToMap();
-
-
- // Scavenge collection: the map word of live objects in the from space
- // contains a forwarding address (a heap object pointer in the to space).
-
- // True if this map word is a forwarding address for a scavenge
- // collection. Only valid during a scavenge collection (specifically,
- // when all map words are heap object pointers, i.e. not during a full GC).
- inline bool IsForwardingAddress();
-
- // Create a map word from a forwarding address.
- static inline MapWord FromForwardingAddress(HeapObject* object);
-
- // View this map word as a forwarding address.
- inline HeapObject* ToForwardingAddress();
-
- static inline MapWord FromRawValue(uintptr_t value) {
- return MapWord(value);
- }
-
- inline uintptr_t ToRawValue() {
- return value_;
- }
-
- private:
- // HeapObject calls the private constructor and directly reads the value.
- friend class HeapObject;
-
- explicit MapWord(uintptr_t value) : value_(value) {}
-
- uintptr_t value_;
-};
-
-
-// HeapObject is the superclass for all classes describing heap allocated
-// objects.
-class HeapObject: public Object {
- public:
- // [map]: Contains a map which contains the object's reflective
- // information.
- inline Map* map();
- inline void set_map(Map* value);
- // The no-write-barrier version. This is OK if the object is white and in
- // new space, or if the value is an immortal immutable object, like the maps
- // of primitive (non-JS) objects like strings, heap numbers etc.
- inline void set_map_no_write_barrier(Map* value);
-
- // During garbage collection, the map word of a heap object does not
- // necessarily contain a map pointer.
- inline MapWord map_word();
- inline void set_map_word(MapWord map_word);
-
- // The Heap the object was allocated in. Used also to access Isolate.
- inline Heap* GetHeap();
-
- // Convenience method to get current isolate. This method can be
- // accessed only when its result is the same as
- // Isolate::Current(), it ASSERTs this. See also comment for GetHeap.
- inline Isolate* GetIsolate();
-
- // Converts an address to a HeapObject pointer.
- static inline HeapObject* FromAddress(Address address);
-
- // Returns the address of this HeapObject.
- inline Address address();
-
- // Iterates over pointers contained in the object (including the Map)
- void Iterate(ObjectVisitor* v);
-
- // Iterates over all pointers contained in the object except the
- // first map pointer. The object type is given in the first
- // parameter. This function does not access the map pointer in the
- // object, and so is safe to call while the map pointer is modified.
- void IterateBody(InstanceType type, int object_size, ObjectVisitor* v);
-
- // Returns the heap object's size in bytes
- inline int Size();
-
- // Given a heap object's map pointer, returns the heap size in bytes
- // Useful when the map pointer field is used for other purposes.
- // GC internal.
- inline int SizeFromMap(Map* map);
-
- // Returns the field at offset in obj, as a read/write Object* reference.
- // Does no checking, and is safe to use during GC, while maps are invalid.
- // Does not invoke write barrier, so should only be assigned to
- // during marking GC.
- static inline Object** RawField(HeapObject* obj, int offset);
-
- // Casting.
- static inline HeapObject* cast(Object* obj);
-
- // Return the write barrier mode for this. Callers of this function
- // must be able to present a reference to an AssertNoAllocation
- // object as a sign that they are not going to use this function
- // from code that allocates and thus invalidates the returned write
- // barrier mode.
- inline WriteBarrierMode GetWriteBarrierMode(const AssertNoAllocation&);
-
- // Dispatched behavior.
- void HeapObjectShortPrint(StringStream* accumulator);
-#ifdef OBJECT_PRINT
- inline void HeapObjectPrint() {
- HeapObjectPrint(stdout);
- }
- void HeapObjectPrint(FILE* out);
- void PrintHeader(FILE* out, const char* id);
-#endif
- DECLARE_VERIFIER(HeapObject)
-#ifdef VERIFY_HEAP
- inline void VerifyObjectField(int offset);
- inline void VerifySmiField(int offset);
-
- // Verify a pointer is a valid HeapObject pointer that points to object
- // areas in the heap.
- static void VerifyHeapPointer(Object* p);
-#endif
-
- // Layout description.
- // First field in a heap object is map.
- static const int kMapOffset = Object::kHeaderSize;
- static const int kHeaderSize = kMapOffset + kPointerSize;
-
- STATIC_CHECK(kMapOffset == Internals::kHeapObjectMapOffset);
-
- protected:
- // helpers for calling an ObjectVisitor to iterate over pointers in the
- // half-open range [start, end) specified as integer offsets
- inline void IteratePointers(ObjectVisitor* v, int start, int end);
- // as above, for the single element at "offset"
- inline void IteratePointer(ObjectVisitor* v, int offset);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
-};
-
-
-// This class describes a body of an object of a fixed size
-// in which all pointer fields are located in the [start_offset, end_offset)
-// interval.
-template<int start_offset, int end_offset, int size>
-class FixedBodyDescriptor {
- public:
- static const int kStartOffset = start_offset;
- static const int kEndOffset = end_offset;
- static const int kSize = size;
-
- static inline void IterateBody(HeapObject* obj, ObjectVisitor* v);
-
- template<typename StaticVisitor>
- static inline void IterateBody(HeapObject* obj) {
- StaticVisitor::VisitPointers(HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, end_offset));
- }
-};
-
-
-// This class describes a body of an object of a variable size
-// in which all pointer fields are located in the [start_offset, object_size)
-// interval.
-template<int start_offset>
-class FlexibleBodyDescriptor {
- public:
- static const int kStartOffset = start_offset;
-
- static inline void IterateBody(HeapObject* obj,
- int object_size,
- ObjectVisitor* v);
-
- template<typename StaticVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size) {
- StaticVisitor::VisitPointers(HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, object_size));
- }
-};
-
-
-// The HeapNumber class describes heap allocated numbers that cannot be
-// represented in a Smi (small integer)
-class HeapNumber: public HeapObject {
- public:
- // [value]: number value.
- inline double value();
- inline void set_value(double value);
-
- // Casting.
- static inline HeapNumber* cast(Object* obj);
-
- // Dispatched behavior.
- Object* HeapNumberToBoolean();
- inline void HeapNumberPrint() {
- HeapNumberPrint(stdout);
- }
- void HeapNumberPrint(FILE* out);
- void HeapNumberPrint(StringStream* accumulator);
- DECLARE_VERIFIER(HeapNumber)
-
- inline int get_exponent();
- inline int get_sign();
-
- // Layout description.
- static const int kValueOffset = HeapObject::kHeaderSize;
- // IEEE doubles are two 32 bit words. The first is just mantissa, the second
- // is a mixture of sign, exponent and mantissa. Our current platforms are all
- // little endian apart from non-EABI arm which is little endian with big
- // endian floating point word ordering!
- static const int kMantissaOffset = kValueOffset;
- static const int kExponentOffset = kValueOffset + 4;
-
- static const int kSize = kValueOffset + kDoubleSize;
- static const uint32_t kSignMask = 0x80000000u;
- static const uint32_t kExponentMask = 0x7ff00000u;
- static const uint32_t kMantissaMask = 0xfffffu;
- static const int kMantissaBits = 52;
- static const int kExponentBits = 11;
- static const int kExponentBias = 1023;
- static const int kExponentShift = 20;
- static const int kMantissaBitsInTopWord = 20;
- static const int kNonMantissaBitsInTopWord = 12;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber);
-};
-
-
-enum EnsureElementsMode {
- DONT_ALLOW_DOUBLE_ELEMENTS,
- ALLOW_COPIED_DOUBLE_ELEMENTS,
- ALLOW_CONVERTED_DOUBLE_ELEMENTS
-};
-
-
-// Indicates whether a property should be set or (re)defined. Setting of a
-// property causes attributes to remain unchanged, writability to be checked
-// and callbacks to be called. Defining of a property causes attributes to
-// be updated and callbacks to be overridden.
-enum SetPropertyMode {
- SET_PROPERTY,
- DEFINE_PROPERTY
-};
-
-
-// Indicator for one component of an AccessorPair.
-enum AccessorComponent {
- ACCESSOR_GETTER,
- ACCESSOR_SETTER
-};
-
-
-// JSReceiver includes types on which properties can be defined, i.e.,
-// JSObject and JSProxy.
-class JSReceiver: public HeapObject {
- public:
- enum DeleteMode {
- NORMAL_DELETION,
- STRICT_DELETION,
- FORCE_DELETION
- };
-
- // A non-keyed store is of the form a.x = foo or a["x"] = foo whereas
- // a keyed store is of the form a[expression] = foo.
- enum StoreFromKeyed {
- MAY_BE_STORE_FROM_KEYED,
- CERTAINLY_NOT_STORE_FROM_KEYED
- };
-
- // Internal properties (e.g. the hidden properties dictionary) might
- // be added even though the receiver is non-extensible.
- enum ExtensibilityCheck {
- PERFORM_EXTENSIBILITY_CHECK,
- OMIT_EXTENSIBILITY_CHECK
- };
-
- // Casting.
- static inline JSReceiver* cast(Object* obj);
-
- static Handle<Object> SetProperty(Handle<JSReceiver> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool skip_fallback_interceptor = false);
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetProperty(
- String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED,
- bool skip_fallback_interceptor = false);
- MUST_USE_RESULT MaybeObject* SetProperty(
- LookupResult* result,
- String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
- Object* value);
-
- MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
-
- // Set the index'th array element.
- // Can cause GC, or return failure if GC is required.
- MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype);
-
- // Tests for the fast common case for property enumeration.
- bool IsSimpleEnum();
-
- // Returns the class name ([[Class]] property in the specification).
- String* class_name();
-
- // Returns the constructor name (the name (possibly, inferred name) of the
- // function that was used to instantiate the object).
- String* constructor_name();
-
- inline PropertyAttributes GetPropertyAttribute(String* name);
- PropertyAttributes GetPropertyAttributeWithReceiver(JSReceiver* receiver,
- String* name);
- PropertyAttributes GetLocalPropertyAttribute(String* name);
-
- inline PropertyAttributes GetElementAttribute(uint32_t index);
- inline PropertyAttributes GetLocalElementAttribute(uint32_t index);
-
- // Can cause a GC.
- inline bool HasProperty(String* name);
- inline bool HasLocalProperty(String* name);
- inline bool HasElement(uint32_t index);
- inline bool HasLocalElement(uint32_t index);
-
- // Return the object's prototype (might be Heap::null_value()).
- inline Object* GetPrototype();
-
- // Return the constructor function (may be Heap::null_value()).
- inline Object* GetConstructor();
-
- // Set the object's prototype (only JSReceiver and null are allowed).
- MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
- bool skip_hidden_prototypes);
-
- // Retrieves a permanent object identity hash code. The undefined value might
- // be returned in case no hash was created yet and OMIT_CREATION was used.
- inline MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
-
- // Lookup a property. If found, the result is valid and has
- // detailed information.
- void LocalLookup(String* name, LookupResult* result,
- bool search_hidden_prototypes = false,
- bool skip_fallback_interceptor = false);
- void Lookup(String* name, LookupResult* result,
- bool skip_fallback_interceptor = false);
-
- protected:
- Smi* GenerateIdentityHash();
-
- private:
- PropertyAttributes GetPropertyAttributeForResult(JSReceiver* receiver,
- LookupResult* result,
- String* name,
- bool continue_search);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
-};
-
-// The JSObject describes real heap allocated JavaScript objects with
-// properties.
-// Note that the map of JSObject changes during execution to enable inline
-// caching.
-class JSObject: public JSReceiver {
- public:
- // [properties]: Backing storage for properties.
- // properties is a FixedArray in the fast case and a Dictionary in the
- // slow case.
- DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
- inline void initialize_properties();
- inline bool HasFastProperties();
- inline StringDictionary* property_dictionary(); // Gets slow properties.
-
- // [elements]: The elements (properties with names that are integers).
- //
- // Elements can be in two general modes: fast and slow. Each mode
- // corrensponds to a set of object representations of elements that
- // have something in common.
- //
- // In the fast mode elements is a FixedArray and so each element can
- // be quickly accessed. This fact is used in the generated code. The
- // elements array can have one of three maps in this mode:
- // fixed_array_map, non_strict_arguments_elements_map or
- // fixed_cow_array_map (for copy-on-write arrays). In the latter case
- // the elements array may be shared by a few objects and so before
- // writing to any element the array must be copied. Use
- // EnsureWritableFastElements in this case.
- //
- // In the slow mode the elements is either a NumberDictionary, an
- // ExternalArray, or a FixedArray parameter map for a (non-strict)
- // arguments object.
- DECL_ACCESSORS(elements, FixedArrayBase)
- inline void initialize_elements();
- MUST_USE_RESULT inline MaybeObject* ResetElements();
- inline ElementsKind GetElementsKind();
- inline ElementsAccessor* GetElementsAccessor();
- // Returns true if an object has elements of FAST_SMI_ELEMENTS ElementsKind.
- inline bool HasFastSmiElements();
- // Returns true if an object has elements of FAST_ELEMENTS ElementsKind.
- inline bool HasFastObjectElements();
- // Returns true if an object has elements of FAST_ELEMENTS or
- // FAST_SMI_ONLY_ELEMENTS.
- inline bool HasFastSmiOrObjectElements();
- // Returns true if an object has any of the fast elements kinds.
- inline bool HasFastElements();
- // Returns true if an object has elements of FAST_DOUBLE_ELEMENTS
- // ElementsKind.
- inline bool HasFastDoubleElements();
- // Returns true if an object has elements of FAST_HOLEY_*_ELEMENTS
- // ElementsKind.
- inline bool HasFastHoleyElements();
- inline bool HasNonStrictArgumentsElements();
- inline bool HasDictionaryElements();
- inline bool HasExternalPixelElements();
- inline bool HasExternalArrayElements();
- inline bool HasExternalByteElements();
- inline bool HasExternalUnsignedByteElements();
- inline bool HasExternalShortElements();
- inline bool HasExternalUnsignedShortElements();
- inline bool HasExternalIntElements();
- inline bool HasExternalUnsignedIntElements();
- inline bool HasExternalFloatElements();
- inline bool HasExternalDoubleElements();
- bool HasFastArgumentsElements();
- bool HasDictionaryArgumentsElements();
- inline SeededNumberDictionary* element_dictionary(); // Gets slow elements.
-
- inline void set_map_and_elements(
- Map* map,
- FixedArrayBase* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- // Requires: HasFastElements().
- MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
-
- // Collects elements starting at index 0.
- // Undefined values are placed after non-undefined values.
- // Returns the number of non-undefined values.
- MUST_USE_RESULT MaybeObject* PrepareElementsForSort(uint32_t limit);
- // As PrepareElementsForSort, but only on objects where elements is
- // a dictionary, and it will stay a dictionary.
- MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
-
- MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
- Object* structure,
- String* name);
-
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
- String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck(
- LookupResult* result,
- String* name,
- Object* value,
- bool check_prototype,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithCallback(
- Object* structure,
- String* name,
- Object* value,
- JSObject* holder,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyPostInterceptor(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- ExtensibilityCheck extensibility_check);
-
- static Handle<Object> SetLocalPropertyIgnoreAttributes(
- Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes);
-
- // Try to follow an existing transition to a field with attributes NONE. The
- // return value indicates whether the transition was successful.
- static inline bool TryTransitionToField(Handle<JSObject> object,
- Handle<String> key);
-
- inline int LastAddedFieldIndex();
-
- // Extend the receiver with a single fast property appeared first in the
- // passed map. This also extends the property backing store if necessary.
- static void AddFastPropertyUsingMap(Handle<JSObject> object, Handle<Map> map);
- inline MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* map);
-
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
- String* key,
- Object* value,
- PropertyAttributes attributes);
-
- // Retrieve a value in a normalized object given a lookup result.
- // Handles the special representation of JS global objects.
- Object* GetNormalizedProperty(LookupResult* result);
-
- // Sets the property value in a normalized object given a lookup result.
- // Handles the special representation of JS global objects.
- Object* SetNormalizedProperty(LookupResult* result, Object* value);
-
- // Sets the property value in a normalized object given (key, value, details).
- // Handles the special representation of JS global objects.
- static Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyDetails details);
-
- MUST_USE_RESULT MaybeObject* SetNormalizedProperty(String* name,
- Object* value,
- PropertyDetails details);
-
- // Deletes the named property in a normalized object.
- MUST_USE_RESULT MaybeObject* DeleteNormalizedProperty(String* name,
- DeleteMode mode);
-
- MUST_USE_RESULT MaybeObject* OptimizeAsPrototype();
-
- // Retrieve interceptors.
- InterceptorInfo* GetNamedInterceptor();
- InterceptorInfo* GetIndexedInterceptor();
-
- // Used from JSReceiver.
- PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
- String* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
- String* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- String* name,
- bool continue_search);
- PropertyAttributes GetElementAttributeWithReceiver(JSReceiver* receiver,
- uint32_t index,
- bool continue_search);
-
- static void DefineAccessor(Handle<JSObject> object,
- Handle<String> name,
- Handle<Object> getter,
- Handle<Object> setter,
- PropertyAttributes attributes);
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes);
- // Try to define a single accessor paying attention to map transitions.
- // Returns a JavaScript null if this was not possible and we have to use the
- // slow case. Note that we can fail due to allocations, too.
- MUST_USE_RESULT MaybeObject* DefineFastAccessor(
- String* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes);
- Object* LookupAccessor(String* name, AccessorComponent component);
-
- MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info);
-
- // Used from Object::GetProperty().
- MUST_USE_RESULT MaybeObject* GetPropertyWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- String* name,
- PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetPropertyWithInterceptor(
- Object* receiver,
- String* name,
- PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetPropertyPostInterceptor(
- Object* receiver,
- String* name,
- PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetLocalPropertyPostInterceptor(
- Object* receiver,
- String* name,
- PropertyAttributes* attributes);
-
- // Returns true if this is an instance of an api function and has
- // been modified since it was created. May give false positives.
- bool IsDirty();
-
- // If the receiver is a JSGlobalProxy this method will return its prototype,
- // otherwise the result is the receiver itself.
- inline Object* BypassGlobalProxy();
-
- // Accessors for hidden properties object.
- //
- // Hidden properties are not local properties of the object itself.
- // Instead they are stored in an auxiliary structure kept as a local
- // property with a special name Heap::hidden_string(). But if the
- // receiver is a JSGlobalProxy then the auxiliary object is a property
- // of its prototype, and if it's a detached proxy, then you can't have
- // hidden properties.
-
- // Sets a hidden property on this object. Returns this object if successful,
- // undefined if called on a detached proxy.
- static Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
- Handle<String> key,
- Handle<Object> value);
- // Returns a failure if a GC is required.
- MUST_USE_RESULT MaybeObject* SetHiddenProperty(String* key, Object* value);
- // Gets the value of a hidden property with the given key. Returns undefined
- // if the property doesn't exist (or if called on a detached proxy),
- // otherwise returns the value set for the key.
- Object* GetHiddenProperty(String* key);
- // Deletes a hidden property. Deleting a non-existing property is
- // considered successful.
- void DeleteHiddenProperty(String* key);
- // Returns true if the object has a property with the hidden string as name.
- bool HasHiddenProperties();
-
- static int GetIdentityHash(Handle<JSObject> obj);
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
- MUST_USE_RESULT MaybeObject* SetIdentityHash(Smi* hash, CreationFlag flag);
-
- static Handle<Object> DeleteProperty(Handle<JSObject> obj,
- Handle<String> name);
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
-
- static Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
- MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
-
- inline void ValidateElements();
-
- // Makes sure that this object can contain HeapObject as elements.
- MUST_USE_RESULT inline MaybeObject* EnsureCanContainHeapObjectElements();
-
- // Makes sure that this object can contain the specified elements.
- MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
- Object** elements,
- uint32_t count,
- EnsureElementsMode mode);
- MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
- FixedArrayBase* elements,
- uint32_t length,
- EnsureElementsMode mode);
- MUST_USE_RESULT MaybeObject* EnsureCanContainElements(
- Arguments* arguments,
- uint32_t first_arg,
- uint32_t arg_count,
- EnsureElementsMode mode);
-
- // Do we want to keep the elements in fast case when increasing the
- // capacity?
- bool ShouldConvertToSlowElements(int new_capacity);
- // Returns true if the backing storage for the slow-case elements of
- // this object takes up nearly as much space as a fast-case backing
- // storage would. In that case the JSObject should have fast
- // elements.
- bool ShouldConvertToFastElements();
- // Returns true if the elements of JSObject contains only values that can be
- // represented in a FixedDoubleArray and has at least one value that can only
- // be represented as a double and not a Smi.
- bool ShouldConvertToFastDoubleElements(bool* has_smi_only_elements);
-
- // Computes the new capacity when expanding the elements of a JSObject.
- static int NewElementsCapacity(int old_capacity) {
- // (old_capacity + 50%) + 16
- return old_capacity + (old_capacity >> 1) + 16;
- }
-
- PropertyType GetLocalPropertyType(String* name);
- PropertyType GetLocalElementType(uint32_t index);
-
- // These methods do not perform access checks!
- AccessorPair* GetLocalPropertyAccessorPair(String* name);
- AccessorPair* GetLocalElementAccessorPair(uint32_t index);
-
- MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype);
-
- MUST_USE_RESULT MaybeObject* SetDictionaryElement(
- uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode = SET_PROPERTY);
-
- MUST_USE_RESULT MaybeObject* SetFastDoubleElement(
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype = true);
-
- static Handle<Object> SetOwnElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode);
-
- // Empty handle is returned if the element cannot be set to the given value.
- static Handle<Object> SetElement(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode,
- SetPropertyMode set_mode = SET_PROPERTY);
-
- // A Failure object is returned if GC is needed.
- MUST_USE_RESULT MaybeObject* SetElement(
- uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype = true,
- SetPropertyMode set_mode = SET_PROPERTY);
-
- // Returns the index'th element.
- // The undefined object if index is out of bounds.
- MUST_USE_RESULT MaybeObject* GetElementWithInterceptor(Object* receiver,
- uint32_t index);
-
- enum SetFastElementsCapacitySmiMode {
- kAllowSmiElements,
- kForceSmiElements,
- kDontAllowSmiElements
- };
-
- // Replace the elements' backing store with fast elements of the given
- // capacity. Update the length for JSArrays. Returns the new backing
- // store.
- MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(
- int capacity,
- int length,
- SetFastElementsCapacitySmiMode smi_mode);
- MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
- int capacity,
- int length);
-
- // Lookup interceptors are used for handling properties controlled by host
- // objects.
- inline bool HasNamedInterceptor();
- inline bool HasIndexedInterceptor();
-
- // Support functions for v8 api (needed for correct interceptor behavior).
- bool HasRealNamedProperty(String* key);
- bool HasRealElementProperty(uint32_t index);
- bool HasRealNamedCallbackProperty(String* key);
-
- // Get the header size for a JSObject. Used to compute the index of
- // internal fields as well as the number of internal fields.
- inline int GetHeaderSize();
-
- inline int GetInternalFieldCount();
- inline int GetInternalFieldOffset(int index);
- inline Object* GetInternalField(int index);
- inline void SetInternalField(int index, Object* value);
- inline void SetInternalField(int index, Smi* value);
-
- inline void SetExternalResourceObject(Object* value);
- inline Object *GetExternalResourceObject();
-
- // The following lookup functions skip interceptors.
- void LocalLookupRealNamedProperty(String* name, LookupResult* result);
- void LookupRealNamedProperty(String* name, LookupResult* result);
- void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
- MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
- uint32_t index, Object* value, bool* found, StrictModeFlag strict_mode);
- void LookupCallbackProperty(String* name, LookupResult* result);
-
- // Returns the number of properties on this object filtering out properties
- // with the specified attributes (ignoring interceptors).
- int NumberOfLocalProperties(PropertyAttributes filter = NONE);
- // Fill in details for properties into storage starting at the specified
- // index.
- void GetLocalPropertyNames(FixedArray* storage, int index);
-
- // Returns the number of properties on this object filtering out properties
- // with the specified attributes (ignoring interceptors).
- int NumberOfLocalElements(PropertyAttributes filter);
- // Returns the number of enumerable elements (ignoring interceptors).
- int NumberOfEnumElements();
- // Returns the number of elements on this object filtering out elements
- // with the specified attributes (ignoring interceptors).
- int GetLocalElementKeys(FixedArray* storage, PropertyAttributes filter);
- // Count and fill in the enumerable elements into storage.
- // (storage->length() == NumberOfEnumElements()).
- // If storage is NULL, will count the elements without adding
- // them to any storage.
- // Returns the number of enumerable elements.
- int GetEnumElementKeys(FixedArray* storage);
-
- // Add a property to a fast-case object using a map transition to
- // new_map.
- MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* new_map,
- String* name,
- Object* value,
- int field_index);
-
- // Add a constant function property to a fast-case object.
- // This leaves a CONSTANT_TRANSITION in the old map, and
- // if it is called on a second object with this map, a
- // normal property is added instead, with a map transition.
- // This avoids the creation of many maps with the same constant
- // function, all orphaned.
- MUST_USE_RESULT MaybeObject* AddConstantFunctionProperty(
- String* name,
- JSFunction* function,
- PropertyAttributes attributes);
-
- MUST_USE_RESULT MaybeObject* ReplaceSlowProperty(
- String* name,
- Object* value,
- PropertyAttributes attributes);
-
- // Returns a new map with all transitions dropped from the object's current
- // map and the ElementsKind set.
- static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
- ElementsKind to_kind);
- inline MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
- Isolate* isolate,
- ElementsKind elements_kind);
- MUST_USE_RESULT MaybeObject* GetElementsTransitionMapSlow(
- ElementsKind elements_kind);
-
- static Handle<Object> TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind);
-
- MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
- MUST_USE_RESULT MaybeObject* UpdateAllocationSiteInfo(
- ElementsKind to_kind);
-
- // Replaces an existing transition with a transition to a map with a FIELD.
- MUST_USE_RESULT MaybeObject* ConvertTransitionToMapTransition(
- int transition_index,
- String* name,
- Object* new_value,
- PropertyAttributes attributes);
-
- // Converts a descriptor of any other type to a real field, backed by the
- // properties array.
- MUST_USE_RESULT MaybeObject* ConvertDescriptorToField(
- String* name,
- Object* new_value,
- PropertyAttributes attributes);
-
- // Add a property to a fast-case object.
- MUST_USE_RESULT MaybeObject* AddFastProperty(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
-
- // Add a property to a slow-case object.
- MUST_USE_RESULT MaybeObject* AddSlowProperty(String* name,
- Object* value,
- PropertyAttributes attributes);
-
- // Add a property to an object. May cause GC.
- MUST_USE_RESULT MaybeObject* AddProperty(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
-
- // Convert the object to use the canonical dictionary
- // representation. If the object is expected to have additional properties
- // added this number can be indicated to have the backing store allocated to
- // an initial capacity for holding these properties.
- static void NormalizeProperties(Handle<JSObject> object,
- PropertyNormalizationMode mode,
- int expected_additional_properties);
-
- MUST_USE_RESULT MaybeObject* NormalizeProperties(
- PropertyNormalizationMode mode,
- int expected_additional_properties);
-
- // Convert and update the elements backing store to be a
- // SeededNumberDictionary dictionary. Returns the backing after conversion.
- static Handle<SeededNumberDictionary> NormalizeElements(
- Handle<JSObject> object);
-
- MUST_USE_RESULT MaybeObject* NormalizeElements();
-
- static void UpdateMapCodeCache(Handle<JSObject> object,
- Handle<String> name,
- Handle<Code> code);
-
- MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(String* name, Code* code);
-
- // Transform slow named properties to fast variants.
- // Returns failure if allocation failed.
- static void TransformToFastProperties(Handle<JSObject> object,
- int unused_property_fields);
-
- MUST_USE_RESULT MaybeObject* TransformToFastProperties(
- int unused_property_fields);
-
- // Access fast-case object properties at index.
- inline Object* FastPropertyAt(int index);
- inline Object* FastPropertyAtPut(int index, Object* value);
-
- // Access to in object properties.
- inline int GetInObjectPropertyOffset(int index);
- inline Object* InObjectPropertyAt(int index);
- inline Object* InObjectPropertyAtPut(int index,
- Object* value,
- WriteBarrierMode mode
- = UPDATE_WRITE_BARRIER);
-
- // Initializes the body after properties slot, properties slot is
- // initialized by set_properties. Fill the pre-allocated fields with
- // pre_allocated_value and the rest with filler_value.
- // Note: this call does not update write barrier, the caller is responsible
- // to ensure that |filler_value| can be collected without WB here.
- inline void InitializeBody(Map* map,
- Object* pre_allocated_value,
- Object* filler_value);
-
- // Check whether this object references another object
- bool ReferencesObject(Object* obj);
-
- // Casting.
- static inline JSObject* cast(Object* obj);
-
- // Disalow further properties to be added to the object.
- static Handle<Object> PreventExtensions(Handle<JSObject> object);
- MUST_USE_RESULT MaybeObject* PreventExtensions();
-
-
- // Dispatched behavior.
- void JSObjectShortPrint(StringStream* accumulator);
- DECLARE_PRINTER(JSObject)
- DECLARE_VERIFIER(JSObject)
-#ifdef OBJECT_PRINT
- inline void PrintProperties() {
- PrintProperties(stdout);
- }
- void PrintProperties(FILE* out);
-
- inline void PrintElements() {
- PrintElements(stdout);
- }
- void PrintElements(FILE* out);
- inline void PrintTransitions() {
- PrintTransitions(stdout);
- }
- void PrintTransitions(FILE* out);
-#endif
-
- void PrintElementsTransition(
- FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
- ElementsKind to_kind, FixedArrayBase* to_elements);
-
-#ifdef DEBUG
- // Structure for collecting spill information about JSObjects.
- class SpillInformation {
- public:
- void Clear();
- void Print();
- int number_of_objects_;
- int number_of_objects_with_fast_properties_;
- int number_of_objects_with_fast_elements_;
- int number_of_fast_used_fields_;
- int number_of_fast_unused_fields_;
- int number_of_slow_used_properties_;
- int number_of_slow_unused_properties_;
- int number_of_fast_used_elements_;
- int number_of_fast_unused_elements_;
- int number_of_slow_used_elements_;
- int number_of_slow_unused_elements_;
- };
-
- void IncrementSpillStatistics(SpillInformation* info);
-#endif
- Object* SlowReverseLookup(Object* value);
-
- // Maximal number of fast properties for the JSObject. Used to
- // restrict the number of map transitions to avoid an explosion in
- // the number of maps for objects used as dictionaries.
- inline bool TooManyFastProperties(int properties, StoreFromKeyed store_mode);
-
- // Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
- // Also maximal value of JSArray's length property.
- static const uint32_t kMaxElementCount = 0xffffffffu;
-
- // Constants for heuristics controlling conversion of fast elements
- // to slow elements.
-
- // Maximal gap that can be introduced by adding an element beyond
- // the current elements length.
- static const uint32_t kMaxGap = 1024;
-
- // Maximal length of fast elements array that won't be checked for
- // being dense enough on expansion.
- static const int kMaxUncheckedFastElementsLength = 5000;
-
- // Same as above but for old arrays. This limit is more strict. We
- // don't want to be wasteful with long lived objects.
- static const int kMaxUncheckedOldFastElementsLength = 500;
-
- static const int kInitialMaxFastElementArray = 100000;
- static const int kFastPropertiesSoftLimit = 12;
- static const int kMaxFastProperties = 64;
- static const int kMaxInstanceSize = 255 * kPointerSize;
- // When extending the backing storage for property values, we increase
- // its size by more than the 1 entry necessary, so sequentially adding fields
- // to the same object requires fewer allocations and copies.
- static const int kFieldsAdded = 3;
-
- // Layout description.
- static const int kPropertiesOffset = HeapObject::kHeaderSize;
- static const int kElementsOffset = kPropertiesOffset + kPointerSize;
- static const int kHeaderSize = kElementsOffset + kPointerSize;
-
- STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize);
-
- class BodyDescriptor : public FlexibleBodyDescriptor<kPropertiesOffset> {
- public:
- static inline int SizeOf(Map* map, HeapObject* object);
- };
-
- // Enqueue change record for Object.observe. May cause GC.
- static void EnqueueChangeRecord(Handle<JSObject> object,
- const char* type,
- Handle<String> name,
- Handle<Object> old_value);
-
- // Deliver change records to observers. May cause GC.
- static void DeliverChangeRecords(Isolate* isolate);
-
- private:
- friend class DictionaryElementsAccessor;
-
- MUST_USE_RESULT MaybeObject* GetElementWithCallback(Object* receiver,
- Object* structure,
- uint32_t index,
- Object* holder);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithInterceptor(
- JSReceiver* receiver,
- uint32_t index,
- bool continue_search);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithoutInterceptor(
- JSReceiver* receiver,
- uint32_t index,
- bool continue_search);
- MUST_USE_RESULT MaybeObject* SetElementWithCallback(
- Object* structure,
- uint32_t index,
- Object* value,
- JSObject* holder,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
- uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode);
- MUST_USE_RESULT MaybeObject* SetElementWithoutInterceptor(
- uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode);
-
- // Searches the prototype chain for property 'name'. If it is found and
- // has a setter, invoke it and set '*done' to true. If it is found and is
- // read-only, reject and set '*done' to true. Otherwise, set '*done' to
- // false. Can cause GC and can return a failure result with '*done==true'.
- MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypes(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool* done);
-
- MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
- DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeletePropertyWithInterceptor(String* name);
-
- MUST_USE_RESULT MaybeObject* DeleteElementWithInterceptor(uint32_t index);
-
- MUST_USE_RESULT MaybeObject* DeleteFastElement(uint32_t index);
- MUST_USE_RESULT MaybeObject* DeleteDictionaryElement(uint32_t index,
- DeleteMode mode);
-
- bool ReferencesObjectFromElements(FixedArray* elements,
- ElementsKind kind,
- Object* object);
-
- // Returns true if most of the elements backing storage is used.
- bool HasDenseElements();
-
- // Gets the current elements capacity and the number of used elements.
- void GetElementsCapacityAndUsage(int* capacity, int* used);
-
- bool CanSetCallback(String* name);
- MUST_USE_RESULT MaybeObject* SetElementCallback(
- uint32_t index,
- Object* structure,
- PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* SetPropertyCallback(
- String* name,
- Object* structure,
- PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* DefineElementAccessor(
- uint32_t index,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* CreateAccessorPairFor(String* name);
- MUST_USE_RESULT MaybeObject* DefinePropertyAccessor(
- String* name,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes);
-
-
- enum InitializeHiddenProperties {
- CREATE_NEW_IF_ABSENT,
- ONLY_RETURN_INLINE_VALUE
- };
-
- // If create_if_absent is true, return the hash table backing store
- // for hidden properties. If there is no backing store, allocate one.
- // If create_if_absent is false, return the hash table backing store
- // or the inline stored identity hash, whatever is found.
- MUST_USE_RESULT MaybeObject* GetHiddenPropertiesHashTable(
- InitializeHiddenProperties init_option);
- // Set the hidden property backing store to either a hash table or
- // the inline-stored identity hash.
- MUST_USE_RESULT MaybeObject* SetHiddenPropertiesHashTable(
- Object* value);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
-};
-
-
-// Common superclass for FixedArrays that allow implementations to share
-// common accessors and some code paths.
-class FixedArrayBase: public HeapObject {
- public:
- // [length]: length of the array.
- inline int length();
- inline void set_length(int value);
-
- inline static FixedArrayBase* cast(Object* object);
-
- // Layout description.
- // Length is smi tagged when it is stored.
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kLengthOffset + kPointerSize;
-};
-
-
-class FixedDoubleArray;
-class IncrementalMarking;
-
-
-// FixedArray describes fixed-sized arrays with element type Object*.
-class FixedArray: public FixedArrayBase {
- public:
- // Setter and getter for elements.
- inline Object* get(int index);
- // Setter that uses write barrier.
- inline void set(int index, Object* value);
- inline bool is_the_hole(int index);
-
- // Setter that doesn't need write barrier).
- inline void set(int index, Smi* value);
- // Setter with explicit barrier mode.
- inline void set(int index, Object* value, WriteBarrierMode mode);
-
- // Setters for frequently used oddballs located in old space.
- inline void set_undefined(int index);
- // TODO(isolates): duplicate.
- inline void set_undefined(Heap* heap, int index);
- inline void set_null(int index);
- // TODO(isolates): duplicate.
- inline void set_null(Heap* heap, int index);
- inline void set_the_hole(int index);
-
- // Setters with less debug checks for the GC to use.
- inline void set_unchecked(int index, Smi* value);
- inline void set_null_unchecked(Heap* heap, int index);
- inline void set_unchecked(Heap* heap, int index, Object* value,
- WriteBarrierMode mode);
-
- inline Object** GetFirstElementAddress();
- inline bool ContainsOnlySmisOrHoles();
-
- // Gives access to raw memory which stores the array's data.
- inline Object** data_start();
-
- // Copy operations.
- MUST_USE_RESULT inline MaybeObject* Copy();
- MUST_USE_RESULT MaybeObject* CopySize(int new_length);
-
- // Add the elements of a JSArray to this FixedArray.
- MUST_USE_RESULT MaybeObject* AddKeysFromJSArray(JSArray* array);
-
- // Compute the union of this and other.
- MUST_USE_RESULT MaybeObject* UnionOfKeys(FixedArray* other);
-
- // Copy a sub array from the receiver to dest.
- void CopyTo(int pos, FixedArray* dest, int dest_pos, int len);
-
- // Garbage collection support.
- static int SizeFor(int length) { return kHeaderSize + length * kPointerSize; }
-
- // Code Generation support.
- static int OffsetOfElementAt(int index) { return SizeFor(index); }
-
- // Casting.
- static inline FixedArray* cast(Object* obj);
-
- // Maximal allowed size, in bytes, of a single FixedArray.
- // Prevents overflowing size computations, as well as extreme memory
- // consumption.
- static const int kMaxSize = 128 * MB * kPointerSize;
- // Maximally allowed length of a FixedArray.
- static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
-
- // Dispatched behavior.
- DECLARE_PRINTER(FixedArray)
- DECLARE_VERIFIER(FixedArray)
-#ifdef DEBUG
- // Checks if two FixedArrays have identical contents.
- bool IsEqualTo(FixedArray* other);
-#endif
-
- // Swap two elements in a pair of arrays. If this array and the
- // numbers array are the same object, the elements are only swapped
- // once.
- void SwapPairs(FixedArray* numbers, int i, int j);
-
- // Sort prefix of this array and the numbers array as pairs wrt. the
- // numbers. If the numbers array and the this array are the same
- // object, the prefix of this array is sorted.
- void SortPairs(FixedArray* numbers, uint32_t len);
-
- class BodyDescriptor : public FlexibleBodyDescriptor<kHeaderSize> {
- public:
- static inline int SizeOf(Map* map, HeapObject* object) {
- return SizeFor(reinterpret_cast<FixedArray*>(object)->length());
- }
- };
-
- protected:
- // Set operation on FixedArray without using write barriers. Can
- // only be used for storing old space objects or smis.
- static inline void NoWriteBarrierSet(FixedArray* array,
- int index,
- Object* value);
-
- // Set operation on FixedArray without incremental write barrier. Can
- // only be used if the object is guaranteed to be white (whiteness witness
- // is present).
- static inline void NoIncrementalWriteBarrierSet(FixedArray* array,
- int index,
- Object* value);
-
- private:
- STATIC_CHECK(kHeaderSize == Internals::kFixedArrayHeaderSize);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
-};
-
-
-// FixedDoubleArray describes fixed-sized arrays with element type double.
-class FixedDoubleArray: public FixedArrayBase {
- public:
- // Setter and getter for elements.
- inline double get_scalar(int index);
- inline int64_t get_representation(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
- inline void set(int index, double value);
- inline void set_the_hole(int index);
-
- // Checking for the hole.
- inline bool is_the_hole(int index);
-
- // Copy operations
- MUST_USE_RESULT inline MaybeObject* Copy();
-
- // Garbage collection support.
- inline static int SizeFor(int length) {
- return kHeaderSize + length * kDoubleSize;
- }
-
- // Gives access to raw memory which stores the array's data.
- inline double* data_start();
-
- // Code Generation support.
- static int OffsetOfElementAt(int index) { return SizeFor(index); }
-
- inline static bool is_the_hole_nan(double value);
- inline static double hole_nan_as_double();
- inline static double canonical_not_the_hole_nan_as_double();
-
- // Casting.
- static inline FixedDoubleArray* cast(Object* obj);
-
- // Maximal allowed size, in bytes, of a single FixedDoubleArray.
- // Prevents overflowing size computations, as well as extreme memory
- // consumption.
- static const int kMaxSize = 512 * MB;
- // Maximally allowed length of a FixedArray.
- static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
-
- // Dispatched behavior.
- DECLARE_PRINTER(FixedDoubleArray)
- DECLARE_VERIFIER(FixedDoubleArray)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
-};
-
-
-// DescriptorArrays are fixed arrays used to hold instance descriptors.
-// The format of the these objects is:
-// [0]: Number of descriptors
-// [1]: Either Smi(0) if uninitialized, or a pointer to small fixed array:
-// [0]: pointer to fixed array with enum cache
-// [1]: either Smi(0) or pointer to fixed array with indices
-// [2]: first key
-// [2 + number of descriptors * kDescriptorSize]: start of slack
-class DescriptorArray: public FixedArray {
- public:
- // WhitenessWitness is used to prove that a descriptor array is white
- // (unmarked), so incremental write barriers can be skipped because the
- // marking invariant cannot be broken and slots pointing into evacuation
- // candidates will be discovered when the object is scanned. A witness is
- // always stack-allocated right after creating an array. By allocating a
- // witness, incremental marking is globally disabled. The witness is then
- // passed along wherever needed to statically prove that the array is known to
- // be white.
- class WhitenessWitness {
- public:
- inline explicit WhitenessWitness(FixedArray* array);
- inline ~WhitenessWitness();
-
- private:
- IncrementalMarking* marking_;
- };
-
- // Returns true for both shared empty_descriptor_array and for smis, which the
- // map uses to encode additional bit fields when the descriptor array is not
- // yet used.
- inline bool IsEmpty();
-
- // Returns the number of descriptors in the array.
- int number_of_descriptors() {
- ASSERT(length() >= kFirstIndex || IsEmpty());
- int len = length();
- return len == 0 ? 0 : Smi::cast(get(kDescriptorLengthIndex))->value();
- }
-
- int number_of_descriptors_storage() {
- int len = length();
- return len == 0 ? 0 : (len - kFirstIndex) / kDescriptorSize;
- }
-
- int NumberOfSlackDescriptors() {
- return number_of_descriptors_storage() - number_of_descriptors();
- }
-
- inline void SetNumberOfDescriptors(int number_of_descriptors);
- inline int number_of_entries() { return number_of_descriptors(); }
-
- bool HasEnumCache() {
- return !IsEmpty() && !get(kEnumCacheIndex)->IsSmi();
- }
-
- void CopyEnumCacheFrom(DescriptorArray* array) {
- set(kEnumCacheIndex, array->get(kEnumCacheIndex));
- }
-
- FixedArray* GetEnumCache() {
- ASSERT(HasEnumCache());
- FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
- return FixedArray::cast(bridge->get(kEnumCacheBridgeCacheIndex));
- }
-
- bool HasEnumIndicesCache() {
- if (IsEmpty()) return false;
- Object* object = get(kEnumCacheIndex);
- if (object->IsSmi()) return false;
- FixedArray* bridge = FixedArray::cast(object);
- return !bridge->get(kEnumCacheBridgeIndicesCacheIndex)->IsSmi();
- }
-
- FixedArray* GetEnumIndicesCache() {
- ASSERT(HasEnumIndicesCache());
- FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
- return FixedArray::cast(bridge->get(kEnumCacheBridgeIndicesCacheIndex));
- }
-
- Object** GetEnumCacheSlot() {
- ASSERT(HasEnumCache());
- return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
- kEnumCacheOffset);
- }
-
- void ClearEnumCache();
-
- // Initialize or change the enum cache,
- // using the supplied storage for the small "bridge".
- void SetEnumCache(FixedArray* bridge_storage,
- FixedArray* new_cache,
- Object* new_index_cache);
-
- // Accessors for fetching instance descriptor at descriptor number.
- inline String* GetKey(int descriptor_number);
- inline Object** GetKeySlot(int descriptor_number);
- inline Object* GetValue(int descriptor_number);
- inline Object** GetValueSlot(int descriptor_number);
- inline Object** GetDescriptorStartSlot(int descriptor_number);
- inline Object** GetDescriptorEndSlot(int descriptor_number);
- inline PropertyDetails GetDetails(int descriptor_number);
- inline PropertyType GetType(int descriptor_number);
- inline int GetFieldIndex(int descriptor_number);
- inline JSFunction* GetConstantFunction(int descriptor_number);
- inline Object* GetCallbacksObject(int descriptor_number);
- inline AccessorDescriptor* GetCallbacks(int descriptor_number);
-
- inline String* GetSortedKey(int descriptor_number);
- inline int GetSortedKeyIndex(int descriptor_number);
- inline void SetSortedKey(int pointer, int descriptor_number);
-
- // Accessor for complete descriptor.
- inline void Get(int descriptor_number, Descriptor* desc);
- inline void Set(int descriptor_number,
- Descriptor* desc,
- const WhitenessWitness&);
- inline void Set(int descriptor_number, Descriptor* desc);
-
- // Append automatically sets the enumeration index. This should only be used
- // to add descriptors in bulk at the end, followed by sorting the descriptor
- // array.
- inline void Append(Descriptor* desc, const WhitenessWitness&);
- inline void Append(Descriptor* desc);
-
- // Transfer a complete descriptor from the src descriptor array to this
- // descriptor array.
- void CopyFrom(int dst_index,
- DescriptorArray* src,
- int src_index,
- const WhitenessWitness&);
-
- MUST_USE_RESULT MaybeObject* CopyUpTo(int enumeration_index);
-
- // Sort the instance descriptors by the hash codes of their keys.
- void Sort();
-
- // Search the instance descriptors for given name.
- INLINE(int Search(String* name, int number_of_own_descriptors));
-
- // As the above, but uses DescriptorLookupCache and updates it when
- // necessary.
- INLINE(int SearchWithCache(String* name, Map* map));
-
- // Allocates a DescriptorArray, but returns the singleton
- // empty descriptor array object if number_of_descriptors is 0.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_descriptors,
- int slack = 0);
-
- // Casting.
- static inline DescriptorArray* cast(Object* obj);
-
- // Constant for denoting key was not found.
- static const int kNotFound = -1;
-
- static const int kDescriptorLengthIndex = 0;
- static const int kEnumCacheIndex = 1;
- static const int kFirstIndex = 2;
-
- // The length of the "bridge" to the enum cache.
- static const int kEnumCacheBridgeLength = 2;
- static const int kEnumCacheBridgeCacheIndex = 0;
- static const int kEnumCacheBridgeIndicesCacheIndex = 1;
-
- // Layout description.
- static const int kDescriptorLengthOffset = FixedArray::kHeaderSize;
- static const int kEnumCacheOffset = kDescriptorLengthOffset + kPointerSize;
- static const int kFirstOffset = kEnumCacheOffset + kPointerSize;
-
- // Layout description for the bridge array.
- static const int kEnumCacheBridgeCacheOffset = FixedArray::kHeaderSize;
-
- // Layout of descriptor.
- static const int kDescriptorKey = 0;
- static const int kDescriptorDetails = 1;
- static const int kDescriptorValue = 2;
- static const int kDescriptorSize = 3;
-
-#ifdef OBJECT_PRINT
- // Print all the descriptors.
- inline void PrintDescriptors() {
- PrintDescriptors(stdout);
- }
- void PrintDescriptors(FILE* out);
-#endif
-
-#ifdef DEBUG
- // Is the descriptor array sorted and without duplicates?
- bool IsSortedNoDuplicates(int valid_descriptors = -1);
-
- // Is the descriptor array consistent with the back pointers in targets?
- bool IsConsistentWithBackPointers(Map* current_map);
-
- // Are two DescriptorArrays equal?
- bool IsEqualTo(DescriptorArray* other);
-#endif
-
- // The maximum number of descriptors we want in a descriptor array (should
- // fit in a page).
- static const int kMaxNumberOfDescriptors = 1024 + 512;
-
- // Returns the fixed array length required to hold number_of_descriptors
- // descriptors.
- static int LengthFor(int number_of_descriptors) {
- return ToKeyIndex(number_of_descriptors);
- }
-
- private:
- // An entry in a DescriptorArray, represented as an (array, index) pair.
- class Entry {
- public:
- inline explicit Entry(DescriptorArray* descs, int index) :
- descs_(descs), index_(index) { }
-
- inline PropertyType type() { return descs_->GetType(index_); }
- inline Object* GetCallbackObject() { return descs_->GetValue(index_); }
-
- private:
- DescriptorArray* descs_;
- int index_;
- };
-
- // Conversion from descriptor number to array indices.
- static int ToKeyIndex(int descriptor_number) {
- return kFirstIndex +
- (descriptor_number * kDescriptorSize) +
- kDescriptorKey;
- }
-
- static int ToDetailsIndex(int descriptor_number) {
- return kFirstIndex +
- (descriptor_number * kDescriptorSize) +
- kDescriptorDetails;
- }
-
- static int ToValueIndex(int descriptor_number) {
- return kFirstIndex +
- (descriptor_number * kDescriptorSize) +
- kDescriptorValue;
- }
-
- // Swap first and second descriptor.
- inline void SwapSortedKeys(int first, int second);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(DescriptorArray);
-};
-
-
-enum SearchMode { ALL_ENTRIES, VALID_ENTRIES };
-
-template<SearchMode search_mode, typename T>
-inline int LinearSearch(T* array, String* name, int len, int valid_entries);
-
-
-template<SearchMode search_mode, typename T>
-inline int Search(T* array, String* name, int valid_entries = 0);
-
-
-// HashTable is a subclass of FixedArray that implements a hash table
-// that uses open addressing and quadratic probing.
-//
-// In order for the quadratic probing to work, elements that have not
-// yet been used and elements that have been deleted are
-// distinguished. Probing continues when deleted elements are
-// encountered and stops when unused elements are encountered.
-//
-// - Elements with key == undefined have not been used yet.
-// - Elements with key == the_hole have been deleted.
-//
-// The hash table class is parameterized with a Shape and a Key.
-// Shape must be a class with the following interface:
-// class ExampleShape {
-// public:
-// // Tells whether key matches other.
-// static bool IsMatch(Key key, Object* other);
-// // Returns the hash value for key.
-// static uint32_t Hash(Key key);
-// // Returns the hash value for object.
-// static uint32_t HashForObject(Key key, Object* object);
-// // Convert key to an object.
-// static inline Object* AsObject(Key key);
-// // The prefix size indicates number of elements in the beginning
-// // of the backing storage.
-// static const int kPrefixSize = ..;
-// // The Element size indicates number of elements per entry.
-// static const int kEntrySize = ..;
-// };
-// The prefix size indicates an amount of memory in the
-// beginning of the backing storage that can be used for non-element
-// information by subclasses.
-
-template<typename Key>
-class BaseShape {
- public:
- static const bool UsesSeed = false;
- static uint32_t Hash(Key key) { return 0; }
- static uint32_t SeededHash(Key key, uint32_t seed) {
- ASSERT(UsesSeed);
- return Hash(key);
- }
- static uint32_t HashForObject(Key key, Object* object) { return 0; }
- static uint32_t SeededHashForObject(Key key, uint32_t seed, Object* object) {
- ASSERT(UsesSeed);
- return HashForObject(key, object);
- }
-};
-
-template<typename Shape, typename Key>
-class HashTable: public FixedArray {
- public:
- enum MinimumCapacity {
- USE_DEFAULT_MINIMUM_CAPACITY,
- USE_CUSTOM_MINIMUM_CAPACITY
- };
-
- // Wrapper methods
- inline uint32_t Hash(Key key) {
- if (Shape::UsesSeed) {
- return Shape::SeededHash(key,
- GetHeap()->HashSeed());
- } else {
- return Shape::Hash(key);
- }
- }
-
- inline uint32_t HashForObject(Key key, Object* object) {
- if (Shape::UsesSeed) {
- return Shape::SeededHashForObject(key,
- GetHeap()->HashSeed(), object);
- } else {
- return Shape::HashForObject(key, object);
- }
- }
-
- // Returns the number of elements in the hash table.
- int NumberOfElements() {
- return Smi::cast(get(kNumberOfElementsIndex))->value();
- }
-
- // Returns the number of deleted elements in the hash table.
- int NumberOfDeletedElements() {
- return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
- }
-
- // Returns the capacity of the hash table.
- int Capacity() {
- return Smi::cast(get(kCapacityIndex))->value();
- }
-
- // ElementAdded should be called whenever an element is added to a
- // hash table.
- void ElementAdded() { SetNumberOfElements(NumberOfElements() + 1); }
-
- // ElementRemoved should be called whenever an element is removed from
- // a hash table.
- void ElementRemoved() {
- SetNumberOfElements(NumberOfElements() - 1);
- SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
- }
- void ElementsRemoved(int n) {
- SetNumberOfElements(NumberOfElements() - n);
- SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
- }
-
- // Returns a new HashTable object. Might return Failure.
- MUST_USE_RESULT static MaybeObject* Allocate(
- int at_least_space_for,
- MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Computes the required capacity for a table holding the given
- // number of elements. May be more than HashTable::kMaxCapacity.
- static int ComputeCapacity(int at_least_space_for);
-
- // Returns the key at entry.
- Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
-
- // Tells whether k is a real key. The hole and undefined are not allowed
- // as keys and can be used to indicate missing or deleted elements.
- bool IsKey(Object* k) {
- return !k->IsTheHole() && !k->IsUndefined();
- }
-
- // Garbage collection support.
- void IteratePrefix(ObjectVisitor* visitor);
- void IterateElements(ObjectVisitor* visitor);
-
- // Casting.
- static inline HashTable* cast(Object* obj);
-
- // Compute the probe offset (quadratic probing).
- INLINE(static uint32_t GetProbeOffset(uint32_t n)) {
- return (n + n * n) >> 1;
- }
-
- static const int kNumberOfElementsIndex = 0;
- static const int kNumberOfDeletedElementsIndex = 1;
- static const int kCapacityIndex = 2;
- static const int kPrefixStartIndex = 3;
- static const int kElementsStartIndex =
- kPrefixStartIndex + Shape::kPrefixSize;
- static const int kEntrySize = Shape::kEntrySize;
- static const int kElementsStartOffset =
- kHeaderSize + kElementsStartIndex * kPointerSize;
- static const int kCapacityOffset =
- kHeaderSize + kCapacityIndex * kPointerSize;
-
- // Constant used for denoting a absent entry.
- static const int kNotFound = -1;
-
- // Maximal capacity of HashTable. Based on maximal length of underlying
- // FixedArray. Staying below kMaxCapacity also ensures that EntryToIndex
- // cannot overflow.
- static const int kMaxCapacity =
- (FixedArray::kMaxLength - kElementsStartOffset) / kEntrySize;
-
- // Find entry for key otherwise return kNotFound.
- inline int FindEntry(Key key);
- int FindEntry(Isolate* isolate, Key key);
-
- protected:
- // Find the entry at which to insert element with the given key that
- // has the given hash value.
- uint32_t FindInsertionEntry(uint32_t hash);
-
- // Returns the index for an entry (of the key)
- static inline int EntryToIndex(int entry) {
- return (entry * kEntrySize) + kElementsStartIndex;
- }
-
- // Update the number of elements in the hash table.
- void SetNumberOfElements(int nof) {
- set(kNumberOfElementsIndex, Smi::FromInt(nof));
- }
-
- // Update the number of deleted elements in the hash table.
- void SetNumberOfDeletedElements(int nod) {
- set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
- }
-
- // Sets the capacity of the hash table.
- void SetCapacity(int capacity) {
- // To scale a computed hash code to fit within the hash table, we
- // use bit-wise AND with a mask, so the capacity must be positive
- // and non-zero.
- ASSERT(capacity > 0);
- ASSERT(capacity <= kMaxCapacity);
- set(kCapacityIndex, Smi::FromInt(capacity));
- }
-
-
- // Returns probe entry.
- static uint32_t GetProbe(uint32_t hash, uint32_t number, uint32_t size) {
- ASSERT(IsPowerOf2(size));
- return (hash + GetProbeOffset(number)) & (size - 1);
- }
-
- inline static uint32_t FirstProbe(uint32_t hash, uint32_t size) {
- return hash & (size - 1);
- }
-
- inline static uint32_t NextProbe(
- uint32_t last, uint32_t number, uint32_t size) {
- return (last + number) & (size - 1);
- }
-
- // Rehashes this hash-table into the new table.
- MUST_USE_RESULT MaybeObject* Rehash(HashTable* new_table, Key key);
-
- // Attempt to shrink hash table after removal of key.
- MUST_USE_RESULT MaybeObject* Shrink(Key key);
-
- // Ensure enough space for n additional elements.
- MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
-};
-
-
-// HashTableKey is an abstract superclass for virtual key behavior.
-class HashTableKey {
- public:
- // Returns whether the other object matches this key.
- virtual bool IsMatch(Object* other) = 0;
- // Returns the hash value for this key.
- virtual uint32_t Hash() = 0;
- // Returns the hash value for object.
- virtual uint32_t HashForObject(Object* key) = 0;
- // Returns the key object for storing into the hash table.
- // If allocations fails a failure object is returned.
- MUST_USE_RESULT virtual MaybeObject* AsObject() = 0;
- // Required.
- virtual ~HashTableKey() {}
-};
-
-
-class StringTableShape : public BaseShape<HashTableKey*> {
- public:
- static inline bool IsMatch(HashTableKey* key, Object* value) {
- return key->IsMatch(value);
- }
- static inline uint32_t Hash(HashTableKey* key) {
- return key->Hash();
- }
- static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
- return key->HashForObject(object);
- }
- MUST_USE_RESULT static inline MaybeObject* AsObject(HashTableKey* key) {
- return key->AsObject();
- }
-
- static const int kPrefixSize = 0;
- static const int kEntrySize = 1;
-};
-
-class SeqOneByteString;
-
-// StringTable.
-//
-// No special elements in the prefix and the element size is 1
-// because only the string itself (the key) needs to be stored.
-class StringTable: public HashTable<StringTableShape, HashTableKey*> {
- public:
- // Find string in the string table. If it is not there yet, it is
- // added. The return value is the string table which might have
- // been enlarged. If the return value is not a failure, the string
- // pointer *s is set to the string found.
- MUST_USE_RESULT MaybeObject* LookupUtf8String(
- Vector<const char> str,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupOneByteString(
- Vector<const uint8_t> str,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupSubStringOneByteString(
- Handle<SeqOneByteString> str,
- int from,
- int length,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupTwoByteString(
- Vector<const uc16> str,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupString(String* key, Object** s);
-
- // Looks up a string that is equal to the given string and returns
- // true if it is found, assigning the string to the given output
- // parameter.
- bool LookupStringIfExists(String* str, String** result);
- bool LookupTwoCharsStringIfExists(uint16_t c1, uint16_t c2, String** result);
-
- // Casting.
- static inline StringTable* cast(Object* obj);
-
- private:
- MUST_USE_RESULT MaybeObject* LookupKey(HashTableKey* key, Object** s);
-
- template <bool seq_ascii> friend class JsonParser;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringTable);
-};
-
-
-class MapCacheShape : public BaseShape<HashTableKey*> {
- public:
- static inline bool IsMatch(HashTableKey* key, Object* value) {
- return key->IsMatch(value);
- }
- static inline uint32_t Hash(HashTableKey* key) {
- return key->Hash();
- }
-
- static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
- return key->HashForObject(object);
- }
-
- MUST_USE_RESULT static inline MaybeObject* AsObject(HashTableKey* key) {
- return key->AsObject();
- }
-
- static const int kPrefixSize = 0;
- static const int kEntrySize = 2;
-};
-
-
-// MapCache.
-//
-// Maps keys that are a fixed array of internalized strings to a map.
-// Used for canonicalize maps for object literals.
-class MapCache: public HashTable<MapCacheShape, HashTableKey*> {
- public:
- // Find cached value for a string key, otherwise return null.
- Object* Lookup(FixedArray* key);
- MUST_USE_RESULT MaybeObject* Put(FixedArray* key, Map* value);
- static inline MapCache* cast(Object* obj);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(MapCache);
-};
-
-
-template <typename Shape, typename Key>
-class Dictionary: public HashTable<Shape, Key> {
- public:
- static inline Dictionary<Shape, Key>* cast(Object* obj) {
- return reinterpret_cast<Dictionary<Shape, Key>*>(obj);
- }
-
- // Returns the value at entry.
- Object* ValueAt(int entry) {
- return this->get(HashTable<Shape, Key>::EntryToIndex(entry) + 1);
- }
-
- // Set the value for entry.
- void ValueAtPut(int entry, Object* value) {
- this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 1, value);
- }
-
- // Returns the property details for the property at entry.
- PropertyDetails DetailsAt(int entry) {
- ASSERT(entry >= 0); // Not found is -1, which is not caught by get().
- return PropertyDetails(
- Smi::cast(this->get(HashTable<Shape, Key>::EntryToIndex(entry) + 2)));
- }
-
- // Set the details for entry.
- void DetailsAtPut(int entry, PropertyDetails value) {
- this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 2, value.AsSmi());
- }
-
- // Sorting support
- void CopyValuesTo(FixedArray* elements);
-
- // Delete a property from the dictionary.
- Object* DeleteProperty(int entry, JSObject::DeleteMode mode);
-
- // Attempt to shrink the dictionary after deletion of key.
- MUST_USE_RESULT MaybeObject* Shrink(Key key);
-
- // Returns the number of elements in the dictionary filtering out properties
- // with the specified attributes.
- int NumberOfElementsFilterAttributes(PropertyAttributes filter);
-
- // Returns the number of enumerable elements in the dictionary.
- int NumberOfEnumElements();
-
- enum SortMode { UNSORTED, SORTED };
- // Copies keys to preallocated fixed array.
- void CopyKeysTo(FixedArray* storage,
- PropertyAttributes filter,
- SortMode sort_mode);
- // Fill in details for properties into storage.
- void CopyKeysTo(FixedArray* storage, int index, SortMode sort_mode);
-
- // Accessors for next enumeration index.
- void SetNextEnumerationIndex(int index) {
- ASSERT(index != 0);
- this->set(kNextEnumerationIndexIndex, Smi::FromInt(index));
- }
-
- int NextEnumerationIndex() {
- return Smi::cast(FixedArray::get(kNextEnumerationIndexIndex))->value();
- }
-
- // Returns a new array for dictionary usage. Might return Failure.
- MUST_USE_RESULT static MaybeObject* Allocate(int at_least_space_for);
-
- // Ensure enough space for n additional elements.
- MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
-
-#ifdef OBJECT_PRINT
- inline void Print() {
- Print(stdout);
- }
- void Print(FILE* out);
-#endif
- // Returns the key (slow).
- Object* SlowReverseLookup(Object* value);
-
- // Sets the entry to (key, value) pair.
- inline void SetEntry(int entry,
- Object* key,
- Object* value);
- inline void SetEntry(int entry,
- Object* key,
- Object* value,
- PropertyDetails details);
-
- MUST_USE_RESULT MaybeObject* Add(Key key,
- Object* value,
- PropertyDetails details);
-
- protected:
- // Generic at put operation.
- MUST_USE_RESULT MaybeObject* AtPut(Key key, Object* value);
-
- // Add entry to dictionary.
- MUST_USE_RESULT MaybeObject* AddEntry(Key key,
- Object* value,
- PropertyDetails details,
- uint32_t hash);
-
- // Generate new enumeration indices to avoid enumeration index overflow.
- MUST_USE_RESULT MaybeObject* GenerateNewEnumerationIndices();
- static const int kMaxNumberKeyIndex =
- HashTable<Shape, Key>::kPrefixStartIndex;
- static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1;
-};
-
-
-class StringDictionaryShape : public BaseShape<String*> {
- public:
- static inline bool IsMatch(String* key, Object* other);
- static inline uint32_t Hash(String* key);
- static inline uint32_t HashForObject(String* key, Object* object);
- MUST_USE_RESULT static inline MaybeObject* AsObject(String* key);
- static const int kPrefixSize = 2;
- static const int kEntrySize = 3;
- static const bool kIsEnumerable = true;
-};
-
-
-class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
- public:
- static inline StringDictionary* cast(Object* obj) {
- ASSERT(obj->IsDictionary());
- return reinterpret_cast<StringDictionary*>(obj);
- }
-
- // Copies enumerable keys to preallocated fixed array.
- FixedArray* CopyEnumKeysTo(FixedArray* storage);
- static void DoGenerateNewEnumerationIndices(
- Handle<StringDictionary> dictionary);
-
- // For transforming properties of a JSObject.
- MUST_USE_RESULT MaybeObject* TransformPropertiesToFastFor(
- JSObject* obj,
- int unused_property_fields);
-
- // Find entry for key, otherwise return kNotFound. Optimized version of
- // HashTable::FindEntry.
- int FindEntry(String* key);
-};
-
-
-class NumberDictionaryShape : public BaseShape<uint32_t> {
- public:
- static inline bool IsMatch(uint32_t key, Object* other);
- MUST_USE_RESULT static inline MaybeObject* AsObject(uint32_t key);
- static const int kEntrySize = 3;
- static const bool kIsEnumerable = false;
-};
-
-
-class SeededNumberDictionaryShape : public NumberDictionaryShape {
- public:
- static const bool UsesSeed = true;
- static const int kPrefixSize = 2;
-
- static inline uint32_t SeededHash(uint32_t key, uint32_t seed);
- static inline uint32_t SeededHashForObject(uint32_t key,
- uint32_t seed,
- Object* object);
-};
-
-
-class UnseededNumberDictionaryShape : public NumberDictionaryShape {
- public:
- static const int kPrefixSize = 0;
-
- static inline uint32_t Hash(uint32_t key);
- static inline uint32_t HashForObject(uint32_t key, Object* object);
-};
-
-
-class SeededNumberDictionary
- : public Dictionary<SeededNumberDictionaryShape, uint32_t> {
- public:
- static SeededNumberDictionary* cast(Object* obj) {
- ASSERT(obj->IsDictionary());
- return reinterpret_cast<SeededNumberDictionary*>(obj);
- }
-
- // Type specific at put (default NONE attributes is used when adding).
- MUST_USE_RESULT MaybeObject* AtNumberPut(uint32_t key, Object* value);
- MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key,
- Object* value,
- PropertyDetails details);
-
- // Set an existing entry or add a new one if needed.
- // Return the updated dictionary.
- MUST_USE_RESULT static Handle<SeededNumberDictionary> Set(
- Handle<SeededNumberDictionary> dictionary,
- uint32_t index,
- Handle<Object> value,
- PropertyDetails details);
-
- MUST_USE_RESULT MaybeObject* Set(uint32_t key,
- Object* value,
- PropertyDetails details);
-
- void UpdateMaxNumberKey(uint32_t key);
-
- // If slow elements are required we will never go back to fast-case
- // for the elements kept in this dictionary. We require slow
- // elements if an element has been added at an index larger than
- // kRequiresSlowElementsLimit or set_requires_slow_elements() has been called
- // when defining a getter or setter with a number key.
- inline bool requires_slow_elements();
- inline void set_requires_slow_elements();
-
- // Get the value of the max number key that has been added to this
- // dictionary. max_number_key can only be called if
- // requires_slow_elements returns false.
- inline uint32_t max_number_key();
-
- // Bit masks.
- static const int kRequiresSlowElementsMask = 1;
- static const int kRequiresSlowElementsTagSize = 1;
- static const uint32_t kRequiresSlowElementsLimit = (1 << 29) - 1;
-};
-
-
-class UnseededNumberDictionary
- : public Dictionary<UnseededNumberDictionaryShape, uint32_t> {
- public:
- static UnseededNumberDictionary* cast(Object* obj) {
- ASSERT(obj->IsDictionary());
- return reinterpret_cast<UnseededNumberDictionary*>(obj);
- }
-
- // Type specific at put (default NONE attributes is used when adding).
- MUST_USE_RESULT MaybeObject* AtNumberPut(uint32_t key, Object* value);
- MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key, Object* value);
-
- // Set an existing entry or add a new one if needed.
- // Return the updated dictionary.
- MUST_USE_RESULT static Handle<UnseededNumberDictionary> Set(
- Handle<UnseededNumberDictionary> dictionary,
- uint32_t index,
- Handle<Object> value);
-
- MUST_USE_RESULT MaybeObject* Set(uint32_t key, Object* value);
-};
-
-
-template <int entrysize>
-class ObjectHashTableShape : public BaseShape<Object*> {
- public:
- static inline bool IsMatch(Object* key, Object* other);
- static inline uint32_t Hash(Object* key);
- static inline uint32_t HashForObject(Object* key, Object* object);
- MUST_USE_RESULT static inline MaybeObject* AsObject(Object* key);
- static const int kPrefixSize = 0;
- static const int kEntrySize = entrysize;
-};
-
-
-// ObjectHashSet holds keys that are arbitrary objects by using the identity
-// hash of the key for hashing purposes.
-class ObjectHashSet: public HashTable<ObjectHashTableShape<1>, Object*> {
- public:
- static inline ObjectHashSet* cast(Object* obj) {
- ASSERT(obj->IsHashTable());
- return reinterpret_cast<ObjectHashSet*>(obj);
- }
-
- // Looks up whether the given key is part of this hash set.
- bool Contains(Object* key);
-
- // Adds the given key to this hash set.
- MUST_USE_RESULT MaybeObject* Add(Object* key);
-
- // Removes the given key from this hash set.
- MUST_USE_RESULT MaybeObject* Remove(Object* key);
-};
-
-
-// ObjectHashTable maps keys that are arbitrary objects to object values by
-// using the identity hash of the key for hashing purposes.
-class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
- public:
- static inline ObjectHashTable* cast(Object* obj) {
- ASSERT(obj->IsHashTable());
- return reinterpret_cast<ObjectHashTable*>(obj);
- }
-
- // Looks up the value associated with the given key. The hole value is
- // returned in case the key is not present.
- Object* Lookup(Object* key);
-
- // Adds (or overwrites) the value associated with the given key. Mapping a
- // key to the hole value causes removal of the whole entry.
- MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
-
- private:
- friend class MarkCompactCollector;
-
- void AddEntry(int entry, Object* key, Object* value);
- void RemoveEntry(int entry);
-
- // Returns the index to the value of an entry.
- static inline int EntryToValueIndex(int entry) {
- return EntryToIndex(entry) + 1;
- }
-};
-
-
-// JSFunctionResultCache caches results of some JSFunction invocation.
-// It is a fixed array with fixed structure:
-// [0]: factory function
-// [1]: finger index
-// [2]: current cache size
-// [3]: dummy field.
-// The rest of array are key/value pairs.
-class JSFunctionResultCache: public FixedArray {
- public:
- static const int kFactoryIndex = 0;
- static const int kFingerIndex = kFactoryIndex + 1;
- static const int kCacheSizeIndex = kFingerIndex + 1;
- static const int kDummyIndex = kCacheSizeIndex + 1;
- static const int kEntriesIndex = kDummyIndex + 1;
-
- static const int kEntrySize = 2; // key + value
-
- static const int kFactoryOffset = kHeaderSize;
- static const int kFingerOffset = kFactoryOffset + kPointerSize;
- static const int kCacheSizeOffset = kFingerOffset + kPointerSize;
-
- inline void MakeZeroSize();
- inline void Clear();
-
- inline int size();
- inline void set_size(int size);
- inline int finger_index();
- inline void set_finger_index(int finger_index);
-
- // Casting
- static inline JSFunctionResultCache* cast(Object* obj);
-
- DECLARE_VERIFIER(JSFunctionResultCache)
-};
-
-
-// ScopeInfo represents information about different scopes of a source
-// program and the allocation of the scope's variables. Scope information
-// is stored in a compressed form in ScopeInfo objects and is used
-// at runtime (stack dumps, deoptimization, etc.).
-
-// This object provides quick access to scope info details for runtime
-// routines.
-class ScopeInfo : public FixedArray {
- public:
- static inline ScopeInfo* cast(Object* object);
-
- // Return the type of this scope.
- ScopeType Type();
-
- // Does this scope call eval?
- bool CallsEval();
-
- // Return the language mode of this scope.
- LanguageMode language_mode();
-
- // Is this scope a qml mode scope?
- bool IsQmlMode();
-
- // Does this scope make a non-strict eval call?
- bool CallsNonStrictEval() {
- return CallsEval() && (language_mode() == CLASSIC_MODE);
- }
-
- // Return the total number of locals allocated on the stack and in the
- // context. This includes the parameters that are allocated in the context.
- int LocalCount();
-
- // Return the number of stack slots for code. This number consists of two
- // parts:
- // 1. One stack slot per stack allocated local.
- // 2. One stack slot for the function name if it is stack allocated.
- int StackSlotCount();
-
- // Return the number of context slots for code if a context is allocated. This
- // number consists of three parts:
- // 1. Size of fixed header for every context: Context::MIN_CONTEXT_SLOTS
- // 2. One context slot per context allocated local.
- // 3. One context slot for the function name if it is context allocated.
- // Parameters allocated in the context count as context allocated locals. If
- // no contexts are allocated for this scope ContextLength returns 0.
- int ContextLength(bool qml_function = false);
-
- // Is this scope the scope of a named function expression?
- bool HasFunctionName();
-
- // Return if this has context allocated locals.
- bool HasHeapAllocatedLocals();
-
- // Return if contexts are allocated for this scope.
- bool HasContext();
-
- // Return the function_name if present.
- String* FunctionName();
-
- // Return the name of the given parameter.
- String* ParameterName(int var);
-
- // Return the name of the given local.
- String* LocalName(int var);
-
- // Return the name of the given stack local.
- String* StackLocalName(int var);
-
- // Return the name of the given context local.
- String* ContextLocalName(int var);
-
- // Return the mode of the given context local.
- VariableMode ContextLocalMode(int var);
-
- // Return the initialization flag of the given context local.
- InitializationFlag ContextLocalInitFlag(int var);
-
- // Lookup support for serialized scope info. Returns the
- // the stack slot index for a given slot name if the slot is
- // present; otherwise returns a value < 0. The name must be an internalized
- // string.
- int StackSlotIndex(String* name);
-
- // Lookup support for serialized scope info. Returns the
- // context slot index for a given slot name if the slot is present; otherwise
- // returns a value < 0. The name must be an internalized string.
- // If the slot is present and mode != NULL, sets *mode to the corresponding
- // mode for that variable.
- int ContextSlotIndex(String* name,
- VariableMode* mode,
- InitializationFlag* init_flag);
-
- // Lookup support for serialized scope info. Returns the
- // parameter index for a given parameter name if the parameter is present;
- // otherwise returns a value < 0. The name must be an internalized string.
- int ParameterIndex(String* name);
-
- // Lookup support for serialized scope info. Returns the function context
- // slot index if the function name is present and context-allocated (named
- // function expressions, only), otherwise returns a value < 0. The name
- // must be an internalized string.
- int FunctionContextSlotIndex(String* name, VariableMode* mode);
-
-
- // Copies all the context locals into an object used to materialize a scope.
- bool CopyContextLocalsToScopeObject(Isolate* isolate,
- Handle<Context> context,
- Handle<JSObject> scope_object);
-
-
- static Handle<ScopeInfo> Create(Scope* scope, Zone* zone);
-
- // Serializes empty scope info.
- static ScopeInfo* Empty(Isolate* isolate);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- // The layout of the static part of a ScopeInfo is as follows. Each entry is
- // numeric and occupies one array slot.
- // 1. A set of properties of the scope
- // 2. The number of parameters. This only applies to function scopes. For
- // non-function scopes this is 0.
- // 3. The number of non-parameter variables allocated on the stack.
- // 4. The number of non-parameter and parameter variables allocated in the
- // context.
- // 5. The number of non-parameter and parameter variables allocated in the
- // QML context. (technically placeholder)
-#define FOR_EACH_NUMERIC_FIELD(V) \
- V(Flags) \
- V(ParameterCount) \
- V(StackLocalCount) \
- V(ContextLocalCount) \
- V(QmlContextLocalCount)
-
-#define FIELD_ACCESSORS(name) \
- void Set##name(int value) { \
- set(k##name, Smi::FromInt(value)); \
- } \
- int name() { \
- if (length() > 0) { \
- return Smi::cast(get(k##name))->value(); \
- } else { \
- return 0; \
- } \
- }
- FOR_EACH_NUMERIC_FIELD(FIELD_ACCESSORS)
-#undef FIELD_ACCESSORS
-
- private:
- enum {
-#define DECL_INDEX(name) k##name,
- FOR_EACH_NUMERIC_FIELD(DECL_INDEX)
-#undef DECL_INDEX
-#undef FOR_EACH_NUMERIC_FIELD
- kVariablePartIndex
- };
-
- // The layout of the variable part of a ScopeInfo is as follows:
- // 1. ParameterEntries:
- // This part stores the names of the parameters for function scopes. One
- // slot is used per parameter, so in total this part occupies
- // ParameterCount() slots in the array. For other scopes than function
- // scopes ParameterCount() is 0.
- // 2. StackLocalEntries:
- // Contains the names of local variables that are allocated on the stack,
- // in increasing order of the stack slot index. One slot is used per stack
- // local, so in total this part occupies StackLocalCount() slots in the
- // array.
- // 3. ContextLocalNameEntries:
- // Contains the names of local variables and parameters that are allocated
- // in the context. They are stored in increasing order of the context slot
- // index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
- // context local, so in total this part occupies ContextLocalCount() slots
- // in the array.
- // 4. ContextLocalInfoEntries:
- // Contains the variable modes and initialization flags corresponding to
- // the context locals in ContextLocalNameEntries. One slot is used per
- // context local, so in total this part occupies ContextLocalCount()
- // slots in the array.
- // 5. FunctionNameEntryIndex:
- // If the scope belongs to a named function expression this part contains
- // information about the function variable. It always occupies two array
- // slots: a. The name of the function variable.
- // b. The context or stack slot index for the variable.
- int ParameterEntriesIndex();
- int StackLocalEntriesIndex();
- int ContextLocalNameEntriesIndex();
- int ContextLocalInfoEntriesIndex();
- int FunctionNameEntryIndex();
-
- // Location of the function variable for named function expressions.
- enum FunctionVariableInfo {
- NONE, // No function name present.
- STACK, // Function
- CONTEXT,
- UNUSED
- };
-
- // Properties of scopes.
- class TypeField: public BitField<ScopeType, 0, 3> {};
- class CallsEvalField: public BitField<bool, 3, 1> {};
- class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
- class QmlModeField: public BitField<bool, 6, 1> {};
- class FunctionVariableField: public BitField<FunctionVariableInfo, 7, 2> {};
- class FunctionVariableMode: public BitField<VariableMode, 9, 3> {};
-
- // BitFields representing the encoded information for context locals in the
- // ContextLocalInfoEntries part.
- class ContextLocalMode: public BitField<VariableMode, 0, 3> {};
- class ContextLocalInitFlag: public BitField<InitializationFlag, 3, 1> {};
-};
-
-
-// The cache for maps used by normalized (dictionary mode) objects.
-// Such maps do not have property descriptors, so a typical program
-// needs very limited number of distinct normalized maps.
-class NormalizedMapCache: public FixedArray {
- public:
- static const int kEntries = 64;
-
- MUST_USE_RESULT MaybeObject* Get(JSObject* object,
- PropertyNormalizationMode mode);
-
- void Clear();
-
- // Casting
- static inline NormalizedMapCache* cast(Object* obj);
-
- DECLARE_VERIFIER(NormalizedMapCache)
-};
-
-
-// ByteArray represents fixed sized byte arrays. Used for the relocation info
-// that is attached to code objects.
-class ByteArray: public FixedArrayBase {
- public:
- inline int Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
-
- // Setter and getter.
- inline byte get(int index);
- inline void set(int index, byte value);
-
- // Treat contents as an int array.
- inline int get_int(int index);
-
- static int SizeFor(int length) {
- return OBJECT_POINTER_ALIGN(kHeaderSize + length);
- }
- // We use byte arrays for free blocks in the heap. Given a desired size in
- // bytes that is a multiple of the word size and big enough to hold a byte
- // array, this function returns the number of elements a byte array should
- // have.
- static int LengthFor(int size_in_bytes) {
- ASSERT(IsAligned(size_in_bytes, kPointerSize));
- ASSERT(size_in_bytes >= kHeaderSize);
- return size_in_bytes - kHeaderSize;
- }
-
- // Returns data start address.
- inline Address GetDataStartAddress();
-
- // Returns a pointer to the ByteArray object for a given data start address.
- static inline ByteArray* FromDataStartAddress(Address address);
-
- // Casting.
- static inline ByteArray* cast(Object* obj);
-
- // Dispatched behavior.
- inline int ByteArraySize() {
- return SizeFor(this->length());
- }
- DECLARE_PRINTER(ByteArray)
- DECLARE_VERIFIER(ByteArray)
-
- // Layout description.
- static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
-
- // Maximal memory consumption for a single ByteArray.
- static const int kMaxSize = 512 * MB;
- // Maximal length of a single ByteArray.
- static const int kMaxLength = kMaxSize - kHeaderSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
-};
-
-
-// FreeSpace represents fixed sized areas of the heap that are not currently in
-// use. Used by the heap and GC.
-class FreeSpace: public HeapObject {
- public:
- // [size]: size of the free space including the header.
- inline int size();
- inline void set_size(int value);
-
- inline int Size() { return size(); }
-
- // Casting.
- static inline FreeSpace* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(FreeSpace)
- DECLARE_VERIFIER(FreeSpace)
-
- // Layout description.
- // Size is smi tagged when it is stored.
- static const int kSizeOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kSizeOffset + kPointerSize;
-
- static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
-};
-
-
-// An ExternalArray represents a fixed-size array of primitive values
-// which live outside the JavaScript heap. Its subclasses are used to
-// implement the CanvasArray types being defined in the WebGL
-// specification. As of this writing the first public draft is not yet
-// available, but Khronos members can access the draft at:
-// https://cvs.khronos.org/svn/repos/3dweb/trunk/doc/spec/WebGL-spec.html
-//
-// The semantics of these arrays differ from CanvasPixelArray.
-// Out-of-range values passed to the setter are converted via a C
-// cast, not clamping. Out-of-range indices cause exceptions to be
-// raised rather than being silently ignored.
-class ExternalArray: public FixedArrayBase {
- public:
- inline bool is_the_hole(int index) { return false; }
-
- // [external_pointer]: The pointer to the external memory area backing this
- // external array.
- DECL_ACCESSORS(external_pointer, void) // Pointer to the data store.
-
- // Casting.
- static inline ExternalArray* cast(Object* obj);
-
- // Maximal acceptable length for an external array.
- static const int kMaxLength = 0x3fffffff;
-
- // ExternalArray headers are not quadword aligned.
- static const int kExternalPointerOffset =
- POINTER_SIZE_ALIGN(FixedArrayBase::kLengthOffset + kPointerSize);
- static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
- static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalArray);
-};
-
-
-// A ExternalPixelArray represents a fixed-size byte array with special
-// semantics used for implementing the CanvasPixelArray object. Please see the
-// specification at:
-
-// http://www.whatwg.org/specs/web-apps/current-work/
-// multipage/the-canvas-element.html#canvaspixelarray
-// In particular, write access clamps the value written to 0 or 255 if the
-// value written is outside this range.
-class ExternalPixelArray: public ExternalArray {
- public:
- inline uint8_t* external_pixel_pointer();
-
- // Setter and getter.
- inline uint8_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
- inline void set(int index, uint8_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber and
- // undefined and clamps the converted value between 0 and 255.
- Object* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalPixelArray* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalPixelArray)
- DECLARE_VERIFIER(ExternalPixelArray)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalPixelArray);
-};
-
-
-class ExternalByteArray: public ExternalArray {
- public:
- // Setter and getter.
- inline int8_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
- inline void set(int index, int8_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalByteArray* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalByteArray)
- DECLARE_VERIFIER(ExternalByteArray)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalByteArray);
-};
-
-
-class ExternalUnsignedByteArray: public ExternalArray {
- public:
- // Setter and getter.
- inline uint8_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
- inline void set(int index, uint8_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalUnsignedByteArray* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalUnsignedByteArray)
- DECLARE_VERIFIER(ExternalUnsignedByteArray)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedByteArray);
-};
-
-
-class ExternalShortArray: public ExternalArray {
- public:
- // Setter and getter.
- inline int16_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
- inline void set(int index, int16_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalShortArray* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalShortArray)
- DECLARE_VERIFIER(ExternalShortArray)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalShortArray);
-};
-
-
-class ExternalUnsignedShortArray: public ExternalArray {
- public:
- // Setter and getter.
- inline uint16_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
- inline void set(int index, uint16_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalUnsignedShortArray* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalUnsignedShortArray)
- DECLARE_VERIFIER(ExternalUnsignedShortArray)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedShortArray);
-};
-
-
-class ExternalIntArray: public ExternalArray {
- public:
- // Setter and getter.
- inline int32_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
- inline void set(int index, int32_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalIntArray* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalIntArray)
- DECLARE_VERIFIER(ExternalIntArray)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalIntArray);
-};
-
-
-class ExternalUnsignedIntArray: public ExternalArray {
- public:
- // Setter and getter.
- inline uint32_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
- inline void set(int index, uint32_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalUnsignedIntArray* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalUnsignedIntArray)
- DECLARE_VERIFIER(ExternalUnsignedIntArray)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedIntArray);
-};
-
-
-class ExternalFloatArray: public ExternalArray {
- public:
- // Setter and getter.
- inline float get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
- inline void set(int index, float value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalFloatArray* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalFloatArray)
- DECLARE_VERIFIER(ExternalFloatArray)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloatArray);
-};
-
-
-class ExternalDoubleArray: public ExternalArray {
- public:
- // Setter and getter.
- inline double get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
- inline void set(int index, double value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline ExternalDoubleArray* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalDoubleArray)
- DECLARE_VERIFIER(ExternalDoubleArray)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalDoubleArray);
-};
-
-
-// DeoptimizationInputData is a fixed array used to hold the deoptimization
-// data for code generated by the Hydrogen/Lithium compiler. It also
-// contains information about functions that were inlined. If N different
-// functions were inlined then first N elements of the literal array will
-// contain these functions.
-//
-// It can be empty.
-class DeoptimizationInputData: public FixedArray {
- public:
- // Layout description. Indices in the array.
- static const int kTranslationByteArrayIndex = 0;
- static const int kInlinedFunctionCountIndex = 1;
- static const int kLiteralArrayIndex = 2;
- static const int kOsrAstIdIndex = 3;
- static const int kOsrPcOffsetIndex = 4;
- static const int kFirstDeoptEntryIndex = 5;
-
- // Offsets of deopt entry elements relative to the start of the entry.
- static const int kAstIdRawOffset = 0;
- static const int kTranslationIndexOffset = 1;
- static const int kArgumentsStackHeightOffset = 2;
- static const int kPcOffset = 3;
- static const int kDeoptEntrySize = 4;
-
- // Simple element accessors.
-#define DEFINE_ELEMENT_ACCESSORS(name, type) \
- type* name() { \
- return type::cast(get(k##name##Index)); \
- } \
- void Set##name(type* value) { \
- set(k##name##Index, value); \
- }
-
- DEFINE_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
- DEFINE_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
- DEFINE_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
- DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
- DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
-
-#undef DEFINE_ELEMENT_ACCESSORS
-
- // Accessors for elements of the ith deoptimization entry.
-#define DEFINE_ENTRY_ACCESSORS(name, type) \
- type* name(int i) { \
- return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
- } \
- void Set##name(int i, type* value) { \
- set(IndexForEntry(i) + k##name##Offset, value); \
- }
-
- DEFINE_ENTRY_ACCESSORS(AstIdRaw, Smi)
- DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi)
- DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
- DEFINE_ENTRY_ACCESSORS(Pc, Smi)
-
-#undef DEFINE_ENTRY_ACCESSORS
-
- BailoutId AstId(int i) {
- return BailoutId(AstIdRaw(i)->value());
- }
-
- void SetAstId(int i, BailoutId value) {
- SetAstIdRaw(i, Smi::FromInt(value.ToInt()));
- }
-
- int DeoptCount() {
- return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
- }
-
- // Allocates a DeoptimizationInputData.
- MUST_USE_RESULT static MaybeObject* Allocate(int deopt_entry_count,
- PretenureFlag pretenure);
-
- // Casting.
- static inline DeoptimizationInputData* cast(Object* obj);
-
-#ifdef ENABLE_DISASSEMBLER
- void DeoptimizationInputDataPrint(FILE* out);
-#endif
-
- private:
- static int IndexForEntry(int i) {
- return kFirstDeoptEntryIndex + (i * kDeoptEntrySize);
- }
-
- static int LengthFor(int entry_count) {
- return IndexForEntry(entry_count);
- }
-};
-
-
-// DeoptimizationOutputData is a fixed array used to hold the deoptimization
-// data for code generated by the full compiler.
-// The format of the these objects is
-// [i * 2]: Ast ID for ith deoptimization.
-// [i * 2 + 1]: PC and state of ith deoptimization
-class DeoptimizationOutputData: public FixedArray {
- public:
- int DeoptPoints() { return length() / 2; }
-
- BailoutId AstId(int index) {
- return BailoutId(Smi::cast(get(index * 2))->value());
- }
-
- void SetAstId(int index, BailoutId id) {
- set(index * 2, Smi::FromInt(id.ToInt()));
- }
-
- Smi* PcAndState(int index) { return Smi::cast(get(1 + index * 2)); }
- void SetPcAndState(int index, Smi* offset) { set(1 + index * 2, offset); }
-
- static int LengthOfFixedArray(int deopt_points) {
- return deopt_points * 2;
- }
-
- // Allocates a DeoptimizationOutputData.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_deopt_points,
- PretenureFlag pretenure);
-
- // Casting.
- static inline DeoptimizationOutputData* cast(Object* obj);
-
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
- void DeoptimizationOutputDataPrint(FILE* out);
-#endif
-};
-
-
-// Forward declaration.
-class JSGlobalPropertyCell;
-
-// TypeFeedbackCells is a fixed array used to hold the association between
-// cache cells and AST ids for code generated by the full compiler.
-// The format of the these objects is
-// [i * 2]: Global property cell of ith cache cell.
-// [i * 2 + 1]: Ast ID for ith cache cell.
-class TypeFeedbackCells: public FixedArray {
- public:
- int CellCount() { return length() / 2; }
- static int LengthOfFixedArray(int cell_count) { return cell_count * 2; }
-
- // Accessors for AST ids associated with cache values.
- inline TypeFeedbackId AstId(int index);
- inline void SetAstId(int index, TypeFeedbackId id);
-
- // Accessors for global property cells holding the cache values.
- inline JSGlobalPropertyCell* Cell(int index);
- inline void SetCell(int index, JSGlobalPropertyCell* cell);
-
- // The object that indicates an uninitialized cache.
- static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
-
- // The object that indicates a megamorphic state.
- static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
-
- // The object that indicates a monomorphic state of Array with
- // ElementsKind
- static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
- ElementsKind elements_kind);
-
- // A raw version of the uninitialized sentinel that's safe to read during
- // garbage collection (e.g., for patching the cache).
- static inline Object* RawUninitializedSentinel(Heap* heap);
-
- // Casting.
- static inline TypeFeedbackCells* cast(Object* obj);
-
- static const int kForInFastCaseMarker = 0;
- static const int kForInSlowCaseMarker = 1;
-};
-
-
-// Forward declaration.
-class SafepointEntry;
-class TypeFeedbackInfo;
-
-// Code describes objects with on-the-fly generated machine code.
-class Code: public HeapObject {
- public:
- // Opaque data type for encapsulating code flags like kind, inline
- // cache state, and arguments count.
- // FLAGS_MIN_VALUE and FLAGS_MAX_VALUE are specified to ensure that
- // enumeration type has correct value range (see Issue 830 for more details).
- enum Flags {
- FLAGS_MIN_VALUE = kMinInt,
- FLAGS_MAX_VALUE = kMaxInt
- };
-
-#define CODE_KIND_LIST(V) \
- V(FUNCTION) \
- V(OPTIMIZED_FUNCTION) \
- V(STUB) \
- V(COMPILED_STUB) \
- V(BUILTIN) \
- V(LOAD_IC) \
- V(KEYED_LOAD_IC) \
- V(CALL_IC) \
- V(KEYED_CALL_IC) \
- V(STORE_IC) \
- V(KEYED_STORE_IC) \
- V(UNARY_OP_IC) \
- V(BINARY_OP_IC) \
- V(COMPARE_IC) \
- V(TO_BOOLEAN_IC)
-
- enum Kind {
-#define DEFINE_CODE_KIND_ENUM(name) name,
- CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
-#undef DEFINE_CODE_KIND_ENUM
-
- // Pseudo-kinds.
- LAST_CODE_KIND = TO_BOOLEAN_IC,
- REGEXP = BUILTIN,
- FIRST_IC_KIND = LOAD_IC,
- LAST_IC_KIND = TO_BOOLEAN_IC
- };
-
- // No more than 16 kinds. The value is currently encoded in four bits in
- // Flags.
- STATIC_ASSERT(LAST_CODE_KIND < 16);
-
- static const char* Kind2String(Kind kind);
-
- // Types of stubs.
- enum StubType {
- NORMAL,
- FIELD,
- CONSTANT_FUNCTION,
- CALLBACKS,
- INTERCEPTOR,
- MAP_TRANSITION,
- NONEXISTENT
- };
-
- enum IcFragment {
- IC_FRAGMENT,
- HANDLER_FRAGMENT
- };
-
- enum {
- NUMBER_OF_KINDS = LAST_IC_KIND + 1
- };
-
- typedef int ExtraICState;
-
- static const ExtraICState kNoExtraICState = 0;
-
-#ifdef ENABLE_DISASSEMBLER
- // Printing
- static const char* ICState2String(InlineCacheState state);
- static const char* StubType2String(StubType type);
- static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra);
- inline void Disassemble(const char* name) {
- Disassemble(name, stdout);
- }
- void Disassemble(const char* name, FILE* out);
-#endif // ENABLE_DISASSEMBLER
-
- // [instruction_size]: Size of the native instructions
- inline int instruction_size();
- inline void set_instruction_size(int value);
-
- // [relocation_info]: Code relocation information
- DECL_ACCESSORS(relocation_info, ByteArray)
- void InvalidateRelocation();
-
- // [handler_table]: Fixed array containing offsets of exception handlers.
- DECL_ACCESSORS(handler_table, FixedArray)
-
- // [deoptimization_data]: Array containing data for deopt.
- DECL_ACCESSORS(deoptimization_data, FixedArray)
-
- // [type_feedback_info]: Struct containing type feedback information for
- // unoptimized code. Optimized code can temporarily store the head of
- // the list of the dependent optimized functions during deoptimization.
- // STUBs can use this slot to store arbitrary information as a Smi.
- // Will contain either a TypeFeedbackInfo object, or JSFunction object,
- // or undefined, or a Smi.
- DECL_ACCESSORS(type_feedback_info, Object)
- inline void InitializeTypeFeedbackInfoNoWriteBarrier(Object* value);
- inline int stub_info();
- inline void set_stub_info(int info);
- inline Object* deoptimizing_functions();
- inline void set_deoptimizing_functions(Object* value);
-
- // [gc_metadata]: Field used to hold GC related metadata. The contents of this
- // field does not have to be traced during garbage collection since
- // it is only used by the garbage collector itself.
- DECL_ACCESSORS(gc_metadata, Object)
-
- // [ic_age]: Inline caching age: the value of the Heap::global_ic_age
- // at the moment when this object was created.
- inline void set_ic_age(int count);
- inline int ic_age();
-
- // [prologue_offset]: Offset of the function prologue, used for aging
- // FUNCTIONs and OPTIMIZED_FUNCTIONs.
- inline int prologue_offset();
- inline void set_prologue_offset(int offset);
-
- // Unchecked accessors to be used during GC.
- inline ByteArray* unchecked_relocation_info();
- inline FixedArray* unchecked_deoptimization_data();
-
- inline int relocation_size();
-
- // [flags]: Various code flags.
- inline Flags flags();
- inline void set_flags(Flags flags);
-
- // [flags]: Access to specific code flags.
- inline Kind kind();
- inline InlineCacheState ic_state(); // Only valid for IC stubs.
- inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
- inline StubType type(); // Only valid for monomorphic IC stubs.
- inline int arguments_count(); // Only valid for call IC stubs.
-
- // Testers for IC stub kinds.
- inline bool is_inline_cache_stub();
- inline bool is_debug_break();
- inline bool is_load_stub() { return kind() == LOAD_IC; }
- inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
- inline bool is_store_stub() { return kind() == STORE_IC; }
- inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
- inline bool is_call_stub() { return kind() == CALL_IC; }
- inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
- inline bool is_unary_op_stub() { return kind() == UNARY_OP_IC; }
- inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
- inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
- inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
-
- // [major_key]: For kind STUB or BINARY_OP_IC, the major key.
- inline int major_key();
- inline void set_major_key(int value);
-
- // For stubs, tells whether they should always exist, so that they can be
- // called from other stubs.
- inline bool is_pregenerated();
- inline void set_is_pregenerated(bool value);
-
- // [optimizable]: For FUNCTION kind, tells if it is optimizable.
- inline bool optimizable();
- inline void set_optimizable(bool value);
-
- // [has_deoptimization_support]: For FUNCTION kind, tells if it has
- // deoptimization support.
- inline bool has_deoptimization_support();
- inline void set_has_deoptimization_support(bool value);
-
- // [has_debug_break_slots]: For FUNCTION kind, tells if it has
- // been compiled with debug break slots.
- inline bool has_debug_break_slots();
- inline void set_has_debug_break_slots(bool value);
-
- // [compiled_with_optimizing]: For FUNCTION kind, tells if it has
- // been compiled with IsOptimizing set to true.
- inline bool is_compiled_optimizable();
- inline void set_compiled_optimizable(bool value);
-
- // [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
- // how long the function has been marked for OSR and therefore which
- // level of loop nesting we are willing to do on-stack replacement
- // for.
- inline void set_allow_osr_at_loop_nesting_level(int level);
- inline int allow_osr_at_loop_nesting_level();
-
- // [profiler_ticks]: For FUNCTION kind, tells for how many profiler ticks
- // the code object was seen on the stack with no IC patching going on.
- inline int profiler_ticks();
- inline void set_profiler_ticks(int ticks);
-
- // [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
- // reserved in the code prologue.
- inline unsigned stack_slots();
- inline void set_stack_slots(unsigned slots);
-
- // [safepoint_table_start]: For kind OPTIMIZED_CODE, the offset in
- // the instruction stream where the safepoint table starts.
- inline unsigned safepoint_table_offset();
- inline void set_safepoint_table_offset(unsigned offset);
-
- // [stack_check_table_start]: For kind FUNCTION, the offset in the
- // instruction stream where the stack check table starts.
- inline unsigned stack_check_table_offset();
- inline void set_stack_check_table_offset(unsigned offset);
-
- // [check type]: For kind CALL_IC, tells how to check if the
- // receiver is valid for the given call.
- inline CheckType check_type();
- inline void set_check_type(CheckType value);
-
- // [type-recording unary op type]: For kind UNARY_OP_IC.
- inline byte unary_op_type();
- inline void set_unary_op_type(byte value);
-
- // [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
- inline byte to_boolean_state();
- inline void set_to_boolean_state(byte value);
-
- // [has_function_cache]: For kind STUB tells whether there is a function
- // cache is passed to the stub.
- inline bool has_function_cache();
- inline void set_has_function_cache(bool flag);
-
-
- // [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
- // the code is going to be deoptimized because of dead embedded maps.
- inline bool marked_for_deoptimization();
- inline void set_marked_for_deoptimization(bool flag);
-
- bool allowed_in_shared_map_code_cache();
-
- // Get the safepoint entry for the given pc.
- SafepointEntry GetSafepointEntry(Address pc);
-
- // Mark this code object as not having a stack check table. Assumes kind
- // is FUNCTION.
- void SetNoStackCheckTable();
-
- // Find the first map in an IC stub.
- Map* FindFirstMap();
- void FindAllMaps(MapHandleList* maps);
-
- // Find the first code in an IC stub.
- Code* FindFirstCode();
- void FindAllCode(CodeHandleList* code_list, int length);
-
- class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {};
- class ExtraICStateKeyedAccessGrowMode:
- public BitField<KeyedAccessGrowMode, 1, 1> {}; // NOLINT
-
- static const int kExtraICStateGrowModeShift = 1;
-
- static inline StrictModeFlag GetStrictMode(ExtraICState extra_ic_state) {
- return ExtraICStateStrictMode::decode(extra_ic_state);
- }
-
- static inline KeyedAccessGrowMode GetKeyedAccessGrowMode(
- ExtraICState extra_ic_state) {
- return ExtraICStateKeyedAccessGrowMode::decode(extra_ic_state);
- }
-
- static inline ExtraICState ComputeExtraICState(
- KeyedAccessGrowMode grow_mode,
- StrictModeFlag strict_mode) {
- return ExtraICStateKeyedAccessGrowMode::encode(grow_mode) |
- ExtraICStateStrictMode::encode(strict_mode);
- }
-
- // Flags operations.
- static inline Flags ComputeFlags(
- Kind kind,
- InlineCacheState ic_state = UNINITIALIZED,
- ExtraICState extra_ic_state = kNoExtraICState,
- StubType type = NORMAL,
- int argc = -1,
- InlineCacheHolderFlag holder = OWN_MAP);
-
- static inline Flags ComputeMonomorphicFlags(
- Kind kind,
- ExtraICState extra_ic_state = kNoExtraICState,
- StubType type = NORMAL,
- int argc = -1,
- InlineCacheHolderFlag holder = OWN_MAP);
-
- static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
- static inline StubType ExtractTypeFromFlags(Flags flags);
- static inline Kind ExtractKindFromFlags(Flags flags);
- static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
- static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
- static inline int ExtractArgumentsCountFromFlags(Flags flags);
-
- static inline Flags RemoveTypeFromFlags(Flags flags);
-
- // Convert a target address into a code object.
- static inline Code* GetCodeFromTargetAddress(Address address);
-
- // Convert an entry address into an object.
- static inline Object* GetObjectFromEntryAddress(Address location_of_address);
-
- // Returns the address of the first instruction.
- inline byte* instruction_start();
-
- // Returns the address right after the last instruction.
- inline byte* instruction_end();
-
- // Returns the size of the instructions, padding, and relocation information.
- inline int body_size();
-
- // Returns the address of the first relocation info (read backwards!).
- inline byte* relocation_start();
-
- // Code entry point.
- inline byte* entry();
-
- // Returns true if pc is inside this object's instructions.
- inline bool contains(byte* pc);
-
- // Relocate the code by delta bytes. Called to signal that this code
- // object has been moved by delta bytes.
- void Relocate(intptr_t delta);
-
- // Migrate code described by desc.
- void CopyFrom(const CodeDesc& desc);
-
- // Returns the object size for a given body (used for allocation).
- static int SizeFor(int body_size) {
- ASSERT_SIZE_TAG_ALIGNED(body_size);
- return RoundUp(kHeaderSize + body_size, kCodeAlignment);
- }
-
- // Calculate the size of the code object to report for log events. This takes
- // the layout of the code object into account.
- int ExecutableSize() {
- // Check that the assumptions about the layout of the code object holds.
- ASSERT_EQ(static_cast<int>(instruction_start() - address()),
- Code::kHeaderSize);
- return instruction_size() + Code::kHeaderSize;
- }
-
- // Locating source position.
- int SourcePosition(Address pc);
- int SourceStatementPosition(Address pc);
-
- // Casting.
- static inline Code* cast(Object* obj);
-
- // Dispatched behavior.
- int CodeSize() { return SizeFor(body_size()); }
- inline void CodeIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void CodeIterateBody(Heap* heap);
-
- DECLARE_PRINTER(Code)
- DECLARE_VERIFIER(Code)
-
- void ClearInlineCaches();
- void ClearTypeFeedbackCells(Heap* heap);
-
-#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
- enum Age {
- kNoAge = 0,
- CODE_AGE_LIST(DECLARE_CODE_AGE_ENUM)
- kAfterLastCodeAge,
- kLastCodeAge = kAfterLastCodeAge - 1,
- kCodeAgeCount = kAfterLastCodeAge - 1
- };
-#undef DECLARE_CODE_AGE_ENUM
-
- // Code aging
- static void MakeCodeAgeSequenceYoung(byte* sequence);
- void MakeOlder(MarkingParity);
- static bool IsYoungSequence(byte* sequence);
- bool IsOld();
-
- void PrintDeoptLocation(int bailout_id);
-
-#ifdef VERIFY_HEAP
- void VerifyEmbeddedMapsDependency();
-#endif
-
- // Max loop nesting marker used to postpose OSR. We don't take loop
- // nesting that is deeper than 5 levels into account.
- static const int kMaxLoopNestingMarker = 6;
-
- // Layout description.
- static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
- static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
- static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
- static const int kDeoptimizationDataOffset =
- kHandlerTableOffset + kPointerSize;
- static const int kTypeFeedbackInfoOffset =
- kDeoptimizationDataOffset + kPointerSize;
- static const int kGCMetadataOffset = kTypeFeedbackInfoOffset + kPointerSize;
- static const int kICAgeOffset =
- kGCMetadataOffset + kPointerSize;
- static const int kFlagsOffset = kICAgeOffset + kIntSize;
- static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
- static const int kKindSpecificFlags2Offset =
- kKindSpecificFlags1Offset + kIntSize;
- // Note: We might be able to squeeze this into the flags above.
- static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize;
-
- static const int kHeaderPaddingStart = kPrologueOffset + kIntSize;
-
- // Add padding to align the instruction start following right after
- // the Code object header.
- static const int kHeaderSize =
- (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
-
- // Byte offsets within kKindSpecificFlags1Offset.
- static const int kOptimizableOffset = kKindSpecificFlags1Offset;
- static const int kCheckTypeOffset = kKindSpecificFlags1Offset;
-
- static const int kFullCodeFlags = kOptimizableOffset + 1;
- class FullCodeFlagsHasDeoptimizationSupportField:
- public BitField<bool, 0, 1> {}; // NOLINT
- class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
- class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
-
- static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
- static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
-
- // Flags layout. BitField<type, shift, size>.
- class ICStateField: public BitField<InlineCacheState, 0, 3> {};
- class TypeField: public BitField<StubType, 3, 3> {};
- class CacheHolderField: public BitField<InlineCacheHolderFlag, 6, 1> {};
- class KindField: public BitField<Kind, 7, 4> {};
- class ExtraICStateField: public BitField<ExtraICState, 11, 2> {};
- class IsPregeneratedField: public BitField<bool, 13, 1> {};
-
- // KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
- static const int kStackSlotsFirstBit = 0;
- static const int kStackSlotsBitCount = 24;
- static const int kUnaryOpTypeFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kUnaryOpTypeBitCount = 3;
- static const int kToBooleanStateFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kToBooleanStateBitCount = 8;
- static const int kHasFunctionCacheFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kHasFunctionCacheBitCount = 1;
- static const int kMarkedForDeoptimizationFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount + 1;
- static const int kMarkedForDeoptimizationBitCount = 1;
-
- STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
- STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
- STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32);
- STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
- STATIC_ASSERT(kMarkedForDeoptimizationFirstBit +
- kMarkedForDeoptimizationBitCount <= 32);
-
- class StackSlotsField: public BitField<int,
- kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
- class UnaryOpTypeField: public BitField<int,
- kUnaryOpTypeFirstBit, kUnaryOpTypeBitCount> {}; // NOLINT
- class ToBooleanStateField: public BitField<int,
- kToBooleanStateFirstBit, kToBooleanStateBitCount> {}; // NOLINT
- class HasFunctionCacheField: public BitField<bool,
- kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {}; // NOLINT
- class MarkedForDeoptimizationField: public BitField<bool,
- kMarkedForDeoptimizationFirstBit,
- kMarkedForDeoptimizationBitCount> {}; // NOLINT
-
- // KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION)
- static const int kStubMajorKeyFirstBit = 0;
- static const int kSafepointTableOffsetFirstBit =
- kStubMajorKeyFirstBit + kStubMajorKeyBits;
- static const int kSafepointTableOffsetBitCount = 26;
-
- STATIC_ASSERT(kStubMajorKeyFirstBit + kStubMajorKeyBits <= 32);
- STATIC_ASSERT(kSafepointTableOffsetFirstBit +
- kSafepointTableOffsetBitCount <= 32);
-
- class SafepointTableOffsetField: public BitField<int,
- kSafepointTableOffsetFirstBit,
- kSafepointTableOffsetBitCount> {}; // NOLINT
- class StubMajorKeyField: public BitField<int,
- kStubMajorKeyFirstBit, kStubMajorKeyBits> {}; // NOLINT
-
- // KindSpecificFlags2 layout (FUNCTION)
- class StackCheckTableOffsetField: public BitField<int, 0, 31> {};
-
- // Signed field cannot be encoded using the BitField class.
- static const int kArgumentsCountShift = 14;
- static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
-
- // This constant should be encodable in an ARM instruction.
- static const int kFlagsNotUsedInLookup =
- TypeField::kMask | CacheHolderField::kMask;
-
- private:
- friend class RelocIterator;
-
- // Code aging
- byte* FindCodeAgeSequence();
- static void GetCodeAgeAndParity(Code* code, Age* age,
- MarkingParity* parity);
- static void GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity);
- static Code* GetCodeAgeStub(Age age, MarkingParity parity);
-
- // Code aging -- platform-specific
- static void PatchPlatformCodeAge(byte* sequence, Age age,
- MarkingParity parity);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
-};
-
-
-// This class describes the layout of dependent codes array of a map. The
-// array is partitioned into several groups of dependent codes. Each group
-// contains codes with the same dependency on the map. The array has the
-// following layout for n dependency groups:
-//
-// +----+----+-----+----+---------+----------+-----+---------+-----------+
-// | C1 | C2 | ... | Cn | group 1 | group 2 | ... | group n | undefined |
-// +----+----+-----+----+---------+----------+-----+---------+-----------+
-//
-// The first n elements are Smis, each of them specifies the number of codes
-// in the corresponding group. The subsequent elements contain grouped code
-// objects. The suffix of the array can be filled with the undefined value if
-// the number of codes is less than the length of the array. The order of the
-// code objects within a group is not preserved.
-//
-// All code indexes used in the class are counted starting from the first
-// code object of the first group. In other words, code index 0 corresponds
-// to array index n = kCodesStartIndex.
-
-class DependentCode: public FixedArray {
- public:
- enum DependencyGroup {
- // Group of code that weakly embed this map and depend on being
- // deoptimized when the map is garbage collected.
- kWeaklyEmbeddedGroup,
- // Group of code that omit run-time prototype checks for prototypes
- // described by this map. The group is deoptimized whenever an object
- // described by this map changes shape (and transitions to a new map),
- // possibly invalidating the assumptions embedded in the code.
- kPrototypeCheckGroup,
- kGroupCount = kPrototypeCheckGroup + 1
- };
-
- // Array for holding the index of the first code object of each group.
- // The last element stores the total number of code objects.
- class GroupStartIndexes {
- public:
- explicit GroupStartIndexes(DependentCode* entries);
- void Recompute(DependentCode* entries);
- int at(int i) { return start_indexes_[i]; }
- int number_of_entries() { return start_indexes_[kGroupCount]; }
- private:
- int start_indexes_[kGroupCount + 1];
- };
-
- bool Contains(DependencyGroup group, Code* code);
- static Handle<DependentCode> Insert(Handle<DependentCode> entries,
- DependencyGroup group,
- Handle<Code> value);
- void DeoptimizeDependentCodeGroup(DependentCode::DependencyGroup group);
-
- // The following low-level accessors should only be used by this class
- // and the mark compact collector.
- inline int number_of_entries(DependencyGroup group);
- inline void set_number_of_entries(DependencyGroup group, int value);
- inline Code* code_at(int i);
- inline void set_code_at(int i, Code* value);
- inline Object** code_slot_at(int i);
- inline void clear_code_at(int i);
- static inline DependentCode* cast(Object* object);
-
- private:
- // Make a room at the end of the given group by moving out the first
- // code objects of the subsequent groups.
- inline void ExtendGroup(DependencyGroup group);
- static const int kCodesStartIndex = kGroupCount;
-};
-
-
-// All heap objects have a Map that describes their structure.
-// A Map contains information about:
-// - Size information about the object
-// - How to iterate over an object (for garbage collection)
-class Map: public HeapObject {
- public:
- // Instance size.
- // Size in bytes or kVariableSizeSentinel if instances do not have
- // a fixed size.
- inline int instance_size();
- inline void set_instance_size(int value);
-
- // Count of properties allocated in the object.
- inline int inobject_properties();
- inline void set_inobject_properties(int value);
-
- // Count of property fields pre-allocated in the object when first allocated.
- inline int pre_allocated_property_fields();
- inline void set_pre_allocated_property_fields(int value);
-
- // Instance type.
- inline InstanceType instance_type();
- inline void set_instance_type(InstanceType value);
-
- // Tells how many unused property fields are available in the
- // instance (only used for JSObject in fast mode).
- inline int unused_property_fields();
- inline void set_unused_property_fields(int value);
-
- // Bit field.
- inline byte bit_field();
- inline void set_bit_field(byte value);
-
- // Bit field 2.
- inline byte bit_field2();
- inline void set_bit_field2(byte value);
-
- // Bit field 3.
- inline int bit_field3();
- inline void set_bit_field3(int value);
-
- class EnumLengthBits: public BitField<int, 0, 11> {};
- class NumberOfOwnDescriptorsBits: public BitField<int, 11, 11> {};
- class IsShared: public BitField<bool, 22, 1> {};
- class FunctionWithPrototype: public BitField<bool, 23, 1> {};
- class DictionaryMap: public BitField<bool, 24, 1> {};
- class OwnsDescriptors: public BitField<bool, 25, 1> {};
- class IsObserved: public BitField<bool, 26, 1> {};
- class NamedInterceptorIsFallback: public BitField<bool, 27, 1> {};
- class HasInstanceCallHandler: public BitField<bool, 28, 1> {};
- class AttachedToSharedFunctionInfo: public BitField<bool, 29, 1> {};
-
- // Tells whether the object in the prototype property will be used
- // for instances created from this function. If the prototype
- // property is set to a value that is not a JSObject, the prototype
- // property will not be used to create instances of the function.
- // See ECMA-262, 13.2.2.
- inline void set_non_instance_prototype(bool value);
- inline bool has_non_instance_prototype();
-
- // Tells whether function has special prototype property. If not, prototype
- // property will not be created when accessed (will return undefined),
- // and construction from this function will not be allowed.
- inline void set_function_with_prototype(bool value);
- inline bool function_with_prototype();
-
- // Tells whether the instance with this map should be ignored by the
- // __proto__ accessor.
- inline void set_is_hidden_prototype() {
- set_bit_field(bit_field() | (1 << kIsHiddenPrototype));
- }
-
- inline bool is_hidden_prototype() {
- return ((1 << kIsHiddenPrototype) & bit_field()) != 0;
- }
-
- // Records and queries whether the instance has a named interceptor.
- inline void set_has_named_interceptor() {
- set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
- }
-
- inline bool has_named_interceptor() {
- return ((1 << kHasNamedInterceptor) & bit_field()) != 0;
- }
-
- // Records and queries whether the instance has an indexed interceptor.
- inline void set_has_indexed_interceptor() {
- set_bit_field(bit_field() | (1 << kHasIndexedInterceptor));
- }
-
- inline bool has_indexed_interceptor() {
- return ((1 << kHasIndexedInterceptor) & bit_field()) != 0;
- }
-
- // Tells whether the instance is undetectable.
- // An undetectable object is a special class of JSObject: 'typeof' operator
- // returns undefined, ToBoolean returns false. Otherwise it behaves like
- // a normal JS object. It is useful for implementing undetectable
- // document.all in Firefox & Safari.
- // See https://bugzilla.mozilla.org/show_bug.cgi?id=248549.
- inline void set_is_undetectable() {
- set_bit_field(bit_field() | (1 << kIsUndetectable));
- }
-
- inline bool is_undetectable() {
- return ((1 << kIsUndetectable) & bit_field()) != 0;
- }
-
- // Tells whether the instance has a call-as-function handler.
- inline void set_has_instance_call_handler() {
- set_bit_field3(HasInstanceCallHandler::update(bit_field3(), true));
- }
-
- inline bool has_instance_call_handler() {
- return HasInstanceCallHandler::decode(bit_field3());
- }
-
- inline void set_is_extensible(bool value);
- inline bool is_extensible();
-
- inline void set_elements_kind(ElementsKind elements_kind) {
- ASSERT(elements_kind < kElementsKindCount);
- ASSERT(kElementsKindCount <= (1 << kElementsKindBitCount));
- ASSERT(!is_observed() ||
- elements_kind == DICTIONARY_ELEMENTS ||
- elements_kind == NON_STRICT_ARGUMENTS_ELEMENTS ||
- IsExternalArrayElementsKind(elements_kind));
- set_bit_field2((bit_field2() & ~kElementsKindMask) |
- (elements_kind << kElementsKindShift));
- ASSERT(this->elements_kind() == elements_kind);
- }
-
- inline ElementsKind elements_kind() {
- return static_cast<ElementsKind>(
- (bit_field2() & kElementsKindMask) >> kElementsKindShift);
- }
-
- // Tells whether the instance has fast elements that are only Smis.
- inline bool has_fast_smi_elements() {
- return IsFastSmiElementsKind(elements_kind());
- }
-
- // Tells whether the instance has fast elements.
- inline bool has_fast_object_elements() {
- return IsFastObjectElementsKind(elements_kind());
- }
-
- inline bool has_fast_smi_or_object_elements() {
- return IsFastSmiOrObjectElementsKind(elements_kind());
- }
-
- inline bool has_fast_double_elements() {
- return IsFastDoubleElementsKind(elements_kind());
- }
-
- inline bool has_fast_elements() {
- return IsFastElementsKind(elements_kind());
- }
-
- inline bool has_non_strict_arguments_elements() {
- return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
- }
-
- inline bool has_external_array_elements() {
- return IsExternalArrayElementsKind(elements_kind());
- }
-
- inline bool has_dictionary_elements() {
- return IsDictionaryElementsKind(elements_kind());
- }
-
- inline bool has_slow_elements_kind() {
- return elements_kind() == DICTIONARY_ELEMENTS
- || elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
- }
-
- static bool IsValidElementsTransition(ElementsKind from_kind,
- ElementsKind to_kind);
-
- inline bool HasTransitionArray();
- inline bool HasElementsTransition();
- inline Map* elements_transition_map();
- MUST_USE_RESULT inline MaybeObject* set_elements_transition_map(
- Map* transitioned_map);
- inline void SetTransition(int transition_index, Map* target);
- inline Map* GetTransition(int transition_index);
- MUST_USE_RESULT inline MaybeObject* AddTransition(String* key,
- Map* target,
- SimpleTransitionFlag flag);
- DECL_ACCESSORS(transitions, TransitionArray)
- inline void ClearTransitions(Heap* heap,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- // Tells whether the map is attached to SharedFunctionInfo
- // (for inobject slack tracking).
- inline void set_attached_to_shared_function_info(bool value);
-
- inline bool attached_to_shared_function_info();
-
- // Tells whether the map is shared between objects that may have different
- // behavior. If true, the map should never be modified, instead a clone
- // should be created and modified.
- inline void set_is_shared(bool value);
- inline bool is_shared();
-
- // Tells whether the map is used for JSObjects in dictionary mode (ie
- // normalized objects, ie objects for which HasFastProperties returns false).
- // A map can never be used for both dictionary mode and fast mode JSObjects.
- // False by default and for HeapObjects that are not JSObjects.
- inline void set_dictionary_map(bool value);
- inline bool is_dictionary_map();
-
- // Tells whether the instance needs security checks when accessing its
- // properties.
- inline void set_is_access_check_needed(bool access_check_needed);
- inline bool is_access_check_needed();
-
- // Whether the named interceptor is a fallback interceptor or not
- inline void set_named_interceptor_is_fallback(bool value);
- inline bool named_interceptor_is_fallback();
-
- // Tells whether the instance has the space for an external resource
- // object
- inline void set_has_external_resource(bool value);
- inline bool has_external_resource();
-
- // Tells whether the user object comparison callback should be used for
- // comparisons involving this object
- inline void set_use_user_object_comparison(bool value);
- inline bool use_user_object_comparison();
-
- // [prototype]: implicit prototype object.
- DECL_ACCESSORS(prototype, Object)
-
- // [constructor]: points back to the function responsible for this map.
- DECL_ACCESSORS(constructor, Object)
-
- inline JSFunction* unchecked_constructor();
-
- // [instance descriptors]: describes the object.
- DECL_ACCESSORS(instance_descriptors, DescriptorArray)
- inline void InitializeDescriptors(DescriptorArray* descriptors);
-
- // [stub cache]: contains stubs compiled for this map.
- DECL_ACCESSORS(code_cache, Object)
-
- // [dependent code]: list of optimized codes that have this map embedded.
- DECL_ACCESSORS(dependent_code, DependentCode)
-
- // [back pointer]: points back to the parent map from which a transition
- // leads to this map. The field overlaps with prototype transitions and the
- // back pointer will be moved into the prototype transitions array if
- // required.
- inline Object* GetBackPointer();
- inline void SetBackPointer(Object* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void init_back_pointer(Object* undefined);
-
- // [prototype transitions]: cache of prototype transitions.
- // Prototype transition is a transition that happens
- // when we change object's prototype to a new one.
- // Cache format:
- // 0: finger - index of the first free cell in the cache
- // 1: back pointer that overlaps with prototype transitions field.
- // 2 + 2 * i: prototype
- // 3 + 2 * i: target map
- inline FixedArray* GetPrototypeTransitions();
- MUST_USE_RESULT inline MaybeObject* SetPrototypeTransitions(
- FixedArray* prototype_transitions);
- inline bool HasPrototypeTransitions();
-
- inline HeapObject* UncheckedPrototypeTransitions();
- inline TransitionArray* unchecked_transition_array();
-
- static const int kProtoTransitionHeaderSize = 1;
- static const int kProtoTransitionNumberOfEntriesOffset = 0;
- static const int kProtoTransitionElementsPerEntry = 2;
- static const int kProtoTransitionPrototypeOffset = 0;
- static const int kProtoTransitionMapOffset = 1;
-
- inline int NumberOfProtoTransitions() {
- FixedArray* cache = GetPrototypeTransitions();
- if (cache->length() == 0) return 0;
- return
- Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
- }
-
- inline void SetNumberOfProtoTransitions(int value) {
- FixedArray* cache = GetPrototypeTransitions();
- ASSERT(cache->length() != 0);
- cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
- Smi::FromInt(value));
- }
-
- // Lookup in the map's instance descriptors and fill out the result
- // with the given holder if the name is found. The holder may be
- // NULL when this function is used from the compiler.
- inline void LookupDescriptor(JSObject* holder,
- String* name,
- LookupResult* result);
-
- inline void LookupTransition(JSObject* holder,
- String* name,
- LookupResult* result);
-
- // The size of transition arrays are limited so they do not end up in large
- // object space. Otherwise ClearNonLiveTransitions would leak memory while
- // applying in-place right trimming.
- inline bool CanHaveMoreTransitions();
-
- int LastAdded() {
- int number_of_own_descriptors = NumberOfOwnDescriptors();
- ASSERT(number_of_own_descriptors > 0);
- return number_of_own_descriptors - 1;
- }
-
- int NumberOfOwnDescriptors() {
- return NumberOfOwnDescriptorsBits::decode(bit_field3());
- }
-
- void SetNumberOfOwnDescriptors(int number) {
- ASSERT(number <= instance_descriptors()->number_of_descriptors());
- set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
- }
-
- inline JSGlobalPropertyCell* RetrieveDescriptorsPointer();
-
- int EnumLength() {
- return EnumLengthBits::decode(bit_field3());
- }
-
- void SetEnumLength(int length) {
- if (length != kInvalidEnumCache) {
- ASSERT(length >= 0);
- ASSERT(length == 0 || instance_descriptors()->HasEnumCache());
- ASSERT(length <= NumberOfOwnDescriptors());
- }
- set_bit_field3(EnumLengthBits::update(bit_field3(), length));
- }
-
- inline bool CanTrackAllocationSite();
- inline bool owns_descriptors();
- inline void set_owns_descriptors(bool is_shared);
- inline bool is_observed();
- inline void set_is_observed(bool is_observed);
-
- MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
- MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
- MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
- MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors(
- DescriptorArray* descriptors,
- String* name,
- TransitionFlag flag,
- int descriptor_index);
- MUST_USE_RESULT MaybeObject* ShareDescriptor(DescriptorArray* descriptors,
- Descriptor* descriptor);
- MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor,
- TransitionFlag flag);
- MUST_USE_RESULT MaybeObject* CopyInsertDescriptor(Descriptor* descriptor,
- TransitionFlag flag);
- MUST_USE_RESULT MaybeObject* CopyReplaceDescriptor(
- DescriptorArray* descriptors,
- Descriptor* descriptor,
- int index,
- TransitionFlag flag);
- MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind,
- TransitionFlag flag);
-
- MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
- NormalizedMapSharingMode sharing);
-
- inline void AppendDescriptor(Descriptor* desc,
- const DescriptorArray::WhitenessWitness&);
-
- // Returns a copy of the map, with all transitions dropped from the
- // instance descriptors.
- MUST_USE_RESULT MaybeObject* Copy();
-
- // Returns the property index for name (only valid for FAST MODE).
- int PropertyIndexFor(String* name);
-
- // Returns the next free property index (only valid for FAST MODE).
- int NextFreePropertyIndex();
-
- // Returns the number of properties described in instance_descriptors
- // filtering out properties with the specified attributes.
- int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
- PropertyAttributes filter = NONE);
-
- // Casting.
- static inline Map* cast(Object* obj);
-
- // Locate an accessor in the instance descriptor.
- AccessorDescriptor* FindAccessor(String* name);
-
- // Code cache operations.
-
- // Clears the code cache.
- inline void ClearCodeCache(Heap* heap);
-
- // Update code cache.
- static void UpdateCodeCache(Handle<Map> map,
- Handle<String> name,
- Handle<Code> code);
- MUST_USE_RESULT MaybeObject* UpdateCodeCache(String* name, Code* code);
-
- // Extend the descriptor array of the map with the list of descriptors.
- // In case of duplicates, the latest descriptor is used.
- static void AppendCallbackDescriptors(Handle<Map> map,
- Handle<Object> descriptors);
-
- static void EnsureDescriptorSlack(Handle<Map> map, int slack);
-
- // Returns the found code or undefined if absent.
- Object* FindInCodeCache(String* name, Code::Flags flags);
-
- // Returns the non-negative index of the code object if it is in the
- // cache and -1 otherwise.
- int IndexInCodeCache(Object* name, Code* code);
-
- // Removes a code object from the code cache at the given index.
- void RemoveFromCodeCache(String* name, Code* code, int index);
-
- // Set all map transitions from this map to dead maps to null. Also clear
- // back pointers in transition targets so that we do not process this map
- // again while following back pointers.
- void ClearNonLiveTransitions(Heap* heap);
-
- // Computes a hash value for this map, to be used in HashTables and such.
- int Hash();
-
- // Compares this map to another to see if they describe equivalent objects.
- // If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
- // it had exactly zero inobject properties.
- // The "shared" flags of both this map and |other| are ignored.
- bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
-
- // Returns the map that this map transitions to if its elements_kind
- // is changed to |elements_kind|, or NULL if no such map is cached yet.
- // |safe_to_add_transitions| is set to false if adding transitions is not
- // allowed.
- Map* LookupElementsTransitionMap(ElementsKind elements_kind);
-
- // Returns the transitioned map for this map with the most generic
- // elements_kind that's found in |candidates|, or null handle if no match is
- // found at all.
- Handle<Map> FindTransitionedMap(MapHandleList* candidates);
- Map* FindTransitionedMap(MapList* candidates);
-
- // Zaps the contents of backing data structures. Note that the
- // heap verifier (i.e. VerifyMarkingVisitor) relies on zapping of objects
- // holding weak references when incremental marking is used, because it also
- // iterates over objects that are otherwise unreachable.
- // In general we only want to call these functions in release mode when
- // heap verification is turned on.
- void ZapPrototypeTransitions();
- void ZapTransitions();
-
- bool CanTransition() {
- // Only JSObject and subtypes have map transitions and back pointers.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
- return instance_type() >= FIRST_JS_OBJECT_TYPE;
- }
-
- // Fires when the layout of an object with a leaf map changes.
- // This includes adding transitions to the leaf map or changing
- // the descriptor array.
- inline void NotifyLeafMapLayoutChange();
-
- inline bool CanOmitPrototypeChecks();
-
- inline void AddDependentCode(DependentCode::DependencyGroup group,
- Handle<Code> code);
-
- // Dispatched behavior.
- DECLARE_PRINTER(Map)
- DECLARE_VERIFIER(Map)
-
-#ifdef VERIFY_HEAP
- void SharedMapVerify();
- void VerifyOmittedPrototypeChecks();
-#endif
-
- inline int visitor_id();
- inline void set_visitor_id(int visitor_id);
-
- typedef void (*TraverseCallback)(Map* map, void* data);
-
- void TraverseTransitionTree(TraverseCallback callback, void* data);
-
- // When you set the prototype of an object using the __proto__ accessor you
- // need a new map for the object (the prototype is stored in the map). In
- // order not to multiply maps unnecessarily we store these as transitions in
- // the original map. That way we can transition to the same map if the same
- // prototype is set, rather than creating a new map every time. The
- // transitions are in the form of a map where the keys are prototype objects
- // and the values are the maps the are transitioned to.
- static const int kMaxCachedPrototypeTransitions = 256;
-
- Map* GetPrototypeTransition(Object* prototype);
-
- MUST_USE_RESULT MaybeObject* PutPrototypeTransition(Object* prototype,
- Map* map);
-
- static const int kMaxPreAllocatedPropertyFields = 255;
-
- // Constant for denoting that the enum cache is not yet initialized.
- static const int kInvalidEnumCache = EnumLengthBits::kMax;
-
- // Layout description.
- static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
- static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
- static const int kPrototypeOffset = kInstanceAttributesOffset + kIntSize;
- static const int kConstructorOffset = kPrototypeOffset + kPointerSize;
- // Storage for the transition array is overloaded to directly contain a back
- // pointer if unused. When the map has transitions, the back pointer is
- // transferred to the transition array and accessed through an extra
- // indirection.
- static const int kTransitionsOrBackPointerOffset =
- kConstructorOffset + kPointerSize;
- static const int kDescriptorsOffset =
- kTransitionsOrBackPointerOffset + kPointerSize;
- static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
- static const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize;
- static const int kBitField3Offset = kDependentCodeOffset + kPointerSize;
- static const int kSize = kBitField3Offset + kPointerSize;
-
- // Layout of pointer fields. Heap iteration code relies on them
- // being continuously allocated.
- static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
- static const int kPointerFieldsEndOffset = kBitField3Offset + kPointerSize;
-
- // Byte offsets within kInstanceSizesOffset.
- static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
- static const int kInObjectPropertiesByte = 1;
- static const int kInObjectPropertiesOffset =
- kInstanceSizesOffset + kInObjectPropertiesByte;
- static const int kPreAllocatedPropertyFieldsByte = 2;
- static const int kPreAllocatedPropertyFieldsOffset =
- kInstanceSizesOffset + kPreAllocatedPropertyFieldsByte;
- static const int kVisitorIdByte = 3;
- static const int kVisitorIdOffset = kInstanceSizesOffset + kVisitorIdByte;
-
- // Byte offsets within kInstanceAttributesOffset attributes.
- static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0;
- static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 1;
- static const int kBitFieldOffset = kInstanceAttributesOffset + 2;
- static const int kBitField2Offset = kInstanceAttributesOffset + 3;
-
- STATIC_CHECK(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
-
- // Bit positions for bit field.
- static const int kUnused = 0; // To be used for marking recently used maps.
- static const int kHasNonInstancePrototype = 1;
- static const int kIsHiddenPrototype = 2;
- static const int kHasNamedInterceptor = 3;
- static const int kHasIndexedInterceptor = 4;
- static const int kIsUndetectable = 5;
- static const int kHasExternalResource = 6;
- static const int kIsAccessCheckNeeded = 7;
-
- // Bit positions for bit field 2
- static const int kIsExtensible = 0;
- static const int kStringWrapperSafeForDefaultValueOf = 1;
- static const int kUseUserObjectComparison = 2;
- // No bits can be used after kElementsKindFirstBit, they are all reserved for
- // storing ElementKind.
- static const int kElementsKindShift = 3;
- static const int kElementsKindBitCount = 5;
-
- // Derived values from bit field 2
- static const int kElementsKindMask = (-1 << kElementsKindShift) &
- ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
- static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
- (FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
- static const int8_t kMaximumBitField2FastSmiElementValue =
- static_cast<int8_t>((FAST_SMI_ELEMENTS + 1) <<
- Map::kElementsKindShift) - 1;
- static const int8_t kMaximumBitField2FastHoleyElementValue =
- static_cast<int8_t>((FAST_HOLEY_ELEMENTS + 1) <<
- Map::kElementsKindShift) - 1;
- static const int8_t kMaximumBitField2FastHoleySmiElementValue =
- static_cast<int8_t>((FAST_HOLEY_SMI_ELEMENTS + 1) <<
- Map::kElementsKindShift) - 1;
-
- typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
- kPointerFieldsEndOffset,
- kSize> BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
-};
-
-
-// An abstract superclass, a marker class really, for simple structure classes.
-// It doesn't carry much functionality but allows struct classes to be
-// identified in the type system.
-class Struct: public HeapObject {
- public:
- inline void InitializeBody(int object_size);
- static inline Struct* cast(Object* that);
-};
-
-
-// Script describes a script which has been added to the VM.
-class Script: public Struct {
- public:
- // Script types.
- enum Type {
- TYPE_NATIVE = 0,
- TYPE_EXTENSION = 1,
- TYPE_NORMAL = 2
- };
-
- // Script compilation types.
- enum CompilationType {
- COMPILATION_TYPE_HOST = 0,
- COMPILATION_TYPE_EVAL = 1
- };
-
- // Script compilation state.
- enum CompilationState {
- COMPILATION_STATE_INITIAL = 0,
- COMPILATION_STATE_COMPILED = 1
- };
-
- // [source]: the script source.
- DECL_ACCESSORS(source, Object)
-
- // [name]: the script name.
- DECL_ACCESSORS(name, Object)
-
- // [id]: the script id.
- DECL_ACCESSORS(id, Object)
-
- // [line_offset]: script line offset in resource from where it was extracted.
- DECL_ACCESSORS(line_offset, Smi)
-
- // [column_offset]: script column offset in resource from where it was
- // extracted.
- DECL_ACCESSORS(column_offset, Smi)
-
- // [data]: additional data associated with this script.
- DECL_ACCESSORS(data, Object)
-
- // [context_data]: context data for the context this script was compiled in.
- DECL_ACCESSORS(context_data, Object)
-
- // [wrapper]: the wrapper cache.
- DECL_ACCESSORS(wrapper, Foreign)
-
- // [type]: the script type.
- DECL_ACCESSORS(type, Smi)
-
- // [compilation]: how the the script was compiled.
- DECL_ACCESSORS(compilation_type, Smi)
-
- // [is_compiled]: determines whether the script has already been compiled.
- DECL_ACCESSORS(compilation_state, Smi)
-
- // [line_ends]: FixedArray of line ends positions.
- DECL_ACCESSORS(line_ends, Object)
-
- // [eval_from_shared]: for eval scripts the shared funcion info for the
- // function from which eval was called.
- DECL_ACCESSORS(eval_from_shared, Object)
-
- // [eval_from_instructions_offset]: the instruction offset in the code for the
- // function from which eval was called where eval was called.
- DECL_ACCESSORS(eval_from_instructions_offset, Smi)
-
- static inline Script* cast(Object* obj);
-
- // If script source is an external string, check that the underlying
- // resource is accessible. Otherwise, always return true.
- inline bool HasValidSource();
-
- // Dispatched behavior.
- DECLARE_PRINTER(Script)
- DECLARE_VERIFIER(Script)
-
- static const int kSourceOffset = HeapObject::kHeaderSize;
- static const int kNameOffset = kSourceOffset + kPointerSize;
- static const int kLineOffsetOffset = kNameOffset + kPointerSize;
- static const int kColumnOffsetOffset = kLineOffsetOffset + kPointerSize;
- static const int kDataOffset = kColumnOffsetOffset + kPointerSize;
- static const int kContextOffset = kDataOffset + kPointerSize;
- static const int kWrapperOffset = kContextOffset + kPointerSize;
- static const int kTypeOffset = kWrapperOffset + kPointerSize;
- static const int kCompilationTypeOffset = kTypeOffset + kPointerSize;
- static const int kCompilationStateOffset =
- kCompilationTypeOffset + kPointerSize;
- static const int kLineEndsOffset = kCompilationStateOffset + kPointerSize;
- static const int kIdOffset = kLineEndsOffset + kPointerSize;
- static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
- static const int kEvalFrominstructionsOffsetOffset =
- kEvalFromSharedOffset + kPointerSize;
- static const int kSize = kEvalFrominstructionsOffsetOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Script);
-};
-
-
-// List of builtin functions we want to identify to improve code
-// generation.
-//
-// Each entry has a name of a global object property holding an object
-// optionally followed by ".prototype", a name of a builtin function
-// on the object (the one the id is set for), and a label.
-//
-// Installation of ids for the selected builtin functions is handled
-// by the bootstrapper.
-//
-// NOTE: Order is important: math functions should be at the end of
-// the list and MathFloor should be the first math function.
-#define FUNCTIONS_WITH_ID_LIST(V) \
- V(Array.prototype, push, ArrayPush) \
- V(Array.prototype, pop, ArrayPop) \
- V(Function.prototype, apply, FunctionApply) \
- V(String.prototype, charCodeAt, StringCharCodeAt) \
- V(String.prototype, charAt, StringCharAt) \
- V(String, fromCharCode, StringFromCharCode) \
- V(Math, floor, MathFloor) \
- V(Math, round, MathRound) \
- V(Math, ceil, MathCeil) \
- V(Math, abs, MathAbs) \
- V(Math, log, MathLog) \
- V(Math, sin, MathSin) \
- V(Math, cos, MathCos) \
- V(Math, tan, MathTan) \
- V(Math, asin, MathASin) \
- V(Math, acos, MathACos) \
- V(Math, atan, MathATan) \
- V(Math, exp, MathExp) \
- V(Math, sqrt, MathSqrt) \
- V(Math, pow, MathPow) \
- V(Math, random, MathRandom) \
- V(Math, max, MathMax) \
- V(Math, min, MathMin)
-
-
-enum BuiltinFunctionId {
-#define DECLARE_FUNCTION_ID(ignored1, ignore2, name) \
- k##name,
- FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
-#undef DECLARE_FUNCTION_ID
- // Fake id for a special case of Math.pow. Note, it continues the
- // list of math functions.
- kMathPowHalf,
- kFirstMathFunctionId = kMathFloor
-};
-
-
-// SharedFunctionInfo describes the JSFunction information that can be
-// shared by multiple instances of the function.
-class SharedFunctionInfo: public HeapObject {
- public:
- // [name]: Function name.
- DECL_ACCESSORS(name, Object)
-
- // [code]: Function code.
- DECL_ACCESSORS(code, Code)
- inline void ReplaceCode(Code* code);
-
- // [optimized_code_map]: Map from native context to optimized code
- // and a shared literals array or Smi 0 if none.
- DECL_ACCESSORS(optimized_code_map, Object)
-
- // Returns index i of the entry with the specified context. At position
- // i - 1 is the context, position i the code, and i + 1 the literals array.
- // Returns -1 when no matching entry is found.
- int SearchOptimizedCodeMap(Context* native_context);
-
- // Installs optimized code from the code map on the given closure. The
- // index has to be consistent with a search result as defined above.
- void InstallFromOptimizedCodeMap(JSFunction* function, int index);
-
- // Clear optimized code map.
- inline void ClearOptimizedCodeMap();
-
- // Add a new entry to the optimized code map.
- static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
- Handle<Context> native_context,
- Handle<Code> code,
- Handle<FixedArray> literals);
- static const int kEntryLength = 3;
-
- // [scope_info]: Scope info.
- DECL_ACCESSORS(scope_info, ScopeInfo)
-
- // [construct stub]: Code stub for constructing instances of this function.
- DECL_ACCESSORS(construct_stub, Code)
-
- inline Code* unchecked_code();
-
- // Returns if this function has been compiled to native code yet.
- inline bool is_compiled();
-
- // [length]: The function length - usually the number of declared parameters.
- // Use up to 2^30 parameters.
- inline int length();
- inline void set_length(int value);
-
- // [formal parameter count]: The declared number of parameters.
- inline int formal_parameter_count();
- inline void set_formal_parameter_count(int value);
-
- // Set the formal parameter count so the function code will be
- // called without using argument adaptor frames.
- inline void DontAdaptArguments();
-
- // [expected_nof_properties]: Expected number of properties for the function.
- inline int expected_nof_properties();
- inline void set_expected_nof_properties(int value);
-
- // Inobject slack tracking is the way to reclaim unused inobject space.
- //
- // The instance size is initially determined by adding some slack to
- // expected_nof_properties (to allow for a few extra properties added
- // after the constructor). There is no guarantee that the extra space
- // will not be wasted.
- //
- // Here is the algorithm to reclaim the unused inobject space:
- // - Detect the first constructor call for this SharedFunctionInfo.
- // When it happens enter the "in progress" state: remember the
- // constructor's initial_map and install a special construct stub that
- // counts constructor calls.
- // - While the tracking is in progress create objects filled with
- // one_pointer_filler_map instead of undefined_value. This way they can be
- // resized quickly and safely.
- // - Once enough (kGenerousAllocationCount) objects have been created
- // compute the 'slack' (traverse the map transition tree starting from the
- // initial_map and find the lowest value of unused_property_fields).
- // - Traverse the transition tree again and decrease the instance size
- // of every map. Existing objects will resize automatically (they are
- // filled with one_pointer_filler_map). All further allocations will
- // use the adjusted instance size.
- // - Decrease expected_nof_properties so that an allocations made from
- // another context will use the adjusted instance size too.
- // - Exit "in progress" state by clearing the reference to the initial_map
- // and setting the regular construct stub (generic or inline).
- //
- // The above is the main event sequence. Some special cases are possible
- // while the tracking is in progress:
- //
- // - GC occurs.
- // Check if the initial_map is referenced by any live objects (except this
- // SharedFunctionInfo). If it is, continue tracking as usual.
- // If it is not, clear the reference and reset the tracking state. The
- // tracking will be initiated again on the next constructor call.
- //
- // - The constructor is called from another context.
- // Immediately complete the tracking, perform all the necessary changes
- // to maps. This is necessary because there is no efficient way to track
- // multiple initial_maps.
- // Proceed to create an object in the current context (with the adjusted
- // size).
- //
- // - A different constructor function sharing the same SharedFunctionInfo is
- // called in the same context. This could be another closure in the same
- // context, or the first function could have been disposed.
- // This is handled the same way as the previous case.
- //
- // Important: inobject slack tracking is not attempted during the snapshot
- // creation.
-
- static const int kGenerousAllocationCount = 8;
-
- // [construction_count]: Counter for constructor calls made during
- // the tracking phase.
- inline int construction_count();
- inline void set_construction_count(int value);
-
- // [initial_map]: initial map of the first function called as a constructor.
- // Saved for the duration of the tracking phase.
- // This is a weak link (GC resets it to undefined_value if no other live
- // object reference this map).
- DECL_ACCESSORS(initial_map, Object)
-
- // True if the initial_map is not undefined and the countdown stub is
- // installed.
- inline bool IsInobjectSlackTrackingInProgress();
-
- // Starts the tracking.
- // Stores the initial map and installs the countdown stub.
- // IsInobjectSlackTrackingInProgress is normally true after this call,
- // except when tracking have not been started (e.g. the map has no unused
- // properties or the snapshot is being built).
- void StartInobjectSlackTracking(Map* map);
-
- // Completes the tracking.
- // IsInobjectSlackTrackingInProgress is false after this call.
- void CompleteInobjectSlackTracking();
-
- // Invoked before pointers in SharedFunctionInfo are being marked.
- // Also clears the optimized code map.
- inline void BeforeVisitingPointers();
-
- // Clears the initial_map before the GC marking phase to ensure the reference
- // is weak. IsInobjectSlackTrackingInProgress is false after this call.
- void DetachInitialMap();
-
- // Restores the link to the initial map after the GC marking phase.
- // IsInobjectSlackTrackingInProgress is true after this call.
- void AttachInitialMap(Map* map);
-
- // False if there are definitely no live objects created from this function.
- // True if live objects _may_ exist (existence not guaranteed).
- // May go back from true to false after GC.
- DECL_BOOLEAN_ACCESSORS(live_objects_may_exist)
-
- // [instance class name]: class name for instances.
- DECL_ACCESSORS(instance_class_name, Object)
-
- // [function data]: This field holds some additional data for function.
- // Currently it either has FunctionTemplateInfo to make benefit the API
- // or Smi identifying a builtin function.
- // In the long run we don't want all functions to have this field but
- // we can fix that when we have a better model for storing hidden data
- // on objects.
- DECL_ACCESSORS(function_data, Object)
-
- inline bool IsApiFunction();
- inline FunctionTemplateInfo* get_api_func_data();
- inline bool HasBuiltinFunctionId();
- inline BuiltinFunctionId builtin_function_id();
-
- // [script info]: Script from which the function originates.
- DECL_ACCESSORS(script, Object)
-
- // [num_literals]: Number of literals used by this function.
- inline int num_literals();
- inline void set_num_literals(int value);
-
- // [start_position_and_type]: Field used to store both the source code
- // position, whether or not the function is a function expression,
- // and whether or not the function is a toplevel function. The two
- // least significants bit indicates whether the function is an
- // expression and the rest contains the source code position.
- inline int start_position_and_type();
- inline void set_start_position_and_type(int value);
-
- // [debug info]: Debug information.
- DECL_ACCESSORS(debug_info, Object)
-
- // [inferred name]: Name inferred from variable or property
- // assignment of this function. Used to facilitate debugging and
- // profiling of JavaScript code written in OO style, where almost
- // all functions are anonymous but are assigned to object
- // properties.
- DECL_ACCESSORS(inferred_name, String)
-
- // The function's name if it is non-empty, otherwise the inferred name.
- String* DebugName();
-
- // Position of the 'function' token in the script source.
- inline int function_token_position();
- inline void set_function_token_position(int function_token_position);
-
- // Position of this function in the script source.
- inline int start_position();
- inline void set_start_position(int start_position);
-
- // End position of this function in the script source.
- inline int end_position();
- inline void set_end_position(int end_position);
-
- // Is this function a function expression in the source code.
- DECL_BOOLEAN_ACCESSORS(is_expression)
-
- // Is this function a top-level function (scripts, evals).
- DECL_BOOLEAN_ACCESSORS(is_toplevel)
-
- // Bit field containing various information collected by the compiler to
- // drive optimization.
- inline int compiler_hints();
- inline void set_compiler_hints(int value);
-
- inline int ast_node_count();
- inline void set_ast_node_count(int count);
-
- // A counter used to determine when to stress the deoptimizer with a
- // deopt.
- inline int stress_deopt_counter();
- inline void set_stress_deopt_counter(int counter);
-
- inline int profiler_ticks();
-
- // Inline cache age is used to infer whether the function survived a context
- // disposal or not. In the former case we reset the opt_count.
- inline int ic_age();
- inline void set_ic_age(int age);
-
- // Add information on assignments of the form this.x = ...;
- void SetThisPropertyAssignmentsInfo(
- bool has_only_simple_this_property_assignments,
- FixedArray* this_property_assignments);
-
- // Clear information on assignments of the form this.x = ...;
- void ClearThisPropertyAssignmentsInfo();
-
- // Indicate that this function only consists of assignments of the form
- // this.x = y; where y is either a constant or refers to an argument.
- inline bool has_only_simple_this_property_assignments();
-
- // Indicates if this function can be lazy compiled.
- // This is used to determine if we can safely flush code from a function
- // when doing GC if we expect that the function will no longer be used.
- DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
-
- // Indicates if this function can be lazy compiled without a context.
- // This is used to determine if we can force compilation without reaching
- // the function through program execution but through other means (e.g. heap
- // iteration by the debugger).
- DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation_without_context)
-
- // Indicates how many full GCs this function has survived with assigned
- // code object. Used to determine when it is relatively safe to flush
- // this code object and replace it with lazy compilation stub.
- // Age is reset when GC notices that the code object is referenced
- // from the stack or compilation cache.
- inline int code_age();
- inline void set_code_age(int age);
-
- // Indicates whether optimizations have been disabled for this
- // shared function info. If a function is repeatedly optimized or if
- // we cannot optimize the function we disable optimization to avoid
- // spending time attempting to optimize it again.
- DECL_BOOLEAN_ACCESSORS(optimization_disabled)
-
- // Indicates the language mode of the function's code as defined by the
- // current harmony drafts for the next ES language standard. Possible
- // values are:
- // 1. CLASSIC_MODE - Unrestricted syntax and semantics, same as in ES5.
- // 2. STRICT_MODE - Restricted syntax and semantics, same as in ES5.
- // 3. EXTENDED_MODE - Only available under the harmony flag, not part of ES5.
- inline LanguageMode language_mode();
- inline void set_language_mode(LanguageMode language_mode);
-
- // Indicates whether the language mode of this function is CLASSIC_MODE.
- inline bool is_classic_mode();
-
- // Indicates whether the language mode of this function is EXTENDED_MODE.
- inline bool is_extended_mode();
-
- // Indicates whether the function is a qml mode function.
- DECL_BOOLEAN_ACCESSORS(qml_mode)
-
- // False if the function definitely does not allocate an arguments object.
- DECL_BOOLEAN_ACCESSORS(uses_arguments)
-
- // True if the function has any duplicated parameter names.
- DECL_BOOLEAN_ACCESSORS(has_duplicate_parameters)
-
- // Indicates whether the function is a native function.
- // These needs special treatment in .call and .apply since
- // null passed as the receiver should not be translated to the
- // global object.
- DECL_BOOLEAN_ACCESSORS(native)
-
- // Indicates that the function was created by the Function function.
- // Though it's anonymous, toString should treat it as if it had the name
- // "anonymous". We don't set the name itself so that the system does not
- // see a binding for it.
- DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
-
- // Indicates whether the function is a bound function created using
- // the bind function.
- DECL_BOOLEAN_ACCESSORS(bound)
-
- // Indicates that the function is anonymous (the name field can be set
- // through the API, which does not change this flag).
- DECL_BOOLEAN_ACCESSORS(is_anonymous)
-
- // Is this a function or top-level/eval code.
- DECL_BOOLEAN_ACCESSORS(is_function)
-
- // Indicates that the function cannot be optimized.
- DECL_BOOLEAN_ACCESSORS(dont_optimize)
-
- // Indicates that the function cannot be inlined.
- DECL_BOOLEAN_ACCESSORS(dont_inline)
-
- // Indicates that code for this function cannot be cached.
- DECL_BOOLEAN_ACCESSORS(dont_cache)
-
- // Indicates whether or not the code in the shared function support
- // deoptimization.
- inline bool has_deoptimization_support();
-
- // Enable deoptimization support through recompiled code.
- void EnableDeoptimizationSupport(Code* recompiled);
-
- // Disable (further) attempted optimization of all functions sharing this
- // shared function info.
- void DisableOptimization(const char* reason);
-
- // Lookup the bailout ID and ASSERT that it exists in the non-optimized
- // code, returns whether it asserted (i.e., always true if assertions are
- // disabled).
- bool VerifyBailoutId(BailoutId id);
-
- // Check whether a inlined constructor can be generated with the given
- // prototype.
- bool CanGenerateInlineConstructor(Object* prototype);
-
- // Prevents further attempts to generate inline constructors.
- // To be called if generation failed for any reason.
- void ForbidInlineConstructor();
-
- // For functions which only contains this property assignments this provides
- // access to the names for the properties assigned.
- DECL_ACCESSORS(this_property_assignments, Object)
- inline int this_property_assignments_count();
- inline void set_this_property_assignments_count(int value);
- String* GetThisPropertyAssignmentName(int index);
- bool IsThisPropertyAssignmentArgument(int index);
- int GetThisPropertyAssignmentArgument(int index);
- Object* GetThisPropertyAssignmentConstant(int index);
-
- // [source code]: Source code for the function.
- bool HasSourceCode();
- Handle<Object> GetSourceCode();
-
- // Number of times the function was optimized.
- inline int opt_count();
- inline void set_opt_count(int opt_count);
-
- // Number of times the function was deoptimized.
- inline void set_deopt_count(int value);
- inline int deopt_count();
- inline void increment_deopt_count();
-
- // Number of time we tried to re-enable optimization after it
- // was disabled due to high number of deoptimizations.
- inline void set_opt_reenable_tries(int value);
- inline int opt_reenable_tries();
-
- inline void TryReenableOptimization();
-
- // Stores deopt_count, opt_reenable_tries and ic_age as bit-fields.
- inline void set_counters(int value);
- inline int counters();
-
- // Source size of this function.
- int SourceSize();
-
- // Calculate the instance size.
- int CalculateInstanceSize();
-
- // Calculate the number of in-object properties.
- int CalculateInObjectProperties();
-
- // Dispatched behavior.
- // Set max_length to -1 for unlimited length.
- void SourceCodePrint(StringStream* accumulator, int max_length);
- DECLARE_PRINTER(SharedFunctionInfo)
- DECLARE_VERIFIER(SharedFunctionInfo)
-
- void ResetForNewContext(int new_ic_age);
-
- // Helper to compile the shared code. Returns true on success, false on
- // failure (e.g., stack overflow during compilation). This is only used by
- // the debugger, it is not possible to compile without a context otherwise.
- static bool CompileLazy(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag);
-
- // Casting.
- static inline SharedFunctionInfo* cast(Object* obj);
-
- // Constants.
- static const int kDontAdaptArgumentsSentinel = -1;
-
- // Layout description.
- // Pointer fields.
- static const int kNameOffset = HeapObject::kHeaderSize;
- static const int kCodeOffset = kNameOffset + kPointerSize;
- static const int kOptimizedCodeMapOffset = kCodeOffset + kPointerSize;
- static const int kScopeInfoOffset = kOptimizedCodeMapOffset + kPointerSize;
- static const int kConstructStubOffset = kScopeInfoOffset + kPointerSize;
- static const int kInstanceClassNameOffset =
- kConstructStubOffset + kPointerSize;
- static const int kFunctionDataOffset =
- kInstanceClassNameOffset + kPointerSize;
- static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
- static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
- static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
- static const int kInitialMapOffset =
- kInferredNameOffset + kPointerSize;
- static const int kThisPropertyAssignmentsOffset =
- kInitialMapOffset + kPointerSize;
- // ast_node_count is a Smi field. It could be grouped with another Smi field
- // into a PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
- static const int kAstNodeCountOffset =
- kThisPropertyAssignmentsOffset + kPointerSize;
-#if V8_HOST_ARCH_32_BIT
- // Smi fields.
- static const int kLengthOffset =
- kAstNodeCountOffset + kPointerSize;
- static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
- static const int kExpectedNofPropertiesOffset =
- kFormalParameterCountOffset + kPointerSize;
- static const int kNumLiteralsOffset =
- kExpectedNofPropertiesOffset + kPointerSize;
- static const int kStartPositionAndTypeOffset =
- kNumLiteralsOffset + kPointerSize;
- static const int kEndPositionOffset =
- kStartPositionAndTypeOffset + kPointerSize;
- static const int kFunctionTokenPositionOffset =
- kEndPositionOffset + kPointerSize;
- static const int kCompilerHintsOffset =
- kFunctionTokenPositionOffset + kPointerSize;
- static const int kThisPropertyAssignmentsCountOffset =
- kCompilerHintsOffset + kPointerSize;
- static const int kOptCountOffset =
- kThisPropertyAssignmentsCountOffset + kPointerSize;
- static const int kCountersOffset = kOptCountOffset + kPointerSize;
- static const int kStressDeoptCounterOffset = kCountersOffset + kPointerSize;
-
- // Total size.
- static const int kSize = kStressDeoptCounterOffset + kPointerSize;
-#else
- // The only reason to use smi fields instead of int fields
- // is to allow iteration without maps decoding during
- // garbage collections.
- // To avoid wasting space on 64-bit architectures we use
- // the following trick: we group integer fields into pairs
- // First integer in each pair is shifted left by 1.
- // By doing this we guarantee that LSB of each kPointerSize aligned
- // word is not set and thus this word cannot be treated as pointer
- // to HeapObject during old space traversal.
- static const int kLengthOffset =
- kAstNodeCountOffset + kPointerSize;
- static const int kFormalParameterCountOffset =
- kLengthOffset + kIntSize;
-
- static const int kExpectedNofPropertiesOffset =
- kFormalParameterCountOffset + kIntSize;
- static const int kNumLiteralsOffset =
- kExpectedNofPropertiesOffset + kIntSize;
-
- static const int kEndPositionOffset =
- kNumLiteralsOffset + kIntSize;
- static const int kStartPositionAndTypeOffset =
- kEndPositionOffset + kIntSize;
-
- static const int kFunctionTokenPositionOffset =
- kStartPositionAndTypeOffset + kIntSize;
- static const int kCompilerHintsOffset =
- kFunctionTokenPositionOffset + kIntSize;
-
- static const int kThisPropertyAssignmentsCountOffset =
- kCompilerHintsOffset + kIntSize;
- static const int kOptCountOffset =
- kThisPropertyAssignmentsCountOffset + kIntSize;
-
- static const int kCountersOffset = kOptCountOffset + kIntSize;
- static const int kStressDeoptCounterOffset = kCountersOffset + kIntSize;
-
- // Total size.
- static const int kSize = kStressDeoptCounterOffset + kIntSize;
-
-#endif
-
- // The construction counter for inobject slack tracking is stored in the
- // most significant byte of compiler_hints which is otherwise unused.
- // Its offset depends on the endian-ness of the architecture.
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- static const int kConstructionCountOffset = kCompilerHintsOffset + 3;
-#elif __BYTE_ORDER == __BIG_ENDIAN
- static const int kConstructionCountOffset = kCompilerHintsOffset + 0;
-#else
-#error Unknown byte ordering
-#endif
-
- static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
-
- typedef FixedBodyDescriptor<kNameOffset,
- kThisPropertyAssignmentsOffset + kPointerSize,
- kSize> BodyDescriptor;
-
- // Bit positions in start_position_and_type.
- // The source code start position is in the 30 most significant bits of
- // the start_position_and_type field.
- static const int kIsExpressionBit = 0;
- static const int kIsTopLevelBit = 1;
- static const int kStartPositionShift = 2;
- static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
-
- // Bit positions in compiler_hints.
- static const int kCodeAgeSize = 3;
- static const int kCodeAgeMask = (1 << kCodeAgeSize) - 1;
-
- enum CompilerHints {
- kHasOnlySimpleThisPropertyAssignments,
- kAllowLazyCompilation,
- kAllowLazyCompilationWithoutContext,
- kLiveObjectsMayExist,
- kCodeAgeShift,
- kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
- kStrictModeFunction,
- kExtendedModeFunction,
- kQmlModeFunction,
- kUsesArguments,
- kHasDuplicateParameters,
- kNative,
- kBoundFunction,
- kIsAnonymous,
- kNameShouldPrintAsAnonymous,
- kIsFunction,
- kDontOptimize,
- kDontInline,
- kDontCache,
- kCompilerHintsCount // Pseudo entry
- };
-
- class DeoptCountBits: public BitField<int, 0, 4> {};
- class OptReenableTriesBits: public BitField<int, 4, 18> {};
- class ICAgeBits: public BitField<int, 22, 8> {};
-
- private:
-#if V8_HOST_ARCH_32_BIT
- // On 32 bit platforms, compiler hints is a smi.
- static const int kCompilerHintsSmiTagSize = kSmiTagSize;
- static const int kCompilerHintsSize = kPointerSize;
-#else
- // On 64 bit platforms, compiler hints is not a smi, see comment above.
- static const int kCompilerHintsSmiTagSize = 0;
- static const int kCompilerHintsSize = kIntSize;
-#endif
-
- STATIC_ASSERT(SharedFunctionInfo::kCompilerHintsCount <=
- SharedFunctionInfo::kCompilerHintsSize * kBitsPerByte);
-
- public:
- // Constants for optimizing codegen for strict mode function and
- // native tests.
- // Allows to use byte-width instructions.
- static const int kStrictModeBitWithinByte =
- (kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
-
- static const int kExtendedModeBitWithinByte =
- (kExtendedModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
-
- static const int kNativeBitWithinByte =
- (kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
-
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- static const int kStrictModeByteOffset = kCompilerHintsOffset +
- (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
- static const int kExtendedModeByteOffset = kCompilerHintsOffset +
- (kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
- static const int kNativeByteOffset = kCompilerHintsOffset +
- (kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
-#elif __BYTE_ORDER == __BIG_ENDIAN
- static const int kStrictModeByteOffset = kCompilerHintsOffset +
- (kCompilerHintsSize - 1) -
- ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
- static const int kExtendedModeByteOffset = kCompilerHintsOffset +
- (kCompilerHintsSize - 1) -
- ((kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
- static const int kNativeByteOffset = kCompilerHintsOffset +
- (kCompilerHintsSize - 1) -
- ((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
-#else
-#error Unknown byte ordering
-#endif
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
-};
-
-
-// Representation for module instance objects.
-class JSModule: public JSObject {
- public:
- // [context]: the context holding the module's locals, or undefined if none.
- DECL_ACCESSORS(context, Object)
-
- // [scope_info]: Scope info.
- DECL_ACCESSORS(scope_info, ScopeInfo)
-
- // Casting.
- static inline JSModule* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSModule)
- DECLARE_VERIFIER(JSModule)
-
- // Layout description.
- static const int kContextOffset = JSObject::kHeaderSize;
- static const int kScopeInfoOffset = kContextOffset + kPointerSize;
- static const int kSize = kScopeInfoOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSModule);
-};
-
-
-// JSFunction describes JavaScript functions.
-class JSFunction: public JSObject {
- public:
- // [prototype_or_initial_map]:
- DECL_ACCESSORS(prototype_or_initial_map, Object)
-
- // [shared]: The information about the function that
- // can be shared by instances.
- DECL_ACCESSORS(shared, SharedFunctionInfo)
-
- inline SharedFunctionInfo* unchecked_shared();
-
- // [context]: The context for this function.
- inline Context* context();
- inline Object* unchecked_context();
- inline void set_context(Object* context);
-
- // [code]: The generated code object for this function. Executed
- // when the function is invoked, e.g. foo() or new foo(). See
- // [[Call]] and [[Construct]] description in ECMA-262, section
- // 8.6.2, page 27.
- inline Code* code();
- inline void set_code(Code* code);
- inline void ReplaceCode(Code* code);
-
- inline Code* unchecked_code();
-
- // Tells whether this function is builtin.
- inline bool IsBuiltin();
-
- // Tells whether or not the function needs arguments adaption.
- inline bool NeedsArgumentsAdaption();
-
- // Tells whether or not this function has been optimized.
- inline bool IsOptimized();
-
- // Tells whether or not this function can be optimized.
- inline bool IsOptimizable();
-
- // Mark this function for lazy recompilation. The function will be
- // recompiled the next time it is executed.
- void MarkForLazyRecompilation();
- void MarkForParallelRecompilation();
-
- // Helpers to compile this function. Returns true on success, false on
- // failure (e.g., stack overflow during compilation).
- static bool EnsureCompiled(Handle<JSFunction> function,
- ClearExceptionFlag flag);
- static bool CompileLazy(Handle<JSFunction> function,
- ClearExceptionFlag flag);
- static bool CompileOptimized(Handle<JSFunction> function,
- BailoutId osr_ast_id,
- ClearExceptionFlag flag);
-
- // Tells whether or not the function is already marked for lazy
- // recompilation.
- inline bool IsMarkedForLazyRecompilation();
- inline bool IsMarkedForParallelRecompilation();
-
- // Tells whether or not the function is on the parallel
- // recompilation queue.
- inline bool IsInRecompileQueue();
-
- // Check whether or not this function is inlineable.
- bool IsInlineable();
-
- // [literals_or_bindings]: Fixed array holding either
- // the materialized literals or the bindings of a bound function.
- //
- // If the function contains object, regexp or array literals, the
- // literals array prefix contains the object, regexp, and array
- // function to be used when creating these literals. This is
- // necessary so that we do not dynamically lookup the object, regexp
- // or array functions. Performing a dynamic lookup, we might end up
- // using the functions from a new context that we should not have
- // access to.
- //
- // On bound functions, the array is a (copy-on-write) fixed-array containing
- // the function that was bound, bound this-value and any bound
- // arguments. Bound functions never contain literals.
- DECL_ACCESSORS(literals_or_bindings, FixedArray)
-
- inline FixedArray* literals();
- inline void set_literals(FixedArray* literals);
-
- inline FixedArray* function_bindings();
- inline void set_function_bindings(FixedArray* bindings);
-
- // The initial map for an object created by this constructor.
- inline Map* initial_map();
- inline void set_initial_map(Map* value);
- inline bool has_initial_map();
-
- // Get and set the prototype property on a JSFunction. If the
- // function has an initial map the prototype is set on the initial
- // map. Otherwise, the prototype is put in the initial map field
- // until an initial map is needed.
- inline bool has_prototype();
- inline bool has_instance_prototype();
- inline Object* prototype();
- inline Object* instance_prototype();
- MUST_USE_RESULT MaybeObject* SetInstancePrototype(Object* value);
- MUST_USE_RESULT MaybeObject* SetPrototype(Object* value);
-
- // After prototype is removed, it will not be created when accessed, and
- // [[Construct]] from this function will not be allowed.
- void RemovePrototype();
- inline bool should_have_prototype();
-
- // Accessor for this function's initial map's [[class]]
- // property. This is primarily used by ECMA native functions. This
- // method sets the class_name field of this function's initial map
- // to a given value. It creates an initial map if this function does
- // not have one. Note that this method does not copy the initial map
- // if it has one already, but simply replaces it with the new value.
- // Instances created afterwards will have a map whose [[class]] is
- // set to 'value', but there is no guarantees on instances created
- // before.
- void SetInstanceClassName(String* name);
-
- // Returns if this function has been compiled to native code yet.
- inline bool is_compiled();
-
- // [next_function_link]: Field for linking functions. This list is treated as
- // a weak list by the GC.
- DECL_ACCESSORS(next_function_link, Object)
-
- // Prints the name of the function using PrintF.
- inline void PrintName() {
- PrintName(stdout);
- }
- void PrintName(FILE* out);
-
- // Casting.
- static inline JSFunction* cast(Object* obj);
-
- // Iterates the objects, including code objects indirectly referenced
- // through pointers to the first instruction in the code object.
- void JSFunctionIterateBody(int object_size, ObjectVisitor* v);
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSFunction)
- DECLARE_VERIFIER(JSFunction)
-
- // Returns the number of allocated literals.
- inline int NumberOfLiterals();
-
- // Retrieve the native context from a function's literal array.
- static Context* NativeContextFromLiterals(FixedArray* literals);
-
-#ifdef DEBUG
- bool FunctionsInFunctionListShareSameCode() {
- Object* current = this;
- while (!current->IsUndefined()) {
- JSFunction* function = JSFunction::cast(current);
- current = function->next_function_link();
- if (function->code() != this->code()) return false;
- }
- return true;
- }
-#endif
-
- // Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
- // kSize) is weak and has special handling during garbage collection.
- static const int kCodeEntryOffset = JSObject::kHeaderSize;
- static const int kPrototypeOrInitialMapOffset =
- kCodeEntryOffset + kPointerSize;
- static const int kSharedFunctionInfoOffset =
- kPrototypeOrInitialMapOffset + kPointerSize;
- static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
- static const int kLiteralsOffset = kContextOffset + kPointerSize;
- static const int kNonWeakFieldsEndOffset = kLiteralsOffset + kPointerSize;
- static const int kNextFunctionLinkOffset = kNonWeakFieldsEndOffset;
- static const int kSize = kNextFunctionLinkOffset + kPointerSize;
-
- // Layout of the literals array.
- static const int kLiteralsPrefixSize = 1;
- static const int kLiteralNativeContextIndex = 0;
-
- // Layout of the bound-function binding array.
- static const int kBoundFunctionIndex = 0;
- static const int kBoundThisIndex = 1;
- static const int kBoundArgumentsStartIndex = 2;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
-};
-
-
-// JSGlobalProxy's prototype must be a JSGlobalObject or null,
-// and the prototype is hidden. JSGlobalProxy always delegates
-// property accesses to its prototype if the prototype is not null.
-//
-// A JSGlobalProxy can be reinitialized which will preserve its identity.
-//
-// Accessing a JSGlobalProxy requires security check.
-
class JSGlobalProxy : public JSObject {
 public:
  // [native_context]: the owner native context of this global proxy object.
  // It is null value if this object is not used by any context.
  // (DECL_ACCESSORS expands to the inline getter/setter pair.)
  DECL_ACCESSORS(native_context, Object)

  // Casting.
  static inline JSGlobalProxy* cast(Object* obj);

  // Dispatched behavior.
  DECLARE_PRINTER(JSGlobalProxy)
  DECLARE_VERIFIER(JSGlobalProxy)

  // Layout description. The proxy adds exactly one pointer-sized field
  // (the owning native context) after the standard JSObject header.
  static const int kNativeContextOffset = JSObject::kHeaderSize;
  static const int kSize = kNativeContextOffset + kPointerSize;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy);
};
-
-
-// Forward declaration.
-class JSBuiltinsObject;
-
-// Common super class for JavaScript global objects and the special
-// builtins global objects.
class GlobalObject: public JSObject {
 public:
  // [builtins]: the object holding the runtime routines written in JS.
  DECL_ACCESSORS(builtins, JSBuiltinsObject)

  // [native context]: the natives corresponding to this global object.
  DECL_ACCESSORS(native_context, Context)

  // [global context]: the most recent (i.e. innermost) global context.
  DECL_ACCESSORS(global_context, Context)

  // [global receiver]: the global receiver object of the context
  DECL_ACCESSORS(global_receiver, JSObject)

  // Retrieve the property cell used to store a property.
  JSGlobalPropertyCell* GetPropertyCell(LookupResult* result);

  // This is like GetProperty, but is used when you know the lookup won't fail
  // by throwing an exception. This is for the debug and builtins global
  // objects, where it is known which properties can be expected to be present
  // on the object.
  // ToObjectUnchecked() asserts rather than propagating a failure, so this
  // must only be called when the property is known to exist.
  Object* GetPropertyNoExceptionThrown(String* key) {
    Object* answer = GetProperty(key)->ToObjectUnchecked();
    return answer;
  }

  // Ensure that the global object has a cell for the given property name.
  static Handle<JSGlobalPropertyCell> EnsurePropertyCell(
      Handle<GlobalObject> global,
      Handle<String> name);
  // TODO(kmillikin): This function can be eliminated once the stub cache is
  // fully handlified (and the static helper can be written directly).
  MUST_USE_RESULT MaybeObject* EnsurePropertyCell(String* name);

  // Casting.
  static inline GlobalObject* cast(Object* obj);

  // Layout description. Note this defines kHeaderSize, not kSize:
  // subclasses (JSGlobalObject, JSBuiltinsObject) append their own fields.
  static const int kBuiltinsOffset = JSObject::kHeaderSize;
  static const int kNativeContextOffset = kBuiltinsOffset + kPointerSize;
  static const int kGlobalContextOffset = kNativeContextOffset + kPointerSize;
  static const int kGlobalReceiverOffset = kGlobalContextOffset + kPointerSize;
  static const int kHeaderSize = kGlobalReceiverOffset + kPointerSize;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject);
};
-
-
-// JavaScript global object.
class JSGlobalObject: public GlobalObject {
 public:
  // Casting.
  static inline JSGlobalObject* cast(Object* obj);

  // Dispatched behavior.
  DECLARE_PRINTER(JSGlobalObject)
  DECLARE_VERIFIER(JSGlobalObject)

  // Layout description. Adds no fields of its own beyond the
  // GlobalObject header.
  static const int kSize = GlobalObject::kHeaderSize;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalObject);
};
-
-
-// Builtins global object which holds the runtime routines written in
-// JavaScript.
class JSBuiltinsObject: public GlobalObject {
 public:
  // Accessors for the runtime routines written in JavaScript.
  inline Object* javascript_builtin(Builtins::JavaScript id);
  inline void set_javascript_builtin(Builtins::JavaScript id, Object* value);

  // Accessors for code of the runtime routines written in JavaScript.
  inline Code* javascript_builtin_code(Builtins::JavaScript id);
  inline void set_javascript_builtin_code(Builtins::JavaScript id, Code* value);

  // Casting.
  static inline JSBuiltinsObject* cast(Object* obj);

  // Dispatched behavior.
  DECLARE_PRINTER(JSBuiltinsObject)
  DECLARE_VERIFIER(JSBuiltinsObject)

  // Layout description. The size of the builtins object includes
  // room for two pointers per runtime routine written in javascript
  // (function and code object). The two runs of kJSBuiltinsCount
  // pointers are laid out back to back: functions first, then code.
  static const int kJSBuiltinsCount = Builtins::id_count;
  static const int kJSBuiltinsOffset = GlobalObject::kHeaderSize;
  static const int kJSBuiltinsCodeOffset =
      GlobalObject::kHeaderSize + (kJSBuiltinsCount * kPointerSize);
  static const int kSize =
      kJSBuiltinsCodeOffset + (kJSBuiltinsCount * kPointerSize);

  // Byte offset of the function slot for the builtin with the given id.
  static int OffsetOfFunctionWithId(Builtins::JavaScript id) {
    return kJSBuiltinsOffset + id * kPointerSize;
  }

  // Byte offset of the code slot for the builtin with the given id.
  static int OffsetOfCodeWithId(Builtins::JavaScript id) {
    return kJSBuiltinsCodeOffset + id * kPointerSize;
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(JSBuiltinsObject);
};
-
-
-// Representation for JS Wrapper objects, String, Number, Boolean, etc.
class JSValue: public JSObject {
 public:
  // [value]: the object being wrapped (the primitive value for wrapper
  // objects such as new String("x") or new Number(1)).
  DECL_ACCESSORS(value, Object)

  // Casting.
  static inline JSValue* cast(Object* obj);

  // Dispatched behavior.
  DECLARE_PRINTER(JSValue)
  DECLARE_VERIFIER(JSValue)

  // Layout description. Exactly one pointer field after the JSObject header.
  static const int kValueOffset = JSObject::kHeaderSize;
  static const int kSize = kValueOffset + kPointerSize;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(JSValue);
};
-
-
-class DateCache;
-
-// Representation for JS date objects.
class JSDate: public JSObject {
 public:
  // If one component is NaN, all of them are, indicating a NaN time value.
  // [value]: the time value.
  DECL_ACCESSORS(value, Object)
  // [year]: caches year. Either undefined, smi, or NaN.
  DECL_ACCESSORS(year, Object)
  // [month]: caches month. Either undefined, smi, or NaN.
  DECL_ACCESSORS(month, Object)
  // [day]: caches day. Either undefined, smi, or NaN.
  DECL_ACCESSORS(day, Object)
  // [weekday]: caches day of week. Either undefined, smi, or NaN.
  DECL_ACCESSORS(weekday, Object)
  // [hour]: caches hours. Either undefined, smi, or NaN.
  DECL_ACCESSORS(hour, Object)
  // [min]: caches minutes. Either undefined, smi, or NaN.
  DECL_ACCESSORS(min, Object)
  // [sec]: caches seconds. Either undefined, smi, or NaN.
  DECL_ACCESSORS(sec, Object)
  // [cache stamp]: sample of the date cache stamp at the
  // moment when local fields were cached.
  DECL_ACCESSORS(cache_stamp, Object)

  // Casting.
  static inline JSDate* cast(Object* obj);

  // Returns the date field with the specified index.
  // See FieldIndex for the list of date fields.
  static Object* GetField(Object* date, Smi* index);

  // Sets the time value. Per the class comment above, is_value_nan
  // indicates that value (and hence every cached component) is NaN.
  void SetValue(Object* value, bool is_value_nan);


  // Dispatched behavior.
  DECLARE_PRINTER(JSDate)
  DECLARE_VERIFIER(JSDate)

  // The order is important. It must be kept in sync with date macros
  // in macros.py. Fields up to kFirstUncachedField are cached in-object
  // (see the accessors above); the rest are computed on demand.
  enum FieldIndex {
    kDateValue,
    kYear,
    kMonth,
    kDay,
    kWeekday,
    kHour,
    kMinute,
    kSecond,
    kFirstUncachedField,
    kMillisecond = kFirstUncachedField,
    kDays,
    kTimeInDay,
    kFirstUTCField,
    kYearUTC = kFirstUTCField,
    kMonthUTC,
    kDayUTC,
    kWeekdayUTC,
    kHourUTC,
    kMinuteUTC,
    kSecondUTC,
    kMillisecondUTC,
    kDaysUTC,
    kTimeInDayUTC,
    kTimezoneOffset
  };

  // Layout description. One pointer-sized field per accessor, in the
  // same order as declared above.
  static const int kValueOffset = JSObject::kHeaderSize;
  static const int kYearOffset = kValueOffset + kPointerSize;
  static const int kMonthOffset = kYearOffset + kPointerSize;
  static const int kDayOffset = kMonthOffset + kPointerSize;
  static const int kWeekdayOffset = kDayOffset + kPointerSize;
  static const int kHourOffset = kWeekdayOffset + kPointerSize;
  static const int kMinOffset = kHourOffset + kPointerSize;
  static const int kSecOffset = kMinOffset + kPointerSize;
  static const int kCacheStampOffset = kSecOffset + kPointerSize;
  static const int kSize = kCacheStampOffset + kPointerSize;

 private:
  inline Object* DoGetField(FieldIndex index);

  Object* GetUTCField(FieldIndex index, double value, DateCache* date_cache);

  // Computes and caches the cacheable fields of the date.
  inline void SetLocalFields(int64_t local_time_ms, DateCache* date_cache);


  DISALLOW_IMPLICIT_CONSTRUCTORS(JSDate);
};
-
-
-// Representation of message objects used for error reporting through
-// the API. The messages are formatted in JavaScript so this object is
-// a real JavaScript object. The information used for formatting the
-// error messages are not directly accessible from JavaScript to
-// prevent leaking information to user code called during error
-// formatting.
class JSMessageObject: public JSObject {
 public:
  // [type]: the type of error message.
  DECL_ACCESSORS(type, String)

  // [arguments]: the arguments for formatting the error message.
  DECL_ACCESSORS(arguments, JSArray)

  // [script]: the script from which the error message originated.
  DECL_ACCESSORS(script, Object)

  // [stack_trace]: the stack trace for this error message.
  DECL_ACCESSORS(stack_trace, Object)

  // [stack_frames]: an array of stack frames for this error object.
  DECL_ACCESSORS(stack_frames, Object)

  // [start_position]: the start position in the script for the error message.
  inline int start_position();
  inline void set_start_position(int value);

  // [end_position]: the end position in the script for the error message.
  inline int end_position();
  inline void set_end_position(int value);

  // Casting.
  static inline JSMessageObject* cast(Object* obj);

  // Dispatched behavior.
  DECLARE_PRINTER(JSMessageObject)
  DECLARE_VERIFIER(JSMessageObject)

  // Layout description.
  static const int kTypeOffset = JSObject::kHeaderSize;
  static const int kArgumentsOffset = kTypeOffset + kPointerSize;
  static const int kScriptOffset = kArgumentsOffset + kPointerSize;
  static const int kStackTraceOffset = kScriptOffset + kPointerSize;
  static const int kStackFramesOffset = kStackTraceOffset + kPointerSize;
  static const int kStartPositionOffset = kStackFramesOffset + kPointerSize;
  static const int kEndPositionOffset = kStartPositionOffset + kPointerSize;
  static const int kSize = kEndPositionOffset + kPointerSize;

  // The GC body descriptor visits pointer fields only up to (and
  // including) stack_frames; the two position fields past that are
  // not treated as tagged pointers.
  typedef FixedBodyDescriptor<HeapObject::kMapOffset,
                              kStackFramesOffset + kPointerSize,
                              kSize> BodyDescriptor;
};
-
-
-// Regular expressions
-// The regular expression holds a single reference to a FixedArray in
-// the kDataOffset field.
-// The FixedArray contains the following data:
-// - tag : type of regexp implementation (not compiled yet, atom or irregexp)
-// - reference to the original source string
-// - reference to the original flag string
-// If it is an atom regexp
-// - a reference to a literal string to search for
-// If it is an irregexp regexp:
-// - a reference to code for ASCII inputs (bytecode or compiled), or a smi
-// used for tracking the last usage (used for code flushing).
-// - a reference to code for UC16 inputs (bytecode or compiled), or a smi
// used for tracking the last usage (used for code flushing).
-// - max number of registers used by irregexp implementations.
-// - number of capture registers (output values) of the regexp.
class JSRegExp: public JSObject {
 public:
  // Meaning of Type:
  // NOT_COMPILED: Initial value. No data has been stored in the JSRegExp yet.
  // ATOM: A simple string to match against using an indexOf operation.
  // IRREGEXP: Compiled with Irregexp (covers both bytecode and native code;
  // note there is no separate IRREGEXP_NATIVE tag in this enum).
  enum Type { NOT_COMPILED, ATOM, IRREGEXP };
  enum Flag { NONE = 0, GLOBAL = 1, IGNORE_CASE = 2, MULTILINE = 4 };

  // Small value wrapper over the Flag bits above.
  class Flags {
   public:
    explicit Flags(uint32_t value) : value_(value) { }
    bool is_global() { return (value_ & GLOBAL) != 0; }
    bool is_ignore_case() { return (value_ & IGNORE_CASE) != 0; }
    bool is_multiline() { return (value_ & MULTILINE) != 0; }
    uint32_t value() { return value_; }
   private:
    uint32_t value_;
  };

  // [data]: FixedArray holding the implementation data described in the
  // class comment above (tag, source, flags, and type-specific entries).
  DECL_ACCESSORS(data, Object)

  inline Type TypeTag();
  inline int CaptureCount();
  inline Flags GetFlags();
  inline String* Pattern();
  inline Object* DataAt(int index);
  // Set implementation data after the object has been prepared.
  inline void SetDataAt(int index, Object* value);

  // Used during GC when flushing code or setting age.
  inline Object* DataAtUnchecked(int index);
  inline void SetDataAtUnchecked(int index, Object* value, Heap* heap);
  inline Type TypeTagUnchecked();

  // Index of the code entry for the given encoding in the data array.
  static int code_index(bool is_ascii) {
    if (is_ascii) {
      return kIrregexpASCIICodeIndex;
    } else {
      return kIrregexpUC16CodeIndex;
    }
  }

  // Index of the saved (flushing-candidate) code entry for the encoding.
  static int saved_code_index(bool is_ascii) {
    if (is_ascii) {
      return kIrregexpASCIICodeSavedIndex;
    } else {
      return kIrregexpUC16CodeSavedIndex;
    }
  }

  static inline JSRegExp* cast(Object* obj);

  // Dispatched behavior.
  DECLARE_VERIFIER(JSRegExp)

  static const int kDataOffset = JSObject::kHeaderSize;
  static const int kSize = kDataOffset + kPointerSize;

  // Indices in the data array.
  static const int kTagIndex = 0;
  static const int kSourceIndex = kTagIndex + 1;
  static const int kFlagsIndex = kSourceIndex + 1;
  static const int kDataIndex = kFlagsIndex + 1;
  // The data fields are used in different ways depending on the
  // value of the tag.
  // Atom regexps (literal strings).
  static const int kAtomPatternIndex = kDataIndex;

  static const int kAtomDataSize = kAtomPatternIndex + 1;

  // Irregexp compiled code or bytecode for ASCII. If compilation
  // fails, this field holds an exception object that should be
  // thrown if the regexp is used again.
  static const int kIrregexpASCIICodeIndex = kDataIndex;
  // Irregexp compiled code or bytecode for UC16. If compilation
  // fails, this field holds an exception object that should be
  // thrown if the regexp is used again.
  static const int kIrregexpUC16CodeIndex = kDataIndex + 1;

  // Saved instance of Irregexp compiled code or bytecode for ASCII that
  // is a potential candidate for flushing.
  static const int kIrregexpASCIICodeSavedIndex = kDataIndex + 2;
  // Saved instance of Irregexp compiled code or bytecode for UC16 that is
  // a potential candidate for flushing.
  static const int kIrregexpUC16CodeSavedIndex = kDataIndex + 3;

  // Maximal number of registers used by either ASCII or UC16.
  // Only used to check that there is enough stack space
  static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 4;
  // Number of captures in the compiled regexp.
  static const int kIrregexpCaptureCountIndex = kDataIndex + 5;

  static const int kIrregexpDataSize = kIrregexpCaptureCountIndex + 1;

  // Offsets directly into the data fixed array (for generated code that
  // addresses the array by byte offset rather than element index).
  static const int kDataTagOffset =
      FixedArray::kHeaderSize + kTagIndex * kPointerSize;
  static const int kDataAsciiCodeOffset =
      FixedArray::kHeaderSize + kIrregexpASCIICodeIndex * kPointerSize;
  static const int kDataUC16CodeOffset =
      FixedArray::kHeaderSize + kIrregexpUC16CodeIndex * kPointerSize;
  static const int kIrregexpCaptureCountOffset =
      FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;

  // In-object fields.
  static const int kSourceFieldIndex = 0;
  static const int kGlobalFieldIndex = 1;
  static const int kIgnoreCaseFieldIndex = 2;
  static const int kMultilineFieldIndex = 3;
  static const int kLastIndexFieldIndex = 4;
  static const int kInObjectFieldCount = 5;

  // The uninitialized value for a regexp code object.
  static const int kUninitializedValue = -1;

  // The compilation error value for the regexp code object. The real error
  // object is in the saved code field.
  static const int kCompilationErrorValue = -2;

  // When we store the sweep generation at which we moved the code from the
  // code index to the saved code index we mask it of to be in the [0:255]
  // range.
  static const int kCodeAgeMask = 0xff;
};
-
-
// Shape policy for the compilation cache hash table. Keys are
// HashTableKey* objects, and matching/hashing is delegated to the key
// object itself.
class CompilationCacheShape : public BaseShape<HashTableKey*> {
 public:
  static inline bool IsMatch(HashTableKey* key, Object* value) {
    return key->IsMatch(value);
  }

  static inline uint32_t Hash(HashTableKey* key) {
    return key->Hash();
  }

  static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
    return key->HashForObject(object);
  }

  MUST_USE_RESULT static MaybeObject* AsObject(HashTableKey* key) {
    return key->AsObject();
  }

  // Each entry is a (key, value) pair; no shared prefix in the table.
  static const int kPrefixSize = 0;
  static const int kEntrySize = 2;
};
-
-
// Hash table mapping source strings (plus context/flags) to previously
// compiled scripts, eval results and regexps.
class CompilationCacheTable: public HashTable<CompilationCacheShape,
                                              HashTableKey*> {
 public:
  // Find cached value for a string key, otherwise return null.
  Object* Lookup(String* src, Context* context);
  Object* LookupEval(String* src,
                     Context* context,
                     LanguageMode language_mode,
                     int scope_position);
  Object* LookupRegExp(String* source, JSRegExp::Flags flags);
  // The Put* variants may reallocate the table, hence MaybeObject results.
  MUST_USE_RESULT MaybeObject* Put(String* src,
                                   Context* context,
                                   Object* value);
  MUST_USE_RESULT MaybeObject* PutEval(String* src,
                                       Context* context,
                                       SharedFunctionInfo* value,
                                       int scope_position);
  MUST_USE_RESULT MaybeObject* PutRegExp(String* src,
                                         JSRegExp::Flags flags,
                                         FixedArray* value);

  // Remove given value from cache.
  void Remove(Object* value);

  static inline CompilationCacheTable* cast(Object* obj);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheTable);
};
-
-
// Per-map cache of Code objects, with a small flat "default" cache and an
// overflow hash-table cache for normal-type entries.
class CodeCache: public Struct {
 public:
  DECL_ACCESSORS(default_cache, FixedArray)
  DECL_ACCESSORS(normal_type_cache, Object)

  // Add the code object to the cache.
  MUST_USE_RESULT MaybeObject* Update(String* name, Code* code);

  // Lookup code object in the cache. Returns code object if found and undefined
  // if not.
  Object* Lookup(String* name, Code::Flags flags);

  // Get the internal index of a code object in the cache. Returns -1 if the
  // code object is not in that cache. This index can be used to later call
  // RemoveByIndex. The cache cannot be modified between a call to GetIndex and
  // RemoveByIndex.
  int GetIndex(Object* name, Code* code);

  // Remove an object from the cache with the provided internal index.
  void RemoveByIndex(Object* name, Code* code, int index);

  static inline CodeCache* cast(Object* obj);

  // Dispatched behavior.
  DECLARE_PRINTER(CodeCache)
  DECLARE_VERIFIER(CodeCache)

  // Layout description: two pointer fields.
  static const int kDefaultCacheOffset = HeapObject::kHeaderSize;
  static const int kNormalTypeCacheOffset =
      kDefaultCacheOffset + kPointerSize;
  static const int kSize = kNormalTypeCacheOffset + kPointerSize;

 private:
  MUST_USE_RESULT MaybeObject* UpdateDefaultCache(String* name, Code* code);
  MUST_USE_RESULT MaybeObject* UpdateNormalTypeCache(String* name, Code* code);
  Object* LookupDefaultCache(String* name, Code::Flags flags);
  Object* LookupNormalTypeCache(String* name, Code::Flags flags);

  // Code cache layout of the default cache. Elements are alternating name and
  // code objects for non normal load/store/call IC's.
  static const int kCodeCacheEntrySize = 2;
  static const int kCodeCacheEntryNameOffset = 0;
  static const int kCodeCacheEntryCodeOffset = 1;

  DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCache);
};
-
-
// Shape policy for code cache hash tables; identical in structure to
// CompilationCacheShape — keys are HashTableKey* objects that implement
// their own matching and hashing.
class CodeCacheHashTableShape : public BaseShape<HashTableKey*> {
 public:
  static inline bool IsMatch(HashTableKey* key, Object* value) {
    return key->IsMatch(value);
  }

  static inline uint32_t Hash(HashTableKey* key) {
    return key->Hash();
  }

  static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
    return key->HashForObject(object);
  }

  MUST_USE_RESULT static MaybeObject* AsObject(HashTableKey* key) {
    return key->AsObject();
  }

  // Each entry is a (key, value) pair; no shared prefix in the table.
  static const int kPrefixSize = 0;
  static const int kEntrySize = 2;
};
-
-
// Hash-table backing store for CodeCache's normal-type cache.
class CodeCacheHashTable: public HashTable<CodeCacheHashTableShape,
                                           HashTableKey*> {
 public:
  Object* Lookup(String* name, Code::Flags flags);
  // May reallocate the table, hence the MaybeObject result.
  MUST_USE_RESULT MaybeObject* Put(String* name, Code* code);

  int GetIndex(String* name, Code::Flags flags);
  void RemoveByIndex(int index);

  static inline CodeCacheHashTable* cast(Object* obj);

  // Initial size of the fixed array backing the hash table.
  static const int kInitialSize = 64;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCacheHashTable);
};
-
-
// Cache of Code objects keyed by (map list, code flags), used for
// polymorphic ICs.
class PolymorphicCodeCache: public Struct {
 public:
  // [cache]: the backing hash table (or undefined before first use —
  // NOTE(review): inferred from the Object type; confirm against Update).
  DECL_ACCESSORS(cache, Object)

  // Handlified version of Update below.
  static void Update(Handle<PolymorphicCodeCache> cache,
                     MapHandleList* maps,
                     Code::Flags flags,
                     Handle<Code> code);

  MUST_USE_RESULT MaybeObject* Update(MapHandleList* maps,
                                      Code::Flags flags,
                                      Code* code);

  // Returns an undefined value if the entry is not found.
  Handle<Object> Lookup(MapHandleList* maps, Code::Flags flags);

  static inline PolymorphicCodeCache* cast(Object* obj);

  // Dispatched behavior.
  DECLARE_PRINTER(PolymorphicCodeCache)
  DECLARE_VERIFIER(PolymorphicCodeCache)

  // Layout description: a single pointer field.
  static const int kCacheOffset = HeapObject::kHeaderSize;
  static const int kSize = kCacheOffset + kPointerSize;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(PolymorphicCodeCache);
};
-
-
// Hash-table backing store for PolymorphicCodeCache; reuses the
// CodeCacheHashTableShape policy.
class PolymorphicCodeCacheHashTable
    : public HashTable<CodeCacheHashTableShape, HashTableKey*> {
 public:
  Object* Lookup(MapHandleList* maps, int code_kind);

  // May reallocate the table, hence the MaybeObject result.
  MUST_USE_RESULT MaybeObject* Put(MapHandleList* maps,
                                   int code_kind,
                                   Code* code);

  static inline PolymorphicCodeCacheHashTable* cast(Object* obj);

  // Initial size of the fixed array backing the hash table.
  static const int kInitialSize = 64;
 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(PolymorphicCodeCacheHashTable);
};
-
-
// Per-function record of IC (inline cache) statistics and type feedback
// cells, consumed by the optimizing compiler.
class TypeFeedbackInfo: public Struct {
 public:
  inline int ic_total_count();
  inline void set_ic_total_count(int count);

  inline int ic_with_type_info_count();
  inline void change_ic_with_type_info_count(int count);

  inline void initialize_storage();

  inline void change_own_type_change_checksum();
  inline int own_type_change_checksum();

  inline void set_inlined_type_change_checksum(int checksum);
  inline bool matches_inlined_type_change_checksum(int checksum);

  DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)

  static inline TypeFeedbackInfo* cast(Object* obj);

  // Dispatched behavior.
  DECLARE_PRINTER(TypeFeedbackInfo)
  DECLARE_VERIFIER(TypeFeedbackInfo)

  static const int kStorage1Offset = HeapObject::kHeaderSize;
  static const int kStorage2Offset = kStorage1Offset + kPointerSize;
  static const int kTypeFeedbackCellsOffset = kStorage2Offset + kPointerSize;
  static const int kSize = kTypeFeedbackCellsOffset + kPointerSize;

 private:
  static const int kTypeChangeChecksumBits = 7;

  // The two count/checksum pairs below use overlapping bit ranges, so
  // each pair presumably occupies one of the two storage fields above —
  // confirm against the inline accessor implementations.
  class ICTotalCountField: public BitField<int, 0,
      kSmiValueSize - kTypeChangeChecksumBits> {};  // NOLINT
  class OwnTypeChangeChecksum: public BitField<int,
      kSmiValueSize - kTypeChangeChecksumBits,
      kTypeChangeChecksumBits> {};  // NOLINT
  class ICsWithTypeInfoCountField: public BitField<int, 0,
      kSmiValueSize - kTypeChangeChecksumBits> {};  // NOLINT
  class InlinedTypeChangeChecksum: public BitField<int,
      kSmiValueSize - kTypeChangeChecksumBits,
      kTypeChangeChecksumBits> {};  // NOLINT

  DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackInfo);
};
-
-
// Whether an allocation should record allocation-site tracking info.
// LAST_ALLOCATION_SITE_MODE marks the upper bound of the enum range.
enum AllocationSiteMode {
  DONT_TRACK_ALLOCATION_SITE,
  TRACK_ALLOCATION_SITE,
  LAST_ALLOCATION_SITE_MODE = TRACK_ALLOCATION_SITE
};
-
-
// Tracking info attached to a JSObject at its allocation site, used to
// decide on elements-kind transitions (see GetMode overloads).
class AllocationSiteInfo: public Struct {
 public:
  DECL_ACCESSORS(payload, Object)

  static inline AllocationSiteInfo* cast(Object* obj);

  DECLARE_PRINTER(AllocationSiteInfo)
  DECLARE_VERIFIER(AllocationSiteInfo)

  // Returns NULL if no AllocationSiteInfo is available for object.
  static AllocationSiteInfo* FindForJSObject(JSObject* object);

  // Decide whether tracking should be enabled for the given kind(s).
  static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind);
  static AllocationSiteMode GetMode(ElementsKind from, ElementsKind to);

  // Layout description: a single pointer field (the payload).
  static const int kPayloadOffset = HeapObject::kHeaderSize;
  static const int kSize = kPayloadOffset + kPointerSize;
  static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;

  // Extracts an ElementsKind from the payload; returns false on failure.
  bool GetElementsKindPayload(ElementsKind* kind);
 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSiteInfo);
};
-
-
-// Representation of a slow alias as part of a non-strict arguments objects.
-// For fast aliases (if HasNonStrictArgumentsElements()):
-// - the parameter map contains an index into the context
-// - all attributes of the element have default values
-// For slow aliases (if HasDictionaryArgumentsElements()):
-// - the parameter map contains no fast alias mapping (i.e. the hole)
-// - this struct (in the slow backing store) contains an index into the context
// - all attributes are available as part of the property details
class AliasedArgumentsEntry: public Struct {
 public:
  // Index into the context where the aliased argument lives (see the
  // class comment above for fast vs. slow aliases).
  inline int aliased_context_slot();
  inline void set_aliased_context_slot(int count);

  static inline AliasedArgumentsEntry* cast(Object* obj);

  // Dispatched behavior.
  DECLARE_PRINTER(AliasedArgumentsEntry)
  DECLARE_VERIFIER(AliasedArgumentsEntry)

  // Layout description: a single field holding the context slot.
  static const int kAliasedContextSlot = HeapObject::kHeaderSize;
  static const int kSize = kAliasedContextSlot + kPointerSize;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(AliasedArgumentsEntry);
};
-
-
// Option flags for string traversal/extraction helpers.
// NOTE(review): semantics inferred from the names — ALLOW_NULLS would
// permit embedded '\0' characters and ROBUST_STRING_TRAVERSAL a
// GC-tolerant walk; confirm against the functions that take these flags.
enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
-
-
// Incremental string hash computation, including the array-index fast
// path encoded in a string's hash field.
class StringHasher {
 public:
  explicit inline StringHasher(int length, uint32_t seed);

  template <typename schar>
  static inline uint32_t HashSequentialString(const schar* chars,
                                              int length,
                                              uint32_t seed);

  // Reads all the data, even for long strings and computes the utf16 length.
  static uint32_t ComputeUtf8Hash(Vector<const char> chars,
                                  uint32_t seed,
                                  int* utf16_length_out);

  // Calculated hash value for a string consisting of 1 to
  // String::kMaxArrayIndexSize digits with no leading zeros (except "0").
  // The value argument is the represented decimal value of those digits.
  static uint32_t MakeArrayIndexHash(uint32_t value, int length);

  // No string is allowed to have a hash of zero. That value is reserved
  // for internal properties. If the hash calculation yields zero then we
  // use 27 instead.
  static const int kZeroHash = 27;

  // Reusable parts of the hashing algorithm.
  INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c));
  INLINE(static uint32_t GetHashCore(uint32_t running_hash));

 protected:
  // Returns the value to store in the hash field of a string with
  // the given length and contents.
  uint32_t GetHashField();
  // Returns true if the hash of this string can be computed without
  // looking at the contents.
  inline bool has_trivial_hash();
  // Adds a block of characters to the hash.
  template<typename Char>
  inline void AddCharacters(const Char* chars, int len);

 private:
  // Add a character to the hash.
  inline void AddCharacter(uint16_t c);
  // Update index. Returns true if string is still an index.
  inline bool UpdateIndex(uint16_t c);

  int length_;                  // Total string length being hashed.
  uint32_t raw_running_hash_;   // Accumulator for AddCharacterCore.
  uint32_t array_index_;        // Decimal value, while still a valid index.
  bool is_array_index_;         // Whether the string may be an array index.
  bool is_first_char_;          // Tracks the leading-zero rule for indices.
  DISALLOW_COPY_AND_ASSIGN(StringHasher);
};
-
-
-// The characteristics of a string are stored in its map. Retrieving these
-// few bits of information is moderately expensive, involving two memory
-// loads where the second is dependent on the first. To improve efficiency
-// the shape of the string is given its own class so that it can be retrieved
-// once and used for several string operations. A StringShape is small enough
-// to be passed by value and is immutable, but be aware that flattening a
-// string can potentially alter its shape. Also be aware that a GC caused by
-// something else can alter the shape of a string due to ConsString
-// shortcutting. Keeping these restrictions in mind has proven to be error-
-// prone and so we no longer put StringShapes in variables unless there is a
-// concrete performance benefit at that particular point in the code.
class StringShape BASE_EMBEDDED {
 public:
  inline explicit StringShape(String* s);
  inline explicit StringShape(Map* s);
  inline explicit StringShape(InstanceType t);
  inline bool IsSequential();
  inline bool IsExternal();
  inline bool IsCons();
  inline bool IsSliced();
  inline bool IsIndirect();
  inline bool IsExternalAscii();
  inline bool IsExternalTwoByte();
  inline bool IsSequentialAscii();
  inline bool IsSequentialTwoByte();
  inline bool IsInternalized();
  inline StringRepresentationTag representation_tag();
  inline uint32_t encoding_tag();
  inline uint32_t full_representation_tag();
  inline uint32_t size_tag();
#ifdef DEBUG
  // In debug builds the shape remembers whether it is still valid;
  // operations that may change a string's shape call invalidate().
  inline uint32_t type() { return type_; }
  inline void invalidate() { valid_ = false; }
  inline bool valid() { return valid_; }
#else
  inline void invalidate() { }
#endif

 private:
  // Cached instance-type bits; all predicates above decode this word.
  uint32_t type_;
#ifdef DEBUG
  inline void set_valid() { valid_ = true; }
  bool valid_;
#else
  inline void set_valid() { }
#endif
};
-
-
-// The Name abstract class captures anything that can be used as a property
-// name, i.e., strings and symbols. All names store a hash value.
class Name: public HeapObject {
 public:
  // Get and set the hash field of the name.
  inline uint32_t hash_field();
  inline void set_hash_field(uint32_t value);

  // Tells whether the hash code has been computed.
  inline bool HasHashCode();

  // Returns a hash value used for the property table
  inline uint32_t Hash();

  // Casting.
  static inline Name* cast(Object* obj);

  // Layout description.
  static const int kHashFieldOffset = HeapObject::kHeaderSize;
  static const int kSize = kHashFieldOffset + kPointerSize;

  // Mask constant for checking if a name has a computed hash code
  // and if it is a string that is an array index. The least significant bit
  // indicates whether a hash code has been computed. If the hash code has
  // been computed the 2nd bit tells whether the string can be used as an
  // array index.
  static const int kHashNotComputedMask = 1;
  static const int kIsNotArrayIndexMask = 1 << 1;
  static const int kNofHashBitFields = 2;

  // Shift constant retrieving hash code from hash field.
  static const int kHashShift = kNofHashBitFields;

  // Only these bits are relevant in the hash, since the top two are shifted
  // out.
  static const uint32_t kHashBitMask = 0xffffffffu >> kHashShift;

  // Array index strings this short can keep their index in the hash field.
  static const int kMaxCachedArrayIndexLength = 7;

  // For strings which are array indexes the hash value has the string length
  // mixed into the hash, mainly to avoid a hash value of zero which would be
  // the case for the string '0'. 24 bits are used for the array index value.
  static const int kArrayIndexValueBits = 24;
  static const int kArrayIndexLengthBits =
      kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;

  // The length field must get at least one bit of the 32-bit hash word.
  STATIC_CHECK((kArrayIndexLengthBits > 0));

  static const int kArrayIndexHashLengthShift =
      kArrayIndexValueBits + kNofHashBitFields;

  static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;

  static const int kArrayIndexValueMask =
      ((1 << kArrayIndexValueBits) - 1) << kHashShift;

  // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
  // could use a mask to test if the length of string is less than or equal to
  // kMaxCachedArrayIndexLength.
  STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));

  static const int kContainsCachedArrayIndexMask =
      (~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
      kIsNotArrayIndexMask;

  // Value of empty hash field indicating that the hash is not computed.
  static const int kEmptyHashField =
      kIsNotArrayIndexMask | kHashNotComputedMask;

 protected:
  static inline bool IsHashFieldComputed(uint32_t field);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Name);
};
-
-
-// ES6 symbols.
-class Symbol: public Name {
- public:
- // Casting.
- static inline Symbol* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(Symbol)
- DECLARE_VERIFIER(Symbol)
-
- // Layout description.
- static const int kSize = Name::kSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol);
-};
-
-
-// The String abstract class captures JavaScript string values:
-//
-// Ecma-262:
-// 4.3.16 String Value
-// A string value is a member of the type String and is a finite
-// ordered sequence of zero or more 16-bit unsigned integer values.
-//
-// All string values have a length field.
-class String: public Name {
- public:
- enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };
-
- // Representation of the flat content of a String.
- // A non-flat string doesn't have flat content.
- // A flat string has content that's encoded as a sequence of either
- // ASCII chars or two-byte UC16.
- // Returned by String::GetFlatContent().
- class FlatContent {
- public:
- // Returns true if the string is flat and this structure contains content.
- bool IsFlat() { return state_ != NON_FLAT; }
- // Returns true if the structure contains ASCII content.
- bool IsAscii() { return state_ == ASCII; }
- // Returns true if the structure contains two-byte content.
- bool IsTwoByte() { return state_ == TWO_BYTE; }
-
- // Return the one byte content of the string. Only use if IsAscii() returns
- // true.
- Vector<const uint8_t> ToOneByteVector() {
- ASSERT_EQ(ASCII, state_);
- return buffer_;
- }
- // Return the two-byte content of the string. Only use if IsTwoByte()
- // returns true.
- Vector<const uc16> ToUC16Vector() {
- ASSERT_EQ(TWO_BYTE, state_);
- return Vector<const uc16>::cast(buffer_);
- }
-
- private:
- enum State { NON_FLAT, ASCII, TWO_BYTE };
-
- // Constructors only used by String::GetFlatContent().
- explicit FlatContent(Vector<const uint8_t> chars)
- : buffer_(chars),
- state_(ASCII) { }
- explicit FlatContent(Vector<const uc16> chars)
- : buffer_(Vector<const byte>::cast(chars)),
- state_(TWO_BYTE) { }
- FlatContent() : buffer_(), state_(NON_FLAT) { }
-
- Vector<const uint8_t> buffer_;
- State state_;
-
- friend class String;
- };
-
- // Get and set the length of the string.
- inline int length();
- inline void set_length(int value);
-
- // Returns whether this string has only ASCII chars, i.e. all of them can
- // be ASCII encoded. This might be the case even if the string is
- // two-byte. Such strings may appear when the embedder prefers
- // two-byte external representations even for ASCII data.
- inline bool IsOneByteRepresentation();
- inline bool IsTwoByteRepresentation();
-
- // Cons and slices have an encoding flag that may not represent the actual
- // encoding of the underlying string. This is taken into account here.
- // Requires: this->IsFlat()
- inline bool IsOneByteRepresentationUnderneath();
- inline bool IsTwoByteRepresentationUnderneath();
-
- // NOTE: this should be considered only a hint. False negatives are
- // possible.
- inline bool HasOnlyAsciiChars();
-
- inline bool IsOneByteConvertible();
-
- // Get and set individual two byte chars in the string.
- inline void Set(int index, uint16_t value);
- // Get individual two byte char in the string. Repeated calls
- // to this method are not efficient unless the string is flat.
- INLINE(uint16_t Get(int index));
-
- // Try to flatten the string. Checks first inline to see if it is
- // necessary. Does nothing if the string is not a cons string.
- // Flattening allocates a sequential string with the same data as
- // the given string and mutates the cons string to a degenerate
- // form, where the first component is the new sequential string and
- // the second component is the empty string. If allocation fails,
- // this function returns a failure. If flattening succeeds, this
- // function returns the sequential string that is now the first
- // component of the cons string.
- //
- // Degenerate cons strings are handled specially by the garbage
- // collector (see IsShortcutCandidate).
- //
- // Use FlattenString from Handles.cc to flatten even in case an
- // allocation failure happens.
- inline MaybeObject* TryFlatten(PretenureFlag pretenure = NOT_TENURED);
-
- // Convenience function. Has exactly the same behavior as
- // TryFlatten(), except in the case of failure returns the original
- // string.
- inline String* TryFlattenGetString(PretenureFlag pretenure = NOT_TENURED);
-
- // Tries to return the content of a flat string as a structure holding either
- // a flat vector of char or of uc16.
- // If the string isn't flat, and therefore doesn't have flat content, the
- // returned structure will report so, and can't provide a vector of either
- // kind.
- FlatContent GetFlatContent();
-
- // Returns the parent of a sliced string or first part of a flat cons string.
- // Requires: StringShape(this).IsIndirect() && this->IsFlat()
- inline String* GetUnderlying();
-
- // Mark the string as an undetectable object. It only applies to
- // ASCII and two byte string types.
- bool MarkAsUndetectable();
-
- // Return a substring.
- MUST_USE_RESULT MaybeObject* SubString(int from,
- int to,
- PretenureFlag pretenure = NOT_TENURED);
-
- // String equality operations.
- inline bool Equals(String* other);
- bool IsUtf8EqualTo(Vector<const char> str);
- bool IsOneByteEqualTo(Vector<const uint8_t> str);
- bool IsTwoByteEqualTo(Vector<const uc16> str);
-
- // Return a UTF8 representation of the string. The string is null
- // terminated but may optionally contain nulls. Length is returned
- // in length_output if length_output is not a null pointer The string
- // should be nearly flat, otherwise the performance of this method may
- // be very slow (quadratic in the length). Setting robustness_flag to
- // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust This means it
- // handles unexpected data without causing assert failures and it does not
- // do any heap allocations. This is useful when printing stack traces.
- SmartArrayPointer<char> ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robustness_flag,
- int offset,
- int length,
- int* length_output = 0);
- SmartArrayPointer<char> ToCString(
- AllowNullsFlag allow_nulls = DISALLOW_NULLS,
- RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
- int* length_output = 0);
-
- // Return a 16 bit Unicode representation of the string.
- // The string should be nearly flat, otherwise the performance of
- // of this method may be very bad. Setting robustness_flag to
- // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust This means it
- // handles unexpected data without causing assert failures and it does not
- // do any heap allocations. This is useful when printing stack traces.
- SmartArrayPointer<uc16> ToWideCString(
- RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL);
-
- bool ComputeArrayIndex(uint32_t* index);
-
- // Externalization.
- bool MakeExternal(v8::String::ExternalStringResource* resource);
- bool MakeExternal(v8::String::ExternalAsciiStringResource* resource);
-
- // Conversion.
- inline bool AsArrayIndex(uint32_t* index);
-
- // Casting.
- static inline String* cast(Object* obj);
-
- void PrintOn(FILE* out);
-
- // For use during stack traces. Performs rudimentary sanity check.
- bool LooksValid();
-
- // Dispatched behavior.
- void StringShortPrint(StringStream* accumulator);
-#ifdef OBJECT_PRINT
- inline void StringPrint() {
- StringPrint(stdout);
- }
- void StringPrint(FILE* out);
-
- char* ToAsciiArray();
-#endif
- DECLARE_VERIFIER(String)
-
- inline bool IsFlat();
-
- // Layout description.
- static const int kLengthOffset = Name::kSize;
- static const int kSize = kLengthOffset + kPointerSize;
-
- // Maximum number of characters to consider when trying to convert a string
- // value into an array index.
- static const int kMaxArrayIndexSize = 10;
- STATIC_CHECK(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
-
- // Max char codes.
- static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
- static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
- static const int kMaxUtf16CodeUnit = 0xffff;
-
- // Value of hash field containing computed hash equal to zero.
- static const int kEmptyStringHash = kIsNotArrayIndexMask;
-
- // Maximal string length.
- static const int kMaxLength = (1 << (32 - 2)) - 1;
-
- // Max length for computing hash. For strings longer than this limit the
- // string length is used as the hash value.
- static const int kMaxHashCalcLength = 16383;
-
- // Limit for truncation in short printing.
- static const int kMaxShortPrintLength = 1024;
-
- // Support for regular expressions.
- const uc16* GetTwoByteData();
- const uc16* GetTwoByteData(unsigned start);
-
- // Helper function for flattening strings.
- template <typename sinkchar>
- static void WriteToFlat(String* source,
- sinkchar* sink,
- int from,
- int to);
-
- // The return value may point to the first aligned word containing the
- // first non-ascii character, rather than directly to the non-ascii character.
- // If the return value is >= the passed length, the entire string was ASCII.
- static inline int NonAsciiStart(const char* chars, int length) {
- const char* start = chars;
- const char* limit = chars + length;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- ASSERT(unibrow::Utf8::kMaxOneByteChar == 0x7F);
- const uintptr_t non_ascii_mask = kUintptrAllBitsSet / 0xFF * 0x80;
- while (chars + sizeof(uintptr_t) <= limit) {
- if (*reinterpret_cast<const uintptr_t*>(chars) & non_ascii_mask) {
- return static_cast<int>(chars - start);
- }
- chars += sizeof(uintptr_t);
- }
-#endif
- while (chars < limit) {
- if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
- return static_cast<int>(chars - start);
- }
- ++chars;
- }
- return static_cast<int>(chars - start);
- }
-
- static inline bool IsAscii(const char* chars, int length) {
- return NonAsciiStart(chars, length) >= length;
- }
-
- static inline bool IsAscii(const uint8_t* chars, int length) {
- return
- NonAsciiStart(reinterpret_cast<const char*>(chars), length) >= length;
- }
-
- static inline int NonOneByteStart(const uc16* chars, int length) {
- const uc16* limit = chars + length;
- const uc16* start = chars;
- while (chars < limit) {
- if (*chars > kMaxOneByteCharCodeU) return static_cast<int>(chars - start);
- ++chars;
- }
- return static_cast<int>(chars - start);
- }
-
- static inline bool IsOneByte(const uc16* chars, int length) {
- return NonOneByteStart(chars, length) >= length;
- }
-
- template<class Visitor, class ConsOp>
- static inline void Visit(String* string,
- unsigned offset,
- Visitor& visitor,
- ConsOp& cons_op,
- int32_t type,
- unsigned length);
-
- private:
- friend class Name;
-
- // Try to flatten the top level ConsString that is hiding behind this
- // string. This is a no-op unless the string is a ConsString. Flatten
- // mutates the ConsString and might return a failure.
- MUST_USE_RESULT MaybeObject* SlowTryFlatten(PretenureFlag pretenure);
-
- // Slow case of String::Equals. This implementation works on any strings
- // but it is most efficient on strings that are almost flat.
- bool SlowEquals(String* other);
-
- // Slow case of AsArrayIndex.
- bool SlowAsArrayIndex(uint32_t* index);
-
- // Compute and set the hash code.
- uint32_t ComputeAndSetHash();
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(String);
-};
-
-
-// The SeqString abstract class captures sequential string values.
-class SeqString: public String {
- public:
- // Casting.
- static inline SeqString* cast(Object* obj);
-
- // Get and set the symbol id of the string
- inline int symbol_id();
- inline void set_symbol_id(int value);
-
- // Layout description.
- static const int kSymbolIdOffset = String::kSize;
- static const int kHeaderSize = kSymbolIdOffset + kPointerSize;
-
- // Truncate the string in-place if possible and return the result.
- // In case of new_length == 0, the empty string is returned without
- // truncating the original string.
- MUST_USE_RESULT String* Truncate(int new_length);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
-};
-
-
-// The AsciiString class captures sequential ASCII string objects.
-// Each character in the AsciiString is an ASCII character.
-class SeqOneByteString: public SeqString {
- public:
- static const bool kHasAsciiEncoding = true;
-
- // Dispatched behavior.
- inline uint16_t SeqOneByteStringGet(int index);
- inline void SeqOneByteStringSet(int index, uint16_t value);
-
- // Get the address of the characters in this string.
- inline Address GetCharsAddress();
-
- inline uint8_t* GetChars();
-
- // Casting
- static inline SeqOneByteString* cast(Object* obj);
-
- // Garbage collection support. This method is called by the
- // garbage collector to compute the actual size of an AsciiString
- // instance.
- inline int SeqOneByteStringSize(InstanceType instance_type);
-
- // Computes the size for an AsciiString instance of a given length.
- static int SizeFor(int length) {
- return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
- }
-
- // Maximal memory usage for a single sequential ASCII string.
- static const int kMaxSize = 512 * MB - 1;
- // Maximal length of a single sequential ASCII string.
- // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
- static const int kMaxLength = (kMaxSize - kHeaderSize);
-
- DECLARE_VERIFIER(SeqOneByteString)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString);
-};
-
-
-// The TwoByteString class captures sequential unicode string objects.
-// Each character in the TwoByteString is a two-byte uint16_t.
-class SeqTwoByteString: public SeqString {
- public:
- static const bool kHasAsciiEncoding = false;
-
- // Dispatched behavior.
- inline uint16_t SeqTwoByteStringGet(int index);
- inline void SeqTwoByteStringSet(int index, uint16_t value);
-
- // Get the address of the characters in this string.
- inline Address GetCharsAddress();
-
- inline uc16* GetChars();
-
- // For regexp code.
- const uint16_t* SeqTwoByteStringGetData(unsigned start);
-
- // Casting
- static inline SeqTwoByteString* cast(Object* obj);
-
- // Garbage collection support. This method is called by the
- // garbage collector to compute the actual size of a TwoByteString
- // instance.
- inline int SeqTwoByteStringSize(InstanceType instance_type);
-
- // Computes the size for a TwoByteString instance of a given length.
- static int SizeFor(int length) {
- return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
- }
-
- // Maximal memory usage for a single sequential two-byte string.
- static const int kMaxSize = 512 * MB - 1;
- // Maximal length of a single sequential two-byte string.
- // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
- static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
-};
-
-
-// The ConsString class describes string values built by using the
-// addition operator on strings. A ConsString is a pair where the
-// first and second components are pointers to other string values.
-// One or both components of a ConsString can be pointers to other
-// ConsStrings, creating a binary tree of ConsStrings where the leaves
-// are non-ConsString string values. The string value represented by
-// a ConsString can be obtained by concatenating the leaf string
-// values in a left-to-right depth-first traversal of the tree.
-class ConsString: public String {
- public:
- // First string of the cons cell.
- inline String* first();
- // Doesn't check that the result is a string, even in debug mode. This is
- // useful during GC where the mark bits confuse the checks.
- inline Object* unchecked_first();
- inline void set_first(String* first,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- // Second string of the cons cell.
- inline String* second();
- // Doesn't check that the result is a string, even in debug mode. This is
- // useful during GC where the mark bits confuse the checks.
- inline Object* unchecked_second();
- inline void set_second(String* second,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- // Dispatched behavior.
- uint16_t ConsStringGet(int index);
-
- // Casting.
- static inline ConsString* cast(Object* obj);
-
- // Layout description.
- static const int kFirstOffset = POINTER_SIZE_ALIGN(String::kSize);
- static const int kSecondOffset = kFirstOffset + kPointerSize;
- static const int kSize = kSecondOffset + kPointerSize;
-
- // Minimum length for a cons string.
- static const int kMinLength = 13;
-
- typedef FixedBodyDescriptor<kFirstOffset, kSecondOffset + kPointerSize, kSize>
- BodyDescriptor;
-
- DECLARE_VERIFIER(ConsString)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
-};
-
-
-// The Sliced String class describes strings that are substrings of another
-// sequential string. The motivation is to save time and memory when creating
-// a substring. A Sliced String is described as a pointer to the parent,
-// the offset from the start of the parent string and the length. Using
-// a Sliced String therefore requires unpacking of the parent string and
-// adding the offset to the start address. A substring of a Sliced String
-// are not nested since the double indirection is simplified when creating
-// such a substring.
-// Currently missing features are:
-// - handling externalized parent strings
-// - external strings as parent
-// - truncating sliced string to enable otherwise unneeded parent to be GC'ed.
-class SlicedString: public String {
- public:
- inline String* parent();
- inline void set_parent(String* parent,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline int offset();
- inline void set_offset(int offset);
-
- // Dispatched behavior.
- uint16_t SlicedStringGet(int index);
-
- // Casting.
- static inline SlicedString* cast(Object* obj);
-
- // Layout description.
- static const int kParentOffset = POINTER_SIZE_ALIGN(String::kSize);
- static const int kOffsetOffset = kParentOffset + kPointerSize;
- static const int kSize = kOffsetOffset + kPointerSize;
-
- // Minimum length for a sliced string.
- static const int kMinLength = 13;
-
- typedef FixedBodyDescriptor<kParentOffset,
- kOffsetOffset + kPointerSize, kSize>
- BodyDescriptor;
-
- DECLARE_VERIFIER(SlicedString)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString);
-};
-
-
-// The ExternalString class describes string values that are backed by
-// a string resource that lies outside the V8 heap. ExternalStrings
-// consist of the length field common to all strings, a pointer to the
-// external resource. It is important to ensure (externally) that the
-// resource is not deallocated while the ExternalString is live in the
-// V8 heap.
-//
-// The API expects that all ExternalStrings are created through the
-// API. Therefore, ExternalStrings should not be used internally.
-class ExternalString: public String {
- public:
- // Casting
- static inline ExternalString* cast(Object* obj);
-
- // Layout description.
- static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
- static const int kShortSize = kResourceOffset + kPointerSize;
- static const int kResourceDataOffset = kResourceOffset + kPointerSize;
- static const int kSize = kResourceDataOffset + kPointerSize;
-
- static const int kMaxShortLength =
- (kShortSize - SeqString::kHeaderSize) / kCharSize;
-
- // Return whether external string is short (data pointer is not cached).
- inline bool is_short();
-
- STATIC_CHECK(kResourceOffset == Internals::kStringResourceOffset);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalString);
-};
-
-
-// The ExternalAsciiString class is an external string backed by an
-// ASCII string.
-class ExternalAsciiString: public ExternalString {
- public:
- static const bool kHasAsciiEncoding = true;
-
- typedef v8::String::ExternalAsciiStringResource Resource;
-
- // The underlying resource.
- inline const Resource* resource();
- inline void set_resource(const Resource* buffer);
-
- // Update the pointer cache to the external character array.
- // The cached pointer is always valid, as the external character array does =
- // not move during lifetime. Deserialization is the only exception, after
- // which the pointer cache has to be refreshed.
- inline void update_data_cache();
-
- inline const uint8_t* GetChars();
-
- // Dispatched behavior.
- inline uint16_t ExternalAsciiStringGet(int index);
-
- // Casting.
- static inline ExternalAsciiString* cast(Object* obj);
-
- // Garbage collection support.
- inline void ExternalAsciiStringIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void ExternalAsciiStringIterateBody();
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalAsciiString);
-};
-
-
-// The ExternalTwoByteString class is an external string backed by a UTF-16
-// encoded string.
-class ExternalTwoByteString: public ExternalString {
- public:
- static const bool kHasAsciiEncoding = false;
-
- typedef v8::String::ExternalStringResource Resource;
-
- // The underlying string resource.
- inline const Resource* resource();
- inline void set_resource(const Resource* buffer);
-
- // Update the pointer cache to the external character array.
- // The cached pointer is always valid, as the external character array does =
- // not move during lifetime. Deserialization is the only exception, after
- // which the pointer cache has to be refreshed.
- inline void update_data_cache();
-
- inline const uint16_t* GetChars();
-
- // Dispatched behavior.
- inline uint16_t ExternalTwoByteStringGet(int index);
-
- // For regexp code.
- inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
-
- // Casting.
- static inline ExternalTwoByteString* cast(Object* obj);
-
- // Garbage collection support.
- inline void ExternalTwoByteStringIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void ExternalTwoByteStringIterateBody();
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
-};
-
-
-// Utility superclass for stack-allocated objects that must be updated
-// on gc. It provides two ways for the gc to update instances, either
-// iterating or updating after gc.
-class Relocatable BASE_EMBEDDED {
- public:
- explicit inline Relocatable(Isolate* isolate);
- inline virtual ~Relocatable();
- virtual void IterateInstance(ObjectVisitor* v) { }
- virtual void PostGarbageCollection() { }
-
- static void PostGarbageCollectionProcessing();
- static int ArchiveSpacePerThread();
- static char* ArchiveState(Isolate* isolate, char* to);
- static char* RestoreState(Isolate* isolate, char* from);
- static void Iterate(ObjectVisitor* v);
- static void Iterate(ObjectVisitor* v, Relocatable* top);
- static char* Iterate(ObjectVisitor* v, char* t);
- private:
- Isolate* isolate_;
- Relocatable* prev_;
-};
-
-
-// A flat string reader provides random access to the contents of a
-// string independent of the character width of the string. The handle
-// must be valid as long as the reader is being used.
-class FlatStringReader : public Relocatable {
- public:
- FlatStringReader(Isolate* isolate, Handle<String> str);
- FlatStringReader(Isolate* isolate, Vector<const char> input);
- void PostGarbageCollection();
- inline uc32 Get(int index);
- int length() { return length_; }
- private:
- String** str_;
- bool is_ascii_;
- int length_;
- const void* start_;
-};
-
-
-// A ConsStringOp that returns null.
-// Useful when the operation to apply on a ConsString
-// requires an expensive data structure.
-class ConsStringNullOp {
- public:
- inline ConsStringNullOp() {}
- static inline String* Operate(String*, unsigned*, int32_t*, unsigned*);
- private:
- DISALLOW_COPY_AND_ASSIGN(ConsStringNullOp);
-};
-
-
-// This maintains an off-stack representation of the stack frames required
-// to traverse a ConsString, allowing an entirely iterative and restartable
-// traversal of the entire string
-// Note: this class is not GC-safe.
-class ConsStringIteratorOp {
- public:
- inline ConsStringIteratorOp() {}
- String* Operate(String* string,
- unsigned* offset_out,
- int32_t* type_out,
- unsigned* length_out);
- inline String* ContinueOperation(int32_t* type_out, unsigned* length_out);
- inline void Reset();
- inline bool HasMore();
-
- private:
- // TODO(dcarney): Templatize this out for different stack sizes.
- static const unsigned kStackSize = 32;
- // Use a mask instead of doing modulo operations for stack wrapping.
- static const unsigned kDepthMask = kStackSize-1;
- STATIC_ASSERT(IS_POWER_OF_TWO(kStackSize));
- static inline unsigned OffsetForDepth(unsigned depth);
-
- inline void PushLeft(ConsString* string);
- inline void PushRight(ConsString* string);
- inline void AdjustMaximumDepth();
- inline void Pop();
- String* NextLeaf(bool* blew_stack, int32_t* type_out, unsigned* length_out);
- String* Search(unsigned* offset_out,
- int32_t* type_out,
- unsigned* length_out);
-
- unsigned depth_;
- unsigned maximum_depth_;
- // Stack must always contain only frames for which right traversal
- // has not yet been performed.
- ConsString* frames_[kStackSize];
- unsigned consumed_;
- ConsString* root_;
- DISALLOW_COPY_AND_ASSIGN(ConsStringIteratorOp);
-};
-
-
-// Note: this class is not GC-safe.
-class StringCharacterStream {
- public:
- inline StringCharacterStream(String* string,
- ConsStringIteratorOp* op,
- unsigned offset = 0);
- inline uint16_t GetNext();
- inline bool HasMore();
- inline void Reset(String* string, unsigned offset = 0);
- inline void VisitOneByteString(const uint8_t* chars, unsigned length);
- inline void VisitTwoByteString(const uint16_t* chars, unsigned length);
-
- private:
- bool is_one_byte_;
- union {
- const uint8_t* buffer8_;
- const uint16_t* buffer16_;
- };
- const uint8_t* end_;
- ConsStringIteratorOp* op_;
- DISALLOW_COPY_AND_ASSIGN(StringCharacterStream);
-};
-
-
-template <typename T>
-class VectorIterator {
- public:
- VectorIterator(T* d, int l) : data_(Vector<const T>(d, l)), index_(0) { }
- explicit VectorIterator(Vector<const T> data) : data_(data), index_(0) { }
- T GetNext() { return data_[index_++]; }
- bool has_more() { return index_ < data_.length(); }
- private:
- Vector<const T> data_;
- int index_;
-};
-
-
-// The Oddball describes objects null, undefined, true, and false.
-class Oddball: public HeapObject {
- public:
- // [to_string]: Cached to_string computed at startup.
- DECL_ACCESSORS(to_string, String)
-
- // [to_number]: Cached to_number computed at startup.
- DECL_ACCESSORS(to_number, Object)
-
- inline byte kind();
- inline void set_kind(byte kind);
-
- // Casting.
- static inline Oddball* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_VERIFIER(Oddball)
-
- // Initialize the fields.
- MUST_USE_RESULT MaybeObject* Initialize(const char* to_string,
- Object* to_number,
- byte kind);
-
- // Layout description.
- static const int kToStringOffset = HeapObject::kHeaderSize;
- static const int kToNumberOffset = kToStringOffset + kPointerSize;
- static const int kKindOffset = kToNumberOffset + kPointerSize;
- static const int kSize = kKindOffset + kPointerSize;
-
- static const byte kFalse = 0;
- static const byte kTrue = 1;
- static const byte kNotBooleanMask = ~1;
- static const byte kTheHole = 2;
- static const byte kNull = 3;
- static const byte kArgumentMarker = 4;
- static const byte kUndefined = 5;
- static const byte kOther = 6;
-
- typedef FixedBodyDescriptor<kToStringOffset,
- kToNumberOffset + kPointerSize,
- kSize> BodyDescriptor;
-
- STATIC_CHECK(kKindOffset == Internals::kOddballKindOffset);
- STATIC_CHECK(kNull == Internals::kNullOddballKind);
- STATIC_CHECK(kUndefined == Internals::kUndefinedOddballKind);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Oddball);
-};
-
-
-class JSGlobalPropertyCell: public HeapObject {
- public:
- // [value]: value of the global property.
- DECL_ACCESSORS(value, Object)
-
- // Casting.
- static inline JSGlobalPropertyCell* cast(Object* obj);
-
- static inline JSGlobalPropertyCell* FromValueAddress(Address value) {
- return cast(FromAddress(value - kValueOffset));
- }
-
- inline Address ValueAddress() {
- return address() + kValueOffset;
- }
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSGlobalPropertyCell)
- DECLARE_VERIFIER(JSGlobalPropertyCell)
-
- // Layout description.
- static const int kValueOffset = HeapObject::kHeaderSize;
- static const int kSize = kValueOffset + kPointerSize;
-
- typedef FixedBodyDescriptor<kValueOffset,
- kValueOffset + kPointerSize,
- kSize> BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
-};
-
-
-// The JSProxy describes EcmaScript Harmony proxies
-class JSProxy: public JSReceiver {
- public:
- // [handler]: The handler property.
- DECL_ACCESSORS(handler, Object)
-
- // [hash]: The hash code property (undefined if not initialized yet).
- DECL_ACCESSORS(hash, Object)
-
- // Casting.
- static inline JSProxy* cast(Object* obj);
-
- bool HasPropertyWithHandler(String* name);
- bool HasElementWithHandler(uint32_t index);
-
- MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(
- Object* receiver,
- String* name);
- MUST_USE_RESULT MaybeObject* GetElementWithHandler(
- Object* receiver,
- uint32_t index);
-
- MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
- JSReceiver* receiver,
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetElementWithHandler(
- JSReceiver* receiver,
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode);
-
- // If the handler defines an accessor property with a setter, invoke it.
- // If it defines an accessor property without a setter, or a data property
- // that is read-only, throw. In all these cases set '*done' to true,
- // otherwise set it to false.
- MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypesWithHandler(
- JSReceiver* receiver,
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool* done);
-
- MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler(
- String* name,
- DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteElementWithHandler(
- uint32_t index,
- DeleteMode mode);
-
- MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
- JSReceiver* receiver,
- String* name);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithHandler(
- JSReceiver* receiver,
- uint32_t index);
-
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
-
- // Turn this into an (empty) JSObject.
- void Fix();
-
- // Initializes the body after the handler slot.
- inline void InitializeBody(int object_size, Object* value);
-
- // Invoke a trap by name. If the trap does not exist on this's handler,
- // but derived_trap is non-NULL, invoke that instead. May cause GC.
- Handle<Object> CallTrap(const char* name,
- Handle<Object> derived_trap,
- int argc,
- Handle<Object> args[]);
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSProxy)
- DECLARE_VERIFIER(JSProxy)
-
- // Layout description. We add padding so that a proxy has the same
- // size as a virgin JSObject. This is essential for becoming a JSObject
- // upon freeze.
- static const int kHandlerOffset = HeapObject::kHeaderSize;
- static const int kHashOffset = kHandlerOffset + kPointerSize;
- static const int kPaddingOffset = kHashOffset + kPointerSize;
- static const int kSize = JSObject::kHeaderSize;
- static const int kHeaderSize = kPaddingOffset;
- static const int kPaddingSize = kSize - kPaddingOffset;
-
- STATIC_CHECK(kPaddingSize >= 0);
-
- typedef FixedBodyDescriptor<kHandlerOffset,
- kPaddingOffset,
- kSize> BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
-};
-
-
-class JSFunctionProxy: public JSProxy {
- public:
- // [call_trap]: The call trap.
- DECL_ACCESSORS(call_trap, Object)
-
- // [construct_trap]: The construct trap.
- DECL_ACCESSORS(construct_trap, Object)
-
- // Casting.
- static inline JSFunctionProxy* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSFunctionProxy)
- DECLARE_VERIFIER(JSFunctionProxy)
-
- // Layout description.
- static const int kCallTrapOffset = JSProxy::kPaddingOffset;
- static const int kConstructTrapOffset = kCallTrapOffset + kPointerSize;
- static const int kPaddingOffset = kConstructTrapOffset + kPointerSize;
- static const int kSize = JSFunction::kSize;
- static const int kPaddingSize = kSize - kPaddingOffset;
-
- STATIC_CHECK(kPaddingSize >= 0);
-
- typedef FixedBodyDescriptor<kHandlerOffset,
- kConstructTrapOffset + kPointerSize,
- kSize> BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunctionProxy);
-};
-
-
-// The JSSet describes EcmaScript Harmony sets
-class JSSet: public JSObject {
- public:
- // [set]: the backing hash set containing keys.
- DECL_ACCESSORS(table, Object)
-
- // Casting.
- static inline JSSet* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSSet)
- DECLARE_VERIFIER(JSSet)
-
- static const int kTableOffset = JSObject::kHeaderSize;
- static const int kSize = kTableOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSSet);
-};
-
-
-// The JSMap describes EcmaScript Harmony maps
-class JSMap: public JSObject {
- public:
- // [table]: the backing hash table mapping keys to values.
- DECL_ACCESSORS(table, Object)
-
- // Casting.
- static inline JSMap* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSMap)
- DECLARE_VERIFIER(JSMap)
-
- static const int kTableOffset = JSObject::kHeaderSize;
- static const int kSize = kTableOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
-};
-
-
-// The JSWeakMap describes EcmaScript Harmony weak maps
-class JSWeakMap: public JSObject {
- public:
- // [table]: the backing hash table mapping keys to values.
- DECL_ACCESSORS(table, Object)
-
- // [next]: linked list of encountered weak maps during GC.
- DECL_ACCESSORS(next, Object)
-
- // Casting.
- static inline JSWeakMap* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSWeakMap)
- DECLARE_VERIFIER(JSWeakMap)
-
- static const int kTableOffset = JSObject::kHeaderSize;
- static const int kNextOffset = kTableOffset + kPointerSize;
- static const int kSize = kNextOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakMap);
-};
-
-
-// Foreign describes objects pointing from JavaScript to C structures.
-// Since they cannot contain references to JS HeapObjects they can be
-// placed in old_data_space.
-class Foreign: public HeapObject {
- public:
- // [address]: field containing the address.
- inline Address foreign_address();
- inline void set_foreign_address(Address value);
-
- // Casting.
- static inline Foreign* cast(Object* obj);
-
- // Dispatched behavior.
- inline void ForeignIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void ForeignIterateBody();
-
- // Dispatched behavior.
- DECLARE_PRINTER(Foreign)
- DECLARE_VERIFIER(Foreign)
-
- // Layout description.
-
- static const int kForeignAddressOffset = HeapObject::kHeaderSize;
- static const int kSize = kForeignAddressOffset + kPointerSize;
-
- STATIC_CHECK(kForeignAddressOffset == Internals::kForeignAddressOffset);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
-};
-
-
-// The JSArray describes JavaScript Arrays
-// Such an array can be in one of two modes:
-// - fast, backing storage is a FixedArray and length <= elements.length();
-// Please note: push and pop can be used to grow and shrink the array.
-// - slow, backing storage is a HashTable with numbers as keys.
-class JSArray: public JSObject {
- public:
- // [length]: The length property.
- DECL_ACCESSORS(length, Object)
-
- // Overload the length setter to skip write barrier when the length
- // is set to a smi. This matches the set function on FixedArray.
- inline void set_length(Smi* length);
-
- MUST_USE_RESULT MaybeObject* JSArrayUpdateLengthFromIndex(uint32_t index,
- Object* value);
-
- // Initialize the array with the given capacity. The function may
- // fail due to out-of-memory situations, but only if the requested
- // capacity is non-zero.
- MUST_USE_RESULT MaybeObject* Initialize(int capacity, int length = 0);
-
- // Initializes the array to a certain length.
- inline bool AllowsSetElementsLength();
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
-
- // Set the content of the array to the content of storage.
- MUST_USE_RESULT inline MaybeObject* SetContent(FixedArrayBase* storage);
-
- // Casting.
- static inline JSArray* cast(Object* obj);
-
- // Uses handles. Ensures that the fixed array backing the JSArray has at
- // least the stated size.
- inline void EnsureSize(int minimum_size_of_backing_fixed_array);
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSArray)
- DECLARE_VERIFIER(JSArray)
-
- // Number of element slots to pre-allocate for an empty array.
- static const int kPreallocatedArrayElements = 4;
-
- // Layout description.
- static const int kLengthOffset = JSObject::kHeaderSize;
- static const int kSize = kLengthOffset + kPointerSize;
-
- private:
- // Expand the fixed array backing of a fast-case JSArray to at least
- // the requested size.
- void Expand(int minimum_size_of_backing_fixed_array);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
-};
-
-
-// JSRegExpResult is just a JSArray with a specific initial map.
-// This initial map adds in-object properties for "index" and "input"
-// properties, as assigned by RegExp.prototype.exec, which allows
-// faster creation of RegExp exec results.
-// This class just holds constants used when creating the result.
-// After creation the result must be treated as a JSArray in all regards.
-class JSRegExpResult: public JSArray {
- public:
- // Offsets of object fields.
- static const int kIndexOffset = JSArray::kSize;
- static const int kInputOffset = kIndexOffset + kPointerSize;
- static const int kSize = kInputOffset + kPointerSize;
- // Indices of in-object properties.
- static const int kIndexIndex = 0;
- static const int kInputIndex = 1;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSRegExpResult);
-};
-
-
-class AccessorInfo: public Struct {
- public:
- DECL_ACCESSORS(name, Object)
- DECL_ACCESSORS(flag, Smi)
- DECL_ACCESSORS(expected_receiver_type, Object)
-
- inline bool all_can_read();
- inline void set_all_can_read(bool value);
-
- inline bool all_can_write();
- inline void set_all_can_write(bool value);
-
- inline bool prohibits_overwriting();
- inline void set_prohibits_overwriting(bool value);
-
- inline PropertyAttributes property_attributes();
- inline void set_property_attributes(PropertyAttributes attributes);
-
- // Checks whether the given receiver is compatible with this accessor.
- inline bool IsCompatibleReceiver(Object* receiver);
-
- static inline AccessorInfo* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_VERIFIER(AccessorInfo)
-
-
- static const int kNameOffset = HeapObject::kHeaderSize;
- static const int kFlagOffset = kNameOffset + kPointerSize;
- static const int kExpectedReceiverTypeOffset = kFlagOffset + kPointerSize;
- static const int kSize = kExpectedReceiverTypeOffset + kPointerSize;
-
- private:
- // Bit positions in flag.
- static const int kAllCanReadBit = 0;
- static const int kAllCanWriteBit = 1;
- static const int kProhibitsOverwritingBit = 2;
- class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorInfo);
-};
-
-
-class DeclaredAccessorDescriptor: public Struct {
- public:
- // TODO(dcarney): Fill out this class.
- DECL_ACCESSORS(internal_field, Smi)
-
- static inline DeclaredAccessorDescriptor* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(DeclaredAccessorDescriptor)
- DECLARE_VERIFIER(DeclaredAccessorDescriptor)
-
- static const int kInternalFieldOffset = HeapObject::kHeaderSize;
- static const int kSize = kInternalFieldOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorDescriptor);
-};
-
-
-class DeclaredAccessorInfo: public AccessorInfo {
- public:
- DECL_ACCESSORS(descriptor, DeclaredAccessorDescriptor)
-
- static inline DeclaredAccessorInfo* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(DeclaredAccessorInfo)
- DECLARE_VERIFIER(DeclaredAccessorInfo)
-
- static const int kDescriptorOffset = AccessorInfo::kSize;
- static const int kSize = kDescriptorOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorInfo);
-};
-
-
-// An accessor must have a getter, but can have no setter.
-//
-// When setting a property, V8 searches accessors in prototypes.
-// If an accessor was found and it does not have a setter,
-// the request is ignored.
-//
-// If the accessor in the prototype has the READ_ONLY property attribute, then
-// a new value is added to the local object when the property is set.
-// This shadows the accessor in the prototype.
-class ExecutableAccessorInfo: public AccessorInfo {
- public:
- DECL_ACCESSORS(getter, Object)
- DECL_ACCESSORS(setter, Object)
- DECL_ACCESSORS(data, Object)
-
- static inline ExecutableAccessorInfo* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExecutableAccessorInfo)
- DECLARE_VERIFIER(ExecutableAccessorInfo)
-
- static const int kGetterOffset = AccessorInfo::kSize;
- static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kDataOffset = kSetterOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExecutableAccessorInfo);
-};
-
-
-// Support for JavaScript accessors: A pair of a getter and a setter. Each
-// accessor can either be
-// * a pointer to a JavaScript function or proxy: a real accessor
-// * undefined: considered an accessor by the spec, too, strangely enough
-// * the hole: an accessor which has not been set
-// * a pointer to a map: a transition used to ensure map sharing
-class AccessorPair: public Struct {
- public:
- DECL_ACCESSORS(getter, Object)
- DECL_ACCESSORS(setter, Object)
-
- static inline AccessorPair* cast(Object* obj);
-
- MUST_USE_RESULT MaybeObject* Copy();
-
- Object* get(AccessorComponent component) {
- return component == ACCESSOR_GETTER ? getter() : setter();
- }
-
- void set(AccessorComponent component, Object* value) {
- if (component == ACCESSOR_GETTER) {
- set_getter(value);
- } else {
- set_setter(value);
- }
- }
-
- // Note: Returns undefined instead in case of a hole.
- Object* GetComponent(AccessorComponent component);
-
- // Set both components, skipping arguments which are a JavaScript null.
- void SetComponents(Object* getter, Object* setter) {
- if (!getter->IsNull()) set_getter(getter);
- if (!setter->IsNull()) set_setter(setter);
- }
-
- bool ContainsAccessor() {
- return IsJSAccessor(getter()) || IsJSAccessor(setter());
- }
-
- // Dispatched behavior.
- DECLARE_PRINTER(AccessorPair)
- DECLARE_VERIFIER(AccessorPair)
-
- static const int kGetterOffset = HeapObject::kHeaderSize;
- static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kSize = kSetterOffset + kPointerSize;
-
- private:
- // Strangely enough, in addition to functions and harmony proxies, the spec
- // requires us to consider undefined as a kind of accessor, too:
- // var obj = {};
- // Object.defineProperty(obj, "foo", {get: undefined});
- // assertTrue("foo" in obj);
- bool IsJSAccessor(Object* obj) {
- return obj->IsSpecFunction() || obj->IsUndefined();
- }
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorPair);
-};
-
-
-class AccessCheckInfo: public Struct {
- public:
- DECL_ACCESSORS(named_callback, Object)
- DECL_ACCESSORS(indexed_callback, Object)
- DECL_ACCESSORS(data, Object)
-
- static inline AccessCheckInfo* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(AccessCheckInfo)
- DECLARE_VERIFIER(AccessCheckInfo)
-
- static const int kNamedCallbackOffset = HeapObject::kHeaderSize;
- static const int kIndexedCallbackOffset = kNamedCallbackOffset + kPointerSize;
- static const int kDataOffset = kIndexedCallbackOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(AccessCheckInfo);
-};
-
-
-class InterceptorInfo: public Struct {
- public:
- DECL_ACCESSORS(getter, Object)
- DECL_ACCESSORS(setter, Object)
- DECL_ACCESSORS(query, Object)
- DECL_ACCESSORS(deleter, Object)
- DECL_ACCESSORS(enumerator, Object)
- DECL_ACCESSORS(data, Object)
- DECL_ACCESSORS(is_fallback, Smi)
-
- static inline InterceptorInfo* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(InterceptorInfo)
- DECLARE_VERIFIER(InterceptorInfo)
-
- static const int kGetterOffset = HeapObject::kHeaderSize;
- static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kQueryOffset = kSetterOffset + kPointerSize;
- static const int kDeleterOffset = kQueryOffset + kPointerSize;
- static const int kEnumeratorOffset = kDeleterOffset + kPointerSize;
- static const int kDataOffset = kEnumeratorOffset + kPointerSize;
- static const int kFallbackOffset = kDataOffset + kPointerSize;
- static const int kSize = kFallbackOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
-};
-
-
-class CallHandlerInfo: public Struct {
- public:
- DECL_ACCESSORS(callback, Object)
- DECL_ACCESSORS(data, Object)
-
- static inline CallHandlerInfo* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(CallHandlerInfo)
- DECLARE_VERIFIER(CallHandlerInfo)
-
- static const int kCallbackOffset = HeapObject::kHeaderSize;
- static const int kDataOffset = kCallbackOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(CallHandlerInfo);
-};
-
-
-class TemplateInfo: public Struct {
- public:
- DECL_ACCESSORS(tag, Object)
- DECL_ACCESSORS(property_list, Object)
-
- DECLARE_VERIFIER(TemplateInfo)
-
- static const int kTagOffset = HeapObject::kHeaderSize;
- static const int kPropertyListOffset = kTagOffset + kPointerSize;
- static const int kHeaderSize = kPropertyListOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
-};
-
-
-class FunctionTemplateInfo: public TemplateInfo {
- public:
- DECL_ACCESSORS(serial_number, Object)
- DECL_ACCESSORS(call_code, Object)
- DECL_ACCESSORS(property_accessors, Object)
- DECL_ACCESSORS(prototype_template, Object)
- DECL_ACCESSORS(parent_template, Object)
- DECL_ACCESSORS(named_property_handler, Object)
- DECL_ACCESSORS(indexed_property_handler, Object)
- DECL_ACCESSORS(instance_template, Object)
- DECL_ACCESSORS(class_name, Object)
- DECL_ACCESSORS(signature, Object)
- DECL_ACCESSORS(instance_call_handler, Object)
- DECL_ACCESSORS(access_check_info, Object)
- DECL_ACCESSORS(flag, Smi)
-
- inline int length();
- inline void set_length(int value);
-
- // Following properties use flag bits.
- DECL_BOOLEAN_ACCESSORS(hidden_prototype)
- DECL_BOOLEAN_ACCESSORS(undetectable)
- // If the bit is set, object instances created by this function
- // requires access check.
- DECL_BOOLEAN_ACCESSORS(needs_access_check)
- DECL_BOOLEAN_ACCESSORS(read_only_prototype)
-
- static inline FunctionTemplateInfo* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(FunctionTemplateInfo)
- DECLARE_VERIFIER(FunctionTemplateInfo)
-
- static const int kSerialNumberOffset = TemplateInfo::kHeaderSize;
- static const int kCallCodeOffset = kSerialNumberOffset + kPointerSize;
- static const int kPropertyAccessorsOffset = kCallCodeOffset + kPointerSize;
- static const int kPrototypeTemplateOffset =
- kPropertyAccessorsOffset + kPointerSize;
- static const int kParentTemplateOffset =
- kPrototypeTemplateOffset + kPointerSize;
- static const int kNamedPropertyHandlerOffset =
- kParentTemplateOffset + kPointerSize;
- static const int kIndexedPropertyHandlerOffset =
- kNamedPropertyHandlerOffset + kPointerSize;
- static const int kInstanceTemplateOffset =
- kIndexedPropertyHandlerOffset + kPointerSize;
- static const int kClassNameOffset = kInstanceTemplateOffset + kPointerSize;
- static const int kSignatureOffset = kClassNameOffset + kPointerSize;
- static const int kInstanceCallHandlerOffset = kSignatureOffset + kPointerSize;
- static const int kAccessCheckInfoOffset =
- kInstanceCallHandlerOffset + kPointerSize;
- static const int kFlagOffset = kAccessCheckInfoOffset + kPointerSize;
- static const int kLengthOffset = kFlagOffset + kPointerSize;
- static const int kSize = kLengthOffset + kPointerSize;
-
- private:
- // Bit position in the flag, from least significant bit position.
- static const int kHiddenPrototypeBit = 0;
- static const int kUndetectableBit = 1;
- static const int kNeedsAccessCheckBit = 2;
- static const int kReadOnlyPrototypeBit = 3;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
-};
-
-
-class ObjectTemplateInfo: public TemplateInfo {
- public:
- DECL_ACCESSORS(constructor, Object)
- DECL_ACCESSORS(internal_field_count, Object)
- DECL_ACCESSORS(has_external_resource, Object)
- DECL_ACCESSORS(use_user_object_comparison, Object)
-
- static inline ObjectTemplateInfo* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(ObjectTemplateInfo)
- DECLARE_VERIFIER(ObjectTemplateInfo)
-
- static const int kConstructorOffset = TemplateInfo::kHeaderSize;
- static const int kInternalFieldCountOffset =
- kConstructorOffset + kPointerSize;
- static const int kHasExternalResourceOffset =
- kInternalFieldCountOffset + kPointerSize;
- static const int kUseUserObjectComparisonOffset =
- kHasExternalResourceOffset + kPointerSize;
- static const int kSize = kUseUserObjectComparisonOffset + kPointerSize;
-};
-
-
-class SignatureInfo: public Struct {
- public:
- DECL_ACCESSORS(receiver, Object)
- DECL_ACCESSORS(args, Object)
-
- static inline SignatureInfo* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(SignatureInfo)
- DECLARE_VERIFIER(SignatureInfo)
-
- static const int kReceiverOffset = Struct::kHeaderSize;
- static const int kArgsOffset = kReceiverOffset + kPointerSize;
- static const int kSize = kArgsOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SignatureInfo);
-};
-
-
-class TypeSwitchInfo: public Struct {
- public:
- DECL_ACCESSORS(types, Object)
-
- static inline TypeSwitchInfo* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(TypeSwitchInfo)
- DECLARE_VERIFIER(TypeSwitchInfo)
-
- static const int kTypesOffset = Struct::kHeaderSize;
- static const int kSize = kTypesOffset + kPointerSize;
-};
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-// The DebugInfo class holds additional information for a function being
-// debugged.
-class DebugInfo: public Struct {
- public:
- // The shared function info for the source being debugged.
- DECL_ACCESSORS(shared, SharedFunctionInfo)
- // Code object for the original code.
- DECL_ACCESSORS(original_code, Code)
- // Code object for the patched code. This code object is the code object
- // currently active for the function.
- DECL_ACCESSORS(code, Code)
- // Fixed array holding status information for each active break point.
- DECL_ACCESSORS(break_points, FixedArray)
-
- // Check if there is a break point at a code position.
- bool HasBreakPoint(int code_position);
- // Get the break point info object for a code position.
- Object* GetBreakPointInfo(int code_position);
- // Clear a break point.
- static void ClearBreakPoint(Handle<DebugInfo> debug_info,
- int code_position,
- Handle<Object> break_point_object);
- // Set a break point.
- static void SetBreakPoint(Handle<DebugInfo> debug_info, int code_position,
- int source_position, int statement_position,
- Handle<Object> break_point_object);
- // Get the break point objects for a code position.
- Object* GetBreakPointObjects(int code_position);
- // Find the break point info holding this break point object.
- static Object* FindBreakPointInfo(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object);
- // Get the number of break points for this function.
- int GetBreakPointCount();
-
- static inline DebugInfo* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(DebugInfo)
- DECLARE_VERIFIER(DebugInfo)
-
- static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
- static const int kOriginalCodeIndex = kSharedFunctionInfoIndex + kPointerSize;
- static const int kPatchedCodeIndex = kOriginalCodeIndex + kPointerSize;
- static const int kActiveBreakPointsCountIndex =
- kPatchedCodeIndex + kPointerSize;
- static const int kBreakPointsStateIndex =
- kActiveBreakPointsCountIndex + kPointerSize;
- static const int kSize = kBreakPointsStateIndex + kPointerSize;
-
- private:
- static const int kNoBreakPointInfo = -1;
-
- // Lookup the index in the break_points array for a code position.
- int GetBreakPointInfoIndex(int code_position);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(DebugInfo);
-};
-
-
-// The BreakPointInfo class holds information for break points set in a
-// function. The DebugInfo object holds a BreakPointInfo object for each code
-// position with one or more break points.
-class BreakPointInfo: public Struct {
- public:
- // The position in the code for the break point.
- DECL_ACCESSORS(code_position, Smi)
- // The position in the source for the break position.
- DECL_ACCESSORS(source_position, Smi)
- // The position in the source for the last statement before this break
- // position.
- DECL_ACCESSORS(statement_position, Smi)
- // List of related JavaScript break points.
- DECL_ACCESSORS(break_point_objects, Object)
-
- // Removes a break point.
- static void ClearBreakPoint(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
- // Set a break point.
- static void SetBreakPoint(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
- // Check if break point info has this break point object.
- static bool HasBreakPointObject(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
- // Get the number of break points for this code position.
- int GetBreakPointCount();
-
- static inline BreakPointInfo* cast(Object* obj);
-
- // Dispatched behavior.
- DECLARE_PRINTER(BreakPointInfo)
- DECLARE_VERIFIER(BreakPointInfo)
-
- static const int kCodePositionIndex = Struct::kHeaderSize;
- static const int kSourcePositionIndex = kCodePositionIndex + kPointerSize;
- static const int kStatementPositionIndex =
- kSourcePositionIndex + kPointerSize;
- static const int kBreakPointObjectsIndex =
- kStatementPositionIndex + kPointerSize;
- static const int kSize = kBreakPointObjectsIndex + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(BreakPointInfo);
-};
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-#undef DECL_BOOLEAN_ACCESSORS
-#undef DECL_ACCESSORS
-#undef DECLARE_VERIFIER
-
-#define VISITOR_SYNCHRONIZATION_TAGS_LIST(V) \
- V(kStringTable, "string_table", "(Internalized strings)") \
- V(kExternalStringsTable, "external_strings_table", "(External strings)") \
- V(kStrongRootList, "strong_root_list", "(Strong roots)") \
- V(kInternalizedString, "internalized_string", "(Internal string)") \
- V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
- V(kTop, "top", "(Isolate)") \
- V(kRelocatable, "relocatable", "(Relocatable)") \
- V(kDebug, "debug", "(Debugger)") \
- V(kCompilationCache, "compilationcache", "(Compilation cache)") \
- V(kHandleScope, "handlescope", "(Handle scope)") \
- V(kBuiltins, "builtins", "(Builtins)") \
- V(kGlobalHandles, "globalhandles", "(Global handles)") \
- V(kThreadManager, "threadmanager", "(Thread manager)") \
- V(kExtensions, "Extensions", "(Extensions)")
-
-class VisitorSynchronization : public AllStatic {
- public:
-#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
- enum SyncTag {
- VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_ENUM)
- kNumberOfSyncTags
- };
-#undef DECLARE_ENUM
-
- static const char* const kTags[kNumberOfSyncTags];
- static const char* const kTagNames[kNumberOfSyncTags];
-};
-
-// Abstract base class for visiting, and optionally modifying, the
-// pointers contained in Objects. Used in GC and serialization/deserialization.
-class ObjectVisitor BASE_EMBEDDED {
- public:
- virtual ~ObjectVisitor() {}
-
- // Visits a contiguous arrays of pointers in the half-open range
- // [start, end). Any or all of the values may be modified on return.
- virtual void VisitPointers(Object** start, Object** end) = 0;
-
- // To allow lazy clearing of inline caches the visitor has
- // a rich interface for iterating over Code objects..
-
- // Visits a code target in the instruction stream.
- virtual void VisitCodeTarget(RelocInfo* rinfo);
-
- // Visits a code entry in a JS function.
- virtual void VisitCodeEntry(Address entry_address);
-
- // Visits a global property cell reference in the instruction stream.
- virtual void VisitGlobalPropertyCell(RelocInfo* rinfo);
-
- // Visits a runtime entry in the instruction stream.
- virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
-
- // Visits the resource of an ASCII or two-byte string.
- virtual void VisitExternalAsciiString(
- v8::String::ExternalAsciiStringResource** resource) {}
- virtual void VisitExternalTwoByteString(
- v8::String::ExternalStringResource** resource) {}
-
- // Visits a debug call target in the instruction stream.
- virtual void VisitDebugTarget(RelocInfo* rinfo);
-
- // Visits the byte sequence in a function's prologue that contains information
- // about the code's age.
- virtual void VisitCodeAgeSequence(RelocInfo* rinfo);
-
- // Handy shorthand for visiting a single pointer.
- virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
-
- // Visit pointer embedded into a code object.
- virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
-
- // Visits a contiguous arrays of external references (references to the C++
- // heap) in the half-open range [start, end). Any or all of the values
- // may be modified on return.
- virtual void VisitExternalReferences(Address* start, Address* end) {}
-
- virtual void VisitExternalReference(RelocInfo* rinfo);
-
- inline void VisitExternalReference(Address* p) {
- VisitExternalReferences(p, p + 1);
- }
-
- // Visits a handle that has an embedder-assigned class ID.
- virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
-
- // Intended for serialization/deserialization checking: insert, or
- // check for the presence of, a tag at this position in the stream.
- // Also used for marking up GC roots in heap snapshots.
- virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
-};
-
-
-class StructBodyDescriptor : public
- FlexibleBodyDescriptor<HeapObject::kHeaderSize> {
- public:
- static inline int SizeOf(Map* map, HeapObject* object) {
- return map->instance_size();
- }
-};
-
-
-// BooleanBit is a helper class for setting and getting a bit in an
-// integer or Smi.
-class BooleanBit : public AllStatic {
- public:
- static inline bool get(Smi* smi, int bit_position) {
- return get(smi->value(), bit_position);
- }
-
- static inline bool get(int value, int bit_position) {
- return (value & (1 << bit_position)) != 0;
- }
-
- static inline Smi* set(Smi* smi, int bit_position, bool v) {
- return Smi::FromInt(set(smi->value(), bit_position, v));
- }
-
- static inline int set(int value, int bit_position, bool v) {
- if (v) {
- value |= (1 << bit_position);
- } else {
- value &= ~(1 << bit_position);
- }
- return value;
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_OBJECTS_H_
diff --git a/src/3rdparty/v8/src/once.cc b/src/3rdparty/v8/src/once.cc
deleted file mode 100644
index 37fe369..0000000
--- a/src/3rdparty/v8/src/once.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "once.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <sched.h>
-#endif
-
-#include "atomicops.h"
-#include "checks.h"
-
-namespace v8 {
-namespace internal {
-
-void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
- AtomicWord state = Acquire_Load(once);
- // Fast path. The provided function was already executed.
- if (state == ONCE_STATE_DONE) {
- return;
- }
-
- // The function execution did not complete yet. The once object can be in one
- // of the two following states:
- // - UNINITIALIZED: We are the first thread calling this function.
- // - EXECUTING_FUNCTION: Another thread is already executing the function.
- //
- // First, try to change the state from UNINITIALIZED to EXECUTING_FUNCTION
- // atomically.
- state = Acquire_CompareAndSwap(
- once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_FUNCTION);
- if (state == ONCE_STATE_UNINITIALIZED) {
- // We are the first thread to call this function, so we have to call the
- // function.
- init_func(arg);
- Release_Store(once, ONCE_STATE_DONE);
- } else {
- // Another thread has already started executing the function. We need to
- // wait until it completes the initialization.
- while (state == ONCE_STATE_EXECUTING_FUNCTION) {
-#ifdef _WIN32
- ::Sleep(0);
-#else
- sched_yield();
-#endif
- state = Acquire_Load(once);
- }
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/once.h b/src/3rdparty/v8/src/once.h
deleted file mode 100644
index a44b8fa..0000000
--- a/src/3rdparty/v8/src/once.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// emulates google3/base/once.h
-//
-// This header is intended to be included only by v8's internal code. Users
-// should not use this directly.
-//
-// This is basically a portable version of pthread_once().
-//
-// This header declares:
-// * A type called OnceType.
-// * A macro V8_DECLARE_ONCE() which declares a (global) variable of type
-// OnceType.
-// * A function CallOnce(OnceType* once, void (*init_func)()).
-// This function, when invoked multiple times given the same OnceType object,
-// will invoke init_func on the first call only, and will make sure none of
-// the calls return before that first call to init_func has finished.
-//
-// Additionally, the following features are supported:
-// * A macro V8_ONCE_INIT which is expanded into the expression used to
-// initialize a OnceType. This is only useful when clients embed a OnceType
-// into a structure of their own and want to initialize it statically.
-// * The user can provide a parameter which CallOnce() forwards to the
-// user-provided function when it is called. Usage example:
-// CallOnce(&my_once, &MyFunctionExpectingIntArgument, 10);
-// * This implementation guarantees that OnceType is a POD (i.e. no static
-// initializer generated).
-//
-// This implements a way to perform lazy initialization. It's more efficient
-// than using mutexes as no lock is needed if initialization has already
-// happened.
-//
-// Example usage:
-// void Init();
-// V8_DECLARE_ONCE(once_init);
-//
-// // Calls Init() exactly once.
-// void InitOnce() {
-// CallOnce(&once_init, &Init);
-// }
-//
-// Note that if CallOnce() is called before main() has begun, it must
-// only be called by the thread that will eventually call main() -- that is,
-// the thread that performs dynamic initialization. In general this is a safe
-// assumption since people don't usually construct threads before main() starts,
-// but it is technically not guaranteed. Unfortunately, Win32 provides no way
-// whatsoever to statically-initialize its synchronization primitives, so our
-// only choice is to assume that dynamic initialization is single-threaded.
-
-#ifndef V8_ONCE_H_
-#define V8_ONCE_H_
-
-#include "atomicops.h"
-
-namespace v8 {
-namespace internal {
-
-typedef AtomicWord OnceType;
-
-#define V8_ONCE_INIT 0
-
-#define V8_DECLARE_ONCE(NAME) ::v8::internal::OnceType NAME
-
-enum {
- ONCE_STATE_UNINITIALIZED = 0,
- ONCE_STATE_EXECUTING_FUNCTION = 1,
- ONCE_STATE_DONE = 2
-};
-
-typedef void (*NoArgFunction)();
-typedef void (*PointerArgFunction)(void* arg);
-
-template <typename T>
-struct OneArgFunction {
- typedef void (*type)(T);
-};
-
-void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg);
-
-inline void CallOnce(OnceType* once, NoArgFunction init_func) {
- if (Acquire_Load(once) != ONCE_STATE_DONE) {
- CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), NULL);
- }
-}
-
-
-template <typename Arg>
-inline void CallOnce(OnceType* once,
- typename OneArgFunction<Arg*>::type init_func, Arg* arg) {
- if (Acquire_Load(once) != ONCE_STATE_DONE) {
- CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
- static_cast<void*>(arg));
- }
-}
-
-} } // namespace v8::internal
-
-#endif // V8_ONCE_H_
diff --git a/src/3rdparty/v8/src/optimizing-compiler-thread.cc b/src/3rdparty/v8/src/optimizing-compiler-thread.cc
deleted file mode 100644
index 39b45b1..0000000
--- a/src/3rdparty/v8/src/optimizing-compiler-thread.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// For Windows CE, Windows headers need to be included first as they define ASSERT
-#ifdef _WIN32_WCE
-# include "win32-headers.h"
-#endif
-
-#include "optimizing-compiler-thread.h"
-
-#include "v8.h"
-
-#include "hydrogen.h"
-#include "isolate.h"
-#include "v8threads.h"
-
-namespace v8 {
-namespace internal {
-
-
-void OptimizingCompilerThread::Run() {
-#ifdef DEBUG
- thread_id_ = ThreadId::Current().ToInteger();
-#endif
- Isolate::SetIsolateThreadLocals(isolate_, NULL);
-
- int64_t epoch = 0;
- if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();
-
- while (true) {
- input_queue_semaphore_->Wait();
- Logger::TimerEventScope timer(
- isolate_, Logger::TimerEventScope::v8_recompile_parallel);
- if (Acquire_Load(&stop_thread_)) {
- stop_semaphore_->Signal();
- if (FLAG_trace_parallel_recompilation) {
- time_spent_total_ = OS::Ticks() - epoch;
- }
- return;
- }
-
- int64_t compiling_start = 0;
- if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();
-
- Heap::RelocationLock relocation_lock(isolate_->heap());
- OptimizingCompiler* optimizing_compiler = NULL;
- input_queue_.Dequeue(&optimizing_compiler);
- Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
-
- ASSERT(!optimizing_compiler->info()->closure()->IsOptimized());
-
- OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
- ASSERT(status != OptimizingCompiler::FAILED);
- // Prevent an unused-variable error in release mode.
- USE(status);
-
- output_queue_.Enqueue(optimizing_compiler);
- if (!FLAG_manual_parallel_recompilation) {
- isolate_->stack_guard()->RequestCodeReadyEvent();
- } else {
- // In manual mode, do not trigger a code ready event.
- // Instead, wait for the optimized functions to be installed manually.
- output_queue_semaphore_->Signal();
- }
-
- if (FLAG_trace_parallel_recompilation) {
- time_spent_compiling_ += OS::Ticks() - compiling_start;
- }
- }
-}
-
-
-void OptimizingCompilerThread::Stop() {
- Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
- input_queue_semaphore_->Signal();
- stop_semaphore_->Wait();
-
- if (FLAG_trace_parallel_recompilation) {
- double compile_time = static_cast<double>(time_spent_compiling_);
- double total_time = static_cast<double>(time_spent_total_);
- double percentage = (compile_time * 100) / total_time;
- PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
- }
-}
-
-
-void OptimizingCompilerThread::InstallOptimizedFunctions() {
- HandleScope handle_scope(isolate_);
- int functions_installed = 0;
- while (!output_queue_.IsEmpty()) {
- if (FLAG_manual_parallel_recompilation) {
- output_queue_semaphore_->Wait();
- }
- OptimizingCompiler* compiler = NULL;
- output_queue_.Dequeue(&compiler);
- Compiler::InstallOptimizedCode(compiler);
- functions_installed++;
- }
- if (FLAG_trace_parallel_recompilation && functions_installed != 0) {
- PrintF(" ** Installed %d function(s).\n", functions_installed);
- }
-}
-
-
-Handle<SharedFunctionInfo>
- OptimizingCompilerThread::InstallNextOptimizedFunction() {
- ASSERT(FLAG_manual_parallel_recompilation);
- output_queue_semaphore_->Wait();
- OptimizingCompiler* compiler = NULL;
- output_queue_.Dequeue(&compiler);
- Handle<SharedFunctionInfo> shared = compiler->info()->shared_info();
- Compiler::InstallOptimizedCode(compiler);
- return shared;
-}
-
-
-void OptimizingCompilerThread::QueueForOptimization(
- OptimizingCompiler* optimizing_compiler) {
- ASSERT(IsQueueAvailable());
- Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
- input_queue_.Enqueue(optimizing_compiler);
- input_queue_semaphore_->Signal();
-}
-
-#ifdef DEBUG
-bool OptimizingCompilerThread::IsOptimizerThread() {
- if (!FLAG_parallel_recompilation) return false;
- return ThreadId::Current().ToInteger() == thread_id_;
-}
-#endif
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/optimizing-compiler-thread.h b/src/3rdparty/v8/src/optimizing-compiler-thread.h
deleted file mode 100644
index 7aad78c..0000000
--- a/src/3rdparty/v8/src/optimizing-compiler-thread.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_OPTIMIZING_COMPILER_THREAD_H_
-#define V8_OPTIMIZING_COMPILER_THREAD_H_
-
-#include "atomicops.h"
-#include "flags.h"
-#include "platform.h"
-#include "unbound-queue.h"
-
-namespace v8 {
-namespace internal {
-
-class HOptimizedGraphBuilder;
-class OptimizingCompiler;
-class SharedFunctionInfo;
-
-class OptimizingCompilerThread : public Thread {
- public:
- explicit OptimizingCompilerThread(Isolate *isolate) :
- Thread("OptimizingCompilerThread"),
-#ifdef DEBUG
- thread_id_(0),
-#endif
- isolate_(isolate),
- stop_semaphore_(OS::CreateSemaphore(0)),
- input_queue_semaphore_(OS::CreateSemaphore(0)),
- output_queue_semaphore_(OS::CreateSemaphore(0)),
- time_spent_compiling_(0),
- time_spent_total_(0) {
- NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
- NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
- }
-
- void Run();
- void Stop();
- void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
- void InstallOptimizedFunctions();
-
- // Wait for the next optimized function and install it.
- Handle<SharedFunctionInfo> InstallNextOptimizedFunction();
-
- inline bool IsQueueAvailable() {
- // We don't need a barrier since we have a data dependency right
- // after.
- Atomic32 current_length = NoBarrier_Load(&queue_length_);
-
- // This can be queried only from the execution thread.
- ASSERT(!IsOptimizerThread());
- // Since only the execution thread increments queue_length_ and
- // only one thread can run inside an Isolate at one time, a direct
- // doesn't introduce a race -- queue_length_ may decreased in
- // meantime, but not increased.
- return (current_length < FLAG_parallel_recompilation_queue_length);
- }
-
-#ifdef DEBUG
- bool IsOptimizerThread();
-#endif
-
- ~OptimizingCompilerThread() {
- delete output_queue_semaphore_; // Only used for manual mode.
- delete input_queue_semaphore_;
- delete stop_semaphore_;
- }
-
- private:
-#ifdef DEBUG
- int thread_id_;
-#endif
-
- Isolate* isolate_;
- Semaphore* stop_semaphore_;
- Semaphore* input_queue_semaphore_;
- Semaphore* output_queue_semaphore_;
- UnboundQueue<OptimizingCompiler*> input_queue_;
- UnboundQueue<OptimizingCompiler*> output_queue_;
- volatile AtomicWord stop_thread_;
- volatile Atomic32 queue_length_;
- int64_t time_spent_compiling_;
- int64_t time_spent_total_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_OPTIMIZING_COMPILER_THREAD_H_
diff --git a/src/3rdparty/v8/src/parser.cc b/src/3rdparty/v8/src/parser.cc
deleted file mode 100644
index b93cf43..0000000
--- a/src/3rdparty/v8/src/parser.cc
+++ /dev/null
@@ -1,5980 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "ast.h"
-#include "bootstrapper.h"
-#include "char-predicates-inl.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "func-name-inferrer.h"
-#include "messages.h"
-#include "parser.h"
-#include "platform.h"
-#include "preparser.h"
-#include "runtime.h"
-#include "scanner-character-streams.h"
-#include "scopeinfo.h"
-#include "string-stream.h"
-
-namespace v8 {
-namespace internal {
-
-// PositionStack is used for on-stack allocation of token positions for
-// new expressions. Please look at ParseNewExpression.
-
-class PositionStack {
- public:
- explicit PositionStack(bool* ok) : top_(NULL), ok_(ok) {}
- ~PositionStack() {
- ASSERT(!*ok_ || is_empty());
- USE(ok_);
- }
-
- class Element {
- public:
- Element(PositionStack* stack, int value) {
- previous_ = stack->top();
- value_ = value;
- stack->set_top(this);
- }
-
- private:
- Element* previous() { return previous_; }
- int value() { return value_; }
- friend class PositionStack;
- Element* previous_;
- int value_;
- };
-
- bool is_empty() { return top_ == NULL; }
- int pop() {
- ASSERT(!is_empty());
- int result = top_->value();
- top_ = top_->previous();
- return result;
- }
-
- private:
- Element* top() { return top_; }
- void set_top(Element* value) { top_ = value; }
- Element* top_;
- bool* ok_;
-};
-
-
-RegExpBuilder::RegExpBuilder(Zone* zone)
- : zone_(zone),
- pending_empty_(false),
- characters_(NULL),
- terms_(),
- alternatives_()
-#ifdef DEBUG
- , last_added_(ADD_NONE)
-#endif
- {}
-
-
-void RegExpBuilder::FlushCharacters() {
- pending_empty_ = false;
- if (characters_ != NULL) {
- RegExpTree* atom = new(zone()) RegExpAtom(characters_->ToConstVector());
- characters_ = NULL;
- text_.Add(atom, zone());
- LAST(ADD_ATOM);
- }
-}
-
-
-void RegExpBuilder::FlushText() {
- FlushCharacters();
- int num_text = text_.length();
- if (num_text == 0) {
- return;
- } else if (num_text == 1) {
- terms_.Add(text_.last(), zone());
- } else {
- RegExpText* text = new(zone()) RegExpText(zone());
- for (int i = 0; i < num_text; i++)
- text_.Get(i)->AppendToText(text, zone());
- terms_.Add(text, zone());
- }
- text_.Clear();
-}
-
-
-void RegExpBuilder::AddCharacter(uc16 c) {
- pending_empty_ = false;
- if (characters_ == NULL) {
- characters_ = new(zone()) ZoneList<uc16>(4, zone());
- }
- characters_->Add(c, zone());
- LAST(ADD_CHAR);
-}
-
-
-void RegExpBuilder::AddEmpty() {
- pending_empty_ = true;
-}
-
-
-void RegExpBuilder::AddAtom(RegExpTree* term) {
- if (term->IsEmpty()) {
- AddEmpty();
- return;
- }
- if (term->IsTextElement()) {
- FlushCharacters();
- text_.Add(term, zone());
- } else {
- FlushText();
- terms_.Add(term, zone());
- }
- LAST(ADD_ATOM);
-}
-
-
-void RegExpBuilder::AddAssertion(RegExpTree* assert) {
- FlushText();
- terms_.Add(assert, zone());
- LAST(ADD_ASSERT);
-}
-
-
-void RegExpBuilder::NewAlternative() {
- FlushTerms();
-}
-
-
-void RegExpBuilder::FlushTerms() {
- FlushText();
- int num_terms = terms_.length();
- RegExpTree* alternative;
- if (num_terms == 0) {
- alternative = RegExpEmpty::GetInstance();
- } else if (num_terms == 1) {
- alternative = terms_.last();
- } else {
- alternative = new(zone()) RegExpAlternative(terms_.GetList(zone()));
- }
- alternatives_.Add(alternative, zone());
- terms_.Clear();
- LAST(ADD_NONE);
-}
-
-
-RegExpTree* RegExpBuilder::ToRegExp() {
- FlushTerms();
- int num_alternatives = alternatives_.length();
- if (num_alternatives == 0) {
- return RegExpEmpty::GetInstance();
- }
- if (num_alternatives == 1) {
- return alternatives_.last();
- }
- return new(zone()) RegExpDisjunction(alternatives_.GetList(zone()));
-}
-
-
-void RegExpBuilder::AddQuantifierToAtom(int min,
- int max,
- RegExpQuantifier::Type type) {
- if (pending_empty_) {
- pending_empty_ = false;
- return;
- }
- RegExpTree* atom;
- if (characters_ != NULL) {
- ASSERT(last_added_ == ADD_CHAR);
- // Last atom was character.
- Vector<const uc16> char_vector = characters_->ToConstVector();
- int num_chars = char_vector.length();
- if (num_chars > 1) {
- Vector<const uc16> prefix = char_vector.SubVector(0, num_chars - 1);
- text_.Add(new(zone()) RegExpAtom(prefix), zone());
- char_vector = char_vector.SubVector(num_chars - 1, num_chars);
- }
- characters_ = NULL;
- atom = new(zone()) RegExpAtom(char_vector);
- FlushText();
- } else if (text_.length() > 0) {
- ASSERT(last_added_ == ADD_ATOM);
- atom = text_.RemoveLast();
- FlushText();
- } else if (terms_.length() > 0) {
- ASSERT(last_added_ == ADD_ATOM);
- atom = terms_.RemoveLast();
- if (atom->max_match() == 0) {
- // Guaranteed to only match an empty string.
- LAST(ADD_TERM);
- if (min == 0) {
- return;
- }
- terms_.Add(atom, zone());
- return;
- }
- } else {
- // Only call immediately after adding an atom or character!
- UNREACHABLE();
- return;
- }
- terms_.Add(new(zone()) RegExpQuantifier(min, max, type, atom), zone());
- LAST(ADD_TERM);
-}
-
-
-Handle<String> Parser::LookupSymbol(int symbol_id) {
- // Length of symbol cache is the number of identified symbols.
- // If we are larger than that, or negative, it's not a cached symbol.
- // This might also happen if there is no preparser symbol data, even
- // if there is some preparser data.
- if (static_cast<unsigned>(symbol_id)
- >= static_cast<unsigned>(symbol_cache_.length())) {
- if (scanner().is_literal_ascii()) {
- return isolate()->factory()->InternalizeOneByteString(
- Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
- } else {
- return isolate()->factory()->InternalizeTwoByteString(
- scanner().literal_utf16_string());
- }
- }
- return LookupCachedSymbol(symbol_id);
-}
-
-
-Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
- // Make sure the cache is large enough to hold the symbol identifier.
- if (symbol_cache_.length() <= symbol_id) {
- // Increase length to index + 1.
- symbol_cache_.AddBlock(Handle<String>::null(),
- symbol_id + 1 - symbol_cache_.length(), zone());
- }
- Handle<String> result = symbol_cache_.at(symbol_id);
- if (result.is_null()) {
- if (scanner().is_literal_ascii()) {
- result = isolate()->factory()->InternalizeOneByteString(
- Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
- } else {
- result = isolate()->factory()->InternalizeTwoByteString(
- scanner().literal_utf16_string());
- }
- symbol_cache_.at(symbol_id) = result;
- return result;
- }
- isolate()->counters()->total_preparse_symbols_skipped()->Increment();
- return result;
-}
-
-
-FunctionEntry ScriptDataImpl::GetFunctionEntry(int start) {
- // The current pre-data entry must be a FunctionEntry with the given
- // start position.
- if ((function_index_ + FunctionEntry::kSize <= store_.length())
- && (static_cast<int>(store_[function_index_]) == start)) {
- int index = function_index_;
- function_index_ += FunctionEntry::kSize;
- return FunctionEntry(store_.SubVector(index,
- index + FunctionEntry::kSize));
- }
- return FunctionEntry();
-}
-
-
-int ScriptDataImpl::GetSymbolIdentifier() {
- return ReadNumber(&symbol_data_);
-}
-
-
-bool ScriptDataImpl::SanityCheck() {
- // Check that the header data is valid and doesn't specify
- // point to positions outside the store.
- if (store_.length() < PreparseDataConstants::kHeaderSize) return false;
- if (magic() != PreparseDataConstants::kMagicNumber) return false;
- if (version() != PreparseDataConstants::kCurrentVersion) return false;
- if (has_error()) {
- // Extra sane sanity check for error message encoding.
- if (store_.length() <= PreparseDataConstants::kHeaderSize
- + PreparseDataConstants::kMessageTextPos) {
- return false;
- }
- if (Read(PreparseDataConstants::kMessageStartPos) >
- Read(PreparseDataConstants::kMessageEndPos)) {
- return false;
- }
- unsigned arg_count = Read(PreparseDataConstants::kMessageArgCountPos);
- int pos = PreparseDataConstants::kMessageTextPos;
- for (unsigned int i = 0; i <= arg_count; i++) {
- if (store_.length() <= PreparseDataConstants::kHeaderSize + pos) {
- return false;
- }
- int length = static_cast<int>(Read(pos));
- if (length < 0) return false;
- pos += 1 + length;
- }
- if (store_.length() < PreparseDataConstants::kHeaderSize + pos) {
- return false;
- }
- return true;
- }
- // Check that the space allocated for function entries is sane.
- int functions_size =
- static_cast<int>(store_[PreparseDataConstants::kFunctionsSizeOffset]);
- if (functions_size < 0) return false;
- if (functions_size % FunctionEntry::kSize != 0) return false;
- // Check that the count of symbols is non-negative.
- int symbol_count =
- static_cast<int>(store_[PreparseDataConstants::kSymbolCountOffset]);
- if (symbol_count < 0) return false;
- // Check that the total size has room for header and function entries.
- int minimum_size =
- PreparseDataConstants::kHeaderSize + functions_size;
- if (store_.length() < minimum_size) return false;
- return true;
-}
-
-
-
-const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) {
- int length = start[0];
- char* result = NewArray<char>(length + 1);
- for (int i = 0; i < length; i++) {
- result[i] = start[i + 1];
- }
- result[length] = '\0';
- if (chars != NULL) *chars = length;
- return result;
-}
-
-Scanner::Location ScriptDataImpl::MessageLocation() {
- int beg_pos = Read(PreparseDataConstants::kMessageStartPos);
- int end_pos = Read(PreparseDataConstants::kMessageEndPos);
- return Scanner::Location(beg_pos, end_pos);
-}
-
-
-const char* ScriptDataImpl::BuildMessage() {
- unsigned* start = ReadAddress(PreparseDataConstants::kMessageTextPos);
- return ReadString(start, NULL);
-}
-
-
-Vector<const char*> ScriptDataImpl::BuildArgs() {
- int arg_count = Read(PreparseDataConstants::kMessageArgCountPos);
- const char** array = NewArray<const char*>(arg_count);
- // Position after text found by skipping past length field and
- // length field content words.
- int pos = PreparseDataConstants::kMessageTextPos + 1
- + Read(PreparseDataConstants::kMessageTextPos);
- for (int i = 0; i < arg_count; i++) {
- int count = 0;
- array[i] = ReadString(ReadAddress(pos), &count);
- pos += count + 1;
- }
- return Vector<const char*>(array, arg_count);
-}
-
-
-unsigned ScriptDataImpl::Read(int position) {
- return store_[PreparseDataConstants::kHeaderSize + position];
-}
-
-
-unsigned* ScriptDataImpl::ReadAddress(int position) {
- return &store_[PreparseDataConstants::kHeaderSize + position];
-}
-
-
-Scope* Parser::NewScope(Scope* parent, ScopeType type) {
- Scope* result = new(zone()) Scope(parent, type, zone());
- result->Initialize();
- return result;
-}
-
-
-// ----------------------------------------------------------------------------
-// Target is a support class to facilitate manipulation of the
-// Parser's target_stack_ (the stack of potential 'break' and
-// 'continue' statement targets). Upon construction, a new target is
-// added; it is removed upon destruction.
-
-class Target BASE_EMBEDDED {
- public:
- Target(Target** variable, AstNode* node)
- : variable_(variable), node_(node), previous_(*variable) {
- *variable = this;
- }
-
- ~Target() {
- *variable_ = previous_;
- }
-
- Target* previous() { return previous_; }
- AstNode* node() { return node_; }
-
- private:
- Target** variable_;
- AstNode* node_;
- Target* previous_;
-};
-
-
-class TargetScope BASE_EMBEDDED {
- public:
- explicit TargetScope(Target** variable)
- : variable_(variable), previous_(*variable) {
- *variable = NULL;
- }
-
- ~TargetScope() {
- *variable_ = previous_;
- }
-
- private:
- Target** variable_;
- Target* previous_;
-};
-
-
-// ----------------------------------------------------------------------------
-// FunctionState and BlockState together implement the parser's scope stack.
-// The parser's current scope is in top_scope_. The BlockState and
-// FunctionState constructors push on the scope stack and the destructors
-// pop. They are also used to hold the parser's per-function and per-block
-// state.
-
-class Parser::BlockState BASE_EMBEDDED {
- public:
- BlockState(Parser* parser, Scope* scope)
- : parser_(parser),
- outer_scope_(parser->top_scope_) {
- parser->top_scope_ = scope;
- }
-
- ~BlockState() { parser_->top_scope_ = outer_scope_; }
-
- private:
- Parser* parser_;
- Scope* outer_scope_;
-};
-
-
-Parser::FunctionState::FunctionState(Parser* parser,
- Scope* scope,
- Isolate* isolate)
- : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
- next_handler_index_(0),
- expected_property_count_(0),
- only_simple_this_property_assignments_(false),
- this_property_assignments_(isolate->factory()->empty_fixed_array()),
- parser_(parser),
- outer_function_state_(parser->current_function_state_),
- outer_scope_(parser->top_scope_),
- saved_ast_node_id_(isolate->ast_node_id()),
- factory_(isolate, parser->zone()) {
- parser->top_scope_ = scope;
- parser->current_function_state_ = this;
- isolate->set_ast_node_id(BailoutId::FirstUsable().ToInt());
-}
-
-
-Parser::FunctionState::~FunctionState() {
- parser_->top_scope_ = outer_scope_;
- parser_->current_function_state_ = outer_function_state_;
- if (outer_function_state_ != NULL) {
- parser_->isolate()->set_ast_node_id(saved_ast_node_id_);
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// The CHECK_OK macro is a convenient macro to enforce error
-// handling for functions that may fail (by returning !*ok).
-//
-// CAUTION: This macro appends extra statements after a call,
-// thus it must never be used where only a single statement
-// is correct (e.g. an if statement branch w/o braces)!
-
-#define CHECK_OK ok); \
- if (!*ok) return NULL; \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-#define CHECK_FAILED /**/); \
- if (failed_) return NULL; \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-// ----------------------------------------------------------------------------
-// Implementation of Parser
-
-Parser::Parser(CompilationInfo* info,
- int parser_flags,
- v8::Extension* extension,
- ScriptDataImpl* pre_data)
- : isolate_(info->isolate()),
- symbol_cache_(pre_data ? pre_data->symbol_count() : 0, info->zone()),
- script_(info->script()),
- scanner_(isolate_->unicode_cache()),
- reusable_preparser_(NULL),
- top_scope_(NULL),
- current_function_state_(NULL),
- target_stack_(NULL),
- extension_(extension),
- pre_data_(pre_data),
- fni_(NULL),
- allow_natives_syntax_((parser_flags & kAllowNativesSyntax) != 0),
- allow_lazy_((parser_flags & kAllowLazy) != 0),
- allow_modules_((parser_flags & kAllowModules) != 0),
- stack_overflow_(false),
- parenthesized_function_(false),
- zone_(info->zone()),
- info_(info) {
- ASSERT(!script_.is_null());
- isolate_->set_ast_node_id(0);
- if ((parser_flags & kLanguageModeMask) == EXTENDED_MODE) {
- scanner().SetHarmonyScoping(true);
- }
- if ((parser_flags & kAllowModules) != 0) {
- scanner().SetHarmonyModules(true);
- }
-}
-
-
-FunctionLiteral* Parser::ParseProgram() {
- ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
- HistogramTimerScope timer(isolate()->counters()->parse());
- Handle<String> source(String::cast(script_->source()));
- isolate()->counters()->total_parse_size()->Increment(source->length());
- int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
- fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
-
- // Initialize parser state.
- source->TryFlatten();
- FunctionLiteral* result;
- if (source->IsExternalTwoByteString()) {
- // Notice that the stream is destroyed at the end of the branch block.
- // The last line of the blocks can't be moved outside, even though they're
- // identical calls.
- ExternalTwoByteStringUtf16CharacterStream stream(
- Handle<ExternalTwoByteString>::cast(source), 0, source->length());
- scanner_.Initialize(&stream);
- result = DoParseProgram(info(), source, &zone_scope);
- } else {
- GenericStringUtf16CharacterStream stream(source, 0, source->length());
- scanner_.Initialize(&stream);
- result = DoParseProgram(info(), source, &zone_scope);
- }
-
- if (FLAG_trace_parse && result != NULL) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
- if (info()->is_eval()) {
- PrintF("[parsing eval");
- } else if (info()->script()->name()->IsString()) {
- String* name = String::cast(info()->script()->name());
- SmartArrayPointer<char> name_chars = name->ToCString();
- PrintF("[parsing script: %s", *name_chars);
- } else {
- PrintF("[parsing script");
- }
- PrintF(" - took %0.3f ms]\n", ms);
- }
- return result;
-}
-
-
-FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
- Handle<String> source,
- ZoneScope* zone_scope) {
- ASSERT(top_scope_ == NULL);
- ASSERT(target_stack_ == NULL);
- if (pre_data_ != NULL) pre_data_->Initialize();
-
- Handle<String> no_name = isolate()->factory()->empty_string();
-
- FunctionLiteral* result = NULL;
- { Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
- info->SetGlobalScope(scope);
- if (!info->context().is_null()) {
- scope = Scope::DeserializeScopeChain(*info->context(), scope, zone());
- }
- if (info->is_eval()) {
- if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
- scope = NewScope(scope, EVAL_SCOPE);
- }
- } else if (info->is_global()) {
- scope = NewScope(scope, GLOBAL_SCOPE);
- }
- scope->set_start_position(0);
- scope->set_end_position(source->length());
-
- // Compute the parsing mode.
- Mode mode = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
- if (allow_natives_syntax_ || extension_ != NULL || scope->is_eval_scope()) {
- mode = PARSE_EAGERLY;
- }
- ParsingModeScope parsing_mode(this, mode);
-
- FunctionState function_state(this, scope, isolate()); // Enters 'scope'.
- top_scope_->SetLanguageMode(info->language_mode());
- if (info->is_qml_mode()) {
- scope->EnableQmlModeFlag();
- }
- ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
- bool ok = true;
- int beg_loc = scanner().location().beg_pos;
- ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok);
- if (ok && !top_scope_->is_classic_mode()) {
- CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
- }
-
- if (ok && is_extended_mode()) {
- CheckConflictingVarDeclarations(top_scope_, &ok);
- }
-
- if (ok) {
- result = factory()->NewFunctionLiteral(
- no_name,
- top_scope_,
- body,
- function_state.materialized_literal_count(),
- function_state.expected_property_count(),
- function_state.handler_count(),
- function_state.only_simple_this_property_assignments(),
- function_state.this_property_assignments(),
- 0,
- FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::ANONYMOUS_EXPRESSION,
- FunctionLiteral::kGlobalOrEval,
- FunctionLiteral::kNotParenthesized);
- result->set_ast_properties(factory()->visitor()->ast_properties());
- } else if (stack_overflow_) {
- isolate()->StackOverflow();
- }
- }
-
- // Make sure the target stack is empty.
- ASSERT(target_stack_ == NULL);
-
- // If there was a syntax error we have to get rid of the AST
- // and it is not safe to do so before the scope has been deleted.
- if (result == NULL) zone_scope->DeleteOnExit();
- return result;
-}
-
-
-FunctionLiteral* Parser::ParseLazy() {
- ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
- HistogramTimerScope timer(isolate()->counters()->parse_lazy());
- Handle<String> source(String::cast(script_->source()));
- isolate()->counters()->total_parse_size()->Increment(source->length());
- int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
- Handle<SharedFunctionInfo> shared_info = info()->shared_info();
-
- // Initialize parser state.
- source->TryFlatten();
- FunctionLiteral* result;
- if (source->IsExternalTwoByteString()) {
- ExternalTwoByteStringUtf16CharacterStream stream(
- Handle<ExternalTwoByteString>::cast(source),
- shared_info->start_position(),
- shared_info->end_position());
- result = ParseLazy(&stream, &zone_scope);
- } else {
- GenericStringUtf16CharacterStream stream(source,
- shared_info->start_position(),
- shared_info->end_position());
- result = ParseLazy(&stream, &zone_scope);
- }
-
- if (FLAG_trace_parse && result != NULL) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
- SmartArrayPointer<char> name_chars = result->debug_name()->ToCString();
- PrintF("[parsing function: %s - took %0.3f ms]\n", *name_chars, ms);
- }
- return result;
-}
-
-
-FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
- ZoneScope* zone_scope) {
- Handle<SharedFunctionInfo> shared_info = info()->shared_info();
- scanner_.Initialize(source);
- ASSERT(top_scope_ == NULL);
- ASSERT(target_stack_ == NULL);
-
- Handle<String> name(String::cast(shared_info->name()));
- fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
- fni_->PushEnclosingName(name);
-
- ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
-
- // Place holder for the result.
- FunctionLiteral* result = NULL;
-
- {
- // Parse the function literal.
- Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
- info()->SetGlobalScope(scope);
- if (!info()->closure().is_null()) {
- scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
- zone());
- }
- FunctionState function_state(this, scope, isolate());
- ASSERT(scope->language_mode() != STRICT_MODE || !info()->is_classic_mode());
- ASSERT(scope->language_mode() != EXTENDED_MODE ||
- info()->is_extended_mode());
- ASSERT(info()->language_mode() == shared_info->language_mode());
- scope->SetLanguageMode(shared_info->language_mode());
- if (shared_info->qml_mode()) {
- top_scope_->EnableQmlModeFlag();
- }
- FunctionLiteral::Type type = shared_info->is_expression()
- ? (shared_info->is_anonymous()
- ? FunctionLiteral::ANONYMOUS_EXPRESSION
- : FunctionLiteral::NAMED_EXPRESSION)
- : FunctionLiteral::DECLARATION;
- bool ok = true;
- result = ParseFunctionLiteral(name,
- false, // Strict mode name already checked.
- RelocInfo::kNoPosition,
- type,
- &ok);
- // Make sure the results agree.
- ASSERT(ok == (result != NULL));
- }
-
- // Make sure the target stack is empty.
- ASSERT(target_stack_ == NULL);
-
- // If there was a stack overflow we have to get rid of AST and it is
- // not safe to do before scope has been deleted.
- if (result == NULL) {
- zone_scope->DeleteOnExit();
- if (stack_overflow_) isolate()->StackOverflow();
- } else {
- Handle<String> inferred_name(shared_info->inferred_name());
- result->set_inferred_name(inferred_name);
- }
- return result;
-}
-
-
-Handle<String> Parser::GetSymbol(bool* ok) {
- int symbol_id = -1;
- if (pre_data() != NULL) {
- symbol_id = pre_data()->GetSymbolIdentifier();
- }
- return LookupSymbol(symbol_id);
-}
-
-
-void Parser::ReportMessage(const char* type, Vector<const char*> args) {
- Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, type, args);
-}
-
-
-void Parser::ReportMessage(const char* type, Vector<Handle<String> > args) {
- Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, type, args);
-}
-
-
-void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* type,
- Vector<const char*> args) {
- MessageLocation location(script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i]));
- elements->set(i, *arg_string);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(type, array);
- isolate()->Throw(*result, &location);
-}
-
-
-void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* type,
- Vector<Handle<String> > args) {
- MessageLocation location(script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- elements->set(i, *args[i]);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(type, array);
- isolate()->Throw(*result, &location);
-}
-
-
-// A ThisNamedPropertyAssignmentFinder finds and marks statements of the form
-// this.x = ...;, where x is a named property. It also determines whether a
-// function contains only assignments of this type.
-class ThisNamedPropertyAssignmentFinder {
- public:
- ThisNamedPropertyAssignmentFinder(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- only_simple_this_property_assignments_(true),
- names_(0, zone),
- assigned_arguments_(0, zone),
- assigned_constants_(0, zone),
- zone_(zone) {
- }
-
- static Assignment* AsAssignment(Statement* stat) {
- if (stat == NULL) return NULL;
- ExpressionStatement* exp_stat = stat->AsExpressionStatement();
- if (exp_stat == NULL) return NULL;
- return exp_stat->expression()->AsAssignment();
- }
-
- void Update(Scope* scope, Statement* stat) {
- // Bail out if function already has property assignment that are
- // not simple this property assignments.
- if (!only_simple_this_property_assignments_) {
- return;
- }
-
- // Check whether this statement is of the form this.x = ...;
- Assignment* assignment = AsAssignment(stat);
- if (IsThisPropertyAssignment(assignment)) {
- HandleThisPropertyAssignment(scope, assignment);
- } else {
- only_simple_this_property_assignments_ = false;
- }
- }
-
- // Returns whether only statements of the form this.x = y; where y is either a
- // constant or a function argument was encountered.
- bool only_simple_this_property_assignments() {
- return only_simple_this_property_assignments_;
- }
-
- // Returns a fixed array containing three elements for each assignment of the
- // form this.x = y;
- Handle<FixedArray> GetThisPropertyAssignments() {
- if (names_.is_empty()) {
- return isolate_->factory()->empty_fixed_array();
- }
- ASSERT_EQ(names_.length(), assigned_arguments_.length());
- ASSERT_EQ(names_.length(), assigned_constants_.length());
- Handle<FixedArray> assignments =
- isolate_->factory()->NewFixedArray(names_.length() * 3);
- for (int i = 0; i < names_.length(); ++i) {
- assignments->set(i * 3, *names_[i]);
- assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_[i]));
- assignments->set(i * 3 + 2, *assigned_constants_[i]);
- }
- return assignments;
- }
-
- private:
- bool IsThisPropertyAssignment(Assignment* assignment) {
- if (assignment != NULL) {
- Property* property = assignment->target()->AsProperty();
- return assignment->op() == Token::ASSIGN
- && property != NULL
- && property->obj()->AsVariableProxy() != NULL
- && property->obj()->AsVariableProxy()->is_this();
- }
- return false;
- }
-
- void HandleThisPropertyAssignment(Scope* scope, Assignment* assignment) {
- // Check that the property assigned to is a named property, which is not
- // __proto__.
- Property* property = assignment->target()->AsProperty();
- ASSERT(property != NULL);
- Literal* literal = property->key()->AsLiteral();
- uint32_t dummy;
- if (literal != NULL &&
- literal->handle()->IsString() &&
- !String::cast(*(literal->handle()))->Equals(
- isolate_->heap()->proto_string()) &&
- !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
- Handle<String> key = Handle<String>::cast(literal->handle());
-
- // Check whether the value assigned is either a constant or matches the
- // name of one of the arguments to the function.
- if (assignment->value()->AsLiteral() != NULL) {
- // Constant assigned.
- Literal* literal = assignment->value()->AsLiteral();
- AssignmentFromConstant(key, literal->handle());
- return;
- } else if (assignment->value()->AsVariableProxy() != NULL) {
- // Variable assigned.
- Handle<String> name =
- assignment->value()->AsVariableProxy()->name();
- // Check whether the variable assigned matches an argument name.
- for (int i = 0; i < scope->num_parameters(); i++) {
- if (*scope->parameter(i)->name() == *name) {
- // Assigned from function argument.
- AssignmentFromParameter(key, i);
- return;
- }
- }
- }
- }
- // It is not a simple "this.x = value;" assignment with a constant
- // or parameter value.
- AssignmentFromSomethingElse();
- }
-
-
-
-
- // We will potentially reorder the property assignments, so they must be
- // simple enough that the ordering does not matter.
- void AssignmentFromParameter(Handle<String> name, int index) {
- EnsureInitialized();
- for (int i = 0; i < names_.length(); ++i) {
- if (name->Equals(*names_[i])) {
- assigned_arguments_[i] = index;
- assigned_constants_[i] = isolate_->factory()->undefined_value();
- return;
- }
- }
- names_.Add(name, zone());
- assigned_arguments_.Add(index, zone());
- assigned_constants_.Add(isolate_->factory()->undefined_value(), zone());
- }
-
- void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
- EnsureInitialized();
- for (int i = 0; i < names_.length(); ++i) {
- if (name->Equals(*names_[i])) {
- assigned_arguments_[i] = -1;
- assigned_constants_[i] = value;
- return;
- }
- }
- names_.Add(name, zone());
- assigned_arguments_.Add(-1, zone());
- assigned_constants_.Add(value, zone());
- }
-
- void AssignmentFromSomethingElse() {
- // The this assignment is not a simple one.
- only_simple_this_property_assignments_ = false;
- }
-
- void EnsureInitialized() {
- if (names_.capacity() == 0) {
- ASSERT(assigned_arguments_.capacity() == 0);
- ASSERT(assigned_constants_.capacity() == 0);
- names_.Initialize(4, zone());
- assigned_arguments_.Initialize(4, zone());
- assigned_constants_.Initialize(4, zone());
- }
- }
-
- Zone* zone() const { return zone_; }
-
- Isolate* isolate_;
- bool only_simple_this_property_assignments_;
- ZoneStringList names_;
- ZoneList<int> assigned_arguments_;
- ZoneObjectList assigned_constants_;
- Zone* zone_;
-};
-
-
-void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
- int end_token,
- bool is_eval,
- bool is_global,
- bool* ok) {
- // SourceElements ::
- // (ModuleElement)* <end_token>
-
- // Allocate a target stack to use for this set of source
- // elements. This way, all scripts and functions get their own
- // target stack thus avoiding illegal breaks and continues across
- // functions.
- TargetScope scope(&this->target_stack_);
-
- ASSERT(processor != NULL);
- ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate(),
- zone());
- bool directive_prologue = true; // Parsing directive prologue.
-
- while (peek() != end_token) {
- if (directive_prologue && peek() != Token::STRING) {
- directive_prologue = false;
- }
-
- Scanner::Location token_loc = scanner().peek_location();
- Statement* stat;
- if (is_global && !is_eval) {
- stat = ParseModuleElement(NULL, CHECK_OK);
- } else {
- stat = ParseBlockElement(NULL, CHECK_OK);
- }
- if (stat == NULL || stat->IsEmpty()) {
- directive_prologue = false; // End of directive prologue.
- continue;
- }
-
- if (directive_prologue) {
- // A shot at a directive.
- ExpressionStatement* e_stat;
- Literal* literal;
- // Still processing directive prologue?
- if ((e_stat = stat->AsExpressionStatement()) != NULL &&
- (literal = e_stat->expression()->AsLiteral()) != NULL &&
- literal->handle()->IsString()) {
- Handle<String> directive = Handle<String>::cast(literal->handle());
-
- // Check "use strict" directive (ES5 14.1).
- if (top_scope_->is_classic_mode() &&
- directive->Equals(isolate()->heap()->use_strict_string()) &&
- token_loc.end_pos - token_loc.beg_pos ==
- isolate()->heap()->use_strict_string()->length() + 2) {
- // TODO(mstarzinger): Global strict eval calls, need their own scope
- // as specified in ES5 10.4.2(3). The correct fix would be to always
- // add this scope in DoParseProgram(), but that requires adaptations
- // all over the code base, so we go with a quick-fix for now.
- // In the same manner, we have to patch the parsing mode.
- if (is_eval && !top_scope_->is_eval_scope()) {
- ASSERT(top_scope_->is_global_scope());
- Scope* scope = NewScope(top_scope_, EVAL_SCOPE);
- scope->set_start_position(top_scope_->start_position());
- scope->set_end_position(top_scope_->end_position());
- top_scope_ = scope;
- mode_ = PARSE_EAGERLY;
- }
- // TODO(ES6): Fix entering extended mode, once it is specified.
- top_scope_->SetLanguageMode(FLAG_harmony_scoping
- ? EXTENDED_MODE : STRICT_MODE);
- // "use strict" is the only directive for now.
- directive_prologue = false;
- }
- } else {
- // End of the directive prologue.
- directive_prologue = false;
- }
- }
-
- // Find and mark all assignments to named properties in this (this.x =)
- if (top_scope_->is_function_scope()) {
- this_property_assignment_finder.Update(top_scope_, stat);
- }
- processor->Add(stat, zone());
- }
-
- // Propagate the collected information on this property assignments.
- if (top_scope_->is_function_scope()) {
- bool only_simple_this_property_assignments =
- this_property_assignment_finder.only_simple_this_property_assignments()
- && top_scope_->declarations()->length() == 0;
- if (only_simple_this_property_assignments) {
- current_function_state_->SetThisPropertyAssignmentInfo(
- only_simple_this_property_assignments,
- this_property_assignment_finder.GetThisPropertyAssignments());
- }
- }
-
- return 0;
-}
-
-
-Statement* Parser::ParseModuleElement(ZoneStringList* labels,
- bool* ok) {
- // (Ecma 262 5th Edition, clause 14):
- // SourceElement:
- // Statement
- // FunctionDeclaration
- //
- // In harmony mode we allow additionally the following productions
- // ModuleElement:
- // LetDeclaration
- // ConstDeclaration
- // ModuleDeclaration
- // ImportDeclaration
- // ExportDeclaration
-
- switch (peek()) {
- case Token::FUNCTION:
- return ParseFunctionDeclaration(NULL, ok);
- case Token::LET:
- case Token::CONST:
- return ParseVariableStatement(kModuleElement, NULL, ok);
- case Token::IMPORT:
- return ParseImportDeclaration(ok);
- case Token::EXPORT:
- return ParseExportDeclaration(ok);
- default: {
- Statement* stmt = ParseStatement(labels, CHECK_OK);
- // Handle 'module' as a context-sensitive keyword.
- if (FLAG_harmony_modules &&
- peek() == Token::IDENTIFIER &&
- !scanner().HasAnyLineTerminatorBeforeNext() &&
- stmt != NULL) {
- ExpressionStatement* estmt = stmt->AsExpressionStatement();
- if (estmt != NULL &&
- estmt->expression()->AsVariableProxy() != NULL &&
- estmt->expression()->AsVariableProxy()->name()->Equals(
- isolate()->heap()->module_string()) &&
- !scanner().literal_contains_escapes()) {
- return ParseModuleDeclaration(NULL, ok);
- }
- }
- return stmt;
- }
- }
-}
-
-
-Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
- // ModuleDeclaration:
- // 'module' Identifier Module
-
- Handle<String> name = ParseIdentifier(CHECK_OK);
-
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Module %s...\n", name->ToAsciiArray());
-#endif
-
- Module* module = ParseModule(CHECK_OK);
- VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface());
- Declaration* declaration =
- factory()->NewModuleDeclaration(proxy, module, top_scope_);
- Declare(declaration, true, CHECK_OK);
-
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Module %s.\n", name->ToAsciiArray());
-
- if (FLAG_print_interfaces) {
- PrintF("module %s : ", name->ToAsciiArray());
- module->interface()->Print();
- }
-#endif
-
- if (names) names->Add(name, zone());
- if (module->body() == NULL)
- return factory()->NewEmptyStatement();
- else
- return factory()->NewModuleStatement(proxy, module->body());
-}
-
-
-Module* Parser::ParseModule(bool* ok) {
- // Module:
- // '{' ModuleElement '}'
- // '=' ModulePath ';'
- // 'at' String ';'
-
- switch (peek()) {
- case Token::LBRACE:
- return ParseModuleLiteral(ok);
-
- case Token::ASSIGN: {
- Expect(Token::ASSIGN, CHECK_OK);
- Module* result = ParseModulePath(CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return result;
- }
-
- default: {
- ExpectContextualKeyword("at", CHECK_OK);
- Module* result = ParseModuleUrl(CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return result;
- }
- }
-}
-
-
-Module* Parser::ParseModuleLiteral(bool* ok) {
- // Module:
- // '{' ModuleElement '}'
-
- // Construct block expecting 16 statements.
- Block* body = factory()->NewBlock(NULL, 16, false);
-#ifdef DEBUG
- if (FLAG_print_interface_details) PrintF("# Literal ");
-#endif
- Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
-
- Expect(Token::LBRACE, CHECK_OK);
- scope->set_start_position(scanner().location().beg_pos);
- scope->SetLanguageMode(EXTENDED_MODE);
-
- {
- BlockState block_state(this, scope);
- TargetCollector collector(zone());
- Target target(&this->target_stack_, &collector);
- Target target_body(&this->target_stack_, body);
-
- while (peek() != Token::RBRACE) {
- Statement* stat = ParseModuleElement(NULL, CHECK_OK);
- if (stat && !stat->IsEmpty()) {
- body->AddStatement(stat, zone());
- }
- }
- }
-
- Expect(Token::RBRACE, CHECK_OK);
- scope->set_end_position(scanner().location().end_pos);
- body->set_scope(scope);
-
- // Check that all exports are bound.
- Interface* interface = scope->interface();
- for (Interface::Iterator it = interface->iterator();
- !it.done(); it.Advance()) {
- if (scope->LocalLookup(it.name()) == NULL) {
- Handle<String> name(it.name());
- ReportMessage("module_export_undefined",
- Vector<Handle<String> >(&name, 1));
- *ok = false;
- return NULL;
- }
- }
-
- interface->MakeModule(ok);
- ASSERT(*ok);
- interface->Freeze(ok);
- ASSERT(*ok);
- return factory()->NewModuleLiteral(body, interface);
-}
-
-
-Module* Parser::ParseModulePath(bool* ok) {
- // ModulePath:
- // Identifier
- // ModulePath '.' Identifier
-
- Module* result = ParseModuleVariable(CHECK_OK);
- while (Check(Token::PERIOD)) {
- Handle<String> name = ParseIdentifierName(CHECK_OK);
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Path .%s ", name->ToAsciiArray());
-#endif
- Module* member = factory()->NewModulePath(result, name);
- result->interface()->Add(name, member->interface(), zone(), ok);
- if (!*ok) {
-#ifdef DEBUG
- if (FLAG_print_interfaces) {
- PrintF("PATH TYPE ERROR at '%s'\n", name->ToAsciiArray());
- PrintF("result: ");
- result->interface()->Print();
- PrintF("member: ");
- member->interface()->Print();
- }
-#endif
- ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
- return NULL;
- }
- result = member;
- }
-
- return result;
-}
-
-
-Module* Parser::ParseModuleVariable(bool* ok) {
- // ModulePath:
- // Identifier
-
- Handle<String> name = ParseIdentifier(CHECK_OK);
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Module variable %s ", name->ToAsciiArray());
-#endif
- VariableProxy* proxy = top_scope_->NewUnresolved(
- factory(), name, Interface::NewModule(zone()),
- scanner().location().beg_pos);
-
- return factory()->NewModuleVariable(proxy);
-}
-
-
-Module* Parser::ParseModuleUrl(bool* ok) {
- // Module:
- // String
-
- Expect(Token::STRING, CHECK_OK);
- Handle<String> symbol = GetSymbol(CHECK_OK);
-
- // TODO(ES6): Request JS resource from environment...
-
-#ifdef DEBUG
- if (FLAG_print_interface_details) PrintF("# Url ");
-#endif
-
- // Create an empty literal as long as the feature isn't finished.
- USE(symbol);
- Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
- Block* body = factory()->NewBlock(NULL, 1, false);
- body->set_scope(scope);
- Interface* interface = scope->interface();
- Module* result = factory()->NewModuleLiteral(body, interface);
- interface->Freeze(ok);
- ASSERT(*ok);
- interface->Unify(scope->interface(), zone(), ok);
- ASSERT(*ok);
- return result;
-}
-
-
-Module* Parser::ParseModuleSpecifier(bool* ok) {
- // ModuleSpecifier:
- // String
- // ModulePath
-
- if (peek() == Token::STRING) {
- return ParseModuleUrl(ok);
- } else {
- return ParseModulePath(ok);
- }
-}
-
-
-Block* Parser::ParseImportDeclaration(bool* ok) {
- // ImportDeclaration:
- // 'import' IdentifierName (',' IdentifierName)* 'from' ModuleSpecifier ';'
- //
- // TODO(ES6): implement destructuring ImportSpecifiers
-
- Expect(Token::IMPORT, CHECK_OK);
- ZoneStringList names(1, zone());
-
- Handle<String> name = ParseIdentifierName(CHECK_OK);
- names.Add(name, zone());
- while (peek() == Token::COMMA) {
- Consume(Token::COMMA);
- name = ParseIdentifierName(CHECK_OK);
- names.Add(name, zone());
- }
-
- ExpectContextualKeyword("from", CHECK_OK);
- Module* module = ParseModuleSpecifier(CHECK_OK);
- ExpectSemicolon(CHECK_OK);
-
- // Generate a separate declaration for each identifier.
- // TODO(ES6): once we implement destructuring, make that one declaration.
- Block* block = factory()->NewBlock(NULL, 1, true);
- for (int i = 0; i < names.length(); ++i) {
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Import %s ", names[i]->ToAsciiArray());
-#endif
- Interface* interface = Interface::NewUnknown(zone());
- module->interface()->Add(names[i], interface, zone(), ok);
- if (!*ok) {
-#ifdef DEBUG
- if (FLAG_print_interfaces) {
- PrintF("IMPORT TYPE ERROR at '%s'\n", names[i]->ToAsciiArray());
- PrintF("module: ");
- module->interface()->Print();
- }
-#endif
- ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
- return NULL;
- }
- VariableProxy* proxy = NewUnresolved(names[i], LET, interface);
- Declaration* declaration =
- factory()->NewImportDeclaration(proxy, module, top_scope_);
- Declare(declaration, true, CHECK_OK);
- }
-
- return block;
-}
-
-
-Statement* Parser::ParseExportDeclaration(bool* ok) {
- // ExportDeclaration:
- // 'export' Identifier (',' Identifier)* ';'
- // 'export' VariableDeclaration
- // 'export' FunctionDeclaration
- // 'export' ModuleDeclaration
- //
- // TODO(ES6): implement structuring ExportSpecifiers
-
- Expect(Token::EXPORT, CHECK_OK);
-
- Statement* result = NULL;
- ZoneStringList names(1, zone());
- switch (peek()) {
- case Token::IDENTIFIER: {
- Handle<String> name = ParseIdentifier(CHECK_OK);
- // Handle 'module' as a context-sensitive keyword.
- if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("module"))) {
- names.Add(name, zone());
- while (peek() == Token::COMMA) {
- Consume(Token::COMMA);
- name = ParseIdentifier(CHECK_OK);
- names.Add(name, zone());
- }
- ExpectSemicolon(CHECK_OK);
- result = factory()->NewEmptyStatement();
- } else {
- result = ParseModuleDeclaration(&names, CHECK_OK);
- }
- break;
- }
-
- case Token::FUNCTION:
- result = ParseFunctionDeclaration(&names, CHECK_OK);
- break;
-
- case Token::VAR:
- case Token::LET:
- case Token::CONST:
- result = ParseVariableStatement(kModuleElement, &names, CHECK_OK);
- break;
-
- default:
- *ok = false;
- ReportUnexpectedToken(scanner().current_token());
- return NULL;
- }
-
- // Extract declared names into export declarations and interface.
- Interface* interface = top_scope_->interface();
- for (int i = 0; i < names.length(); ++i) {
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Export %s ", names[i]->ToAsciiArray());
-#endif
- Interface* inner = Interface::NewUnknown(zone());
- interface->Add(names[i], inner, zone(), CHECK_OK);
- if (!*ok)
- return NULL;
- VariableProxy* proxy = NewUnresolved(names[i], LET, inner);
- USE(proxy);
- // TODO(rossberg): Rethink whether we actually need to store export
- // declarations (for compilation?).
- // ExportDeclaration* declaration =
- // factory()->NewExportDeclaration(proxy, top_scope_);
- // top_scope_->AddDeclaration(declaration);
- }
-
- ASSERT(result != NULL);
- return result;
-}
-
-
-Statement* Parser::ParseBlockElement(ZoneStringList* labels,
- bool* ok) {
- // (Ecma 262 5th Edition, clause 14):
- // SourceElement:
- // Statement
- // FunctionDeclaration
- //
- // In harmony mode we allow additionally the following productions
- // BlockElement (aka SourceElement):
- // LetDeclaration
- // ConstDeclaration
-
- switch (peek()) {
- case Token::FUNCTION:
- return ParseFunctionDeclaration(NULL, ok);
- case Token::LET:
- case Token::CONST:
- return ParseVariableStatement(kModuleElement, NULL, ok);
- default:
- return ParseStatement(labels, ok);
- }
-}
-
-
-Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
- // Statement ::
- // Block
- // VariableStatement
- // EmptyStatement
- // ExpressionStatement
- // IfStatement
- // IterationStatement
- // ContinueStatement
- // BreakStatement
- // ReturnStatement
- // WithStatement
- // LabelledStatement
- // SwitchStatement
- // ThrowStatement
- // TryStatement
- // DebuggerStatement
-
- // Note: Since labels can only be used by 'break' and 'continue'
- // statements, which themselves are only valid within blocks,
- // iterations or 'switch' statements (i.e., BreakableStatements),
- // labels can be simply ignored in all other cases; except for
- // trivial labeled break statements 'label: break label' which is
- // parsed into an empty statement.
-
- // Keep the source position of the statement
- int statement_pos = scanner().peek_location().beg_pos;
- Statement* stmt = NULL;
- switch (peek()) {
- case Token::LBRACE:
- return ParseBlock(labels, ok);
-
- case Token::CONST: // fall through
- case Token::LET:
- case Token::VAR:
- stmt = ParseVariableStatement(kStatement, NULL, ok);
- break;
-
- case Token::SEMICOLON:
- Next();
- return factory()->NewEmptyStatement();
-
- case Token::IF:
- stmt = ParseIfStatement(labels, ok);
- break;
-
- case Token::DO:
- stmt = ParseDoWhileStatement(labels, ok);
- break;
-
- case Token::WHILE:
- stmt = ParseWhileStatement(labels, ok);
- break;
-
- case Token::FOR:
- stmt = ParseForStatement(labels, ok);
- break;
-
- case Token::CONTINUE:
- stmt = ParseContinueStatement(ok);
- break;
-
- case Token::BREAK:
- stmt = ParseBreakStatement(labels, ok);
- break;
-
- case Token::RETURN:
- stmt = ParseReturnStatement(ok);
- break;
-
- case Token::WITH:
- stmt = ParseWithStatement(labels, ok);
- break;
-
- case Token::SWITCH:
- stmt = ParseSwitchStatement(labels, ok);
- break;
-
- case Token::THROW:
- stmt = ParseThrowStatement(ok);
- break;
-
- case Token::TRY: {
- // NOTE: It is somewhat complicated to have labels on
- // try-statements. When breaking out of a try-finally statement,
- // one must take great care not to treat it as a
- // fall-through. It is much easier just to wrap the entire
- // try-statement in a statement block and put the labels there
- Block* result = factory()->NewBlock(labels, 1, false);
- Target target(&this->target_stack_, result);
- TryStatement* statement = ParseTryStatement(CHECK_OK);
- if (statement) {
- statement->set_statement_pos(statement_pos);
- }
- if (result) result->AddStatement(statement, zone());
- return result;
- }
-
- case Token::FUNCTION: {
- // FunctionDeclaration is only allowed in the context of SourceElements
- // (Ecma 262 5th Edition, clause 14):
- // SourceElement:
- // Statement
- // FunctionDeclaration
- // Common language extension is to allow function declaration in place
- // of any statement. This language extension is disabled in strict mode.
- if (!top_scope_->is_classic_mode()) {
- ReportMessageAt(scanner().peek_location(), "strict_function",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- return ParseFunctionDeclaration(NULL, ok);
- }
-
- case Token::DEBUGGER:
- stmt = ParseDebuggerStatement(ok);
- break;
-
- default:
- stmt = ParseExpressionOrLabelledStatement(labels, ok);
- }
-
- // Store the source position of the statement
- if (stmt != NULL) stmt->set_statement_pos(statement_pos);
- return stmt;
-}
-
-
-VariableProxy* Parser::NewUnresolved(
- Handle<String> name, VariableMode mode, Interface* interface) {
- // If we are inside a function, a declaration of a var/const variable is a
- // truly local variable, and the scope of the variable is always the function
- // scope.
- // Let/const variables in harmony mode are always added to the immediately
- // enclosing scope.
- return DeclarationScope(mode)->NewUnresolved(
- factory(), name, interface, scanner().location().beg_pos);
-}
-
-
-void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
- VariableProxy* proxy = declaration->proxy();
- Handle<String> name = proxy->name();
- VariableMode mode = declaration->mode();
- Scope* declaration_scope = DeclarationScope(mode);
- Variable* var = NULL;
-
- // If a suitable scope exists, then we can statically declare this
- // variable and also set its mode. In any case, a Declaration node
- // will be added to the scope so that the declaration can be added
- // to the corresponding activation frame at runtime if necessary.
- // For instance declarations inside an eval scope need to be added
- // to the calling function context.
- // Similarly, strict mode eval scope does not leak variable declarations to
- // the caller's scope so we declare all locals, too.
- if (declaration_scope->is_function_scope() ||
- declaration_scope->is_strict_or_extended_eval_scope() ||
- declaration_scope->is_block_scope() ||
- declaration_scope->is_module_scope() ||
- declaration_scope->is_global_scope()) {
- // Declare the variable in the declaration scope.
- // For the global scope, we have to check for collisions with earlier
- // (i.e., enclosing) global scopes, to maintain the illusion of a single
- // global scope.
- var = declaration_scope->is_global_scope()
- ? declaration_scope->Lookup(name)
- : declaration_scope->LocalLookup(name);
- if (var == NULL) {
- // Declare the name.
- var = declaration_scope->DeclareLocal(
- name, mode, declaration->initialization(), proxy->interface());
- } else if ((mode != VAR || var->mode() != VAR) &&
- (!declaration_scope->is_global_scope() ||
- IsLexicalVariableMode(mode) ||
- IsLexicalVariableMode(var->mode()))) {
- // The name was declared in this scope before; check for conflicting
- // re-declarations. We have a conflict if either of the declarations is
- // not a var (in the global scope, we also have to ignore legacy const for
- // compatibility). There is similar code in runtime.cc in the Declare
- // functions. The function CheckNonConflictingScope checks for conflicting
- // var and let bindings from different scopes whereas this is a check for
- // conflicting declarations within the same scope. This check also covers
- // the special case
- //
- // function () { let x; { var x; } }
- //
- // because the var declaration is hoisted to the function scope where 'x'
- // is already bound.
- ASSERT(IsDeclaredVariableMode(var->mode()));
- if (is_extended_mode()) {
- // In harmony mode we treat re-declarations as early errors. See
- // ES5 16 for a definition of early errors.
- SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
- const char* elms[2] = { "Variable", *c_string };
- Vector<const char*> args(elms, 2);
- ReportMessage("redeclaration", args);
- *ok = false;
- return;
- }
- Handle<String> type_string =
- isolate()->factory()->NewStringFromUtf8(CStrVector("Variable"),
- TENURED);
- Expression* expression =
- NewThrowTypeError(isolate()->factory()->redeclaration_string(),
- type_string, name);
- declaration_scope->SetIllegalRedeclaration(expression);
- }
- }
-
- // We add a declaration node for every declaration. The compiler
- // will only generate code if necessary. In particular, declarations
- // for inner local variables that do not represent functions won't
- // result in any generated code.
- //
- // Note that we always add an unresolved proxy even if it's not
- // used, simply because we don't know in this method (w/o extra
- // parameters) if the proxy is needed or not. The proxy will be
- // bound during variable resolution time unless it was pre-bound
- // below.
- //
- // WARNING: This will lead to multiple declaration nodes for the
- // same variable if it is declared several times. This is not a
- // semantic issue as long as we keep the source order, but it may be
- // a performance issue since it may lead to repeated
- // Runtime::DeclareContextSlot() calls.
- declaration_scope->AddDeclaration(declaration);
-
- if (mode == CONST && declaration_scope->is_global_scope()) {
- // For global const variables we bind the proxy to a variable.
- ASSERT(resolve); // should be set by all callers
- Variable::Kind kind = Variable::NORMAL;
- var = new(zone()) Variable(declaration_scope,
- name,
- mode,
- true,
- kind,
- kNeedsInitialization);
- } else if (declaration_scope->is_eval_scope() &&
- declaration_scope->is_classic_mode()) {
- // For variable declarations in a non-strict eval scope the proxy is bound
- // to a lookup variable to force a dynamic declaration using the
- // DeclareContextSlot runtime function.
- Variable::Kind kind = Variable::NORMAL;
- var = new(zone()) Variable(declaration_scope,
- name,
- mode,
- true,
- kind,
- declaration->initialization());
- var->AllocateTo(Variable::LOOKUP, -1);
- resolve = true;
- }
-
- // If requested and we have a local variable, bind the proxy to the variable
- // at parse-time. This is used for functions (and consts) declared inside
- // statements: the corresponding function (or const) variable must be in the
- // function scope and not a statement-local scope, e.g. as provided with a
- // 'with' statement:
- //
- // with (obj) {
- // function f() {}
- // }
- //
- // which is translated into:
- //
- // with (obj) {
- // // in this case this is not: 'var f; f = function () {};'
- // var f = function () {};
- // }
- //
- // Note that if 'f' is accessed from inside the 'with' statement, it
- // will be allocated in the context (because we must be able to look
- // it up dynamically) but it will also be accessed statically, i.e.,
- // with a context slot index and a context chain length for this
- // initialization code. Thus, inside the 'with' statement, we need
- // both access to the static and the dynamic context chain; the
- // runtime needs to provide both.
- if (resolve && var != NULL) {
- if (declaration_scope->is_qml_mode()) {
- Handle<GlobalObject> global = isolate_->global_object();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
- // Get the context before the debugger was entered.
- SaveContext *save = isolate_->save_context();
- while (save != NULL &&
- *save->context() == *isolate_->debug()->debug_context()) {
- save = save->prev();
- }
-
- global = Handle<GlobalObject>(save->context()->global_object());
- }
-#endif
-
- if (!global->HasProperty(*(proxy->name()))) {
- var->set_is_qml_global(true);
- }
- }
-
- proxy->BindTo(var);
-
- if (FLAG_harmony_modules) {
- bool ok;
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Declare %s\n", var->name()->ToAsciiArray());
-#endif
- proxy->interface()->Unify(var->interface(), zone(), &ok);
- if (!ok) {
-#ifdef DEBUG
- if (FLAG_print_interfaces) {
- PrintF("DECLARE TYPE ERROR\n");
- PrintF("proxy: ");
- proxy->interface()->Print();
- PrintF("var: ");
- var->interface()->Print();
- }
-#endif
- ReportMessage("module_type_error", Vector<Handle<String> >(&name, 1));
- }
- }
- }
-}
-
-
-// Language extension which is only enabled for source files loaded
-// through the API's extension mechanism. A native function
-// declaration is resolved by looking up the function through a
-// callback provided by the extension.
-Statement* Parser::ParseNativeDeclaration(bool* ok) {
- Expect(Token::FUNCTION, CHECK_OK);
- Handle<String> name = ParseIdentifier(CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- bool done = (peek() == Token::RPAREN);
- while (!done) {
- ParseIdentifier(CHECK_OK);
- done = (peek() == Token::RPAREN);
- if (!done) {
- Expect(Token::COMMA, CHECK_OK);
- }
- }
- Expect(Token::RPAREN, CHECK_OK);
- Expect(Token::SEMICOLON, CHECK_OK);
-
- // Make sure that the function containing the native declaration
- // isn't lazily compiled. The extension structures are only
- // accessible while parsing the first time not when reparsing
- // because of lazy compilation.
- DeclarationScope(VAR)->ForceEagerCompilation();
-
- // Compute the function template for the native function.
- v8::Handle<v8::FunctionTemplate> fun_template =
- extension_->GetNativeFunction(v8::Utils::ToLocal(name));
- ASSERT(!fun_template.IsEmpty());
-
- // Instantiate the function and create a shared function info from it.
- Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
- const int literals = fun->NumberOfLiterals();
- Handle<Code> code = Handle<Code>(fun->shared()->code());
- Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
- Handle<SharedFunctionInfo> shared =
- isolate()->factory()->NewSharedFunctionInfo(name, literals, code,
- Handle<ScopeInfo>(fun->shared()->scope_info()));
- shared->set_construct_stub(*construct_stub);
-
- // Copy the function data to the shared function info.
- shared->set_function_data(fun->shared()->function_data());
- int parameters = fun->shared()->formal_parameter_count();
- shared->set_formal_parameter_count(parameters);
-
- // TODO(1240846): It's weird that native function declarations are
- // introduced dynamically when we meet their declarations, whereas
- // other functions are set up when entering the surrounding scope.
- VariableProxy* proxy = NewUnresolved(name, VAR, Interface::NewValue());
- Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, VAR, top_scope_);
- Declare(declaration, true, CHECK_OK);
- SharedFunctionInfoLiteral* lit =
- factory()->NewSharedFunctionInfoLiteral(shared);
- return factory()->NewExpressionStatement(
- factory()->NewAssignment(
- Token::INIT_VAR, proxy, lit, RelocInfo::kNoPosition));
-}
-
-
-Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
- // FunctionDeclaration ::
- // 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
- Expect(Token::FUNCTION, CHECK_OK);
- int function_token_position = scanner().location().beg_pos;
- bool is_strict_reserved = false;
- Handle<String> name = ParseIdentifierOrStrictReservedWord(
- &is_strict_reserved, CHECK_OK);
- FunctionLiteral* fun = ParseFunctionLiteral(name,
- is_strict_reserved,
- function_token_position,
- FunctionLiteral::DECLARATION,
- CHECK_OK);
- // Even if we're not at the top-level of the global or a function
- // scope, we treat it as such and introduce the function with its
- // initial value upon entering the corresponding scope.
- // In extended mode, a function behaves as a lexical binding, except in the
- // global scope.
- VariableMode mode =
- is_extended_mode() && !top_scope_->is_global_scope() ? LET : VAR;
- VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
- Declaration* declaration =
- factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_);
- Declare(declaration, true, CHECK_OK);
- if (names) names->Add(name, zone());
- return factory()->NewEmptyStatement();
-}
-
-
-Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
- if (top_scope_->is_extended_mode()) return ParseScopedBlock(labels, ok);
-
- // Block ::
- // '{' Statement* '}'
-
- // Note that a Block does not introduce a new execution scope!
- // (ECMA-262, 3rd, 12.2)
- //
- // Construct block expecting 16 statements.
- Block* result = factory()->NewBlock(labels, 16, false);
- Target target(&this->target_stack_, result);
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- Statement* stat = ParseStatement(NULL, CHECK_OK);
- if (stat && !stat->IsEmpty()) {
- result->AddStatement(stat, zone());
- }
- }
- Expect(Token::RBRACE, CHECK_OK);
- return result;
-}
-
-
-Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
- // The harmony mode uses block elements instead of statements.
- //
- // Block ::
- // '{' BlockElement* '}'
-
- // Construct block expecting 16 statements.
- Block* body = factory()->NewBlock(labels, 16, false);
- Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
-
- // Parse the statements and collect escaping labels.
- Expect(Token::LBRACE, CHECK_OK);
- block_scope->set_start_position(scanner().location().beg_pos);
- { BlockState block_state(this, block_scope);
- TargetCollector collector(zone());
- Target target(&this->target_stack_, &collector);
- Target target_body(&this->target_stack_, body);
-
- while (peek() != Token::RBRACE) {
- Statement* stat = ParseBlockElement(NULL, CHECK_OK);
- if (stat && !stat->IsEmpty()) {
- body->AddStatement(stat, zone());
- }
- }
- }
- Expect(Token::RBRACE, CHECK_OK);
- block_scope->set_end_position(scanner().location().end_pos);
- block_scope = block_scope->FinalizeBlockScope();
- body->set_scope(block_scope);
- return body;
-}
-
-
-Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
- ZoneStringList* names,
- bool* ok) {
- // VariableStatement ::
- // VariableDeclarations ';'
-
- Handle<String> ignore;
- Block* result =
- ParseVariableDeclarations(var_context, NULL, names, &ignore, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return result;
-}
-
-
-bool Parser::IsEvalOrArguments(Handle<String> string) {
- return string.is_identical_to(isolate()->factory()->eval_string()) ||
- string.is_identical_to(isolate()->factory()->arguments_string());
-}
-
-
-// If the variable declaration declares exactly one non-const
-// variable, then *out is set to that variable. In all other cases,
-// *out is untouched; in particular, it is the caller's responsibility
-// to initialize it properly. This mechanism is used for the parsing
-// of 'for-in' loops.
-Block* Parser::ParseVariableDeclarations(
- VariableDeclarationContext var_context,
- VariableDeclarationProperties* decl_props,
- ZoneStringList* names,
- Handle<String>* out,
- bool* ok) {
- // VariableDeclarations ::
- // ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
- //
- // The ES6 Draft Rev3 specifies the following grammar for const declarations
- //
- // ConstDeclaration ::
- // const ConstBinding (',' ConstBinding)* ';'
- // ConstBinding ::
- // Identifier '=' AssignmentExpression
- //
- // TODO(ES6):
- // ConstBinding ::
- // BindingPattern '=' AssignmentExpression
- VariableMode mode = VAR;
- // True if the binding needs initialization. 'let' and 'const' declared
- // bindings are created uninitialized by their declaration nodes and
- // need initialization. 'var' declared bindings are always initialized
- // immediately by their declaration nodes.
- bool needs_init = false;
- bool is_const = false;
- Token::Value init_op = Token::INIT_VAR;
- if (peek() == Token::VAR) {
- Consume(Token::VAR);
- } else if (peek() == Token::CONST) {
- // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
- //
- // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
- //
- // * It is a Syntax Error if the code that matches this production is not
- // contained in extended code.
- //
- // However disallowing const in classic mode will break compatibility with
- // existing pages. Therefore we keep allowing const with the old
- // non-harmony semantics in classic mode.
- Consume(Token::CONST);
- switch (top_scope_->language_mode()) {
- case CLASSIC_MODE:
- mode = CONST;
- init_op = Token::INIT_CONST;
- break;
- case STRICT_MODE:
- ReportMessage("strict_const", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- case EXTENDED_MODE:
- if (var_context == kStatement) {
- // In extended mode 'const' declarations are only allowed in source
- // element positions.
- ReportMessage("unprotected_const", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- mode = CONST_HARMONY;
- init_op = Token::INIT_CONST_HARMONY;
- }
- is_const = true;
- needs_init = true;
- } else if (peek() == Token::LET) {
- // ES6 Draft Rev4 section 12.2.1:
- //
- // LetDeclaration : let LetBindingList ;
- //
- // * It is a Syntax Error if the code that matches this production is not
- // contained in extended code.
- if (!is_extended_mode()) {
- ReportMessage("illegal_let", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- Consume(Token::LET);
- if (var_context == kStatement) {
- // Let declarations are only allowed in source element positions.
- ReportMessage("unprotected_let", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- mode = LET;
- needs_init = true;
- init_op = Token::INIT_LET;
- } else {
- UNREACHABLE(); // by current callers
- }
-
- Scope* declaration_scope = DeclarationScope(mode);
-
- // The scope of a var/const declared variable anywhere inside a function
- // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
- // transform a source-level var/const declaration into a (Function)
- // Scope declaration, and rewrite the source-level initialization into an
- // assignment statement. We use a block to collect multiple assignments.
- //
- // We mark the block as initializer block because we don't want the
- // rewriter to add a '.result' assignment to such a block (to get compliant
- // behavior for code such as print(eval('var x = 7')), and for cosmetic
- // reasons when pretty-printing. Also, unless an assignment (initialization)
- // is inside an initializer block, it is ignored.
- //
- // Create new block with one expected declaration.
- Block* block = factory()->NewBlock(NULL, 1, true);
- int nvars = 0; // the number of variables declared
- Handle<String> name;
- do {
- if (fni_ != NULL) fni_->Enter();
-
- // Parse variable name.
- if (nvars > 0) Consume(Token::COMMA);
- name = ParseIdentifier(CHECK_OK);
- if (fni_ != NULL) fni_->PushVariableName(name);
-
- // Strict mode variables may not be named eval or arguments
- if (!declaration_scope->is_classic_mode() && IsEvalOrArguments(name)) {
- ReportMessage("strict_var_name", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- // Declare variable.
- // Note that we *always* must treat the initial value via a separate init
- // assignment for variables and constants because the value must be assigned
- // when the variable is encountered in the source. But the variable/constant
- // is declared (and set to 'undefined') upon entering the function within
- // which the variable or constant is declared. Only function variables have
- // an initial value in the declaration (because they are initialized upon
- // entering the function).
- //
- // If we have a const declaration, in an inner scope, the proxy is always
- // bound to the declared variable (independent of possibly surrounding with
- // statements).
- // For let/const declarations in harmony mode, we can also immediately
- // pre-resolve the proxy because it resides in the same scope as the
- // declaration.
- Interface* interface =
- is_const ? Interface::NewConst() : Interface::NewValue();
- VariableProxy* proxy = NewUnresolved(name, mode, interface);
- Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, mode, top_scope_);
- Declare(declaration, mode != VAR, CHECK_OK);
- nvars++;
- if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
- ReportMessageAt(scanner().location(), "too_many_variables",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- if (names) names->Add(name, zone());
-
- // Parse initialization expression if present and/or needed. A
- // declaration of the form:
- //
- // var v = x;
- //
- // is syntactic sugar for:
- //
- // var v; v = x;
- //
- // In particular, we need to re-lookup 'v' (in top_scope_, not
- // declaration_scope) as it may be a different 'v' than the 'v' in the
- // declaration (e.g., if we are inside a 'with' statement or 'catch'
- // block).
- //
- // However, note that const declarations are different! A const
- // declaration of the form:
- //
- // const c = x;
- //
- // is *not* syntactic sugar for:
- //
- // const c; c = x;
- //
- // The "variable" c initialized to x is the same as the declared
- // one - there is no re-lookup (see the last parameter of the
- // Declare() call above).
-
- Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
- Expression* value = NULL;
- int position = -1;
- // Harmony consts have non-optional initializers.
- if (peek() == Token::ASSIGN || mode == CONST_HARMONY) {
- Expect(Token::ASSIGN, CHECK_OK);
- position = scanner().location().beg_pos;
- value = ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
- // Don't infer if it is "a = function(){...}();"-like expression.
- if (fni_ != NULL &&
- value->AsCall() == NULL &&
- value->AsCallNew() == NULL) {
- fni_->Infer();
- } else {
- fni_->RemoveLastFunction();
- }
- if (decl_props != NULL) *decl_props = kHasInitializers;
- }
-
- // Record the end position of the initializer.
- if (proxy->var() != NULL) {
- proxy->var()->set_initializer_position(scanner().location().end_pos);
- }
-
- // Make sure that 'const x' and 'let x' initialize 'x' to undefined.
- if (value == NULL && needs_init) {
- value = GetLiteralUndefined();
- }
-
- // Global variable declarations must be compiled in a specific
- // way. When the script containing the global variable declaration
- // is entered, the global variable must be declared, so that if it
- // doesn't exist (on the global object itself, see ES5 errata) it
- // gets created with an initial undefined value. This is handled
- // by the declarations part of the function representing the
- // top-level global code; see Runtime::DeclareGlobalVariable. If
- // it already exists (in the object or in a prototype), it is
- // *not* touched until the variable declaration statement is
- // executed.
- //
- // Executing the variable declaration statement will always
- // guarantee to give the global object a "local" variable; a
- // variable defined in the global object and not in any
- // prototype. This way, global variable declarations can shadow
- // properties in the prototype chain, but only after the variable
- // declaration statement has been executed. This is important in
- // browsers where the global object (window) has lots of
- // properties defined in prototype objects.
- if (initialization_scope->is_global_scope() &&
- !IsLexicalVariableMode(mode)) {
- // Compute the arguments for the runtime call.
- ZoneList<Expression*>* arguments =
- new(zone()) ZoneList<Expression*>(3, zone());
- // We have at least 1 parameter.
- arguments->Add(factory()->NewLiteral(name), zone());
- CallRuntime* initialize;
-
- if (is_const) {
- arguments->Add(value, zone());
- value = NULL; // zap the value to avoid the unnecessary assignment
-
- int qml_mode = 0;
- if (top_scope_->is_qml_mode()
- && !Isolate::Current()->global_object()->HasProperty(*name))
- qml_mode = 1;
- arguments->Add(factory()->NewNumberLiteral(qml_mode), zone());
-
- // Construct the call to Runtime_InitializeConstGlobal
- // and add it to the initialization statement block.
- // Note that the function does different things depending on
- // the number of arguments (1 or 2).
- initialize = factory()->NewCallRuntime(
- isolate()->factory()->InitializeConstGlobal_string(),
- Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
- arguments);
- } else {
- // Add strict mode.
- // We may want to pass singleton to avoid Literal allocations.
- LanguageMode language_mode = initialization_scope->language_mode();
- arguments->Add(factory()->NewNumberLiteral(language_mode), zone());
-
- int qml_mode = 0;
- if (top_scope_->is_qml_mode()
- && !Isolate::Current()->global_object()->HasProperty(*name))
- qml_mode = 1;
- arguments->Add(factory()->NewNumberLiteral(qml_mode), zone());
-
- // Be careful not to assign a value to the global variable if
- // we're in a with. The initialization value should not
- // necessarily be stored in the global object in that case,
- // which is why we need to generate a separate assignment node.
- if (value != NULL && !inside_with()) {
- arguments->Add(value, zone());
- value = NULL; // zap the value to avoid the unnecessary assignment
- }
-
- // Construct the call to Runtime_InitializeVarGlobal
- // and add it to the initialization statement block.
- // Note that the function does different things depending on
- // the number of arguments (2 or 3).
- initialize = factory()->NewCallRuntime(
- isolate()->factory()->InitializeVarGlobal_string(),
- Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
- arguments);
- }
-
- block->AddStatement(factory()->NewExpressionStatement(initialize),
- zone());
- } else if (needs_init) {
- // Constant initializations always assign to the declared constant which
- // is always at the function scope level. This is only relevant for
- // dynamically looked-up variables and constants (the start context for
- // constant lookups is always the function context, while it is the top
- // context for var declared variables). Sigh...
- // For 'let' and 'const' declared variables in harmony mode the
- // initialization also always assigns to the declared variable.
- ASSERT(proxy != NULL);
- ASSERT(proxy->var() != NULL);
- ASSERT(value != NULL);
- Assignment* assignment =
- factory()->NewAssignment(init_op, proxy, value, position);
- block->AddStatement(factory()->NewExpressionStatement(assignment),
- zone());
- value = NULL;
- }
-
- // Add an assignment node to the initialization statement block if we still
- // have a pending initialization value.
- if (value != NULL) {
- ASSERT(mode == VAR);
- // 'var' initializations are simply assignments (with all the consequences
- // if they are inside a 'with' statement - they may change a 'with' object
- // property).
- VariableProxy* proxy =
- initialization_scope->NewUnresolved(factory(), name, interface);
- Assignment* assignment =
- factory()->NewAssignment(init_op, proxy, value, position);
- block->AddStatement(factory()->NewExpressionStatement(assignment),
- zone());
- }
-
- if (fni_ != NULL) fni_->Leave();
- } while (peek() == Token::COMMA);
-
- // If there was a single non-const declaration, return it in the output
- // parameter for possible use by for/in.
- if (nvars == 1 && !is_const) {
- *out = name;
- }
-
- return block;
-}
-
-
-static bool ContainsLabel(ZoneStringList* labels, Handle<String> label) {
- ASSERT(!label.is_null());
- if (labels != NULL)
- for (int i = labels->length(); i-- > 0; )
- if (labels->at(i).is_identical_to(label))
- return true;
-
- return false;
-}
-
-
-Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
- bool* ok) {
- // ExpressionStatement | LabelledStatement ::
- // Expression ';'
- // Identifier ':' Statement
- bool starts_with_idenfifier = peek_any_identifier();
- Expression* expr = ParseExpression(true, CHECK_OK);
- if (peek() == Token::COLON && starts_with_idenfifier && expr != NULL &&
- expr->AsVariableProxy() != NULL &&
- !expr->AsVariableProxy()->is_this()) {
- // Expression is a single identifier, and not, e.g., a parenthesized
- // identifier.
- VariableProxy* var = expr->AsVariableProxy();
- Handle<String> label = var->name();
- // TODO(1240780): We don't check for redeclaration of labels
- // during preparsing since keeping track of the set of active
- // labels requires nontrivial changes to the way scopes are
- // structured. However, these are probably changes we want to
- // make later anyway so we should go back and fix this then.
- if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
- SmartArrayPointer<char> c_string = label->ToCString(DISALLOW_NULLS);
- const char* elms[2] = { "Label", *c_string };
- Vector<const char*> args(elms, 2);
- ReportMessage("redeclaration", args);
- *ok = false;
- return NULL;
- }
- if (labels == NULL) {
- labels = new(zone()) ZoneStringList(4, zone());
- }
- labels->Add(label, zone());
- // Remove the "ghost" variable that turned out to be a label
- // from the top scope. This way, we don't try to resolve it
- // during the scope processing.
- top_scope_->RemoveUnresolved(var);
- Expect(Token::COLON, CHECK_OK);
- return ParseStatement(labels, ok);
- }
-
- // If we have an extension, we allow a native function declaration.
- // A native function declaration starts with "native function" with
- // no line-terminator between the two words.
- if (extension_ != NULL &&
- peek() == Token::FUNCTION &&
- !scanner().HasAnyLineTerminatorBeforeNext() &&
- expr != NULL &&
- expr->AsVariableProxy() != NULL &&
- expr->AsVariableProxy()->name()->Equals(
- isolate()->heap()->native_string()) &&
- !scanner().literal_contains_escapes()) {
- return ParseNativeDeclaration(ok);
- }
-
- // Parsed expression statement, or the context-sensitive 'module' keyword.
- // Only expect semicolon in the former case.
- if (!FLAG_harmony_modules ||
- peek() != Token::IDENTIFIER ||
- scanner().HasAnyLineTerminatorBeforeNext() ||
- expr->AsVariableProxy() == NULL ||
- !expr->AsVariableProxy()->name()->Equals(
- isolate()->heap()->module_string()) ||
- scanner().literal_contains_escapes()) {
- ExpectSemicolon(CHECK_OK);
- }
- return factory()->NewExpressionStatement(expr);
-}
-
-
-IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) {
- // IfStatement ::
- // 'if' '(' Expression ')' Statement ('else' Statement)?
-
- Expect(Token::IF, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- Expression* condition = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- Statement* then_statement = ParseStatement(labels, CHECK_OK);
- Statement* else_statement = NULL;
- if (peek() == Token::ELSE) {
- Next();
- else_statement = ParseStatement(labels, CHECK_OK);
- } else {
- else_statement = factory()->NewEmptyStatement();
- }
- return factory()->NewIfStatement(condition, then_statement, else_statement);
-}
-
-
-Statement* Parser::ParseContinueStatement(bool* ok) {
- // ContinueStatement ::
- // 'continue' Identifier? ';'
-
- Expect(Token::CONTINUE, CHECK_OK);
- Handle<String> label = Handle<String>::null();
- Token::Value tok = peek();
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
- tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
- label = ParseIdentifier(CHECK_OK);
- }
- IterationStatement* target = NULL;
- target = LookupContinueTarget(label, CHECK_OK);
- if (target == NULL) {
- // Illegal continue statement.
- const char* message = "illegal_continue";
- Vector<Handle<String> > args;
- if (!label.is_null()) {
- message = "unknown_label";
- args = Vector<Handle<String> >(&label, 1);
- }
- ReportMessageAt(scanner().location(), message, args);
- *ok = false;
- return NULL;
- }
- ExpectSemicolon(CHECK_OK);
- return factory()->NewContinueStatement(target);
-}
-
-
-Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
- // BreakStatement ::
- // 'break' Identifier? ';'
-
- Expect(Token::BREAK, CHECK_OK);
- Handle<String> label;
- Token::Value tok = peek();
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
- tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
- label = ParseIdentifier(CHECK_OK);
- }
- // Parse labeled break statements that target themselves into
- // empty statements, e.g. 'l1: l2: l3: break l2;'
- if (!label.is_null() && ContainsLabel(labels, label)) {
- ExpectSemicolon(CHECK_OK);
- return factory()->NewEmptyStatement();
- }
- BreakableStatement* target = NULL;
- target = LookupBreakTarget(label, CHECK_OK);
- if (target == NULL) {
- // Illegal break statement.
- const char* message = "illegal_break";
- Vector<Handle<String> > args;
- if (!label.is_null()) {
- message = "unknown_label";
- args = Vector<Handle<String> >(&label, 1);
- }
- ReportMessageAt(scanner().location(), message, args);
- *ok = false;
- return NULL;
- }
- ExpectSemicolon(CHECK_OK);
- return factory()->NewBreakStatement(target);
-}
-
-
-Statement* Parser::ParseReturnStatement(bool* ok) {
- // ReturnStatement ::
- // 'return' Expression? ';'
-
- // Consume the return token. It is necessary to do the before
- // reporting any errors on it, because of the way errors are
- // reported (underlining).
- Expect(Token::RETURN, CHECK_OK);
-
- Token::Value tok = peek();
- Statement* result;
- if (scanner().HasAnyLineTerminatorBeforeNext() ||
- tok == Token::SEMICOLON ||
- tok == Token::RBRACE ||
- tok == Token::EOS) {
- ExpectSemicolon(CHECK_OK);
- result = factory()->NewReturnStatement(GetLiteralUndefined());
- } else {
- Expression* expr = ParseExpression(true, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- result = factory()->NewReturnStatement(expr);
- }
-
- // An ECMAScript program is considered syntactically incorrect if it
- // contains a return statement that is not within the body of a
- // function. See ECMA-262, section 12.9, page 67.
- //
- // To be consistent with KJS we report the syntax error at runtime.
- Scope* declaration_scope = top_scope_->DeclarationScope();
- if (declaration_scope->is_global_scope() ||
- declaration_scope->is_eval_scope()) {
- Handle<String> type = isolate()->factory()->illegal_return_string();
- Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
- return factory()->NewExpressionStatement(throw_error);
- }
- return result;
-}
-
-
-Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
- // WithStatement ::
- // 'with' '(' Expression ')' Statement
-
- Expect(Token::WITH, CHECK_OK);
-
- if (!top_scope_->is_classic_mode()) {
- ReportMessage("strict_mode_with", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- Expect(Token::LPAREN, CHECK_OK);
- Expression* expr = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
-
- top_scope_->DeclarationScope()->RecordWithStatement();
- Scope* with_scope = NewScope(top_scope_, WITH_SCOPE);
- Statement* stmt;
- { BlockState block_state(this, with_scope);
- with_scope->set_start_position(scanner().peek_location().beg_pos);
- stmt = ParseStatement(labels, CHECK_OK);
- with_scope->set_end_position(scanner().location().end_pos);
- }
- return factory()->NewWithStatement(expr, stmt);
-}
-
-
-CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
- // CaseClause ::
- // 'case' Expression ':' Statement*
- // 'default' ':' Statement*
-
- Expression* label = NULL; // NULL expression indicates default case
- if (peek() == Token::CASE) {
- Expect(Token::CASE, CHECK_OK);
- label = ParseExpression(true, CHECK_OK);
- } else {
- Expect(Token::DEFAULT, CHECK_OK);
- if (*default_seen_ptr) {
- ReportMessage("multiple_defaults_in_switch",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- *default_seen_ptr = true;
- }
- Expect(Token::COLON, CHECK_OK);
- int pos = scanner().location().beg_pos;
- ZoneList<Statement*>* statements =
- new(zone()) ZoneList<Statement*>(5, zone());
- while (peek() != Token::CASE &&
- peek() != Token::DEFAULT &&
- peek() != Token::RBRACE) {
- Statement* stat = ParseStatement(NULL, CHECK_OK);
- statements->Add(stat, zone());
- }
-
- return new(zone()) CaseClause(isolate(), label, statements, pos);
-}
-
-
-SwitchStatement* Parser::ParseSwitchStatement(ZoneStringList* labels,
- bool* ok) {
- // SwitchStatement ::
- // 'switch' '(' Expression ')' '{' CaseClause* '}'
-
- SwitchStatement* statement = factory()->NewSwitchStatement(labels);
- Target target(&this->target_stack_, statement);
-
- Expect(Token::SWITCH, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- Expression* tag = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
-
- bool default_seen = false;
- ZoneList<CaseClause*>* cases = new(zone()) ZoneList<CaseClause*>(4, zone());
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- CaseClause* clause = ParseCaseClause(&default_seen, CHECK_OK);
- cases->Add(clause, zone());
- }
- Expect(Token::RBRACE, CHECK_OK);
-
- if (statement) statement->Initialize(tag, cases);
- return statement;
-}
-
-
-Statement* Parser::ParseThrowStatement(bool* ok) {
- // ThrowStatement ::
- // 'throw' Expression ';'
-
- Expect(Token::THROW, CHECK_OK);
- int pos = scanner().location().beg_pos;
- if (scanner().HasAnyLineTerminatorBeforeNext()) {
- ReportMessage("newline_after_throw", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- Expression* exception = ParseExpression(true, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
-
- return factory()->NewExpressionStatement(factory()->NewThrow(exception, pos));
-}
-
-
-TryStatement* Parser::ParseTryStatement(bool* ok) {
- // TryStatement ::
- // 'try' Block Catch
- // 'try' Block Finally
- // 'try' Block Catch Finally
- //
- // Catch ::
- // 'catch' '(' Identifier ')' Block
- //
- // Finally ::
- // 'finally' Block
-
- Expect(Token::TRY, CHECK_OK);
-
- TargetCollector try_collector(zone());
- Block* try_block;
-
- { Target target(&this->target_stack_, &try_collector);
- try_block = ParseBlock(NULL, CHECK_OK);
- }
-
- Token::Value tok = peek();
- if (tok != Token::CATCH && tok != Token::FINALLY) {
- ReportMessage("no_catch_or_finally", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- // If we can break out from the catch block and there is a finally block,
- // then we will need to collect escaping targets from the catch
- // block. Since we don't know yet if there will be a finally block, we
- // always collect the targets.
- TargetCollector catch_collector(zone());
- Scope* catch_scope = NULL;
- Variable* catch_variable = NULL;
- Block* catch_block = NULL;
- Handle<String> name;
- if (tok == Token::CATCH) {
- Consume(Token::CATCH);
-
- Expect(Token::LPAREN, CHECK_OK);
- catch_scope = NewScope(top_scope_, CATCH_SCOPE);
- catch_scope->set_start_position(scanner().location().beg_pos);
- name = ParseIdentifier(CHECK_OK);
-
- if (!top_scope_->is_classic_mode() && IsEvalOrArguments(name)) {
- ReportMessage("strict_catch_variable", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- Expect(Token::RPAREN, CHECK_OK);
-
- if (peek() == Token::LBRACE) {
- Target target(&this->target_stack_, &catch_collector);
- VariableMode mode = is_extended_mode() ? LET : VAR;
- catch_variable =
- catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
-
- BlockState block_state(this, catch_scope);
- catch_block = ParseBlock(NULL, CHECK_OK);
- } else {
- Expect(Token::LBRACE, CHECK_OK);
- }
- catch_scope->set_end_position(scanner().location().end_pos);
- tok = peek();
- }
-
- Block* finally_block = NULL;
- if (tok == Token::FINALLY || catch_block == NULL) {
- Consume(Token::FINALLY);
- finally_block = ParseBlock(NULL, CHECK_OK);
- }
-
- // Simplify the AST nodes by converting:
- // 'try B0 catch B1 finally B2'
- // to:
- // 'try { try B0 catch B1 } finally B2'
-
- if (catch_block != NULL && finally_block != NULL) {
- // If we have both, create an inner try/catch.
- ASSERT(catch_scope != NULL && catch_variable != NULL);
- int index = current_function_state_->NextHandlerIndex();
- TryCatchStatement* statement = factory()->NewTryCatchStatement(
- index, try_block, catch_scope, catch_variable, catch_block);
- statement->set_escaping_targets(try_collector.targets());
- try_block = factory()->NewBlock(NULL, 1, false);
- try_block->AddStatement(statement, zone());
- catch_block = NULL; // Clear to indicate it's been handled.
- }
-
- TryStatement* result = NULL;
- if (catch_block != NULL) {
- ASSERT(finally_block == NULL);
- ASSERT(catch_scope != NULL && catch_variable != NULL);
- int index = current_function_state_->NextHandlerIndex();
- result = factory()->NewTryCatchStatement(
- index, try_block, catch_scope, catch_variable, catch_block);
- } else {
- ASSERT(finally_block != NULL);
- int index = current_function_state_->NextHandlerIndex();
- result = factory()->NewTryFinallyStatement(index, try_block, finally_block);
- // Combine the jump targets of the try block and the possible catch block.
- try_collector.targets()->AddAll(*catch_collector.targets(), zone());
- }
-
- result->set_escaping_targets(try_collector.targets());
- return result;
-}
-
-
-DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
- bool* ok) {
- // DoStatement ::
- // 'do' Statement 'while' '(' Expression ')' ';'
-
- DoWhileStatement* loop = factory()->NewDoWhileStatement(labels);
- Target target(&this->target_stack_, loop);
-
- Expect(Token::DO, CHECK_OK);
- Statement* body = ParseStatement(NULL, CHECK_OK);
- Expect(Token::WHILE, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
-
- if (loop != NULL) {
- int position = scanner().location().beg_pos;
- loop->set_condition_position(position);
- }
-
- Expression* cond = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
-
- // Allow do-statements to be terminated with and without
- // semi-colons. This allows code such as 'do;while(0)return' to
- // parse, which would not be the case if we had used the
- // ExpectSemicolon() functionality here.
- if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
-
- if (loop != NULL) loop->Initialize(cond, body);
- return loop;
-}
-
-
-WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
- // WhileStatement ::
- // 'while' '(' Expression ')' Statement
-
- WhileStatement* loop = factory()->NewWhileStatement(labels);
- Target target(&this->target_stack_, loop);
-
- Expect(Token::WHILE, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- Expression* cond = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- Statement* body = ParseStatement(NULL, CHECK_OK);
-
- if (loop != NULL) loop->Initialize(cond, body);
- return loop;
-}
-
-
-Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
- // ForStatement ::
- // 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
-
- Statement* init = NULL;
-
- // Create an in-between scope for let-bound iteration variables.
- Scope* saved_scope = top_scope_;
- Scope* for_scope = NewScope(top_scope_, BLOCK_SCOPE);
- top_scope_ = for_scope;
-
- Expect(Token::FOR, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- for_scope->set_start_position(scanner().location().beg_pos);
- if (peek() != Token::SEMICOLON) {
- if (peek() == Token::VAR || peek() == Token::CONST) {
- bool is_const = peek() == Token::CONST;
- Handle<String> name;
- Block* variable_statement =
- ParseVariableDeclarations(kForStatement, NULL, NULL, &name, CHECK_OK);
-
- if (peek() == Token::IN && !name.is_null()) {
- Interface* interface =
- is_const ? Interface::NewConst() : Interface::NewValue();
- ForInStatement* loop = factory()->NewForInStatement(labels);
- Target target(&this->target_stack_, loop);
-
- Expect(Token::IN, CHECK_OK);
- Expression* enumerable = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
-
- VariableProxy* each =
- top_scope_->NewUnresolved(factory(), name, interface);
- Statement* body = ParseStatement(NULL, CHECK_OK);
- loop->Initialize(each, enumerable, body);
- Block* result = factory()->NewBlock(NULL, 2, false);
- result->AddStatement(variable_statement, zone());
- result->AddStatement(loop, zone());
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
- for_scope = for_scope->FinalizeBlockScope();
- ASSERT(for_scope == NULL);
- // Parsed for-in loop w/ variable/const declaration.
- return result;
- } else {
- init = variable_statement;
- }
- } else if (peek() == Token::LET) {
- Handle<String> name;
- VariableDeclarationProperties decl_props = kHasNoInitializers;
- Block* variable_statement =
- ParseVariableDeclarations(kForStatement, &decl_props, NULL, &name,
- CHECK_OK);
- bool accept_IN = !name.is_null() && decl_props != kHasInitializers;
- if (peek() == Token::IN && accept_IN) {
- // Rewrite a for-in statement of the form
- //
- // for (let x in e) b
- //
- // into
- //
- // <let x' be a temporary variable>
- // for (x' in e) {
- // let x;
- // x = x';
- // b;
- // }
-
- // TODO(keuchel): Move the temporary variable to the block scope, after
- // implementing stack allocated block scoped variables.
- Factory* heap_factory = isolate()->factory();
- Handle<String> tempstr =
- heap_factory->NewConsString(heap_factory->dot_for_string(), name);
- Handle<String> tempname = heap_factory->InternalizeString(tempstr);
- Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
- VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
- ForInStatement* loop = factory()->NewForInStatement(labels);
- Target target(&this->target_stack_, loop);
-
- // The expression does not see the loop variable.
- Expect(Token::IN, CHECK_OK);
- top_scope_ = saved_scope;
- Expression* enumerable = ParseExpression(true, CHECK_OK);
- top_scope_ = for_scope;
- Expect(Token::RPAREN, CHECK_OK);
-
- VariableProxy* each =
- top_scope_->NewUnresolved(factory(), name, Interface::NewValue());
- Statement* body = ParseStatement(NULL, CHECK_OK);
- Block* body_block = factory()->NewBlock(NULL, 3, false);
- Assignment* assignment = factory()->NewAssignment(
- Token::ASSIGN, each, temp_proxy, RelocInfo::kNoPosition);
- Statement* assignment_statement =
- factory()->NewExpressionStatement(assignment);
- body_block->AddStatement(variable_statement, zone());
- body_block->AddStatement(assignment_statement, zone());
- body_block->AddStatement(body, zone());
- loop->Initialize(temp_proxy, enumerable, body_block);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
- for_scope = for_scope->FinalizeBlockScope();
- body_block->set_scope(for_scope);
- // Parsed for-in loop w/ let declaration.
- return loop;
-
- } else {
- init = variable_statement;
- }
- } else {
- Expression* expression = ParseExpression(false, CHECK_OK);
- if (peek() == Token::IN) {
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report
- // the error at runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
- isolate()->factory()->invalid_lhs_in_for_in_string();
- expression = NewThrowReferenceError(type);
- }
- ForInStatement* loop = factory()->NewForInStatement(labels);
- Target target(&this->target_stack_, loop);
-
- Expect(Token::IN, CHECK_OK);
- Expression* enumerable = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
-
- Statement* body = ParseStatement(NULL, CHECK_OK);
- if (loop) loop->Initialize(expression, enumerable, body);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
- for_scope = for_scope->FinalizeBlockScope();
- ASSERT(for_scope == NULL);
- // Parsed for-in loop.
- return loop;
-
- } else {
- init = factory()->NewExpressionStatement(expression);
- }
- }
- }
-
- // Standard 'for' loop
- ForStatement* loop = factory()->NewForStatement(labels);
- Target target(&this->target_stack_, loop);
-
- // Parsed initializer at this point.
- Expect(Token::SEMICOLON, CHECK_OK);
-
- Expression* cond = NULL;
- if (peek() != Token::SEMICOLON) {
- cond = ParseExpression(true, CHECK_OK);
- }
- Expect(Token::SEMICOLON, CHECK_OK);
-
- Statement* next = NULL;
- if (peek() != Token::RPAREN) {
- Expression* exp = ParseExpression(true, CHECK_OK);
- next = factory()->NewExpressionStatement(exp);
- }
- Expect(Token::RPAREN, CHECK_OK);
-
- Statement* body = ParseStatement(NULL, CHECK_OK);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
- for_scope = for_scope->FinalizeBlockScope();
- if (for_scope != NULL) {
- // Rewrite a for statement of the form
- //
- // for (let x = i; c; n) b
- //
- // into
- //
- // {
- // let x = i;
- // for (; c; n) b
- // }
- ASSERT(init != NULL);
- Block* result = factory()->NewBlock(NULL, 2, false);
- result->AddStatement(init, zone());
- result->AddStatement(loop, zone());
- result->set_scope(for_scope);
- if (loop) loop->Initialize(NULL, cond, next, body);
- return result;
- } else {
- if (loop) loop->Initialize(init, cond, next, body);
- return loop;
- }
-}
-
-
-// Precedence = 1
-Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
- // Expression ::
- // AssignmentExpression
- // Expression ',' AssignmentExpression
-
- Expression* result = ParseAssignmentExpression(accept_IN, CHECK_OK);
- while (peek() == Token::COMMA) {
- Expect(Token::COMMA, CHECK_OK);
- int position = scanner().location().beg_pos;
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- result =
- factory()->NewBinaryOperation(Token::COMMA, result, right, position);
- }
- return result;
-}
-
-
-// Precedence = 2
-Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
- // AssignmentExpression ::
- // ConditionalExpression
- // LeftHandSideExpression AssignmentOperator AssignmentExpression
-
- if (fni_ != NULL) fni_->Enter();
- Expression* expression = ParseConditionalExpression(accept_IN, CHECK_OK);
-
- if (!Token::IsAssignmentOp(peek())) {
- if (fni_ != NULL) fni_->Leave();
- // Parsed conditional expression only (no assignment).
- return expression;
- }
-
- // Signal a reference error if the expression is an invalid left-hand
- // side expression. We could report this as a syntax error here but
- // for compatibility with JSC we choose to report the error at
- // runtime.
- // TODO(ES5): Should change parsing for spec conformance.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
- isolate()->factory()->invalid_lhs_in_assignment_string();
- expression = NewThrowReferenceError(type);
- }
-
- if (!top_scope_->is_classic_mode()) {
- // Assignment to eval or arguments is disallowed in strict mode.
- CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
- }
- MarkAsLValue(expression);
-
- Token::Value op = Next(); // Get assignment operator.
- int pos = scanner().location().beg_pos;
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-
- // TODO(1231235): We try to estimate the set of properties set by
- // constructors. We define a new property whenever there is an
- // assignment to a property of 'this'. We should probably only add
- // properties if we haven't seen them before. Otherwise we'll
- // probably overestimate the number of properties.
- Property* property = expression ? expression->AsProperty() : NULL;
- if (op == Token::ASSIGN &&
- property != NULL &&
- property->obj()->AsVariableProxy() != NULL &&
- property->obj()->AsVariableProxy()->is_this()) {
- current_function_state_->AddProperty();
- }
-
- // If we assign a function literal to a property we pretenure the
- // literal so it can be added as a constant function property.
- if (property != NULL && right->AsFunctionLiteral() != NULL) {
- right->AsFunctionLiteral()->set_pretenure();
- }
-
- if (fni_ != NULL) {
- // Check if the right hand side is a call to avoid inferring a
- // name if we're dealing with "a = function(){...}();"-like
- // expression.
- if ((op == Token::INIT_VAR
- || op == Token::INIT_CONST
- || op == Token::ASSIGN)
- && (right->AsCall() == NULL && right->AsCallNew() == NULL)) {
- fni_->Infer();
- } else {
- fni_->RemoveLastFunction();
- }
- fni_->Leave();
- }
-
- return factory()->NewAssignment(op, expression, right, pos);
-}
-
-
-// Precedence = 3
-Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
- // ConditionalExpression ::
- // LogicalOrExpression
- // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
-
- // We start using the binary expression parser for prec >= 4 only!
- Expression* expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
- if (peek() != Token::CONDITIONAL) return expression;
- Consume(Token::CONDITIONAL);
- // In parsing the first assignment expression in conditional
- // expressions we always accept the 'in' keyword; see ECMA-262,
- // section 11.12, page 58.
- int left_position = scanner().peek_location().beg_pos;
- Expression* left = ParseAssignmentExpression(true, CHECK_OK);
- Expect(Token::COLON, CHECK_OK);
- int right_position = scanner().peek_location().beg_pos;
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- return factory()->NewConditional(
- expression, left, right, left_position, right_position);
-}
-
-
-static int Precedence(Token::Value tok, bool accept_IN) {
- if (tok == Token::IN && !accept_IN)
- return 0; // 0 precedence will terminate binary expression parsing
-
- return Token::Precedence(tok);
-}
-
-
-// Precedence >= 4
-Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
- ASSERT(prec >= 4);
- Expression* x = ParseUnaryExpression(CHECK_OK);
- for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
- // prec1 >= 4
- while (Precedence(peek(), accept_IN) == prec1) {
- Token::Value op = Next();
- int position = scanner().location().beg_pos;
- Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
-
- // Compute some expressions involving only number literals.
- if (x && x->AsLiteral() && x->AsLiteral()->handle()->IsNumber() &&
- y && y->AsLiteral() && y->AsLiteral()->handle()->IsNumber()) {
- double x_val = x->AsLiteral()->handle()->Number();
- double y_val = y->AsLiteral()->handle()->Number();
-
- switch (op) {
- case Token::ADD:
- x = factory()->NewNumberLiteral(x_val + y_val);
- continue;
- case Token::SUB:
- x = factory()->NewNumberLiteral(x_val - y_val);
- continue;
- case Token::MUL:
- x = factory()->NewNumberLiteral(x_val * y_val);
- continue;
- case Token::DIV:
- x = factory()->NewNumberLiteral(x_val / y_val);
- continue;
- case Token::BIT_OR: {
- int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value);
- continue;
- }
- case Token::BIT_AND: {
- int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value);
- continue;
- }
- case Token::BIT_XOR: {
- int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value);
- continue;
- }
- case Token::SHL: {
- int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
- x = factory()->NewNumberLiteral(value);
- continue;
- }
- case Token::SHR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
- uint32_t value = DoubleToUint32(x_val) >> shift;
- x = factory()->NewNumberLiteral(value);
- continue;
- }
- case Token::SAR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
- int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
- x = factory()->NewNumberLiteral(value);
- continue;
- }
- default:
- break;
- }
- }
-
- // For now we distinguish between comparisons and other binary
- // operations. (We could combine the two and get rid of this
- // code and AST node eventually.)
- if (Token::IsCompareOp(op)) {
- // We have a comparison.
- Token::Value cmp = op;
- switch (op) {
- case Token::NE: cmp = Token::EQ; break;
- case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
- default: break;
- }
- x = factory()->NewCompareOperation(cmp, x, y, position);
- if (cmp != op) {
- // The comparison was negated - add a NOT.
- x = factory()->NewUnaryOperation(Token::NOT, x, position);
- }
-
- } else {
- // We have a "normal" binary operation.
- x = factory()->NewBinaryOperation(op, x, y, position);
- }
- }
- }
- return x;
-}
-
-
-Expression* Parser::ParseUnaryExpression(bool* ok) {
- // UnaryExpression ::
- // PostfixExpression
- // 'delete' UnaryExpression
- // 'void' UnaryExpression
- // 'typeof' UnaryExpression
- // '++' UnaryExpression
- // '--' UnaryExpression
- // '+' UnaryExpression
- // '-' UnaryExpression
- // '~' UnaryExpression
- // '!' UnaryExpression
-
- Token::Value op = peek();
- if (Token::IsUnaryOp(op)) {
- op = Next();
- int position = scanner().location().beg_pos;
- Expression* expression = ParseUnaryExpression(CHECK_OK);
-
- if (expression != NULL && (expression->AsLiteral() != NULL)) {
- Handle<Object> literal = expression->AsLiteral()->handle();
- if (op == Token::NOT) {
- // Convert the literal to a boolean condition and negate it.
- bool condition = literal->ToBoolean()->IsTrue();
- Handle<Object> result(isolate()->heap()->ToBoolean(!condition),
- isolate());
- return factory()->NewLiteral(result);
- } else if (literal->IsNumber()) {
- // Compute some expressions involving only number literals.
- double value = literal->Number();
- switch (op) {
- case Token::ADD:
- return expression;
- case Token::SUB:
- return factory()->NewNumberLiteral(-value);
- case Token::BIT_NOT:
- return factory()->NewNumberLiteral(~DoubleToInt32(value));
- default:
- break;
- }
- }
- }
-
- // "delete identifier" is a syntax error in strict mode.
- if (op == Token::DELETE && !top_scope_->is_classic_mode()) {
- VariableProxy* operand = expression->AsVariableProxy();
- if (operand != NULL && !operand->is_this()) {
- ReportMessage("strict_delete", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- }
-
- return factory()->NewUnaryOperation(op, expression, position);
-
- } else if (Token::IsCountOp(op)) {
- op = Next();
- Expression* expression = ParseUnaryExpression(CHECK_OK);
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report the
- // error at runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
- isolate()->factory()->invalid_lhs_in_prefix_op_string();
- expression = NewThrowReferenceError(type);
- }
-
- if (!top_scope_->is_classic_mode()) {
- // Prefix expression operand in strict mode may not be eval or arguments.
- CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
- }
- MarkAsLValue(expression);
-
- int position = scanner().location().beg_pos;
- return factory()->NewCountOperation(op,
- true /* prefix */,
- expression,
- position);
-
- } else {
- return ParsePostfixExpression(ok);
- }
-}
-
-
-Expression* Parser::ParsePostfixExpression(bool* ok) {
- // PostfixExpression ::
- // LeftHandSideExpression ('++' | '--')?
-
- Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
- Token::IsCountOp(peek())) {
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report the
- // error at runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
- isolate()->factory()->invalid_lhs_in_postfix_op_string();
- expression = NewThrowReferenceError(type);
- }
-
- if (!top_scope_->is_classic_mode()) {
- // Postfix expression operand in strict mode may not be eval or arguments.
- CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
- }
- MarkAsLValue(expression);
-
- Token::Value next = Next();
- int position = scanner().location().beg_pos;
- expression =
- factory()->NewCountOperation(next,
- false /* postfix */,
- expression,
- position);
- }
- return expression;
-}
-
-
-Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
- // LeftHandSideExpression ::
- // (NewExpression | MemberExpression) ...
-
- Expression* result;
- if (peek() == Token::NEW) {
- result = ParseNewExpression(CHECK_OK);
- } else {
- result = ParseMemberExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- int pos = scanner().location().beg_pos;
- Expression* index = ParseExpression(true, CHECK_OK);
- result = factory()->NewProperty(result, index, pos);
- Expect(Token::RBRACK, CHECK_OK);
- break;
- }
-
- case Token::LPAREN: {
- int pos;
- if (scanner().current_token() == Token::IDENTIFIER) {
- // For call of an identifier we want to report position of
- // the identifier as position of the call in the stack trace.
- pos = scanner().location().beg_pos;
- } else {
- // For other kinds of calls we record position of the parenthesis as
- // position of the call. Note that this is extremely important for
- // expressions of the form function(){...}() for which call position
- // should not point to the closing brace otherwise it will intersect
- // with positions recorded for function literal and confuse debugger.
- pos = scanner().peek_location().beg_pos;
- // Also the trailing parenthesis are a hint that the function will
- // be called immediately. If we happen to have parsed a preceding
- // function literal eagerly, we can also compile it eagerly.
- if (result->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
- result->AsFunctionLiteral()->set_parenthesized();
- }
- }
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
-
- // Keep track of eval() calls since they disable all local variable
- // optimizations.
- // The calls that need special treatment are the
- // direct eval calls. These calls are all of the form eval(...), with
- // no explicit receiver.
- // These calls are marked as potentially direct eval calls. Whether
- // they are actually direct calls to eval is determined at run time.
- VariableProxy* callee = result->AsVariableProxy();
- if (callee != NULL &&
- callee->IsVariable(isolate()->factory()->eval_string())) {
- top_scope_->DeclarationScope()->RecordEvalCall();
- }
- result = factory()->NewCall(result, args, pos);
- break;
- }
-
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- int pos = scanner().location().beg_pos;
- Handle<String> name = ParseIdentifierName(CHECK_OK);
- result =
- factory()->NewProperty(result, factory()->NewLiteral(name), pos);
- if (fni_ != NULL) fni_->PushLiteralName(name);
- break;
- }
-
- default:
- return result;
- }
- }
-}
-
-
-Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
- // NewExpression ::
- // ('new')+ MemberExpression
-
- // The grammar for new expressions is pretty warped. The keyword
- // 'new' can either be a part of the new expression (where it isn't
- // followed by an argument list) or a part of the member expression,
- // where it must be followed by an argument list. To accommodate
- // this, we parse the 'new' keywords greedily and keep track of how
- // many we have parsed. This information is then passed on to the
- // member expression parser, which is only allowed to match argument
- // lists as long as it has 'new' prefixes left
- Expect(Token::NEW, CHECK_OK);
- PositionStack::Element pos(stack, scanner().location().beg_pos);
-
- Expression* result;
- if (peek() == Token::NEW) {
- result = ParseNewPrefix(stack, CHECK_OK);
- } else {
- result = ParseMemberWithNewPrefixesExpression(stack, CHECK_OK);
- }
-
- if (!stack->is_empty()) {
- int last = stack->pop();
- result = factory()->NewCallNew(
- result, new(zone()) ZoneList<Expression*>(0, zone()), last);
- }
- return result;
-}
-
-
-Expression* Parser::ParseNewExpression(bool* ok) {
- PositionStack stack(ok);
- return ParseNewPrefix(&stack, ok);
-}
-
-
-Expression* Parser::ParseMemberExpression(bool* ok) {
- return ParseMemberWithNewPrefixesExpression(NULL, ok);
-}
-
-
-Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
- bool* ok) {
- // MemberExpression ::
- // (PrimaryExpression | FunctionLiteral)
- // ('[' Expression ']' | '.' Identifier | Arguments)*
-
- // Parse the initial primary or function expression.
- Expression* result = NULL;
- if (peek() == Token::FUNCTION) {
- Expect(Token::FUNCTION, CHECK_OK);
- int function_token_position = scanner().location().beg_pos;
- Handle<String> name;
- bool is_strict_reserved_name = false;
- if (peek_any_identifier()) {
- name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
- CHECK_OK);
- }
- FunctionLiteral::Type type = name.is_null()
- ? FunctionLiteral::ANONYMOUS_EXPRESSION
- : FunctionLiteral::NAMED_EXPRESSION;
- result = ParseFunctionLiteral(name,
- is_strict_reserved_name,
- function_token_position,
- type,
- CHECK_OK);
- } else {
- result = ParsePrimaryExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- int pos = scanner().location().beg_pos;
- Expression* index = ParseExpression(true, CHECK_OK);
- result = factory()->NewProperty(result, index, pos);
- if (fni_ != NULL) {
- if (index->IsPropertyName()) {
- fni_->PushLiteralName(index->AsLiteral()->AsPropertyName());
- } else {
- fni_->PushLiteralName(
- isolate()->factory()->anonymous_function_string());
- }
- }
- Expect(Token::RBRACK, CHECK_OK);
- break;
- }
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- int pos = scanner().location().beg_pos;
- Handle<String> name = ParseIdentifierName(CHECK_OK);
- result =
- factory()->NewProperty(result, factory()->NewLiteral(name), pos);
- if (fni_ != NULL) fni_->PushLiteralName(name);
- break;
- }
- case Token::LPAREN: {
- if ((stack == NULL) || stack->is_empty()) return result;
- // Consume one of the new prefixes (already parsed).
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
- int last = stack->pop();
- result = factory()->NewCallNew(result, args, last);
- break;
- }
- default:
- return result;
- }
- }
-}
-
-
-DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
- // In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
- // contexts this is used as a statement which invokes the debugger as i a
- // break point is present.
- // DebuggerStatement ::
- // 'debugger' ';'
-
- Expect(Token::DEBUGGER, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return factory()->NewDebuggerStatement();
-}
-
-
-void Parser::ReportUnexpectedToken(Token::Value token) {
- // We don't report stack overflows here, to avoid increasing the
- // stack depth even further. Instead we report it after parsing is
- // over, in ParseProgram/ParseJson.
- if (token == Token::ILLEGAL && stack_overflow_) return;
- // Four of the tokens are treated specially
- switch (token) {
- case Token::EOS:
- return ReportMessage("unexpected_eos", Vector<const char*>::empty());
- case Token::NUMBER:
- return ReportMessage("unexpected_token_number",
- Vector<const char*>::empty());
- case Token::STRING:
- return ReportMessage("unexpected_token_string",
- Vector<const char*>::empty());
- case Token::IDENTIFIER:
- return ReportMessage("unexpected_token_identifier",
- Vector<const char*>::empty());
- case Token::FUTURE_RESERVED_WORD:
- return ReportMessage("unexpected_reserved",
- Vector<const char*>::empty());
- case Token::FUTURE_STRICT_RESERVED_WORD:
- return ReportMessage(top_scope_->is_classic_mode() ?
- "unexpected_token_identifier" :
- "unexpected_strict_reserved",
- Vector<const char*>::empty());
- default:
- const char* name = Token::String(token);
- ASSERT(name != NULL);
- ReportMessage("unexpected_token", Vector<const char*>(&name, 1));
- }
-}
-
-
-void Parser::ReportInvalidPreparseData(Handle<String> name, bool* ok) {
- SmartArrayPointer<char> name_string = name->ToCString(DISALLOW_NULLS);
- const char* element[1] = { *name_string };
- ReportMessage("invalid_preparser_data",
- Vector<const char*>(element, 1));
- *ok = false;
-}
-
-
-Expression* Parser::ParsePrimaryExpression(bool* ok) {
- // PrimaryExpression ::
- // 'this'
- // 'null'
- // 'true'
- // 'false'
- // Identifier
- // Number
- // String
- // ArrayLiteral
- // ObjectLiteral
- // RegExpLiteral
- // '(' Expression ')'
-
- Expression* result = NULL;
- switch (peek()) {
- case Token::THIS: {
- Consume(Token::THIS);
- result = factory()->NewVariableProxy(top_scope_->receiver());
- break;
- }
-
- case Token::NULL_LITERAL:
- Consume(Token::NULL_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->null_value());
- break;
-
- case Token::TRUE_LITERAL:
- Consume(Token::TRUE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->true_value());
- break;
-
- case Token::FALSE_LITERAL:
- Consume(Token::FALSE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->false_value());
- break;
-
- case Token::IDENTIFIER:
- case Token::FUTURE_STRICT_RESERVED_WORD: {
- Handle<String> name = ParseIdentifier(CHECK_OK);
- if (fni_ != NULL) fni_->PushVariableName(name);
- // The name may refer to a module instance object, so its type is unknown.
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Variable %s ", name->ToAsciiArray());
-#endif
- Interface* interface = Interface::NewUnknown(zone());
- result = top_scope_->NewUnresolved(
- factory(), name, interface, scanner().location().beg_pos);
- break;
- }
-
- case Token::NUMBER: {
- Consume(Token::NUMBER);
- ASSERT(scanner().is_literal_ascii());
- double value = StringToDouble(isolate()->unicode_cache(),
- scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTALS);
- result = factory()->NewNumberLiteral(value);
- break;
- }
-
- case Token::STRING: {
- Consume(Token::STRING);
- Handle<String> symbol = GetSymbol(CHECK_OK);
- result = factory()->NewLiteral(symbol);
- if (fni_ != NULL) fni_->PushLiteralName(symbol);
- break;
- }
-
- case Token::ASSIGN_DIV:
- result = ParseRegExpLiteral(true, CHECK_OK);
- break;
-
- case Token::DIV:
- result = ParseRegExpLiteral(false, CHECK_OK);
- break;
-
- case Token::LBRACK:
- result = ParseArrayLiteral(CHECK_OK);
- break;
-
- case Token::LBRACE:
- result = ParseObjectLiteral(CHECK_OK);
- break;
-
- case Token::LPAREN:
- Consume(Token::LPAREN);
- // Heuristically try to detect immediately called functions before
- // seeing the call parentheses.
- parenthesized_function_ = (peek() == Token::FUNCTION);
- result = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- break;
-
- case Token::MOD:
- if (allow_natives_syntax_ || extension_ != NULL) {
- result = ParseV8Intrinsic(CHECK_OK);
- break;
- }
- // If we're not allowing special syntax we fall-through to the
- // default case.
-
- default: {
- Token::Value tok = Next();
- ReportUnexpectedToken(tok);
- *ok = false;
- return NULL;
- }
- }
-
- return result;
-}
-
-
-void Parser::BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* values,
- Handle<FixedArray> literals,
- bool* is_simple,
- int* depth) {
- // Fill in the literals.
- // Accumulate output values in local variables.
- bool is_simple_acc = true;
- int depth_acc = 1;
- for (int i = 0; i < values->length(); i++) {
- MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() >= depth_acc) {
- depth_acc = m_literal->depth() + 1;
- }
- Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
- if (boilerplate_value->IsUndefined()) {
- literals->set_the_hole(i);
- is_simple_acc = false;
- } else {
- literals->set(i, *boilerplate_value);
- }
- }
-
- *is_simple = is_simple_acc;
- *depth = depth_acc;
-}
-
-
-Expression* Parser::ParseArrayLiteral(bool* ok) {
- // ArrayLiteral ::
- // '[' Expression? (',' Expression?)* ']'
-
- ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4, zone());
- Expect(Token::LBRACK, CHECK_OK);
- while (peek() != Token::RBRACK) {
- Expression* elem;
- if (peek() == Token::COMMA) {
- elem = GetLiteralTheHole();
- } else {
- elem = ParseAssignmentExpression(true, CHECK_OK);
- }
- values->Add(elem, zone());
- if (peek() != Token::RBRACK) {
- Expect(Token::COMMA, CHECK_OK);
- }
- }
- Expect(Token::RBRACK, CHECK_OK);
-
- // Update the scope information before the pre-parsing bailout.
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- // Allocate a fixed array to hold all the object literals.
- Handle<JSArray> array =
- isolate()->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
- isolate()->factory()->SetElementsCapacityAndLength(
- array, values->length(), values->length());
-
- // Fill in the literals.
- Heap* heap = isolate()->heap();
- bool is_simple = true;
- int depth = 1;
- bool is_holey = false;
- for (int i = 0, n = values->length(); i < n; i++) {
- MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() + 1 > depth) {
- depth = m_literal->depth() + 1;
- }
- Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
- if (boilerplate_value->IsTheHole()) {
- is_holey = true;
- } else if (boilerplate_value->IsUndefined()) {
- is_simple = false;
- JSObject::SetOwnElement(
- array, i, handle(Smi::FromInt(0), isolate()), kNonStrictMode);
- } else {
- JSObject::SetOwnElement(array, i, boilerplate_value, kNonStrictMode);
- }
- }
-
- Handle<FixedArrayBase> element_values(array->elements());
-
- // Simple and shallow arrays can be lazily copied, we transform the
- // elements array to a copy-on-write array.
- if (is_simple && depth == 1 && values->length() > 0 &&
- array->HasFastSmiOrObjectElements()) {
- element_values->set_map(heap->fixed_cow_array_map());
- }
-
- // Remember both the literal's constant values as well as the ElementsKind
- // in a 2-element FixedArray.
- Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(2, TENURED);
-
- ElementsKind kind = array->GetElementsKind();
- kind = is_holey ? GetHoleyElementsKind(kind) : GetPackedElementsKind(kind);
-
- literals->set(0, Smi::FromInt(kind));
- literals->set(1, *element_values);
-
- return factory()->NewArrayLiteral(
- literals, values, literal_index, is_simple, depth);
-}
-
-
-bool Parser::IsBoilerplateProperty(ObjectLiteral::Property* property) {
- return property != NULL &&
- property->kind() != ObjectLiteral::Property::PROTOTYPE;
-}
-
-
-bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
- if (expression->AsLiteral() != NULL) return true;
- MaterializedLiteral* lit = expression->AsMaterializedLiteral();
- return lit != NULL && lit->is_simple();
-}
-
-
-bool CompileTimeValue::ArrayLiteralElementNeedsInitialization(
- Expression* value) {
- // If value is a literal the property value is already set in the
- // boilerplate object.
- if (value->AsLiteral() != NULL) return false;
- // If value is a materialized literal the property value is already set
- // in the boilerplate object if it is simple.
- if (CompileTimeValue::IsCompileTimeValue(value)) return false;
- return true;
-}
-
-
-Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
- ASSERT(IsCompileTimeValue(expression));
- Handle<FixedArray> result = FACTORY->NewFixedArray(2, TENURED);
- ObjectLiteral* object_literal = expression->AsObjectLiteral();
- if (object_literal != NULL) {
- ASSERT(object_literal->is_simple());
- if (object_literal->fast_elements()) {
- result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
- } else {
- result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL_SLOW_ELEMENTS));
- }
- result->set(kElementsSlot, *object_literal->constant_properties());
- } else {
- ArrayLiteral* array_literal = expression->AsArrayLiteral();
- ASSERT(array_literal != NULL && array_literal->is_simple());
- result->set(kTypeSlot, Smi::FromInt(ARRAY_LITERAL));
- result->set(kElementsSlot, *array_literal->constant_elements());
- }
- return result;
-}
-
-
-CompileTimeValue::Type CompileTimeValue::GetType(Handle<FixedArray> value) {
- Smi* type_value = Smi::cast(value->get(kTypeSlot));
- return static_cast<Type>(type_value->value());
-}
-
-
-Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
- return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
-}
-
-
-Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
- if (expression->AsLiteral() != NULL) {
- return expression->AsLiteral()->handle();
- }
- if (CompileTimeValue::IsCompileTimeValue(expression)) {
- return CompileTimeValue::GetValue(expression);
- }
- return isolate()->factory()->undefined_value();
-}
-
-// Validation per 11.1.5 Object Initialiser
-class ObjectLiteralPropertyChecker {
- public:
- ObjectLiteralPropertyChecker(Parser* parser, LanguageMode language_mode) :
- props_(Literal::Match),
- parser_(parser),
- language_mode_(language_mode) {
- }
-
- void CheckProperty(
- ObjectLiteral::Property* property,
- Scanner::Location loc,
- bool* ok);
-
- private:
- enum PropertyKind {
- kGetAccessor = 0x01,
- kSetAccessor = 0x02,
- kAccessor = kGetAccessor | kSetAccessor,
- kData = 0x04
- };
-
- static intptr_t GetPropertyKind(ObjectLiteral::Property* property) {
- switch (property->kind()) {
- case ObjectLiteral::Property::GETTER:
- return kGetAccessor;
- case ObjectLiteral::Property::SETTER:
- return kSetAccessor;
- default:
- return kData;
- }
- }
-
- HashMap props_;
- Parser* parser_;
- LanguageMode language_mode_;
-};
-
-
-void ObjectLiteralPropertyChecker::CheckProperty(
- ObjectLiteral::Property* property,
- Scanner::Location loc,
- bool* ok) {
- ASSERT(property != NULL);
- Literal* literal = property->key();
- HashMap::Entry* entry = props_.Lookup(literal, literal->Hash(), true);
- intptr_t prev = reinterpret_cast<intptr_t> (entry->value);
- intptr_t curr = GetPropertyKind(property);
-
- // Duplicate data properties are illegal in strict or extended mode.
- if (language_mode_ != CLASSIC_MODE && (curr & prev & kData) != 0) {
- parser_->ReportMessageAt(loc, "strict_duplicate_property",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
- // Data property conflicting with an accessor.
- if (((curr & kData) && (prev & kAccessor)) ||
- ((prev & kData) && (curr & kAccessor))) {
- parser_->ReportMessageAt(loc, "accessor_data_property",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
- // Two accessors of the same type conflicting
- if ((curr & prev & kAccessor) != 0) {
- parser_->ReportMessageAt(loc, "accessor_get_set",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
-
- // Update map
- entry->value = reinterpret_cast<void*> (prev | curr);
- *ok = true;
-}
-
-
-void Parser::BuildObjectLiteralConstantProperties(
- ZoneList<ObjectLiteral::Property*>* properties,
- Handle<FixedArray> constant_properties,
- bool* is_simple,
- bool* fast_elements,
- int* depth) {
- int position = 0;
- // Accumulate the value in local variables and store it at the end.
- bool is_simple_acc = true;
- int depth_acc = 1;
- uint32_t max_element_index = 0;
- uint32_t elements = 0;
- for (int i = 0; i < properties->length(); i++) {
- ObjectLiteral::Property* property = properties->at(i);
- if (!IsBoilerplateProperty(property)) {
- is_simple_acc = false;
- continue;
- }
- MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() >= depth_acc) {
- depth_acc = m_literal->depth() + 1;
- }
-
- // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
- // value for COMPUTED properties, the real value is filled in at
- // runtime. The enumeration order is maintained.
- Handle<Object> key = property->key()->handle();
- Handle<Object> value = GetBoilerplateValue(property->value());
- is_simple_acc = is_simple_acc && !value->IsUndefined();
-
- // Keep track of the number of elements in the object literal and
- // the largest element index. If the largest element index is
- // much larger than the number of elements, creating an object
- // literal with fast elements will be a waste of space.
- uint32_t element_index = 0;
- if (key->IsString()
- && Handle<String>::cast(key)->AsArrayIndex(&element_index)
- && element_index > max_element_index) {
- max_element_index = element_index;
- elements++;
- } else if (key->IsSmi()) {
- int key_value = Smi::cast(*key)->value();
- if (key_value > 0
- && static_cast<uint32_t>(key_value) > max_element_index) {
- max_element_index = key_value;
- }
- elements++;
- }
-
- // Add name, value pair to the fixed array.
- constant_properties->set(position++, *key);
- constant_properties->set(position++, *value);
- }
- *fast_elements =
- (max_element_index <= 32) || ((2 * elements) >= max_element_index);
- *is_simple = is_simple_acc;
- *depth = depth_acc;
-}
-
-
-ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
- bool* ok) {
- // Special handling of getter and setter syntax:
- // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
- // We have already read the "get" or "set" keyword.
- Token::Value next = Next();
- bool is_keyword = Token::IsKeyword(next);
- if (next == Token::IDENTIFIER || next == Token::NUMBER ||
- next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD ||
- next == Token::STRING || is_keyword) {
- Handle<String> name;
- if (is_keyword) {
- name = isolate_->factory()->InternalizeUtf8String(Token::String(next));
- } else {
- name = GetSymbol(CHECK_OK);
- }
- FunctionLiteral* value =
- ParseFunctionLiteral(name,
- false, // reserved words are allowed here
- RelocInfo::kNoPosition,
- FunctionLiteral::ANONYMOUS_EXPRESSION,
- CHECK_OK);
- // Allow any number of parameters for compatibilty with JSC.
- // Specification only allows zero parameters for get and one for set.
- return factory()->NewObjectLiteralProperty(is_getter, value);
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return NULL;
- }
-}
-
-
-Expression* Parser::ParseObjectLiteral(bool* ok) {
- // ObjectLiteral ::
- // '{' (
- // ((IdentifierName | String | Number) ':' AssignmentExpression)
- // | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
- // )*[','] '}'
-
- ZoneList<ObjectLiteral::Property*>* properties =
- new(zone()) ZoneList<ObjectLiteral::Property*>(4, zone());
- int number_of_boilerplate_properties = 0;
- bool has_function = false;
-
- ObjectLiteralPropertyChecker checker(this, top_scope_->language_mode());
-
- Expect(Token::LBRACE, CHECK_OK);
-
- while (peek() != Token::RBRACE) {
- if (fni_ != NULL) fni_->Enter();
-
- Literal* key = NULL;
- Token::Value next = peek();
-
- // Location of the property name token
- Scanner::Location loc = scanner().peek_location();
-
- switch (next) {
- case Token::FUTURE_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- case Token::IDENTIFIER: {
- bool is_getter = false;
- bool is_setter = false;
- Handle<String> id =
- ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
- if (fni_ != NULL) fni_->PushLiteralName(id);
-
- if ((is_getter || is_setter) && peek() != Token::COLON) {
- // Update loc to point to the identifier
- loc = scanner().peek_location();
- ObjectLiteral::Property* property =
- ParseObjectLiteralGetSet(is_getter, CHECK_OK);
- if (IsBoilerplateProperty(property)) {
- number_of_boilerplate_properties++;
- }
- // Validate the property.
- checker.CheckProperty(property, loc, CHECK_OK);
- properties->Add(property, zone());
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- continue; // restart the while
- }
- // Failed to parse as get/set property, so it's just a property
- // called "get" or "set".
- key = factory()->NewLiteral(id);
- break;
- }
- case Token::STRING: {
- Consume(Token::STRING);
- Handle<String> string = GetSymbol(CHECK_OK);
- if (fni_ != NULL) fni_->PushLiteralName(string);
- uint32_t index;
- if (!string.is_null() && string->AsArrayIndex(&index)) {
- key = factory()->NewNumberLiteral(index);
- break;
- }
- key = factory()->NewLiteral(string);
- break;
- }
- case Token::NUMBER: {
- Consume(Token::NUMBER);
- ASSERT(scanner().is_literal_ascii());
- double value = StringToDouble(isolate()->unicode_cache(),
- scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTALS);
- key = factory()->NewNumberLiteral(value);
- break;
- }
- default:
- if (Token::IsKeyword(next)) {
- Consume(next);
- Handle<String> string = GetSymbol(CHECK_OK);
- key = factory()->NewLiteral(string);
- } else {
- // Unexpected token.
- Token::Value next = Next();
- ReportUnexpectedToken(next);
- *ok = false;
- return NULL;
- }
- }
-
- Expect(Token::COLON, CHECK_OK);
- Expression* value = ParseAssignmentExpression(true, CHECK_OK);
-
- ObjectLiteral::Property* property =
- new(zone()) ObjectLiteral::Property(key, value, isolate());
-
- // Mark top-level object literals that contain function literals and
- // pretenure the literal so it can be added as a constant function
- // property.
- if (top_scope_->DeclarationScope()->is_global_scope() &&
- value->AsFunctionLiteral() != NULL) {
- has_function = true;
- value->AsFunctionLiteral()->set_pretenure();
- }
-
- // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
- if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;
- // Validate the property
- checker.CheckProperty(property, loc, CHECK_OK);
- properties->Add(property, zone());
-
- // TODO(1240767): Consider allowing trailing comma.
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- }
- Expect(Token::RBRACE, CHECK_OK);
-
- // Computation of literal_index must happen before pre parse bailout.
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- Handle<FixedArray> constant_properties = isolate()->factory()->NewFixedArray(
- number_of_boilerplate_properties * 2, TENURED);
-
- bool is_simple = true;
- bool fast_elements = true;
- int depth = 1;
- BuildObjectLiteralConstantProperties(properties,
- constant_properties,
- &is_simple,
- &fast_elements,
- &depth);
- return factory()->NewObjectLiteral(constant_properties,
- properties,
- literal_index,
- is_simple,
- fast_elements,
- depth,
- has_function);
-}
-
-
-Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
- if (!scanner().ScanRegExpPattern(seen_equal)) {
- Next();
- ReportMessage("unterminated_regexp", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- Handle<String> js_pattern = NextLiteralString(TENURED);
- scanner().ScanRegExpFlags();
- Handle<String> js_flags = NextLiteralString(TENURED);
- Next();
-
- return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index);
-}
-
-
-ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
- // Arguments ::
- // '(' (AssignmentExpression)*[','] ')'
-
- ZoneList<Expression*>* result = new(zone()) ZoneList<Expression*>(4, zone());
- Expect(Token::LPAREN, CHECK_OK);
- bool done = (peek() == Token::RPAREN);
- while (!done) {
- Expression* argument = ParseAssignmentExpression(true, CHECK_OK);
- result->Add(argument, zone());
- if (result->length() > kMaxNumFunctionParameters) {
- ReportMessageAt(scanner().location(), "too_many_arguments",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- done = (peek() == Token::RPAREN);
- if (!done) Expect(Token::COMMA, CHECK_OK);
- }
- Expect(Token::RPAREN, CHECK_OK);
- return result;
-}
-
-
-class SingletonLogger : public ParserRecorder {
- public:
- SingletonLogger() : has_error_(false), start_(-1), end_(-1) { }
- virtual ~SingletonLogger() { }
-
- void Reset() { has_error_ = false; }
-
- virtual void LogFunction(int start,
- int end,
- int literals,
- int properties,
- LanguageMode mode) {
- ASSERT(!has_error_);
- start_ = start;
- end_ = end;
- literals_ = literals;
- properties_ = properties;
- mode_ = mode;
- };
-
- // Logs a symbol creation of a literal or identifier.
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
-
- // Logs an error message and marks the log as containing an error.
- // Further logging will be ignored, and ExtractData will return a vector
- // representing the error only.
- virtual void LogMessage(int start,
- int end,
- const char* message,
- const char* argument_opt) {
- if (has_error_) return;
- has_error_ = true;
- start_ = start;
- end_ = end;
- message_ = message;
- argument_opt_ = argument_opt;
- }
-
- virtual int function_position() { return 0; }
-
- virtual int symbol_position() { return 0; }
-
- virtual int symbol_ids() { return -1; }
-
- virtual Vector<unsigned> ExtractData() {
- UNREACHABLE();
- return Vector<unsigned>();
- }
-
- virtual void PauseRecording() { }
-
- virtual void ResumeRecording() { }
-
- bool has_error() { return has_error_; }
-
- int start() { return start_; }
- int end() { return end_; }
- int literals() {
- ASSERT(!has_error_);
- return literals_;
- }
- int properties() {
- ASSERT(!has_error_);
- return properties_;
- }
- LanguageMode language_mode() {
- ASSERT(!has_error_);
- return mode_;
- }
- const char* message() {
- ASSERT(has_error_);
- return message_;
- }
- const char* argument_opt() {
- ASSERT(has_error_);
- return argument_opt_;
- }
-
- private:
- bool has_error_;
- int start_;
- int end_;
- // For function entries.
- int literals_;
- int properties_;
- LanguageMode mode_;
- // For error messages.
- const char* message_;
- const char* argument_opt_;
-};
-
-
-FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
- bool name_is_strict_reserved,
- int function_token_position,
- FunctionLiteral::Type type,
- bool* ok) {
- // Function ::
- // '(' FormalParameterList? ')' '{' FunctionBody '}'
-
- // Anonymous functions were passed either the empty symbol or a null
- // handle as the function name. Remember if we were passed a non-empty
- // handle to decide whether to invoke function name inference.
- bool should_infer_name = function_name.is_null();
-
- // We want a non-null handle as the function name.
- if (should_infer_name) {
- function_name = isolate()->factory()->empty_string();
- }
-
- int num_parameters = 0;
- // Function declarations are function scoped in normal mode, so they are
- // hoisted. In harmony block scoping mode they are block scoped, so they
- // are not hoisted.
- Scope* scope = (type == FunctionLiteral::DECLARATION && !is_extended_mode())
- ? NewScope(top_scope_->DeclarationScope(), FUNCTION_SCOPE)
- : NewScope(top_scope_, FUNCTION_SCOPE);
- ZoneList<Statement*>* body = NULL;
- int materialized_literal_count = -1;
- int expected_property_count = -1;
- int handler_count = 0;
- bool only_simple_this_property_assignments;
- Handle<FixedArray> this_property_assignments;
- FunctionLiteral::ParameterFlag duplicate_parameters =
- FunctionLiteral::kNoDuplicateParameters;
- FunctionLiteral::IsParenthesizedFlag parenthesized = parenthesized_function_
- ? FunctionLiteral::kIsParenthesized
- : FunctionLiteral::kNotParenthesized;
- AstProperties ast_properties;
- // Parse function body.
- { FunctionState function_state(this, scope, isolate());
- top_scope_->SetScopeName(function_name);
-
- // FormalParameterList ::
- // '(' (Identifier)*[','] ')'
- Expect(Token::LPAREN, CHECK_OK);
- scope->set_start_position(scanner().location().beg_pos);
- Scanner::Location name_loc = Scanner::Location::invalid();
- Scanner::Location dupe_loc = Scanner::Location::invalid();
- Scanner::Location reserved_loc = Scanner::Location::invalid();
-
- bool done = (peek() == Token::RPAREN);
- while (!done) {
- bool is_strict_reserved = false;
- Handle<String> param_name =
- ParseIdentifierOrStrictReservedWord(&is_strict_reserved,
- CHECK_OK);
-
- // Store locations for possible future error reports.
- if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
- name_loc = scanner().location();
- }
- if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
- duplicate_parameters = FunctionLiteral::kHasDuplicateParameters;
- dupe_loc = scanner().location();
- }
- if (!reserved_loc.IsValid() && is_strict_reserved) {
- reserved_loc = scanner().location();
- }
-
- top_scope_->DeclareParameter(param_name, VAR);
- num_parameters++;
- if (num_parameters > kMaxNumFunctionParameters) {
- ReportMessageAt(scanner().location(), "too_many_parameters",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- done = (peek() == Token::RPAREN);
- if (!done) Expect(Token::COMMA, CHECK_OK);
- }
- Expect(Token::RPAREN, CHECK_OK);
-
- Expect(Token::LBRACE, CHECK_OK);
-
- // If we have a named function expression, we add a local variable
- // declaration to the body of the function with the name of the
- // function and let it refer to the function itself (closure).
- // NOTE: We create a proxy and resolve it here so that in the
- // future we can change the AST to only refer to VariableProxies
- // instead of Variables and Proxis as is the case now.
- Variable* fvar = NULL;
- Token::Value fvar_init_op = Token::INIT_CONST;
- if (type == FunctionLiteral::NAMED_EXPRESSION) {
- if (is_extended_mode()) fvar_init_op = Token::INIT_CONST_HARMONY;
- VariableMode fvar_mode = is_extended_mode() ? CONST_HARMONY : CONST;
- fvar = new(zone()) Variable(top_scope_,
- function_name, fvar_mode, true /* is valid LHS */,
- Variable::NORMAL, kCreatedInitialized, Interface::NewConst());
- VariableProxy* proxy = factory()->NewVariableProxy(fvar);
- VariableDeclaration* fvar_declaration =
- factory()->NewVariableDeclaration(proxy, fvar_mode, top_scope_);
- top_scope_->DeclareFunctionVar(fvar_declaration);
- }
-
- // Determine whether the function will be lazily compiled.
- // The heuristics are:
- // - It must not have been prohibited by the caller to Parse (some callers
- // need a full AST).
- // - The outer scope must allow lazy compilation of inner functions.
- // - The function mustn't be a function expression with an open parenthesis
- // before; we consider that a hint that the function will be called
- // immediately, and it would be a waste of time to make it lazily
- // compiled.
- // These are all things we can know at this point, without looking at the
- // function itself.
- bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
- top_scope_->AllowsLazyCompilation() &&
- !parenthesized_function_);
- parenthesized_function_ = false; // The bit was set for this function only.
-
- if (is_lazily_compiled) {
- int function_block_pos = scanner().location().beg_pos;
- FunctionEntry entry;
- if (pre_data_ != NULL) {
- // If we have pre_data_, we use it to skip parsing the function body.
- // the preparser data contains the information we need to construct the
- // lazy function.
- entry = pre_data()->GetFunctionEntry(function_block_pos);
- if (entry.is_valid()) {
- if (entry.end_pos() <= function_block_pos) {
- // End position greater than end of stream is safe, and hard
- // to check.
- ReportInvalidPreparseData(function_name, CHECK_OK);
- }
- scanner().SeekForward(entry.end_pos() - 1);
-
- scope->set_end_position(entry.end_pos());
- Expect(Token::RBRACE, CHECK_OK);
- isolate()->counters()->total_preparse_skipped()->Increment(
- scope->end_position() - function_block_pos);
- materialized_literal_count = entry.literal_count();
- expected_property_count = entry.property_count();
- top_scope_->SetLanguageMode(entry.language_mode());
- only_simple_this_property_assignments = false;
- this_property_assignments = isolate()->factory()->empty_fixed_array();
- } else {
- is_lazily_compiled = false;
- }
- } else {
- // With no preparser data, we partially parse the function, without
- // building an AST. This gathers the data needed to build a lazy
- // function.
- SingletonLogger logger;
- preparser::PreParser::PreParseResult result =
- LazyParseFunctionLiteral(&logger);
- if (result == preparser::PreParser::kPreParseStackOverflow) {
- // Propagate stack overflow.
- stack_overflow_ = true;
- *ok = false;
- return NULL;
- }
- if (logger.has_error()) {
- const char* arg = logger.argument_opt();
- Vector<const char*> args;
- if (arg != NULL) {
- args = Vector<const char*>(&arg, 1);
- }
- ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
- logger.message(), args);
- *ok = false;
- return NULL;
- }
- scope->set_end_position(logger.end());
- Expect(Token::RBRACE, CHECK_OK);
- isolate()->counters()->total_preparse_skipped()->Increment(
- scope->end_position() - function_block_pos);
- materialized_literal_count = logger.literals();
- expected_property_count = logger.properties();
- top_scope_->SetLanguageMode(logger.language_mode());
- only_simple_this_property_assignments = false;
- this_property_assignments = isolate()->factory()->empty_fixed_array();
- }
- }
-
- if (!is_lazily_compiled) {
- ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
- body = new(zone()) ZoneList<Statement*>(8, zone());
- if (fvar != NULL) {
- VariableProxy* fproxy = top_scope_->NewUnresolved(
- factory(), function_name, Interface::NewConst());
- fproxy->BindTo(fvar);
- body->Add(factory()->NewExpressionStatement(
- factory()->NewAssignment(fvar_init_op,
- fproxy,
- factory()->NewThisFunction(),
- RelocInfo::kNoPosition)),
- zone());
- }
- ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK);
-
- materialized_literal_count = function_state.materialized_literal_count();
- expected_property_count = function_state.expected_property_count();
- handler_count = function_state.handler_count();
- only_simple_this_property_assignments =
- function_state.only_simple_this_property_assignments();
- this_property_assignments = function_state.this_property_assignments();
-
- Expect(Token::RBRACE, CHECK_OK);
- scope->set_end_position(scanner().location().end_pos);
- }
-
- // Validate strict mode.
- if (!top_scope_->is_classic_mode()) {
- if (IsEvalOrArguments(function_name)) {
- int start_pos = scope->start_position();
- int position = function_token_position != RelocInfo::kNoPosition
- ? function_token_position
- : (start_pos > 0 ? start_pos - 1 : start_pos);
- Scanner::Location location = Scanner::Location(position, start_pos);
- ReportMessageAt(location,
- "strict_function_name", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- if (name_loc.IsValid()) {
- ReportMessageAt(name_loc, "strict_param_name",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- if (dupe_loc.IsValid()) {
- ReportMessageAt(dupe_loc, "strict_param_dupe",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- if (name_is_strict_reserved) {
- int start_pos = scope->start_position();
- int position = function_token_position != RelocInfo::kNoPosition
- ? function_token_position
- : (start_pos > 0 ? start_pos - 1 : start_pos);
- Scanner::Location location = Scanner::Location(position, start_pos);
- ReportMessageAt(location, "strict_reserved_word",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- if (reserved_loc.IsValid()) {
- ReportMessageAt(reserved_loc, "strict_reserved_word",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- CheckOctalLiteral(scope->start_position(),
- scope->end_position(),
- CHECK_OK);
- }
- ast_properties = *factory()->visitor()->ast_properties();
- }
-
- if (is_extended_mode()) {
- CheckConflictingVarDeclarations(scope, CHECK_OK);
- }
-
- FunctionLiteral* function_literal =
- factory()->NewFunctionLiteral(function_name,
- scope,
- body,
- materialized_literal_count,
- expected_property_count,
- handler_count,
- only_simple_this_property_assignments,
- this_property_assignments,
- num_parameters,
- duplicate_parameters,
- type,
- FunctionLiteral::kIsFunction,
- parenthesized);
- function_literal->set_function_token_position(function_token_position);
- function_literal->set_ast_properties(&ast_properties);
-
- if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
- return function_literal;
-}
-
-
-preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
- SingletonLogger* logger) {
- HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
- ASSERT_EQ(Token::LBRACE, scanner().current_token());
-
- if (reusable_preparser_ == NULL) {
- intptr_t stack_limit = isolate()->stack_guard()->real_climit();
- bool do_allow_lazy = true;
- reusable_preparser_ = new preparser::PreParser(&scanner_,
- NULL,
- stack_limit,
- do_allow_lazy,
- allow_natives_syntax_,
- allow_modules_);
- }
- preparser::PreParser::PreParseResult result =
- reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
- logger);
- return result;
-}
-
-
-Expression* Parser::ParseV8Intrinsic(bool* ok) {
- // CallRuntime ::
- // '%' Identifier Arguments
-
- Expect(Token::MOD, CHECK_OK);
- Handle<String> name = ParseIdentifier(CHECK_OK);
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
-
- if (extension_ != NULL) {
- // The extension structures are only accessible while parsing the
- // very first time not when reparsing because of lazy compilation.
- top_scope_->DeclarationScope()->ForceEagerCompilation();
- }
-
- const Runtime::Function* function = Runtime::FunctionForName(name);
-
- // Check for built-in IS_VAR macro.
- if (function != NULL &&
- function->intrinsic_type == Runtime::RUNTIME &&
- function->function_id == Runtime::kIS_VAR) {
- // %IS_VAR(x) evaluates to x if x is a variable,
- // leads to a parse error otherwise. Could be implemented as an
- // inline function %_IS_VAR(x) to eliminate this special case.
- if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
- return args->at(0);
- } else {
- ReportMessage("unable_to_parse", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- }
-
- // Check that the expected number of arguments are being passed.
- if (function != NULL &&
- function->nargs != -1 &&
- function->nargs != args->length()) {
- ReportMessage("illegal_access", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- // Check that the function is defined if it's an inline runtime call.
- if (function == NULL && name->Get(0) == '_') {
- ReportMessage("not_defined", Vector<Handle<String> >(&name, 1));
- *ok = false;
- return NULL;
- }
-
- // We have a valid intrinsics call or a call to a builtin.
- return factory()->NewCallRuntime(name, function, args);
-}
-
-
-bool Parser::peek_any_identifier() {
- Token::Value next = peek();
- return next == Token::IDENTIFIER ||
- next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD;
-}
-
-
-void Parser::Consume(Token::Value token) {
- Token::Value next = Next();
- USE(next);
- USE(token);
- ASSERT(next == token);
-}
-
-
-void Parser::Expect(Token::Value token, bool* ok) {
- Token::Value next = Next();
- if (next == token) return;
- ReportUnexpectedToken(next);
- *ok = false;
-}
-
-
-bool Parser::Check(Token::Value token) {
- Token::Value next = peek();
- if (next == token) {
- Consume(next);
- return true;
- }
- return false;
-}
-
-
-void Parser::ExpectSemicolon(bool* ok) {
- // Check for automatic semicolon insertion according to
- // the rules given in ECMA-262, section 7.9, page 21.
- Token::Value tok = peek();
- if (tok == Token::SEMICOLON) {
- Next();
- return;
- }
- if (scanner().HasAnyLineTerminatorBeforeNext() ||
- tok == Token::RBRACE ||
- tok == Token::EOS) {
- return;
- }
- Expect(Token::SEMICOLON, ok);
-}
-
-
-void Parser::ExpectContextualKeyword(const char* keyword, bool* ok) {
- Expect(Token::IDENTIFIER, ok);
- if (!*ok) return;
- Handle<String> symbol = GetSymbol(ok);
- if (!*ok) return;
- if (!symbol->IsUtf8EqualTo(CStrVector(keyword))) {
- *ok = false;
- ReportUnexpectedToken(scanner().current_token());
- }
-}
-
-
-Literal* Parser::GetLiteralUndefined() {
- return factory()->NewLiteral(isolate()->factory()->undefined_value());
-}
-
-
-Literal* Parser::GetLiteralTheHole() {
- return factory()->NewLiteral(isolate()->factory()->the_hole_value());
-}
-
-
-// Parses an identifier that is valid for the current scope, in particular it
-// fails on strict mode future reserved keywords in a strict scope.
-Handle<String> Parser::ParseIdentifier(bool* ok) {
- if (!top_scope_->is_classic_mode()) {
- Expect(Token::IDENTIFIER, ok);
- } else if (!Check(Token::IDENTIFIER)) {
- Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
- }
- if (!*ok) return Handle<String>();
- return GetSymbol(ok);
-}
-
-
-// Parses and identifier or a strict mode future reserved word, and indicate
-// whether it is strict mode future reserved.
-Handle<String> Parser::ParseIdentifierOrStrictReservedWord(
- bool* is_strict_reserved, bool* ok) {
- *is_strict_reserved = false;
- if (!Check(Token::IDENTIFIER)) {
- Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
- *is_strict_reserved = true;
- }
- if (!*ok) return Handle<String>();
- return GetSymbol(ok);
-}
-
-
-Handle<String> Parser::ParseIdentifierName(bool* ok) {
- Token::Value next = Next();
- if (next != Token::IDENTIFIER &&
- next != Token::FUTURE_RESERVED_WORD &&
- next != Token::FUTURE_STRICT_RESERVED_WORD &&
- !Token::IsKeyword(next)) {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
- return GetSymbol(ok);
-}
-
-
-void Parser::MarkAsLValue(Expression* expression) {
- VariableProxy* proxy = expression != NULL
- ? expression->AsVariableProxy()
- : NULL;
-
- if (proxy != NULL) proxy->MarkAsLValue();
-}
-
-
-// Checks LHS expression for assignment and prefix/postfix increment/decrement
-// in strict mode.
-void Parser::CheckStrictModeLValue(Expression* expression,
- const char* error,
- bool* ok) {
- ASSERT(!top_scope_->is_classic_mode());
- VariableProxy* lhs = expression != NULL
- ? expression->AsVariableProxy()
- : NULL;
-
- if (lhs != NULL && !lhs->is_this() && IsEvalOrArguments(lhs->name())) {
- ReportMessage(error, Vector<const char*>::empty());
- *ok = false;
- }
-}
-
-
-// Checks whether an octal literal was last seen between beg_pos and end_pos.
-// If so, reports an error. Only called for strict mode.
-void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- Scanner::Location octal = scanner().octal_position();
- if (octal.IsValid() &&
- beg_pos <= octal.beg_pos &&
- octal.end_pos <= end_pos) {
- ReportMessageAt(octal, "strict_octal_literal",
- Vector<const char*>::empty());
- scanner().clear_octal_position();
- *ok = false;
- }
-}
-
-
-void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
- Declaration* decl = scope->CheckConflictingVarDeclarations();
- if (decl != NULL) {
- // In harmony mode we treat conflicting variable bindinds as early
- // errors. See ES5 16 for a definition of early errors.
- Handle<String> name = decl->proxy()->name();
- SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
- const char* elms[2] = { "Variable", *c_string };
- Vector<const char*> args(elms, 2);
- int position = decl->proxy()->position();
- Scanner::Location location = position == RelocInfo::kNoPosition
- ? Scanner::Location::invalid()
- : Scanner::Location(position, position + 1);
- ReportMessageAt(location, "redeclaration", args);
- *ok = false;
- }
-}
-
-
-// This function reads an identifier name and determines whether or not it
-// is 'get' or 'set'.
-Handle<String> Parser::ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Handle<String> result = ParseIdentifierName(ok);
- if (!*ok) return Handle<String>();
- if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
- const char* token = scanner().literal_ascii_string().start();
- *is_get = strncmp(token, "get", 3) == 0;
- *is_set = !*is_get && strncmp(token, "set", 3) == 0;
- }
- return result;
-}
-
-
-// ----------------------------------------------------------------------------
-// Parser support
-
-
-bool Parser::TargetStackContainsLabel(Handle<String> label) {
- for (Target* t = target_stack_; t != NULL; t = t->previous()) {
- BreakableStatement* stat = t->node()->AsBreakableStatement();
- if (stat != NULL && ContainsLabel(stat->labels(), label))
- return true;
- }
- return false;
-}
-
-
-BreakableStatement* Parser::LookupBreakTarget(Handle<String> label, bool* ok) {
- bool anonymous = label.is_null();
- for (Target* t = target_stack_; t != NULL; t = t->previous()) {
- BreakableStatement* stat = t->node()->AsBreakableStatement();
- if (stat == NULL) continue;
- if ((anonymous && stat->is_target_for_anonymous()) ||
- (!anonymous && ContainsLabel(stat->labels(), label))) {
- RegisterTargetUse(stat->break_target(), t->previous());
- return stat;
- }
- }
- return NULL;
-}
-
-
-IterationStatement* Parser::LookupContinueTarget(Handle<String> label,
- bool* ok) {
- bool anonymous = label.is_null();
- for (Target* t = target_stack_; t != NULL; t = t->previous()) {
- IterationStatement* stat = t->node()->AsIterationStatement();
- if (stat == NULL) continue;
-
- ASSERT(stat->is_target_for_anonymous());
- if (anonymous || ContainsLabel(stat->labels(), label)) {
- RegisterTargetUse(stat->continue_target(), t->previous());
- return stat;
- }
- }
- return NULL;
-}
-
-
-void Parser::RegisterTargetUse(Label* target, Target* stop) {
- // Register that a break target found at the given stop in the
- // target stack has been used from the top of the target stack. Add
- // the break target to any TargetCollectors passed on the stack.
- for (Target* t = target_stack_; t != stop; t = t->previous()) {
- TargetCollector* collector = t->node()->AsTargetCollector();
- if (collector != NULL) collector->AddTarget(target, zone());
- }
-}
-
-
-Expression* Parser::NewThrowReferenceError(Handle<String> type) {
- return NewThrowError(isolate()->factory()->MakeReferenceError_string(),
- type, HandleVector<Object>(NULL, 0));
-}
-
-
-Expression* Parser::NewThrowSyntaxError(Handle<String> type,
- Handle<Object> first) {
- int argc = first.is_null() ? 0 : 1;
- Vector< Handle<Object> > arguments = HandleVector<Object>(&first, argc);
- return NewThrowError(
- isolate()->factory()->MakeSyntaxError_string(), type, arguments);
-}
-
-
-Expression* Parser::NewThrowTypeError(Handle<String> type,
- Handle<Object> first,
- Handle<Object> second) {
- ASSERT(!first.is_null() && !second.is_null());
- Handle<Object> elements[] = { first, second };
- Vector< Handle<Object> > arguments =
- HandleVector<Object>(elements, ARRAY_SIZE(elements));
- return NewThrowError(
- isolate()->factory()->MakeTypeError_string(), type, arguments);
-}
-
-
-Expression* Parser::NewThrowError(Handle<String> constructor,
- Handle<String> type,
- Vector< Handle<Object> > arguments) {
- int argc = arguments.length();
- Handle<FixedArray> elements = isolate()->factory()->NewFixedArray(argc,
- TENURED);
- for (int i = 0; i < argc; i++) {
- Handle<Object> element = arguments[i];
- if (!element.is_null()) {
- elements->set(i, *element);
- }
- }
- Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(
- elements, FAST_ELEMENTS, TENURED);
-
- ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2, zone());
- args->Add(factory()->NewLiteral(type), zone());
- args->Add(factory()->NewLiteral(array), zone());
- CallRuntime* call_constructor =
- factory()->NewCallRuntime(constructor, NULL, args);
- return factory()->NewThrow(call_constructor, scanner().location().beg_pos);
-}
-
-// ----------------------------------------------------------------------------
-// Regular expressions
-
-
-RegExpParser::RegExpParser(FlatStringReader* in,
- Handle<String>* error,
- bool multiline,
- Zone* zone)
- : isolate_(Isolate::Current()),
- zone_(zone),
- error_(error),
- captures_(NULL),
- in_(in),
- current_(kEndMarker),
- next_pos_(0),
- capture_count_(0),
- has_more_(true),
- multiline_(multiline),
- simple_(false),
- contains_anchor_(false),
- is_scanned_for_captures_(false),
- failed_(false) {
- Advance();
-}
-
-
-uc32 RegExpParser::Next() {
- if (has_next()) {
- return in()->Get(next_pos_);
- } else {
- return kEndMarker;
- }
-}
-
-
-void RegExpParser::Advance() {
- if (next_pos_ < in()->length()) {
- StackLimitCheck check(isolate());
- if (check.HasOverflowed()) {
- ReportError(CStrVector(Isolate::kStackOverflowMessage));
- } else if (zone()->excess_allocation()) {
- ReportError(CStrVector("Regular expression too large"));
- } else {
- current_ = in()->Get(next_pos_);
- next_pos_++;
- }
- } else {
- current_ = kEndMarker;
- has_more_ = false;
- }
-}
-
-
-void RegExpParser::Reset(int pos) {
- next_pos_ = pos;
- Advance();
-}
-
-
-void RegExpParser::Advance(int dist) {
- next_pos_ += dist - 1;
- Advance();
-}
-
-
-bool RegExpParser::simple() {
- return simple_;
-}
-
-RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
- failed_ = true;
- *error_ = isolate()->factory()->NewStringFromAscii(message, NOT_TENURED);
- // Zip to the end to make sure the no more input is read.
- current_ = kEndMarker;
- next_pos_ = in()->length();
- return NULL;
-}
-
-
-// Pattern ::
-// Disjunction
-RegExpTree* RegExpParser::ParsePattern() {
- RegExpTree* result = ParseDisjunction(CHECK_FAILED);
- ASSERT(!has_more());
- // If the result of parsing is a literal string atom, and it has the
- // same length as the input, then the atom is identical to the input.
- if (result->IsAtom() && result->AsAtom()->length() == in()->length()) {
- simple_ = true;
- }
- return result;
-}
-
-
-// Disjunction ::
-// Alternative
-// Alternative | Disjunction
-// Alternative ::
-// [empty]
-// Term Alternative
-// Term ::
-// Assertion
-// Atom
-// Atom Quantifier
-RegExpTree* RegExpParser::ParseDisjunction() {
- // Used to store current state while parsing subexpressions.
- RegExpParserState initial_state(NULL, INITIAL, 0, zone());
- RegExpParserState* stored_state = &initial_state;
- // Cache the builder in a local variable for quick access.
- RegExpBuilder* builder = initial_state.builder();
- while (true) {
- switch (current()) {
- case kEndMarker:
- if (stored_state->IsSubexpression()) {
- // Inside a parenthesized group when hitting end of input.
- ReportError(CStrVector("Unterminated group") CHECK_FAILED);
- }
- ASSERT_EQ(INITIAL, stored_state->group_type());
- // Parsing completed successfully.
- return builder->ToRegExp();
- case ')': {
- if (!stored_state->IsSubexpression()) {
- ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
- }
- ASSERT_NE(INITIAL, stored_state->group_type());
-
- Advance();
- // End disjunction parsing and convert builder content to new single
- // regexp atom.
- RegExpTree* body = builder->ToRegExp();
-
- int end_capture_index = captures_started();
-
- int capture_index = stored_state->capture_index();
- SubexpressionType type = stored_state->group_type();
-
- // Restore previous state.
- stored_state = stored_state->previous_state();
- builder = stored_state->builder();
-
- // Build result of subexpression.
- if (type == CAPTURE) {
- RegExpCapture* capture = new(zone()) RegExpCapture(body, capture_index);
- captures_->at(capture_index - 1) = capture;
- body = capture;
- } else if (type != GROUPING) {
- ASSERT(type == POSITIVE_LOOKAHEAD || type == NEGATIVE_LOOKAHEAD);
- bool is_positive = (type == POSITIVE_LOOKAHEAD);
- body = new(zone()) RegExpLookahead(body,
- is_positive,
- end_capture_index - capture_index,
- capture_index);
- }
- builder->AddAtom(body);
- // For compatability with JSC and ES3, we allow quantifiers after
- // lookaheads, and break in all cases.
- break;
- }
- case '|': {
- Advance();
- builder->NewAlternative();
- continue;
- }
- case '*':
- case '+':
- case '?':
- return ReportError(CStrVector("Nothing to repeat"));
- case '^': {
- Advance();
- if (multiline_) {
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::START_OF_LINE));
- } else {
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::START_OF_INPUT));
- set_contains_anchor();
- }
- continue;
- }
- case '$': {
- Advance();
- RegExpAssertion::Type type =
- multiline_ ? RegExpAssertion::END_OF_LINE :
- RegExpAssertion::END_OF_INPUT;
- builder->AddAssertion(new(zone()) RegExpAssertion(type));
- continue;
- }
- case '.': {
- Advance();
- // everything except \x0a, \x0d, \u2028 and \u2029
- ZoneList<CharacterRange>* ranges =
- new(zone()) ZoneList<CharacterRange>(2, zone());
- CharacterRange::AddClassEscape('.', ranges, zone());
- RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
- builder->AddAtom(atom);
- break;
- }
- case '(': {
- SubexpressionType type = CAPTURE;
- Advance();
- if (current() == '?') {
- switch (Next()) {
- case ':':
- type = GROUPING;
- break;
- case '=':
- type = POSITIVE_LOOKAHEAD;
- break;
- case '!':
- type = NEGATIVE_LOOKAHEAD;
- break;
- default:
- ReportError(CStrVector("Invalid group") CHECK_FAILED);
- break;
- }
- Advance(2);
- } else {
- if (captures_ == NULL) {
- captures_ = new(zone()) ZoneList<RegExpCapture*>(2, zone());
- }
- if (captures_started() >= kMaxCaptures) {
- ReportError(CStrVector("Too many captures") CHECK_FAILED);
- }
- captures_->Add(NULL, zone());
- }
- // Store current state and begin new disjunction parsing.
- stored_state = new(zone()) RegExpParserState(stored_state, type,
- captures_started(), zone());
- builder = stored_state->builder();
- continue;
- }
- case '[': {
- RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
- builder->AddAtom(atom);
- break;
- }
- // Atom ::
- // \ AtomEscape
- case '\\':
- switch (Next()) {
- case kEndMarker:
- return ReportError(CStrVector("\\ at end of pattern"));
- case 'b':
- Advance(2);
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::BOUNDARY));
- continue;
- case 'B':
- Advance(2);
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
- continue;
- // AtomEscape ::
- // CharacterClassEscape
- //
- // CharacterClassEscape :: one of
- // d D s S w W
- case 'd': case 'D': case 's': case 'S': case 'w': case 'W': {
- uc32 c = Next();
- Advance(2);
- ZoneList<CharacterRange>* ranges =
- new(zone()) ZoneList<CharacterRange>(2, zone());
- CharacterRange::AddClassEscape(c, ranges, zone());
- RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
- builder->AddAtom(atom);
- break;
- }
- case '1': case '2': case '3': case '4': case '5': case '6':
- case '7': case '8': case '9': {
- int index = 0;
- if (ParseBackReferenceIndex(&index)) {
- RegExpCapture* capture = NULL;
- if (captures_ != NULL && index <= captures_->length()) {
- capture = captures_->at(index - 1);
- }
- if (capture == NULL) {
- builder->AddEmpty();
- break;
- }
- RegExpTree* atom = new(zone()) RegExpBackReference(capture);
- builder->AddAtom(atom);
- break;
- }
- uc32 first_digit = Next();
- if (first_digit == '8' || first_digit == '9') {
- // Treat as identity escape
- builder->AddCharacter(first_digit);
- Advance(2);
- break;
- }
- }
- // FALLTHROUGH
- case '0': {
- Advance();
- uc32 octal = ParseOctalLiteral();
- builder->AddCharacter(octal);
- break;
- }
- // ControlEscape :: one of
- // f n r t v
- case 'f':
- Advance(2);
- builder->AddCharacter('\f');
- break;
- case 'n':
- Advance(2);
- builder->AddCharacter('\n');
- break;
- case 'r':
- Advance(2);
- builder->AddCharacter('\r');
- break;
- case 't':
- Advance(2);
- builder->AddCharacter('\t');
- break;
- case 'v':
- Advance(2);
- builder->AddCharacter('\v');
- break;
- case 'c': {
- Advance();
- uc32 controlLetter = Next();
- // Special case if it is an ASCII letter.
- // Convert lower case letters to uppercase.
- uc32 letter = controlLetter & ~('a' ^ 'A');
- if (letter < 'A' || 'Z' < letter) {
- // controlLetter is not in range 'A'-'Z' or 'a'-'z'.
- // This is outside the specification. We match JSC in
- // reading the backslash as a literal character instead
- // of as starting an escape.
- builder->AddCharacter('\\');
- } else {
- Advance(2);
- builder->AddCharacter(controlLetter & 0x1f);
- }
- break;
- }
- case 'x': {
- Advance(2);
- uc32 value;
- if (ParseHexEscape(2, &value)) {
- builder->AddCharacter(value);
- } else {
- builder->AddCharacter('x');
- }
- break;
- }
- case 'u': {
- Advance(2);
- uc32 value;
- if (ParseHexEscape(4, &value)) {
- builder->AddCharacter(value);
- } else {
- builder->AddCharacter('u');
- }
- break;
- }
- default:
- // Identity escape.
- builder->AddCharacter(Next());
- Advance(2);
- break;
- }
- break;
- case '{': {
- int dummy;
- if (ParseIntervalQuantifier(&dummy, &dummy)) {
- ReportError(CStrVector("Nothing to repeat") CHECK_FAILED);
- }
- // fallthrough
- }
- default:
- builder->AddCharacter(current());
- Advance();
- break;
- } // end switch(current())
-
- int min;
- int max;
- switch (current()) {
- // QuantifierPrefix ::
- // *
- // +
- // ?
- // {
- case '*':
- min = 0;
- max = RegExpTree::kInfinity;
- Advance();
- break;
- case '+':
- min = 1;
- max = RegExpTree::kInfinity;
- Advance();
- break;
- case '?':
- min = 0;
- max = 1;
- Advance();
- break;
- case '{':
- if (ParseIntervalQuantifier(&min, &max)) {
- if (max < min) {
- ReportError(CStrVector("numbers out of order in {} quantifier.")
- CHECK_FAILED);
- }
- break;
- } else {
- continue;
- }
- default:
- continue;
- }
- RegExpQuantifier::Type type = RegExpQuantifier::GREEDY;
- if (current() == '?') {
- type = RegExpQuantifier::NON_GREEDY;
- Advance();
- } else if (FLAG_regexp_possessive_quantifier && current() == '+') {
- // FLAG_regexp_possessive_quantifier is a debug-only flag.
- type = RegExpQuantifier::POSSESSIVE;
- Advance();
- }
- builder->AddQuantifierToAtom(min, max, type);
- }
-}
-
-
-#ifdef DEBUG
-// Currently only used in an ASSERT.
-static bool IsSpecialClassEscape(uc32 c) {
- switch (c) {
- case 'd': case 'D':
- case 's': case 'S':
- case 'w': case 'W':
- return true;
- default:
- return false;
- }
-}
-#endif
-
-
-// In order to know whether an escape is a backreference or not we have to scan
-// the entire regexp and find the number of capturing parentheses. However we
-// don't want to scan the regexp twice unless it is necessary. This mini-parser
-// is called when needed. It can see the difference between capturing and
-// noncapturing parentheses and can skip character classes and backslash-escaped
-// characters.
-void RegExpParser::ScanForCaptures() {
- // Start with captures started previous to current position
- int capture_count = captures_started();
- // Add count of captures after this position.
- int n;
- while ((n = current()) != kEndMarker) {
- Advance();
- switch (n) {
- case '\\':
- Advance();
- break;
- case '[': {
- int c;
- while ((c = current()) != kEndMarker) {
- Advance();
- if (c == '\\') {
- Advance();
- } else {
- if (c == ']') break;
- }
- }
- break;
- }
- case '(':
- if (current() != '?') capture_count++;
- break;
- }
- }
- capture_count_ = capture_count;
- is_scanned_for_captures_ = true;
-}
-
-
-bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
- ASSERT_EQ('\\', current());
- ASSERT('1' <= Next() && Next() <= '9');
- // Try to parse a decimal literal that is no greater than the total number
- // of left capturing parentheses in the input.
- int start = position();
- int value = Next() - '0';
- Advance(2);
- while (true) {
- uc32 c = current();
- if (IsDecimalDigit(c)) {
- value = 10 * value + (c - '0');
- if (value > kMaxCaptures) {
- Reset(start);
- return false;
- }
- Advance();
- } else {
- break;
- }
- }
- if (value > captures_started()) {
- if (!is_scanned_for_captures_) {
- int saved_position = position();
- ScanForCaptures();
- Reset(saved_position);
- }
- if (value > capture_count_) {
- Reset(start);
- return false;
- }
- }
- *index_out = value;
- return true;
-}
-
-
-// QuantifierPrefix ::
-// { DecimalDigits }
-// { DecimalDigits , }
-// { DecimalDigits , DecimalDigits }
-//
-// Returns true if parsing succeeds, and set the min_out and max_out
-// values. Values are truncated to RegExpTree::kInfinity if they overflow.
-bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
- ASSERT_EQ(current(), '{');
- int start = position();
- Advance();
- int min = 0;
- if (!IsDecimalDigit(current())) {
- Reset(start);
- return false;
- }
- while (IsDecimalDigit(current())) {
- int next = current() - '0';
- if (min > (RegExpTree::kInfinity - next) / 10) {
- // Overflow. Skip past remaining decimal digits and return -1.
- do {
- Advance();
- } while (IsDecimalDigit(current()));
- min = RegExpTree::kInfinity;
- break;
- }
- min = 10 * min + next;
- Advance();
- }
- int max = 0;
- if (current() == '}') {
- max = min;
- Advance();
- } else if (current() == ',') {
- Advance();
- if (current() == '}') {
- max = RegExpTree::kInfinity;
- Advance();
- } else {
- while (IsDecimalDigit(current())) {
- int next = current() - '0';
- if (max > (RegExpTree::kInfinity - next) / 10) {
- do {
- Advance();
- } while (IsDecimalDigit(current()));
- max = RegExpTree::kInfinity;
- break;
- }
- max = 10 * max + next;
- Advance();
- }
- if (current() != '}') {
- Reset(start);
- return false;
- }
- Advance();
- }
- } else {
- Reset(start);
- return false;
- }
- *min_out = min;
- *max_out = max;
- return true;
-}
-
-
-uc32 RegExpParser::ParseOctalLiteral() {
- ASSERT('0' <= current() && current() <= '7');
- // For compatibility with some other browsers (not all), we parse
- // up to three octal digits with a value below 256.
- uc32 value = current() - '0';
- Advance();
- if ('0' <= current() && current() <= '7') {
- value = value * 8 + current() - '0';
- Advance();
- if (value < 32 && '0' <= current() && current() <= '7') {
- value = value * 8 + current() - '0';
- Advance();
- }
- }
- return value;
-}
-
-
-bool RegExpParser::ParseHexEscape(int length, uc32 *value) {
- int start = position();
- uc32 val = 0;
- bool done = false;
- for (int i = 0; !done; i++) {
- uc32 c = current();
- int d = HexValue(c);
- if (d < 0) {
- Reset(start);
- return false;
- }
- val = val * 16 + d;
- Advance();
- if (i == length - 1) {
- done = true;
- }
- }
- *value = val;
- return true;
-}
-
-
-uc32 RegExpParser::ParseClassCharacterEscape() {
- ASSERT(current() == '\\');
- ASSERT(has_next() && !IsSpecialClassEscape(Next()));
- Advance();
- switch (current()) {
- case 'b':
- Advance();
- return '\b';
- // ControlEscape :: one of
- // f n r t v
- case 'f':
- Advance();
- return '\f';
- case 'n':
- Advance();
- return '\n';
- case 'r':
- Advance();
- return '\r';
- case 't':
- Advance();
- return '\t';
- case 'v':
- Advance();
- return '\v';
- case 'c': {
- uc32 controlLetter = Next();
- uc32 letter = controlLetter & ~('A' ^ 'a');
- // For compatibility with JSC, inside a character class
- // we also accept digits and underscore as control characters.
- if ((controlLetter >= '0' && controlLetter <= '9') ||
- controlLetter == '_' ||
- (letter >= 'A' && letter <= 'Z')) {
- Advance(2);
- // Control letters mapped to ASCII control characters in the range
- // 0x00-0x1f.
- return controlLetter & 0x1f;
- }
- // We match JSC in reading the backslash as a literal
- // character instead of as starting an escape.
- return '\\';
- }
- case '0': case '1': case '2': case '3': case '4': case '5':
- case '6': case '7':
- // For compatibility, we interpret a decimal escape that isn't
- // a back reference (and therefore either \0 or not valid according
- // to the specification) as a 1..3 digit octal character code.
- return ParseOctalLiteral();
- case 'x': {
- Advance();
- uc32 value;
- if (ParseHexEscape(2, &value)) {
- return value;
- }
- // If \x is not followed by a two-digit hexadecimal, treat it
- // as an identity escape.
- return 'x';
- }
- case 'u': {
- Advance();
- uc32 value;
- if (ParseHexEscape(4, &value)) {
- return value;
- }
- // If \u is not followed by a four-digit hexadecimal, treat it
- // as an identity escape.
- return 'u';
- }
- default: {
- // Extended identity escape. We accept any character that hasn't
- // been matched by a more specific case, not just the subset required
- // by the ECMAScript specification.
- uc32 result = current();
- Advance();
- return result;
- }
- }
- return 0;
-}
-
-
-CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) {
- ASSERT_EQ(0, *char_class);
- uc32 first = current();
- if (first == '\\') {
- switch (Next()) {
- case 'w': case 'W': case 'd': case 'D': case 's': case 'S': {
- *char_class = Next();
- Advance(2);
- return CharacterRange::Singleton(0); // Return dummy value.
- }
- case kEndMarker:
- return ReportError(CStrVector("\\ at end of pattern"));
- default:
- uc32 c = ParseClassCharacterEscape(CHECK_FAILED);
- return CharacterRange::Singleton(c);
- }
- } else {
- Advance();
- return CharacterRange::Singleton(first);
- }
-}
-
-
-static const uc16 kNoCharClass = 0;
-
-// Adds range or pre-defined character class to character ranges.
-// If char_class is not kInvalidClass, it's interpreted as a class
-// escape (i.e., 's' means whitespace, from '\s').
-static inline void AddRangeOrEscape(ZoneList<CharacterRange>* ranges,
- uc16 char_class,
- CharacterRange range,
- Zone* zone) {
- if (char_class != kNoCharClass) {
- CharacterRange::AddClassEscape(char_class, ranges, zone);
- } else {
- ranges->Add(range, zone);
- }
-}
-
-
-RegExpTree* RegExpParser::ParseCharacterClass() {
- static const char* kUnterminated = "Unterminated character class";
- static const char* kRangeOutOfOrder = "Range out of order in character class";
-
- ASSERT_EQ(current(), '[');
- Advance();
- bool is_negated = false;
- if (current() == '^') {
- is_negated = true;
- Advance();
- }
- ZoneList<CharacterRange>* ranges =
- new(zone()) ZoneList<CharacterRange>(2, zone());
- while (has_more() && current() != ']') {
- uc16 char_class = kNoCharClass;
- CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
- if (current() == '-') {
- Advance();
- if (current() == kEndMarker) {
- // If we reach the end we break out of the loop and let the
- // following code report an error.
- break;
- } else if (current() == ']') {
- AddRangeOrEscape(ranges, char_class, first, zone());
- ranges->Add(CharacterRange::Singleton('-'), zone());
- break;
- }
- uc16 char_class_2 = kNoCharClass;
- CharacterRange next = ParseClassAtom(&char_class_2 CHECK_FAILED);
- if (char_class != kNoCharClass || char_class_2 != kNoCharClass) {
- // Either end is an escaped character class. Treat the '-' verbatim.
- AddRangeOrEscape(ranges, char_class, first, zone());
- ranges->Add(CharacterRange::Singleton('-'), zone());
- AddRangeOrEscape(ranges, char_class_2, next, zone());
- continue;
- }
- if (first.from() > next.to()) {
- return ReportError(CStrVector(kRangeOutOfOrder) CHECK_FAILED);
- }
- ranges->Add(CharacterRange::Range(first.from(), next.to()), zone());
- } else {
- AddRangeOrEscape(ranges, char_class, first, zone());
- }
- }
- if (!has_more()) {
- return ReportError(CStrVector(kUnterminated) CHECK_FAILED);
- }
- Advance();
- if (ranges->length() == 0) {
- ranges->Add(CharacterRange::Everything(), zone());
- is_negated = !is_negated;
- }
- return new(zone()) RegExpCharacterClass(ranges, is_negated);
-}
-
-
-// ----------------------------------------------------------------------------
-// The Parser interface.
-
-ParserMessage::~ParserMessage() {
- for (int i = 0; i < args().length(); i++)
- DeleteArray(args()[i]);
- DeleteArray(args().start());
-}
-
-
-ScriptDataImpl::~ScriptDataImpl() {
- if (owns_store_) store_.Dispose();
-}
-
-
-int ScriptDataImpl::Length() {
- return store_.length() * sizeof(unsigned);
-}
-
-
-const char* ScriptDataImpl::Data() {
- return reinterpret_cast<const char*>(store_.start());
-}
-
-
-bool ScriptDataImpl::HasError() {
- return has_error();
-}
-
-
-void ScriptDataImpl::Initialize() {
- // Prepares state for use.
- if (store_.length() >= PreparseDataConstants::kHeaderSize) {
- function_index_ = PreparseDataConstants::kHeaderSize;
- int symbol_data_offset = PreparseDataConstants::kHeaderSize
- + store_[PreparseDataConstants::kFunctionsSizeOffset];
- if (store_.length() > symbol_data_offset) {
- symbol_data_ = reinterpret_cast<byte*>(&store_[symbol_data_offset]);
- } else {
- // Partial preparse causes no symbol information.
- symbol_data_ = reinterpret_cast<byte*>(&store_[0] + store_.length());
- }
- symbol_data_end_ = reinterpret_cast<byte*>(&store_[0] + store_.length());
- }
-}
-
-
-int ScriptDataImpl::ReadNumber(byte** source) {
- // Reads a number from symbol_data_ in base 128. The most significant
- // bit marks that there are more digits.
- // If the first byte is 0x80 (kNumberTerminator), it would normally
- // represent a leading zero. Since that is useless, and therefore won't
- // appear as the first digit of any actual value, it is used to
- // mark the end of the input stream.
- byte* data = *source;
- if (data >= symbol_data_end_) return -1;
- byte input = *data;
- if (input == PreparseDataConstants::kNumberTerminator) {
- // End of stream marker.
- return -1;
- }
- int result = input & 0x7f;
- data++;
- while ((input & 0x80u) != 0) {
- if (data >= symbol_data_end_) return -1;
- input = *data;
- result = (result << 7) | (input & 0x7f);
- data++;
- }
- *source = data;
- return result;
-}
-
-
-// Create a Scanner for the preparser to use as input, and preparse the source.
-static ScriptDataImpl* DoPreParse(Utf16CharacterStream* source,
- int flags,
- ParserRecorder* recorder) {
- Isolate* isolate = Isolate::Current();
- HistogramTimerScope timer(isolate->counters()->pre_parse());
- Scanner scanner(isolate->unicode_cache());
- scanner.SetHarmonyScoping(FLAG_harmony_scoping);
- scanner.Initialize(source);
- intptr_t stack_limit = isolate->stack_guard()->real_climit();
- preparser::PreParser::PreParseResult result =
- preparser::PreParser::PreParseProgram(&scanner,
- recorder,
- flags,
- stack_limit);
- if (result == preparser::PreParser::kPreParseStackOverflow) {
- isolate->StackOverflow();
- return NULL;
- }
-
- // Extract the accumulated data from the recorder as a single
- // contiguous vector that we are responsible for disposing.
- Vector<unsigned> store = recorder->ExtractData();
- return new ScriptDataImpl(store);
-}
-
-
-ScriptDataImpl* ParserApi::PreParse(Utf16CharacterStream* source,
- v8::Extension* extension,
- int flags) {
- Handle<Script> no_script;
- if (FLAG_lazy && (extension == NULL)) {
- flags |= kAllowLazy;
- }
- CompleteParserRecorder recorder;
- return DoPreParse(source, flags, &recorder);
-}
-
-
-bool RegExpParser::ParseRegExp(FlatStringReader* input,
- bool multiline,
- RegExpCompileData* result,
- Zone* zone) {
- ASSERT(result != NULL);
- RegExpParser parser(input, &result->error, multiline, zone);
- RegExpTree* tree = parser.ParsePattern();
- if (parser.failed()) {
- ASSERT(tree == NULL);
- ASSERT(!result->error.is_null());
- } else {
- ASSERT(tree != NULL);
- ASSERT(result->error.is_null());
- result->tree = tree;
- int capture_count = parser.captures_started();
- result->simple = tree->IsAtom() && parser.simple() && capture_count == 0;
- result->contains_anchor = parser.contains_anchor();
- result->capture_count = capture_count;
- }
- return !parser.failed();
-}
-
-
-bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) {
- ASSERT(info->function() == NULL);
- FunctionLiteral* result = NULL;
- ASSERT((parsing_flags & kLanguageModeMask) == CLASSIC_MODE);
- if (!info->is_native() && FLAG_harmony_scoping) {
- // Harmony scoping is requested.
- parsing_flags |= EXTENDED_MODE;
- }
- if (!info->is_native() && FLAG_harmony_modules) {
- parsing_flags |= kAllowModules;
- }
- if (FLAG_allow_natives_syntax || info->is_native()) {
- // We require %identifier(..) syntax.
- parsing_flags |= kAllowNativesSyntax;
- }
- if (info->is_lazy()) {
- ASSERT(!info->is_eval());
- Parser parser(info, parsing_flags, NULL, NULL);
- if (info->shared_info()->is_function()) {
- result = parser.ParseLazy();
- } else {
- result = parser.ParseProgram();
- }
- } else {
- ScriptDataImpl* pre_data = info->pre_parse_data();
- Parser parser(info, parsing_flags, info->extension(), pre_data);
- if (pre_data != NULL && pre_data->has_error()) {
- Scanner::Location loc = pre_data->MessageLocation();
- const char* message = pre_data->BuildMessage();
- Vector<const char*> args = pre_data->BuildArgs();
- parser.ReportMessageAt(loc, message, args);
- DeleteArray(message);
- for (int i = 0; i < args.length(); i++) {
- DeleteArray(args[i]);
- }
- DeleteArray(args.start());
- ASSERT(info->isolate()->has_pending_exception());
- } else {
- result = parser.ParseProgram();
- }
- }
- info->SetFunction(result);
- return (result != NULL);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/parser.h b/src/3rdparty/v8/src/parser.h
deleted file mode 100644
index 0f85f91..0000000
--- a/src/3rdparty/v8/src/parser.h
+++ /dev/null
@@ -1,886 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PARSER_H_
-#define V8_PARSER_H_
-
-#include "allocation.h"
-#include "ast.h"
-#include "preparse-data-format.h"
-#include "preparse-data.h"
-#include "scopes.h"
-#include "preparser.h"
-
-namespace v8 {
-namespace internal {
-
-class CompilationInfo;
-class FuncNameInferrer;
-class ParserLog;
-class PositionStack;
-class Target;
-
-template <typename T> class ZoneListWrapper;
-
-
-class ParserMessage : public Malloced {
- public:
- ParserMessage(Scanner::Location loc, const char* message,
- Vector<const char*> args)
- : loc_(loc),
- message_(message),
- args_(args) { }
- ~ParserMessage();
- Scanner::Location location() { return loc_; }
- const char* message() { return message_; }
- Vector<const char*> args() { return args_; }
- private:
- Scanner::Location loc_;
- const char* message_;
- Vector<const char*> args_;
-};
-
-
-class FunctionEntry BASE_EMBEDDED {
- public:
- enum {
- kStartPositionIndex,
- kEndPositionIndex,
- kLiteralCountIndex,
- kPropertyCountIndex,
- kLanguageModeIndex,
- kSize
- };
-
- explicit FunctionEntry(Vector<unsigned> backing)
- : backing_(backing) { }
-
- FunctionEntry() : backing_() { }
-
- int start_pos() { return backing_[kStartPositionIndex]; }
- int end_pos() { return backing_[kEndPositionIndex]; }
- int literal_count() { return backing_[kLiteralCountIndex]; }
- int property_count() { return backing_[kPropertyCountIndex]; }
- LanguageMode language_mode() {
- ASSERT(backing_[kLanguageModeIndex] == CLASSIC_MODE ||
- backing_[kLanguageModeIndex] == STRICT_MODE ||
- backing_[kLanguageModeIndex] == EXTENDED_MODE);
- return static_cast<LanguageMode>(backing_[kLanguageModeIndex]);
- }
-
- bool is_valid() { return !backing_.is_empty(); }
-
- private:
- Vector<unsigned> backing_;
-};
-
-
-class ScriptDataImpl : public ScriptData {
- public:
- explicit ScriptDataImpl(Vector<unsigned> store)
- : store_(store),
- owns_store_(true) { }
-
- // Create an empty ScriptDataImpl that is guaranteed to not satisfy
- // a SanityCheck.
- ScriptDataImpl() : owns_store_(false) { }
-
- virtual ~ScriptDataImpl();
- virtual int Length();
- virtual const char* Data();
- virtual bool HasError();
-
- void Initialize();
- void ReadNextSymbolPosition();
-
- FunctionEntry GetFunctionEntry(int start);
- int GetSymbolIdentifier();
- bool SanityCheck();
-
- Scanner::Location MessageLocation();
- const char* BuildMessage();
- Vector<const char*> BuildArgs();
-
- int symbol_count() {
- return (store_.length() > PreparseDataConstants::kHeaderSize)
- ? store_[PreparseDataConstants::kSymbolCountOffset]
- : 0;
- }
- // The following functions should only be called if SanityCheck has
- // returned true.
- bool has_error() { return store_[PreparseDataConstants::kHasErrorOffset]; }
- unsigned magic() { return store_[PreparseDataConstants::kMagicOffset]; }
- unsigned version() { return store_[PreparseDataConstants::kVersionOffset]; }
-
- private:
- Vector<unsigned> store_;
- unsigned char* symbol_data_;
- unsigned char* symbol_data_end_;
- int function_index_;
- bool owns_store_;
-
- unsigned Read(int position);
- unsigned* ReadAddress(int position);
- // Reads a number from the current symbols
- int ReadNumber(byte** source);
-
- ScriptDataImpl(const char* backing_store, int length)
- : store_(reinterpret_cast<unsigned*>(const_cast<char*>(backing_store)),
- length / static_cast<int>(sizeof(unsigned))),
- owns_store_(false) {
- ASSERT_EQ(0, static_cast<int>(
- reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned)));
- }
-
- // Read strings written by ParserRecorder::WriteString.
- static const char* ReadString(unsigned* start, int* chars);
-
- friend class ScriptData;
-};
-
-
-class ParserApi {
- public:
- // Parses the source code represented by the compilation info and sets its
- // function literal. Returns false (and deallocates any allocated AST
- // nodes) if parsing failed.
- static bool Parse(CompilationInfo* info, int flags);
-
- // Generic preparser generating full preparse data.
- static ScriptDataImpl* PreParse(Utf16CharacterStream* source,
- v8::Extension* extension,
- int flags);
-};
-
-// ----------------------------------------------------------------------------
-// REGEXP PARSING
-
-// A BufferedZoneList is an automatically growing list, just like (and backed
-// by) a ZoneList, that is optimized for the case of adding and removing
-// a single element. The last element added is stored outside the backing list,
-// and if no more than one element is ever added, the ZoneList isn't even
-// allocated.
-// Elements must not be NULL pointers.
-template <typename T, int initial_size>
-class BufferedZoneList {
- public:
- BufferedZoneList() : list_(NULL), last_(NULL) {}
-
- // Adds element at end of list. This element is buffered and can
- // be read using last() or removed using RemoveLast until a new Add or until
- // RemoveLast or GetList has been called.
- void Add(T* value, Zone* zone) {
- if (last_ != NULL) {
- if (list_ == NULL) {
- list_ = new(zone) ZoneList<T*>(initial_size, zone);
- }
- list_->Add(last_, zone);
- }
- last_ = value;
- }
-
- T* last() {
- ASSERT(last_ != NULL);
- return last_;
- }
-
- T* RemoveLast() {
- ASSERT(last_ != NULL);
- T* result = last_;
- if ((list_ != NULL) && (list_->length() > 0))
- last_ = list_->RemoveLast();
- else
- last_ = NULL;
- return result;
- }
-
- T* Get(int i) {
- ASSERT((0 <= i) && (i < length()));
- if (list_ == NULL) {
- ASSERT_EQ(0, i);
- return last_;
- } else {
- if (i == list_->length()) {
- ASSERT(last_ != NULL);
- return last_;
- } else {
- return list_->at(i);
- }
- }
- }
-
- void Clear() {
- list_ = NULL;
- last_ = NULL;
- }
-
- int length() {
- int length = (list_ == NULL) ? 0 : list_->length();
- return length + ((last_ == NULL) ? 0 : 1);
- }
-
- ZoneList<T*>* GetList(Zone* zone) {
- if (list_ == NULL) {
- list_ = new(zone) ZoneList<T*>(initial_size, zone);
- }
- if (last_ != NULL) {
- list_->Add(last_, zone);
- last_ = NULL;
- }
- return list_;
- }
-
- private:
- ZoneList<T*>* list_;
- T* last_;
-};
-
-
-// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
-class RegExpBuilder: public ZoneObject {
- public:
- explicit RegExpBuilder(Zone* zone);
- void AddCharacter(uc16 character);
- // "Adds" an empty expression. Does nothing except consume a
- // following quantifier
- void AddEmpty();
- void AddAtom(RegExpTree* tree);
- void AddAssertion(RegExpTree* tree);
- void NewAlternative(); // '|'
- void AddQuantifierToAtom(int min, int max, RegExpQuantifier::Type type);
- RegExpTree* ToRegExp();
-
- private:
- void FlushCharacters();
- void FlushText();
- void FlushTerms();
- Zone* zone() const { return zone_; }
-
- Zone* zone_;
- bool pending_empty_;
- ZoneList<uc16>* characters_;
- BufferedZoneList<RegExpTree, 2> terms_;
- BufferedZoneList<RegExpTree, 2> text_;
- BufferedZoneList<RegExpTree, 2> alternatives_;
-#ifdef DEBUG
- enum {ADD_NONE, ADD_CHAR, ADD_TERM, ADD_ASSERT, ADD_ATOM} last_added_;
-#define LAST(x) last_added_ = x;
-#else
-#define LAST(x)
-#endif
-};
-
-
-class RegExpParser {
- public:
- RegExpParser(FlatStringReader* in,
- Handle<String>* error,
- bool multiline_mode,
- Zone* zone);
-
- static bool ParseRegExp(FlatStringReader* input,
- bool multiline,
- RegExpCompileData* result,
- Zone* zone);
-
- RegExpTree* ParsePattern();
- RegExpTree* ParseDisjunction();
- RegExpTree* ParseGroup();
- RegExpTree* ParseCharacterClass();
-
- // Parses a {...,...} quantifier and stores the range in the given
- // out parameters.
- bool ParseIntervalQuantifier(int* min_out, int* max_out);
-
- // Parses and returns a single escaped character. The character
- // must not be 'b' or 'B' since they are usually handle specially.
- uc32 ParseClassCharacterEscape();
-
- // Checks whether the following is a length-digit hexadecimal number,
- // and sets the value if it is.
- bool ParseHexEscape(int length, uc32* value);
-
- uc32 ParseOctalLiteral();
-
- // Tries to parse the input as a back reference. If successful it
- // stores the result in the output parameter and returns true. If
- // it fails it will push back the characters read so the same characters
- // can be reparsed.
- bool ParseBackReferenceIndex(int* index_out);
-
- CharacterRange ParseClassAtom(uc16* char_class);
- RegExpTree* ReportError(Vector<const char> message);
- void Advance();
- void Advance(int dist);
- void Reset(int pos);
-
- // Reports whether the pattern might be used as a literal search string.
- // Only use if the result of the parse is a single atom node.
- bool simple();
- bool contains_anchor() { return contains_anchor_; }
- void set_contains_anchor() { contains_anchor_ = true; }
- int captures_started() { return captures_ == NULL ? 0 : captures_->length(); }
- int position() { return next_pos_ - 1; }
- bool failed() { return failed_; }
-
- static const int kMaxCaptures = 1 << 16;
- static const uc32 kEndMarker = (1 << 21);
-
- private:
- enum SubexpressionType {
- INITIAL,
- CAPTURE, // All positive values represent captures.
- POSITIVE_LOOKAHEAD,
- NEGATIVE_LOOKAHEAD,
- GROUPING
- };
-
- class RegExpParserState : public ZoneObject {
- public:
- RegExpParserState(RegExpParserState* previous_state,
- SubexpressionType group_type,
- int disjunction_capture_index,
- Zone* zone)
- : previous_state_(previous_state),
- builder_(new(zone) RegExpBuilder(zone)),
- group_type_(group_type),
- disjunction_capture_index_(disjunction_capture_index) {}
- // Parser state of containing expression, if any.
- RegExpParserState* previous_state() { return previous_state_; }
- bool IsSubexpression() { return previous_state_ != NULL; }
- // RegExpBuilder building this regexp's AST.
- RegExpBuilder* builder() { return builder_; }
- // Type of regexp being parsed (parenthesized group or entire regexp).
- SubexpressionType group_type() { return group_type_; }
- // Index in captures array of first capture in this sub-expression, if any.
- // Also the capture index of this sub-expression itself, if group_type
- // is CAPTURE.
- int capture_index() { return disjunction_capture_index_; }
-
- private:
- // Linked list implementation of stack of states.
- RegExpParserState* previous_state_;
- // Builder for the stored disjunction.
- RegExpBuilder* builder_;
- // Stored disjunction type (capture, look-ahead or grouping), if any.
- SubexpressionType group_type_;
- // Stored disjunction's capture index (if any).
- int disjunction_capture_index_;
- };
-
- Isolate* isolate() { return isolate_; }
- Zone* zone() const { return zone_; }
-
- uc32 current() { return current_; }
- bool has_more() { return has_more_; }
- bool has_next() { return next_pos_ < in()->length(); }
- uc32 Next();
- FlatStringReader* in() { return in_; }
- void ScanForCaptures();
-
- Isolate* isolate_;
- Zone* zone_;
- Handle<String>* error_;
- ZoneList<RegExpCapture*>* captures_;
- FlatStringReader* in_;
- uc32 current_;
- int next_pos_;
- // The capture count is only valid after we have scanned for captures.
- int capture_count_;
- bool has_more_;
- bool multiline_;
- bool simple_;
- bool contains_anchor_;
- bool is_scanned_for_captures_;
- bool failed_;
-};
-
-// ----------------------------------------------------------------------------
-// JAVASCRIPT PARSING
-
-// Forward declaration.
-class SingletonLogger;
-
-class Parser {
- public:
- Parser(CompilationInfo* info,
- int parsing_flags, // Combination of ParsingFlags
- v8::Extension* extension,
- ScriptDataImpl* pre_data);
- virtual ~Parser() {
- delete reusable_preparser_;
- reusable_preparser_ = NULL;
- }
-
- // Returns NULL if parsing failed.
- FunctionLiteral* ParseProgram();
- FunctionLiteral* ParseLazy();
-
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<const char*> args);
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<Handle<String> > args);
-
- private:
- // Limit on number of function parameters is chosen arbitrarily.
- // Code::Flags uses only the low 17 bits of num-parameters to
- // construct a hashable id, so if more than 2^17 are allowed, this
- // should be checked.
- static const int kMaxNumFunctionParameters = 32766;
- static const int kMaxNumFunctionLocals = 131071; // 2^17-1
-
- enum Mode {
- PARSE_LAZILY,
- PARSE_EAGERLY
- };
-
- enum VariableDeclarationContext {
- kModuleElement,
- kBlockElement,
- kStatement,
- kForStatement
- };
-
- // If a list of variable declarations includes any initializers.
- enum VariableDeclarationProperties {
- kHasInitializers,
- kHasNoInitializers
- };
-
- class BlockState;
-
- class FunctionState BASE_EMBEDDED {
- public:
- FunctionState(Parser* parser,
- Scope* scope,
- Isolate* isolate);
- ~FunctionState();
-
- int NextMaterializedLiteralIndex() {
- return next_materialized_literal_index_++;
- }
- int materialized_literal_count() {
- return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
- }
-
- int NextHandlerIndex() { return next_handler_index_++; }
- int handler_count() { return next_handler_index_; }
-
- void SetThisPropertyAssignmentInfo(
- bool only_simple_this_property_assignments,
- Handle<FixedArray> this_property_assignments) {
- only_simple_this_property_assignments_ =
- only_simple_this_property_assignments;
- this_property_assignments_ = this_property_assignments;
- }
- bool only_simple_this_property_assignments() {
- return only_simple_this_property_assignments_;
- }
- Handle<FixedArray> this_property_assignments() {
- return this_property_assignments_;
- }
-
- void AddProperty() { expected_property_count_++; }
- int expected_property_count() { return expected_property_count_; }
-
- AstNodeFactory<AstConstructionVisitor>* factory() { return &factory_; }
-
- private:
- // Used to assign an index to each literal that needs materialization in
- // the function. Includes regexp literals, and boilerplate for object and
- // array literals.
- int next_materialized_literal_index_;
-
- // Used to assign a per-function index to try and catch handlers.
- int next_handler_index_;
-
- // Properties count estimation.
- int expected_property_count_;
-
- // Keeps track of assignments to properties of this. Used for
- // optimizing constructors.
- bool only_simple_this_property_assignments_;
- Handle<FixedArray> this_property_assignments_;
-
- Parser* parser_;
- FunctionState* outer_function_state_;
- Scope* outer_scope_;
- int saved_ast_node_id_;
- AstNodeFactory<AstConstructionVisitor> factory_;
- };
-
- class ParsingModeScope BASE_EMBEDDED {
- public:
- ParsingModeScope(Parser* parser, Mode mode)
- : parser_(parser),
- old_mode_(parser->mode()) {
- parser_->mode_ = mode;
- }
- ~ParsingModeScope() {
- parser_->mode_ = old_mode_;
- }
-
- private:
- Parser* parser_;
- Mode old_mode_;
- };
-
- FunctionLiteral* ParseLazy(Utf16CharacterStream* source,
- ZoneScope* zone_scope);
-
- Isolate* isolate() { return isolate_; }
- Zone* zone() const { return zone_; }
- CompilationInfo* info() const { return info_; }
-
- // Called by ParseProgram after setting up the scanner.
- FunctionLiteral* DoParseProgram(CompilationInfo* info,
- Handle<String> source,
- ZoneScope* zone_scope);
-
- // Report syntax error
- void ReportUnexpectedToken(Token::Value token);
- void ReportInvalidPreparseData(Handle<String> name, bool* ok);
- void ReportMessage(const char* message, Vector<const char*> args);
- void ReportMessage(const char* message, Vector<Handle<String> > args);
-
- bool inside_with() const { return top_scope_->inside_with(); }
- Scanner& scanner() { return scanner_; }
- Mode mode() const { return mode_; }
- ScriptDataImpl* pre_data() const { return pre_data_; }
- bool is_extended_mode() {
- ASSERT(top_scope_ != NULL);
- return top_scope_->is_extended_mode();
- }
- Scope* DeclarationScope(VariableMode mode) {
- return IsLexicalVariableMode(mode)
- ? top_scope_ : top_scope_->DeclarationScope();
- }
-
- // Check if the given string is 'eval' or 'arguments'.
- bool IsEvalOrArguments(Handle<String> string);
-
- // All ParseXXX functions take as the last argument an *ok parameter
- // which is set to false if parsing failed; it is unchanged otherwise.
- // By making the 'exception handling' explicit, we are forced to check
- // for failure at the call sites.
- void* ParseSourceElements(ZoneList<Statement*>* processor, int end_token,
- bool is_eval, bool is_global, bool* ok);
- Statement* ParseModuleElement(ZoneStringList* labels, bool* ok);
- Statement* ParseModuleDeclaration(ZoneStringList* names, bool* ok);
- Module* ParseModule(bool* ok);
- Module* ParseModuleLiteral(bool* ok);
- Module* ParseModulePath(bool* ok);
- Module* ParseModuleVariable(bool* ok);
- Module* ParseModuleUrl(bool* ok);
- Module* ParseModuleSpecifier(bool* ok);
- Block* ParseImportDeclaration(bool* ok);
- Statement* ParseExportDeclaration(bool* ok);
- Statement* ParseBlockElement(ZoneStringList* labels, bool* ok);
- Statement* ParseStatement(ZoneStringList* labels, bool* ok);
- Statement* ParseFunctionDeclaration(ZoneStringList* names, bool* ok);
- Statement* ParseNativeDeclaration(bool* ok);
- Block* ParseBlock(ZoneStringList* labels, bool* ok);
- Block* ParseVariableStatement(VariableDeclarationContext var_context,
- ZoneStringList* names,
- bool* ok);
- Block* ParseVariableDeclarations(VariableDeclarationContext var_context,
- VariableDeclarationProperties* decl_props,
- ZoneStringList* names,
- Handle<String>* out,
- bool* ok);
- Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels,
- bool* ok);
- IfStatement* ParseIfStatement(ZoneStringList* labels, bool* ok);
- Statement* ParseContinueStatement(bool* ok);
- Statement* ParseBreakStatement(ZoneStringList* labels, bool* ok);
- Statement* ParseReturnStatement(bool* ok);
- Statement* ParseWithStatement(ZoneStringList* labels, bool* ok);
- CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok);
- SwitchStatement* ParseSwitchStatement(ZoneStringList* labels, bool* ok);
- DoWhileStatement* ParseDoWhileStatement(ZoneStringList* labels, bool* ok);
- WhileStatement* ParseWhileStatement(ZoneStringList* labels, bool* ok);
- Statement* ParseForStatement(ZoneStringList* labels, bool* ok);
- Statement* ParseThrowStatement(bool* ok);
- Expression* MakeCatchContext(Handle<String> id, VariableProxy* value);
- TryStatement* ParseTryStatement(bool* ok);
- DebuggerStatement* ParseDebuggerStatement(bool* ok);
-
- // Support for hamony block scoped bindings.
- Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
-
- Expression* ParseExpression(bool accept_IN, bool* ok);
- Expression* ParseAssignmentExpression(bool accept_IN, bool* ok);
- Expression* ParseConditionalExpression(bool accept_IN, bool* ok);
- Expression* ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
- Expression* ParseUnaryExpression(bool* ok);
- Expression* ParsePostfixExpression(bool* ok);
- Expression* ParseLeftHandSideExpression(bool* ok);
- Expression* ParseNewExpression(bool* ok);
- Expression* ParseMemberExpression(bool* ok);
- Expression* ParseNewPrefix(PositionStack* stack, bool* ok);
- Expression* ParseMemberWithNewPrefixesExpression(PositionStack* stack,
- bool* ok);
- Expression* ParsePrimaryExpression(bool* ok);
- Expression* ParseArrayLiteral(bool* ok);
- Expression* ParseObjectLiteral(bool* ok);
- ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok);
- Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
-
- // Populate the constant properties fixed array for a materialized object
- // literal.
- void BuildObjectLiteralConstantProperties(
- ZoneList<ObjectLiteral::Property*>* properties,
- Handle<FixedArray> constants,
- bool* is_simple,
- bool* fast_elements,
- int* depth);
-
- // Populate the literals fixed array for a materialized array literal.
- void BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* properties,
- Handle<FixedArray> constants,
- bool* is_simple,
- int* depth);
-
- // Decide if a property should be in the object boilerplate.
- bool IsBoilerplateProperty(ObjectLiteral::Property* property);
- // If the expression is a literal, return the literal value;
- // if the expression is a materialized literal and is simple return a
- // compile time value as encoded by CompileTimeValue::GetValue().
- // Otherwise, return undefined literal as the placeholder
- // in the object literal boilerplate.
- Handle<Object> GetBoilerplateValue(Expression* expression);
-
- ZoneList<Expression*>* ParseArguments(bool* ok);
- FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
- bool name_is_reserved,
- int function_token_position,
- FunctionLiteral::Type type,
- bool* ok);
-
-
- // Magical syntax support.
- Expression* ParseV8Intrinsic(bool* ok);
-
- INLINE(Token::Value peek()) {
- if (stack_overflow_) return Token::ILLEGAL;
- return scanner().peek();
- }
-
- INLINE(Token::Value Next()) {
- // BUG 1215673: Find a thread safe way to set a stack limit in
- // pre-parse mode. Otherwise, we cannot safely pre-parse from other
- // threads.
- if (stack_overflow_) {
- return Token::ILLEGAL;
- }
- if (StackLimitCheck(isolate()).HasOverflowed()) {
- // Any further calls to Next or peek will return the illegal token.
- // The current call must return the next token, which might already
- // have been peek'ed.
- stack_overflow_ = true;
- }
- return scanner().Next();
- }
-
- bool peek_any_identifier();
-
- INLINE(void Consume(Token::Value token));
- void Expect(Token::Value token, bool* ok);
- bool Check(Token::Value token);
- void ExpectSemicolon(bool* ok);
- void ExpectContextualKeyword(const char* keyword, bool* ok);
-
- Handle<String> LiteralString(PretenureFlag tenured) {
- if (scanner().is_literal_ascii()) {
- return isolate_->factory()->NewStringFromAscii(
- scanner().literal_ascii_string(), tenured);
- } else {
- return isolate_->factory()->NewStringFromTwoByte(
- scanner().literal_utf16_string(), tenured);
- }
- }
-
- Handle<String> NextLiteralString(PretenureFlag tenured) {
- if (scanner().is_next_literal_ascii()) {
- return isolate_->factory()->NewStringFromAscii(
- scanner().next_literal_ascii_string(), tenured);
- } else {
- return isolate_->factory()->NewStringFromTwoByte(
- scanner().next_literal_utf16_string(), tenured);
- }
- }
-
- Handle<String> GetSymbol(bool* ok);
-
- // Get odd-ball literals.
- Literal* GetLiteralUndefined();
- Literal* GetLiteralTheHole();
-
- Handle<String> ParseIdentifier(bool* ok);
- Handle<String> ParseIdentifierOrStrictReservedWord(
- bool* is_strict_reserved, bool* ok);
- Handle<String> ParseIdentifierName(bool* ok);
- Handle<String> ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
-
- // Determine if the expression is a variable proxy and mark it as being used
- // in an assignment or with a increment/decrement operator. This is currently
- // used on for the statically checking assignments to harmony const bindings.
- void MarkAsLValue(Expression* expression);
-
- // Strict mode validation of LValue expressions
- void CheckStrictModeLValue(Expression* expression,
- const char* error,
- bool* ok);
-
- // Strict mode octal literal validation.
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
-
- // For harmony block scoping mode: Check if the scope has conflicting var/let
- // declarations from different scopes. It covers for example
- //
- // function f() { { { var x; } let x; } }
- // function g() { { var x; let x; } }
- //
- // The var declarations are hoisted to the function scope, but originate from
- // a scope where the name has also been let bound or the var declaration is
- // hoisted over such a scope.
- void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
-
- // Parser support
- VariableProxy* NewUnresolved(Handle<String> name,
- VariableMode mode,
- Interface* interface);
- void Declare(Declaration* declaration, bool resolve, bool* ok);
-
- bool TargetStackContainsLabel(Handle<String> label);
- BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
- IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok);
-
- void RegisterTargetUse(Label* target, Target* stop);
-
- // Factory methods.
-
- Scope* NewScope(Scope* parent, ScopeType type);
-
- Handle<String> LookupSymbol(int symbol_id);
-
- Handle<String> LookupCachedSymbol(int symbol_id);
-
- // Generate AST node that throw a ReferenceError with the given type.
- Expression* NewThrowReferenceError(Handle<String> type);
-
- // Generate AST node that throw a SyntaxError with the given
- // type. The first argument may be null (in the handle sense) in
- // which case no arguments are passed to the constructor.
- Expression* NewThrowSyntaxError(Handle<String> type, Handle<Object> first);
-
- // Generate AST node that throw a TypeError with the given
- // type. Both arguments must be non-null (in the handle sense).
- Expression* NewThrowTypeError(Handle<String> type,
- Handle<Object> first,
- Handle<Object> second);
-
- // Generic AST generator for throwing errors from compiled code.
- Expression* NewThrowError(Handle<String> constructor,
- Handle<String> type,
- Vector< Handle<Object> > arguments);
-
- preparser::PreParser::PreParseResult LazyParseFunctionLiteral(
- SingletonLogger* logger);
-
- AstNodeFactory<AstConstructionVisitor>* factory() {
- return current_function_state_->factory();
- }
-
- Isolate* isolate_;
- ZoneList<Handle<String> > symbol_cache_;
-
- Handle<Script> script_;
- Scanner scanner_;
- preparser::PreParser* reusable_preparser_;
- Scope* top_scope_;
- FunctionState* current_function_state_;
- Target* target_stack_; // for break, continue statements
- v8::Extension* extension_;
- ScriptDataImpl* pre_data_;
- FuncNameInferrer* fni_;
-
- Mode mode_;
- bool allow_natives_syntax_;
- bool allow_lazy_;
- bool allow_modules_;
- bool stack_overflow_;
- // If true, the next (and immediately following) function literal is
- // preceded by a parenthesis.
- // Heuristically that means that the function will be called immediately,
- // so never lazily compile it.
- bool parenthesized_function_;
-
- Zone* zone_;
- CompilationInfo* info_;
- friend class BlockState;
- friend class FunctionState;
-};
-
-
-// Support for handling complex values (array and object literals) that
-// can be fully handled at compile time.
-class CompileTimeValue: public AllStatic {
- public:
- enum Type {
- OBJECT_LITERAL_FAST_ELEMENTS,
- OBJECT_LITERAL_SLOW_ELEMENTS,
- ARRAY_LITERAL
- };
-
- static bool IsCompileTimeValue(Expression* expression);
-
- static bool ArrayLiteralElementNeedsInitialization(Expression* value);
-
- // Get the value as a compile time value.
- static Handle<FixedArray> GetValue(Expression* expression);
-
- // Get the type of a compile time value returned by GetValue().
- static Type GetType(Handle<FixedArray> value);
-
- // Get the elements array of a compile time value returned by GetValue().
- static Handle<FixedArray> GetElements(Handle<FixedArray> value);
-
- private:
- static const int kTypeSlot = 0;
- static const int kElementsSlot = 1;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompileTimeValue);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_PARSER_H_
diff --git a/src/3rdparty/v8/src/platform-cygwin.cc b/src/3rdparty/v8/src/platform-cygwin.cc
deleted file mode 100644
index f7e7d5e..0000000
--- a/src/3rdparty/v8/src/platform-cygwin.cc
+++ /dev/null
@@ -1,796 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Cygwin goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
-
-#include <errno.h>
-#include <pthread.h>
-#include <semaphore.h>
-#include <stdarg.h>
-#include <strings.h> // index
-#include <sys/time.h>
-#include <sys/mman.h> // mmap & munmap
-#include <unistd.h> // sysconf
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform-posix.h"
-#include "platform.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
-#include "win32-headers.h"
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Nothing special about Cygwin.
-}
-
-
-int OS::ActivationFrameAlignment() {
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- // An x86 store acts as a release barrier.
- *ptr = value;
-}
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return tzname[0]; // The location of the timezone string on Cygwin.
-}
-
-
-double OS::LocalTimeOffset() {
- // On Cygwin, struct tm does not contain a tm_gmtoff field.
- time_t utc = time(NULL);
- ASSERT(utc != -1);
- struct tm* loc = localtime(&utc);
- ASSERT(loc != NULL);
- // time - localtime includes any daylight savings offset, so subtract it.
- return static_cast<double>((mktime(loc) - utc) * msPerSecond -
- (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return sysconf(_SC_PAGESIZE);
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::ProtectCode(void* address, const size_t size) {
- DWORD old_protect;
- VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-}
-
-
-void OS::Guard(void* address, const size_t size) {
- DWORD oldprotect;
- VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
-}
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-void OS::LogSharedLibraryAddresses() {
- // This function assumes that the layout of the file is as follows:
- // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
- // If we encounter an unexpected situation we abort scanning further entries.
- FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return;
-
- // Allocate enough room to be able to store a full file name.
- const int kLibNameLen = FILENAME_MAX + 1;
- char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-
- i::Isolate* isolate = ISOLATE;
- // This loop will terminate once the scanning hits an EOF.
- while (true) {
- uintptr_t start, end;
- char attr_r, attr_w, attr_x, attr_p;
- // Parse the addresses and permission bits at the beginning of the line.
- if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
- if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-
- int c;
- if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
- // Found a read-only executable entry. Skip characters until we reach
- // the beginning of the filename or the end of the line.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n') && (c != '/'));
- if (c == EOF) break; // EOF: Was unexpected, just exit.
-
- // Process the filename if found.
- if (c == '/') {
- ungetc(c, fp); // Push the '/' back into the stream to be read below.
-
- // Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-
- // Drop the newline character read by fgets. We do not need to check
- // for a zero-length string because we know that we at least read the
- // '/' character.
- lib_name[strlen(lib_name) - 1] = '\0';
- } else {
- // No library name found, just record the raw address range.
- snprintf(lib_name, kLibNameLen,
- "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
- }
- LOG(isolate, SharedLibraryEvent(lib_name, start, end));
- } else {
- // Entry not describing executable data. Skip to end of line to set up
- // reading the next entry.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n'));
- if (c == EOF) break;
- }
- }
- free(lib_name);
- fclose(fp);
-}
-
-
-void OS::SignalCodeMovingGC() {
- // Nothing to do on Cygwin.
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // Not supported on Cygwin.
- return 0;
-}
-
-
-// The VirtualMemory implementation is taken from platform-win32.cc.
-// The mmap-based virtual memory implementation as it is used on most posix
-// platforms does not work well because Cygwin does not support MAP_FIXED.
-// This causes VirtualMemory::Commit to not always commit the memory region
-// specified.
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
- size_ = size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
- }
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
- return true;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- ASSERT(IsReserved());
- return VirtualFree(address, size, MEM_DECOMMIT) != false;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- if (NULL == VirtualAlloc(address,
- OS::CommitPageSize(),
- MEM_COMMIT,
- PAGE_READONLY | PAGE_GUARD)) {
- return false;
- }
- return true;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) {}
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size()) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-static inline Thread::LocalStorageKey PthreadKeyToLocalKey(
- pthread_key_t pthread_key) {
- // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
- // because pthread_key_t is a pointer type on Cygwin. This will probably not
- // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
- STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
- intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
- return static_cast<Thread::LocalStorageKey>(ptr_key);
-}
-
-
-static inline pthread_key_t LocalKeyToPthreadKey(
- Thread::LocalStorageKey local_key) {
- STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
- intptr_t ptr_key = static_cast<intptr_t>(local_key);
- return reinterpret_cast<pthread_key_t>(ptr_key);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return PthreadKeyToLocalKey(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class CygwinMutex : public Mutex {
- public:
- CygwinMutex() {
- pthread_mutexattr_t attrs;
- memset(&attrs, 0, sizeof(attrs));
-
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- }
-
- virtual ~CygwinMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new CygwinMutex();
-}
-
-
-class CygwinSemaphore : public Semaphore {
- public:
- explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~CygwinSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void CygwinSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool CygwinSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new CygwinSemaphore(count);
-}
-
-
-// ----------------------------------------------------------------------------
-// Cygwin profiler support.
-//
-// On Cygwin we use the same sampler implementation as on win32.
-
-class Sampler::PlatformData : public Malloced {
- public:
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId())) {}
-
- ~PlatformData() {
- if (profiled_thread_ != NULL) {
- CloseHandle(profiled_thread_);
- profiled_thread_ = NULL;
- }
- }
-
- HANDLE profiled_thread() { return profiled_thread_; }
-
- private:
- HANDLE profiled_thread_;
-};
-
-
-class SamplerThread : public Thread {
- public:
- static const int kSamplerThreadStackSize = 64 * KB;
-
- explicit SamplerThread(int interval)
- : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- instance_ = new SamplerThread(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- OS::Sleep(interval_);
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
- if (!sampler->isolate()->IsInitialized()) return;
- if (!sampler->IsProfiling()) return;
- SamplerThread* sampler_thread =
- reinterpret_cast<SamplerThread*>(raw_sampler_thread);
- sampler_thread->SampleContext(sampler);
- }
-
- void SampleContext(Sampler* sampler) {
- HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
- if (profiled_thread == NULL) return;
-
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
- if (sample == NULL) sample = &sample_obj;
-
- static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread) == kSuspendFailed) return;
- sample->state = sampler->isolate()->current_vm_state();
-
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread, &context) != 0) {
-#if V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(context.Rip);
- sample->sp = reinterpret_cast<Address>(context.Rsp);
- sample->fp = reinterpret_cast<Address>(context.Rbp);
-#else
- sample->pc = reinterpret_cast<Address>(context.Eip);
- sample->sp = reinterpret_cast<Address>(context.Esp);
- sample->fp = reinterpret_cast<Address>(context.Ebp);
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
- }
- ResumeThread(profiled_thread);
- }
-
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SamplerThread* instance_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-
-Mutex* SamplerThread::mutex_ = NULL;
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
- SamplerThread::SetUp();
-}
-
-
-void OS::TearDown() {
- SamplerThread::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
-bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
- return false;
-}
-
-
-void Sampler::DoSample() {
-}
-
-
-void Sampler::StartProfiling() {
-}
-
-
-void Sampler::StopProfiling() {
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-freebsd.cc b/src/3rdparty/v8/src/platform-freebsd.cc
deleted file mode 100644
index 1af928e..0000000
--- a/src/3rdparty/v8/src/platform-freebsd.cc
+++ /dev/null
@@ -1,918 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for FreeBSD goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <sys/ucontext.h>
-#include <stdlib.h>
-
-#include <sys/types.h> // mmap & munmap
-#include <sys/mman.h> // mmap & munmap
-#include <sys/stat.h> // open
-#include <sys/fcntl.h> // open
-#include <unistd.h> // getpagesize
-// If you don't have execinfo.h then you need devel/libexecinfo from ports.
-#include <execinfo.h> // backtrace, backtrace_symbols
-#include <strings.h> // index
-#include <errno.h>
-#include <stdarg.h>
-#include <limits.h>
-
-#undef MAP_TYPE
-
-#include "v8.h"
-#include "v8threads.h"
-
-#include "platform-posix.h"
-#include "platform.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id on FreeBSD since tids and pids share a
-// name space and pid 0 is used to kill the group (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- // Correct as on OS X
- if (-1.0 < x && x < 0.0) {
- return -0.0;
- } else {
- return ceil(x);
- }
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- *ptr = value;
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // FreeBSD runs on anything.
-}
-
-
-int OS::ActivationFrameAlignment() {
- // 16 byte alignment on FreeBSD
- return 16;
-}
-
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return getpagesize();
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool executable) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
- if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* buf, const size_t length) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(buf, length);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
-#if (defined(__arm__) || defined(__thumb__))
-# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
- asm("bkpt 0");
-# endif
-#else
- asm("int $3");
-#endif
-}
-
-
-void OS::DumpBacktrace() {
- void* trace[100];
- int size = backtrace(trace, ARRAY_SIZE(trace));
- char** symbols = backtrace_symbols(trace, size);
- fprintf(stderr, "\n==== C stack trace ===============================\n\n");
- if (size == 0) {
- fprintf(stderr, "(empty)\n");
- } else if (symbols == NULL) {
- fprintf(stderr, "(no symbols)\n");
- } else {
- for (int i = 1; i < size; ++i) {
- fprintf(stderr, "%2d: ", i);
- char mangled[201];
- if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
- fprintf(stderr, "%s\n", mangled);
- } else {
- fprintf(stderr, "??\n");
- }
- }
- }
- fflush(stderr);
- free(symbols);
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
-}
-
-
-void OS::LogSharedLibraryAddresses() {
- static const int MAP_LENGTH = 1024;
- int fd = open("/proc/self/maps", O_RDONLY);
- if (fd < 0) return;
- while (true) {
- char addr_buffer[11];
- addr_buffer[0] = '0';
- addr_buffer[1] = 'x';
- addr_buffer[10] = 0;
- int result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- unsigned start = StringToLong(addr_buffer);
- result = read(fd, addr_buffer + 2, 1);
- if (result < 1) break;
- if (addr_buffer[2] != '-') break;
- result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- unsigned end = StringToLong(addr_buffer);
- char buffer[MAP_LENGTH];
- int bytes_read = -1;
- do {
- bytes_read++;
- if (bytes_read >= MAP_LENGTH - 1)
- break;
- result = read(fd, buffer + bytes_read, 1);
- if (result < 1) break;
- } while (buffer[bytes_read] != '\n');
- buffer[bytes_read] = 0;
- // Ignore mappings that are not executable.
- if (buffer[3] != 'x') continue;
- char* start_of_path = index(buffer, '/');
- // There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
- buffer[bytes_read] = 0;
- LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
- }
- close(fd);
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
- size_ = size;
-}
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- Address base = static_cast<Address>(reservation);
- Address aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(base, size);
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData),
- stack_size_(options.stack_size()) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class FreeBSDMutex : public Mutex {
- public:
- FreeBSDMutex() {
- pthread_mutexattr_t attrs;
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~FreeBSDMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new FreeBSDMutex();
-}
-
-
-class FreeBSDSemaphore : public Semaphore {
- public:
- explicit FreeBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~FreeBSDSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void FreeBSDSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-bool FreeBSDSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new FreeBSDSemaphore(count);
-}
-
-
-static pthread_t GetThreadID() {
- pthread_t thread_id = pthread_self();
- return thread_id;
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = isolate->current_vm_state();
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
-#elif V8_HOST_ARCH_ARM
- sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-
-class SignalSender : public Thread {
- public:
- static const int kSignalSenderStackSize = 64 * KB;
-
- explicit SignalSender(int interval)
- : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Install a signal handler.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-
- // Start a thread that sends SIGPROF signal to VM threads.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
-
- // Restore the old signal handler.
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- void SendProfilingSignal(pthread_t tid) {
- if (!signal_handler_installed_) return;
- pthread_kill(tid, SIGPROF);
- }
-
- void Sleep() {
- // Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif
- USE(result);
- }
-
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-Mutex* SignalSender::mutex_ = NULL;
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
- SignalSender::SetUp();
-}
-
-
-void OS::TearDown() {
- SignalSender::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
-bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
- return false;
-}
-
-
-void Sampler::DoSample() {
-}
-
-
-void Sampler::StartProfiling() {
-}
-
-
-void Sampler::StopProfiling() {
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-linux.cc b/src/3rdparty/v8/src/platform-linux.cc
deleted file mode 100644
index f571b99..0000000
--- a/src/3rdparty/v8/src/platform-linux.cc
+++ /dev/null
@@ -1,1393 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Linux goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <sys/prctl.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <stdlib.h>
-
-#if defined(__GLIBC__)
-#include <execinfo.h>
-#include <cxxabi.h>
-#endif
-
-// Ubuntu Dapper requires memory pages to be marked as
-// executable. Otherwise, OS raises an exception when executing code
-// in that page.
-#include <sys/types.h> // mmap & munmap
-#include <sys/mman.h> // mmap & munmap
-#include <sys/stat.h> // open
-#include <fcntl.h> // open
-#include <unistd.h> // sysconf
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
-#include <execinfo.h> // backtrace, backtrace_symbols
-#endif // defined(__GLIBC__) && !defined(__UCLIBC__)
-#include <strings.h> // index
-#include <errno.h>
-#include <stdarg.h>
-
-// GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'.
-// Old versions of the C library <signal.h> didn't define the type.
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
-#include <asm/sigcontext.h>
-#endif
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform-posix.h"
-#include "platform.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id on Linux since tids and pids share a
-// name space and pid 0 is reserved (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Linux runs on anything.
-}
-
-
-#ifdef __arm__
-static bool CPUInfoContainsString(const char * search_string) {
- const char* file_name = "/proc/cpuinfo";
- // This is written as a straight shot one pass parser
- // and not using STL string and ifstream because,
- // on Linux, it's reading from a (non-mmap-able)
- // character special device.
- FILE* f = NULL;
- const char* what = search_string;
-
- if (NULL == (f = fopen(file_name, "r")))
- return false;
-
- int k;
- while (EOF != (k = fgetc(f))) {
- if (k == *what) {
- ++what;
- while ((*what != '\0') && (*what == fgetc(f))) {
- ++what;
- }
- if (*what == '\0') {
- fclose(f);
- return true;
- } else {
- what = search_string;
- }
- }
- }
- fclose(f);
-
- // Did not find string in the proc file.
- return false;
-}
-
-
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
- const char* search_string = NULL;
- // Simple detection of VFP at runtime for Linux.
- // It is based on /proc/cpuinfo, which reveals hardware configuration
- // to user-space applications. According to ARM (mid 2009), no similar
- // facility is universally available on the ARM architectures,
- // so it's up to individual OSes to provide such.
- switch (feature) {
- case VFP2:
- search_string = "vfp";
- break;
- case VFP3:
- search_string = "vfpv3";
- break;
- case ARMv7:
- search_string = "ARMv7";
- break;
- case SUDIV:
- search_string = "idiva";
- break;
- case VFP32DREGS:
- // This case is handled specially below.
- break;
- default:
- UNREACHABLE();
- }
-
- if (feature == VFP32DREGS) {
- return ArmCpuHasFeature(VFP3) && !CPUInfoContainsString("d16");
- }
-
- if (CPUInfoContainsString(search_string)) {
- return true;
- }
-
- if (feature == VFP3) {
- // Some old kernels will report vfp not vfpv3. Here we make a last attempt
- // to detect vfpv3 by checking for vfp *and* neon, since neon is only
- // available on architectures with vfpv3.
- // Checking neon on its own is not enough as it is possible to have neon
- // without vfp.
- if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
- return true;
- }
- }
-
- return false;
-}
-
-
-CpuImplementer OS::GetCpuImplementer() {
- static bool use_cached_value = false;
- static CpuImplementer cached_value = UNKNOWN_IMPLEMENTER;
- if (use_cached_value) {
- return cached_value;
- }
- if (CPUInfoContainsString("CPU implementer\t: 0x41")) {
- cached_value = ARM_IMPLEMENTER;
- } else if (CPUInfoContainsString("CPU implementer\t: 0x51")) {
- cached_value = QUALCOMM_IMPLEMENTER;
- } else {
- cached_value = UNKNOWN_IMPLEMENTER;
- }
- use_cached_value = true;
- return cached_value;
-}
-
-
-bool OS::ArmUsingHardFloat() {
- // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
- // the Floating Point ABI used (PCS stands for Procedure Call Standard).
- // We use these as well as a couple of other defines to statically determine
- // what FP ABI used.
- // GCC versions 4.4 and below don't support hard-fp.
- // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
- // __ARM_PCS_VFP.
-
-#define GCC_VERSION (__GNUC__ * 10000 \
- + __GNUC_MINOR__ * 100 \
- + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION >= 40600
-#if defined(__ARM_PCS_VFP)
- return true;
-#else
- return false;
-#endif
-
-#elif GCC_VERSION < 40500
- return false;
-
-#else
-#if defined(__ARM_PCS_VFP)
- return true;
-#elif defined(__ARM_PCS) || defined(__SOFTFP) || !defined(__VFP_FP__)
- return false;
-#else
-#error "Your version of GCC does not report the FP ABI compiled for." \
- "Please report it on this issue" \
- "http://code.google.com/p/v8/issues/detail?id=2140"
-
-#endif
-#endif
-#undef GCC_VERSION
-}
-
-#endif // def __arm__
-
-
-#ifdef __mips__
-bool OS::MipsCpuHasFeature(CpuFeature feature) {
- const char* search_string = NULL;
- const char* file_name = "/proc/cpuinfo";
- // Simple detection of FPU at runtime for Linux.
- // It is based on /proc/cpuinfo, which reveals hardware configuration
- // to user-space applications. According to MIPS (early 2010), no similar
- // facility is universally available on the MIPS architectures,
- // so it's up to individual OSes to provide such.
- //
- // This is written as a straight shot one pass parser
- // and not using STL string and ifstream because,
- // on Linux, it's reading from a (non-mmap-able)
- // character special device.
-
- switch (feature) {
- case FPU:
- search_string = "FPU";
- break;
- default:
- UNREACHABLE();
- }
-
- FILE* f = NULL;
- const char* what = search_string;
-
- if (NULL == (f = fopen(file_name, "r")))
- return false;
-
- int k;
- while (EOF != (k = fgetc(f))) {
- if (k == *what) {
- ++what;
- while ((*what != '\0') && (*what == fgetc(f))) {
- ++what;
- }
- if (*what == '\0') {
- fclose(f);
- return true;
- } else {
- what = search_string;
- }
- }
- }
- fclose(f);
-
- // Did not find string in the proc file.
- return false;
-}
-#endif // def __mips__
-
-
-int OS::ActivationFrameAlignment() {
-#ifdef V8_TARGET_ARCH_ARM
- // On EABI ARM targets this is required for fp correctness in the
- // runtime system.
- return 8;
-#elif V8_TARGET_ARCH_MIPS
- return 8;
-#endif
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
- (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
- // Only use on ARM or MIPS hardware.
- MemoryBarrier();
-#else
- __asm__ __volatile__("" : : : "memory");
- // An x86 store acts as a release barrier.
-#endif
- *ptr = value;
-}
-
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return sysconf(_SC_PAGESIZE);
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* addr = OS::GetRandomMmapAddr();
- void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) {
- LOG(i::Isolate::Current(),
- StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- if (FLAG_break_on_abort) {
- DebugBreak();
- }
- abort();
-}
-
-
-void OS::DebugBreak() {
-// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
-// which is the architecture of generated code).
-#if (defined(__arm__) || defined(__thumb__))
-# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
- asm("bkpt 0");
-# endif
-#elif defined(__mips__)
- asm("break");
-#else
- asm("int $3");
-#endif
-}
-
-
-void OS::DumpBacktrace() {
-#if defined(__GLIBC__)
- void* trace[100];
- int size = backtrace(trace, ARRAY_SIZE(trace));
- char** symbols = backtrace_symbols(trace, size);
- fprintf(stderr, "\n==== C stack trace ===============================\n\n");
- if (size == 0) {
- fprintf(stderr, "(empty)\n");
- } else if (symbols == NULL) {
- fprintf(stderr, "(no symbols)\n");
- } else {
- for (int i = 1; i < size; ++i) {
- fprintf(stderr, "%2d: ", i);
- char mangled[201];
- if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
- int status;
- size_t length;
- char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
- fprintf(stderr, "%s\n", demangled ? demangled : mangled);
- free(demangled);
- } else {
- fprintf(stderr, "??\n");
- }
- }
- }
- fflush(stderr);
- free(symbols);
-#endif
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) OS::Free(memory_, size_);
- fclose(file_);
-}
-
-
-void OS::LogSharedLibraryAddresses() {
- // This function assumes that the layout of the file is as follows:
- // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
- // If we encounter an unexpected situation we abort scanning further entries.
- FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return;
-
- // Allocate enough room to be able to store a full file name.
- const int kLibNameLen = FILENAME_MAX + 1;
- char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-
- i::Isolate* isolate = ISOLATE;
- // This loop will terminate once the scanning hits an EOF.
- while (true) {
- uintptr_t start, end;
- char attr_r, attr_w, attr_x, attr_p;
- // Parse the addresses and permission bits at the beginning of the line.
- if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
- if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-
- int c;
- if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
- // Found a read-only executable entry. Skip characters until we reach
- // the beginning of the filename or the end of the line.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '['));
- if (c == EOF) break; // EOF: Was unexpected, just exit.
-
- // Process the filename if found.
- if ((c == '/') || (c == '[')) {
- // Push the '/' or '[' back into the stream to be read below.
- ungetc(c, fp);
-
- // Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-
- // Drop the newline character read by fgets. We do not need to check
- // for a zero-length string because we know that we at least read the
- // '/' or '[' character.
- lib_name[strlen(lib_name) - 1] = '\0';
- } else {
- // No library name found, just record the raw address range.
- snprintf(lib_name, kLibNameLen,
- "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
- }
- LOG(isolate, SharedLibraryEvent(lib_name, start, end));
- } else {
- // Entry not describing executable data. Skip to end of line to set up
- // reading the next entry.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n'));
- if (c == EOF) break;
- }
- }
- free(lib_name);
- fclose(fp);
-}
-
-
-void OS::SignalCodeMovingGC() {
- // Support for ll_prof.py.
- //
- // The Linux profiler built into the kernel logs all mmap's with
- // PROT_EXEC so that analysis tools can properly attribute ticks. We
- // do a mmap with a name known by ll_prof.py and immediately munmap
- // it. This injects a GC marker into the stream of events generated
- // by the kernel and allows us to synchronize V8 code log and the
- // kernel log.
- int size = sysconf(_SC_PAGESIZE);
- FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
- void* addr = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_EXEC,
- MAP_PRIVATE,
- fileno(f),
- 0);
- ASSERT(addr != MAP_FAILED);
- OS::Free(addr, size);
- fclose(f);
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // backtrace is a glibc extension.
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-#else // defined(__GLIBC__) && !defined(__UCLIBC__)
- return 0;
-#endif // defined(__GLIBC__) && !defined(__UCLIBC__)
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
- size_ = size;
-}
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- Address base = static_cast<Address>(reservation);
- Address aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(base, size);
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- return true;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) {}
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size()) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
-#ifdef PR_SET_NAME
- prctl(PR_SET_NAME,
- reinterpret_cast<unsigned long>(thread->name()), // NOLINT
- 0, 0, 0);
-#endif
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- CHECK_EQ(0, result);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class LinuxMutex : public Mutex {
- public:
- LinuxMutex() {
- pthread_mutexattr_t attrs;
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new LinuxMutex();
-}
-
-
-class LinuxSemaphore : public Semaphore {
- public:
- explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~LinuxSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void LinuxSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool LinuxSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result > 0) {
- // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
- errno = result;
- result = -1;
- }
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new LinuxSemaphore(count);
-}
-
-
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
-
-// Not all versions of Android's C library provide ucontext_t.
-// Detect this and provide custom but compatible definitions. Note that these
-// follow the GLibc naming convention to access register values from
-// mcontext_t.
-//
-// See http://code.google.com/p/android/issues/detail?id=34784
-
-#if defined(__arm__)
-
-typedef struct sigcontext mcontext_t;
-
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__mips__)
-// MIPS version of sigcontext, for Android bionic.
-typedef struct {
- uint32_t regmask;
- uint32_t status;
- uint64_t pc;
- uint64_t gregs[32];
- uint64_t fpregs[32];
- uint32_t acx;
- uint32_t fpc_csr;
- uint32_t fpc_eir;
- uint32_t used_math;
- uint32_t dsp;
- uint64_t mdhi;
- uint64_t mdlo;
- uint32_t hi1;
- uint32_t lo1;
- uint32_t hi2;
- uint32_t lo2;
- uint32_t hi3;
- uint32_t lo3;
-} mcontext_t;
-
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__i386__)
-// x86 version for Android.
-typedef struct {
- uint32_t gregs[19];
- void* fpregs;
- uint32_t oldmask;
- uint32_t cr2;
-} mcontext_t;
-
-typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
-#endif
-
-#endif // __ANDROID__ && !defined(__BIONIC_HAVE_UCONTEXT_T)
-
-static int GetThreadID() {
-#if defined(__ANDROID__)
- // Android's C library provides gettid(2).
- return gettid();
-#else
- // Glibc doesn't provide a wrapper for gettid(2).
- return syscall(SYS_gettid);
-#endif
-}
-
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = isolate->current_vm_state();
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
-#elif V8_HOST_ARCH_ARM
-#if defined(__GLIBC__) && !defined(__UCLIBC__) && \
- (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
- // Old GLibc ARM versions used a gregs[] array to access the register
- // values from mcontext_t.
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
-#else
- sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
- sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
- sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
- // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
-#elif V8_HOST_ARCH_MIPS
- sample->pc = reinterpret_cast<Address>(mcontext.pc);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#endif // V8_HOST_ARCH_*
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-
-class CpuProfilerSignalHandler {
- public:
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static bool RegisterProfilingSampler() {
- ScopedLock lock(mutex_);
- if (!profiling_samplers_count_) InstallSignalHandler();
- ++profiling_samplers_count_;
- return signal_handler_installed_;
- }
-
- static void UnregisterProfilingSampler() {
- ScopedLock lock(mutex_);
- ASSERT(profiling_samplers_count_ > 0);
- if (!profiling_samplers_count_) return;
- if (profiling_samplers_count_ == 1) RestoreSignalHandler();
- --profiling_samplers_count_;
- }
-
- private:
- static void InstallSignalHandler() {
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
- }
-
- static void RestoreSignalHandler() {
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static int profiling_samplers_count_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-};
-
-
-Mutex* CpuProfilerSignalHandler::mutex_ = NULL;
-int CpuProfilerSignalHandler::profiling_samplers_count_ = 0;
-bool CpuProfilerSignalHandler::signal_handler_installed_ = false;
-struct sigaction CpuProfilerSignalHandler::old_signal_handler_;
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData()
- : vm_tgid_(getpid()),
- vm_tid_(GetThreadID()),
- signal_handler_installed_(false) {}
-
- void set_signal_handler_installed(bool value) {
- signal_handler_installed_ = value;
- }
-
- void SendProfilingSignal() {
- if (!signal_handler_installed_) return;
- // Glibc doesn't provide a wrapper for tgkill(2).
-#if defined(ANDROID)
- syscall(__NR_tgkill, vm_tgid_, vm_tid_, SIGPROF);
-#else
- int result = syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
- USE(result);
- ASSERT(result == 0);
-#endif
- }
-
- private:
- const int vm_tgid_;
- const int vm_tid_;
- bool signal_handler_installed_;
-};
-
-
-class SignalSender : public Thread {
- public:
- static const int kSignalSenderStackSize = 64 * KB;
-
- explicit SignalSender(int interval)
- : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Start a thread that will send SIGPROF signal to VM threads,
- // when CPU profiling will be enabled.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- sampler->DoSample();
- }
-
- void Sleep() {
- // Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
-#if defined(ANDROID)
- usleep(interval);
-#else
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif // DEBUG
- USE(result);
-#endif // ANDROID
- }
-
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static struct sigaction old_signal_handler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-
-Mutex* SignalSender::mutex_ = NULL;
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-
-
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-
-#ifdef __arm__
- // When running on ARM hardware check that the EABI used by V8 and
- // by the C code is the same.
- bool hard_float = OS::ArmUsingHardFloat();
- if (hard_float) {
-#if !USE_EABI_HARDFLOAT
- PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
- "-DUSE_EABI_HARDFLOAT\n");
- exit(1);
-#endif
- } else {
-#if USE_EABI_HARDFLOAT
- PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
- "-DUSE_EABI_HARDFLOAT\n");
- exit(1);
-#endif
- }
-#endif
- SignalSender::SetUp();
- CpuProfilerSignalHandler::SetUp();
-}
-
-
-void OS::TearDown() {
- CpuProfilerSignalHandler::TearDown();
- SignalSender::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- has_processing_thread_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
-bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
- return true;
-}
-
-
-void Sampler::DoSample() {
- platform_data()->SendProfilingSignal();
-}
-
-
-void Sampler::StartProfiling() {
- platform_data()->set_signal_handler_installed(
- CpuProfilerSignalHandler::RegisterProfilingSampler());
-}
-
-
-void Sampler::StopProfiling() {
- CpuProfilerSignalHandler::UnregisterProfilingSampler();
- platform_data()->set_signal_handler_installed(false);
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-macos.cc b/src/3rdparty/v8/src/platform-macos.cc
deleted file mode 100644
index 7913981..0000000
--- a/src/3rdparty/v8/src/platform-macos.cc
+++ /dev/null
@@ -1,942 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for MacOS goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
-
-#include <dlfcn.h>
-#include <unistd.h>
-#include <sys/mman.h>
-#include <mach/mach_init.h>
-#include <mach-o/dyld.h>
-#include <mach-o/getsect.h>
-
-#include <AvailabilityMacros.h>
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <libkern/OSAtomic.h>
-#include <mach/mach.h>
-#include <mach/semaphore.h>
-#include <mach/task.h>
-#include <mach/vm_statistics.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <sys/sysctl.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform-posix.h"
-#include "platform.h"
-#include "vm-state-inl.h"
-
-// Manually define these here as weak imports, rather than including execinfo.h.
-// This lets us launch on 10.4 which does not have these calls.
-extern "C" {
- extern int backtrace(void**, int) __attribute__((weak_import));
- extern char** backtrace_symbols(void* const*, int)
- __attribute__((weak_import));
- extern void backtrace_symbols_fd(void* const*, int, int)
- __attribute__((weak_import));
-}
-
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id on MacOSX since a pthread_t is
-// a pointer.
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- // Correct Mac OS X Leopard 'ceil' behavior.
- if (-1.0 < x && x < 0.0) {
- return -0.0;
- } else {
- return ceil(x);
- }
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return getpagesize();
-}
-
-
-// Constants used for mmap.
-// kMmapFd is used to pass vm_alloc flags to tag the region with the user
-// defined tag 255 This helps identify V8-allocated regions in memory analysis
-// tools like vmmap(1).
-static const int kMmapFd = VM_MAKE_TAG(255);
-static const off_t kMmapFdOffset = 0;
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(OS::GetRandomMmapAddr(),
- msize,
- prot,
- MAP_PRIVATE | MAP_ANON,
- kMmapFd,
- kMmapFdOffset);
- if (mbase == MAP_FAILED) {
- LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- usleep(1000 * milliseconds);
-}
-
-
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) OS::Free(memory_, size_);
- fclose(file_);
-}
-
-
-void OS::LogSharedLibraryAddresses() {
- unsigned int images_count = _dyld_image_count();
- for (unsigned int i = 0; i < images_count; ++i) {
- const mach_header* header = _dyld_get_image_header(i);
- if (header == NULL) continue;
-#if V8_HOST_ARCH_X64
- uint64_t size;
- char* code_ptr = getsectdatafromheader_64(
- reinterpret_cast<const mach_header_64*>(header),
- SEG_TEXT,
- SECT_TEXT,
- &size);
-#else
- unsigned int size;
- char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
-#endif
- if (code_ptr == NULL) continue;
- const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
- const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
- LOG(Isolate::Current(),
- SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
- }
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- // MacOSX requires all these to install so we can assume they are present.
- // These constants are defined by the CPUid instructions.
- const uint64_t one = 1;
- return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
-}
-
-
-int OS::ActivationFrameAlignment() {
- // OS X activation frames must be 16 byte-aligned; see "Mac OS X ABI
- // Function Call Guide".
- return 16;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- OSMemoryBarrier();
- *ptr = value;
-}
-
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-int OS::StackWalk(Vector<StackFrame> frames) {
- // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
- if (backtrace == NULL)
- return 0;
-
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text,
- kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-}
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- Address base = static_cast<Address>(reservation);
- Address aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
- }
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
-bool VirtualMemory::CommitRegion(void* address,
- size_t size,
- bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address,
- size,
- prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(address, size);
- return true;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::UncommitRegion(void* address, size_t size) {
- return mmap(address,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- return false;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) {}
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData),
- stack_size_(options.stack_size()) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void SetThreadName(const char* name) {
- // pthread_setname_np is only available in 10.6 or later, so test
- // for it at runtime.
- int (*dynamic_pthread_setname_np)(const char*);
- *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
- dlsym(RTLD_DEFAULT, "pthread_setname_np");
- if (!dynamic_pthread_setname_np)
- return;
-
- // Mac OS X does not expose the length limit of the name, so hardcode it.
- static const int kMaxNameLength = 63;
- USE(kMaxNameLength);
- ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
- dynamic_pthread_setname_np(name);
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
- SetThreadName(thread->name());
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-#ifdef V8_FAST_TLS_SUPPORTED
-
-static Atomic32 tls_base_offset_initialized = 0;
-intptr_t kMacTlsBaseOffset = 0;
-
-// It's safe to do the initialization more that once, but it has to be
-// done at least once.
-static void InitializeTlsBaseOffset() {
- const size_t kBufferSize = 128;
- char buffer[kBufferSize];
- size_t buffer_size = kBufferSize;
- int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
- if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
- V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
- }
- // The buffer now contains a string of the form XX.YY.ZZ, where
- // XX is the major kernel version component.
- // Make sure the buffer is 0-terminated.
- buffer[kBufferSize - 1] = '\0';
- char* period_pos = strchr(buffer, '.');
- *period_pos = '\0';
- int kernel_version_major =
- static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT
- // The constants below are taken from pthreads.s from the XNU kernel
- // sources archive at www.opensource.apple.com.
- if (kernel_version_major < 11) {
- // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
- // same offsets.
-#if defined(V8_HOST_ARCH_IA32)
- kMacTlsBaseOffset = 0x48;
-#else
- kMacTlsBaseOffset = 0x60;
-#endif
- } else {
- // 11.x.x (Lion) changed the offset.
- kMacTlsBaseOffset = 0;
- }
-
- Release_Store(&tls_base_offset_initialized, 1);
-}
-
-static void CheckFastTls(Thread::LocalStorageKey key) {
- void* expected = reinterpret_cast<void*>(0x1234CAFE);
- Thread::SetThreadLocal(key, expected);
- void* actual = Thread::GetExistingThreadLocal(key);
- if (expected != actual) {
- V8_Fatal(__FILE__, __LINE__,
- "V8 failed to initialize fast TLS on current kernel");
- }
- Thread::SetThreadLocal(key, NULL);
-}
-
-#endif // V8_FAST_TLS_SUPPORTED
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-#ifdef V8_FAST_TLS_SUPPORTED
- bool check_fast_tls = false;
- if (tls_base_offset_initialized == 0) {
- check_fast_tls = true;
- InitializeTlsBaseOffset();
- }
-#endif
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
-#ifdef V8_FAST_TLS_SUPPORTED
- // If we just initialized fast TLS support, make sure it works.
- if (check_fast_tls) CheckFastTls(typed_key);
-#endif
- return typed_key;
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class MacOSMutex : public Mutex {
- public:
- MacOSMutex() {
- pthread_mutexattr_t attr;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutex_init(&mutex_, &attr);
- }
-
- virtual ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() { return pthread_mutex_lock(&mutex_); }
- virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_;
-};
-
-
-Mutex* OS::CreateMutex() {
- return new MacOSMutex();
-}
-
-
-class MacOSSemaphore : public Semaphore {
- public:
- explicit MacOSSemaphore(int count) {
- int r;
- r = semaphore_create(mach_task_self(),
- &semaphore_,
- SYNC_POLICY_FIFO,
- count);
- ASSERT(r == KERN_SUCCESS);
- }
-
- ~MacOSSemaphore() {
- int r;
- r = semaphore_destroy(mach_task_self(), semaphore_);
- ASSERT(r == KERN_SUCCESS);
- }
-
- void Wait() {
- int r;
- do {
- r = semaphore_wait(semaphore_);
- ASSERT(r == KERN_SUCCESS || r == KERN_ABORTED);
- } while (r == KERN_ABORTED);
- }
-
- bool Wait(int timeout);
-
- void Signal() { semaphore_signal(semaphore_); }
-
- private:
- semaphore_t semaphore_;
-};
-
-
-bool MacOSSemaphore::Wait(int timeout) {
- mach_timespec_t ts;
- ts.tv_sec = timeout / 1000000;
- ts.tv_nsec = (timeout % 1000000) * 1000;
- return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new MacOSSemaphore(count);
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : profiled_thread_(mach_thread_self()) {}
-
- ~PlatformData() {
- // Deallocate Mach port for thread.
- mach_port_deallocate(mach_task_self(), profiled_thread_);
- }
-
- thread_act_t profiled_thread() { return profiled_thread_; }
-
- private:
- // Note: for profiled_thread_ Mach primitives are used instead of PThread's
- // because the latter doesn't provide thread manipulation primitives required.
- // For details, consult "Mac OS X Internals" book, Section 7.3.
- thread_act_t profiled_thread_;
-};
-
-
-class SamplerThread : public Thread {
- public:
- static const int kSamplerThreadStackSize = 64 * KB;
-
- explicit SamplerThread(int interval)
- : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- instance_ = new SamplerThread(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- OS::Sleep(interval_);
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
- if (!sampler->isolate()->IsInitialized()) return;
- if (!sampler->IsProfiling()) return;
- SamplerThread* sampler_thread =
- reinterpret_cast<SamplerThread*>(raw_sampler_thread);
- sampler_thread->SampleContext(sampler);
- }
-
- void SampleContext(Sampler* sampler) {
- thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
- if (sample == NULL) sample = &sample_obj;
-
- if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
-
-#if V8_HOST_ARCH_X64
- thread_state_flavor_t flavor = x86_THREAD_STATE64;
- x86_thread_state64_t state;
- mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
-#if __DARWIN_UNIX03
-#define REGISTER_FIELD(name) __r ## name
-#else
-#define REGISTER_FIELD(name) r ## name
-#endif // __DARWIN_UNIX03
-#elif V8_HOST_ARCH_IA32
- thread_state_flavor_t flavor = i386_THREAD_STATE;
- i386_thread_state_t state;
- mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
-#if __DARWIN_UNIX03
-#define REGISTER_FIELD(name) __e ## name
-#else
-#define REGISTER_FIELD(name) e ## name
-#endif // __DARWIN_UNIX03
-#else
-#error Unsupported Mac OS X host architecture.
-#endif // V8_HOST_ARCH
-
- if (thread_get_state(profiled_thread,
- flavor,
- reinterpret_cast<natural_t*>(&state),
- &count) == KERN_SUCCESS) {
- sample->state = sampler->isolate()->current_vm_state();
- sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
- sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
- sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
- sampler->SampleStack(sample);
- sampler->Tick(sample);
- }
- thread_resume(profiled_thread);
- }
-
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SamplerThread* instance_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-#undef REGISTER_FIELD
-
-
-Mutex* SamplerThread::mutex_ = NULL;
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
- SamplerThread::SetUp();
-}
-
-
-void OS::TearDown() {
- SamplerThread::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
-bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
- return false;
-}
-
-
-void Sampler::DoSample() {
-}
-
-
-void Sampler::StartProfiling() {
-}
-
-
-void Sampler::StopProfiling() {
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-nullos.cc b/src/3rdparty/v8/src/platform-nullos.cc
deleted file mode 100644
index 20d8801..0000000
--- a/src/3rdparty/v8/src/platform-nullos.cc
+++ /dev/null
@@ -1,549 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for NULLOS goes here
-
-// Minimal include to get access to abort, fprintf and friends for bootstrapping
-// messages.
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "platform.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-// Give V8 the opportunity to override the default ceil behaviour.
-double ceiling(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Give V8 the opportunity to override the default fmod behavior.
-double modulo(double x, double y) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_sin(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_cos(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_tan(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_log(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Initialize OS class early in the V8 startup.
-void OS::SetUp() {
- // Seed the random number generator.
- UNIMPLEMENTED();
-}
-
-
-void OS::PostSetUp() {
- UNIMPLEMENTED();
-}
-
-
-void OS::TearDown() {
- UNIMPLEMENTED();
-}
-
-
-// Returns the accumulated user time for thread.
-int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
- UNIMPLEMENTED();
- *secs = 0;
- *usecs = 0;
- return 0;
-}
-
-
-// Returns current time as the number of milliseconds since
-// 00:00:00 UTC, January 1, 1970.
-double OS::TimeCurrentMillis() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns ticks in microsecond resolution.
-int64_t OS::Ticks() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns a string identifying the current timezone taking into
-// account daylight saving.
-const char* OS::LocalTimezone(double time) {
- UNIMPLEMENTED();
- return "<none>";
-}
-
-
-// Returns the daylight savings offset in milliseconds for the given time.
-double OS::DaylightSavingsOffset(double time) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-int OS::GetLastError() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns the local time offset in milliseconds east of UTC without
-// taking daylight savings time into account.
-double OS::LocalTimeOffset() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Print (debug) message to console.
-void OS::Print(const char* format, ...) {
- UNIMPLEMENTED();
-}
-
-
-// Print (debug) message to console.
-void OS::VPrint(const char* format, va_list args) {
- // Minimalistic implementation for bootstrapping.
- vfprintf(stdout, format, args);
-}
-
-
-void OS::FPrint(FILE* out, const char* format, ...) {
- va_list args;
- va_start(args, format);
- VFPrint(out, format, args);
- va_end(args);
-}
-
-
-void OS::VFPrint(FILE* out, const char* format, va_list args) {
- vfprintf(out, format, args);
-}
-
-
-// Print error message to console.
-void OS::PrintError(const char* format, ...) {
- // Minimalistic implementation for bootstrapping.
- va_list args;
- va_start(args, format);
- VPrintError(format, args);
- va_end(args);
-}
-
-
-// Print error message to console.
-void OS::VPrintError(const char* format, va_list args) {
- // Minimalistic implementation for bootstrapping.
- vfprintf(stderr, format, args);
-}
-
-
-int OS::SNPrintF(char* str, size_t size, const char* format, ...) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0;
-}
-
-
-double OS::nan_value() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-CpuImplementer OS::GetCpuImplementer() {
- UNIMPLEMENTED();
-}
-
-
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
- UNIMPLEMENTED();
-}
-
-
-bool OS::ArmUsingHardFloat() {
- UNIMPLEMENTED();
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-size_t OS::AllocateAlignment() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool executable) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void OS::Free(void* buf, const size_t length) {
- // TODO(1240712): potential system call return value which is ignored here.
- UNIMPLEMENTED();
-}
-
-
-void OS::Guard(void* address, const size_t size) {
- UNIMPLEMENTED();
-}
-
-
-void OS::Sleep(int milliseconds) {
- UNIMPLEMENTED();
-}
-
-
-int OS::NumberOfCores() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-void OS::Abort() {
- // Minimalistic implementation for bootstrapping.
- abort();
-}
-
-
-void OS::DebugBreak() {
- UNIMPLEMENTED();
-}
-
-
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void OS::LogSharedLibraryAddresses() {
- UNIMPLEMENTED();
-}
-
-
-void OS::SignalCodeMovingGC() {
- UNIMPLEMENTED();
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
- UNIMPLEMENTED();
-}
-
-
-VirtualMemory::~VirtualMemory() {
- UNIMPLEMENTED();
-}
-
-
-bool VirtualMemory::IsReserved() {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() {
- UNIMPLEMENTED();
- }
-
- void* pd_data_;
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size) {
- set_name(options.name);
- UNIMPLEMENTED();
-}
-
-
-Thread::Thread(const char* name)
- : data_(new PlatformData()),
- stack_size_(0) {
- set_name(name);
- UNIMPLEMENTED();
-}
-
-
-Thread::~Thread() {
- delete data_;
- UNIMPLEMENTED();
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- UNIMPLEMENTED();
-}
-
-
-void Thread::Join() {
- UNIMPLEMENTED();
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- UNIMPLEMENTED();
- return static_cast<LocalStorageKey>(0);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- UNIMPLEMENTED();
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- UNIMPLEMENTED();
-}
-
-
-void Thread::YieldCPU() {
- UNIMPLEMENTED();
-}
-
-
-class NullMutex : public Mutex {
- public:
- NullMutex() : data_(NULL) {
- UNIMPLEMENTED();
- }
-
- virtual ~NullMutex() {
- UNIMPLEMENTED();
- }
-
- virtual int Lock() {
- UNIMPLEMENTED();
- return 0;
- }
-
- virtual int Unlock() {
- UNIMPLEMENTED();
- return 0;
- }
-
- private:
- void* data_;
-};
-
-
-Mutex* OS::CreateMutex() {
- UNIMPLEMENTED();
- return new NullMutex();
-}
-
-
-class NullSemaphore : public Semaphore {
- public:
- explicit NullSemaphore(int count) : data_(NULL) {
- UNIMPLEMENTED();
- }
-
- virtual ~NullSemaphore() {
- UNIMPLEMENTED();
- }
-
- virtual void Wait() {
- UNIMPLEMENTED();
- }
-
- virtual void Signal() {
- UNIMPLEMENTED();
- }
- private:
- void* data_;
-};
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- UNIMPLEMENTED();
- return new NullSemaphore(count);
-}
-
-
-class ProfileSampler::PlatformData : public Malloced {
- public:
- PlatformData() {
- UNIMPLEMENTED();
- }
-};
-
-
-ProfileSampler::ProfileSampler(int interval) {
- UNIMPLEMENTED();
- // Shared setup follows.
- data_ = new PlatformData();
- interval_ = interval;
- active_ = false;
-}
-
-
-ProfileSampler::~ProfileSampler() {
- UNIMPLEMENTED();
- // Shared tear down follows.
- delete data_;
-}
-
-
-void ProfileSampler::Start() {
- UNIMPLEMENTED();
-}
-
-
-void ProfileSampler::Stop() {
- UNIMPLEMENTED();
-}
-
-
-bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
- UNIMPLEMENTED();
- return false;
-}
-
-
-void Sampler::DoSample() {
- UNIMPLEMENTED();
-}
-
-
-void Sampler::StartProfiling() {
- UNIMPLEMENTED();
-}
-
-
-void Sampler::StopProfiling() {
- UNIMPLEMENTED();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-openbsd.cc b/src/3rdparty/v8/src/platform-openbsd.cc
deleted file mode 100644
index ccccedc..0000000
--- a/src/3rdparty/v8/src/platform-openbsd.cc
+++ /dev/null
@@ -1,975 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for OpenBSD and NetBSD goes here. For the POSIX
-// comaptible parts the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <stdlib.h>
-
-#include <sys/types.h> // mmap & munmap
-#include <sys/mman.h> // mmap & munmap
-#include <sys/stat.h> // open
-#include <fcntl.h> // open
-#include <unistd.h> // sysconf
-#include <execinfo.h> // backtrace, backtrace_symbols
-#include <strings.h> // index
-#include <errno.h>
-#include <stdarg.h>
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform-posix.h"
-#include "platform.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id on Linux and OpenBSD since tids and pids share a
-// name space and pid 0 is reserved (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-static void* GetRandomMmapAddr() {
- Isolate* isolate = Isolate::UncheckedCurrent();
- // Note that the current isolate isn't set up in a call path via
- // CpuFeatures::Probe. We don't care about randomization in this case because
- // the code page is immediately freed.
- if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
- uint64_t rnd1 = V8::RandomPrivate(isolate);
- uint64_t rnd2 = V8::RandomPrivate(isolate);
- uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
- // Currently available CPUs have 48 bits of virtual addressing. Truncate
- // the hint address to 46 bits to give the kernel a fighting chance of
- // fulfilling our placement request.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
- uint32_t raw_addr = V8::RandomPrivate(isolate);
- // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
- // variety of ASLR modes (PAE kernel, NX compat mode, etc).
- raw_addr &= 0x3ffff000;
- raw_addr += 0x20000000;
-#endif
- return reinterpret_cast<void*>(raw_addr);
- }
- return NULL;
-}
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0;
-}
-
-
-int OS::ActivationFrameAlignment() {
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- // An x86 store acts as a release barrier.
- *ptr = value;
-}
-
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return sysconf(_SC_PAGESIZE);
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* addr = GetRandomMmapAddr();
- void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
- if (mbase == MAP_FAILED) {
- LOG(i::Isolate::Current(),
- StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) OS::Free(memory_, size_);
- fclose(file_);
-}
-
-
-void OS::LogSharedLibraryAddresses() {
- // This function assumes that the layout of the file is as follows:
- // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
- // If we encounter an unexpected situation we abort scanning further entries.
- FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return;
-
- // Allocate enough room to be able to store a full file name.
- const int kLibNameLen = FILENAME_MAX + 1;
- char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-
- i::Isolate* isolate = ISOLATE;
- // This loop will terminate once the scanning hits an EOF.
- while (true) {
- uintptr_t start, end;
- char attr_r, attr_w, attr_x, attr_p;
- // Parse the addresses and permission bits at the beginning of the line.
- if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
- if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-
- int c;
- if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
- // Found a read-only executable entry. Skip characters until we reach
- // the beginning of the filename or the end of the line.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n') && (c != '/'));
- if (c == EOF) break; // EOF: Was unexpected, just exit.
-
- // Process the filename if found.
- if (c == '/') {
- ungetc(c, fp); // Push the '/' back into the stream to be read below.
-
- // Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-
- // Drop the newline character read by fgets. We do not need to check
- // for a zero-length string because we know that we at least read the
- // '/' character.
- lib_name[strlen(lib_name) - 1] = '\0';
- } else {
- // No library name found, just record the raw address range.
- snprintf(lib_name, kLibNameLen,
- "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
- }
- LOG(isolate, SharedLibraryEvent(lib_name, start, end));
- } else {
- // Entry not describing executable data. Skip to end of line to set up
- // reading the next entry.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n'));
- if (c == EOF) break;
- }
- }
- free(lib_name);
- fclose(fp);
-}
-
-
-void OS::SignalCodeMovingGC() {
- // Support for ll_prof.py.
- //
- // The Linux profiler built into the kernel logs all mmap's with
- // PROT_EXEC so that analysis tools can properly attribute ticks. We
- // do a mmap with a name known by ll_prof.py and immediately munmap
- // it. This injects a GC marker into the stream of events generated
- // by the kernel and allows us to synchronize V8 code log and the
- // kernel log.
- int size = sysconf(_SC_PAGESIZE);
- FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
- void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
- fileno(f), 0);
- ASSERT(addr != MAP_FAILED);
- OS::Free(addr, size);
- fclose(f);
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // backtrace is a glibc extension.
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
- size_ = size;
-}
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- Address base = static_cast<Address>(reservation);
- Address aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(base, size);
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) {}
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size()) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
-#ifdef PR_SET_NAME
- prctl(PR_SET_NAME,
- reinterpret_cast<unsigned long>(thread->name()), // NOLINT
- 0, 0, 0);
-#endif
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class OpenBSDMutex : public Mutex {
- public:
- OpenBSDMutex() {
- pthread_mutexattr_t attrs;
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new OpenBSDMutex();
-}
-
-
-class OpenBSDSemaphore : public Semaphore {
- public:
- explicit OpenBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~OpenBSDSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void OpenBSDSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool OpenBSDSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
-
- int to = ts.tv_sec;
-
- while (true) {
- int result = sem_trywait(&sem_);
- if (result == 0) return true; // Successfully got semaphore.
- if (!to) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- usleep(ts.tv_nsec / 1000);
- to--;
- }
-}
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new OpenBSDSemaphore(count);
-}
-
-
-static pthread_t GetThreadID() {
- return pthread_self();
-}
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- sample->state = isolate->current_vm_state();
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#ifdef __NetBSD__
- mcontext_t& mcontext = ucontext->uc_mcontext;
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
- sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
- sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
-#endif // V8_HOST_ARCH
-#else // OpenBSD
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
- sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
- sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
- sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
- sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
-#endif // V8_HOST_ARCH
-#endif // __NetBSD__
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-
-class SignalSender : public Thread {
- public:
- static const int kSignalSenderStackSize = 64 * KB;
-
- explicit SignalSender(int interval)
- : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- vm_tgid_(getpid()),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void InstallSignalHandler() {
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
- }
-
- static void RestoreSignalHandler() {
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Start a thread that will send SIGPROF signal to VM threads,
- // when CPU profiling will be enabled.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- RestoreSignalHandler();
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- if (!signal_handler_installed_) InstallSignalHandler();
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (signal_handler_installed_) RestoreSignalHandler();
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- void SendProfilingSignal(pthread_t tid) {
- if (!signal_handler_installed_) return;
- pthread_kill(tid, SIGPROF);
- }
-
- void Sleep() {
- // Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif
- USE(result);
- }
-
- const int vm_tgid_;
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-
-Mutex* SignalSender::mutex_ = NULL;
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
- SignalSender::SetUp();
-}
-
-
-void OS::TearDown() {
- SignalSender::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
-bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
- return false;
-}
-
-
-void Sampler::DoSample() {
-}
-
-
-void Sampler::StartProfiling() {
-}
-
-
-void Sampler::StopProfiling() {
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-posix.cc b/src/3rdparty/v8/src/platform-posix.cc
deleted file mode 100644
index 0016d59..0000000
--- a/src/3rdparty/v8/src/platform-posix.cc
+++ /dev/null
@@ -1,559 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for POSIX goes here. This is not a platform on its
-// own but contains the parts which are the same across POSIX platforms Linux,
-// Mac OS, FreeBSD and OpenBSD.
-
-#include "platform-posix.h"
-
-#include <unistd.h>
-#include <errno.h>
-#include <time.h>
-
-#include <sys/mman.h>
-#include <sys/socket.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-
-#include <arpa/inet.h>
-#include <netinet/in.h>
-#include <netdb.h>
-
-#undef MAP_TYPE
-
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-#define LOG_TAG "v8"
-#include <android/log.h>
-#endif
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Maximum size of the virtual memory. 0 means there is no artificial
-// limit.
-
-intptr_t OS::MaxVirtualMemory() {
- struct rlimit limit;
- int result = getrlimit(RLIMIT_DATA, &limit);
- if (result != 0) return 0;
- return limit.rlim_cur;
-}
-
-
-intptr_t OS::CommitPageSize() {
- static intptr_t page_size = getpagesize();
- return page_size;
-}
-
-
-#ifndef __CYGWIN__
-// Get rid of writable permission on code allocations.
-void OS::ProtectCode(void* address, const size_t size) {
- mprotect(address, size, PROT_READ | PROT_EXEC);
-}
-
-
-// Create guard pages.
-void OS::Guard(void* address, const size_t size) {
- mprotect(address, size, PROT_NONE);
-}
-#endif // __CYGWIN__
-
-
-void* OS::GetRandomMmapAddr() {
- Isolate* isolate = Isolate::UncheckedCurrent();
- // Note that the current isolate isn't set up in a call path via
- // CpuFeatures::Probe. We don't care about randomization in this case because
- // the code page is immediately freed.
- if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
- uint64_t rnd1 = V8::RandomPrivate(isolate);
- uint64_t rnd2 = V8::RandomPrivate(isolate);
- uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
- // Currently available CPUs have 48 bits of virtual addressing. Truncate
- // the hint address to 46 bits to give the kernel a fighting chance of
- // fulfilling our placement request.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
- uint32_t raw_addr = V8::RandomPrivate(isolate);
- // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
- // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
- // 10.6 and 10.7.
- raw_addr &= 0x3ffff000;
- raw_addr += 0x20000000;
-#endif
- return reinterpret_cast<void*>(raw_addr);
- }
- return NULL;
-}
-
-
-// ----------------------------------------------------------------------------
-// Math functions
-
-double modulo(double x, double y) {
- return fmod(x, y);
-}
-
-
-#define UNARY_MATH_FUNCTION(name, generator) \
-static UnaryMathFunction fast_##name##_function = NULL; \
-void init_fast_##name##_function() { \
- fast_##name##_function = generator; \
-} \
-double fast_##name(double x) { \
- return (*fast_##name##_function)(x); \
-}
-
-UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
-UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
-UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
-UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
-UNARY_MATH_FUNCTION(exp, CreateExpFunction())
-UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
-
-#undef MATH_FUNCTION
-
-
-void lazily_initialize_fast_exp() {
- if (fast_exp_function == NULL) {
- init_fast_exp_function();
- }
-}
-
-
-double OS::nan_value() {
- // NAN from math.h is defined in C99 and not in POSIX.
- return NAN;
-}
-
-
-int OS::GetCurrentProcessId() {
- return static_cast<int>(getpid());
-}
-
-
-// ----------------------------------------------------------------------------
-// POSIX date/time support.
-//
-
-int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
- struct rusage usage;
-
- if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
- *secs = usage.ru_utime.tv_sec;
- *usecs = usage.ru_utime.tv_usec;
- return 0;
-}
-
-
-double OS::TimeCurrentMillis() {
- struct timeval tv;
- if (gettimeofday(&tv, NULL) < 0) return 0.0;
- return (static_cast<double>(tv.tv_sec) * 1000) +
- (static_cast<double>(tv.tv_usec) / 1000);
-}
-
-
-int64_t OS::Ticks() {
- // gettimeofday has microsecond resolution.
- struct timeval tv;
- if (gettimeofday(&tv, NULL) < 0)
- return 0;
- return (static_cast<int64_t>(tv.tv_sec) * 1000000) + tv.tv_usec;
-}
-
-
-double OS::DaylightSavingsOffset(double time) {
- if (isnan(time)) return nan_value();
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return nan_value();
- return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
-}
-
-
-int OS::GetLastError() {
- return errno;
-}
-
-
-// ----------------------------------------------------------------------------
-// POSIX stdio support.
-//
-
-FILE* OS::FOpen(const char* path, const char* mode) {
- FILE* file = fopen(path, mode);
- if (file == NULL) return NULL;
- struct stat file_stat;
- if (fstat(fileno(file), &file_stat) != 0) return NULL;
- bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
- if (is_regular_file) return file;
- fclose(file);
- return NULL;
-}
-
-
-bool OS::Remove(const char* path) {
- return (remove(path) == 0);
-}
-
-
-FILE* OS::OpenTemporaryFile() {
- return tmpfile();
-}
-
-
-const char* const OS::LogFileOpenMode = "w";
-
-
-void OS::Print(const char* format, ...) {
- va_list args;
- va_start(args, format);
- VPrint(format, args);
- va_end(args);
-}
-
-
-void OS::VPrint(const char* format, va_list args) {
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
-#else
- vprintf(format, args);
-#endif
-}
-
-
-void OS::FPrint(FILE* out, const char* format, ...) {
- va_list args;
- va_start(args, format);
- VFPrint(out, format, args);
- va_end(args);
-}
-
-
-void OS::VFPrint(FILE* out, const char* format, va_list args) {
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
-#else
- vfprintf(out, format, args);
-#endif
-}
-
-
-void OS::PrintError(const char* format, ...) {
- va_list args;
- va_start(args, format);
- VPrintError(format, args);
- va_end(args);
-}
-
-
-void OS::VPrintError(const char* format, va_list args) {
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
-#else
- vfprintf(stderr, format, args);
-#endif
-}
-
-
-int OS::SNPrintF(Vector<char> str, const char* format, ...) {
- va_list args;
- va_start(args, format);
- int result = VSNPrintF(str, format, args);
- va_end(args);
- return result;
-}
-
-
-int OS::VSNPrintF(Vector<char> str,
- const char* format,
- va_list args) {
- int n = vsnprintf(str.start(), str.length(), format, args);
- if (n < 0 || n >= str.length()) {
- // If the length is zero, the assignment fails.
- if (str.length() > 0)
- str[str.length() - 1] = '\0';
- return -1;
- } else {
- return n;
- }
-}
-
-
-#if defined(V8_TARGET_ARCH_IA32)
-static OS::MemCopyFunction memcopy_function = NULL;
-// Defined in codegen-ia32.cc.
-OS::MemCopyFunction CreateMemCopyFunction();
-
-// Copy memory area to disjoint memory area.
-void OS::MemCopy(void* dest, const void* src, size_t size) {
- // Note: here we rely on dependent reads being ordered. This is true
- // on all architectures we currently support.
- (*memcopy_function)(dest, src, size);
-#ifdef DEBUG
- CHECK_EQ(0, memcmp(dest, src, size));
-#endif
-}
-#endif // V8_TARGET_ARCH_IA32
-
-
-void POSIXPostSetUp() {
-#if defined(V8_TARGET_ARCH_IA32)
- memcopy_function = CreateMemCopyFunction();
-#endif
- init_fast_sin_function();
- init_fast_cos_function();
- init_fast_tan_function();
- init_fast_log_function();
- // fast_exp is initialized lazily.
- init_fast_sqrt_function();
-}
-
-// ----------------------------------------------------------------------------
-// POSIX string support.
-//
-
-char* OS::StrChr(char* str, int c) {
- return strchr(str, c);
-}
-
-
-void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
- strncpy(dest.start(), src, n);
-}
-
-
-// ----------------------------------------------------------------------------
-// POSIX socket support.
-//
-
-class POSIXSocket : public Socket {
- public:
- explicit POSIXSocket() {
- // Create the socket.
- socket_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- if (IsValid()) {
- // Allow rapid reuse.
- static const int kOn = 1;
- int ret = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
- &kOn, sizeof(kOn));
- ASSERT(ret == 0);
- USE(ret);
- }
- }
- explicit POSIXSocket(int socket): socket_(socket) { }
- virtual ~POSIXSocket() { Shutdown(); }
-
- // Server initialization.
- bool Bind(const int port);
- bool Listen(int backlog) const;
- Socket* Accept() const;
-
- // Client initialization.
- bool Connect(const char* host, const char* port);
-
- // Shutdown socket for both read and write.
- bool Shutdown();
-
- // Data Transimission
- int Send(const char* data, int len) const;
- int Receive(char* data, int len) const;
-
- bool SetReuseAddress(bool reuse_address);
-
- bool IsValid() const { return socket_ != -1; }
-
- private:
- int socket_;
-};
-
-
-bool POSIXSocket::Bind(const int port) {
- if (!IsValid()) {
- return false;
- }
-
- sockaddr_in addr;
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr.sin_port = htons(port);
- int status = bind(socket_,
- BitCast<struct sockaddr *>(&addr),
- sizeof(addr));
- return status == 0;
-}
-
-
-bool POSIXSocket::Listen(int backlog) const {
- if (!IsValid()) {
- return false;
- }
-
- int status = listen(socket_, backlog);
- return status == 0;
-}
-
-
-Socket* POSIXSocket::Accept() const {
- if (!IsValid()) {
- return NULL;
- }
-
- int socket;
- do {
- socket = accept(socket_, NULL, NULL);
- } while (socket == -1 && errno == EINTR);
-
- if (socket == -1) {
- return NULL;
- } else {
- return new POSIXSocket(socket);
- }
-}
-
-
-bool POSIXSocket::Connect(const char* host, const char* port) {
- if (!IsValid()) {
- return false;
- }
-
- // Lookup host and port.
- struct addrinfo *result = NULL;
- struct addrinfo hints;
- memset(&hints, 0, sizeof(addrinfo));
- hints.ai_family = AF_INET;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_protocol = IPPROTO_TCP;
- int status = getaddrinfo(host, port, &hints, &result);
- if (status != 0) {
- return false;
- }
-
- // Connect.
- do {
- status = connect(socket_, result->ai_addr, result->ai_addrlen);
- } while (status == -1 && errno == EINTR);
- freeaddrinfo(result);
- return status == 0;
-}
-
-
-bool POSIXSocket::Shutdown() {
- if (IsValid()) {
- // Shutdown socket for both read and write.
- int status = shutdown(socket_, SHUT_RDWR);
- close(socket_);
- socket_ = -1;
- return status == 0;
- }
- return true;
-}
-
-
-int POSIXSocket::Send(const char* data, int len) const {
- if (len <= 0) return 0;
- int written = 0;
- while (written < len) {
- int status = send(socket_, data + written, len - written, 0);
- if (status == 0) {
- break;
- } else if (status > 0) {
- written += status;
- } else if (errno != EINTR) {
- return 0;
- }
- }
- return written;
-}
-
-
-int POSIXSocket::Receive(char* data, int len) const {
- if (len <= 0) return 0;
- int status;
- do {
- status = recv(socket_, data, len, 0);
- } while (status == -1 && errno == EINTR);
- return (status < 0) ? 0 : status;
-}
-
-
-bool POSIXSocket::SetReuseAddress(bool reuse_address) {
- int on = reuse_address ? 1 : 0;
- int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
- return status == 0;
-}
-
-
-bool Socket::SetUp() {
- // Nothing to do on POSIX.
- return true;
-}
-
-
-int Socket::LastError() {
- return errno;
-}
-
-
-uint16_t Socket::HToN(uint16_t value) {
- return htons(value);
-}
-
-
-uint16_t Socket::NToH(uint16_t value) {
- return ntohs(value);
-}
-
-
-uint32_t Socket::HToN(uint32_t value) {
- return htonl(value);
-}
-
-
-uint32_t Socket::NToH(uint32_t value) {
- return ntohl(value);
-}
-
-
-Socket* OS::CreateSocket() {
- return new POSIXSocket();
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-posix.h b/src/3rdparty/v8/src/platform-posix.h
deleted file mode 100644
index 7a982ed..0000000
--- a/src/3rdparty/v8/src/platform-posix.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PLATFORM_POSIX_H_
-#define V8_PLATFORM_POSIX_H_
-
-namespace v8 {
-namespace internal {
-
-// Used by platform implementation files during OS::PostSetUp().
-void POSIXPostSetUp();
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_POSIX_H_
diff --git a/src/3rdparty/v8/src/platform-qnx.cc b/src/3rdparty/v8/src/platform-qnx.cc
deleted file mode 100644
index 83946f9..0000000
--- a/src/3rdparty/v8/src/platform-qnx.cc
+++ /dev/null
@@ -1,1086 +0,0 @@
-// Copyright 2012 Research in Motion. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for QNX goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <stdlib.h>
-#include <ucontext.h>
-#include <backtrace.h>
-
-// QNX requires memory pages to be marked as
-// executable. Otherwise, OS raises an exception when executing code
-// in that page.
-#include <sys/types.h> // mmap & munmap
-#include <sys/mman.h> // mmap & munmap
-#include <sys/stat.h> // open
-#include <fcntl.h> // open
-#include <unistd.h> // sysconf
-#include <strings.h> // index
-#include <errno.h>
-#include <stdarg.h>
-#include <sys/procfs.h>
-#include <sys/syspage.h>
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform.h"
-#include "platform-posix.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id on QNX since tids and pids share a
-// name space and pid 0 is reserved (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // QNX runs on anything.
-}
-
-
-#ifdef __arm__
-static bool CPUInfoContainsString(const char * search_string) {
- const char* file_name = "/proc/cpuinfo";
- // This is written as a straight shot one pass parser
- // and not using STL string and ifstream because,
- // on QNX, it's reading from a (non-mmap-able)
- // character special device.
- FILE* f = NULL;
- const char* what = search_string;
-
- if (NULL == (f = fopen(file_name, "r")))
- return false;
-
- int k;
- while (EOF != (k = fgetc(f))) {
- if (k == *what) {
- ++what;
- while ((*what != '\0') && (*what == fgetc(f))) {
- ++what;
- }
- if (*what == '\0') {
- fclose(f);
- return true;
- } else {
- what = search_string;
- }
- }
- }
- fclose(f);
-
- // Did not find string in the proc file.
- return false;
-}
-
-
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
- switch (feature) {
- case VFP2:
- case VFP3:
- // All shipping devices currently support this and QNX has no easy way to
- // determine this at runtime.
- return true;
- case ARMv7:
- return (SYSPAGE_ENTRY(cpuinfo)->flags & ARM_CPU_FLAG_V7) != 0;
- case SUDIV:
- return CPUInfoContainsString("idiva");
- case VFP32DREGS:
- // We could even return true here, shipping devices have all
- // 32 double-precision registers afaik.
- return !CPUInfoContainsString("d16");
- default:
- UNREACHABLE();
- }
-
- return false;
-}
-
-CpuImplementer OS::GetCpuImplementer() {
- // We do NOT return QUALCOMM_IMPLEMENTER, even though /proc/cpuinfo
- // has "CPU implementer : 0x51" in it, as that leads to a runtime
- // error on the first JS function call.
- return UNKNOWN_IMPLEMENTER;
-}
-
-bool OS::ArmUsingHardFloat() {
- // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
- // the Floating Point ABI used (PCS stands for Procedure Call Standard).
- // We use these as well as a couple of other defines to statically determine
- // what FP ABI used.
- // GCC versions 4.4 and below don't support hard-fp.
- // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
- // __ARM_PCS_VFP.
-
-#define GCC_VERSION (__GNUC__ * 10000 \
- + __GNUC_MINOR__ * 100 \
- + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION >= 40600
-#if defined(__ARM_PCS_VFP)
- return true;
-#else
- return false;
-#endif
-
-#elif GCC_VERSION < 40500
- return false;
-
-#else
-#if defined(__ARM_PCS_VFP)
- return true;
-#elif defined(__ARM_PCS) || defined(__SOFTFP) || !defined(__VFP_FP__)
- return false;
-#else
-#error "Your version of GCC does not report the FP ABI compiled for." \
- "Please report it on this issue" \
- "http://code.google.com/p/v8/issues/detail?id=2140"
-
-#endif
-#endif
-#undef GCC_VERSION
-}
-
-#endif // def __arm__
-
-
-int OS::ActivationFrameAlignment() {
-#ifdef V8_TARGET_ARCH_ARM
- // On EABI ARM targets this is required for fp correctness in the
- // runtime system.
- return 8;
-#endif
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__)
- // Only use on ARM hardware.
- MemoryBarrier();
-#else
- __asm__ __volatile__("" : : : "memory");
- // An x86 store acts as a release barrier.
-#endif
- *ptr = value;
-}
-
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return sysconf(_SC_PAGESIZE);
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* addr = GetRandomMmapAddr();
- void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) {
- LOG(i::Isolate::Current(),
- StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
-// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
-// which is the architecture of generated code).
-#if (defined(__arm__) || defined(__thumb__))
-# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
- asm("bkpt 0");
-# endif
-#else
- asm("int $3");
-#endif
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-void OS::LogSharedLibraryAddresses() {
- procfs_mapinfo *mapinfos = NULL, *mapinfo;
- int proc_fd, num, i;
-
- struct {
- procfs_debuginfo info;
- char buff[PATH_MAX];
- } map;
-
- char buf[PATH_MAX + 1];
- sprintf(buf, "/proc/%d/as", getpid());
-
- if ((proc_fd = open(buf, O_RDONLY)) == -1) {
- close(proc_fd);
- return;
- }
-
- /* Get the number of map entrys. */
- if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
- close(proc_fd);
- return;
- }
-
- mapinfos =(procfs_mapinfo *)malloc(num * sizeof(procfs_mapinfo));
- if (mapinfos == NULL) {
- close(proc_fd);
- return;
- }
-
- /* Fill the map entrys. */
- if (devctl(proc_fd, DCMD_PROC_PAGEDATA, mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
- free(mapinfos);
- close(proc_fd);
- return;
- }
-
- i::Isolate* isolate = ISOLATE;
-
- for (i = 0; i < num; i++) {
- mapinfo = mapinfos + i;
- if (mapinfo->flags & MAP_ELF) {
- map.info.vaddr = mapinfo->vaddr;
- if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK)
- continue;
-
- LOG(isolate, SharedLibraryEvent(map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
- }
- }
- free(mapinfos);
- close(proc_fd);
-}
-
-
-static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
-
-
-void OS::SignalCodeMovingGC() {
- // Support for ll_prof.py.
- //
- // The QNX profiler built into the kernel logs all mmap's with
- // PROT_EXEC so that analysis tools can properly attribute ticks. We
- // do a mmap with a name known by ll_prof.py and immediately munmap
- // it. This injects a GC marker into the stream of events generated
- // by the kernel and allows us to synchronize V8 code log and the
- // kernel log.
- int size = sysconf(_SC_PAGESIZE);
- FILE* f = fopen(kGCFakeMmap, "w+");
- void* addr = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_EXEC,
- MAP_PRIVATE,
- fileno(f),
- 0);
- ASSERT(addr != MAP_FAILED);
- munmap(addr, size);
- fclose(f);
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- int frames_size = frames.length();
- bt_addr_t addresses[frames_size];
- bt_accessor_t acc;
- bt_memmap_t memmap;
- bt_init_accessor(&acc, BT_SELF);
- bt_load_memmap(&acc, &memmap);
- int frames_count = bt_get_backtrace(&acc, addresses, frames_size);
- bt_addr_t temp_addr[1];
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = reinterpret_cast<void*>(addresses[i]);
- temp_addr[0] = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- bt_sprnf_addrs(&memmap, temp_addr, 1, "%a", frames[i].text, kStackWalkMaxTextLen, 0);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
- bt_unload_memmap(&memmap);
- bt_release_accessor(&acc);
- return 0;
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
- size_ = size;
-}
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- Address base = static_cast<Address>(reservation);
- Address aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(base, size);
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
-}
-
-bool VirtualMemory::HasLazyCommits() {
- return false;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) {}
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size()) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
-#ifdef PR_SET_NAME
- prctl(PR_SET_NAME,
- reinterpret_cast<unsigned long>(thread->name()), // NOLINT
- 0, 0, 0);
-#endif
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- CHECK_EQ(0, result);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class QNXMutex : public Mutex {
- public:
- QNXMutex() {
- pthread_mutexattr_t attrs;
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~QNXMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new QNXMutex();
-}
-
-
-class QNXSemaphore : public Semaphore {
- public:
- explicit QNXSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~QNXSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void QNXSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool QNXSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new QNXSemaphore(count);
-}
-
-
-static int GetThreadID() {
- pthread_t thread_id = pthread_self();
- return thread_id;
-}
-
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = isolate->current_vm_state();
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.cpu.eip);
- sample->sp = reinterpret_cast<Address>(mcontext.cpu.esp);
- sample->fp = reinterpret_cast<Address>(mcontext.cpu.ebp);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.cpu.rip);
- sample->sp = reinterpret_cast<Address>(mcontext.cpu.rsp);
- sample->fp = reinterpret_cast<Address>(mcontext.cpu.rbp);
-#elif V8_HOST_ARCH_ARM
- sample->pc = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_PC]);
- sample->sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]);
- sample->fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]);
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- int vm_tid() const { return vm_tid_; }
-
- private:
- const int vm_tid_;
-};
-
-
-class SignalSender : public Thread {
- public:
- static const int kSignalSenderStackSize = 32 * KB;
-
- explicit SignalSender(int interval)
- : Thread("SignalSender"),
- vm_tgid_(getpid()),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void InstallSignalHandler() {
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
- }
-
- static void RestoreSignalHandler() {
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Start a thread that will send SIGPROF signal to VM threads,
- // when CPU profiling will be enabled.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- RestoreSignalHandler();
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- if (!signal_handler_installed_) InstallSignalHandler();
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (signal_handler_installed_) RestoreSignalHandler();
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- void SendProfilingSignal(int tid) {
- if (!signal_handler_installed_) return;
- pthread_kill(tid, SIGPROF);
- }
-
- void Sleep() {
- // Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif
- USE(result);
- }
-
- const int vm_tgid_;
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-
-Mutex* SignalSender::mutex_ = NULL;
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-
-#ifdef __arm__
- // When running on ARM hardware check that the EABI used by V8 and
- // by the C code is the same.
- bool hard_float = OS::ArmUsingHardFloat();
- if (hard_float) {
-#if !USE_EABI_HARDFLOAT
- PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
- "-DUSE_EABI_HARDFLOAT\n");
- exit(1);
-#endif
- } else {
-#if USE_EABI_HARDFLOAT
- PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
- "-DUSE_EABI_HARDFLOAT\n");
- exit(1);
-#endif
- }
-#endif
- SignalSender::SetUp();
-}
-
-
-void OS::TearDown() {
- SignalSender::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
-bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
- return false;
-}
-
-
-void Sampler::DoSample() {
-}
-
-
-void Sampler::StartProfiling() {
-}
-
-
-void Sampler::StopProfiling() {
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-solaris.cc b/src/3rdparty/v8/src/platform-solaris.cc
deleted file mode 100644
index 88d197f..0000000
--- a/src/3rdparty/v8/src/platform-solaris.cc
+++ /dev/null
@@ -1,893 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Solaris 10 goes here. For the POSIX comaptible
-// parts the implementation is in platform-posix.cc.
-
-#ifdef __sparc
-# error "V8 does not support the SPARC CPU architecture."
-#endif
-
-#include <sys/stack.h> // for stack alignment
-#include <unistd.h> // getpagesize(), usleep()
-#include <sys/mman.h> // mmap()
-#include <ucontext.h> // walkstack(), getcontext()
-#include <dlfcn.h> // dladdr
-#include <pthread.h>
-#include <sched.h> // for sched_yield
-#include <semaphore.h>
-#include <time.h>
-#include <sys/time.h> // gettimeofday(), timeradd()
-#include <errno.h>
-#include <ieeefp.h> // finite()
-#include <signal.h> // sigemptyset(), etc
-#include <sys/regset.h>
-
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform-posix.h"
-#include "platform.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
-
-
-// It seems there is a bug in some Solaris distributions (experienced in
-// SunOS 5.10 Generic_141445-09) which make it difficult or impossible to
-// access signbit() despite the availability of other C99 math functions.
-#ifndef signbit
-// Test sign - usually defined in math.h
-int signbit(double x) {
- // We need to take care of the special case of both positive and negative
- // versions of zero.
- if (x == 0) {
- return fpclass(x) & FP_NZERO;
- } else {
- // This won't detect negative NaN but that should be okay since we don't
- // assume that behavior.
- return x < 0;
- }
-}
-#endif // signbit
-
-namespace v8 {
-namespace internal {
-
-
-// 0 is never a valid thread id on Solaris since the main thread is 1 and
-// subsequent have their ids incremented from there
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Solaris runs on a lot of things.
-}
-
-
-int OS::ActivationFrameAlignment() {
- // GCC generates code that requires 16 byte alignment such as movdqa.
- return Max(STACK_ALIGN, 16);
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- *ptr = value;
-}
-
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return tzname[0]; // The location of the timezone string on Solaris.
-}
-
-
-double OS::LocalTimeOffset() {
- tzset();
- return -static_cast<double>(timezone * msPerSecond);
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return static_cast<size_t>(getpagesize());
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
- if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- useconds_t ms = static_cast<useconds_t>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-void OS::LogSharedLibraryAddresses() {
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-struct StackWalker {
- Vector<OS::StackFrame>& frames;
- int index;
-};
-
-
-static int StackWalkCallback(uintptr_t pc, int signo, void* data) {
- struct StackWalker* walker = static_cast<struct StackWalker*>(data);
- Dl_info info;
-
- int i = walker->index;
-
- walker->frames[i].address = reinterpret_cast<void*>(pc);
-
- // Make sure line termination is in place.
- walker->frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
-
- Vector<char> text = MutableCStrVector(walker->frames[i].text,
- OS::kStackWalkMaxTextLen);
-
- if (dladdr(reinterpret_cast<void*>(pc), &info) == 0) {
- OS::SNPrintF(text, "[0x%p]", pc);
- } else if ((info.dli_fname != NULL && info.dli_sname != NULL)) {
- // We have symbol info.
- OS::SNPrintF(text, "%s'%s+0x%x", info.dli_fname, info.dli_sname, pc);
- } else {
- // No local symbol info.
- OS::SNPrintF(text,
- "%s'0x%p [0x%p]",
- info.dli_fname,
- pc - reinterpret_cast<uintptr_t>(info.dli_fbase),
- pc);
- }
- walker->index++;
- return 0;
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- ucontext_t ctx;
- struct StackWalker walker = { frames, 0 };
-
- if (getcontext(&ctx) < 0) return kStackWalkError;
-
- if (!walkcontext(&ctx, StackWalkCallback, &walker)) {
- return kStackWalkError;
- }
-
- return walker.index;
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
- size_ = size;
-}
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- Address base = static_cast<Address>(reservation);
- Address aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(base, size);
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) { }
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size()) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- }
- pthread_create(&data_->thread_, NULL, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class SolarisMutex : public Mutex {
- public:
- SolarisMutex() {
- pthread_mutexattr_t attr;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutex_init(&mutex_, &attr);
- }
-
- ~SolarisMutex() { pthread_mutex_destroy(&mutex_); }
-
- int Lock() { return pthread_mutex_lock(&mutex_); }
-
- int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_;
-};
-
-
-Mutex* OS::CreateMutex() {
- return new SolarisMutex();
-}
-
-
-class SolarisSemaphore : public Semaphore {
- public:
- explicit SolarisSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~SolarisSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void SolarisSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-#ifndef timeradd
-#define timeradd(a, b, result) \
- do { \
- (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
- (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
- if ((result)->tv_usec >= 1000000) { \
- ++(result)->tv_sec; \
- (result)->tv_usec -= 1000000; \
- } \
- } while (0)
-#endif
-
-
-bool SolarisSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new SolarisSemaphore(count);
-}
-
-
-static pthread_t GetThreadID() {
- return pthread_self();
-}
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = isolate->current_vm_state();
-
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
-
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-
-class SignalSender : public Thread {
- public:
- static const int kSignalSenderStackSize = 64 * KB;
-
- explicit SignalSender(int interval)
- : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void InstallSignalHandler() {
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
- }
-
- static void RestoreSignalHandler() {
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Start a thread that will send SIGPROF signal to VM threads,
- // when CPU profiling will be enabled.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- RestoreSignalHandler();
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- if (!signal_handler_installed_) InstallSignalHandler();
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (signal_handler_installed_) RestoreSignalHandler();
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- void SendProfilingSignal(pthread_t tid) {
- if (!signal_handler_installed_) return;
- pthread_kill(tid, SIGPROF);
- }
-
- void Sleep() {
- // Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif
- USE(result);
- }
-
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-Mutex* SignalSender::mutex_ = NULL;
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly will cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
- SignalSender::SetUp();
-}
-
-
-void OS::TearDown() {
- SignalSender::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
-bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
- return false;
-}
-
-
-void Sampler::DoSample() {
-}
-
-
-void Sampler::StartProfiling() {
-}
-
-
-void Sampler::StopProfiling() {
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-tls-mac.h b/src/3rdparty/v8/src/platform-tls-mac.h
deleted file mode 100644
index 728524e..0000000
--- a/src/3rdparty/v8/src/platform-tls-mac.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PLATFORM_TLS_MAC_H_
-#define V8_PLATFORM_TLS_MAC_H_
-
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
-
-#define V8_FAST_TLS_SUPPORTED 1
-
-extern intptr_t kMacTlsBaseOffset;
-
-INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
-
-inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
- intptr_t result;
-#if defined(V8_HOST_ARCH_IA32)
- asm("movl %%gs:(%1,%2,4), %0;"
- :"=r"(result) // Output must be a writable register.
- :"r"(kMacTlsBaseOffset), "r"(index));
-#else
- asm("movq %%gs:(%1,%2,8), %0;"
- :"=r"(result)
- :"r"(kMacTlsBaseOffset), "r"(index));
-#endif
- return result;
-}
-
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_TLS_MAC_H_
diff --git a/src/3rdparty/v8/src/platform-tls-win32.h b/src/3rdparty/v8/src/platform-tls-win32.h
deleted file mode 100644
index a981d18..0000000
--- a/src/3rdparty/v8/src/platform-tls-win32.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PLATFORM_TLS_WIN32_H_
-#define V8_PLATFORM_TLS_WIN32_H_
-
-#include "checks.h"
-#include "globals.h"
-#include "win32-headers.h"
-
-namespace v8 {
-namespace internal {
-
-#if defined(_WIN32) && !defined(_WIN64) && !defined(_WIN32_WCE)
-
-#define V8_FAST_TLS_SUPPORTED 1
-
-inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
- const intptr_t kTibInlineTlsOffset = 0xE10;
- const intptr_t kTibExtraTlsOffset = 0xF94;
- const intptr_t kMaxInlineSlots = 64;
- const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
- ASSERT(0 <= index && index < kMaxSlots);
- if (index < kMaxInlineSlots) {
- return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
- kPointerSize * index));
- }
- intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
- ASSERT(extra != 0);
- return *reinterpret_cast<intptr_t*>(extra +
- kPointerSize * (index - kMaxInlineSlots));
-}
-
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_TLS_WIN32_H_
diff --git a/src/3rdparty/v8/src/platform-tls.h b/src/3rdparty/v8/src/platform-tls.h
deleted file mode 100644
index 3251663..0000000
--- a/src/3rdparty/v8/src/platform-tls.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform and architecture specific thread local store functions.
-
-#ifndef V8_PLATFORM_TLS_H_
-#define V8_PLATFORM_TLS_H_
-
-#ifndef V8_NO_FAST_TLS
-
-// When fast TLS is requested we include the appropriate
-// implementation header.
-//
-// The implementation header defines V8_FAST_TLS_SUPPORTED if it
-// provides fast TLS support for the current platform and architecture
-// combination.
-
-#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
-#include "platform-tls-win32.h"
-#elif defined(__APPLE__)
-#include "platform-tls-mac.h"
-#endif
-
-#endif
-
-#endif // V8_PLATFORM_TLS_H_
diff --git a/src/3rdparty/v8/src/platform-win32.cc b/src/3rdparty/v8/src/platform-win32.cc
deleted file mode 100644
index 2383fad..0000000
--- a/src/3rdparty/v8/src/platform-win32.cc
+++ /dev/null
@@ -1,2271 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Win32.
-
-// Secure API functions are not available using MinGW with msvcrt.dll
-// on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to
-// disable definition of secure API functions in standard headers that
-// would conflict with our own implementation.
-#ifdef __MINGW32__
-#include <_mingw.h>
-#ifdef MINGW_HAS_SECURE_API
-#undef MINGW_HAS_SECURE_API
-#endif // MINGW_HAS_SECURE_API
-#endif // __MINGW32__
-
-#define V8_WIN32_HEADERS_FULL
-#include "win32-headers.h"
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "platform.h"
-#include "vm-state-inl.h"
-
-#ifdef _MSC_VER
-
-// Case-insensitive bounded string comparisons. Use stricmp() on Win32. Usually
-// defined in strings.h.
-int strncasecmp(const char* s1, const char* s2, int n) {
- return _strnicmp(s1, s2, n);
-}
-
-#endif // _MSC_VER
-
-
-#ifdef _WIN32_WCE
-// Convert a Latin1 string into a utf16 string
-wchar_t* wce_mbtowc(const char* a) {
- int length = strlen(a);
- wchar_t *wbuf = new wchar_t[length];
-
- for (int i = 0; i < length; ++i)
- wbuf[i] = (wchar_t)a[i];
-
- return wbuf;
-}
-#endif // _WIN32_WCE
-
-
-// Extra functions for MinGW. Most of these are the _s functions which are in
-// the Microsoft Visual Studio C++ CRT.
-#ifdef __MINGW32__
-
-
-#ifndef __MINGW64_VERSION_MAJOR
-
-#define _TRUNCATE 0
-#define STRUNCATE 80
-
-inline void MemoryBarrier() {
- int barrier = 0;
- __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
-}
-
-#endif // __MINGW64_VERSION_MAJOR
-
-
-int localtime_s(tm* out_tm, const time_t* time) {
- tm* posix_local_time_struct = localtime(time);
- if (posix_local_time_struct == NULL) return 1;
- *out_tm = *posix_local_time_struct;
- return 0;
-}
-
-
-int fopen_s(FILE** pFile, const char* filename, const char* mode) {
- *pFile = fopen(filename, mode);
- return *pFile != NULL ? 0 : 1;
-}
-
-int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
- const char* format, va_list argptr) {
- ASSERT(count == _TRUNCATE);
- return _vsnprintf(buffer, sizeOfBuffer, format, argptr);
-}
-
-
-int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
- CHECK(source != NULL);
- CHECK(dest != NULL);
- CHECK_GT(dest_size, 0);
-
- if (count == _TRUNCATE) {
- while (dest_size > 0 && *source != 0) {
- *(dest++) = *(source++);
- --dest_size;
- }
- if (dest_size == 0) {
- *(dest - 1) = 0;
- return STRUNCATE;
- }
- } else {
- while (dest_size > 0 && count > 0 && *source != 0) {
- *(dest++) = *(source++);
- --dest_size;
- --count;
- }
- }
- CHECK_GT(dest_size, 0);
- *dest = 0;
- return 0;
-}
-
-#endif // __MINGW32__
-
-// Generate a pseudo-random number in the range 0-2^31-1. Usually
-// defined in stdlib.h. Missing in both Microsoft Visual Studio C++ and MinGW.
-int random() {
- return rand();
-}
-
-
-namespace v8 {
-namespace internal {
-
-intptr_t OS::MaxVirtualMemory() {
- return 0;
-}
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-#if defined(V8_TARGET_ARCH_IA32)
-static OS::MemCopyFunction memcopy_function = NULL;
-// Defined in codegen-ia32.cc.
-OS::MemCopyFunction CreateMemCopyFunction();
-
-// Copy memory area to disjoint memory area.
-void OS::MemCopy(void* dest, const void* src, size_t size) {
- // Note: here we rely on dependent reads being ordered. This is true
- // on all architectures we currently support.
- (*memcopy_function)(dest, src, size);
-#ifdef DEBUG
- CHECK_EQ(0, memcmp(dest, src, size));
-#endif
-}
-#endif // V8_TARGET_ARCH_IA32
-
-#ifdef _WIN32_WCE
-// TODO: Implement
-CpuImplementer OS::GetCpuImplementer() {
- return UNKNOWN_IMPLEMENTER;
-}
-#endif // _WIN32_WCE
-
-#ifdef _WIN64
-typedef double (*ModuloFunction)(double, double);
-static ModuloFunction modulo_function = NULL;
-// Defined in codegen-x64.cc.
-ModuloFunction CreateModuloFunction();
-
-void init_modulo_function() {
- modulo_function = CreateModuloFunction();
-}
-
-double modulo(double x, double y) {
- // Note: here we rely on dependent reads being ordered. This is true
- // on all architectures we currently support.
- return (*modulo_function)(x, y);
-}
-#else // Win32
-
-double modulo(double x, double y) {
- // Workaround MS fmod bugs. ECMA-262 says:
- // dividend is finite and divisor is an infinity => result equals dividend
- // dividend is a zero and divisor is nonzero finite => result equals dividend
- if (!(isfinite(x) && (!isfinite(y) && !isnan(y))) &&
- !(x == 0 && (y != 0 && isfinite(y)))) {
- x = fmod(x, y);
- }
- return x;
-}
-
-#endif // _WIN64
-
-
-#define UNARY_MATH_FUNCTION(name, generator) \
-static UnaryMathFunction fast_##name##_function = NULL; \
-void init_fast_##name##_function() { \
- fast_##name##_function = generator; \
-} \
-double fast_##name(double x) { \
- return (*fast_##name##_function)(x); \
-}
-
-UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
-UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
-UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
-UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
-UNARY_MATH_FUNCTION(exp, CreateExpFunction())
-UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
-
-#undef UNARY_MATH_FUNCTION
-
-
-void lazily_initialize_fast_exp() {
- if (fast_exp_function == NULL) {
- init_fast_exp_function();
- }
-}
-
-
-void MathSetup() {
-#ifdef _WIN64
- init_modulo_function();
-#endif
- init_fast_sin_function();
- init_fast_cos_function();
- init_fast_tan_function();
- init_fast_log_function();
- // fast_exp is initialized lazily.
- init_fast_sqrt_function();
-}
-
-
-// ----------------------------------------------------------------------------
-// The Time class represents time on win32. A timestamp is represented as
-// a 64-bit integer in 100 nanoseconds since January 1, 1601 (UTC). JavaScript
-// timestamps are represented as a doubles in milliseconds since 00:00:00 UTC,
-// January 1, 1970.
-
-class Time {
- public:
- // Constructors.
- Time();
- explicit Time(double jstime);
- Time(int year, int mon, int day, int hour, int min, int sec);
-
- // Convert timestamp to JavaScript representation.
- double ToJSTime();
-
- // Set timestamp to current time.
- void SetToCurrentTime();
-
- // Returns the local timezone offset in milliseconds east of UTC. This is
- // the number of milliseconds you must add to UTC to get local time, i.e.
- // LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
- // routine also takes into account whether daylight saving is effect
- // at the time.
- int64_t LocalOffset();
-
- // Returns the daylight savings time offset for the time in milliseconds.
- int64_t DaylightSavingsOffset();
-
- // Returns a string identifying the current timezone for the
- // timestamp taking into account daylight saving.
- char* LocalTimezone();
-
- private:
- // Constants for time conversion.
- static const int64_t kTimeEpoc = 116444736000000000LL;
- static const int64_t kTimeScaler = 10000;
- static const int64_t kMsPerMinute = 60000;
-
- // Constants for timezone information.
- static const int kTzNameSize = 128;
- static const bool kShortTzNames = false;
-
- // Timezone information. We need to have static buffers for the
- // timezone names because we return pointers to these in
- // LocalTimezone().
- static bool tz_initialized_;
- static TIME_ZONE_INFORMATION tzinfo_;
- static char std_tz_name_[kTzNameSize];
- static char dst_tz_name_[kTzNameSize];
-
- // Initialize the timezone information (if not already done).
- static void TzSet();
-
- // Guess the name of the timezone from the bias.
- static const char* GuessTimezoneNameFromBias(int bias);
-
- // Return whether or not daylight savings time is in effect at this time.
- bool InDST();
-
- // Return the difference (in milliseconds) between this timestamp and
- // another timestamp.
- int64_t Diff(Time* other);
-
- // Accessor for FILETIME representation.
- FILETIME& ft() { return time_.ft_; }
-
- // Accessor for integer representation.
- int64_t& t() { return time_.t_; }
-
- // Although win32 uses 64-bit integers for representing timestamps,
- // these are packed into a FILETIME structure. The FILETIME structure
- // is just a struct representing a 64-bit integer. The TimeStamp union
- // allows access to both a FILETIME and an integer representation of
- // the timestamp.
- union TimeStamp {
- FILETIME ft_;
- int64_t t_;
- };
-
- TimeStamp time_;
-};
-
-// Static variables.
-bool Time::tz_initialized_ = false;
-TIME_ZONE_INFORMATION Time::tzinfo_;
-char Time::std_tz_name_[kTzNameSize];
-char Time::dst_tz_name_[kTzNameSize];
-
-
-// Initialize timestamp to start of epoc.
-Time::Time() {
- t() = 0;
-}
-
-
-// Initialize timestamp from a JavaScript timestamp.
-Time::Time(double jstime) {
- t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
-}
-
-
-// Initialize timestamp from date/time components.
-Time::Time(int year, int mon, int day, int hour, int min, int sec) {
- SYSTEMTIME st;
- st.wYear = year;
- st.wMonth = mon;
- st.wDay = day;
- st.wHour = hour;
- st.wMinute = min;
- st.wSecond = sec;
- st.wMilliseconds = 0;
- SystemTimeToFileTime(&st, &ft());
-}
-
-
-// Convert timestamp to JavaScript timestamp.
-double Time::ToJSTime() {
- return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
-}
-
-
-// Guess the name of the timezone from the bias.
-// The guess is very biased towards the northern hemisphere.
-const char* Time::GuessTimezoneNameFromBias(int bias) {
- static const int kHour = 60;
- switch (-bias) {
- case -9*kHour: return "Alaska";
- case -8*kHour: return "Pacific";
- case -7*kHour: return "Mountain";
- case -6*kHour: return "Central";
- case -5*kHour: return "Eastern";
- case -4*kHour: return "Atlantic";
- case 0*kHour: return "GMT";
- case +1*kHour: return "Central Europe";
- case +2*kHour: return "Eastern Europe";
- case +3*kHour: return "Russia";
- case +5*kHour + 30: return "India";
- case +8*kHour: return "China";
- case +9*kHour: return "Japan";
- case +12*kHour: return "New Zealand";
- default: return "Local";
- }
-}
-
-
-// Initialize timezone information. The timezone information is obtained from
-// windows. If we cannot get the timezone information we fall back to CET.
-// Please notice that this code is not thread-safe.
-void Time::TzSet() {
- // Just return if timezone information has already been initialized.
- if (tz_initialized_) return;
-
- // Initialize POSIX time zone data.
-#ifndef _WIN32_WCE
- _tzset();
-#endif // _WIN32_WCE
- // Obtain timezone information from operating system.
- memset(&tzinfo_, 0, sizeof(tzinfo_));
- if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
- // If we cannot get timezone information we fall back to CET.
- tzinfo_.Bias = -60;
- tzinfo_.StandardDate.wMonth = 10;
- tzinfo_.StandardDate.wDay = 5;
- tzinfo_.StandardDate.wHour = 3;
- tzinfo_.StandardBias = 0;
- tzinfo_.DaylightDate.wMonth = 3;
- tzinfo_.DaylightDate.wDay = 5;
- tzinfo_.DaylightDate.wHour = 2;
- tzinfo_.DaylightBias = -60;
- }
-
- // Make standard and DST timezone names.
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
- std_tz_name_, kTzNameSize, NULL, NULL);
- std_tz_name_[kTzNameSize - 1] = '\0';
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
- dst_tz_name_, kTzNameSize, NULL, NULL);
- dst_tz_name_[kTzNameSize - 1] = '\0';
-
- // If OS returned empty string or resource id (like "@tzres.dll,-211")
- // simply guess the name from the UTC bias of the timezone.
- // To properly resolve the resource identifier requires a library load,
- // which is not possible in a sandbox.
- if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
- OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize - 1),
- "%s Standard Time",
- GuessTimezoneNameFromBias(tzinfo_.Bias));
- }
- if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
- OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize - 1),
- "%s Daylight Time",
- GuessTimezoneNameFromBias(tzinfo_.Bias));
- }
-
- // Timezone information initialized.
- tz_initialized_ = true;
-}
-
-
-// Return the difference in milliseconds between this and another timestamp.
-int64_t Time::Diff(Time* other) {
- return (t() - other->t()) / kTimeScaler;
-}
-
-
-// Set timestamp to current time.
-void Time::SetToCurrentTime() {
- // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
- // Because we're fast, we like fast timers which have at least a
- // 1ms resolution.
- //
- // timeGetTime() provides 1ms granularity when combined with
- // timeBeginPeriod(). If the host application for v8 wants fast
- // timers, it can use timeBeginPeriod to increase the resolution.
- //
- // Using timeGetTime() has a drawback because it is a 32bit value
- // and hence rolls-over every ~49days.
- //
- // To use the clock, we use GetSystemTimeAsFileTime as our base;
- // and then use timeGetTime to extrapolate current time from the
- // start time. To deal with rollovers, we resync the clock
- // any time when more than kMaxClockElapsedTime has passed or
- // whenever timeGetTime creates a rollover.
-
- static bool initialized = false;
- static TimeStamp init_time;
- static DWORD init_ticks;
- static const int64_t kHundredNanosecondsPerSecond = 10000000;
- static const int64_t kMaxClockElapsedTime =
- 60*kHundredNanosecondsPerSecond; // 1 minute
-
- // If we are uninitialized, we need to resync the clock.
- bool needs_resync = !initialized;
-
- // Get the current time.
- TimeStamp time_now;
- GetSystemTimeAsFileTime(&time_now.ft_);
- DWORD ticks_now = timeGetTime();
-
- // Check if we need to resync due to clock rollover.
- needs_resync |= ticks_now < init_ticks;
-
- // Check if we need to resync due to elapsed time.
- needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
-
- // Check if we need to resync due to backwards time change.
- needs_resync |= time_now.t_ < init_time.t_;
-
- // Resync the clock if necessary.
- if (needs_resync) {
- GetSystemTimeAsFileTime(&init_time.ft_);
- init_ticks = ticks_now = timeGetTime();
- initialized = true;
- }
-
- // Finally, compute the actual time. Why is this so hard.
- DWORD elapsed = ticks_now - init_ticks;
- this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
-}
-
-
-// Return the local timezone offset in milliseconds east of UTC. This
-// takes into account whether daylight saving is in effect at the time.
-// Only times in the 32-bit Unix range may be passed to this function.
-// Also, adding the time-zone offset to the input must not overflow.
-// The function EquivalentTime() in date.js guarantees this.
-int64_t Time::LocalOffset() {
-#ifndef _WIN32_WCE
- // Initialize timezone information, if needed.
- TzSet();
-
- Time rounded_to_second(*this);
- rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
- 1000 * kTimeScaler;
- // Convert to local time using POSIX localtime function.
- // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
- // very slow. Other browsers use localtime().
-
- // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
- // POSIX seconds past 1/1/1970 0:00:00.
- double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
- if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
- return 0;
- }
- // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
- time_t posix_time = static_cast<time_t>(unchecked_posix_time);
-
- // Convert to local time, as struct with fields for day, hour, year, etc.
- tm posix_local_time_struct;
- if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
-
- if (posix_local_time_struct.tm_isdst > 0) {
- return (tzinfo_.Bias + tzinfo_.DaylightBias) * -kMsPerMinute;
- } else if (posix_local_time_struct.tm_isdst == 0) {
- return (tzinfo_.Bias + tzinfo_.StandardBias) * -kMsPerMinute;
- } else {
- return tzinfo_.Bias * -kMsPerMinute;
- }
-#else
- // Windows CE has a different handling of Timezones.
- // TODO: Adapt this for Windows CE
- return 0;
-#endif
-}
-
-
-// Return whether or not daylight savings time is in effect at this time.
-bool Time::InDST() {
- // Initialize timezone information, if needed.
- TzSet();
-
- // Determine if DST is in effect at the specified time.
- bool in_dst = false;
- if (tzinfo_.StandardDate.wMonth != 0 || tzinfo_.DaylightDate.wMonth != 0) {
- // Get the local timezone offset for the timestamp in milliseconds.
- int64_t offset = LocalOffset();
-
- // Compute the offset for DST. The bias parameters in the timezone info
- // are specified in minutes. These must be converted to milliseconds.
- int64_t dstofs = -(tzinfo_.Bias + tzinfo_.DaylightBias) * kMsPerMinute;
-
- // If the local time offset equals the timezone bias plus the daylight
- // bias then DST is in effect.
- in_dst = offset == dstofs;
- }
-
- return in_dst;
-}
-
-
-// Return the daylight savings time offset for this time.
-int64_t Time::DaylightSavingsOffset() {
- return InDST() ? 60 * kMsPerMinute : 0;
-}
-
-
-// Returns a string identifying the current timezone for the
-// timestamp taking into account daylight saving.
-char* Time::LocalTimezone() {
- // Return the standard or DST time zone name based on whether daylight
- // saving is in effect at the given time.
- return InDST() ? dst_tz_name_ : std_tz_name_;
-}
-
-
-void OS::PostSetUp() {
- // Math functions depend on CPU features therefore they are initialized after
- // CPU.
- MathSetup();
-#if defined(V8_TARGET_ARCH_IA32)
- memcopy_function = CreateMemCopyFunction();
-#endif
-}
-
-#ifdef V8_TARGET_ARCH_ARM
-// TODO: Implement
-// Windows CE is the only platform right now that supports ARM.
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
- return false;
-}
-#endif // V8_TARGET_ARCH_ARM
-
-
-// Returns the accumulated user time for thread.
-int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
- FILETIME dummy;
- uint64_t usertime;
-
- // Get the amount of time that the thread has executed in user mode.
- if (!GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &dummy,
- reinterpret_cast<FILETIME*>(&usertime))) return -1;
-
- // Adjust the resolution to micro-seconds.
- usertime /= 10;
-
- // Convert to seconds and microseconds
- *secs = static_cast<uint32_t>(usertime / 1000000);
- *usecs = static_cast<uint32_t>(usertime % 1000000);
- return 0;
-}
-
-
-// Returns current time as the number of milliseconds since
-// 00:00:00 UTC, January 1, 1970.
-double OS::TimeCurrentMillis() {
- Time t;
- t.SetToCurrentTime();
- return t.ToJSTime();
-}
-
-// Returns the tickcounter based on timeGetTime.
-int64_t OS::Ticks() {
- return timeGetTime() * 1000; // Convert to microseconds.
-}
-
-
-// Returns a string identifying the current timezone taking into
-// account daylight saving.
-const char* OS::LocalTimezone(double time) {
- return Time(time).LocalTimezone();
-}
-
-
-// Returns the local time offset in milliseconds east of UTC without
-// taking daylight savings time into account.
-double OS::LocalTimeOffset() {
- // Use current time, rounded to the millisecond.
- Time t(TimeCurrentMillis());
- // Time::LocalOffset inlcudes any daylight savings offset, so subtract it.
- return static_cast<double>(t.LocalOffset() - t.DaylightSavingsOffset());
-}
-
-
-// Returns the daylight savings offset in milliseconds for the given
-// time.
-double OS::DaylightSavingsOffset(double time) {
- int64_t offset = Time(time).DaylightSavingsOffset();
- return static_cast<double>(offset);
-}
-
-
-int OS::GetLastError() {
- return ::GetLastError();
-}
-
-
-int OS::GetCurrentProcessId() {
- return static_cast<int>(::GetCurrentProcessId());
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 console output.
-//
-// If a Win32 application is linked as a console application it has a normal
-// standard output and standard error. In this case normal printf works fine
-// for output. However, if the application is linked as a GUI application,
-// the process doesn't have a console, and therefore (debugging) output is lost.
-// This is the case if we are embedded in a windows program (like a browser).
-// In order to be able to get debug output in this case the the debugging
-// facility using OutputDebugString. This output goes to the active debugger
-// for the process (if any). Else the output can be monitored using DBMON.EXE.
-
-enum OutputMode {
- UNKNOWN, // Output method has not yet been determined.
- CONSOLE, // Output is written to stdout.
- ODS // Output is written to debug facility.
-};
-
-static OutputMode output_mode = UNKNOWN; // Current output mode.
-
-
-// Determine if the process has a console for output.
-static bool HasConsole() {
- // Only check the first time. Eventual race conditions are not a problem,
- // because all threads will eventually determine the same mode.
-#ifndef _WIN32_WCE
- if (output_mode == UNKNOWN) {
- // We cannot just check that the standard output is attached to a console
- // because this would fail if output is redirected to a file. Therefore we
- // say that a process does not have an output console if either the
- // standard output handle is invalid or its file type is unknown.
- if (GetStdHandle(STD_OUTPUT_HANDLE) != INVALID_HANDLE_VALUE &&
- GetFileType(GetStdHandle(STD_OUTPUT_HANDLE)) != FILE_TYPE_UNKNOWN)
- output_mode = CONSOLE;
- else
- output_mode = ODS;
- }
- return output_mode == CONSOLE;
-#else
- // Windows CE has no shell enabled in the standard BSP
- return false;
-#endif // _WIN32_WCE
-}
-
-
-static void VPrintHelper(FILE* stream, const char* format, va_list args) {
- if (HasConsole()) {
- vfprintf(stream, format, args);
- } else {
- // It is important to use safe print here in order to avoid
- // overflowing the buffer. We might truncate the output, but this
- // does not crash.
- EmbeddedVector<char, 4096> buffer;
- OS::VSNPrintF(buffer, format, args);
-#ifdef _WIN32_WCE
- wchar_t wbuf[4096];
- for (int i = 0; i < 4096; ++i)
- wbuf[i] = (wchar_t)buffer.start()[i];
- OutputDebugStringW(wbuf);
-#else
- OutputDebugStringA(buffer.start());
-#endif // _WIN32_WCE
- }
-}
-
-
-FILE* OS::FOpen(const char* path, const char* mode) {
- FILE* result;
- if (fopen_s(&result, path, mode) == 0) {
- return result;
- } else {
- return NULL;
- }
-}
-
-
-bool OS::Remove(const char* path) {
-#ifndef _WIN32_WCE
- return (DeleteFileA(path) != 0);
-#else
- wchar_t *wpath = wce_mbtowc(path);
- bool ret = (DeleteFileW(wpath) != 0);
- delete wpath;
- return ret;
-#endif // _WIN32_WCE
-}
-
-
-FILE* OS::OpenTemporaryFile() {
- // tmpfile_s tries to use the root dir, don't use it.
- wchar_t tempPathBuffer[MAX_PATH];
- DWORD path_result = 0;
- path_result = GetTempPathW(MAX_PATH, tempPathBuffer);
- if (path_result > MAX_PATH || path_result == 0) return NULL;
- UINT name_result = 0;
- wchar_t tempNameBuffer[MAX_PATH];
- name_result = GetTempFileNameW(tempPathBuffer, L"", 0, tempNameBuffer);
- if (name_result == 0) return NULL;
- FILE* result = _wfopen(tempNameBuffer, L"w+"); // Same mode as tmpfile uses.
- if (result != NULL) {
- DeleteFileW(tempNameBuffer); // Delete on close.
- }
- return result;
-}
-
-
-// Open log file in binary mode to avoid /n -> /r/n conversion.
-const char* const OS::LogFileOpenMode = "wb";
-
-
-// Print (debug) message to console.
-void OS::Print(const char* format, ...) {
- va_list args;
- va_start(args, format);
- VPrint(format, args);
- va_end(args);
-}
-
-
-void OS::VPrint(const char* format, va_list args) {
- VPrintHelper(stdout, format, args);
-}
-
-
-void OS::FPrint(FILE* out, const char* format, ...) {
- va_list args;
- va_start(args, format);
- VFPrint(out, format, args);
- va_end(args);
-}
-
-
-void OS::VFPrint(FILE* out, const char* format, va_list args) {
- VPrintHelper(out, format, args);
-}
-
-
-// Print error message to console.
-void OS::PrintError(const char* format, ...) {
- va_list args;
- va_start(args, format);
- VPrintError(format, args);
- va_end(args);
-}
-
-
-void OS::VPrintError(const char* format, va_list args) {
- VPrintHelper(stderr, format, args);
-}
-
-
-int OS::SNPrintF(Vector<char> str, const char* format, ...) {
- va_list args;
- va_start(args, format);
- int result = VSNPrintF(str, format, args);
- va_end(args);
- return result;
-}
-
-
-int OS::VSNPrintF(Vector<char> str, const char* format, va_list args) {
- int n = _vsnprintf_s(str.start(), str.length(), _TRUNCATE, format, args);
- // Make sure to zero-terminate the string if the output was
- // truncated or if there was an error.
- if (n < 0 || n >= str.length()) {
- if (str.length() > 0)
- str[str.length() - 1] = '\0';
- return -1;
- } else {
- return n;
- }
-}
-
-
-char* OS::StrChr(char* str, int c) {
- return const_cast<char*>(strchr(str, c));
-}
-
-
-void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
- // Use _TRUNCATE or strncpy_s crashes (by design) if buffer is too small.
- size_t buffer_size = static_cast<size_t>(dest.length());
- if (n + 1 > buffer_size) // count for trailing '\0'
- n = _TRUNCATE;
- int result = strncpy_s(dest.start(), dest.length(), src, n);
- USE(result);
- ASSERT(result == 0 || (n == _TRUNCATE && result == STRUNCATE));
-}
-
-
-#undef _TRUNCATE
-#undef STRUNCATE
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* pointer) {
- if (pointer < lowest_ever_allocated || pointer >= highest_ever_allocated)
- return true;
- // Ask the Windows API
- if (IsBadWritePtr(pointer, 1))
- return true;
- return false;
-}
-
-
-// Get the system's page size used by VirtualAlloc() or the next power
-// of two. The reason for always returning a power of two is that the
-// rounding up in OS::Allocate expects that.
-static size_t GetPageSize() {
- static size_t page_size = 0;
- if (page_size == 0) {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- page_size = RoundUpToPowerOf2(info.dwPageSize);
- }
- return page_size;
-}
-
-
-// The allocation alignment is the guaranteed alignment for
-// VirtualAlloc'ed blocks of memory.
-size_t OS::AllocateAlignment() {
- static size_t allocate_alignment = 0;
- if (allocate_alignment == 0) {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- allocate_alignment = info.dwAllocationGranularity;
- }
- return allocate_alignment;
-}
-
-
-static void* GetRandomAddr() {
- Isolate* isolate = Isolate::UncheckedCurrent();
- // Note that the current isolate isn't set up in a call path via
- // CpuFeatures::Probe. We don't care about randomization in this case because
- // the code page is immediately freed.
- if (isolate != NULL) {
- // The address range used to randomize RWX allocations in OS::Allocate
- // Try not to map pages into the default range that windows loads DLLs
- // Use a multiple of 64k to prevent committing unused memory.
- // Note: This does not guarantee RWX regions will be within the
- // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
-#ifdef V8_HOST_ARCH_64_BIT
- static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
- static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
-#else
- static const intptr_t kAllocationRandomAddressMin = 0x04000000;
- static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
-#endif
- uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits)
- | kAllocationRandomAddressMin;
- address &= kAllocationRandomAddressMax;
- return reinterpret_cast<void *>(address);
- }
- return NULL;
-}
-
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
- LPVOID base = NULL;
-
- if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
- // For exectutable pages try and randomize the allocation address
- for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
- base = VirtualAlloc(GetRandomAddr(), size, action, protection);
- }
- }
-
- // After three attempts give up and let the OS find an address to use.
- if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
- return base;
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- // VirtualAlloc rounds allocated size to page size automatically.
- size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
-
- // Windows XP SP2 allows Data Excution Prevention (DEP).
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-
- LPVOID mbase = RandomizedVirtualAlloc(msize,
- MEM_COMMIT | MEM_RESERVE,
- prot);
-
- if (mbase == NULL) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed"));
- return NULL;
- }
-
- ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
-
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize));
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): VirtualFree has a return value which is ignored here.
- VirtualFree(address, 0, MEM_RELEASE);
- USE(size);
-}
-
-
-intptr_t OS::CommitPageSize() {
- return 4096;
-}
-
-
-void OS::ProtectCode(void* address, const size_t size) {
- DWORD old_protect;
- VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-}
-
-
-void OS::Guard(void* address, const size_t size) {
- DWORD oldprotect;
- VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
-}
-
-
-void OS::Sleep(int milliseconds) {
- ::Sleep(milliseconds);
-}
-
-
-int OS::NumberOfCores() {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- return info.dwNumberOfProcessors;
-}
-
-
-void OS::Abort() {
- if (IsDebuggerPresent() || FLAG_break_on_abort) {
- DebugBreak();
- } else {
- // Make the MSVCRT do a silent abort.
-#ifndef _WIN32_WCE
- raise(SIGABRT);
-#else
- exit(3);
-#endif // _WIN32_WCE
- }
-}
-
-
-void OS::DebugBreak() {
-#ifdef _MSC_VER
- __debugbreak();
-#else
- ::DebugBreak();
-#endif
-}
-
-
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
-class Win32MemoryMappedFile : public OS::MemoryMappedFile {
- public:
- Win32MemoryMappedFile(HANDLE file,
- HANDLE file_mapping,
- void* memory,
- int size)
- : file_(file),
- file_mapping_(file_mapping),
- memory_(memory),
- size_(size) { }
- virtual ~Win32MemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- HANDLE file_;
- HANDLE file_mapping_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- // Open a physical file
-#ifndef _WIN32_WCE
- HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
-#else
- wchar_t *wname = wce_mbtowc(name);
- HANDLE file = CreateFileW(wname, GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
- delete wname;
-#endif // _WIN32_WCE
- if (file == INVALID_HANDLE_VALUE) return NULL;
-
- int size = static_cast<int>(GetFileSize(file, NULL));
-
- // Create a file mapping for the physical file
- HANDLE file_mapping = CreateFileMapping(file, NULL,
- PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
- if (file_mapping == NULL) return NULL;
-
- // Map a view of the file into memory
- void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
- return new Win32MemoryMappedFile(file, file_mapping, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- // Open a physical file
-#ifndef _WIN32_WCE
- HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
-#else
- wchar_t *wname = wce_mbtowc(name);
- HANDLE file = CreateFileW(wname, GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
- delete wname;
-#endif // _WIN32_WCE
- if (file == NULL) return NULL;
- // Create a file mapping for the physical file
- HANDLE file_mapping = CreateFileMapping(file, NULL,
- PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
- if (file_mapping == NULL) return NULL;
- // Map a view of the file into memory
- void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
- if (memory) memmove(memory, initial, size);
- return new Win32MemoryMappedFile(file, file_mapping, memory, size);
-}
-
-
-Win32MemoryMappedFile::~Win32MemoryMappedFile() {
- if (memory_ != NULL)
- UnmapViewOfFile(memory_);
- CloseHandle(file_mapping_);
- CloseHandle(file_);
-}
-
-
-// The following code loads functions defined in DbhHelp.h and TlHelp32.h
-// dynamically. This is to avoid being depending on dbghelp.dll and
-// tlhelp32.dll when running (the functions in tlhelp32.dll have been moved to
-// kernel32.dll at some point so loading functions defines in TlHelp32.h
-// dynamically might not be necessary any more - for some versions of Windows?).
-
-// Function pointers to functions dynamically loaded from dbghelp.dll.
-#define DBGHELP_FUNCTION_LIST(V) \
- V(SymInitialize) \
- V(SymGetOptions) \
- V(SymSetOptions) \
- V(SymGetSearchPath) \
- V(SymLoadModule64) \
- V(StackWalk64) \
- V(SymGetSymFromAddr64) \
- V(SymGetLineFromAddr64) \
- V(SymFunctionTableAccess64) \
- V(SymGetModuleBase64)
-
-// Function pointers to functions dynamically loaded from dbghelp.dll.
-#define TLHELP32_FUNCTION_LIST(V) \
- V(CreateToolhelp32Snapshot) \
- V(Module32FirstW) \
- V(Module32NextW)
-
-// Define the decoration to use for the type and variable name used for
-// dynamically loaded DLL function..
-#define DLL_FUNC_TYPE(name) _##name##_
-#define DLL_FUNC_VAR(name) _##name
-
-// Define the type for each dynamically loaded DLL function. The function
-// definitions are copied from DbgHelp.h and TlHelp32.h. The IN and VOID macros
-// from the Windows include files are redefined here to have the function
-// definitions to be as close to the ones in the original .h files as possible.
-#ifndef IN
-#define IN
-#endif
-#ifndef VOID
-#define VOID void
-#endif
-
-// DbgHelp isn't supported on MinGW yet, nor does Windows CE have it
-#if !defined(__MINGW32__) && !defined(_WIN32_WCE)
-// DbgHelp.h functions.
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymInitialize))(IN HANDLE hProcess,
- IN PSTR UserSearchPath,
- IN BOOL fInvadeProcess);
-typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymGetOptions))(VOID);
-typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymSetOptions))(IN DWORD SymOptions);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSearchPath))(
- IN HANDLE hProcess,
- OUT PSTR SearchPath,
- IN DWORD SearchPathLength);
-typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymLoadModule64))(
- IN HANDLE hProcess,
- IN HANDLE hFile,
- IN PSTR ImageName,
- IN PSTR ModuleName,
- IN DWORD64 BaseOfDll,
- IN DWORD SizeOfDll);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(StackWalk64))(
- DWORD MachineType,
- HANDLE hProcess,
- HANDLE hThread,
- LPSTACKFRAME64 StackFrame,
- PVOID ContextRecord,
- PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
- PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
- PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
- PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSymFromAddr64))(
- IN HANDLE hProcess,
- IN DWORD64 qwAddr,
- OUT PDWORD64 pdwDisplacement,
- OUT PIMAGEHLP_SYMBOL64 Symbol);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetLineFromAddr64))(
- IN HANDLE hProcess,
- IN DWORD64 qwAddr,
- OUT PDWORD pdwDisplacement,
- OUT PIMAGEHLP_LINE64 Line64);
-// DbgHelp.h typedefs. Implementation found in dbghelp.dll.
-typedef PVOID (__stdcall *DLL_FUNC_TYPE(SymFunctionTableAccess64))(
- HANDLE hProcess,
- DWORD64 AddrBase); // DbgHelp.h typedef PFUNCTION_TABLE_ACCESS_ROUTINE64
-typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymGetModuleBase64))(
- HANDLE hProcess,
- DWORD64 AddrBase); // DbgHelp.h typedef PGET_MODULE_BASE_ROUTINE64
-
-// TlHelp32.h functions.
-typedef HANDLE (__stdcall *DLL_FUNC_TYPE(CreateToolhelp32Snapshot))(
- DWORD dwFlags,
- DWORD th32ProcessID);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32FirstW))(HANDLE hSnapshot,
- LPMODULEENTRY32W lpme);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot,
- LPMODULEENTRY32W lpme);
-
-#undef IN
-#undef VOID
-
-// Declare a variable for each dynamically loaded DLL function.
-#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL;
-DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION)
-TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION)
-#undef DEF_DLL_FUNCTION
-
-// Load the functions. This function has a lot of "ugly" macros in order to
-// keep down code duplication.
-
-static bool LoadDbgHelpAndTlHelp32() {
- static bool dbghelp_loaded = false;
-
- if (dbghelp_loaded) return true;
-
- HMODULE module;
-
- // Load functions from the dbghelp.dll module.
- module = LoadLibrary(TEXT("dbghelp.dll"));
- if (module == NULL) {
- return false;
- }
-
-#define LOAD_DLL_FUNC(name) \
- DLL_FUNC_VAR(name) = \
- reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
-
-DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC)
-
-#undef LOAD_DLL_FUNC
-
- // Load functions from the kernel32.dll module (the TlHelp32.h function used
- // to be in tlhelp32.dll but are now moved to kernel32.dll).
- module = LoadLibrary(TEXT("kernel32.dll"));
- if (module == NULL) {
- return false;
- }
-
-#define LOAD_DLL_FUNC(name) \
- DLL_FUNC_VAR(name) = \
- reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
-
-TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC)
-
-#undef LOAD_DLL_FUNC
-
- // Check that all functions where loaded.
- bool result =
-#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&
-
-DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
-TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
-
-#undef DLL_FUNC_LOADED
- true;
-
- dbghelp_loaded = result;
- return result;
- // NOTE: The modules are never unloaded and will stay around until the
- // application is closed.
-}
-
-#undef DBGHELP_FUNCTION_LIST
-#undef TLHELP32_FUNCTION_LIST
-#undef DLL_FUNC_VAR
-#undef DLL_FUNC_TYPE
-
-
-// Load the symbols for generating stack traces.
-static bool LoadSymbols(HANDLE process_handle) {
- static bool symbols_loaded = false;
-
- if (symbols_loaded) return true;
-
- BOOL ok;
-
- // Initialize the symbol engine.
- ok = _SymInitialize(process_handle, // hProcess
- NULL, // UserSearchPath
- false); // fInvadeProcess
- if (!ok) return false;
-
- DWORD options = _SymGetOptions();
- options |= SYMOPT_LOAD_LINES;
- options |= SYMOPT_FAIL_CRITICAL_ERRORS;
- options = _SymSetOptions(options);
-
- char buf[OS::kStackWalkMaxNameLen] = {0};
- ok = _SymGetSearchPath(process_handle, buf, OS::kStackWalkMaxNameLen);
- if (!ok) {
- int err = GetLastError();
- PrintF("%d\n", err);
- return false;
- }
-
- HANDLE snapshot = _CreateToolhelp32Snapshot(
- TH32CS_SNAPMODULE, // dwFlags
- GetCurrentProcessId()); // th32ProcessId
- if (snapshot == INVALID_HANDLE_VALUE) return false;
- MODULEENTRY32W module_entry;
- module_entry.dwSize = sizeof(module_entry); // Set the size of the structure.
- BOOL cont = _Module32FirstW(snapshot, &module_entry);
- while (cont) {
- DWORD64 base;
- // NOTE the SymLoadModule64 function has the peculiarity of accepting a
- // both unicode and ASCII strings even though the parameter is PSTR.
- base = _SymLoadModule64(
- process_handle, // hProcess
- 0, // hFile
- reinterpret_cast<PSTR>(module_entry.szExePath), // ImageName
- reinterpret_cast<PSTR>(module_entry.szModule), // ModuleName
- reinterpret_cast<DWORD64>(module_entry.modBaseAddr), // BaseOfDll
- module_entry.modBaseSize); // SizeOfDll
- if (base == 0) {
- int err = GetLastError();
- if (err != ERROR_MOD_NOT_FOUND &&
- err != ERROR_INVALID_HANDLE) return false;
- }
- LOG(i::Isolate::Current(),
- SharedLibraryEvent(
- module_entry.szExePath,
- reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
- reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
- module_entry.modBaseSize)));
- cont = _Module32NextW(snapshot, &module_entry);
- }
- CloseHandle(snapshot);
-
- symbols_loaded = true;
- return true;
-}
-
-
-void OS::LogSharedLibraryAddresses() {
- // SharedLibraryEvents are logged when loading symbol information.
- // Only the shared libraries loaded at the time of the call to
- // LogSharedLibraryAddresses are logged. DLLs loaded after
- // initialization are not accounted for.
- if (!LoadDbgHelpAndTlHelp32()) return;
- HANDLE process_handle = GetCurrentProcess();
- LoadSymbols(process_handle);
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-// Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll
-
-// Switch off warning 4748 (/GS can not protect parameters and local variables
-// from local buffer overrun because optimizations are disabled in function) as
-// it is triggered by the use of inline assembler.
-#pragma warning(push)
-#pragma warning(disable : 4748)
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- BOOL ok;
-
- // Load the required functions from DLL's.
- if (!LoadDbgHelpAndTlHelp32()) return kStackWalkError;
-
- // Get the process and thread handles.
- HANDLE process_handle = GetCurrentProcess();
- HANDLE thread_handle = GetCurrentThread();
-
- // Read the symbols.
- if (!LoadSymbols(process_handle)) return kStackWalkError;
-
- // Capture current context.
- CONTEXT context;
- RtlCaptureContext(&context);
-
- // Initialize the stack walking
- STACKFRAME64 stack_frame;
- memset(&stack_frame, 0, sizeof(stack_frame));
-#ifdef _WIN64
- stack_frame.AddrPC.Offset = context.Rip;
- stack_frame.AddrFrame.Offset = context.Rbp;
- stack_frame.AddrStack.Offset = context.Rsp;
-#else
- stack_frame.AddrPC.Offset = context.Eip;
- stack_frame.AddrFrame.Offset = context.Ebp;
- stack_frame.AddrStack.Offset = context.Esp;
-#endif
- stack_frame.AddrPC.Mode = AddrModeFlat;
- stack_frame.AddrFrame.Mode = AddrModeFlat;
- stack_frame.AddrStack.Mode = AddrModeFlat;
- int frames_count = 0;
-
- // Collect stack frames.
- int frames_size = frames.length();
- while (frames_count < frames_size) {
- ok = _StackWalk64(
- IMAGE_FILE_MACHINE_I386, // MachineType
- process_handle, // hProcess
- thread_handle, // hThread
- &stack_frame, // StackFrame
- &context, // ContextRecord
- NULL, // ReadMemoryRoutine
- _SymFunctionTableAccess64, // FunctionTableAccessRoutine
- _SymGetModuleBase64, // GetModuleBaseRoutine
- NULL); // TranslateAddress
- if (!ok) break;
-
- // Store the address.
- ASSERT((stack_frame.AddrPC.Offset >> 32) == 0); // 32-bit address.
- frames[frames_count].address =
- reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
-
- // Try to locate a symbol for this frame.
- DWORD64 symbol_displacement;
- SmartArrayPointer<IMAGEHLP_SYMBOL64> symbol(
- NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen));
- if (symbol.is_empty()) return kStackWalkError; // Out of memory.
- memset(*symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen);
- (*symbol)->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
- (*symbol)->MaxNameLength = kStackWalkMaxNameLen;
- ok = _SymGetSymFromAddr64(process_handle, // hProcess
- stack_frame.AddrPC.Offset, // Address
- &symbol_displacement, // Displacement
- *symbol); // Symbol
- if (ok) {
- // Try to locate more source information for the symbol.
- IMAGEHLP_LINE64 Line;
- memset(&Line, 0, sizeof(Line));
- Line.SizeOfStruct = sizeof(Line);
- DWORD line_displacement;
- ok = _SymGetLineFromAddr64(
- process_handle, // hProcess
- stack_frame.AddrPC.Offset, // dwAddr
- &line_displacement, // pdwDisplacement
- &Line); // Line
- // Format a text representation of the frame based on the information
- // available.
- if (ok) {
- SNPrintF(MutableCStrVector(frames[frames_count].text,
- kStackWalkMaxTextLen),
- "%s %s:%d:%d",
- (*symbol)->Name, Line.FileName, Line.LineNumber,
- line_displacement);
- } else {
- SNPrintF(MutableCStrVector(frames[frames_count].text,
- kStackWalkMaxTextLen),
- "%s",
- (*symbol)->Name);
- }
- // Make sure line termination is in place.
- frames[frames_count].text[kStackWalkMaxTextLen - 1] = '\0';
- } else {
- // No text representation of this frame
- frames[frames_count].text[0] = '\0';
-
- // Continue if we are just missing a module (for non C/C++ frames a
- // module will never be found).
- int err = GetLastError();
- if (err != ERROR_MOD_NOT_FOUND) {
- break;
- }
- }
-
- frames_count++;
- }
-
- // Return the number of frames filled in.
- return frames_count;
-}
-
-// Restore warnings to previous settings.
-#pragma warning(pop)
-
-#else // __MINGW32__
-void OS::LogSharedLibraryAddresses() { }
-void OS::SignalCodeMovingGC() { }
-int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }
-#endif // __MINGW32__
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Windows runs on anything.
-}
-
-
-double OS::nan_value() {
-#ifdef _MSC_VER
- // Positive Quiet NaN with no payload (aka. Indeterminate) has all bits
- // in mask set, so value equals mask.
- static const __int64 nanval = kQuietNaNMask;
- return *reinterpret_cast<const double*>(&nanval);
-#else // _MSC_VER
- return NAN;
-#endif // _MSC_VER
-}
-
-
-int OS::ActivationFrameAlignment() {
-#ifdef _WIN64
- return 16; // Windows 64-bit ABI requires the stack to be 16-byte aligned.
-#elif defined(__MINGW32__)
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-#else
- return 8; // Floating-point math runs faster with 8-byte alignment.
-#endif
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* address = ReserveRegion(request_size);
- if (address == NULL) return;
- Address base = RoundUp(static_cast<Address>(address), alignment);
- // Try reducing the size by freeing and then reallocating a specific area.
- bool result = ReleaseRegion(address, request_size);
- USE(result);
- ASSERT(result);
- address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
- if (address != NULL) {
- request_size = size;
- ASSERT(base == static_cast<Address>(address));
- } else {
- // Resizing failed, just go with a bigger area.
- address = ReserveRegion(request_size);
- if (address == NULL) return;
- }
- address_ = address;
- size_ = request_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address_, size_);
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- if (CommitRegion(address, size, is_executable)) {
- UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
- return true;
- }
- return false;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- ASSERT(IsReserved());
- return UncommitRegion(address, size);
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
- return true;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- if (NULL == VirtualAlloc(address,
- OS::CommitPageSize(),
- MEM_COMMIT,
- PAGE_READONLY | PAGE_GUARD)) {
- return false;
- }
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return VirtualFree(base, size, MEM_DECOMMIT) != 0;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return VirtualFree(base, 0, MEM_RELEASE) != 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 thread support.
-
-// Definition of invalid thread handle and id.
-static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
-
-// Entry point for threads. The supplied argument is a pointer to the thread
-// object. The entry function dispatches to the run method in the thread
-// object. It is important that this function has __stdcall calling
-// convention.
-static unsigned int __stdcall ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- thread->Run();
- return 0;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- explicit PlatformData(HANDLE thread) : thread_(thread) {}
- HANDLE thread_;
- unsigned thread_id_;
-};
-
-
-// Initialize a Win32 thread object. The thread has an invalid thread
-// handle until it is started.
-
-Thread::Thread(const Options& options)
- : stack_size_(options.stack_size()) {
- data_ = new PlatformData(kNoThread);
- set_name(options.name());
-}
-
-
-void Thread::set_name(const char* name) {
- OS::StrNCpy(Vector<char>(name_, sizeof(name_)), name, strlen(name));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-// Close our own handle for the thread.
-Thread::~Thread() {
- if (data_->thread_ != kNoThread) CloseHandle(data_->thread_);
- delete data_;
-}
-
-
-// Create a new thread. It is important to use _beginthreadex() instead of
-// the Win32 function CreateThread(), because the CreateThread() does not
-// initialize thread specific structures in the C runtime library.
-void Thread::Start() {
-#ifndef _WIN32_WCE
- data_->thread_ = reinterpret_cast<HANDLE>(
- _beginthreadex(NULL,
- static_cast<unsigned>(stack_size_),
- ThreadEntry,
- this,
- 0,
- &data_->thread_id_));
-#else
- unsigned initflag = 0;
- if (stack_size_ > 0)
- initflag |= STACK_SIZE_PARAM_IS_A_RESERVATION;
- data_->thread_ = reinterpret_cast<HANDLE>(
- CreateThread( NULL,
- static_cast<unsigned>(stack_size_),
- (LPTHREAD_START_ROUTINE)ThreadEntry,
- this,
- initflag,
- (LPDWORD)&data_->thread_id_));
-#endif // _WIN32_WCE
-}
-
-
-// Wait for thread to terminate.
-void Thread::Join() {
- if (data_->thread_id_ != GetCurrentThreadId()) {
- WaitForSingleObject(data_->thread_, INFINITE);
- }
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- DWORD result = TlsAlloc();
- ASSERT(result != TLS_OUT_OF_INDEXES);
- return static_cast<LocalStorageKey>(result);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- BOOL result = TlsFree(static_cast<DWORD>(key));
- USE(result);
- ASSERT(result);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- return TlsGetValue(static_cast<DWORD>(key));
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- BOOL result = TlsSetValue(static_cast<DWORD>(key), value);
- USE(result);
- ASSERT(result);
-}
-
-
-
-void Thread::YieldCPU() {
- Sleep(0);
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 mutex support.
-//
-// On Win32 mutexes are implemented using CRITICAL_SECTION objects. These are
-// faster than Win32 Mutex objects because they are implemented using user mode
-// atomic instructions. Therefore we only do ring transitions if there is lock
-// contention.
-
-class Win32Mutex : public Mutex {
- public:
- Win32Mutex() { InitializeCriticalSection(&cs_); }
-
- virtual ~Win32Mutex() { DeleteCriticalSection(&cs_); }
-
- virtual int Lock() {
- EnterCriticalSection(&cs_);
- return 0;
- }
-
- virtual int Unlock() {
- LeaveCriticalSection(&cs_);
- return 0;
- }
-
-
- virtual bool TryLock() {
- // Returns non-zero if critical section is entered successfully entered.
- return TryEnterCriticalSection(&cs_);
- }
-
- private:
- CRITICAL_SECTION cs_; // Critical section used for mutex
-};
-
-
-Mutex* OS::CreateMutex() {
- return new Win32Mutex();
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 semaphore support.
-//
-// On Win32 semaphores are implemented using Win32 Semaphore objects. The
-// semaphores are anonymous. Also, the semaphores are initialized to have
-// no upper limit on count.
-
-
-class Win32Semaphore : public Semaphore {
- public:
- explicit Win32Semaphore(int count) {
- sem = ::CreateSemaphoreW(NULL, count, 0x7fffffff, NULL);
- }
-
- ~Win32Semaphore() {
- CloseHandle(sem);
- }
-
- void Wait() {
- WaitForSingleObject(sem, INFINITE);
- }
-
- bool Wait(int timeout) {
- // Timeout in Windows API is in milliseconds.
- DWORD millis_timeout = timeout / 1000;
- return WaitForSingleObject(sem, millis_timeout) != WAIT_TIMEOUT;
- }
-
- void Signal() {
- LONG dummy;
- ReleaseSemaphore(sem, 1, &dummy);
- }
-
- private:
- HANDLE sem;
-};
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new Win32Semaphore(count);
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 socket support.
-//
-
-class Win32Socket : public Socket {
- public:
- explicit Win32Socket() {
- // Create the socket.
- socket_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- }
- explicit Win32Socket(SOCKET socket): socket_(socket) { }
- virtual ~Win32Socket() { Shutdown(); }
-
- // Server initialization.
- bool Bind(const int port);
- bool Listen(int backlog) const;
- Socket* Accept() const;
-
- // Client initialization.
- bool Connect(const char* host, const char* port);
-
- // Shutdown socket for both read and write.
- bool Shutdown();
-
- // Data Transimission
- int Send(const char* data, int len) const;
- int Receive(char* data, int len) const;
-
- bool SetReuseAddress(bool reuse_address);
-
- bool IsValid() const { return socket_ != INVALID_SOCKET; }
-
- private:
- SOCKET socket_;
-};
-
-
-bool Win32Socket::Bind(const int port) {
- if (!IsValid()) {
- return false;
- }
-
- sockaddr_in addr;
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr.sin_port = htons(port);
- int status = bind(socket_,
- reinterpret_cast<struct sockaddr *>(&addr),
- sizeof(addr));
- return status == 0;
-}
-
-
-bool Win32Socket::Listen(int backlog) const {
- if (!IsValid()) {
- return false;
- }
-
- int status = listen(socket_, backlog);
- return status == 0;
-}
-
-
-Socket* Win32Socket::Accept() const {
- if (!IsValid()) {
- return NULL;
- }
-
- SOCKET socket = accept(socket_, NULL, NULL);
- if (socket == INVALID_SOCKET) {
- return NULL;
- } else {
- return new Win32Socket(socket);
- }
-}
-
-
-bool Win32Socket::Connect(const char* host, const char* port) {
- if (!IsValid()) {
- return false;
- }
-
- // Lookup host and port.
- struct addrinfo *result = NULL;
- struct addrinfo hints;
- memset(&hints, 0, sizeof(addrinfo));
- hints.ai_family = AF_INET;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_protocol = IPPROTO_TCP;
- int status = getaddrinfo(host, port, &hints, &result);
- if (status != 0) {
- return false;
- }
-
- // Connect.
- status = connect(socket_,
- result->ai_addr,
- static_cast<int>(result->ai_addrlen));
- freeaddrinfo(result);
- return status == 0;
-}
-
-
-bool Win32Socket::Shutdown() {
- if (IsValid()) {
- // Shutdown socket for both read and write.
- int status = shutdown(socket_, SD_BOTH);
- closesocket(socket_);
- socket_ = INVALID_SOCKET;
- return status == SOCKET_ERROR;
- }
- return true;
-}
-
-
-int Win32Socket::Send(const char* data, int len) const {
- if (len <= 0) return 0;
- int written = 0;
- while (written < len) {
- int status = send(socket_, data + written, len - written, 0);
- if (status == 0) {
- break;
- } else if (status > 0) {
- written += status;
- } else {
- return 0;
- }
- }
- return written;
-}
-
-
-int Win32Socket::Receive(char* data, int len) const {
- if (len <= 0) return 0;
- int status = recv(socket_, data, len, 0);
- return (status == SOCKET_ERROR) ? 0 : status;
-}
-
-
-bool Win32Socket::SetReuseAddress(bool reuse_address) {
- BOOL on = reuse_address ? true : false;
- int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
- reinterpret_cast<char*>(&on), sizeof(on));
- return status == SOCKET_ERROR;
-}
-
-
-bool Socket::SetUp() {
- // Initialize Winsock32
- int err;
- WSADATA winsock_data;
- WORD version_requested = MAKEWORD(1, 0);
- err = WSAStartup(version_requested, &winsock_data);
- if (err != 0) {
- PrintF("Unable to initialize Winsock, err = %d\n", Socket::LastError());
- }
-
- return err == 0;
-}
-
-
-int Socket::LastError() {
- return WSAGetLastError();
-}
-
-
-uint16_t Socket::HToN(uint16_t value) {
- return htons(value);
-}
-
-
-uint16_t Socket::NToH(uint16_t value) {
- return ntohs(value);
-}
-
-
-uint32_t Socket::HToN(uint32_t value) {
- return htonl(value);
-}
-
-
-uint32_t Socket::NToH(uint32_t value) {
- return ntohl(value);
-}
-
-
-Socket* OS::CreateSocket() {
- return new Win32Socket();
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 profiler support.
-
-class Sampler::PlatformData : public Malloced {
- public:
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId())) {}
-
- ~PlatformData() {
- if (profiled_thread_ != NULL) {
- CloseHandle(profiled_thread_);
- profiled_thread_ = NULL;
- }
- }
-
- HANDLE profiled_thread() { return profiled_thread_; }
-
- private:
- HANDLE profiled_thread_;
-};
-
-
-class SamplerThread : public Thread {
- public:
- static const int kSamplerThreadStackSize = 64 * KB;
-
- explicit SamplerThread(int interval)
- : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- instance_ = new SamplerThread(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- OS::Sleep(interval_);
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
- if (!sampler->isolate()->IsInitialized()) return;
- if (!sampler->IsProfiling()) return;
- SamplerThread* sampler_thread =
- reinterpret_cast<SamplerThread*>(raw_sampler_thread);
- sampler_thread->SampleContext(sampler);
- }
-
- void SampleContext(Sampler* sampler) {
- HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
- if (profiled_thread == NULL) return;
-
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
- if (sample == NULL) sample = &sample_obj;
-
- static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread) == kSuspendFailed) return;
- sample->state = sampler->isolate()->current_vm_state();
-
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread, &context) != 0) {
-#if V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(context.Rip);
- sample->sp = reinterpret_cast<Address>(context.Rsp);
- sample->fp = reinterpret_cast<Address>(context.Rbp);
-#elif V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(context.Eip);
- sample->sp = reinterpret_cast<Address>(context.Esp);
- sample->fp = reinterpret_cast<Address>(context.Ebp);
-#elif V8_HOST_ARCH_ARM
- // Taken from http://msdn.microsoft.com/en-us/library/aa448762.aspx
- sample->pc = reinterpret_cast<Address>(context.Pc);
- sample->sp = reinterpret_cast<Address>(context.Sp);
- sample->fp = reinterpret_cast<Address>(context.R11);
-#else
-#error This Platform is not supported.
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
- }
- ResumeThread(profiled_thread);
- }
-
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SamplerThread* instance_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-
-Mutex* SamplerThread::mutex_ = NULL;
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srand(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
- SamplerThread::SetUp();
-}
-
-
-void OS::TearDown() {
- SamplerThread::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
-bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
- return false;
-}
-
-
-void Sampler::DoSample() {
-}
-
-
-void Sampler::StartProfiling() {
-}
-
-
-void Sampler::StopProfiling() {
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform.h b/src/3rdparty/v8/src/platform.h
deleted file mode 100644
index bf1a1dc..0000000
--- a/src/3rdparty/v8/src/platform.h
+++ /dev/null
@@ -1,828 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This module contains the platform-specific code. This make the rest of the
-// code less dependent on operating system, compilers and runtime libraries.
-// This module does specifically not deal with differences between different
-// processor architecture.
-// The platform classes have the same definition for all platforms. The
-// implementation for a particular platform is put in platform_<os>.cc.
-// The build system then uses the implementation for the target platform.
-//
-// This design has been chosen because it is simple and fast. Alternatively,
-// the platform dependent classes could have been implemented using abstract
-// superclasses with virtual methods and having specializations for each
-// platform. This design was rejected because it was more complicated and
-// slower. It would require factory methods for selecting the right
-// implementation and the overhead of virtual methods for performance
-// sensitive like mutex locking/unlocking.
-
-#ifndef V8_PLATFORM_H_
-#define V8_PLATFORM_H_
-
-#ifdef __sun
-# ifndef signbit
-int signbit(double x);
-# endif
-#endif
-
-// GCC specific stuff
-#ifdef __GNUC__
-
-// Needed for va_list on at least MinGW and Android.
-#include <stdarg.h>
-
-#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
-
-#endif // __GNUC__
-
-
-// Windows specific stuff.
-#ifdef _WIN32
-
-// Microsoft Visual C++ specific stuff.
-#ifdef _MSC_VER
-
-#include "win32-math.h"
-
-int strncasecmp(const char* s1, const char* s2, int n);
-
-inline int lrint(double flt) {
- int intgr;
-#if defined(V8_TARGET_ARCH_IA32)
- __asm {
- fld flt
- fistp intgr
- };
-#else
- intgr = static_cast<int>(flt + 0.5);
- if ((intgr & 1) != 0 && intgr - flt == 0.5) {
- // If the number is halfway between two integers, round to the even one.
- intgr--;
- }
-#endif
- return intgr;
-}
-
-
-#endif // _MSC_VER
-
-// Random is missing on both Visual Studio and MinGW.
-int random();
-
-#endif // _WIN32
-
-#include "atomicops.h"
-#include "lazy-instance.h"
-#include "platform-tls.h"
-#include "utils.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-// Use AtomicWord for a machine-sized pointer. It is assumed that
-// reads and writes of naturally aligned values of this type are atomic.
-#if defined(__OpenBSD__) && defined(__i386__)
-typedef Atomic32 AtomicWord;
-#else
-typedef intptr_t AtomicWord;
-#endif
-
-class Semaphore;
-class Mutex;
-
-double ceiling(double x);
-double modulo(double x, double y);
-
-// Custom implementation of math functions.
-double fast_sin(double input);
-double fast_cos(double input);
-double fast_tan(double input);
-double fast_log(double input);
-double fast_exp(double input);
-double fast_sqrt(double input);
-// The custom exp implementation needs 16KB of lookup data; initialize it
-// on demand.
-void lazily_initialize_fast_exp();
-
-// Forward declarations.
-class Socket;
-
-// ----------------------------------------------------------------------------
-// OS
-//
-// This class has static methods for the different platform specific
-// functions. Add methods here to cope with differences between the
-// supported platforms.
-
-class OS {
- public:
- // Initializes the platform OS support. Called once at VM startup.
- static void SetUp();
-
- // Initializes the platform OS support that depend on CPU features. This is
- // called after CPU initialization.
- static void PostSetUp();
-
- // Clean up platform-OS-related things. Called once at VM shutdown.
- static void TearDown();
-
- // Returns the accumulated user time for thread. This routine
- // can be used for profiling. The implementation should
- // strive for high-precision timer resolution, preferable
- // micro-second resolution.
- static int GetUserTime(uint32_t* secs, uint32_t* usecs);
-
- // Get a tick counter normalized to one tick per microsecond.
- // Used for calculating time intervals.
- static int64_t Ticks();
-
- // Returns current time as the number of milliseconds since
- // 00:00:00 UTC, January 1, 1970.
- static double TimeCurrentMillis();
-
- // Returns a string identifying the current time zone. The
- // timestamp is used for determining if DST is in effect.
- static const char* LocalTimezone(double time);
-
- // Returns the local time offset in milliseconds east of UTC without
- // taking daylight savings time into account.
- static double LocalTimeOffset();
-
- // Returns the daylight savings offset for the given time.
- static double DaylightSavingsOffset(double time);
-
- // Returns last OS error.
- static int GetLastError();
-
- static FILE* FOpen(const char* path, const char* mode);
- static bool Remove(const char* path);
-
- // Opens a temporary file, the file is auto removed on close.
- static FILE* OpenTemporaryFile();
-
- // Log file open mode is platform-dependent due to line ends issues.
- static const char* const LogFileOpenMode;
-
- // Print output to console. This is mostly used for debugging output.
- // On platforms that has standard terminal output, the output
- // should go to stdout.
- static void Print(const char* format, ...);
- static void VPrint(const char* format, va_list args);
-
- // Print output to a file. This is mostly used for debugging output.
- static void FPrint(FILE* out, const char* format, ...);
- static void VFPrint(FILE* out, const char* format, va_list args);
-
- // Print error output to console. This is mostly used for error message
- // output. On platforms that has standard terminal output, the output
- // should go to stderr.
- static void PrintError(const char* format, ...);
- static void VPrintError(const char* format, va_list args);
-
- // Allocate/Free memory used by JS heap. Pages are readable/writable, but
- // they are not guaranteed to be executable unless 'executable' is true.
- // Returns the address of allocated memory, or NULL if failed.
- static void* Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable);
- static void Free(void* address, const size_t size);
-
- // This is the granularity at which the ProtectCode(...) call can set page
- // permissions.
- static intptr_t CommitPageSize();
-
- // Mark code segments non-writable.
- static void ProtectCode(void* address, const size_t size);
-
- // Assign memory as a guard page so that access will cause an exception.
- static void Guard(void* address, const size_t size);
-
- // Generate a random address to be used for hinting mmap().
- static void* GetRandomMmapAddr();
-
- // Get the Alignment guaranteed by Allocate().
- static size_t AllocateAlignment();
-
- // Returns an indication of whether a pointer is in a space that
- // has been allocated by Allocate(). This method may conservatively
- // always return false, but giving more accurate information may
- // improve the robustness of the stack dump code in the presence of
- // heap corruption.
- static bool IsOutsideAllocatedSpace(void* pointer);
-
- // Sleep for a number of milliseconds.
- static void Sleep(const int milliseconds);
-
- static int NumberOfCores();
-
- // Abort the current process.
- static void Abort();
-
- // Debug break.
- static void DebugBreak();
-
- // Dump C++ current stack trace (only functional on Linux).
- static void DumpBacktrace();
-
- // Walk the stack.
- static const int kStackWalkError = -1;
- static const int kStackWalkMaxNameLen = 256;
- static const int kStackWalkMaxTextLen = 256;
- struct StackFrame {
- void* address;
- char text[kStackWalkMaxTextLen];
- };
-
- static int StackWalk(Vector<StackFrame> frames);
-
- // Factory method for creating platform dependent Mutex.
- // Please use delete to reclaim the storage for the returned Mutex.
- static Mutex* CreateMutex();
-
- // Factory method for creating platform dependent Semaphore.
- // Please use delete to reclaim the storage for the returned Semaphore.
- static Semaphore* CreateSemaphore(int count);
-
- // Factory method for creating platform dependent Socket.
- // Please use delete to reclaim the storage for the returned Socket.
- static Socket* CreateSocket();
-
- class MemoryMappedFile {
- public:
- static MemoryMappedFile* open(const char* name);
- static MemoryMappedFile* create(const char* name, int size, void* initial);
- virtual ~MemoryMappedFile() { }
- virtual void* memory() = 0;
- virtual int size() = 0;
- };
-
- // Safe formatting print. Ensures that str is always null-terminated.
- // Returns the number of chars written, or -1 if output was truncated.
- static int SNPrintF(Vector<char> str, const char* format, ...);
- static int VSNPrintF(Vector<char> str,
- const char* format,
- va_list args);
-
- static char* StrChr(char* str, int c);
- static void StrNCpy(Vector<char> dest, const char* src, size_t n);
-
- // Support for the profiler. Can do nothing, in which case ticks
- // occuring in shared libraries will not be properly accounted for.
- static void LogSharedLibraryAddresses();
-
- // Support for the profiler. Notifies the external profiling
- // process that a code moving garbage collection starts. Can do
- // nothing, in which case the code objects must not move (e.g., by
- // using --never-compact) if accurate profiling is desired.
- static void SignalCodeMovingGC();
-
- // The return value indicates the CPU features we are sure of because of the
- // OS. For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
- // instructions.
- // This is a little messy because the interpretation is subject to the cross
- // of the CPU and the OS. The bits in the answer correspond to the bit
- // positions indicated by the members of the CpuFeature enum from globals.h
- static uint64_t CpuFeaturesImpliedByPlatform();
-
- // Maximum size of the virtual memory. 0 means there is no artificial
- // limit.
- static intptr_t MaxVirtualMemory();
-
- // Returns the double constant NAN
- static double nan_value();
-
- // Support runtime detection of Cpu implementer
- static CpuImplementer GetCpuImplementer();
-
- // Support runtime detection of VFP3 on ARM CPUs.
- static bool ArmCpuHasFeature(CpuFeature feature);
-
- // Support runtime detection of whether the hard float option of the
- // EABI is used.
- static bool ArmUsingHardFloat();
-
- // Support runtime detection of FPU on MIPS CPUs.
- static bool MipsCpuHasFeature(CpuFeature feature);
-
- // Returns the activation frame alignment constraint or zero if
- // the platform doesn't care. Guaranteed to be a power of two.
- static int ActivationFrameAlignment();
-
- static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
-
-#if defined(V8_TARGET_ARCH_IA32)
- // Copy memory area to disjoint memory area.
- static void MemCopy(void* dest, const void* src, size_t size);
- // Limit below which the extra overhead of the MemCopy function is likely
- // to outweigh the benefits of faster copying.
- static const int kMinComplexMemCopy = 64;
- typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
-
-#else // V8_TARGET_ARCH_IA32
- static void MemCopy(void* dest, const void* src, size_t size) {
- memcpy(dest, src, size);
- }
- static const int kMinComplexMemCopy = 256;
-#endif // V8_TARGET_ARCH_IA32
-
- static int GetCurrentProcessId();
-
- private:
- static const int msPerSecond = 1000;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
-};
-
-// Represents and controls an area of reserved memory.
-// Control of the reserved memory can be assigned to another VirtualMemory
-// object by assignment or copy-contructing. This removes the reserved memory
-// from the original object.
-class VirtualMemory {
- public:
- // Empty VirtualMemory object, controlling no reserved memory.
- VirtualMemory();
-
- // Reserves virtual memory with size.
- explicit VirtualMemory(size_t size);
-
- // Reserves virtual memory containing an area of the given size that
- // is aligned per alignment. This may not be at the position returned
- // by address().
- VirtualMemory(size_t size, size_t alignment);
-
- // Releases the reserved memory, if any, controlled by this VirtualMemory
- // object.
- ~VirtualMemory();
-
- // Returns whether the memory has been reserved.
- bool IsReserved();
-
- // Initialize or resets an embedded VirtualMemory object.
- void Reset();
-
- // Returns the start address of the reserved memory.
- // If the memory was reserved with an alignment, this address is not
- // necessarily aligned. The user might need to round it up to a multiple of
- // the alignment to get the start of the aligned block.
- void* address() {
- ASSERT(IsReserved());
- return address_;
- }
-
- // Returns the size of the reserved memory. The returned value is only
- // meaningful when IsReserved() returns true.
- // If the memory was reserved with an alignment, this size may be larger
- // than the requested size.
- size_t size() { return size_; }
-
- // Commits real memory. Returns whether the operation succeeded.
- bool Commit(void* address, size_t size, bool is_executable);
-
- // Uncommit real memory. Returns whether the operation succeeded.
- bool Uncommit(void* address, size_t size);
-
- // Creates a single guard page at the given address.
- bool Guard(void* address);
-
- void Release() {
- ASSERT(IsReserved());
- // Notice: Order is important here. The VirtualMemory object might live
- // inside the allocated region.
- void* address = address_;
- size_t size = size_;
- Reset();
- bool result = ReleaseRegion(address, size);
- USE(result);
- ASSERT(result);
- }
-
- // Assign control of the reserved region to a different VirtualMemory object.
- // The old object is no longer functional (IsReserved() returns false).
- void TakeControl(VirtualMemory* from) {
- ASSERT(!IsReserved());
- address_ = from->address_;
- size_ = from->size_;
- from->Reset();
- }
-
- static void* ReserveRegion(size_t size);
-
- static bool CommitRegion(void* base, size_t size, bool is_executable);
-
- static bool UncommitRegion(void* base, size_t size);
-
- // Must be called with a base pointer that has been returned by ReserveRegion
- // and the same size it was reserved with.
- static bool ReleaseRegion(void* base, size_t size);
-
- // Returns true if OS performs lazy commits, i.e. the memory allocation call
- // defers actual physical memory allocation till the first memory access.
- // Otherwise returns false.
- static bool HasLazyCommits();
-
- private:
- void* address_; // Start address of the virtual memory.
- size_t size_; // Size of the virtual memory.
-};
-
-
-// ----------------------------------------------------------------------------
-// Thread
-//
-// Thread objects are used for creating and running threads. When the start()
-// method is called the new thread starts running the run() method in the new
-// thread. The Thread object should not be deallocated before the thread has
-// terminated.
-
-class Thread {
- public:
- // Opaque data type for thread-local storage keys.
- // LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
- // to ensure that enumeration type has correct value range (see Issue 830 for
- // more details).
- enum LocalStorageKey {
- LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt,
- LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
- };
-
- class Options {
- public:
- Options() : name_("v8:<unknown>"), stack_size_(0) {}
- Options(const char* name, int stack_size = 0)
- : name_(name), stack_size_(stack_size) {}
-
- const char* name() const { return name_; }
- int stack_size() const { return stack_size_; }
-
- private:
- const char* name_;
- int stack_size_;
- };
-
- // Create new thread.
- explicit Thread(const Options& options);
- virtual ~Thread();
-
- // Start new thread by calling the Run() method in the new thread.
- void Start();
-
- // Wait until thread terminates.
- void Join();
-
- inline const char* name() const {
- return name_;
- }
-
- // Abstract method for run handler.
- virtual void Run() = 0;
-
- // Thread-local storage.
- static LocalStorageKey CreateThreadLocalKey();
- static void DeleteThreadLocalKey(LocalStorageKey key);
- static void* GetThreadLocal(LocalStorageKey key);
- static int GetThreadLocalInt(LocalStorageKey key) {
- return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
- }
- static void SetThreadLocal(LocalStorageKey key, void* value);
- static void SetThreadLocalInt(LocalStorageKey key, int value) {
- SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
- }
- static bool HasThreadLocal(LocalStorageKey key) {
- return GetThreadLocal(key) != NULL;
- }
-
-#ifdef V8_FAST_TLS_SUPPORTED
- static inline void* GetExistingThreadLocal(LocalStorageKey key) {
- void* result = reinterpret_cast<void*>(
- InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
- ASSERT(result == GetThreadLocal(key));
- return result;
- }
-#else
- static inline void* GetExistingThreadLocal(LocalStorageKey key) {
- return GetThreadLocal(key);
- }
-#endif
-
- // A hint to the scheduler to let another thread run.
- static void YieldCPU();
-
-
- // The thread name length is limited to 16 based on Linux's implementation of
- // prctl().
- static const int kMaxThreadNameLength = 16;
-
- class PlatformData;
- PlatformData* data() { return data_; }
-
- private:
- void set_name(const char* name);
-
- PlatformData* data_;
-
- char name_[kMaxThreadNameLength];
- int stack_size_;
-
- DISALLOW_COPY_AND_ASSIGN(Thread);
-};
-
-
-// ----------------------------------------------------------------------------
-// Mutex
-//
-// Mutexes are used for serializing access to non-reentrant sections of code.
-// The implementations of mutex should allow for nested/recursive locking.
-
-class Mutex {
- public:
- virtual ~Mutex() {}
-
- // Locks the given mutex. If the mutex is currently unlocked, it becomes
- // locked and owned by the calling thread, and immediately. If the mutex
- // is already locked by another thread, suspends the calling thread until
- // the mutex is unlocked.
- virtual int Lock() = 0;
-
- // Unlocks the given mutex. The mutex is assumed to be locked and owned by
- // the calling thread on entrance.
- virtual int Unlock() = 0;
-
- // Tries to lock the given mutex. Returns whether the mutex was
- // successfully locked.
- virtual bool TryLock() = 0;
-};
-
-struct CreateMutexTrait {
- static Mutex* Create() {
- return OS::CreateMutex();
- }
-};
-
-// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
-//
-// void my_function() {
-// ScopedLock my_lock(my_mutex.Pointer());
-// // Do something.
-// }
-//
-typedef LazyDynamicInstance<
- Mutex, CreateMutexTrait, ThreadSafeInitOnceTrait>::type LazyMutex;
-
-#define LAZY_MUTEX_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
-// ----------------------------------------------------------------------------
-// ScopedLock
-//
-// Stack-allocated ScopedLocks provide block-scoped locking and
-// unlocking of a mutex.
-class ScopedLock {
- public:
- explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
- ASSERT(mutex_ != NULL);
- mutex_->Lock();
- }
- ~ScopedLock() {
- mutex_->Unlock();
- }
-
- private:
- Mutex* mutex_;
- DISALLOW_COPY_AND_ASSIGN(ScopedLock);
-};
-
-
-// ----------------------------------------------------------------------------
-// Semaphore
-//
-// A semaphore object is a synchronization object that maintains a count. The
-// count is decremented each time a thread completes a wait for the semaphore
-// object and incremented each time a thread signals the semaphore. When the
-// count reaches zero, threads waiting for the semaphore blocks until the
-// count becomes non-zero.
-
-class Semaphore {
- public:
- virtual ~Semaphore() {}
-
- // Suspends the calling thread until the semaphore counter is non zero
- // and then decrements the semaphore counter.
- virtual void Wait() = 0;
-
- // Suspends the calling thread until the counter is non zero or the timeout
- // time has passed. If timeout happens the return value is false and the
- // counter is unchanged. Otherwise the semaphore counter is decremented and
- // true is returned. The timeout value is specified in microseconds.
- virtual bool Wait(int timeout) = 0;
-
- // Increments the semaphore counter.
- virtual void Signal() = 0;
-};
-
-template <int InitialValue>
-struct CreateSemaphoreTrait {
- static Semaphore* Create() {
- return OS::CreateSemaphore(InitialValue);
- }
-};
-
-// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-// // The following semaphore starts at 0.
-// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
-//
-// void my_function() {
-// // Do something with my_semaphore.Pointer().
-// }
-//
-template <int InitialValue>
-struct LazySemaphore {
- typedef typename LazyDynamicInstance<
- Semaphore, CreateSemaphoreTrait<InitialValue>,
- ThreadSafeInitOnceTrait>::type type;
-};
-
-#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
-
-// ----------------------------------------------------------------------------
-// Socket
-//
-
-class Socket {
- public:
- virtual ~Socket() {}
-
- // Server initialization.
- virtual bool Bind(const int port) = 0;
- virtual bool Listen(int backlog) const = 0;
- virtual Socket* Accept() const = 0;
-
- // Client initialization.
- virtual bool Connect(const char* host, const char* port) = 0;
-
- // Shutdown socket for both read and write. This causes blocking Send and
- // Receive calls to exit. After Shutdown the Socket object cannot be used for
- // any communication.
- virtual bool Shutdown() = 0;
-
- // Data Transimission
- // Return 0 on failure.
- virtual int Send(const char* data, int len) const = 0;
- virtual int Receive(char* data, int len) const = 0;
-
- // Set the value of the SO_REUSEADDR socket option.
- virtual bool SetReuseAddress(bool reuse_address) = 0;
-
- virtual bool IsValid() const = 0;
-
- static bool SetUp();
- static int LastError();
- static uint16_t HToN(uint16_t value);
- static uint16_t NToH(uint16_t value);
- static uint32_t HToN(uint32_t value);
- static uint32_t NToH(uint32_t value);
-};
-
-
-// ----------------------------------------------------------------------------
-// Sampler
-//
-// A sampler periodically samples the state of the VM and optionally
-// (if used for profiling) the program counter and stack pointer for
-// the thread that created it.
-
-// TickSample captures the information collected for each sample.
-class TickSample {
- public:
- TickSample()
- : state(OTHER),
- pc(NULL),
- sp(NULL),
- fp(NULL),
- tos(NULL),
- frames_count(0),
- has_external_callback(false) {}
- StateTag state; // The state of the VM.
- Address pc; // Instruction pointer.
- Address sp; // Stack pointer.
- Address fp; // Frame pointer.
- union {
- Address tos; // Top stack value (*sp).
- Address external_callback;
- };
- static const int kMaxFramesCount = 64;
- Address stack[kMaxFramesCount]; // Call stack.
- int frames_count : 8; // Number of captured frames.
- bool has_external_callback : 1;
-};
-
-class Sampler {
- public:
- // Initialize sampler.
- Sampler(Isolate* isolate, int interval);
- virtual ~Sampler();
-
- int interval() const { return interval_; }
-
- // Performs stack sampling.
- void SampleStack(TickSample* sample) {
- DoSampleStack(sample);
- IncSamplesTaken();
- }
-
- // This method is called for each sampling period with the current
- // program counter.
- virtual void Tick(TickSample* sample) = 0;
-
- // Start and stop sampler.
- void Start();
- void Stop();
-
- // Whether the sampling thread should use this Sampler for CPU profiling?
- bool IsProfiling() const {
- return NoBarrier_Load(&profiling_) > 0 &&
- !NoBarrier_Load(&has_processing_thread_);
- }
- void IncreaseProfilingDepth() {
- if (NoBarrier_AtomicIncrement(&profiling_, 1) == 1) StartProfiling();
- }
- void DecreaseProfilingDepth() {
- if (!NoBarrier_AtomicIncrement(&profiling_, -1)) StopProfiling();
- }
-
- // Whether the sampler is running (that is, consumes resources).
- bool IsActive() const { return NoBarrier_Load(&active_); }
-
- Isolate* isolate() { return isolate_; }
-
- // Used in tests to make sure that stack sampling is performed.
- int samples_taken() const { return samples_taken_; }
- void ResetSamplesTaken() { samples_taken_ = 0; }
-
- class PlatformData;
- PlatformData* data() { return data_; }
-
- PlatformData* platform_data() { return data_; }
-
- // If true next sample must be initiated on the profiler event processor
- // thread right after latest sample is processed.
- static bool CanSampleOnProfilerEventsProcessorThread();
- void DoSample();
- void SetHasProcessingThread(bool value) {
- NoBarrier_Store(&has_processing_thread_, value);
- }
-
- protected:
- virtual void DoSampleStack(TickSample* sample) = 0;
-
- private:
- void SetActive(bool value) { NoBarrier_Store(&active_, value); }
- void IncSamplesTaken() { if (++samples_taken_ < 0) samples_taken_ = 0; }
-
- // Perform platform-specific initialization before DoSample() may be invoked.
- void StartProfiling();
- // Perform platform-specific cleanup after profiling.
- void StopProfiling();
-
- Isolate* isolate_;
- const int interval_;
- Atomic32 profiling_;
- Atomic32 has_processing_thread_;
- Atomic32 active_;
- PlatformData* data_; // Platform specific data.
- int samples_taken_; // Counts stack samples taken.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_H_
diff --git a/src/3rdparty/v8/src/preparse-data-format.h b/src/3rdparty/v8/src/preparse-data-format.h
deleted file mode 100644
index e64326e..0000000
--- a/src/3rdparty/v8/src/preparse-data-format.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PREPARSE_DATA_FORMAT_H_
-#define V8_PREPARSE_DATA_FORMAT_H_
-
-namespace v8 {
-namespace internal {
-
-// Generic and general data used by preparse data recorders and readers.
-
-struct PreparseDataConstants {
- public:
- // Layout and constants of the preparse data exchange format.
- static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 7;
-
- static const int kMagicOffset = 0;
- static const int kVersionOffset = 1;
- static const int kHasErrorOffset = 2;
- static const int kFunctionsSizeOffset = 3;
- static const int kSymbolCountOffset = 4;
- static const int kSizeOffset = 5;
- static const int kHeaderSize = 6;
-
- // If encoding a message, the following positions are fixed.
- static const int kMessageStartPos = 0;
- static const int kMessageEndPos = 1;
- static const int kMessageArgCountPos = 2;
- static const int kMessageTextPos = 3;
-
- static const unsigned char kNumberTerminator = 0x80u;
-};
-
-
-} } // namespace v8::internal.
-
-#endif // V8_PREPARSE_DATA_FORMAT_H_
diff --git a/src/3rdparty/v8/src/preparse-data.cc b/src/3rdparty/v8/src/preparse-data.cc
deleted file mode 100644
index d0425b4..0000000
--- a/src/3rdparty/v8/src/preparse-data.cc
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-
-#include "preparse-data-format.h"
-#include "preparse-data.h"
-
-#include "checks.h"
-#include "globals.h"
-#include "hashmap.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// FunctionLoggingParserRecorder
-
-FunctionLoggingParserRecorder::FunctionLoggingParserRecorder()
- : function_store_(0),
- is_recording_(true),
- pause_count_(0) {
- preamble_[PreparseDataConstants::kMagicOffset] =
- PreparseDataConstants::kMagicNumber;
- preamble_[PreparseDataConstants::kVersionOffset] =
- PreparseDataConstants::kCurrentVersion;
- preamble_[PreparseDataConstants::kHasErrorOffset] = false;
- preamble_[PreparseDataConstants::kFunctionsSizeOffset] = 0;
- preamble_[PreparseDataConstants::kSymbolCountOffset] = 0;
- preamble_[PreparseDataConstants::kSizeOffset] = 0;
- ASSERT_EQ(6, PreparseDataConstants::kHeaderSize);
-#ifdef DEBUG
- prev_start_ = -1;
-#endif
-}
-
-
-void FunctionLoggingParserRecorder::LogMessage(int start_pos,
- int end_pos,
- const char* message,
- const char* arg_opt) {
- if (has_error()) return;
- preamble_[PreparseDataConstants::kHasErrorOffset] = true;
- function_store_.Reset();
- STATIC_ASSERT(PreparseDataConstants::kMessageStartPos == 0);
- function_store_.Add(start_pos);
- STATIC_ASSERT(PreparseDataConstants::kMessageEndPos == 1);
- function_store_.Add(end_pos);
- STATIC_ASSERT(PreparseDataConstants::kMessageArgCountPos == 2);
- function_store_.Add((arg_opt == NULL) ? 0 : 1);
- STATIC_ASSERT(PreparseDataConstants::kMessageTextPos == 3);
- WriteString(CStrVector(message));
- if (arg_opt != NULL) WriteString(CStrVector(arg_opt));
- is_recording_ = false;
-}
-
-
-void FunctionLoggingParserRecorder::WriteString(Vector<const char> str) {
- function_store_.Add(str.length());
- for (int i = 0; i < str.length(); i++) {
- function_store_.Add(str[i]);
- }
-}
-
-// ----------------------------------------------------------------------------
-// PartialParserRecorder - Record both function entries and symbols.
-
-Vector<unsigned> PartialParserRecorder::ExtractData() {
- int function_size = function_store_.size();
- int total_size = PreparseDataConstants::kHeaderSize + function_size;
- Vector<unsigned> data = Vector<unsigned>::New(total_size);
- preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
- preamble_[PreparseDataConstants::kSymbolCountOffset] = 0;
- memcpy(data.start(), preamble_, sizeof(preamble_));
- int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
- if (function_size > 0) {
- function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
- symbol_start));
- }
- return data;
-}
-
-
-// ----------------------------------------------------------------------------
-// CompleteParserRecorder - Record both function entries and symbols.
-
-CompleteParserRecorder::CompleteParserRecorder()
- : FunctionLoggingParserRecorder(),
- literal_chars_(0),
- symbol_store_(0),
- symbol_keys_(0),
- string_table_(vector_compare),
- symbol_id_(0) {
-}
-
-
-void CompleteParserRecorder::LogSymbol(int start,
- int hash,
- bool is_ascii,
- Vector<const byte> literal_bytes) {
- Key key = { is_ascii, literal_bytes };
- HashMap::Entry* entry = string_table_.Lookup(&key, hash, true);
- int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- if (id == 0) {
- // Copy literal contents for later comparison.
- key.literal_bytes =
- Vector<const byte>::cast(literal_chars_.AddBlock(literal_bytes));
- // Put (symbol_id_ + 1) into entry and increment it.
- id = ++symbol_id_;
- entry->value = reinterpret_cast<void*>(id);
- Vector<Key> symbol = symbol_keys_.AddBlock(1, key);
- entry->key = &symbol[0];
- }
- WriteNumber(id - 1);
-}
-
-
-Vector<unsigned> CompleteParserRecorder::ExtractData() {
- int function_size = function_store_.size();
- // Add terminator to symbols, then pad to unsigned size.
- int symbol_size = symbol_store_.size();
- int padding = sizeof(unsigned) - (symbol_size % sizeof(unsigned));
- symbol_store_.AddBlock(padding, PreparseDataConstants::kNumberTerminator);
- symbol_size += padding;
- int total_size = PreparseDataConstants::kHeaderSize + function_size
- + (symbol_size / sizeof(unsigned));
- Vector<unsigned> data = Vector<unsigned>::New(total_size);
- preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
- preamble_[PreparseDataConstants::kSymbolCountOffset] = symbol_id_;
- memcpy(data.start(), preamble_, sizeof(preamble_));
- int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
- if (function_size > 0) {
- function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
- symbol_start));
- }
- if (!has_error()) {
- symbol_store_.WriteTo(
- Vector<byte>::cast(data.SubVector(symbol_start, total_size)));
- }
- return data;
-}
-
-
-void CompleteParserRecorder::WriteNumber(int number) {
- ASSERT(number >= 0);
-
- int mask = (1 << 28) - 1;
- for (int i = 28; i > 0; i -= 7) {
- if (number > mask) {
- symbol_store_.Add(static_cast<byte>(number >> i) | 0x80u);
- number &= mask;
- }
- mask >>= 7;
- }
- symbol_store_.Add(static_cast<byte>(number));
-}
-
-
-} } // namespace v8::internal.
diff --git a/src/3rdparty/v8/src/preparse-data.h b/src/3rdparty/v8/src/preparse-data.h
deleted file mode 100644
index 3a1e99d..0000000
--- a/src/3rdparty/v8/src/preparse-data.h
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PREPARSE_DATA_H_
-#define V8_PREPARSE_DATA_H_
-
-#include "allocation.h"
-#include "hashmap.h"
-#include "utils-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// ParserRecorder - Logging of preparser data.
-
-// Abstract interface for preparse data recorder.
-class ParserRecorder {
- public:
- ParserRecorder() { }
- virtual ~ParserRecorder() { }
-
- // Logs the scope and some details of a function literal in the source.
- virtual void LogFunction(int start,
- int end,
- int literals,
- int properties,
- LanguageMode language_mode) = 0;
-
- // Logs a symbol creation of a literal or identifier.
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
-
- // Logs an error message and marks the log as containing an error.
- // Further logging will be ignored, and ExtractData will return a vector
- // representing the error only.
- virtual void LogMessage(int start,
- int end,
- const char* message,
- const char* argument_opt) = 0;
-
- virtual int function_position() = 0;
-
- virtual int symbol_position() = 0;
-
- virtual int symbol_ids() = 0;
-
- virtual Vector<unsigned> ExtractData() = 0;
-
- virtual void PauseRecording() = 0;
-
- virtual void ResumeRecording() = 0;
-};
-
-
-// ----------------------------------------------------------------------------
-// FunctionLoggingParserRecorder - Record only function entries
-
-class FunctionLoggingParserRecorder : public ParserRecorder {
- public:
- FunctionLoggingParserRecorder();
- virtual ~FunctionLoggingParserRecorder() {}
-
- virtual void LogFunction(int start,
- int end,
- int literals,
- int properties,
- LanguageMode language_mode) {
- function_store_.Add(start);
- function_store_.Add(end);
- function_store_.Add(literals);
- function_store_.Add(properties);
- function_store_.Add(language_mode);
- }
-
- // Logs an error message and marks the log as containing an error.
- // Further logging will be ignored, and ExtractData will return a vector
- // representing the error only.
- virtual void LogMessage(int start,
- int end,
- const char* message,
- const char* argument_opt);
-
- virtual int function_position() { return function_store_.size(); }
-
-
- virtual Vector<unsigned> ExtractData() = 0;
-
- virtual void PauseRecording() {
- pause_count_++;
- is_recording_ = false;
- }
-
- virtual void ResumeRecording() {
- ASSERT(pause_count_ > 0);
- if (--pause_count_ == 0) is_recording_ = !has_error();
- }
-
- protected:
- bool has_error() {
- return static_cast<bool>(preamble_[PreparseDataConstants::kHasErrorOffset]);
- }
-
- bool is_recording() {
- return is_recording_;
- }
-
- void WriteString(Vector<const char> str);
-
- Collector<unsigned> function_store_;
- unsigned preamble_[PreparseDataConstants::kHeaderSize];
- bool is_recording_;
- int pause_count_;
-
-#ifdef DEBUG
- int prev_start_;
-#endif
-};
-
-
-// ----------------------------------------------------------------------------
-// PartialParserRecorder - Record only function entries
-
-class PartialParserRecorder : public FunctionLoggingParserRecorder {
- public:
- PartialParserRecorder() : FunctionLoggingParserRecorder() { }
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
- virtual ~PartialParserRecorder() { }
- virtual Vector<unsigned> ExtractData();
- virtual int symbol_position() { return 0; }
- virtual int symbol_ids() { return 0; }
-};
-
-
-// ----------------------------------------------------------------------------
-// CompleteParserRecorder - Record both function entries and symbols.
-
-class CompleteParserRecorder: public FunctionLoggingParserRecorder {
- public:
- CompleteParserRecorder();
- virtual ~CompleteParserRecorder() { }
-
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) {
- if (!is_recording_) return;
- int hash = vector_hash(literal);
- LogSymbol(start, hash, true, Vector<const byte>::cast(literal));
- }
-
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) {
- if (!is_recording_) return;
- int hash = vector_hash(literal);
- LogSymbol(start, hash, false, Vector<const byte>::cast(literal));
- }
-
- virtual Vector<unsigned> ExtractData();
-
- virtual int symbol_position() { return symbol_store_.size(); }
- virtual int symbol_ids() { return symbol_id_; }
-
- private:
- struct Key {
- bool is_ascii;
- Vector<const byte> literal_bytes;
- };
-
- virtual void LogSymbol(int start,
- int hash,
- bool is_ascii,
- Vector<const byte> literal);
-
- template <typename Char>
- static int vector_hash(Vector<const Char> string) {
- int hash = 0;
- for (int i = 0; i < string.length(); i++) {
- int c = static_cast<int>(string[i]);
- hash += c;
- hash += (hash << 10);
- hash ^= (hash >> 6);
- }
- return hash;
- }
-
- static bool vector_compare(void* a, void* b) {
- Key* string1 = reinterpret_cast<Key*>(a);
- Key* string2 = reinterpret_cast<Key*>(b);
- if (string1->is_ascii != string2->is_ascii) return false;
- int length = string1->literal_bytes.length();
- if (string2->literal_bytes.length() != length) return false;
- return memcmp(string1->literal_bytes.start(),
- string2->literal_bytes.start(), length) == 0;
- }
-
- // Write a non-negative number to the symbol store.
- void WriteNumber(int number);
-
- Collector<byte> literal_chars_;
- Collector<byte> symbol_store_;
- Collector<Key> symbol_keys_;
- HashMap string_table_;
- int symbol_id_;
-};
-
-
-} } // namespace v8::internal.
-
-#endif // V8_PREPARSE_DATA_H_
diff --git a/src/3rdparty/v8/src/preparser-api.cc b/src/3rdparty/v8/src/preparser-api.cc
deleted file mode 100644
index 6e8556a..0000000
--- a/src/3rdparty/v8/src/preparser-api.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef _MSC_VER
-#define V8_WIN32_LEAN_AND_MEAN
-#include "win32-headers.h"
-#endif
-
-#include "../include/v8-preparser.h"
-
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "utils.h"
-#include "list.h"
-#include "hashmap.h"
-#include "preparse-data-format.h"
-#include "preparse-data.h"
-#include "preparser.h"
-
-namespace v8 {
-namespace internal {
-
-// UTF16Buffer based on a v8::UnicodeInputStream.
-class InputStreamUtf16Buffer : public Utf16CharacterStream {
- public:
- /* The InputStreamUtf16Buffer maintains an internal buffer
- * that is filled in chunks from the Utf16CharacterStream.
- * It also maintains unlimited pushback capability, but optimized
- * for small pushbacks.
- * The pushback_buffer_ pointer points to the limit of pushbacks
- * in the current buffer. There is room for a few pushback'ed chars before
- * the buffer containing the most recently read chunk. If this is overflowed,
- * an external buffer is allocated/reused to hold further pushbacks, and
- * pushback_buffer_ and buffer_cursor_/buffer_end_ now points to the
- * new buffer. When this buffer is read to the end again, the cursor is
- * switched back to the internal buffer
- */
- explicit InputStreamUtf16Buffer(v8::UnicodeInputStream* stream)
- : Utf16CharacterStream(),
- stream_(stream),
- pushback_buffer_(buffer_),
- pushback_buffer_end_cache_(NULL),
- pushback_buffer_backing_(NULL),
- pushback_buffer_backing_size_(0) {
- buffer_cursor_ = buffer_end_ = buffer_ + kPushBackSize;
- }
-
- virtual ~InputStreamUtf16Buffer() {
- if (pushback_buffer_backing_ != NULL) {
- DeleteArray(pushback_buffer_backing_);
- }
- }
-
- virtual void PushBack(uc32 ch) {
- ASSERT(pos_ > 0);
- if (ch == kEndOfInput) {
- pos_--;
- return;
- }
- if (buffer_cursor_ <= pushback_buffer_) {
- // No more room in the current buffer to do pushbacks.
- if (pushback_buffer_end_cache_ == NULL) {
- // We have overflowed the pushback space at the beginning of buffer_.
- // Switch to using a separate allocated pushback buffer.
- if (pushback_buffer_backing_ == NULL) {
- // Allocate a buffer the first time we need it.
- pushback_buffer_backing_ = NewArray<uc16>(kPushBackSize);
- pushback_buffer_backing_size_ = kPushBackSize;
- }
- pushback_buffer_ = pushback_buffer_backing_;
- pushback_buffer_end_cache_ = buffer_end_;
- buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
- buffer_cursor_ = buffer_end_ - 1;
- } else {
- // Hit the bottom of the allocated pushback buffer.
- // Double the buffer and continue.
- uc16* new_buffer = NewArray<uc16>(pushback_buffer_backing_size_ * 2);
- memcpy(new_buffer + pushback_buffer_backing_size_,
- pushback_buffer_backing_,
- pushback_buffer_backing_size_);
- DeleteArray(pushback_buffer_backing_);
- buffer_cursor_ = new_buffer + pushback_buffer_backing_size_;
- pushback_buffer_backing_ = pushback_buffer_ = new_buffer;
- buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
- }
- }
- pushback_buffer_[buffer_cursor_ - pushback_buffer_- 1] =
- static_cast<uc16>(ch);
- pos_--;
- }
-
- protected:
- virtual bool ReadBlock() {
- if (pushback_buffer_end_cache_ != NULL) {
- buffer_cursor_ = buffer_;
- buffer_end_ = pushback_buffer_end_cache_;
- pushback_buffer_end_cache_ = NULL;
- return buffer_end_ > buffer_cursor_;
- }
- // Copy the top of the buffer into the pushback area.
- int32_t value;
- uc16* buffer_start = buffer_ + kPushBackSize;
- buffer_cursor_ = buffer_end_ = buffer_start;
- while ((value = stream_->Next()) >= 0) {
- if (value >
- static_cast<int32_t>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
- buffer_start[buffer_end_++ - buffer_start] =
- unibrow::Utf16::LeadSurrogate(value);
- buffer_start[buffer_end_++ - buffer_start] =
- unibrow::Utf16::TrailSurrogate(value);
- } else {
- // buffer_end_ is a const pointer, but buffer_ is writable.
- buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
- }
- // Stop one before the end of the buffer in case we get a surrogate pair.
- if (buffer_end_ <= buffer_ + 1 + kPushBackSize + kBufferSize) break;
- }
- return buffer_end_ > buffer_start;
- }
-
- virtual unsigned SlowSeekForward(unsigned pos) {
- // Seeking in the input is not used by preparsing.
- // It's only used by the real parser based on preparser data.
- UNIMPLEMENTED();
- return 0;
- }
-
- private:
- static const unsigned kBufferSize = 512;
- static const unsigned kPushBackSize = 16;
- v8::UnicodeInputStream* const stream_;
- // Buffer holding first kPushBackSize characters of pushback buffer,
- // then kBufferSize chars of read-ahead.
- // The pushback buffer is only used if pushing back characters past
- // the start of a block.
- uc16 buffer_[kPushBackSize + kBufferSize];
- // Limit of pushbacks before new allocation is necessary.
- uc16* pushback_buffer_;
- // Only if that pushback buffer at the start of buffer_ isn't sufficient
- // is the following used.
- const uc16* pushback_buffer_end_cache_;
- uc16* pushback_buffer_backing_;
- unsigned pushback_buffer_backing_size_;
-};
-
-
-// Functions declared by allocation.h and implemented in both api.cc (for v8)
-// or here (for a stand-alone preparser).
-
-void FatalProcessOutOfMemory(const char* reason) {
- V8_Fatal(__FILE__, __LINE__, reason);
-}
-
-bool EnableSlowAsserts() { return true; }
-
-} // namespace internal.
-
-
-UnicodeInputStream::~UnicodeInputStream() { }
-
-
-PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
- internal::InputStreamUtf16Buffer buffer(input);
- uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
- internal::UnicodeCache unicode_cache;
- internal::Scanner scanner(&unicode_cache);
- scanner.Initialize(&buffer);
- internal::CompleteParserRecorder recorder;
- preparser::PreParser::PreParseResult result =
- preparser::PreParser::PreParseProgram(&scanner,
- &recorder,
- internal::kAllowLazy,
- stack_limit);
- if (result == preparser::PreParser::kPreParseStackOverflow) {
- return PreParserData::StackOverflow();
- }
- internal::Vector<unsigned> pre_data = recorder.ExtractData();
- size_t size = pre_data.length() * sizeof(pre_data[0]);
- unsigned char* data = reinterpret_cast<unsigned char*>(pre_data.start());
- return PreParserData(size, data);
-}
-
-} // namespace v8.
-
-
-// Used by ASSERT macros and other immediate exits.
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
- exit(EXIT_FAILURE);
-}
diff --git a/src/3rdparty/v8/src/preparser.cc b/src/3rdparty/v8/src/preparser.cc
deleted file mode 100644
index 21da4f8..0000000
--- a/src/3rdparty/v8/src/preparser.cc
+++ /dev/null
@@ -1,1789 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <math.h>
-
-#include "../include/v8stdint.h"
-
-#include "allocation.h"
-#include "checks.h"
-#include "conversions.h"
-#include "conversions-inl.h"
-#include "globals.h"
-#include "hashmap.h"
-#include "list.h"
-#include "preparse-data-format.h"
-#include "preparse-data.h"
-#include "preparser.h"
-#include "unicode.h"
-#include "utils.h"
-
-namespace v8 {
-
-#ifdef _MSC_VER
-// Usually defined in math.h, but not in MSVC.
-// Abstracted to work
-int isfinite(double value);
-#endif
-
-namespace preparser {
-
-PreParser::PreParseResult PreParser::PreParseLazyFunction(
- i::LanguageMode mode, i::ParserRecorder* log) {
- log_ = log;
- // Lazy functions always have trivial outer scopes (no with/catch scopes).
- Scope top_scope(&scope_, kTopLevelScope);
- set_language_mode(mode);
- Scope function_scope(&scope_, kFunctionScope);
- ASSERT_EQ(i::Token::LBRACE, scanner_->current_token());
- bool ok = true;
- int start_position = scanner_->peek_location().beg_pos;
- ParseLazyFunctionLiteralBody(&ok);
- if (stack_overflow_) return kPreParseStackOverflow;
- if (!ok) {
- ReportUnexpectedToken(scanner_->current_token());
- } else {
- ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
- if (!is_classic_mode()) {
- int end_pos = scanner_->location().end_pos;
- CheckOctalLiteral(start_position, end_pos, &ok);
- if (ok) {
- CheckDelayedStrictModeViolation(start_position, end_pos, &ok);
- }
- }
- }
- return kPreParseSuccess;
-}
-
-
-// Preparsing checks a JavaScript program and emits preparse-data that helps
-// a later parsing to be faster.
-// See preparser-data.h for the data.
-
-// The PreParser checks that the syntax follows the grammar for JavaScript,
-// and collects some information about the program along the way.
-// The grammar check is only performed in order to understand the program
-// sufficiently to deduce some information about it, that can be used
-// to speed up later parsing. Finding errors is not the goal of pre-parsing,
-// rather it is to speed up properly written and correct programs.
-// That means that contextual checks (like a label being declared where
-// it is used) are generally omitted.
-
-void PreParser::ReportUnexpectedToken(i::Token::Value token) {
- // We don't report stack overflows here, to avoid increasing the
- // stack depth even further. Instead we report it after parsing is
- // over, in ParseProgram.
- if (token == i::Token::ILLEGAL && stack_overflow_) {
- return;
- }
- i::Scanner::Location source_location = scanner_->location();
-
- // Four of the tokens are treated specially
- switch (token) {
- case i::Token::EOS:
- return ReportMessageAt(source_location, "unexpected_eos", NULL);
- case i::Token::NUMBER:
- return ReportMessageAt(source_location, "unexpected_token_number", NULL);
- case i::Token::STRING:
- return ReportMessageAt(source_location, "unexpected_token_string", NULL);
- case i::Token::IDENTIFIER:
- return ReportMessageAt(source_location,
- "unexpected_token_identifier", NULL);
- case i::Token::FUTURE_RESERVED_WORD:
- return ReportMessageAt(source_location, "unexpected_reserved", NULL);
- case i::Token::FUTURE_STRICT_RESERVED_WORD:
- return ReportMessageAt(source_location,
- "unexpected_strict_reserved", NULL);
- default:
- const char* name = i::Token::String(token);
- ReportMessageAt(source_location, "unexpected_token", name);
- }
-}
-
-
-// Checks whether octal literal last seen is between beg_pos and end_pos.
-// If so, reports an error.
-void PreParser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- i::Scanner::Location octal = scanner_->octal_position();
- if (beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
- ReportMessageAt(octal, "strict_octal_literal", NULL);
- scanner_->clear_octal_position();
- *ok = false;
- }
-}
-
-
-#define CHECK_OK ok); \
- if (!*ok) return kUnknownSourceElements; \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-
-PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
- // (Ecma 262 5th Edition, clause 14):
- // SourceElement:
- // Statement
- // FunctionDeclaration
- //
- // In harmony mode we allow additionally the following productions
- // SourceElement:
- // LetDeclaration
- // ConstDeclaration
-
- switch (peek()) {
- case i::Token::FUNCTION:
- return ParseFunctionDeclaration(ok);
- case i::Token::LET:
- case i::Token::CONST:
- return ParseVariableStatement(kSourceElement, ok);
- default:
- return ParseStatement(ok);
- }
-}
-
-
-PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
- bool* ok) {
- // SourceElements ::
- // (Statement)* <end_token>
-
- bool allow_directive_prologue = true;
- while (peek() != end_token) {
- Statement statement = ParseSourceElement(CHECK_OK);
- if (allow_directive_prologue) {
- if (statement.IsUseStrictLiteral()) {
- set_language_mode(harmony_scoping_ ?
- i::EXTENDED_MODE : i::STRICT_MODE);
- } else if (!statement.IsStringLiteral()) {
- allow_directive_prologue = false;
- }
- }
- }
- return kUnknownSourceElements;
-}
-
-
-#undef CHECK_OK
-#define CHECK_OK ok); \
- if (!*ok) return Statement::Default(); \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-
-PreParser::Statement PreParser::ParseStatement(bool* ok) {
- // Statement ::
- // Block
- // VariableStatement
- // EmptyStatement
- // ExpressionStatement
- // IfStatement
- // IterationStatement
- // ContinueStatement
- // BreakStatement
- // ReturnStatement
- // WithStatement
- // LabelledStatement
- // SwitchStatement
- // ThrowStatement
- // TryStatement
- // DebuggerStatement
-
- // Note: Since labels can only be used by 'break' and 'continue'
- // statements, which themselves are only valid within blocks,
- // iterations or 'switch' statements (i.e., BreakableStatements),
- // labels can be simply ignored in all other cases; except for
- // trivial labeled break statements 'label: break label' which is
- // parsed into an empty statement.
-
- // Keep the source position of the statement
- switch (peek()) {
- case i::Token::LBRACE:
- return ParseBlock(ok);
-
- case i::Token::CONST:
- case i::Token::LET:
- case i::Token::VAR:
- return ParseVariableStatement(kStatement, ok);
-
- case i::Token::SEMICOLON:
- Next();
- return Statement::Default();
-
- case i::Token::IF:
- return ParseIfStatement(ok);
-
- case i::Token::DO:
- return ParseDoWhileStatement(ok);
-
- case i::Token::WHILE:
- return ParseWhileStatement(ok);
-
- case i::Token::FOR:
- return ParseForStatement(ok);
-
- case i::Token::CONTINUE:
- return ParseContinueStatement(ok);
-
- case i::Token::BREAK:
- return ParseBreakStatement(ok);
-
- case i::Token::RETURN:
- return ParseReturnStatement(ok);
-
- case i::Token::WITH:
- return ParseWithStatement(ok);
-
- case i::Token::SWITCH:
- return ParseSwitchStatement(ok);
-
- case i::Token::THROW:
- return ParseThrowStatement(ok);
-
- case i::Token::TRY:
- return ParseTryStatement(ok);
-
- case i::Token::FUNCTION: {
- i::Scanner::Location start_location = scanner_->peek_location();
- Statement statement = ParseFunctionDeclaration(CHECK_OK);
- i::Scanner::Location end_location = scanner_->location();
- if (!is_classic_mode()) {
- ReportMessageAt(start_location.beg_pos, end_location.end_pos,
- "strict_function", NULL);
- *ok = false;
- return Statement::Default();
- } else {
- return statement;
- }
- }
-
- case i::Token::DEBUGGER:
- return ParseDebuggerStatement(ok);
-
- default:
- return ParseExpressionOrLabelledStatement(ok);
- }
-}
-
-
// Preparses a function declaration. Skips the body via ParseFunctionLiteral;
// in strict/extended mode it additionally rejects 'eval'/'arguments' and
// future-reserved words as the function name.
PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
  // FunctionDeclaration ::
  //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
  Expect(i::Token::FUNCTION, CHECK_OK);

  Identifier identifier = ParseIdentifier(CHECK_OK);
  // Capture the name's location now; the literal parse below moves the scanner.
  i::Scanner::Location location = scanner_->location();

  Expression function_value = ParseFunctionLiteral(CHECK_OK);

  if (function_value.IsStrictFunction() &&
      !identifier.IsValidStrictVariable()) {
    // Strict mode violation, using either reserved word or eval/arguments
    // as name of strict function.
    const char* type = "strict_function_name";
    if (identifier.IsFutureStrictReserved()) {
      type = "strict_reserved_word";
    }
    ReportMessageAt(location, type, NULL);
    *ok = false;
  }
  return Statement::FunctionDeclaration();
}
-
-
// Preparses a brace-delimited block. In extended mode the contents are
// source elements (declarations allowed); otherwise plain statements.
PreParser::Statement PreParser::ParseBlock(bool* ok) {
  // Block ::
  //   '{' Statement* '}'

  // Note that a Block does not introduce a new execution scope!
  // (ECMA-262, 3rd, 12.2)
  //
  Expect(i::Token::LBRACE, CHECK_OK);
  while (peek() != i::Token::RBRACE) {
    if (is_extended_mode()) {
      ParseSourceElement(CHECK_OK);
    } else {
      ParseStatement(CHECK_OK);
    }
  }
  // Note: passes 'ok' directly since this is the last fallible step.
  Expect(i::Token::RBRACE, ok);
  return Statement::Default();
}
-
-
// Preparses a variable statement: the declaration list followed by a
// (possibly automatically inserted) semicolon.
PreParser::Statement PreParser::ParseVariableStatement(
    VariableDeclarationContext var_context,
    bool* ok) {
  // VariableStatement ::
  //   VariableDeclarations ';'

  // decl_props/num_decl are not needed here; only for-statements use them.
  Statement result = ParseVariableDeclarations(var_context,
                                               NULL,
                                               NULL,
                                               CHECK_OK);
  ExpectSemicolon(CHECK_OK);
  return result;
}
-
-
// If the variable declaration declares exactly one non-const
// variable, then *var is set to that variable. In all other cases,
// *var is untouched; in particular, it is the caller's responsibility
// to initialize it properly. This mechanism is also used for the parsing
// of 'for-in' loops.
//
// Preparses a 'var'/'const'/'let' declaration list (without the trailing
// semicolon). On return, *num_decl (if non-NULL) holds the number of
// variables declared and *decl_props (if non-NULL) is set to
// kHasInitializers when any declaration carried an initializer.
PreParser::Statement PreParser::ParseVariableDeclarations(
    VariableDeclarationContext var_context,
    VariableDeclarationProperties* decl_props,
    int* num_decl,
    bool* ok) {
  // VariableDeclarations ::
  //   ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
  //
  // The ES6 Draft Rev3 specifies the following grammar for const declarations
  //
  // ConstDeclaration ::
  //   const ConstBinding (',' ConstBinding)* ';'
  // ConstBinding ::
  //   Identifier '=' AssignmentExpression
  //
  // TODO(ES6):
  // ConstBinding ::
  //   BindingPattern '=' AssignmentExpression
  bool require_initializer = false;
  if (peek() == i::Token::VAR) {
    Consume(i::Token::VAR);
  } else if (peek() == i::Token::CONST) {
    // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
    //
    // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
    //
    // * It is a Syntax Error if the code that matches this production is not
    //   contained in extended code.
    //
    // However disallowing const in classic mode will break compatibility with
    // existing pages. Therefore we keep allowing const with the old
    // non-harmony semantics in classic mode.
    Consume(i::Token::CONST);
    switch (language_mode()) {
      case i::CLASSIC_MODE:
        break;
      case i::STRICT_MODE: {
        // 'const' is a syntax error in plain strict mode.
        i::Scanner::Location location = scanner_->peek_location();
        ReportMessageAt(location, "strict_const", NULL);
        *ok = false;
        return Statement::Default();
      }
      case i::EXTENDED_MODE:
        // Harmony const is block-scoped: only allowed where a source
        // element or for-statement initializer may appear.
        if (var_context != kSourceElement &&
            var_context != kForStatement) {
          i::Scanner::Location location = scanner_->peek_location();
          ReportMessageAt(location.beg_pos, location.end_pos,
                          "unprotected_const", NULL);
          *ok = false;
          return Statement::Default();
        }
        require_initializer = true;
        break;
    }
  } else if (peek() == i::Token::LET) {
    // ES6 Draft Rev4 section 12.2.1:
    //
    // LetDeclaration : let LetBindingList ;
    //
    // * It is a Syntax Error if the code that matches this production is not
    //   contained in extended code.
    if (!is_extended_mode()) {
      i::Scanner::Location location = scanner_->peek_location();
      ReportMessageAt(location.beg_pos, location.end_pos,
                      "illegal_let", NULL);
      *ok = false;
      return Statement::Default();
    }
    Consume(i::Token::LET);
    if (var_context != kSourceElement &&
        var_context != kForStatement) {
      i::Scanner::Location location = scanner_->peek_location();
      ReportMessageAt(location.beg_pos, location.end_pos,
                      "unprotected_let", NULL);
      *ok = false;
      return Statement::Default();
    }
  } else {
    // None of var/const/let: caller dispatched here incorrectly.
    *ok = false;
    return Statement::Default();
  }

  // The scope of a var/const declared variable anywhere inside a function
  // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). The scope
  // of a let declared variable is the scope of the immediately enclosing
  // block.
  int nvars = 0;  // the number of variables declared
  do {
    // Parse variable name.
    if (nvars > 0) Consume(i::Token::COMMA);
    Identifier identifier  = ParseIdentifier(CHECK_OK);
    if (!is_classic_mode() && !identifier.IsValidStrictVariable()) {
      StrictModeIdentifierViolation(scanner_->location(),
                                    "strict_var_name",
                                    identifier,
                                    ok);
      return Statement::Default();
    }
    nvars++;
    if (peek() == i::Token::ASSIGN || require_initializer) {
      Expect(i::Token::ASSIGN, CHECK_OK);
      // 'in' is disallowed in a for-statement initializer expression.
      ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
      if (decl_props != NULL) *decl_props = kHasInitializers;
    }
  } while (peek() == i::Token::COMMA);

  if (num_decl != NULL) *num_decl = nvars;
  return Statement::Default();
}
-
-
// Preparses either an expression statement or a labelled statement; the
// two cannot be distinguished until after the leading expression is read.
PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
  // ExpressionStatement | LabelledStatement ::
  //   Expression ';'
  //   Identifier ':' Statement

  Expression expr = ParseExpression(true, CHECK_OK);
  if (expr.IsRawIdentifier()) {
    // A bare identifier followed by ':' is a label.
    ASSERT(!expr.AsIdentifier().IsFutureReserved());
    ASSERT(is_classic_mode() || !expr.AsIdentifier().IsFutureStrictReserved());
    if (peek() == i::Token::COLON) {
      Consume(i::Token::COLON);
      return ParseStatement(ok);
    }
    // Preparsing is disabled for extensions (because the extension details
    // aren't passed to lazily compiled functions), so we don't
    // accept "native function" in the preparser.
  }
  // Parsed expression statement.
  ExpectSemicolon(CHECK_OK);
  return Statement::ExpressionStatement(expr);
}
-
-
// Preparses an if statement including an optional else branch.
PreParser::Statement PreParser::ParseIfStatement(bool* ok) {
  // IfStatement ::
  //   'if' '(' Expression ')' Statement ('else' Statement)?

  Expect(i::Token::IF, CHECK_OK);
  Expect(i::Token::LPAREN, CHECK_OK);
  ParseExpression(true, CHECK_OK);
  Expect(i::Token::RPAREN, CHECK_OK);
  ParseStatement(CHECK_OK);
  if (peek() == i::Token::ELSE) {
    Next();
    ParseStatement(CHECK_OK);
  }
  return Statement::Default();
}
-
-
// Preparses a continue statement with an optional label. The label is only
// consumed when no line terminator precedes it (ASI restriction).
PreParser::Statement PreParser::ParseContinueStatement(bool* ok) {
  // ContinueStatement ::
  //   'continue' [no line terminator] Identifier? ';'

  Expect(i::Token::CONTINUE, CHECK_OK);
  i::Token::Value tok = peek();
  if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
      tok != i::Token::SEMICOLON &&
      tok != i::Token::RBRACE &&
      tok != i::Token::EOS) {
    ParseIdentifier(CHECK_OK);
  }
  ExpectSemicolon(CHECK_OK);
  return Statement::Default();
}
-
-
// Preparses a break statement with an optional label. Mirrors
// ParseContinueStatement, including the no-line-terminator restriction.
PreParser::Statement PreParser::ParseBreakStatement(bool* ok) {
  // BreakStatement ::
  //   'break' [no line terminator] Identifier? ';'

  Expect(i::Token::BREAK, CHECK_OK);
  i::Token::Value tok = peek();
  if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
      tok != i::Token::SEMICOLON &&
      tok != i::Token::RBRACE &&
      tok != i::Token::EOS) {
    ParseIdentifier(CHECK_OK);
  }
  ExpectSemicolon(CHECK_OK);
  return Statement::Default();
}
-
-
// Preparses a return statement with an optional return expression. The
// expression is only consumed when no line terminator follows 'return'.
PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
  // ReturnStatement ::
  //   'return' [no line terminator] Expression? ';'

  // Consume the return token. It is necessary to do the before
  // reporting any errors on it, because of the way errors are
  // reported (underlining).
  Expect(i::Token::RETURN, CHECK_OK);

  // An ECMAScript program is considered syntactically incorrect if it
  // contains a return statement that is not within the body of a
  // function. See ECMA-262, section 12.9, page 67.
  // This is not handled during preparsing.

  i::Token::Value tok = peek();
  if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
      tok != i::Token::SEMICOLON &&
      tok != i::Token::RBRACE &&
      tok != i::Token::EOS) {
    ParseExpression(true, CHECK_OK);
  }
  ExpectSemicolon(CHECK_OK);
  return Statement::Default();
}
-
-
// Preparses a with statement. 'with' is forbidden outside classic mode;
// the body is parsed with the scope flagged as inside-with, which
// disables lazy compilation of functions in the body.
PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
  // WithStatement ::
  //   'with' '(' Expression ')' Statement
  Expect(i::Token::WITH, CHECK_OK);
  if (!is_classic_mode()) {
    i::Scanner::Location location = scanner_->location();
    ReportMessageAt(location, "strict_mode_with", NULL);
    *ok = false;
    return Statement::Default();
  }
  Expect(i::Token::LPAREN, CHECK_OK);
  ParseExpression(true, CHECK_OK);
  Expect(i::Token::RPAREN, CHECK_OK);

  // RAII guard: marks the scope as inside-with for the body only.
  Scope::InsideWith iw(scope_);
  ParseStatement(CHECK_OK);
  return Statement::Default();
}
-
-
// Preparses a switch statement: tag expression, then any number of
// 'case expr:' / 'default:' clauses, each followed by statements.
PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
  // SwitchStatement ::
  //   'switch' '(' Expression ')' '{' CaseClause* '}'

  Expect(i::Token::SWITCH, CHECK_OK);
  Expect(i::Token::LPAREN, CHECK_OK);
  ParseExpression(true, CHECK_OK);
  Expect(i::Token::RPAREN, CHECK_OK);

  Expect(i::Token::LBRACE, CHECK_OK);
  i::Token::Value token = peek();
  while (token != i::Token::RBRACE) {
    if (token == i::Token::CASE) {
      Expect(i::Token::CASE, CHECK_OK);
      ParseExpression(true, CHECK_OK);
    } else {
      // Anything other than 'case' must be 'default' here.
      Expect(i::Token::DEFAULT, CHECK_OK);
    }
    Expect(i::Token::COLON, CHECK_OK);
    token = peek();
    // Statements belonging to this clause, up to the next clause or '}'.
    while (token != i::Token::CASE &&
           token != i::Token::DEFAULT &&
           token != i::Token::RBRACE) {
      ParseStatement(CHECK_OK);
      token = peek();
    }
  }
  Expect(i::Token::RBRACE, ok);
  return Statement::Default();
}
-
-
// Preparses a do-while statement. The trailing semicolon is optional
// (consumed only if present), matching the full parser's behavior.
PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) {
  // DoStatement ::
  //   'do' Statement 'while' '(' Expression ')' ';'

  Expect(i::Token::DO, CHECK_OK);
  ParseStatement(CHECK_OK);
  Expect(i::Token::WHILE, CHECK_OK);
  Expect(i::Token::LPAREN, CHECK_OK);
  ParseExpression(true, CHECK_OK);
  Expect(i::Token::RPAREN, ok);
  if (peek() == i::Token::SEMICOLON) Consume(i::Token::SEMICOLON);
  return Statement::Default();
}
-
-
// Preparses a while statement.
PreParser::Statement PreParser::ParseWhileStatement(bool* ok) {
  // WhileStatement ::
  //   'while' '(' Expression ')' Statement

  Expect(i::Token::WHILE, CHECK_OK);
  Expect(i::Token::LPAREN, CHECK_OK);
  ParseExpression(true, CHECK_OK);
  Expect(i::Token::RPAREN, CHECK_OK);
  ParseStatement(ok);
  return Statement::Default();
}
-
-
// Preparses a for statement, including for-in variants. A for-in with a
// declaration head is only accepted when exactly one variable is declared
// and a 'let' head has no initializer.
PreParser::Statement PreParser::ParseForStatement(bool* ok) {
  // ForStatement ::
  //   'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement

  Expect(i::Token::FOR, CHECK_OK);
  Expect(i::Token::LPAREN, CHECK_OK);
  if (peek() != i::Token::SEMICOLON) {
    if (peek() == i::Token::VAR || peek() == i::Token::CONST ||
        peek() == i::Token::LET) {
      bool is_let = peek() == i::Token::LET;
      int decl_count;
      VariableDeclarationProperties decl_props = kHasNoInitializers;
      ParseVariableDeclarations(
          kForStatement, &decl_props, &decl_count, CHECK_OK);
      // for-in is only valid with a single declared variable, and
      // 'for (let x = v in ...)' is disallowed.
      bool accept_IN = decl_count == 1 &&
          !(is_let && decl_props == kHasInitializers);
      if (peek() == i::Token::IN && accept_IN) {
        Expect(i::Token::IN, CHECK_OK);
        ParseExpression(true, CHECK_OK);
        Expect(i::Token::RPAREN, CHECK_OK);

        ParseStatement(CHECK_OK);
        return Statement::Default();
      }
    } else {
      // Expression head: 'in' disallowed so it can signal a for-in loop.
      ParseExpression(false, CHECK_OK);
      if (peek() == i::Token::IN) {
        Expect(i::Token::IN, CHECK_OK);
        ParseExpression(true, CHECK_OK);
        Expect(i::Token::RPAREN, CHECK_OK);

        ParseStatement(CHECK_OK);
        return Statement::Default();
      }
    }
  }

  // Parsed initializer at this point.
  Expect(i::Token::SEMICOLON, CHECK_OK);

  if (peek() != i::Token::SEMICOLON) {
    ParseExpression(true, CHECK_OK);
  }
  Expect(i::Token::SEMICOLON, CHECK_OK);

  if (peek() != i::Token::RPAREN) {
    ParseExpression(true, CHECK_OK);
  }
  Expect(i::Token::RPAREN, CHECK_OK);

  ParseStatement(ok);
  return Statement::Default();
}
-
-
// Preparses a throw statement. A line terminator between 'throw' and the
// expression is a syntax error (ECMA-262 restricted production).
PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
  // ThrowStatement ::
  //   'throw' [no line terminator] Expression ';'

  Expect(i::Token::THROW, CHECK_OK);
  if (scanner_->HasAnyLineTerminatorBeforeNext()) {
    i::Scanner::Location pos = scanner_->location();
    ReportMessageAt(pos, "newline_after_throw", NULL);
    *ok = false;
    return Statement::Default();
  }
  ParseExpression(true, CHECK_OK);
  ExpectSemicolon(ok);
  return Statement::Default();
}
-
-
// Preparses a try statement; at least one of catch/finally must follow
// the try block. The catch body is treated like a with-body (catch
// introduces a dynamic-ish scope), disabling lazy compilation inside it.
PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
  // TryStatement ::
  //   'try' Block Catch
  //   'try' Block Finally
  //   'try' Block Catch Finally
  //
  // Catch ::
  //   'catch' '(' Identifier ')' Block
  //
  // Finally ::
  //   'finally' Block

  // In preparsing, allow any number of catch/finally blocks, including zero
  // of both.

  Expect(i::Token::TRY, CHECK_OK);

  ParseBlock(CHECK_OK);

  bool catch_or_finally_seen = false;
  if (peek() == i::Token::CATCH) {
    Consume(i::Token::CATCH);
    Expect(i::Token::LPAREN, CHECK_OK);
    Identifier id = ParseIdentifier(CHECK_OK);
    // Strict mode forbids eval/arguments as the catch variable.
    if (!is_classic_mode() && !id.IsValidStrictVariable()) {
      StrictModeIdentifierViolation(scanner_->location(),
                                    "strict_catch_variable",
                                    id,
                                    ok);
      return Statement::Default();
    }
    Expect(i::Token::RPAREN, CHECK_OK);
    { Scope::InsideWith iw(scope_);
      ParseBlock(CHECK_OK);
    }
    catch_or_finally_seen = true;
  }
  if (peek() == i::Token::FINALLY) {
    Consume(i::Token::FINALLY);
    ParseBlock(CHECK_OK);
    catch_or_finally_seen = true;
  }
  if (!catch_or_finally_seen) {
    *ok = false;
  }
  return Statement::Default();
}
-
-
// Preparses a debugger statement.
PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
  // In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
  // contexts this is used as a statement which invokes the debugger as if a
  // break point is present.
  // DebuggerStatement ::
  //   'debugger' ';'

  Expect(i::Token::DEBUGGER, CHECK_OK);
  ExpectSemicolon(ok);
  return Statement::Default();
}
-
-
#undef CHECK_OK
// Redefinition of CHECK_OK for the expression-parsing section: on failure
// it bails out with Expression::Default() instead of Statement::Default().
// Written so that "Foo(arg, CHECK_OK);" expands to the call followed by an
// error check; the trailing "((void)0" absorbs the caller's closing ')'.
#define CHECK_OK  ok);  \
  if (!*ok) return Expression::Default();  \
  ((void)0
#define DUMMY )  // to make indentation work
#undef DUMMY
-
-
// Precedence = 1
// Preparses a comma-expression. Only a single AssignmentExpression keeps
// its identity; any comma sequence degrades to Expression::Default().
PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
  // Expression ::
  //   AssignmentExpression
  //   Expression ',' AssignmentExpression

  Expression result = ParseAssignmentExpression(accept_IN, CHECK_OK);
  while (peek() == i::Token::COMMA) {
    Expect(i::Token::COMMA, CHECK_OK);
    ParseAssignmentExpression(accept_IN, CHECK_OK);
    result = Expression::Default();
  }
  return result;
}
-
-
// Precedence = 2
// Preparses an assignment expression. In strict mode, assigning to
// eval/arguments is rejected. An assignment to a this-property bumps the
// scope's expected property count (used for object size estimation).
PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
                                                           bool* ok) {
  // AssignmentExpression ::
  //   ConditionalExpression
  //   LeftHandSideExpression AssignmentOperator AssignmentExpression

  i::Scanner::Location before = scanner_->peek_location();
  Expression expression = ParseConditionalExpression(accept_IN, CHECK_OK);

  if (!i::Token::IsAssignmentOp(peek())) {
    // Parsed conditional expression only (no assignment).
    return expression;
  }

  if (!is_classic_mode() &&
      expression.IsIdentifier() &&
      expression.AsIdentifier().IsEvalOrArguments()) {
    i::Scanner::Location after = scanner_->location();
    ReportMessageAt(before.beg_pos, after.end_pos,
                    "strict_lhs_assignment", NULL);
    *ok = false;
    return Expression::Default();
  }

  i::Token::Value op = Next();  // Get assignment operator.
  ParseAssignmentExpression(accept_IN, CHECK_OK);

  // 'this.x = ...' counts as an expected property of the constructed object.
  if ((op == i::Token::ASSIGN) && expression.IsThisProperty()) {
    scope_->AddProperty();
  }

  return Expression::Default();
}
-
-
// Precedence = 3
// Preparses a conditional (?:) expression.
PreParser::Expression PreParser::ParseConditionalExpression(bool accept_IN,
                                                            bool* ok) {
  // ConditionalExpression ::
  //   LogicalOrExpression
  //   LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression

  // We start using the binary expression parser for prec >= 4 only!
  Expression expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
  if (peek() != i::Token::CONDITIONAL) return expression;
  Consume(i::Token::CONDITIONAL);
  // In parsing the first assignment expression in conditional
  // expressions we always accept the 'in' keyword; see ECMA-262,
  // section 11.12, page 58.
  ParseAssignmentExpression(true, CHECK_OK);
  Expect(i::Token::COLON, CHECK_OK);
  ParseAssignmentExpression(accept_IN, CHECK_OK);
  return Expression::Default();
}
-
-
-int PreParser::Precedence(i::Token::Value tok, bool accept_IN) {
- if (tok == i::Token::IN && !accept_IN)
- return 0; // 0 precedence will terminate binary expression parsing
-
- return i::Token::Precedence(tok);
-}
-
-
// Precedence >= 4
// Preparses a binary expression via precedence climbing: handles all
// operators with precedence >= 'prec', highest precedence first.
PreParser::Expression PreParser::ParseBinaryExpression(int prec,
                                                       bool accept_IN,
                                                       bool* ok) {
  Expression result = ParseUnaryExpression(CHECK_OK);
  for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
    // prec1 >= 4
    while (Precedence(peek(), accept_IN) == prec1) {
      Next();
      // Right operand binds tighter: parse with precedence prec1 + 1.
      ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
      result = Expression::Default();
    }
  }
  return result;
}
-
-
// Preparses a unary expression. Strict mode forbids '++'/'--' applied to
// eval/arguments.
PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) {
  // UnaryExpression ::
  //   PostfixExpression
  //   'delete' UnaryExpression
  //   'void' UnaryExpression
  //   'typeof' UnaryExpression
  //   '++' UnaryExpression
  //   '--' UnaryExpression
  //   '+' UnaryExpression
  //   '-' UnaryExpression
  //   '~' UnaryExpression
  //   '!' UnaryExpression

  i::Token::Value op = peek();
  if (i::Token::IsUnaryOp(op)) {
    op = Next();
    ParseUnaryExpression(ok);
    return Expression::Default();
  } else if (i::Token::IsCountOp(op)) {
    // Prefix increment/decrement.
    op = Next();
    i::Scanner::Location before = scanner_->peek_location();
    Expression expression = ParseUnaryExpression(CHECK_OK);
    if (!is_classic_mode() &&
        expression.IsIdentifier() &&
        expression.AsIdentifier().IsEvalOrArguments()) {
      i::Scanner::Location after = scanner_->location();
      ReportMessageAt(before.beg_pos, after.end_pos,
                      "strict_lhs_prefix", NULL);
      *ok = false;
    }
    return Expression::Default();
  } else {
    return ParsePostfixExpression(ok);
  }
}
-
-
// Preparses a postfix expression. A postfix '++'/'--' must be on the same
// line as its operand (ASI restriction), and in strict mode must not apply
// to eval/arguments.
PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) {
  // PostfixExpression ::
  //   LeftHandSideExpression ('++' | '--')?

  i::Scanner::Location before = scanner_->peek_location();
  Expression expression = ParseLeftHandSideExpression(CHECK_OK);
  if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
      i::Token::IsCountOp(peek())) {
    if (!is_classic_mode() &&
        expression.IsIdentifier() &&
        expression.AsIdentifier().IsEvalOrArguments()) {
      i::Scanner::Location after = scanner_->location();
      ReportMessageAt(before.beg_pos, after.end_pos,
                      "strict_lhs_postfix", NULL);
      *ok = false;
      return Expression::Default();
    }
    Next();
    return Expression::Default();
  }
  return expression;
}
-
-
// Preparses a left-hand-side expression: a new/member expression followed
// by any chain of '[...]' subscripts, '(...)' calls and '.name' accesses.
// Only 'this.prop' accesses keep a distinguished expression kind.
PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
  // LeftHandSideExpression ::
  //   (NewExpression | MemberExpression) ...

  Expression result = Expression::Default();
  if (peek() == i::Token::NEW) {
    result = ParseNewExpression(CHECK_OK);
  } else {
    result = ParseMemberExpression(CHECK_OK);
  }

  while (true) {
    switch (peek()) {
      case i::Token::LBRACK: {
        Consume(i::Token::LBRACK);
        ParseExpression(true, CHECK_OK);
        Expect(i::Token::RBRACK, CHECK_OK);
        if (result.IsThis()) {
          result = Expression::ThisProperty();
        } else {
          result = Expression::Default();
        }
        break;
      }

      case i::Token::LPAREN: {
        ParseArguments(CHECK_OK);
        result = Expression::Default();
        break;
      }

      case i::Token::PERIOD: {
        Consume(i::Token::PERIOD);
        ParseIdentifierName(CHECK_OK);
        if (result.IsThis()) {
          result = Expression::ThisProperty();
        } else {
          result = Expression::Default();
        }
        break;
      }

      default:
        return result;
    }
  }
}
-
-
// Preparses a chain of 'new' prefixes, counting them so that the member
// expression parser knows how many argument lists belong to 'new' calls.
PreParser::Expression PreParser::ParseNewExpression(bool* ok) {
  // NewExpression ::
  //   ('new')+ MemberExpression

  // The grammar for new expressions is pretty warped. The keyword
  // 'new' can either be a part of the new expression (where it isn't
  // followed by an argument list) or a part of the member expression,
  // where it must be followed by an argument list. To accommodate
  // this, we parse the 'new' keywords greedily and keep track of how
  // many we have parsed. This information is then passed on to the
  // member expression parser, which is only allowed to match argument
  // lists as long as it has 'new' prefixes left
  unsigned new_count = 0;
  do {
    Consume(i::Token::NEW);
    new_count++;
  } while (peek() == i::Token::NEW);

  return ParseMemberWithNewPrefixesExpression(new_count, ok);
}
-
-
// MemberExpression with no preceding 'new' keywords: delegate with a zero
// 'new' count so that no argument list is consumed as a 'new' call.
PreParser::Expression PreParser::ParseMemberExpression(bool* ok) {
  return ParseMemberWithNewPrefixesExpression(0, ok);
}
-
-
// Preparses a member expression given 'new_count' pending 'new' prefixes.
// Each '(...)' argument list consumes one 'new' prefix; once none remain,
// a '(' ends the member expression (it belongs to the enclosing call).
PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
    unsigned new_count, bool* ok) {
  // MemberExpression ::
  //   (PrimaryExpression | FunctionLiteral)
  //     ('[' Expression ']' | '.' Identifier | Arguments)*

  // Parse the initial primary or function expression.
  Expression result = Expression::Default();
  if (peek() == i::Token::FUNCTION) {
    // Function expression, possibly named.
    Consume(i::Token::FUNCTION);
    Identifier identifier = Identifier::Default();
    if (peek_any_identifier()) {
      identifier = ParseIdentifier(CHECK_OK);
    }
    result = ParseFunctionLiteral(CHECK_OK);
    if (result.IsStrictFunction() && !identifier.IsValidStrictVariable()) {
      StrictModeIdentifierViolation(scanner_->location(),
                                    "strict_function_name",
                                    identifier,
                                    ok);
      return Expression::Default();
    }
  } else {
    result = ParsePrimaryExpression(CHECK_OK);
  }

  while (true) {
    switch (peek()) {
      case i::Token::LBRACK: {
        Consume(i::Token::LBRACK);
        ParseExpression(true, CHECK_OK);
        Expect(i::Token::RBRACK, CHECK_OK);
        if (result.IsThis()) {
          result = Expression::ThisProperty();
        } else {
          result = Expression::Default();
        }
        break;
      }
      case i::Token::PERIOD: {
        Consume(i::Token::PERIOD);
        ParseIdentifierName(CHECK_OK);
        if (result.IsThis()) {
          result = Expression::ThisProperty();
        } else {
          result = Expression::Default();
        }
        break;
      }
      case i::Token::LPAREN: {
        if (new_count == 0) return result;
        // Consume one of the new prefixes (already parsed).
        ParseArguments(CHECK_OK);
        new_count--;
        result = Expression::Default();
        break;
      }
      default:
        return result;
    }
  }
}
-
-
// Preparses a primary expression. Distinguished results ('this', string
// literals, identifiers, parenthesized expressions) carry enough kind
// information for the strict-mode and lazy-compilation checks upstream.
PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
  // PrimaryExpression ::
  //   'this'
  //   'null'
  //   'true'
  //   'false'
  //   Identifier
  //   Number
  //   String
  //   ArrayLiteral
  //   ObjectLiteral
  //   RegExpLiteral
  //   '(' Expression ')'

  Expression result = Expression::Default();
  switch (peek()) {
    case i::Token::THIS: {
      Next();
      result = Expression::This();
      break;
    }

    case i::Token::FUTURE_RESERVED_WORD: {
      // Future reserved words are errors in every mode.
      Next();
      i::Scanner::Location location = scanner_->location();
      ReportMessageAt(location.beg_pos, location.end_pos,
                      "reserved_word", NULL);
      *ok = false;
      return Expression::Default();
    }

    case i::Token::FUTURE_STRICT_RESERVED_WORD:
      // Only an error outside classic mode; otherwise treated as identifier.
      if (!is_classic_mode()) {
        Next();
        i::Scanner::Location location = scanner_->location();
        ReportMessageAt(location, "strict_reserved_word", NULL);
        *ok = false;
        return Expression::Default();
      }
      // FALLTHROUGH
    case i::Token::IDENTIFIER: {
      Identifier id = ParseIdentifier(CHECK_OK);
      result = Expression::FromIdentifier(id);
      break;
    }

    case i::Token::NULL_LITERAL:
    case i::Token::TRUE_LITERAL:
    case i::Token::FALSE_LITERAL:
    case i::Token::NUMBER: {
      Next();
      break;
    }
    case i::Token::STRING: {
      Next();
      // May be the "use strict" directive prologue string.
      result = GetStringSymbol();
      break;
    }

    case i::Token::ASSIGN_DIV:
      // '/=' at expression start means a regexp beginning with '='.
      result = ParseRegExpLiteral(true, CHECK_OK);
      break;

    case i::Token::DIV:
      result = ParseRegExpLiteral(false, CHECK_OK);
      break;

    case i::Token::LBRACK:
      result = ParseArrayLiteral(CHECK_OK);
      break;

    case i::Token::LBRACE:
      result = ParseObjectLiteral(CHECK_OK);
      break;

    case i::Token::LPAREN:
      Consume(i::Token::LPAREN);
      // '(function' marks an IIFE candidate: compile it eagerly.
      parenthesized_function_ = (peek() == i::Token::FUNCTION);
      result = ParseExpression(true, CHECK_OK);
      Expect(i::Token::RPAREN, CHECK_OK);
      result = result.Parenthesize();
      break;

    case i::Token::MOD:
      // '%' prefixes a V8 runtime-call intrinsic.
      result = ParseV8Intrinsic(CHECK_OK);
      break;

    default: {
      Next();
      *ok = false;
      return Expression::Default();
    }
  }

  return result;
}
-
-
// Preparses an array literal, allowing elisions (holes) between commas.
// Reserves a materialized-literal index in the enclosing function scope.
PreParser::Expression PreParser::ParseArrayLiteral(bool* ok) {
  // ArrayLiteral ::
  //   '[' Expression? (',' Expression?)* ']'
  Expect(i::Token::LBRACK, CHECK_OK);
  while (peek() != i::Token::RBRACK) {
    // A bare comma is an elision: no expression to parse.
    if (peek() != i::Token::COMMA) {
      ParseAssignmentExpression(true, CHECK_OK);
    }
    if (peek() != i::Token::RBRACK) {
      Expect(i::Token::COMMA, CHECK_OK);
    }
  }
  Expect(i::Token::RBRACK, CHECK_OK);

  scope_->NextMaterializedLiteralIndex();
  return Expression::Default();
}
-
// Records the current literal as an object-literal property of the given
// type in 'finder' and reports an error for illegal duplicates:
// data/data duplicates (strict mode only), data/accessor clashes, and
// duplicate accessors of the same kind.
void PreParser::CheckDuplicate(DuplicateFinder* finder,
                               i::Token::Value property,
                               int type,
                               bool* ok) {
  int old_type;
  if (property == i::Token::NUMBER) {
    // Numeric keys are canonicalized so 1 and 1.0 collide.
    old_type = finder->AddNumber(scanner_->literal_ascii_string(), type);
  } else if (scanner_->is_literal_ascii()) {
    old_type = finder->AddAsciiSymbol(scanner_->literal_ascii_string(),
                                      type);
  } else {
    old_type = finder->AddUtf16Symbol(scanner_->literal_utf16_string(), type);
  }
  if (HasConflict(old_type, type)) {
    if (IsDataDataConflict(old_type, type)) {
      // Both are data properties.
      if (is_classic_mode()) return;
      ReportMessageAt(scanner_->location(),
                      "strict_duplicate_property", NULL);
    } else if (IsDataAccessorConflict(old_type, type)) {
      // Both a data and an accessor property with the same name.
      ReportMessageAt(scanner_->location(),
                      "accessor_data_property", NULL);
    } else {
      ASSERT(IsAccessorAccessorConflict(old_type, type));
      // Both accessors of the same type.
      ReportMessageAt(scanner_->location(),
                      "accessor_get_set", NULL);
    }
    *ok = false;
  }
}
-
-
// Preparses an object literal, including get/set accessor properties, and
// checks for illegal duplicate property names via CheckDuplicate.
// Reserves a materialized-literal index in the enclosing function scope.
PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
  // ObjectLiteral ::
  //   '{' (
  //       ((IdentifierName | String | Number) ':' AssignmentExpression)
  //     | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
  //    )*[','] '}'

  Expect(i::Token::LBRACE, CHECK_OK);
  DuplicateFinder duplicate_finder(scanner_->unicode_cache());
  while (peek() != i::Token::RBRACE) {
    i::Token::Value next = peek();
    switch (next) {
      case i::Token::IDENTIFIER:
      case i::Token::FUTURE_RESERVED_WORD:
      case i::Token::FUTURE_STRICT_RESERVED_WORD: {
        bool is_getter = false;
        bool is_setter = false;
        ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
        // 'get'/'set' NOT followed by ':' introduces an accessor property.
        if ((is_getter || is_setter) && peek() != i::Token::COLON) {
            i::Token::Value name = Next();
            bool is_keyword = i::Token::IsKeyword(name);
            // The accessor's property name may be any identifier name,
            // string, number or keyword.
            if (name != i::Token::IDENTIFIER &&
                name != i::Token::FUTURE_RESERVED_WORD &&
                name != i::Token::FUTURE_STRICT_RESERVED_WORD &&
                name != i::Token::NUMBER &&
                name != i::Token::STRING &&
                !is_keyword) {
              *ok = false;
              return Expression::Default();
            }
            if (!is_keyword) {
              LogSymbol();
            }
            PropertyType type = is_getter ? kGetterProperty : kSetterProperty;
            CheckDuplicate(&duplicate_finder, name, type, CHECK_OK);
            ParseFunctionLiteral(CHECK_OK);
            if (peek() != i::Token::RBRACE) {
              Expect(i::Token::COMMA, CHECK_OK);
            }
            continue;  // restart the while
        }
        CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
        break;
      }
      case i::Token::STRING:
        Consume(next);
        CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
        GetStringSymbol();
        break;
      case i::Token::NUMBER:
        Consume(next);
        CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
        break;
      default:
        // Keywords are valid property names; anything else is an error.
        if (i::Token::IsKeyword(next)) {
          Consume(next);
          CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
        } else {
          // Unexpected token.
          *ok = false;
          return Expression::Default();
        }
    }

    Expect(i::Token::COLON, CHECK_OK);
    ParseAssignmentExpression(true, CHECK_OK);

    // TODO(1240767): Consider allowing trailing comma.
    if (peek() != i::Token::RBRACE) Expect(i::Token::COMMA, CHECK_OK);
  }
  Expect(i::Token::RBRACE, CHECK_OK);

  scope_->NextMaterializedLiteralIndex();
  return Expression::Default();
}
-
-
// Preparses a regexp literal by asking the scanner to scan the pattern and
// flags directly ('seen_equal' indicates the literal began with '/=').
// Reserves a materialized-literal index in the enclosing function scope.
PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal,
                                                    bool* ok) {
  if (!scanner_->ScanRegExpPattern(seen_equal)) {
    Next();
    ReportMessageAt(scanner_->location(), "unterminated_regexp", NULL);
    *ok = false;
    return Expression::Default();
  }

  scope_->NextMaterializedLiteralIndex();

  if (!scanner_->ScanRegExpFlags()) {
    Next();
    ReportMessageAt(scanner_->location(), "invalid_regexp_flags", NULL);
    *ok = false;
    return Expression::Default();
  }
  Next();
  return Expression::Default();
}
-
-
// Preparses a parenthesized argument list and returns the argument count,
// or -1 on a parse error. Uses manual 'ok' checks instead of CHECK_OK
// because the return type here is an int, not Expression.
PreParser::Arguments PreParser::ParseArguments(bool* ok) {
  // Arguments ::
  //   '(' (AssignmentExpression)*[','] ')'

  Expect(i::Token::LPAREN, ok);
  if (!*ok) return -1;
  bool done = (peek() == i::Token::RPAREN);
  int argc = 0;
  while (!done) {
    ParseAssignmentExpression(true, ok);
    if (!*ok) return -1;
    argc++;
    done = (peek() == i::Token::RPAREN);
    if (!done) {
      Expect(i::Token::COMMA, ok);
      if (!*ok) return -1;
    }
  }
  Expect(i::Token::RPAREN, ok);
  return argc;
}
-
-
// Preparses a function literal from '(' of the parameter list onward.
// Checks strict-mode parameter rules (name validity, duplicates), decides
// whether the body can be skipped via the lazy path, and on a strict body
// runs the deferred octal/identifier checks over the whole literal.
PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
  // Function ::
  //   '(' FormalParameterList? ')' '{' FunctionBody '}'

  // Parse function body.
  ScopeType outer_scope_type = scope_->type();
  bool inside_with = scope_->IsInsideWith();
  // Pushes a fresh function scope; popped when this frame exits.
  Scope function_scope(&scope_, kFunctionScope);
  //  FormalParameterList ::
  //    '(' (Identifier)*[','] ')'
  Expect(i::Token::LPAREN, CHECK_OK);
  int start_position = scanner_->location().beg_pos;
  bool done = (peek() == i::Token::RPAREN);
  DuplicateFinder duplicate_finder(scanner_->unicode_cache());
  while (!done) {
    Identifier id = ParseIdentifier(CHECK_OK);
    if (!id.IsValidStrictVariable()) {
      StrictModeIdentifierViolation(scanner_->location(),
                                    "strict_param_name",
                                    id,
                                    CHECK_OK);
    }
    int prev_value;
    if (scanner_->is_literal_ascii()) {
      prev_value =
          duplicate_finder.AddAsciiSymbol(scanner_->literal_ascii_string(), 1);
    } else {
      prev_value =
          duplicate_finder.AddUtf16Symbol(scanner_->literal_utf16_string(), 1);
    }

    // A non-zero previous value means this parameter name was seen before.
    if (prev_value != 0) {
      SetStrictModeViolation(scanner_->location(),
                             "strict_param_dupe",
                             CHECK_OK);
    }
    done = (peek() == i::Token::RPAREN);
    if (!done) {
      Expect(i::Token::COMMA, CHECK_OK);
    }
  }
  Expect(i::Token::RPAREN, CHECK_OK);

  // Determine if the function will be lazily compiled.
  // Currently only happens to top-level functions.
  // Optimistically assume that all top-level functions are lazily compiled.
  bool is_lazily_compiled = (outer_scope_type == kTopLevelScope &&
                             !inside_with && allow_lazy_ &&
                             !parenthesized_function_);
  parenthesized_function_ = false;

  Expect(i::Token::LBRACE, CHECK_OK);
  if (is_lazily_compiled) {
    ParseLazyFunctionLiteralBody(CHECK_OK);
  } else {
    ParseSourceElements(i::Token::RBRACE, ok);
  }
  Expect(i::Token::RBRACE, CHECK_OK);

  if (!is_classic_mode()) {
    // Strict body: re-check the whole literal span, including parameters.
    int end_position = scanner_->location().end_pos;
    CheckOctalLiteral(start_position, end_position, CHECK_OK);
    CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK);
    return Expression::StrictFunction();
  }

  return Expression::Default();
}
-
-
// Preparses a lazily-compiled function body with symbol recording paused,
// then logs the body's extent and scope statistics so the full parser can
// later skip straight past it.
void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
  int body_start = scanner_->location().beg_pos;
  log_->PauseRecording();
  ParseSourceElements(i::Token::RBRACE, ok);
  log_->ResumeRecording();
  if (!*ok) return;

  // Position right after terminal '}'.
  ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
  int body_end = scanner_->peek_location().end_pos;
  log_->LogFunction(body_start, body_end,
                    scope_->materialized_literal_count(),
                    scope_->expected_properties(),
                    language_mode());
}
-
-
// Preparses a '%RuntimeFunction(args)' V8 intrinsic call; an error unless
// natives syntax is enabled (--allow-natives-syntax).
PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
  // CallRuntime ::
  //   '%' Identifier Arguments
  Expect(i::Token::MOD, CHECK_OK);
  if (!allow_natives_syntax_) {
    *ok = false;
    return Expression::Default();
  }
  ParseIdentifier(CHECK_OK);
  ParseArguments(ok);

  return Expression::Default();
}
-
-#undef CHECK_OK
-
-
-void PreParser::ExpectSemicolon(bool* ok) {
- // Check for automatic semicolon insertion according to
- // the rules given in ECMA-262, section 7.9, page 21.
- i::Token::Value tok = peek();
- if (tok == i::Token::SEMICOLON) {
- Next();
- return;
- }
- if (scanner_->HasAnyLineTerminatorBeforeNext() ||
- tok == i::Token::RBRACE ||
- tok == i::Token::EOS) {
- return;
- }
- Expect(i::Token::SEMICOLON, ok);
-}
-
-
-void PreParser::LogSymbol() {
- int identifier_pos = scanner_->location().beg_pos;
- if (scanner_->is_literal_ascii()) {
- log_->LogAsciiSymbol(identifier_pos, scanner_->literal_ascii_string());
- } else {
- log_->LogUtf16Symbol(identifier_pos, scanner_->literal_utf16_string());
- }
-}
-
-
-PreParser::Expression PreParser::GetStringSymbol() {
- const int kUseStrictLength = 10;
- const char* kUseStrictChars = "use strict";
- LogSymbol();
- if (scanner_->is_literal_ascii() &&
- scanner_->literal_length() == kUseStrictLength &&
- !scanner_->literal_contains_escapes() &&
- !strncmp(scanner_->literal_ascii_string().start(), kUseStrictChars,
- kUseStrictLength)) {
- return Expression::UseStrictStringLiteral();
- }
- return Expression::StringLiteral();
-}
-
-
-PreParser::Identifier PreParser::GetIdentifierSymbol() {
- LogSymbol();
- if (scanner_->current_token() == i::Token::FUTURE_RESERVED_WORD) {
- return Identifier::FutureReserved();
- } else if (scanner_->current_token() ==
- i::Token::FUTURE_STRICT_RESERVED_WORD) {
- return Identifier::FutureStrictReserved();
- }
- if (scanner_->is_literal_ascii()) {
- // Detect strict-mode poison words.
- if (scanner_->literal_length() == 4 &&
- !strncmp(scanner_->literal_ascii_string().start(), "eval", 4)) {
- return Identifier::Eval();
- }
- if (scanner_->literal_length() == 9 &&
- !strncmp(scanner_->literal_ascii_string().start(), "arguments", 9)) {
- return Identifier::Arguments();
- }
- }
- return Identifier::Default();
-}
-
-
-PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
- i::Token::Value next = Next();
- switch (next) {
- case i::Token::FUTURE_RESERVED_WORD: {
- i::Scanner::Location location = scanner_->location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "reserved_word", NULL);
- *ok = false;
- return GetIdentifierSymbol();
- }
- case i::Token::FUTURE_STRICT_RESERVED_WORD:
- if (!is_classic_mode()) {
- i::Scanner::Location location = scanner_->location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "strict_reserved_word", NULL);
- *ok = false;
- }
- // FALLTHROUGH
- case i::Token::IDENTIFIER:
- return GetIdentifierSymbol();
- default:
- *ok = false;
- return Identifier::Default();
- }
-}
-
-
-void PreParser::SetStrictModeViolation(i::Scanner::Location location,
- const char* type,
- bool* ok) {
- if (!is_classic_mode()) {
- ReportMessageAt(location, type, NULL);
- *ok = false;
- return;
- }
- // Delay report in case this later turns out to be strict code
- // (i.e., for function names and parameters prior to a "use strict"
- // directive).
- // It's safe to overwrite an existing violation.
- // It's either from a function that turned out to be non-strict,
- // or it's in the current function (and we just need to report
- // one error), or it's in a unclosed nesting function that wasn't
- // strict (otherwise we would already be in strict mode).
- strict_mode_violation_location_ = location;
- strict_mode_violation_type_ = type;
-}
-
-
-void PreParser::CheckDelayedStrictModeViolation(int beg_pos,
- int end_pos,
- bool* ok) {
- i::Scanner::Location location = strict_mode_violation_location_;
- if (location.IsValid() &&
- location.beg_pos > beg_pos && location.end_pos < end_pos) {
- ReportMessageAt(location, strict_mode_violation_type_, NULL);
- *ok = false;
- }
-}
-
-
-void PreParser::StrictModeIdentifierViolation(i::Scanner::Location location,
- const char* eval_args_type,
- Identifier identifier,
- bool* ok) {
- const char* type = eval_args_type;
- if (identifier.IsFutureReserved()) {
- type = "reserved_word";
- } else if (identifier.IsFutureStrictReserved()) {
- type = "strict_reserved_word";
- }
- if (!is_classic_mode()) {
- ReportMessageAt(location, type, NULL);
- *ok = false;
- return;
- }
- strict_mode_violation_location_ = location;
- strict_mode_violation_type_ = type;
-}
-
-
-PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
- i::Token::Value next = Next();
- if (i::Token::IsKeyword(next)) {
- int pos = scanner_->location().beg_pos;
- const char* keyword = i::Token::String(next);
- log_->LogAsciiSymbol(pos, i::Vector<const char>(keyword,
- i::StrLength(keyword)));
- return Identifier::Default();
- }
- if (next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD ||
- next == i::Token::FUTURE_STRICT_RESERVED_WORD) {
- return GetIdentifierSymbol();
- }
- *ok = false;
- return Identifier::Default();
-}
-
-#undef CHECK_OK
-
-
-// This function reads an identifier and determines whether or not it
-// is 'get' or 'set'.
-PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Identifier result = ParseIdentifierName(ok);
- if (!*ok) return Identifier::Default();
- if (scanner_->is_literal_ascii() &&
- scanner_->literal_length() == 3) {
- const char* token = scanner_->literal_ascii_string().start();
- *is_get = strncmp(token, "get", 3) == 0;
- *is_set = !*is_get && strncmp(token, "set", 3) == 0;
- }
- return result;
-}
-
-bool PreParser::peek_any_identifier() {
- i::Token::Value next = peek();
- return next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD ||
- next == i::Token::FUTURE_STRICT_RESERVED_WORD;
-}
-
-
-int DuplicateFinder::AddAsciiSymbol(i::Vector<const char> key, int value) {
- return AddSymbol(i::Vector<const byte>::cast(key), true, value);
-}
-
-int DuplicateFinder::AddUtf16Symbol(i::Vector<const uint16_t> key, int value) {
- return AddSymbol(i::Vector<const byte>::cast(key), false, value);
-}
-
-int DuplicateFinder::AddSymbol(i::Vector<const byte> key,
- bool is_ascii,
- int value) {
- uint32_t hash = Hash(key, is_ascii);
- byte* encoding = BackupKey(key, is_ascii);
- i::HashMap::Entry* entry = map_.Lookup(encoding, hash, true);
- int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- entry->value =
- reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
- return old_value;
-}
-
-
-int DuplicateFinder::AddNumber(i::Vector<const char> key, int value) {
- ASSERT(key.length() > 0);
- // Quick check for already being in canonical form.
- if (IsNumberCanonical(key)) {
- return AddAsciiSymbol(key, value);
- }
-
- int flags = i::ALLOW_HEX | i::ALLOW_OCTALS;
- double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
- int length;
- const char* string;
- if (!isfinite(double_value)) {
- string = "Infinity";
- length = 8; // strlen("Infinity");
- } else {
- string = DoubleToCString(double_value,
- i::Vector<char>(number_buffer_, kBufferSize));
- length = i::StrLength(string);
- }
- return AddSymbol(i::Vector<const byte>(reinterpret_cast<const byte*>(string),
- length), true, value);
-}
-
-
-bool DuplicateFinder::IsNumberCanonical(i::Vector<const char> number) {
- // Test for a safe approximation of number literals that are already
- // in canonical form: max 15 digits, no leading zeroes, except an
- // integer part that is a single zero, and no trailing zeros below
- // the decimal point.
- int pos = 0;
- int length = number.length();
- if (number.length() > 15) return false;
- if (number[pos] == '0') {
- pos++;
- } else {
- while (pos < length &&
- static_cast<unsigned>(number[pos] - '0') <= ('9' - '0')) pos++;
- }
- if (length == pos) return true;
- if (number[pos] != '.') return false;
- pos++;
- bool invalid_last_digit = true;
- while (pos < length) {
- byte digit = number[pos] - '0';
- if (digit > '9' - '0') return false;
- invalid_last_digit = (digit == 0);
- pos++;
- }
- return !invalid_last_digit;
-}
-
-
-uint32_t DuplicateFinder::Hash(i::Vector<const byte> key, bool is_ascii) {
- // Primitive hash function, almost identical to the one used
- // for strings (except that it's seeded by the length and ASCII-ness).
- int length = key.length();
- uint32_t hash = (length << 1) | (is_ascii ? 1 : 0) ;
- for (int i = 0; i < length; i++) {
- uint32_t c = key[i];
- hash = (hash + c) * 1025;
- hash ^= (hash >> 6);
- }
- return hash;
-}
-
-
-bool DuplicateFinder::Match(void* first, void* second) {
- // Decode lengths.
- // Length + ASCII-bit is encoded as base 128, most significant heptet first,
- // with a 8th bit being non-zero while there are more heptets.
- // The value encodes the number of bytes following, and whether the original
- // was ASCII.
- byte* s1 = reinterpret_cast<byte*>(first);
- byte* s2 = reinterpret_cast<byte*>(second);
- uint32_t length_ascii_field = 0;
- byte c1;
- do {
- c1 = *s1;
- if (c1 != *s2) return false;
- length_ascii_field = (length_ascii_field << 7) | (c1 & 0x7f);
- s1++;
- s2++;
- } while ((c1 & 0x80) != 0);
- int length = static_cast<int>(length_ascii_field >> 1);
- return memcmp(s1, s2, length) == 0;
-}
-
-
-byte* DuplicateFinder::BackupKey(i::Vector<const byte> bytes,
- bool is_ascii) {
- uint32_t ascii_length = (bytes.length() << 1) | (is_ascii ? 1 : 0);
- backing_store_.StartSequence();
- // Emit ascii_length as base-128 encoded number, with the 7th bit set
- // on the byte of every heptet except the last, least significant, one.
- if (ascii_length >= (1 << 7)) {
- if (ascii_length >= (1 << 14)) {
- if (ascii_length >= (1 << 21)) {
- if (ascii_length >= (1 << 28)) {
- backing_store_.Add(static_cast<byte>((ascii_length >> 28) | 0x80));
- }
- backing_store_.Add(static_cast<byte>((ascii_length >> 21) | 0x80u));
- }
- backing_store_.Add(static_cast<byte>((ascii_length >> 14) | 0x80u));
- }
- backing_store_.Add(static_cast<byte>((ascii_length >> 7) | 0x80u));
- }
- backing_store_.Add(static_cast<byte>(ascii_length & 0x7f));
-
- backing_store_.AddBlock(bytes);
- return backing_store_.EndSequence().start();
-}
-} } // v8::preparser
diff --git a/src/3rdparty/v8/src/preparser.h b/src/3rdparty/v8/src/preparser.h
deleted file mode 100644
index ad52d74..0000000
--- a/src/3rdparty/v8/src/preparser.h
+++ /dev/null
@@ -1,672 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PREPARSER_H
-#define V8_PREPARSER_H
-
-#include "hashmap.h"
-#include "token.h"
-#include "scanner.h"
-
-namespace v8 {
-
-namespace internal {
-class UnicodeCache;
-}
-
-namespace preparser {
-
-typedef uint8_t byte;
-
-// Preparsing checks a JavaScript program and emits preparse-data that helps
-// a later parsing to be faster.
-// See preparse-data-format.h for the data format.
-
-// The PreParser checks that the syntax follows the grammar for JavaScript,
-// and collects some information about the program along the way.
-// The grammar check is only performed in order to understand the program
-// sufficiently to deduce some information about it, that can be used
-// to speed up later parsing. Finding errors is not the goal of pre-parsing,
-// rather it is to speed up properly written and correct programs.
-// That means that contextual checks (like a label being declared where
-// it is used) are generally omitted.
-
-namespace i = v8::internal;
-
-class DuplicateFinder {
- public:
- explicit DuplicateFinder(i::UnicodeCache* constants)
- : unicode_constants_(constants),
- backing_store_(16),
- map_(&Match) { }
-
- int AddAsciiSymbol(i::Vector<const char> key, int value);
- int AddUtf16Symbol(i::Vector<const uint16_t> key, int value);
- // Add a a number literal by converting it (if necessary)
- // to the string that ToString(ToNumber(literal)) would generate.
- // and then adding that string with AddAsciiSymbol.
- // This string is the actual value used as key in an object literal,
- // and the one that must be different from the other keys.
- int AddNumber(i::Vector<const char> key, int value);
-
- private:
- int AddSymbol(i::Vector<const byte> key, bool is_ascii, int value);
- // Backs up the key and its length in the backing store.
- // The backup is stored with a base 127 encoding of the
- // length (plus a bit saying whether the string is ASCII),
- // followed by the bytes of the key.
- byte* BackupKey(i::Vector<const byte> key, bool is_ascii);
-
- // Compare two encoded keys (both pointing into the backing store)
- // for having the same base-127 encoded lengths and ASCII-ness,
- // and then having the same 'length' bytes following.
- static bool Match(void* first, void* second);
- // Creates a hash from a sequence of bytes.
- static uint32_t Hash(i::Vector<const byte> key, bool is_ascii);
- // Checks whether a string containing a JS number is its canonical
- // form.
- static bool IsNumberCanonical(i::Vector<const char> key);
-
- // Size of buffer. Sufficient for using it to call DoubleToCString in
- // from conversions.h.
- static const int kBufferSize = 100;
-
- i::UnicodeCache* unicode_constants_;
- // Backing store used to store strings used as hashmap keys.
- i::SequenceCollector<unsigned char> backing_store_;
- i::HashMap map_;
- // Buffer used for string->number->canonical string conversions.
- char number_buffer_[kBufferSize];
-};
-
-
-class PreParser {
- public:
- enum PreParseResult {
- kPreParseStackOverflow,
- kPreParseSuccess
- };
-
-
- PreParser(i::Scanner* scanner,
- i::ParserRecorder* log,
- uintptr_t stack_limit,
- bool allow_lazy,
- bool allow_natives_syntax,
- bool allow_modules)
- : scanner_(scanner),
- log_(log),
- scope_(NULL),
- stack_limit_(stack_limit),
- strict_mode_violation_location_(i::Scanner::Location::invalid()),
- strict_mode_violation_type_(NULL),
- stack_overflow_(false),
- allow_lazy_(allow_lazy),
- allow_modules_(allow_modules),
- allow_natives_syntax_(allow_natives_syntax),
- parenthesized_function_(false),
- harmony_scoping_(scanner->HarmonyScoping()) { }
-
- ~PreParser() {}
-
- // Pre-parse the program from the character stream; returns true on
- // success (even if parsing failed, the pre-parse data successfully
- // captured the syntax error), and false if a stack-overflow happened
- // during parsing.
- static PreParseResult PreParseProgram(i::Scanner* scanner,
- i::ParserRecorder* log,
- int flags,
- uintptr_t stack_limit) {
- bool allow_lazy = (flags & i::kAllowLazy) != 0;
- bool allow_natives_syntax = (flags & i::kAllowNativesSyntax) != 0;
- bool allow_modules = (flags & i::kAllowModules) != 0;
- return PreParser(scanner, log, stack_limit, allow_lazy,
- allow_natives_syntax, allow_modules).PreParse();
- }
-
- // Parses a single function literal, from the opening parentheses before
- // parameters to the closing brace after the body.
- // Returns a FunctionEntry describing the body of the function in enough
- // detail that it can be lazily compiled.
- // The scanner is expected to have matched the "function" keyword and
- // parameters, and have consumed the initial '{'.
- // At return, unless an error occurred, the scanner is positioned before the
- // the final '}'.
- PreParseResult PreParseLazyFunction(i::LanguageMode mode,
- i::ParserRecorder* log);
-
- private:
- // Used to detect duplicates in object literals. Each of the values
- // kGetterProperty, kSetterProperty and kValueProperty represents
- // a type of object literal property. When parsing a property, its
- // type value is stored in the DuplicateFinder for the property name.
- // Values are chosen so that having intersection bits means the there is
- // an incompatibility.
- // I.e., you can add a getter to a property that already has a setter, since
- // kGetterProperty and kSetterProperty doesn't intersect, but not if it
- // already has a getter or a value. Adding the getter to an existing
- // setter will store the value (kGetterProperty | kSetterProperty), which
- // is incompatible with adding any further properties.
- enum PropertyType {
- kNone = 0,
- // Bit patterns representing different object literal property types.
- kGetterProperty = 1,
- kSetterProperty = 2,
- kValueProperty = 7,
- // Helper constants.
- kValueFlag = 4
- };
-
- // Checks the type of conflict based on values coming from PropertyType.
- bool HasConflict(int type1, int type2) { return (type1 & type2) != 0; }
- bool IsDataDataConflict(int type1, int type2) {
- return ((type1 & type2) & kValueFlag) != 0;
- }
- bool IsDataAccessorConflict(int type1, int type2) {
- return ((type1 ^ type2) & kValueFlag) != 0;
- }
- bool IsAccessorAccessorConflict(int type1, int type2) {
- return ((type1 | type2) & kValueFlag) == 0;
- }
-
-
- void CheckDuplicate(DuplicateFinder* finder,
- i::Token::Value property,
- int type,
- bool* ok);
-
- // These types form an algebra over syntactic categories that is just
- // rich enough to let us recognize and propagate the constructs that
- // are either being counted in the preparser data, or is important
- // to throw the correct syntax error exceptions.
-
- enum ScopeType {
- kTopLevelScope,
- kFunctionScope
- };
-
- enum VariableDeclarationContext {
- kSourceElement,
- kStatement,
- kForStatement
- };
-
- // If a list of variable declarations includes any initializers.
- enum VariableDeclarationProperties {
- kHasInitializers,
- kHasNoInitializers
- };
-
- class Expression;
-
- class Identifier {
- public:
- static Identifier Default() {
- return Identifier(kUnknownIdentifier);
- }
- static Identifier Eval() {
- return Identifier(kEvalIdentifier);
- }
- static Identifier Arguments() {
- return Identifier(kArgumentsIdentifier);
- }
- static Identifier FutureReserved() {
- return Identifier(kFutureReservedIdentifier);
- }
- static Identifier FutureStrictReserved() {
- return Identifier(kFutureStrictReservedIdentifier);
- }
- bool IsEval() { return type_ == kEvalIdentifier; }
- bool IsArguments() { return type_ == kArgumentsIdentifier; }
- bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
- bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
- bool IsFutureStrictReserved() {
- return type_ == kFutureStrictReservedIdentifier;
- }
- bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
-
- private:
- enum Type {
- kUnknownIdentifier,
- kFutureReservedIdentifier,
- kFutureStrictReservedIdentifier,
- kEvalIdentifier,
- kArgumentsIdentifier
- };
- explicit Identifier(Type type) : type_(type) { }
- Type type_;
-
- friend class Expression;
- };
-
- // Bits 0 and 1 are used to identify the type of expression:
- // If bit 0 is set, it's an identifier.
- // if bit 1 is set, it's a string literal.
- // If neither is set, it's no particular type, and both set isn't
- // use yet.
- // Bit 2 is used to mark the expression as being parenthesized,
- // so "(foo)" isn't recognized as a pure identifier (and possible label).
- class Expression {
- public:
- static Expression Default() {
- return Expression(kUnknownExpression);
- }
-
- static Expression FromIdentifier(Identifier id) {
- return Expression(kIdentifierFlag | (id.type_ << kIdentifierShift));
- }
-
- static Expression StringLiteral() {
- return Expression(kUnknownStringLiteral);
- }
-
- static Expression UseStrictStringLiteral() {
- return Expression(kUseStrictString);
- }
-
- static Expression This() {
- return Expression(kThisExpression);
- }
-
- static Expression ThisProperty() {
- return Expression(kThisPropertyExpression);
- }
-
- static Expression StrictFunction() {
- return Expression(kStrictFunctionExpression);
- }
-
- bool IsIdentifier() {
- return (code_ & kIdentifierFlag) != 0;
- }
-
- // Only works corretly if it is actually an identifier expression.
- PreParser::Identifier AsIdentifier() {
- return PreParser::Identifier(
- static_cast<PreParser::Identifier::Type>(code_ >> kIdentifierShift));
- }
-
- bool IsParenthesized() {
- // If bit 0 or 1 is set, we interpret bit 2 as meaning parenthesized.
- return (code_ & 7) > 4;
- }
-
- bool IsRawIdentifier() {
- return !IsParenthesized() && IsIdentifier();
- }
-
- bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
-
- bool IsRawStringLiteral() {
- return !IsParenthesized() && IsStringLiteral();
- }
-
- bool IsUseStrictLiteral() {
- return (code_ & kStringLiteralMask) == kUseStrictString;
- }
-
- bool IsThis() {
- return code_ == kThisExpression;
- }
-
- bool IsThisProperty() {
- return code_ == kThisPropertyExpression;
- }
-
- bool IsStrictFunction() {
- return code_ == kStrictFunctionExpression;
- }
-
- Expression Parenthesize() {
- int type = code_ & 3;
- if (type != 0) {
- // Identifiers and string literals can be parenthesized.
- // They no longer work as labels or directive prologues,
- // but are still recognized in other contexts.
- return Expression(code_ | kParentesizedExpressionFlag);
- }
- // For other types of expressions, it's not important to remember
- // the parentheses.
- return *this;
- }
-
- private:
- // First two/three bits are used as flags.
- // Bit 0 and 1 represent identifiers or strings literals, and are
- // mutually exclusive, but can both be absent.
- // If bit 0 or 1 are set, bit 2 marks that the expression has
- // been wrapped in parentheses (a string literal can no longer
- // be a directive prologue, and an identifier can no longer be
- // a label.
- enum {
- kUnknownExpression = 0,
- // Identifiers
- kIdentifierFlag = 1, // Used to detect labels.
- kIdentifierShift = 3,
-
- kStringLiteralFlag = 2, // Used to detect directive prologue.
- kUnknownStringLiteral = kStringLiteralFlag,
- kUseStrictString = kStringLiteralFlag | 8,
- kStringLiteralMask = kUseStrictString,
-
- kParentesizedExpressionFlag = 4, // Only if identifier or string literal.
-
- // Below here applies if neither identifier nor string literal.
- kThisExpression = 4,
- kThisPropertyExpression = 8,
- kStrictFunctionExpression = 12
- };
-
- explicit Expression(int expression_code) : code_(expression_code) { }
-
- int code_;
- };
-
- class Statement {
- public:
- static Statement Default() {
- return Statement(kUnknownStatement);
- }
-
- static Statement FunctionDeclaration() {
- return Statement(kFunctionDeclaration);
- }
-
- // Creates expression statement from expression.
- // Preserves being an unparenthesized string literal, possibly
- // "use strict".
- static Statement ExpressionStatement(Expression expression) {
- if (!expression.IsParenthesized()) {
- if (expression.IsUseStrictLiteral()) {
- return Statement(kUseStrictExpressionStatement);
- }
- if (expression.IsStringLiteral()) {
- return Statement(kStringLiteralExpressionStatement);
- }
- }
- return Default();
- }
-
- bool IsStringLiteral() {
- return code_ != kUnknownStatement;
- }
-
- bool IsUseStrictLiteral() {
- return code_ == kUseStrictExpressionStatement;
- }
-
- bool IsFunctionDeclaration() {
- return code_ == kFunctionDeclaration;
- }
-
- private:
- enum Type {
- kUnknownStatement,
- kStringLiteralExpressionStatement,
- kUseStrictExpressionStatement,
- kFunctionDeclaration
- };
-
- explicit Statement(Type code) : code_(code) {}
- Type code_;
- };
-
- enum SourceElements {
- kUnknownSourceElements
- };
-
- typedef int Arguments;
-
- class Scope {
- public:
- Scope(Scope** variable, ScopeType type)
- : variable_(variable),
- prev_(*variable),
- type_(type),
- materialized_literal_count_(0),
- expected_properties_(0),
- with_nesting_count_(0),
- language_mode_(
- (prev_ != NULL) ? prev_->language_mode() : i::CLASSIC_MODE) {
- *variable = this;
- }
- ~Scope() { *variable_ = prev_; }
- void NextMaterializedLiteralIndex() { materialized_literal_count_++; }
- void AddProperty() { expected_properties_++; }
- ScopeType type() { return type_; }
- int expected_properties() { return expected_properties_; }
- int materialized_literal_count() { return materialized_literal_count_; }
- bool IsInsideWith() { return with_nesting_count_ != 0; }
- bool is_classic_mode() {
- return language_mode_ == i::CLASSIC_MODE;
- }
- i::LanguageMode language_mode() {
- return language_mode_;
- }
- void set_language_mode(i::LanguageMode language_mode) {
- language_mode_ = language_mode;
- }
-
- class InsideWith {
- public:
- explicit InsideWith(Scope* scope) : scope_(scope) {
- scope->with_nesting_count_++;
- }
-
- ~InsideWith() { scope_->with_nesting_count_--; }
-
- private:
- Scope* scope_;
- DISALLOW_COPY_AND_ASSIGN(InsideWith);
- };
-
- private:
- Scope** const variable_;
- Scope* const prev_;
- const ScopeType type_;
- int materialized_literal_count_;
- int expected_properties_;
- int with_nesting_count_;
- i::LanguageMode language_mode_;
- };
-
- // Preparse the program. Only called in PreParseProgram after creating
- // the instance.
- PreParseResult PreParse() {
- Scope top_scope(&scope_, kTopLevelScope);
- bool ok = true;
- int start_position = scanner_->peek_location().beg_pos;
- ParseSourceElements(i::Token::EOS, &ok);
- if (stack_overflow_) return kPreParseStackOverflow;
- if (!ok) {
- ReportUnexpectedToken(scanner_->current_token());
- } else if (!scope_->is_classic_mode()) {
- CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok);
- }
- return kPreParseSuccess;
- }
-
- // Report syntax error
- void ReportUnexpectedToken(i::Token::Value token);
- void ReportMessageAt(i::Scanner::Location location,
- const char* type,
- const char* name_opt) {
- log_->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
- }
- void ReportMessageAt(int start_pos,
- int end_pos,
- const char* type,
- const char* name_opt) {
- log_->LogMessage(start_pos, end_pos, type, name_opt);
- }
-
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
-
- // All ParseXXX functions take as the last argument an *ok parameter
- // which is set to false if parsing failed; it is unchanged otherwise.
- // By making the 'exception handling' explicit, we are forced to check
- // for failure at the call sites.
- Statement ParseSourceElement(bool* ok);
- SourceElements ParseSourceElements(int end_token, bool* ok);
- Statement ParseStatement(bool* ok);
- Statement ParseFunctionDeclaration(bool* ok);
- Statement ParseBlock(bool* ok);
- Statement ParseVariableStatement(VariableDeclarationContext var_context,
- bool* ok);
- Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
- VariableDeclarationProperties* decl_props,
- int* num_decl,
- bool* ok);
- Statement ParseExpressionOrLabelledStatement(bool* ok);
- Statement ParseIfStatement(bool* ok);
- Statement ParseContinueStatement(bool* ok);
- Statement ParseBreakStatement(bool* ok);
- Statement ParseReturnStatement(bool* ok);
- Statement ParseWithStatement(bool* ok);
- Statement ParseSwitchStatement(bool* ok);
- Statement ParseDoWhileStatement(bool* ok);
- Statement ParseWhileStatement(bool* ok);
- Statement ParseForStatement(bool* ok);
- Statement ParseThrowStatement(bool* ok);
- Statement ParseTryStatement(bool* ok);
- Statement ParseDebuggerStatement(bool* ok);
-
- Expression ParseExpression(bool accept_IN, bool* ok);
- Expression ParseAssignmentExpression(bool accept_IN, bool* ok);
- Expression ParseConditionalExpression(bool accept_IN, bool* ok);
- Expression ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
- Expression ParseUnaryExpression(bool* ok);
- Expression ParsePostfixExpression(bool* ok);
- Expression ParseLeftHandSideExpression(bool* ok);
- Expression ParseNewExpression(bool* ok);
- Expression ParseMemberExpression(bool* ok);
- Expression ParseMemberWithNewPrefixesExpression(unsigned new_count, bool* ok);
- Expression ParsePrimaryExpression(bool* ok);
- Expression ParseArrayLiteral(bool* ok);
- Expression ParseObjectLiteral(bool* ok);
- Expression ParseRegExpLiteral(bool seen_equal, bool* ok);
- Expression ParseV8Intrinsic(bool* ok);
-
- Arguments ParseArguments(bool* ok);
- Expression ParseFunctionLiteral(bool* ok);
- void ParseLazyFunctionLiteralBody(bool* ok);
-
- Identifier ParseIdentifier(bool* ok);
- Identifier ParseIdentifierName(bool* ok);
- Identifier ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
-
- // Logs the currently parsed literal as a symbol in the preparser data.
- void LogSymbol();
- // Log the currently parsed identifier.
- Identifier GetIdentifierSymbol();
- // Log the currently parsed string literal.
- Expression GetStringSymbol();
-
- i::Token::Value peek() {
- if (stack_overflow_) return i::Token::ILLEGAL;
- return scanner_->peek();
- }
-
- i::Token::Value Next() {
- if (stack_overflow_) return i::Token::ILLEGAL;
- {
- int marker;
- if (reinterpret_cast<uintptr_t>(&marker) < stack_limit_) {
- // Further calls to peek/Next will return illegal token.
- // The current one will still be returned. It might already
- // have been seen using peek.
- stack_overflow_ = true;
- }
- }
- return scanner_->Next();
- }
-
- bool peek_any_identifier();
-
- void set_language_mode(i::LanguageMode language_mode) {
- scope_->set_language_mode(language_mode);
- }
-
- bool is_classic_mode() {
- return scope_->language_mode() == i::CLASSIC_MODE;
- }
-
- bool is_extended_mode() {
- return scope_->language_mode() == i::EXTENDED_MODE;
- }
-
- i::LanguageMode language_mode() { return scope_->language_mode(); }
-
- void Consume(i::Token::Value token) { Next(); }
-
- void Expect(i::Token::Value token, bool* ok) {
- if (Next() != token) {
- *ok = false;
- }
- }
-
- bool Check(i::Token::Value token) {
- i::Token::Value next = peek();
- if (next == token) {
- Consume(next);
- return true;
- }
- return false;
- }
- void ExpectSemicolon(bool* ok);
-
- static int Precedence(i::Token::Value tok, bool accept_IN);
-
- void SetStrictModeViolation(i::Scanner::Location,
- const char* type,
- bool* ok);
-
- void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok);
-
- void StrictModeIdentifierViolation(i::Scanner::Location,
- const char* eval_args_type,
- Identifier identifier,
- bool* ok);
-
- i::Scanner* scanner_;
- i::ParserRecorder* log_;
- Scope* scope_;
- uintptr_t stack_limit_;
- i::Scanner::Location strict_mode_violation_location_;
- const char* strict_mode_violation_type_;
- bool stack_overflow_;
- bool allow_lazy_;
- bool allow_modules_;
- bool allow_natives_syntax_;
- bool parenthesized_function_;
- bool harmony_scoping_;
-};
-} } // v8::preparser
-
-#endif // V8_PREPARSER_H
diff --git a/src/3rdparty/v8/src/prettyprinter.cc b/src/3rdparty/v8/src/prettyprinter.cc
deleted file mode 100644
index c339583..0000000
--- a/src/3rdparty/v8/src/prettyprinter.cc
+++ /dev/null
@@ -1,1136 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-
-#include "v8.h"
-
-#include "prettyprinter.h"
-#include "scopes.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-
-PrettyPrinter::PrettyPrinter() {
- output_ = NULL;
- size_ = 0;
- pos_ = 0;
- InitializeAstVisitor();
-}
-
-
-PrettyPrinter::~PrettyPrinter() {
- DeleteArray(output_);
-}
-
-
-void PrettyPrinter::VisitBlock(Block* node) {
- if (!node->is_initializer_block()) Print("{ ");
- PrintStatements(node->statements());
- if (node->statements()->length() > 0) Print(" ");
- if (!node->is_initializer_block()) Print("}");
-}
-
-
-void PrettyPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
- Print("var ");
- PrintLiteral(node->proxy()->name(), false);
- Print(";");
-}
-
-
-void PrettyPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
- Print("function ");
- PrintLiteral(node->proxy()->name(), false);
- Print(" = ");
- PrintFunctionLiteral(node->fun());
- Print(";");
-}
-
-
-void PrettyPrinter::VisitModuleDeclaration(ModuleDeclaration* node) {
- Print("module ");
- PrintLiteral(node->proxy()->name(), false);
- Print(" = ");
- Visit(node->module());
- Print(";");
-}
-
-
-void PrettyPrinter::VisitImportDeclaration(ImportDeclaration* node) {
- Print("import ");
- PrintLiteral(node->proxy()->name(), false);
- Print(" from ");
- Visit(node->module());
- Print(";");
-}
-
-
-void PrettyPrinter::VisitExportDeclaration(ExportDeclaration* node) {
- Print("export ");
- PrintLiteral(node->proxy()->name(), false);
- Print(";");
-}
-
-
-void PrettyPrinter::VisitModuleLiteral(ModuleLiteral* node) {
- VisitBlock(node->body());
-}
-
-
-void PrettyPrinter::VisitModuleVariable(ModuleVariable* node) {
- Visit(node->proxy());
-}
-
-
-void PrettyPrinter::VisitModulePath(ModulePath* node) {
- Visit(node->module());
- Print(".");
- PrintLiteral(node->name(), false);
-}
-
-
-void PrettyPrinter::VisitModuleUrl(ModuleUrl* node) {
- Print("at ");
- PrintLiteral(node->url(), true);
-}
-
-
-void PrettyPrinter::VisitModuleStatement(ModuleStatement* node) {
- Print("module ");
- PrintLiteral(node->proxy()->name(), false);
- Print(" ");
- Visit(node->body());
-}
-
-
-void PrettyPrinter::VisitExpressionStatement(ExpressionStatement* node) {
- Visit(node->expression());
- Print(";");
-}
-
-
-void PrettyPrinter::VisitEmptyStatement(EmptyStatement* node) {
- Print(";");
-}
-
-
-void PrettyPrinter::VisitIfStatement(IfStatement* node) {
- Print("if (");
- Visit(node->condition());
- Print(") ");
- Visit(node->then_statement());
- if (node->HasElseStatement()) {
- Print(" else ");
- Visit(node->else_statement());
- }
-}
-
-
-void PrettyPrinter::VisitContinueStatement(ContinueStatement* node) {
- Print("continue");
- ZoneStringList* labels = node->target()->labels();
- if (labels != NULL) {
- Print(" ");
- ASSERT(labels->length() > 0); // guaranteed to have at least one entry
- PrintLiteral(labels->at(0), false); // any label from the list is fine
- }
- Print(";");
-}
-
-
-void PrettyPrinter::VisitBreakStatement(BreakStatement* node) {
- Print("break");
- ZoneStringList* labels = node->target()->labels();
- if (labels != NULL) {
- Print(" ");
- ASSERT(labels->length() > 0); // guaranteed to have at least one entry
- PrintLiteral(labels->at(0), false); // any label from the list is fine
- }
- Print(";");
-}
-
-
-void PrettyPrinter::VisitReturnStatement(ReturnStatement* node) {
- Print("return ");
- Visit(node->expression());
- Print(";");
-}
-
-
-void PrettyPrinter::VisitWithStatement(WithStatement* node) {
- Print("with (");
- Visit(node->expression());
- Print(") ");
- Visit(node->statement());
-}
-
-
-void PrettyPrinter::VisitSwitchStatement(SwitchStatement* node) {
- PrintLabels(node->labels());
- Print("switch (");
- Visit(node->tag());
- Print(") { ");
- ZoneList<CaseClause*>* cases = node->cases();
- for (int i = 0; i < cases->length(); i++)
- PrintCaseClause(cases->at(i));
- Print("}");
-}
-
-
-void PrettyPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
- PrintLabels(node->labels());
- Print("do ");
- Visit(node->body());
- Print(" while (");
- Visit(node->cond());
- Print(");");
-}
-
-
-void PrettyPrinter::VisitWhileStatement(WhileStatement* node) {
- PrintLabels(node->labels());
- Print("while (");
- Visit(node->cond());
- Print(") ");
- Visit(node->body());
-}
-
-
-void PrettyPrinter::VisitForStatement(ForStatement* node) {
- PrintLabels(node->labels());
- Print("for (");
- if (node->init() != NULL) {
- Visit(node->init());
- Print(" ");
- } else {
- Print("; ");
- }
- if (node->cond() != NULL) Visit(node->cond());
- Print("; ");
- if (node->next() != NULL) {
- Visit(node->next()); // prints extra ';', unfortunately
- // to fix: should use Expression for next
- }
- Print(") ");
- Visit(node->body());
-}
-
-
-void PrettyPrinter::VisitForInStatement(ForInStatement* node) {
- PrintLabels(node->labels());
- Print("for (");
- Visit(node->each());
- Print(" in ");
- Visit(node->enumerable());
- Print(") ");
- Visit(node->body());
-}
-
-
-void PrettyPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
- Print("try ");
- Visit(node->try_block());
- Print(" catch (");
- const bool quote = false;
- PrintLiteral(node->variable()->name(), quote);
- Print(") ");
- Visit(node->catch_block());
-}
-
-
-void PrettyPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
- Print("try ");
- Visit(node->try_block());
- Print(" finally ");
- Visit(node->finally_block());
-}
-
-
-void PrettyPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
- Print("debugger ");
-}
-
-
-void PrettyPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
- Print("(");
- PrintFunctionLiteral(node);
- Print(")");
-}
-
-
-void PrettyPrinter::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- Print("(");
- PrintLiteral(node->shared_function_info(), true);
- Print(")");
-}
-
-
-void PrettyPrinter::VisitConditional(Conditional* node) {
- Visit(node->condition());
- Print(" ? ");
- Visit(node->then_expression());
- Print(" : ");
- Visit(node->else_expression());
-}
-
-
-void PrettyPrinter::VisitLiteral(Literal* node) {
- PrintLiteral(node->handle(), true);
-}
-
-
-void PrettyPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
- Print(" RegExp(");
- PrintLiteral(node->pattern(), false);
- Print(",");
- PrintLiteral(node->flags(), false);
- Print(") ");
-}
-
-
-void PrettyPrinter::VisitObjectLiteral(ObjectLiteral* node) {
- Print("{ ");
- for (int i = 0; i < node->properties()->length(); i++) {
- if (i != 0) Print(",");
- ObjectLiteral::Property* property = node->properties()->at(i);
- Print(" ");
- Visit(property->key());
- Print(": ");
- Visit(property->value());
- }
- Print(" }");
-}
-
-
-void PrettyPrinter::VisitArrayLiteral(ArrayLiteral* node) {
- Print("[ ");
- for (int i = 0; i < node->values()->length(); i++) {
- if (i != 0) Print(",");
- Visit(node->values()->at(i));
- }
- Print(" ]");
-}
-
-
-void PrettyPrinter::VisitVariableProxy(VariableProxy* node) {
- PrintLiteral(node->name(), false);
-}
-
-
-void PrettyPrinter::VisitAssignment(Assignment* node) {
- Visit(node->target());
- Print(" %s ", Token::String(node->op()));
- Visit(node->value());
-}
-
-
-void PrettyPrinter::VisitThrow(Throw* node) {
- Print("throw ");
- Visit(node->exception());
-}
-
-
-void PrettyPrinter::VisitProperty(Property* node) {
- Expression* key = node->key();
- Literal* literal = key->AsLiteral();
- if (literal != NULL && literal->handle()->IsInternalizedString()) {
- Print("(");
- Visit(node->obj());
- Print(").");
- PrintLiteral(literal->handle(), false);
- } else {
- Visit(node->obj());
- Print("[");
- Visit(key);
- Print("]");
- }
-}
-
-
-void PrettyPrinter::VisitCall(Call* node) {
- Visit(node->expression());
- PrintArguments(node->arguments());
-}
-
-
-void PrettyPrinter::VisitCallNew(CallNew* node) {
- Print("new (");
- Visit(node->expression());
- Print(")");
- PrintArguments(node->arguments());
-}
-
-
-void PrettyPrinter::VisitCallRuntime(CallRuntime* node) {
- Print("%%");
- PrintLiteral(node->name(), false);
- PrintArguments(node->arguments());
-}
-
-
-void PrettyPrinter::VisitUnaryOperation(UnaryOperation* node) {
- Token::Value op = node->op();
- bool needsSpace =
- op == Token::DELETE || op == Token::TYPEOF || op == Token::VOID;
- Print("(%s%s", Token::String(op), needsSpace ? " " : "");
- Visit(node->expression());
- Print(")");
-}
-
-
-void PrettyPrinter::VisitCountOperation(CountOperation* node) {
- Print("(");
- if (node->is_prefix()) Print("%s", Token::String(node->op()));
- Visit(node->expression());
- if (node->is_postfix()) Print("%s", Token::String(node->op()));
- Print(")");
-}
-
-
-void PrettyPrinter::VisitBinaryOperation(BinaryOperation* node) {
- Print("(");
- Visit(node->left());
- Print(" %s ", Token::String(node->op()));
- Visit(node->right());
- Print(")");
-}
-
-
-void PrettyPrinter::VisitCompareOperation(CompareOperation* node) {
- Print("(");
- Visit(node->left());
- Print(" %s ", Token::String(node->op()));
- Visit(node->right());
- Print(")");
-}
-
-
-void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
- Print("<this-function>");
-}
-
-
-const char* PrettyPrinter::Print(AstNode* node) {
- Init();
- Visit(node);
- return output_;
-}
-
-
-const char* PrettyPrinter::PrintExpression(FunctionLiteral* program) {
- Init();
- ExpressionStatement* statement =
- program->body()->at(0)->AsExpressionStatement();
- Visit(statement->expression());
- return output_;
-}
-
-
-const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
- Init();
- PrintStatements(program->body());
- Print("\n");
- return output_;
-}
-
-
-void PrettyPrinter::PrintOut(AstNode* node) {
- PrettyPrinter printer;
- PrintF("%s", printer.Print(node));
-}
-
-
-void PrettyPrinter::Init() {
- if (size_ == 0) {
- ASSERT(output_ == NULL);
- const int initial_size = 256;
- output_ = NewArray<char>(initial_size);
- size_ = initial_size;
- }
- output_[0] = '\0';
- pos_ = 0;
-}
-
-
-void PrettyPrinter::Print(const char* format, ...) {
- for (;;) {
- va_list arguments;
- va_start(arguments, format);
- int n = OS::VSNPrintF(Vector<char>(output_, size_) + pos_,
- format,
- arguments);
- va_end(arguments);
-
- if (n >= 0) {
- // there was enough space - we are done
- pos_ += n;
- return;
- } else {
- // there was not enough space - allocate more and try again
- const int slack = 32;
- int new_size = size_ + (size_ >> 1) + slack;
- char* new_output = NewArray<char>(new_size);
- memcpy(new_output, output_, pos_);
- DeleteArray(output_);
- output_ = new_output;
- size_ = new_size;
- }
- }
-}
-
-
-void PrettyPrinter::PrintStatements(ZoneList<Statement*>* statements) {
- if (statements == NULL) return;
- for (int i = 0; i < statements->length(); i++) {
- if (i != 0) Print(" ");
- Visit(statements->at(i));
- }
-}
-
-
-void PrettyPrinter::PrintLabels(ZoneStringList* labels) {
- if (labels != NULL) {
- for (int i = 0; i < labels->length(); i++) {
- PrintLiteral(labels->at(i), false);
- Print(": ");
- }
- }
-}
-
-
-void PrettyPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
- Print("(");
- for (int i = 0; i < arguments->length(); i++) {
- if (i != 0) Print(", ");
- Visit(arguments->at(i));
- }
- Print(")");
-}
-
-
-void PrettyPrinter::PrintLiteral(Handle<Object> value, bool quote) {
- Object* object = *value;
- if (object->IsString()) {
- String* string = String::cast(object);
- if (quote) Print("\"");
- for (int i = 0; i < string->length(); i++) {
- Print("%c", string->Get(i));
- }
- if (quote) Print("\"");
- } else if (object->IsNull()) {
- Print("null");
- } else if (object->IsTrue()) {
- Print("true");
- } else if (object->IsFalse()) {
- Print("false");
- } else if (object->IsUndefined()) {
- Print("undefined");
- } else if (object->IsNumber()) {
- Print("%g", object->Number());
- } else if (object->IsJSObject()) {
- // regular expression
- if (object->IsJSFunction()) {
- Print("JS-Function");
- } else if (object->IsJSArray()) {
- Print("JS-array[%u]", JSArray::cast(object)->length());
- } else if (object->IsJSObject()) {
- Print("JS-Object");
- } else {
- Print("?UNKNOWN?");
- }
- } else if (object->IsFixedArray()) {
- Print("FixedArray");
- } else {
- Print("<unknown literal %p>", object);
- }
-}
-
-
-void PrettyPrinter::PrintParameters(Scope* scope) {
- Print("(");
- for (int i = 0; i < scope->num_parameters(); i++) {
- if (i > 0) Print(", ");
- PrintLiteral(scope->parameter(i)->name(), false);
- }
- Print(")");
-}
-
-
-void PrettyPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
- for (int i = 0; i < declarations->length(); i++) {
- if (i > 0) Print(" ");
- Visit(declarations->at(i));
- }
-}
-
-
-void PrettyPrinter::PrintFunctionLiteral(FunctionLiteral* function) {
- Print("function ");
- PrintLiteral(function->name(), false);
- PrintParameters(function->scope());
- Print(" { ");
- PrintDeclarations(function->scope()->declarations());
- PrintStatements(function->body());
- Print(" }");
-}
-
-
-void PrettyPrinter::PrintCaseClause(CaseClause* clause) {
- if (clause->is_default()) {
- Print("default");
- } else {
- Print("case ");
- Visit(clause->label());
- }
- Print(": ");
- PrintStatements(clause->statements());
- if (clause->statements()->length() > 0)
- Print(" ");
-}
-
-
-//-----------------------------------------------------------------------------
-
-class IndentedScope BASE_EMBEDDED {
- public:
- explicit IndentedScope(AstPrinter* printer) : ast_printer_(printer) {
- ast_printer_->inc_indent();
- }
-
- IndentedScope(AstPrinter* printer, const char* txt, AstNode* node = NULL)
- : ast_printer_(printer) {
- ast_printer_->PrintIndented(txt);
- ast_printer_->Print("\n");
- ast_printer_->inc_indent();
- }
-
- virtual ~IndentedScope() {
- ast_printer_->dec_indent();
- }
-
- private:
- AstPrinter* ast_printer_;
-};
-
-
-//-----------------------------------------------------------------------------
-
-
-AstPrinter::AstPrinter() : indent_(0) {
-}
-
-
-AstPrinter::~AstPrinter() {
- ASSERT(indent_ == 0);
-}
-
-
-void AstPrinter::PrintIndented(const char* txt) {
- for (int i = 0; i < indent_; i++) {
- Print(". ");
- }
- Print(txt);
-}
-
-
-void AstPrinter::PrintLiteralIndented(const char* info,
- Handle<Object> value,
- bool quote) {
- PrintIndented(info);
- Print(" ");
- PrintLiteral(value, quote);
- Print("\n");
-}
-
-
-void AstPrinter::PrintLiteralWithModeIndented(const char* info,
- Variable* var,
- Handle<Object> value) {
- if (var == NULL) {
- PrintLiteralIndented(info, value, true);
- } else {
- EmbeddedVector<char, 256> buf;
- int pos = OS::SNPrintF(buf, "%s (mode = %s", info,
- Variable::Mode2String(var->mode()));
- if (var->is_qml_global()) {
- pos += OS::SNPrintF(buf + pos, ":QML");
- }
- OS::SNPrintF(buf + pos, ")");
- PrintLiteralIndented(buf.start(), value, true);
- }
-}
-
-
-void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
- if (labels != NULL && labels->length() > 0) {
- PrintIndented(info == NULL ? "LABELS" : info);
- Print(" ");
- PrintLabels(labels);
- Print("\n");
- } else if (info != NULL) {
- PrintIndented(info);
- Print("\n");
- }
-}
-
-
-void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
- IndentedScope indent(this, s, node);
- Visit(node);
-}
-
-
-const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
- Init();
- { IndentedScope indent(this, "FUNC");
- PrintLiteralIndented("NAME", program->name(), true);
- PrintLiteralIndented("INFERRED NAME", program->inferred_name(), true);
- PrintParameters(program->scope());
- PrintDeclarations(program->scope()->declarations());
- PrintStatements(program->body());
- }
- return Output();
-}
-
-
-void AstPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
- if (declarations->length() > 0) {
- IndentedScope indent(this, "DECLS");
- for (int i = 0; i < declarations->length(); i++) {
- Visit(declarations->at(i));
- }
- }
-}
-
-
-void AstPrinter::PrintParameters(Scope* scope) {
- if (scope->num_parameters() > 0) {
- IndentedScope indent(this, "PARAMS");
- for (int i = 0; i < scope->num_parameters(); i++) {
- PrintLiteralWithModeIndented("VAR", scope->parameter(i),
- scope->parameter(i)->name());
- }
- }
-}
-
-
-void AstPrinter::PrintStatements(ZoneList<Statement*>* statements) {
- for (int i = 0; i < statements->length(); i++) {
- Visit(statements->at(i));
- }
-}
-
-
-void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
- for (int i = 0; i < arguments->length(); i++) {
- Visit(arguments->at(i));
- }
-}
-
-
-void AstPrinter::PrintCaseClause(CaseClause* clause) {
- if (clause->is_default()) {
- IndentedScope indent(this, "DEFAULT");
- PrintStatements(clause->statements());
- } else {
- IndentedScope indent(this, "CASE");
- Visit(clause->label());
- PrintStatements(clause->statements());
- }
-}
-
-
-void AstPrinter::VisitBlock(Block* node) {
- const char* block_txt = node->is_initializer_block() ? "BLOCK INIT" : "BLOCK";
- IndentedScope indent(this, block_txt);
- PrintStatements(node->statements());
-}
-
-
-void AstPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
- PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
- node->proxy()->var(),
- node->proxy()->name());
-}
-
-
-void AstPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
- PrintIndented("FUNCTION ");
- PrintLiteral(node->proxy()->name(), true);
- Print(" = function ");
- PrintLiteral(node->fun()->name(), false);
- Print("\n");
-}
-
-
-void AstPrinter::VisitModuleDeclaration(ModuleDeclaration* node) {
- IndentedScope indent(this, "MODULE");
- PrintLiteralIndented("NAME", node->proxy()->name(), true);
- Visit(node->module());
-}
-
-
-void AstPrinter::VisitImportDeclaration(ImportDeclaration* node) {
- IndentedScope indent(this, "IMPORT");
- PrintLiteralIndented("NAME", node->proxy()->name(), true);
- Visit(node->module());
-}
-
-
-void AstPrinter::VisitExportDeclaration(ExportDeclaration* node) {
- IndentedScope indent(this, "EXPORT ");
- PrintLiteral(node->proxy()->name(), true);
-}
-
-
-void AstPrinter::VisitModuleLiteral(ModuleLiteral* node) {
- VisitBlock(node->body());
-}
-
-
-void AstPrinter::VisitModuleVariable(ModuleVariable* node) {
- Visit(node->proxy());
-}
-
-
-void AstPrinter::VisitModulePath(ModulePath* node) {
- IndentedScope indent(this, "PATH");
- PrintIndentedVisit("MODULE", node->module());
- PrintLiteralIndented("NAME", node->name(), false);
-}
-
-
-void AstPrinter::VisitModuleUrl(ModuleUrl* node) {
- PrintLiteralIndented("URL", node->url(), true);
-}
-
-
-void AstPrinter::VisitModuleStatement(ModuleStatement* node) {
- IndentedScope indent(this, "MODULE");
- PrintLiteralIndented("NAME", node->proxy()->name(), true);
- PrintStatements(node->body()->statements());
-}
-
-
-void AstPrinter::VisitExpressionStatement(ExpressionStatement* node) {
- Visit(node->expression());
-}
-
-
-void AstPrinter::VisitEmptyStatement(EmptyStatement* node) {
- PrintIndented("EMPTY\n");
-}
-
-
-void AstPrinter::VisitIfStatement(IfStatement* node) {
- PrintIndentedVisit("IF", node->condition());
- PrintIndentedVisit("THEN", node->then_statement());
- if (node->HasElseStatement()) {
- PrintIndentedVisit("ELSE", node->else_statement());
- }
-}
-
-
-void AstPrinter::VisitContinueStatement(ContinueStatement* node) {
- PrintLabelsIndented("CONTINUE", node->target()->labels());
-}
-
-
-void AstPrinter::VisitBreakStatement(BreakStatement* node) {
- PrintLabelsIndented("BREAK", node->target()->labels());
-}
-
-
-void AstPrinter::VisitReturnStatement(ReturnStatement* node) {
- PrintIndentedVisit("RETURN", node->expression());
-}
-
-
-void AstPrinter::VisitWithStatement(WithStatement* node) {
- IndentedScope indent(this, "WITH");
- PrintIndentedVisit("OBJECT", node->expression());
- PrintIndentedVisit("BODY", node->statement());
-}
-
-
-void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
- IndentedScope indent(this, "SWITCH");
- PrintLabelsIndented(NULL, node->labels());
- PrintIndentedVisit("TAG", node->tag());
- for (int i = 0; i < node->cases()->length(); i++) {
- PrintCaseClause(node->cases()->at(i));
- }
-}
-
-
-void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
- IndentedScope indent(this, "DO");
- PrintLabelsIndented(NULL, node->labels());
- PrintIndentedVisit("BODY", node->body());
- PrintIndentedVisit("COND", node->cond());
-}
-
-
-void AstPrinter::VisitWhileStatement(WhileStatement* node) {
- IndentedScope indent(this, "WHILE");
- PrintLabelsIndented(NULL, node->labels());
- PrintIndentedVisit("COND", node->cond());
- PrintIndentedVisit("BODY", node->body());
-}
-
-
-void AstPrinter::VisitForStatement(ForStatement* node) {
- IndentedScope indent(this, "FOR");
- PrintLabelsIndented(NULL, node->labels());
- if (node->init()) PrintIndentedVisit("INIT", node->init());
- if (node->cond()) PrintIndentedVisit("COND", node->cond());
- PrintIndentedVisit("BODY", node->body());
- if (node->next()) PrintIndentedVisit("NEXT", node->next());
-}
-
-
-void AstPrinter::VisitForInStatement(ForInStatement* node) {
- IndentedScope indent(this, "FOR IN");
- PrintIndentedVisit("FOR", node->each());
- PrintIndentedVisit("IN", node->enumerable());
- PrintIndentedVisit("BODY", node->body());
-}
-
-
-void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
- IndentedScope indent(this, "TRY CATCH");
- PrintIndentedVisit("TRY", node->try_block());
- PrintLiteralWithModeIndented("CATCHVAR",
- node->variable(),
- node->variable()->name());
- PrintIndentedVisit("CATCH", node->catch_block());
-}
-
-
-void AstPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
- IndentedScope indent(this, "TRY FINALLY");
- PrintIndentedVisit("TRY", node->try_block());
- PrintIndentedVisit("FINALLY", node->finally_block());
-}
-
-
-void AstPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
- IndentedScope indent(this, "DEBUGGER");
-}
-
-
-void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
- IndentedScope indent(this, "FUNC LITERAL");
- PrintLiteralIndented("NAME", node->name(), false);
- PrintLiteralIndented("INFERRED NAME", node->inferred_name(), false);
- PrintParameters(node->scope());
- // We don't want to see the function literal in this case: it
- // will be printed via PrintProgram when the code for it is
- // generated.
- // PrintStatements(node->body());
-}
-
-
-void AstPrinter::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- IndentedScope indent(this, "FUNC LITERAL");
- PrintLiteralIndented("SHARED INFO", node->shared_function_info(), true);
-}
-
-
-void AstPrinter::VisitConditional(Conditional* node) {
- IndentedScope indent(this, "CONDITIONAL");
- PrintIndentedVisit("?", node->condition());
- PrintIndentedVisit("THEN", node->then_expression());
- PrintIndentedVisit("ELSE", node->else_expression());
-}
-
-
-void AstPrinter::VisitLiteral(Literal* node) {
- PrintLiteralIndented("LITERAL", node->handle(), true);
-}
-
-
-void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
- IndentedScope indent(this, "REGEXP LITERAL");
- PrintLiteralIndented("PATTERN", node->pattern(), false);
- PrintLiteralIndented("FLAGS", node->flags(), false);
-}
-
-
-void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
- IndentedScope indent(this, "OBJ LITERAL");
- for (int i = 0; i < node->properties()->length(); i++) {
- const char* prop_kind = NULL;
- switch (node->properties()->at(i)->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- prop_kind = "PROPERTY - CONSTANT";
- break;
- case ObjectLiteral::Property::COMPUTED:
- prop_kind = "PROPERTY - COMPUTED";
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- prop_kind = "PROPERTY - MATERIALIZED_LITERAL";
- break;
- case ObjectLiteral::Property::PROTOTYPE:
- prop_kind = "PROPERTY - PROTOTYPE";
- break;
- case ObjectLiteral::Property::GETTER:
- prop_kind = "PROPERTY - GETTER";
- break;
- case ObjectLiteral::Property::SETTER:
- prop_kind = "PROPERTY - SETTER";
- break;
- default:
- UNREACHABLE();
- }
- IndentedScope prop(this, prop_kind);
- PrintIndentedVisit("KEY", node->properties()->at(i)->key());
- PrintIndentedVisit("VALUE", node->properties()->at(i)->value());
- }
-}
-
-
-void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
- IndentedScope indent(this, "ARRAY LITERAL");
- if (node->values()->length() > 0) {
- IndentedScope indent(this, "VALUES");
- for (int i = 0; i < node->values()->length(); i++) {
- Visit(node->values()->at(i));
- }
- }
-}
-
-
-void AstPrinter::VisitVariableProxy(VariableProxy* node) {
- Variable* var = node->var();
- EmbeddedVector<char, 128> buf;
- int pos = OS::SNPrintF(buf, "VAR PROXY");
- switch (var->location()) {
- case Variable::UNALLOCATED:
- break;
- case Variable::PARAMETER:
- OS::SNPrintF(buf + pos, " parameter[%d]", var->index());
- break;
- case Variable::LOCAL:
- OS::SNPrintF(buf + pos, " local[%d]", var->index());
- break;
- case Variable::CONTEXT:
- OS::SNPrintF(buf + pos, " context[%d]", var->index());
- break;
- case Variable::LOOKUP:
- OS::SNPrintF(buf + pos, " lookup");
- break;
- }
- PrintLiteralWithModeIndented(buf.start(), var, node->name());
-}
-
-
-void AstPrinter::VisitAssignment(Assignment* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
- Visit(node->target());
- Visit(node->value());
-}
-
-
-void AstPrinter::VisitThrow(Throw* node) {
- PrintIndentedVisit("THROW", node->exception());
-}
-
-
-void AstPrinter::VisitProperty(Property* node) {
- IndentedScope indent(this, "PROPERTY", node);
- Visit(node->obj());
- Literal* literal = node->key()->AsLiteral();
- if (literal != NULL && literal->handle()->IsInternalizedString()) {
- PrintLiteralIndented("NAME", literal->handle(), false);
- } else {
- PrintIndentedVisit("KEY", node->key());
- }
-}
-
-
-void AstPrinter::VisitCall(Call* node) {
- IndentedScope indent(this, "CALL");
- Visit(node->expression());
- PrintArguments(node->arguments());
-}
-
-
-void AstPrinter::VisitCallNew(CallNew* node) {
- IndentedScope indent(this, "CALL NEW");
- Visit(node->expression());
- PrintArguments(node->arguments());
-}
-
-
-void AstPrinter::VisitCallRuntime(CallRuntime* node) {
- PrintLiteralIndented("CALL RUNTIME ", node->name(), false);
- IndentedScope indent(this);
- PrintArguments(node->arguments());
-}
-
-
-void AstPrinter::VisitUnaryOperation(UnaryOperation* node) {
- PrintIndentedVisit(Token::Name(node->op()), node->expression());
-}
-
-
-void AstPrinter::VisitCountOperation(CountOperation* node) {
- EmbeddedVector<char, 128> buf;
- OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
- Token::Name(node->op()));
- PrintIndentedVisit(buf.start(), node->expression());
-}
-
-
-void AstPrinter::VisitBinaryOperation(BinaryOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
- Visit(node->left());
- Visit(node->right());
-}
-
-
-void AstPrinter::VisitCompareOperation(CompareOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
- Visit(node->left());
- Visit(node->right());
-}
-
-
-void AstPrinter::VisitThisFunction(ThisFunction* node) {
- IndentedScope indent(this, "THIS-FUNCTION");
-}
-
-#endif // DEBUG
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/prettyprinter.h b/src/3rdparty/v8/src/prettyprinter.h
deleted file mode 100644
index 41175ab..0000000
--- a/src/3rdparty/v8/src/prettyprinter.h
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PRETTYPRINTER_H_
-#define V8_PRETTYPRINTER_H_
-
-#include "allocation.h"
-#include "ast.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-
-class PrettyPrinter: public AstVisitor {
- public:
- PrettyPrinter();
- virtual ~PrettyPrinter();
-
- // The following routines print a node into a string.
- // The result string is alive as long as the PrettyPrinter is alive.
- const char* Print(AstNode* node);
- const char* PrintExpression(FunctionLiteral* program);
- const char* PrintProgram(FunctionLiteral* program);
-
- void Print(const char* format, ...);
-
- // Print a node to stdout.
- static void PrintOut(AstNode* node);
-
- // Individual nodes
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- char* output_; // output string buffer
- int size_; // output_ size
- int pos_; // current printing position
-
- protected:
- void Init();
- const char* Output() const { return output_; }
-
- virtual void PrintStatements(ZoneList<Statement*>* statements);
- void PrintLabels(ZoneStringList* labels);
- virtual void PrintArguments(ZoneList<Expression*>* arguments);
- void PrintLiteral(Handle<Object> value, bool quote);
- void PrintParameters(Scope* scope);
- void PrintDeclarations(ZoneList<Declaration*>* declarations);
- void PrintFunctionLiteral(FunctionLiteral* function);
- void PrintCaseClause(CaseClause* clause);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-};
-
-
-// Prints the AST structure
-class AstPrinter: public PrettyPrinter {
- public:
- AstPrinter();
- virtual ~AstPrinter();
-
- const char* PrintProgram(FunctionLiteral* program);
-
- // Individual nodes
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- friend class IndentedScope;
- void PrintIndented(const char* txt);
- void PrintIndentedVisit(const char* s, AstNode* node);
-
- void PrintStatements(ZoneList<Statement*>* statements);
- void PrintDeclarations(ZoneList<Declaration*>* declarations);
- void PrintParameters(Scope* scope);
- void PrintArguments(ZoneList<Expression*>* arguments);
- void PrintCaseClause(CaseClause* clause);
- void PrintLiteralIndented(const char* info, Handle<Object> value, bool quote);
- void PrintLiteralWithModeIndented(const char* info,
- Variable* var,
- Handle<Object> value);
- void PrintLabelsIndented(const char* info, ZoneStringList* labels);
-
- void inc_indent() { indent_++; }
- void dec_indent() { indent_--; }
-
- int indent_;
-};
-
-#endif // DEBUG
-
-} } // namespace v8::internal
-
-#endif // V8_PRETTYPRINTER_H_
diff --git a/src/3rdparty/v8/src/profile-generator-inl.h b/src/3rdparty/v8/src/profile-generator-inl.h
deleted file mode 100644
index cbdb6dd..0000000
--- a/src/3rdparty/v8/src/profile-generator-inl.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PROFILE_GENERATOR_INL_H_
-#define V8_PROFILE_GENERATOR_INL_H_
-
-#include "profile-generator.h"
-
-namespace v8 {
-namespace internal {
-
-const char* StringsStorage::GetFunctionName(String* name) {
- return GetFunctionName(GetName(name));
-}
-
-
-const char* StringsStorage::GetFunctionName(const char* name) {
- return strlen(name) > 0 ? name : ProfileGenerator::kAnonymousFunctionName;
-}
-
-
-CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
- const char* name,
- const char* resource_name,
- int line_number,
- int security_token_id)
- : tag_(tag),
- name_prefix_(name_prefix),
- name_(name),
- resource_name_(resource_name),
- line_number_(line_number),
- shared_id_(0),
- security_token_id_(security_token_id) {
-}
-
-
-bool CodeEntry::is_js_function_tag(Logger::LogEventsAndTags tag) {
- return tag == Logger::FUNCTION_TAG
- || tag == Logger::LAZY_COMPILE_TAG
- || tag == Logger::SCRIPT_TAG
- || tag == Logger::NATIVE_FUNCTION_TAG
- || tag == Logger::NATIVE_LAZY_COMPILE_TAG
- || tag == Logger::NATIVE_SCRIPT_TAG;
-}
-
-
-ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
- : tree_(tree),
- entry_(entry),
- total_ticks_(0),
- self_ticks_(0),
- children_(CodeEntriesMatch) {
-}
-
-
-CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
- switch (tag) {
- case GC:
- return gc_entry_;
- case JS:
- case COMPILER:
- case PARALLEL_COMPILER:
- // DOM events handlers are reported as OTHER / EXTERNAL entries.
- // To avoid confusing people, let's put all these entries into
- // one bucket.
- case OTHER:
- case EXTERNAL:
- return program_entry_;
- default: return NULL;
- }
-}
-
-} } // namespace v8::internal
-
-#endif // V8_PROFILE_GENERATOR_INL_H_
diff --git a/src/3rdparty/v8/src/profile-generator.cc b/src/3rdparty/v8/src/profile-generator.cc
deleted file mode 100644
index e5b5194..0000000
--- a/src/3rdparty/v8/src/profile-generator.cc
+++ /dev/null
@@ -1,945 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "profile-generator-inl.h"
-
-#include "global-handles.h"
-#include "scopeinfo.h"
-#include "unicode.h"
-#include "zone-inl.h"
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-
-TokenEnumerator::TokenEnumerator()
- : token_locations_(4),
- token_removed_(4) {
-}
-
-
-TokenEnumerator::~TokenEnumerator() {
- Isolate* isolate = Isolate::Current();
- for (int i = 0; i < token_locations_.length(); ++i) {
- if (!token_removed_[i]) {
- isolate->global_handles()->ClearWeakness(token_locations_[i]);
- isolate->global_handles()->Destroy(token_locations_[i]);
- }
- }
-}
-
-
-int TokenEnumerator::GetTokenId(Object* token) {
- Isolate* isolate = Isolate::Current();
- if (token == NULL) return TokenEnumerator::kNoSecurityToken;
- for (int i = 0; i < token_locations_.length(); ++i) {
- if (*token_locations_[i] == token && !token_removed_[i]) return i;
- }
- Handle<Object> handle = isolate->global_handles()->Create(token);
- // handle.location() points to a memory cell holding a pointer
- // to a token object in the V8's heap.
- isolate->global_handles()->MakeWeak(handle.location(),
- this,
- NULL,
- TokenRemovedCallback);
- token_locations_.Add(handle.location());
- token_removed_.Add(false);
- return token_locations_.length() - 1;
-}
-
-
-void TokenEnumerator::TokenRemovedCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value> handle,
- void* parameter) {
- reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved(
- Utils::OpenHandle(*handle).location());
- handle.Dispose(isolate);
-}
-
-
-void TokenEnumerator::TokenRemoved(Object** token_location) {
- for (int i = 0; i < token_locations_.length(); ++i) {
- if (token_locations_[i] == token_location && !token_removed_[i]) {
- token_removed_[i] = true;
- return;
- }
- }
-}
-
-
-StringsStorage::StringsStorage()
- : names_(StringsMatch) {
-}
-
-
-StringsStorage::~StringsStorage() {
- for (HashMap::Entry* p = names_.Start();
- p != NULL;
- p = names_.Next(p)) {
- DeleteArray(reinterpret_cast<const char*>(p->value));
- }
-}
-
-
-const char* StringsStorage::GetCopy(const char* src) {
- int len = static_cast<int>(strlen(src));
- Vector<char> dst = Vector<char>::New(len + 1);
- OS::StrNCpy(dst, src, len);
- dst[len] = '\0';
- uint32_t hash =
- StringHasher::HashSequentialString(dst.start(), len, HEAP->HashSeed());
- return AddOrDisposeString(dst.start(), hash);
-}
-
-
-const char* StringsStorage::GetFormatted(const char* format, ...) {
- va_list args;
- va_start(args, format);
- const char* result = GetVFormatted(format, args);
- va_end(args);
- return result;
-}
-
-
-const char* StringsStorage::AddOrDisposeString(char* str, uint32_t hash) {
- HashMap::Entry* cache_entry = names_.Lookup(str, hash, true);
- if (cache_entry->value == NULL) {
- // New entry added.
- cache_entry->value = str;
- } else {
- DeleteArray(str);
- }
- return reinterpret_cast<const char*>(cache_entry->value);
-}
-
-
-const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
- Vector<char> str = Vector<char>::New(1024);
- int len = OS::VSNPrintF(str, format, args);
- if (len == -1) {
- DeleteArray(str.start());
- return format;
- }
- uint32_t hash = StringHasher::HashSequentialString(
- str.start(), len, HEAP->HashSeed());
- return AddOrDisposeString(str.start(), hash);
-}
-
-
-const char* StringsStorage::GetName(String* name) {
- if (name->IsString()) {
- int length = Min(kMaxNameSize, name->length());
- SmartArrayPointer<char> data =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length);
- uint32_t hash = StringHasher::HashSequentialString(
- *data, length, name->GetHeap()->HashSeed());
- return AddOrDisposeString(data.Detach(), hash);
- }
- return "";
-}
-
-
-const char* StringsStorage::GetName(int index) {
- return GetFormatted("%d", index);
-}
-
-
-size_t StringsStorage::GetUsedMemorySize() const {
- size_t size = sizeof(*this);
- size += sizeof(HashMap::Entry) * names_.capacity();
- for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
- size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
- }
- return size;
-}
-
-const char* const CodeEntry::kEmptyNamePrefix = "";
-
-
-void CodeEntry::CopyData(const CodeEntry& source) {
- tag_ = source.tag_;
- name_prefix_ = source.name_prefix_;
- name_ = source.name_;
- resource_name_ = source.resource_name_;
- line_number_ = source.line_number_;
-}
-
-
-uint32_t CodeEntry::GetCallUid() const {
- uint32_t hash = ComputeIntegerHash(tag_, v8::internal::kZeroHashSeed);
- if (shared_id_ != 0) {
- hash ^= ComputeIntegerHash(static_cast<uint32_t>(shared_id_),
- v8::internal::kZeroHashSeed);
- } else {
- hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
- v8::internal::kZeroHashSeed);
- hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
- v8::internal::kZeroHashSeed);
- hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
- v8::internal::kZeroHashSeed);
- hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
- }
- return hash;
-}
-
-
-bool CodeEntry::IsSameAs(CodeEntry* entry) const {
- return this == entry
- || (tag_ == entry->tag_
- && shared_id_ == entry->shared_id_
- && (shared_id_ != 0
- || (name_prefix_ == entry->name_prefix_
- && name_ == entry->name_
- && resource_name_ == entry->resource_name_
- && line_number_ == entry->line_number_)));
-}
-
-
-ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
- HashMap::Entry* map_entry =
- children_.Lookup(entry, CodeEntryHash(entry), false);
- return map_entry != NULL ?
- reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
-}
-
-
-ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
- HashMap::Entry* map_entry =
- children_.Lookup(entry, CodeEntryHash(entry), true);
- if (map_entry->value == NULL) {
- // New node added.
- ProfileNode* new_node = new ProfileNode(tree_, entry);
- map_entry->value = new_node;
- children_list_.Add(new_node);
- }
- return reinterpret_cast<ProfileNode*>(map_entry->value);
-}
-
-
-double ProfileNode::GetSelfMillis() const {
- return tree_->TicksToMillis(self_ticks_);
-}
-
-
-double ProfileNode::GetTotalMillis() const {
- return tree_->TicksToMillis(total_ticks_);
-}
-
-
-void ProfileNode::Print(int indent) {
- OS::Print("%5u %5u %*c %s%s [%d]",
- total_ticks_, self_ticks_,
- indent, ' ',
- entry_->name_prefix(),
- entry_->name(),
- entry_->security_token_id());
- if (entry_->resource_name()[0] != '\0')
- OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
- OS::Print("\n");
- for (HashMap::Entry* p = children_.Start();
- p != NULL;
- p = children_.Next(p)) {
- reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
- }
-}
-
-
-class DeleteNodesCallback {
- public:
- void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
-
- void AfterAllChildrenTraversed(ProfileNode* node) {
- delete node;
- }
-
- void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
-};
-
-
-ProfileTree::ProfileTree()
- : root_entry_(Logger::FUNCTION_TAG,
- "",
- "(root)",
- "",
- 0,
- TokenEnumerator::kNoSecurityToken),
- root_(new ProfileNode(this, &root_entry_)) {
-}
-
-
-ProfileTree::~ProfileTree() {
- DeleteNodesCallback cb;
- TraverseDepthFirst(&cb);
-}
-
-
-void ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) {
- ProfileNode* node = root_;
- for (CodeEntry** entry = path.start() + path.length() - 1;
- entry != path.start() - 1;
- --entry) {
- if (*entry != NULL) {
- node = node->FindOrAddChild(*entry);
- }
- }
- node->IncrementSelfTicks();
-}
-
-
-void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
- ProfileNode* node = root_;
- for (CodeEntry** entry = path.start();
- entry != path.start() + path.length();
- ++entry) {
- if (*entry != NULL) {
- node = node->FindOrAddChild(*entry);
- }
- }
- node->IncrementSelfTicks();
-}
-
-
-struct NodesPair {
- NodesPair(ProfileNode* src, ProfileNode* dst)
- : src(src), dst(dst) { }
- ProfileNode* src;
- ProfileNode* dst;
-};
-
-
-class FilteredCloneCallback {
- public:
- FilteredCloneCallback(ProfileNode* dst_root, int security_token_id)
- : stack_(10),
- security_token_id_(security_token_id) {
- stack_.Add(NodesPair(NULL, dst_root));
- }
-
- void BeforeTraversingChild(ProfileNode* parent, ProfileNode* child) {
- if (IsTokenAcceptable(child->entry()->security_token_id(),
- parent->entry()->security_token_id())) {
- ProfileNode* clone = stack_.last().dst->FindOrAddChild(child->entry());
- clone->IncreaseSelfTicks(child->self_ticks());
- stack_.Add(NodesPair(child, clone));
- } else {
- // Attribute ticks to parent node.
- stack_.last().dst->IncreaseSelfTicks(child->self_ticks());
- }
- }
-
- void AfterAllChildrenTraversed(ProfileNode* parent) { }
-
- void AfterChildTraversed(ProfileNode*, ProfileNode* child) {
- if (stack_.last().src == child) {
- stack_.RemoveLast();
- }
- }
-
- private:
- bool IsTokenAcceptable(int token, int parent_token) {
- if (token == TokenEnumerator::kNoSecurityToken
- || token == security_token_id_) return true;
- if (token == TokenEnumerator::kInheritsSecurityToken) {
- ASSERT(parent_token != TokenEnumerator::kInheritsSecurityToken);
- return parent_token == TokenEnumerator::kNoSecurityToken
- || parent_token == security_token_id_;
- }
- return false;
- }
-
- List<NodesPair> stack_;
- int security_token_id_;
-};
-
-void ProfileTree::FilteredClone(ProfileTree* src, int security_token_id) {
- ms_to_ticks_scale_ = src->ms_to_ticks_scale_;
- FilteredCloneCallback cb(root_, security_token_id);
- src->TraverseDepthFirst(&cb);
- CalculateTotalTicks();
-}
-
-
-void ProfileTree::SetTickRatePerMs(double ticks_per_ms) {
- ms_to_ticks_scale_ = ticks_per_ms > 0 ? 1.0 / ticks_per_ms : 1.0;
-}
-
-
-class Position {
- public:
- explicit Position(ProfileNode* node)
- : node(node), child_idx_(0) { }
- INLINE(ProfileNode* current_child()) {
- return node->children()->at(child_idx_);
- }
- INLINE(bool has_current_child()) {
- return child_idx_ < node->children()->length();
- }
- INLINE(void next_child()) { ++child_idx_; }
-
- ProfileNode* node;
- private:
- int child_idx_;
-};
-
-
-// Non-recursive implementation of a depth-first post-order tree traversal.
-template <typename Callback>
-void ProfileTree::TraverseDepthFirst(Callback* callback) {
- List<Position> stack(10);
- stack.Add(Position(root_));
- while (stack.length() > 0) {
- Position& current = stack.last();
- if (current.has_current_child()) {
- callback->BeforeTraversingChild(current.node, current.current_child());
- stack.Add(Position(current.current_child()));
- } else {
- callback->AfterAllChildrenTraversed(current.node);
- if (stack.length() > 1) {
- Position& parent = stack[stack.length() - 2];
- callback->AfterChildTraversed(parent.node, current.node);
- parent.next_child();
- }
- // Remove child from the stack.
- stack.RemoveLast();
- }
- }
-}
-
-
-class CalculateTotalTicksCallback {
- public:
- void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
-
- void AfterAllChildrenTraversed(ProfileNode* node) {
- node->IncreaseTotalTicks(node->self_ticks());
- }
-
- void AfterChildTraversed(ProfileNode* parent, ProfileNode* child) {
- parent->IncreaseTotalTicks(child->total_ticks());
- }
-};
-
-
-void ProfileTree::CalculateTotalTicks() {
- CalculateTotalTicksCallback cb;
- TraverseDepthFirst(&cb);
-}
-
-
-void ProfileTree::ShortPrint() {
- OS::Print("root: %u %u %.2fms %.2fms\n",
- root_->total_ticks(), root_->self_ticks(),
- root_->GetTotalMillis(), root_->GetSelfMillis());
-}
-
-
-void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
- top_down_.AddPathFromEnd(path);
- bottom_up_.AddPathFromStart(path);
-}
-
-
-void CpuProfile::CalculateTotalTicks() {
- top_down_.CalculateTotalTicks();
- bottom_up_.CalculateTotalTicks();
-}
-
-
-void CpuProfile::SetActualSamplingRate(double actual_sampling_rate) {
- top_down_.SetTickRatePerMs(actual_sampling_rate);
- bottom_up_.SetTickRatePerMs(actual_sampling_rate);
-}
-
-
-CpuProfile* CpuProfile::FilteredClone(int security_token_id) {
- ASSERT(security_token_id != TokenEnumerator::kNoSecurityToken);
- CpuProfile* clone = new CpuProfile(title_, uid_);
- clone->top_down_.FilteredClone(&top_down_, security_token_id);
- clone->bottom_up_.FilteredClone(&bottom_up_, security_token_id);
- return clone;
-}
-
-
-void CpuProfile::ShortPrint() {
- OS::Print("top down ");
- top_down_.ShortPrint();
- OS::Print("bottom up ");
- bottom_up_.ShortPrint();
-}
-
-
-void CpuProfile::Print() {
- OS::Print("[Top down]:\n");
- top_down_.Print();
- OS::Print("[Bottom up]:\n");
- bottom_up_.Print();
-}
-
-
-CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
-const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
-
-
-void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
- DeleteAllCoveredCode(addr, addr + size);
- CodeTree::Locator locator;
- tree_.Insert(addr, &locator);
- locator.set_value(CodeEntryInfo(entry, size));
-}
-
-
-void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
- List<Address> to_delete;
- Address addr = end - 1;
- while (addr >= start) {
- CodeTree::Locator locator;
- if (!tree_.FindGreatestLessThan(addr, &locator)) break;
- Address start2 = locator.key(), end2 = start2 + locator.value().size;
- if (start2 < end && start < end2) to_delete.Add(start2);
- addr = start2 - 1;
- }
- for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
-}
-
-
-CodeEntry* CodeMap::FindEntry(Address addr) {
- CodeTree::Locator locator;
- if (tree_.FindGreatestLessThan(addr, &locator)) {
- // locator.key() <= addr. Need to check that addr is within entry.
- const CodeEntryInfo& entry = locator.value();
- if (addr < (locator.key() + entry.size))
- return entry.entry;
- }
- return NULL;
-}
-
-
-int CodeMap::GetSharedId(Address addr) {
- CodeTree::Locator locator;
- // For shared function entries, 'size' field is used to store their IDs.
- if (tree_.Find(addr, &locator)) {
- const CodeEntryInfo& entry = locator.value();
- ASSERT(entry.entry == kSharedFunctionCodeEntry);
- return entry.size;
- } else {
- tree_.Insert(addr, &locator);
- int id = next_shared_id_++;
- locator.set_value(CodeEntryInfo(kSharedFunctionCodeEntry, id));
- return id;
- }
-}
-
-
-void CodeMap::MoveCode(Address from, Address to) {
- if (from == to) return;
- CodeTree::Locator locator;
- if (!tree_.Find(from, &locator)) return;
- CodeEntryInfo entry = locator.value();
- tree_.Remove(from);
- AddCode(to, entry.entry, entry.size);
-}
-
-
-void CodeMap::CodeTreePrinter::Call(
- const Address& key, const CodeMap::CodeEntryInfo& value) {
- OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
-}
-
-
-void CodeMap::Print() {
- CodeTreePrinter printer;
- tree_.ForEach(&printer);
-}
-
-
-CpuProfilesCollection::CpuProfilesCollection()
- : profiles_uids_(UidsMatch),
- current_profiles_semaphore_(OS::CreateSemaphore(1)) {
- // Create list of unabridged profiles.
- profiles_by_token_.Add(new List<CpuProfile*>());
-}
-
-
-static void DeleteCodeEntry(CodeEntry** entry_ptr) {
- delete *entry_ptr;
-}
-
-static void DeleteCpuProfile(CpuProfile** profile_ptr) {
- delete *profile_ptr;
-}
-
-static void DeleteProfilesList(List<CpuProfile*>** list_ptr) {
- if (*list_ptr != NULL) {
- (*list_ptr)->Iterate(DeleteCpuProfile);
- delete *list_ptr;
- }
-}
-
-CpuProfilesCollection::~CpuProfilesCollection() {
- delete current_profiles_semaphore_;
- current_profiles_.Iterate(DeleteCpuProfile);
- detached_profiles_.Iterate(DeleteCpuProfile);
- profiles_by_token_.Iterate(DeleteProfilesList);
- code_entries_.Iterate(DeleteCodeEntry);
-}
-
-
-bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid) {
- ASSERT(uid > 0);
- current_profiles_semaphore_->Wait();
- if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
- current_profiles_semaphore_->Signal();
- return false;
- }
- for (int i = 0; i < current_profiles_.length(); ++i) {
- if (strcmp(current_profiles_[i]->title(), title) == 0) {
- // Ignore attempts to start profile with the same title.
- current_profiles_semaphore_->Signal();
- return false;
- }
- }
- current_profiles_.Add(new CpuProfile(title, uid));
- current_profiles_semaphore_->Signal();
- return true;
-}
-
-
-bool CpuProfilesCollection::StartProfiling(String* title, unsigned uid) {
- return StartProfiling(GetName(title), uid);
-}
-
-
-CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
- const char* title,
- double actual_sampling_rate) {
- const int title_len = StrLength(title);
- CpuProfile* profile = NULL;
- current_profiles_semaphore_->Wait();
- for (int i = current_profiles_.length() - 1; i >= 0; --i) {
- if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
- profile = current_profiles_.Remove(i);
- break;
- }
- }
- current_profiles_semaphore_->Signal();
-
- if (profile != NULL) {
- profile->CalculateTotalTicks();
- profile->SetActualSamplingRate(actual_sampling_rate);
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- unabridged_list->Add(profile);
- HashMap::Entry* entry =
- profiles_uids_.Lookup(reinterpret_cast<void*>(profile->uid()),
- static_cast<uint32_t>(profile->uid()),
- true);
- ASSERT(entry->value == NULL);
- entry->value = reinterpret_cast<void*>(unabridged_list->length() - 1);
- return GetProfile(security_token_id, profile->uid());
- }
- return NULL;
-}
-
-
-CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
- unsigned uid) {
- int index = GetProfileIndex(uid);
- if (index < 0) return NULL;
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- if (security_token_id == TokenEnumerator::kNoSecurityToken) {
- return unabridged_list->at(index);
- }
- List<CpuProfile*>* list = GetProfilesList(security_token_id);
- if (list->at(index) == NULL) {
- (*list)[index] =
- unabridged_list->at(index)->FilteredClone(security_token_id);
- }
- return list->at(index);
-}
-
-
-int CpuProfilesCollection::GetProfileIndex(unsigned uid) {
- HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid),
- false);
- return entry != NULL ?
- static_cast<int>(reinterpret_cast<intptr_t>(entry->value)) : -1;
-}
-
-
-bool CpuProfilesCollection::IsLastProfile(const char* title) {
- // Called from VM thread, and only it can mutate the list,
- // so no locking is needed here.
- if (current_profiles_.length() != 1) return false;
- return StrLength(title) == 0
- || strcmp(current_profiles_[0]->title(), title) == 0;
-}
-
-
-void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
- // Called from VM thread for a completed profile.
- unsigned uid = profile->uid();
- int index = GetProfileIndex(uid);
- if (index < 0) {
- detached_profiles_.RemoveElement(profile);
- return;
- }
- profiles_uids_.Remove(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid));
- // Decrement all indexes above the deleted one.
- for (HashMap::Entry* p = profiles_uids_.Start();
- p != NULL;
- p = profiles_uids_.Next(p)) {
- intptr_t p_index = reinterpret_cast<intptr_t>(p->value);
- if (p_index > index) {
- p->value = reinterpret_cast<void*>(p_index - 1);
- }
- }
- for (int i = 0; i < profiles_by_token_.length(); ++i) {
- List<CpuProfile*>* list = profiles_by_token_[i];
- if (list != NULL && index < list->length()) {
- // Move all filtered clones into detached_profiles_,
- // so we can know that they are still in use.
- CpuProfile* cloned_profile = list->Remove(index);
- if (cloned_profile != NULL && cloned_profile != profile) {
- detached_profiles_.Add(cloned_profile);
- }
- }
- }
-}
-
-
-int CpuProfilesCollection::TokenToIndex(int security_token_id) {
- ASSERT(TokenEnumerator::kNoSecurityToken == -1);
- return security_token_id + 1; // kNoSecurityToken -> 0, 0 -> 1, ...
-}
-
-
-List<CpuProfile*>* CpuProfilesCollection::GetProfilesList(
- int security_token_id) {
- const int index = TokenToIndex(security_token_id);
- const int lists_to_add = index - profiles_by_token_.length() + 1;
- if (lists_to_add > 0) profiles_by_token_.AddBlock(NULL, lists_to_add);
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- const int current_count = unabridged_list->length();
- if (profiles_by_token_[index] == NULL) {
- profiles_by_token_[index] = new List<CpuProfile*>(current_count);
- }
- List<CpuProfile*>* list = profiles_by_token_[index];
- const int profiles_to_add = current_count - list->length();
- if (profiles_to_add > 0) list->AddBlock(NULL, profiles_to_add);
- return list;
-}
-
-
-List<CpuProfile*>* CpuProfilesCollection::Profiles(int security_token_id) {
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- if (security_token_id == TokenEnumerator::kNoSecurityToken) {
- return unabridged_list;
- }
- List<CpuProfile*>* list = GetProfilesList(security_token_id);
- const int current_count = unabridged_list->length();
- for (int i = 0; i < current_count; ++i) {
- if (list->at(i) == NULL) {
- (*list)[i] = unabridged_list->at(i)->FilteredClone(security_token_id);
- }
- }
- return list;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- String* name,
- String* resource_name,
- int line_number) {
- CodeEntry* entry = new CodeEntry(tag,
- CodeEntry::kEmptyNamePrefix,
- GetFunctionName(name),
- GetName(resource_name),
- line_number,
- TokenEnumerator::kNoSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name) {
- CodeEntry* entry = new CodeEntry(tag,
- CodeEntry::kEmptyNamePrefix,
- GetFunctionName(name),
- "",
- v8::CpuProfileNode::kNoLineNumberInfo,
- TokenEnumerator::kNoSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
- String* name) {
- CodeEntry* entry = new CodeEntry(tag,
- name_prefix,
- GetName(name),
- "",
- v8::CpuProfileNode::kNoLineNumberInfo,
- TokenEnumerator::kInheritsSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- int args_count) {
- CodeEntry* entry = new CodeEntry(tag,
- "args_count: ",
- GetName(args_count),
- "",
- v8::CpuProfileNode::kNoLineNumberInfo,
- TokenEnumerator::kInheritsSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-void CpuProfilesCollection::AddPathToCurrentProfiles(
- const Vector<CodeEntry*>& path) {
- // As starting / stopping profiles is rare relatively to this
- // method, we don't bother minimizing the duration of lock holding,
- // e.g. copying contents of the list to a local vector.
- current_profiles_semaphore_->Wait();
- for (int i = 0; i < current_profiles_.length(); ++i) {
- current_profiles_[i]->AddPath(path);
- }
- current_profiles_semaphore_->Signal();
-}
-
-
-void SampleRateCalculator::Tick() {
- if (--wall_time_query_countdown_ == 0)
- UpdateMeasurements(OS::TimeCurrentMillis());
-}
-
-
-void SampleRateCalculator::UpdateMeasurements(double current_time) {
- if (measurements_count_++ != 0) {
- const double measured_ticks_per_ms =
- (kWallTimeQueryIntervalMs * ticks_per_ms_) /
- (current_time - last_wall_time_);
- // Update the average value.
- ticks_per_ms_ +=
- (measured_ticks_per_ms - ticks_per_ms_) / measurements_count_;
- // Update the externally accessible result.
- result_ = static_cast<AtomicWord>(ticks_per_ms_ * kResultScale);
- }
- last_wall_time_ = current_time;
- wall_time_query_countdown_ =
- static_cast<unsigned>(kWallTimeQueryIntervalMs * ticks_per_ms_);
-}
-
-
-const char* const ProfileGenerator::kAnonymousFunctionName =
- "(anonymous function)";
-const char* const ProfileGenerator::kProgramEntryName =
- "(program)";
-const char* const ProfileGenerator::kGarbageCollectorEntryName =
- "(garbage collector)";
-
-
-ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
- : profiles_(profiles),
- program_entry_(
- profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
- gc_entry_(
- profiles->NewCodeEntry(Logger::BUILTIN_TAG,
- kGarbageCollectorEntryName)) {
-}
-
-
-void ProfileGenerator::RecordTickSample(const TickSample& sample) {
- // Allocate space for stack frames + pc + function + vm-state.
- ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
- // As actual number of decoded code entries may vary, initialize
- // entries vector with NULL values.
- CodeEntry** entry = entries.start();
- memset(entry, 0, entries.length() * sizeof(*entry));
- if (sample.pc != NULL) {
- *entry++ = code_map_.FindEntry(sample.pc);
-
- if (sample.has_external_callback) {
- // Don't use PC when in external callback code, as it can point
- // inside callback's code, and we will erroneously report
- // that a callback calls itself.
- *(entries.start()) = NULL;
- *entry++ = code_map_.FindEntry(sample.external_callback);
- } else if (sample.tos != NULL) {
- // Find out, if top of stack was pointing inside a JS function
- // meaning that we have encountered a frameless invocation.
- *entry = code_map_.FindEntry(sample.tos);
- if (*entry != NULL && !(*entry)->is_js_function()) {
- *entry = NULL;
- }
- entry++;
- }
-
- for (const Address* stack_pos = sample.stack,
- *stack_end = stack_pos + sample.frames_count;
- stack_pos != stack_end;
- ++stack_pos) {
- *entry++ = code_map_.FindEntry(*stack_pos);
- }
- }
-
- if (FLAG_prof_browser_mode) {
- bool no_symbolized_entries = true;
- for (CodeEntry** e = entries.start(); e != entry; ++e) {
- if (*e != NULL) {
- no_symbolized_entries = false;
- break;
- }
- }
- // If no frames were symbolized, put the VM state entry in.
- if (no_symbolized_entries) {
- *entry++ = EntryForVMState(sample.state);
- }
- }
-
- profiles_->AddPathToCurrentProfiles(entries);
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/profile-generator.h b/src/3rdparty/v8/src/profile-generator.h
deleted file mode 100644
index 8c6c71a..0000000
--- a/src/3rdparty/v8/src/profile-generator.h
+++ /dev/null
@@ -1,452 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PROFILE_GENERATOR_H_
-#define V8_PROFILE_GENERATOR_H_
-
-#include "allocation.h"
-#include "hashmap.h"
-#include "../include/v8-profiler.h"
-
-namespace v8 {
-namespace internal {
-
-class TokenEnumerator {
- public:
- TokenEnumerator();
- ~TokenEnumerator();
- int GetTokenId(Object* token);
-
- static const int kNoSecurityToken = -1;
- static const int kInheritsSecurityToken = -2;
-
- private:
- static void TokenRemovedCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value> handle,
- void* parameter);
- void TokenRemoved(Object** token_location);
-
- List<Object**> token_locations_;
- List<bool> token_removed_;
-
- friend class TokenEnumeratorTester;
-
- DISALLOW_COPY_AND_ASSIGN(TokenEnumerator);
-};
-
-
-// Provides a storage of strings allocated in C++ heap, to hold them
-// forever, even if they disappear from JS heap or external storage.
-class StringsStorage {
- public:
- StringsStorage();
- ~StringsStorage();
-
- const char* GetCopy(const char* src);
- const char* GetFormatted(const char* format, ...);
- const char* GetVFormatted(const char* format, va_list args);
- const char* GetName(String* name);
- const char* GetName(int index);
- inline const char* GetFunctionName(String* name);
- inline const char* GetFunctionName(const char* name);
- size_t GetUsedMemorySize() const;
-
- private:
- static const int kMaxNameSize = 1024;
-
- INLINE(static bool StringsMatch(void* key1, void* key2)) {
- return strcmp(reinterpret_cast<char*>(key1),
- reinterpret_cast<char*>(key2)) == 0;
- }
- const char* AddOrDisposeString(char* str, uint32_t hash);
-
- // Mapping of strings by String::Hash to const char* strings.
- HashMap names_;
-
- DISALLOW_COPY_AND_ASSIGN(StringsStorage);
-};
-
-
-class CodeEntry {
- public:
- // CodeEntry doesn't own name strings, just references them.
- INLINE(CodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
- const char* name,
- const char* resource_name,
- int line_number,
- int security_token_id));
-
- INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); }
- INLINE(const char* name_prefix() const) { return name_prefix_; }
- INLINE(bool has_name_prefix() const) { return name_prefix_[0] != '\0'; }
- INLINE(const char* name() const) { return name_; }
- INLINE(const char* resource_name() const) { return resource_name_; }
- INLINE(int line_number() const) { return line_number_; }
- INLINE(int shared_id() const) { return shared_id_; }
- INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; }
- INLINE(int security_token_id() const) { return security_token_id_; }
-
- INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
-
- void CopyData(const CodeEntry& source);
- uint32_t GetCallUid() const;
- bool IsSameAs(CodeEntry* entry) const;
-
- static const char* const kEmptyNamePrefix;
-
- private:
- Logger::LogEventsAndTags tag_;
- const char* name_prefix_;
- const char* name_;
- const char* resource_name_;
- int line_number_;
- int shared_id_;
- int security_token_id_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeEntry);
-};
-
-
-class ProfileTree;
-
-class ProfileNode {
- public:
- INLINE(ProfileNode(ProfileTree* tree, CodeEntry* entry));
-
- ProfileNode* FindChild(CodeEntry* entry);
- ProfileNode* FindOrAddChild(CodeEntry* entry);
- INLINE(void IncrementSelfTicks()) { ++self_ticks_; }
- INLINE(void IncreaseSelfTicks(unsigned amount)) { self_ticks_ += amount; }
- INLINE(void IncreaseTotalTicks(unsigned amount)) { total_ticks_ += amount; }
-
- INLINE(CodeEntry* entry() const) { return entry_; }
- INLINE(unsigned self_ticks() const) { return self_ticks_; }
- INLINE(unsigned total_ticks() const) { return total_ticks_; }
- INLINE(const List<ProfileNode*>* children() const) { return &children_list_; }
- double GetSelfMillis() const;
- double GetTotalMillis() const;
-
- void Print(int indent);
-
- private:
- INLINE(static bool CodeEntriesMatch(void* entry1, void* entry2)) {
- return reinterpret_cast<CodeEntry*>(entry1)->IsSameAs(
- reinterpret_cast<CodeEntry*>(entry2));
- }
-
- INLINE(static uint32_t CodeEntryHash(CodeEntry* entry)) {
- return entry->GetCallUid();
- }
-
- ProfileTree* tree_;
- CodeEntry* entry_;
- unsigned total_ticks_;
- unsigned self_ticks_;
- // Mapping from CodeEntry* to ProfileNode*
- HashMap children_;
- List<ProfileNode*> children_list_;
-
- DISALLOW_COPY_AND_ASSIGN(ProfileNode);
-};
-
-
-class ProfileTree {
- public:
- ProfileTree();
- ~ProfileTree();
-
- void AddPathFromEnd(const Vector<CodeEntry*>& path);
- void AddPathFromStart(const Vector<CodeEntry*>& path);
- void CalculateTotalTicks();
- void FilteredClone(ProfileTree* src, int security_token_id);
-
- double TicksToMillis(unsigned ticks) const {
- return ticks * ms_to_ticks_scale_;
- }
- ProfileNode* root() const { return root_; }
- void SetTickRatePerMs(double ticks_per_ms);
-
- void ShortPrint();
- void Print() {
- root_->Print(0);
- }
-
- private:
- template <typename Callback>
- void TraverseDepthFirst(Callback* callback);
-
- CodeEntry root_entry_;
- ProfileNode* root_;
- double ms_to_ticks_scale_;
-
- DISALLOW_COPY_AND_ASSIGN(ProfileTree);
-};
-
-
-class CpuProfile {
- public:
- CpuProfile(const char* title, unsigned uid)
- : title_(title), uid_(uid) { }
-
- // Add pc -> ... -> main() call path to the profile.
- void AddPath(const Vector<CodeEntry*>& path);
- void CalculateTotalTicks();
- void SetActualSamplingRate(double actual_sampling_rate);
- CpuProfile* FilteredClone(int security_token_id);
-
- INLINE(const char* title() const) { return title_; }
- INLINE(unsigned uid() const) { return uid_; }
- INLINE(const ProfileTree* top_down() const) { return &top_down_; }
- INLINE(const ProfileTree* bottom_up() const) { return &bottom_up_; }
-
- void UpdateTicksScale();
-
- void ShortPrint();
- void Print();
-
- private:
- const char* title_;
- unsigned uid_;
- ProfileTree top_down_;
- ProfileTree bottom_up_;
-
- DISALLOW_COPY_AND_ASSIGN(CpuProfile);
-};
-
-
-class CodeMap {
- public:
- CodeMap() : next_shared_id_(1) { }
- void AddCode(Address addr, CodeEntry* entry, unsigned size);
- void MoveCode(Address from, Address to);
- CodeEntry* FindEntry(Address addr);
- int GetSharedId(Address addr);
-
- void Print();
-
- private:
- struct CodeEntryInfo {
- CodeEntryInfo(CodeEntry* an_entry, unsigned a_size)
- : entry(an_entry), size(a_size) { }
- CodeEntry* entry;
- unsigned size;
- };
-
- struct CodeTreeConfig {
- typedef Address Key;
- typedef CodeEntryInfo Value;
- static const Key kNoKey;
- static const Value NoValue() { return CodeEntryInfo(NULL, 0); }
- static int Compare(const Key& a, const Key& b) {
- return a < b ? -1 : (a > b ? 1 : 0);
- }
- };
- typedef SplayTree<CodeTreeConfig> CodeTree;
-
- class CodeTreePrinter {
- public:
- void Call(const Address& key, const CodeEntryInfo& value);
- };
-
- void DeleteAllCoveredCode(Address start, Address end);
-
- // Fake CodeEntry pointer to distinguish shared function entries.
- static CodeEntry* const kSharedFunctionCodeEntry;
-
- CodeTree tree_;
- int next_shared_id_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeMap);
-};
-
-
-class CpuProfilesCollection {
- public:
- CpuProfilesCollection();
- ~CpuProfilesCollection();
-
- bool StartProfiling(const char* title, unsigned uid);
- bool StartProfiling(String* title, unsigned uid);
- CpuProfile* StopProfiling(int security_token_id,
- const char* title,
- double actual_sampling_rate);
- List<CpuProfile*>* Profiles(int security_token_id);
- const char* GetName(String* name) {
- return function_and_resource_names_.GetName(name);
- }
- const char* GetName(int args_count) {
- return function_and_resource_names_.GetName(args_count);
- }
- CpuProfile* GetProfile(int security_token_id, unsigned uid);
- bool IsLastProfile(const char* title);
- void RemoveProfile(CpuProfile* profile);
- bool HasDetachedProfiles() { return detached_profiles_.length() > 0; }
-
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- String* name, String* resource_name, int line_number);
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name);
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix, String* name);
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, int args_count);
- CodeEntry* NewCodeEntry(int security_token_id);
-
- // Called from profile generator thread.
- void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
-
- // Limits the number of profiles that can be simultaneously collected.
- static const int kMaxSimultaneousProfiles = 100;
-
- private:
- const char* GetFunctionName(String* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
- const char* GetFunctionName(const char* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
- int GetProfileIndex(unsigned uid);
- List<CpuProfile*>* GetProfilesList(int security_token_id);
- int TokenToIndex(int security_token_id);
-
- INLINE(static bool UidsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
-
- StringsStorage function_and_resource_names_;
- List<CodeEntry*> code_entries_;
- List<List<CpuProfile*>* > profiles_by_token_;
- // Mapping from profiles' uids to indexes in the second nested list
- // of profiles_by_token_.
- HashMap profiles_uids_;
- List<CpuProfile*> detached_profiles_;
-
- // Accessed by VM thread and profile generator thread.
- List<CpuProfile*> current_profiles_;
- Semaphore* current_profiles_semaphore_;
-
- DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
-};
-
-
-class SampleRateCalculator {
- public:
- SampleRateCalculator()
- : result_(Logger::kSamplingIntervalMs * kResultScale),
- ticks_per_ms_(Logger::kSamplingIntervalMs),
- measurements_count_(0),
- wall_time_query_countdown_(1) {
- }
-
- double ticks_per_ms() {
- return result_ / static_cast<double>(kResultScale);
- }
- void Tick();
- void UpdateMeasurements(double current_time);
-
- // Instead of querying current wall time each tick,
- // we use this constant to control query intervals.
- static const unsigned kWallTimeQueryIntervalMs = 100;
-
- private:
- // As the result needs to be accessed from a different thread, we
- // use type that guarantees atomic writes to memory. There should
- // be <= 1000 ticks per second, thus storing a value of a 10 ** 5
- // order should provide enough precision while keeping away from a
- // potential overflow.
- static const int kResultScale = 100000;
-
- AtomicWord result_;
- // All other fields are accessed only from the sampler thread.
- double ticks_per_ms_;
- unsigned measurements_count_;
- unsigned wall_time_query_countdown_;
- double last_wall_time_;
-
- DISALLOW_COPY_AND_ASSIGN(SampleRateCalculator);
-};
-
-
-class ProfileGenerator {
- public:
- explicit ProfileGenerator(CpuProfilesCollection* profiles);
-
- INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- String* name,
- String* resource_name,
- int line_number)) {
- return profiles_->NewCodeEntry(tag, name, resource_name, line_number);
- }
-
- INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name)) {
- return profiles_->NewCodeEntry(tag, name);
- }
-
- INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
- String* name)) {
- return profiles_->NewCodeEntry(tag, name_prefix, name);
- }
-
- INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- int args_count)) {
- return profiles_->NewCodeEntry(tag, args_count);
- }
-
- INLINE(CodeEntry* NewCodeEntry(int security_token_id)) {
- return profiles_->NewCodeEntry(security_token_id);
- }
-
- void RecordTickSample(const TickSample& sample);
-
- INLINE(CodeMap* code_map()) { return &code_map_; }
-
- INLINE(void Tick()) { sample_rate_calc_.Tick(); }
- INLINE(double actual_sampling_rate()) {
- return sample_rate_calc_.ticks_per_ms();
- }
-
- static const char* const kAnonymousFunctionName;
- static const char* const kProgramEntryName;
- static const char* const kGarbageCollectorEntryName;
-
- private:
- INLINE(CodeEntry* EntryForVMState(StateTag tag));
-
- CpuProfilesCollection* profiles_;
- CodeMap code_map_;
- CodeEntry* program_entry_;
- CodeEntry* gc_entry_;
- SampleRateCalculator sample_rate_calc_;
-
- DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_PROFILE_GENERATOR_H_
diff --git a/src/3rdparty/v8/src/property-details.h b/src/3rdparty/v8/src/property-details.h
deleted file mode 100644
index 510e985..0000000
--- a/src/3rdparty/v8/src/property-details.h
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PROPERTY_DETAILS_H_
-#define V8_PROPERTY_DETAILS_H_
-
-#include "../include/v8.h"
-#include "allocation.h"
-#include "utils.h"
-
-// Ecma-262 3rd 8.6.1
-enum PropertyAttributes {
- NONE = v8::None,
- READ_ONLY = v8::ReadOnly,
- DONT_ENUM = v8::DontEnum,
- DONT_DELETE = v8::DontDelete,
-
- SEALED = DONT_ENUM | DONT_DELETE,
- FROZEN = SEALED | READ_ONLY,
-
- ABSENT = 16 // Used in runtime to indicate a property is absent.
- // ABSENT can never be stored in or returned from a descriptor's attributes
- // bitfield. It is only used as a return value meaning the attributes of
- // a non-existent property.
-};
-
-
-namespace v8 {
-namespace internal {
-
-class Smi;
-
-// Type of properties.
-// Order of properties is significant.
-// Must fit in the BitField PropertyDetails::TypeField.
-// A copy of this is in mirror-debugger.js.
-enum PropertyType {
- // Only in slow mode.
- NORMAL = 0,
- // Only in fast mode.
- FIELD = 1,
- CONSTANT_FUNCTION = 2,
- CALLBACKS = 3,
- // Only in lookup results, not in descriptors.
- HANDLER = 4,
- INTERCEPTOR = 5,
- TRANSITION = 6,
- // Only used as a marker in LookupResult.
- NONEXISTENT = 7
-};
-
-
-// PropertyDetails captures type and attributes for a property.
-// They are used both in property dictionaries and instance descriptors.
-class PropertyDetails BASE_EMBEDDED {
- public:
- PropertyDetails(PropertyAttributes attributes,
- PropertyType type,
- int index = 0) {
- value_ = TypeField::encode(type)
- | AttributesField::encode(attributes)
- | DictionaryStorageField::encode(index);
-
- ASSERT(type == this->type());
- ASSERT(attributes == this->attributes());
- ASSERT(index == this->dictionary_index());
- }
-
- int pointer() { return DescriptorPointer::decode(value_); }
-
- PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
-
- // Conversion for storing details as Object*.
- explicit inline PropertyDetails(Smi* smi);
- inline Smi* AsSmi();
-
- PropertyType type() { return TypeField::decode(value_); }
-
- PropertyAttributes attributes() const {
- return AttributesField::decode(value_);
- }
-
- int dictionary_index() {
- return DictionaryStorageField::decode(value_);
- }
-
- int descriptor_index() {
- return DescriptorStorageField::decode(value_);
- }
-
- inline PropertyDetails AsDeleted();
-
- static bool IsValidIndex(int index) {
- return DictionaryStorageField::is_valid(index);
- }
-
- bool IsReadOnly() const { return (attributes() & READ_ONLY) != 0; }
- bool IsDontDelete() const { return (attributes() & DONT_DELETE) != 0; }
- bool IsDontEnum() const { return (attributes() & DONT_ENUM) != 0; }
- bool IsDeleted() const { return DeletedField::decode(value_) != 0;}
-
- // Bit fields in value_ (type, shift, size). Must be public so the
- // constants can be embedded in generated code.
- class TypeField: public BitField<PropertyType, 0, 3> {};
- class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
- class DeletedField: public BitField<uint32_t, 6, 1> {};
- class DictionaryStorageField: public BitField<uint32_t, 7, 24> {};
- class DescriptorStorageField: public BitField<uint32_t, 7, 11> {};
- class DescriptorPointer: public BitField<uint32_t, 18, 11> {};
-
- static const int kInitialIndex = 1;
-
- private:
- PropertyDetails(int value, int pointer) {
- value_ = DescriptorPointer::update(value, pointer);
- }
-
- uint32_t value_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_PROPERTY_DETAILS_H_
diff --git a/src/3rdparty/v8/src/property.cc b/src/3rdparty/v8/src/property.cc
deleted file mode 100644
index c2ea422..0000000
--- a/src/3rdparty/v8/src/property.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-
-void LookupResult::Iterate(ObjectVisitor* visitor) {
- LookupResult* current = this; // Could be NULL.
- while (current != NULL) {
- visitor->VisitPointer(BitCast<Object**>(&current->holder_));
- current = current->next_;
- }
-}
-
-
-#ifdef OBJECT_PRINT
-void LookupResult::Print(FILE* out) {
- if (!IsFound()) {
- FPrintF(out, "Not Found\n");
- return;
- }
-
- FPrintF(out, "LookupResult:\n");
- FPrintF(out, " -cacheable = %s\n", IsCacheable() ? "true" : "false");
- FPrintF(out, " -attributes = %x\n", GetAttributes());
- switch (type()) {
- case NORMAL:
- FPrintF(out, " -type = normal\n");
- FPrintF(out, " -entry = %d", GetDictionaryEntry());
- break;
- case CONSTANT_FUNCTION:
- FPrintF(out, " -type = constant function\n");
- FPrintF(out, " -function:\n");
- GetConstantFunction()->Print(out);
- FPrintF(out, "\n");
- break;
- case FIELD:
- FPrintF(out, " -type = field\n");
- FPrintF(out, " -index = %d", GetFieldIndex().field_index());
- FPrintF(out, "\n");
- break;
- case CALLBACKS:
- FPrintF(out, " -type = call backs\n");
- FPrintF(out, " -callback object:\n");
- GetCallbackObject()->Print(out);
- break;
- case HANDLER:
- FPrintF(out, " -type = lookup proxy\n");
- break;
- case INTERCEPTOR:
- FPrintF(out, " -type = lookup interceptor\n");
- break;
- case TRANSITION:
- switch (GetTransitionDetails().type()) {
- case FIELD:
- FPrintF(out, " -type = map transition\n");
- FPrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
- FPrintF(out, "\n");
- return;
- case CONSTANT_FUNCTION:
- FPrintF(out, " -type = constant property transition\n");
- FPrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
- FPrintF(out, "\n");
- return;
- case CALLBACKS:
- FPrintF(out, " -type = callbacks transition\n");
- FPrintF(out, " -callback object:\n");
- GetCallbackObject()->Print(out);
- return;
- default:
- UNREACHABLE();
- return;
- }
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
-}
-
-
-void Descriptor::Print(FILE* out) {
- FPrintF(out, "Descriptor ");
- GetKey()->ShortPrint(out);
- FPrintF(out, " @ ");
- GetValue()->ShortPrint(out);
- FPrintF(out, " %d\n", GetDetails().descriptor_index());
-}
-
-
-#endif
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/property.h b/src/3rdparty/v8/src/property.h
deleted file mode 100644
index 941b51d..0000000
--- a/src/3rdparty/v8/src/property.h
+++ /dev/null
@@ -1,482 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PROPERTY_H_
-#define V8_PROPERTY_H_
-
-#include "allocation.h"
-#include "transitions.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Abstraction for elements in instance-descriptor arrays.
-//
-// Each descriptor has a key, property attributes, property type,
-// property index (in the actual instance-descriptor array) and
-// optionally a piece of data.
-//
-
-class Descriptor BASE_EMBEDDED {
- public:
- static int IndexFromValue(Object* value) {
- return Smi::cast(value)->value();
- }
-
- MUST_USE_RESULT MaybeObject* KeyToInternalizedString() {
- if (!StringShape(key_).IsInternalized()) {
- MaybeObject* maybe_result = HEAP->InternalizeString(key_);
- if (!maybe_result->To(&key_)) return maybe_result;
- }
- return key_;
- }
-
- String* GetKey() { return key_; }
- Object* GetValue() { return value_; }
- PropertyDetails GetDetails() { return details_; }
-
-#ifdef OBJECT_PRINT
- void Print(FILE* out);
-#endif
-
- void SetEnumerationIndex(int index) {
- details_ = PropertyDetails(details_.attributes(), details_.type(), index);
- }
-
- void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
-
- private:
- String* key_;
- Object* value_;
- PropertyDetails details_;
-
- protected:
- Descriptor() : details_(Smi::FromInt(0)) {}
-
- void Init(String* key, Object* value, PropertyDetails details) {
- key_ = key;
- value_ = value;
- details_ = details;
- }
-
- Descriptor(String* key, Object* value, PropertyDetails details)
- : key_(key),
- value_(value),
- details_(details) { }
-
- Descriptor(String* key,
- Object* value,
- PropertyAttributes attributes,
- PropertyType type,
- int index)
- : key_(key),
- value_(value),
- details_(attributes, type, index) { }
-
- friend class DescriptorArray;
-};
-
-
-class FieldDescriptor: public Descriptor {
- public:
- FieldDescriptor(String* key,
- int field_index,
- PropertyAttributes attributes,
- int index = 0)
- : Descriptor(key, Smi::FromInt(field_index), attributes, FIELD, index) {}
-};
-
-
-class ConstantFunctionDescriptor: public Descriptor {
- public:
- ConstantFunctionDescriptor(String* key,
- JSFunction* function,
- PropertyAttributes attributes,
- int index)
- : Descriptor(key, function, attributes, CONSTANT_FUNCTION, index) {}
-};
-
-
-class CallbacksDescriptor: public Descriptor {
- public:
- CallbacksDescriptor(String* key,
- Object* foreign,
- PropertyAttributes attributes,
- int index = 0)
- : Descriptor(key, foreign, attributes, CALLBACKS, index) {}
-};
-
-
-// Holds a property index value distinguishing if it is a field index or an
-// index inside the object header.
-class PropertyIndex {
- public:
- static PropertyIndex NewFieldIndex(int index) {
- return PropertyIndex(index, false);
- }
- static PropertyIndex NewHeaderIndex(int index) {
- return PropertyIndex(index, true);
- }
-
- bool is_field_index() { return (index_ & kHeaderIndexBit) == 0; }
- bool is_header_index() { return (index_ & kHeaderIndexBit) != 0; }
-
- int field_index() {
- ASSERT(is_field_index());
- return value();
- }
- int header_index() {
- ASSERT(is_header_index());
- return value();
- }
-
- bool is_inobject(Handle<JSObject> holder) {
- if (is_header_index()) return true;
- return field_index() < holder->map()->inobject_properties();
- }
-
- int translate(Handle<JSObject> holder) {
- if (is_header_index()) return header_index();
- int index = field_index() - holder->map()->inobject_properties();
- if (index >= 0) return index;
- return index + holder->map()->instance_size() / kPointerSize;
- }
-
- private:
- static const int kHeaderIndexBit = 1 << 31;
- static const int kIndexMask = ~kHeaderIndexBit;
-
- int value() { return index_ & kIndexMask; }
-
- PropertyIndex(int index, bool is_header_based)
- : index_(index | (is_header_based ? kHeaderIndexBit : 0)) {
- ASSERT(index <= kIndexMask);
- }
-
- int index_;
-};
-
-
-class LookupResult BASE_EMBEDDED {
- public:
- explicit LookupResult(Isolate* isolate)
- : isolate_(isolate),
- next_(isolate->top_lookup_result()),
- lookup_type_(NOT_FOUND),
- holder_(NULL),
- cacheable_(true),
- details_(NONE, NONEXISTENT) {
- isolate->SetTopLookupResult(this);
- }
-
- ~LookupResult() {
- ASSERT(isolate()->top_lookup_result() == this);
- isolate()->SetTopLookupResult(next_);
- }
-
- Isolate* isolate() const { return isolate_; }
-
- void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
- lookup_type_ = DESCRIPTOR_TYPE;
- holder_ = holder;
- details_ = details;
- number_ = number;
- }
-
- void TransitionResult(JSObject* holder, int number) {
- lookup_type_ = TRANSITION_TYPE;
- details_ = PropertyDetails(NONE, TRANSITION);
- holder_ = holder;
- number_ = number;
- }
-
- void DictionaryResult(JSObject* holder, int entry) {
- lookup_type_ = DICTIONARY_TYPE;
- holder_ = holder;
- details_ = holder->property_dictionary()->DetailsAt(entry);
- number_ = entry;
- }
-
- void HandlerResult(JSProxy* proxy) {
- lookup_type_ = HANDLER_TYPE;
- holder_ = proxy;
- details_ = PropertyDetails(NONE, HANDLER);
- cacheable_ = false;
- }
-
- void InterceptorResult(JSObject* holder) {
- lookup_type_ = INTERCEPTOR_TYPE;
- holder_ = holder;
- details_ = PropertyDetails(NONE, INTERCEPTOR);
- }
-
- void NotFound() {
- lookup_type_ = NOT_FOUND;
- details_ = PropertyDetails(NONE, NONEXISTENT);
- holder_ = NULL;
- }
-
- JSObject* holder() {
- ASSERT(IsFound());
- return JSObject::cast(holder_);
- }
-
- JSProxy* proxy() {
- ASSERT(IsFound());
- return JSProxy::cast(holder_);
- }
-
- PropertyType type() {
- ASSERT(IsFound());
- return details_.type();
- }
-
- PropertyAttributes GetAttributes() {
- ASSERT(!IsTransition());
- ASSERT(IsFound());
- ASSERT(details_.type() != NONEXISTENT);
- return details_.attributes();
- }
-
- PropertyDetails GetPropertyDetails() {
- ASSERT(!IsTransition());
- return details_;
- }
-
- bool IsFastPropertyType() {
- ASSERT(IsFound());
- return IsTransition() || type() != NORMAL;
- }
-
- // Property callbacks does not include transitions to callbacks.
- bool IsPropertyCallbacks() {
- ASSERT(!(details_.type() == CALLBACKS && !IsFound()));
- return details_.type() == CALLBACKS;
- }
-
- bool IsReadOnly() {
- ASSERT(IsFound());
- ASSERT(!IsTransition());
- ASSERT(details_.type() != NONEXISTENT);
- return details_.IsReadOnly();
- }
-
- bool IsField() {
- ASSERT(!(details_.type() == FIELD && !IsFound()));
- return details_.type() == FIELD;
- }
-
- bool IsNormal() {
- ASSERT(!(details_.type() == NORMAL && !IsFound()));
- return details_.type() == NORMAL;
- }
-
- bool IsConstantFunction() {
- ASSERT(!(details_.type() == CONSTANT_FUNCTION && !IsFound()));
- return details_.type() == CONSTANT_FUNCTION;
- }
-
- bool IsDontDelete() { return details_.IsDontDelete(); }
- bool IsDontEnum() { return details_.IsDontEnum(); }
- bool IsDeleted() { return details_.IsDeleted(); }
- bool IsFound() { return lookup_type_ != NOT_FOUND; }
- bool IsTransition() { return lookup_type_ == TRANSITION_TYPE; }
- bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
- bool IsInterceptor() { return lookup_type_ == INTERCEPTOR_TYPE; }
-
- // Is the result is a property excluding transitions and the null descriptor?
- bool IsProperty() {
- return IsFound() && !IsTransition();
- }
-
- bool IsDataProperty() {
- switch (type()) {
- case FIELD:
- case NORMAL:
- case CONSTANT_FUNCTION:
- return true;
- case CALLBACKS: {
- Object* callback = GetCallbackObject();
- return callback->IsAccessorInfo() || callback->IsForeign();
- }
- case HANDLER:
- case INTERCEPTOR:
- case TRANSITION:
- case NONEXISTENT:
- return false;
- }
- UNREACHABLE();
- return false;
- }
-
- bool IsCacheable() { return cacheable_; }
- void DisallowCaching() { cacheable_ = false; }
-
- Object* GetLazyValue() {
- switch (type()) {
- case FIELD:
- return holder()->FastPropertyAt(GetFieldIndex().field_index());
- case NORMAL: {
- Object* value;
- value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
- if (holder()->IsGlobalObject()) {
- value = JSGlobalPropertyCell::cast(value)->value();
- }
- return value;
- }
- case CONSTANT_FUNCTION:
- return GetConstantFunction();
- case CALLBACKS:
- case HANDLER:
- case INTERCEPTOR:
- case TRANSITION:
- case NONEXISTENT:
- return isolate()->heap()->the_hole_value();
- }
- UNREACHABLE();
- return NULL;
- }
-
- Map* GetTransitionTarget() {
- ASSERT(IsTransition());
- TransitionArray* transitions = holder()->map()->transitions();
- return transitions->GetTarget(number_);
- }
-
- PropertyDetails GetTransitionDetails(Map* map) {
- ASSERT(IsTransition());
- TransitionArray* transitions = map->transitions();
- return transitions->GetTargetDetails(number_);
- }
-
- PropertyDetails GetTransitionDetails() {
- return GetTransitionDetails(holder()->map());
- }
-
- bool IsTransitionToField(Map* map) {
- return IsTransition() && GetTransitionDetails(map).type() == FIELD;
- }
-
- Map* GetTransitionMap() {
- ASSERT(IsTransition());
- return Map::cast(GetValue());
- }
-
- Map* GetTransitionMapFromMap(Map* map) {
- ASSERT(IsTransition());
- return map->transitions()->GetTarget(number_);
- }
-
- int GetTransitionIndex() {
- ASSERT(IsTransition());
- return number_;
- }
-
- int GetDescriptorIndex() {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- return number_;
- }
-
- PropertyIndex GetFieldIndex() {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(IsField());
- return PropertyIndex::NewFieldIndex(
- Descriptor::IndexFromValue(GetValue()));
- }
-
- int GetLocalFieldIndexFromMap(Map* map) {
- ASSERT(IsField());
- return Descriptor::IndexFromValue(GetValueFromMap(map)) -
- map->inobject_properties();
- }
-
- int GetDictionaryEntry() {
- ASSERT(lookup_type_ == DICTIONARY_TYPE);
- return number_;
- }
-
- JSFunction* GetConstantFunction() {
- ASSERT(type() == CONSTANT_FUNCTION);
- return JSFunction::cast(GetValue());
- }
-
- JSFunction* GetConstantFunctionFromMap(Map* map) {
- ASSERT(type() == CONSTANT_FUNCTION);
- return JSFunction::cast(GetValueFromMap(map));
- }
-
- Object* GetCallbackObject() {
- ASSERT(type() == CALLBACKS && !IsTransition());
- return GetValue();
- }
-
-#ifdef OBJECT_PRINT
- void Print(FILE* out);
-#endif
-
- Object* GetValue() {
- if (lookup_type_ == DESCRIPTOR_TYPE) {
- return GetValueFromMap(holder()->map());
- }
- // In the dictionary case, the data is held in the value field.
- ASSERT(lookup_type_ == DICTIONARY_TYPE);
- return holder()->GetNormalizedProperty(this);
- }
-
- Object* GetValueFromMap(Map* map) const {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(number_ < map->NumberOfOwnDescriptors());
- return map->instance_descriptors()->GetValue(number_);
- }
-
- void Iterate(ObjectVisitor* visitor);
-
- private:
- Isolate* isolate_;
- LookupResult* next_;
-
- // Where did we find the result;
- enum {
- NOT_FOUND,
- DESCRIPTOR_TYPE,
- TRANSITION_TYPE,
- DICTIONARY_TYPE,
- HANDLER_TYPE,
- INTERCEPTOR_TYPE
- } lookup_type_;
-
- JSReceiver* holder_;
- int number_;
- bool cacheable_;
- PropertyDetails details_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_PROPERTY_H_
diff --git a/src/3rdparty/v8/src/proxy.js b/src/3rdparty/v8/src/proxy.js
deleted file mode 100644
index 53a3572..0000000
--- a/src/3rdparty/v8/src/proxy.js
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"use strict";
-
-global.Proxy = new $Object();
-
-var $Proxy = global.Proxy
-
-function ProxyCreate(handler, proto) {
- if (!IS_SPEC_OBJECT(handler))
- throw MakeTypeError("handler_non_object", ["create"])
- if (IS_UNDEFINED(proto))
- proto = null
- else if (!(IS_SPEC_OBJECT(proto) || proto === null))
- throw MakeTypeError("proto_non_object", ["create"])
- return %CreateJSProxy(handler, proto)
-}
-
-function ProxyCreateFunction(handler, callTrap, constructTrap) {
- if (!IS_SPEC_OBJECT(handler))
- throw MakeTypeError("handler_non_object", ["create"])
- if (!IS_SPEC_FUNCTION(callTrap))
- throw MakeTypeError("trap_function_expected", ["createFunction", "call"])
- if (IS_UNDEFINED(constructTrap)) {
- constructTrap = DerivedConstructTrap(callTrap)
- } else if (IS_SPEC_FUNCTION(constructTrap)) {
- // Make sure the trap receives 'undefined' as this.
- var construct = constructTrap
- constructTrap = function() {
- return %Apply(construct, void 0, arguments, 0, %_ArgumentsLength());
- }
- } else {
- throw MakeTypeError("trap_function_expected",
- ["createFunction", "construct"])
- }
- return %CreateJSFunctionProxy(
- handler, callTrap, constructTrap, $Function.prototype)
-}
-
-%CheckIsBootstrapping()
-InstallFunctions($Proxy, DONT_ENUM, [
- "create", ProxyCreate,
- "createFunction", ProxyCreateFunction
-])
-
-
-////////////////////////////////////////////////////////////////////////////////
-// Builtins
-////////////////////////////////////////////////////////////////////////////////
-
-function DerivedConstructTrap(callTrap) {
- return function() {
- var proto = this.prototype
- if (!IS_SPEC_OBJECT(proto)) proto = $Object.prototype
- var obj = new $Object()
- obj.__proto__ = proto
- var result = %Apply(callTrap, obj, arguments, 0, %_ArgumentsLength());
- return IS_SPEC_OBJECT(result) ? result : obj
- }
-}
-
-function DelegateCallAndConstruct(callTrap, constructTrap) {
- return function() {
- return %Apply(%_IsConstructCall() ? constructTrap : callTrap,
- this, arguments, 0, %_ArgumentsLength())
- }
-}
-
-function DerivedGetTrap(receiver, name) {
- var desc = this.getPropertyDescriptor(name)
- if (IS_UNDEFINED(desc)) { return desc }
- if ('value' in desc) {
- return desc.value
- } else {
- if (IS_UNDEFINED(desc.get)) { return desc.get }
- // The proposal says: desc.get.call(receiver)
- return %_CallFunction(receiver, desc.get)
- }
-}
-
-function DerivedSetTrap(receiver, name, val) {
- var desc = this.getOwnPropertyDescriptor(name)
- if (desc) {
- if ('writable' in desc) {
- if (desc.writable) {
- desc.value = val
- this.defineProperty(name, desc)
- return true
- } else {
- return false
- }
- } else { // accessor
- if (desc.set) {
- // The proposal says: desc.set.call(receiver, val)
- %_CallFunction(receiver, val, desc.set)
- return true
- } else {
- return false
- }
- }
- }
- desc = this.getPropertyDescriptor(name)
- if (desc) {
- if ('writable' in desc) {
- if (desc.writable) {
- // fall through
- } else {
- return false
- }
- } else { // accessor
- if (desc.set) {
- // The proposal says: desc.set.call(receiver, val)
- %_CallFunction(receiver, val, desc.set)
- return true
- } else {
- return false
- }
- }
- }
- this.defineProperty(name, {
- value: val,
- writable: true,
- enumerable: true,
- configurable: true});
- return true;
-}
-
-function DerivedHasTrap(name) {
- return !!this.getPropertyDescriptor(name)
-}
-
-function DerivedHasOwnTrap(name) {
- return !!this.getOwnPropertyDescriptor(name)
-}
-
-function DerivedKeysTrap() {
- var names = this.getOwnPropertyNames()
- var enumerableNames = []
- for (var i = 0, count = 0; i < names.length; ++i) {
- var name = names[i]
- var desc = this.getOwnPropertyDescriptor(TO_STRING_INLINE(name))
- if (!IS_UNDEFINED(desc) && desc.enumerable) {
- enumerableNames[count++] = names[i]
- }
- }
- return enumerableNames
-}
-
-function DerivedEnumerateTrap() {
- var names = this.getPropertyNames()
- var enumerableNames = []
- for (var i = 0, count = 0; i < names.length; ++i) {
- var name = names[i]
- var desc = this.getPropertyDescriptor(TO_STRING_INLINE(name))
- if (!IS_UNDEFINED(desc) && desc.enumerable) {
- enumerableNames[count++] = names[i]
- }
- }
- return enumerableNames
-}
-
-function ProxyEnumerate(proxy) {
- var handler = %GetHandler(proxy)
- if (IS_UNDEFINED(handler.enumerate)) {
- return %Apply(DerivedEnumerateTrap, handler, [], 0, 0)
- } else {
- return ToStringArray(handler.enumerate(), "enumerate")
- }
-}
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp-inl.h b/src/3rdparty/v8/src/regexp-macro-assembler-irregexp-inl.h
deleted file mode 100644
index a767ec0..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp-inl.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// A light-weight assembler for the Irregexp byte code.
-
-
-#include "v8.h"
-#include "ast.h"
-#include "bytecodes-irregexp.h"
-
-#ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
-#define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
-
-namespace v8 {
-namespace internal {
-
-#ifdef V8_INTERPRETED_REGEXP
-
-void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
- uint32_t twenty_four_bits) {
- uint32_t word = ((twenty_four_bits << BYTECODE_SHIFT) | byte);
- ASSERT(pc_ <= buffer_.length());
- if (pc_ + 3 >= buffer_.length()) {
- Expand();
- }
- *reinterpret_cast<uint32_t*>(buffer_.start() + pc_) = word;
- pc_ += 4;
-}
-
-
-void RegExpMacroAssemblerIrregexp::Emit16(uint32_t word) {
- ASSERT(pc_ <= buffer_.length());
- if (pc_ + 1 >= buffer_.length()) {
- Expand();
- }
- *reinterpret_cast<uint16_t*>(buffer_.start() + pc_) = word;
- pc_ += 2;
-}
-
-
-void RegExpMacroAssemblerIrregexp::Emit8(uint32_t word) {
- ASSERT(pc_ <= buffer_.length());
- if (pc_ == buffer_.length()) {
- Expand();
- }
- *reinterpret_cast<unsigned char*>(buffer_.start() + pc_) = word;
- pc_ += 1;
-}
-
-
-void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
- ASSERT(pc_ <= buffer_.length());
- if (pc_ + 3 >= buffer_.length()) {
- Expand();
- }
- *reinterpret_cast<uint32_t*>(buffer_.start() + pc_) = word;
- pc_ += 4;
-}
-
-#endif // V8_INTERPRETED_REGEXP
-
-} } // namespace v8::internal
-
-#endif // V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.cc b/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.cc
deleted file mode 100644
index 16766ca..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.cc
+++ /dev/null
@@ -1,499 +0,0 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "ast.h"
-#include "bytecodes-irregexp.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-macro-assembler-irregexp.h"
-#include "regexp-macro-assembler-irregexp-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-#ifdef V8_INTERPRETED_REGEXP
-
-RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer,
- Zone* zone)
- : RegExpMacroAssembler(zone),
- buffer_(buffer),
- pc_(0),
- own_buffer_(false),
- advance_current_end_(kInvalidPC) {
-}
-
-
-RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() {
- if (backtrack_.is_linked()) backtrack_.Unuse();
- if (own_buffer_) buffer_.Dispose();
-}
-
-
-RegExpMacroAssemblerIrregexp::IrregexpImplementation
-RegExpMacroAssemblerIrregexp::Implementation() {
- return kBytecodeImplementation;
-}
-
-
-void RegExpMacroAssemblerIrregexp::Bind(Label* l) {
- advance_current_end_ = kInvalidPC;
- ASSERT(!l->is_bound());
- if (l->is_linked()) {
- int pos = l->pos();
- while (pos != 0) {
- int fixup = pos;
- pos = *reinterpret_cast<int32_t*>(buffer_.start() + fixup);
- *reinterpret_cast<uint32_t*>(buffer_.start() + fixup) = pc_;
- }
- }
- l->bind_to(pc_);
-}
-
-
-void RegExpMacroAssemblerIrregexp::EmitOrLink(Label* l) {
- if (l == NULL) l = &backtrack_;
- if (l->is_bound()) {
- Emit32(l->pos());
- } else {
- int pos = 0;
- if (l->is_linked()) {
- pos = l->pos();
- }
- l->link_to(pc_);
- Emit32(pos);
- }
-}
-
-
-void RegExpMacroAssemblerIrregexp::PopRegister(int register_index) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_POP_REGISTER, register_index);
-}
-
-
-void RegExpMacroAssemblerIrregexp::PushRegister(
- int register_index,
- StackCheckFlag check_stack_limit) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_PUSH_REGISTER, register_index);
-}
-
-
-void RegExpMacroAssemblerIrregexp::WriteCurrentPositionToRegister(
- int register_index, int cp_offset) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_SET_REGISTER_TO_CP, register_index);
- Emit32(cp_offset); // Current position offset.
-}
-
-
-void RegExpMacroAssemblerIrregexp::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
- for (int reg = reg_from; reg <= reg_to; reg++) {
- SetRegister(reg, -1);
- }
-}
-
-
-void RegExpMacroAssemblerIrregexp::ReadCurrentPositionFromRegister(
- int register_index) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_SET_CP_TO_REGISTER, register_index);
-}
-
-
-void RegExpMacroAssemblerIrregexp::WriteStackPointerToRegister(
- int register_index) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_SET_REGISTER_TO_SP, register_index);
-}
-
-
-void RegExpMacroAssemblerIrregexp::ReadStackPointerFromRegister(
- int register_index) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_SET_SP_TO_REGISTER, register_index);
-}
-
-
-void RegExpMacroAssemblerIrregexp::SetCurrentPositionFromEnd(int by) {
- ASSERT(is_uint24(by));
- Emit(BC_SET_CURRENT_POSITION_FROM_END, by);
-}
-
-
-void RegExpMacroAssemblerIrregexp::SetRegister(int register_index, int to) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_SET_REGISTER, register_index);
- Emit32(to);
-}
-
-
-void RegExpMacroAssemblerIrregexp::AdvanceRegister(int register_index, int by) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_ADVANCE_REGISTER, register_index);
- Emit32(by);
-}
-
-
-void RegExpMacroAssemblerIrregexp::PopCurrentPosition() {
- Emit(BC_POP_CP, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::PushCurrentPosition() {
- Emit(BC_PUSH_CP, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::Backtrack() {
- Emit(BC_POP_BT, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::GoTo(Label* l) {
- if (advance_current_end_ == pc_) {
- // Combine advance current and goto.
- pc_ = advance_current_start_;
- Emit(BC_ADVANCE_CP_AND_GOTO, advance_current_offset_);
- EmitOrLink(l);
- advance_current_end_ = kInvalidPC;
- } else {
- // Regular goto.
- Emit(BC_GOTO, 0);
- EmitOrLink(l);
- }
-}
-
-
-void RegExpMacroAssemblerIrregexp::PushBacktrack(Label* l) {
- Emit(BC_PUSH_BT, 0);
- EmitOrLink(l);
-}
-
-
-bool RegExpMacroAssemblerIrregexp::Succeed() {
- Emit(BC_SUCCEED, 0);
- return false; // Restart matching for global regexp not supported.
-}
-
-
-void RegExpMacroAssemblerIrregexp::Fail() {
- Emit(BC_FAIL, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::AdvanceCurrentPosition(int by) {
- ASSERT(by >= kMinCPOffset);
- ASSERT(by <= kMaxCPOffset);
- advance_current_start_ = pc_;
- advance_current_offset_ = by;
- Emit(BC_ADVANCE_CP, by);
- advance_current_end_ = pc_;
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckGreedyLoop(
- Label* on_tos_equals_current_position) {
- Emit(BC_CHECK_GREEDY, 0);
- EmitOrLink(on_tos_equals_current_position);
-}
-
-
-void RegExpMacroAssemblerIrregexp::LoadCurrentCharacter(int cp_offset,
- Label* on_failure,
- bool check_bounds,
- int characters) {
- ASSERT(cp_offset >= kMinCPOffset);
- ASSERT(cp_offset <= kMaxCPOffset);
- int bytecode;
- if (check_bounds) {
- if (characters == 4) {
- bytecode = BC_LOAD_4_CURRENT_CHARS;
- } else if (characters == 2) {
- bytecode = BC_LOAD_2_CURRENT_CHARS;
- } else {
- ASSERT(characters == 1);
- bytecode = BC_LOAD_CURRENT_CHAR;
- }
- } else {
- if (characters == 4) {
- bytecode = BC_LOAD_4_CURRENT_CHARS_UNCHECKED;
- } else if (characters == 2) {
- bytecode = BC_LOAD_2_CURRENT_CHARS_UNCHECKED;
- } else {
- ASSERT(characters == 1);
- bytecode = BC_LOAD_CURRENT_CHAR_UNCHECKED;
- }
- }
- Emit(bytecode, cp_offset);
- if (check_bounds) EmitOrLink(on_failure);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterLT(uc16 limit,
- Label* on_less) {
- Emit(BC_CHECK_LT, limit);
- EmitOrLink(on_less);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterGT(uc16 limit,
- Label* on_greater) {
- Emit(BC_CHECK_GT, limit);
- EmitOrLink(on_greater);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckCharacter(uint32_t c, Label* on_equal) {
- if (c > MAX_FIRST_ARG) {
- Emit(BC_CHECK_4_CHARS, 0);
- Emit32(c);
- } else {
- Emit(BC_CHECK_CHAR, c);
- }
- EmitOrLink(on_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckAtStart(Label* on_at_start) {
- Emit(BC_CHECK_AT_START, 0);
- EmitOrLink(on_at_start);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckNotAtStart(Label* on_not_at_start) {
- Emit(BC_CHECK_NOT_AT_START, 0);
- EmitOrLink(on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckNotCharacter(uint32_t c,
- Label* on_not_equal) {
- if (c > MAX_FIRST_ARG) {
- Emit(BC_CHECK_NOT_4_CHARS, 0);
- Emit32(c);
- } else {
- Emit(BC_CHECK_NOT_CHAR, c);
- }
- EmitOrLink(on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterAfterAnd(
- uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- if (c > MAX_FIRST_ARG) {
- Emit(BC_AND_CHECK_4_CHARS, 0);
- Emit32(c);
- } else {
- Emit(BC_AND_CHECK_CHAR, c);
- }
- Emit32(mask);
- EmitOrLink(on_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckNotCharacterAfterAnd(
- uint32_t c,
- uint32_t mask,
- Label* on_not_equal) {
- if (c > MAX_FIRST_ARG) {
- Emit(BC_AND_CHECK_NOT_4_CHARS, 0);
- Emit32(c);
- } else {
- Emit(BC_AND_CHECK_NOT_CHAR, c);
- }
- Emit32(mask);
- EmitOrLink(on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- Emit(BC_MINUS_AND_CHECK_NOT_CHAR, c);
- Emit16(minus);
- Emit16(mask);
- EmitOrLink(on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- Emit(BC_CHECK_CHAR_IN_RANGE, 0);
- Emit16(from);
- Emit16(to);
- EmitOrLink(on_in_range);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- Emit(BC_CHECK_CHAR_NOT_IN_RANGE, 0);
- Emit16(from);
- Emit16(to);
- EmitOrLink(on_not_in_range);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckBitInTable(
- Handle<ByteArray> table, Label* on_bit_set) {
- Emit(BC_CHECK_BIT_IN_TABLE, 0);
- EmitOrLink(on_bit_set);
- for (int i = 0; i < kTableSize; i += kBitsPerByte) {
- int byte = 0;
- for (int j = 0; j < kBitsPerByte; j++) {
- if (table->get(i + j) != 0) byte |= 1 << j;
- }
- Emit8(byte);
- }
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
- Label* on_not_equal) {
- ASSERT(start_reg >= 0);
- ASSERT(start_reg <= kMaxRegister);
- Emit(BC_CHECK_NOT_BACK_REF, start_reg);
- EmitOrLink(on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_not_equal) {
- ASSERT(start_reg >= 0);
- ASSERT(start_reg <= kMaxRegister);
- Emit(BC_CHECK_NOT_BACK_REF_NO_CASE, start_reg);
- EmitOrLink(on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::CheckCharacters(
- Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- ASSERT(cp_offset >= kMinCPOffset);
- ASSERT(cp_offset + str.length() - 1 <= kMaxCPOffset);
- // It is vital that this loop is backwards due to the unchecked character
- // load below.
- for (int i = str.length() - 1; i >= 0; i--) {
- if (check_end_of_string && i == str.length() - 1) {
- Emit(BC_LOAD_CURRENT_CHAR, cp_offset + i);
- EmitOrLink(on_failure);
- } else {
- Emit(BC_LOAD_CURRENT_CHAR_UNCHECKED, cp_offset + i);
- }
- Emit(BC_CHECK_NOT_CHAR, str[i]);
- EmitOrLink(on_failure);
- }
-}
-
-
-void RegExpMacroAssemblerIrregexp::IfRegisterLT(int register_index,
- int comparand,
- Label* on_less_than) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_CHECK_REGISTER_LT, register_index);
- Emit32(comparand);
- EmitOrLink(on_less_than);
-}
-
-
-void RegExpMacroAssemblerIrregexp::IfRegisterGE(int register_index,
- int comparand,
- Label* on_greater_or_equal) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_CHECK_REGISTER_GE, register_index);
- Emit32(comparand);
- EmitOrLink(on_greater_or_equal);
-}
-
-
-void RegExpMacroAssemblerIrregexp::IfRegisterEqPos(int register_index,
- Label* on_eq) {
- ASSERT(register_index >= 0);
- ASSERT(register_index <= kMaxRegister);
- Emit(BC_CHECK_REGISTER_EQ_POS, register_index);
- EmitOrLink(on_eq);
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerIrregexp::GetCode(
- Handle<String> source) {
- Bind(&backtrack_);
- Emit(BC_POP_BT, 0);
- Handle<ByteArray> array = FACTORY->NewByteArray(length());
- Copy(array->GetDataStartAddress());
- return array;
-}
-
-
-int RegExpMacroAssemblerIrregexp::length() {
- return pc_;
-}
-
-
-void RegExpMacroAssemblerIrregexp::Copy(Address a) {
- memcpy(a, buffer_.start(), length());
-}
-
-
-void RegExpMacroAssemblerIrregexp::Expand() {
- bool old_buffer_was_our_own = own_buffer_;
- Vector<byte> old_buffer = buffer_;
- buffer_ = Vector<byte>::New(old_buffer.length() * 2);
- own_buffer_ = true;
- memcpy(buffer_.start(), old_buffer.start(), old_buffer.length());
- if (old_buffer_was_our_own) {
- old_buffer.Dispose();
- }
-}
-
-#endif // V8_INTERPRETED_REGEXP
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.h b/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.h
deleted file mode 100644
index 4bc2980..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.h
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
-#define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
-
-namespace v8 {
-namespace internal {
-
-#ifdef V8_INTERPRETED_REGEXP
-
-class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- RegExpMacroAssemblerIrregexp(Vector<byte>, Zone* zone);
- virtual ~RegExpMacroAssemblerIrregexp();
- // The byte-code interpreter checks on each push anyway.
- virtual int stack_limit_slack() { return 1; }
- virtual void Bind(Label* label);
- virtual void AdvanceCurrentPosition(int by); // Signed cp change.
- virtual void PopCurrentPosition();
- virtual void PushCurrentPosition();
- virtual void Backtrack();
- virtual void GoTo(Label* label);
- virtual void PushBacktrack(Label* label);
- virtual bool Succeed();
- virtual void Fail();
- virtual void PopRegister(int register_index);
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void AdvanceRegister(int reg, int by); // r[reg] += by.
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void WriteStackPointerToRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- virtual void IfRegisterLT(int register_index, int comparand, Label* if_lt);
- virtual void IfRegisterGE(int register_index, int comparand, Label* if_ge);
- virtual void IfRegisterEqPos(int register_index, Label* if_eq);
-
- virtual IrregexpImplementation Implementation();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
-
- private:
- void Expand();
- // Code and bitmap emission.
- inline void EmitOrLink(Label* label);
- inline void Emit32(uint32_t x);
- inline void Emit16(uint32_t x);
- inline void Emit8(uint32_t x);
- inline void Emit(uint32_t bc, uint32_t arg);
- // Bytecode buffer.
- int length();
- void Copy(Address a);
-
- // The buffer into which code and relocation info are generated.
- Vector<byte> buffer_;
- // The program counter.
- int pc_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
- Label backtrack_;
-
- int advance_current_start_;
- int advance_current_offset_;
- int advance_current_end_;
-
- static const int kInvalidPC = -1;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-} } // namespace v8::internal
-
-#endif // V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-tracer.cc b/src/3rdparty/v8/src/regexp-macro-assembler-tracer.cc
deleted file mode 100644
index f878e8c..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler-tracer.cc
+++ /dev/null
@@ -1,449 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "ast.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-macro-assembler-tracer.h"
-
-namespace v8 {
-namespace internal {
-
-RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
- RegExpMacroAssembler* assembler) :
- RegExpMacroAssembler(assembler->zone()),
- assembler_(assembler) {
- unsigned int type = assembler->Implementation();
- ASSERT(type < 5);
- const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"};
- PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
-}
-
-
-RegExpMacroAssemblerTracer::~RegExpMacroAssemblerTracer() {
-}
-
-
-// This is used for printing out debugging information. It makes an integer
-// that is closely related to the address of an object.
-static int LabelToInt(Label* label) {
- return static_cast<int>(reinterpret_cast<intptr_t>(label));
-}
-
-
-void RegExpMacroAssemblerTracer::Bind(Label* label) {
- PrintF("label[%08x]: (Bind)\n", LabelToInt(label));
- assembler_->Bind(label);
-}
-
-
-void RegExpMacroAssemblerTracer::AdvanceCurrentPosition(int by) {
- PrintF(" AdvanceCurrentPosition(by=%d);\n", by);
- assembler_->AdvanceCurrentPosition(by);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckGreedyLoop(Label* label) {
- PrintF(" CheckGreedyLoop(label[%08x]);\n\n", LabelToInt(label));
- assembler_->CheckGreedyLoop(label);
-}
-
-
-void RegExpMacroAssemblerTracer::PopCurrentPosition() {
- PrintF(" PopCurrentPosition();\n");
- assembler_->PopCurrentPosition();
-}
-
-
-void RegExpMacroAssemblerTracer::PushCurrentPosition() {
- PrintF(" PushCurrentPosition();\n");
- assembler_->PushCurrentPosition();
-}
-
-
-void RegExpMacroAssemblerTracer::Backtrack() {
- PrintF(" Backtrack();\n");
- assembler_->Backtrack();
-}
-
-
-void RegExpMacroAssemblerTracer::GoTo(Label* label) {
- PrintF(" GoTo(label[%08x]);\n\n", LabelToInt(label));
- assembler_->GoTo(label);
-}
-
-
-void RegExpMacroAssemblerTracer::PushBacktrack(Label* label) {
- PrintF(" PushBacktrack(label[%08x]);\n", LabelToInt(label));
- assembler_->PushBacktrack(label);
-}
-
-
-bool RegExpMacroAssemblerTracer::Succeed() {
- bool restart = assembler_->Succeed();
- PrintF(" Succeed();%s\n", restart ? " [restart for global match]" : "");
- return restart;
-}
-
-
-void RegExpMacroAssemblerTracer::Fail() {
- PrintF(" Fail();");
- assembler_->Fail();
-}
-
-
-void RegExpMacroAssemblerTracer::PopRegister(int register_index) {
- PrintF(" PopRegister(register=%d);\n", register_index);
- assembler_->PopRegister(register_index);
-}
-
-
-void RegExpMacroAssemblerTracer::PushRegister(
- int register_index,
- StackCheckFlag check_stack_limit) {
- PrintF(" PushRegister(register=%d, %s);\n",
- register_index,
- check_stack_limit ? "check stack limit" : "");
- assembler_->PushRegister(register_index, check_stack_limit);
-}
-
-
-void RegExpMacroAssemblerTracer::AdvanceRegister(int reg, int by) {
- PrintF(" AdvanceRegister(register=%d, by=%d);\n", reg, by);
- assembler_->AdvanceRegister(reg, by);
-}
-
-
-void RegExpMacroAssemblerTracer::SetCurrentPositionFromEnd(int by) {
- PrintF(" SetCurrentPositionFromEnd(by=%d);\n", by);
- assembler_->SetCurrentPositionFromEnd(by);
-}
-
-
-void RegExpMacroAssemblerTracer::SetRegister(int register_index, int to) {
- PrintF(" SetRegister(register=%d, to=%d);\n", register_index, to);
- assembler_->SetRegister(register_index, to);
-}
-
-
-void RegExpMacroAssemblerTracer::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- PrintF(" WriteCurrentPositionToRegister(register=%d,cp_offset=%d);\n",
- reg,
- cp_offset);
- assembler_->WriteCurrentPositionToRegister(reg, cp_offset);
-}
-
-
-void RegExpMacroAssemblerTracer::ClearRegisters(int reg_from, int reg_to) {
- PrintF(" ClearRegister(from=%d, to=%d);\n", reg_from, reg_to);
- assembler_->ClearRegisters(reg_from, reg_to);
-}
-
-
-void RegExpMacroAssemblerTracer::ReadCurrentPositionFromRegister(int reg) {
- PrintF(" ReadCurrentPositionFromRegister(register=%d);\n", reg);
- assembler_->ReadCurrentPositionFromRegister(reg);
-}
-
-
-void RegExpMacroAssemblerTracer::WriteStackPointerToRegister(int reg) {
- PrintF(" WriteStackPointerToRegister(register=%d);\n", reg);
- assembler_->WriteStackPointerToRegister(reg);
-}
-
-
-void RegExpMacroAssemblerTracer::ReadStackPointerFromRegister(int reg) {
- PrintF(" ReadStackPointerFromRegister(register=%d);\n", reg);
- assembler_->ReadStackPointerFromRegister(reg);
-}
-
-
-void RegExpMacroAssemblerTracer::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- const char* check_msg = check_bounds ? "" : " (unchecked)";
- PrintF(" LoadCurrentCharacter(cp_offset=%d, label[%08x]%s (%d chars));\n",
- cp_offset,
- LabelToInt(on_end_of_input),
- check_msg,
- characters);
- assembler_->LoadCurrentCharacter(cp_offset,
- on_end_of_input,
- check_bounds,
- characters);
-}
-
-
-class PrintablePrinter {
- public:
- explicit PrintablePrinter(uc16 character) : character_(character) { }
-
- const char* operator*() {
- if (character_ >= ' ' && character_ <= '~') {
- buffer_[0] = '(';
- buffer_[1] = static_cast<char>(character_);
- buffer_[2] = ')';
- buffer_[3] = '\0';
- } else {
- buffer_[0] = '\0';
- }
- return &buffer_[0];
- };
-
- private:
- uc16 character_;
- char buffer_[4];
-};
-
-
-void RegExpMacroAssemblerTracer::CheckCharacterLT(uc16 limit, Label* on_less) {
- PrintablePrinter printable(limit);
- PrintF(" CheckCharacterLT(c=0x%04x%s, label[%08x]);\n",
- limit,
- *printable,
- LabelToInt(on_less));
- assembler_->CheckCharacterLT(limit, on_less);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckCharacterGT(uc16 limit,
- Label* on_greater) {
- PrintablePrinter printable(limit);
- PrintF(" CheckCharacterGT(c=0x%04x%s, label[%08x]);\n",
- limit,
- *printable,
- LabelToInt(on_greater));
- assembler_->CheckCharacterGT(limit, on_greater);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckCharacter(unsigned c, Label* on_equal) {
- PrintablePrinter printable(c);
- PrintF(" CheckCharacter(c=0x%04x%s, label[%08x]);\n",
- c,
- *printable,
- LabelToInt(on_equal));
- assembler_->CheckCharacter(c, on_equal);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckAtStart(Label* on_at_start) {
- PrintF(" CheckAtStart(label[%08x]);\n", LabelToInt(on_at_start));
- assembler_->CheckAtStart(on_at_start);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckNotAtStart(Label* on_not_at_start) {
- PrintF(" CheckNotAtStart(label[%08x]);\n", LabelToInt(on_not_at_start));
- assembler_->CheckNotAtStart(on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckNotCharacter(unsigned c,
- Label* on_not_equal) {
- PrintablePrinter printable(c);
- PrintF(" CheckNotCharacter(c=0x%04x%s, label[%08x]);\n",
- c,
- *printable,
- LabelToInt(on_not_equal));
- assembler_->CheckNotCharacter(c, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckCharacterAfterAnd(
- unsigned c,
- unsigned mask,
- Label* on_equal) {
- PrintablePrinter printable(c);
- PrintF(" CheckCharacterAfterAnd(c=0x%04x%s, mask=0x%04x, label[%08x]);\n",
- c,
- *printable,
- mask,
- LabelToInt(on_equal));
- assembler_->CheckCharacterAfterAnd(c, mask, on_equal);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckNotCharacterAfterAnd(
- unsigned c,
- unsigned mask,
- Label* on_not_equal) {
- PrintablePrinter printable(c);
- PrintF(" CheckNotCharacterAfterAnd(c=0x%04x%s, mask=0x%04x, label[%08x]);\n",
- c,
- *printable,
- mask,
- LabelToInt(on_not_equal));
- assembler_->CheckNotCharacterAfterAnd(c, mask, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- PrintF(" CheckNotCharacterAfterMinusAnd(c=0x%04x, minus=%04x, mask=0x%04x, "
- "label[%08x]);\n",
- c,
- minus,
- mask,
- LabelToInt(on_not_equal));
- assembler_->CheckNotCharacterAfterMinusAnd(c, minus, mask, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- PrintablePrinter printable_from(from);
- PrintablePrinter printable_to(to);
- PrintF(" CheckCharacterInRange(from=0x%04x%s, to=0x%04x%s, label[%08x]);\n",
- from,
- *printable_from,
- to,
- *printable_to,
- LabelToInt(on_not_in_range));
- assembler_->CheckCharacterInRange(from, to, on_not_in_range);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- PrintablePrinter printable_from(from);
- PrintablePrinter printable_to(to);
- PrintF(
- " CheckCharacterNotInRange(from=0x%04x%s," " to=%04x%s, label[%08x]);\n",
- from,
- *printable_from,
- to,
- *printable_to,
- LabelToInt(on_in_range));
- assembler_->CheckCharacterNotInRange(from, to, on_in_range);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckBitInTable(
- Handle<ByteArray> table, Label* on_bit_set) {
- PrintF(" CheckBitInTable(label[%08x] ", LabelToInt(on_bit_set));
- for (int i = 0; i < kTableSize; i++) {
- PrintF("%c", table->get(i) != 0 ? 'X' : '.');
- if (i % 32 == 31 && i != kTableMask) {
- PrintF("\n ");
- }
- }
- PrintF(");\n");
- assembler_->CheckBitInTable(table, on_bit_set);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckNotBackReference(int start_reg,
- Label* on_no_match) {
- PrintF(" CheckNotBackReference(register=%d, label[%08x]);\n", start_reg,
- LabelToInt(on_no_match));
- assembler_->CheckNotBackReference(start_reg, on_no_match);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, label[%08x]);\n",
- start_reg, LabelToInt(on_no_match));
- assembler_->CheckNotBackReferenceIgnoreCase(start_reg, on_no_match);
-}
-
-
-void RegExpMacroAssemblerTracer::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- PrintF(" %s(str=\"",
- check_end_of_string ? "CheckCharacters" : "CheckCharactersUnchecked");
- for (int i = 0; i < str.length(); i++) {
- PrintF("0x%04x", str[i]);
- }
- PrintF("\", cp_offset=%d, label[%08x])\n",
- cp_offset, LabelToInt(on_failure));
- assembler_->CheckCharacters(str, cp_offset, on_failure, check_end_of_string);
-}
-
-
-bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
- uc16 type,
- Label* on_no_match) {
- bool supported = assembler_->CheckSpecialCharacterClass(type,
- on_no_match);
- PrintF(" CheckSpecialCharacterClass(type='%c', label[%08x]): %s;\n",
- type,
- LabelToInt(on_no_match),
- supported ? "true" : "false");
- return supported;
-}
-
-
-void RegExpMacroAssemblerTracer::IfRegisterLT(int register_index,
- int comparand, Label* if_lt) {
- PrintF(" IfRegisterLT(register=%d, number=%d, label[%08x]);\n",
- register_index, comparand, LabelToInt(if_lt));
- assembler_->IfRegisterLT(register_index, comparand, if_lt);
-}
-
-
-void RegExpMacroAssemblerTracer::IfRegisterEqPos(int register_index,
- Label* if_eq) {
- PrintF(" IfRegisterEqPos(register=%d, label[%08x]);\n",
- register_index, LabelToInt(if_eq));
- assembler_->IfRegisterEqPos(register_index, if_eq);
-}
-
-
-void RegExpMacroAssemblerTracer::IfRegisterGE(int register_index,
- int comparand, Label* if_ge) {
- PrintF(" IfRegisterGE(register=%d, number=%d, label[%08x]);\n",
- register_index, comparand, LabelToInt(if_ge));
- assembler_->IfRegisterGE(register_index, comparand, if_ge);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerTracer::Implementation() {
- return assembler_->Implementation();
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerTracer::GetCode(Handle<String> source) {
- PrintF(" GetCode(%s);\n", *(source->ToCString()));
- return assembler_->GetCode(source);
-}
-
-}} // namespace v8::internal
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-tracer.h b/src/3rdparty/v8/src/regexp-macro-assembler-tracer.h
deleted file mode 100644
index ac262df..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler-tracer.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
-#define V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
-
-namespace v8 {
-namespace internal {
-
-// Decorator on a RegExpMacroAssembler that write all calls.
-class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
- public:
- explicit RegExpMacroAssemblerTracer(RegExpMacroAssembler* assembler);
- virtual ~RegExpMacroAssemblerTracer();
- virtual int stack_limit_slack() { return assembler_->stack_limit_slack(); }
- virtual bool CanReadUnaligned() { return assembler_->CanReadUnaligned(); }
- virtual void AdvanceCurrentPosition(int by); // Signed cp change.
- virtual void AdvanceRegister(int reg, int by); // r[reg] += by.
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned and_with,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(
- Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned and_with,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 and_with,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
-
- private:
- RegExpMacroAssembler* assembler_;
-};
-
-}} // namespace v8::internal
-
-#endif // V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler.cc b/src/3rdparty/v8/src/regexp-macro-assembler.cc
deleted file mode 100644
index 3ebf5a8..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler.cc
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "ast.h"
-#include "assembler.h"
-#include "regexp-stack.h"
-#include "regexp-macro-assembler.h"
-#include "simulator.h"
-
-namespace v8 {
-namespace internal {
-
-RegExpMacroAssembler::RegExpMacroAssembler(Zone* zone)
- : slow_safe_compiler_(false),
- global_mode_(NOT_GLOBAL),
- zone_(zone) {
-}
-
-
-RegExpMacroAssembler::~RegExpMacroAssembler() {
-}
-
-
-bool RegExpMacroAssembler::CanReadUnaligned() {
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- return true;
-#else
- return false;
-#endif
-}
-
-
-#ifndef V8_INTERPRETED_REGEXP // Avoid unused code, e.g., on ARM.
-
-NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(Zone* zone)
- : RegExpMacroAssembler(zone) {
-}
-
-
-NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() {
-}
-
-
-bool NativeRegExpMacroAssembler::CanReadUnaligned() {
- return FLAG_enable_unaligned_accesses && !slow_safe();
-}
-
-const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
- String* subject,
- int start_index) {
- // Not just flat, but ultra flat.
- ASSERT(subject->IsExternalString() || subject->IsSeqString());
- ASSERT(start_index >= 0);
- ASSERT(start_index <= subject->length());
- if (subject->IsOneByteRepresentation()) {
- const byte* address;
- if (StringShape(subject).IsExternal()) {
- const uint8_t* data = ExternalAsciiString::cast(subject)->GetChars();
- address = reinterpret_cast<const byte*>(data);
- } else {
- ASSERT(subject->IsSeqOneByteString());
- const uint8_t* data = SeqOneByteString::cast(subject)->GetChars();
- address = reinterpret_cast<const byte*>(data);
- }
- return address + start_index;
- }
- const uc16* data;
- if (StringShape(subject).IsExternal()) {
- data = ExternalTwoByteString::cast(subject)->GetChars();
- } else {
- ASSERT(subject->IsSeqTwoByteString());
- data = SeqTwoByteString::cast(subject)->GetChars();
- }
- return reinterpret_cast<const byte*>(data + start_index);
-}
-
-
-NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
- Handle<Code> regexp_code,
- Handle<String> subject,
- int* offsets_vector,
- int offsets_vector_length,
- int previous_index,
- Isolate* isolate) {
-
- ASSERT(subject->IsFlat());
- ASSERT(previous_index >= 0);
- ASSERT(previous_index <= subject->length());
-
- // No allocations before calling the regexp, but we can't use
- // AssertNoAllocation, since regexps might be preempted, and another thread
- // might do allocation anyway.
-
- String* subject_ptr = *subject;
- // Character offsets into string.
- int start_offset = previous_index;
- int char_length = subject_ptr->length() - start_offset;
- int slice_offset = 0;
-
- // The string has been flattened, so if it is a cons string it contains the
- // full string in the first part.
- if (StringShape(subject_ptr).IsCons()) {
- ASSERT_EQ(0, ConsString::cast(subject_ptr)->second()->length());
- subject_ptr = ConsString::cast(subject_ptr)->first();
- } else if (StringShape(subject_ptr).IsSliced()) {
- SlicedString* slice = SlicedString::cast(subject_ptr);
- subject_ptr = slice->parent();
- slice_offset = slice->offset();
- }
- // Ensure that an underlying string has the same ASCII-ness.
- bool is_ascii = subject_ptr->IsOneByteRepresentation();
- ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
- // String is now either Sequential or External
- int char_size_shift = is_ascii ? 0 : 1;
-
- const byte* input_start =
- StringCharacterPosition(subject_ptr, start_offset + slice_offset);
- int byte_length = char_length << char_size_shift;
- const byte* input_end = input_start + byte_length;
- Result res = Execute(*regexp_code,
- *subject,
- start_offset,
- input_start,
- input_end,
- offsets_vector,
- offsets_vector_length,
- isolate);
- return res;
-}
-
-
-NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
- Code* code,
- String* input, // This needs to be the unpacked (sliced, cons) string.
- int start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int output_size,
- Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- // Ensure that the minimum stack has been allocated.
- RegExpStackScope stack_scope(isolate);
- Address stack_base = stack_scope.stack()->stack_base();
-
- int direct_call = 0;
- int result = CALL_GENERATED_REGEXP_CODE(code->entry(),
- input,
- start_offset,
- input_start,
- input_end,
- output,
- output_size,
- stack_base,
- direct_call,
- isolate);
- ASSERT(result >= RETRY);
-
- if (result == EXCEPTION && !isolate->has_pending_exception()) {
- // We detected a stack overflow (on the backtrack stack) in RegExp code,
- // but haven't created the exception yet.
- isolate->StackOverflow();
- }
- return static_cast<Result>(result);
-}
-
-
-const byte NativeRegExpMacroAssembler::word_character_map[] = {
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
-
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // '0' - '7'
- 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // '8' - '9'
-
- 0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'A' - 'G'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'H' - 'O'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'P' - 'W'
- 0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0xffu, // 'X' - 'Z', '_'
-
- 0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'a' - 'g'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'h' - 'o'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'p' - 'w'
- 0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // 'x' - 'z'
- // Latin-1 range
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
-
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
-
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
-
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
-};
-
-
-int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
- Address byte_offset1,
- Address byte_offset2,
- size_t byte_length,
- Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
- isolate->regexp_macro_assembler_canonicalize();
- // This function is not allowed to cause a garbage collection.
- // A GC might move the calling generated code and invalidate the
- // return address on the stack.
- ASSERT(byte_length % 2 == 0);
- uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1);
- uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
- size_t length = byte_length >> 1;
-
- for (size_t i = 0; i < length; i++) {
- unibrow::uchar c1 = substring1[i];
- unibrow::uchar c2 = substring2[i];
- if (c1 != c2) {
- unibrow::uchar s1[1] = { c1 };
- canonicalize->get(c1, '\0', s1);
- if (s1[0] != c2) {
- unibrow::uchar s2[1] = { c2 };
- canonicalize->get(c2, '\0', s2);
- if (s1[0] != s2[0]) {
- return 0;
- }
- }
- }
- }
- return 1;
-}
-
-
-Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
- Address* stack_base,
- Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- RegExpStack* regexp_stack = isolate->regexp_stack();
- size_t size = regexp_stack->stack_capacity();
- Address old_stack_base = regexp_stack->stack_base();
- ASSERT(old_stack_base == *stack_base);
- ASSERT(stack_pointer <= old_stack_base);
- ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
- Address new_stack_base = regexp_stack->EnsureCapacity(size * 2);
- if (new_stack_base == NULL) {
- return NULL;
- }
- *stack_base = new_stack_base;
- intptr_t stack_content_size = old_stack_base - stack_pointer;
- return new_stack_base - stack_content_size;
-}
-
-#endif // V8_INTERPRETED_REGEXP
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler.h b/src/3rdparty/v8/src/regexp-macro-assembler.h
deleted file mode 100644
index 211ab6b..0000000
--- a/src/3rdparty/v8/src/regexp-macro-assembler.h
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGEXP_MACRO_ASSEMBLER_H_
-#define V8_REGEXP_MACRO_ASSEMBLER_H_
-
-#include "ast.h"
-
-namespace v8 {
-namespace internal {
-
-struct DisjunctDecisionRow {
- RegExpCharacterClass cc;
- Label* on_match;
-};
-
-
-class RegExpMacroAssembler {
- public:
- // The implementation must be able to handle at least:
- static const int kMaxRegister = (1 << 16) - 1;
- static const int kMaxCPOffset = (1 << 15) - 1;
- static const int kMinCPOffset = -(1 << 15);
-
- static const int kTableSizeBits = 7;
- static const int kTableSize = 1 << kTableSizeBits;
- static const int kTableMask = kTableSize - 1;
-
- enum IrregexpImplementation {
- kIA32Implementation,
- kARMImplementation,
- kMIPSImplementation,
- kX64Implementation,
- kBytecodeImplementation
- };
-
- enum StackCheckFlag {
- kNoStackLimitCheck = false,
- kCheckStackLimit = true
- };
-
- explicit RegExpMacroAssembler(Zone* zone);
- virtual ~RegExpMacroAssembler();
- // The maximal number of pushes between stack checks. Users must supply
- // kCheckStackLimit flag to push operations (instead of kNoStackLimitCheck)
- // at least once for every stack_limit() pushes that are executed.
- virtual int stack_limit_slack() = 0;
- virtual bool CanReadUnaligned();
- virtual void AdvanceCurrentPosition(int by) = 0; // Signed cp change.
- virtual void AdvanceRegister(int reg, int by) = 0; // r[reg] += by.
- // Continues execution from the position pushed on the top of the backtrack
- // stack by an earlier PushBacktrack(Label*).
- virtual void Backtrack() = 0;
- virtual void Bind(Label* label) = 0;
- virtual void CheckAtStart(Label* on_at_start) = 0;
- // Dispatch after looking the current character up in a 2-bits-per-entry
- // map. The destinations vector has up to 4 labels.
- virtual void CheckCharacter(unsigned c, Label* on_equal) = 0;
- // Bitwise and the current character with the given constant and then
- // check for a match with c.
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned and_with,
- Label* on_equal) = 0;
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater) = 0;
- virtual void CheckCharacterLT(uc16 limit, Label* on_less) = 0;
- // Check the current character for a match with a literal string. If we
- // fail to match then goto the on_failure label. If check_eos is set then
- // the end of input always fails. If check_eos is clear then it is the
- // caller's responsibility to ensure that the end of string is not hit.
- // If the label is NULL then we should pop a backtrack address off
- // the stack and go to that.
- virtual void CheckCharacters(
- Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_eos) = 0;
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position) = 0;
- virtual void CheckNotAtStart(Label* on_not_at_start) = 0;
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match) = 0;
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match) = 0;
- // Check the current character for a match with a literal character. If we
- // fail to match then goto the on_failure label. End of input always
- // matches. If the label is NULL then we should pop a backtrack address off
- // the stack and go to that.
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal) = 0;
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned and_with,
- Label* on_not_equal) = 0;
- // Subtract a constant from the current character, then and with the given
- // constant and then check for a match with c.
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 and_with,
- Label* on_not_equal) = 0;
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to, // Both inclusive.
- Label* on_in_range) = 0;
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to, // Both inclusive.
- Label* on_not_in_range) = 0;
-
- // The current character (modulus the kTableSize) is looked up in the byte
- // array, and if the found byte is non-zero, we jump to the on_bit_set label.
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set) = 0;
-
- // Checks whether the given offset from the current position is before
- // the end of the string. May overwrite the current character.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input) {
- LoadCurrentCharacter(cp_offset, on_outside_input, true);
- }
- // Check whether a standard/default character class matches the current
- // character. Returns false if the type of special character class does
- // not have custom support.
- // May clobber the current loaded character.
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- return false;
- }
- virtual void Fail() = 0;
- virtual Handle<HeapObject> GetCode(Handle<String> source) = 0;
- virtual void GoTo(Label* label) = 0;
- // Check whether a register is >= a given constant and go to a label if it
- // is. Backtracks instead if the label is NULL.
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge) = 0;
- // Check whether a register is < a given constant and go to a label if it is.
- // Backtracks instead if the label is NULL.
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt) = 0;
- // Check whether a register is == to the current position and go to a
- // label if it is.
- virtual void IfRegisterEqPos(int reg, Label* if_eq) = 0;
- virtual IrregexpImplementation Implementation() = 0;
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1) = 0;
- virtual void PopCurrentPosition() = 0;
- virtual void PopRegister(int register_index) = 0;
- // Pushes the label on the backtrack stack, so that a following Backtrack
- // will go to this label. Always checks the backtrack stack limit.
- virtual void PushBacktrack(Label* label) = 0;
- virtual void PushCurrentPosition() = 0;
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit) = 0;
- virtual void ReadCurrentPositionFromRegister(int reg) = 0;
- virtual void ReadStackPointerFromRegister(int reg) = 0;
- virtual void SetCurrentPositionFromEnd(int by) = 0;
- virtual void SetRegister(int register_index, int to) = 0;
- // Return whether the matching (with a global regexp) will be restarted.
- virtual bool Succeed() = 0;
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset) = 0;
- virtual void ClearRegisters(int reg_from, int reg_to) = 0;
- virtual void WriteStackPointerToRegister(int reg) = 0;
-
- // Controls the generation of large inlined constants in the code.
- void set_slow_safe(bool ssc) { slow_safe_compiler_ = ssc; }
- bool slow_safe() { return slow_safe_compiler_; }
-
- enum GlobalMode { NOT_GLOBAL, GLOBAL, GLOBAL_NO_ZERO_LENGTH_CHECK };
- // Set whether the regular expression has the global flag. Exiting due to
- // a failure in a global regexp may still mean success overall.
- inline void set_global_mode(GlobalMode mode) { global_mode_ = mode; }
- inline bool global() { return global_mode_ != NOT_GLOBAL; }
- inline bool global_with_zero_length_check() {
- return global_mode_ == GLOBAL;
- }
-
- Zone* zone() const { return zone_; }
-
- private:
- bool slow_safe_compiler_;
- bool global_mode_;
- Zone* zone_;
-};
-
-
-#ifndef V8_INTERPRETED_REGEXP // Avoid compiling unused code.
-
-class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
- public:
- // Type of input string to generate code for.
- enum Mode { ASCII = 1, UC16 = 2 };
-
- // Result of calling generated native RegExp code.
- // RETRY: Something significant changed during execution, and the matching
- // should be retried from scratch.
- // EXCEPTION: Something failed during execution. If no exception has been
- // thrown, it's an internal out-of-memory, and the caller should
- // throw the exception.
- // FAILURE: Matching failed.
- // SUCCESS: Matching succeeded, and the output array has been filled with
- // capture positions.
- enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
-
- explicit NativeRegExpMacroAssembler(Zone* zone);
- virtual ~NativeRegExpMacroAssembler();
- virtual bool CanReadUnaligned();
-
- static Result Match(Handle<Code> regexp,
- Handle<String> subject,
- int* offsets_vector,
- int offsets_vector_length,
- int previous_index,
- Isolate* isolate);
-
- // Compares two-byte strings case insensitively.
- // Called from generated RegExp code.
- static int CaseInsensitiveCompareUC16(Address byte_offset1,
- Address byte_offset2,
- size_t byte_length,
- Isolate* isolate);
-
- // Called from RegExp if the backtrack stack limit is hit.
- // Tries to expand the stack. Returns the new stack-pointer if
- // successful, and updates the stack_top address, or returns 0 if unable
- // to grow the stack.
- // This function must not trigger a garbage collection.
- static Address GrowStack(Address stack_pointer, Address* stack_top,
- Isolate* isolate);
-
- static const byte* StringCharacterPosition(String* subject, int start_index);
-
- // Byte map of one byte characters with a 0xff if the character is a word
- // character (digit, letter or underscore) and 0x00 otherwise.
- // Used by generated RegExp code.
- static const byte word_character_map[256];
-
- static Address word_character_map_address() {
- return const_cast<Address>(&word_character_map[0]);
- }
-
- static Result Execute(Code* code,
- String* input,
- int start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int output_size,
- Isolate* isolate);
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-} } // namespace v8::internal
-
-#endif // V8_REGEXP_MACRO_ASSEMBLER_H_
diff --git a/src/3rdparty/v8/src/regexp-stack.cc b/src/3rdparty/v8/src/regexp-stack.cc
deleted file mode 100644
index 325a149..0000000
--- a/src/3rdparty/v8/src/regexp-stack.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "regexp-stack.h"
-
-namespace v8 {
-namespace internal {
-
-RegExpStackScope::RegExpStackScope(Isolate* isolate)
- : regexp_stack_(isolate->regexp_stack()) {
- // Initialize, if not already initialized.
- regexp_stack_->EnsureCapacity(0);
-}
-
-
-RegExpStackScope::~RegExpStackScope() {
- ASSERT(Isolate::Current() == regexp_stack_->isolate_);
- // Reset the buffer if it has grown.
- regexp_stack_->Reset();
-}
-
-
-RegExpStack::RegExpStack()
- : isolate_(NULL) {
-}
-
-
-RegExpStack::~RegExpStack() {
- thread_local_.Free();
-}
-
-
-char* RegExpStack::ArchiveStack(char* to) {
- size_t size = sizeof(thread_local_);
- memcpy(reinterpret_cast<void*>(to),
- &thread_local_,
- size);
- thread_local_ = ThreadLocal();
- return to + size;
-}
-
-
-char* RegExpStack::RestoreStack(char* from) {
- size_t size = sizeof(thread_local_);
- memcpy(&thread_local_, reinterpret_cast<void*>(from), size);
- return from + size;
-}
-
-
-void RegExpStack::Reset() {
- if (thread_local_.memory_size_ > kMinimumStackSize) {
- DeleteArray(thread_local_.memory_);
- thread_local_ = ThreadLocal();
- }
-}
-
-
-void RegExpStack::ThreadLocal::Free() {
- if (memory_size_ > 0) {
- DeleteArray(memory_);
- Clear();
- }
-}
-
-
-Address RegExpStack::EnsureCapacity(size_t size) {
- if (size > kMaximumStackSize) return NULL;
- if (size < kMinimumStackSize) size = kMinimumStackSize;
- if (thread_local_.memory_size_ < size) {
- Address new_memory = NewArray<byte>(static_cast<int>(size));
- if (thread_local_.memory_size_ > 0) {
- // Copy original memory into top of new memory.
- memcpy(reinterpret_cast<void*>(
- new_memory + size - thread_local_.memory_size_),
- reinterpret_cast<void*>(thread_local_.memory_),
- thread_local_.memory_size_);
- DeleteArray(thread_local_.memory_);
- }
- thread_local_.memory_ = new_memory;
- thread_local_.memory_size_ = size;
- thread_local_.limit_ = new_memory + kStackLimitSlack * kPointerSize;
- }
- return thread_local_.memory_ + thread_local_.memory_size_;
-}
-
-
-}} // namespace v8::internal
diff --git a/src/3rdparty/v8/src/regexp-stack.h b/src/3rdparty/v8/src/regexp-stack.h
deleted file mode 100644
index 5684239..0000000
--- a/src/3rdparty/v8/src/regexp-stack.h
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGEXP_STACK_H_
-#define V8_REGEXP_STACK_H_
-
-namespace v8 {
-namespace internal {
-
-class RegExpStack;
-
-// Maintains a per-v8thread stack area that can be used by irregexp
-// implementation for its backtracking stack.
-// Since there is only one stack area, the Irregexp implementation is not
-// re-entrant. I.e., no regular expressions may be executed in the same thread
-// during a preempted Irregexp execution.
-class RegExpStackScope {
- public:
- // Create and delete an instance to control the life-time of a growing stack.
-
- // Initializes the stack memory area if necessary.
- explicit RegExpStackScope(Isolate* isolate);
- ~RegExpStackScope(); // Releases the stack if it has grown.
-
- RegExpStack* stack() const { return regexp_stack_; }
-
- private:
- RegExpStack* regexp_stack_;
-
- DISALLOW_COPY_AND_ASSIGN(RegExpStackScope);
-};
-
-
-class RegExpStack {
- public:
- // Number of allocated locations on the stack below the limit.
- // No sequence of pushes must be longer that this without doing a stack-limit
- // check.
- static const int kStackLimitSlack = 32;
-
- // Gives the top of the memory used as stack.
- Address stack_base() {
- ASSERT(thread_local_.memory_size_ != 0);
- return thread_local_.memory_ + thread_local_.memory_size_;
- }
-
- // The total size of the memory allocated for the stack.
- size_t stack_capacity() { return thread_local_.memory_size_; }
-
- // If the stack pointer gets below the limit, we should react and
- // either grow the stack or report an out-of-stack exception.
- // There is only a limited number of locations below the stack limit,
- // so users of the stack should check the stack limit during any
- // sequence of pushes longer that this.
- Address* limit_address() { return &(thread_local_.limit_); }
-
- // Ensures that there is a memory area with at least the specified size.
- // If passing zero, the default/minimum size buffer is allocated.
- Address EnsureCapacity(size_t size);
-
- // Thread local archiving.
- static int ArchiveSpacePerThread() {
- return static_cast<int>(sizeof(ThreadLocal));
- }
- char* ArchiveStack(char* to);
- char* RestoreStack(char* from);
- void FreeThreadResources() { thread_local_.Free(); }
-
- private:
- RegExpStack();
- ~RegExpStack();
-
- // Artificial limit used when no memory has been allocated.
- static const uintptr_t kMemoryTop = static_cast<uintptr_t>(-1);
-
- // Minimal size of allocated stack area.
- static const size_t kMinimumStackSize = 1 * KB;
-
- // Maximal size of allocated stack area.
- static const size_t kMaximumStackSize = 64 * MB;
-
- // Structure holding the allocated memory, size and limit.
- struct ThreadLocal {
- ThreadLocal() { Clear(); }
- // If memory_size_ > 0 then memory_ must be non-NULL.
- Address memory_;
- size_t memory_size_;
- Address limit_;
- void Clear() {
- memory_ = NULL;
- memory_size_ = 0;
- limit_ = reinterpret_cast<Address>(kMemoryTop);
- }
- void Free();
- };
-
- // Address of allocated memory.
- Address memory_address() {
- return reinterpret_cast<Address>(&thread_local_.memory_);
- }
-
- // Address of size of allocated memory.
- Address memory_size_address() {
- return reinterpret_cast<Address>(&thread_local_.memory_size_);
- }
-
- // Resets the buffer if it has grown beyond the default/minimum size.
- // After this, the buffer is either the default size, or it is empty, so
- // you have to call EnsureCapacity before using it again.
- void Reset();
-
- ThreadLocal thread_local_;
- Isolate* isolate_;
-
- friend class ExternalReference;
- friend class Isolate;
- friend class RegExpStackScope;
-
- DISALLOW_COPY_AND_ASSIGN(RegExpStack);
-};
-
-}} // namespace v8::internal
-
-#endif // V8_REGEXP_STACK_H_
diff --git a/src/3rdparty/v8/src/regexp.js b/src/3rdparty/v8/src/regexp.js
deleted file mode 100644
index 2349ca7..0000000
--- a/src/3rdparty/v8/src/regexp.js
+++ /dev/null
@@ -1,481 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Expect $Object = global.Object;
-// Expect $Array = global.Array;
-
-var $RegExp = global.RegExp;
-
-// A recursive descent parser for Patterns according to the grammar of
-// ECMA-262 15.10.1, with deviations noted below.
-function DoConstructRegExp(object, pattern, flags) {
- // RegExp : Called as constructor; see ECMA-262, section 15.10.4.
- if (IS_REGEXP(pattern)) {
- if (!IS_UNDEFINED(flags)) {
- throw MakeTypeError('regexp_flags', []);
- }
- flags = (pattern.global ? 'g' : '')
- + (pattern.ignoreCase ? 'i' : '')
- + (pattern.multiline ? 'm' : '');
- pattern = pattern.source;
- }
-
- pattern = IS_UNDEFINED(pattern) ? '' : ToString(pattern);
- flags = IS_UNDEFINED(flags) ? '' : ToString(flags);
-
- var global = false;
- var ignoreCase = false;
- var multiline = false;
- for (var i = 0; i < flags.length; i++) {
- var c = %_CallFunction(flags, i, StringCharAt);
- switch (c) {
- case 'g':
- if (global) {
- throw MakeSyntaxError("invalid_regexp_flags", [flags]);
- }
- global = true;
- break;
- case 'i':
- if (ignoreCase) {
- throw MakeSyntaxError("invalid_regexp_flags", [flags]);
- }
- ignoreCase = true;
- break;
- case 'm':
- if (multiline) {
- throw MakeSyntaxError("invalid_regexp_flags", [flags]);
- }
- multiline = true;
- break;
- default:
- throw MakeSyntaxError("invalid_regexp_flags", [flags]);
- }
- }
-
- %RegExpInitializeObject(object, pattern, global, ignoreCase, multiline);
-
- // Call internal function to compile the pattern.
- %RegExpCompile(object, pattern, flags);
-}
-
-
-function RegExpConstructor(pattern, flags) {
- if (%_IsConstructCall()) {
- DoConstructRegExp(this, pattern, flags);
- } else {
- // RegExp : Called as function; see ECMA-262, section 15.10.3.1.
- if (IS_REGEXP(pattern) && IS_UNDEFINED(flags)) {
- return pattern;
- }
- return new $RegExp(pattern, flags);
- }
-}
-
-// Deprecated RegExp.prototype.compile method. We behave like the constructor
-// were called again. In SpiderMonkey, this method returns the regexp object.
-// In JSC, it returns undefined. For compatibility with JSC, we match their
-// behavior.
-function RegExpCompile(pattern, flags) {
- // Both JSC and SpiderMonkey treat a missing pattern argument as the
- // empty subject string, and an actual undefined value passed as the
- // pattern as the string 'undefined'. Note that JSC is inconsistent
- // here, treating undefined values differently in
- // RegExp.prototype.compile and in the constructor, where they are
- // the empty string. For compatibility with JSC, we match their
- // behavior.
- if (this == $RegExp.prototype) {
- // We don't allow recompiling RegExp.prototype.
- throw MakeTypeError('incompatible_method_receiver',
- ['RegExp.prototype.compile', this]);
- }
- if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
- DoConstructRegExp(this, 'undefined', flags);
- } else {
- DoConstructRegExp(this, pattern, flags);
- }
-}
-
-
-function DoRegExpExec(regexp, string, index) {
- var result = %_RegExpExec(regexp, string, index, lastMatchInfo);
- if (result !== null) lastMatchInfoOverride = null;
- return result;
-}
-
-
-function BuildResultFromMatchInfo(lastMatchInfo, s) {
- var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
- var start = lastMatchInfo[CAPTURE0];
- var end = lastMatchInfo[CAPTURE1];
- var result = %_RegExpConstructResult(numResults, start, s);
- result[0] = %_SubString(s, start, end);
- var j = REGEXP_FIRST_CAPTURE + 2;
- for (var i = 1; i < numResults; i++) {
- start = lastMatchInfo[j++];
- if (start != -1) {
- end = lastMatchInfo[j];
- result[i] = %_SubString(s, start, end);
- }
- j++;
- }
- return result;
-}
-
-
-function RegExpExecNoTests(regexp, string, start) {
- // Must be called with RegExp, string and positive integer as arguments.
- var matchInfo = %_RegExpExec(regexp, string, start, lastMatchInfo);
- if (matchInfo !== null) {
- lastMatchInfoOverride = null;
- return BuildResultFromMatchInfo(matchInfo, string);
- }
- regexp.lastIndex = 0;
- return null;
-}
-
-
-function RegExpExec(string) {
- if (!IS_REGEXP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['RegExp.prototype.exec', this]);
- }
-
- string = TO_STRING_INLINE(string);
- var lastIndex = this.lastIndex;
-
- // Conversion is required by the ES5 specification (RegExp.prototype.exec
- // algorithm, step 5) even if the value is discarded for non-global RegExps.
- var i = TO_INTEGER(lastIndex);
-
- var global = this.global;
- if (global) {
- if (i < 0 || i > string.length) {
- this.lastIndex = 0;
- return null;
- }
- } else {
- i = 0;
- }
-
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
- // matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
-
- if (matchIndices === null) {
- this.lastIndex = 0;
- return null;
- }
-
- // Successful match.
- lastMatchInfoOverride = null;
- if (global) {
- this.lastIndex = lastMatchInfo[CAPTURE1];
- }
- return BuildResultFromMatchInfo(matchIndices, string);
-}
-
-
-// One-element cache for the simplified test regexp.
-var regexp_key;
-var regexp_val;
-
-// Section 15.10.6.3 doesn't actually make sense, but the intention seems to be
-// that test is defined in terms of String.prototype.exec. However, it probably
-// means the original value of String.prototype.exec, which is what everybody
-// else implements.
-function RegExpTest(string) {
- if (!IS_REGEXP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['RegExp.prototype.test', this]);
- }
- string = TO_STRING_INLINE(string);
-
- var lastIndex = this.lastIndex;
-
- // Conversion is required by the ES5 specification (RegExp.prototype.exec
- // algorithm, step 5) even if the value is discarded for non-global RegExps.
- var i = TO_INTEGER(lastIndex);
-
- if (this.global) {
- if (i < 0 || i > string.length) {
- this.lastIndex = 0;
- return false;
- }
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
- // matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
- if (matchIndices === null) {
- this.lastIndex = 0;
- return false;
- }
- lastMatchInfoOverride = null;
- this.lastIndex = lastMatchInfo[CAPTURE1];
- return true;
- } else {
- // Non-global regexp.
- // Remove irrelevant preceeding '.*' in a non-global test regexp.
- // The expression checks whether this.source starts with '.*' and
- // that the third char is not a '?'.
- var regexp = this;
- if (%_StringCharCodeAt(regexp.source, 0) == 46 && // '.'
- %_StringCharCodeAt(regexp.source, 1) == 42 && // '*'
- %_StringCharCodeAt(regexp.source, 2) != 63) { // '?'
- regexp = TrimRegExp(regexp);
- }
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]);
- // matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo);
- if (matchIndices === null) {
- this.lastIndex = 0;
- return false;
- }
- lastMatchInfoOverride = null;
- return true;
- }
-}
-
-function TrimRegExp(regexp) {
- if (!%_ObjectEquals(regexp_key, regexp)) {
- regexp_key = regexp;
- regexp_val =
- new $RegExp(%_SubString(regexp.source, 2, regexp.source.length),
- (regexp.ignoreCase ? regexp.multiline ? "im" : "i"
- : regexp.multiline ? "m" : ""));
- }
- return regexp_val;
-}
-
-
-function RegExpToString() {
- if (!IS_REGEXP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['RegExp.prototype.toString', this]);
- }
- var result = '/' + this.source + '/';
- if (this.global) result += 'g';
- if (this.ignoreCase) result += 'i';
- if (this.multiline) result += 'm';
- return result;
-}
-
-
-// Getters for the static properties lastMatch, lastParen, leftContext, and
-// rightContext of the RegExp constructor. The properties are computed based
-// on the captures array of the last successful match and the subject string
-// of the last successful match.
-function RegExpGetLastMatch() {
- if (lastMatchInfoOverride !== null) {
- return OVERRIDE_MATCH(lastMatchInfoOverride);
- }
- var regExpSubject = LAST_SUBJECT(lastMatchInfo);
- return %_SubString(regExpSubject,
- lastMatchInfo[CAPTURE0],
- lastMatchInfo[CAPTURE1]);
-}
-
-
-function RegExpGetLastParen() {
- if (lastMatchInfoOverride) {
- var override = lastMatchInfoOverride;
- if (override.length <= 3) return '';
- return override[override.length - 3];
- }
- var length = NUMBER_OF_CAPTURES(lastMatchInfo);
- if (length <= 2) return ''; // There were no captures.
- // We match the SpiderMonkey behavior: return the substring defined by the
- // last pair (after the first pair) of elements of the capture array even if
- // it is empty.
- var regExpSubject = LAST_SUBJECT(lastMatchInfo);
- var start = lastMatchInfo[CAPTURE(length - 2)];
- var end = lastMatchInfo[CAPTURE(length - 1)];
- if (start != -1 && end != -1) {
- return %_SubString(regExpSubject, start, end);
- }
- return "";
-}
-
-
-function RegExpGetLeftContext() {
- var start_index;
- var subject;
- if (!lastMatchInfoOverride) {
- start_index = lastMatchInfo[CAPTURE0];
- subject = LAST_SUBJECT(lastMatchInfo);
- } else {
- var override = lastMatchInfoOverride;
- start_index = OVERRIDE_POS(override);
- subject = OVERRIDE_SUBJECT(override);
- }
- return %_SubString(subject, 0, start_index);
-}
-
-
-function RegExpGetRightContext() {
- var start_index;
- var subject;
- if (!lastMatchInfoOverride) {
- start_index = lastMatchInfo[CAPTURE1];
- subject = LAST_SUBJECT(lastMatchInfo);
- } else {
- var override = lastMatchInfoOverride;
- subject = OVERRIDE_SUBJECT(override);
- var match = OVERRIDE_MATCH(override);
- start_index = OVERRIDE_POS(override) + match.length;
- }
- return %_SubString(subject, start_index, subject.length);
-}
-
-
-// The properties $1..$9 are the first nine capturing substrings of the last
-// successful match, or ''. The function RegExpMakeCaptureGetter will be
-// called with indices from 1 to 9.
-function RegExpMakeCaptureGetter(n) {
- return function() {
- if (lastMatchInfoOverride) {
- if (n < lastMatchInfoOverride.length - 2) {
- return OVERRIDE_CAPTURE(lastMatchInfoOverride, n);
- }
- return '';
- }
- var index = n * 2;
- if (index >= NUMBER_OF_CAPTURES(lastMatchInfo)) return '';
- var matchStart = lastMatchInfo[CAPTURE(index)];
- var matchEnd = lastMatchInfo[CAPTURE(index + 1)];
- if (matchStart == -1 || matchEnd == -1) return '';
- return %_SubString(LAST_SUBJECT(lastMatchInfo), matchStart, matchEnd);
- };
-}
-
-
-// Property of the builtins object for recording the result of the last
-// regexp match. The property lastMatchInfo includes the matchIndices
-// array of the last successful regexp match (an array of start/end index
-// pairs for the match and all the captured substrings), the invariant is
-// that there are at least two capture indeces. The array also contains
-// the subject string for the last successful match.
-var lastMatchInfo = new InternalPackedArray(
- 2, // REGEXP_NUMBER_OF_CAPTURES
- "", // Last subject.
- void 0, // Last input - settable with RegExpSetInput.
- 0, // REGEXP_FIRST_CAPTURE + 0
- 0 // REGEXP_FIRST_CAPTURE + 1
-);
-
-// Override last match info with an array of actual substrings.
-// Used internally by replace regexp with function.
-// The array has the format of an "apply" argument for a replacement
-// function.
-var lastMatchInfoOverride = null;
-
-// -------------------------------------------------------------------
-
-function SetUpRegExp() {
- %CheckIsBootstrapping();
- %FunctionSetInstanceClassName($RegExp, 'RegExp');
- %SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
- %SetCode($RegExp, RegExpConstructor);
-
- InstallFunctions($RegExp.prototype, DONT_ENUM, $Array(
- "exec", RegExpExec,
- "test", RegExpTest,
- "toString", RegExpToString,
- "compile", RegExpCompile
- ));
-
- // The length of compile is 1 in SpiderMonkey.
- %FunctionSetLength($RegExp.prototype.compile, 1);
-
- // The properties input, $input, and $_ are aliases for each other. When this
- // value is set the value it is set to is coerced to a string.
- // Getter and setter for the input.
- var RegExpGetInput = function() {
- var regExpInput = LAST_INPUT(lastMatchInfo);
- return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
- };
- var RegExpSetInput = function(string) {
- LAST_INPUT(lastMatchInfo) = ToString(string);
- };
-
- %OptimizeObjectForAddingMultipleProperties($RegExp, 22);
- %DefineOrRedefineAccessorProperty($RegExp, 'input', RegExpGetInput,
- RegExpSetInput, DONT_DELETE);
- %DefineOrRedefineAccessorProperty($RegExp, '$_', RegExpGetInput,
- RegExpSetInput, DONT_ENUM | DONT_DELETE);
- %DefineOrRedefineAccessorProperty($RegExp, '$input', RegExpGetInput,
- RegExpSetInput, DONT_ENUM | DONT_DELETE);
-
- // The properties multiline and $* are aliases for each other. When this
- // value is set in SpiderMonkey, the value it is set to is coerced to a
- // boolean. We mimic that behavior with a slight difference: in SpiderMonkey
- // the value of the expression 'RegExp.multiline = null' (for instance) is the
- // boolean false (i.e., the value after coercion), while in V8 it is the value
- // null (i.e., the value before coercion).
-
- // Getter and setter for multiline.
- var multiline = false;
- var RegExpGetMultiline = function() { return multiline; };
- var RegExpSetMultiline = function(flag) { multiline = flag ? true : false; };
-
- %DefineOrRedefineAccessorProperty($RegExp, 'multiline', RegExpGetMultiline,
- RegExpSetMultiline, DONT_DELETE);
- %DefineOrRedefineAccessorProperty($RegExp, '$*', RegExpGetMultiline,
- RegExpSetMultiline,
- DONT_ENUM | DONT_DELETE);
-
-
- var NoOpSetter = function(ignored) {};
-
-
- // Static properties set by a successful match.
- %DefineOrRedefineAccessorProperty($RegExp, 'lastMatch', RegExpGetLastMatch,
- NoOpSetter, DONT_DELETE);
- %DefineOrRedefineAccessorProperty($RegExp, '$&', RegExpGetLastMatch,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
- %DefineOrRedefineAccessorProperty($RegExp, 'lastParen', RegExpGetLastParen,
- NoOpSetter, DONT_DELETE);
- %DefineOrRedefineAccessorProperty($RegExp, '$+', RegExpGetLastParen,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
- %DefineOrRedefineAccessorProperty($RegExp, 'leftContext',
- RegExpGetLeftContext, NoOpSetter,
- DONT_DELETE);
- %DefineOrRedefineAccessorProperty($RegExp, '$`', RegExpGetLeftContext,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
- %DefineOrRedefineAccessorProperty($RegExp, 'rightContext',
- RegExpGetRightContext, NoOpSetter,
- DONT_DELETE);
- %DefineOrRedefineAccessorProperty($RegExp, "$'", RegExpGetRightContext,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
-
- for (var i = 1; i < 10; ++i) {
- %DefineOrRedefineAccessorProperty($RegExp, '$' + i,
- RegExpMakeCaptureGetter(i), NoOpSetter,
- DONT_DELETE);
- }
- %ToFastProperties($RegExp);
-}
-
-SetUpRegExp();
diff --git a/src/3rdparty/v8/src/rewriter.cc b/src/3rdparty/v8/src/rewriter.cc
deleted file mode 100644
index 44fe050..0000000
--- a/src/3rdparty/v8/src/rewriter.cc
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "rewriter.h"
-
-#include "ast.h"
-#include "compiler.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-class Processor: public AstVisitor {
- public:
- Processor(Variable* result, Zone* zone)
- : result_(result),
- result_assigned_(false),
- is_set_(false),
- in_try_(false),
- factory_(Isolate::Current(), zone) {
- InitializeAstVisitor();
- }
-
- virtual ~Processor() { }
-
- void Process(ZoneList<Statement*>* statements);
- bool result_assigned() const { return result_assigned_; }
-
- AstNodeFactory<AstNullVisitor>* factory() {
- return &factory_;
- }
-
- private:
- Variable* result_;
-
- // We are not tracking result usage via the result_'s use
- // counts (we leave the accurate computation to the
- // usage analyzer). Instead we simple remember if
- // there was ever an assignment to result_.
- bool result_assigned_;
-
- // To avoid storing to .result all the time, we eliminate some of
- // the stores by keeping track of whether or not we're sure .result
- // will be overwritten anyway. This is a bit more tricky than what I
- // was hoping for
- bool is_set_;
- bool in_try_;
-
- AstNodeFactory<AstNullVisitor> factory_;
-
- Expression* SetResult(Expression* value) {
- result_assigned_ = true;
- VariableProxy* result_proxy = factory()->NewVariableProxy(result_);
- return factory()->NewAssignment(
- Token::ASSIGN, result_proxy, value, RelocInfo::kNoPosition);
- }
-
- // Node visitors.
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- void VisitIterationStatement(IterationStatement* stmt);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-};
-
-
-void Processor::Process(ZoneList<Statement*>* statements) {
- for (int i = statements->length() - 1; i >= 0; --i) {
- Visit(statements->at(i));
- }
-}
-
-
-void Processor::VisitBlock(Block* node) {
- // An initializer block is the rewritten form of a variable declaration
- // with initialization expressions. The initializer block contains the
- // list of assignments corresponding to the initialization expressions.
- // While unclear from the spec (ECMA-262, 3rd., 12.2), the value of
- // a variable declaration with initialization expression is 'undefined'
- // with some JS VMs: For instance, using smjs, print(eval('var x = 7'))
- // returns 'undefined'. To obtain the same behavior with v8, we need
- // to prevent rewriting in that case.
- if (!node->is_initializer_block()) Process(node->statements());
-}
-
-
-void Processor::VisitModuleStatement(ModuleStatement* node) {
- bool set_after_body = is_set_;
- Visit(node->body());
- is_set_ = is_set_ && set_after_body;
-}
-
-
-void Processor::VisitExpressionStatement(ExpressionStatement* node) {
- // Rewrite : <x>; -> .result = <x>;
- if (!is_set_ && !node->expression()->IsThrow()) {
- node->set_expression(SetResult(node->expression()));
- if (!in_try_) is_set_ = true;
- }
-}
-
-
-void Processor::VisitIfStatement(IfStatement* node) {
- // Rewrite both then and else parts (reversed).
- bool save = is_set_;
- Visit(node->else_statement());
- bool set_after_then = is_set_;
- is_set_ = save;
- Visit(node->then_statement());
- is_set_ = is_set_ && set_after_then;
-}
-
-
-void Processor::VisitIterationStatement(IterationStatement* node) {
- // Rewrite the body.
- bool set_after_loop = is_set_;
- Visit(node->body());
- is_set_ = is_set_ && set_after_loop;
-}
-
-
-void Processor::VisitDoWhileStatement(DoWhileStatement* node) {
- VisitIterationStatement(node);
-}
-
-
-void Processor::VisitWhileStatement(WhileStatement* node) {
- VisitIterationStatement(node);
-}
-
-
-void Processor::VisitForStatement(ForStatement* node) {
- VisitIterationStatement(node);
-}
-
-
-void Processor::VisitForInStatement(ForInStatement* node) {
- VisitIterationStatement(node);
-}
-
-
-void Processor::VisitTryCatchStatement(TryCatchStatement* node) {
- // Rewrite both try and catch blocks (reversed order).
- bool set_after_catch = is_set_;
- Visit(node->catch_block());
- is_set_ = is_set_ && set_after_catch;
- bool save = in_try_;
- in_try_ = true;
- Visit(node->try_block());
- in_try_ = save;
-}
-
-
-void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
- // Rewrite both try and finally block (reversed order).
- Visit(node->finally_block());
- bool save = in_try_;
- in_try_ = true;
- Visit(node->try_block());
- in_try_ = save;
-}
-
-
-void Processor::VisitSwitchStatement(SwitchStatement* node) {
- // Rewrite statements in all case clauses in reversed order.
- ZoneList<CaseClause*>* clauses = node->cases();
- bool set_after_switch = is_set_;
- for (int i = clauses->length() - 1; i >= 0; --i) {
- CaseClause* clause = clauses->at(i);
- Process(clause->statements());
- }
- is_set_ = is_set_ && set_after_switch;
-}
-
-
-void Processor::VisitContinueStatement(ContinueStatement* node) {
- is_set_ = false;
-}
-
-
-void Processor::VisitBreakStatement(BreakStatement* node) {
- is_set_ = false;
-}
-
-
-void Processor::VisitWithStatement(WithStatement* node) {
- bool set_after_body = is_set_;
- Visit(node->statement());
- is_set_ = is_set_ && set_after_body;
-}
-
-
-// Do nothing:
-void Processor::VisitVariableDeclaration(VariableDeclaration* node) {}
-void Processor::VisitFunctionDeclaration(FunctionDeclaration* node) {}
-void Processor::VisitModuleDeclaration(ModuleDeclaration* node) {}
-void Processor::VisitImportDeclaration(ImportDeclaration* node) {}
-void Processor::VisitExportDeclaration(ExportDeclaration* node) {}
-void Processor::VisitModuleLiteral(ModuleLiteral* node) {}
-void Processor::VisitModuleVariable(ModuleVariable* node) {}
-void Processor::VisitModulePath(ModulePath* node) {}
-void Processor::VisitModuleUrl(ModuleUrl* node) {}
-void Processor::VisitEmptyStatement(EmptyStatement* node) {}
-void Processor::VisitReturnStatement(ReturnStatement* node) {}
-void Processor::VisitDebuggerStatement(DebuggerStatement* node) {}
-
-
-// Expressions are never visited yet.
-#define DEF_VISIT(type) \
- void Processor::Visit##type(type* expr) { UNREACHABLE(); }
-EXPRESSION_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-
-// Assumes code has been parsed. Mutates the AST, so the AST should not
-// continue to be used in the case of failure.
-bool Rewriter::Rewrite(CompilationInfo* info) {
- FunctionLiteral* function = info->function();
- ASSERT(function != NULL);
- Scope* scope = function->scope();
- ASSERT(scope != NULL);
- if (!scope->is_global_scope() && !scope->is_eval_scope()) return true;
-
- ZoneList<Statement*>* body = function->body();
- if (!body->is_empty()) {
- Variable* result = scope->NewTemporary(
- info->isolate()->factory()->result_string());
- Processor processor(result, info->zone());
- processor.Process(body);
- if (processor.HasStackOverflow()) return false;
-
- if (processor.result_assigned()) {
- ASSERT(function->end_position() != RelocInfo::kNoPosition);
- // Set the position of the assignment statement one character past the
- // source code, such that it definitely is not in the source code range
- // of an immediate inner scope. For example in
- // eval('with ({x:1}) x = 1');
- // the end position of the function generated for executing the eval code
- // coincides with the end of the with scope which is the position of '1'.
- int position = function->end_position();
- VariableProxy* result_proxy = processor.factory()->NewVariableProxy(
- result->name(), false, result->interface(), position);
- result_proxy->BindTo(result);
- Statement* result_statement =
- processor.factory()->NewReturnStatement(result_proxy);
- result_statement->set_statement_pos(position);
- body->Add(result_statement, info->zone());
- }
- }
-
- return true;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/rewriter.h b/src/3rdparty/v8/src/rewriter.h
deleted file mode 100644
index 59914d9..0000000
--- a/src/3rdparty/v8/src/rewriter.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REWRITER_H_
-#define V8_REWRITER_H_
-
-namespace v8 {
-namespace internal {
-
-class CompilationInfo;
-
-class Rewriter {
- public:
- // Rewrite top-level code (ECMA 262 "programs") so as to conservatively
- // include an assignment of the value of the last statement in the code to
- // a compiler-generated temporary variable wherever needed.
- //
- // Assumes code has been parsed and scopes have been analyzed. Mutates the
- // AST, so the AST should not continue to be used in the case of failure.
- static bool Rewrite(CompilationInfo* info);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_REWRITER_H_
diff --git a/src/3rdparty/v8/src/runtime-profiler.cc b/src/3rdparty/v8/src/runtime-profiler.cc
deleted file mode 100644
index 94a5650..0000000
--- a/src/3rdparty/v8/src/runtime-profiler.cc
+++ /dev/null
@@ -1,482 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "runtime-profiler.h"
-
-#include "assembler.h"
-#include "code-stubs.h"
-#include "compilation-cache.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "full-codegen.h"
-#include "global-handles.h"
-#include "isolate-inl.h"
-#include "mark-compact.h"
-#include "platform.h"
-#include "scopeinfo.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Optimization sampler constants.
-static const int kSamplerFrameCount = 2;
-
-// Constants for statistical profiler.
-static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
-
-static const int kSamplerTicksBetweenThresholdAdjustment = 32;
-
-static const int kSamplerThresholdInit = 3;
-static const int kSamplerThresholdMin = 1;
-static const int kSamplerThresholdDelta = 1;
-
-static const int kSamplerThresholdSizeFactorInit = 3;
-
-static const int kSizeLimit = 1500;
-
-// Constants for counter based profiler.
-
-// Number of times a function has to be seen on the stack before it is
-// optimized.
-static const int kProfilerTicksBeforeOptimization = 2;
-// If the function optimization was disabled due to high deoptimization count,
-// but the function is hot and has been seen on the stack this number of times,
-// then we try to reenable optimization for this function.
-static const int kProfilerTicksBeforeReenablingOptimization = 250;
-// If a function does not have enough type info (according to
-// FLAG_type_info_threshold), but has seen a huge number of ticks,
-// optimize it as it is.
-static const int kTicksWhenNotEnoughTypeInfo = 100;
-// We only have one byte to store the number of ticks.
-STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
-STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
-STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
-
-
-// Maximum size in bytes of generated code for a function to be optimized
-// the very first time it is seen on the stack.
-static const int kMaxSizeEarlyOpt =
- 5 * FullCodeGenerator::kBackEdgeDistanceUnit;
-
-
-Atomic32 RuntimeProfiler::state_ = 0;
-
-// TODO(isolates): Clean up the semaphore when it is no longer required.
-static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;
-
-#ifdef DEBUG
-bool RuntimeProfiler::has_been_globally_set_up_ = false;
-#endif
-bool RuntimeProfiler::enabled_ = false;
-
-
-RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
- : isolate_(isolate),
- sampler_threshold_(kSamplerThresholdInit),
- sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
- sampler_ticks_until_threshold_adjustment_(
- kSamplerTicksBetweenThresholdAdjustment),
- sampler_window_position_(0),
- any_ic_changed_(false),
- code_generated_(false) {
- ClearSampleBuffer();
-}
-
-
-void RuntimeProfiler::GlobalSetUp() {
- ASSERT(!has_been_globally_set_up_);
- enabled_ = V8::UseCrankshaft() && FLAG_opt;
-#ifdef DEBUG
- has_been_globally_set_up_ = true;
-#endif
-}
-
-
-static void GetICCounts(JSFunction* function,
- int* ic_with_type_info_count,
- int* ic_total_count,
- int* percentage) {
- *ic_total_count = 0;
- *ic_with_type_info_count = 0;
- Object* raw_info =
- function->shared()->code()->type_feedback_info();
- if (raw_info->IsTypeFeedbackInfo()) {
- TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
- *ic_with_type_info_count = info->ic_with_type_info_count();
- *ic_total_count = info->ic_total_count();
- }
- *percentage = *ic_total_count > 0
- ? 100 * *ic_with_type_info_count / *ic_total_count
- : 100;
-}
-
-
-void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
- ASSERT(function->IsOptimizable());
- // If we are in manual mode, don't auto-optimize anything.
- if (FLAG_manual_parallel_recompilation) return;
-
- if (FLAG_trace_opt) {
- PrintF("[marking ");
- function->PrintName();
- PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
- PrintF(" for recompilation, reason: %s", reason);
- if (FLAG_type_info_threshold > 0) {
- int typeinfo, total, percentage;
- GetICCounts(function, &typeinfo, &total, &percentage);
- PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage);
- }
- PrintF("]\n");
- }
-
- if (FLAG_parallel_recompilation) {
- function->MarkForParallelRecompilation();
- } else {
- // The next call to the function will trigger optimization.
- function->MarkForLazyRecompilation();
- }
-}
-
-
-void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
- // See AlwaysFullCompiler (in compiler.cc) comment on why we need
- // Debug::has_break_points().
- ASSERT(function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForParallelRecompilation());
- if (!FLAG_use_osr ||
- isolate_->DebuggerHasBreakPoints() ||
- function->IsBuiltin()) {
- return;
- }
-
- SharedFunctionInfo* shared = function->shared();
- // If the code is not optimizable, don't try OSR.
- if (!shared->code()->optimizable()) return;
-
- // We are not prepared to do OSR for a function that already has an
- // allocated arguments object. The optimized code would bypass it for
- // arguments accesses, which is unsound. Don't try OSR.
- if (shared->uses_arguments()) return;
-
- // We're using on-stack replacement: patch the unoptimized code so that
- // any back edge in any unoptimized frame will trigger on-stack
- // replacement for that frame.
- if (FLAG_trace_osr) {
- PrintF("[patching stack checks in ");
- function->PrintName();
- PrintF(" for on-stack replacement]\n");
- }
-
- // Get the stack check stub code object to match against. We aren't
- // prepared to generate it, but we don't expect to have to.
- Code* stack_check_code = NULL;
- InterruptStub interrupt_stub;
- bool found_code = interrupt_stub.FindCodeInCache(&stack_check_code, isolate_);
- if (found_code) {
- Code* replacement_code =
- isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
- Code* unoptimized_code = shared->code();
- Deoptimizer::PatchStackCheckCode(unoptimized_code,
- stack_check_code,
- replacement_code);
- }
-}
-
-
-void RuntimeProfiler::ClearSampleBuffer() {
- memset(sampler_window_, 0, sizeof(sampler_window_));
- memset(sampler_window_weight_, 0, sizeof(sampler_window_weight_));
-}
-
-
-int RuntimeProfiler::LookupSample(JSFunction* function) {
- int weight = 0;
- for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* sample = sampler_window_[i];
- if (sample != NULL) {
- bool fits = FLAG_lookup_sample_by_shared
- ? (function->shared() == JSFunction::cast(sample)->shared())
- : (function == JSFunction::cast(sample));
- if (fits) {
- weight += sampler_window_weight_[i];
- }
- }
- }
- return weight;
-}
-
-
-void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
- ASSERT(IsPowerOf2(kSamplerWindowSize));
- sampler_window_[sampler_window_position_] = function;
- sampler_window_weight_[sampler_window_position_] = weight;
- sampler_window_position_ = (sampler_window_position_ + 1) &
- (kSamplerWindowSize - 1);
-}
-
-
-void RuntimeProfiler::OptimizeNow() {
- HandleScope scope(isolate_);
-
- // Run through the JavaScript frames and collect them. If we already
- // have a sample of the function, we mark it for optimizations
- // (eagerly or lazily).
- JSFunction* samples[kSamplerFrameCount];
- int sample_count = 0;
- int frame_count = 0;
- int frame_count_limit = FLAG_watch_ic_patching ? FLAG_frame_count
- : kSamplerFrameCount;
- for (JavaScriptFrameIterator it(isolate_);
- frame_count++ < frame_count_limit && !it.done();
- it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- JSFunction* function = JSFunction::cast(frame->function());
-
- if (!FLAG_watch_ic_patching) {
- // Adjust threshold each time we have processed
- // a certain number of ticks.
- if (sampler_ticks_until_threshold_adjustment_ > 0) {
- sampler_ticks_until_threshold_adjustment_--;
- if (sampler_ticks_until_threshold_adjustment_ <= 0) {
- // If the threshold is not already at the minimum
- // modify and reset the ticks until next adjustment.
- if (sampler_threshold_ > kSamplerThresholdMin) {
- sampler_threshold_ -= kSamplerThresholdDelta;
- sampler_ticks_until_threshold_adjustment_ =
- kSamplerTicksBetweenThresholdAdjustment;
- }
- }
- }
- }
-
- SharedFunctionInfo* shared = function->shared();
- Code* shared_code = shared->code();
-
- if (shared_code->kind() != Code::FUNCTION) continue;
-
- if (function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForParallelRecompilation()) {
- int nesting = shared_code->allow_osr_at_loop_nesting_level();
- if (nesting == 0) AttemptOnStackReplacement(function);
- int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
- shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
- }
-
- // Only record top-level code on top of the execution stack and
- // avoid optimizing excessively large scripts since top-level code
- // will be executed only once.
- const int kMaxToplevelSourceSize = 10 * 1024;
- if (shared->is_toplevel() &&
- (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
- continue;
- }
-
- // Do not record non-optimizable functions.
- if (shared->optimization_disabled()) {
- if (shared->deopt_count() >= FLAG_max_opt_count) {
- // If optimization was disabled due to many deoptimizations,
- // then check if the function is hot and try to reenable optimization.
- int ticks = shared_code->profiler_ticks();
- if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
- shared_code->set_profiler_ticks(0);
- shared->TryReenableOptimization();
- } else {
- shared_code->set_profiler_ticks(ticks + 1);
- }
- }
- continue;
- }
- if (!function->IsOptimizable()) continue;
-
- if (FLAG_watch_ic_patching) {
- int ticks = shared_code->profiler_ticks();
-
- if (ticks >= kProfilerTicksBeforeOptimization) {
- int typeinfo, total, percentage;
- GetICCounts(function, &typeinfo, &total, &percentage);
- if (percentage >= FLAG_type_info_threshold) {
- // If this particular function hasn't had any ICs patched for enough
- // ticks, optimize it now.
- Optimize(function, "hot and stable");
- } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
- Optimize(function, "not much type info but very hot");
- } else {
- shared_code->set_profiler_ticks(ticks + 1);
- if (FLAG_trace_opt_verbose) {
- PrintF("[not yet optimizing ");
- function->PrintName();
- PrintF(", not enough type info: %d/%d (%d%%)]\n",
- typeinfo, total, percentage);
- }
- }
- } else if (!any_ic_changed_ &&
- shared_code->instruction_size() < kMaxSizeEarlyOpt) {
- // If no IC was patched since the last tick and this function is very
- // small, optimistically optimize it now.
- Optimize(function, "small function");
- } else {
- shared_code->set_profiler_ticks(ticks + 1);
- }
- } else { // !FLAG_watch_ic_patching
- samples[sample_count++] = function;
-
- int function_size = function->shared()->SourceSize();
- int threshold_size_factor = (function_size > kSizeLimit)
- ? sampler_threshold_size_factor_
- : 1;
-
- int threshold = sampler_threshold_ * threshold_size_factor;
-
- if (LookupSample(function) >= threshold) {
- Optimize(function, "sampler window lookup");
- }
- }
- }
- if (FLAG_watch_ic_patching) {
- any_ic_changed_ = false;
- } else { // !FLAG_watch_ic_patching
- // Add the collected functions as samples. It's important not to do
- // this as part of collecting them because this will interfere with
- // the sample lookup in case of recursive functions.
- for (int i = 0; i < sample_count; i++) {
- AddSample(samples[i], kSamplerFrameWeight[i]);
- }
- }
-}
-
-
-void RuntimeProfiler::SetUp() {
- ASSERT(has_been_globally_set_up_);
- if (!FLAG_watch_ic_patching) {
- ClearSampleBuffer();
- }
- // If the ticker hasn't already started, make sure to do so to get
- // the ticks for the runtime profiler.
- if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
-}
-
-
-void RuntimeProfiler::Reset() {
- if (!FLAG_watch_ic_patching) {
- sampler_threshold_ = kSamplerThresholdInit;
- sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
- sampler_ticks_until_threshold_adjustment_ =
- kSamplerTicksBetweenThresholdAdjustment;
- }
-}
-
-
-void RuntimeProfiler::TearDown() {
- // Nothing to do.
-}
-
-
-int RuntimeProfiler::SamplerWindowSize() {
- return kSamplerWindowSize;
-}
-
-
-// Update the pointers in the sampler window after a GC.
-void RuntimeProfiler::UpdateSamplesAfterScavenge() {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* function = sampler_window_[i];
- if (function != NULL && isolate_->heap()->InNewSpace(function)) {
- MapWord map_word = HeapObject::cast(function)->map_word();
- if (map_word.IsForwardingAddress()) {
- sampler_window_[i] = map_word.ToForwardingAddress();
- } else {
- sampler_window_[i] = NULL;
- }
- }
- }
-}
-
-
-void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
- // The profiler thread must still be waiting.
- ASSERT(NoBarrier_Load(&state_) >= 0);
- // In IsolateEnteredJS we have already incremented the counter and
- // undid the decrement done by the profiler thread. Increment again
- // to get the right count of active isolates.
- NoBarrier_AtomicIncrement(&state_, 1);
- semaphore.Pointer()->Signal();
-}
-
-
-bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
- Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
- ASSERT(old_state >= -1);
- if (old_state != 0) return false;
- semaphore.Pointer()->Wait();
- return true;
-}
-
-
-void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
- // Do a fake increment. If the profiler is waiting on the semaphore,
- // the returned state is 0, which can be left as an initial state in
- // case profiling is restarted later. If the profiler is not
- // waiting, the increment will prevent it from waiting, but has to
- // be undone after the profiler is stopped.
- Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
- ASSERT(new_state >= 0);
- if (new_state == 0) {
- // The profiler thread is waiting. Wake it up. It must check for
- // stop conditions before attempting to wait again.
- semaphore.Pointer()->Signal();
- }
- thread->Join();
- // The profiler thread is now stopped. Undo the increment in case it
- // was not waiting.
- if (new_state != 0) {
- NoBarrier_AtomicIncrement(&state_, -1);
- }
-}
-
-
-void RuntimeProfiler::RemoveDeadSamples() {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* function = sampler_window_[i];
- if (function != NULL &&
- !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
- sampler_window_[i] = NULL;
- }
- }
-}
-
-
-void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- visitor->VisitPointer(&sampler_window_[i]);
- }
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/runtime-profiler.h b/src/3rdparty/v8/src/runtime-profiler.h
deleted file mode 100644
index 62c48c7..0000000
--- a/src/3rdparty/v8/src/runtime-profiler.h
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_RUNTIME_PROFILER_H_
-#define V8_RUNTIME_PROFILER_H_
-
-#include "allocation.h"
-#include "atomicops.h"
-
-namespace v8 {
-namespace internal {
-
-class Isolate;
-class JSFunction;
-class Object;
-class Semaphore;
-
-class RuntimeProfiler {
- public:
- explicit RuntimeProfiler(Isolate* isolate);
-
- static void GlobalSetUp();
-
- static inline bool IsEnabled() {
- ASSERT(has_been_globally_set_up_);
- return enabled_;
- }
-
- void OptimizeNow();
-
- void SetUp();
- void Reset();
- void TearDown();
-
- Object** SamplerWindowAddress();
- int SamplerWindowSize();
-
- void NotifyICChanged() { any_ic_changed_ = true; }
-
- // Rate limiting support.
-
- // VM thread interface.
- //
- // Called by isolates when their states change.
- static inline void IsolateEnteredJS(Isolate* isolate);
- static inline void IsolateExitedJS(Isolate* isolate);
-
- // Profiler thread interface.
- //
- // WaitForSomeIsolateToEnterJS():
- // When no isolates are running JavaScript code for some time the
- // profiler thread suspends itself by calling the wait function. The
- // wait function returns true after it waited or false immediately.
- // While the function was waiting the profiler may have been
- // disabled so it *must check* whether it is allowed to continue.
- static bool WaitForSomeIsolateToEnterJS();
-
- // Stops the runtime profiler thread when profiling support is being
- // turned off.
- static void StopRuntimeProfilerThreadBeforeShutdown(Thread* thread);
-
- void UpdateSamplesAfterScavenge();
- void RemoveDeadSamples();
- void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
-
- void AttemptOnStackReplacement(JSFunction* function);
-
- private:
- static const int kSamplerWindowSize = 16;
-
- static void HandleWakeUp(Isolate* isolate);
-
- void Optimize(JSFunction* function, const char* reason);
-
- void ClearSampleBuffer();
-
- void ClearSampleBufferNewSpaceEntries();
-
- int LookupSample(JSFunction* function);
-
- void AddSample(JSFunction* function, int weight);
-
- Isolate* isolate_;
-
- int sampler_threshold_;
- int sampler_threshold_size_factor_;
- int sampler_ticks_until_threshold_adjustment_;
-
- Object* sampler_window_[kSamplerWindowSize];
- int sampler_window_position_;
- int sampler_window_weight_[kSamplerWindowSize];
-
- bool any_ic_changed_;
- bool code_generated_;
-
- // Possible state values:
- // -1 => the profiler thread is waiting on the semaphore
- // 0 or positive => the number of isolates running JavaScript code.
- static Atomic32 state_;
-
-#ifdef DEBUG
- static bool has_been_globally_set_up_;
-#endif
- static bool enabled_;
-};
-
-
-// Implementation of RuntimeProfiler inline functions.
-
-void RuntimeProfiler::IsolateEnteredJS(Isolate* isolate) {
- Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
- if (new_state == 0) {
- // Just incremented from -1 to 0. -1 can only be set by the
- // profiler thread before it suspends itself and starts waiting on
- // the semaphore.
- HandleWakeUp(isolate);
- }
- ASSERT(new_state >= 0);
-}
-
-
-void RuntimeProfiler::IsolateExitedJS(Isolate* isolate) {
- Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, -1);
- ASSERT(new_state >= 0);
- USE(new_state);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_RUNTIME_PROFILER_H_
diff --git a/src/3rdparty/v8/src/runtime.cc b/src/3rdparty/v8/src/runtime.cc
deleted file mode 100644
index 191e717..0000000
--- a/src/3rdparty/v8/src/runtime.cc
+++ /dev/null
@@ -1,13380 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "compilation-cache.h"
-#include "compiler.h"
-#include "cpu.h"
-#include "dateparser-inl.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "date.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "isolate-inl.h"
-#include "jsregexp.h"
-#include "jsregexp-inl.h"
-#include "json-parser.h"
-#include "json-stringifier.h"
-#include "liveedit.h"
-#include "misc-intrinsics.h"
-#include "parser.h"
-#include "platform.h"
-#include "runtime-profiler.h"
-#include "runtime.h"
-#include "scopeinfo.h"
-#include "smart-pointers.h"
-#include "string-search.h"
-#include "stub-cache.h"
-#include "uri.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define RUNTIME_ASSERT(value) \
- if (!(value)) return isolate->ThrowIllegalOperation();
-
-// Cast the given object to a value of the specified type and store
-// it in a variable with the given name. If the object is not of the
-// expected type call IllegalOperation and return.
-#define CONVERT_ARG_CHECKED(Type, name, index) \
- RUNTIME_ASSERT(args[index]->Is##Type()); \
- Type* name = Type::cast(args[index]);
-
-#define CONVERT_ARG_HANDLE_CHECKED(Type, name, index) \
- RUNTIME_ASSERT(args[index]->Is##Type()); \
- Handle<Type> name = args.at<Type>(index);
-
-// Cast the given object to a boolean and store it in a variable with
-// the given name. If the object is not a boolean call IllegalOperation
-// and return.
-#define CONVERT_BOOLEAN_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsBoolean()); \
- bool name = args[index]->IsTrue();
-
-// Cast the given argument to a Smi and store its value in an int variable
-// with the given name. If the argument is not a Smi call IllegalOperation
-// and return.
-#define CONVERT_SMI_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsSmi()); \
- int name = args.smi_at(index);
-
-// Cast the given argument to a double and store it in a variable with
-// the given name. If the argument is not a number (as opposed to
-// the number not-a-number) call IllegalOperation and return.
-#define CONVERT_DOUBLE_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsNumber()); \
- double name = args.number_at(index);
-
-// Call the specified converter on the object *comand store the result in
-// a variable of the specified type with the given name. If the
-// object is not a Number call IllegalOperation and return.
-#define CONVERT_NUMBER_CHECKED(type, name, Type, obj) \
- RUNTIME_ASSERT(obj->IsNumber()); \
- type name = NumberTo##Type(obj);
-
-
-// Cast the given argument to PropertyDetails and store its value in a
-// variable with the given name. If the argument is not a Smi call
-// IllegalOperation and return.
-#define CONVERT_PROPERTY_DETAILS_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsSmi()); \
- PropertyDetails name = PropertyDetails(Smi::cast(args[index]));
-
-
-// Assert that the given argument has a valid value for a StrictModeFlag
-// and store it in a StrictModeFlag variable with the given name.
-#define CONVERT_STRICT_MODE_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsSmi()); \
- RUNTIME_ASSERT(args.smi_at(index) == kStrictMode || \
- args.smi_at(index) == kNonStrictMode); \
- StrictModeFlag name = \
- static_cast<StrictModeFlag>(args.smi_at(index));
-
-
-// Assert that the given argument has a valid value for a LanguageMode
-// and store it in a LanguageMode variable with the given name.
-#define CONVERT_LANGUAGE_MODE_ARG(name, index) \
- ASSERT(args[index]->IsSmi()); \
- ASSERT(args.smi_at(index) == CLASSIC_MODE || \
- args.smi_at(index) == STRICT_MODE || \
- args.smi_at(index) == EXTENDED_MODE); \
- LanguageMode name = \
- static_cast<LanguageMode>(args.smi_at(index));
-
-
-MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
- JSObject* boilerplate) {
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) return isolate->StackOverflow();
-
- Heap* heap = isolate->heap();
- Object* result;
- { MaybeObject* maybe_result = heap->CopyJSObject(boilerplate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSObject* copy = JSObject::cast(result);
-
- // Deep copy local properties.
- if (copy->HasFastProperties()) {
- FixedArray* properties = copy->properties();
- for (int i = 0; i < properties->length(); i++) {
- Object* value = properties->get(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- properties->set(i, result);
- }
- }
- int nof = copy->map()->inobject_properties();
- for (int i = 0; i < nof; i++) {
- Object* value = copy->InObjectPropertyAt(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- copy->InObjectPropertyAtPut(i, result);
- }
- }
- } else {
- { MaybeObject* maybe_result =
- heap->AllocateFixedArray(copy->NumberOfLocalProperties());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- FixedArray* names = FixedArray::cast(result);
- copy->GetLocalPropertyNames(names, 0);
- for (int i = 0; i < names->length(); i++) {
- ASSERT(names->get(i)->IsString());
- String* key_string = String::cast(names->get(i));
- PropertyAttributes attributes =
- copy->GetLocalPropertyAttribute(key_string);
- // Only deep copy fields from the object literal expression.
- // In particular, don't try to copy the length attribute of
- // an array.
- if (attributes != NONE) continue;
- Object* value =
- copy->GetProperty(key_string, &attributes)->ToObjectUnchecked();
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- { MaybeObject* maybe_result =
- // Creating object copy for literals. No strict mode needed.
- copy->SetProperty(key_string, result, NONE, kNonStrictMode);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- }
- }
-
- // Deep copy local elements.
- // Pixel elements cannot be created using an object literal.
- ASSERT(!copy->HasExternalArrayElements());
- switch (copy->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- FixedArray* elements = FixedArray::cast(copy->elements());
- if (elements->map() == heap->fixed_cow_array_map()) {
- isolate->counters()->cow_arrays_created_runtime()->Increment();
-#ifdef DEBUG
- for (int i = 0; i < elements->length(); i++) {
- ASSERT(!elements->get(i)->IsJSObject());
- }
-#endif
- } else {
- for (int i = 0; i < elements->length(); i++) {
- Object* value = elements->get(i);
- ASSERT(value->IsSmi() ||
- value->IsTheHole() ||
- (IsFastObjectElementsKind(copy->GetElementsKind())));
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
- js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- elements->set(i, result);
- }
- }
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- SeededNumberDictionary* element_dictionary = copy->element_dictionary();
- int capacity = element_dictionary->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = element_dictionary->KeyAt(i);
- if (element_dictionary->IsKey(k)) {
- Object* value = element_dictionary->ValueAt(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
- js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- element_dictionary->ValueAtPut(i, result);
- }
- }
- }
- break;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNIMPLEMENTED();
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- // No contained objects, nothing to do.
- break;
- }
- return copy;
-}
-
-
-static Handle<Map> ComputeObjectLiteralMap(
- Handle<Context> context,
- Handle<FixedArray> constant_properties,
- bool* is_result_from_cache) {
- Isolate* isolate = context->GetIsolate();
- int properties_length = constant_properties->length();
- int number_of_properties = properties_length / 2;
- // Check that there are only internal strings and array indices among keys.
- int number_of_string_keys = 0;
- for (int p = 0; p != properties_length; p += 2) {
- Object* key = constant_properties->get(p);
- uint32_t element_index = 0;
- if (key->IsInternalizedString()) {
- number_of_string_keys++;
- } else if (key->ToArrayIndex(&element_index)) {
- // An index key does not require space in the property backing store.
- number_of_properties--;
- } else {
- // Bail out as a non-internalized-string non-index key makes caching
- // impossible.
- // ASSERT to make sure that the if condition after the loop is false.
- ASSERT(number_of_string_keys != number_of_properties);
- break;
- }
- }
- // If we only have internalized strings and array indices among keys then we
- // can use the map cache in the native context.
- const int kMaxKeys = 10;
- if ((number_of_string_keys == number_of_properties) &&
- (number_of_string_keys < kMaxKeys)) {
- // Create the fixed array with the key.
- Handle<FixedArray> keys =
- isolate->factory()->NewFixedArray(number_of_string_keys);
- if (number_of_string_keys > 0) {
- int index = 0;
- for (int p = 0; p < properties_length; p += 2) {
- Object* key = constant_properties->get(p);
- if (key->IsInternalizedString()) {
- keys->set(index++, key);
- }
- }
- ASSERT(index == number_of_string_keys);
- }
- *is_result_from_cache = true;
- return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
- }
- *is_result_from_cache = false;
- return isolate->factory()->CopyMap(
- Handle<Map>(context->object_function()->initial_map()),
- number_of_properties);
-}
-
-
-static Handle<Object> CreateLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
- Handle<FixedArray> constant_properties);
-
-
-static Handle<Object> CreateObjectLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
- Handle<FixedArray> constant_properties,
- bool should_have_fast_elements,
- bool has_function_literal) {
- // Get the native context from the literals array. This is the
- // context in which the function was created and we use the object
- // function from this context to create the object literal. We do
- // not use the object function from the current native context
- // because this might be the object function from another context
- // which we should not have access to.
- Handle<Context> context =
- Handle<Context>(JSFunction::NativeContextFromLiterals(*literals));
-
- // In case we have function literals, we want the object to be in
- // slow properties mode for now. We don't go in the map cache because
- // maps with constant functions can't be shared if the functions are
- // not the same (which is the common case).
- bool is_result_from_cache = false;
- Handle<Map> map = has_function_literal
- ? Handle<Map>(context->object_function()->initial_map())
- : ComputeObjectLiteralMap(context,
- constant_properties,
- &is_result_from_cache);
-
- Handle<JSObject> boilerplate = isolate->factory()->NewJSObjectFromMap(map);
-
- // Normalize the elements of the boilerplate to save space if needed.
- if (!should_have_fast_elements) JSObject::NormalizeElements(boilerplate);
-
- // Add the constant properties to the boilerplate.
- int length = constant_properties->length();
- bool should_transform =
- !is_result_from_cache && boilerplate->HasFastProperties();
- if (should_transform || has_function_literal) {
- // Normalize the properties of object to avoid n^2 behavior
- // when extending the object multiple properties. Indicate the number of
- // properties to be added.
- JSObject::NormalizeProperties(
- boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
- }
-
- for (int index = 0; index < length; index +=2) {
- Handle<Object> key(constant_properties->get(index+0), isolate);
- Handle<Object> value(constant_properties->get(index+1), isolate);
- if (value->IsFixedArray()) {
- // The value contains the constant_properties of a
- // simple object or array literal.
- Handle<FixedArray> array = Handle<FixedArray>::cast(value);
- value = CreateLiteralBoilerplate(isolate, literals, array);
- if (value.is_null()) return value;
- }
- Handle<Object> result;
- uint32_t element_index = 0;
- if (key->IsInternalizedString()) {
- if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
- // Array index as string (uint32).
- result = JSObject::SetOwnElement(
- boilerplate, element_index, value, kNonStrictMode);
- } else {
- Handle<String> name(String::cast(*key));
- ASSERT(!name->AsArrayIndex(&element_index));
- result = JSObject::SetLocalPropertyIgnoreAttributes(
- boilerplate, name, value, NONE);
- }
- } else if (key->ToArrayIndex(&element_index)) {
- // Array index (uint32).
- result = JSObject::SetOwnElement(
- boilerplate, element_index, value, kNonStrictMode);
- } else {
- // Non-uint32 number.
- ASSERT(key->IsNumber());
- double num = key->Number();
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- const char* str = DoubleToCString(num, buffer);
- Handle<String> name =
- isolate->factory()->NewStringFromAscii(CStrVector(str));
- result = JSObject::SetLocalPropertyIgnoreAttributes(
- boilerplate, name, value, NONE);
- }
- // If setting the property on the boilerplate throws an
- // exception, the exception is converted to an empty handle in
- // the handle based operations. In that case, we need to
- // convert back to an exception.
- if (result.is_null()) return result;
- }
-
- // Transform to fast properties if necessary. For object literals with
- // containing function literals we defer this operation until after all
- // computed properties have been assigned so that we can generate
- // constant function properties.
- if (should_transform && !has_function_literal) {
- JSObject::TransformToFastProperties(
- boilerplate, boilerplate->map()->unused_property_fields());
- }
-
- return boilerplate;
-}
-
-
-MaybeObject* TransitionElements(Handle<Object> object,
- ElementsKind to_kind,
- Isolate* isolate) {
- HandleScope scope(isolate);
- if (!object->IsJSObject()) return isolate->ThrowIllegalOperation();
- ElementsKind from_kind =
- Handle<JSObject>::cast(object)->map()->elements_kind();
- if (Map::IsValidElementsTransition(from_kind, to_kind)) {
- Handle<Object> result = JSObject::TransitionElementsKind(
- Handle<JSObject>::cast(object), to_kind);
- if (result.is_null()) return isolate->ThrowIllegalOperation();
- return *result;
- }
- return isolate->ThrowIllegalOperation();
-}
-
-
-static const int kSmiLiteralMinimumLength = 1024;
-
-
-Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
- Handle<FixedArray> elements) {
- // Create the JSArray.
- Handle<JSFunction> constructor(
- JSFunction::NativeContextFromLiterals(*literals)->array_function());
- Handle<JSArray> object =
- Handle<JSArray>::cast(isolate->factory()->NewJSObject(constructor));
-
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(elements->get(0))->value());
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(elements->get(1)));
-
- ASSERT(IsFastElementsKind(constant_elements_kind));
- Context* native_context = isolate->context()->native_context();
- Object* maybe_maps_array = native_context->js_array_maps();
- ASSERT(!maybe_maps_array->IsUndefined());
- Object* maybe_map = FixedArray::cast(maybe_maps_array)->get(
- constant_elements_kind);
- ASSERT(maybe_map->IsMap());
- object->set_map(Map::cast(maybe_map));
-
- Handle<FixedArrayBase> copied_elements_values;
- if (IsFastDoubleElementsKind(constant_elements_kind)) {
- ASSERT(FLAG_smi_only_arrays);
- copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
- Handle<FixedDoubleArray>::cast(constant_elements_values));
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind));
- const bool is_cow =
- (constant_elements_values->map() ==
- isolate->heap()->fixed_cow_array_map());
- if (is_cow) {
- copied_elements_values = constant_elements_values;
-#if DEBUG
- Handle<FixedArray> fixed_array_values =
- Handle<FixedArray>::cast(copied_elements_values);
- for (int i = 0; i < fixed_array_values->length(); i++) {
- ASSERT(!fixed_array_values->get(i)->IsFixedArray());
- }
-#endif
- } else {
- Handle<FixedArray> fixed_array_values =
- Handle<FixedArray>::cast(constant_elements_values);
- Handle<FixedArray> fixed_array_values_copy =
- isolate->factory()->CopyFixedArray(fixed_array_values);
- copied_elements_values = fixed_array_values_copy;
- for (int i = 0; i < fixed_array_values->length(); i++) {
- Object* current = fixed_array_values->get(i);
- if (current->IsFixedArray()) {
- // The value contains the constant_properties of a
- // simple object or array literal.
- Handle<FixedArray> fa(FixedArray::cast(fixed_array_values->get(i)));
- Handle<Object> result =
- CreateLiteralBoilerplate(isolate, literals, fa);
- if (result.is_null()) return result;
- fixed_array_values_copy->set(i, *result);
- }
- }
- }
- }
- object->set_elements(*copied_elements_values);
- object->set_length(Smi::FromInt(copied_elements_values->length()));
-
- // Ensure that the boilerplate object has FAST_*_ELEMENTS, unless the flag is
- // on or the object is larger than the threshold.
- if (!FLAG_smi_only_arrays &&
- constant_elements_values->length() < kSmiLiteralMinimumLength) {
- ElementsKind elements_kind = object->GetElementsKind();
- if (!IsFastObjectElementsKind(elements_kind)) {
- if (IsFastHoleyElementsKind(elements_kind)) {
- CHECK(!TransitionElements(object, FAST_HOLEY_ELEMENTS,
- isolate)->IsFailure());
- } else {
- CHECK(!TransitionElements(object, FAST_ELEMENTS, isolate)->IsFailure());
- }
- }
- }
-
- object->ValidateElements();
- return object;
-}
-
-
-static Handle<Object> CreateLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
- Handle<FixedArray> array) {
- Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
- const bool kHasNoFunctionLiteral = false;
- switch (CompileTimeValue::GetType(array)) {
- case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
- return CreateObjectLiteralBoilerplate(isolate,
- literals,
- elements,
- true,
- kHasNoFunctionLiteral);
- case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS:
- return CreateObjectLiteralBoilerplate(isolate,
- literals,
- elements,
- false,
- kHasNoFunctionLiteral);
- case CompileTimeValue::ARRAY_LITERAL:
- return Runtime::CreateArrayLiteralBoilerplate(
- isolate, literals, elements);
- default:
- UNREACHABLE();
- return Handle<Object>::null();
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
- CONVERT_SMI_ARG_CHECKED(flags, 3);
- bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
- bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
-
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(isolate,
- literals,
- constant_properties,
- should_have_fast_elements,
- has_function_literal);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return DeepCopyBoilerplate(isolate, JSObject::cast(*boilerplate));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
- CONVERT_SMI_ARG_CHECKED(flags, 3);
- bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
- bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
-
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(isolate,
- literals,
- constant_properties,
- should_have_fast_elements,
- has_function_literal);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
-
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- ASSERT(*elements != isolate->heap()->empty_fixed_array());
- boilerplate =
- Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return DeepCopyBoilerplate(isolate, JSObject::cast(*boilerplate));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
-
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- ASSERT(*elements != isolate->heap()->empty_fixed_array());
- boilerplate =
- Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- if (JSObject::cast(*boilerplate)->elements()->map() ==
- isolate->heap()->fixed_cow_array_map()) {
- isolate->counters()->cow_arrays_created_runtime()->Increment();
- }
-
- JSObject* boilerplate_object = JSObject::cast(*boilerplate);
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(
- boilerplate_object->GetElementsKind());
- if (mode == TRACK_ALLOCATION_SITE) {
- return isolate->heap()->CopyJSObjectWithAllocationSite(boilerplate_object);
- }
-
- return isolate->heap()->CopyJSObject(boilerplate_object);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateSymbol) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 0);
- return isolate->heap()->AllocateSymbol();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
- Object* prototype = args[1];
- Object* used_prototype =
- prototype->IsJSReceiver() ? prototype : isolate->heap()->null_value();
- return isolate->heap()->AllocateJSProxy(handler, used_prototype);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSFunctionProxy) {
- ASSERT(args.length() == 4);
- CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
- Object* call_trap = args[1];
- RUNTIME_ASSERT(call_trap->IsJSFunction() || call_trap->IsJSFunctionProxy());
- CONVERT_ARG_CHECKED(JSFunction, construct_trap, 2);
- Object* prototype = args[3];
- Object* used_prototype =
- prototype->IsJSReceiver() ? prototype : isolate->heap()->null_value();
- return isolate->heap()->AllocateJSFunctionProxy(
- handler, call_trap, construct_trap, used_prototype);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSProxy) {
- ASSERT(args.length() == 1);
- Object* obj = args[0];
- return isolate->heap()->ToBoolean(obj->IsJSProxy());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSFunctionProxy) {
- ASSERT(args.length() == 1);
- Object* obj = args[0];
- return isolate->heap()->ToBoolean(obj->IsJSFunctionProxy());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) {
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
- return proxy->handler();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) {
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
- return proxy->call_trap();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) {
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
- return proxy->construct_trap();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
- proxy->Fix();
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<ObjectHashSet> table = isolate->factory()->NewObjectHashSet(0);
- holder->set_table(*table);
- return *holder;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<Object> key(args[1], isolate);
- Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
- table = ObjectHashSetAdd(table, key);
- holder->set_table(*table);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHas) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<Object> key(args[1], isolate);
- Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
- return isolate->heap()->ToBoolean(table->Contains(*key));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<Object> key(args[1], isolate);
- Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
- table = ObjectHashSetRemove(table, key);
- holder->set_table(*table);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetGetSize) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
- return Smi::FromInt(table->NumberOfElements());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapInitialize) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
- holder->set_table(*table);
- return *holder;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(*key), isolate);
- return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapHas) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(*key), isolate);
- return isolate->heap()->ToBoolean(!lookup->IsTheHole());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapDelete) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(*key), isolate);
- Handle<ObjectHashTable> new_table =
- PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
- holder->set_table(*new_table);
- return isolate->heap()->ToBoolean(!lookup->IsTheHole());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
- holder->set_table(*new_table);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGetSize) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- return Smi::FromInt(table->NumberOfElements());
-}
-
-
-static JSWeakMap* WeakMapInitialize(Isolate* isolate,
- Handle<JSWeakMap> weakmap) {
- ASSERT(weakmap->map()->inobject_properties() == 0);
- Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
- weakmap->set_table(*table);
- weakmap->set_next(Smi::FromInt(0));
- return *weakmap;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
- return WeakMapInitialize(isolate, weakmap);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
- Handle<Object> lookup(table->Lookup(*key), isolate);
- return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapHas) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
- Handle<Object> lookup(table->Lookup(*key), isolate);
- return isolate->heap()->ToBoolean(!lookup->IsTheHole());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapDelete) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
- Handle<Object> lookup(table->Lookup(*key), isolate);
- Handle<ObjectHashTable> new_table =
- PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
- weakmap->set_table(*new_table);
- return isolate->heap()->ToBoolean(!lookup->IsTheHole());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
- Handle<Object> value(args[2], isolate);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
- Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
- weakmap->set_table(*new_table);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- Object* obj = args[0];
- if (!obj->IsJSObject()) return isolate->heap()->null_value();
- return JSObject::cast(obj)->class_name();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSReceiver, input_obj, 0);
- Object* obj = input_obj;
- // We don't expect access checks to be needed on JSProxy objects.
- ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
- do {
- if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(JSObject::cast(obj),
- isolate->heap()->proto_string(),
- v8::ACCESS_GET)) {
- isolate->ReportFailedAccessCheck(JSObject::cast(obj), v8::ACCESS_GET);
- return isolate->heap()->undefined_value();
- }
- obj = obj->GetPrototype(isolate);
- } while (obj->IsJSObject() &&
- JSObject::cast(obj)->map()->is_hidden_prototype());
- return obj;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
- // See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
- Object* O = args[0];
- Object* V = args[1];
- while (true) {
- Object* prototype = V->GetPrototype(isolate);
- if (prototype->IsNull()) return isolate->heap()->false_value();
- if (O == prototype) return isolate->heap()->true_value();
- V = prototype;
- }
-}
-
-
-static bool CheckAccessException(Object* callback,
- v8::AccessType access_type) {
- if (callback->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(callback);
- return
- (access_type == v8::ACCESS_HAS &&
- (info->all_can_read() || info->all_can_write())) ||
- (access_type == v8::ACCESS_GET && info->all_can_read()) ||
- (access_type == v8::ACCESS_SET && info->all_can_write());
- }
- return false;
-}
-
-
-template<class Key>
-static bool CheckGenericAccess(
- JSObject* receiver,
- JSObject* holder,
- Key key,
- v8::AccessType access_type,
- bool (Isolate::*mayAccess)(JSObject*, Key, v8::AccessType)) {
- Isolate* isolate = receiver->GetIsolate();
- for (JSObject* current = receiver;
- true;
- current = JSObject::cast(current->GetPrototype())) {
- if (current->IsAccessCheckNeeded() &&
- !(isolate->*mayAccess)(current, key, access_type)) {
- return false;
- }
- if (current == holder) break;
- }
- return true;
-}
-
-
-enum AccessCheckResult {
- ACCESS_FORBIDDEN,
- ACCESS_ALLOWED,
- ACCESS_ABSENT
-};
-
-
-static AccessCheckResult CheckElementAccess(
- JSObject* obj,
- uint32_t index,
- v8::AccessType access_type) {
- // TODO(1095): we should traverse hidden prototype hierachy as well.
- if (CheckGenericAccess(
- obj, obj, index, access_type, &Isolate::MayIndexedAccess)) {
- return ACCESS_ALLOWED;
- }
-
- obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
- return ACCESS_FORBIDDEN;
-}
-
-
-static AccessCheckResult CheckPropertyAccess(
- JSObject* obj,
- String* name,
- v8::AccessType access_type) {
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- return CheckElementAccess(obj, index, access_type);
- }
-
- LookupResult lookup(obj->GetIsolate());
- obj->LocalLookup(name, &lookup, true);
-
- if (!lookup.IsProperty()) return ACCESS_ABSENT;
- if (CheckGenericAccess<Object*>(
- obj, lookup.holder(), name, access_type, &Isolate::MayNamedAccess)) {
- return ACCESS_ALLOWED;
- }
-
- // Access check callback denied the access, but some properties
- // can have a special permissions which override callbacks descision
- // (currently see v8::AccessControl).
- // API callbacks can have per callback access exceptions.
- switch (lookup.type()) {
- case CALLBACKS:
- if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
- return ACCESS_ALLOWED;
- }
- break;
- case INTERCEPTOR:
- // If the object has an interceptor, try real named properties.
- // Overwrite the result to fetch the correct property later.
- lookup.holder()->LookupRealNamedProperty(name, &lookup);
- if (lookup.IsProperty() && lookup.IsPropertyCallbacks()) {
- if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
- return ACCESS_ALLOWED;
- }
- }
- break;
- default:
- break;
- }
-
- obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
- return ACCESS_FORBIDDEN;
-}
-
-
-// Enumerator used as indices into the array returned from GetOwnProperty
-enum PropertyDescriptorIndices {
- IS_ACCESSOR_INDEX,
- VALUE_INDEX,
- GETTER_INDEX,
- SETTER_INDEX,
- WRITABLE_INDEX,
- ENUMERABLE_INDEX,
- CONFIGURABLE_INDEX,
- DESCRIPTOR_SIZE
-};
-
-
-static MaybeObject* GetOwnProperty(Isolate* isolate,
- Handle<JSObject> obj,
- Handle<String> name) {
- Heap* heap = isolate->heap();
- // Due to some WebKit tests, we want to make sure that we do not log
- // more than one access failure here.
- switch (CheckPropertyAccess(*obj, *name, v8::ACCESS_HAS)) {
- case ACCESS_FORBIDDEN: return heap->false_value();
- case ACCESS_ALLOWED: break;
- case ACCESS_ABSENT: return heap->undefined_value();
- }
-
- PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name);
- if (attrs == ABSENT) return heap->undefined_value();
- AccessorPair* raw_accessors = obj->GetLocalPropertyAccessorPair(*name);
- Handle<AccessorPair> accessors(raw_accessors, isolate);
-
- Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
- elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0));
- elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0));
- elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(raw_accessors != NULL));
-
- if (raw_accessors == NULL) {
- elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
- // GetProperty does access check.
- Handle<Object> value = GetProperty(isolate, obj, name);
- if (value.is_null()) return Failure::Exception();
- elms->set(VALUE_INDEX, *value);
- } else {
- // Access checks are performed for both accessors separately.
- // When they fail, the respective field is not set in the descriptor.
- Object* getter = accessors->GetComponent(ACCESSOR_GETTER);
- Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
- if (!getter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, getter);
- }
- if (!setter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, setter);
- }
- }
-
- return *isolate->factory()->NewJSArrayWithElements(elms);
-}
-
-
-// Returns an array with the property description:
-// if args[1] is not a property on args[0]
-// returns undefined
-// if args[1] is a data property on args[0]
-// [false, value, Writeable, Enumerable, Configurable]
-// if args[1] is an accessor on args[0]
-// [true, GetFunction, SetFunction, Enumerable, Configurable]
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
- return GetOwnProperty(isolate, obj, name);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- return obj->PreventExtensions();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) {
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- if (obj->IsJSGlobalProxy()) {
- Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
- ASSERT(proto->IsJSGlobalObject());
- obj = JSObject::cast(proto);
- }
- return isolate->heap()->ToBoolean(obj->map()->is_extensible());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
- Handle<Object> result =
- RegExpImpl::Compile(re, pattern, flags, isolate->runtime_zone());
- if (result.is_null()) return Failure::Exception();
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateApiFunction) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(FunctionTemplateInfo, data, 0);
- return *isolate->factory()->CreateApiFunction(data);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) {
- ASSERT(args.length() == 1);
- Object* arg = args[0];
- bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo();
- return isolate->heap()->ToBoolean(result);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(HeapObject, templ, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1)
- int offset = index * kPointerSize + HeapObject::kHeaderSize;
- InstanceType type = templ->map()->instance_type();
- RUNTIME_ASSERT(type == FUNCTION_TEMPLATE_INFO_TYPE ||
- type == OBJECT_TEMPLATE_INFO_TYPE);
- RUNTIME_ASSERT(offset > 0);
- if (type == FUNCTION_TEMPLATE_INFO_TYPE) {
- RUNTIME_ASSERT(offset < FunctionTemplateInfo::kSize);
- } else {
- RUNTIME_ASSERT(offset < ObjectTemplateInfo::kSize);
- }
- return *HeapObject::RawField(templ, offset);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(HeapObject, object, 0);
- Map* old_map = object->map();
- bool needs_access_checks = old_map->is_access_check_needed();
- if (needs_access_checks) {
- // Copy map so it won't interfere constructor's initial map.
- Map* new_map;
- MaybeObject* maybe_new_map = old_map->Copy();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- new_map->set_is_access_check_needed(false);
- object->set_map(new_map);
- }
- return isolate->heap()->ToBoolean(needs_access_checks);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(HeapObject, object, 0);
- Map* old_map = object->map();
- if (!old_map->is_access_check_needed()) {
- // Copy map so it won't interfere constructor's initial map.
- Map* new_map;
- MaybeObject* maybe_new_map = old_map->Copy();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- new_map->set_is_access_check_needed(true);
- object->set_map(new_map);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-static Failure* ThrowRedeclarationError(Isolate* isolate,
- const char* type,
- Handle<String> name) {
- HandleScope scope(isolate);
- Handle<Object> type_handle =
- isolate->factory()->NewStringFromAscii(CStrVector(type));
- Handle<Object> args[2] = { type_handle, name };
- Handle<Object> error =
- isolate->factory()->NewTypeError("redeclaration", HandleVector(args, 2));
- return isolate->Throw(*error);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
- ASSERT(args.length() == 3);
- HandleScope scope(isolate);
-
- Handle<Context> context = args.at<Context>(0);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 1);
- CONVERT_SMI_ARG_CHECKED(flags, 2);
-
- Handle<JSObject> js_global =
- Handle<JSObject>(isolate->context()->global_object());
- Handle<JSObject> qml_global =
- Handle<JSObject>(isolate->context()->qml_global_object());
-
- // Traverse the name/value pairs and set the properties.
- int length = pairs->length();
- for (int i = 0; i < length; i += 3) {
- HandleScope scope(isolate);
- Handle<String> name(String::cast(pairs->get(i)));
- Handle<Object> value(pairs->get(i + 1), isolate);
- Handle<Object> is_qml_global(pairs->get(i + 2), isolate);
- ASSERT(is_qml_global->IsBoolean());
-
- Handle<JSObject> global = is_qml_global->IsTrue() ? qml_global : js_global;
-
- // We have to declare a global const property. To capture we only
- // assign to it when evaluating the assignment for "const x =
- // <expr>" the initial value is the hole.
- bool is_var = value->IsUndefined();
- bool is_const = value->IsTheHole();
- bool is_function = value->IsSharedFunctionInfo();
- ASSERT(is_var + is_const + is_function == 1);
-
- if (is_var || is_const) {
- // Lookup the property in the global object, and don't set the
- // value of the variable if the property is already there.
- // Do the lookup locally only, see ES5 erratum.
- LookupResult lookup(isolate);
- if (FLAG_es52_globals) {
- global->LocalLookup(*name, &lookup, true, true);
- } else {
- global->Lookup(*name, &lookup, true);
- }
- if (lookup.IsFound()) {
- // We found an existing property. Unless it was an interceptor
- // that claims the property is absent, skip this declaration.
- if (!lookup.IsInterceptor()) continue;
- PropertyAttributes attributes = global->GetPropertyAttribute(*name);
- if (attributes != ABSENT) continue;
- // Fall-through and introduce the absent property by using
- // SetProperty.
- }
- } else if (is_function) {
- // Copy the function and update its context. Use it as value.
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>::cast(value);
- Handle<JSFunction> function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, TENURED);
- value = function;
- }
-
- LookupResult lookup(isolate);
- global->LocalLookup(*name, &lookup, true);
-
- // Compute the property attributes. According to ECMA-262,
- // the property must be non-configurable except in eval.
- int attr = NONE;
- bool is_eval = DeclareGlobalsEvalFlag::decode(flags);
- if (!is_eval) {
- attr |= DONT_DELETE;
- }
- bool is_native = DeclareGlobalsNativeFlag::decode(flags);
- if (is_const || (is_native && is_function)) {
- attr |= READ_ONLY;
- }
-
- LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
-
- if (!lookup.IsFound() || is_function) {
- // If the local property exists, check that we can reconfigure it
- // as required for function declarations.
- if (lookup.IsFound() && lookup.IsDontDelete()) {
- if (lookup.IsReadOnly() || lookup.IsDontEnum() ||
- lookup.IsPropertyCallbacks()) {
- return ThrowRedeclarationError(isolate, "function", name);
- }
- // If the existing property is not configurable, keep its attributes.
- attr = lookup.GetAttributes();
- }
- // Define or redefine own property.
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- global, name, value, static_cast<PropertyAttributes>(attr)));
- } else {
- // Do a [[Put]] on the existing (own) property.
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSObject::SetProperty(
- global, name, value, static_cast<PropertyAttributes>(attr),
- language_mode == CLASSIC_MODE ? kNonStrictMode : kStrictMode,
- true));
- }
- }
-
- ASSERT(!isolate->has_pending_exception());
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
-
- // Declarations are always made in a function or native context. In the
- // case of eval code, the context passed is the context of the caller,
- // which may be some nested context and not the declaration context.
- RUNTIME_ASSERT(args[0]->IsContext());
- Handle<Context> context(Context::cast(args[0])->declaration_context());
-
- Handle<String> name(String::cast(args[1]));
- PropertyAttributes mode = static_cast<PropertyAttributes>(args.smi_at(2));
- RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
- Handle<Object> initial_value(args[3], isolate);
-
- int index;
- PropertyAttributes attributes;
- ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
- BindingFlags binding_flags;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes, &binding_flags);
-
- if (attributes != ABSENT) {
- // The name was declared before; check for conflicting re-declarations.
- // Note: this is actually inconsistent with what happens for globals (where
- // we silently ignore such declarations).
- if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) {
- // Functions are not read-only.
- ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
- const char* type = ((attributes & READ_ONLY) != 0) ? "const" : "var";
- return ThrowRedeclarationError(isolate, type, name);
- }
-
- // Initialize it if necessary.
- if (*initial_value != NULL) {
- if (index >= 0) {
- ASSERT(holder.is_identical_to(context));
- if (((attributes & READ_ONLY) == 0) ||
- context->get(index)->IsTheHole()) {
- context->set(index, *initial_value);
- }
- } else {
- // Slow case: The property is in the context extension object of a
- // function context or the global object of a native context.
- Handle<JSObject> object = Handle<JSObject>::cast(holder);
- RETURN_IF_EMPTY_HANDLE(
- isolate,
- JSReceiver::SetProperty(object, name, initial_value, mode,
- kNonStrictMode));
- }
- }
-
- } else {
- // The property is not in the function context. It needs to be
- // "declared" in the function context's extension context or as a
- // property of the the global object.
- Handle<JSObject> object;
- if (context->has_extension()) {
- object = Handle<JSObject>(JSObject::cast(context->extension()));
- } else {
- // Context extension objects are allocated lazily.
- ASSERT(context->IsFunctionContext());
- object = isolate->factory()->NewJSObject(
- isolate->context_extension_function());
- context->set_extension(*object);
- }
- ASSERT(*object != NULL);
-
- // Declare the property by setting it to the initial value if provided,
- // or undefined, and use the correct mode (e.g. READ_ONLY attribute for
- // constant declarations).
- ASSERT(!object->HasLocalProperty(*name));
- Handle<Object> value(isolate->heap()->undefined_value(), isolate);
- if (*initial_value != NULL) value = initial_value;
- // Declaring a const context slot is a conflicting declaration if
- // there is a callback with that name in a prototype. It is
- // allowed to introduce const variables in
- // JSContextExtensionObjects. They are treated specially in
- // SetProperty and no setters are invoked for those since they are
- // not real JSObjects.
- if (initial_value->IsTheHole() &&
- !object->IsJSContextExtensionObject()) {
- LookupResult lookup(isolate);
- object->Lookup(*name, &lookup);
- if (lookup.IsPropertyCallbacks()) {
- return ThrowRedeclarationError(isolate, "const", name);
- }
- }
- if (object->IsJSGlobalObject()) {
- // Define own property on the global object.
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(object, name, value, mode));
- } else {
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSReceiver::SetProperty(object, name, value, mode, kNonStrictMode));
- }
- }
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
- NoHandleAllocation nha(isolate);
- // args[0] == name
- // args[1] == language_mode
- // args[2] == qml_mode
- // args[3] == value (optional)
-
- // Determine if we need to assign to the variable if it already
- // exists (based on the number of arguments).
- RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
- bool assign = args.length() == 4;
-
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- RUNTIME_ASSERT(args[1]->IsSmi());
- CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
- StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
-
- RUNTIME_ASSERT(args[2]->IsSmi());
- int qml_mode = Smi::cast(args[2])->value();
-
- JSObject* global = qml_mode ? isolate->context()->qml_global_object()
- : isolate->context()->global_object();
-
- // According to ECMA-262, section 12.2, page 62, the property must
- // not be deletable.
- PropertyAttributes attributes = DONT_DELETE;
-
- // Lookup the property locally in the global object. If it isn't
- // there, there is a property with this name in the prototype chain.
- // We follow Safari and Firefox behavior and only set the property
- // locally if there is an explicit initialization value that we have
- // to assign to the property.
- // Note that objects can have hidden prototypes, so we need to traverse
- // the whole chain of hidden prototypes to do a 'local' lookup.
- Object* object = global;
- LookupResult lookup(isolate);
- JSObject::cast(object)->LocalLookup(*name, &lookup, true, true);
- if (lookup.IsInterceptor()) {
- HandleScope handle_scope(isolate);
- PropertyAttributes intercepted =
- lookup.holder()->GetPropertyAttribute(*name);
- if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
- // Found an interceptor that's not read only.
- if (assign) {
- return lookup.holder()->SetProperty(
- &lookup, *name, args[3], attributes, strict_mode_flag);
- } else {
- return isolate->heap()->undefined_value();
- }
- }
- }
-
- // Reload global in case the loop above performed a GC.
- global = qml_mode ? isolate->context()->qml_global_object()
- : isolate->context()->global_object();
- if (assign) {
- return global->SetProperty(
- *name, args[3], attributes, strict_mode_flag,
- JSReceiver::MAY_BE_STORE_FROM_KEYED, true);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
- // All constants are declared with an initial value. The name
- // of the constant is the first argument and the initial value
- // is the second.
- RUNTIME_ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- Handle<Object> value = args.at<Object>(1);
-
- RUNTIME_ASSERT(args[2]->IsSmi());
- int qml_mode = Smi::cast(args[2])->value();
-
- // Get the current global object from top.
- JSObject* global = qml_mode ? isolate->context()->qml_global_object()
- : isolate->context()->global_object();
-
- // According to ECMA-262, section 12.2, page 62, the property must
- // not be deletable. Since it's a const, it must be READ_ONLY too.
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
-
- // Lookup the property locally in the global object. If it isn't
- // there, we add the property and take special precautions to always
- // add it as a local property even in case of callbacks in the
- // prototype chain (this rules out using SetProperty).
- // We use SetLocalPropertyIgnoreAttributes instead
- LookupResult lookup(isolate);
- global->LocalLookup(*name, &lookup);
- if (!lookup.IsFound()) {
- return global->SetLocalPropertyIgnoreAttributes(*name,
- *value,
- attributes);
- }
-
- if (!lookup.IsReadOnly()) {
- // Restore global object from context (in case of GC) and continue
- // with setting the value.
- HandleScope handle_scope(isolate);
- Handle<JSObject> global(qml_mode ? isolate->context()->qml_global_object()
- : isolate->context()->global_object());
-
- // BUG 1213575: Handle the case where we have to set a read-only
- // property through an interceptor and only do it if it's
- // uninitialized, e.g. the hole. Nirk...
- // Passing non-strict mode because the property is writable.
- RETURN_IF_EMPTY_HANDLE(
- isolate,
- JSReceiver::SetProperty(global, name, value, attributes,
- kNonStrictMode));
- return *value;
- }
-
- // Set the value, but only if we're assigning the initial value to a
- // constant. For now, we determine this by checking if the
- // current value is the hole.
- // Strict mode handling not needed (const is disallowed in strict mode).
- if (lookup.IsField()) {
- FixedArray* properties = global->properties();
- int index = lookup.GetFieldIndex().field_index();
- if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
- properties->set(index, *value);
- }
- } else if (lookup.IsNormal()) {
- if (global->GetNormalizedProperty(&lookup)->IsTheHole() ||
- !lookup.IsReadOnly()) {
- global->SetNormalizedProperty(&lookup, *value);
- }
- } else {
- // Ignore re-initialization of constants that have already been
- // assigned a function value.
- ASSERT(lookup.IsReadOnly() && lookup.IsConstantFunction());
- }
-
- // Use the set value as the result of the operation.
- return *value;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
-
- Handle<Object> value(args[0], isolate);
- ASSERT(!value->IsTheHole());
-
- // Initializations are always done in a function or native context.
- RUNTIME_ASSERT(args[1]->IsContext());
- Handle<Context> context(Context::cast(args[1])->declaration_context());
-
- Handle<String> name(String::cast(args[2]));
-
- int index;
- PropertyAttributes attributes;
- ContextLookupFlags flags = FOLLOW_CHAINS;
- BindingFlags binding_flags;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes, &binding_flags);
-
- if (index >= 0) {
- ASSERT(holder->IsContext());
- // Property was found in a context. Perform the assignment if we
- // found some non-constant or an uninitialized constant.
- Handle<Context> context = Handle<Context>::cast(holder);
- if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
- context->set(index, *value);
- }
- return *value;
- }
-
- // The property could not be found, we introduce it as a property of the
- // global object.
- if (attributes == ABSENT) {
- Handle<JSObject> global = Handle<JSObject>(
- isolate->context()->global_object());
- // Strict mode not needed (const disallowed in strict mode).
- RETURN_IF_EMPTY_HANDLE(
- isolate,
- JSReceiver::SetProperty(global, name, value, NONE, kNonStrictMode));
- return *value;
- }
-
- // The property was present in some function's context extension object,
- // as a property on the subject of a with, or as a property of the global
- // object.
- //
- // In most situations, eval-introduced consts should still be present in
- // the context extension object. However, because declaration and
- // initialization are separate, the property might have been deleted
- // before we reach the initialization point.
- //
- // Example:
- //
- // function f() { eval("delete x; const x;"); }
- //
- // In that case, the initialization behaves like a normal assignment.
- Handle<JSObject> object = Handle<JSObject>::cast(holder);
-
- if (*object == context->extension()) {
- // This is the property that was introduced by the const declaration.
- // Set it if it hasn't been set before. NOTE: We cannot use
- // GetProperty() to get the current value as it 'unholes' the value.
- LookupResult lookup(isolate);
- object->LocalLookupRealNamedProperty(*name, &lookup);
- ASSERT(lookup.IsFound()); // the property was declared
- ASSERT(lookup.IsReadOnly()); // and it was declared as read-only
-
- if (lookup.IsField()) {
- FixedArray* properties = object->properties();
- int index = lookup.GetFieldIndex().field_index();
- if (properties->get(index)->IsTheHole()) {
- properties->set(index, *value);
- }
- } else if (lookup.IsNormal()) {
- if (object->GetNormalizedProperty(&lookup)->IsTheHole()) {
- object->SetNormalizedProperty(&lookup, *value);
- }
- } else {
- // We should not reach here. Any real, named property should be
- // either a field or a dictionary slot.
- UNREACHABLE();
- }
- } else {
- // The property was found on some other object. Set it if it is not a
- // read-only property.
- if ((attributes & READ_ONLY) == 0) {
- // Strict mode not needed (const disallowed in strict mode).
- RETURN_IF_EMPTY_HANDLE(
- isolate,
- JSReceiver::SetProperty(object, name, value, attributes,
- kNonStrictMode));
- }
- }
-
- return *value;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*,
- Runtime_OptimizeObjectForAddingMultipleProperties) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_SMI_ARG_CHECKED(properties, 1);
- if (object->HasFastProperties()) {
- JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
- }
- return *object;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- // Due to the way the JS calls are constructed this must be less than the
- // length of a string, i.e. it is always a Smi. We check anyway for security.
- CONVERT_SMI_ARG_CHECKED(index, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
- RUNTIME_ASSERT(index >= 0);
- RUNTIME_ASSERT(index <= subject->length());
- isolate->counters()->regexp_entry_runtime()->Increment();
- Handle<Object> result = RegExpImpl::Exec(regexp,
- subject,
- index,
- last_match_info);
- if (result.is_null()) return Failure::Exception();
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
- ASSERT(args.length() == 3);
- CONVERT_SMI_ARG_CHECKED(elements_count, 0);
- if (elements_count < 0 ||
- elements_count > FixedArray::kMaxLength ||
- !Smi::IsValid(elements_count)) {
- return isolate->ThrowIllegalOperation();
- }
- Object* new_object;
- { MaybeObject* maybe_new_object =
- isolate->heap()->AllocateFixedArrayWithHoles(elements_count);
- if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
- }
- FixedArray* elements = FixedArray::cast(new_object);
- { MaybeObject* maybe_new_object = isolate->heap()->AllocateRaw(
- JSRegExpResult::kSize, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
- }
- {
- AssertNoAllocation no_gc;
- HandleScope scope(isolate);
- reinterpret_cast<HeapObject*>(new_object)->
- set_map(isolate->native_context()->regexp_result_map());
- }
- JSArray* array = JSArray::cast(new_object);
- array->set_properties(isolate->heap()->empty_fixed_array());
- array->set_elements(elements);
- array->set_length(Smi::FromInt(elements_count));
- // Write in-object properties after the length of the array.
- array->InObjectPropertyAtPut(JSRegExpResult::kIndexIndex, args[1]);
- array->InObjectPropertyAtPut(JSRegExpResult::kInputIndex, args[2]);
- return array;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
- AssertNoAllocation no_alloc;
- ASSERT(args.length() == 5);
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_CHECKED(String, source, 1);
- // If source is the empty string we set it to "(?:)" instead as
- // suggested by ECMA-262, 5th, section 15.10.4.1.
- if (source->length() == 0) source = isolate->heap()->query_colon_string();
-
- Object* global = args[2];
- if (!global->IsTrue()) global = isolate->heap()->false_value();
-
- Object* ignoreCase = args[3];
- if (!ignoreCase->IsTrue()) ignoreCase = isolate->heap()->false_value();
-
- Object* multiline = args[4];
- if (!multiline->IsTrue()) multiline = isolate->heap()->false_value();
-
- Map* map = regexp->map();
- Object* constructor = map->constructor();
- if (constructor->IsJSFunction() &&
- JSFunction::cast(constructor)->initial_map() == map) {
- // If we still have the original map, set in-object properties directly.
- regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, source);
- // Both true and false are immovable immortal objects so no need for write
- // barrier.
- regexp->InObjectPropertyAtPut(
- JSRegExp::kGlobalFieldIndex, global, SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(
- JSRegExp::kIgnoreCaseFieldIndex, ignoreCase, SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(
- JSRegExp::kMultilineFieldIndex, multiline, SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(
- JSRegExp::kLastIndexFieldIndex, Smi::FromInt(0), SKIP_WRITE_BARRIER);
- return regexp;
- }
-
- // Map has changed, so use generic, but slower, method.
- PropertyAttributes final =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
- PropertyAttributes writable =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- Heap* heap = isolate->heap();
- MaybeObject* result;
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->source_string(),
- source,
- final);
- // TODO(jkummerow): Turn these back into ASSERTs when we can be certain
- // that it never fires in Release mode in the wild.
- CHECK(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->global_string(),
- global,
- final);
- CHECK(!result->IsFailure());
- result =
- regexp->SetLocalPropertyIgnoreAttributes(heap->ignore_case_string(),
- ignoreCase,
- final);
- CHECK(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->multiline_string(),
- multiline,
- final);
- CHECK(!result->IsFailure());
- result =
- regexp->SetLocalPropertyIgnoreAttributes(heap->last_index_string(),
- Smi::FromInt(0),
- writable);
- CHECK(!result->IsFailure());
- USE(result);
- return regexp;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FinishArrayPrototypeSetup) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, prototype, 0);
- // This is necessary to enable fast checks for absence of elements
- // on Array.prototype and below.
- prototype->set_elements(isolate->heap()->empty_fixed_array());
- return Smi::FromInt(0);
-}
-
-
-static Handle<JSFunction> InstallBuiltin(Isolate* isolate,
- Handle<JSObject> holder,
- const char* name,
- Builtins::Name builtin_name) {
- Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
- Handle<Code> code(isolate->builtins()->builtin(builtin_name));
- Handle<JSFunction> optimized =
- isolate->factory()->NewFunction(key,
- JS_OBJECT_TYPE,
- JSObject::kHeaderSize,
- code,
- false);
- optimized->shared()->DontAdaptArguments();
- JSReceiver::SetProperty(holder, key, optimized, NONE, kStrictMode);
- return optimized;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, holder, 0);
-
- InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop);
- InstallBuiltin(isolate, holder, "push", Builtins::kArrayPush);
- InstallBuiltin(isolate, holder, "shift", Builtins::kArrayShift);
- InstallBuiltin(isolate, holder, "unshift", Builtins::kArrayUnshift);
- InstallBuiltin(isolate, holder, "slice", Builtins::kArraySlice);
- InstallBuiltin(isolate, holder, "splice", Builtins::kArraySplice);
- InstallBuiltin(isolate, holder, "concat", Builtins::kArrayConcat);
-
- return *holder;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
-
- if (!callable->IsJSFunction()) {
- HandleScope scope(isolate);
- bool threw = false;
- Handle<Object> delegate =
- Execution::TryGetFunctionDelegate(Handle<JSReceiver>(callable), &threw);
- if (threw) return Failure::Exception();
- callable = JSFunction::cast(*delegate);
- }
- JSFunction* function = JSFunction::cast(callable);
-
- SharedFunctionInfo* shared = function->shared();
- if (shared->native() || !shared->is_classic_mode()) {
- return isolate->heap()->undefined_value();
- }
- // Returns undefined for strict or native functions, or
- // the associated global receiver for "normal" functions.
-
- Context* native_context =
- function->context()->global_object()->native_context();
- return native_context->global_object()->global_receiver();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
- int index = args.smi_at(1);
- Handle<String> pattern = args.at<String>(2);
- Handle<String> flags = args.at<String>(3);
-
- // Get the RegExp function from the context in the literals array.
- // This is the RegExp function from the context in which the
- // function was created. We do not use the RegExp function from the
- // current native context because this might be the RegExp function
- // from another context which we should not have access to.
- Handle<JSFunction> constructor =
- Handle<JSFunction>(
- JSFunction::NativeContextFromLiterals(*literals)->regexp_function());
- // Compute the regular expression literal.
- bool has_pending_exception;
- Handle<Object> regexp =
- RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags,
- &has_pending_exception);
- if (has_pending_exception) {
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
- literals->set(index, *regexp);
- return *regexp;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return f->shared()->name();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- CONVERT_ARG_CHECKED(String, name, 1);
- f->shared()->set_name(name);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return isolate->heap()->ToBoolean(
- f->shared()->name_should_print_as_anonymous());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- f->shared()->set_name_should_print_as_anonymous(true);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- f->RemovePrototype();
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScript) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- Handle<Object> script = Handle<Object>(fun->shared()->script(), isolate);
- if (!script->IsScript()) return isolate->heap()->undefined_value();
-
- return *GetScriptWrapper(Handle<Script>::cast(script));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, f, 0);
- Handle<SharedFunctionInfo> shared(f->shared());
- return *shared->GetSourceCode();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- int pos = fun->shared()->start_position();
- return Smi::FromInt(pos);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(Code, code, 0);
- CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
-
- RUNTIME_ASSERT(0 <= offset && offset < code->Size());
-
- Address pc = code->address() + offset;
- return Smi::FromInt(code->SourcePosition(pc));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- CONVERT_ARG_CHECKED(String, name, 1);
- fun->SetInstanceClassName(name);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- CONVERT_SMI_ARG_CHECKED(length, 1);
- fun->shared()->set_length(length);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- ASSERT(fun->should_have_prototype());
- Object* obj;
- { MaybeObject* maybe_obj =
- Accessors::FunctionSetPrototype(fun, args[1], NULL);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return args[0]; // return TOS
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
- NoHandleAllocation ha(isolate);
- RUNTIME_ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
-
- String* name = isolate->heap()->prototype_string();
-
- if (function->HasFastProperties()) {
- // Construct a new field descriptor with updated attributes.
- DescriptorArray* instance_desc = function->map()->instance_descriptors();
-
- int index = instance_desc->SearchWithCache(name, function->map());
- ASSERT(index != DescriptorArray::kNotFound);
- PropertyDetails details = instance_desc->GetDetails(index);
-
- CallbacksDescriptor new_desc(name,
- instance_desc->GetValue(index),
- static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
- details.descriptor_index());
-
- // Create a new map featuring the new field descriptors array.
- Map* new_map;
- MaybeObject* maybe_map =
- function->map()->CopyReplaceDescriptor(
- instance_desc, &new_desc, index, OMIT_TRANSITION);
- if (!maybe_map->To(&new_map)) return maybe_map;
-
- function->set_map(new_map);
- } else { // Dictionary properties.
- // Directly manipulate the property details.
- int entry = function->property_dictionary()->FindEntry(name);
- ASSERT(entry != StringDictionary::kNotFound);
- PropertyDetails details = function->property_dictionary()->DetailsAt(entry);
- PropertyDetails new_details(
- static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
- details.type(),
- details.dictionary_index());
- function->property_dictionary()->DetailsAtPut(entry, new_details);
- }
- return function;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return isolate->heap()->ToBoolean(f->shared()->IsApiFunction());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return isolate->heap()->ToBoolean(f->IsBuiltin());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
- Handle<Object> code = args.at<Object>(1);
-
- if (code->IsNull()) return *target;
- RUNTIME_ASSERT(code->IsJSFunction());
- Handle<JSFunction> source = Handle<JSFunction>::cast(code);
- Handle<SharedFunctionInfo> target_shared(target->shared());
- Handle<SharedFunctionInfo> source_shared(source->shared());
-
- if (!JSFunction::EnsureCompiled(source, KEEP_EXCEPTION)) {
- return Failure::Exception();
- }
-
- // Set the code, scope info, formal parameter count, and the length
- // of the target shared function info. Set the source code of the
- // target function to undefined. SetCode is only used for built-in
- // constructors like String, Array, and Object, and some web code
- // doesn't like seeing source code for constructors.
- target_shared->ReplaceCode(source_shared->code());
- target_shared->set_scope_info(source_shared->scope_info());
- target_shared->set_length(source_shared->length());
- target_shared->set_formal_parameter_count(
- source_shared->formal_parameter_count());
- target_shared->set_script(isolate->heap()->undefined_value());
-
- // Since we don't store the source we should never optimize this.
- target_shared->code()->set_optimizable(false);
-
- // Clear the optimization hints related to the compiled code as these
- // are no longer valid when the code is overwritten.
- target_shared->ClearThisPropertyAssignmentsInfo();
-
- // Set the code of the target function.
- target->ReplaceCode(source_shared->code());
- ASSERT(target->next_function_link()->IsUndefined());
-
- // Make sure we get a fresh copy of the literal vector to avoid cross
- // context contamination.
- Handle<Context> context(source->context());
- int number_of_literals = source->NumberOfLiterals();
- Handle<FixedArray> literals =
- isolate->factory()->NewFixedArray(number_of_literals, TENURED);
- if (number_of_literals > 0) {
- literals->set(JSFunction::kLiteralNativeContextIndex,
- context->native_context());
- }
- target->set_context(*context);
- target->set_literals(*literals);
-
- if (isolate->logger()->is_logging_code_events() ||
- CpuProfiler::is_profiling(isolate)) {
- isolate->logger()->LogExistingFunction(
- source_shared, Handle<Code>(source_shared->code()));
- }
-
- return *target;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_SMI_ARG_CHECKED(num, 1);
- RUNTIME_ASSERT(num >= 0);
- SetExpectedNofProperties(function, num);
- return isolate->heap()->undefined_value();
-}
-
-
-MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate,
- Object* char_code) {
- uint32_t code;
- if (char_code->ToArrayIndex(&code)) {
- if (code <= 0xffff) {
- return isolate->heap()->LookupSingleCharacterStringFromCode(code);
- }
- }
- return isolate->heap()->empty_string();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(String, subject, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, i, Uint32, args[1]);
-
- // Flatten the string. If someone wants to get a char at an index
- // in a cons string, it is likely that more indices will be
- // accessed.
- Object* flat;
- { MaybeObject* maybe_flat = subject->TryFlatten();
- if (!maybe_flat->ToObject(&flat)) return maybe_flat;
- }
- subject = String::cast(flat);
-
- if (i >= static_cast<uint32_t>(subject->length())) {
- return isolate->heap()->nan_value();
- }
-
- return Smi::FromInt(subject->Get(i));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CharFromCode) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- return CharFromCode(isolate, args[0]);
-}
-
-
-class FixedArrayBuilder {
- public:
- explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity)
- : array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
- length_(0),
- has_non_smi_elements_(false) {
- // Require a non-zero initial size. Ensures that doubling the size to
- // extend the array will work.
- ASSERT(initial_capacity > 0);
- }
-
- explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
- : array_(backing_store),
- length_(0),
- has_non_smi_elements_(false) {
- // Require a non-zero initial size. Ensures that doubling the size to
- // extend the array will work.
- ASSERT(backing_store->length() > 0);
- }
-
- bool HasCapacity(int elements) {
- int length = array_->length();
- int required_length = length_ + elements;
- return (length >= required_length);
- }
-
- void EnsureCapacity(int elements) {
- int length = array_->length();
- int required_length = length_ + elements;
- if (length < required_length) {
- int new_length = length;
- do {
- new_length *= 2;
- } while (new_length < required_length);
- Handle<FixedArray> extended_array =
- array_->GetIsolate()->factory()->NewFixedArrayWithHoles(new_length);
- array_->CopyTo(0, *extended_array, 0, length_);
- array_ = extended_array;
- }
- }
-
- void Add(Object* value) {
- ASSERT(!value->IsSmi());
- ASSERT(length_ < capacity());
- array_->set(length_, value);
- length_++;
- has_non_smi_elements_ = true;
- }
-
- void Add(Smi* value) {
- ASSERT(value->IsSmi());
- ASSERT(length_ < capacity());
- array_->set(length_, value);
- length_++;
- }
-
- Handle<FixedArray> array() {
- return array_;
- }
-
- int length() {
- return length_;
- }
-
- int capacity() {
- return array_->length();
- }
-
- Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
- FACTORY->SetContent(target_array, array_);
- target_array->set_length(Smi::FromInt(length_));
- return target_array;
- }
-
-
- private:
- Handle<FixedArray> array_;
- int length_;
- bool has_non_smi_elements_;
-};
-
-
-// Forward declarations.
-const int kStringBuilderConcatHelperLengthBits = 11;
-const int kStringBuilderConcatHelperPositionBits = 19;
-
-template <typename schar>
-static inline void StringBuilderConcatHelper(String*,
- schar*,
- FixedArray*,
- int);
-
-typedef BitField<int, 0, kStringBuilderConcatHelperLengthBits>
- StringBuilderSubstringLength;
-typedef BitField<int,
- kStringBuilderConcatHelperLengthBits,
- kStringBuilderConcatHelperPositionBits>
- StringBuilderSubstringPosition;
-
-
-class ReplacementStringBuilder {
- public:
- ReplacementStringBuilder(Heap* heap,
- Handle<String> subject,
- int estimated_part_count)
- : heap_(heap),
- array_builder_(heap->isolate(), estimated_part_count),
- subject_(subject),
- character_count_(0),
- is_ascii_(subject->IsOneByteRepresentation()) {
- // Require a non-zero initial size. Ensures that doubling the size to
- // extend the array will work.
- ASSERT(estimated_part_count > 0);
- }
-
- static inline void AddSubjectSlice(FixedArrayBuilder* builder,
- int from,
- int to) {
- ASSERT(from >= 0);
- int length = to - from;
- ASSERT(length > 0);
- if (StringBuilderSubstringLength::is_valid(length) &&
- StringBuilderSubstringPosition::is_valid(from)) {
- int encoded_slice = StringBuilderSubstringLength::encode(length) |
- StringBuilderSubstringPosition::encode(from);
- builder->Add(Smi::FromInt(encoded_slice));
- } else {
- // Otherwise encode as two smis.
- builder->Add(Smi::FromInt(-length));
- builder->Add(Smi::FromInt(from));
- }
- }
-
-
- void EnsureCapacity(int elements) {
- array_builder_.EnsureCapacity(elements);
- }
-
-
- void AddSubjectSlice(int from, int to) {
- AddSubjectSlice(&array_builder_, from, to);
- IncrementCharacterCount(to - from);
- }
-
-
- void AddString(Handle<String> string) {
- int length = string->length();
- ASSERT(length > 0);
- AddElement(*string);
- if (!string->IsOneByteRepresentation()) {
- is_ascii_ = false;
- }
- IncrementCharacterCount(length);
- }
-
-
- Handle<String> ToString() {
- if (array_builder_.length() == 0) {
- return heap_->isolate()->factory()->empty_string();
- }
-
- Handle<String> joined_string;
- if (is_ascii_) {
- Handle<SeqOneByteString> seq = NewRawOneByteString(character_count_);
- AssertNoAllocation no_alloc;
- uint8_t* char_buffer = seq->GetChars();
- StringBuilderConcatHelper(*subject_,
- char_buffer,
- *array_builder_.array(),
- array_builder_.length());
- joined_string = Handle<String>::cast(seq);
- } else {
- // Non-ASCII.
- Handle<SeqTwoByteString> seq = NewRawTwoByteString(character_count_);
- AssertNoAllocation no_alloc;
- uc16* char_buffer = seq->GetChars();
- StringBuilderConcatHelper(*subject_,
- char_buffer,
- *array_builder_.array(),
- array_builder_.length());
- joined_string = Handle<String>::cast(seq);
- }
- return joined_string;
- }
-
-
- void IncrementCharacterCount(int by) {
- if (character_count_ > String::kMaxLength - by) {
- V8::FatalProcessOutOfMemory("String.replace result too large.");
- }
- character_count_ += by;
- }
-
- private:
- Handle<SeqOneByteString> NewRawOneByteString(int length) {
- return heap_->isolate()->factory()->NewRawOneByteString(length);
- }
-
-
- Handle<SeqTwoByteString> NewRawTwoByteString(int length) {
- return heap_->isolate()->factory()->NewRawTwoByteString(length);
- }
-
-
- void AddElement(Object* element) {
- ASSERT(element->IsSmi() || element->IsString());
- ASSERT(array_builder_.capacity() > array_builder_.length());
- array_builder_.Add(element);
- }
-
- Heap* heap_;
- FixedArrayBuilder array_builder_;
- Handle<String> subject_;
- int character_count_;
- bool is_ascii_;
-};
-
-
-class CompiledReplacement {
- public:
- explicit CompiledReplacement(Zone* zone)
- : parts_(1, zone), replacement_substrings_(0, zone), zone_(zone) {}
-
- // Return whether the replacement is simple.
- bool Compile(Handle<String> replacement,
- int capture_count,
- int subject_length);
-
- // Use Apply only if Compile returned false.
- void Apply(ReplacementStringBuilder* builder,
- int match_from,
- int match_to,
- int32_t* match);
-
- // Number of distinct parts of the replacement pattern.
- int parts() {
- return parts_.length();
- }
-
- Zone* zone() const { return zone_; }
-
- private:
- enum PartType {
- SUBJECT_PREFIX = 1,
- SUBJECT_SUFFIX,
- SUBJECT_CAPTURE,
- REPLACEMENT_SUBSTRING,
- REPLACEMENT_STRING,
-
- NUMBER_OF_PART_TYPES
- };
-
- struct ReplacementPart {
- static inline ReplacementPart SubjectMatch() {
- return ReplacementPart(SUBJECT_CAPTURE, 0);
- }
- static inline ReplacementPart SubjectCapture(int capture_index) {
- return ReplacementPart(SUBJECT_CAPTURE, capture_index);
- }
- static inline ReplacementPart SubjectPrefix() {
- return ReplacementPart(SUBJECT_PREFIX, 0);
- }
- static inline ReplacementPart SubjectSuffix(int subject_length) {
- return ReplacementPart(SUBJECT_SUFFIX, subject_length);
- }
- static inline ReplacementPart ReplacementString() {
- return ReplacementPart(REPLACEMENT_STRING, 0);
- }
- static inline ReplacementPart ReplacementSubString(int from, int to) {
- ASSERT(from >= 0);
- ASSERT(to > from);
- return ReplacementPart(-from, to);
- }
-
- // If tag <= 0 then it is the negation of a start index of a substring of
- // the replacement pattern, otherwise it's a value from PartType.
- ReplacementPart(int tag, int data)
- : tag(tag), data(data) {
- // Must be non-positive or a PartType value.
- ASSERT(tag < NUMBER_OF_PART_TYPES);
- }
- // Either a value of PartType or a non-positive number that is
- // the negation of an index into the replacement string.
- int tag;
- // The data value's interpretation depends on the value of tag:
- // tag == SUBJECT_PREFIX ||
- // tag == SUBJECT_SUFFIX: data is unused.
- // tag == SUBJECT_CAPTURE: data is the number of the capture.
- // tag == REPLACEMENT_SUBSTRING ||
- // tag == REPLACEMENT_STRING: data is index into array of substrings
- // of the replacement string.
- // tag <= 0: Temporary representation of the substring of the replacement
- // string ranging over -tag .. data.
- // Is replaced by REPLACEMENT_{SUB,}STRING when we create the
- // substring objects.
- int data;
- };
-
- template<typename Char>
- bool ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
- Vector<Char> characters,
- int capture_count,
- int subject_length,
- Zone* zone) {
- int length = characters.length();
- int last = 0;
- for (int i = 0; i < length; i++) {
- Char c = characters[i];
- if (c == '$') {
- int next_index = i + 1;
- if (next_index == length) { // No next character!
- break;
- }
- Char c2 = characters[next_index];
- switch (c2) {
- case '$':
- if (i > last) {
- // There is a substring before. Include the first "$".
- parts->Add(ReplacementPart::ReplacementSubString(last, next_index),
- zone);
- last = next_index + 1; // Continue after the second "$".
- } else {
- // Let the next substring start with the second "$".
- last = next_index;
- }
- i = next_index;
- break;
- case '`':
- if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
- }
- parts->Add(ReplacementPart::SubjectPrefix(), zone);
- i = next_index;
- last = i + 1;
- break;
- case '\'':
- if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
- }
- parts->Add(ReplacementPart::SubjectSuffix(subject_length), zone);
- i = next_index;
- last = i + 1;
- break;
- case '&':
- if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
- }
- parts->Add(ReplacementPart::SubjectMatch(), zone);
- i = next_index;
- last = i + 1;
- break;
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9': {
- int capture_ref = c2 - '0';
- if (capture_ref > capture_count) {
- i = next_index;
- continue;
- }
- int second_digit_index = next_index + 1;
- if (second_digit_index < length) {
- // Peek ahead to see if we have two digits.
- Char c3 = characters[second_digit_index];
- if ('0' <= c3 && c3 <= '9') { // Double digits.
- int double_digit_ref = capture_ref * 10 + c3 - '0';
- if (double_digit_ref <= capture_count) {
- next_index = second_digit_index;
- capture_ref = double_digit_ref;
- }
- }
- }
- if (capture_ref > 0) {
- if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
- }
- ASSERT(capture_ref <= capture_count);
- parts->Add(ReplacementPart::SubjectCapture(capture_ref), zone);
- last = next_index + 1;
- }
- i = next_index;
- break;
- }
- default:
- i = next_index;
- break;
- }
- }
- }
- if (length > last) {
- if (last == 0) {
- // Replacement is simple. Do not use Apply to do the replacement.
- return true;
- } else {
- parts->Add(ReplacementPart::ReplacementSubString(last, length), zone);
- }
- }
- return false;
- }
-
- ZoneList<ReplacementPart> parts_;
- ZoneList<Handle<String> > replacement_substrings_;
- Zone* zone_;
-};
-
-
-bool CompiledReplacement::Compile(Handle<String> replacement,
- int capture_count,
- int subject_length) {
- {
- AssertNoAllocation no_alloc;
- String::FlatContent content = replacement->GetFlatContent();
- ASSERT(content.IsFlat());
- bool simple = false;
- if (content.IsAscii()) {
- simple = ParseReplacementPattern(&parts_,
- content.ToOneByteVector(),
- capture_count,
- subject_length,
- zone());
- } else {
- ASSERT(content.IsTwoByte());
- simple = ParseReplacementPattern(&parts_,
- content.ToUC16Vector(),
- capture_count,
- subject_length,
- zone());
- }
- if (simple) return true;
- }
-
- Isolate* isolate = replacement->GetIsolate();
- // Find substrings of replacement string and create them as String objects.
- int substring_index = 0;
- for (int i = 0, n = parts_.length(); i < n; i++) {
- int tag = parts_[i].tag;
- if (tag <= 0) { // A replacement string slice.
- int from = -tag;
- int to = parts_[i].data;
- replacement_substrings_.Add(
- isolate->factory()->NewSubString(replacement, from, to), zone());
- parts_[i].tag = REPLACEMENT_SUBSTRING;
- parts_[i].data = substring_index;
- substring_index++;
- } else if (tag == REPLACEMENT_STRING) {
- replacement_substrings_.Add(replacement, zone());
- parts_[i].data = substring_index;
- substring_index++;
- }
- }
- return false;
-}
-
-
-void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
- int match_from,
- int match_to,
- int32_t* match) {
- ASSERT_LT(0, parts_.length());
- for (int i = 0, n = parts_.length(); i < n; i++) {
- ReplacementPart part = parts_[i];
- switch (part.tag) {
- case SUBJECT_PREFIX:
- if (match_from > 0) builder->AddSubjectSlice(0, match_from);
- break;
- case SUBJECT_SUFFIX: {
- int subject_length = part.data;
- if (match_to < subject_length) {
- builder->AddSubjectSlice(match_to, subject_length);
- }
- break;
- }
- case SUBJECT_CAPTURE: {
- int capture = part.data;
- int from = match[capture * 2];
- int to = match[capture * 2 + 1];
- if (from >= 0 && to > from) {
- builder->AddSubjectSlice(from, to);
- }
- break;
- }
- case REPLACEMENT_SUBSTRING:
- case REPLACEMENT_STRING:
- builder->AddString(replacement_substrings_[part.data]);
- break;
- default:
- UNREACHABLE();
- }
- }
-}
-
-
-void FindAsciiStringIndices(Vector<const uint8_t> subject,
- char pattern,
- ZoneList<int>* indices,
- unsigned int limit,
- Zone* zone) {
- ASSERT(limit > 0);
- // Collect indices of pattern in subject using memchr.
- // Stop after finding at most limit values.
- const uint8_t* subject_start = subject.start();
- const uint8_t* subject_end = subject_start + subject.length();
- const uint8_t* pos = subject_start;
- while (limit > 0) {
- pos = reinterpret_cast<const uint8_t*>(
- memchr(pos, pattern, subject_end - pos));
- if (pos == NULL) return;
- indices->Add(static_cast<int>(pos - subject_start), zone);
- pos++;
- limit--;
- }
-}
-
-
-void FindTwoByteStringIndices(const Vector<const uc16> subject,
- uc16 pattern,
- ZoneList<int>* indices,
- unsigned int limit,
- Zone* zone) {
- ASSERT(limit > 0);
- const uc16* subject_start = subject.start();
- const uc16* subject_end = subject_start + subject.length();
- for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) {
- if (*pos == pattern) {
- indices->Add(static_cast<int>(pos - subject_start), zone);
- limit--;
- }
- }
-}
-
-
-template <typename SubjectChar, typename PatternChar>
-void FindStringIndices(Isolate* isolate,
- Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- ZoneList<int>* indices,
- unsigned int limit,
- Zone* zone) {
- ASSERT(limit > 0);
- // Collect indices of pattern in subject.
- // Stop after finding at most limit values.
- int pattern_length = pattern.length();
- int index = 0;
- StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
- while (limit > 0) {
- index = search.Search(subject, index);
- if (index < 0) return;
- indices->Add(index, zone);
- index += pattern_length;
- limit--;
- }
-}
-
-
-void FindStringIndicesDispatch(Isolate* isolate,
- String* subject,
- String* pattern,
- ZoneList<int>* indices,
- unsigned int limit,
- Zone* zone) {
- {
- AssertNoAllocation no_gc;
- String::FlatContent subject_content = subject->GetFlatContent();
- String::FlatContent pattern_content = pattern->GetFlatContent();
- ASSERT(subject_content.IsFlat());
- ASSERT(pattern_content.IsFlat());
- if (subject_content.IsAscii()) {
- Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
- if (pattern_content.IsAscii()) {
- Vector<const uint8_t> pattern_vector =
- pattern_content.ToOneByteVector();
- if (pattern_vector.length() == 1) {
- FindAsciiStringIndices(subject_vector,
- pattern_vector[0],
- indices,
- limit,
- zone);
- } else {
- FindStringIndices(isolate,
- subject_vector,
- pattern_vector,
- indices,
- limit,
- zone);
- }
- } else {
- FindStringIndices(isolate,
- subject_vector,
- pattern_content.ToUC16Vector(),
- indices,
- limit,
- zone);
- }
- } else {
- Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
- if (pattern_content.IsAscii()) {
- Vector<const uint8_t> pattern_vector =
- pattern_content.ToOneByteVector();
- if (pattern_vector.length() == 1) {
- FindTwoByteStringIndices(subject_vector,
- pattern_vector[0],
- indices,
- limit,
- zone);
- } else {
- FindStringIndices(isolate,
- subject_vector,
- pattern_vector,
- indices,
- limit,
- zone);
- }
- } else {
- Vector<const uc16> pattern_vector = pattern_content.ToUC16Vector();
- if (pattern_vector.length() == 1) {
- FindTwoByteStringIndices(subject_vector,
- pattern_vector[0],
- indices,
- limit,
- zone);
- } else {
- FindStringIndices(isolate,
- subject_vector,
- pattern_vector,
- indices,
- limit,
- zone);
- }
- }
- }
- }
-}
-
-
-template<typename ResultSeqString>
-MUST_USE_RESULT static MaybeObject* StringReplaceGlobalAtomRegExpWithString(
- Isolate* isolate,
- Handle<String> subject,
- Handle<JSRegExp> pattern_regexp,
- Handle<String> replacement,
- Handle<JSArray> last_match_info) {
- ASSERT(subject->IsFlat());
- ASSERT(replacement->IsFlat());
-
- Zone* zone = isolate->runtime_zone();
- ZoneScope zone_space(zone, DELETE_ON_EXIT);
- ZoneList<int> indices(8, zone);
- ASSERT_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
- String* pattern =
- String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
- int subject_len = subject->length();
- int pattern_len = pattern->length();
- int replacement_len = replacement->length();
-
- FindStringIndicesDispatch(
- isolate, *subject, pattern, &indices, 0xffffffff, zone);
-
- int matches = indices.length();
- if (matches == 0) return *subject;
-
- // Detect integer overflow.
- int64_t result_len_64 =
- (static_cast<int64_t>(replacement_len) -
- static_cast<int64_t>(pattern_len)) *
- static_cast<int64_t>(matches) +
- static_cast<int64_t>(subject_len);
- if (result_len_64 > INT_MAX) return Failure::OutOfMemoryException(0x11);
- int result_len = static_cast<int>(result_len_64);
-
- int subject_pos = 0;
- int result_pos = 0;
-
- Handle<ResultSeqString> result;
- if (ResultSeqString::kHasAsciiEncoding) {
- result = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawOneByteString(result_len));
- } else {
- result = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawTwoByteString(result_len));
- }
-
- for (int i = 0; i < matches; i++) {
- // Copy non-matched subject content.
- if (subject_pos < indices.at(i)) {
- String::WriteToFlat(*subject,
- result->GetChars() + result_pos,
- subject_pos,
- indices.at(i));
- result_pos += indices.at(i) - subject_pos;
- }
-
- // Replace match.
- if (replacement_len > 0) {
- String::WriteToFlat(*replacement,
- result->GetChars() + result_pos,
- 0,
- replacement_len);
- result_pos += replacement_len;
- }
-
- subject_pos = indices.at(i) + pattern_len;
- }
- // Add remaining subject content at the end.
- if (subject_pos < subject_len) {
- String::WriteToFlat(*subject,
- result->GetChars() + result_pos,
- subject_pos,
- subject_len);
- }
-
- int32_t match_indices[] = { indices.at(matches - 1),
- indices.at(matches - 1) + pattern_len };
- RegExpImpl::SetLastMatchInfo(last_match_info, subject, 0, match_indices);
-
- return *result;
-}
-
-
-MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithString(
- Isolate* isolate,
- Handle<String> subject,
- Handle<JSRegExp> regexp,
- Handle<String> replacement,
- Handle<JSArray> last_match_info) {
- ASSERT(subject->IsFlat());
- ASSERT(replacement->IsFlat());
-
- int capture_count = regexp->CaptureCount();
- int subject_length = subject->length();
-
- // CompiledReplacement uses zone allocation.
- Zone* zone = isolate->runtime_zone();
- ZoneScope zonescope(zone, DELETE_ON_EXIT);
- CompiledReplacement compiled_replacement(zone);
- bool simple_replace = compiled_replacement.Compile(replacement,
- capture_count,
- subject_length);
-
- // Shortcut for simple non-regexp global replacements
- if (regexp->TypeTag() == JSRegExp::ATOM && simple_replace) {
- if (subject->IsOneByteConvertible() &&
- replacement->IsOneByteConvertible()) {
- return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
- isolate, subject, regexp, replacement, last_match_info);
- } else {
- return StringReplaceGlobalAtomRegExpWithString<SeqTwoByteString>(
- isolate, subject, regexp, replacement, last_match_info);
- }
- }
-
- RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
- if (global_cache.HasException()) return Failure::Exception();
-
- int32_t* current_match = global_cache.FetchNext();
- if (current_match == NULL) {
- if (global_cache.HasException()) return Failure::Exception();
- return *subject;
- }
-
- // Guessing the number of parts that the final result string is built
- // from. Global regexps can match any number of times, so we guess
- // conservatively.
- int expected_parts = (compiled_replacement.parts() + 1) * 4 + 1;
- ReplacementStringBuilder builder(isolate->heap(),
- subject,
- expected_parts);
-
- // Number of parts added by compiled replacement plus preceeding
- // string and possibly suffix after last match. It is possible for
- // all components to use two elements when encoded as two smis.
- const int parts_added_per_loop = 2 * (compiled_replacement.parts() + 2);
-
- int prev = 0;
-
- do {
- builder.EnsureCapacity(parts_added_per_loop);
-
- int start = current_match[0];
- int end = current_match[1];
-
- if (prev < start) {
- builder.AddSubjectSlice(prev, start);
- }
-
- if (simple_replace) {
- builder.AddString(replacement);
- } else {
- compiled_replacement.Apply(&builder,
- start,
- end,
- current_match);
- }
- prev = end;
-
- current_match = global_cache.FetchNext();
- } while (current_match != NULL);
-
- if (global_cache.HasException()) return Failure::Exception();
-
- if (prev < subject_length) {
- builder.EnsureCapacity(2);
- builder.AddSubjectSlice(prev, subject_length);
- }
-
- RegExpImpl::SetLastMatchInfo(last_match_info,
- subject,
- capture_count,
- global_cache.LastSuccessfulMatch());
-
- return *(builder.ToString());
-}
-
-
-template <typename ResultSeqString>
-MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString(
- Isolate* isolate,
- Handle<String> subject,
- Handle<JSRegExp> regexp,
- Handle<JSArray> last_match_info) {
- ASSERT(subject->IsFlat());
-
- // Shortcut for simple non-regexp global replacements
- if (regexp->TypeTag() == JSRegExp::ATOM) {
- Handle<String> empty_string = isolate->factory()->empty_string();
- if (subject->IsOneByteRepresentation()) {
- return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
- isolate, subject, regexp, empty_string, last_match_info);
- } else {
- return StringReplaceGlobalAtomRegExpWithString<SeqTwoByteString>(
- isolate, subject, regexp, empty_string, last_match_info);
- }
- }
-
- RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
- if (global_cache.HasException()) return Failure::Exception();
-
- int32_t* current_match = global_cache.FetchNext();
- if (current_match == NULL) {
- if (global_cache.HasException()) return Failure::Exception();
- return *subject;
- }
-
- int start = current_match[0];
- int end = current_match[1];
- int capture_count = regexp->CaptureCount();
- int subject_length = subject->length();
-
- int new_length = subject_length - (end - start);
- if (new_length == 0) return isolate->heap()->empty_string();
-
- Handle<ResultSeqString> answer;
- if (ResultSeqString::kHasAsciiEncoding) {
- answer = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawOneByteString(new_length));
- } else {
- answer = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawTwoByteString(new_length));
- }
-
- int prev = 0;
- int position = 0;
-
- do {
- start = current_match[0];
- end = current_match[1];
- if (prev < start) {
- // Add substring subject[prev;start] to answer string.
- String::WriteToFlat(*subject, answer->GetChars() + position, prev, start);
- position += start - prev;
- }
- prev = end;
-
- current_match = global_cache.FetchNext();
- } while (current_match != NULL);
-
- if (global_cache.HasException()) return Failure::Exception();
-
- RegExpImpl::SetLastMatchInfo(last_match_info,
- subject,
- capture_count,
- global_cache.LastSuccessfulMatch());
-
- if (prev < subject_length) {
- // Add substring subject[prev;length] to answer string.
- String::WriteToFlat(
- *subject, answer->GetChars() + position, prev, subject_length);
- position += subject_length - prev;
- }
-
- if (position == 0) return isolate->heap()->empty_string();
-
- // Shorten string and fill
- int string_size = ResultSeqString::SizeFor(position);
- int allocated_string_size = ResultSeqString::SizeFor(new_length);
- int delta = allocated_string_size - string_size;
-
- answer->set_length(position);
- if (delta == 0) return *answer;
-
- Address end_of_string = answer->address() + string_size;
- isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) {
- MemoryChunk::IncrementLiveBytesFromMutator(answer->address(), -delta);
- }
-
- return *answer;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceGlobalRegExpWithString) {
- ASSERT(args.length() == 4);
-
- HandleScope scope(isolate);
-
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
-
- ASSERT(regexp->GetFlags().is_global());
-
- if (!subject->IsFlat()) subject = FlattenGetString(subject);
-
- if (replacement->length() == 0) {
- if (subject->IsOneByteConvertible()) {
- return StringReplaceGlobalRegExpWithEmptyString<SeqOneByteString>(
- isolate, subject, regexp, last_match_info);
- } else {
- return StringReplaceGlobalRegExpWithEmptyString<SeqTwoByteString>(
- isolate, subject, regexp, last_match_info);
- }
- }
-
- if (!replacement->IsFlat()) replacement = FlattenGetString(replacement);
-
- return StringReplaceGlobalRegExpWithString(
- isolate, subject, regexp, replacement, last_match_info);
-}
-
-
-Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
- Handle<String> subject,
- Handle<String> search,
- Handle<String> replace,
- bool* found,
- int recursion_limit) {
- if (recursion_limit == 0) return Handle<String>::null();
- if (subject->IsConsString()) {
- ConsString* cons = ConsString::cast(*subject);
- Handle<String> first = Handle<String>(cons->first());
- Handle<String> second = Handle<String>(cons->second());
- Handle<String> new_first =
- StringReplaceOneCharWithString(isolate,
- first,
- search,
- replace,
- found,
- recursion_limit - 1);
- if (*found) return isolate->factory()->NewConsString(new_first, second);
- if (new_first.is_null()) return new_first;
-
- Handle<String> new_second =
- StringReplaceOneCharWithString(isolate,
- second,
- search,
- replace,
- found,
- recursion_limit - 1);
- if (*found) return isolate->factory()->NewConsString(first, new_second);
- if (new_second.is_null()) return new_second;
-
- return subject;
- } else {
- int index = Runtime::StringMatch(isolate, subject, search, 0);
- if (index == -1) return subject;
- *found = true;
- Handle<String> first = isolate->factory()->NewSubString(subject, 0, index);
- Handle<String> cons1 = isolate->factory()->NewConsString(first, replace);
- Handle<String> second =
- isolate->factory()->NewSubString(subject, index + 1, subject->length());
- return isolate->factory()->NewConsString(cons1, second);
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) {
- ASSERT(args.length() == 3);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, search, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, replace, 2);
-
- // If the cons string tree is too deep, we simply abort the recursion and
- // retry with a flattened subject string.
- const int kRecursionLimit = 0x1000;
- bool found = false;
- Handle<String> result = StringReplaceOneCharWithString(isolate,
- subject,
- search,
- replace,
- &found,
- kRecursionLimit);
- if (!result.is_null()) return *result;
- return *StringReplaceOneCharWithString(isolate,
- FlattenGetString(subject),
- search,
- replace,
- &found,
- kRecursionLimit);
-}
-
-
-// Perform string match of pattern on subject, starting at start index.
-// Caller must ensure that 0 <= start_index <= sub->length(),
-// and should check that pat->length() + start_index <= sub->length().
-int Runtime::StringMatch(Isolate* isolate,
- Handle<String> sub,
- Handle<String> pat,
- int start_index) {
- ASSERT(0 <= start_index);
- ASSERT(start_index <= sub->length());
-
- int pattern_length = pat->length();
- if (pattern_length == 0) return start_index;
-
- int subject_length = sub->length();
- if (start_index + pattern_length > subject_length) return -1;
-
- if (!sub->IsFlat()) FlattenString(sub);
- if (!pat->IsFlat()) FlattenString(pat);
-
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
- // Extract flattened substrings of cons strings before determining asciiness.
- String::FlatContent seq_sub = sub->GetFlatContent();
- String::FlatContent seq_pat = pat->GetFlatContent();
-
- // dispatch on type of strings
- if (seq_pat.IsAscii()) {
- Vector<const uint8_t> pat_vector = seq_pat.ToOneByteVector();
- if (seq_sub.IsAscii()) {
- return SearchString(isolate,
- seq_sub.ToOneByteVector(),
- pat_vector,
- start_index);
- }
- return SearchString(isolate,
- seq_sub.ToUC16Vector(),
- pat_vector,
- start_index);
- }
- Vector<const uc16> pat_vector = seq_pat.ToUC16Vector();
- if (seq_sub.IsAscii()) {
- return SearchString(isolate,
- seq_sub.ToOneByteVector(),
- pat_vector,
- start_index);
- }
- return SearchString(isolate,
- seq_sub.ToUC16Vector(),
- pat_vector,
- start_index);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringIndexOf) {
- HandleScope scope(isolate); // create a new handle scope
- ASSERT(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
-
- Object* index = args[2];
- uint32_t start_index;
- if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
-
- RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
- int position =
- Runtime::StringMatch(isolate, sub, pat, start_index);
- return Smi::FromInt(position);
-}
-
-
-template <typename schar, typename pchar>
-static int StringMatchBackwards(Vector<const schar> subject,
- Vector<const pchar> pattern,
- int idx) {
- int pattern_length = pattern.length();
- ASSERT(pattern_length >= 1);
- ASSERT(idx + pattern_length <= subject.length());
-
- if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
- for (int i = 0; i < pattern_length; i++) {
- uc16 c = pattern[i];
- if (c > String::kMaxOneByteCharCode) {
- return -1;
- }
- }
- }
-
- pchar pattern_first_char = pattern[0];
- for (int i = idx; i >= 0; i--) {
- if (subject[i] != pattern_first_char) continue;
- int j = 1;
- while (j < pattern_length) {
- if (pattern[j] != subject[i+j]) {
- break;
- }
- j++;
- }
- if (j == pattern_length) {
- return i;
- }
- }
- return -1;
-}
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
- HandleScope scope(isolate); // create a new handle scope
- ASSERT(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
-
- Object* index = args[2];
- uint32_t start_index;
- if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
-
- uint32_t pat_length = pat->length();
- uint32_t sub_length = sub->length();
-
- if (start_index + pat_length > sub_length) {
- start_index = sub_length - pat_length;
- }
-
- if (pat_length == 0) {
- return Smi::FromInt(start_index);
- }
-
- if (!sub->IsFlat()) FlattenString(sub);
- if (!pat->IsFlat()) FlattenString(pat);
-
- int position = -1;
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
-
- String::FlatContent sub_content = sub->GetFlatContent();
- String::FlatContent pat_content = pat->GetFlatContent();
-
- if (pat_content.IsAscii()) {
- Vector<const uint8_t> pat_vector = pat_content.ToOneByteVector();
- if (sub_content.IsAscii()) {
- position = StringMatchBackwards(sub_content.ToOneByteVector(),
- pat_vector,
- start_index);
- } else {
- position = StringMatchBackwards(sub_content.ToUC16Vector(),
- pat_vector,
- start_index);
- }
- } else {
- Vector<const uc16> pat_vector = pat_content.ToUC16Vector();
- if (sub_content.IsAscii()) {
- position = StringMatchBackwards(sub_content.ToOneByteVector(),
- pat_vector,
- start_index);
- } else {
- position = StringMatchBackwards(sub_content.ToUC16Vector(),
- pat_vector,
- start_index);
- }
- }
-
- return Smi::FromInt(position);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(String, str1, 0);
- CONVERT_ARG_CHECKED(String, str2, 1);
-
- if (str1 == str2) return Smi::FromInt(0); // Equal.
- int str1_length = str1->length();
- int str2_length = str2->length();
-
- // Decide trivial cases without flattening.
- if (str1_length == 0) {
- if (str2_length == 0) return Smi::FromInt(0); // Equal.
- return Smi::FromInt(-str2_length);
- } else {
- if (str2_length == 0) return Smi::FromInt(str1_length);
- }
-
- int end = str1_length < str2_length ? str1_length : str2_length;
-
- // No need to flatten if we are going to find the answer on the first
- // character. At this point we know there is at least one character
- // in each string, due to the trivial case handling above.
- int d = str1->Get(0) - str2->Get(0);
- if (d != 0) return Smi::FromInt(d);
-
- str1->TryFlatten();
- str2->TryFlatten();
-
- ConsStringIteratorOp* op1 =
- isolate->runtime_state()->string_locale_compare_it1();
- ConsStringIteratorOp* op2 =
- isolate->runtime_state()->string_locale_compare_it2();
- // TODO(dcarney) Can do array compares here more efficiently.
- StringCharacterStream stream1(str1, op1);
- StringCharacterStream stream2(str2, op2);
-
- for (int i = 0; i < end; i++) {
- uint16_t char1 = stream1.GetNext();
- uint16_t char2 = stream2.GetNext();
- if (char1 != char2) return Smi::FromInt(char1 - char2);
- }
-
- return Smi::FromInt(str1_length - str2_length);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 3);
-
- CONVERT_ARG_CHECKED(String, value, 0);
- int start, end;
- // We have a fast integer-only case here to avoid a conversion to double in
- // the common case where from and to are Smis.
- if (args[1]->IsSmi() && args[2]->IsSmi()) {
- CONVERT_SMI_ARG_CHECKED(from_number, 1);
- CONVERT_SMI_ARG_CHECKED(to_number, 2);
- start = from_number;
- end = to_number;
- } else {
- CONVERT_DOUBLE_ARG_CHECKED(from_number, 1);
- CONVERT_DOUBLE_ARG_CHECKED(to_number, 2);
- start = FastD2IChecked(from_number);
- end = FastD2IChecked(to_number);
- }
- RUNTIME_ASSERT(end >= start);
- RUNTIME_ASSERT(start >= 0);
- RUNTIME_ASSERT(end <= value->length());
- isolate->counters()->sub_string_runtime()->Increment();
- if (end - start == 1) {
- return isolate->heap()->LookupSingleCharacterStringFromCode(
- value->Get(start));
- }
- return value->SubString(start, end);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
- ASSERT_EQ(3, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
- HandleScope handles(isolate);
-
- RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
- if (global_cache.HasException()) return Failure::Exception();
-
- int capture_count = regexp->CaptureCount();
-
- Zone* zone = isolate->runtime_zone();
- ZoneScope zone_space(zone, DELETE_ON_EXIT);
- ZoneList<int> offsets(8, zone);
-
- while (true) {
- int32_t* match = global_cache.FetchNext();
- if (match == NULL) break;
- offsets.Add(match[0], zone); // start
- offsets.Add(match[1], zone); // end
- }
-
- if (global_cache.HasException()) return Failure::Exception();
-
- if (offsets.length() == 0) {
- // Not a single match.
- return isolate->heap()->null_value();
- }
-
- RegExpImpl::SetLastMatchInfo(regexp_info,
- subject,
- capture_count,
- global_cache.LastSuccessfulMatch());
-
- int matches = offsets.length() / 2;
- Handle<FixedArray> elements = isolate->factory()->NewFixedArray(matches);
- Handle<String> substring =
- isolate->factory()->NewSubString(subject, offsets.at(0), offsets.at(1));
- elements->set(0, *substring);
- for (int i = 1; i < matches; i++) {
- HandleScope temp_scope(isolate);
- int from = offsets.at(i * 2);
- int to = offsets.at(i * 2 + 1);
- Handle<String> substring =
- isolate->factory()->NewProperSubString(subject, from, to);
- elements->set(i, *substring);
- }
- Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
- result->set_length(Smi::FromInt(matches));
- return *result;
-}
-
-
-// Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain
-// separate last match info. See comment on that function.
-template<bool has_capture>
-static MaybeObject* SearchRegExpMultiple(
- Isolate* isolate,
- Handle<String> subject,
- Handle<JSRegExp> regexp,
- Handle<JSArray> last_match_array,
- Handle<JSArray> result_array) {
- ASSERT(subject->IsFlat());
- ASSERT_NE(has_capture, regexp->CaptureCount() == 0);
-
- int capture_count = regexp->CaptureCount();
- int subject_length = subject->length();
-
- static const int kMinLengthToCache = 0x1000;
-
- if (subject_length > kMinLengthToCache) {
- Handle<Object> cached_answer(RegExpResultsCache::Lookup(
- isolate->heap(),
- *subject,
- regexp->data(),
- RegExpResultsCache::REGEXP_MULTIPLE_INDICES), isolate);
- if (*cached_answer != Smi::FromInt(0)) {
- Handle<FixedArray> cached_fixed_array =
- Handle<FixedArray>(FixedArray::cast(*cached_answer));
- // The cache FixedArray is a COW-array and can therefore be reused.
- isolate->factory()->SetContent(result_array, cached_fixed_array);
- // The actual length of the result array is stored in the last element of
- // the backing store (the backing FixedArray may have a larger capacity).
- Object* cached_fixed_array_last_element =
- cached_fixed_array->get(cached_fixed_array->length() - 1);
- Smi* js_array_length = Smi::cast(cached_fixed_array_last_element);
- result_array->set_length(js_array_length);
- RegExpImpl::SetLastMatchInfo(
- last_match_array, subject, capture_count, NULL);
- return *result_array;
- }
- }
-
- RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
- if (global_cache.HasException()) return Failure::Exception();
-
- Handle<FixedArray> result_elements;
- if (result_array->HasFastObjectElements()) {
- result_elements =
- Handle<FixedArray>(FixedArray::cast(result_array->elements()));
- }
- if (result_elements.is_null() || result_elements->length() < 16) {
- result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
- }
-
- FixedArrayBuilder builder(result_elements);
-
- // Position to search from.
- int match_start = -1;
- int match_end = 0;
- bool first = true;
-
- // Two smis before and after the match, for very long strings.
- static const int kMaxBuilderEntriesPerRegExpMatch = 5;
-
- while (true) {
- int32_t* current_match = global_cache.FetchNext();
- if (current_match == NULL) break;
- match_start = current_match[0];
- builder.EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- if (match_end < match_start) {
- ReplacementStringBuilder::AddSubjectSlice(&builder,
- match_end,
- match_start);
- }
- match_end = current_match[1];
- {
- // Avoid accumulating new handles inside loop.
- HandleScope temp_scope(isolate);
- Handle<String> match;
- if (!first) {
- match = isolate->factory()->NewProperSubString(subject,
- match_start,
- match_end);
- } else {
- match = isolate->factory()->NewSubString(subject,
- match_start,
- match_end);
- first = false;
- }
-
- if (has_capture) {
- // Arguments array to replace function is match, captures, index and
- // subject, i.e., 3 + capture count in total.
- Handle<FixedArray> elements =
- isolate->factory()->NewFixedArray(3 + capture_count);
-
- elements->set(0, *match);
- for (int i = 1; i <= capture_count; i++) {
- int start = current_match[i * 2];
- if (start >= 0) {
- int end = current_match[i * 2 + 1];
- ASSERT(start <= end);
- Handle<String> substring =
- isolate->factory()->NewSubString(subject, start, end);
- elements->set(i, *substring);
- } else {
- ASSERT(current_match[i * 2 + 1] < 0);
- elements->set(i, isolate->heap()->undefined_value());
- }
- }
- elements->set(capture_count + 1, Smi::FromInt(match_start));
- elements->set(capture_count + 2, *subject);
- builder.Add(*isolate->factory()->NewJSArrayWithElements(elements));
- } else {
- builder.Add(*match);
- }
- }
- }
-
- if (global_cache.HasException()) return Failure::Exception();
-
- if (match_start >= 0) {
- // Finished matching, with at least one match.
- if (match_end < subject_length) {
- ReplacementStringBuilder::AddSubjectSlice(&builder,
- match_end,
- subject_length);
- }
-
- RegExpImpl::SetLastMatchInfo(
- last_match_array, subject, capture_count, NULL);
-
- if (subject_length > kMinLengthToCache) {
- // Store the length of the result array into the last element of the
- // backing FixedArray.
- builder.EnsureCapacity(1);
- Handle<FixedArray> fixed_array = builder.array();
- fixed_array->set(fixed_array->length() - 1,
- Smi::FromInt(builder.length()));
- // Cache the result and turn the FixedArray into a COW array.
- RegExpResultsCache::Enter(isolate->heap(),
- *subject,
- regexp->data(),
- *fixed_array,
- RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
- }
- return *builder.ToJSArray(result_array);
- } else {
- return isolate->heap()->null_value(); // No matches at all.
- }
-}
-
-
-// This is only called for StringReplaceGlobalRegExpWithFunction. This sets
-// lastMatchInfoOverride to maintain the last match info, so we don't need to
-// set any other last match array info.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
- ASSERT(args.length() == 4);
- HandleScope handles(isolate);
-
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- if (!subject->IsFlat()) FlattenString(subject);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
-
- ASSERT(regexp->GetFlags().is_global());
-
- if (regexp->CaptureCount() == 0) {
- return SearchRegExpMultiple<false>(
- isolate, subject, regexp, last_match_info, result_array);
- } else {
- return SearchRegExpMultiple<true>(
- isolate, subject, regexp, last_match_info, result_array);
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
- CONVERT_SMI_ARG_CHECKED(radix, 1);
- RUNTIME_ASSERT(2 <= radix && radix <= 36);
-
- // Fast case where the result is a one character string.
- if (args[0]->IsSmi()) {
- int value = args.smi_at(0);
- if (value >= 0 && value < radix) {
- // Character array used for conversion.
- static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
- return isolate->heap()->
- LookupSingleCharacterStringFromCode(kCharTable[value]);
- }
- }
-
- // Slow case.
- CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- if (isnan(value)) {
- return *isolate->factory()->nan_string();
- }
- if (isinf(value)) {
- if (value < 0) {
- return *isolate->factory()->minus_infinity_string();
- }
- return *isolate->factory()->infinity_string();
- }
- char* str = DoubleToRadixCString(value, radix);
- MaybeObject* result =
- isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
- DeleteArray(str);
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2IChecked(f_number);
- RUNTIME_ASSERT(f >= 0);
- char* str = DoubleToFixedCString(value, f);
- MaybeObject* res =
- isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
- DeleteArray(str);
- return res;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2IChecked(f_number);
- RUNTIME_ASSERT(f >= -1 && f <= 20);
- char* str = DoubleToExponentialCString(value, f);
- MaybeObject* res =
- isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
- DeleteArray(str);
- return res;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2IChecked(f_number);
- RUNTIME_ASSERT(f >= 1 && f <= 21);
- char* str = DoubleToPrecisionCString(value, f);
- MaybeObject* res =
- isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
- DeleteArray(str);
- return res;
-}
-
-
-// Returns a single character string where first character equals
-// string->Get(index).
-static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
- if (index < static_cast<uint32_t>(string->length())) {
- string->TryFlatten();
- return LookupSingleCharacterStringFromCode(
- string->GetIsolate(),
- string->Get(index));
- }
- return Execution::CharAt(string, index);
-}
-
-
-MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
- Handle<Object> object,
- uint32_t index) {
- // Handle [] indexing on Strings
- if (object->IsString()) {
- Handle<Object> result = GetCharAt(Handle<String>::cast(object), index);
- if (!result->IsUndefined()) return *result;
- }
-
- // Handle [] indexing on String objects
- if (object->IsStringObjectWithCharacterAt(index)) {
- Handle<JSValue> js_value = Handle<JSValue>::cast(object);
- Handle<Object> result =
- GetCharAt(Handle<String>(String::cast(js_value->value())), index);
- if (!result->IsUndefined()) return *result;
- }
-
- if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- return object->GetPrototype(isolate)->GetElement(index);
- }
-
- return object->GetElement(index);
-}
-
-
-MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key) {
- HandleScope scope(isolate);
-
- if (object->IsUndefined() || object->IsNull()) {
- Handle<Object> args[2] = { key, object };
- Handle<Object> error =
- isolate->factory()->NewTypeError("non_object_property_load",
- HandleVector(args, 2));
- return isolate->Throw(*error);
- }
-
- // Check if the given key is an array index.
- uint32_t index;
- if (key->ToArrayIndex(&index)) {
- return GetElementOrCharAt(isolate, object, index);
- }
-
- // Convert the key to a string - possibly by calling back into JavaScript.
- Handle<String> name;
- if (key->IsString()) {
- name = Handle<String>::cast(key);
- } else {
- bool has_pending_exception = false;
- Handle<Object> converted =
- Execution::ToString(key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- name = Handle<String>::cast(converted);
- }
-
- // Check if the name is trivially convertible to an index and get
- // the element if so.
- if (name->AsArrayIndex(&index)) {
- return GetElementOrCharAt(isolate, object, index);
- } else {
- return object->GetProperty(*name);
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
-
- return Runtime::GetObjectProperty(isolate, object, key);
-}
-
-
-// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- // Fast cases for getting named properties of the receiver JSObject
- // itself.
- //
- // The global proxy objects has to be excluded since LocalLookup on
- // the global proxy object can return a valid result even though the
- // global proxy object never has properties. This is the case
- // because the global proxy object forwards everything to its hidden
- // prototype including local lookups.
- //
- // Additionally, we need to make sure that we do not cache results
- // for objects that require access checks.
- if (args[0]->IsJSObject()) {
- if (!args[0]->IsJSGlobalProxy() &&
- !args[0]->IsAccessCheckNeeded() &&
- args[1]->IsString()) {
- JSObject* receiver = JSObject::cast(args[0]);
- String* key = String::cast(args[1]);
- if (receiver->HasFastProperties()) {
- // Attempt to use lookup cache.
- Map* receiver_map = receiver->map();
- KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
- int offset = keyed_lookup_cache->Lookup(receiver_map, key);
- if (offset != -1) {
- Object* value = receiver->FastPropertyAt(offset);
- return value->IsTheHole()
- ? isolate->heap()->undefined_value()
- : value;
- }
- // Lookup cache miss. Perform lookup and update the cache if
- // appropriate.
- LookupResult result(isolate);
- receiver->LocalLookup(key, &result);
- if (result.IsField()) {
- int offset = result.GetFieldIndex().field_index();
- keyed_lookup_cache->Update(receiver_map, key, offset);
- return receiver->FastPropertyAt(offset);
- }
- } else {
- // Attempt dictionary lookup.
- StringDictionary* dictionary = receiver->property_dictionary();
- int entry = dictionary->FindEntry(key);
- if ((entry != StringDictionary::kNotFound) &&
- (dictionary->DetailsAt(entry).type() == NORMAL)) {
- Object* value = dictionary->ValueAt(entry);
- if (!receiver->IsGlobalObject()) return value;
- value = JSGlobalPropertyCell::cast(value)->value();
- if (!value->IsTheHole()) return value;
- // If value is the hole do the general lookup.
- }
- }
- } else if (FLAG_smi_only_arrays && args.at<Object>(1)->IsSmi()) {
- // JSObject without a string key. If the key is a Smi, check for a
- // definite out-of-bounds access to elements, which is a strong indicator
- // that subsequent accesses will also call the runtime. Proactively
- // transition elements to FAST_*_ELEMENTS to avoid excessive boxing of
- // doubles for those future calls in the case that the elements would
- // become FAST_DOUBLE_ELEMENTS.
- Handle<JSObject> js_object(args.at<JSObject>(0));
- ElementsKind elements_kind = js_object->GetElementsKind();
- if (IsFastDoubleElementsKind(elements_kind)) {
- FixedArrayBase* elements = js_object->elements();
- if (args.at<Smi>(1)->value() >= elements->length()) {
- if (IsFastHoleyElementsKind(elements_kind)) {
- elements_kind = FAST_HOLEY_ELEMENTS;
- } else {
- elements_kind = FAST_ELEMENTS;
- }
- MaybeObject* maybe_object = TransitionElements(js_object,
- elements_kind,
- isolate);
- if (maybe_object->IsFailure()) return maybe_object;
- }
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
- !IsFastElementsKind(elements_kind));
- }
- }
- } else if (args[0]->IsString() && args[1]->IsSmi()) {
- // Fast case for string indexing using [] with a smi index.
- HandleScope scope(isolate);
- Handle<String> str = args.at<String>(0);
- int index = args.smi_at(1);
- if (index >= 0 && index < str->length()) {
- Handle<Object> result = GetCharAt(str, index);
- return *result;
- }
- }
-
- // Fall back to GetObjectProperty.
- return Runtime::GetObjectProperty(isolate,
- args.at<Object>(0),
- args.at<Object>(1));
-}
-
-
-static bool IsValidAccessor(Handle<Object> obj) {
- return obj->IsUndefined() || obj->IsSpecFunction() || obj->IsNull();
-}
-
-
-// Implements part of 8.12.9 DefineOwnProperty.
-// There are 3 cases that lead here:
-// Step 4b - define a new accessor property.
-// Steps 9c & 12 - replace an existing data property with an accessor property.
-// Step 12 - update an existing accessor property with an accessor or generic
-// descriptor.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
- ASSERT(args.length() == 5);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- RUNTIME_ASSERT(!obj->IsNull());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2);
- RUNTIME_ASSERT(IsValidAccessor(getter));
- CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
- RUNTIME_ASSERT(IsValidAccessor(setter));
- CONVERT_SMI_ARG_CHECKED(unchecked, 4);
- RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
-
- bool fast = obj->HasFastProperties();
- JSObject::DefineAccessor(obj, name, getter, setter, attr);
- if (fast) JSObject::TransformToFastProperties(obj, 0);
- return isolate->heap()->undefined_value();
-}
-
-// Implements part of 8.12.9 DefineOwnProperty.
-// There are 3 cases that lead here:
-// Step 4a - define a new data property.
-// Steps 9b & 12 - replace an existing accessor property with a data property.
-// Step 12 - update an existing data property with a data or generic
-// descriptor.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
- ASSERT(args.length() == 4);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, obj_value, 2);
- CONVERT_SMI_ARG_CHECKED(unchecked, 3);
- RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
-
- LookupResult result(isolate);
- js_object->LocalLookupRealNamedProperty(*name, &result);
-
- // Special case for callback properties.
- if (result.IsPropertyCallbacks()) {
- Object* callback = result.GetCallbackObject();
- // To be compatible with Safari we do not change the value on API objects
- // in Object.defineProperty(). Firefox disagrees here, and actually changes
- // the value.
- if (callback->IsAccessorInfo()) {
- return isolate->heap()->undefined_value();
- }
- // Avoid redefining foreign callback as data property, just use the stored
- // setter to update the value instead.
- // TODO(mstarzinger): So far this only works if property attributes don't
- // change, this should be fixed once we cleanup the underlying code.
- if (callback->IsForeign() && result.GetAttributes() == attr) {
- return js_object->SetPropertyWithCallback(callback,
- *name,
- *obj_value,
- result.holder(),
- kStrictMode);
- }
- }
-
- // Take special care when attributes are different and there is already
- // a property. For simplicity we normalize the property which enables us
- // to not worry about changing the instance_descriptor and creating a new
- // map. The current version of SetObjectProperty does not handle attributes
- // correctly in the case where a property is a field and is reset with
- // new attributes.
- if (result.IsFound() &&
- (attr != result.GetAttributes() || result.IsPropertyCallbacks())) {
- // New attributes - normalize to avoid writing to instance descriptor
- if (js_object->IsJSGlobalProxy()) {
- // Since the result is a property, the prototype will exist so
- // we don't have to check for null.
- js_object = Handle<JSObject>(JSObject::cast(js_object->GetPrototype()));
- }
- JSObject::NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
- // Use IgnoreAttributes version since a readonly property may be
- // overridden and SetProperty does not allow this.
- return js_object->SetLocalPropertyIgnoreAttributes(*name,
- *obj_value,
- attr);
- }
-
- return Runtime::ForceSetObjectProperty(isolate,
- js_object,
- name,
- obj_value,
- attr);
-}
-
-
-// Return property without being observable by accessors or interceptors.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) {
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, key, 1);
- LookupResult lookup(isolate);
- object->LookupRealNamedProperty(*key, &lookup);
- if (!lookup.IsFound()) return isolate->heap()->undefined_value();
- switch (lookup.type()) {
- case NORMAL:
- return lookup.holder()->GetNormalizedProperty(&lookup);
- case FIELD:
- return lookup.holder()->FastPropertyAt(
- lookup.GetFieldIndex().field_index());
- case CONSTANT_FUNCTION:
- return lookup.GetConstantFunction();
- case CALLBACKS:
- case HANDLER:
- case INTERCEPTOR:
- case TRANSITION:
- return isolate->heap()->undefined_value();
- case NONEXISTENT:
- UNREACHABLE();
- }
- return isolate->heap()->undefined_value();
-}
-
-
-MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode) {
- SetPropertyMode set_mode = attr == NONE ? SET_PROPERTY : DEFINE_PROPERTY;
- HandleScope scope(isolate);
-
- if (object->IsUndefined() || object->IsNull()) {
- Handle<Object> args[2] = { key, object };
- Handle<Object> error =
- isolate->factory()->NewTypeError("non_object_property_store",
- HandleVector(args, 2));
- return isolate->Throw(*error);
- }
-
- if (object->IsJSProxy()) {
- bool has_pending_exception = false;
- Handle<Object> name = Execution::ToString(key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- return JSProxy::cast(*object)->SetProperty(
- String::cast(*name), *value, attr, strict_mode);
- }
-
- // If the object isn't a JavaScript object, we ignore the store.
- if (!object->IsJSObject()) return *value;
-
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
-
- // Check if the given key is an array index.
- uint32_t index;
- if (key->ToArrayIndex(&index)) {
- // In Firefox/SpiderMonkey, Safari and Opera you can access the characters
- // of a string using [] notation. We need to support this too in
- // JavaScript.
- // In the case of a String object we just need to redirect the assignment to
- // the underlying string if the index is in range. Since the underlying
- // string does nothing with the assignment then we can ignore such
- // assignments.
- if (js_object->IsStringObjectWithCharacterAt(index)) {
- return *value;
- }
-
- js_object->ValidateElements();
- Handle<Object> result = JSObject::SetElement(
- js_object, index, value, attr, strict_mode, set_mode);
- js_object->ValidateElements();
- if (result.is_null()) return Failure::Exception();
- return *value;
- }
-
- if (key->IsString()) {
- Handle<Object> result;
- if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
- result = JSObject::SetElement(
- js_object, index, value, attr, strict_mode, set_mode);
- } else {
- Handle<String> key_string = Handle<String>::cast(key);
- key_string->TryFlatten();
- result = JSReceiver::SetProperty(
- js_object, key_string, value, attr, strict_mode);
- }
- if (result.is_null()) return Failure::Exception();
- return *value;
- }
-
- // Call-back into JavaScript to convert the key to a string.
- bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- Handle<String> name = Handle<String>::cast(converted);
-
- if (name->AsArrayIndex(&index)) {
- return js_object->SetElement(
- index, *value, attr, strict_mode, true, set_mode);
- } else {
- return js_object->SetProperty(*name, *value, attr, strict_mode);
- }
-}
-
-
-MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
- Handle<JSObject> js_object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr) {
- HandleScope scope(isolate);
-
- // Check if the given key is an array index.
- uint32_t index;
- if (key->ToArrayIndex(&index)) {
- // In Firefox/SpiderMonkey, Safari and Opera you can access the characters
- // of a string using [] notation. We need to support this too in
- // JavaScript.
- // In the case of a String object we just need to redirect the assignment to
- // the underlying string if the index is in range. Since the underlying
- // string does nothing with the assignment then we can ignore such
- // assignments.
- if (js_object->IsStringObjectWithCharacterAt(index)) {
- return *value;
- }
-
- return js_object->SetElement(
- index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
- }
-
- if (key->IsString()) {
- if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
- return js_object->SetElement(
- index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
- } else {
- Handle<String> key_string = Handle<String>::cast(key);
- key_string->TryFlatten();
- return js_object->SetLocalPropertyIgnoreAttributes(*key_string,
- *value,
- attr);
- }
- }
-
- // Call-back into JavaScript to convert the key to a string.
- bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- Handle<String> name = Handle<String>::cast(converted);
-
- if (name->AsArrayIndex(&index)) {
- return js_object->SetElement(
- index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
- } else {
- return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
- }
-}
-
-
-MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<Object> key) {
- HandleScope scope(isolate);
-
- // Check if the given key is an array index.
- uint32_t index;
- if (key->ToArrayIndex(&index)) {
- // In Firefox/SpiderMonkey, Safari and Opera you can access the
- // characters of a string using [] notation. In the case of a
- // String object we just need to redirect the deletion to the
- // underlying string if the index is in range. Since the
- // underlying string does nothing with the deletion, we can ignore
- // such deletions.
- if (receiver->IsStringObjectWithCharacterAt(index)) {
- return isolate->heap()->true_value();
- }
-
- return receiver->DeleteElement(index, JSReceiver::FORCE_DELETION);
- }
-
- Handle<String> key_string;
- if (key->IsString()) {
- key_string = Handle<String>::cast(key);
- } else {
- // Call-back into JavaScript to convert the key to a string.
- bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- key_string = Handle<String>::cast(converted);
- }
-
- key_string->TryFlatten();
- return receiver->DeleteProperty(*key_string, JSReceiver::FORCE_DELETION);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
- NoHandleAllocation ha(isolate);
- RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
-
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> value = args.at<Object>(2);
- CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3);
- RUNTIME_ASSERT(
- (unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- // Compute attributes.
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(unchecked_attributes);
-
- StrictModeFlag strict_mode = kNonStrictMode;
- if (args.length() == 5) {
- CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_flag, 4);
- strict_mode = strict_mode_flag;
- }
-
- return Runtime::SetObjectProperty(isolate,
- object,
- key,
- value,
- attributes,
- strict_mode);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsKind) {
- HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
- CONVERT_ARG_HANDLE_CHECKED(Map, map, 1);
- JSObject::TransitionElementsKind(array, map->elements_kind());
- return *array;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
- NoHandleAllocation ha(isolate);
- RUNTIME_ASSERT(args.length() == 1);
- Handle<Object> object = args.at<Object>(0);
- if (object->IsJSObject()) {
- Handle<JSObject> js_object(Handle<JSObject>::cast(object));
- ASSERT(!js_object->map()->is_observed());
- ElementsKind new_kind = js_object->HasFastHoleyElements()
- ? FAST_HOLEY_DOUBLE_ELEMENTS
- : FAST_DOUBLE_ELEMENTS;
- return TransitionElements(object, new_kind, isolate);
- } else {
- return *object;
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
- NoHandleAllocation ha(isolate);
- RUNTIME_ASSERT(args.length() == 1);
- Handle<Object> object = args.at<Object>(0);
- if (object->IsJSObject()) {
- Handle<JSObject> js_object(Handle<JSObject>::cast(object));
- ASSERT(!js_object->map()->is_observed());
- ElementsKind new_kind = js_object->HasFastHoleyElements()
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
- return TransitionElements(object, new_kind, isolate);
- } else {
- return *object;
- }
-}
-
-
-// Set the native flag on the function.
-// This is used to decide if we should transform null and undefined
-// into the global object when doing call and apply.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
- NoHandleAllocation ha(isolate);
- RUNTIME_ASSERT(args.length() == 1);
-
- Handle<Object> object = args.at<Object>(0);
-
- if (object->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(*object);
- func->shared()->set_native(true);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
- RUNTIME_ASSERT(args.length() == 5);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_SMI_ARG_CHECKED(store_index, 1);
- Handle<Object> value = args.at<Object>(2);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 3);
- CONVERT_SMI_ARG_CHECKED(literal_index, 4);
- HandleScope scope(isolate);
-
- Object* raw_boilerplate_object = literals->get(literal_index);
- Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object));
- ElementsKind elements_kind = object->GetElementsKind();
- ASSERT(IsFastElementsKind(elements_kind));
- // Smis should never trigger transitions.
- ASSERT(!value->IsSmi());
-
- if (value->IsNumber()) {
- ASSERT(IsFastSmiElementsKind(elements_kind));
- ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
- ? FAST_HOLEY_DOUBLE_ELEMENTS
- : FAST_DOUBLE_ELEMENTS;
- if (IsMoreGeneralElementsKindTransition(
- boilerplate_object->GetElementsKind(),
- transitioned_kind)) {
- JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
- }
- JSObject::TransitionElementsKind(object, transitioned_kind);
- ASSERT(IsFastDoubleElementsKind(object->GetElementsKind()));
- FixedDoubleArray* double_array = FixedDoubleArray::cast(object->elements());
- HeapNumber* number = HeapNumber::cast(*value);
- double_array->set(store_index, number->Number());
- } else {
- ASSERT(IsFastSmiElementsKind(elements_kind) ||
- IsFastDoubleElementsKind(elements_kind));
- ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
- JSObject::TransitionElementsKind(object, transitioned_kind);
- if (IsMoreGeneralElementsKindTransition(
- boilerplate_object->GetElementsKind(),
- transitioned_kind)) {
- JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
- }
- FixedArray* object_array = FixedArray::cast(object->elements());
- object_array->set(store_index, *value);
- }
- return *object;
-}
-
-
-// Check whether debugger and is about to step into the callback that is passed
-// to a built-in function such as Array.forEach.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugCallbackSupportsStepping) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!isolate->IsDebuggerActive() || !isolate->debug()->StepInActive()) {
- return isolate->heap()->false_value();
- }
- CONVERT_ARG_CHECKED(Object, callback, 0);
- // We do not step into the callback if it's a builtin or not even a function.
- if (!callback->IsJSFunction() || JSFunction::cast(callback)->IsBuiltin()) {
- return isolate->heap()->false_value();
- }
- return isolate->heap()->true_value();
-#else
- return isolate->heap()->false_value();
-#endif // ENABLE_DEBUGGER_SUPPORT
-}
-
-
-// Set one shot breakpoints for the callback function that is passed to a
-// built-in function such as Array.forEach to enable stepping into the callback.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = isolate->debug();
- if (!debug->IsStepping()) return isolate->heap()->undefined_value();
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, callback, 0);
- HandleScope scope(isolate);
- // When leaving the callback, step out has been activated, but not performed
- // if we do not leave the builtin. To be able to step into the callback
- // again, we need to clear the step out at this point.
- debug->ClearStepOut();
- debug->FloodWithOneShot(callback);
-#endif // ENABLE_DEBUGGER_SUPPORT
- return isolate->heap()->undefined_value();
-}
-
-
-// Set a local property, even if it is READ_ONLY. If the property does not
-// exist, it will be added with attributes NONE.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
- NoHandleAllocation ha(isolate);
- RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- CONVERT_ARG_CHECKED(String, name, 1);
- // Compute attributes.
- PropertyAttributes attributes = NONE;
- if (args.length() == 4) {
- CONVERT_SMI_ARG_CHECKED(unchecked_value, 3);
- // Only attribute bits should be set.
- RUNTIME_ASSERT(
- (unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- attributes = static_cast<PropertyAttributes>(unchecked_value);
- }
-
- return object->
- SetLocalPropertyIgnoreAttributes(name, args[2], attributes);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 3);
-
- CONVERT_ARG_CHECKED(JSReceiver, object, 0);
- CONVERT_ARG_CHECKED(String, key, 1);
- CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2);
- return object->DeleteProperty(key, (strict_mode == kStrictMode)
- ? JSReceiver::STRICT_DELETION
- : JSReceiver::NORMAL_DELETION);
-}
-
-
-static Object* HasLocalPropertyImplementation(Isolate* isolate,
- Handle<JSObject> object,
- Handle<String> key) {
- if (object->HasLocalProperty(*key)) return isolate->heap()->true_value();
- // Handle hidden prototypes. If there's a hidden prototype above this thing
- // then we have to check it for properties, because they are supposed to
- // look like they are on this object.
- Handle<Object> proto(object->GetPrototype(), isolate);
- if (proto->IsJSObject() &&
- Handle<JSObject>::cast(proto)->map()->is_hidden_prototype()) {
- return HasLocalPropertyImplementation(isolate,
- Handle<JSObject>::cast(proto),
- key);
- }
- return isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, key, 1);
-
- uint32_t index;
- const bool key_is_array_index = key->AsArrayIndex(&index);
-
- Object* obj = args[0];
- // Only JS objects can have properties.
- if (obj->IsJSObject()) {
- JSObject* object = JSObject::cast(obj);
- // Fast case: either the key is a real named property or it is not
- // an array index and there are no interceptors or hidden
- // prototypes.
- if (object->HasRealNamedProperty(key)) return isolate->heap()->true_value();
- Map* map = object->map();
- if (!key_is_array_index &&
- !map->has_named_interceptor() &&
- !HeapObject::cast(map->prototype())->map()->is_hidden_prototype()) {
- return isolate->heap()->false_value();
- }
- // Slow case.
- HandleScope scope(isolate);
- return HasLocalPropertyImplementation(isolate,
- Handle<JSObject>(object),
- Handle<String>(key));
- } else if (obj->IsString() && key_is_array_index) {
- // Well, there is one exception: Handle [] on strings.
- String* string = String::cast(obj);
- if (index < static_cast<uint32_t>(string->length())) {
- return isolate->heap()->true_value();
- }
- }
- return isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
- NoHandleAllocation na(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_CHECKED(String, key, 1);
-
- bool result = receiver->HasProperty(key);
- if (isolate->has_pending_exception()) return Failure::Exception();
- return isolate->heap()->ToBoolean(result);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
- NoHandleAllocation na(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
-
- bool result = receiver->HasElement(index);
- if (isolate->has_pending_exception()) return Failure::Exception();
- return isolate->heap()->ToBoolean(result);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- CONVERT_ARG_CHECKED(String, key, 1);
-
- PropertyAttributes att = object->GetLocalPropertyAttribute(key);
- return isolate->heap()->ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
- bool threw = false;
- Handle<JSArray> result = GetKeysFor(object, &threw);
- if (threw) return Failure::Exception();
- return *result;
-}
-
-
-// Returns either a FixedArray as Runtime_GetPropertyNames,
-// or, if the given object has an enum cache that contains
-// all enumerable properties of the object and its prototypes
-// have none, the map of the object. This is used to speed up
-// the check for deletions during a for-in.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSReceiver, raw_object, 0);
-
- if (raw_object->IsSimpleEnum()) return raw_object->map();
-
- HandleScope scope(isolate);
- Handle<JSReceiver> object(raw_object);
- bool threw = false;
- Handle<FixedArray> content =
- GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, &threw);
- if (threw) return Failure::Exception();
-
- // Test again, since cache may have been built by preceding call.
- if (object->IsSimpleEnum()) return object->map();
-
- return *content;
-}
-
-
-// Find the length of the prototype chain that is to to handled as one. If a
-// prototype object is hidden it is to be viewed as part of the the object it
-// is prototype for.
-static int LocalPrototypeChainLength(JSObject* obj) {
- int count = 1;
- Object* proto = obj->GetPrototype();
- while (proto->IsJSObject() &&
- JSObject::cast(proto)->map()->is_hidden_prototype()) {
- count++;
- proto = JSObject::cast(proto)->GetPrototype();
- }
- return count;
-}
-
-
-// Return the names of the local named properties.
-// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return isolate->heap()->undefined_value();
- }
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-
- // Skip the global proxy as it has no properties and always delegates to the
- // real global object.
- if (obj->IsJSGlobalProxy()) {
- // Only collect names if access is permitted.
- if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*obj,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
- return *isolate->factory()->NewJSArray(0);
- }
- obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
- }
-
- // Find the number of objects making up this.
- int length = LocalPrototypeChainLength(*obj);
-
- // Find the number of local properties for each of the objects.
- ScopedVector<int> local_property_count(length);
- int total_property_count = 0;
- Handle<JSObject> jsproto = obj;
- for (int i = 0; i < length; i++) {
- // Only collect names if access is permitted.
- if (jsproto->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*jsproto,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
- return *isolate->factory()->NewJSArray(0);
- }
- int n;
- n = jsproto->NumberOfLocalProperties();
- local_property_count[i] = n;
- total_property_count += n;
- if (i < length - 1) {
- jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
- }
- }
-
- // Allocate an array with storage for all the property names.
- Handle<FixedArray> names =
- isolate->factory()->NewFixedArray(total_property_count);
-
- // Get the property names.
- jsproto = obj;
- int proto_with_hidden_properties = 0;
- int next_copy_index = 0;
- for (int i = 0; i < length; i++) {
- jsproto->GetLocalPropertyNames(*names, next_copy_index);
- next_copy_index += local_property_count[i];
- if (jsproto->HasHiddenProperties()) {
- proto_with_hidden_properties++;
- }
- if (i < length - 1) {
- jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
- }
- }
-
- // Filter out name of hidden propeties object.
- if (proto_with_hidden_properties > 0) {
- Handle<FixedArray> old_names = names;
- names = isolate->factory()->NewFixedArray(
- names->length() - proto_with_hidden_properties);
- int dest_pos = 0;
- for (int i = 0; i < total_property_count; i++) {
- Object* name = old_names->get(i);
- if (name == isolate->heap()->hidden_string()) {
- continue;
- }
- names->set(dest_pos++, name);
- }
- }
-
- return *isolate->factory()->NewJSArrayWithElements(names);
-}
-
-
-// Return the names of the local indexed properties.
-// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalElementNames) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return isolate->heap()->undefined_value();
- }
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-
- int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
- Handle<FixedArray> names = isolate->factory()->NewFixedArray(n);
- obj->GetLocalElementKeys(*names, static_cast<PropertyAttributes>(NONE));
- return *isolate->factory()->NewJSArrayWithElements(names);
-}
-
-
-// Return information on whether an object has a named or indexed interceptor.
-// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetInterceptorInfo) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return Smi::FromInt(0);
- }
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-
- int result = 0;
- if (obj->HasNamedInterceptor()) result |= 2;
- if (obj->HasIndexedInterceptor()) result |= 1;
-
- return Smi::FromInt(result);
-}
-
-
-// Return property names from named interceptor.
-// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetNamedInterceptorPropertyNames) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-
- if (obj->HasNamedInterceptor()) {
- v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
- if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-// Return element names from indexed interceptor.
-// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetIndexedInterceptorElementNames) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-
- if (obj->HasIndexedInterceptor()) {
- v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
- if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
- ASSERT_EQ(args.length(), 1);
- CONVERT_ARG_CHECKED(JSObject, raw_object, 0);
- HandleScope scope(isolate);
- Handle<JSObject> object(raw_object);
-
- if (object->IsJSGlobalProxy()) {
- // Do access checks before going to the global object.
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
- return *isolate->factory()->NewJSArray(0);
- }
-
- Handle<Object> proto(object->GetPrototype(), isolate);
- // If proxy is detached we simply return an empty array.
- if (proto->IsNull()) return *isolate->factory()->NewJSArray(0);
- object = Handle<JSObject>::cast(proto);
- }
-
- bool threw = false;
- Handle<FixedArray> contents =
- GetKeysInFixedArrayFor(object, LOCAL_ONLY, &threw);
- if (threw) return Failure::Exception();
-
- // Some fast paths through GetKeysInFixedArrayFor reuse a cached
- // property array and since the result is mutable we have to create
- // a fresh clone on each invocation.
- int length = contents->length();
- Handle<FixedArray> copy = isolate->factory()->NewFixedArray(length);
- for (int i = 0; i < length; i++) {
- Object* entry = contents->get(i);
- if (entry->IsString()) {
- copy->set(i, entry);
- } else {
- ASSERT(entry->IsNumber());
- HandleScope scope(isolate);
- Handle<Object> entry_handle(entry, isolate);
- Handle<Object> entry_str =
- isolate->factory()->NumberToString(entry_handle);
- copy->set(i, *entry_str);
- }
- }
- return *isolate->factory()->NewJSArrayWithElements(copy);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- // Compute the frame holding the arguments.
- JavaScriptFrameIterator it(isolate);
- it.AdvanceToArgumentsFrame();
- JavaScriptFrame* frame = it.frame();
-
- // Get the actual number of provided arguments.
- const uint32_t n = frame->ComputeParametersCount();
-
- // Try to convert the key to an index. If successful and within
- // index return the the argument from the frame.
- uint32_t index;
- if (args[0]->ToArrayIndex(&index) && index < n) {
- return frame->GetParameter(index);
- }
-
- // Convert the key to a string.
- HandleScope scope(isolate);
- bool exception = false;
- Handle<Object> converted =
- Execution::ToString(args.at<Object>(0), &exception);
- if (exception) return Failure::Exception();
- Handle<String> key = Handle<String>::cast(converted);
-
- // Try to convert the string key into an array index.
- if (key->AsArrayIndex(&index)) {
- if (index < n) {
- return frame->GetParameter(index);
- } else {
- return isolate->initial_object_prototype()->GetElement(index);
- }
- }
-
- // Handle special arguments properties.
- if (key->Equals(isolate->heap()->length_string())) return Smi::FromInt(n);
- if (key->Equals(isolate->heap()->callee_string())) {
- Object* function = frame->function();
- if (function->IsJSFunction() &&
- !JSFunction::cast(function)->shared()->is_classic_mode()) {
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
- }
- return function;
- }
-
- // Lookup in the initial Object.prototype object.
- return isolate->initial_object_prototype()->GetProperty(*key);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
- ASSERT(args.length() == 1);
- Object* object = args[0];
- return (object->IsJSObject() && !object->IsGlobalObject())
- ? JSObject::cast(object)->TransformToFastProperties(0)
- : object;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- return args[0]->ToBoolean();
-}
-
-
-// Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
-// Possible optimizations: put the type string into the oddballs.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Typeof) {
- NoHandleAllocation ha(isolate);
-
- Object* obj = args[0];
- if (obj->IsNumber()) return isolate->heap()->number_string();
- HeapObject* heap_obj = HeapObject::cast(obj);
-
- // typeof an undetectable object is 'undefined'
- if (heap_obj->map()->is_undetectable()) {
- return isolate->heap()->undefined_string();
- }
-
- InstanceType instance_type = heap_obj->map()->instance_type();
- if (instance_type < FIRST_NONSTRING_TYPE) {
- return isolate->heap()->string_string();
- }
-
- switch (instance_type) {
- case ODDBALL_TYPE:
- if (heap_obj->IsTrue() || heap_obj->IsFalse()) {
- return isolate->heap()->boolean_string();
- }
- if (heap_obj->IsNull()) {
- return FLAG_harmony_typeof
- ? isolate->heap()->null_string()
- : isolate->heap()->object_string();
- }
- ASSERT(heap_obj->IsUndefined());
- return isolate->heap()->undefined_string();
- case JS_FUNCTION_TYPE:
- case JS_FUNCTION_PROXY_TYPE:
- return isolate->heap()->function_string();
- default:
- // For any kind of object not handled above, the spec rule for
- // host objects gives that it is okay to return "object"
- return isolate->heap()->object_string();
- }
-}
-
-
-static bool AreDigits(const uint8_t*s, int from, int to) {
- for (int i = from; i < to; i++) {
- if (s[i] < '0' || s[i] > '9') return false;
- }
-
- return true;
-}
-
-
-static int ParseDecimalInteger(const uint8_t*s, int from, int to) {
- ASSERT(to - from < 10); // Overflow is not possible.
- ASSERT(from < to);
- int d = s[from] - '0';
-
- for (int i = from + 1; i < to; i++) {
- d = 10 * d + (s[i] - '0');
- }
-
- return d;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(String, subject, 0);
- subject->TryFlatten();
-
- // Fast case: short integer or some sorts of junk values.
- int len = subject->length();
- if (subject->IsSeqOneByteString()) {
- if (len == 0) return Smi::FromInt(0);
-
- uint8_t const* data = SeqOneByteString::cast(subject)->GetChars();
- bool minus = (data[0] == '-');
- int start_pos = (minus ? 1 : 0);
-
- if (start_pos == len) {
- return isolate->heap()->nan_value();
- } else if (data[start_pos] > '9') {
- // Fast check for a junk value. A valid string may start from a
- // whitespace, a sign ('+' or '-'), the decimal point, a decimal digit or
- // the 'I' character ('Infinity'). All of that have codes not greater than
- // '9' except 'I' and &nbsp;.
- if (data[start_pos] != 'I' && data[start_pos] != 0xa0) {
- return isolate->heap()->nan_value();
- }
- } else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
- // The maximal/minimal smi has 10 digits. If the string has less digits we
- // know it will fit into the smi-data type.
- int d = ParseDecimalInteger(data, start_pos, len);
- if (minus) {
- if (d == 0) return isolate->heap()->minus_zero_value();
- d = -d;
- } else if (!subject->HasHashCode() &&
- len <= String::kMaxArrayIndexSize &&
- (len == 1 || data[0] != '0')) {
- // String hash is not calculated yet but all the data are present.
- // Update the hash field to speed up sequential convertions.
- uint32_t hash = StringHasher::MakeArrayIndexHash(d, len);
-#ifdef DEBUG
- subject->Hash(); // Force hash calculation.
- ASSERT_EQ(static_cast<int>(subject->hash_field()),
- static_cast<int>(hash));
-#endif
- subject->set_hash_field(hash);
- }
- return Smi::FromInt(d);
- }
- }
-
- // Slower case.
- return isolate->heap()->NumberFromDouble(
- StringToDouble(isolate->unicode_cache(), subject, ALLOW_HEX));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewString) {
- CONVERT_SMI_ARG_CHECKED(length, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
- if (length == 0) return isolate->heap()->empty_string();
- if (is_one_byte) {
- return isolate->heap()->AllocateRawOneByteString(length);
- } else {
- return isolate->heap()->AllocateRawTwoByteString(length);
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TruncateString) {
- CONVERT_ARG_CHECKED(SeqString, string, 0);
- CONVERT_SMI_ARG_CHECKED(new_length, 1);
- return string->Truncate(new_length);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) {
- ASSERT(args.length() == 1);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- Handle<String> string = FlattenGetString(source);
- String::FlatContent content = string->GetFlatContent();
- ASSERT(content.IsFlat());
- Handle<String> result =
- content.IsAscii() ? URIEscape::Escape<uint8_t>(isolate, source)
- : URIEscape::Escape<uc16>(isolate, source);
- if (result.is_null()) return Failure::OutOfMemoryException(0x12);
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) {
- ASSERT(args.length() == 1);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- Handle<String> string = FlattenGetString(source);
- String::FlatContent content = string->GetFlatContent();
- ASSERT(content.IsFlat());
- return content.IsAscii() ? *URIUnescape::Unescape<uint8_t>(isolate, source)
- : *URIUnescape::Unescape<uc16>(isolate, source);
-}
-
-
-static const unsigned int kQuoteTableLength = 128u;
-
-static const int kJsonQuotesCharactersPerEntry = 8;
-static const char* const JsonQuotes =
- "\\u0000 \\u0001 \\u0002 \\u0003 "
- "\\u0004 \\u0005 \\u0006 \\u0007 "
- "\\b \\t \\n \\u000b "
- "\\f \\r \\u000e \\u000f "
- "\\u0010 \\u0011 \\u0012 \\u0013 "
- "\\u0014 \\u0015 \\u0016 \\u0017 "
- "\\u0018 \\u0019 \\u001a \\u001b "
- "\\u001c \\u001d \\u001e \\u001f "
- " ! \\\" # "
- "$ % & ' "
- "( ) * + "
- ", - . / "
- "0 1 2 3 "
- "4 5 6 7 "
- "8 9 : ; "
- "< = > ? "
- "@ A B C "
- "D E F G "
- "H I J K "
- "L M N O "
- "P Q R S "
- "T U V W "
- "X Y Z [ "
- "\\\\ ] ^ _ "
- "` a b c "
- "d e f g "
- "h i j k "
- "l m n o "
- "p q r s "
- "t u v w "
- "x y z { "
- "| } ~ \177 ";
-
-
-// For a string that is less than 32k characters it should always be
-// possible to allocate it in new space.
-static const int kMaxGuaranteedNewSpaceString = 32 * 1024;
-
-
-// Doing JSON quoting cannot make the string more than this many times larger.
-static const int kJsonQuoteWorstCaseBlowup = 6;
-
-static const int kSpaceForQuotesAndComma = 3;
-static const int kSpaceForBrackets = 2;
-
-// Covers the entire ASCII range (all other characters are unchanged by JSON
-// quoting).
-static const byte JsonQuoteLengths[kQuoteTableLength] = {
- 6, 6, 6, 6, 6, 6, 6, 6,
- 2, 2, 2, 6, 2, 2, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 6,
- 1, 1, 2, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 2, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
-};
-
-
-template <typename StringType>
-MaybeObject* AllocateRawString(Isolate* isolate, int length);
-
-
-template <>
-MaybeObject* AllocateRawString<SeqTwoByteString>(Isolate* isolate, int length) {
- return isolate->heap()->AllocateRawTwoByteString(length);
-}
-
-
-template <>
-MaybeObject* AllocateRawString<SeqOneByteString>(Isolate* isolate, int length) {
- return isolate->heap()->AllocateRawOneByteString(length);
-}
-
-
-template <typename Char, typename StringType, bool comma>
-static MaybeObject* SlowQuoteJsonString(Isolate* isolate,
- Vector<const Char> characters) {
- int length = characters.length();
- const Char* read_cursor = characters.start();
- const Char* end = read_cursor + length;
- const int kSpaceForQuotes = 2 + (comma ? 1 :0);
- int quoted_length = kSpaceForQuotes;
- while (read_cursor < end) {
- Char c = *(read_cursor++);
- if (static_cast<unsigned>(c) >= kQuoteTableLength) {
- quoted_length++;
- } else {
- quoted_length += JsonQuoteLengths[static_cast<unsigned>(c)];
- }
- }
- MaybeObject* new_alloc = AllocateRawString<StringType>(isolate,
- quoted_length);
- Object* new_object;
- if (!new_alloc->ToObject(&new_object)) {
- return new_alloc;
- }
- StringType* new_string = StringType::cast(new_object);
-
- Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize);
- if (comma) *(write_cursor++) = ',';
- *(write_cursor++) = '"';
-
- read_cursor = characters.start();
- while (read_cursor < end) {
- Char c = *(read_cursor++);
- if (static_cast<unsigned>(c) >= kQuoteTableLength) {
- *(write_cursor++) = c;
- } else {
- int len = JsonQuoteLengths[static_cast<unsigned>(c)];
- const char* replacement = JsonQuotes +
- static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
- for (int i = 0; i < len; i++) {
- *write_cursor++ = *replacement++;
- }
- }
- }
- *(write_cursor++) = '"';
- return new_string;
-}
-
-
-template <typename SinkChar, typename SourceChar>
-static inline SinkChar* WriteQuoteJsonString(
- Isolate* isolate,
- SinkChar* write_cursor,
- Vector<const SourceChar> characters) {
- // SinkChar is only char if SourceChar is guaranteed to be char.
- ASSERT(sizeof(SinkChar) >= sizeof(SourceChar));
- const SourceChar* read_cursor = characters.start();
- const SourceChar* end = read_cursor + characters.length();
- *(write_cursor++) = '"';
- while (read_cursor < end) {
- SourceChar c = *(read_cursor++);
- if (static_cast<unsigned>(c) >= kQuoteTableLength) {
- *(write_cursor++) = static_cast<SinkChar>(c);
- } else {
- int len = JsonQuoteLengths[static_cast<unsigned>(c)];
- const char* replacement = JsonQuotes +
- static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
- write_cursor[0] = replacement[0];
- if (len > 1) {
- write_cursor[1] = replacement[1];
- if (len > 2) {
- ASSERT(len == 6);
- write_cursor[2] = replacement[2];
- write_cursor[3] = replacement[3];
- write_cursor[4] = replacement[4];
- write_cursor[5] = replacement[5];
- }
- }
- write_cursor += len;
- }
- }
- *(write_cursor++) = '"';
- return write_cursor;
-}
-
-
-template <typename Char, typename StringType, bool comma>
-static MaybeObject* QuoteJsonString(Isolate* isolate,
- Vector<const Char> characters) {
- int length = characters.length();
- isolate->counters()->quote_json_char_count()->Increment(length);
- int worst_case_length =
- length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotesAndComma;
- if (worst_case_length > kMaxGuaranteedNewSpaceString) {
- return SlowQuoteJsonString<Char, StringType, comma>(isolate, characters);
- }
-
- MaybeObject* new_alloc = AllocateRawString<StringType>(isolate,
- worst_case_length);
- Object* new_object;
- if (!new_alloc->ToObject(&new_object)) {
- return new_alloc;
- }
- if (!isolate->heap()->new_space()->Contains(new_object)) {
- // Even if our string is small enough to fit in new space we still have to
- // handle it being allocated in old space as may happen in the third
- // attempt. See CALL_AND_RETRY in heap-inl.h and similar code in
- // CEntryStub::GenerateCore.
- return SlowQuoteJsonString<Char, StringType, comma>(isolate, characters);
- }
- StringType* new_string = StringType::cast(new_object);
- ASSERT(isolate->heap()->new_space()->Contains(new_string));
-
- Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize);
- if (comma) *(write_cursor++) = ',';
- write_cursor = WriteQuoteJsonString<Char, Char>(isolate,
- write_cursor,
- characters);
- int final_length = static_cast<int>(
- write_cursor - reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize));
- isolate->heap()->new_space()->
- template ShrinkStringAtAllocationBoundary<StringType>(
- new_string, final_length);
- return new_string;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
- NoHandleAllocation ha(isolate);
- CONVERT_ARG_CHECKED(String, str, 0);
- if (!str->IsFlat()) {
- MaybeObject* try_flatten = str->TryFlatten();
- Object* flat;
- if (!try_flatten->ToObject(&flat)) {
- return try_flatten;
- }
- str = String::cast(flat);
- ASSERT(str->IsFlat());
- }
- String::FlatContent flat = str->GetFlatContent();
- ASSERT(flat.IsFlat());
- if (flat.IsTwoByte()) {
- return QuoteJsonString<uc16, SeqTwoByteString, false>(isolate,
- flat.ToUC16Vector());
- } else {
- return QuoteJsonString<uint8_t, SeqOneByteString, false>(
- isolate,
- flat.ToOneByteVector());
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) {
- NoHandleAllocation ha(isolate);
- CONVERT_ARG_CHECKED(String, str, 0);
- if (!str->IsFlat()) {
- MaybeObject* try_flatten = str->TryFlatten();
- Object* flat;
- if (!try_flatten->ToObject(&flat)) {
- return try_flatten;
- }
- str = String::cast(flat);
- ASSERT(str->IsFlat());
- }
- String::FlatContent flat = str->GetFlatContent();
- if (flat.IsTwoByte()) {
- return QuoteJsonString<uc16, SeqTwoByteString, true>(isolate,
- flat.ToUC16Vector());
- } else {
- return QuoteJsonString<uint8_t, SeqOneByteString, true>(
- isolate,
- flat.ToOneByteVector());
- }
-}
-
-
-template <typename Char, typename StringType>
-static MaybeObject* QuoteJsonStringArray(Isolate* isolate,
- FixedArray* array,
- int worst_case_length) {
- int length = array->length();
-
- MaybeObject* new_alloc = AllocateRawString<StringType>(isolate,
- worst_case_length);
- Object* new_object;
- if (!new_alloc->ToObject(&new_object)) {
- return new_alloc;
- }
- if (!isolate->heap()->new_space()->Contains(new_object)) {
- // Even if our string is small enough to fit in new space we still have to
- // handle it being allocated in old space as may happen in the third
- // attempt. See CALL_AND_RETRY in heap-inl.h and similar code in
- // CEntryStub::GenerateCore.
- return isolate->heap()->undefined_value();
- }
- AssertNoAllocation no_gc;
- StringType* new_string = StringType::cast(new_object);
- ASSERT(isolate->heap()->new_space()->Contains(new_string));
-
- Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize);
- *(write_cursor++) = '[';
- for (int i = 0; i < length; i++) {
- if (i != 0) *(write_cursor++) = ',';
- String* str = String::cast(array->get(i));
- String::FlatContent content = str->GetFlatContent();
- ASSERT(content.IsFlat());
- if (content.IsTwoByte()) {
- write_cursor = WriteQuoteJsonString<Char, uc16>(isolate,
- write_cursor,
- content.ToUC16Vector());
- } else {
- write_cursor =
- WriteQuoteJsonString<Char, uint8_t>(isolate,
- write_cursor,
- content.ToOneByteVector());
- }
- }
- *(write_cursor++) = ']';
-
- int final_length = static_cast<int>(
- write_cursor - reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize));
- isolate->heap()->new_space()->
- template ShrinkStringAtAllocationBoundary<StringType>(
- new_string, final_length);
- return new_string;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSArray, array, 0);
-
- if (!array->HasFastObjectElements()) {
- return isolate->heap()->undefined_value();
- }
- FixedArray* elements = FixedArray::cast(array->elements());
- int n = elements->length();
- bool ascii = true;
- int total_length = 0;
-
- for (int i = 0; i < n; i++) {
- Object* elt = elements->get(i);
- if (!elt->IsString()) return isolate->heap()->undefined_value();
- String* element = String::cast(elt);
- if (!element->IsFlat()) return isolate->heap()->undefined_value();
- total_length += element->length();
- if (ascii && element->IsTwoByteRepresentation()) {
- ascii = false;
- }
- }
-
- int worst_case_length =
- kSpaceForBrackets + n * kSpaceForQuotesAndComma
- + total_length * kJsonQuoteWorstCaseBlowup;
-
- if (worst_case_length > kMaxGuaranteedNewSpaceString) {
- return isolate->heap()->undefined_value();
- }
-
- if (ascii) {
- return QuoteJsonStringArray<char, SeqOneByteString>(isolate,
- elements,
- worst_case_length);
- } else {
- return QuoteJsonStringArray<uc16, SeqTwoByteString>(isolate,
- elements,
- worst_case_length);
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BasicJSONStringify) {
- ASSERT(args.length() == 1);
- HandleScope scope(isolate);
- BasicJsonStringifier stringifier(isolate);
- return stringifier.Stringify(Handle<Object>(args[0], isolate));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
- NoHandleAllocation ha(isolate);
-
- CONVERT_ARG_CHECKED(String, s, 0);
- CONVERT_SMI_ARG_CHECKED(radix, 1);
-
- s->TryFlatten();
-
- RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
- double value = StringToInt(isolate->unicode_cache(), s, radix);
- return isolate->heap()->NumberFromDouble(value);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
- NoHandleAllocation ha(isolate);
- CONVERT_ARG_CHECKED(String, str, 0);
-
- // ECMA-262 section 15.1.2.3, empty string is NaN
- double value = StringToDouble(isolate->unicode_cache(),
- str, ALLOW_TRAILING_JUNK, OS::nan_value());
-
- // Create a number object from the value.
- return isolate->heap()->NumberFromDouble(value);
-}
-
-
-template <class Converter>
-MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
- Isolate* isolate,
- String* s,
- int length,
- int input_string_length,
- unibrow::Mapping<Converter, 128>* mapping) {
- // We try this twice, once with the assumption that the result is no longer
- // than the input and, if that assumption breaks, again with the exact
- // length. This may not be pretty, but it is nicer than what was here before
- // and I hereby claim my vaffel-is.
- //
- // Allocate the resulting string.
- //
- // NOTE: This assumes that the upper/lower case of an ASCII
- // character is also ASCII. This is currently the case, but it
- // might break in the future if we implement more context and locale
- // dependent upper/lower conversions.
- Object* o;
- { MaybeObject* maybe_o = s->IsOneByteRepresentation()
- ? isolate->heap()->AllocateRawOneByteString(length)
- : isolate->heap()->AllocateRawTwoByteString(length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- String* result = String::cast(o);
- bool has_changed_character = false;
-
- // Convert all characters to upper case, assuming that they will fit
- // in the buffer
- Access<ConsStringIteratorOp> op(
- isolate->runtime_state()->string_iterator());
- StringCharacterStream stream(s, op.value());
- unibrow::uchar chars[Converter::kMaxWidth];
- // We can assume that the string is not empty
- uc32 current = stream.GetNext();
- for (int i = 0; i < length;) {
- bool has_next = stream.HasMore();
- uc32 next = has_next ? stream.GetNext() : 0;
- int char_length = mapping->get(current, next, chars);
- if (char_length == 0) {
- // The case conversion of this character is the character itself.
- result->Set(i, current);
- i++;
- } else if (char_length == 1) {
- // Common case: converting the letter resulted in one character.
- ASSERT(static_cast<uc32>(chars[0]) != current);
- result->Set(i, chars[0]);
- has_changed_character = true;
- i++;
- } else if (length == input_string_length) {
- // We've assumed that the result would be as long as the
- // input but here is a character that converts to several
- // characters. No matter, we calculate the exact length
- // of the result and try the whole thing again.
- //
- // Note that this leaves room for optimization. We could just
- // memcpy what we already have to the result string. Also,
- // the result string is the last object allocated we could
- // "realloc" it and probably, in the vast majority of cases,
- // extend the existing string to be able to hold the full
- // result.
- int next_length = 0;
- if (has_next) {
- next_length = mapping->get(next, 0, chars);
- if (next_length == 0) next_length = 1;
- }
- int current_length = i + char_length + next_length;
- while (stream.HasMore()) {
- current = stream.GetNext();
- // NOTE: we use 0 as the next character here because, while
- // the next character may affect what a character converts to,
- // it does not in any case affect the length of what it convert
- // to.
- int char_length = mapping->get(current, 0, chars);
- if (char_length == 0) char_length = 1;
- current_length += char_length;
- if (current_length > Smi::kMaxValue) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x13);
- }
- }
- // Try again with the real length.
- return Smi::FromInt(current_length);
- } else {
- for (int j = 0; j < char_length; j++) {
- result->Set(i, chars[j]);
- i++;
- }
- has_changed_character = true;
- }
- current = next;
- }
- if (has_changed_character) {
- return result;
- } else {
- // If we didn't actually change anything in doing the conversion
- // we simple return the result and let the converted string
- // become garbage; there is no reason to keep two identical strings
- // alive.
- return s;
- }
-}
-
-
-namespace {
-
-static const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF;
-#ifdef ENABLE_LATIN_1
-static const uintptr_t kAsciiMask = kOneInEveryByte << 7;
-#endif
-
-// Given a word and two range boundaries returns a word with high bit
-// set in every byte iff the corresponding input byte was strictly in
-// the range (m, n). All the other bits in the result are cleared.
-// This function is only useful when it can be inlined and the
-// boundaries are statically known.
-// Requires: all bytes in the input word and the boundaries must be
-// ASCII (less than 0x7F).
-static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
- // Use strict inequalities since in edge cases the function could be
- // further simplified.
- ASSERT(0 < m && m < n);
-#ifndef ENABLE_LATIN_1
- // Every byte in an ASCII string is less than or equal to 0x7F.
- ASSERT((w & (kOneInEveryByte * 0x7F)) == w);
- ASSERT(n < 0x7F);
-#endif
- // Has high bit set in every w byte less than n.
- uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
- // Has high bit set in every w byte greater than m.
- uintptr_t tmp2 = w + kOneInEveryByte * (0x7F - m);
- return (tmp1 & tmp2 & (kOneInEveryByte * 0x80));
-}
-
-
-enum AsciiCaseConversion {
- ASCII_TO_LOWER,
- ASCII_TO_UPPER
-};
-
-
-template <AsciiCaseConversion dir>
-struct FastAsciiConverter {
-#ifdef ENABLE_LATIN_1
- static bool Convert(char* dst, char* src, int length, bool* changed_out) {
-#else
- static bool Convert(char* dst, char* src, int length) {
-#endif
-#ifdef DEBUG
- char* saved_dst = dst;
- char* saved_src = src;
-#endif
- // We rely on the distance between upper and lower case letters
- // being a known power of 2.
- ASSERT('a' - 'A' == (1 << 5));
- // Boundaries for the range of input characters than require conversion.
- const char lo = (dir == ASCII_TO_LOWER) ? 'A' - 1 : 'a' - 1;
- const char hi = (dir == ASCII_TO_LOWER) ? 'Z' + 1 : 'z' + 1;
- bool changed = false;
-#ifdef ENABLE_LATIN_1
- uintptr_t or_acc = 0;
-#endif
- char* const limit = src + length;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- // Process the prefix of the input that requires no conversion one
- // (machine) word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
-#ifdef ENABLE_LATIN_1
- or_acc |= w;
-#endif
- if (AsciiRangeMask(w, lo, hi) != 0) {
- changed = true;
- break;
- }
- *reinterpret_cast<uintptr_t*>(dst) = w;
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
- }
- // Process the remainder of the input performing conversion when
- // required one word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
-#ifdef ENABLE_LATIN_1
- or_acc |= w;
-#endif
- uintptr_t m = AsciiRangeMask(w, lo, hi);
- // The mask has high (7th) bit set in every byte that needs
- // conversion and we know that the distance between cases is
- // 1 << 5.
- *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
- }
-#endif
- // Process the last few bytes of the input (or the whole input if
- // unaligned access is not supported).
- while (src < limit) {
- char c = *src;
-#ifdef ENABLE_LATIN_1
- or_acc |= c;
-#endif
- if (lo < c && c < hi) {
- c ^= (1 << 5);
- changed = true;
- }
- *dst = c;
- ++src;
- ++dst;
- }
-#ifdef ENABLE_LATIN_1
- if ((or_acc & kAsciiMask) != 0) {
- return false;
- }
-#endif
-#ifdef DEBUG
- CheckConvert(saved_dst, saved_src, length, changed);
-#endif
-#ifdef ENABLE_LATIN_1
- *changed_out = changed;
- return true;
-#else
- return changed;
-#endif
- }
-
-#ifdef DEBUG
- static void CheckConvert(char* dst, char* src, int length, bool changed) {
- bool expected_changed = false;
- for (int i = 0; i < length; i++) {
- if (dst[i] == src[i]) continue;
- expected_changed = true;
- if (dir == ASCII_TO_LOWER) {
- ASSERT('A' <= src[i] && src[i] <= 'Z');
- ASSERT(dst[i] == src[i] + ('a' - 'A'));
- } else {
- ASSERT(dir == ASCII_TO_UPPER);
- ASSERT('a' <= src[i] && src[i] <= 'z');
- ASSERT(dst[i] == src[i] - ('a' - 'A'));
- }
- }
- ASSERT(expected_changed == changed);
- }
-#endif
-};
-
-
// Traits bundle selecting the lower-casing implementations used by
// ConvertCase: the unibrow mapping for the general path and the fast
// converter for sequential ASCII strings.
struct ToLowerTraits {
  typedef unibrow::ToLowercase UnibrowConverter;

  typedef FastAsciiConverter<ASCII_TO_LOWER> AsciiConverter;
};
-
-
// Traits bundle selecting the upper-casing implementations used by
// ConvertCase (see ToLowerTraits).
struct ToUpperTraits {
  typedef unibrow::ToUppercase UnibrowConverter;

  typedef FastAsciiConverter<ASCII_TO_UPPER> AsciiConverter;
};
-
-} // namespace
-
-
// Shared implementation of String.prototype.toLowerCase/toUpperCase.
// ConvertTraits supplies the unibrow mapping type and the fast ASCII
// converter (ToLowerTraits / ToUpperTraits above).  Takes the fast
// sequential-one-byte path when possible and falls back to
// ConvertCaseHelper for everything else.
template <typename ConvertTraits>
MUST_USE_RESULT static MaybeObject* ConvertCase(
    Arguments args,
    Isolate* isolate,
    unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
  NoHandleAllocation ha(isolate);
  CONVERT_ARG_CHECKED(String, s, 0);
  s = s->TryFlattenGetString();

  const int length = s->length();
  // Assume that the string is not empty; we need this assumption later
  if (length == 0) return s;

  // Simpler handling of ASCII strings.
  //
  // NOTE: This assumes that the upper/lower case of an ASCII
  // character is also ASCII.  This is currently the case, but it
  // might break in the future if we implement more context and locale
  // dependent upper/lower conversions.
  if (s->IsSeqOneByteString()) {
    Object* o;
    { MaybeObject* maybe_o = isolate->heap()->AllocateRawOneByteString(length);
      if (!maybe_o->ToObject(&o)) return maybe_o;
    }
    SeqOneByteString* result = SeqOneByteString::cast(o);
#ifndef ENABLE_LATIN_1
    bool has_changed_character = ConvertTraits::AsciiConverter::Convert(
        reinterpret_cast<char*>(result->GetChars()),
        reinterpret_cast<char*>(SeqOneByteString::cast(s)->GetChars()),
        length);
    // If nothing changed, the freshly allocated copy is dropped and the
    // input string is returned unchanged.
    return has_changed_character ? result : s;
#else
    bool has_changed_character;
    bool is_ascii = ConvertTraits::AsciiConverter::Convert(
        reinterpret_cast<char*>(result->GetChars()),
        reinterpret_cast<char*>(SeqOneByteString::cast(s)->GetChars()),
        length,
        &has_changed_character);
    // If not ASCII, we discard the result and take the 2 byte path.
    if (is_ascii) {
      return has_changed_character ? result : s;
    }
#endif
  }

  Object* answer;
  { MaybeObject* maybe_answer =
        ConvertCaseHelper(isolate, s, length, length, mapping);
    if (!maybe_answer->ToObject(&answer)) return maybe_answer;
  }
  if (answer->IsSmi()) {
    // A Smi answer signals that the converted string has a different
    // length; retry with the correct length.
    { MaybeObject* maybe_answer =
          ConvertCaseHelper(isolate,
                            s, Smi::cast(answer)->value(), length, mapping);
      if (!maybe_answer->ToObject(&answer)) return maybe_answer;
    }
  }
  return answer;
}
-
-
// Runtime entry for String.prototype.toLowerCase.
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToLowerCase) {
  return ConvertCase<ToLowerTraits>(
      args, isolate, isolate->runtime_state()->to_lower_mapping());
}
-
-
// Runtime entry for String.prototype.toUpperCase.
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) {
  return ConvertCase<ToUpperTraits>(
      args, isolate, isolate->runtime_state()->to_upper_mapping());
}
-
-
-static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
- return unibrow::WhiteSpace::Is(c) || c == 0x200b || c == 0xfeff;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 3);
-
- CONVERT_ARG_CHECKED(String, s, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(trimLeft, 1);
- CONVERT_BOOLEAN_ARG_CHECKED(trimRight, 2);
-
- s->TryFlatten();
- int length = s->length();
-
- int left = 0;
- if (trimLeft) {
- while (left < length && IsTrimWhiteSpace(s->Get(left))) {
- left++;
- }
- }
-
- int right = length;
- if (trimRight) {
- while (right > left && IsTrimWhiteSpace(s->Get(right - 1))) {
- right--;
- }
- }
- return s->SubString(left, right);
-}
-
-
// Runtime entry for String.prototype.split with a string separator.
// Splits `subject` at every occurrence of `pattern` (which must be
// non-empty), producing at most `limit` parts.  Results for un-limited
// calls (limit == 0xffffffffu, the ToUint32 of "no limit") are cached in
// the RegExp results cache.
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
  ASSERT(args.length() == 3);
  HandleScope handle_scope(isolate);
  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
  CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
  CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);

  int subject_length = subject->length();
  int pattern_length = pattern->length();
  RUNTIME_ASSERT(pattern_length > 0);

  // Only un-limited splits are cached (and later entered into the cache).
  if (limit == 0xffffffffu) {
    Handle<Object> cached_answer(
        RegExpResultsCache::Lookup(isolate->heap(),
                                   *subject,
                                   *pattern,
                                   RegExpResultsCache::STRING_SPLIT_SUBSTRINGS),
        isolate);
    if (*cached_answer != Smi::FromInt(0)) {
      // The cache FixedArray is a COW-array and can therefore be reused.
      Handle<JSArray> result =
          isolate->factory()->NewJSArrayWithElements(
              Handle<FixedArray>::cast(cached_answer));
      return *result;
    }
  }

  // The limit can be very large (0xffffffffu), but since the pattern
  // isn't empty, we can never create more parts than ~half the length
  // of the subject.

  if (!subject->IsFlat()) FlattenString(subject);

  static const int kMaxInitialListCapacity = 16;

  Zone* zone = isolate->runtime_zone();
  ZoneScope scope(zone, DELETE_ON_EXIT);

  // Find (up to limit) indices of separator and end-of-string in subject
  int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
  ZoneList<int> indices(initial_capacity, zone);
  if (!pattern->IsFlat()) FlattenString(pattern);

  FindStringIndicesDispatch(isolate, *subject, *pattern, &indices, limit, zone);

  // The trailing part (up to end-of-string) counts as one more part,
  // unless the limit was already reached.
  if (static_cast<uint32_t>(indices.length()) < limit) {
    indices.Add(subject_length, zone);
  }

  // The list indices now contains the end of each part to create.

  // Create JSArray of substrings separated by separator.
  int part_count = indices.length();

  Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
  MaybeObject* maybe_result = result->EnsureCanContainHeapObjectElements();
  if (maybe_result->IsFailure()) return maybe_result;
  result->set_length(Smi::FromInt(part_count));

  ASSERT(result->HasFastObjectElements());

  // Fast path: the whole subject is the single part.
  if (part_count == 1 && indices.at(0) == subject_length) {
    FixedArray::cast(result->elements())->set(0, *subject);
    return *result;
  }

  Handle<FixedArray> elements(FixedArray::cast(result->elements()));
  int part_start = 0;
  for (int i = 0; i < part_count; i++) {
    HandleScope local_loop_handle(isolate);
    int part_end = indices.at(i);
    Handle<String> substring =
        isolate->factory()->NewProperSubString(subject, part_start, part_end);
    elements->set(i, *substring);
    part_start = part_end + pattern_length;
  }

  if (limit == 0xffffffffu) {
    if (result->HasFastObjectElements()) {
      RegExpResultsCache::Enter(isolate->heap(),
                                *subject,
                                *pattern,
                                *elements,
                                RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
    }
  }

  return *result;
}
-
-
-// Copies ASCII characters to the given fixed array looking up
-// one-char strings in the cache. Gives up on the first char that is
-// not in the cache and fills the remainder with smi zeros. Returns
-// the length of the successfully copied prefix.
static int CopyCachedAsciiCharsToArray(Heap* heap,
                                       const uint8_t* chars,
                                       FixedArray* elements,
                                       int length) {
  AssertNoAllocation no_gc;
  FixedArray* ascii_cache = heap->single_character_string_cache();
  Object* undefined = heap->undefined_value();
  int i;
  // A single write-barrier mode may be cached and reused for all writes
  // because no allocation can occur in this scope (AssertNoAllocation).
  WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
  for (i = 0; i < length; ++i) {
    Object* value = ascii_cache->get(chars[i]);
    // An undefined entry means the one-character string has not been
    // cached yet: stop and let the caller create the remaining strings.
    if (value == undefined) break;
    elements->set(i, value, mode);
  }
  if (i < length) {
    // Fill the remainder with Smi zeros so the GC never observes
    // uninitialized slots; relies on Smi zero being the zero word.
    ASSERT(Smi::FromInt(0) == 0);
    memset(elements->data_start() + i, 0, kPointerSize * (length - i));
  }
#ifdef DEBUG
  for (int j = 0; j < length; ++j) {
    Object* element = elements->get(j);
    ASSERT(element == Smi::FromInt(0) ||
           (element->IsString() && String::cast(element)->LooksValid()));
  }
#endif
  return i;
}
-
-
-// Converts a String to JSArray.
-// For example, "foo" => ["f", "o", "o"].
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
  HandleScope scope(isolate);
  ASSERT(args.length() == 2);
  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
  CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);

  s = FlattenGetString(s);
  // Never produce more than `limit` elements.
  const int length = static_cast<int>(Min<uint32_t>(s->length(), limit));

  Handle<FixedArray> elements;
  int position = 0;
  if (s->IsFlat() && s->IsOneByteRepresentation()) {
    // Try using cached chars where possible.
    Object* obj;
    { MaybeObject* maybe_obj =
          isolate->heap()->AllocateUninitializedFixedArray(length);
      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
    }
    elements = Handle<FixedArray>(FixedArray::cast(obj), isolate);
    String::FlatContent content = s->GetFlatContent();
    if (content.IsAscii()) {
      Vector<const uint8_t> chars = content.ToOneByteVector();
      // Note, this will initialize all elements (not only the prefix)
      // to prevent GC from seeing partially initialized array.
      position = CopyCachedAsciiCharsToArray(isolate->heap(),
                                             chars.start(),
                                             *elements,
                                             length);
    } else {
      // Not ASCII: pre-fill with undefined so the uninitialized array is
      // fully valid before the allocating loop below runs.
      MemsetPointer(elements->data_start(),
                    isolate->heap()->undefined_value(),
                    length);
    }
  } else {
    elements = isolate->factory()->NewFixedArray(length);
  }
  // Create one-character strings for whatever the cache did not cover.
  for (int i = position; i < length; ++i) {
    Handle<Object> str =
        LookupSingleCharacterStringFromCode(isolate, s->Get(i));
    elements->set(i, *str);
  }

#ifdef DEBUG
  for (int i = 0; i < length; ++i) {
    ASSERT(String::cast(elements->get(i))->length() == 1);
  }
#endif

  return *isolate->factory()->NewJSArrayWithElements(elements);
}
-
-
// Implements `new String(value)`: wraps the String primitive in a
// JavaScript wrapper object.
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
  NoHandleAllocation ha(isolate);
  ASSERT(args.length() == 1);
  CONVERT_ARG_CHECKED(String, value, 0);
  return value->ToObject();
}
-
-
bool Runtime::IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch) {
  unibrow::uchar chars[unibrow::ToUppercase::kMaxWidth];
  // A zero-length uppercase mapping means `ch` has no distinct uppercase
  // form, i.e. it is already upper case (or caseless).
  int char_length = runtime_state->to_upper_mapping()->get(ch, 0, chars);
  return char_length == 0;
}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- Object* number = args[0];
- RUNTIME_ASSERT(number->IsNumber());
-
- return isolate->heap()->NumberToString(number);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- Object* number = args[0];
- RUNTIME_ASSERT(number->IsNumber());
-
- return isolate->heap()->NumberToString(number, false);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(number, 0);
-
- // We do not include 0 so that we don't have to treat +0 / -0 cases.
- if (number > 0 && number <= Smi::kMaxValue) {
- return Smi::FromInt(static_cast<int>(number));
- }
- return isolate->heap()->NumberFromDouble(DoubleToInteger(number));
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
  NoHandleAllocation ha(isolate);
  ASSERT(args.length() == 1);

  CONVERT_DOUBLE_ARG_CHECKED(number, 0);

  // We do not include 0 so that we don't have to treat +0 / -0 cases.
  if (number > 0 && number <= Smi::kMaxValue) {
    return Smi::FromInt(static_cast<int>(number));
  }

  double double_value = DoubleToInteger(number);
  // Map both -0 and +0 to +0.  This is NOT a no-op: -0 == 0 compares
  // true, and assigning the literal 0 replaces -0 with +0.
  if (double_value == 0) double_value = 0;

  return isolate->heap()->NumberFromDouble(double_value);
}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
- return isolate->heap()->NumberFromUint32(number);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(number, 0);
-
- // We do not include 0 so that we don't have to treat +0 / -0 cases.
- if (number > 0 && number <= Smi::kMaxValue) {
- return Smi::FromInt(static_cast<int>(number));
- }
- return isolate->heap()->NumberFromInt32(DoubleToInt32(number));
-}
-
-
-// Converts a Number to a Smi, if possible. Returns NaN if the number is not
-// a small integer.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- Object* obj = args[0];
- if (obj->IsSmi()) {
- return obj;
- }
- if (obj->IsHeapNumber()) {
- double value = HeapNumber::cast(obj)->value();
- int int_value = FastD2I(value);
- if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
- return Smi::FromInt(int_value);
- }
- }
- return isolate->heap()->nan_value();
-}
-
-
// Allocates a fresh HeapNumber initialized to 0.
RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateHeapNumber) {
  NoHandleAllocation ha(isolate);
  ASSERT(args.length() == 0);
  return isolate->heap()->AllocateHeapNumber(0);
}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return isolate->heap()->NumberFromDouble(x + y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return isolate->heap()->NumberFromDouble(x - y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return isolate->heap()->NumberFromDouble(x * y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->NumberFromDouble(-x);
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) {
  NoHandleAllocation ha(isolate);
  ASSERT(args.length() == 0);

  // 9876543210.0 is outside Smi range, presumably chosen so that
  // NumberFromDouble always allocates a HeapNumber here -- confirm
  // against callers.
  return isolate->heap()->NumberFromDouble(9876543210.0);
}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return isolate->heap()->NumberFromDouble(x / y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
-
- x = modulo(x, y);
- // NumberFromDouble may return a Smi instead of a Number object
- return isolate->heap()->NumberFromDouble(x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, str1, 0);
- CONVERT_ARG_CHECKED(String, str2, 1);
- isolate->counters()->string_add_runtime()->Increment();
- return isolate->heap()->AllocateConsString(str1, str2);
-}
-
-
// Flattens the parts described by `fixed_array` into `sink` (which must be
// large enough for the total length).  Each element is either a string or
// a Smi-encoded slice of `special`: a positive Smi packs position+length
// in one value; a non-positive Smi is the negated length, with the
// position in the following element.  `sinkchar` is uint8_t or uc16.
template <typename sinkchar>
static inline void StringBuilderConcatHelper(String* special,
                                             sinkchar* sink,
                                             FixedArray* fixed_array,
                                             int array_length) {
  int position = 0;
  for (int i = 0; i < array_length; i++) {
    Object* element = fixed_array->get(i);
    if (element->IsSmi()) {
      // Smi encoding of position and length.
      int encoded_slice = Smi::cast(element)->value();
      int pos;
      int len;
      if (encoded_slice > 0) {
        // Position and length encoded in one smi.
        pos = StringBuilderSubstringPosition::decode(encoded_slice);
        len = StringBuilderSubstringLength::decode(encoded_slice);
      } else {
        // Position and length encoded in two smis.
        Object* obj = fixed_array->get(++i);
        ASSERT(obj->IsSmi());
        pos = Smi::cast(obj)->value();
        len = -encoded_slice;
      }
      // Copy the slice [pos, pos + len) of `special`.
      String::WriteToFlat(special,
                          sink + position,
                          pos,
                          pos + len);
      position += len;
    } else {
      // Plain string element: copy it wholesale.
      String* string = String::cast(element);
      int element_length = string->length();
      String::WriteToFlat(string, sink + position, 0, element_length);
      position += element_length;
    }
  }
}
-
-
// Concatenates the parts described by `array` (strings and Smi-encoded
// slices of `special`; see StringBuilderConcatHelper for the encoding).
// First pass validates the encoding and computes the total length and
// result width; second pass allocates the result and flattens the parts.
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
  NoHandleAllocation ha(isolate);
  ASSERT(args.length() == 3);
  CONVERT_ARG_CHECKED(JSArray, array, 0);
  if (!args[1]->IsSmi()) {
    // A non-Smi element count means the builder overflowed.
    isolate->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException(0x14);
  }
  int array_length = args.smi_at(1);
  CONVERT_ARG_CHECKED(String, special, 2);

  // This assumption is used by the slice encoding in one or two smis.
  ASSERT(Smi::kMaxValue >= String::kMaxLength);

  MaybeObject* maybe_result = array->EnsureCanContainHeapObjectElements();
  if (maybe_result->IsFailure()) return maybe_result;

  int special_length = special->length();
  if (!array->HasFastObjectElements()) {
    return isolate->Throw(isolate->heap()->illegal_argument_string());
  }
  FixedArray* fixed_array = FixedArray::cast(array->elements());
  if (fixed_array->length() < array_length) {
    array_length = fixed_array->length();
  }

  if (array_length == 0) {
    return isolate->heap()->empty_string();
  } else if (array_length == 1) {
    Object* first = fixed_array->get(0);
    if (first->IsString()) return first;
  }

  // First pass: validate slices, accumulate the total length and decide
  // whether a one-byte result representation suffices.
  bool one_byte = special->IsOneByteConvertible();
  int position = 0;
  for (int i = 0; i < array_length; i++) {
    int increment = 0;
    Object* elt = fixed_array->get(i);
    if (elt->IsSmi()) {
      // Smi encoding of position and length.
      int smi_value = Smi::cast(elt)->value();
      int pos;
      int len;
      if (smi_value > 0) {
        // Position and length encoded in one smi.
        pos = StringBuilderSubstringPosition::decode(smi_value);
        len = StringBuilderSubstringLength::decode(smi_value);
      } else {
        // Position and length encoded in two smis.
        len = -smi_value;
        // Get the position and check that it is a positive smi.
        i++;
        if (i >= array_length) {
          return isolate->Throw(isolate->heap()->illegal_argument_string());
        }
        Object* next_smi = fixed_array->get(i);
        if (!next_smi->IsSmi()) {
          return isolate->Throw(isolate->heap()->illegal_argument_string());
        }
        pos = Smi::cast(next_smi)->value();
        if (pos < 0) {
          return isolate->Throw(isolate->heap()->illegal_argument_string());
        }
      }
      ASSERT(pos >= 0);
      ASSERT(len >= 0);
      // The slice must lie entirely inside `special`.
      if (pos > special_length || len > special_length - pos) {
        return isolate->Throw(isolate->heap()->illegal_argument_string());
      }
      increment = len;
    } else if (elt->IsString()) {
      String* element = String::cast(elt);
      int element_length = element->length();
      increment = element_length;
      if (one_byte && !element->IsOneByteConvertible()) {
        one_byte = false;
      }
    } else {
      ASSERT(!elt->IsTheHole());
      return isolate->Throw(isolate->heap()->illegal_argument_string());
    }
    // Overflow-safe check against the maximum string length.
    if (increment > String::kMaxLength - position) {
      isolate->context()->mark_out_of_memory();
      return Failure::OutOfMemoryException(0x15);
    }
    position += increment;
  }

  int length = position;
  Object* object;

  // Second pass: allocate the result string and flatten the parts into it.
  if (one_byte) {
    { MaybeObject* maybe_object =
          isolate->heap()->AllocateRawOneByteString(length);
      if (!maybe_object->ToObject(&object)) return maybe_object;
    }
    SeqOneByteString* answer = SeqOneByteString::cast(object);
    StringBuilderConcatHelper(special,
                              answer->GetChars(),
                              fixed_array,
                              array_length);
    return answer;
  } else {
    { MaybeObject* maybe_object =
          isolate->heap()->AllocateRawTwoByteString(length);
      if (!maybe_object->ToObject(&object)) return maybe_object;
    }
    SeqTwoByteString* answer = SeqTwoByteString::cast(object);
    StringBuilderConcatHelper(special,
                              answer->GetChars(),
                              fixed_array,
                              array_length);
    return answer;
  }
}
-
-
// Joins the first array_length string elements of `array` with
// `separator` between consecutive elements, always producing a two-byte
// result (the ASCII case is handled by %_FastAsciiArrayJoin; see the
// final ASSERT).
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
  NoHandleAllocation ha(isolate);
  ASSERT(args.length() == 3);
  CONVERT_ARG_CHECKED(JSArray, array, 0);
  if (!args[1]->IsSmi()) {
    // A non-Smi element count means the joined length overflowed.
    isolate->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException(0x16);
  }
  int array_length = args.smi_at(1);
  CONVERT_ARG_CHECKED(String, separator, 2);

  if (!array->HasFastObjectElements()) {
    return isolate->Throw(isolate->heap()->illegal_argument_string());
  }
  FixedArray* fixed_array = FixedArray::cast(array->elements());
  if (fixed_array->length() < array_length) {
    array_length = fixed_array->length();
  }

  if (array_length == 0) {
    return isolate->heap()->empty_string();
  } else if (array_length == 1) {
    Object* first = fixed_array->get(0);
    if (first->IsString()) return first;
  }

  // NOTE(review): the division below assumes separator is non-empty;
  // callers appear to route the empty-separator case elsewhere -- confirm.
  int separator_length = separator->length();
  int max_nof_separators =
      (String::kMaxLength + separator_length - 1) / separator_length;
  if (max_nof_separators < (array_length - 1)) {
    isolate->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException(0x17);
  }
  // Accumulate the total length, guarding against overflow of
  // String::kMaxLength at each step.
  int length = (array_length - 1) * separator_length;
  for (int i = 0; i < array_length; i++) {
    Object* element_obj = fixed_array->get(i);
    if (!element_obj->IsString()) {
      // TODO(1161): handle this case.
      return isolate->Throw(isolate->heap()->illegal_argument_string());
    }
    String* element = String::cast(element_obj);
    int increment = element->length();
    if (increment > String::kMaxLength - length) {
      isolate->context()->mark_out_of_memory();
      return Failure::OutOfMemoryException(0x18);
    }
    length += increment;
  }

  Object* object;
  { MaybeObject* maybe_object =
        isolate->heap()->AllocateRawTwoByteString(length);
    if (!maybe_object->ToObject(&object)) return maybe_object;
  }
  SeqTwoByteString* answer = SeqTwoByteString::cast(object);

  uc16* sink = answer->GetChars();
#ifdef DEBUG
  uc16* end = sink + length;
#endif

  // Write the first element, then alternate separator / element.
  String* first = String::cast(fixed_array->get(0));
  int first_length = first->length();
  String::WriteToFlat(first, sink, 0, first_length);
  sink += first_length;

  for (int i = 1; i < array_length; i++) {
    ASSERT(sink + separator_length <= end);
    String::WriteToFlat(separator, sink, 0, separator_length);
    sink += separator_length;

    String* element = String::cast(fixed_array->get(i));
    int element_length = element->length();
    ASSERT(sink + element_length <= end);
    String::WriteToFlat(element, sink, 0, element_length);
    sink += element_length;
  }
  ASSERT(sink == end);

  // Use %_FastAsciiArrayJoin instead.
  ASSERT(!answer->IsOneByteRepresentation());
  return answer;
}
-
-template <typename Char>
-static void JoinSparseArrayWithSeparator(FixedArray* elements,
- int elements_length,
- uint32_t array_length,
- String* separator,
- Vector<Char> buffer) {
- int previous_separator_position = 0;
- int separator_length = separator->length();
- int cursor = 0;
- for (int i = 0; i < elements_length; i += 2) {
- int position = NumberToInt32(elements->get(i));
- String* string = String::cast(elements->get(i + 1));
- int string_length = string->length();
- if (string->length() > 0) {
- while (previous_separator_position < position) {
- String::WriteToFlat<Char>(separator, &buffer[cursor],
- 0, separator_length);
- cursor += separator_length;
- previous_separator_position++;
- }
- String::WriteToFlat<Char>(string, &buffer[cursor],
- 0, string_length);
- cursor += string->length();
- }
- }
- if (separator_length > 0) {
- // Array length must be representable as a signed 32-bit number,
- // otherwise the total string length would have been too large.
- ASSERT(array_length <= 0x7fffffff); // Is int32_t.
- int last_array_index = static_cast<int>(array_length - 1);
- while (previous_separator_position < last_array_index) {
- String::WriteToFlat<Char>(separator, &buffer[cursor],
- 0, separator_length);
- cursor += separator_length;
- previous_separator_position++;
- }
- }
- ASSERT(cursor <= buffer.length());
-}
-
-
// Joins a sparse array: `elements_array` holds alternating (index, string)
// pairs in increasing index order; separators are emitted for every index
// of the original array of length `array_length`, so runs of holes become
// runs of separators.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
  NoHandleAllocation ha(isolate);
  ASSERT(args.length() == 3);
  CONVERT_ARG_CHECKED(JSArray, elements_array, 0);
  RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
  CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
  CONVERT_ARG_CHECKED(String, separator, 2);
  // elements_array is fast-mode JSarray of alternating positions
  // (increasing order) and strings.
  // array_length is length of original array (used to add separators);
  // separator is string to put between elements. Assumed to be non-empty.

  // Find total length of join result.
  int string_length = 0;
  bool is_ascii = separator->IsOneByteRepresentation();
  int max_string_length;
  if (is_ascii) {
    max_string_length = SeqOneByteString::kMaxLength;
  } else {
    max_string_length = SeqTwoByteString::kMaxLength;
  }
  bool overflow = false;
  CONVERT_NUMBER_CHECKED(int, elements_length,
                         Int32, elements_array->length());
  RUNTIME_ASSERT((elements_length & 1) == 0);  // Even length.
  FixedArray* elements = FixedArray::cast(elements_array->elements());
  for (int i = 0; i < elements_length; i += 2) {
    RUNTIME_ASSERT(elements->get(i)->IsNumber());
    RUNTIME_ASSERT(elements->get(i + 1)->IsString());
    String* string = String::cast(elements->get(i + 1));
    int length = string->length();
    if (is_ascii && !string->IsOneByteRepresentation()) {
      // A two-byte element forces a two-byte result, which also raises
      // the maximum representable length.
      is_ascii = false;
      max_string_length = SeqTwoByteString::kMaxLength;
    }
    if (length > max_string_length ||
        max_string_length - length < string_length) {
      overflow = true;
      break;
    }
    string_length += length;
  }
  int separator_length = separator->length();
  if (!overflow && separator_length > 0) {
    if (array_length <= 0x7fffffffu) {
      int separator_count = static_cast<int>(array_length) - 1;
      int remaining_length = max_string_length - string_length;
      if ((remaining_length / separator_length) >= separator_count) {
        string_length += separator_length * (array_length - 1);
      } else {
        // Not room for the separators within the maximal string length.
        overflow = true;
      }
    } else {
      // Nonempty separator and at least 2^31-1 separators necessary
      // means that the string is too large to create.
      STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
      overflow = true;
    }
  }
  if (overflow) {
    // Throw OutOfMemory exception for creating too large a string.
    V8::FatalProcessOutOfMemory("Array join result too large.");
  }

  if (is_ascii) {
    MaybeObject* result_allocation =
        isolate->heap()->AllocateRawOneByteString(string_length);
    if (result_allocation->IsFailure()) return result_allocation;
    SeqOneByteString* result_string =
        SeqOneByteString::cast(result_allocation->ToObjectUnchecked());
    JoinSparseArrayWithSeparator<uint8_t>(elements,
                                          elements_length,
                                          array_length,
                                          separator,
                                          Vector<uint8_t>(
                                              result_string->GetChars(),
                                              string_length));
    return result_string;
  } else {
    MaybeObject* result_allocation =
        isolate->heap()->AllocateRawTwoByteString(string_length);
    if (result_allocation->IsFailure()) return result_allocation;
    SeqTwoByteString* result_string =
        SeqTwoByteString::cast(result_allocation->ToObjectUnchecked());
    JoinSparseArrayWithSeparator<uc16>(elements,
                                       elements_length,
                                       array_length,
                                       separator,
                                       Vector<uc16>(result_string->GetChars(),
                                                    string_length));
    return result_string;
  }
}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(x | y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(x & y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(x ^ y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- return isolate->heap()->NumberFromInt32(~x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(x << (y & 0x1f));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromUint32(x >> (y & 0x1f));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(ArithmeticShiftRight(x, y & 0x1f));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- if (isnan(x)) return Smi::FromInt(NOT_EQUAL);
- if (isnan(y)) return Smi::FromInt(NOT_EQUAL);
- if (x == y) return Smi::FromInt(EQUAL);
- Object* result;
- if ((fpclassify(x) == FP_ZERO) && (fpclassify(y) == FP_ZERO)) {
- result = Smi::FromInt(EQUAL);
- } else {
- result = Smi::FromInt(NOT_EQUAL);
- }
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(String, x, 0);
- CONVERT_ARG_CHECKED(String, y, 1);
-
- bool not_equal = !x->Equals(y);
- // This is slightly convoluted because the value that signifies
- // equality is 0 and inequality is 1 so we have to negate the result
- // from String::Equals.
- ASSERT(not_equal == 0 || not_equal == 1);
- STATIC_CHECK(EQUAL == 0);
- STATIC_CHECK(NOT_EQUAL == 1);
- return Smi::FromInt(not_equal);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_UserObjectEquals) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(JSObject, lhs, 1);
- CONVERT_ARG_CHECKED(JSObject, rhs, 0);
-
- bool result;
-
- v8::UserObjectComparisonCallback callback =
- isolate->UserObjectComparisonCallback();
- if (callback) {
- HandleScope scope(isolate);
- Handle<JSObject> lhs_handle(lhs);
- Handle<JSObject> rhs_handle(rhs);
- result = callback(v8::Utils::ToLocal(lhs_handle),
- v8::Utils::ToLocal(rhs_handle));
- } else {
- result = (lhs == rhs);
- }
-
- return Smi::FromInt(result?0:1);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 3);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- if (isnan(x) || isnan(y)) return args[2];
- if (x == y) return Smi::FromInt(EQUAL);
- if (isless(x, y)) return Smi::FromInt(LESS);
- return Smi::FromInt(GREATER);
-}
-
-
-// Compare two Smis as if they were converted to strings and then
-// compared lexicographically.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
  NoHandleAllocation ha(isolate);
  ASSERT(args.length() == 2);
  CONVERT_SMI_ARG_CHECKED(x_value, 0);
  CONVERT_SMI_ARG_CHECKED(y_value, 1);

  // If the integers are equal so are the string representations.
  if (x_value == y_value) return Smi::FromInt(EQUAL);

  // If one of the integers is zero the normal integer order is the
  // same as the lexicographic order of the string representations.
  if (x_value == 0 || y_value == 0)
    return Smi::FromInt(x_value < y_value ? LESS : GREATER);

  // If only one of the integers is negative the negative number is
  // smallest because the char code of '-' is less than the char code
  // of any digit.  Otherwise, we make both values positive.

  // Use unsigned values otherwise the logic is incorrect for -MIN_INT on
  // architectures using 32-bit Smis.
  uint32_t x_scaled = x_value;
  uint32_t y_scaled = y_value;
  if (x_value < 0 || y_value < 0) {
    if (y_value >= 0) return Smi::FromInt(LESS);
    if (x_value >= 0) return Smi::FromInt(GREATER);
    x_scaled = -x_value;
    y_scaled = -y_value;
  }

  static const uint32_t kPowersOf10[] = {
      1, 10, 100, 1000, 10*1000, 100*1000,
      1000*1000, 10*1000*1000, 100*1000*1000,
      1000*1000*1000
  };

  // If the integers have the same number of decimal digits they can be
  // compared directly as the numeric order is the same as the
  // lexicographic order.  If one integer has fewer digits, it is scaled
  // by some power of 10 to have the same number of digits as the longer
  // integer.  If the scaled integers are equal it means the shorter
  // integer comes first in the lexicographic order.

  // From http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
  int x_log2 = IntegerLog2(x_scaled);
  int x_log10 = ((x_log2 + 1) * 1233) >> 12;
  x_log10 -= x_scaled < kPowersOf10[x_log10];

  int y_log2 = IntegerLog2(y_scaled);
  int y_log10 = ((y_log2 + 1) * 1233) >> 12;
  y_log10 -= y_scaled < kPowersOf10[y_log10];

  // `tie` is returned when the scaled values compare equal; it encodes
  // which operand was the shorter (prefix) string.
  int tie = EQUAL;

  if (x_log10 < y_log10) {
    // X has fewer digits.  We would like to simply scale up X but that
    // might overflow, e.g when comparing 9 with 1_000_000_000, 9 would
    // be scaled up to 9_000_000_000.  So we scale up by the next
    // smallest power and scale down Y to drop one digit.  It is OK to
    // drop one digit from the longer integer since the final digit is
    // past the length of the shorter integer.
    x_scaled *= kPowersOf10[y_log10 - x_log10 - 1];
    y_scaled /= 10;
    tie = LESS;
  } else if (y_log10 < x_log10) {
    y_scaled *= kPowersOf10[x_log10 - y_log10 - 1];
    x_scaled /= 10;
    tie = GREATER;
  }

  if (x_scaled < y_scaled) return Smi::FromInt(LESS);
  if (x_scaled > y_scaled) return Smi::FromInt(GREATER);
  return Smi::FromInt(tie);
}
-
-
-static Object* StringCharacterStreamCompare(RuntimeState* state,
- String* x,
- String* y) {
- StringCharacterStream stream_x(x, state->string_iterator_compare_x());
- StringCharacterStream stream_y(y, state->string_iterator_compare_y());
- while (stream_x.HasMore() && stream_y.HasMore()) {
- int d = stream_x.GetNext() - stream_y.GetNext();
- if (d < 0) return Smi::FromInt(LESS);
- else if (d > 0) return Smi::FromInt(GREATER);
- }
-
- // x is (non-trivial) prefix of y:
- if (stream_y.HasMore()) return Smi::FromInt(LESS);
- // y is prefix of x:
- return Smi::FromInt(stream_x.HasMore() ? GREATER : EQUAL);
-}
-
-
-static Object* FlatStringCompare(String* x, String* y) {
- ASSERT(x->IsFlat());
- ASSERT(y->IsFlat());
- Object* equal_prefix_result = Smi::FromInt(EQUAL);
- int prefix_length = x->length();
- if (y->length() < prefix_length) {
- prefix_length = y->length();
- equal_prefix_result = Smi::FromInt(GREATER);
- } else if (y->length() > prefix_length) {
- equal_prefix_result = Smi::FromInt(LESS);
- }
- int r;
- String::FlatContent x_content = x->GetFlatContent();
- String::FlatContent y_content = y->GetFlatContent();
- if (x_content.IsAscii()) {
- Vector<const uint8_t> x_chars = x_content.ToOneByteVector();
- if (y_content.IsAscii()) {
- Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- } else {
- Vector<const uc16> y_chars = y_content.ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- }
- } else {
- Vector<const uc16> x_chars = x_content.ToUC16Vector();
- if (y_content.IsAscii()) {
- Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- } else {
- Vector<const uc16> y_chars = y_content.ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- }
- }
- Object* result;
- if (r == 0) {
- result = equal_prefix_result;
- } else {
- result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
- }
- ASSERT(result ==
- StringCharacterStreamCompare(Isolate::Current()->runtime_state(), x, y));
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(String, x, 0);
- CONVERT_ARG_CHECKED(String, y, 1);
-
- isolate->counters()->string_compare_runtime()->Increment();
-
- // A few fast case tests before we flatten.
- if (x == y) return Smi::FromInt(EQUAL);
- if (y->length() == 0) {
- if (x->length() == 0) return Smi::FromInt(EQUAL);
- return Smi::FromInt(GREATER);
- } else if (x->length() == 0) {
- return Smi::FromInt(LESS);
- }
-
- int d = x->Get(0) - y->Get(0);
- if (d < 0) return Smi::FromInt(LESS);
- else if (d > 0) return Smi::FromInt(GREATER);
-
- Object* obj;
- { MaybeObject* maybe_obj = isolate->heap()->PrepareForCompare(x);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- { MaybeObject* maybe_obj = isolate->heap()->PrepareForCompare(y);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- return (x->IsFlat() && y->IsFlat()) ? FlatStringCompare(x, y)
- : StringCharacterStreamCompare(isolate->runtime_state(), x, y);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_acos()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::ACOS, x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_asin()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::ASIN, x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_atan()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::ATAN, x);
-}
-
-
-static const double kPiDividedBy4 = 0.78539816339744830962;
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
- isolate->counters()->math_atan2()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- double result;
- if (isinf(x) && isinf(y)) {
- // Make sure that the result in case of two infinite arguments
- // is a multiple of Pi / 4. The sign of the result is determined
- // by the first argument (x) and the sign of the second argument
- // determines the multiplier: one or three.
- int multiplier = (x < 0) ? -1 : 1;
- if (y < 0) multiplier *= 3;
- result = multiplier * kPiDividedBy4;
- } else {
- result = atan2(x, y);
- }
- return isolate->heap()->AllocateHeapNumber(result);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_ceil()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->NumberFromDouble(ceiling(x));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_cos()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::COS, x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_exp()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- lazily_initialize_fast_exp();
- return isolate->heap()->NumberFromDouble(fast_exp(x));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_floor()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->NumberFromDouble(floor(x));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_log()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
-}
-
-// Slow version of Math.pow. We check for fast paths for special cases.
-// Used if SSE2/VFP3 is not available.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
- isolate->counters()->math_pow()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-
- // If the second argument is a smi, it is much faster to call the
- // custom powi() function than the generic pow().
- if (args[1]->IsSmi()) {
- int y = args.smi_at(1);
- return isolate->heap()->NumberFromDouble(power_double_int(x, y));
- }
-
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- double result = power_helper(x, y);
- if (isnan(result)) return isolate->heap()->nan_value();
- return isolate->heap()->AllocateHeapNumber(result);
-}
-
-// Fast version of Math.pow if we know that y is not an integer and y is not
-// -0.5 or 0.5. Used as slow case from full codegen.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
- isolate->counters()->math_pow()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- if (y == 0) {
- return Smi::FromInt(1);
- } else {
- double result = power_double_double(x, y);
- if (isnan(result)) return isolate->heap()->nan_value();
- return isolate->heap()->AllocateHeapNumber(result);
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_round()->Increment();
-
- if (!args[0]->IsHeapNumber()) {
- // Must be smi. Return the argument unchanged for all the other types
- // to make fuzz-natives test happy.
- return args[0];
- }
-
- HeapNumber* number = reinterpret_cast<HeapNumber*>(args[0]);
-
- double value = number->value();
- int exponent = number->get_exponent();
- int sign = number->get_sign();
-
- if (exponent < -1) {
- // Number in range ]-0.5..0.5[. These always round to +/-zero.
- if (sign) return isolate->heap()->minus_zero_value();
- return Smi::FromInt(0);
- }
-
- // We compare with kSmiValueSize - 2 because (2^30 - 0.1) has exponent 29 and
- // should be rounded to 2^30, which is not smi (for 31-bit smis, similar
- // argument holds for 32-bit smis).
- if (!sign && exponent < kSmiValueSize - 2) {
- return Smi::FromInt(static_cast<int>(value + 0.5));
- }
-
- // If the magnitude is big enough, there's no place for fraction part. If we
- // try to add 0.5 to this number, 1.0 will be added instead.
- if (exponent >= 52) {
- return number;
- }
-
- if (sign && value >= -0.5) return isolate->heap()->minus_zero_value();
-
- // Do not call NumberFromDouble() to avoid extra checks.
- return isolate->heap()->AllocateHeapNumber(floor(value + 0.5));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_sin()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::SIN, x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_sqrt()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->AllocateHeapNumber(fast_sqrt(x));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_tan()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::TAN, x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_SMI_ARG_CHECKED(year, 0);
- CONVERT_SMI_ARG_CHECKED(month, 1);
-
- return Smi::FromInt(isolate->date_cache()->DaysFromYearMonth(year, month));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateSetValue) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 0);
- CONVERT_DOUBLE_ARG_CHECKED(time, 1);
- CONVERT_SMI_ARG_CHECKED(is_utc, 2);
-
- DateCache* date_cache = isolate->date_cache();
-
- Object* value = NULL;
- bool is_value_nan = false;
- if (isnan(time)) {
- value = isolate->heap()->nan_value();
- is_value_nan = true;
- } else if (!is_utc &&
- (time < -DateCache::kMaxTimeBeforeUTCInMs ||
- time > DateCache::kMaxTimeBeforeUTCInMs)) {
- value = isolate->heap()->nan_value();
- is_value_nan = true;
- } else {
- time = is_utc ? time : date_cache->ToUTC(static_cast<int64_t>(time));
- if (time < -DateCache::kMaxTimeInMs ||
- time > DateCache::kMaxTimeInMs) {
- value = isolate->heap()->nan_value();
- is_value_nan = true;
- } else {
- MaybeObject* maybe_result =
- isolate->heap()->AllocateHeapNumber(DoubleToInteger(time));
- if (!maybe_result->ToObject(&value)) return maybe_result;
- }
- }
- date->SetValue(value, is_value_nan);
- return value;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
-
- Handle<JSFunction> callee = args.at<JSFunction>(0);
- Object** parameters = reinterpret_cast<Object**>(args[1]);
- const int argument_count = Smi::cast(args[2])->value();
-
- Handle<JSObject> result =
- isolate->factory()->NewArgumentsObject(callee, argument_count);
- // Allocate the elements if needed.
- int parameter_count = callee->shared()->formal_parameter_count();
- if (argument_count > 0) {
- if (parameter_count > 0) {
- int mapped_count = Min(argument_count, parameter_count);
- Handle<FixedArray> parameter_map =
- isolate->factory()->NewFixedArray(mapped_count + 2, NOT_TENURED);
- parameter_map->set_map(
- isolate->heap()->non_strict_arguments_elements_map());
-
- Handle<Map> old_map(result->map());
- Handle<Map> new_map = isolate->factory()->CopyMap(old_map);
- new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
-
- result->set_map(*new_map);
- result->set_elements(*parameter_map);
-
- // Store the context and the arguments array at the beginning of the
- // parameter map.
- Handle<Context> context(isolate->context());
- Handle<FixedArray> arguments =
- isolate->factory()->NewFixedArray(argument_count, NOT_TENURED);
- parameter_map->set(0, *context);
- parameter_map->set(1, *arguments);
-
- // Loop over the actual parameters backwards.
- int index = argument_count - 1;
- while (index >= mapped_count) {
- // These go directly in the arguments array and have no
- // corresponding slot in the parameter map.
- arguments->set(index, *(parameters - index - 1));
- --index;
- }
-
- Handle<ScopeInfo> scope_info(callee->shared()->scope_info());
- while (index >= 0) {
- // Detect duplicate names to the right in the parameter list.
- Handle<String> name(scope_info->ParameterName(index));
- int context_local_count = scope_info->ContextLocalCount();
- bool duplicate = false;
- for (int j = index + 1; j < parameter_count; ++j) {
- if (scope_info->ParameterName(j) == *name) {
- duplicate = true;
- break;
- }
- }
-
- if (duplicate) {
- // This goes directly in the arguments array with a hole in the
- // parameter map.
- arguments->set(index, *(parameters - index - 1));
- parameter_map->set_the_hole(index + 2);
- } else {
- // The context index goes in the parameter map with a hole in the
- // arguments array.
- int context_index = -1;
- for (int j = 0; j < context_local_count; ++j) {
- if (scope_info->ContextLocalName(j) == *name) {
- context_index = j;
- break;
- }
- }
- ASSERT(context_index >= 0);
- arguments->set_the_hole(index);
- parameter_map->set(index + 2, Smi::FromInt(
- Context::MIN_CONTEXT_SLOTS + context_index));
- }
-
- --index;
- }
- } else {
- // If there is no aliasing, the arguments object elements are not
- // special in any way.
- Handle<FixedArray> elements =
- isolate->factory()->NewFixedArray(argument_count, NOT_TENURED);
- result->set_elements(*elements);
- for (int i = 0; i < argument_count; ++i) {
- elements->set(i, *(parameters - i - 1));
- }
- }
- }
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 3);
-
- JSFunction* callee = JSFunction::cast(args[0]);
- Object** parameters = reinterpret_cast<Object**>(args[1]);
- const int length = args.smi_at(2);
-
- Object* result;
- { MaybeObject* maybe_result =
- isolate->heap()->AllocateArgumentsObject(callee, length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Allocate the elements if needed.
- if (length > 0) {
- // Allocate the fixed array.
- Object* obj;
- { MaybeObject* maybe_obj = isolate->heap()->AllocateRawFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- AssertNoAllocation no_gc;
- FixedArray* array = reinterpret_cast<FixedArray*>(obj);
- array->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
- array->set_length(length);
-
- WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < length; i++) {
- array->set(i, *--parameters, mode);
- }
- JSObject::cast(result)->set_elements(FixedArray::cast(obj));
- }
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
- CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 1);
- CONVERT_BOOLEAN_ARG_CHECKED(pretenure, 2);
-
- // The caller ensures that we pretenure closures that are assigned
- // directly to properties.
- PretenureFlag pretenure_flag = pretenure ? TENURED : NOT_TENURED;
- Handle<JSFunction> result =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
- context,
- pretenure_flag);
- return *result;
-}
-
-
-// Find the arguments of the JavaScript function invocation that called
-// into C++ code. Collect these in a newly allocated array of handles (possibly
-// prefixed by a number of empty handles).
-static SmartArrayPointer<Handle<Object> > GetCallerArguments(
- Isolate* isolate,
- int prefix_argc,
- int* total_argc) {
- // Find frame containing arguments passed to the caller.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- List<JSFunction*> functions(2);
- frame->GetFunctions(&functions);
- if (functions.length() > 1) {
- int inlined_jsframe_index = functions.length() - 1;
- JSFunction* inlined_function = functions[inlined_jsframe_index];
- Vector<SlotRef> args_slots =
- SlotRef::ComputeSlotMappingForArguments(
- frame,
- inlined_jsframe_index,
- inlined_function->shared()->formal_parameter_count());
-
- int args_count = args_slots.length();
-
- *total_argc = prefix_argc + args_count;
- SmartArrayPointer<Handle<Object> > param_data(
- NewArray<Handle<Object> >(*total_argc));
- for (int i = 0; i < args_count; i++) {
- Handle<Object> val = args_slots[i].GetValue(isolate);
- param_data[prefix_argc + i] = val;
- }
-
- args_slots.Dispose();
-
- return param_data;
- } else {
- it.AdvanceToArgumentsFrame();
- frame = it.frame();
- int args_count = frame->ComputeParametersCount();
-
- *total_argc = prefix_argc + args_count;
- SmartArrayPointer<Handle<Object> > param_data(
- NewArray<Handle<Object> >(*total_argc));
- for (int i = 0; i < args_count; i++) {
- Handle<Object> val = Handle<Object>(frame->GetParameter(i), isolate);
- param_data[prefix_argc + i] = val;
- }
- return param_data;
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, bound_function, 0);
- RUNTIME_ASSERT(args[3]->IsNumber());
- Handle<Object> bindee = args.at<Object>(1);
-
- // TODO(lrn): Create bound function in C++ code from premade shared info.
- bound_function->shared()->set_bound(true);
- // Get all arguments of calling function (Function.prototype.bind).
- int argc = 0;
- SmartArrayPointer<Handle<Object> > arguments =
- GetCallerArguments(isolate, 0, &argc);
- // Don't count the this-arg.
- if (argc > 0) {
- ASSERT(*arguments[0] == args[2]);
- argc--;
- } else {
- ASSERT(args[2]->IsUndefined());
- }
- // Initialize array of bindings (function, this, and any existing arguments
- // if the function was already bound).
- Handle<FixedArray> new_bindings;
- int i;
- if (bindee->IsJSFunction() && JSFunction::cast(*bindee)->shared()->bound()) {
- Handle<FixedArray> old_bindings(
- JSFunction::cast(*bindee)->function_bindings());
- new_bindings =
- isolate->factory()->NewFixedArray(old_bindings->length() + argc);
- bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex),
- isolate);
- i = 0;
- for (int n = old_bindings->length(); i < n; i++) {
- new_bindings->set(i, old_bindings->get(i));
- }
- } else {
- int array_size = JSFunction::kBoundArgumentsStartIndex + argc;
- new_bindings = isolate->factory()->NewFixedArray(array_size);
- new_bindings->set(JSFunction::kBoundFunctionIndex, *bindee);
- new_bindings->set(JSFunction::kBoundThisIndex, args[2]);
- i = 2;
- }
- // Copy arguments, skipping the first which is "this_arg".
- for (int j = 0; j < argc; j++, i++) {
- new_bindings->set(i, *arguments[j + 1]);
- }
- new_bindings->set_map_no_write_barrier(
- isolate->heap()->fixed_cow_array_map());
- bound_function->set_function_bindings(*new_bindings);
-
- // Update length.
- Handle<String> length_string = isolate->factory()->length_string();
- Handle<Object> new_length(args.at<Object>(3));
- PropertyAttributes attr =
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
- ForceSetProperty(bound_function, length_string, new_length, attr);
- return *bound_function;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionGetBindings) {
- HandleScope handles(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, callable, 0);
- if (callable->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
- if (function->shared()->bound()) {
- Handle<FixedArray> bindings(function->function_bindings());
- ASSERT(bindings->map() == isolate->heap()->fixed_cow_array_map());
- return *isolate->factory()->NewJSArrayWithElements(bindings);
- }
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- // First argument is a function to use as a constructor.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- RUNTIME_ASSERT(function->shared()->bound());
-
- // The argument is a bound function. Extract its bound arguments
- // and callable.
- Handle<FixedArray> bound_args =
- Handle<FixedArray>(FixedArray::cast(function->function_bindings()));
- int bound_argc = bound_args->length() - JSFunction::kBoundArgumentsStartIndex;
- Handle<Object> bound_function(
- JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)),
- isolate);
- ASSERT(!bound_function->IsJSFunction() ||
- !Handle<JSFunction>::cast(bound_function)->shared()->bound());
-
- int total_argc = 0;
- SmartArrayPointer<Handle<Object> > param_data =
- GetCallerArguments(isolate, bound_argc, &total_argc);
- for (int i = 0; i < bound_argc; i++) {
- param_data[i] = Handle<Object>(bound_args->get(
- JSFunction::kBoundArgumentsStartIndex + i), isolate);
- }
-
- if (!bound_function->IsJSFunction()) {
- bool exception_thrown;
- bound_function = Execution::TryGetConstructorDelegate(bound_function,
- &exception_thrown);
- if (exception_thrown) return Failure::Exception();
- }
- ASSERT(bound_function->IsJSFunction());
-
- bool exception = false;
- Handle<Object> result =
- Execution::New(Handle<JSFunction>::cast(bound_function),
- total_argc, *param_data, &exception);
- if (exception) {
- return Failure::Exception();
- }
- ASSERT(!result.is_null());
- return *result;
-}
-
-
-static void TrySettingInlineConstructStub(Isolate* isolate,
- Handle<JSFunction> function) {
- Handle<Object> prototype = isolate->factory()->null_value();
- if (function->has_instance_prototype()) {
- prototype = Handle<Object>(function->instance_prototype(), isolate);
- }
- if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
- ConstructStubCompiler compiler(isolate);
- Handle<Code> code = compiler.CompileConstructStub(function);
- function->shared()->set_construct_stub(*code);
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- Handle<Object> constructor = args.at<Object>(0);
-
- // If the constructor isn't a proper function we throw a type error.
- if (!constructor->IsJSFunction()) {
- Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
- Handle<Object> type_error =
- isolate->factory()->NewTypeError("not_constructor", arguments);
- return isolate->Throw(*type_error);
- }
-
- Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
-
- // If function should not have prototype, construction is not allowed. In this
- // case generated code bailouts here, since function has no initial_map.
- if (!function->should_have_prototype() && !function->shared()->bound()) {
- Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
- Handle<Object> type_error =
- isolate->factory()->NewTypeError("not_constructor", arguments);
- return isolate->Throw(*type_error);
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = isolate->debug();
- // Handle stepping into constructors if step into is active.
- if (debug->StepInActive()) {
- debug->HandleStepIn(function, Handle<Object>::null(), 0, true);
- }
-#endif
-
- if (function->has_initial_map()) {
- if (function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
- // The 'Function' function ignores the receiver object when
- // called using 'new' and creates a new JSFunction object that
- // is returned. The receiver object is only used for error
- // reporting if an error occurs when constructing the new
- // JSFunction. FACTORY->NewJSObject() should not be used to
- // allocate JSFunctions since it does not properly initialize
- // the shared part of the function. Since the receiver is
- // ignored anyway, we use the global object as the receiver
- // instead of a new JSFunction object. This way, errors are
- // reported the same way whether or not 'Function' is called
- // using 'new'.
- return isolate->context()->global_object();
- }
- }
-
- // The function should be compiled for the optimization hints to be
- // available.
- JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION);
-
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- if (!function->has_initial_map() &&
- shared->IsInobjectSlackTrackingInProgress()) {
- // The tracking is already in progress for another function. We can only
- // track one initial_map at a time, so we force the completion before the
- // function is called as a constructor for the first time.
- shared->CompleteInobjectSlackTracking();
- }
-
- bool first_allocation = !shared->live_objects_may_exist();
- Handle<JSObject> result = isolate->factory()->NewJSObject(function);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- // Delay setting the stub if inobject slack tracking is in progress.
- if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) {
- TrySettingInlineConstructStub(isolate, function);
- }
-
- isolate->counters()->constructed_objects()->Increment();
- isolate->counters()->constructed_objects_runtime()->Increment();
-
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- function->shared()->CompleteInobjectSlackTracking();
- TrySettingInlineConstructStub(isolate, function);
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyCompile) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- Handle<JSFunction> function = args.at<JSFunction>(0);
-#ifdef DEBUG
- if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
- PrintF("[lazy: ");
- function->PrintName();
- PrintF("]\n");
- }
-#endif
-
- // Compile the target function.
- ASSERT(!function->is_compiled());
- if (!JSFunction::CompileLazy(function, KEEP_EXCEPTION)) {
- return Failure::Exception();
- }
-
- // All done. Return the compiled code.
- ASSERT(function->is_compiled());
- return function->code();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- Handle<JSFunction> function = args.at<JSFunction>(0);
-
- // If the function is not compiled ignore the lazy
- // recompilation. This can happen if the debugger is activated and
- // the function is returned to the not compiled state.
- if (!function->shared()->is_compiled()) {
- function->ReplaceCode(function->shared()->code());
- return function->code();
- }
-
- // If the function is not optimizable or debugger is active continue using the
- // code from the full compiler.
- if (!FLAG_crankshaft ||
- !function->shared()->code()->optimizable() ||
- isolate->DebuggerHasBreakPoints()) {
- if (FLAG_trace_opt) {
- PrintF("[failed to optimize ");
- function->PrintName();
- PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
- function->shared()->code()->optimizable() ? "T" : "F",
- isolate->DebuggerHasBreakPoints() ? "T" : "F");
- }
- function->ReplaceCode(function->shared()->code());
- return function->code();
- }
- function->shared()->code()->set_profiler_ticks(0);
- if (JSFunction::CompileOptimized(function,
- BailoutId::None(),
- CLEAR_EXCEPTION)) {
- return function->code();
- }
- if (FLAG_trace_opt) {
- PrintF("[failed to optimize ");
- function->PrintName();
- PrintF(": optimized compilation failed]\n");
- }
- function->ReplaceCode(function->shared()->code());
- return function->code();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
- HandleScope handle_scope(isolate);
- ASSERT(FLAG_parallel_recompilation);
- Compiler::RecompileParallel(args.at<JSFunction>(0));
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ForceParallelRecompile) {
- if (!V8::UseCrankshaft()) return isolate->heap()->undefined_value();
- HandleScope handle_scope(isolate);
- ASSERT(FLAG_parallel_recompilation && FLAG_manual_parallel_recompilation);
- if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
- return isolate->Throw(*isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("Recompile queue is full.")));
- }
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- fun->ReplaceCode(isolate->builtins()->builtin(Builtins::kParallelRecompile));
- Compiler::RecompileParallel(fun);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InstallRecompiledCode) {
- if (!V8::UseCrankshaft()) return isolate->heap()->undefined_value();
- HandleScope handle_scope(isolate);
- ASSERT(FLAG_parallel_recompilation && FLAG_manual_parallel_recompilation);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- OptimizingCompilerThread* opt_thread = isolate->optimizing_compiler_thread();
- Handle<SharedFunctionInfo> shared(fun->shared());
- while (*opt_thread->InstallNextOptimizedFunction() != *shared) { }
- return isolate->heap()->undefined_value();
-}
-
-
-class ActivationsFinder : public ThreadVisitor {
- public:
- explicit ActivationsFinder(JSFunction* function)
- : function_(function), has_activations_(false) {}
-
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- if (has_activations_) return;
-
- for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized() && frame->function() == function_) {
- has_activations_ = true;
- return;
- }
- }
- }
-
- bool has_activations() { return has_activations_; }
-
- private:
- JSFunction* function_;
- bool has_activations_;
-};
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyStubFailure) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 0);
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- ASSERT(isolate->heap()->IsAllocationAllowed());
- ASSERT(deoptimizer->compiled_code_kind() == Code::COMPILED_STUB);
- delete deoptimizer;
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- RUNTIME_ASSERT(args[0]->IsSmi());
- Deoptimizer::BailoutType type =
- static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- ASSERT(isolate->heap()->IsAllocationAllowed());
-
- ASSERT(deoptimizer->compiled_code_kind() != Code::COMPILED_STUB);
-
- // Make sure to materialize objects before causing any allocation.
- JavaScriptFrameIterator it(isolate);
- deoptimizer->MaterializeHeapObjects(&it);
- delete deoptimizer;
-
- JavaScriptFrame* frame = it.frame();
- RUNTIME_ASSERT(frame->function()->IsJSFunction());
- Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
- RUNTIME_ASSERT(type != Deoptimizer::EAGER || function->IsOptimized());
-
- // Avoid doing too much work when running with --always-opt and keep
- // the optimized code around.
- if (FLAG_always_opt || type == Deoptimizer::LAZY) {
- return isolate->heap()->undefined_value();
- }
-
- // Find other optimized activations of the function or functions that
- // share the same optimized code.
- bool has_other_activations = false;
- while (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- JSFunction* other_function = JSFunction::cast(frame->function());
- if (frame->is_optimized() && other_function->code() == function->code()) {
- has_other_activations = true;
- break;
- }
- it.Advance();
- }
-
- if (!has_other_activations) {
- ActivationsFinder activations_finder(*function);
- isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
- has_other_activations = activations_finder.has_activations();
- }
-
- if (!has_other_activations) {
- if (FLAG_trace_deopt) {
- PrintF("[removing optimized code for: ");
- function->PrintName();
- PrintF("]\n");
- }
- function->ReplaceCode(function->shared()->code());
- } else {
- Deoptimizer::DeoptimizeFunction(*function);
- }
- // Flush optimized code cache for this function.
- function->shared()->ClearOptimizedCodeMap();
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) {
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- delete deoptimizer;
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (!function->IsOptimized()) return isolate->heap()->undefined_value();
-
- Deoptimizer::DeoptimizeFunction(*function);
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearFunctionTypeFeedback) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- Code* unoptimized = function->shared()->code();
- if (unoptimized->kind() == Code::FUNCTION) {
- unoptimized->ClearInlineCaches();
- unoptimized->ClearTypeFeedbackCells(isolate->heap());
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
-#if defined(USE_SIMULATOR)
- return isolate->heap()->true_value();
-#else
- return isolate->heap()->false_value();
-#endif
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
- HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-
- if (!function->IsOptimizable()) return isolate->heap()->undefined_value();
- function->MarkForLazyRecompilation();
-
- Code* unoptimized = function->shared()->code();
- if (args.length() == 2 &&
- unoptimized->kind() == Code::FUNCTION) {
- CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
- CHECK(type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr")));
- isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
- unoptimized->set_allow_osr_at_loop_nesting_level(
- Code::kMaxLoopNestingMarker);
- }
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- // The least significant bit (after untagging) indicates whether the
- // function is currently optimized, regardless of reason.
- if (!V8::UseCrankshaft()) {
- return Smi::FromInt(4); // 4 == "never".
- }
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (FLAG_parallel_recompilation) {
- if (function->IsMarkedForLazyRecompilation()) {
- return Smi::FromInt(5);
- }
- }
- if (FLAG_always_opt) {
- // We may have always opt, but that is more best-effort than a real
- // promise, so we still say "no" if it is not optimized.
- return function->IsOptimized() ? Smi::FromInt(3) // 3 == "always".
- : Smi::FromInt(2); // 2 == "no".
- }
- return function->IsOptimized() ? Smi::FromInt(1) // 1 == "yes".
- : Smi::FromInt(2); // 2 == "no".
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- return Smi::FromInt(function->shared()->opt_count());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-
- // We're not prepared to handle a function with arguments object.
- ASSERT(!function->shared()->uses_arguments());
-
- // We have hit a back edge in an unoptimized frame for a function that was
- // selected for on-stack replacement. Find the unoptimized code object.
- Handle<Code> unoptimized(function->shared()->code(), isolate);
- // Keep track of whether we've succeeded in optimizing.
- bool succeeded = unoptimized->optimizable();
- if (succeeded) {
- // If we are trying to do OSR when there are already optimized
- // activations of the function, it means (a) the function is directly or
- // indirectly recursive and (b) an optimized invocation has been
- // deoptimized so that we are currently in an unoptimized activation.
- // Check for optimized activations of this function.
- JavaScriptFrameIterator it(isolate);
- while (succeeded && !it.done()) {
- JavaScriptFrame* frame = it.frame();
- succeeded = !frame->is_optimized() || frame->function() != *function;
- it.Advance();
- }
- }
-
- BailoutId ast_id = BailoutId::None();
- if (succeeded) {
- // The top JS function is this one, the PC is somewhere in the
- // unoptimized code.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- ASSERT(frame->function() == *function);
- ASSERT(frame->LookupCode() == *unoptimized);
- ASSERT(unoptimized->contains(frame->pc()));
-
- // Use linear search of the unoptimized code's stack check table to find
- // the AST id matching the PC.
- Address start = unoptimized->instruction_start();
- unsigned target_pc_offset = static_cast<unsigned>(frame->pc() - start);
- Address table_cursor = start + unoptimized->stack_check_table_offset();
- uint32_t table_length = Memory::uint32_at(table_cursor);
- table_cursor += kIntSize;
- for (unsigned i = 0; i < table_length; ++i) {
- // Table entries are (AST id, pc offset) pairs.
- uint32_t pc_offset = Memory::uint32_at(table_cursor + kIntSize);
- if (pc_offset == target_pc_offset) {
- ast_id = BailoutId(static_cast<int>(Memory::uint32_at(table_cursor)));
- break;
- }
- table_cursor += 2 * kIntSize;
- }
- ASSERT(!ast_id.IsNone());
- if (FLAG_trace_osr) {
- PrintF("[replacing on-stack at AST id %d in ", ast_id.ToInt());
- function->PrintName();
- PrintF("]\n");
- }
-
- // Try to compile the optimized code. A true return value from
- // CompileOptimized means that compilation succeeded, not necessarily
- // that optimization succeeded.
- if (JSFunction::CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
- function->IsOptimized()) {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- function->code()->deoptimization_data());
- if (data->OsrPcOffset()->value() >= 0) {
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement offset %d in optimized code]\n",
- data->OsrPcOffset()->value());
- }
- ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
- } else {
- // We may never generate the desired OSR entry if we emit an
- // early deoptimize.
- succeeded = false;
- }
- } else {
- succeeded = false;
- }
- }
-
- // Revert to the original stack checks in the original unoptimized code.
- if (FLAG_trace_osr) {
- PrintF("[restoring original stack checks in ");
- function->PrintName();
- PrintF("]\n");
- }
- InterruptStub interrupt_stub;
- Handle<Code> check_code = interrupt_stub.GetCode(isolate);
- Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
- Deoptimizer::RevertStackCheckCode(*unoptimized,
- *check_code,
- *replacement_code);
-
- // Allow OSR only at nesting level zero again.
- unoptimized->set_allow_osr_at_loop_nesting_level(0);
-
- // If the optimization attempt succeeded, return the AST id tagged as a
- // smi. This tells the builtin that we need to translate the unoptimized
- // frame to an optimized one.
- if (succeeded) {
- ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- return Smi::FromInt(ast_id.ToInt());
- } else {
- if (function->IsMarkedForLazyRecompilation()) {
- function->ReplaceCode(function->shared()->code());
- }
- return Smi::FromInt(-1);
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckIsBootstrapping) {
- RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetRootNaN) {
- RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
- return isolate->heap()->nan_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
- HandleScope scope(isolate);
- ASSERT(args.length() >= 2);
- int argc = args.length() - 2;
- CONVERT_ARG_CHECKED(JSReceiver, fun, argc + 1);
- Object* receiver = args[0];
-
- // If there are too many arguments, allocate argv via malloc.
- const int argv_small_size = 10;
- Handle<Object> argv_small_buffer[argv_small_size];
- SmartArrayPointer<Handle<Object> > argv_large_buffer;
- Handle<Object>* argv = argv_small_buffer;
- if (argc > argv_small_size) {
- argv = new Handle<Object>[argc];
- if (argv == NULL) return isolate->StackOverflow();
- argv_large_buffer = SmartArrayPointer<Handle<Object> >(argv);
- }
-
- for (int i = 0; i < argc; ++i) {
- MaybeObject* maybe = args[1 + i];
- Object* object;
- if (!maybe->To<Object>(&object)) return maybe;
- argv[i] = Handle<Object>(object, isolate);
- }
-
- bool threw;
- Handle<JSReceiver> hfun(fun);
- Handle<Object> hreceiver(receiver, isolate);
- Handle<Object> result =
- Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
-
- if (threw) return Failure::Exception();
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 5);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, fun, 0);
- Handle<Object> receiver = args.at<Object>(1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, arguments, 2);
- CONVERT_SMI_ARG_CHECKED(offset, 3);
- CONVERT_SMI_ARG_CHECKED(argc, 4);
- ASSERT(offset >= 0);
- ASSERT(argc >= 0);
-
- // If there are too many arguments, allocate argv via malloc.
- const int argv_small_size = 10;
- Handle<Object> argv_small_buffer[argv_small_size];
- SmartArrayPointer<Handle<Object> > argv_large_buffer;
- Handle<Object>* argv = argv_small_buffer;
- if (argc > argv_small_size) {
- argv = new Handle<Object>[argc];
- if (argv == NULL) return isolate->StackOverflow();
- argv_large_buffer = SmartArrayPointer<Handle<Object> >(argv);
- }
-
- for (int i = 0; i < argc; ++i) {
- argv[i] = Object::GetElement(arguments, offset + i);
- }
-
- bool threw;
- Handle<Object> result =
- Execution::Call(fun, receiver, argc, argv, &threw, true);
-
- if (threw) return Failure::Exception();
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionDelegate) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- RUNTIME_ASSERT(!args[0]->IsJSFunction());
- return *Execution::GetFunctionDelegate(args.at<Object>(0));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- RUNTIME_ASSERT(!args[0]->IsJSFunction());
- return *Execution::GetConstructorDelegate(args.at<Object>(0));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- CONVERT_ARG_CHECKED(ScopeInfo, scope_info, 1);
- Context* result;
- MaybeObject* maybe_result =
- isolate->heap()->AllocateGlobalContext(function, scope_info);
- if (!maybe_result->To(&result)) return maybe_result;
-
- ASSERT(function->context() == isolate->context());
- ASSERT(function->context()->global_object() == result->global_object());
- isolate->set_context(result);
- result->global_object()->set_global_context(result);
-
- return result; // non-failure
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- SharedFunctionInfo* shared = function->shared();
- // TODO(pvarga): The QML mode should be checked in the ContextLength function.
- int length = shared->scope_info()->ContextLength(shared->qml_mode());
- Context* result;
- MaybeObject* maybe_result =
- isolate->heap()->AllocateFunctionContext(length, function);
- if (!maybe_result->To(&result)) return maybe_result;
-
- isolate->set_context(result);
-
- return result; // non-failure
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
- JSObject* extension_object;
- if (args[0]->IsJSObject()) {
- extension_object = JSObject::cast(args[0]);
- } else {
- // Convert the object to a proper JavaScript object.
- MaybeObject* maybe_js_object = args[0]->ToObject();
- if (!maybe_js_object->To(&extension_object)) {
- if (Failure::cast(maybe_js_object)->IsInternalError()) {
- HandleScope scope(isolate);
- Handle<Object> handle = args.at<Object>(0);
- Handle<Object> result =
- isolate->factory()->NewTypeError("with_expression",
- HandleVector(&handle, 1));
- return isolate->Throw(*result);
- } else {
- return maybe_js_object;
- }
- }
- }
-
- JSFunction* function;
- if (args[1]->IsSmi()) {
- // A smi sentinel indicates a context nested inside global code rather
- // than some function. There is a canonical empty function that can be
- // gotten from the native context.
- function = isolate->context()->native_context()->closure();
- } else {
- function = JSFunction::cast(args[1]);
- }
-
- Context* context;
- MaybeObject* maybe_context =
- isolate->heap()->AllocateWithContext(function,
- isolate->context(),
- extension_object);
- if (!maybe_context->To(&context)) return maybe_context;
- isolate->set_context(context);
- return context;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 3);
- String* name = String::cast(args[0]);
- Object* thrown_object = args[1];
- JSFunction* function;
- if (args[2]->IsSmi()) {
- // A smi sentinel indicates a context nested inside global code rather
- // than some function. There is a canonical empty function that can be
- // gotten from the native context.
- function = isolate->context()->native_context()->closure();
- } else {
- function = JSFunction::cast(args[2]);
- }
- Context* context;
- MaybeObject* maybe_context =
- isolate->heap()->AllocateCatchContext(function,
- isolate->context(),
- name,
- thrown_object);
- if (!maybe_context->To(&context)) return maybe_context;
- isolate->set_context(context);
- return context;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 2);
- ScopeInfo* scope_info = ScopeInfo::cast(args[0]);
- JSFunction* function;
- if (args[1]->IsSmi()) {
- // A smi sentinel indicates a context nested inside global code rather
- // than some function. There is a canonical empty function that can be
- // gotten from the native context.
- function = isolate->context()->native_context()->closure();
- } else {
- function = JSFunction::cast(args[1]);
- }
- Context* context;
- MaybeObject* maybe_context =
- isolate->heap()->AllocateBlockContext(function,
- isolate->context(),
- scope_info);
- if (!maybe_context->To(&context)) return maybe_context;
- isolate->set_context(context);
- return context;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
- ASSERT(args.length() == 1);
- Object* obj = args[0];
- return isolate->heap()->ToBoolean(obj->IsJSModule());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
- ASSERT(args.length() == 2);
- CONVERT_SMI_ARG_CHECKED(index, 0);
-
- if (!args[1]->IsScopeInfo()) {
- // Module already initialized. Find hosting context and retrieve context.
- Context* host = Context::cast(isolate->context())->global_context();
- Context* context = Context::cast(host->get(index));
- ASSERT(context->previous() == isolate->context());
- isolate->set_context(context);
- return context;
- }
-
- CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
-
- // Allocate module context.
- HandleScope scope(isolate);
- Factory* factory = isolate->factory();
- Handle<Context> context = factory->NewModuleContext(scope_info);
- Handle<JSModule> module = factory->NewJSModule(context, scope_info);
- context->set_module(*module);
- Context* previous = isolate->context();
- context->set_previous(previous);
- context->set_closure(previous->closure());
- context->set_global_object(previous->global_object());
- isolate->set_context(*context);
-
- // Find hosting scope and initialize internal variable holding module there.
- previous->global_context()->set(index, *context);
-
- return *context;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, descriptions, 0);
- Context* host_context = isolate->context();
-
- for (int i = 0; i < descriptions->length(); ++i) {
- Handle<ModuleInfo> description(ModuleInfo::cast(descriptions->get(i)));
- int host_index = description->host_index();
- Handle<Context> context(Context::cast(host_context->get(host_index)));
- Handle<JSModule> module(context->module());
-
- for (int j = 0; j < description->length(); ++j) {
- Handle<String> name(description->name(j));
- VariableMode mode = description->mode(j);
- int index = description->index(j);
- switch (mode) {
- case VAR:
- case LET:
- case CONST:
- case CONST_HARMONY: {
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? FROZEN : SEALED;
- Handle<AccessorInfo> info =
- Accessors::MakeModuleExport(name, index, attr);
- Handle<Object> result = SetAccessor(module, info);
- ASSERT(!(result.is_null() || result->IsUndefined()));
- USE(result);
- break;
- }
- case MODULE: {
- Object* referenced_context = Context::cast(host_context)->get(index);
- Handle<JSModule> value(Context::cast(referenced_context)->module());
- JSReceiver::SetProperty(module, name, value, FROZEN, kStrictMode);
- break;
- }
- case INTERNAL:
- case TEMPORARY:
- case DYNAMIC:
- case DYNAMIC_GLOBAL:
- case DYNAMIC_LOCAL:
- UNREACHABLE();
- }
- }
-
- JSObject::PreventExtensions(module);
- }
-
- ASSERT(!isolate->has_pending_exception());
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
-
- int index;
- PropertyAttributes attributes;
- ContextLookupFlags flags = FOLLOW_CHAINS;
- BindingFlags binding_flags;
- Handle<Object> holder = context->Lookup(name,
- flags,
- &index,
- &attributes,
- &binding_flags);
-
- // If the slot was not found the result is true.
- if (holder.is_null()) {
- return isolate->heap()->true_value();
- }
-
- // If the slot was found in a context, it should be DONT_DELETE.
- if (holder->IsContext()) {
- return isolate->heap()->false_value();
- }
-
- // The slot was found in a JSObject, either a context extension object,
- // the global object, or the subject of a with. Try to delete it
- // (respecting DONT_DELETE).
- Handle<JSObject> object = Handle<JSObject>::cast(holder);
- return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
-}
-
-
-// A mechanism to return a pair of Object pointers in registers (if possible).
-// How this is achieved is calling convention-dependent.
-// All currently supported x86 compiles uses calling conventions that are cdecl
-// variants where a 64-bit value is returned in two 32-bit registers
-// (edx:eax on ia32, r1:r0 on ARM).
-// In AMD-64 calling convention a struct of two pointers is returned in rdx:rax.
-// In Win64 calling convention, a struct of two pointers is returned in memory,
-// allocated by the caller, and passed as a pointer in a hidden first parameter.
-#ifdef V8_HOST_ARCH_64_BIT
-struct ObjectPair {
- MaybeObject* x;
- MaybeObject* y;
-};
-
-static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
- ObjectPair result = {x, y};
- // Pointers x and y returned in rax and rdx, in AMD-x64-abi.
- // In Win64 they are assigned to a hidden first argument.
- return result;
-}
-#else
-typedef uint64_t ObjectPair;
-static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
- return reinterpret_cast<uint32_t>(x) |
- (reinterpret_cast<ObjectPair>(y) << 32);
-}
-#endif
-
-
-static inline MaybeObject* Unhole(Heap* heap,
- MaybeObject* x,
- PropertyAttributes attributes) {
- ASSERT(!x->IsTheHole() || (attributes & READ_ONLY) != 0);
- USE(attributes);
- return x->IsTheHole() ? heap->undefined_value() : x;
-}
-
-
-static Object* ComputeReceiverForNonGlobal(Isolate* isolate,
- JSObject* holder) {
- ASSERT(!holder->IsGlobalObject());
- Context* top = isolate->context();
- // Get the context extension function.
- JSFunction* context_extension_function =
- top->native_context()->context_extension_function();
- // If the holder isn't a context extension object, we just return it
- // as the receiver. This allows arguments objects to be used as
- // receivers, but only if they are put in the context scope chain
- // explicitly via a with-statement.
- Object* constructor = holder->map()->constructor();
- if (constructor != context_extension_function) return holder;
- // Fall back to using the global object as the implicit receiver if
- // the property turns out to be a local variable allocated in a
- // context extension object - introduced via eval. Implicit global
- // receivers are indicated with the hole value.
- return isolate->heap()->the_hole_value();
-}
-
-
-static ObjectPair LoadContextSlotHelper(Arguments args,
- Isolate* isolate,
- bool throw_error) {
- HandleScope scope(isolate);
- ASSERT_EQ(2, args.length());
-
- if (!args[0]->IsContext() || !args[1]->IsString()) {
- return MakePair(isolate->ThrowIllegalOperation(), NULL);
- }
- Handle<Context> context = args.at<Context>(0);
- Handle<String> name = args.at<String>(1);
-
- int index;
- PropertyAttributes attributes;
- ContextLookupFlags flags = FOLLOW_CHAINS;
- BindingFlags binding_flags;
- Handle<Object> holder = context->Lookup(name,
- flags,
- &index,
- &attributes,
- &binding_flags);
-
- // If the index is non-negative, the slot has been found in a context.
- if (index >= 0) {
- ASSERT(holder->IsContext());
- // If the "property" we were looking for is a local variable, the
- // receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
- //
- // Use the hole as the receiver to signal that the receiver is implicit
- // and that the global receiver should be used (as distinguished from an
- // explicit receiver that happens to be a global object).
- Handle<Object> receiver = isolate->factory()->the_hole_value();
- Object* value = Context::cast(*holder)->get(index);
- // Check for uninitialized bindings.
- switch (binding_flags) {
- case MUTABLE_CHECK_INITIALIZED:
- case IMMUTABLE_CHECK_INITIALIZED_HARMONY:
- if (value->IsTheHole()) {
- Handle<Object> reference_error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return MakePair(isolate->Throw(*reference_error), NULL);
- }
- // FALLTHROUGH
- case MUTABLE_IS_INITIALIZED:
- case IMMUTABLE_IS_INITIALIZED:
- case IMMUTABLE_IS_INITIALIZED_HARMONY:
- ASSERT(!value->IsTheHole());
- return MakePair(value, *receiver);
- case IMMUTABLE_CHECK_INITIALIZED:
- return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
- case MISSING_BINDING:
- UNREACHABLE();
- return MakePair(NULL, NULL);
- }
- }
-
- // Otherwise, if the slot was found the holder is a context extension
- // object, subject of a with, or a global object. We read the named
- // property from it.
- if (!holder.is_null()) {
- Handle<JSObject> object = Handle<JSObject>::cast(holder);
- ASSERT(object->HasProperty(*name));
- // GetProperty below can cause GC.
- Handle<Object> receiver_handle(
- object->IsGlobalObject()
- ? GlobalObject::cast(*object)->global_receiver()
- : ComputeReceiverForNonGlobal(isolate, *object),
- isolate);
-
- // No need to unhole the value here. This is taken care of by the
- // GetProperty function.
- MaybeObject* value = object->GetProperty(*name);
- return MakePair(value, *receiver_handle);
- }
-
- if (throw_error) {
- // The property doesn't exist - throw exception.
- Handle<Object> reference_error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return MakePair(isolate->Throw(*reference_error), NULL);
- } else {
- // The property doesn't exist - return undefined.
- return MakePair(isolate->heap()->undefined_value(),
- isolate->heap()->undefined_value());
- }
-}
-
-
-RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlot) {
- return LoadContextSlotHelper(args, isolate, true);
-}
-
-
-RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlotNoReferenceError) {
- return LoadContextSlotHelper(args, isolate, false);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
-
- Handle<Object> value(args[0], isolate);
- CONVERT_ARG_HANDLE_CHECKED(Context, context, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
- CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
- StrictModeFlag strict_mode = (language_mode == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
-
- int index;
- PropertyAttributes attributes;
- ContextLookupFlags flags = FOLLOW_CHAINS;
- BindingFlags binding_flags;
- Handle<Object> holder = context->Lookup(name,
- flags,
- &index,
- &attributes,
- &binding_flags);
-
- if (index >= 0) {
- // The property was found in a context slot.
- Handle<Context> context = Handle<Context>::cast(holder);
- if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
- context->get(index)->IsTheHole()) {
- Handle<Object> error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return isolate->Throw(*error);
- }
- // Ignore if read_only variable.
- if ((attributes & READ_ONLY) == 0) {
- // Context is a fixed array and set cannot fail.
- context->set(index, *value);
- } else if (strict_mode == kStrictMode) {
- // Setting read only property in strict mode.
- Handle<Object> error =
- isolate->factory()->NewTypeError("strict_cannot_assign",
- HandleVector(&name, 1));
- return isolate->Throw(*error);
- }
- return *value;
- }
-
- // Slow case: The property is not in a context slot. It is either in a
- // context extension object, a property of the subject of a with, or a
- // property of the global object.
- Handle<JSObject> object;
-
- if (!holder.is_null()) {
- // The property exists on the holder.
- object = Handle<JSObject>::cast(holder);
- } else {
- // The property was not found.
- ASSERT(attributes == ABSENT);
-
- if (strict_mode == kStrictMode) {
- // Throw in strict mode (assignment to undefined variable).
- Handle<Object> error =
- isolate->factory()->NewReferenceError(
- "not_defined", HandleVector(&name, 1));
- return isolate->Throw(*error);
- }
- // In non-strict mode, the property is added to the global object.
- attributes = NONE;
- object = Handle<JSObject>(isolate->context()->global_object());
- }
-
- // Set the property if it's not read only or doesn't yet exist.
- if ((attributes & READ_ONLY) == 0 ||
- (object->GetLocalPropertyAttribute(*name) == ABSENT)) {
- RETURN_IF_EMPTY_HANDLE(
- isolate,
- JSReceiver::SetProperty(object, name, value, NONE, strict_mode));
- } else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
- // Setting read only property in strict mode.
- Handle<Object> error =
- isolate->factory()->NewTypeError(
- "strict_cannot_assign", HandleVector(&name, 1));
- return isolate->Throw(*error);
- }
- return *value;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Throw) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- return isolate->Throw(args[0]);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- return isolate->ReThrow(args[0]);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PromoteScheduledException) {
- ASSERT_EQ(0, args.length());
- return isolate->PromoteScheduledException();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowReferenceError) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- Handle<Object> name(args[0], isolate);
- Handle<Object> reference_error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return isolate->Throw(*reference_error);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 0);
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "not_date_object", HandleVector<Object>(NULL, 0)));
-}
-
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
- ASSERT(args.length() == 0);
-
- // First check if this is a real stack overflow.
- if (isolate->stack_guard()->IsStackOverflow()) {
- NoHandleAllocation na(isolate);
- return isolate->StackOverflow();
- }
-
- return Execution::HandleStackGuardInterrupt(isolate);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
- ASSERT(args.length() == 0);
- return Execution::HandleStackGuardInterrupt(isolate);
-}
-
-
-static int StackSize(Isolate* isolate) {
- int n = 0;
- for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) n++;
- return n;
-}
-
-
-static void PrintTransition(Isolate* isolate, Object* result) {
- // indentation
- { const int nmax = 80;
- int n = StackSize(isolate);
- if (n <= nmax)
- PrintF("%4d:%*s", n, n, "");
- else
- PrintF("%4d:%*s", n, nmax, "...");
- }
-
- if (result == NULL) {
- JavaScriptFrame::PrintTop(isolate, stdout, true, false);
- PrintF(" {\n");
- } else {
- // function result
- PrintF("} -> ");
- result->ShortPrint();
- PrintF("\n");
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
- ASSERT(args.length() == 0);
- NoHandleAllocation ha(isolate);
- PrintTransition(isolate, NULL);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceExit) {
- NoHandleAllocation ha(isolate);
- PrintTransition(isolate, args[0]);
- return args[0]; // return TOS
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
-#ifdef DEBUG
- if (args[0]->IsString()) {
- // If we have a string, assume it's a code "marker"
- // and print some interesting cpu debugging info.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- PrintF("fp = %p, sp = %p, caller_sp = %p: ",
- frame->fp(), frame->sp(), frame->caller_sp());
- } else {
- PrintF("DebugPrint: ");
- }
- args[0]->Print();
- if (args[0]->IsHeapObject()) {
- PrintF("\n");
- HeapObject::cast(args[0])->map()->Print();
- }
-#else
- // ShortPrint is available in release mode. Print is not.
- args[0]->ShortPrint();
-#endif
- PrintF("\n");
- Flush();
-
- return args[0]; // return TOS
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) {
- ASSERT(args.length() == 0);
- NoHandleAllocation ha(isolate);
- isolate->PrintStack();
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCurrentTime) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 0);
-
- // According to ECMA-262, section 15.9.1, page 117, the precision of
- // the number in a Date object representing a particular instant in
- // time is milliseconds. Therefore, we floor the result of getting
- // the OS time.
- double millis = floor(OS::TimeCurrentMillis());
- return isolate->heap()->NumberFromDouble(millis);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
- FlattenString(str);
-
- CONVERT_ARG_HANDLE_CHECKED(JSArray, output, 1);
-
- MaybeObject* maybe_result_array =
- output->EnsureCanContainHeapObjectElements();
- if (maybe_result_array->IsFailure()) return maybe_result_array;
- RUNTIME_ASSERT(output->HasFastObjectElements());
-
- AssertNoAllocation no_allocation;
-
- FixedArray* output_array = FixedArray::cast(output->elements());
- RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
- bool result;
- String::FlatContent str_content = str->GetFlatContent();
- if (str_content.IsAscii()) {
- result = DateParser::Parse(str_content.ToOneByteVector(),
- output_array,
- isolate->unicode_cache());
- } else {
- ASSERT(str_content.IsTwoByte());
- result = DateParser::Parse(str_content.ToUC16Vector(),
- output_array,
- isolate->unicode_cache());
- }
-
- if (result) {
- return *output;
- } else {
- return isolate->heap()->null_value();
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- int64_t time = isolate->date_cache()->EquivalentTime(static_cast<int64_t>(x));
- const char* zone = OS::LocalTimezone(static_cast<double>(time));
- return isolate->heap()->AllocateStringFromUtf8(CStrVector(zone));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- int64_t time = isolate->date_cache()->ToUTC(static_cast<int64_t>(x));
-
- return isolate->heap()->NumberFromDouble(static_cast<double>(time));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
- ASSERT(args.length() == 1);
- Object* global = args[0];
- if (!global->IsJSGlobalObject()) return isolate->heap()->null_value();
- return JSGlobalObject::cast(global)->global_receiver();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
- HandleScope scope(isolate);
- ASSERT_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
-
- Zone* zone = isolate->runtime_zone();
- source = Handle<String>(source->TryFlattenGetString());
- // Optimized fast case where we only have ASCII characters.
- Handle<Object> result;
- if (source->IsSeqOneByteString()) {
- result = JsonParser<true>::Parse(source, zone);
- } else {
- result = JsonParser<false>::Parse(source, zone);
- }
- if (result.is_null()) {
- // Syntax error or stack overflow in scanner.
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
- return *result;
-}
-
-
-bool CodeGenerationFromStringsAllowed(Isolate* isolate,
- Handle<Context> context) {
- ASSERT(context->allow_code_gen_from_strings()->IsFalse());
- // Check with callback if set.
- AllowCodeGenerationFromStringsCallback callback =
- isolate->allow_code_gen_callback();
- if (callback == NULL) {
- // No callback set and code generation disallowed.
- return false;
- } else {
- // Callback set. Let it decide if code generation is allowed.
- VMState state(isolate, EXTERNAL);
- return callback(v8::Utils::ToLocal(context));
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
- HandleScope scope(isolate);
- ASSERT_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
-
- // Extract native context.
- Handle<Context> context(isolate->context()->native_context());
-
- // Check if native context allows code generation from
- // strings. Throw an exception if it doesn't.
- if (context->allow_code_gen_from_strings()->IsFalse() &&
- !CodeGenerationFromStringsAllowed(isolate, context)) {
- Handle<Object> error_message =
- context->ErrorMessageForCodeGenerationFromStrings();
- return isolate->Throw(*isolate->factory()->NewEvalError(
- "code_gen_from_strings", HandleVector<Object>(&error_message, 1)));
- }
-
- // Compile source string in the native context.
- Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
- source, context, true, CLASSIC_MODE, RelocInfo::kNoPosition, false);
- if (shared.is_null()) return Failure::Exception();
- Handle<JSFunction> fun =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
- context,
- NOT_TENURED);
- return *fun;
-}
-
-
-static ObjectPair CompileGlobalEval(Isolate* isolate,
- Handle<String> source,
- Handle<Object> receiver,
- LanguageMode language_mode,
- int scope_position,
- bool qml_mode) {
- Handle<Context> context = Handle<Context>(isolate->context());
- Handle<Context> native_context = Handle<Context>(context->native_context());
-
- // Check if native context allows code generation from
- // strings. Throw an exception if it doesn't.
- if (native_context->allow_code_gen_from_strings()->IsFalse() &&
- !CodeGenerationFromStringsAllowed(isolate, native_context)) {
- Handle<Object> error_message =
- native_context->ErrorMessageForCodeGenerationFromStrings();
- isolate->Throw(*isolate->factory()->NewEvalError(
- "code_gen_from_strings", HandleVector<Object>(&error_message, 1)));
- return MakePair(Failure::Exception(), NULL);
- }
-
- // Deal with a normal eval call with a string argument. Compile it
- // and return the compiled function bound in the local context.
- Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
- source,
- Handle<Context>(isolate->context()),
- context->IsNativeContext(),
- language_mode,
- scope_position,
- qml_mode);
- if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
- Handle<JSFunction> compiled =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, NOT_TENURED);
- return MakePair(*compiled, *receiver);
-}
-
-
-RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
- ASSERT(args.length() == 6);
-
- HandleScope scope(isolate);
- Handle<Object> callee = args.at<Object>(0);
-
- // If "eval" didn't refer to the original GlobalEval, it's not a
- // direct call to eval.
- // (And even if it is, but the first argument isn't a string, just let
- // execution default to an indirect call to eval, which will also return
- // the first argument without doing anything).
- if (*callee != isolate->native_context()->global_eval_fun() ||
- !args[1]->IsString()) {
- return MakePair(*callee, isolate->heap()->the_hole_value());
- }
-
- CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
- ASSERT(args[4]->IsSmi());
- return CompileGlobalEval(isolate,
- args.at<String>(1),
- args.at<Object>(2),
- language_mode,
- args.smi_at(4),
- Smi::cast(args[5])->value());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNewFunctionAttributes) {
- // This utility adjusts the property attributes for newly created Function
- // object ("new Function(...)") by changing the map.
- // All it does is changing the prototype property to enumerable
- // as specified in ECMA262, 15.3.5.2.
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
-
- Handle<Map> map = func->shared()->is_classic_mode()
- ? isolate->function_instance_map()
- : isolate->strict_mode_function_instance_map();
-
- ASSERT(func->map()->instance_type() == map->instance_type());
- ASSERT(func->map()->instance_size() == map->instance_size());
- func->set_map(*map);
- return *func;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
- // Allocate a block of memory in NewSpace (filled with a filler).
- // Use as fallback for allocation in generated code when NewSpace
- // is full.
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
- int size = size_smi->value();
- RUNTIME_ASSERT(IsAligned(size, kPointerSize));
- RUNTIME_ASSERT(size > 0);
- Heap* heap = isolate->heap();
- const int kMinFreeNewSpaceAfterGC = heap->InitialSemiSpaceSize() * 3/4;
- RUNTIME_ASSERT(size <= kMinFreeNewSpaceAfterGC);
- Object* allocation;
- { MaybeObject* maybe_allocation = heap->new_space()->AllocateRaw(size);
- if (maybe_allocation->ToObject(&allocation)) {
- heap->CreateFillerObjectAt(HeapObject::cast(allocation)->address(), size);
- }
- return maybe_allocation;
- }
-}
-
-
-// Push an object unto an array of objects if it is not already in the
-// array. Returns true if the element was pushed on the stack and
-// false otherwise.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSArray, array, 0);
- CONVERT_ARG_CHECKED(JSReceiver, element, 1);
- RUNTIME_ASSERT(array->HasFastSmiOrObjectElements());
- int length = Smi::cast(array->length())->value();
- FixedArray* elements = FixedArray::cast(array->elements());
- for (int i = 0; i < length; i++) {
- if (elements->get(i) == element) return isolate->heap()->false_value();
- }
- Object* obj;
- // Strict not needed. Used for cycle detection in Array join implementation.
- { MaybeObject* maybe_obj =
- array->SetFastElement(length, element, kNonStrictMode, true);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return isolate->heap()->true_value();
-}
-
-
-/**
- * A simple visitor visits every element of Array's.
- * The backend storage can be a fixed array for fast elements case,
- * or a dictionary for sparse array. Since Dictionary is a subtype
- * of FixedArray, the class can be used by both fast and slow cases.
- * The second parameter of the constructor, fast_elements, specifies
- * whether the storage is a FixedArray or Dictionary.
- *
- * An index limit is used to deal with the situation that a result array
- * length overflows 32-bit non-negative integer.
- */
-class ArrayConcatVisitor {
- public:
- ArrayConcatVisitor(Isolate* isolate,
- Handle<FixedArray> storage,
- bool fast_elements) :
- isolate_(isolate),
- storage_(Handle<FixedArray>::cast(
- isolate->global_handles()->Create(*storage))),
- index_offset_(0u),
- fast_elements_(fast_elements) { }
-
- ~ArrayConcatVisitor() {
- clear_storage();
- }
-
- void visit(uint32_t i, Handle<Object> elm) {
- if (i >= JSObject::kMaxElementCount - index_offset_) return;
- uint32_t index = index_offset_ + i;
-
- if (fast_elements_) {
- if (index < static_cast<uint32_t>(storage_->length())) {
- storage_->set(index, *elm);
- return;
- }
- // Our initial estimate of length was foiled, possibly by
- // getters on the arrays increasing the length of later arrays
- // during iteration.
- // This shouldn't happen in anything but pathological cases.
- SetDictionaryMode(index);
- // Fall-through to dictionary mode.
- }
- ASSERT(!fast_elements_);
- Handle<SeededNumberDictionary> dict(
- SeededNumberDictionary::cast(*storage_));
- Handle<SeededNumberDictionary> result =
- isolate_->factory()->DictionaryAtNumberPut(dict, index, elm);
- if (!result.is_identical_to(dict)) {
- // Dictionary needed to grow.
- clear_storage();
- set_storage(*result);
- }
- }
-
- void increase_index_offset(uint32_t delta) {
- if (JSObject::kMaxElementCount - index_offset_ < delta) {
- index_offset_ = JSObject::kMaxElementCount;
- } else {
- index_offset_ += delta;
- }
- }
-
- Handle<JSArray> ToArray() {
- Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
- Handle<Object> length =
- isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
- Handle<Map> map;
- if (fast_elements_) {
- map = isolate_->factory()->GetElementsTransitionMap(array,
- FAST_HOLEY_ELEMENTS);
- } else {
- map = isolate_->factory()->GetElementsTransitionMap(array,
- DICTIONARY_ELEMENTS);
- }
- array->set_map(*map);
- array->set_length(*length);
- array->set_elements(*storage_);
- return array;
- }
-
- private:
- // Convert storage to dictionary mode.
- void SetDictionaryMode(uint32_t index) {
- ASSERT(fast_elements_);
- Handle<FixedArray> current_storage(*storage_);
- Handle<SeededNumberDictionary> slow_storage(
- isolate_->factory()->NewSeededNumberDictionary(
- current_storage->length()));
- uint32_t current_length = static_cast<uint32_t>(current_storage->length());
- for (uint32_t i = 0; i < current_length; i++) {
- HandleScope loop_scope(isolate_);
- Handle<Object> element(current_storage->get(i), isolate_);
- if (!element->IsTheHole()) {
- Handle<SeededNumberDictionary> new_storage =
- isolate_->factory()->DictionaryAtNumberPut(slow_storage, i, element);
- if (!new_storage.is_identical_to(slow_storage)) {
- slow_storage = loop_scope.CloseAndEscape(new_storage);
- }
- }
- }
- clear_storage();
- set_storage(*slow_storage);
- fast_elements_ = false;
- }
-
- inline void clear_storage() {
- isolate_->global_handles()->Destroy(
- Handle<Object>::cast(storage_).location());
- }
-
- inline void set_storage(FixedArray* storage) {
- storage_ = Handle<FixedArray>::cast(
- isolate_->global_handles()->Create(storage));
- }
-
- Isolate* isolate_;
- Handle<FixedArray> storage_; // Always a global handle.
- // Index after last seen index. Always less than or equal to
- // JSObject::kMaxElementCount.
- uint32_t index_offset_;
- bool fast_elements_;
-};
-
-
-static uint32_t EstimateElementCount(Handle<JSArray> array) {
- uint32_t length = static_cast<uint32_t>(array->length()->Number());
- int element_count = 0;
- switch (array->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- // Fast elements can't have lengths that are not representable by
- // a 32-bit signed integer.
- ASSERT(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
- int fast_length = static_cast<int>(length);
- Handle<FixedArray> elements(FixedArray::cast(array->elements()));
- for (int i = 0; i < fast_length; i++) {
- if (!elements->get(i)->IsTheHole()) element_count++;
- }
- break;
- }
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- // Fast elements can't have lengths that are not representable by
- // a 32-bit signed integer.
- ASSERT(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
- int fast_length = static_cast<int>(length);
- if (array->elements()->IsFixedArray()) {
- ASSERT(FixedArray::cast(array->elements())->length() == 0);
- break;
- }
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(array->elements()));
- for (int i = 0; i < fast_length; i++) {
- if (!elements->is_the_hole(i)) element_count++;
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> dictionary(
- SeededNumberDictionary::cast(array->elements()));
- int capacity = dictionary->Capacity();
- for (int i = 0; i < capacity; i++) {
- Handle<Object> key(dictionary->KeyAt(i), array->GetIsolate());
- if (dictionary->IsKey(*key)) {
- element_count++;
- }
- }
- break;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- // External arrays are always dense.
- return length;
- }
- // As an estimate, we assume that the prototype doesn't contain any
- // inherited elements.
- return element_count;
-}
-
-
-
-template<class ExternalArrayClass, class ElementType>
-static void IterateExternalArrayElements(Isolate* isolate,
- Handle<JSObject> receiver,
- bool elements_are_ints,
- bool elements_are_guaranteed_smis,
- ArrayConcatVisitor* visitor) {
- Handle<ExternalArrayClass> array(
- ExternalArrayClass::cast(receiver->elements()));
- uint32_t len = static_cast<uint32_t>(array->length());
-
- ASSERT(visitor != NULL);
- if (elements_are_ints) {
- if (elements_are_guaranteed_smis) {
- for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope(isolate);
- Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get_scalar(j))),
- isolate);
- visitor->visit(j, e);
- }
- } else {
- for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope(isolate);
- int64_t val = static_cast<int64_t>(array->get_scalar(j));
- if (Smi::IsValid(static_cast<intptr_t>(val))) {
- Handle<Smi> e(Smi::FromInt(static_cast<int>(val)), isolate);
- visitor->visit(j, e);
- } else {
- Handle<Object> e =
- isolate->factory()->NewNumber(static_cast<ElementType>(val));
- visitor->visit(j, e);
- }
- }
- }
- } else {
- for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope(isolate);
- Handle<Object> e = isolate->factory()->NewNumber(array->get_scalar(j));
- visitor->visit(j, e);
- }
- }
-}
-
-
-// Used for sorting indices in a List<uint32_t>.
-static int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
- uint32_t a = *ap;
- uint32_t b = *bp;
- return (a == b) ? 0 : (a < b) ? -1 : 1;
-}
-
-
-static void CollectElementIndices(Handle<JSObject> object,
- uint32_t range,
- List<uint32_t>* indices) {
- Isolate* isolate = object->GetIsolate();
- ElementsKind kind = object->GetElementsKind();
- switch (kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- Handle<FixedArray> elements(FixedArray::cast(object->elements()));
- uint32_t length = static_cast<uint32_t>(elements->length());
- if (range < length) length = range;
- for (uint32_t i = 0; i < length; i++) {
- if (!elements->get(i)->IsTheHole()) {
- indices->Add(i);
- }
- }
- break;
- }
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- // TODO(1810): Decide if it's worthwhile to implement this.
- UNREACHABLE();
- break;
- }
- case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> dict(
- SeededNumberDictionary::cast(object->elements()));
- uint32_t capacity = dict->Capacity();
- for (uint32_t j = 0; j < capacity; j++) {
- HandleScope loop_scope(isolate);
- Handle<Object> k(dict->KeyAt(j), isolate);
- if (dict->IsKey(*k)) {
- ASSERT(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
- if (index < range) {
- indices->Add(index);
- }
- }
- }
- break;
- }
- default: {
- int dense_elements_length;
- switch (kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- dense_elements_length =
- ExternalPixelArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS: {
- dense_elements_length =
- ExternalByteArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedByteArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_SHORT_ELEMENTS: {
- dense_elements_length =
- ExternalShortArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedShortArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_INT_ELEMENTS: {
- dense_elements_length =
- ExternalIntArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedIntArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS: {
- dense_elements_length =
- ExternalFloatArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_DOUBLE_ELEMENTS: {
- dense_elements_length =
- ExternalDoubleArray::cast(object->elements())->length();
- break;
- }
- default:
- UNREACHABLE();
- dense_elements_length = 0;
- break;
- }
- uint32_t length = static_cast<uint32_t>(dense_elements_length);
- if (range <= length) {
- length = range;
- // We will add all indices, so we might as well clear it first
- // and avoid duplicates.
- indices->Clear();
- }
- for (uint32_t i = 0; i < length; i++) {
- indices->Add(i);
- }
- if (length == range) return; // All indices accounted for already.
- break;
- }
- }
-
- Handle<Object> prototype(object->GetPrototype(), isolate);
- if (prototype->IsJSObject()) {
- // The prototype will usually have no inherited element indices,
- // but we have to check.
- CollectElementIndices(Handle<JSObject>::cast(prototype), range, indices);
- }
-}
-
-
-/**
- * A helper function that visits elements of a JSArray in numerical
- * order.
- *
- * The visitor argument called for each existing element in the array
- * with the element index and the element's value.
- * Afterwards it increments the base-index of the visitor by the array
- * length.
- * Returns false if any access threw an exception, otherwise true.
- */
-static bool IterateElements(Isolate* isolate,
- Handle<JSArray> receiver,
- ArrayConcatVisitor* visitor) {
- uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
- switch (receiver->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- // Run through the elements FixedArray and use HasElement and GetElement
- // to check the prototype for missing elements.
- Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
- int fast_length = static_cast<int>(length);
- ASSERT(fast_length <= elements->length());
- for (int j = 0; j < fast_length; j++) {
- HandleScope loop_scope(isolate);
- Handle<Object> element_value(elements->get(j), isolate);
- if (!element_value->IsTheHole()) {
- visitor->visit(j, element_value);
- } else if (receiver->HasElement(j)) {
- // Call GetElement on receiver, not its prototype, or getters won't
- // have the correct receiver.
- element_value = Object::GetElement(receiver, j);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
- visitor->visit(j, element_value);
- }
- }
- break;
- }
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- // Run through the elements FixedArray and use HasElement and GetElement
- // to check the prototype for missing elements.
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(receiver->elements()));
- int fast_length = static_cast<int>(length);
- ASSERT(fast_length <= elements->length());
- for (int j = 0; j < fast_length; j++) {
- HandleScope loop_scope(isolate);
- if (!elements->is_the_hole(j)) {
- double double_value = elements->get_scalar(j);
- Handle<Object> element_value =
- isolate->factory()->NewNumber(double_value);
- visitor->visit(j, element_value);
- } else if (receiver->HasElement(j)) {
- // Call GetElement on receiver, not its prototype, or getters won't
- // have the correct receiver.
- Handle<Object> element_value = Object::GetElement(receiver, j);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
- visitor->visit(j, element_value);
- }
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> dict(receiver->element_dictionary());
- List<uint32_t> indices(dict->Capacity() / 2);
- // Collect all indices in the object and the prototypes less
- // than length. This might introduce duplicates in the indices list.
- CollectElementIndices(receiver, length, &indices);
- indices.Sort(&compareUInt32);
- int j = 0;
- int n = indices.length();
- while (j < n) {
- HandleScope loop_scope(isolate);
- uint32_t index = indices[j];
- Handle<Object> element = Object::GetElement(receiver, index);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element, false);
- visitor->visit(index, element);
- // Skip to next different index (i.e., omit duplicates).
- do {
- j++;
- } while (j < n && indices[j] == index);
- }
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- Handle<ExternalPixelArray> pixels(ExternalPixelArray::cast(
- receiver->elements()));
- for (uint32_t j = 0; j < length; j++) {
- Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
- visitor->visit(j, e);
- }
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS: {
- IterateExternalArrayElements<ExternalByteArray, int8_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
- case EXTERNAL_SHORT_ELEMENTS: {
- IterateExternalArrayElements<ExternalShortArray, int16_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
- case EXTERNAL_INT_ELEMENTS: {
- IterateExternalArrayElements<ExternalIntArray, int32_t>(
- isolate, receiver, true, false, visitor);
- break;
- }
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>(
- isolate, receiver, true, false, visitor);
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS: {
- IterateExternalArrayElements<ExternalFloatArray, float>(
- isolate, receiver, false, false, visitor);
- break;
- }
- case EXTERNAL_DOUBLE_ELEMENTS: {
- IterateExternalArrayElements<ExternalDoubleArray, double>(
- isolate, receiver, false, false, visitor);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- visitor->increase_index_offset(length);
- return true;
-}
-
-
-/**
- * Array::concat implementation.
- * See ECMAScript 262, 15.4.4.4.
- * TODO(581): Fix non-compliance for very large concatenations and update to
- * following the ECMAScript 5 specification.
- */
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
- ASSERT(args.length() == 1);
- HandleScope handle_scope(isolate);
-
- CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 0);
- int argument_count = static_cast<int>(arguments->length()->Number());
- RUNTIME_ASSERT(arguments->HasFastObjectElements());
- Handle<FixedArray> elements(FixedArray::cast(arguments->elements()));
-
- // Pass 1: estimate the length and number of elements of the result.
- // The actual length can be larger if any of the arguments have getters
- // that mutate other arguments (but will otherwise be precise).
- // The number of elements is precise if there are no inherited elements.
-
- ElementsKind kind = FAST_SMI_ELEMENTS;
-
- uint32_t estimate_result_length = 0;
- uint32_t estimate_nof_elements = 0;
- for (int i = 0; i < argument_count; i++) {
- HandleScope loop_scope(isolate);
- Handle<Object> obj(elements->get(i), isolate);
- uint32_t length_estimate;
- uint32_t element_estimate;
- if (obj->IsJSArray()) {
- Handle<JSArray> array(Handle<JSArray>::cast(obj));
- length_estimate = static_cast<uint32_t>(array->length()->Number());
- if (length_estimate != 0) {
- ElementsKind array_kind =
- GetPackedElementsKind(array->map()->elements_kind());
- if (IsMoreGeneralElementsKindTransition(kind, array_kind)) {
- kind = array_kind;
- }
- }
- element_estimate = EstimateElementCount(array);
- } else {
- if (obj->IsHeapObject()) {
- if (obj->IsNumber()) {
- if (IsMoreGeneralElementsKindTransition(kind, FAST_DOUBLE_ELEMENTS)) {
- kind = FAST_DOUBLE_ELEMENTS;
- }
- } else if (IsMoreGeneralElementsKindTransition(kind, FAST_ELEMENTS)) {
- kind = FAST_ELEMENTS;
- }
- }
- length_estimate = 1;
- element_estimate = 1;
- }
- // Avoid overflows by capping at kMaxElementCount.
- if (JSObject::kMaxElementCount - estimate_result_length <
- length_estimate) {
- estimate_result_length = JSObject::kMaxElementCount;
- } else {
- estimate_result_length += length_estimate;
- }
- if (JSObject::kMaxElementCount - estimate_nof_elements <
- element_estimate) {
- estimate_nof_elements = JSObject::kMaxElementCount;
- } else {
- estimate_nof_elements += element_estimate;
- }
- }
-
- // If estimated number of elements is more than half of length, a
- // fixed array (fast case) is more time and space-efficient than a
- // dictionary.
- bool fast_case = (estimate_nof_elements * 2) >= estimate_result_length;
-
- Handle<FixedArray> storage;
- if (fast_case) {
- if (kind == FAST_DOUBLE_ELEMENTS) {
- Handle<FixedDoubleArray> double_storage =
- isolate->factory()->NewFixedDoubleArray(estimate_result_length);
- int j = 0;
- bool failure = false;
- for (int i = 0; i < argument_count; i++) {
- Handle<Object> obj(elements->get(i), isolate);
- if (obj->IsSmi()) {
- double_storage->set(j, Smi::cast(*obj)->value());
- j++;
- } else if (obj->IsNumber()) {
- double_storage->set(j, obj->Number());
- j++;
- } else {
- JSArray* array = JSArray::cast(*obj);
- uint32_t length = static_cast<uint32_t>(array->length()->Number());
- switch (array->map()->elements_kind()) {
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- // Empty fixed array indicates that there are no elements.
- if (array->elements()->IsFixedArray()) break;
- FixedDoubleArray* elements =
- FixedDoubleArray::cast(array->elements());
- for (uint32_t i = 0; i < length; i++) {
- if (elements->is_the_hole(i)) {
- failure = true;
- break;
- }
- double double_value = elements->get_scalar(i);
- double_storage->set(j, double_value);
- j++;
- }
- break;
- }
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS: {
- FixedArray* elements(
- FixedArray::cast(array->elements()));
- for (uint32_t i = 0; i < length; i++) {
- Object* element = elements->get(i);
- if (element->IsTheHole()) {
- failure = true;
- break;
- }
- int32_t int_value = Smi::cast(element)->value();
- double_storage->set(j, int_value);
- j++;
- }
- break;
- }
- case FAST_HOLEY_ELEMENTS:
- ASSERT_EQ(0, length);
- break;
- default:
- UNREACHABLE();
- }
- }
- if (failure) break;
- }
- Handle<JSArray> array = isolate->factory()->NewJSArray(0);
- Smi* length = Smi::FromInt(j);
- Handle<Map> map;
- map = isolate->factory()->GetElementsTransitionMap(array, kind);
- array->set_map(*map);
- array->set_length(length);
- array->set_elements(*double_storage);
- return *array;
- }
- // The backing storage array must have non-existing elements to preserve
- // holes across concat operations.
- storage = isolate->factory()->NewFixedArrayWithHoles(
- estimate_result_length);
- } else {
- // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
- uint32_t at_least_space_for = estimate_nof_elements +
- (estimate_nof_elements >> 2);
- storage = Handle<FixedArray>::cast(
- isolate->factory()->NewSeededNumberDictionary(at_least_space_for));
- }
-
- ArrayConcatVisitor visitor(isolate, storage, fast_case);
-
- for (int i = 0; i < argument_count; i++) {
- Handle<Object> obj(elements->get(i), isolate);
- if (obj->IsJSArray()) {
- Handle<JSArray> array = Handle<JSArray>::cast(obj);
- if (!IterateElements(isolate, array, &visitor)) {
- return Failure::Exception();
- }
- } else {
- visitor.visit(0, obj);
- visitor.increase_index_offset(1);
- }
- }
-
- return *visitor.ToArray();
-}
-
-
-// This will not allocate (flatten the string), but it may run
-// very slowly for very deeply nested ConsStrings. For debugging use only.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(String, string, 0);
- ConsStringIteratorOp op;
- StringCharacterStream stream(string, &op);
- while (stream.HasMore()) {
- uint16_t character = stream.GetNext();
- PrintF("%c", character);
- }
- return string;
-}
-
-// Moves all own elements of an object, that are below a limit, to positions
-// starting at zero. All undefined values are placed after non-undefined values,
-// and are followed by non-existing element. Does not change the length
-// property.
-// Returns the number of non-undefined elements collected.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
- return object->PrepareElementsForSort(limit);
-}
-
-
-// Move contents of argument 0 (an array) to argument 1 (an array)
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSArray, from, 0);
- CONVERT_ARG_CHECKED(JSArray, to, 1);
- from->ValidateElements();
- to->ValidateElements();
- FixedArrayBase* new_elements = from->elements();
- ElementsKind from_kind = from->GetElementsKind();
- MaybeObject* maybe_new_map;
- maybe_new_map = to->GetElementsTransitionMap(isolate, from_kind);
- Object* new_map;
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- to->set_map_and_elements(Map::cast(new_map), new_elements);
- to->set_length(from->length());
- Object* obj;
- { MaybeObject* maybe_obj = from->ResetElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- from->set_length(Smi::FromInt(0));
- to->ValidateElements();
- return to;
-}
-
-
-// How many elements does this object/array have?
-RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- HeapObject* elements = object->elements();
- if (elements->IsDictionary()) {
- int result = SeededNumberDictionary::cast(elements)->NumberOfElements();
- return Smi::FromInt(result);
- } else if (object->IsJSArray()) {
- return JSArray::cast(object)->length();
- } else {
- return Smi::FromInt(FixedArray::cast(elements)->length());
- }
-}
-
-
-// Returns an array that tells you where in the [0, length) interval an array
-// might have elements. Can either return keys (positive integers) or
-// intervals (pair of a negative integer (-start-1) followed by a
-// positive (length)) or undefined values.
-// Intervals can span over some keys that are not in the object.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
- if (array->elements()->IsDictionary()) {
- // Create an array and get all the keys into it, then remove all the
- // keys that are not integers in the range 0 to length-1.
- bool threw = false;
- Handle<FixedArray> keys =
- GetKeysInFixedArrayFor(array, INCLUDE_PROTOS, &threw);
- if (threw) return Failure::Exception();
-
- int keys_length = keys->length();
- for (int i = 0; i < keys_length; i++) {
- Object* key = keys->get(i);
- uint32_t index = 0;
- if (!key->ToArrayIndex(&index) || index >= length) {
- // Zap invalid keys.
- keys->set_undefined(i);
- }
- }
- return *isolate->factory()->NewJSArrayWithElements(keys);
- } else {
- ASSERT(array->HasFastSmiOrObjectElements() ||
- array->HasFastDoubleElements());
- Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
- // -1 means start of array.
- single_interval->set(0, Smi::FromInt(-1));
- FixedArrayBase* elements = FixedArrayBase::cast(array->elements());
- uint32_t actual_length =
- static_cast<uint32_t>(elements->length());
- uint32_t min_length = actual_length < length ? actual_length : length;
- Handle<Object> length_object =
- isolate->factory()->NewNumber(static_cast<double>(min_length));
- single_interval->set(1, *length_object);
- return *isolate->factory()->NewJSArrayWithElements(single_interval);
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_CHECKED(String, name, 1);
- CONVERT_SMI_ARG_CHECKED(flag, 2);
- AccessorComponent component = flag == 0 ? ACCESSOR_GETTER : ACCESSOR_SETTER;
- if (!receiver->IsJSObject()) return isolate->heap()->undefined_value();
- return JSObject::cast(receiver)->LookupAccessor(name, component);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugBreak) {
- ASSERT(args.length() == 0);
- return Execution::DebugBreakHelper();
-}
-
-
-// Helper functions for wrapping and unwrapping stack frame ids.
-static Smi* WrapFrameId(StackFrame::Id id) {
- ASSERT(IsAligned(OffsetFrom(id), static_cast<intptr_t>(4)));
- return Smi::FromInt(id >> 2);
-}
-
-
-static StackFrame::Id UnwrapFrameId(int wrapped) {
- return static_cast<StackFrame::Id>(wrapped << 2);
-}
-
-
-// Adds a JavaScript function as a debug event listener.
-// args[0]: debug event listener function to set or null or undefined for
-// clearing the event listener function
-// args[1]: object supplied during callback
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) {
- ASSERT(args.length() == 2);
- RUNTIME_ASSERT(args[0]->IsJSFunction() ||
- args[0]->IsUndefined() ||
- args[0]->IsNull());
- Handle<Object> callback = args.at<Object>(0);
- Handle<Object> data = args.at<Object>(1);
- isolate->debugger()->SetEventListener(callback, data);
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Break) {
- ASSERT(args.length() == 0);
- isolate->stack_guard()->DebugBreak();
- return isolate->heap()->undefined_value();
-}
-
-
-static MaybeObject* DebugLookupResultValue(Heap* heap,
- Object* receiver,
- String* name,
- LookupResult* result,
- bool* caught_exception) {
- Object* value;
- switch (result->type()) {
- case NORMAL:
- value = result->holder()->GetNormalizedProperty(result);
- if (value->IsTheHole()) {
- return heap->undefined_value();
- }
- return value;
- case FIELD:
- value =
- JSObject::cast(result->holder())->FastPropertyAt(
- result->GetFieldIndex().field_index());
- if (value->IsTheHole()) {
- return heap->undefined_value();
- }
- return value;
- case CONSTANT_FUNCTION:
- return result->GetConstantFunction();
- case CALLBACKS: {
- Object* structure = result->GetCallbackObject();
- if (structure->IsForeign() || structure->IsAccessorInfo()) {
- MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback(
- receiver, structure, name);
- if (!maybe_value->ToObject(&value)) {
- if (maybe_value->IsRetryAfterGC()) return maybe_value;
- ASSERT(maybe_value->IsException());
- maybe_value = heap->isolate()->pending_exception();
- heap->isolate()->clear_pending_exception();
- if (caught_exception != NULL) {
- *caught_exception = true;
- }
- return maybe_value;
- }
- return value;
- } else {
- return heap->undefined_value();
- }
- }
- case INTERCEPTOR:
- case TRANSITION:
- return heap->undefined_value();
- case HANDLER:
- case NONEXISTENT:
- UNREACHABLE();
- return heap->undefined_value();
- }
- UNREACHABLE(); // keep the compiler happy
- return heap->undefined_value();
-}
-
-
-// Get debugger related details for an object property.
-// args[0]: object holding property
-// args[1]: name of the property
-//
-// The array returned contains the following information:
-// 0: Property value
-// 1: Property details
-// 2: Property value is exception
-// 3: Getter function if defined
-// 4: Setter function if defined
-// Items 2-4 are only filled if the property has either a getter or a setter
-// defined through __defineGetter__ and/or __defineSetter__.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
- HandleScope scope(isolate);
-
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
-
- // Make sure to set the current context to the context before the debugger was
- // entered (if the debugger is entered). The reason for switching context here
- // is that for some property lookups (accessors and interceptors) callbacks
- // into the embedding application can occour, and the embedding application
- // could have the assumption that its own native context is the current
- // context and not some internal debugger context.
- SaveContext save(isolate);
- if (isolate->debug()->InDebugger()) {
- isolate->set_context(*isolate->debug()->debugger_entry()->GetContext());
- }
-
- // Skip the global proxy as it has no properties and always delegates to the
- // real global object.
- if (obj->IsJSGlobalProxy()) {
- obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
- }
-
-
- // Check if the name is trivially convertible to an index and get the element
- // if so.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- Handle<FixedArray> details = isolate->factory()->NewFixedArray(2);
- Object* element_or_char;
- { MaybeObject* maybe_element_or_char =
- Runtime::GetElementOrCharAt(isolate, obj, index);
- if (!maybe_element_or_char->ToObject(&element_or_char)) {
- return maybe_element_or_char;
- }
- }
- details->set(0, element_or_char);
- details->set(1, PropertyDetails(NONE, NORMAL).AsSmi());
- return *isolate->factory()->NewJSArrayWithElements(details);
- }
-
- // Find the number of objects making up this.
- int length = LocalPrototypeChainLength(*obj);
-
- // Try local lookup on each of the objects.
- Handle<JSObject> jsproto = obj;
- for (int i = 0; i < length; i++) {
- LookupResult result(isolate);
- jsproto->LocalLookup(*name, &result);
- if (result.IsFound()) {
- // LookupResult is not GC safe as it holds raw object pointers.
- // GC can happen later in this code so put the required fields into
- // local variables using handles when required for later use.
- Handle<Object> result_callback_obj;
- if (result.IsPropertyCallbacks()) {
- result_callback_obj = Handle<Object>(result.GetCallbackObject(),
- isolate);
- }
- Smi* property_details = result.GetPropertyDetails().AsSmi();
- // DebugLookupResultValue can cause GC so details from LookupResult needs
- // to be copied to handles before this.
- bool caught_exception = false;
- Object* raw_value;
- { MaybeObject* maybe_raw_value =
- DebugLookupResultValue(isolate->heap(), *obj, *name,
- &result, &caught_exception);
- if (!maybe_raw_value->ToObject(&raw_value)) return maybe_raw_value;
- }
- Handle<Object> value(raw_value, isolate);
-
- // If the callback object is a fixed array then it contains JavaScript
- // getter and/or setter.
- bool hasJavaScriptAccessors = result.IsPropertyCallbacks() &&
- result_callback_obj->IsAccessorPair();
- Handle<FixedArray> details =
- isolate->factory()->NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
- details->set(0, *value);
- details->set(1, property_details);
- if (hasJavaScriptAccessors) {
- AccessorPair* accessors = AccessorPair::cast(*result_callback_obj);
- details->set(2, isolate->heap()->ToBoolean(caught_exception));
- details->set(3, accessors->GetComponent(ACCESSOR_GETTER));
- details->set(4, accessors->GetComponent(ACCESSOR_SETTER));
- }
-
- return *isolate->factory()->NewJSArrayWithElements(details);
- }
- if (i < length - 1) {
- jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
- }
- }
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
- HandleScope scope(isolate);
-
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
-
- LookupResult result(isolate);
- obj->Lookup(*name, &result);
- if (result.IsFound()) {
- return DebugLookupResultValue(isolate->heap(), *obj, *name, &result, NULL);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-// Return the property type calculated from the property details.
-// args[0]: smi with property details.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
- ASSERT(args.length() == 1);
- CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
- return Smi::FromInt(static_cast<int>(details.type()));
-}
-
-
-// Return the property attribute calculated from the property details.
-// args[0]: smi with property details.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
- ASSERT(args.length() == 1);
- CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
- return Smi::FromInt(static_cast<int>(details.attributes()));
-}
-
-
-// Return the property insertion index calculated from the property details.
-// args[0]: smi with property details.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
- ASSERT(args.length() == 1);
- CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
- // TODO(verwaest): Depends on the type of details.
- return Smi::FromInt(details.dictionary_index());
-}
-
-
-// Return property value from named interceptor.
-// args[0]: object
-// args[1]: property name
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- RUNTIME_ASSERT(obj->HasNamedInterceptor());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
-
- PropertyAttributes attributes;
- return obj->GetPropertyWithInterceptor(*obj, *name, &attributes);
-}
-
-
-// Return element value from indexed interceptor.
-// args[0]: object
-// args[1]: index
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- RUNTIME_ASSERT(obj->HasIndexedInterceptor());
- CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
-
- return obj->GetElementWithInterceptor(*obj, index);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckExecutionState) {
- ASSERT(args.length() >= 1);
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- // Check that the break id is valid.
- if (isolate->debug()->break_id() == 0 ||
- break_id != isolate->debug()->break_id()) {
- return isolate->Throw(
- isolate->heap()->illegal_execution_state_string());
- }
-
- return isolate->heap()->true_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameCount) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- // Check arguments.
- Object* result;
- { MaybeObject* maybe_result = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Count all frames which are relevant to debugging stack trace.
- int n = 0;
- StackFrame::Id id = isolate->debug()->break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there is no JavaScript stack frame count is 0.
- return Smi::FromInt(0);
- }
-
- for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) {
- n += it.frame()->GetInlineCount();
- }
- return Smi::FromInt(n);
-}
-
-
-class FrameInspector {
- public:
- FrameInspector(JavaScriptFrame* frame,
- int inlined_jsframe_index,
- Isolate* isolate)
- : frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) {
- // Calculate the deoptimized frame.
- if (frame->is_optimized()) {
- deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame(
- frame, inlined_jsframe_index, isolate);
- }
- has_adapted_arguments_ = frame_->has_adapted_arguments();
- is_bottommost_ = inlined_jsframe_index == 0;
- is_optimized_ = frame_->is_optimized();
- }
-
- ~FrameInspector() {
- // Get rid of the calculated deoptimized frame if any.
- if (deoptimized_frame_ != NULL) {
- Deoptimizer::DeleteDebuggerInspectableFrame(deoptimized_frame_,
- isolate_);
- }
- }
-
- int GetParametersCount() {
- return is_optimized_
- ? deoptimized_frame_->parameters_count()
- : frame_->ComputeParametersCount();
- }
- int expression_count() { return deoptimized_frame_->expression_count(); }
- Object* GetFunction() {
- return is_optimized_
- ? deoptimized_frame_->GetFunction()
- : frame_->function();
- }
- Object* GetParameter(int index) {
- return is_optimized_
- ? deoptimized_frame_->GetParameter(index)
- : frame_->GetParameter(index);
- }
- Object* GetExpression(int index) {
- return is_optimized_
- ? deoptimized_frame_->GetExpression(index)
- : frame_->GetExpression(index);
- }
- int GetSourcePosition() {
- return is_optimized_
- ? deoptimized_frame_->GetSourcePosition()
- : frame_->LookupCode()->SourcePosition(frame_->pc());
- }
- bool IsConstructor() {
- return is_optimized_ && !is_bottommost_
- ? deoptimized_frame_->HasConstructStub()
- : frame_->IsConstructor();
- }
-
- // To inspect all the provided arguments the frame might need to be
- // replaced with the arguments frame.
- void SetArgumentsFrame(JavaScriptFrame* frame) {
- ASSERT(has_adapted_arguments_);
- frame_ = frame;
- is_optimized_ = frame_->is_optimized();
- ASSERT(!is_optimized_);
- }
-
- private:
- JavaScriptFrame* frame_;
- DeoptimizedFrameInfo* deoptimized_frame_;
- Isolate* isolate_;
- bool is_optimized_;
- bool is_bottommost_;
- bool has_adapted_arguments_;
-
- DISALLOW_COPY_AND_ASSIGN(FrameInspector);
-};
-
-
-static const int kFrameDetailsFrameIdIndex = 0;
-static const int kFrameDetailsReceiverIndex = 1;
-static const int kFrameDetailsFunctionIndex = 2;
-static const int kFrameDetailsArgumentCountIndex = 3;
-static const int kFrameDetailsLocalCountIndex = 4;
-static const int kFrameDetailsSourcePositionIndex = 5;
-static const int kFrameDetailsConstructCallIndex = 6;
-static const int kFrameDetailsAtReturnIndex = 7;
-static const int kFrameDetailsFlagsIndex = 8;
-static const int kFrameDetailsFirstDynamicIndex = 9;
-
-
-static SaveContext* FindSavedContextForFrame(Isolate* isolate,
- JavaScriptFrame* frame) {
- SaveContext* save = isolate->save_context();
- while (save != NULL && !save->IsBelowFrame(frame)) {
- save = save->prev();
- }
- ASSERT(save != NULL);
- return save;
-}
-
-
-// Return an array with frame details
-// args[0]: number: break id
-// args[1]: number: frame index
-//
-// The array returned contains the following information:
-// 0: Frame id
-// 1: Receiver
-// 2: Function
-// 3: Argument count
-// 4: Local count
-// 5: Source position
-// 6: Constructor call
-// 7: Is at return
-// 8: Flags
-// Arguments name, value
-// Locals name, value
-// Return value if any
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
- Heap* heap = isolate->heap();
-
- // Find the relevant frame with the requested index.
- StackFrame::Id id = isolate->debug()->break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there are no JavaScript stack frames return undefined.
- return heap->undefined_value();
- }
-
- int count = 0;
- JavaScriptFrameIterator it(isolate, id);
- for (; !it.done(); it.Advance()) {
- if (index < count + it.frame()->GetInlineCount()) break;
- count += it.frame()->GetInlineCount();
- }
- if (it.done()) return heap->undefined_value();
-
- bool is_optimized = it.frame()->is_optimized();
-
- int inlined_jsframe_index = 0; // Inlined frame index in optimized frame.
- if (is_optimized) {
- inlined_jsframe_index =
- it.frame()->GetInlineCount() - (index - count) - 1;
- }
- FrameInspector frame_inspector(it.frame(), inlined_jsframe_index, isolate);
-
- // Traverse the saved contexts chain to find the active context for the
- // selected frame.
- SaveContext* save = FindSavedContextForFrame(isolate, it.frame());
-
- // Get the frame id.
- Handle<Object> frame_id(WrapFrameId(it.frame()->id()), isolate);
-
- // Find source position in unoptimized code.
- int position = frame_inspector.GetSourcePosition();
-
- // Check for constructor frame.
- bool constructor = frame_inspector.IsConstructor();
-
- // Get scope info and read from it for local variable information.
- Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
- ASSERT(*scope_info != ScopeInfo::Empty(isolate));
-
- // Get the locals names and values into a temporary array.
- //
- // TODO(1240907): Hide compiler-introduced stack variables
- // (e.g. .result)? For users of the debugger, they will probably be
- // confusing.
- Handle<FixedArray> locals =
- isolate->factory()->NewFixedArray(scope_info->LocalCount() * 2);
-
- // Fill in the values of the locals.
- int i = 0;
- for (; i < scope_info->StackLocalCount(); ++i) {
- // Use the value from the stack.
- locals->set(i * 2, scope_info->LocalName(i));
- locals->set(i * 2 + 1, frame_inspector.GetExpression(i));
- }
- if (i < scope_info->LocalCount()) {
- // Get the context containing declarations.
- Handle<Context> context(
- Context::cast(it.frame()->context())->declaration_context());
- for (; i < scope_info->LocalCount(); ++i) {
- Handle<String> name(scope_info->LocalName(i));
- VariableMode mode;
- InitializationFlag init_flag;
- locals->set(i * 2, *name);
- locals->set(i * 2 + 1, context->get(
- scope_info->ContextSlotIndex(*name, &mode, &init_flag)));
- }
- }
-
- // Check whether this frame is positioned at return. If not top
- // frame or if the frame is optimized it cannot be at a return.
- bool at_return = false;
- if (!is_optimized && index == 0) {
- at_return = isolate->debug()->IsBreakAtReturn(it.frame());
- }
-
- // If positioned just before return find the value to be returned and add it
- // to the frame information.
- Handle<Object> return_value = isolate->factory()->undefined_value();
- if (at_return) {
- StackFrameIterator it2(isolate);
- Address internal_frame_sp = NULL;
- while (!it2.done()) {
- if (it2.frame()->is_internal()) {
- internal_frame_sp = it2.frame()->sp();
- } else {
- if (it2.frame()->is_java_script()) {
- if (it2.frame()->id() == it.frame()->id()) {
- // The internal frame just before the JavaScript frame contains the
- // value to return on top. A debug break at return will create an
- // internal frame to store the return value (eax/rax/r0) before
- // entering the debug break exit frame.
- if (internal_frame_sp != NULL) {
- return_value =
- Handle<Object>(Memory::Object_at(internal_frame_sp),
- isolate);
- break;
- }
- }
- }
-
- // Indicate that the previous frame was not an internal frame.
- internal_frame_sp = NULL;
- }
- it2.Advance();
- }
- }
-
- // Now advance to the arguments adapter frame (if any). It contains all
- // the provided parameters whereas the function frame always have the number
- // of arguments matching the functions parameters. The rest of the
- // information (except for what is collected above) is the same.
- if ((inlined_jsframe_index == 0) && it.frame()->has_adapted_arguments()) {
- it.AdvanceToArgumentsFrame();
- frame_inspector.SetArgumentsFrame(it.frame());
- }
-
- // Find the number of arguments to fill. At least fill the number of
- // parameters for the function and fill more if more parameters are provided.
- int argument_count = scope_info->ParameterCount();
- if (argument_count < frame_inspector.GetParametersCount()) {
- argument_count = frame_inspector.GetParametersCount();
- }
-
- // Calculate the size of the result.
- int details_size = kFrameDetailsFirstDynamicIndex +
- 2 * (argument_count + scope_info->LocalCount()) +
- (at_return ? 1 : 0);
- Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
-
- // Add the frame id.
- details->set(kFrameDetailsFrameIdIndex, *frame_id);
-
- // Add the function (same as in function frame).
- details->set(kFrameDetailsFunctionIndex, frame_inspector.GetFunction());
-
- // Add the arguments count.
- details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(argument_count));
-
- // Add the locals count
- details->set(kFrameDetailsLocalCountIndex,
- Smi::FromInt(scope_info->LocalCount()));
-
- // Add the source position.
- if (position != RelocInfo::kNoPosition) {
- details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
- } else {
- details->set(kFrameDetailsSourcePositionIndex, heap->undefined_value());
- }
-
- // Add the constructor information.
- details->set(kFrameDetailsConstructCallIndex, heap->ToBoolean(constructor));
-
- // Add the at return information.
- details->set(kFrameDetailsAtReturnIndex, heap->ToBoolean(at_return));
-
- // Add flags to indicate information on whether this frame is
- // bit 0: invoked in the debugger context.
- // bit 1: optimized frame.
- // bit 2: inlined in optimized frame
- int flags = 0;
- if (*save->context() == *isolate->debug()->debug_context()) {
- flags |= 1 << 0;
- }
- if (is_optimized) {
- flags |= 1 << 1;
- flags |= inlined_jsframe_index << 2;
- }
- details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
-
- // Fill the dynamic part.
- int details_index = kFrameDetailsFirstDynamicIndex;
-
- // Add arguments name and value.
- for (int i = 0; i < argument_count; i++) {
- // Name of the argument.
- if (i < scope_info->ParameterCount()) {
- details->set(details_index++, scope_info->ParameterName(i));
- } else {
- details->set(details_index++, heap->undefined_value());
- }
-
- // Parameter value.
- if (i < frame_inspector.GetParametersCount()) {
- // Get the value from the stack.
- details->set(details_index++, frame_inspector.GetParameter(i));
- } else {
- details->set(details_index++, heap->undefined_value());
- }
- }
-
- // Add locals name and value from the temporary copy from the function frame.
- for (int i = 0; i < scope_info->LocalCount() * 2; i++) {
- details->set(details_index++, locals->get(i));
- }
-
- // Add the value being returned.
- if (at_return) {
- details->set(details_index++, *return_value);
- }
-
- // Add the receiver (same as in function frame).
- // THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
- // THE FRAME ITERATOR TO WRAP THE RECEIVER.
- Handle<Object> receiver(it.frame()->receiver(), isolate);
- if (!receiver->IsJSObject() &&
- shared->is_classic_mode() &&
- !shared->native()) {
- // If the receiver is not a JSObject and the function is not a
- // builtin or strict-mode we have hit an optimization where a
- // value object is not converted into a wrapped JS objects. To
- // hide this optimization from the debugger, we wrap the receiver
- // by creating correct wrapper object based on the calling frame's
- // native context.
- it.Advance();
- Handle<Context> calling_frames_native_context(
- Context::cast(Context::cast(it.frame()->context())->native_context()));
- receiver =
- isolate->factory()->ToObject(receiver, calling_frames_native_context);
- }
- details->set(kFrameDetailsReceiverIndex, *receiver);
-
- ASSERT_EQ(details_size, details_index);
- return *isolate->factory()->NewJSArrayWithElements(details);
-}
-
-
-// Create a plain JSObject which materializes the local scope for the specified
-// frame.
-static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
- Isolate* isolate,
- JavaScriptFrame* frame,
- FrameInspector* frame_inspector) {
- Handle<JSFunction> function(JSFunction::cast(frame_inspector->GetFunction()));
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
-
- // Allocate and initialize a JSObject with all the arguments, stack locals
- // heap locals and extension properties of the debugged function.
- Handle<JSObject> local_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
-
- // First fill all parameters.
- for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- Handle<Object> value(i < frame_inspector->GetParametersCount()
- ? frame_inspector->GetParameter(i)
- : isolate->heap()->undefined_value(),
- isolate);
-
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(isolate,
- local_scope,
- Handle<String>(scope_info->ParameterName(i)),
- value,
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- }
-
- // Second fill all stack locals.
- for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(isolate,
- local_scope,
- Handle<String>(scope_info->StackLocalName(i)),
- Handle<Object>(frame_inspector->GetExpression(i), isolate),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- }
-
- if (scope_info->HasContext()) {
- // Third fill all context locals.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->declaration_context());
- if (!scope_info->CopyContextLocalsToScopeObject(
- isolate, function_context, local_scope)) {
- return Handle<JSObject>();
- }
-
- // Finally copy any properties from the function context extension.
- // These will be variables introduced by eval.
- if (function_context->closure() == *function) {
- if (function_context->has_extension() &&
- !function_context->IsNativeContext()) {
- Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- bool threw = false;
- Handle<FixedArray> keys =
- GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
- if (threw) return Handle<JSObject>();
-
- for (int i = 0; i < keys->length(); i++) {
- // Names of variables introduced by eval are strings.
- ASSERT(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(isolate,
- local_scope,
- key,
- GetProperty(isolate, ext, key),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- }
- }
- }
- }
-
- return local_scope;
-}
-
-
-static Handle<JSObject> MaterializeLocalScope(
- Isolate* isolate,
- JavaScriptFrame* frame,
- int inlined_jsframe_index) {
- FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- return MaterializeLocalScopeWithFrameInspector(isolate,
- frame,
- &frame_inspector);
-}
-
-
-// Set the context local variable value.
-static bool SetContextLocalValue(Isolate* isolate,
- Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
- Handle<String> next_name(scope_info->ContextLocalName(i));
- if (variable_name->Equals(*next_name)) {
- VariableMode mode;
- InitializationFlag init_flag;
- int context_index =
- scope_info->ContextSlotIndex(*next_name, &mode, &init_flag);
- context->set(context_index, *new_value);
- return true;
- }
- }
-
- return false;
-}
-
-
-static bool SetLocalVariableValue(Isolate* isolate,
- JavaScriptFrame* frame,
- int inlined_jsframe_index,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- if (inlined_jsframe_index != 0 || frame->is_optimized()) {
- // Optimized frames are not supported.
- return false;
- }
-
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
-
- bool default_result = false;
-
- // Parameters.
- for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- if (scope_info->ParameterName(i)->Equals(*variable_name)) {
- frame->SetParameterValue(i, *new_value);
- // Argument might be shadowed in heap context, don't stop here.
- default_result = true;
- }
- }
-
- // Stack locals.
- for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- if (scope_info->StackLocalName(i)->Equals(*variable_name)) {
- frame->SetExpression(i, *new_value);
- return true;
- }
- }
-
- if (scope_info->HasContext()) {
- // Context locals.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->declaration_context());
- if (SetContextLocalValue(
- isolate, scope_info, function_context, variable_name, new_value)) {
- return true;
- }
-
- // Function context extension. These are variables introduced by eval.
- if (function_context->closure() == *function) {
- if (function_context->has_extension() &&
- !function_context->IsNativeContext()) {
- Handle<JSObject> ext(JSObject::cast(function_context->extension()));
-
- if (ext->HasProperty(*variable_name)) {
- // We don't expect this to do anything except replacing
- // property value.
- SetProperty(isolate,
- ext,
- variable_name,
- new_value,
- NONE,
- kNonStrictMode);
- return true;
- }
- }
- }
- }
-
- return default_result;
-}
-
-
-// Create a plain JSObject which materializes the closure content for the
-// context.
-static Handle<JSObject> MaterializeClosure(Isolate* isolate,
- Handle<Context> context) {
- ASSERT(context->IsFunctionContext());
-
- Handle<SharedFunctionInfo> shared(context->closure()->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
-
- // Allocate and initialize a JSObject with all the content of this function
- // closure.
- Handle<JSObject> closure_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
-
- // Fill all context locals to the context extension.
- if (!scope_info->CopyContextLocalsToScopeObject(
- isolate, context, closure_scope)) {
- return Handle<JSObject>();
- }
-
- // Finally copy any properties from the function context extension. This will
- // be variables introduced by eval.
- if (context->has_extension()) {
- Handle<JSObject> ext(JSObject::cast(context->extension()));
- bool threw = false;
- Handle<FixedArray> keys =
- GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
- if (threw) return Handle<JSObject>();
-
- for (int i = 0; i < keys->length(); i++) {
- // Names of variables introduced by eval are strings.
- ASSERT(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(isolate,
- closure_scope,
- key,
- GetProperty(isolate, ext, key),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- }
- }
-
- return closure_scope;
-}
-
-
-// This method copies structure of MaterializeClosure method above.
-static bool SetClosureVariableValue(Isolate* isolate,
- Handle<Context> context,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- ASSERT(context->IsFunctionContext());
-
- Handle<SharedFunctionInfo> shared(context->closure()->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
-
- // Context locals to the context extension.
- if (SetContextLocalValue(
- isolate, scope_info, context, variable_name, new_value)) {
- return true;
- }
-
- // Properties from the function context extension. This will
- // be variables introduced by eval.
- if (context->has_extension()) {
- Handle<JSObject> ext(JSObject::cast(context->extension()));
- if (ext->HasProperty(*variable_name)) {
- // We don't expect this to do anything except replacing property value.
- SetProperty(isolate,
- ext,
- variable_name,
- new_value,
- NONE,
- kNonStrictMode);
- return true;
- }
- }
-
- return false;
-}
-
-
-// Create a plain JSObject which materializes the scope for the specified
-// catch context.
-static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
- Handle<Context> context) {
- ASSERT(context->IsCatchContext());
- Handle<String> name(String::cast(context->extension()));
- Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX),
- isolate);
- Handle<JSObject> catch_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(isolate,
- catch_scope,
- name,
- thrown_object,
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- return catch_scope;
-}
-
-
-static bool SetCatchVariableValue(Isolate* isolate,
- Handle<Context> context,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- ASSERT(context->IsCatchContext());
- Handle<String> name(String::cast(context->extension()));
- if (!name->Equals(*variable_name)) {
- return false;
- }
- context->set(Context::THROWN_OBJECT_INDEX, *new_value);
- return true;
-}
-
-
-// Create a plain JSObject which materializes the block scope for the specified
-// block context.
-static Handle<JSObject> MaterializeBlockScope(
- Isolate* isolate,
- Handle<Context> context) {
- ASSERT(context->IsBlockContext());
- Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
-
- // Allocate and initialize a JSObject with all the arguments, stack locals
- // heap locals and extension properties of the debugged function.
- Handle<JSObject> block_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
-
- // Fill all context locals.
- if (!scope_info->CopyContextLocalsToScopeObject(
- isolate, context, block_scope)) {
- return Handle<JSObject>();
- }
-
- return block_scope;
-}
-
-
-// Create a plain JSObject which materializes the module scope for the specified
-// module context.
-static Handle<JSObject> MaterializeModuleScope(
- Isolate* isolate,
- Handle<Context> context) {
- ASSERT(context->IsModuleContext());
- Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
-
- // Allocate and initialize a JSObject with all the members of the debugged
- // module.
- Handle<JSObject> module_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
-
- // Fill all context locals.
- if (!scope_info->CopyContextLocalsToScopeObject(
- isolate, context, module_scope)) {
- return Handle<JSObject>();
- }
-
- return module_scope;
-}
-
-
-// Iterate over the actual scopes visible from a stack frame or from a closure.
-// The iteration proceeds from the innermost visible nested scope outwards.
-// All scopes are backed by an actual context except the local scope,
-// which is inserted "artificially" in the context chain.
-class ScopeIterator {
- public:
- enum ScopeType {
- ScopeTypeGlobal = 0,
- ScopeTypeLocal,
- ScopeTypeWith,
- ScopeTypeClosure,
- ScopeTypeCatch,
- ScopeTypeBlock,
- ScopeTypeModule
- };
-
- ScopeIterator(Isolate* isolate,
- JavaScriptFrame* frame,
- int inlined_jsframe_index)
- : isolate_(isolate),
- frame_(frame),
- inlined_jsframe_index_(inlined_jsframe_index),
- function_(JSFunction::cast(frame->function())),
- context_(Context::cast(frame->context())),
- nested_scope_chain_(4),
- failed_(false) {
-
- // Catch the case when the debugger stops in an internal function.
- Handle<SharedFunctionInfo> shared_info(function_->shared());
- Handle<ScopeInfo> scope_info(shared_info->scope_info());
- if (shared_info->script() == isolate->heap()->undefined_value()) {
- while (context_->closure() == *function_) {
- context_ = Handle<Context>(context_->previous(), isolate_);
- }
- return;
- }
-
- // Get the debug info (create it if it does not exist).
- if (!isolate->debug()->EnsureDebugInfo(shared_info, function_)) {
- // Return if ensuring debug info failed.
- return;
- }
- Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared_info);
-
- // Find the break point where execution has stopped.
- BreakLocationIterator break_location_iterator(debug_info,
- ALL_BREAK_LOCATIONS);
- break_location_iterator.FindBreakLocationFromAddress(frame->pc());
- if (break_location_iterator.IsExit()) {
- // We are within the return sequence. At the momemt it is not possible to
- // get a source position which is consistent with the current scope chain.
- // Thus all nested with, catch and block contexts are skipped and we only
- // provide the function scope.
- if (scope_info->HasContext()) {
- context_ = Handle<Context>(context_->declaration_context(), isolate_);
- } else {
- while (context_->closure() == *function_) {
- context_ = Handle<Context>(context_->previous(), isolate_);
- }
- }
- if (scope_info->Type() != EVAL_SCOPE) nested_scope_chain_.Add(scope_info);
- } else {
- // Reparse the code and analyze the scopes.
- Handle<Script> script(Script::cast(shared_info->script()));
- Scope* scope = NULL;
-
- // Check whether we are in global, eval or function code.
- Handle<ScopeInfo> scope_info(shared_info->scope_info());
- if (scope_info->Type() != FUNCTION_SCOPE) {
- // Global or eval code.
- CompilationInfoWithZone info(script);
- if (scope_info->Type() == GLOBAL_SCOPE) {
- info.MarkAsGlobal();
- } else {
- ASSERT(scope_info->Type() == EVAL_SCOPE);
- info.MarkAsEval();
- info.SetContext(Handle<Context>(function_->context()));
- }
- if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
- scope = info.function()->scope();
- }
- RetrieveScopeChain(scope, shared_info);
- } else {
- // Function code
- CompilationInfoWithZone info(shared_info);
- if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
- scope = info.function()->scope();
- }
- RetrieveScopeChain(scope, shared_info);
- }
- }
- }
-
- ScopeIterator(Isolate* isolate,
- Handle<JSFunction> function)
- : isolate_(isolate),
- frame_(NULL),
- inlined_jsframe_index_(0),
- function_(function),
- context_(function->context()),
- failed_(false) {
- if (function->IsBuiltin()) {
- context_ = Handle<Context>();
- }
- }
-
- // More scopes?
- bool Done() {
- ASSERT(!failed_);
- return context_.is_null();
- }
-
- bool Failed() { return failed_; }
-
- // Move to the next scope.
- void Next() {
- ASSERT(!failed_);
- ScopeType scope_type = Type();
- if (scope_type == ScopeTypeGlobal) {
- // The global scope is always the last in the chain.
- ASSERT(context_->IsNativeContext());
- context_ = Handle<Context>();
- return;
- }
- if (nested_scope_chain_.is_empty()) {
- context_ = Handle<Context>(context_->previous(), isolate_);
- } else {
- if (nested_scope_chain_.last()->HasContext()) {
- ASSERT(context_->previous() != NULL);
- context_ = Handle<Context>(context_->previous(), isolate_);
- }
- nested_scope_chain_.RemoveLast();
- }
- }
-
- // Return the type of the current scope.
- ScopeType Type() {
- ASSERT(!failed_);
- if (!nested_scope_chain_.is_empty()) {
- Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
- switch (scope_info->Type()) {
- case FUNCTION_SCOPE:
- ASSERT(context_->IsFunctionContext() ||
- !scope_info->HasContext());
- return ScopeTypeLocal;
- case MODULE_SCOPE:
- ASSERT(context_->IsModuleContext());
- return ScopeTypeModule;
- case GLOBAL_SCOPE:
- ASSERT(context_->IsNativeContext());
- return ScopeTypeGlobal;
- case WITH_SCOPE:
- ASSERT(context_->IsWithContext());
- return ScopeTypeWith;
- case CATCH_SCOPE:
- ASSERT(context_->IsCatchContext());
- return ScopeTypeCatch;
- case BLOCK_SCOPE:
- ASSERT(!scope_info->HasContext() ||
- context_->IsBlockContext());
- return ScopeTypeBlock;
- case EVAL_SCOPE:
- UNREACHABLE();
- }
- }
- if (context_->IsNativeContext()) {
- ASSERT(context_->global_object()->IsGlobalObject());
- return ScopeTypeGlobal;
- }
- if (context_->IsFunctionContext()) {
- return ScopeTypeClosure;
- }
- if (context_->IsCatchContext()) {
- return ScopeTypeCatch;
- }
- if (context_->IsBlockContext()) {
- return ScopeTypeBlock;
- }
- if (context_->IsModuleContext()) {
- return ScopeTypeModule;
- }
- ASSERT(context_->IsWithContext());
- return ScopeTypeWith;
- }
-
- // Return the JavaScript object with the content of the current scope.
- Handle<JSObject> ScopeObject() {
- ASSERT(!failed_);
- switch (Type()) {
- case ScopeIterator::ScopeTypeGlobal:
- return Handle<JSObject>(CurrentContext()->global_object());
- case ScopeIterator::ScopeTypeLocal:
- // Materialize the content of the local scope into a JSObject.
- ASSERT(nested_scope_chain_.length() == 1);
- return MaterializeLocalScope(isolate_, frame_, inlined_jsframe_index_);
- case ScopeIterator::ScopeTypeWith:
- // Return the with object.
- return Handle<JSObject>(JSObject::cast(CurrentContext()->extension()));
- case ScopeIterator::ScopeTypeCatch:
- return MaterializeCatchScope(isolate_, CurrentContext());
- case ScopeIterator::ScopeTypeClosure:
- // Materialize the content of the closure scope into a JSObject.
- return MaterializeClosure(isolate_, CurrentContext());
- case ScopeIterator::ScopeTypeBlock:
- return MaterializeBlockScope(isolate_, CurrentContext());
- case ScopeIterator::ScopeTypeModule:
- return MaterializeModuleScope(isolate_, CurrentContext());
- }
- UNREACHABLE();
- return Handle<JSObject>();
- }
-
- bool SetVariableValue(Handle<String> variable_name,
- Handle<Object> new_value) {
- ASSERT(!failed_);
- switch (Type()) {
- case ScopeIterator::ScopeTypeGlobal:
- break;
- case ScopeIterator::ScopeTypeLocal:
- return SetLocalVariableValue(isolate_, frame_, inlined_jsframe_index_,
- variable_name, new_value);
- case ScopeIterator::ScopeTypeWith:
- break;
- case ScopeIterator::ScopeTypeCatch:
- return SetCatchVariableValue(isolate_, CurrentContext(),
- variable_name, new_value);
- case ScopeIterator::ScopeTypeClosure:
- return SetClosureVariableValue(isolate_, CurrentContext(),
- variable_name, new_value);
- case ScopeIterator::ScopeTypeBlock:
- // TODO(2399): should we implement it?
- break;
- case ScopeIterator::ScopeTypeModule:
- // TODO(2399): should we implement it?
- break;
- }
- return false;
- }
-
- Handle<ScopeInfo> CurrentScopeInfo() {
- ASSERT(!failed_);
- if (!nested_scope_chain_.is_empty()) {
- return nested_scope_chain_.last();
- } else if (context_->IsBlockContext()) {
- return Handle<ScopeInfo>(ScopeInfo::cast(context_->extension()));
- } else if (context_->IsFunctionContext()) {
- return Handle<ScopeInfo>(context_->closure()->shared()->scope_info());
- }
- return Handle<ScopeInfo>::null();
- }
-
- // Return the context for this scope. For the local context there might not
- // be an actual context.
- Handle<Context> CurrentContext() {
- ASSERT(!failed_);
- if (Type() == ScopeTypeGlobal ||
- nested_scope_chain_.is_empty()) {
- return context_;
- } else if (nested_scope_chain_.last()->HasContext()) {
- return context_;
- } else {
- return Handle<Context>();
- }
- }
-
-#ifdef DEBUG
- // Debug print of the content of the current scope.
- void DebugPrint() {
- ASSERT(!failed_);
- switch (Type()) {
- case ScopeIterator::ScopeTypeGlobal:
- PrintF("Global:\n");
- CurrentContext()->Print();
- break;
-
- case ScopeIterator::ScopeTypeLocal: {
- PrintF("Local:\n");
- function_->shared()->scope_info()->Print();
- if (!CurrentContext().is_null()) {
- CurrentContext()->Print();
- if (CurrentContext()->has_extension()) {
- Handle<Object> extension(CurrentContext()->extension(), isolate_);
- if (extension->IsJSContextExtensionObject()) {
- extension->Print();
- }
- }
- }
- break;
- }
-
- case ScopeIterator::ScopeTypeWith:
- PrintF("With:\n");
- CurrentContext()->extension()->Print();
- break;
-
- case ScopeIterator::ScopeTypeCatch:
- PrintF("Catch:\n");
- CurrentContext()->extension()->Print();
- CurrentContext()->get(Context::THROWN_OBJECT_INDEX)->Print();
- break;
-
- case ScopeIterator::ScopeTypeClosure:
- PrintF("Closure:\n");
- CurrentContext()->Print();
- if (CurrentContext()->has_extension()) {
- Handle<Object> extension(CurrentContext()->extension(), isolate_);
- if (extension->IsJSContextExtensionObject()) {
- extension->Print();
- }
- }
- break;
-
- default:
- UNREACHABLE();
- }
- PrintF("\n");
- }
-#endif
-
- private:
- Isolate* isolate_;
- JavaScriptFrame* frame_;
- int inlined_jsframe_index_;
- Handle<JSFunction> function_;
- Handle<Context> context_;
- List<Handle<ScopeInfo> > nested_scope_chain_;
- bool failed_;
-
- void RetrieveScopeChain(Scope* scope,
- Handle<SharedFunctionInfo> shared_info) {
- if (scope != NULL) {
- int source_position = shared_info->code()->SourcePosition(frame_->pc());
- scope->GetNestedScopeChain(&nested_scope_chain_, source_position);
- } else {
- // A failed reparse indicates that the preparser has diverged from the
- // parser or that the preparse data given to the initial parse has been
- // faulty. We fail in debug mode but in release mode we only provide the
- // information we get from the context chain but nothing about
- // completely stack allocated scopes or stack allocated locals.
- // Or it could be due to stack overflow.
- ASSERT(isolate_->has_pending_exception());
- failed_ = true;
- }
- }
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
-};
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeCount) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
- CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator it(isolate, id);
- JavaScriptFrame* frame = it.frame();
-
- // Count the visible scopes.
- int n = 0;
- for (ScopeIterator it(isolate, frame, 0);
- !it.Done();
- it.Next()) {
- n++;
- }
-
- return Smi::FromInt(n);
-}
-
-
-static const int kScopeDetailsTypeIndex = 0;
-static const int kScopeDetailsObjectIndex = 1;
-static const int kScopeDetailsSize = 2;
-
-
-static MaybeObject* MaterializeScopeDetails(Isolate* isolate,
- ScopeIterator* it) {
- // Calculate the size of the result.
- int details_size = kScopeDetailsSize;
- Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
-
- // Fill in scope details.
- details->set(kScopeDetailsTypeIndex, Smi::FromInt(it->Type()));
- Handle<JSObject> scope_object = it->ScopeObject();
- RETURN_IF_EMPTY_HANDLE(isolate, scope_object);
- details->set(kScopeDetailsObjectIndex, *scope_object);
-
- return *isolate->factory()->NewJSArrayWithElements(details);
-}
-
-// Return an array with scope details
-// args[0]: number: break id
-// args[1]: number: frame index
-// args[2]: number: inlined frame index
-// args[3]: number: scope index
-//
-// The array returned contains the following information:
-// 0: Scope type
-// 1: Scope object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
-
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
- CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
- CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator frame_it(isolate, id);
- JavaScriptFrame* frame = frame_it.frame();
-
- // Find the requested scope.
- int n = 0;
- ScopeIterator it(isolate, frame, inlined_jsframe_index);
- for (; !it.Done() && n < index; it.Next()) {
- n++;
- }
- if (it.Done()) {
- return isolate->heap()->undefined_value();
- }
- return MaterializeScopeDetails(isolate, &it);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeCount) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- // Check arguments.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
-
- // Count the visible scopes.
- int n = 0;
- for (ScopeIterator it(isolate, fun); !it.Done(); it.Next()) {
- n++;
- }
-
- return Smi::FromInt(n);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeDetails) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- // Check arguments.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
-
- // Find the requested scope.
- int n = 0;
- ScopeIterator it(isolate, fun);
- for (; !it.Done() && n < index; it.Next()) {
- n++;
- }
- if (it.Done()) {
- return isolate->heap()->undefined_value();
- }
-
- return MaterializeScopeDetails(isolate, &it);
-}
-
-
-static bool SetScopeVariableValue(ScopeIterator* it, int index,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- for (int n = 0; !it->Done() && n < index; it->Next()) {
- n++;
- }
- if (it->Done()) {
- return false;
- }
- return it->SetVariableValue(variable_name, new_value);
-}
-
-
-// Change variable value in closure or local scope
-// args[0]: number or JsFunction: break id or function
-// args[1]: number: frame index (when arg[0] is break id)
-// args[2]: number: inlined frame index (when arg[0] is break id)
-// args[3]: number: scope index
-// args[4]: string: variable name
-// args[5]: object: new value
-//
-// Return true if success and false otherwise
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScopeVariableValue) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 6);
-
- // Check arguments.
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
- CONVERT_ARG_HANDLE_CHECKED(String, variable_name, 4);
- Handle<Object> new_value = args.at<Object>(5);
-
- bool res;
- if (args[0]->IsNumber()) {
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
- CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
- CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator frame_it(isolate, id);
- JavaScriptFrame* frame = frame_it.frame();
-
- ScopeIterator it(isolate, frame, inlined_jsframe_index);
- res = SetScopeVariableValue(&it, index, variable_name, new_value);
- } else {
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- ScopeIterator it(isolate, fun);
- res = SetScopeVariableValue(&it, index, variable_name, new_value);
- }
-
- return isolate->heap()->ToBoolean(res);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrintScopes) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 0);
-
-#ifdef DEBUG
- // Print the scopes for the top frame.
- StackFrameLocator locator(isolate);
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- for (ScopeIterator it(isolate, frame, 0);
- !it.Done();
- it.Next()) {
- it.DebugPrint();
- }
-#endif
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadCount) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- // Check arguments.
- Object* result;
- { MaybeObject* maybe_result = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Count all archived V8 threads.
- int n = 0;
- for (ThreadState* thread =
- isolate->thread_manager()->FirstThreadStateInUse();
- thread != NULL;
- thread = thread->Next()) {
- n++;
- }
-
- // Total number of threads is current thread and archived threads.
- return Smi::FromInt(n + 1);
-}
-
-
-static const int kThreadDetailsCurrentThreadIndex = 0;
-static const int kThreadDetailsThreadIdIndex = 1;
-static const int kThreadDetailsSize = 2;
-
-// Return an array with thread details
-// args[0]: number: break id
-// args[1]: number: thread index
-//
-// The array returned contains the following information:
-// 0: Is current thread?
-// 1: Thread id
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadDetails) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
-
- // Allocate array for result.
- Handle<FixedArray> details =
- isolate->factory()->NewFixedArray(kThreadDetailsSize);
-
- // Thread index 0 is current thread.
- if (index == 0) {
- // Fill the details.
- details->set(kThreadDetailsCurrentThreadIndex,
- isolate->heap()->true_value());
- details->set(kThreadDetailsThreadIdIndex,
- Smi::FromInt(ThreadId::Current().ToInteger()));
- } else {
- // Find the thread with the requested index.
- int n = 1;
- ThreadState* thread =
- isolate->thread_manager()->FirstThreadStateInUse();
- while (index != n && thread != NULL) {
- thread = thread->Next();
- n++;
- }
- if (thread == NULL) {
- return isolate->heap()->undefined_value();
- }
-
- // Fill the details.
- details->set(kThreadDetailsCurrentThreadIndex,
- isolate->heap()->false_value());
- details->set(kThreadDetailsThreadIdIndex,
- Smi::FromInt(thread->id().ToInteger()));
- }
-
- // Convert to JS array and return.
- return *isolate->factory()->NewJSArrayWithElements(details);
-}
-
-
-// Sets the disable break state
-// args[0]: disable break state
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDisableBreak) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 0);
- isolate->debug()->set_disable_break(disable_break);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- Handle<SharedFunctionInfo> shared(fun->shared());
- // Find the number of break points
- Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
- if (break_locations->IsUndefined()) return isolate->heap()->undefined_value();
- // Return array as JS array
- return *isolate->factory()->NewJSArrayWithElements(
- Handle<FixedArray>::cast(break_locations));
-}
-
-
-// Return the value of breakpoint_relocation flag
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllowBreakPointRelocation) {
- return Smi::FromInt(FLAG_breakpoint_relocation);
-}
-
-
-// Set a break point in a function.
-// args[0]: function
-// args[1]: number: break source position (within the function source)
-// args[2]: number: break point object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- RUNTIME_ASSERT(source_position >= 0);
- Handle<Object> break_point_object_arg = args.at<Object>(2);
-
- // Set break point.
- isolate->debug()->SetBreakPoint(function, break_point_object_arg,
- &source_position);
-
- return Smi::FromInt(source_position);
-}
-
-
-// Changes the state of a break point in a script and returns source position
-// where break point was set. NOTE: Regarding performance see the NOTE for
-// GetScriptFromScriptData.
-// args[0]: script to set break point in
-// args[1]: number: break source position (within the script source)
-// args[2]: number: break point object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
- CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- RUNTIME_ASSERT(source_position >= 0);
- Handle<Object> break_point_object_arg = args.at<Object>(2);
-
- // Get the script from the script wrapper.
- RUNTIME_ASSERT(wrapper->value()->IsScript());
- Handle<Script> script(Script::cast(wrapper->value()));
-
- // Set break point.
- if (!isolate->debug()->SetBreakPointForScript(script, break_point_object_arg,
- &source_position)) {
- return isolate->heap()->undefined_value();
- }
-
- return Smi::FromInt(source_position);
-}
-
-
-// Clear a break point
-// args[0]: number: break point object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearBreakPoint) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- Handle<Object> break_point_object_arg = args.at<Object>(0);
-
- // Clear break point.
- isolate->debug()->ClearBreakPoint(break_point_object_arg);
-
- return isolate->heap()->undefined_value();
-}
-
-
-// Change the state of break on exceptions.
-// args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
-// args[1]: Boolean indicating on/off.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ChangeBreakOnException) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- RUNTIME_ASSERT(args[0]->IsNumber());
- CONVERT_BOOLEAN_ARG_CHECKED(enable, 1);
-
- // If the number doesn't match an enum value, the ChangeBreakOnException
- // function will default to affecting caught exceptions.
- ExceptionBreakType type =
- static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
- // Update break point state.
- isolate->debug()->ChangeBreakOnException(type, enable);
- return isolate->heap()->undefined_value();
-}
-
-
-// Returns the state of break on exceptions
-// args[0]: boolean indicating uncaught exceptions
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsBreakOnException) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- RUNTIME_ASSERT(args[0]->IsNumber());
-
- ExceptionBreakType type =
- static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
- bool result = isolate->debug()->IsBreakOnException(type);
- return Smi::FromInt(result);
-}
-
-
-// Prepare for stepping
-// args[0]: break id for checking execution state
-// args[1]: step action from the enumeration StepAction
-// args[2]: number of times to perform the step, for step out it is the number
-// of frames to step down.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
- if (!args[1]->IsNumber() || !args[2]->IsNumber()) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
-
- // Get the step action and check validity.
- StepAction step_action = static_cast<StepAction>(NumberToInt32(args[1]));
- if (step_action != StepIn &&
- step_action != StepNext &&
- step_action != StepOut &&
- step_action != StepInMin &&
- step_action != StepMin) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
-
- // Get the number of steps.
- int step_count = NumberToInt32(args[2]);
- if (step_count < 1) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
-
- // Clear all current stepping setup.
- isolate->debug()->ClearStepping();
-
- // Prepare step.
- isolate->debug()->PrepareStep(static_cast<StepAction>(step_action),
- step_count);
- return isolate->heap()->undefined_value();
-}
-
-
-// Clear all stepping set by PrepareStep.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearStepping) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 0);
- isolate->debug()->ClearStepping();
- return isolate->heap()->undefined_value();
-}
-
-
-// Creates a copy of the with context chain. The copy of the context chain is
-// is linked to the function context supplied.
-static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
- Handle<JSFunction> function,
- Handle<Context> base,
- JavaScriptFrame* frame,
- int inlined_jsframe_index) {
- HandleScope scope(isolate);
- List<Handle<ScopeInfo> > scope_chain;
- List<Handle<Context> > context_chain;
-
- ScopeIterator it(isolate, frame, inlined_jsframe_index);
- if (it.Failed()) return Handle<Context>::null();
-
- for (; it.Type() != ScopeIterator::ScopeTypeGlobal &&
- it.Type() != ScopeIterator::ScopeTypeLocal ; it.Next()) {
- ASSERT(!it.Done());
- scope_chain.Add(it.CurrentScopeInfo());
- context_chain.Add(it.CurrentContext());
- }
-
- // At the end of the chain. Return the base context to link to.
- Handle<Context> context = base;
-
- // Iteratively copy and or materialize the nested contexts.
- while (!scope_chain.is_empty()) {
- Handle<ScopeInfo> scope_info = scope_chain.RemoveLast();
- Handle<Context> current = context_chain.RemoveLast();
- ASSERT(!(scope_info->HasContext() & current.is_null()));
-
- if (scope_info->Type() == CATCH_SCOPE) {
- Handle<String> name(String::cast(current->extension()));
- Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX),
- isolate);
- context =
- isolate->factory()->NewCatchContext(function,
- context,
- name,
- thrown_object);
- } else if (scope_info->Type() == BLOCK_SCOPE) {
- // Materialize the contents of the block scope into a JSObject.
- Handle<JSObject> block_scope_object =
- MaterializeBlockScope(isolate, current);
- CHECK(!block_scope_object.is_null());
- // Allocate a new function context for the debug evaluation and set the
- // extension object.
- Handle<Context> new_context =
- isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
- function);
- new_context->set_extension(*block_scope_object);
- new_context->set_previous(*context);
- context = new_context;
- } else {
- ASSERT(scope_info->Type() == WITH_SCOPE);
- ASSERT(current->IsWithContext());
- Handle<JSObject> extension(JSObject::cast(current->extension()));
- context =
- isolate->factory()->NewWithContext(function, context, extension);
- }
- }
-
- return scope.CloseAndEscape(context);
-}
-
-
-// Helper function to find or create the arguments object for
-// Runtime_DebugEvaluate.
-static Handle<Object> GetArgumentsObject(Isolate* isolate,
- JavaScriptFrame* frame,
- FrameInspector* frame_inspector,
- Handle<ScopeInfo> scope_info,
- Handle<Context> function_context) {
- // Try to find the value of 'arguments' to pass as parameter. If it is not
- // found (that is the debugged function does not reference 'arguments' and
- // does not support eval) then create an 'arguments' object.
- int index;
- if (scope_info->StackLocalCount() > 0) {
- index = scope_info->StackSlotIndex(isolate->heap()->arguments_string());
- if (index != -1) {
- return Handle<Object>(frame->GetExpression(index), isolate);
- }
- }
-
- if (scope_info->HasHeapAllocatedLocals()) {
- VariableMode mode;
- InitializationFlag init_flag;
- index = scope_info->ContextSlotIndex(
- isolate->heap()->arguments_string(), &mode, &init_flag);
- if (index != -1) {
- return Handle<Object>(function_context->get(index), isolate);
- }
- }
-
- Handle<JSFunction> function(JSFunction::cast(frame_inspector->GetFunction()));
- int length = frame_inspector->GetParametersCount();
- Handle<JSObject> arguments =
- isolate->factory()->NewArgumentsObject(function, length);
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
-
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < length; i++) {
- array->set(i, frame_inspector->GetParameter(i), mode);
- }
- arguments->set_elements(*array);
- return arguments;
-}
-
-
-static const char kSourceStr[] =
- "(function(arguments,__source__){return eval(__source__);})";
-
-
-// Evaluate a piece of JavaScript in the context of a stack frame for
-// debugging. This is accomplished by creating a new context which in its
-// extension part has all the parameters and locals of the function on the
-// stack frame. A function which calls eval with the code to evaluate is then
-// compiled in this context and called in this context. As this context
-// replaces the context of the function on the stack frame a new (empty)
-// function is created as well to be used as the closure for the context.
-// This function and the context acts as replacements for the function on the
-// stack frame presenting the same view of the values of parameters and
-// local variables as if the piece of JavaScript was evaluated at the point
-// where the function on the stack frame is currently stopped.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
- HandleScope scope(isolate);
-
- // Check the execution state and decode arguments frame and source to be
- // evaluated.
- ASSERT(args.length() == 6);
- Object* check_result;
- { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check_result->ToObject(&check_result)) {
- return maybe_check_result;
- }
- }
- CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
- CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 3);
- CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4);
- Handle<Object> additional_context(args[5], isolate);
-
- // Handle the processing of break.
- DisableBreak disable_break_save(disable_break);
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator it(isolate, id);
- JavaScriptFrame* frame = it.frame();
- FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
- Handle<ScopeInfo> scope_info(function->shared()->scope_info());
- bool qml_mode = function->shared()->qml_mode();
-
- // Traverse the saved contexts chain to find the active context for the
- // selected frame.
- SaveContext* save = FindSavedContextForFrame(isolate, frame);
-
- SaveContext savex(isolate);
- isolate->set_context(*(save->context()));
-
- // Create the (empty) function replacing the function on the stack frame for
- // the purpose of evaluating in the context created below. It is important
- // that this function does not describe any parameters and local variables
- // in the context. If it does then this will cause problems with the lookup
- // in Context::Lookup, where context slots for parameters and local variables
- // are looked at before the extension object.
- Handle<JSFunction> go_between =
- isolate->factory()->NewFunction(isolate->factory()->empty_string(),
- isolate->factory()->undefined_value());
- go_between->set_context(function->context());
-#ifdef DEBUG
- Handle<ScopeInfo> go_between_scope_info(go_between->shared()->scope_info());
- ASSERT(go_between_scope_info->ParameterCount() == 0);
- ASSERT(go_between_scope_info->ContextLocalCount() == 0);
-#endif
-
- // Materialize the content of the local scope into a JSObject.
- Handle<JSObject> local_scope = MaterializeLocalScopeWithFrameInspector(
- isolate, frame, &frame_inspector);
- RETURN_IF_EMPTY_HANDLE(isolate, local_scope);
-
- // Allocate a new context for the debug evaluation and set the extension
- // object build.
- Handle<Context> context =
- isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
- go_between);
-
- // Use the materialized local scope in a with context.
- context =
- isolate->factory()->NewWithContext(go_between, context, local_scope);
-
- // Copy any with contexts present and chain them in front of this context.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context;
- // Get the function's context if it has one.
- if (scope_info->HasContext()) {
- function_context = Handle<Context>(frame_context->declaration_context());
- }
- context = CopyNestedScopeContextChain(isolate,
- go_between,
- context,
- frame,
- inlined_jsframe_index);
- if (context.is_null()) {
- ASSERT(isolate->has_pending_exception());
- MaybeObject* exception = isolate->pending_exception();
- isolate->clear_pending_exception();
- return exception;
- }
-
- if (additional_context->IsJSObject()) {
- Handle<JSObject> extension = Handle<JSObject>::cast(additional_context);
- context =
- isolate->factory()->NewWithContext(go_between, context, extension);
- }
-
- // Wrap the evaluation statement in a new function compiled in the newly
- // created context. The function has one parameter which has to be called
- // 'arguments'. This it to have access to what would have been 'arguments' in
- // the function being debugged.
- // function(arguments,__source__) {return eval(__source__);}
-
- Handle<String> function_source =
- isolate->factory()->NewStringFromAscii(
- Vector<const char>(kSourceStr, sizeof(kSourceStr) - 1));
-
- // Currently, the eval code will be executed in non-strict mode,
- // even in the strict code context.
- Handle<SharedFunctionInfo> shared =
- Compiler::CompileEval(function_source,
- context,
- context->IsNativeContext(),
- CLASSIC_MODE,
- RelocInfo::kNoPosition,
- qml_mode);
- if (shared.is_null()) return Failure::Exception();
- Handle<JSFunction> compiled_function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
-
- // Invoke the result of the compilation to get the evaluation function.
- bool has_pending_exception;
- Handle<Object> receiver(frame->receiver(), isolate);
- Handle<Object> evaluation_function =
- Execution::Call(compiled_function, receiver, 0, NULL,
- &has_pending_exception, false,
- Handle<Object>(function->context()->qml_global_object(), isolate));
- if (has_pending_exception) return Failure::Exception();
-
- Handle<Object> arguments = GetArgumentsObject(isolate,
- frame,
- &frame_inspector,
- scope_info,
- function_context);
-
- // Check if eval is blocked in the context and temporarily allow it
- // for debugger.
- Handle<Context> native_context = Handle<Context>(context->native_context());
- bool eval_disabled =
- native_context->allow_code_gen_from_strings()->IsFalse();
- if (eval_disabled) {
- native_context->set_allow_code_gen_from_strings(
- isolate->heap()->true_value());
- }
- // Invoke the evaluation function and return the result.
- Handle<Object> argv[] = { arguments, source };
- Handle<Object> result =
- Execution::Call(Handle<JSFunction>::cast(evaluation_function),
- receiver,
- ARRAY_SIZE(argv),
- argv,
- &has_pending_exception);
- if (eval_disabled) {
- native_context->set_allow_code_gen_from_strings(
- isolate->heap()->false_value());
- }
- if (has_pending_exception) return Failure::Exception();
-
- // Skip the global proxy as it has no properties and always delegates to the
- // real global object.
- if (result->IsJSGlobalProxy()) {
- result = Handle<JSObject>(JSObject::cast(result->GetPrototype(isolate)));
- }
-
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
- HandleScope scope(isolate);
-
- // Check the execution state and decode arguments frame and source to be
- // evaluated.
- ASSERT(args.length() == 4);
- Object* check_result;
- { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check_result->ToObject(&check_result)) {
- return maybe_check_result;
- }
- }
- CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
- CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
- Handle<Object> additional_context(args[3], isolate);
-
- // Handle the processing of break.
- DisableBreak disable_break_save(disable_break);
-
- // Enter the top context from before the debugger was invoked.
- SaveContext save(isolate);
- SaveContext* top = &save;
- while (top != NULL && *top->context() == *isolate->debug()->debug_context()) {
- top = top->prev();
- }
- if (top != NULL) {
- isolate->set_context(*top->context());
- }
-
- // Get the native context now set to the top context from before the
- // debugger was invoked.
- Handle<Context> context = isolate->native_context();
-
- bool is_global = true;
-
- if (additional_context->IsJSObject()) {
- // Create a new with context with the additional context information between
- // the context of the debugged function and the eval code to be executed.
- context = isolate->factory()->NewWithContext(
- Handle<JSFunction>(context->closure()),
- context,
- Handle<JSObject>::cast(additional_context));
- is_global = false;
- }
-
- // Compile the source to be evaluated.
- // Currently, the eval code will be executed in non-strict mode,
- // even in the strict code context.
- Handle<SharedFunctionInfo> shared =
- Compiler::CompileEval(source,
- context,
- is_global,
- CLASSIC_MODE,
- RelocInfo::kNoPosition,
- false);
- if (shared.is_null()) return Failure::Exception();
- Handle<JSFunction> compiled_function =
- Handle<JSFunction>(
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
- context));
-
- // Invoke the result of the compilation to get the evaluation function.
- bool has_pending_exception;
- Handle<Object> receiver = isolate->global_object();
- Handle<Object> result =
- Execution::Call(compiled_function, receiver, 0, NULL,
- &has_pending_exception);
- // Clear the oneshot breakpoints so that the debugger does not step further.
- isolate->debug()->ClearStepping();
- if (has_pending_exception) return Failure::Exception();
- return *result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetLoadedScripts) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 0);
-
- // Fill the script objects.
- Handle<FixedArray> instances = isolate->debug()->GetLoadedScripts();
-
- // Convert the script objects to proper JS objects.
- for (int i = 0; i < instances->length(); i++) {
- Handle<Script> script = Handle<Script>(Script::cast(instances->get(i)));
- // Get the script wrapper in a local handle before calling GetScriptWrapper,
- // because using
- // instances->set(i, *GetScriptWrapper(script))
- // is unsafe as GetScriptWrapper might call GC and the C++ compiler might
- // already have dereferenced the instances handle.
- Handle<JSValue> wrapper = GetScriptWrapper(script);
- instances->set(i, *wrapper);
- }
-
- // Return result as a JS array.
- Handle<JSObject> result =
- isolate->factory()->NewJSObject(isolate->array_function());
- isolate->factory()->SetContent(Handle<JSArray>::cast(result), instances);
- return *result;
-}
-
-
-// Helper function used by Runtime_DebugReferencedBy below.
-static int DebugReferencedBy(HeapIterator* iterator,
- JSObject* target,
- Object* instance_filter, int max_references,
- FixedArray* instances, int instances_size,
- JSFunction* arguments_function) {
- Isolate* isolate = target->GetIsolate();
- NoHandleAllocation ha(isolate);
- AssertNoAllocation no_alloc;
-
- // Iterate the heap.
- int count = 0;
- JSObject* last = NULL;
- HeapObject* heap_obj = NULL;
- while (((heap_obj = iterator->next()) != NULL) &&
- (max_references == 0 || count < max_references)) {
- // Only look at all JSObjects.
- if (heap_obj->IsJSObject()) {
- // Skip context extension objects and argument arrays as these are
- // checked in the context of functions using them.
- JSObject* obj = JSObject::cast(heap_obj);
- if (obj->IsJSContextExtensionObject() ||
- obj->map()->constructor() == arguments_function) {
- continue;
- }
-
- // Check if the JS object has a reference to the object looked for.
- if (obj->ReferencesObject(target)) {
- // Check instance filter if supplied. This is normally used to avoid
- // references from mirror objects (see Runtime_IsInPrototypeChain).
- if (!instance_filter->IsUndefined()) {
- Object* V = obj;
- while (true) {
- Object* prototype = V->GetPrototype(isolate);
- if (prototype->IsNull()) {
- break;
- }
- if (instance_filter == prototype) {
- obj = NULL; // Don't add this object.
- break;
- }
- V = prototype;
- }
- }
-
- if (obj != NULL) {
- // Valid reference found add to instance array if supplied an update
- // count.
- if (instances != NULL && count < instances_size) {
- instances->set(count, obj);
- }
- last = obj;
- count++;
- }
- }
- }
- }
-
- // Check for circular reference only. This can happen when the object is only
- // referenced from mirrors and has a circular reference in which case the
- // object is not really alive and would have been garbage collected if not
- // referenced from the mirror.
- if (count == 1 && last == target) {
- count = 0;
- }
-
- // Return the number of referencing objects found.
- return count;
-}
-
-
-// Scan the heap for objects with direct references to an object
-// args[0]: the object to find references to
-// args[1]: constructor function for instances to exclude (Mirror)
-// args[2]: the the maximum number of objects to return
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
- ASSERT(args.length() == 3);
-
- // First perform a full GC in order to avoid references from dead objects.
- isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "%DebugReferencedBy");
- // The heap iterator reserves the right to do a GC to make the heap iterable.
- // Due to the GC above we know it won't need to do that, but it seems cleaner
- // to get the heap iterator constructed before we start having unprotected
- // Object* locals that are not protected by handles.
-
- // Check parameters.
- CONVERT_ARG_CHECKED(JSObject, target, 0);
- Object* instance_filter = args[1];
- RUNTIME_ASSERT(instance_filter->IsUndefined() ||
- instance_filter->IsJSObject());
- CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
- RUNTIME_ASSERT(max_references >= 0);
-
-
- // Get the constructor function for context extension and arguments array.
- JSObject* arguments_boilerplate =
- isolate->context()->native_context()->arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
-
- // Get the number of referencing objects.
- int count;
- Heap* heap = isolate->heap();
- HeapIterator heap_iterator(heap);
- count = DebugReferencedBy(&heap_iterator,
- target, instance_filter, max_references,
- NULL, 0, arguments_function);
-
- // Allocate an array to hold the result.
- Object* object;
- { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- FixedArray* instances = FixedArray::cast(object);
-
- // Fill the referencing objects.
- // AllocateFixedArray above does not make the heap non-iterable.
- ASSERT(heap->IsHeapIterable());
- HeapIterator heap_iterator2(heap);
- count = DebugReferencedBy(&heap_iterator2,
- target, instance_filter, max_references,
- instances, count, arguments_function);
-
- // Return result as JS array.
- Object* result;
- MaybeObject* maybe_result = heap->AllocateJSObject(
- isolate->context()->native_context()->array_function());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- return JSArray::cast(result)->SetContent(instances);
-}
-
-
-// Helper function used by Runtime_DebugConstructedBy below.
-static int DebugConstructedBy(HeapIterator* iterator,
- JSFunction* constructor,
- int max_references,
- FixedArray* instances,
- int instances_size) {
- AssertNoAllocation no_alloc;
-
- // Iterate the heap.
- int count = 0;
- HeapObject* heap_obj = NULL;
- while (((heap_obj = iterator->next()) != NULL) &&
- (max_references == 0 || count < max_references)) {
- // Only look at all JSObjects.
- if (heap_obj->IsJSObject()) {
- JSObject* obj = JSObject::cast(heap_obj);
- if (obj->map()->constructor() == constructor) {
- // Valid reference found add to instance array if supplied an update
- // count.
- if (instances != NULL && count < instances_size) {
- instances->set(count, obj);
- }
- count++;
- }
- }
- }
-
- // Return the number of referencing objects found.
- return count;
-}
-
-
-// Scan the heap for objects constructed by a specific function.
-// args[0]: the constructor to find instances of
-// args[1]: the the maximum number of objects to return
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
- ASSERT(args.length() == 2);
-
- // First perform a full GC in order to avoid dead objects.
- Heap* heap = isolate->heap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy");
-
- // Check parameters.
- CONVERT_ARG_CHECKED(JSFunction, constructor, 0);
- CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
- RUNTIME_ASSERT(max_references >= 0);
-
- // Get the number of referencing objects.
- int count;
- HeapIterator heap_iterator(heap);
- count = DebugConstructedBy(&heap_iterator,
- constructor,
- max_references,
- NULL,
- 0);
-
- // Allocate an array to hold the result.
- Object* object;
- { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- FixedArray* instances = FixedArray::cast(object);
-
- ASSERT(HEAP->IsHeapIterable());
- // Fill the referencing objects.
- HeapIterator heap_iterator2(heap);
- count = DebugConstructedBy(&heap_iterator2,
- constructor,
- max_references,
- instances,
- count);
-
- // Return result as JS array.
- Object* result;
- { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
- isolate->context()->native_context()->array_function());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- return JSArray::cast(result)->SetContent(instances);
-}
-
-
-// Find the effective prototype object as returned by __proto__.
-// args[0]: the object to find the prototype for.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- // Use the __proto__ accessor.
- return Accessors::ObjectPrototype.getter(obj, NULL);
-}
-
-
-// Patches script source (should be called upon BeforeCompile event).
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(JSValue, script_wrapper, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
-
- RUNTIME_ASSERT(script_wrapper->value()->IsScript());
- Handle<Script> script(Script::cast(script_wrapper->value()));
-
- int compilation_state = Smi::cast(script->compilation_state())->value();
- RUNTIME_ASSERT(compilation_state == Script::COMPILATION_STATE_INITIAL);
- script->set_source(*source);
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
- ASSERT(args.length() == 0);
- CPU::DebugBreak();
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
-#ifdef DEBUG
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- // Get the function and make sure it is compiled.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
- return Failure::Exception();
- }
- func->code()->PrintLn();
-#endif // DEBUG
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
-#ifdef DEBUG
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- // Get the function and make sure it is compiled.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
- return Failure::Exception();
- }
- func->shared()->construct_stub()->PrintLn();
-#endif // DEBUG
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
- NoHandleAllocation ha(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return f->shared()->inferred_name();
-}
-
-
-static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
- Script* script,
- FixedArray* buffer) {
- AssertNoAllocation no_allocations;
- int counter = 0;
- int buffer_size = buffer->length();
- for (HeapObject* obj = iterator->next();
- obj != NULL;
- obj = iterator->next()) {
- ASSERT(obj != NULL);
- if (!obj->IsSharedFunctionInfo()) {
- continue;
- }
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
- if (shared->script() != script) {
- continue;
- }
- if (counter < buffer_size) {
- buffer->set(counter, shared);
- }
- counter++;
- }
- return counter;
-}
-
-// For a script finds all SharedFunctionInfo's in the heap that points
-// to this script. Returns JSArray of SharedFunctionInfo wrapped
-// in OpaqueReferences.
-RUNTIME_FUNCTION(MaybeObject*,
- Runtime_LiveEditFindSharedFunctionInfosForScript) {
- CHECK(isolate->debugger()->live_edit_enabled());
- ASSERT(args.length() == 1);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSValue, script_value, 0);
-
- RUNTIME_ASSERT(script_value->value()->IsScript());
- Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
-
- const int kBufferSize = 32;
-
- Handle<FixedArray> array;
- array = isolate->factory()->NewFixedArray(kBufferSize);
- int number;
- Heap* heap = isolate->heap();
- {
- heap->EnsureHeapIsIterable();
- AssertNoAllocation no_allocations;
- HeapIterator heap_iterator(heap);
- Script* scr = *script;
- FixedArray* arr = *array;
- number = FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
- }
- if (number > kBufferSize) {
- array = isolate->factory()->NewFixedArray(number);
- heap->EnsureHeapIsIterable();
- AssertNoAllocation no_allocations;
- HeapIterator heap_iterator(heap);
- Script* scr = *script;
- FixedArray* arr = *array;
- FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
- }
-
- Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(array);
- result->set_length(Smi::FromInt(number));
-
- LiveEdit::WrapSharedFunctionInfos(result);
-
- return *result;
-}
-
-// For a script calculates compilation information about all its functions.
-// The script source is explicitly specified by the second argument.
-// The source of the actual script is not used, however it is important that
-// all generated code keeps references to this particular instance of script.
-// Returns a JSArray of compilation infos. The array is ordered so that
-// each function with all its descendant is always stored in a continues range
-// with the function itself going first. The root function is a script function.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
- CHECK(isolate->debugger()->live_edit_enabled());
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSValue, script, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
-
- RUNTIME_ASSERT(script->value()->IsScript());
- Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
-
- JSArray* result = LiveEdit::GatherCompileInfo(script_handle, source);
-
- if (isolate->has_pending_exception()) {
- return Failure::Exception();
- }
-
- return result;
-}
-
-// Changes the source of the script to a new_source.
-// If old_script_name is provided (i.e. is a String), also creates a copy of
-// the script with its original source and sends notification to debugger.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
- CHECK(isolate->debugger()->live_edit_enabled());
- ASSERT(args.length() == 3);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSValue, original_script_value, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1);
- Handle<Object> old_script_name(args[2], isolate);
-
- RUNTIME_ASSERT(original_script_value->value()->IsScript());
- Handle<Script> original_script(Script::cast(original_script_value->value()));
-
- Object* old_script = LiveEdit::ChangeScriptSource(original_script,
- new_source,
- old_script_name);
-
- if (old_script->IsScript()) {
- Handle<Script> script_handle(Script::cast(old_script));
- return *(GetScriptWrapper(script_handle));
- } else {
- return isolate->heap()->null_value();
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
- CHECK(isolate->debugger()->live_edit_enabled());
- ASSERT(args.length() == 1);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0);
- return LiveEdit::FunctionSourceUpdated(shared_info);
-}
-
-
-// Replaces code of SharedFunctionInfo with a new one.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
- CHECK(isolate->debugger()->live_edit_enabled());
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 1);
-
- return LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
-}
-
-// Connects SharedFunctionInfo to another script.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
- CHECK(isolate->debugger()->live_edit_enabled());
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- Handle<Object> function_object(args[0], isolate);
- Handle<Object> script_object(args[1], isolate);
-
- if (function_object->IsJSValue()) {
- Handle<JSValue> function_wrapper = Handle<JSValue>::cast(function_object);
- if (script_object->IsJSValue()) {
- RUNTIME_ASSERT(JSValue::cast(*script_object)->value()->IsScript());
- Script* script = Script::cast(JSValue::cast(*script_object)->value());
- script_object = Handle<Object>(script, isolate);
- }
-
- LiveEdit::SetFunctionScript(function_wrapper, script_object);
- } else {
- // Just ignore this. We may not have a SharedFunctionInfo for some functions
- // and we check it in this function.
- }
-
- return isolate->heap()->undefined_value();
-}
-
-
-// In a code of a parent function replaces original function as embedded object
-// with a substitution one.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) {
- CHECK(isolate->debugger()->live_edit_enabled());
- ASSERT(args.length() == 3);
- HandleScope scope(isolate);
-
- CONVERT_ARG_HANDLE_CHECKED(JSValue, parent_wrapper, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSValue, orig_wrapper, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSValue, subst_wrapper, 2);
-
- LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper,
- subst_wrapper);
-
- return isolate->heap()->undefined_value();
-}
-
-
-// Updates positions of a shared function info (first parameter) according
-// to script source change. Text change is described in second parameter as
-// array of groups of 3 numbers:
-// (change_begin, change_end, change_end_new_position).
-// Each group describes a change in text; groups are sorted by change_begin.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
- CHECK(isolate->debugger()->live_edit_enabled());
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1);
-
- return LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
-}
-
-
-// For array of SharedFunctionInfo's (each wrapped in JSValue)
-// checks that none of them have activations on stacks (of any thread).
-// Returns array of the same length with corresponding results of
-// LiveEdit::FunctionPatchabilityStatus type.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
- CHECK(isolate->debugger()->live_edit_enabled());
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
-
- return *LiveEdit::CheckAndDropActivations(shared_array, do_drop,
- isolate->runtime_zone());
-}
-
-// Compares 2 strings line-by-line, then token-wise and returns diff in form
-// of JSArray of triplets (pos1, pos1_end, pos2_end) describing list
-// of diff chunks.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
- CHECK(isolate->debugger()->live_edit_enabled());
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(String, s1, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, s2, 1);
-
- return *LiveEdit::CompareStrings(s1, s2);
-}
-
-
-// Restarts a call frame and completely drops all frames above.
-// Returns true if successful. Otherwise returns undefined or an error message.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) {
- CHECK(isolate->debugger()->live_edit_enabled());
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
-
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
- Heap* heap = isolate->heap();
-
- // Find the relevant frame with the requested index.
- StackFrame::Id id = isolate->debug()->break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there are no JavaScript stack frames return undefined.
- return heap->undefined_value();
- }
-
- int count = 0;
- JavaScriptFrameIterator it(isolate, id);
- for (; !it.done(); it.Advance()) {
- if (index < count + it.frame()->GetInlineCount()) break;
- count += it.frame()->GetInlineCount();
- }
- if (it.done()) return heap->undefined_value();
-
- const char* error_message =
- LiveEdit::RestartFrame(it.frame(), isolate->runtime_zone());
- if (error_message) {
- return *(isolate->factory()->InternalizeUtf8String(error_message));
- }
- return heap->true_value();
-}
-
-
-// A testing entry. Returns statement position which is the closest to
-// source_position.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
- CHECK(isolate->debugger()->live_edit_enabled());
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
-
- Handle<Code> code(function->code(), isolate);
-
- if (code->kind() != Code::FUNCTION &&
- code->kind() != Code::OPTIMIZED_FUNCTION) {
- return isolate->heap()->undefined_value();
- }
-
- RelocIterator it(*code, RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION));
- int closest_pc = 0;
- int distance = kMaxInt;
- while (!it.done()) {
- int statement_position = static_cast<int>(it.rinfo()->data());
- // Check if this break point is closer that what was previously found.
- if (source_position <= statement_position &&
- statement_position - source_position < distance) {
- closest_pc =
- static_cast<int>(it.rinfo()->pc() - code->instruction_start());
- distance = statement_position - source_position;
- // Check whether we can't get any closer.
- if (distance == 0) break;
- }
- it.next();
- }
-
- return Smi::FromInt(closest_pc);
-}
-
-
-// Calls specified function with or without entering the debugger.
-// This is used in unit tests to run code as if debugger is entered or simply
-// to have a stack with C++ frame in the middle.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
- ASSERT(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(without_debugger, 1);
-
- Handle<Object> result;
- bool pending_exception;
- {
- if (without_debugger) {
- result = Execution::Call(function, isolate->global_object(), 0, NULL,
- &pending_exception);
- } else {
- EnterDebugger enter_debugger;
- result = Execution::Call(function, isolate->global_object(), 0, NULL,
- &pending_exception);
- }
- }
- if (!pending_exception) {
- return *result;
- } else {
- return Failure::Exception();
- }
-}
-
-
-// Sets a v8 flag.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
- CONVERT_ARG_CHECKED(String, arg, 0);
- SmartArrayPointer<char> flags =
- arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- FlagList::SetFlagsFromString(*flags, StrLength(*flags));
- return isolate->heap()->undefined_value();
-}
-
-
-// Performs a GC.
-// Presently, it only does a full GC.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
- isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "%CollectGarbage");
- return isolate->heap()->undefined_value();
-}
-
-
-// Gets the current heap usage.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
- int usage = static_cast<int>(isolate->heap()->SizeOfObjects());
- if (!Smi::IsValid(usage)) {
- return *isolate->factory()->NewNumberFromInt(usage);
- }
- return Smi::FromInt(usage);
-}
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
- NoHandleAllocation ha(isolate);
- v8::V8::ResumeProfiler();
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
- NoHandleAllocation ha(isolate);
- v8::V8::PauseProfiler();
- return isolate->heap()->undefined_value();
-}
-
-
-// Finds the script object from the script data. NOTE: This operation uses
-// heap traversal to find the function generated for the source position
-// for the requested break point. For lazily compiled functions several heap
-// traversals might be required rendering this operation as a rather slow
-// operation. However for setting break points which is normally done through
-// some kind of user interaction the performance is not crucial.
-static Handle<Object> Runtime_GetScriptFromScriptName(
- Handle<String> script_name) {
- // Scan the heap for Script objects to find the script with the requested
- // script data.
- Handle<Script> script;
- Heap* heap = script_name->GetHeap();
- heap->EnsureHeapIsIterable();
- AssertNoAllocation no_allocation_during_heap_iteration;
- HeapIterator iterator(heap);
- HeapObject* obj = NULL;
- while (script.is_null() && ((obj = iterator.next()) != NULL)) {
- // If a script is found check if it has the script data requested.
- if (obj->IsScript()) {
- if (Script::cast(obj)->name()->IsString()) {
- if (String::cast(Script::cast(obj)->name())->Equals(*script_name)) {
- script = Handle<Script>(Script::cast(obj));
- }
- }
- }
- }
-
- // If no script with the requested script data is found return undefined.
- if (script.is_null()) return FACTORY->undefined_value();
-
- // Return the script found.
- return GetScriptWrapper(script);
-}
-
-
-// Get the script object from script data. NOTE: Regarding performance
-// see the NOTE for GetScriptFromScriptData.
-// args[0]: script data for the script to find the source for
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScript) {
- HandleScope scope(isolate);
-
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(String, script_name, 0);
-
- // Find the requested script.
- Handle<Object> result =
- Runtime_GetScriptFromScriptName(Handle<String>(script_name));
- return *result;
-}
-
-
-// Collect the raw data for a stack trace. Returns an array of 4
-// element segments each containing a receiver, function, code and
-// native code offset.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
- ASSERT_EQ(args.length(), 3);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
- Handle<Object> caller = args.at<Object>(1);
- CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[2]);
-
- HandleScope scope(isolate);
- // Optionally capture a more detailed stack trace for the message.
- isolate->CaptureAndSetDetailedStackTrace(error_object);
- // Capture a simple stack trace for the stack property.
- return *isolate->CaptureSimpleStackTrace(error_object, caller, limit);
-}
-
-
-// Mark a function to recognize when called after GC to format the stack trace.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MarkOneShotGetter) {
- ASSERT_EQ(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- HandleScope scope(isolate);
- Handle<String> key = isolate->factory()->hidden_stack_trace_string();
- JSObject::SetHiddenProperty(fun, key, key);
- return *fun;
-}
-
-
-// Retrieve the stack trace. This could be the raw stack trace collected
-// on stack overflow or the already formatted stack trace string.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOverflowedStackTrace) {
- HandleScope scope(isolate);
- ASSERT_EQ(args.length(), 1);
- CONVERT_ARG_CHECKED(JSObject, error_object, 0);
- String* key = isolate->heap()->hidden_stack_trace_string();
- Object* result = error_object->GetHiddenProperty(key);
- RUNTIME_ASSERT(result->IsJSArray() ||
- result->IsString() ||
- result->IsUndefined());
- return result;
-}
-
-
-// Set or clear the stack trace attached to an stack overflow error object.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetOverflowedStackTrace) {
- HandleScope scope(isolate);
- ASSERT_EQ(args.length(), 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, value, 1);
- Handle<String> key = isolate->factory()->hidden_stack_trace_string();
- if (value->IsUndefined()) {
- error_object->DeleteHiddenProperty(*key);
- } else {
- RUNTIME_ASSERT(value->IsString());
- JSObject::SetHiddenProperty(error_object, key, value);
- }
- return *error_object;
-}
-
-
-// Returns V8 version as a string.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
- ASSERT_EQ(args.length(), 0);
-
- NoHandleAllocation ha(isolate);
-
- const char* version_string = v8::V8::GetVersion();
-
- return isolate->heap()->AllocateStringFromOneByte(CStrVector(version_string),
- NOT_TENURED);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
- ASSERT(args.length() == 2);
- OS::PrintError("abort: %s\n",
- reinterpret_cast<char*>(args[0]) + args.smi_at(1));
- isolate->PrintStack();
- OS::Abort();
- UNREACHABLE();
- return NULL;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FlattenString) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
- FlattenString(str);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
- // This is only called from codegen, so checks might be more lax.
- CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0);
- Object* key = args[1];
-
- int finger_index = cache->finger_index();
- Object* o = cache->get(finger_index);
- if (o == key) {
- // The fastest case: hit the same place again.
- return cache->get(finger_index + 1);
- }
-
- for (int i = finger_index - 2;
- i >= JSFunctionResultCache::kEntriesIndex;
- i -= 2) {
- o = cache->get(i);
- if (o == key) {
- cache->set_finger_index(i);
- return cache->get(i + 1);
- }
- }
-
- int size = cache->size();
- ASSERT(size <= cache->length());
-
- for (int i = size - 2; i > finger_index; i -= 2) {
- o = cache->get(i);
- if (o == key) {
- cache->set_finger_index(i);
- return cache->get(i + 1);
- }
- }
-
- // There is no value in the cache. Invoke the function and cache result.
- HandleScope scope(isolate);
-
- Handle<JSFunctionResultCache> cache_handle(cache);
- Handle<Object> key_handle(key, isolate);
- Handle<Object> value;
- {
- Handle<JSFunction> factory(JSFunction::cast(
- cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
- // TODO(antonm): consider passing a receiver when constructing a cache.
- Handle<Object> receiver(isolate->native_context()->global_object(),
- isolate);
- // This handle is nor shared, nor used later, so it's safe.
- Handle<Object> argv[] = { key_handle };
- bool pending_exception;
- value = Execution::Call(factory,
- receiver,
- ARRAY_SIZE(argv),
- argv,
- &pending_exception);
- if (pending_exception) return Failure::Exception();
- }
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- cache_handle->JSFunctionResultCacheVerify();
- }
-#endif
-
- // Function invocation may have cleared the cache. Reread all the data.
- finger_index = cache_handle->finger_index();
- size = cache_handle->size();
-
- // If we have spare room, put new data into it, otherwise evict post finger
- // entry which is likely to be the least recently used.
- int index = -1;
- if (size < cache_handle->length()) {
- cache_handle->set_size(size + JSFunctionResultCache::kEntrySize);
- index = size;
- } else {
- index = finger_index + JSFunctionResultCache::kEntrySize;
- if (index == cache_handle->length()) {
- index = JSFunctionResultCache::kEntriesIndex;
- }
- }
-
- ASSERT(index % 2 == 0);
- ASSERT(index >= JSFunctionResultCache::kEntriesIndex);
- ASSERT(index < cache_handle->length());
-
- cache_handle->set(index, *key_handle);
- cache_handle->set(index + 1, *value);
- cache_handle->set_finger_index(index);
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- cache_handle->JSFunctionResultCacheVerify();
- }
-#endif
-
- return *value;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) {
- CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
- return Smi::FromInt(message->start_position());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
- CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
- return message->script();
-}
-
-
-#ifdef DEBUG
-// ListNatives is ONLY used by the fuzz-natives.js in debug mode
-// Exclude the code in release mode.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
- ASSERT(args.length() == 0);
- HandleScope scope(isolate);
-#define COUNT_ENTRY(Name, argc, ressize) + 1
- int entry_count = 0
- RUNTIME_FUNCTION_LIST(COUNT_ENTRY)
- INLINE_FUNCTION_LIST(COUNT_ENTRY)
- INLINE_RUNTIME_FUNCTION_LIST(COUNT_ENTRY);
-#undef COUNT_ENTRY
- Factory* factory = isolate->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(entry_count);
- int index = 0;
- bool inline_runtime_functions = false;
-#define ADD_ENTRY(Name, argc, ressize) \
- { \
- HandleScope inner(isolate); \
- Handle<String> name; \
- /* Inline runtime functions have an underscore in front of the name. */ \
- if (inline_runtime_functions) { \
- name = factory->NewStringFromAscii( \
- Vector<const char>("_" #Name, StrLength("_" #Name))); \
- } else { \
- name = factory->NewStringFromAscii( \
- Vector<const char>(#Name, StrLength(#Name))); \
- } \
- Handle<FixedArray> pair_elements = factory->NewFixedArray(2); \
- pair_elements->set(0, *name); \
- pair_elements->set(1, Smi::FromInt(argc)); \
- Handle<JSArray> pair = factory->NewJSArrayWithElements(pair_elements); \
- elements->set(index++, *pair); \
- }
- inline_runtime_functions = false;
- RUNTIME_FUNCTION_LIST(ADD_ENTRY)
- inline_runtime_functions = true;
- INLINE_FUNCTION_LIST(ADD_ENTRY)
- INLINE_RUNTIME_FUNCTION_LIST(ADD_ENTRY)
-#undef ADD_ENTRY
- ASSERT_EQ(index, entry_count);
- Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
- return *result;
-}
-#endif
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, format, 0);
- CONVERT_ARG_CHECKED(JSArray, elms, 1);
- String::FlatContent format_content = format->GetFlatContent();
- RUNTIME_ASSERT(format_content.IsAscii());
- Vector<const uint8_t> chars = format_content.ToOneByteVector();
- LOGGER->LogRuntime(isolate, Vector<const char>::cast(chars), elms);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) {
- UNREACHABLE(); // implemented as macro in the parser
- return NULL;
-}
-
-
-#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
- RUNTIME_FUNCTION(MaybeObject*, Runtime_Has##Name) { \
- CONVERT_ARG_CHECKED(JSObject, obj, 0); \
- return isolate->heap()->ToBoolean(obj->Has##Name()); \
- }
-
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastObjectElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOrObjectElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalPixelElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalByteElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedByteElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalShortElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedShortElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalIntElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedIntElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalFloatElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalDoubleElements)
-// Properties test sitting with elements tests - not fooling anyone.
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
-
-#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSObject, obj1, 0);
- CONVERT_ARG_CHECKED(JSObject, obj2, 1);
- return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
- if (obj->IsJSGlobalProxy()) {
- Object* proto = obj->GetPrototype();
- if (obj->IsNull()) return isolate->heap()->false_value();
- ASSERT(proto->IsJSGlobalObject());
- obj = JSReceiver::cast(proto);
- }
- return isolate->heap()->ToBoolean(obj->map()->is_observed());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(is_observed, 1);
- if (obj->IsJSGlobalProxy()) {
- Object* proto = obj->GetPrototype();
- if (obj->IsNull()) return isolate->heap()->undefined_value();
- ASSERT(proto->IsJSGlobalObject());
- obj = JSReceiver::cast(proto);
- }
- ASSERT(!(obj->map()->is_observed() && obj->IsJSObject() &&
- JSObject::cast(obj)->HasFastElements()));
- if (obj->map()->is_observed() != is_observed) {
- if (is_observed && obj->IsJSObject() &&
- !JSObject::cast(obj)->HasExternalArrayElements()) {
- // Go to dictionary mode, so that we don't skip map checks.
- MaybeObject* maybe = JSObject::cast(obj)->NormalizeElements();
- if (maybe->IsFailure()) return maybe;
- ASSERT(!JSObject::cast(obj)->HasFastElements());
- }
- MaybeObject* maybe = obj->map()->Copy();
- Map* map;
- if (!maybe->To(&map)) return maybe;
- map->set_is_observed(is_observed);
- obj->set_map(map);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetObserverDeliveryPending) {
- ASSERT(args.length() == 0);
- isolate->set_observer_delivery_pending(true);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) {
- ASSERT(args.length() == 0);
- return isolate->heap()->observation_state();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ObservationWeakMapCreate) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 0);
- // TODO(adamk): Currently this runtime function is only called three times per
- // isolate. If it's called more often, the map should be moved into the
- // strong root list.
- Handle<Map> map =
- isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
- Handle<JSWeakMap> weakmap =
- Handle<JSWeakMap>::cast(isolate->factory()->NewJSObjectFromMap(map));
- return WeakMapInitialize(isolate, weakmap);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_UnwrapGlobalProxy) {
- ASSERT(args.length() == 1);
- Object* object = args[0];
- if (object->IsJSGlobalProxy()) {
- object = object->GetPrototype(isolate);
- if (object->IsNull()) return isolate->heap()->undefined_value();
- }
- return object;
-}
-
-
-// ----------------------------------------------------------------------------
-// Implementation of Runtime
-
-#define F(name, number_of_args, result_size) \
- { Runtime::k##name, Runtime::RUNTIME, #name, \
- FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
-
-
-#define I(name, number_of_args, result_size) \
- { Runtime::kInline##name, Runtime::INLINE, \
- "_" #name, NULL, number_of_args, result_size },
-
-static const Runtime::Function kIntrinsicFunctions[] = {
- RUNTIME_FUNCTION_LIST(F)
- INLINE_FUNCTION_LIST(I)
- INLINE_RUNTIME_FUNCTION_LIST(I)
-};
-
-
-MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
- Object* dictionary) {
- ASSERT(Isolate::Current()->heap() == heap);
- ASSERT(dictionary != NULL);
- ASSERT(StringDictionary::cast(dictionary)->NumberOfElements() == 0);
- for (int i = 0; i < kNumFunctions; ++i) {
- Object* name_string;
- { MaybeObject* maybe_name_string =
- heap->InternalizeUtf8String(kIntrinsicFunctions[i].name);
- if (!maybe_name_string->ToObject(&name_string)) return maybe_name_string;
- }
- StringDictionary* string_dictionary = StringDictionary::cast(dictionary);
- { MaybeObject* maybe_dictionary = string_dictionary->Add(
- String::cast(name_string),
- Smi::FromInt(i),
- PropertyDetails(NONE, NORMAL));
- if (!maybe_dictionary->ToObject(&dictionary)) {
- // Non-recoverable failure. Calling code must restart heap
- // initialization.
- return maybe_dictionary;
- }
- }
- }
- return dictionary;
-}
-
-
-const Runtime::Function* Runtime::FunctionForName(Handle<String> name) {
- Heap* heap = name->GetHeap();
- int entry = heap->intrinsic_function_names()->FindEntry(*name);
- if (entry != kNotFound) {
- Object* smi_index = heap->intrinsic_function_names()->ValueAt(entry);
- int function_index = Smi::cast(smi_index)->value();
- return &(kIntrinsicFunctions[function_index]);
- }
- return NULL;
-}
-
-
-const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
- return &(kIntrinsicFunctions[static_cast<int>(id)]);
-}
-
-
-void Runtime::PerformGC(Object* result) {
- Isolate* isolate = Isolate::Current();
- Failure* failure = Failure::cast(result);
- if (failure->IsRetryAfterGC()) {
- if (isolate->heap()->new_space()->AddFreshPage()) {
- return;
- }
-
- // Try to do a garbage collection; ignore it if it fails. The C
- // entry stub will throw an out-of-memory exception in that case.
- isolate->heap()->CollectGarbage(failure->allocation_space(),
- "Runtime::PerformGC");
- } else {
- // Handle last resort GC and make sure to allow future allocations
- // to grow the heap without causing GCs (if possible).
- isolate->counters()->gc_last_resort_from_js()->Increment();
- isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
- "Runtime::PerformGC");
- }
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/runtime.h b/src/3rdparty/v8/src/runtime.h
deleted file mode 100644
index 74cc2d8..0000000
--- a/src/3rdparty/v8/src/runtime.h
+++ /dev/null
@@ -1,727 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_RUNTIME_H_
-#define V8_RUNTIME_H_
-
-#include "allocation.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// The interface to C++ runtime functions.
-
-// ----------------------------------------------------------------------------
-// RUNTIME_FUNCTION_LIST_ALWAYS defines runtime calls available in both
-// release and debug mode.
-// This macro should only be used by the macro RUNTIME_FUNCTION_LIST.
-
-// WARNING: RUNTIME_FUNCTION_LIST_ALWAYS_* is a very large macro that caused
-// MSVC Intellisense to crash. It was broken into two macros to work around
-// this problem. Please avoid large recursive macros whenever possible.
-#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
- /* Property access */ \
- F(GetProperty, 2, 1) \
- F(KeyedGetProperty, 2, 1) \
- F(DeleteProperty, 3, 1) \
- F(HasLocalProperty, 2, 1) \
- F(HasProperty, 2, 1) \
- F(HasElement, 2, 1) \
- F(IsPropertyEnumerable, 2, 1) \
- F(GetPropertyNames, 1, 1) \
- F(GetPropertyNamesFast, 1, 1) \
- F(GetLocalPropertyNames, 1, 1) \
- F(GetLocalElementNames, 1, 1) \
- F(GetInterceptorInfo, 1, 1) \
- F(GetNamedInterceptorPropertyNames, 1, 1) \
- F(GetIndexedInterceptorElementNames, 1, 1) \
- F(GetArgumentsProperty, 1, 1) \
- F(ToFastProperties, 1, 1) \
- F(FinishArrayPrototypeSetup, 1, 1) \
- F(SpecialArrayFunctions, 1, 1) \
- F(GetDefaultReceiver, 1, 1) \
- \
- F(GetPrototype, 1, 1) \
- F(IsInPrototypeChain, 2, 1) \
- \
- F(GetOwnProperty, 2, 1) \
- \
- F(IsExtensible, 1, 1) \
- F(PreventExtensions, 1, 1)\
- \
- /* Utilities */ \
- F(CheckIsBootstrapping, 0, 1) \
- F(GetRootNaN, 0, 1) \
- F(Call, -1 /* >= 2 */, 1) \
- F(Apply, 5, 1) \
- F(GetFunctionDelegate, 1, 1) \
- F(GetConstructorDelegate, 1, 1) \
- F(NewArgumentsFast, 3, 1) \
- F(NewStrictArgumentsFast, 3, 1) \
- F(LazyCompile, 1, 1) \
- F(LazyRecompile, 1, 1) \
- F(ParallelRecompile, 1, 1) \
- F(ForceParallelRecompile, 1, 1) \
- F(InstallRecompiledCode, 1, 1) \
- F(NotifyDeoptimized, 1, 1) \
- F(NotifyStubFailure, 0, 1) \
- F(NotifyOSR, 0, 1) \
- F(DeoptimizeFunction, 1, 1) \
- F(ClearFunctionTypeFeedback, 1, 1) \
- F(RunningInSimulator, 0, 1) \
- F(OptimizeFunctionOnNextCall, -1, 1) \
- F(GetOptimizationStatus, 1, 1) \
- F(GetOptimizationCount, 1, 1) \
- F(CompileForOnStackReplacement, 1, 1) \
- F(SetNewFunctionAttributes, 1, 1) \
- F(AllocateInNewSpace, 1, 1) \
- F(SetNativeFlag, 1, 1) \
- F(StoreArrayLiteralElement, 5, 1) \
- F(DebugCallbackSupportsStepping, 1, 1) \
- F(DebugPrepareStepInIfStepping, 1, 1) \
- F(FlattenString, 1, 1) \
- \
- /* Array join support */ \
- F(PushIfAbsent, 2, 1) \
- F(ArrayConcat, 1, 1) \
- \
- /* Conversions */ \
- F(ToBool, 1, 1) \
- F(Typeof, 1, 1) \
- \
- F(StringToNumber, 1, 1) \
- F(StringParseInt, 2, 1) \
- F(StringParseFloat, 1, 1) \
- F(StringToLowerCase, 1, 1) \
- F(StringToUpperCase, 1, 1) \
- F(StringSplit, 3, 1) \
- F(CharFromCode, 1, 1) \
- F(URIEscape, 1, 1) \
- F(URIUnescape, 1, 1) \
- \
- F(NumberToString, 1, 1) \
- F(NumberToStringSkipCache, 1, 1) \
- F(NumberToInteger, 1, 1) \
- F(NumberToIntegerMapMinusZero, 1, 1) \
- F(NumberToJSUint32, 1, 1) \
- F(NumberToJSInt32, 1, 1) \
- F(NumberToSmi, 1, 1) \
- F(AllocateHeapNumber, 0, 1) \
- \
- /* Arithmetic operations */ \
- F(NumberAdd, 2, 1) \
- F(NumberSub, 2, 1) \
- F(NumberMul, 2, 1) \
- F(NumberDiv, 2, 1) \
- F(NumberMod, 2, 1) \
- F(NumberUnaryMinus, 1, 1) \
- F(NumberAlloc, 0, 1) \
- \
- F(StringAdd, 2, 1) \
- F(StringBuilderConcat, 3, 1) \
- F(StringBuilderJoin, 3, 1) \
- F(SparseJoinWithSeparator, 3, 1) \
- \
- /* Bit operations */ \
- F(NumberOr, 2, 1) \
- F(NumberAnd, 2, 1) \
- F(NumberXor, 2, 1) \
- F(NumberNot, 1, 1) \
- \
- F(NumberShl, 2, 1) \
- F(NumberShr, 2, 1) \
- F(NumberSar, 2, 1) \
- \
- /* Comparisons */ \
- F(NumberEquals, 2, 1) \
- F(StringEquals, 2, 1) \
- F(UserObjectEquals, 2, 1) \
- \
- F(NumberCompare, 3, 1) \
- F(SmiLexicographicCompare, 2, 1) \
- F(StringCompare, 2, 1) \
- \
- /* Math */ \
- F(Math_acos, 1, 1) \
- F(Math_asin, 1, 1) \
- F(Math_atan, 1, 1) \
- F(Math_atan2, 2, 1) \
- F(Math_ceil, 1, 1) \
- F(Math_cos, 1, 1) \
- F(Math_exp, 1, 1) \
- F(Math_floor, 1, 1) \
- F(Math_log, 1, 1) \
- F(Math_pow, 2, 1) \
- F(Math_pow_cfunction, 2, 1) \
- F(RoundNumber, 1, 1) \
- F(Math_sin, 1, 1) \
- F(Math_sqrt, 1, 1) \
- F(Math_tan, 1, 1) \
- \
- /* Regular expressions */ \
- F(RegExpCompile, 3, 1) \
- F(RegExpExec, 4, 1) \
- F(RegExpExecMultiple, 4, 1) \
- F(RegExpInitializeObject, 5, 1) \
- F(RegExpConstructResult, 3, 1) \
- \
- /* JSON */ \
- F(ParseJson, 1, 1) \
- F(BasicJSONStringify, 1, 1) \
- F(QuoteJSONString, 1, 1) \
- F(QuoteJSONStringComma, 1, 1) \
- F(QuoteJSONStringArray, 1, 1) \
- \
- /* Strings */ \
- F(StringCharCodeAt, 2, 1) \
- F(StringIndexOf, 3, 1) \
- F(StringLastIndexOf, 3, 1) \
- F(StringLocaleCompare, 2, 1) \
- F(SubString, 3, 1) \
- F(StringReplaceGlobalRegExpWithString, 4, 1) \
- F(StringReplaceOneCharWithString, 3, 1) \
- F(StringMatch, 3, 1) \
- F(StringTrim, 3, 1) \
- F(StringToArray, 2, 1) \
- F(NewStringWrapper, 1, 1) \
- F(NewString, 2, 1) \
- F(TruncateString, 2, 1) \
- \
- /* Numbers */ \
- F(NumberToRadixString, 2, 1) \
- F(NumberToFixed, 2, 1) \
- F(NumberToExponential, 2, 1) \
- F(NumberToPrecision, 2, 1)
-
-#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
- /* Reflection */ \
- F(FunctionSetInstanceClassName, 2, 1) \
- F(FunctionSetLength, 2, 1) \
- F(FunctionSetPrototype, 2, 1) \
- F(FunctionSetReadOnlyPrototype, 1, 1) \
- F(FunctionGetName, 1, 1) \
- F(FunctionSetName, 2, 1) \
- F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
- F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
- F(FunctionBindArguments, 4, 1) \
- F(BoundFunctionGetBindings, 1, 1) \
- F(FunctionRemovePrototype, 1, 1) \
- F(FunctionGetSourceCode, 1, 1) \
- F(FunctionGetScript, 1, 1) \
- F(FunctionGetScriptSourcePosition, 1, 1) \
- F(FunctionGetPositionForOffset, 2, 1) \
- F(FunctionIsAPIFunction, 1, 1) \
- F(FunctionIsBuiltin, 1, 1) \
- F(GetScript, 1, 1) \
- F(CollectStackTrace, 3, 1) \
- F(MarkOneShotGetter, 1, 1) \
- F(GetOverflowedStackTrace, 1, 1) \
- F(SetOverflowedStackTrace, 2, 1) \
- F(GetV8Version, 0, 1) \
- \
- F(ClassOf, 1, 1) \
- F(SetCode, 2, 1) \
- F(SetExpectedNumberOfProperties, 2, 1) \
- \
- F(CreateApiFunction, 1, 1) \
- F(IsTemplate, 1, 1) \
- F(GetTemplateField, 2, 1) \
- F(DisableAccessChecks, 1, 1) \
- F(EnableAccessChecks, 1, 1) \
- \
- /* Dates */ \
- F(DateCurrentTime, 0, 1) \
- F(DateParseString, 2, 1) \
- F(DateLocalTimezone, 1, 1) \
- F(DateToUTC, 1, 1) \
- F(DateMakeDay, 2, 1) \
- F(DateSetValue, 3, 1) \
- \
- /* Numbers */ \
- \
- /* Globals */ \
- F(CompileString, 1, 1) \
- F(GlobalPrint, 1, 1) \
- \
- /* Eval */ \
- F(GlobalReceiver, 1, 1) \
- F(ResolvePossiblyDirectEval, 6, 2) \
- \
- F(SetProperty, -1 /* 4 or 5 */, 1) \
- F(DefineOrRedefineDataProperty, 4, 1) \
- F(DefineOrRedefineAccessorProperty, 5, 1) \
- F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
- F(GetDataProperty, 2, 1) \
- \
- /* Arrays */ \
- F(RemoveArrayHoles, 2, 1) \
- F(GetArrayKeys, 2, 1) \
- F(MoveArrayContents, 2, 1) \
- F(EstimateNumberOfElements, 1, 1) \
- \
- /* Getters and Setters */ \
- F(LookupAccessor, 3, 1) \
- \
- /* Literals */ \
- F(MaterializeRegExpLiteral, 4, 1)\
- F(CreateObjectLiteral, 4, 1) \
- F(CreateObjectLiteralShallow, 4, 1) \
- F(CreateArrayLiteral, 3, 1) \
- F(CreateArrayLiteralShallow, 3, 1) \
- \
- /* Harmony modules */ \
- F(IsJSModule, 1, 1) \
- \
- /* Harmony symbols */ \
- F(CreateSymbol, 0, 1) \
- \
- /* Harmony proxies */ \
- F(CreateJSProxy, 2, 1) \
- F(CreateJSFunctionProxy, 4, 1) \
- F(IsJSProxy, 1, 1) \
- F(IsJSFunctionProxy, 1, 1) \
- F(GetHandler, 1, 1) \
- F(GetCallTrap, 1, 1) \
- F(GetConstructTrap, 1, 1) \
- F(Fix, 1, 1) \
- \
- /* Harmony sets */ \
- F(SetInitialize, 1, 1) \
- F(SetAdd, 2, 1) \
- F(SetHas, 2, 1) \
- F(SetDelete, 2, 1) \
- F(SetGetSize, 1, 1) \
- \
- /* Harmony maps */ \
- F(MapInitialize, 1, 1) \
- F(MapGet, 2, 1) \
- F(MapHas, 2, 1) \
- F(MapDelete, 2, 1) \
- F(MapSet, 3, 1) \
- F(MapGetSize, 1, 1) \
- \
- /* Harmony weakmaps */ \
- F(WeakMapInitialize, 1, 1) \
- F(WeakMapGet, 2, 1) \
- F(WeakMapHas, 2, 1) \
- F(WeakMapDelete, 2, 1) \
- F(WeakMapSet, 3, 1) \
- \
- /* Harmony observe */ \
- F(IsObserved, 1, 1) \
- F(SetIsObserved, 2, 1) \
- F(SetObserverDeliveryPending, 0, 1) \
- F(GetObservationState, 0, 1) \
- F(ObservationWeakMapCreate, 0, 1) \
- F(UnwrapGlobalProxy, 1, 1) \
- \
- /* Statements */ \
- F(NewClosure, 3, 1) \
- F(NewObject, 1, 1) \
- F(NewObjectFromBound, 1, 1) \
- F(FinalizeInstanceSize, 1, 1) \
- F(Throw, 1, 1) \
- F(ReThrow, 1, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(ThrowNotDateError, 0, 1) \
- F(StackGuard, 0, 1) \
- F(Interrupt, 0, 1) \
- F(PromoteScheduledException, 0, 1) \
- \
- /* Contexts */ \
- F(NewGlobalContext, 2, 1) \
- F(NewFunctionContext, 1, 1) \
- F(PushWithContext, 2, 1) \
- F(PushCatchContext, 3, 1) \
- F(PushBlockContext, 2, 1) \
- F(PushModuleContext, 2, 1) \
- F(DeleteContextSlot, 2, 1) \
- F(LoadContextSlot, 2, 2) \
- F(LoadContextSlotNoReferenceError, 2, 2) \
- F(StoreContextSlot, 4, 1) \
- \
- /* Declarations and initialization */ \
- F(DeclareGlobals, 3, 1) \
- F(DeclareModules, 1, 1) \
- F(DeclareContextSlot, 4, 1) \
- F(InitializeVarGlobal, -1 /* 3 or 4 */, 1) \
- F(InitializeConstGlobal, 3, 1) \
- F(InitializeConstContextSlot, 3, 1) \
- F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
- \
- /* Debugging */ \
- F(DebugPrint, 1, 1) \
- F(DebugTrace, 0, 1) \
- F(TraceEnter, 0, 1) \
- F(TraceExit, 1, 1) \
- F(Abort, 2, 1) \
- /* Logging */ \
- F(Log, 2, 1) \
- /* ES5 */ \
- F(LocalKeys, 1, 1) \
- /* Cache suport */ \
- F(GetFromCache, 2, 1) \
- \
- /* Message objects */ \
- F(MessageGetStartPosition, 1, 1) \
- F(MessageGetScript, 1, 1) \
- \
- /* Pseudo functions - handled as macros by parser */ \
- F(IS_VAR, 1, 1) \
- \
- /* expose boolean functions from objects-inl.h */ \
- F(HasFastSmiElements, 1, 1) \
- F(HasFastSmiOrObjectElements, 1, 1) \
- F(HasFastObjectElements, 1, 1) \
- F(HasFastDoubleElements, 1, 1) \
- F(HasFastHoleyElements, 1, 1) \
- F(HasDictionaryElements, 1, 1) \
- F(HasExternalPixelElements, 1, 1) \
- F(HasExternalArrayElements, 1, 1) \
- F(HasExternalByteElements, 1, 1) \
- F(HasExternalUnsignedByteElements, 1, 1) \
- F(HasExternalShortElements, 1, 1) \
- F(HasExternalUnsignedShortElements, 1, 1) \
- F(HasExternalIntElements, 1, 1) \
- F(HasExternalUnsignedIntElements, 1, 1) \
- F(HasExternalFloatElements, 1, 1) \
- F(HasExternalDoubleElements, 1, 1) \
- F(HasFastProperties, 1, 1) \
- F(TransitionElementsKind, 2, 1) \
- F(TransitionElementsSmiToDouble, 1, 1) \
- F(TransitionElementsDoubleToObject, 1, 1) \
- F(HaveSameMap, 2, 1) \
- /* profiler */ \
- F(ProfilerResume, 0, 1) \
- F(ProfilerPause, 0, 1)
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
- /* Debugger support*/ \
- F(DebugBreak, 0, 1) \
- F(SetDebugEventListener, 2, 1) \
- F(Break, 0, 1) \
- F(DebugGetPropertyDetails, 2, 1) \
- F(DebugGetProperty, 2, 1) \
- F(DebugPropertyTypeFromDetails, 1, 1) \
- F(DebugPropertyAttributesFromDetails, 1, 1) \
- F(DebugPropertyIndexFromDetails, 1, 1) \
- F(DebugNamedInterceptorPropertyValue, 2, 1) \
- F(DebugIndexedInterceptorElementValue, 2, 1) \
- F(CheckExecutionState, 1, 1) \
- F(GetFrameCount, 1, 1) \
- F(GetFrameDetails, 2, 1) \
- F(GetScopeCount, 2, 1) \
- F(GetScopeDetails, 4, 1) \
- F(GetFunctionScopeCount, 1, 1) \
- F(GetFunctionScopeDetails, 2, 1) \
- F(SetScopeVariableValue, 6, 1) \
- F(DebugPrintScopes, 0, 1) \
- F(GetThreadCount, 1, 1) \
- F(GetThreadDetails, 2, 1) \
- F(SetDisableBreak, 1, 1) \
- F(GetBreakLocations, 1, 1) \
- F(AllowBreakPointRelocation, 0, 1) \
- F(SetFunctionBreakPoint, 3, 1) \
- F(SetScriptBreakPoint, 3, 1) \
- F(ClearBreakPoint, 1, 1) \
- F(ChangeBreakOnException, 2, 1) \
- F(IsBreakOnException, 1, 1) \
- F(PrepareStep, 3, 1) \
- F(ClearStepping, 0, 1) \
- F(DebugEvaluate, 6, 1) \
- F(DebugEvaluateGlobal, 4, 1) \
- F(DebugGetLoadedScripts, 0, 1) \
- F(DebugReferencedBy, 3, 1) \
- F(DebugConstructedBy, 2, 1) \
- F(DebugGetPrototype, 1, 1) \
- F(DebugSetScriptSource, 2, 1) \
- F(SystemBreak, 0, 1) \
- F(DebugDisassembleFunction, 1, 1) \
- F(DebugDisassembleConstructor, 1, 1) \
- F(FunctionGetInferredName, 1, 1) \
- F(LiveEditFindSharedFunctionInfosForScript, 1, 1) \
- F(LiveEditGatherCompileInfo, 2, 1) \
- F(LiveEditReplaceScript, 3, 1) \
- F(LiveEditReplaceFunctionCode, 2, 1) \
- F(LiveEditFunctionSourceUpdated, 1, 1) \
- F(LiveEditFunctionSetScript, 2, 1) \
- F(LiveEditReplaceRefToNestedFunction, 3, 1) \
- F(LiveEditPatchFunctionPositions, 2, 1) \
- F(LiveEditCheckAndDropActivations, 2, 1) \
- F(LiveEditCompareStrings, 2, 1) \
- F(LiveEditRestartFrame, 2, 1) \
- F(GetFunctionCodePositionFromSource, 2, 1) \
- F(ExecuteInDebugContext, 2, 1) \
- \
- F(SetFlags, 1, 1) \
- F(CollectGarbage, 1, 1) \
- F(GetHeapUsage, 0, 1) \
-
-#else
-#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
-#endif
-
-#ifdef DEBUG
-#define RUNTIME_FUNCTION_LIST_DEBUG(F) \
- /* Testing */ \
- F(ListNatives, 0, 1)
-#else
-#define RUNTIME_FUNCTION_LIST_DEBUG(F)
-#endif
-
-// ----------------------------------------------------------------------------
-// RUNTIME_FUNCTION_LIST defines all runtime functions accessed
-// either directly by id (via the code generator), or indirectly
-// via a native call by name (from within JS code).
-
-#define RUNTIME_FUNCTION_LIST(F) \
- RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
- RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
- RUNTIME_FUNCTION_LIST_DEBUG(F) \
- RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
-
-// ----------------------------------------------------------------------------
-// INLINE_FUNCTION_LIST defines all inlined functions accessed
-// with a native call of the form %_name from within JS code.
-// Entries have the form F(name, number of arguments, number of return values).
-#define INLINE_FUNCTION_LIST(F) \
- F(IsSmi, 1, 1) \
- F(IsNonNegativeSmi, 1, 1) \
- F(IsSymbol, 1, 1) \
- F(IsArray, 1, 1) \
- F(IsRegExp, 1, 1) \
- F(IsConstructCall, 0, 1) \
- F(CallFunction, -1 /* receiver + n args + function */, 1) \
- F(ArgumentsLength, 0, 1) \
- F(Arguments, 1, 1) \
- F(ValueOf, 1, 1) \
- F(SetValueOf, 2, 1) \
- F(DateField, 2 /* date object, field index */, 1) \
- F(StringCharFromCode, 1, 1) \
- F(StringCharAt, 2, 1) \
- F(OneByteSeqStringSetChar, 3, 1) \
- F(TwoByteSeqStringSetChar, 3, 1) \
- F(ObjectEquals, 2, 1) \
- F(RandomHeapNumber, 0, 1) \
- F(IsObject, 1, 1) \
- F(IsFunction, 1, 1) \
- F(IsUndetectableObject, 1, 1) \
- F(IsSpecObject, 1, 1) \
- F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \
- F(MathPow, 2, 1) \
- F(MathSin, 1, 1) \
- F(MathCos, 1, 1) \
- F(MathTan, 1, 1) \
- F(MathSqrt, 1, 1) \
- F(MathLog, 1, 1) \
- F(IsRegExpEquivalent, 2, 1) \
- F(HasCachedArrayIndex, 1, 1) \
- F(GetCachedArrayIndex, 1, 1) \
- F(FastAsciiArrayJoin, 2, 1)
-
-
-// ----------------------------------------------------------------------------
-// INLINE_RUNTIME_FUNCTION_LIST defines all inlined functions accessed
-// with a native call of the form %_name from within JS code that also have
-// a corresponding runtime function, that is called for slow cases.
-// Entries have the form F(name, number of arguments, number of return values).
-#define INLINE_RUNTIME_FUNCTION_LIST(F) \
- F(ClassOf, 1, 1) \
- F(StringCharCodeAt, 2, 1) \
- F(Log, 3, 1) \
- F(StringAdd, 2, 1) \
- F(SubString, 3, 1) \
- F(StringCompare, 2, 1) \
- F(RegExpExec, 4, 1) \
- F(RegExpConstructResult, 3, 1) \
- F(GetFromCache, 2, 1) \
- F(NumberToString, 1, 1)
-
-
-//---------------------------------------------------------------------------
-// Runtime provides access to all C++ runtime functions.
-
-class RuntimeState {
- public:
- StaticResource<ConsStringIteratorOp>* string_iterator() {
- return &string_iterator_;
- }
- unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
- return &to_upper_mapping_;
- }
- unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
- return &to_lower_mapping_;
- }
- ConsStringIteratorOp* string_iterator_compare_x() {
- return &string_iterator_compare_x_;
- }
- ConsStringIteratorOp* string_iterator_compare_y() {
- return &string_iterator_compare_y_;
- }
- ConsStringIteratorOp* string_locale_compare_it1() {
- return &string_locale_compare_it1_;
- }
- ConsStringIteratorOp* string_locale_compare_it2() {
- return &string_locale_compare_it2_;
- }
-
- private:
- RuntimeState() {}
- // Non-reentrant string buffer for efficient general use in the runtime.
- StaticResource<ConsStringIteratorOp> string_iterator_;
- unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
- unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
- ConsStringIteratorOp string_iterator_compare_x_;
- ConsStringIteratorOp string_iterator_compare_y_;
- ConsStringIteratorOp string_locale_compare_it1_;
- ConsStringIteratorOp string_locale_compare_it2_;
-
- friend class Isolate;
- friend class Runtime;
-
- DISALLOW_COPY_AND_ASSIGN(RuntimeState);
-};
-
-
-class Runtime : public AllStatic {
- public:
- enum FunctionId {
-#define F(name, nargs, ressize) k##name,
- RUNTIME_FUNCTION_LIST(F)
-#undef F
-#define F(name, nargs, ressize) kInline##name,
- INLINE_FUNCTION_LIST(F)
- INLINE_RUNTIME_FUNCTION_LIST(F)
-#undef F
- kNumFunctions,
- kFirstInlineFunction = kInlineIsSmi
- };
-
- enum IntrinsicType {
- RUNTIME,
- INLINE
- };
-
- // Intrinsic function descriptor.
- struct Function {
- FunctionId function_id;
- IntrinsicType intrinsic_type;
- // The JS name of the function.
- const char* name;
-
- // The C++ (native) entry point. NULL if the function is inlined.
- byte* entry;
-
- // The number of arguments expected. nargs is -1 if the function takes
- // a variable number of arguments.
- int nargs;
- // Size of result. Most functions return a single pointer, size 1.
- int result_size;
- };
-
- static const int kNotFound = -1;
-
- // Add internalized strings for all the intrinsic function names to a
- // StringDictionary.
- // Returns failure if an allocation fails. In this case, it must be
- // retried with a new, empty StringDictionary, not with the same one.
- // Alternatively, heap initialization can be completely restarted.
- MUST_USE_RESULT static MaybeObject* InitializeIntrinsicFunctionNames(
- Heap* heap, Object* dictionary);
-
- // Get the intrinsic function with the given name, which must be internalized.
- static const Function* FunctionForName(Handle<String> name);
-
- // Get the intrinsic function with the given FunctionId.
- static const Function* FunctionForId(FunctionId id);
-
- // General-purpose helper functions for runtime system.
- static int StringMatch(Isolate* isolate,
- Handle<String> sub,
- Handle<String> pat,
- int index);
-
- static bool IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch);
-
- // TODO(1240886): Some of the following methods are *not* handle safe, but
- // accept handle arguments. This seems fragile.
-
- // Support getting the characters in a string using [] notation as
- // in Firefox/SpiderMonkey, Safari and Opera.
- MUST_USE_RESULT static MaybeObject* GetElementOrCharAt(Isolate* isolate,
- Handle<Object> object,
- uint32_t index);
-
- MUST_USE_RESULT static MaybeObject* SetObjectProperty(
- Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT static MaybeObject* ForceSetObjectProperty(
- Isolate* isolate,
- Handle<JSObject> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr);
-
- MUST_USE_RESULT static MaybeObject* ForceDeleteObjectProperty(
- Isolate* isolate,
- Handle<JSReceiver> object,
- Handle<Object> key);
-
- MUST_USE_RESULT static MaybeObject* GetObjectProperty(
- Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key);
-
- // Helper functions used stubs.
- static void PerformGC(Object* result);
-
- // Used in runtime.cc and hydrogen's VisitArrayLiteral.
- static Handle<Object> CreateArrayLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
- Handle<FixedArray> elements);
-};
-
-
-//---------------------------------------------------------------------------
-// Constants used by interface to runtime functions.
-
-class DeclareGlobalsEvalFlag: public BitField<bool, 0, 1> {};
-class DeclareGlobalsNativeFlag: public BitField<bool, 1, 1> {};
-class DeclareGlobalsLanguageMode: public BitField<LanguageMode, 2, 2> {};
-
-} } // namespace v8::internal
-
-#endif // V8_RUNTIME_H_
diff --git a/src/3rdparty/v8/src/runtime.js b/src/3rdparty/v8/src/runtime.js
deleted file mode 100644
index 6b48734..0000000
--- a/src/3rdparty/v8/src/runtime.js
+++ /dev/null
@@ -1,667 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This files contains runtime support implemented in JavaScript.
-
-// CAUTION: Some of the functions specified in this file are called
-// directly from compiled code. These are the functions with names in
-// ALL CAPS. The compiled code passes the first argument in 'this' and
-// it does not push the function onto the stack. This means that you
-// cannot use contexts in all these functions.
-
-
-/* -----------------------------------
- - - - C o m p a r i s o n - - -
- -----------------------------------
-*/
-
-// The following declarations are shared with other native JS files.
-// They are all declared at this one spot to avoid redeclaration errors.
-var $Object = global.Object;
-var $Array = global.Array;
-var $String = global.String;
-var $Number = global.Number;
-var $Function = global.Function;
-var $Boolean = global.Boolean;
-var $NaN = %GetRootNaN();
-var builtins = this;
-
-// ECMA-262 Section 11.9.3.
-function EQUALS(y) {
- if (IS_STRING(this) && IS_STRING(y)) return %StringEquals(this, y);
- var x = this;
-
- while (true) {
- if (IS_NUMBER(x)) {
- while (true) {
- if (IS_NUMBER(y)) return %NumberEquals(x, y);
- if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
- if (!IS_SPEC_OBJECT(y)) {
- // String or boolean.
- return %NumberEquals(x, %ToNumber(y));
- }
- y = %ToPrimitive(y, NO_HINT);
- }
- } else if (IS_STRING(x)) {
- while (true) {
- if (IS_STRING(y)) return %StringEquals(x, y);
- if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
- if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
- if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
- y = %ToPrimitive(y, NO_HINT);
- }
- } else if (IS_BOOLEAN(x)) {
- if (IS_BOOLEAN(y)) return %_ObjectEquals(x, y) ? 0 : 1;
- if (IS_NULL_OR_UNDEFINED(y)) return 1;
- if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
- if (IS_STRING(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
- // y is object.
- x = %ToNumber(x);
- y = %ToPrimitive(y, NO_HINT);
- } else if (IS_NULL_OR_UNDEFINED(x)) {
- return IS_NULL_OR_UNDEFINED(y) ? 0 : 1;
- } else {
- // x is an object.
- if (IS_SPEC_OBJECT(y)) {
- return %_ObjectEquals(x, y) ? 0 : 1;
- }
- if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
- if (IS_BOOLEAN(y)) y = %ToNumber(y);
- x = %ToPrimitive(x, NO_HINT);
- }
- }
-}
-
-// ECMA-262, section 11.9.4, page 56.
-function STRICT_EQUALS(x) {
- if (IS_STRING(this)) {
- if (!IS_STRING(x)) return 1; // not equal
- return %StringEquals(this, x);
- }
-
- if (IS_NUMBER(this)) {
- if (!IS_NUMBER(x)) return 1; // not equal
- return %NumberEquals(this, x);
- }
-
- // If anything else gets here, we just do simple identity check.
- // Objects (including functions), null, undefined and booleans were
- // checked in the CompareStub, so there should be nothing left.
- return %_ObjectEquals(this, x) ? 0 : 1;
-}
-
-
-// ECMA-262, section 11.8.5, page 53. The 'ncr' parameter is used as
-// the result when either (or both) the operands are NaN.
-function COMPARE(x, ncr) {
- var left;
- var right;
- // Fast cases for string, numbers and undefined compares.
- if (IS_STRING(this)) {
- if (IS_STRING(x)) return %_StringCompare(this, x);
- if (IS_UNDEFINED(x)) return ncr;
- left = this;
- } else if (IS_NUMBER(this)) {
- if (IS_NUMBER(x)) return %NumberCompare(this, x, ncr);
- if (IS_UNDEFINED(x)) return ncr;
- left = this;
- } else if (IS_UNDEFINED(this)) {
- if (!IS_UNDEFINED(x)) {
- %ToPrimitive(x, NUMBER_HINT);
- }
- return ncr;
- } else if (IS_UNDEFINED(x)) {
- %ToPrimitive(this, NUMBER_HINT);
- return ncr;
- } else {
- left = %ToPrimitive(this, NUMBER_HINT);
- }
-
- right = %ToPrimitive(x, NUMBER_HINT);
- if (IS_STRING(left) && IS_STRING(right)) {
- return %_StringCompare(left, right);
- } else {
- var left_number = %ToNumber(left);
- var right_number = %ToNumber(right);
- if (NUMBER_IS_NAN(left_number) || NUMBER_IS_NAN(right_number)) return ncr;
- return %NumberCompare(left_number, right_number, ncr);
- }
-}
-
-
-
-/* -----------------------------------
- - - - A r i t h m e t i c - - -
- -----------------------------------
-*/
-
-// ECMA-262, section 11.6.1, page 50.
-function ADD(x) {
- // Fast case: Check for number operands and do the addition.
- if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberAdd(this, x);
- if (IS_STRING(this) && IS_STRING(x)) return %_StringAdd(this, x);
-
- // Default implementation.
- var a = %ToPrimitive(this, NO_HINT);
- var b = %ToPrimitive(x, NO_HINT);
-
- if (IS_STRING(a)) {
- return %_StringAdd(a, %ToString(b));
- } else if (IS_STRING(b)) {
- return %_StringAdd(%NonStringToString(a), b);
- } else {
- return %NumberAdd(%ToNumber(a), %ToNumber(b));
- }
-}
-
-
-// Left operand (this) is already a string.
-function STRING_ADD_LEFT(y) {
- if (!IS_STRING(y)) {
- if (IS_STRING_WRAPPER(y) && %_IsStringWrapperSafeForDefaultValueOf(y)) {
- y = %_ValueOf(y);
- } else {
- y = IS_NUMBER(y)
- ? %_NumberToString(y)
- : %ToString(%ToPrimitive(y, NO_HINT));
- }
- }
- return %_StringAdd(this, y);
-}
-
-
-// Right operand (y) is already a string.
-function STRING_ADD_RIGHT(y) {
- var x = this;
- if (!IS_STRING(x)) {
- if (IS_STRING_WRAPPER(x) && %_IsStringWrapperSafeForDefaultValueOf(x)) {
- x = %_ValueOf(x);
- } else {
- x = IS_NUMBER(x)
- ? %_NumberToString(x)
- : %ToString(%ToPrimitive(x, NO_HINT));
- }
- }
- return %_StringAdd(x, y);
-}
-
-
-// ECMA-262, section 11.6.2, page 50.
-function SUB(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberSub(x, y);
-}
-
-
-// ECMA-262, section 11.5.1, page 48.
-function MUL(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberMul(x, y);
-}
-
-
-// ECMA-262, section 11.5.2, page 49.
-function DIV(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberDiv(x, y);
-}
-
-
-// ECMA-262, section 11.5.3, page 49.
-function MOD(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberMod(x, y);
-}
-
-
-
-/* -------------------------------------------
- - - - B i t o p e r a t i o n s - - -
- -------------------------------------------
-*/
-
-// ECMA-262, section 11.10, page 57.
-function BIT_OR(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberOr(x, y);
-}
-
-
-// ECMA-262, section 11.10, page 57.
-function BIT_AND(y) {
- var x;
- if (IS_NUMBER(this)) {
- x = this;
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- } else {
- x = %NonNumberToNumber(this);
- // Make sure to convert the right operand to a number before
- // bailing out in the fast case, but after converting the
- // left operand. This ensures that valueOf methods on the right
- // operand are always executed.
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- // Optimize for the case where we end up AND'ing a value
- // that doesn't convert to a number. This is common in
- // certain benchmarks.
- if (NUMBER_IS_NAN(x)) return 0;
- }
- return %NumberAnd(x, y);
-}
-
-
-// ECMA-262, section 11.10, page 57.
-function BIT_XOR(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberXor(x, y);
-}
-
-
-// ECMA-262, section 11.4.7, page 47.
-function UNARY_MINUS() {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- return %NumberUnaryMinus(x);
-}
-
-
-// ECMA-262, section 11.4.8, page 48.
-function BIT_NOT() {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- return %NumberNot(x);
-}
-
-
-// ECMA-262, section 11.7.1, page 51.
-function SHL(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberShl(x, y);
-}
-
-
-// ECMA-262, section 11.7.2, page 51.
-function SAR(y) {
- var x;
- if (IS_NUMBER(this)) {
- x = this;
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- } else {
- x = %NonNumberToNumber(this);
- // Make sure to convert the right operand to a number before
- // bailing out in the fast case, but after converting the
- // left operand. This ensures that valueOf methods on the right
- // operand are always executed.
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- // Optimize for the case where we end up shifting a value
- // that doesn't convert to a number. This is common in
- // certain benchmarks.
- if (NUMBER_IS_NAN(x)) return 0;
- }
- return %NumberSar(x, y);
-}
-
-
-// ECMA-262, section 11.7.3, page 52.
-function SHR(y) {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %NonNumberToNumber(y);
- return %NumberShr(x, y);
-}
-
-
-
-/* -----------------------------
- - - - H e l p e r s - - -
- -----------------------------
-*/
-
-// ECMA-262, section 11.4.1, page 46.
-function DELETE(key, strict) {
- return %DeleteProperty(%ToObject(this), %ToString(key), strict);
-}
-
-
-// ECMA-262, section 11.8.7, page 54.
-function IN(x) {
- if (!IS_SPEC_OBJECT(x)) {
- throw %MakeTypeError('invalid_in_operator_use', [this, x]);
- }
- return %_IsNonNegativeSmi(this) ?
- %HasElement(x, this) : %HasProperty(x, %ToString(this));
-}
-
-
-// ECMA-262, section 11.8.6, page 54. To make the implementation more
-// efficient, the return value should be zero if the 'this' is an
-// instance of F, and non-zero if not. This makes it possible to avoid
-// an expensive ToBoolean conversion in the generated code.
-function INSTANCE_OF(F) {
- var V = this;
- if (!IS_SPEC_FUNCTION(F)) {
- throw %MakeTypeError('instanceof_function_expected', [V]);
- }
-
- // If V is not an object, return false.
- if (!IS_SPEC_OBJECT(V)) {
- return 1;
- }
-
- // Check if function is bound, if so, get [[BoundFunction]] from it
- // and use that instead of F.
- var bindings = %BoundFunctionGetBindings(F);
- if (bindings) {
- F = bindings[kBoundFunctionIndex]; // Always a non-bound function.
- }
- // Get the prototype of F; if it is not an object, throw an error.
- var O = F.prototype;
- if (!IS_SPEC_OBJECT(O)) {
- throw %MakeTypeError('instanceof_nonobject_proto', [O]);
- }
-
- // Return whether or not O is in the prototype chain of V.
- return %IsInPrototypeChain(O, V) ? 0 : 1;
-}
-
-
-// Filter a given key against an object by checking if the object
-// has a property with the given key; return the key as a string if
-// it has. Otherwise returns 0 (smi). Used in for-in statements.
-function FILTER_KEY(key) {
- var string = %ToString(key);
- if (%HasProperty(this, string)) return string;
- return 0;
-}
-
-
-function CALL_NON_FUNCTION() {
- var delegate = %GetFunctionDelegate(this);
- if (!IS_FUNCTION(delegate)) {
- throw %MakeTypeError('called_non_callable', [typeof this]);
- }
- return %Apply(delegate, this, arguments, 0, %_ArgumentsLength());
-}
-
-
-function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
- var delegate = %GetConstructorDelegate(this);
- if (!IS_FUNCTION(delegate)) {
- throw %MakeTypeError('called_non_callable', [typeof this]);
- }
- return %Apply(delegate, this, arguments, 0, %_ArgumentsLength());
-}
-
-
-function CALL_FUNCTION_PROXY() {
- var arity = %_ArgumentsLength() - 1;
- var proxy = %_Arguments(arity); // The proxy comes in as an additional arg.
- var trap = %GetCallTrap(proxy);
- return %Apply(trap, this, arguments, 0, arity);
-}
-
-
-function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR() {
- var proxy = this;
- var trap = %GetConstructTrap(proxy);
- return %Apply(trap, this, arguments, 0, %_ArgumentsLength());
-}
-
-
-function APPLY_PREPARE(args) {
- var length;
- // First check whether length is a positive Smi and args is an
- // array. This is the fast case. If this fails, we do the slow case
- // that takes care of more eventualities.
- if (IS_ARRAY(args)) {
- length = args.length;
- if (%_IsSmi(length) && length >= 0 && length < 0x800000 &&
- IS_SPEC_FUNCTION(this)) {
- return length;
- }
- }
-
- length = (args == null) ? 0 : %ToUint32(args.length);
-
- // We can handle any number of apply arguments if the stack is
- // big enough, but sanity check the value to avoid overflow when
- // multiplying with pointer size.
- if (length > 0x800000) {
- throw %MakeRangeError('stack_overflow', []);
- }
-
- if (!IS_SPEC_FUNCTION(this)) {
- throw %MakeTypeError('apply_non_function',
- [ %ToString(this), typeof this ]);
- }
-
- // Make sure the arguments list has the right type.
- if (args != null && !IS_SPEC_OBJECT(args)) {
- throw %MakeTypeError('apply_wrong_args', []);
- }
-
- // Return the length which is the number of arguments to copy to the
- // stack. It is guaranteed to be a small integer at this point.
- return length;
-}
-
-
-function APPLY_OVERFLOW(length) {
- throw %MakeRangeError('stack_overflow', []);
-}
-
-
-// Convert the receiver to an object - forward to ToObject.
-function TO_OBJECT() {
- return %ToObject(this);
-}
-
-
-// Convert the receiver to a number - forward to ToNumber.
-function TO_NUMBER() {
- return %ToNumber(this);
-}
-
-
-// Convert the receiver to a string - forward to ToString.
-function TO_STRING() {
- return %ToString(this);
-}
-
-
-/* -------------------------------------
- - - - C o n v e r s i o n s - - -
- -------------------------------------
-*/
-
-// ECMA-262, section 9.1, page 30. Use null/undefined for no hint,
-// (1) for number hint, and (2) for string hint.
-function ToPrimitive(x, hint) {
- // Fast case check.
- if (IS_STRING(x)) return x;
- // Normal behavior.
- if (!IS_SPEC_OBJECT(x)) return x;
- if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
- return (hint == NUMBER_HINT) ? %DefaultNumber(x) : %DefaultString(x);
-}
-
-
-// ECMA-262, section 9.2, page 30
-function ToBoolean(x) {
- if (IS_BOOLEAN(x)) return x;
- if (IS_STRING(x)) return x.length != 0;
- if (x == null) return false;
- if (IS_NUMBER(x)) return !((x == 0) || NUMBER_IS_NAN(x));
- return true;
-}
-
-
-// ECMA-262, section 9.3, page 31.
-function ToNumber(x) {
- if (IS_NUMBER(x)) return x;
- if (IS_STRING(x)) {
- return %_HasCachedArrayIndex(x) ? %_GetCachedArrayIndex(x)
- : %StringToNumber(x);
- }
- if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return $NaN;
- return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
-}
-
-function NonNumberToNumber(x) {
- if (IS_STRING(x)) {
- return %_HasCachedArrayIndex(x) ? %_GetCachedArrayIndex(x)
- : %StringToNumber(x);
- }
- if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return $NaN;
- return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
-}
-
-
-// ECMA-262, section 9.8, page 35.
-function ToString(x) {
- if (IS_STRING(x)) return x;
- if (IS_NUMBER(x)) return %_NumberToString(x);
- if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
- if (IS_UNDEFINED(x)) return 'undefined';
- return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
-}
-
-function NonStringToString(x) {
- if (IS_NUMBER(x)) return %_NumberToString(x);
- if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
- if (IS_UNDEFINED(x)) return 'undefined';
- return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
-}
-
-
-// ECMA-262, section 9.9, page 36.
-function ToObject(x) {
- if (IS_STRING(x)) return new $String(x);
- if (IS_NUMBER(x)) return new $Number(x);
- if (IS_BOOLEAN(x)) return new $Boolean(x);
- if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
- throw %MakeTypeError('null_to_object', []);
- }
- return x;
-}
-
-
-// ECMA-262, section 9.4, page 34.
-function ToInteger(x) {
- if (%_IsSmi(x)) return x;
- return %NumberToInteger(ToNumber(x));
-}
-
-
-// ECMA-262, section 9.6, page 34.
-function ToUint32(x) {
- if (%_IsSmi(x) && x >= 0) return x;
- return %NumberToJSUint32(ToNumber(x));
-}
-
-
-// ECMA-262, section 9.5, page 34
-function ToInt32(x) {
- if (%_IsSmi(x)) return x;
- return %NumberToJSInt32(ToNumber(x));
-}
-
-
-// ES5, section 9.12
-function SameValue(x, y) {
- if (typeof x != typeof y) return false;
- if (IS_NUMBER(x)) {
- if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
- // x is +0 and y is -0 or vice versa.
- if (x === 0 && y === 0 && (1 / x) != (1 / y)) return false;
- }
- return x === y;
-}
-
-
-/* ---------------------------------
- - - - U t i l i t i e s - - -
- ---------------------------------
-*/
-
-// Returns if the given x is a primitive value - not an object or a
-// function.
-function IsPrimitive(x) {
- // Even though the type of null is "object", null is still
- // considered a primitive value. IS_SPEC_OBJECT handles this correctly
- // (i.e., it will return false if x is null).
- return !IS_SPEC_OBJECT(x);
-}
-
-
-// ECMA-262, section 8.6.2.6, page 28.
-function DefaultNumber(x) {
- var valueOf = x.valueOf;
- if (IS_SPEC_FUNCTION(valueOf)) {
- var v = %_CallFunction(x, valueOf);
- if (%IsPrimitive(v)) return v;
- }
-
- var toString = x.toString;
- if (IS_SPEC_FUNCTION(toString)) {
- var s = %_CallFunction(x, toString);
- if (%IsPrimitive(s)) return s;
- }
-
- throw %MakeTypeError('cannot_convert_to_primitive', []);
-}
-
-
-// ECMA-262, section 8.6.2.6, page 28.
-function DefaultString(x) {
- var toString = x.toString;
- if (IS_SPEC_FUNCTION(toString)) {
- var s = %_CallFunction(x, toString);
- if (%IsPrimitive(s)) return s;
- }
-
- var valueOf = x.valueOf;
- if (IS_SPEC_FUNCTION(valueOf)) {
- var v = %_CallFunction(x, valueOf);
- if (%IsPrimitive(v)) return v;
- }
-
- throw %MakeTypeError('cannot_convert_to_primitive', []);
-}
-
-
-// NOTE: Setting the prototype for Array must take place as early as
-// possible due to code generation for array literals. When
-// generating code for a array literal a boilerplate array is created
-// that is cloned when running the code. It is essential that the
-// boilerplate gets the right prototype.
-%FunctionSetPrototype($Array, new $Array(0));
diff --git a/src/3rdparty/v8/src/safepoint-table.cc b/src/3rdparty/v8/src/safepoint-table.cc
deleted file mode 100644
index 9e42304..0000000
--- a/src/3rdparty/v8/src/safepoint-table.cc
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "safepoint-table.h"
-
-#include "deoptimizer.h"
-#include "disasm.h"
-#include "macro-assembler.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-bool SafepointEntry::HasRegisters() const {
- ASSERT(is_valid());
- ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
- const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
- for (int i = 0; i < num_reg_bytes; i++) {
- if (bits_[i] != SafepointTable::kNoRegisters) return true;
- }
- return false;
-}
-
-
-bool SafepointEntry::HasRegisterAt(int reg_index) const {
- ASSERT(is_valid());
- ASSERT(reg_index >= 0 && reg_index < kNumSafepointRegisters);
- int byte_index = reg_index >> kBitsPerByteLog2;
- int bit_index = reg_index & (kBitsPerByte - 1);
- return (bits_[byte_index] & (1 << bit_index)) != 0;
-}
-
-
-SafepointTable::SafepointTable(Code* code) {
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION ||
- code->kind() == Code::COMPILED_STUB);
- code_ = code;
- Address header = code->instruction_start() + code->safepoint_table_offset();
- length_ = Memory::uint32_at(header + kLengthOffset);
- entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
- pc_and_deoptimization_indexes_ = header + kHeaderSize;
- entries_ = pc_and_deoptimization_indexes_ +
- (length_ * kPcAndDeoptimizationIndexSize);
- ASSERT(entry_size_ > 0);
- STATIC_ASSERT(SafepointEntry::DeoptimizationIndexField::kMax ==
- Safepoint::kNoDeoptimizationIndex);
-}
-
-
-SafepointEntry SafepointTable::FindEntry(Address pc) const {
- unsigned pc_offset = static_cast<unsigned>(pc - code_->instruction_start());
- for (unsigned i = 0; i < length(); i++) {
- // TODO(kasperl): Replace the linear search with binary search.
- if (GetPcOffset(i) == pc_offset) return GetEntry(i);
- }
- return SafepointEntry();
-}
-
-
-void SafepointTable::PrintEntry(unsigned index) const {
- disasm::NameConverter converter;
- SafepointEntry entry = GetEntry(index);
- uint8_t* bits = entry.bits();
-
- // Print the stack slot bits.
- if (entry_size_ > 0) {
- ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
- const int first = kNumSafepointRegisters >> kBitsPerByteLog2;
- int last = entry_size_ - 1;
- for (int i = first; i < last; i++) PrintBits(bits[i], kBitsPerByte);
- int last_bits = code_->stack_slots() - ((last - first) * kBitsPerByte);
- PrintBits(bits[last], last_bits);
-
- // Print the registers (if any).
- if (!entry.HasRegisters()) return;
- for (int j = 0; j < kNumSafepointRegisters; j++) {
- if (entry.HasRegisterAt(j)) {
- PrintF(" | %s", converter.NameOfCPURegister(j));
- }
- }
- }
-}
-
-
-void SafepointTable::PrintBits(uint8_t byte, int digits) {
- ASSERT(digits >= 0 && digits <= kBitsPerByte);
- for (int i = 0; i < digits; i++) {
- PrintF("%c", ((byte & (1 << i)) == 0) ? '0' : '1');
- }
-}
-
-
-void Safepoint::DefinePointerRegister(Register reg, Zone* zone) {
- registers_->Add(reg.code(), zone);
-}
-
-
-Safepoint SafepointTableBuilder::DefineSafepoint(
- Assembler* assembler,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- ASSERT(arguments >= 0);
- DeoptimizationInfo info;
- info.pc = assembler->pc_offset();
- info.arguments = arguments;
- info.has_doubles = (kind & Safepoint::kWithDoubles);
- deoptimization_info_.Add(info, zone_);
- deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex, zone_);
- if (deopt_mode == Safepoint::kNoLazyDeopt) {
- last_lazy_safepoint_ = deopt_index_list_.length();
- }
- indexes_.Add(new(zone_) ZoneList<int>(8, zone_), zone_);
- registers_.Add((kind & Safepoint::kWithRegisters)
- ? new(zone_) ZoneList<int>(4, zone_)
- : NULL,
- zone_);
- return Safepoint(indexes_.last(), registers_.last());
-}
-
-
-void SafepointTableBuilder::RecordLazyDeoptimizationIndex(int index) {
- while (last_lazy_safepoint_ < deopt_index_list_.length()) {
- deopt_index_list_[last_lazy_safepoint_++] = index;
- }
-}
-
-unsigned SafepointTableBuilder::GetCodeOffset() const {
- ASSERT(emitted_);
- return offset_;
-}
-
-
-void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
- // Make sure the safepoint table is properly aligned. Pad with nops.
- assembler->Align(kIntSize);
- assembler->RecordComment(";;; Safepoint table.");
- offset_ = assembler->pc_offset();
-
- // Take the register bits into account.
- bits_per_entry += kNumSafepointRegisters;
-
- // Compute the number of bytes per safepoint entry.
- int bytes_per_entry =
- RoundUp(bits_per_entry, kBitsPerByte) >> kBitsPerByteLog2;
-
- // Emit the table header.
- int length = deoptimization_info_.length();
- assembler->dd(length);
- assembler->dd(bytes_per_entry);
-
- // Emit sorted table of pc offsets together with deoptimization indexes.
- for (int i = 0; i < length; i++) {
- assembler->dd(deoptimization_info_[i].pc);
- assembler->dd(EncodeExceptPC(deoptimization_info_[i],
- deopt_index_list_[i]));
- }
-
- // Emit table of bitmaps.
- ZoneList<uint8_t> bits(bytes_per_entry, zone_);
- for (int i = 0; i < length; i++) {
- ZoneList<int>* indexes = indexes_[i];
- ZoneList<int>* registers = registers_[i];
- bits.Clear();
- bits.AddBlock(0, bytes_per_entry, zone_);
-
- // Run through the registers (if any).
- ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
- if (registers == NULL) {
- const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
- for (int j = 0; j < num_reg_bytes; j++) {
- bits[j] = SafepointTable::kNoRegisters;
- }
- } else {
- for (int j = 0; j < registers->length(); j++) {
- int index = registers->at(j);
- ASSERT(index >= 0 && index < kNumSafepointRegisters);
- int byte_index = index >> kBitsPerByteLog2;
- int bit_index = index & (kBitsPerByte - 1);
- bits[byte_index] |= (1 << bit_index);
- }
- }
-
- // Run through the indexes and build a bitmap.
- for (int j = 0; j < indexes->length(); j++) {
- int index = bits_per_entry - 1 - indexes->at(j);
- int byte_index = index >> kBitsPerByteLog2;
- int bit_index = index & (kBitsPerByte - 1);
- bits[byte_index] |= (1U << bit_index);
- }
-
- // Emit the bitmap for the current entry.
- for (int k = 0; k < bytes_per_entry; k++) {
- assembler->db(bits[k]);
- }
- }
- emitted_ = true;
-}
-
-
-uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info,
- unsigned index) {
- uint32_t encoding = SafepointEntry::DeoptimizationIndexField::encode(index);
- encoding |= SafepointEntry::ArgumentsField::encode(info.arguments);
- encoding |= SafepointEntry::SaveDoublesField::encode(info.has_doubles);
- return encoding;
-}
-
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/safepoint-table.h b/src/3rdparty/v8/src/safepoint-table.h
deleted file mode 100644
index 307d948..0000000
--- a/src/3rdparty/v8/src/safepoint-table.h
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SAFEPOINT_TABLE_H_
-#define V8_SAFEPOINT_TABLE_H_
-
-#include "allocation.h"
-#include "heap.h"
-#include "v8memory.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-struct Register;
-
-class SafepointEntry BASE_EMBEDDED {
- public:
- SafepointEntry() : info_(0), bits_(NULL) {}
-
- SafepointEntry(unsigned info, uint8_t* bits) : info_(info), bits_(bits) {
- ASSERT(is_valid());
- }
-
- bool is_valid() const { return bits_ != NULL; }
-
- bool Equals(const SafepointEntry& other) const {
- return info_ == other.info_ && bits_ == other.bits_;
- }
-
- void Reset() {
- info_ = 0;
- bits_ = NULL;
- }
-
- int deoptimization_index() const {
- ASSERT(is_valid());
- return DeoptimizationIndexField::decode(info_);
- }
-
- static const int kArgumentsFieldBits = 3;
- static const int kSaveDoublesFieldBits = 1;
- static const int kDeoptIndexBits =
- 32 - kArgumentsFieldBits - kSaveDoublesFieldBits;
- class DeoptimizationIndexField:
- public BitField<int, 0, kDeoptIndexBits> {}; // NOLINT
- class ArgumentsField:
- public BitField<unsigned,
- kDeoptIndexBits,
- kArgumentsFieldBits> {}; // NOLINT
- class SaveDoublesField:
- public BitField<bool,
- kDeoptIndexBits + kArgumentsFieldBits,
- kSaveDoublesFieldBits> { }; // NOLINT
-
- int argument_count() const {
- ASSERT(is_valid());
- return ArgumentsField::decode(info_);
- }
-
- bool has_doubles() const {
- ASSERT(is_valid());
- return SaveDoublesField::decode(info_);
- }
-
- uint8_t* bits() {
- ASSERT(is_valid());
- return bits_;
- }
-
- bool HasRegisters() const;
- bool HasRegisterAt(int reg_index) const;
-
- private:
- unsigned info_;
- uint8_t* bits_;
-};
-
-
-class SafepointTable BASE_EMBEDDED {
- public:
- explicit SafepointTable(Code* code);
-
- int size() const {
- return kHeaderSize +
- (length_ * (kPcAndDeoptimizationIndexSize + entry_size_)); }
- unsigned length() const { return length_; }
- unsigned entry_size() const { return entry_size_; }
-
- unsigned GetPcOffset(unsigned index) const {
- ASSERT(index < length_);
- return Memory::uint32_at(GetPcOffsetLocation(index));
- }
-
- SafepointEntry GetEntry(unsigned index) const {
- ASSERT(index < length_);
- unsigned info = Memory::uint32_at(GetInfoLocation(index));
- uint8_t* bits = &Memory::uint8_at(entries_ + (index * entry_size_));
- return SafepointEntry(info, bits);
- }
-
- // Returns the entry for the given pc.
- SafepointEntry FindEntry(Address pc) const;
-
- void PrintEntry(unsigned index) const;
-
- private:
- static const uint8_t kNoRegisters = 0xFF;
-
- static const int kLengthOffset = 0;
- static const int kEntrySizeOffset = kLengthOffset + kIntSize;
- static const int kHeaderSize = kEntrySizeOffset + kIntSize;
-
- static const int kPcSize = kIntSize;
- static const int kDeoptimizationIndexSize = kIntSize;
- static const int kPcAndDeoptimizationIndexSize =
- kPcSize + kDeoptimizationIndexSize;
-
- Address GetPcOffsetLocation(unsigned index) const {
- return pc_and_deoptimization_indexes_ +
- (index * kPcAndDeoptimizationIndexSize);
- }
-
- Address GetInfoLocation(unsigned index) const {
- return GetPcOffsetLocation(index) + kPcSize;
- }
-
- static void PrintBits(uint8_t byte, int digits);
-
- AssertNoAllocation no_allocation_;
- Code* code_;
- unsigned length_;
- unsigned entry_size_;
-
- Address pc_and_deoptimization_indexes_;
- Address entries_;
-
- friend class SafepointTableBuilder;
- friend class SafepointEntry;
-
- DISALLOW_COPY_AND_ASSIGN(SafepointTable);
-};
-
-
-class Safepoint BASE_EMBEDDED {
- public:
- typedef enum {
- kSimple = 0,
- kWithRegisters = 1 << 0,
- kWithDoubles = 1 << 1,
- kWithRegistersAndDoubles = kWithRegisters | kWithDoubles
- } Kind;
-
- enum DeoptMode {
- kNoLazyDeopt,
- kLazyDeopt
- };
-
- static const int kNoDeoptimizationIndex =
- (1 << (SafepointEntry::kDeoptIndexBits)) - 1;
-
- void DefinePointerSlot(int index, Zone* zone) { indexes_->Add(index, zone); }
- void DefinePointerRegister(Register reg, Zone* zone);
-
- private:
- Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers) :
- indexes_(indexes), registers_(registers) { }
- ZoneList<int>* indexes_;
- ZoneList<int>* registers_;
-
- friend class SafepointTableBuilder;
-};
-
-
-class SafepointTableBuilder BASE_EMBEDDED {
- public:
- explicit SafepointTableBuilder(Zone* zone)
- : deoptimization_info_(32, zone),
- deopt_index_list_(32, zone),
- indexes_(32, zone),
- registers_(32, zone),
- emitted_(false),
- last_lazy_safepoint_(0),
- zone_(zone) { }
-
- // Get the offset of the emitted safepoint table in the code.
- unsigned GetCodeOffset() const;
-
- // Define a new safepoint for the current position in the body.
- Safepoint DefineSafepoint(Assembler* assembler,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
-
- // Record deoptimization index for lazy deoptimization for the last
- // outstanding safepoints.
- void RecordLazyDeoptimizationIndex(int index);
-
- // Emit the safepoint table after the body. The number of bits per
- // entry must be enough to hold all the pointer indexes.
- void Emit(Assembler* assembler, int bits_per_entry);
-
-
- private:
- struct DeoptimizationInfo {
- unsigned pc;
- unsigned arguments;
- bool has_doubles;
- };
-
- uint32_t EncodeExceptPC(const DeoptimizationInfo& info, unsigned index);
-
- ZoneList<DeoptimizationInfo> deoptimization_info_;
- ZoneList<unsigned> deopt_index_list_;
- ZoneList<ZoneList<int>*> indexes_;
- ZoneList<ZoneList<int>*> registers_;
-
- unsigned offset_;
- bool emitted_;
- int last_lazy_safepoint_;
-
- Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SAFEPOINT_TABLE_H_
diff --git a/src/3rdparty/v8/src/scanner-character-streams.cc b/src/3rdparty/v8/src/scanner-character-streams.cc
deleted file mode 100644
index 56b9f03..0000000
--- a/src/3rdparty/v8/src/scanner-character-streams.cc
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "scanner-character-streams.h"
-
-#include "handles.h"
-#include "unicode-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// BufferedUtf16CharacterStreams
-
-BufferedUtf16CharacterStream::BufferedUtf16CharacterStream()
- : Utf16CharacterStream(),
- pushback_limit_(NULL) {
- // Initialize buffer as being empty. First read will fill the buffer.
- buffer_cursor_ = buffer_;
- buffer_end_ = buffer_;
-}
-
-BufferedUtf16CharacterStream::~BufferedUtf16CharacterStream() { }
-
-void BufferedUtf16CharacterStream::PushBack(uc32 character) {
- if (character == kEndOfInput) {
- pos_--;
- return;
- }
- if (pushback_limit_ == NULL && buffer_cursor_ > buffer_) {
- // buffer_ is writable, buffer_cursor_ is const pointer.
- buffer_[--buffer_cursor_ - buffer_] = static_cast<uc16>(character);
- pos_--;
- return;
- }
- SlowPushBack(static_cast<uc16>(character));
-}
-
-
-void BufferedUtf16CharacterStream::SlowPushBack(uc16 character) {
- // In pushback mode, the end of the buffer contains pushback,
- // and the start of the buffer (from buffer start to pushback_limit_)
- // contains valid data that comes just after the pushback.
- // We NULL the pushback_limit_ if pushing all the way back to the
- // start of the buffer.
-
- if (pushback_limit_ == NULL) {
- // Enter pushback mode.
- pushback_limit_ = buffer_end_;
- buffer_end_ = buffer_ + kBufferSize;
- buffer_cursor_ = buffer_end_;
- }
- // Ensure that there is room for at least one pushback.
- ASSERT(buffer_cursor_ > buffer_);
- ASSERT(pos_ > 0);
- buffer_[--buffer_cursor_ - buffer_] = character;
- if (buffer_cursor_ == buffer_) {
- pushback_limit_ = NULL;
- } else if (buffer_cursor_ < pushback_limit_) {
- pushback_limit_ = buffer_cursor_;
- }
- pos_--;
-}
-
-
-bool BufferedUtf16CharacterStream::ReadBlock() {
- buffer_cursor_ = buffer_;
- if (pushback_limit_ != NULL) {
- // Leave pushback mode.
- buffer_end_ = pushback_limit_;
- pushback_limit_ = NULL;
- // If there were any valid characters left at the
- // start of the buffer, use those.
- if (buffer_cursor_ < buffer_end_) return true;
- // Otherwise read a new block.
- }
- unsigned length = FillBuffer(pos_, kBufferSize);
- buffer_end_ = buffer_ + length;
- return length > 0;
-}
-
-
-unsigned BufferedUtf16CharacterStream::SlowSeekForward(unsigned delta) {
- // Leave pushback mode (i.e., ignore that there might be valid data
- // in the buffer before the pushback_limit_ point).
- pushback_limit_ = NULL;
- return BufferSeekForward(delta);
-}
-
-// ----------------------------------------------------------------------------
-// GenericStringUtf16CharacterStream
-
-
-GenericStringUtf16CharacterStream::GenericStringUtf16CharacterStream(
- Handle<String> data,
- unsigned start_position,
- unsigned end_position)
- : string_(data),
- length_(end_position) {
- ASSERT(end_position >= start_position);
- buffer_cursor_ = buffer_;
- buffer_end_ = buffer_;
- pos_ = start_position;
-}
-
-
-GenericStringUtf16CharacterStream::~GenericStringUtf16CharacterStream() { }
-
-
-unsigned GenericStringUtf16CharacterStream::BufferSeekForward(unsigned delta) {
- unsigned old_pos = pos_;
- pos_ = Min(pos_ + delta, length_);
- ReadBlock();
- return pos_ - old_pos;
-}
-
-
-unsigned GenericStringUtf16CharacterStream::FillBuffer(unsigned from_pos,
- unsigned length) {
- if (from_pos >= length_) return 0;
- if (from_pos + length > length_) {
- length = length_ - from_pos;
- }
- String::WriteToFlat<uc16>(*string_, buffer_, from_pos, from_pos + length);
- return length;
-}
-
-
-// ----------------------------------------------------------------------------
-// Utf8ToUtf16CharacterStream
-Utf8ToUtf16CharacterStream::Utf8ToUtf16CharacterStream(const byte* data,
- unsigned length)
- : BufferedUtf16CharacterStream(),
- raw_data_(data),
- raw_data_length_(length),
- raw_data_pos_(0),
- raw_character_position_(0) {
- ReadBlock();
-}
-
-
-Utf8ToUtf16CharacterStream::~Utf8ToUtf16CharacterStream() { }
-
-
-unsigned Utf8ToUtf16CharacterStream::BufferSeekForward(unsigned delta) {
- unsigned old_pos = pos_;
- unsigned target_pos = pos_ + delta;
- SetRawPosition(target_pos);
- pos_ = raw_character_position_;
- ReadBlock();
- return pos_ - old_pos;
-}
-
-
-unsigned Utf8ToUtf16CharacterStream::FillBuffer(unsigned char_position,
- unsigned length) {
- static const unibrow::uchar kMaxUtf16Character = 0xffff;
- SetRawPosition(char_position);
- if (raw_character_position_ != char_position) {
- // char_position was not a valid position in the stream (hit the end
- // while spooling to it).
- return 0u;
- }
- unsigned i = 0;
- while (i < length - 1) {
- if (raw_data_pos_ == raw_data_length_) break;
- unibrow::uchar c = raw_data_[raw_data_pos_];
- if (c <= unibrow::Utf8::kMaxOneByteChar) {
- raw_data_pos_++;
- } else {
- c = unibrow::Utf8::CalculateValue(raw_data_ + raw_data_pos_,
- raw_data_length_ - raw_data_pos_,
- &raw_data_pos_);
- }
- if (c > kMaxUtf16Character) {
- buffer_[i++] = unibrow::Utf16::LeadSurrogate(c);
- buffer_[i++] = unibrow::Utf16::TrailSurrogate(c);
- } else {
- buffer_[i++] = static_cast<uc16>(c);
- }
- }
- raw_character_position_ = char_position + i;
- return i;
-}
-
-
-static const byte kUtf8MultiByteMask = 0xC0;
-static const byte kUtf8MultiByteCharStart = 0xC0;
-static const byte kUtf8MultiByteCharFollower = 0x80;
-
-
-#ifdef DEBUG
-static bool IsUtf8MultiCharacterStart(byte first_byte) {
- return (first_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharStart;
-}
-#endif
-
-
-static bool IsUtf8MultiCharacterFollower(byte later_byte) {
- return (later_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharFollower;
-}
-
-
-// Move the cursor back to point at the preceding UTF-8 character start
-// in the buffer.
-static inline void Utf8CharacterBack(const byte* buffer, unsigned* cursor) {
- byte character = buffer[--*cursor];
- if (character > unibrow::Utf8::kMaxOneByteChar) {
- ASSERT(IsUtf8MultiCharacterFollower(character));
- // Last byte of a multi-byte character encoding. Step backwards until
- // pointing to the first byte of the encoding, recognized by having the
- // top two bits set.
- while (IsUtf8MultiCharacterFollower(buffer[--*cursor])) { }
- ASSERT(IsUtf8MultiCharacterStart(buffer[*cursor]));
- }
-}
-
-
-// Move the cursor forward to point at the next following UTF-8 character start
-// in the buffer.
-static inline void Utf8CharacterForward(const byte* buffer, unsigned* cursor) {
- byte character = buffer[(*cursor)++];
- if (character > unibrow::Utf8::kMaxOneByteChar) {
- // First character of a multi-byte character encoding.
- // The number of most-significant one-bits determines the length of the
- // encoding:
- // 110..... - (0xCx, 0xDx) one additional byte (minimum).
- // 1110.... - (0xEx) two additional bytes.
- // 11110... - (0xFx) three additional bytes (maximum).
- ASSERT(IsUtf8MultiCharacterStart(character));
- // Additional bytes is:
- // 1 if value in range 0xC0 .. 0xDF.
- // 2 if value in range 0xE0 .. 0xEF.
- // 3 if value in range 0xF0 .. 0xF7.
- // Encode that in a single value.
- unsigned additional_bytes =
- ((0x3211u) >> (((character - 0xC0) >> 2) & 0xC)) & 0x03;
- *cursor += additional_bytes;
- ASSERT(!IsUtf8MultiCharacterFollower(buffer[1 + additional_bytes]));
- }
-}
-
-
-// This can't set a raw position between two surrogate pairs, since there
-// is no position in the UTF8 stream that corresponds to that. This assumes
-// that the surrogate pair is correctly coded as a 4 byte UTF-8 sequence. If
-// it is illegally coded as two 3 byte sequences then there is no problem here.
-void Utf8ToUtf16CharacterStream::SetRawPosition(unsigned target_position) {
- if (raw_character_position_ > target_position) {
- // Spool backwards in utf8 buffer.
- do {
- int old_pos = raw_data_pos_;
- Utf8CharacterBack(raw_data_, &raw_data_pos_);
- raw_character_position_--;
- ASSERT(old_pos - raw_data_pos_ <= 4);
- // Step back over both code units for surrogate pairs.
- if (old_pos - raw_data_pos_ == 4) raw_character_position_--;
- } while (raw_character_position_ > target_position);
- // No surrogate pair splitting.
- ASSERT(raw_character_position_ == target_position);
- return;
- }
- // Spool forwards in the utf8 buffer.
- while (raw_character_position_ < target_position) {
- if (raw_data_pos_ == raw_data_length_) return;
- int old_pos = raw_data_pos_;
- Utf8CharacterForward(raw_data_, &raw_data_pos_);
- raw_character_position_++;
- ASSERT(raw_data_pos_ - old_pos <= 4);
- if (raw_data_pos_ - old_pos == 4) raw_character_position_++;
- }
- // No surrogate pair splitting.
- ASSERT(raw_character_position_ == target_position);
-}
-
-
-// ----------------------------------------------------------------------------
-// ExternalTwoByteStringUtf16CharacterStream
-
-ExternalTwoByteStringUtf16CharacterStream::
- ~ExternalTwoByteStringUtf16CharacterStream() { }
-
-
-ExternalTwoByteStringUtf16CharacterStream
- ::ExternalTwoByteStringUtf16CharacterStream(
- Handle<ExternalTwoByteString> data,
- int start_position,
- int end_position)
- : Utf16CharacterStream(),
- source_(data),
- raw_data_(data->GetTwoByteData(start_position)) {
- buffer_cursor_ = raw_data_,
- buffer_end_ = raw_data_ + (end_position - start_position);
- pos_ = start_position;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/scanner-character-streams.h b/src/3rdparty/v8/src/scanner-character-streams.h
deleted file mode 100644
index 319ee8f..0000000
--- a/src/3rdparty/v8/src/scanner-character-streams.h
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SCANNER_CHARACTER_STREAMS_H_
-#define V8_SCANNER_CHARACTER_STREAMS_H_
-
-#include "scanner.h"
-
-namespace v8 {
-namespace internal {
-
-// A buffered character stream based on a random access character
-// source (ReadBlock can be called with pos_ pointing to any position,
-// even positions before the current).
-class BufferedUtf16CharacterStream: public Utf16CharacterStream {
- public:
- BufferedUtf16CharacterStream();
- virtual ~BufferedUtf16CharacterStream();
-
- virtual void PushBack(uc32 character);
-
- protected:
- static const unsigned kBufferSize = 512;
- static const unsigned kPushBackStepSize = 16;
-
- virtual unsigned SlowSeekForward(unsigned delta);
- virtual bool ReadBlock();
- virtual void SlowPushBack(uc16 character);
-
- virtual unsigned BufferSeekForward(unsigned delta) = 0;
- virtual unsigned FillBuffer(unsigned position, unsigned length) = 0;
-
- const uc16* pushback_limit_;
- uc16 buffer_[kBufferSize];
-};
-
-
-// Generic string stream.
-class GenericStringUtf16CharacterStream: public BufferedUtf16CharacterStream {
- public:
- GenericStringUtf16CharacterStream(Handle<String> data,
- unsigned start_position,
- unsigned end_position);
- virtual ~GenericStringUtf16CharacterStream();
-
- protected:
- virtual unsigned BufferSeekForward(unsigned delta);
- virtual unsigned FillBuffer(unsigned position, unsigned length);
-
- Handle<String> string_;
- unsigned start_position_;
- unsigned length_;
-};
-
-
-// Utf16 stream based on a literal UTF-8 string.
-class Utf8ToUtf16CharacterStream: public BufferedUtf16CharacterStream {
- public:
- Utf8ToUtf16CharacterStream(const byte* data, unsigned length);
- virtual ~Utf8ToUtf16CharacterStream();
-
- protected:
- virtual unsigned BufferSeekForward(unsigned delta);
- virtual unsigned FillBuffer(unsigned char_position, unsigned length);
- void SetRawPosition(unsigned char_position);
-
- const byte* raw_data_;
- unsigned raw_data_length_; // Measured in bytes, not characters.
- unsigned raw_data_pos_;
- // The character position of the character at raw_data[raw_data_pos_].
- // Not necessarily the same as pos_.
- unsigned raw_character_position_;
-};
-
-
-// UTF16 buffer to read characters from an external string.
-class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
- public:
- ExternalTwoByteStringUtf16CharacterStream(Handle<ExternalTwoByteString> data,
- int start_position,
- int end_position);
- virtual ~ExternalTwoByteStringUtf16CharacterStream();
-
- virtual void PushBack(uc32 character) {
- ASSERT(buffer_cursor_ > raw_data_);
- buffer_cursor_--;
- pos_--;
- }
-
- protected:
- virtual unsigned SlowSeekForward(unsigned delta) {
- // Fast case always handles seeking.
- return 0;
- }
- virtual bool ReadBlock() {
- // Entire string is read at start.
- return false;
- }
- Handle<ExternalTwoByteString> source_;
- const uc16* raw_data_; // Pointer to the actual array of characters.
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SCANNER_CHARACTER_STREAMS_H_
diff --git a/src/3rdparty/v8/src/scanner.cc b/src/3rdparty/v8/src/scanner.cc
deleted file mode 100755
index 61ee1a4..0000000
--- a/src/3rdparty/v8/src/scanner.cc
+++ /dev/null
@@ -1,1094 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Features shared by parsing and pre-parsing scanners.
-
-#include "scanner.h"
-
-#include "../include/v8stdint.h"
-#include "char-predicates-inl.h"
-
-#undef CONST
-#undef DELETE
-#undef IN
-#undef VOID
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Scanner
-
-Scanner::Scanner(UnicodeCache* unicode_cache)
- : unicode_cache_(unicode_cache),
- octal_pos_(Location::invalid()),
- harmony_scoping_(false),
- harmony_modules_(false) { }
-
-
-void Scanner::Initialize(Utf16CharacterStream* source) {
- source_ = source;
- // Need to capture identifiers in order to recognize "get" and "set"
- // in object literals.
- Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
- has_line_terminator_before_next_ = true;
- SkipWhiteSpace();
- Scan();
-}
-
-
-uc32 Scanner::ScanHexNumber(int expected_length) {
- ASSERT(expected_length <= 4); // prevent overflow
-
- uc32 digits[4] = { 0, 0, 0, 0 };
- uc32 x = 0;
- for (int i = 0; i < expected_length; i++) {
- digits[i] = c0_;
- int d = HexValue(c0_);
- if (d < 0) {
- // According to ECMA-262, 3rd, 7.8.4, page 18, these hex escapes
- // should be illegal, but other JS VMs just return the
- // non-escaped version of the original character.
-
- // Push back digits that we have advanced past.
- for (int j = i-1; j >= 0; j--) {
- PushBack(digits[j]);
- }
- return -1;
- }
- x = x * 16 + d;
- Advance();
- }
-
- return x;
-}
-
-
-// Ensure that tokens can be stored in a byte.
-STATIC_ASSERT(Token::NUM_TOKENS <= 0x100);
-
-// Table of one-character tokens, by character (0x00..0x7f only).
-static const byte one_char_tokens[] = {
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::LPAREN, // 0x28
- Token::RPAREN, // 0x29
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::COMMA, // 0x2c
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::COLON, // 0x3a
- Token::SEMICOLON, // 0x3b
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::CONDITIONAL, // 0x3f
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::LBRACK, // 0x5b
- Token::ILLEGAL,
- Token::RBRACK, // 0x5d
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::LBRACE, // 0x7b
- Token::ILLEGAL,
- Token::RBRACE, // 0x7d
- Token::BIT_NOT, // 0x7e
- Token::ILLEGAL
-};
-
-
-Token::Value Scanner::Next() {
- current_ = next_;
- has_line_terminator_before_next_ = false;
- has_multiline_comment_before_next_ = false;
- if (static_cast<unsigned>(c0_) <= 0x7f) {
- Token::Value token = static_cast<Token::Value>(one_char_tokens[c0_]);
- if (token != Token::ILLEGAL) {
- int pos = source_pos();
- next_.token = token;
- next_.location.beg_pos = pos;
- next_.location.end_pos = pos + 1;
- Advance();
- return current_.token;
- }
- }
- Scan();
- return current_.token;
-}
-
-
-static inline bool IsByteOrderMark(uc32 c) {
- // The Unicode value U+FFFE is guaranteed never to be assigned as a
- // Unicode character; this implies that in a Unicode context the
- // 0xFF, 0xFE byte pattern can only be interpreted as the U+FEFF
- // character expressed in little-endian byte order (since it could
- // not be a U+FFFE character expressed in big-endian byte
- // order). Nevertheless, we check for it to be compatible with
- // Spidermonkey.
- return c == 0xFEFF || c == 0xFFFE;
-}
-
-
-bool Scanner::SkipWhiteSpace() {
- int start_position = source_pos();
-
- while (true) {
- // We treat byte-order marks (BOMs) as whitespace for better
- // compatibility with Spidermonkey and other JavaScript engines.
- while (unicode_cache_->IsWhiteSpace(c0_) || IsByteOrderMark(c0_)) {
- // IsWhiteSpace() includes line terminators!
- if (unicode_cache_->IsLineTerminator(c0_)) {
- // Ignore line terminators, but remember them. This is necessary
- // for automatic semicolon insertion.
- has_line_terminator_before_next_ = true;
- }
- Advance();
- }
-
- // If there is an HTML comment end '-->' at the beginning of a
- // line (with only whitespace in front of it), we treat the rest
- // of the line as a comment. This is in line with the way
- // SpiderMonkey handles it.
- if (c0_ == '-' && has_line_terminator_before_next_) {
- Advance();
- if (c0_ == '-') {
- Advance();
- if (c0_ == '>') {
- // Treat the rest of the line as a comment.
- SkipSingleLineComment();
- // Continue skipping white space after the comment.
- continue;
- }
- PushBack('-'); // undo Advance()
- }
- PushBack('-'); // undo Advance()
- }
- // Return whether or not we skipped any characters.
- return source_pos() != start_position;
- }
-}
-
-
-Token::Value Scanner::SkipSingleLineComment() {
- Advance();
-
- // The line terminator at the end of the line is not considered
- // to be part of the single-line comment; it is recognized
- // separately by the lexical grammar and becomes part of the
- // stream of input elements for the syntactic grammar (see
- // ECMA-262, section 7.4).
- while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
- Advance();
- }
-
- return Token::WHITESPACE;
-}
-
-
-Token::Value Scanner::SkipMultiLineComment() {
- ASSERT(c0_ == '*');
- Advance();
-
- while (c0_ >= 0) {
- uc32 ch = c0_;
- Advance();
- if (unicode_cache_->IsLineTerminator(ch)) {
- // Following ECMA-262, section 7.4, a comment containing
- // a newline will make the comment count as a line-terminator.
- has_multiline_comment_before_next_ = true;
- }
- // If we have reached the end of the multi-line comment, we
- // consume the '/' and insert a whitespace. This way all
- // multi-line comments are treated as whitespace.
- if (ch == '*' && c0_ == '/') {
- c0_ = ' ';
- return Token::WHITESPACE;
- }
- }
-
- // Unterminated multi-line comment.
- return Token::ILLEGAL;
-}
-
-
-Token::Value Scanner::ScanHtmlComment() {
- // Check for <!-- comments.
- ASSERT(c0_ == '!');
- Advance();
- if (c0_ == '-') {
- Advance();
- if (c0_ == '-') return SkipSingleLineComment();
- PushBack('-'); // undo Advance()
- }
- PushBack('!'); // undo Advance()
- ASSERT(c0_ == '!');
- return Token::LT;
-}
-
-
-void Scanner::Scan() {
- next_.literal_chars = NULL;
- Token::Value token;
- do {
- // Remember the position of the next token
- next_.location.beg_pos = source_pos();
-
- switch (c0_) {
- case ' ':
- case '\t':
- Advance();
- token = Token::WHITESPACE;
- break;
-
- case '\n':
- Advance();
- has_line_terminator_before_next_ = true;
- token = Token::WHITESPACE;
- break;
-
- case '"': case '\'':
- token = ScanString();
- break;
-
- case '<':
- // < <= << <<= <!--
- Advance();
- if (c0_ == '=') {
- token = Select(Token::LTE);
- } else if (c0_ == '<') {
- token = Select('=', Token::ASSIGN_SHL, Token::SHL);
- } else if (c0_ == '!') {
- token = ScanHtmlComment();
- } else {
- token = Token::LT;
- }
- break;
-
- case '>':
- // > >= >> >>= >>> >>>=
- Advance();
- if (c0_ == '=') {
- token = Select(Token::GTE);
- } else if (c0_ == '>') {
- // >> >>= >>> >>>=
- Advance();
- if (c0_ == '=') {
- token = Select(Token::ASSIGN_SAR);
- } else if (c0_ == '>') {
- token = Select('=', Token::ASSIGN_SHR, Token::SHR);
- } else {
- token = Token::SAR;
- }
- } else {
- token = Token::GT;
- }
- break;
-
- case '=':
- // = == ===
- Advance();
- if (c0_ == '=') {
- token = Select('=', Token::EQ_STRICT, Token::EQ);
- } else {
- token = Token::ASSIGN;
- }
- break;
-
- case '!':
- // ! != !==
- Advance();
- if (c0_ == '=') {
- token = Select('=', Token::NE_STRICT, Token::NE);
- } else {
- token = Token::NOT;
- }
- break;
-
- case '+':
- // + ++ +=
- Advance();
- if (c0_ == '+') {
- token = Select(Token::INC);
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_ADD);
- } else {
- token = Token::ADD;
- }
- break;
-
- case '-':
- // - -- --> -=
- Advance();
- if (c0_ == '-') {
- Advance();
- if (c0_ == '>' && has_line_terminator_before_next_) {
- // For compatibility with SpiderMonkey, we skip lines that
- // start with an HTML comment end '-->'.
- token = SkipSingleLineComment();
- } else {
- token = Token::DEC;
- }
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_SUB);
- } else {
- token = Token::SUB;
- }
- break;
-
- case '*':
- // * *=
- token = Select('=', Token::ASSIGN_MUL, Token::MUL);
- break;
-
- case '%':
- // % %=
- token = Select('=', Token::ASSIGN_MOD, Token::MOD);
- break;
-
- case '/':
- // / // /* /=
- Advance();
- if (c0_ == '/') {
- token = SkipSingleLineComment();
- } else if (c0_ == '*') {
- token = SkipMultiLineComment();
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_DIV);
- } else {
- token = Token::DIV;
- }
- break;
-
- case '&':
- // & && &=
- Advance();
- if (c0_ == '&') {
- token = Select(Token::AND);
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_BIT_AND);
- } else {
- token = Token::BIT_AND;
- }
- break;
-
- case '|':
- // | || |=
- Advance();
- if (c0_ == '|') {
- token = Select(Token::OR);
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_BIT_OR);
- } else {
- token = Token::BIT_OR;
- }
- break;
-
- case '^':
- // ^ ^=
- token = Select('=', Token::ASSIGN_BIT_XOR, Token::BIT_XOR);
- break;
-
- case '.':
- // . Number
- Advance();
- if (IsDecimalDigit(c0_)) {
- token = ScanNumber(true);
- } else {
- token = Token::PERIOD;
- }
- break;
-
- case ':':
- token = Select(Token::COLON);
- break;
-
- case ';':
- token = Select(Token::SEMICOLON);
- break;
-
- case ',':
- token = Select(Token::COMMA);
- break;
-
- case '(':
- token = Select(Token::LPAREN);
- break;
-
- case ')':
- token = Select(Token::RPAREN);
- break;
-
- case '[':
- token = Select(Token::LBRACK);
- break;
-
- case ']':
- token = Select(Token::RBRACK);
- break;
-
- case '{':
- token = Select(Token::LBRACE);
- break;
-
- case '}':
- token = Select(Token::RBRACE);
- break;
-
- case '?':
- token = Select(Token::CONDITIONAL);
- break;
-
- case '~':
- token = Select(Token::BIT_NOT);
- break;
-
- default:
- if (unicode_cache_->IsIdentifierStart(c0_)) {
- token = ScanIdentifierOrKeyword();
- } else if (IsDecimalDigit(c0_)) {
- token = ScanNumber(false);
- } else if (SkipWhiteSpace()) {
- token = Token::WHITESPACE;
- } else if (c0_ < 0) {
- token = Token::EOS;
- } else {
- token = Select(Token::ILLEGAL);
- }
- break;
- }
-
- // Continue scanning for tokens as long as we're just skipping
- // whitespace.
- } while (token == Token::WHITESPACE);
-
- next_.location.end_pos = source_pos();
- next_.token = token;
-}
-
-
-void Scanner::SeekForward(int pos) {
- // After this call, we will have the token at the given position as
- // the "next" token. The "current" token will be invalid.
- if (pos == next_.location.beg_pos) return;
- int current_pos = source_pos();
- ASSERT_EQ(next_.location.end_pos, current_pos);
- // Positions inside the lookahead token aren't supported.
- ASSERT(pos >= current_pos);
- if (pos != current_pos) {
- source_->SeekForward(pos - source_->pos());
- Advance();
- // This function is only called to seek to the location
- // of the end of a function (at the "}" token). It doesn't matter
- // whether there was a line terminator in the part we skip.
- has_line_terminator_before_next_ = false;
- has_multiline_comment_before_next_ = false;
- }
- Scan();
-}
-
-
-bool Scanner::ScanEscape() {
- uc32 c = c0_;
- Advance();
-
- // Skip escaped newlines.
- if (unicode_cache_->IsLineTerminator(c)) {
- // Allow CR+LF newlines in multiline string literals.
- if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance();
- // Allow LF+CR newlines in multiline string literals.
- if (IsLineFeed(c) && IsCarriageReturn(c0_)) Advance();
- return true;
- }
-
- switch (c) {
- case '\'': // fall through
- case '"' : // fall through
- case '\\': break;
- case 'b' : c = '\b'; break;
- case 'f' : c = '\f'; break;
- case 'n' : c = '\n'; break;
- case 'r' : c = '\r'; break;
- case 't' : c = '\t'; break;
- case 'u' : {
- c = ScanHexNumber(4);
- if (c < 0) return false;
- break;
- }
- case 'v' : c = '\v'; break;
- case 'x' : {
- c = ScanHexNumber(2);
- if (c < 0) return false;
- break;
- }
- case '0' : // fall through
- case '1' : // fall through
- case '2' : // fall through
- case '3' : // fall through
- case '4' : // fall through
- case '5' : // fall through
- case '6' : // fall through
- case '7' : c = ScanOctalEscape(c, 2); break;
- }
-
- // According to ECMA-262, section 7.8.4, characters not covered by the
- // above cases should be illegal, but they are commonly handled as
- // non-escaped characters by JS VMs.
- AddLiteralChar(c);
- return true;
-}
-
-
-// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
-// ECMA-262. Other JS VMs support them.
-uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
- uc32 x = c - '0';
- int i = 0;
- for (; i < length; i++) {
- int d = c0_ - '0';
- if (d < 0 || d > 7) break;
- int nx = x * 8 + d;
- if (nx >= 256) break;
- x = nx;
- Advance();
- }
- // Anything except '\0' is an octal escape sequence, illegal in strict mode.
- // Remember the position of octal escape sequences so that an error
- // can be reported later (in strict mode).
- // We don't report the error immediately, because the octal escape can
- // occur before the "use strict" directive.
- if (c != '0' || i > 0) {
- octal_pos_ = Location(source_pos() - i - 1, source_pos() - 1);
- }
- return x;
-}
-
-
-Token::Value Scanner::ScanString() {
- uc32 quote = c0_;
- Advance(); // consume quote
-
- LiteralScope literal(this);
- while (c0_ != quote && c0_ >= 0
- && !unicode_cache_->IsLineTerminator(c0_)) {
- uc32 c = c0_;
- Advance();
- if (c == '\\') {
- if (c0_ < 0 || !ScanEscape()) return Token::ILLEGAL;
- } else {
- AddLiteralChar(c);
- }
- }
- if (c0_ != quote) return Token::ILLEGAL;
- literal.Complete();
-
- Advance(); // consume quote
- return Token::STRING;
-}
-
-
-void Scanner::ScanDecimalDigits() {
- while (IsDecimalDigit(c0_))
- AddLiteralCharAdvance();
-}
-
-
-Token::Value Scanner::ScanNumber(bool seen_period) {
- ASSERT(IsDecimalDigit(c0_)); // the first digit of the number or the fraction
-
- enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
-
- LiteralScope literal(this);
- if (seen_period) {
- // we have already seen a decimal point of the float
- AddLiteralChar('.');
- ScanDecimalDigits(); // we know we have at least one digit
-
- } else {
- // if the first character is '0' we must check for octals and hex
- if (c0_ == '0') {
- int start_pos = source_pos(); // For reporting octal positions.
- AddLiteralCharAdvance();
-
- // either 0, 0exxx, 0Exxx, 0.xxx, an octal number, or a hex number
- if (c0_ == 'x' || c0_ == 'X') {
- // hex number
- kind = HEX;
- AddLiteralCharAdvance();
- if (!IsHexDigit(c0_)) {
- // we must have at least one hex digit after 'x'/'X'
- return Token::ILLEGAL;
- }
- while (IsHexDigit(c0_)) {
- AddLiteralCharAdvance();
- }
- } else if ('0' <= c0_ && c0_ <= '7') {
- // (possible) octal number
- kind = OCTAL;
- while (true) {
- if (c0_ == '8' || c0_ == '9') {
- kind = DECIMAL;
- break;
- }
- if (c0_ < '0' || '7' < c0_) {
- // Octal literal finished.
- octal_pos_ = Location(start_pos, source_pos());
- break;
- }
- AddLiteralCharAdvance();
- }
- }
- }
-
- // Parse decimal digits and allow trailing fractional part.
- if (kind == DECIMAL) {
- ScanDecimalDigits(); // optional
- if (c0_ == '.') {
- AddLiteralCharAdvance();
- ScanDecimalDigits(); // optional
- }
- }
- }
-
- // scan exponent, if any
- if (c0_ == 'e' || c0_ == 'E') {
- ASSERT(kind != HEX); // 'e'/'E' must be scanned as part of the hex number
- if (kind == OCTAL) return Token::ILLEGAL; // no exponent for octals allowed
- // scan exponent
- AddLiteralCharAdvance();
- if (c0_ == '+' || c0_ == '-')
- AddLiteralCharAdvance();
- if (!IsDecimalDigit(c0_)) {
- // we must have at least one decimal digit after 'e'/'E'
- return Token::ILLEGAL;
- }
- ScanDecimalDigits();
- }
-
- // The source character immediately following a numeric literal must
- // not be an identifier start or a decimal digit; see ECMA-262
- // section 7.8.3, page 17 (note that we read only one decimal digit
- // if the value is 0).
- if (IsDecimalDigit(c0_) || unicode_cache_->IsIdentifierStart(c0_))
- return Token::ILLEGAL;
-
- literal.Complete();
-
- return Token::NUMBER;
-}
-
-
-uc32 Scanner::ScanIdentifierUnicodeEscape() {
- Advance();
- if (c0_ != 'u') return -1;
- Advance();
- uc32 result = ScanHexNumber(4);
- if (result < 0) PushBack('u');
- return result;
-}
-
-
-// ----------------------------------------------------------------------------
-// Keyword Matcher
-
-#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
- KEYWORD_GROUP('b') \
- KEYWORD("break", Token::BREAK) \
- KEYWORD_GROUP('c') \
- KEYWORD("case", Token::CASE) \
- KEYWORD("catch", Token::CATCH) \
- KEYWORD("class", Token::FUTURE_RESERVED_WORD) \
- KEYWORD("const", Token::CONST) \
- KEYWORD("continue", Token::CONTINUE) \
- KEYWORD_GROUP('d') \
- KEYWORD("debugger", Token::DEBUGGER) \
- KEYWORD("default", Token::DEFAULT) \
- KEYWORD("delete", Token::DELETE) \
- KEYWORD("do", Token::DO) \
- KEYWORD_GROUP('e') \
- KEYWORD("else", Token::ELSE) \
- KEYWORD("enum", Token::FUTURE_RESERVED_WORD) \
- KEYWORD("export", harmony_modules \
- ? Token::EXPORT : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("extends", Token::FUTURE_RESERVED_WORD) \
- KEYWORD_GROUP('f') \
- KEYWORD("false", Token::FALSE_LITERAL) \
- KEYWORD("finally", Token::FINALLY) \
- KEYWORD("for", Token::FOR) \
- KEYWORD("function", Token::FUNCTION) \
- KEYWORD_GROUP('i') \
- KEYWORD("if", Token::IF) \
- KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("import", harmony_modules \
- ? Token::IMPORT : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("in", Token::IN) \
- KEYWORD("instanceof", Token::INSTANCEOF) \
- KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('l') \
- KEYWORD("let", harmony_scoping \
- ? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('n') \
- KEYWORD("new", Token::NEW) \
- KEYWORD("null", Token::NULL_LITERAL) \
- KEYWORD_GROUP('p') \
- KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('r') \
- KEYWORD("return", Token::RETURN) \
- KEYWORD_GROUP('s') \
- KEYWORD("static", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("super", Token::FUTURE_RESERVED_WORD) \
- KEYWORD("switch", Token::SWITCH) \
- KEYWORD_GROUP('t') \
- KEYWORD("this", Token::THIS) \
- KEYWORD("throw", Token::THROW) \
- KEYWORD("true", Token::TRUE_LITERAL) \
- KEYWORD("try", Token::TRY) \
- KEYWORD("typeof", Token::TYPEOF) \
- KEYWORD_GROUP('v') \
- KEYWORD("var", Token::VAR) \
- KEYWORD("void", Token::VOID) \
- KEYWORD_GROUP('w') \
- KEYWORD("while", Token::WHILE) \
- KEYWORD("with", Token::WITH) \
- KEYWORD_GROUP('y') \
- KEYWORD("yield", Token::FUTURE_STRICT_RESERVED_WORD)
-
-
-static Token::Value KeywordOrIdentifierToken(const char* input,
- int input_length,
- bool harmony_scoping,
- bool harmony_modules) {
- ASSERT(input_length >= 1);
- const int kMinLength = 2;
- const int kMaxLength = 10;
- if (input_length < kMinLength || input_length > kMaxLength) {
- return Token::IDENTIFIER;
- }
- switch (input[0]) {
- default:
-#define KEYWORD_GROUP_CASE(ch) \
- break; \
- case ch:
-#define KEYWORD(keyword, token) \
- { \
- /* 'keyword' is a char array, so sizeof(keyword) is */ \
- /* strlen(keyword) plus 1 for the NUL char. */ \
- const int keyword_length = sizeof(keyword) - 1; \
- STATIC_ASSERT(keyword_length >= kMinLength); \
- STATIC_ASSERT(keyword_length <= kMaxLength); \
- if (input_length == keyword_length && \
- input[1] == keyword[1] && \
- (keyword_length <= 2 || input[2] == keyword[2]) && \
- (keyword_length <= 3 || input[3] == keyword[3]) && \
- (keyword_length <= 4 || input[4] == keyword[4]) && \
- (keyword_length <= 5 || input[5] == keyword[5]) && \
- (keyword_length <= 6 || input[6] == keyword[6]) && \
- (keyword_length <= 7 || input[7] == keyword[7]) && \
- (keyword_length <= 8 || input[8] == keyword[8]) && \
- (keyword_length <= 9 || input[9] == keyword[9])) { \
- return token; \
- } \
- }
- KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD)
- }
- return Token::IDENTIFIER;
-}
-
-
-Token::Value Scanner::ScanIdentifierOrKeyword() {
- ASSERT(unicode_cache_->IsIdentifierStart(c0_));
- LiteralScope literal(this);
- // Scan identifier start character.
- if (c0_ == '\\') {
- uc32 c = ScanIdentifierUnicodeEscape();
- // Only allow legal identifier start characters.
- if (c < 0 ||
- c == '\\' || // No recursive escapes.
- !unicode_cache_->IsIdentifierStart(c)) {
- return Token::ILLEGAL;
- }
- AddLiteralChar(c);
- return ScanIdentifierSuffix(&literal);
- }
-
- uc32 first_char = c0_;
- Advance();
- AddLiteralChar(first_char);
-
- // Scan the rest of the identifier characters.
- while (unicode_cache_->IsIdentifierPart(c0_)) {
- if (c0_ != '\\') {
- uc32 next_char = c0_;
- Advance();
- AddLiteralChar(next_char);
- continue;
- }
- // Fallthrough if no longer able to complete keyword.
- return ScanIdentifierSuffix(&literal);
- }
-
- literal.Complete();
-
- if (next_.literal_chars->is_ascii()) {
- Vector<const char> chars = next_.literal_chars->ascii_literal();
- return KeywordOrIdentifierToken(chars.start(),
- chars.length(),
- harmony_scoping_,
- harmony_modules_);
- }
-
- return Token::IDENTIFIER;
-}
-
-
-Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal) {
- // Scan the rest of the identifier characters.
- while (unicode_cache_->IsIdentifierPart(c0_)) {
- if (c0_ == '\\') {
- uc32 c = ScanIdentifierUnicodeEscape();
- // Only allow legal identifier part characters.
- if (c < 0 ||
- c == '\\' ||
- !unicode_cache_->IsIdentifierPart(c)) {
- return Token::ILLEGAL;
- }
- AddLiteralChar(c);
- } else {
- AddLiteralChar(c0_);
- Advance();
- }
- }
- literal->Complete();
-
- return Token::IDENTIFIER;
-}
-
-
-bool Scanner::ScanRegExpPattern(bool seen_equal) {
- // Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
- bool in_character_class = false;
-
- // Previous token is either '/' or '/=', in the second case, the
- // pattern starts at =.
- next_.location.beg_pos = source_pos() - (seen_equal ? 2 : 1);
- next_.location.end_pos = source_pos() - (seen_equal ? 1 : 0);
-
- // Scan regular expression body: According to ECMA-262, 3rd, 7.8.5,
- // the scanner should pass uninterpreted bodies to the RegExp
- // constructor.
- LiteralScope literal(this);
- if (seen_equal) {
- AddLiteralChar('=');
- }
-
- while (c0_ != '/' || in_character_class) {
- if (unicode_cache_->IsLineTerminator(c0_) || c0_ < 0) return false;
- if (c0_ == '\\') { // Escape sequence.
- AddLiteralCharAdvance();
- if (unicode_cache_->IsLineTerminator(c0_) || c0_ < 0) return false;
- AddLiteralCharAdvance();
- // If the escape allows more characters, i.e., \x??, \u????, or \c?,
- // only "safe" characters are allowed (letters, digits, underscore),
- // otherwise the escape isn't valid and the invalid character has
- // its normal meaning. I.e., we can just continue scanning without
- // worrying whether the following characters are part of the escape
- // or not, since any '/', '\\' or '[' is guaranteed to not be part
- // of the escape sequence.
-
- // TODO(896): At some point, parse RegExps more throughly to capture
- // octal esacpes in strict mode.
- } else { // Unescaped character.
- if (c0_ == '[') in_character_class = true;
- if (c0_ == ']') in_character_class = false;
- AddLiteralCharAdvance();
- }
- }
- Advance(); // consume '/'
-
- literal.Complete();
-
- return true;
-}
-
-
-bool Scanner::ScanLiteralUnicodeEscape() {
- ASSERT(c0_ == '\\');
- uc32 chars_read[6] = {'\\', 'u', 0, 0, 0, 0};
- Advance();
- int i = 1;
- if (c0_ == 'u') {
- i++;
- while (i < 6) {
- Advance();
- if (!IsHexDigit(c0_)) break;
- chars_read[i] = c0_;
- i++;
- }
- }
- if (i < 6) {
- // Incomplete escape. Undo all advances and return false.
- while (i > 0) {
- i--;
- PushBack(chars_read[i]);
- }
- return false;
- }
- // Complete escape. Add all chars to current literal buffer.
- for (int i = 0; i < 6; i++) {
- AddLiteralChar(chars_read[i]);
- }
- return true;
-}
-
-
-bool Scanner::ScanRegExpFlags() {
- // Scan regular expression flags.
- LiteralScope literal(this);
- while (unicode_cache_->IsIdentifierPart(c0_)) {
- if (c0_ != '\\') {
- AddLiteralCharAdvance();
- } else {
- if (!ScanLiteralUnicodeEscape()) {
- break;
- }
- Advance();
- }
- }
- literal.Complete();
-
- next_.location.end_pos = source_pos() - 1;
- return true;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/scanner.h b/src/3rdparty/v8/src/scanner.h
deleted file mode 100644
index a454750..0000000
--- a/src/3rdparty/v8/src/scanner.h
+++ /dev/null
@@ -1,570 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Features shared by parsing and pre-parsing scanners.
-
-#ifndef V8_SCANNER_H_
-#define V8_SCANNER_H_
-
-#include "allocation.h"
-#include "char-predicates.h"
-#include "checks.h"
-#include "globals.h"
-#include "token.h"
-#include "unicode-inl.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-
-// General collection of (multi-)bit-flags that can be passed to scanners and
-// parsers to signify their (initial) mode of operation.
-enum ParsingFlags {
- kNoParsingFlags = 0,
- // Embed LanguageMode values in parsing flags, i.e., equivalent to:
- // CLASSIC_MODE = 0,
- // STRICT_MODE,
- // EXTENDED_MODE,
- kLanguageModeMask = 0x03,
- kAllowLazy = 0x04,
- kAllowNativesSyntax = 0x08,
- kAllowModules = 0x10
-};
-
-STATIC_ASSERT((kLanguageModeMask & CLASSIC_MODE) == CLASSIC_MODE);
-STATIC_ASSERT((kLanguageModeMask & STRICT_MODE) == STRICT_MODE);
-STATIC_ASSERT((kLanguageModeMask & EXTENDED_MODE) == EXTENDED_MODE);
-
-
-// Returns the value (0 .. 15) of a hexadecimal character c.
-// If c is not a legal hexadecimal character, returns a value < 0.
-inline int HexValue(uc32 c) {
- c -= '0';
- if (static_cast<unsigned>(c) <= 9) return c;
- c = (c | 0x20) - ('a' - '0'); // detect 0x11..0x16 and 0x31..0x36.
- if (static_cast<unsigned>(c) <= 5) return c + 10;
- return -1;
-}
-
-
-// ---------------------------------------------------------------------
-// Buffered stream of UTF-16 code units, using an internal UTF-16 buffer.
-// A code unit is a 16 bit value representing either a 16 bit code point
-// or one part of a surrogate pair that make a single 21 bit code point.
-
-class Utf16CharacterStream {
- public:
- Utf16CharacterStream() : pos_(0) { }
- virtual ~Utf16CharacterStream() { }
-
- // Returns and advances past the next UTF-16 code unit in the input
- // stream. If there are no more code units, it returns a negative
- // value.
- inline uc32 Advance() {
- if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
- pos_++;
- return static_cast<uc32>(*(buffer_cursor_++));
- }
- // Note: currently the following increment is necessary to avoid a
- // parser problem! The scanner treats the final kEndOfInput as
- // a code unit with a position, and does math relative to that
- // position.
- pos_++;
-
- return kEndOfInput;
- }
-
- // Return the current position in the code unit stream.
- // Starts at zero.
- inline unsigned pos() const { return pos_; }
-
- // Skips forward past the next code_unit_count UTF-16 code units
- // in the input, or until the end of input if that comes sooner.
- // Returns the number of code units actually skipped. If less
- // than code_unit_count,
- inline unsigned SeekForward(unsigned code_unit_count) {
- unsigned buffered_chars =
- static_cast<unsigned>(buffer_end_ - buffer_cursor_);
- if (code_unit_count <= buffered_chars) {
- buffer_cursor_ += code_unit_count;
- pos_ += code_unit_count;
- return code_unit_count;
- }
- return SlowSeekForward(code_unit_count);
- }
-
- // Pushes back the most recently read UTF-16 code unit (or negative
- // value if at end of input), i.e., the value returned by the most recent
- // call to Advance.
- // Must not be used right after calling SeekForward.
- virtual void PushBack(int32_t code_unit) = 0;
-
- protected:
- static const uc32 kEndOfInput = -1;
-
- // Ensures that the buffer_cursor_ points to the code_unit at
- // position pos_ of the input, if possible. If the position
- // is at or after the end of the input, return false. If there
- // are more code_units available, return true.
- virtual bool ReadBlock() = 0;
- virtual unsigned SlowSeekForward(unsigned code_unit_count) = 0;
-
- const uc16* buffer_cursor_;
- const uc16* buffer_end_;
- unsigned pos_;
-};
-
-
-class UnicodeCache {
-// ---------------------------------------------------------------------
-// Caching predicates used by scanners.
- public:
- UnicodeCache() {}
- typedef unibrow::Utf8Decoder<512> Utf8Decoder;
-
- StaticResource<Utf8Decoder>* utf8_decoder() {
- return &utf8_decoder_;
- }
-
- bool IsIdentifierStart(unibrow::uchar c) { return kIsIdentifierStart.get(c); }
- bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
- bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
- bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
-
- private:
- unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
- unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
- unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
- unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
- StaticResource<Utf8Decoder> utf8_decoder_;
-
- DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
-};
-
-
-// ----------------------------------------------------------------------------
-// LiteralBuffer - Collector of chars of literals.
-
-class LiteralBuffer {
- public:
- LiteralBuffer() : is_ascii_(true), position_(0), backing_store_() { }
-
- ~LiteralBuffer() {
- if (backing_store_.length() > 0) {
- backing_store_.Dispose();
- }
- }
-
- INLINE(void AddChar(uint32_t code_unit)) {
- if (position_ >= backing_store_.length()) ExpandBuffer();
- if (is_ascii_) {
- if (code_unit <= unibrow::Latin1::kMaxChar) {
- backing_store_[position_] = static_cast<byte>(code_unit);
- position_ += kOneByteSize;
- return;
- }
- ConvertToUtf16();
- }
- ASSERT(code_unit < 0x10000u);
- *reinterpret_cast<uc16*>(&backing_store_[position_]) = code_unit;
- position_ += kUC16Size;
- }
-
- bool is_ascii() { return is_ascii_; }
-
- Vector<const uc16> utf16_literal() {
- ASSERT(!is_ascii_);
- ASSERT((position_ & 0x1) == 0);
- return Vector<const uc16>(
- reinterpret_cast<const uc16*>(backing_store_.start()),
- position_ >> 1);
- }
-
- Vector<const char> ascii_literal() {
- ASSERT(is_ascii_);
- return Vector<const char>(
- reinterpret_cast<const char*>(backing_store_.start()),
- position_);
- }
-
- int length() {
- return is_ascii_ ? position_ : (position_ >> 1);
- }
-
- void Reset() {
- position_ = 0;
- is_ascii_ = true;
- }
-
- private:
- static const int kInitialCapacity = 16;
- static const int kGrowthFactory = 4;
- static const int kMinConversionSlack = 256;
- static const int kMaxGrowth = 1 * MB;
- inline int NewCapacity(int min_capacity) {
- int capacity = Max(min_capacity, backing_store_.length());
- int new_capacity = Min(capacity * kGrowthFactory, capacity + kMaxGrowth);
- return new_capacity;
- }
-
- void ExpandBuffer() {
- Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
- memcpy(new_store.start(), backing_store_.start(), position_);
- backing_store_.Dispose();
- backing_store_ = new_store;
- }
-
- void ConvertToUtf16() {
- ASSERT(is_ascii_);
- Vector<byte> new_store;
- int new_content_size = position_ * kUC16Size;
- if (new_content_size >= backing_store_.length()) {
- // Ensure room for all currently read code units as UC16 as well
- // as the code unit about to be stored.
- new_store = Vector<byte>::New(NewCapacity(new_content_size));
- } else {
- new_store = backing_store_;
- }
- uint8_t* src = backing_store_.start();
- uc16* dst = reinterpret_cast<uc16*>(new_store.start());
- for (int i = position_ - 1; i >= 0; i--) {
- dst[i] = src[i];
- }
- if (new_store.start() != backing_store_.start()) {
- backing_store_.Dispose();
- backing_store_ = new_store;
- }
- position_ = new_content_size;
- is_ascii_ = false;
- }
-
- bool is_ascii_;
- int position_;
- Vector<byte> backing_store_;
-
- DISALLOW_COPY_AND_ASSIGN(LiteralBuffer);
-};
-
-
-// ----------------------------------------------------------------------------
-// JavaScript Scanner.
-
-class Scanner {
- public:
- // Scoped helper for literal recording. Automatically drops the literal
- // if aborting the scanning before it's complete.
- class LiteralScope {
- public:
- explicit LiteralScope(Scanner* self)
- : scanner_(self), complete_(false) {
- scanner_->StartLiteral();
- }
- ~LiteralScope() {
- if (!complete_) scanner_->DropLiteral();
- }
- void Complete() {
- scanner_->TerminateLiteral();
- complete_ = true;
- }
-
- private:
- Scanner* scanner_;
- bool complete_;
- };
-
- // Representation of an interval of source positions.
- struct Location {
- Location(int b, int e) : beg_pos(b), end_pos(e) { }
- Location() : beg_pos(0), end_pos(0) { }
-
- bool IsValid() const {
- return beg_pos >= 0 && end_pos >= beg_pos;
- }
-
- static Location invalid() { return Location(-1, -1); }
-
- int beg_pos;
- int end_pos;
- };
-
- // -1 is outside of the range of any real source code.
- static const int kNoOctalLocation = -1;
-
- explicit Scanner(UnicodeCache* scanner_contants);
-
- void Initialize(Utf16CharacterStream* source);
-
- // Returns the next token and advances input.
- Token::Value Next();
- // Returns the current token again.
- Token::Value current_token() { return current_.token; }
- // Returns the location information for the current token
- // (the token last returned by Next()).
- Location location() const { return current_.location; }
- // Returns the literal string, if any, for the current token (the
- // token last returned by Next()). The string is 0-terminated.
- // Literal strings are collected for identifiers, strings, and
- // numbers.
- // These functions only give the correct result if the literal
- // was scanned between calls to StartLiteral() and TerminateLiteral().
- Vector<const char> literal_ascii_string() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->ascii_literal();
- }
- Vector<const uc16> literal_utf16_string() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->utf16_literal();
- }
- bool is_literal_ascii() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->is_ascii();
- }
- int literal_length() const {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->length();
- }
-
- bool literal_contains_escapes() const {
- Location location = current_.location;
- int source_length = (location.end_pos - location.beg_pos);
- if (current_.token == Token::STRING) {
- // Subtract delimiters.
- source_length -= 2;
- }
- return current_.literal_chars->length() != source_length;
- }
-
- // Similar functions for the upcoming token.
-
- // One token look-ahead (past the token returned by Next()).
- Token::Value peek() const { return next_.token; }
-
- Location peek_location() const { return next_.location; }
-
- // Returns the literal string for the next token (the token that
- // would be returned if Next() were called).
- Vector<const char> next_literal_ascii_string() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->ascii_literal();
- }
- Vector<const uc16> next_literal_utf16_string() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->utf16_literal();
- }
- bool is_next_literal_ascii() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->is_ascii();
- }
- int next_literal_length() const {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->length();
- }
-
- UnicodeCache* unicode_cache() { return unicode_cache_; }
-
- static const int kCharacterLookaheadBufferSize = 1;
-
- // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
- uc32 ScanOctalEscape(uc32 c, int length);
-
- // Returns the location of the last seen octal literal.
- Location octal_position() const { return octal_pos_; }
- void clear_octal_position() { octal_pos_ = Location::invalid(); }
-
- // Seek forward to the given position. This operation does not
- // work in general, for instance when there are pushed back
- // characters, but works for seeking forward until simple delimiter
- // tokens, which is what it is used for.
- void SeekForward(int pos);
-
- bool HarmonyScoping() const {
- return harmony_scoping_;
- }
- void SetHarmonyScoping(bool scoping) {
- harmony_scoping_ = scoping;
- }
- bool HarmonyModules() const {
- return harmony_modules_;
- }
- void SetHarmonyModules(bool modules) {
- harmony_modules_ = modules;
- }
-
-
- // Returns true if there was a line terminator before the peek'ed token,
- // possibly inside a multi-line comment.
- bool HasAnyLineTerminatorBeforeNext() const {
- return has_line_terminator_before_next_ ||
- has_multiline_comment_before_next_;
- }
-
- // Scans the input as a regular expression pattern, previous
- // character(s) must be /(=). Returns true if a pattern is scanned.
- bool ScanRegExpPattern(bool seen_equal);
- // Returns true if regexp flags are scanned (always since flags can
- // be empty).
- bool ScanRegExpFlags();
-
- private:
- // The current and look-ahead token.
- struct TokenDesc {
- Token::Value token;
- Location location;
- LiteralBuffer* literal_chars;
- };
-
- // Call this after setting source_ to the input.
- void Init() {
- // Set c0_ (one character ahead)
- STATIC_ASSERT(kCharacterLookaheadBufferSize == 1);
- Advance();
- // Initialize current_ to not refer to a literal.
- current_.literal_chars = NULL;
- }
-
- // Literal buffer support
- inline void StartLiteral() {
- LiteralBuffer* free_buffer = (current_.literal_chars == &literal_buffer1_) ?
- &literal_buffer2_ : &literal_buffer1_;
- free_buffer->Reset();
- next_.literal_chars = free_buffer;
- }
-
- INLINE(void AddLiteralChar(uc32 c)) {
- ASSERT_NOT_NULL(next_.literal_chars);
- next_.literal_chars->AddChar(c);
- }
-
- // Complete scanning of a literal.
- inline void TerminateLiteral() {
- // Does nothing in the current implementation.
- }
-
- // Stops scanning of a literal and drop the collected characters,
- // e.g., due to an encountered error.
- inline void DropLiteral() {
- next_.literal_chars = NULL;
- }
-
- inline void AddLiteralCharAdvance() {
- AddLiteralChar(c0_);
- Advance();
- }
-
- // Low-level scanning support.
- void Advance() { c0_ = source_->Advance(); }
- void PushBack(uc32 ch) {
- source_->PushBack(c0_);
- c0_ = ch;
- }
-
- inline Token::Value Select(Token::Value tok) {
- Advance();
- return tok;
- }
-
- inline Token::Value Select(uc32 next, Token::Value then, Token::Value else_) {
- Advance();
- if (c0_ == next) {
- Advance();
- return then;
- } else {
- return else_;
- }
- }
-
- uc32 ScanHexNumber(int expected_length);
-
- // Scans a single JavaScript token.
- void Scan();
-
- bool SkipWhiteSpace();
- Token::Value SkipSingleLineComment();
- Token::Value SkipMultiLineComment();
- // Scans a possible HTML comment -- begins with '<!'.
- Token::Value ScanHtmlComment();
-
- void ScanDecimalDigits();
- Token::Value ScanNumber(bool seen_period);
- Token::Value ScanIdentifierOrKeyword();
- Token::Value ScanIdentifierSuffix(LiteralScope* literal);
-
- Token::Value ScanString();
-
- // Scans an escape-sequence which is part of a string and adds the
- // decoded character to the current literal. Returns true if a pattern
- // is scanned.
- bool ScanEscape();
- // Decodes a Unicode escape-sequence which is part of an identifier.
- // If the escape sequence cannot be decoded the result is kBadChar.
- uc32 ScanIdentifierUnicodeEscape();
- // Scans a Unicode escape-sequence and adds its characters,
- // uninterpreted, to the current literal. Used for parsing RegExp
- // flags.
- bool ScanLiteralUnicodeEscape();
-
- // Return the current source position.
- int source_pos() {
- return source_->pos() - kCharacterLookaheadBufferSize;
- }
-
- UnicodeCache* unicode_cache_;
-
- // Buffers collecting literal strings, numbers, etc.
- LiteralBuffer literal_buffer1_;
- LiteralBuffer literal_buffer2_;
-
- TokenDesc current_; // desc for current token (as returned by Next())
- TokenDesc next_; // desc for next token (one token look-ahead)
-
- // Input stream. Must be initialized to an Utf16CharacterStream.
- Utf16CharacterStream* source_;
-
-
- // Start position of the octal literal last scanned.
- Location octal_pos_;
-
- // One Unicode character look-ahead; c0_ < 0 at the end of the input.
- uc32 c0_;
-
- // Whether there is a line terminator whitespace character after
- // the current token, and before the next. Does not count newlines
- // inside multiline comments.
- bool has_line_terminator_before_next_;
- // Whether there is a multi-line comment that contains a
- // line-terminator after the current token, and before the next.
- bool has_multiline_comment_before_next_;
- // Whether we scan 'let' as a keyword for harmony block-scoped let bindings.
- bool harmony_scoping_;
- // Whether we scan 'module', 'import', 'export' as keywords.
- bool harmony_modules_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SCANNER_H_
diff --git a/src/3rdparty/v8/src/scopeinfo.cc b/src/3rdparty/v8/src/scopeinfo.cc
deleted file mode 100644
index 6d55e86..0000000
--- a/src/3rdparty/v8/src/scopeinfo.cc
+++ /dev/null
@@ -1,569 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "scopeinfo.h"
-#include "scopes.h"
-
-#include "allocation-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
- // Collect stack and context locals.
- ZoneList<Variable*> stack_locals(scope->StackLocalCount(), zone);
- ZoneList<Variable*> context_locals(scope->ContextLocalCount(), zone);
- scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
- const int stack_local_count = stack_locals.length();
- const int context_local_count = context_locals.length();
- // Make sure we allocate the correct amount.
- ASSERT(scope->StackLocalCount() == stack_local_count);
- ASSERT(scope->ContextLocalCount() == context_local_count);
-
- // Determine use and location of the function variable if it is present.
- FunctionVariableInfo function_name_info;
- VariableMode function_variable_mode;
- if (scope->is_function_scope() && scope->function() != NULL) {
- Variable* var = scope->function()->proxy()->var();
- if (!var->is_used()) {
- function_name_info = UNUSED;
- } else if (var->IsContextSlot()) {
- function_name_info = CONTEXT;
- } else {
- ASSERT(var->IsStackLocal());
- function_name_info = STACK;
- }
- function_variable_mode = var->mode();
- } else {
- function_name_info = NONE;
- function_variable_mode = VAR;
- }
-
- const bool has_function_name = function_name_info != NONE;
- const int parameter_count = scope->num_parameters();
- const int length = kVariablePartIndex
- + parameter_count + stack_local_count + 2 * context_local_count
- + (has_function_name ? 2 : 0);
-
- Handle<ScopeInfo> scope_info = FACTORY->NewScopeInfo(length);
-
- // Encode the flags.
- int flags = TypeField::encode(scope->type()) |
- CallsEvalField::encode(scope->calls_eval()) |
- LanguageModeField::encode(scope->language_mode()) |
- QmlModeField::encode(scope->is_qml_mode()) |
- FunctionVariableField::encode(function_name_info) |
- FunctionVariableMode::encode(function_variable_mode);
- scope_info->SetFlags(flags);
- scope_info->SetParameterCount(parameter_count);
- scope_info->SetStackLocalCount(stack_local_count);
- scope_info->SetContextLocalCount(context_local_count);
-
- int index = kVariablePartIndex;
- // Add parameters.
- ASSERT(index == scope_info->ParameterEntriesIndex());
- for (int i = 0; i < parameter_count; ++i) {
- scope_info->set(index++, *scope->parameter(i)->name());
- }
-
- // Add stack locals' names. We are assuming that the stack locals'
- // slots are allocated in increasing order, so we can simply add
- // them to the ScopeInfo object.
- ASSERT(index == scope_info->StackLocalEntriesIndex());
- for (int i = 0; i < stack_local_count; ++i) {
- ASSERT(stack_locals[i]->index() == i);
- scope_info->set(index++, *stack_locals[i]->name());
- }
-
- // Due to usage analysis, context-allocated locals are not necessarily in
- // increasing order: Some of them may be parameters which are allocated before
- // the non-parameter locals. When the non-parameter locals are sorted
- // according to usage, the allocated slot indices may not be in increasing
- // order with the variable list anymore. Thus, we first need to sort them by
- // context slot index before adding them to the ScopeInfo object.
- context_locals.Sort(&Variable::CompareIndex);
-
- // Add context locals' names.
- ASSERT(index == scope_info->ContextLocalNameEntriesIndex());
- for (int i = 0; i < context_local_count; ++i) {
- scope_info->set(index++, *context_locals[i]->name());
- }
-
- // Add context locals' info.
- ASSERT(index == scope_info->ContextLocalInfoEntriesIndex());
- for (int i = 0; i < context_local_count; ++i) {
- Variable* var = context_locals[i];
- uint32_t value = ContextLocalMode::encode(var->mode()) |
- ContextLocalInitFlag::encode(var->initialization_flag());
- scope_info->set(index++, Smi::FromInt(value));
- }
-
- // If present, add the function variable name and its index.
- ASSERT(index == scope_info->FunctionNameEntryIndex());
- if (has_function_name) {
- int var_index = scope->function()->proxy()->var()->index();
- scope_info->set(index++, *scope->function()->proxy()->name());
- scope_info->set(index++, Smi::FromInt(var_index));
- ASSERT(function_name_info != STACK ||
- (var_index == scope_info->StackLocalCount() &&
- var_index == scope_info->StackSlotCount() - 1));
- ASSERT(function_name_info != CONTEXT ||
- var_index == scope_info->ContextLength() - 1);
- }
-
- ASSERT(index == scope_info->length());
- ASSERT(scope->num_parameters() == scope_info->ParameterCount());
- ASSERT(scope->num_stack_slots() == scope_info->StackSlotCount());
- ASSERT(scope->num_heap_slots() == scope_info->ContextLength() ||
- (scope->num_heap_slots() == kVariablePartIndex &&
- scope_info->ContextLength() == 0));
- return scope_info;
-}
-
-
-ScopeInfo* ScopeInfo::Empty(Isolate* isolate) {
- return reinterpret_cast<ScopeInfo*>(isolate->heap()->empty_fixed_array());
-}
-
-
-ScopeType ScopeInfo::Type() {
- ASSERT(length() > 0);
- return TypeField::decode(Flags());
-}
-
-
-bool ScopeInfo::CallsEval() {
- return length() > 0 && CallsEvalField::decode(Flags());
-}
-
-
-LanguageMode ScopeInfo::language_mode() {
- return length() > 0 ? LanguageModeField::decode(Flags()) : CLASSIC_MODE;
-}
-
-
-bool ScopeInfo::IsQmlMode() {
- return length() > 0 && QmlModeField::decode(Flags());
-}
-
-
-int ScopeInfo::LocalCount() {
- return StackLocalCount() + ContextLocalCount();
-}
-
-
-int ScopeInfo::StackSlotCount() {
- if (length() > 0) {
- bool function_name_stack_slot =
- FunctionVariableField::decode(Flags()) == STACK;
- return StackLocalCount() + (function_name_stack_slot ? 1 : 0);
- }
- return 0;
-}
-
-
-int ScopeInfo::ContextLength(bool qml_function) {
- if (length() > 0) {
- int context_locals = ContextLocalCount();
- bool function_name_context_slot =
- FunctionVariableField::decode(Flags()) == CONTEXT;
- bool has_context = context_locals > 0 ||
- function_name_context_slot ||
- Type() == WITH_SCOPE ||
- (Type() == FUNCTION_SCOPE && CallsEval()) ||
- Type() == MODULE_SCOPE;
- // TODO(pvarga): The QML mode should be checked in the
- // has_context expression.
- if (has_context || qml_function) {
- return Context::MIN_CONTEXT_SLOTS + context_locals +
- (function_name_context_slot ? 1 : 0);
- }
- }
- return 0;
-}
-
-
-bool ScopeInfo::HasFunctionName() {
- if (length() > 0) {
- return NONE != FunctionVariableField::decode(Flags());
- } else {
- return false;
- }
-}
-
-
-bool ScopeInfo::HasHeapAllocatedLocals() {
- if (length() > 0) {
- return ContextLocalCount() > 0;
- } else {
- return false;
- }
-}
-
-
-bool ScopeInfo::HasContext() {
- return ContextLength() > 0;
-}
-
-
-String* ScopeInfo::FunctionName() {
- ASSERT(HasFunctionName());
- return String::cast(get(FunctionNameEntryIndex()));
-}
-
-
-String* ScopeInfo::ParameterName(int var) {
- ASSERT(0 <= var && var < ParameterCount());
- int info_index = ParameterEntriesIndex() + var;
- return String::cast(get(info_index));
-}
-
-
-String* ScopeInfo::LocalName(int var) {
- ASSERT(0 <= var && var < LocalCount());
- ASSERT(StackLocalEntriesIndex() + StackLocalCount() ==
- ContextLocalNameEntriesIndex());
- int info_index = StackLocalEntriesIndex() + var;
- return String::cast(get(info_index));
-}
-
-
-String* ScopeInfo::StackLocalName(int var) {
- ASSERT(0 <= var && var < StackLocalCount());
- int info_index = StackLocalEntriesIndex() + var;
- return String::cast(get(info_index));
-}
-
-
-String* ScopeInfo::ContextLocalName(int var) {
- ASSERT(0 <= var && var < ContextLocalCount());
- int info_index = ContextLocalNameEntriesIndex() + var;
- return String::cast(get(info_index));
-}
-
-
-VariableMode ScopeInfo::ContextLocalMode(int var) {
- ASSERT(0 <= var && var < ContextLocalCount());
- int info_index = ContextLocalInfoEntriesIndex() + var;
- int value = Smi::cast(get(info_index))->value();
- return ContextLocalMode::decode(value);
-}
-
-
-InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
- ASSERT(0 <= var && var < ContextLocalCount());
- int info_index = ContextLocalInfoEntriesIndex() + var;
- int value = Smi::cast(get(info_index))->value();
- return ContextLocalInitFlag::decode(value);
-}
-
-
-int ScopeInfo::StackSlotIndex(String* name) {
- ASSERT(name->IsInternalizedString());
- if (length() > 0) {
- int start = StackLocalEntriesIndex();
- int end = StackLocalEntriesIndex() + StackLocalCount();
- for (int i = start; i < end; ++i) {
- if (name == get(i)) {
- return i - start;
- }
- }
- }
- return -1;
-}
-
-
-int ScopeInfo::ContextSlotIndex(String* name,
- VariableMode* mode,
- InitializationFlag* init_flag) {
- ASSERT(name->IsInternalizedString());
- ASSERT(mode != NULL);
- ASSERT(init_flag != NULL);
- if (length() > 0) {
- ContextSlotCache* context_slot_cache = GetIsolate()->context_slot_cache();
- int result = context_slot_cache->Lookup(this, name, mode, init_flag);
- if (result != ContextSlotCache::kNotFound) {
- ASSERT(result < ContextLength());
- return result;
- }
-
- int start = ContextLocalNameEntriesIndex();
- int end = ContextLocalNameEntriesIndex() + ContextLocalCount();
- for (int i = start; i < end; ++i) {
- if (name == get(i)) {
- int var = i - start;
- *mode = ContextLocalMode(var);
- *init_flag = ContextLocalInitFlag(var);
- result = Context::MIN_CONTEXT_SLOTS + var;
- context_slot_cache->Update(this, name, *mode, *init_flag, result);
- ASSERT(result < ContextLength());
- return result;
- }
- }
- // Cache as not found. Mode and init flag don't matter.
- context_slot_cache->Update(this, name, INTERNAL, kNeedsInitialization, -1);
- }
- return -1;
-}
-
-
-int ScopeInfo::ParameterIndex(String* name) {
- ASSERT(name->IsInternalizedString());
- if (length() > 0) {
- // We must read parameters from the end since for
- // multiply declared parameters the value of the
- // last declaration of that parameter is used
- // inside a function (and thus we need to look
- // at the last index). Was bug# 1110337.
- int start = ParameterEntriesIndex();
- int end = ParameterEntriesIndex() + ParameterCount();
- for (int i = end - 1; i >= start; --i) {
- if (name == get(i)) {
- return i - start;
- }
- }
- }
- return -1;
-}
-
-
-int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
- ASSERT(name->IsInternalizedString());
- ASSERT(mode != NULL);
- if (length() > 0) {
- if (FunctionVariableField::decode(Flags()) == CONTEXT &&
- FunctionName() == name) {
- *mode = FunctionVariableMode::decode(Flags());
- return Smi::cast(get(FunctionNameEntryIndex() + 1))->value();
- }
- }
- return -1;
-}
-
-
-bool ScopeInfo::CopyContextLocalsToScopeObject(
- Isolate* isolate,
- Handle<Context> context,
- Handle<JSObject> scope_object) {
- int local_count = ContextLocalCount();
- if (local_count == 0) return true;
- // Fill all context locals to the context extension.
- int start = ContextLocalNameEntriesIndex();
- int end = start + local_count;
- for (int i = start; i < end; ++i) {
- int context_index = Context::MIN_CONTEXT_SLOTS + i - start;
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(isolate,
- scope_object,
- Handle<String>(String::cast(get(i))),
- Handle<Object>(context->get(context_index), isolate),
- ::NONE,
- kNonStrictMode),
- false);
- }
- return true;
-}
-
-
-int ScopeInfo::ParameterEntriesIndex() {
- ASSERT(length() > 0);
- return kVariablePartIndex;
-}
-
-
-int ScopeInfo::StackLocalEntriesIndex() {
- return ParameterEntriesIndex() + ParameterCount();
-}
-
-
-int ScopeInfo::ContextLocalNameEntriesIndex() {
- return StackLocalEntriesIndex() + StackLocalCount();
-}
-
-
-int ScopeInfo::ContextLocalInfoEntriesIndex() {
- return ContextLocalNameEntriesIndex() + ContextLocalCount();
-}
-
-
-int ScopeInfo::FunctionNameEntryIndex() {
- return ContextLocalInfoEntriesIndex() + ContextLocalCount();
-}
-
-
-int ContextSlotCache::Hash(Object* data, String* name) {
- // Uses only lower 32 bits if pointers are larger.
- uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(data)) >> 2;
- return static_cast<int>((addr_hash ^ name->Hash()) % kLength);
-}
-
-
-int ContextSlotCache::Lookup(Object* data,
- String* name,
- VariableMode* mode,
- InitializationFlag* init_flag) {
- int index = Hash(data, name);
- Key& key = keys_[index];
- if ((key.data == data) && key.name->Equals(name)) {
- Value result(values_[index]);
- if (mode != NULL) *mode = result.mode();
- if (init_flag != NULL) *init_flag = result.initialization_flag();
- return result.index() + kNotFound;
- }
- return kNotFound;
-}
-
-
-void ContextSlotCache::Update(Object* data,
- String* name,
- VariableMode mode,
- InitializationFlag init_flag,
- int slot_index) {
- String* internalized_name;
- ASSERT(slot_index > kNotFound);
- if (HEAP->InternalizeStringIfExists(name, &internalized_name)) {
- int index = Hash(data, internalized_name);
- Key& key = keys_[index];
- key.data = data;
- key.name = internalized_name;
- // Please note value only takes a uint as index.
- values_[index] = Value(mode, init_flag, slot_index - kNotFound).raw();
-#ifdef DEBUG
- ValidateEntry(data, name, mode, init_flag, slot_index);
-#endif
- }
-}
-
-
-void ContextSlotCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].data = NULL;
-}
-
-
-#ifdef DEBUG
-
-void ContextSlotCache::ValidateEntry(Object* data,
- String* name,
- VariableMode mode,
- InitializationFlag init_flag,
- int slot_index) {
- String* internalized_name;
- if (HEAP->InternalizeStringIfExists(name, &internalized_name)) {
- int index = Hash(data, name);
- Key& key = keys_[index];
- ASSERT(key.data == data);
- ASSERT(key.name->Equals(name));
- Value result(values_[index]);
- ASSERT(result.mode() == mode);
- ASSERT(result.initialization_flag() == init_flag);
- ASSERT(result.index() + kNotFound == slot_index);
- }
-}
-
-
-static void PrintList(const char* list_name,
- int nof_internal_slots,
- int start,
- int end,
- ScopeInfo* scope_info) {
- if (start < end) {
- PrintF("\n // %s\n", list_name);
- if (nof_internal_slots > 0) {
- PrintF(" %2d - %2d [internal slots]\n", 0 , nof_internal_slots - 1);
- }
- for (int i = nof_internal_slots; start < end; ++i, ++start) {
- PrintF(" %2d ", i);
- String::cast(scope_info->get(start))->ShortPrint();
- PrintF("\n");
- }
- }
-}
-
-
-void ScopeInfo::Print() {
- PrintF("ScopeInfo ");
- if (HasFunctionName()) {
- FunctionName()->ShortPrint();
- } else {
- PrintF("/* no function name */");
- }
- PrintF("{");
-
- PrintList("parameters", 0,
- ParameterEntriesIndex(),
- ParameterEntriesIndex() + ParameterCount(),
- this);
- PrintList("stack slots", 0,
- StackLocalEntriesIndex(),
- StackLocalEntriesIndex() + StackLocalCount(),
- this);
- PrintList("context slots",
- Context::MIN_CONTEXT_SLOTS,
- ContextLocalNameEntriesIndex(),
- ContextLocalNameEntriesIndex() + ContextLocalCount(),
- this);
-
- PrintF("}\n");
-}
-#endif // DEBUG
-
-
-//---------------------------------------------------------------------------
-// ModuleInfo.
-
-Handle<ModuleInfo> ModuleInfo::Create(
- Isolate* isolate, Interface* interface, Scope* scope) {
- Handle<ModuleInfo> info = Allocate(isolate, interface->Length());
- info->set_host_index(interface->Index());
- int i = 0;
- for (Interface::Iterator it = interface->iterator();
- !it.done(); it.Advance(), ++i) {
- Variable* var = scope->LocalLookup(it.name());
- info->set_name(i, *it.name());
- info->set_mode(i, var->mode());
- ASSERT((var->mode() == MODULE) == (it.interface()->IsModule()));
- if (var->mode() == MODULE) {
- ASSERT(it.interface()->IsFrozen());
- ASSERT(it.interface()->Index() >= 0);
- info->set_index(i, it.interface()->Index());
- } else {
- ASSERT(var->index() >= 0);
- info->set_index(i, var->index());
- }
- }
- ASSERT(i == info->length());
- return info;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/scopeinfo.h b/src/3rdparty/v8/src/scopeinfo.h
deleted file mode 100644
index a884b3b..0000000
--- a/src/3rdparty/v8/src/scopeinfo.h
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SCOPEINFO_H_
-#define V8_SCOPEINFO_H_
-
-#include "allocation.h"
-#include "variables.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Cache for mapping (data, property name) into context slot index.
-// The cache contains both positive and negative results.
-// Slot index equals -1 means the property is absent.
-// Cleared at startup and prior to mark sweep collection.
-class ContextSlotCache {
- public:
- // Lookup context slot index for (data, name).
- // If absent, kNotFound is returned.
- int Lookup(Object* data,
- String* name,
- VariableMode* mode,
- InitializationFlag* init_flag);
-
- // Update an element in the cache.
- void Update(Object* data,
- String* name,
- VariableMode mode,
- InitializationFlag init_flag,
- int slot_index);
-
- // Clear the cache.
- void Clear();
-
- static const int kNotFound = -2;
-
- private:
- ContextSlotCache() {
- for (int i = 0; i < kLength; ++i) {
- keys_[i].data = NULL;
- keys_[i].name = NULL;
- values_[i] = kNotFound;
- }
- }
-
- inline static int Hash(Object* data, String* name);
-
-#ifdef DEBUG
- void ValidateEntry(Object* data,
- String* name,
- VariableMode mode,
- InitializationFlag init_flag,
- int slot_index);
-#endif
-
- static const int kLength = 256;
- struct Key {
- Object* data;
- String* name;
- };
-
- struct Value {
- Value(VariableMode mode,
- InitializationFlag init_flag,
- int index) {
- ASSERT(ModeField::is_valid(mode));
- ASSERT(InitField::is_valid(init_flag));
- ASSERT(IndexField::is_valid(index));
- value_ = ModeField::encode(mode) |
- IndexField::encode(index) |
- InitField::encode(init_flag);
- ASSERT(mode == this->mode());
- ASSERT(init_flag == this->initialization_flag());
- ASSERT(index == this->index());
- }
-
- explicit inline Value(uint32_t value) : value_(value) {}
-
- uint32_t raw() { return value_; }
-
- VariableMode mode() { return ModeField::decode(value_); }
-
- InitializationFlag initialization_flag() {
- return InitField::decode(value_);
- }
-
- int index() { return IndexField::decode(value_); }
-
- // Bit fields in value_ (type, shift, size). Must be public so the
- // constants can be embedded in generated code.
- class ModeField: public BitField<VariableMode, 0, 4> {};
- class InitField: public BitField<InitializationFlag, 4, 1> {};
- class IndexField: public BitField<int, 5, 32-5> {};
-
- private:
- uint32_t value_;
- };
-
- Key keys_[kLength];
- uint32_t values_[kLength];
-
- friend class Isolate;
- DISALLOW_COPY_AND_ASSIGN(ContextSlotCache);
-};
-
-
-
-
-//---------------------------------------------------------------------------
-// Auxiliary class used for the description of module instances.
-// Used by Runtime_DeclareModules.
-
-class ModuleInfo: public FixedArray {
- public:
- static ModuleInfo* cast(Object* description) {
- return static_cast<ModuleInfo*>(FixedArray::cast(description));
- }
-
- static Handle<ModuleInfo> Create(
- Isolate* isolate, Interface* interface, Scope* scope);
-
- // Index of module's context in host context.
- int host_index() { return Smi::cast(get(HOST_OFFSET))->value(); }
-
- // Name, mode, and index of the i-th export, respectively.
- // For value exports, the index is the slot of the value in the module
- // context, for exported modules it is the slot index of the
- // referred module's context in the host context.
- // TODO(rossberg): This format cannot yet handle exports of modules declared
- // in earlier scripts.
- String* name(int i) { return String::cast(get(name_offset(i))); }
- VariableMode mode(int i) {
- return static_cast<VariableMode>(Smi::cast(get(mode_offset(i)))->value());
- }
- int index(int i) { return Smi::cast(get(index_offset(i)))->value(); }
-
- int length() { return (FixedArray::length() - HEADER_SIZE) / ITEM_SIZE; }
-
- private:
- // The internal format is: Index, (Name, VariableMode, Index)*
- enum {
- HOST_OFFSET,
- NAME_OFFSET,
- MODE_OFFSET,
- INDEX_OFFSET,
- HEADER_SIZE = NAME_OFFSET,
- ITEM_SIZE = INDEX_OFFSET - NAME_OFFSET + 1
- };
- inline int name_offset(int i) { return NAME_OFFSET + i * ITEM_SIZE; }
- inline int mode_offset(int i) { return MODE_OFFSET + i * ITEM_SIZE; }
- inline int index_offset(int i) { return INDEX_OFFSET + i * ITEM_SIZE; }
-
- static Handle<ModuleInfo> Allocate(Isolate* isolate, int length) {
- return Handle<ModuleInfo>::cast(
- isolate->factory()->NewFixedArray(HEADER_SIZE + ITEM_SIZE * length));
- }
- void set_host_index(int index) { set(HOST_OFFSET, Smi::FromInt(index)); }
- void set_name(int i, String* name) { set(name_offset(i), name); }
- void set_mode(int i, VariableMode mode) {
- set(mode_offset(i), Smi::FromInt(mode));
- }
- void set_index(int i, int index) {
- set(index_offset(i), Smi::FromInt(index));
- }
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_SCOPEINFO_H_
diff --git a/src/3rdparty/v8/src/scopes.cc b/src/3rdparty/v8/src/scopes.cc
deleted file mode 100644
index 76d3ed4..0000000
--- a/src/3rdparty/v8/src/scopes.cc
+++ /dev/null
@@ -1,1478 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "scopes.h"
-
-#include "accessors.h"
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "messages.h"
-#include "scopeinfo.h"
-
-#include "allocation-inl.h"
-
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Implementation of LocalsMap
-//
-// Note: We are storing the handle locations as key values in the hash map.
-// When inserting a new variable via Declare(), we rely on the fact that
-// the handle location remains alive for the duration of that variable
-// use. Because a Variable holding a handle with the same location exists
-// this is ensured.
-
-static bool Match(void* key1, void* key2) {
- String* name1 = *reinterpret_cast<String**>(key1);
- String* name2 = *reinterpret_cast<String**>(key2);
- ASSERT(name1->IsInternalizedString());
- ASSERT(name2->IsInternalizedString());
- return name1 == name2;
-}
-
-
-VariableMap::VariableMap(Zone* zone)
- : ZoneHashMap(Match, 8, ZoneAllocationPolicy(zone)),
- zone_(zone) {}
-VariableMap::~VariableMap() {}
-
-
-Variable* VariableMap::Declare(
- Scope* scope,
- Handle<String> name,
- VariableMode mode,
- bool is_valid_lhs,
- Variable::Kind kind,
- InitializationFlag initialization_flag,
- Interface* interface) {
- Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), true,
- ZoneAllocationPolicy(zone()));
- if (p->value == NULL) {
- // The variable has not been declared yet -> insert it.
- ASSERT(p->key == name.location());
- p->value = new(zone()) Variable(scope,
- name,
- mode,
- is_valid_lhs,
- kind,
- initialization_flag,
- interface);
- }
- return reinterpret_cast<Variable*>(p->value);
-}
-
-
-Variable* VariableMap::Lookup(Handle<String> name) {
- Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), false,
- ZoneAllocationPolicy(NULL));
- if (p != NULL) {
- ASSERT(*reinterpret_cast<String**>(p->key) == *name);
- ASSERT(p->value != NULL);
- return reinterpret_cast<Variable*>(p->value);
- }
- return NULL;
-}
-
-
-// ----------------------------------------------------------------------------
-// Implementation of Scope
-
-Scope::Scope(Scope* outer_scope, ScopeType type, Zone* zone)
- : isolate_(zone->isolate()),
- inner_scopes_(4, zone),
- variables_(zone),
- internals_(4, zone),
- temps_(4, zone),
- params_(4, zone),
- unresolved_(16, zone),
- decls_(4, zone),
- interface_(FLAG_harmony_modules &&
- (type == MODULE_SCOPE || type == GLOBAL_SCOPE)
- ? Interface::NewModule(zone) : NULL),
- already_resolved_(false),
- zone_(zone) {
- SetDefaults(type, outer_scope, Handle<ScopeInfo>::null());
- // The outermost scope must be a global scope.
- ASSERT(type == GLOBAL_SCOPE || outer_scope != NULL);
- ASSERT(!HasIllegalRedeclaration());
-}
-
-
-Scope::Scope(Scope* inner_scope,
- ScopeType type,
- Handle<ScopeInfo> scope_info,
- Zone* zone)
- : isolate_(Isolate::Current()),
- inner_scopes_(4, zone),
- variables_(zone),
- internals_(4, zone),
- temps_(4, zone),
- params_(4, zone),
- unresolved_(16, zone),
- decls_(4, zone),
- interface_(NULL),
- already_resolved_(true),
- zone_(zone) {
- SetDefaults(type, NULL, scope_info);
- if (!scope_info.is_null()) {
- num_heap_slots_ = scope_info_->ContextLength();
- }
- // Ensure at least MIN_CONTEXT_SLOTS to indicate a materialized context.
- num_heap_slots_ = Max(num_heap_slots_,
- static_cast<int>(Context::MIN_CONTEXT_SLOTS));
- AddInnerScope(inner_scope);
-}
-
-
-Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone)
- : isolate_(Isolate::Current()),
- inner_scopes_(1, zone),
- variables_(zone),
- internals_(0, zone),
- temps_(0, zone),
- params_(0, zone),
- unresolved_(0, zone),
- decls_(0, zone),
- interface_(NULL),
- already_resolved_(true),
- zone_(zone) {
- SetDefaults(CATCH_SCOPE, NULL, Handle<ScopeInfo>::null());
- AddInnerScope(inner_scope);
- ++num_var_or_const_;
- num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
- Variable* variable = variables_.Declare(this,
- catch_variable_name,
- VAR,
- true, // Valid left-hand side.
- Variable::NORMAL,
- kCreatedInitialized);
- AllocateHeapSlot(variable);
-}
-
-
-void Scope::SetDefaults(ScopeType type,
- Scope* outer_scope,
- Handle<ScopeInfo> scope_info) {
- outer_scope_ = outer_scope;
- type_ = type;
- scope_name_ = isolate_->factory()->empty_string();
- dynamics_ = NULL;
- receiver_ = NULL;
- function_ = NULL;
- arguments_ = NULL;
- illegal_redecl_ = NULL;
- scope_inside_with_ = false;
- scope_contains_with_ = false;
- scope_calls_eval_ = false;
- // Inherit the strict mode from the parent scope.
- language_mode_ = (outer_scope != NULL)
- ? outer_scope->language_mode_ : CLASSIC_MODE;
- qml_mode_flag_ = (outer_scope != NULL)
- ? outer_scope->qml_mode_flag_ : kNonQmlMode;
- outer_scope_calls_non_strict_eval_ = false;
- inner_scope_calls_eval_ = false;
- force_eager_compilation_ = false;
- num_var_or_const_ = 0;
- num_stack_slots_ = 0;
- num_heap_slots_ = 0;
- num_modules_ = 0;
- module_var_ = NULL,
- scope_info_ = scope_info;
- start_position_ = RelocInfo::kNoPosition;
- end_position_ = RelocInfo::kNoPosition;
- if (!scope_info.is_null()) {
- scope_calls_eval_ = scope_info->CallsEval();
- language_mode_ = scope_info->language_mode();
- }
-}
-
-
-Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope,
- Zone* zone) {
- // Reconstruct the outer scope chain from a closure's context chain.
- Scope* current_scope = NULL;
- Scope* innermost_scope = NULL;
- bool contains_with = false;
- while (!context->IsNativeContext()) {
- if (context->IsWithContext()) {
- Scope* with_scope = new(zone) Scope(current_scope,
- WITH_SCOPE,
- Handle<ScopeInfo>::null(),
- zone);
- current_scope = with_scope;
- // All the inner scopes are inside a with.
- contains_with = true;
- for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
- s->scope_inside_with_ = true;
- }
- } else if (context->IsGlobalContext()) {
- ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
- current_scope = new(zone) Scope(current_scope,
- GLOBAL_SCOPE,
- Handle<ScopeInfo>(scope_info),
- zone);
- } else if (context->IsModuleContext()) {
- ScopeInfo* scope_info = ScopeInfo::cast(context->module()->scope_info());
- current_scope = new(zone) Scope(current_scope,
- MODULE_SCOPE,
- Handle<ScopeInfo>(scope_info),
- zone);
- } else if (context->IsFunctionContext()) {
- ScopeInfo* scope_info = context->closure()->shared()->scope_info();
- current_scope = new(zone) Scope(current_scope,
- FUNCTION_SCOPE,
- Handle<ScopeInfo>(scope_info),
- zone);
- } else if (context->IsBlockContext()) {
- ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
- current_scope = new(zone) Scope(current_scope,
- BLOCK_SCOPE,
- Handle<ScopeInfo>(scope_info),
- zone);
- } else {
- ASSERT(context->IsCatchContext());
- String* name = String::cast(context->extension());
- current_scope = new(zone) Scope(
- current_scope, Handle<String>(name), zone);
- }
- if (contains_with) current_scope->RecordWithStatement();
- if (innermost_scope == NULL) innermost_scope = current_scope;
-
- // Forget about a with when we move to a context for a different function.
- if (context->previous()->closure() != context->closure()) {
- contains_with = false;
- }
- context = context->previous();
- }
-
- global_scope->AddInnerScope(current_scope);
- global_scope->PropagateScopeInfo(false);
- return (innermost_scope == NULL) ? global_scope : innermost_scope;
-}
-
-
-bool Scope::Analyze(CompilationInfo* info) {
- ASSERT(info->function() != NULL);
- Scope* scope = info->function()->scope();
- Scope* top = scope;
-
- // Traverse the scope tree up to the first unresolved scope or the global
- // scope and start scope resolution and variable allocation from that scope.
- while (!top->is_global_scope() &&
- !top->outer_scope()->already_resolved()) {
- top = top->outer_scope();
- }
-
- // Allocate the variables.
- {
- AstNodeFactory<AstNullVisitor> ast_node_factory(info->isolate(),
- info->zone());
- if (!top->AllocateVariables(info, &ast_node_factory)) return false;
- }
-
-#ifdef DEBUG
- if (info->isolate()->bootstrapper()->IsActive()
- ? FLAG_print_builtin_scopes
- : FLAG_print_scopes) {
- scope->Print();
- }
-
- if (FLAG_harmony_modules && FLAG_print_interfaces && top->is_global_scope()) {
- PrintF("global : ");
- top->interface()->Print();
- }
-#endif
-
- info->SetScope(scope);
- return true;
-}
-
-
-void Scope::Initialize() {
- ASSERT(!already_resolved());
-
- // Add this scope as a new inner scope of the outer scope.
- if (outer_scope_ != NULL) {
- outer_scope_->inner_scopes_.Add(this, zone());
- scope_inside_with_ = outer_scope_->scope_inside_with_ || is_with_scope();
- } else {
- scope_inside_with_ = is_with_scope();
- }
-
- // Declare convenience variables.
- // Declare and allocate receiver (even for the global scope, and even
- // if naccesses_ == 0).
- // NOTE: When loading parameters in the global scope, we must take
- // care not to access them as properties of the global object, but
- // instead load them directly from the stack. Currently, the only
- // such parameter is 'this' which is passed on the stack when
- // invoking scripts
- if (is_declaration_scope()) {
- Variable* var =
- variables_.Declare(this,
- isolate_->factory()->this_string(),
- VAR,
- false,
- Variable::THIS,
- kCreatedInitialized);
- var->AllocateTo(Variable::PARAMETER, -1);
- receiver_ = var;
- } else {
- ASSERT(outer_scope() != NULL);
- receiver_ = outer_scope()->receiver();
- }
-
- if (is_function_scope()) {
- // Declare 'arguments' variable which exists in all functions.
- // Note that it might never be accessed, in which case it won't be
- // allocated during variable allocation.
- variables_.Declare(this,
- isolate_->factory()->arguments_string(),
- VAR,
- true,
- Variable::ARGUMENTS,
- kCreatedInitialized);
- }
-}
-
-
-Scope* Scope::FinalizeBlockScope() {
- ASSERT(is_block_scope());
- ASSERT(internals_.is_empty());
- ASSERT(temps_.is_empty());
- ASSERT(params_.is_empty());
-
- if (num_var_or_const() > 0) return this;
-
- // Remove this scope from outer scope.
- for (int i = 0; i < outer_scope_->inner_scopes_.length(); i++) {
- if (outer_scope_->inner_scopes_[i] == this) {
- outer_scope_->inner_scopes_.Remove(i);
- break;
- }
- }
-
- // Reparent inner scopes.
- for (int i = 0; i < inner_scopes_.length(); i++) {
- outer_scope()->AddInnerScope(inner_scopes_[i]);
- }
-
- // Move unresolved variables
- for (int i = 0; i < unresolved_.length(); i++) {
- outer_scope()->unresolved_.Add(unresolved_[i], zone());
- }
-
- return NULL;
-}
-
-
-Variable* Scope::LocalLookup(Handle<String> name) {
- Variable* result = variables_.Lookup(name);
- if (result != NULL || scope_info_.is_null()) {
- return result;
- }
- // If we have a serialized scope info, we might find the variable there.
- // There should be no local slot with the given name.
- ASSERT(scope_info_->StackSlotIndex(*name) < 0);
-
- // Check context slot lookup.
- VariableMode mode;
- Variable::Location location = Variable::CONTEXT;
- InitializationFlag init_flag;
- int index = scope_info_->ContextSlotIndex(*name, &mode, &init_flag);
- if (index < 0) {
- // Check parameters.
- index = scope_info_->ParameterIndex(*name);
- if (index < 0) return NULL;
-
- mode = DYNAMIC;
- location = Variable::LOOKUP;
- init_flag = kCreatedInitialized;
- }
-
- Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL,
- init_flag);
- var->AllocateTo(location, index);
- return var;
-}
-
-
-Variable* Scope::LookupFunctionVar(Handle<String> name,
- AstNodeFactory<AstNullVisitor>* factory) {
- if (function_ != NULL && function_->proxy()->name().is_identical_to(name)) {
- return function_->proxy()->var();
- } else if (!scope_info_.is_null()) {
- // If we are backed by a scope info, try to lookup the variable there.
- VariableMode mode;
- int index = scope_info_->FunctionContextSlotIndex(*name, &mode);
- if (index < 0) return NULL;
- Variable* var = new(zone()) Variable(
- this, name, mode, true /* is valid LHS */,
- Variable::NORMAL, kCreatedInitialized);
- VariableProxy* proxy = factory->NewVariableProxy(var);
- VariableDeclaration* declaration =
- factory->NewVariableDeclaration(proxy, mode, this);
- DeclareFunctionVar(declaration);
- var->AllocateTo(Variable::CONTEXT, index);
- return var;
- } else {
- return NULL;
- }
-}
-
-
-Variable* Scope::Lookup(Handle<String> name) {
- for (Scope* scope = this;
- scope != NULL;
- scope = scope->outer_scope()) {
- Variable* var = scope->LocalLookup(name);
- if (var != NULL) return var;
- }
- return NULL;
-}
-
-
-void Scope::DeclareParameter(Handle<String> name, VariableMode mode) {
- ASSERT(!already_resolved());
- ASSERT(is_function_scope());
- Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL,
- kCreatedInitialized);
- params_.Add(var, zone());
-}
-
-
-Variable* Scope::DeclareLocal(Handle<String> name,
- VariableMode mode,
- InitializationFlag init_flag,
- Interface* interface) {
- ASSERT(!already_resolved());
- // This function handles VAR and CONST modes. DYNAMIC variables are
- // introduces during variable allocation, INTERNAL variables are allocated
- // explicitly, and TEMPORARY variables are allocated via NewTemporary().
- ASSERT(IsDeclaredVariableMode(mode));
- ++num_var_or_const_;
- return variables_.Declare(
- this, name, mode, true, Variable::NORMAL, init_flag, interface);
-}
-
-
-Variable* Scope::DeclareDynamicGlobal(Handle<String> name) {
- ASSERT(is_global_scope());
- return variables_.Declare(this,
- name,
- DYNAMIC_GLOBAL,
- true,
- Variable::NORMAL,
- kCreatedInitialized);
-}
-
-
-void Scope::RemoveUnresolved(VariableProxy* var) {
- // Most likely (always?) any variable we want to remove
- // was just added before, so we search backwards.
- for (int i = unresolved_.length(); i-- > 0;) {
- if (unresolved_[i] == var) {
- unresolved_.Remove(i);
- return;
- }
- }
-}
-
-
-Variable* Scope::NewInternal(Handle<String> name) {
- ASSERT(!already_resolved());
- Variable* var = new(zone()) Variable(this,
- name,
- INTERNAL,
- false,
- Variable::NORMAL,
- kCreatedInitialized);
- internals_.Add(var, zone());
- return var;
-}
-
-
-Variable* Scope::NewTemporary(Handle<String> name) {
- ASSERT(!already_resolved());
- Variable* var = new(zone()) Variable(this,
- name,
- TEMPORARY,
- true,
- Variable::NORMAL,
- kCreatedInitialized);
- temps_.Add(var, zone());
- return var;
-}
-
-
-void Scope::AddDeclaration(Declaration* declaration) {
- decls_.Add(declaration, zone());
-}
-
-
-void Scope::SetIllegalRedeclaration(Expression* expression) {
- // Record only the first illegal redeclaration.
- if (!HasIllegalRedeclaration()) {
- illegal_redecl_ = expression;
- }
- ASSERT(HasIllegalRedeclaration());
-}
-
-
-void Scope::VisitIllegalRedeclaration(AstVisitor* visitor) {
- ASSERT(HasIllegalRedeclaration());
- illegal_redecl_->Accept(visitor);
-}
-
-
-Declaration* Scope::CheckConflictingVarDeclarations() {
- int length = decls_.length();
- for (int i = 0; i < length; i++) {
- Declaration* decl = decls_[i];
- if (decl->mode() != VAR) continue;
- Handle<String> name = decl->proxy()->name();
-
- // Iterate through all scopes until and including the declaration scope.
- Scope* previous = NULL;
- Scope* current = decl->scope();
- do {
- // There is a conflict if there exists a non-VAR binding.
- Variable* other_var = current->variables_.Lookup(name);
- if (other_var != NULL && other_var->mode() != VAR) {
- return decl;
- }
- previous = current;
- current = current->outer_scope_;
- } while (!previous->is_declaration_scope());
- }
- return NULL;
-}
-
-
-class VarAndOrder {
- public:
- VarAndOrder(Variable* var, int order) : var_(var), order_(order) { }
- Variable* var() const { return var_; }
- int order() const { return order_; }
- static int Compare(const VarAndOrder* a, const VarAndOrder* b) {
- return a->order_ - b->order_;
- }
-
- private:
- Variable* var_;
- int order_;
-};
-
-
-void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
- ZoneList<Variable*>* context_locals) {
- ASSERT(stack_locals != NULL);
- ASSERT(context_locals != NULL);
-
- // Collect internals which are always allocated on the heap.
- for (int i = 0; i < internals_.length(); i++) {
- Variable* var = internals_[i];
- if (var->is_used()) {
- ASSERT(var->IsContextSlot());
- context_locals->Add(var, zone());
- }
- }
-
- // Collect temporaries which are always allocated on the stack.
- for (int i = 0; i < temps_.length(); i++) {
- Variable* var = temps_[i];
- if (var->is_used()) {
- ASSERT(var->IsStackLocal());
- stack_locals->Add(var, zone());
- }
- }
-
- // Collect declared local variables.
- ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
- for (VariableMap::Entry* p = variables_.Start();
- p != NULL;
- p = variables_.Next(p)) {
- Variable* var = reinterpret_cast<Variable*>(p->value);
- if (var->is_used()) {
- vars.Add(VarAndOrder(var, p->order), zone());
- }
- }
- vars.Sort(VarAndOrder::Compare);
- int var_count = vars.length();
- for (int i = 0; i < var_count; i++) {
- Variable* var = vars[i].var();
- if (var->IsStackLocal()) {
- stack_locals->Add(var, zone());
- } else if (var->IsContextSlot()) {
- context_locals->Add(var, zone());
- }
- }
-}
-
-
-bool Scope::AllocateVariables(CompilationInfo* info,
- AstNodeFactory<AstNullVisitor>* factory) {
- // 1) Propagate scope information.
- bool outer_scope_calls_non_strict_eval = false;
- if (outer_scope_ != NULL) {
- outer_scope_calls_non_strict_eval =
- outer_scope_->outer_scope_calls_non_strict_eval() |
- outer_scope_->calls_non_strict_eval();
- }
- PropagateScopeInfo(outer_scope_calls_non_strict_eval);
-
- // 2) Allocate module instances.
- if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
- ASSERT(num_modules_ == 0);
- AllocateModulesRecursively(this);
- }
-
- // 3) Resolve variables.
- if (!ResolveVariablesRecursively(info, factory)) return false;
-
- // 4) Allocate variables.
- AllocateVariablesRecursively();
-
- return true;
-}
-
-
-bool Scope::HasTrivialContext() const {
- // A function scope has a trivial context if it always is the global
- // context. We iteratively scan out the context chain to see if
- // there is anything that makes this scope non-trivial; otherwise we
- // return true.
- for (const Scope* scope = this; scope != NULL; scope = scope->outer_scope_) {
- if (scope->is_eval_scope()) return false;
- if (scope->scope_inside_with_) return false;
- if (scope->num_heap_slots_ > 0) return false;
- }
- return true;
-}
-
-
-bool Scope::HasTrivialOuterContext() const {
- Scope* outer = outer_scope_;
- if (outer == NULL) return true;
- // Note that the outer context may be trivial in general, but the current
- // scope may be inside a 'with' statement in which case the outer context
- // for this scope is not trivial.
- return !scope_inside_with_ && outer->HasTrivialContext();
-}
-
-
-bool Scope::HasLazyCompilableOuterContext() const {
- Scope* outer = outer_scope_;
- if (outer == NULL) return true;
- // We have to prevent lazy compilation if this scope is inside a with scope
- // and all declaration scopes between them have empty contexts. Such
- // declaration scopes may become invisible during scope info deserialization.
- outer = outer->DeclarationScope();
- bool found_non_trivial_declarations = false;
- for (const Scope* scope = outer; scope != NULL; scope = scope->outer_scope_) {
- if (scope->is_with_scope() && !found_non_trivial_declarations) return false;
- if (scope->is_declaration_scope() && scope->num_heap_slots() > 0) {
- found_non_trivial_declarations = true;
- }
- }
- return true;
-}
-
-
-bool Scope::AllowsLazyCompilation() const {
- return !force_eager_compilation_ && HasLazyCompilableOuterContext();
-}
-
-
-bool Scope::AllowsLazyCompilationWithoutContext() const {
- return !force_eager_compilation_ && HasTrivialOuterContext();
-}
-
-
-int Scope::ContextChainLength(Scope* scope) {
- int n = 0;
- for (Scope* s = this; s != scope; s = s->outer_scope_) {
- ASSERT(s != NULL); // scope must be in the scope chain
- if (s->num_heap_slots() > 0) n++;
- }
- return n;
-}
-
-
-Scope* Scope::GlobalScope() {
- Scope* scope = this;
- while (!scope->is_global_scope()) {
- scope = scope->outer_scope();
- }
- return scope;
-}
-
-
-Scope* Scope::DeclarationScope() {
- Scope* scope = this;
- while (!scope->is_declaration_scope()) {
- scope = scope->outer_scope();
- }
- return scope;
-}
-
-
-Handle<ScopeInfo> Scope::GetScopeInfo() {
- if (scope_info_.is_null()) {
- scope_info_ = ScopeInfo::Create(this, zone());
- }
- return scope_info_;
-}
-
-
-void Scope::GetNestedScopeChain(
- List<Handle<ScopeInfo> >* chain,
- int position) {
- if (!is_eval_scope()) chain->Add(Handle<ScopeInfo>(GetScopeInfo()));
-
- for (int i = 0; i < inner_scopes_.length(); i++) {
- Scope* scope = inner_scopes_[i];
- int beg_pos = scope->start_position();
- int end_pos = scope->end_position();
- ASSERT(beg_pos >= 0 && end_pos >= 0);
- if (beg_pos <= position && position < end_pos) {
- scope->GetNestedScopeChain(chain, position);
- return;
- }
- }
-}
-
-
-#ifdef DEBUG
-static const char* Header(ScopeType type) {
- switch (type) {
- case EVAL_SCOPE: return "eval";
- case FUNCTION_SCOPE: return "function";
- case MODULE_SCOPE: return "module";
- case GLOBAL_SCOPE: return "global";
- case CATCH_SCOPE: return "catch";
- case BLOCK_SCOPE: return "block";
- case WITH_SCOPE: return "with";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-static void Indent(int n, const char* str) {
- PrintF("%*s%s", n, "", str);
-}
-
-
-static void PrintName(Handle<String> name) {
- SmartArrayPointer<char> s = name->ToCString(DISALLOW_NULLS);
- PrintF("%s", *s);
-}
-
-
-static void PrintLocation(Variable* var) {
- switch (var->location()) {
- case Variable::UNALLOCATED:
- break;
- case Variable::PARAMETER:
- PrintF("parameter[%d]", var->index());
- break;
- case Variable::LOCAL:
- PrintF("local[%d]", var->index());
- break;
- case Variable::CONTEXT:
- PrintF("context[%d]", var->index());
- break;
- case Variable::LOOKUP:
- PrintF("lookup");
- break;
- }
-}
-
-
-static void PrintVar(int indent, Variable* var) {
- if (var->is_used() || !var->IsUnallocated()) {
- Indent(indent, Variable::Mode2String(var->mode()));
- PrintF(" ");
- PrintName(var->name());
- PrintF("; // ");
- PrintLocation(var);
- if (var->has_forced_context_allocation()) {
- if (!var->IsUnallocated()) PrintF(", ");
- PrintF("forced context allocation");
- }
- PrintF("\n");
- }
-}
-
-
-static void PrintMap(int indent, VariableMap* map) {
- for (VariableMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
- Variable* var = reinterpret_cast<Variable*>(p->value);
- PrintVar(indent, var);
- }
-}
-
-
-void Scope::Print(int n) {
- int n0 = (n > 0 ? n : 0);
- int n1 = n0 + 2; // indentation
-
- // Print header.
- Indent(n0, Header(type_));
- if (scope_name_->length() > 0) {
- PrintF(" ");
- PrintName(scope_name_);
- }
-
- // Print parameters, if any.
- if (is_function_scope()) {
- PrintF(" (");
- for (int i = 0; i < params_.length(); i++) {
- if (i > 0) PrintF(", ");
- PrintName(params_[i]->name());
- }
- PrintF(")");
- }
-
- PrintF(" { // (%d, %d)\n", start_position(), end_position());
-
- // Function name, if any (named function literals, only).
- if (function_ != NULL) {
- Indent(n1, "// (local) function name: ");
- PrintName(function_->proxy()->name());
- PrintF("\n");
- }
-
- // Scope info.
- if (HasTrivialOuterContext()) {
- Indent(n1, "// scope has trivial outer context\n");
- }
- switch (language_mode()) {
- case CLASSIC_MODE:
- break;
- case STRICT_MODE:
- Indent(n1, "// strict mode scope\n");
- break;
- case EXTENDED_MODE:
- Indent(n1, "// extended mode scope\n");
- break;
- }
- if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
- if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
- if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
- if (outer_scope_calls_non_strict_eval_) {
- Indent(n1, "// outer scope calls 'eval' in non-strict context\n");
- }
- if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
- if (num_stack_slots_ > 0) { Indent(n1, "// ");
- PrintF("%d stack slots\n", num_stack_slots_); }
- if (num_heap_slots_ > 0) { Indent(n1, "// ");
- PrintF("%d heap slots\n", num_heap_slots_); }
-
- // Print locals.
- Indent(n1, "// function var\n");
- if (function_ != NULL) {
- PrintVar(n1, function_->proxy()->var());
- }
-
- Indent(n1, "// temporary vars\n");
- for (int i = 0; i < temps_.length(); i++) {
- PrintVar(n1, temps_[i]);
- }
-
- Indent(n1, "// internal vars\n");
- for (int i = 0; i < internals_.length(); i++) {
- PrintVar(n1, internals_[i]);
- }
-
- Indent(n1, "// local vars\n");
- PrintMap(n1, &variables_);
-
- Indent(n1, "// dynamic vars\n");
- if (dynamics_ != NULL) {
- PrintMap(n1, dynamics_->GetMap(DYNAMIC));
- PrintMap(n1, dynamics_->GetMap(DYNAMIC_LOCAL));
- PrintMap(n1, dynamics_->GetMap(DYNAMIC_GLOBAL));
- }
-
- // Print inner scopes (disable by providing negative n).
- if (n >= 0) {
- for (int i = 0; i < inner_scopes_.length(); i++) {
- PrintF("\n");
- inner_scopes_[i]->Print(n1);
- }
- }
-
- Indent(n0, "}\n");
-}
-#endif // DEBUG
-
-
-Variable* Scope::NonLocal(Handle<String> name, VariableMode mode) {
- if (dynamics_ == NULL) dynamics_ = new(zone()) DynamicScopePart(zone());
- VariableMap* map = dynamics_->GetMap(mode);
- Variable* var = map->Lookup(name);
- if (var == NULL) {
- // Declare a new non-local.
- InitializationFlag init_flag = (mode == VAR)
- ? kCreatedInitialized : kNeedsInitialization;
- var = map->Declare(NULL,
- name,
- mode,
- true,
- Variable::NORMAL,
- init_flag);
- // Allocate it by giving it a dynamic lookup.
- var->AllocateTo(Variable::LOOKUP, -1);
- }
- return var;
-}
-
-
-Variable* Scope::LookupRecursive(Handle<String> name,
- BindingKind* binding_kind,
- AstNodeFactory<AstNullVisitor>* factory) {
- ASSERT(binding_kind != NULL);
- // Try to find the variable in this scope.
- Variable* var = LocalLookup(name);
-
- // We found a variable and we are done. (Even if there is an 'eval' in
- // this scope which introduces the same variable again, the resulting
- // variable remains the same.)
- if (var != NULL) {
- *binding_kind = BOUND;
- return var;
- }
-
- // We did not find a variable locally. Check against the function variable,
- // if any. We can do this for all scopes, since the function variable is
- // only present - if at all - for function scopes.
- *binding_kind = UNBOUND;
- var = LookupFunctionVar(name, factory);
- if (var != NULL) {
- *binding_kind = BOUND;
- } else if (outer_scope_ != NULL) {
- var = outer_scope_->LookupRecursive(name, binding_kind, factory);
- if (*binding_kind == BOUND && (is_function_scope() || is_with_scope())) {
- var->ForceContextAllocation();
- }
- } else {
- ASSERT(is_global_scope());
- }
-
- if (is_with_scope()) {
- // The current scope is a with scope, so the variable binding can not be
- // statically resolved. However, note that it was necessary to do a lookup
- // in the outer scope anyway, because if a binding exists in an outer scope,
- // the associated variable has to be marked as potentially being accessed
- // from inside of an inner with scope (the property may not be in the 'with'
- // object).
- *binding_kind = DYNAMIC_LOOKUP;
- return NULL;
- } else if (calls_non_strict_eval()) {
- // A variable binding may have been found in an outer scope, but the current
- // scope makes a non-strict 'eval' call, so the found variable may not be
- // the correct one (the 'eval' may introduce a binding with the same name).
- // In that case, change the lookup result to reflect this situation.
- if (*binding_kind == BOUND) {
- *binding_kind = BOUND_EVAL_SHADOWED;
- } else if (*binding_kind == UNBOUND) {
- *binding_kind = UNBOUND_EVAL_SHADOWED;
- }
- }
- return var;
-}
-
-
-bool Scope::ResolveVariable(CompilationInfo* info,
- VariableProxy* proxy,
- AstNodeFactory<AstNullVisitor>* factory) {
- ASSERT(info->global_scope()->is_global_scope());
-
- // If the proxy is already resolved there's nothing to do
- // (functions and consts may be resolved by the parser).
- if (proxy->var() != NULL) return true;
-
- // Otherwise, try to resolve the variable.
- BindingKind binding_kind;
- Variable* var = LookupRecursive(proxy->name(), &binding_kind, factory);
- switch (binding_kind) {
- case BOUND:
- // We found a variable binding.
- if (is_qml_mode()) {
- Handle<GlobalObject> global = isolate_->global_object();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
- // Get the context before the debugger was entered.
- SaveContext *save = isolate_->save_context();
- while (save != NULL &&
- *save->context() == *isolate_->debug()->debug_context()) {
- save = save->prev();
- }
-
- global = Handle<GlobalObject>(save->context()->global_object());
- }
-#endif
-
- if (!global->HasProperty(*(proxy->name()))) {
- var->set_is_qml_global(true);
- }
- }
- break;
-
- case BOUND_EVAL_SHADOWED:
- // We either found a variable binding that might be shadowed by eval or
- // gave up on it (e.g. by encountering a local with the same in the outer
- // scope which was not promoted to a context, this can happen if we use
- // debugger to evaluate arbitrary expressions at a break point).
- if (var->IsGlobalObjectProperty()) {
- var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
-
- if (is_qml_mode()) {
- Handle<GlobalObject> global = isolate_->global_object();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (isolate_->debug()->IsLoaded() &&
- isolate_->debug()->InDebugger()) {
- // Get the context before the debugger was entered.
- SaveContext *save = isolate_->save_context();
- while (save != NULL &&
- *save->context() == *isolate_->debug()->debug_context()) {
- save = save->prev();
- }
-
- global = Handle<GlobalObject>(save->context()->global_object());
- }
-#endif
-
- if (!global->HasProperty(*(proxy->name()))) {
- var->set_is_qml_global(true);
- }
- }
- } else if (var->is_dynamic()) {
- var = NonLocal(proxy->name(), DYNAMIC);
- } else {
- Variable* invalidated = var;
- var = NonLocal(proxy->name(), DYNAMIC_LOCAL);
- var->set_local_if_not_shadowed(invalidated);
- }
- break;
-
- case UNBOUND:
- // No binding has been found. Declare a variable on the global object.
- var = info->global_scope()->DeclareDynamicGlobal(proxy->name());
-
- if (is_qml_mode()) {
- Handle<GlobalObject> global = isolate_->global_object();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
- // Get the context before the debugger was entered.
- SaveContext *save = isolate_->save_context();
- while (save != NULL &&
- *save->context() == *isolate_->debug()->debug_context()) {
- save = save->prev();
- }
-
- global = Handle<GlobalObject>(save->context()->global_object());
- }
-#endif
-
- if (!global->HasProperty(*(proxy->name()))) {
- var->set_is_qml_global(true);
- }
- }
-
- break;
-
- case UNBOUND_EVAL_SHADOWED:
- // No binding has been found. But some scope makes a
- // non-strict 'eval' call.
- var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
-
- if (is_qml_mode()) {
- Handle<GlobalObject> global = isolate_->global_object();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
- // Get the context before the debugger was entered.
- SaveContext *save = isolate_->save_context();
- while (save != NULL &&
- *save->context() == *isolate_->debug()->debug_context()) {
- save = save->prev();
- }
-
- global = Handle<GlobalObject>(save->context()->global_object());
- }
-#endif
-
- if (!global->HasProperty(*(proxy->name()))) {
- var->set_is_qml_global(true);
- }
- }
-
- break;
-
- case DYNAMIC_LOOKUP:
- // The variable could not be resolved statically.
- var = NonLocal(proxy->name(), DYNAMIC);
- break;
- }
-
- ASSERT(var != NULL);
-
- if (FLAG_harmony_scoping && is_extended_mode() &&
- var->is_const_mode() && proxy->IsLValue()) {
- // Assignment to const. Throw a syntax error.
- MessageLocation location(
- info->script(), proxy->position(), proxy->position());
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Handle<JSArray> array = factory->NewJSArray(0);
- Handle<Object> result =
- factory->NewSyntaxError("harmony_const_assign", array);
- isolate->Throw(*result, &location);
- return false;
- }
-
- if (FLAG_harmony_modules) {
- bool ok;
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Resolve %s:\n", var->name()->ToAsciiArray());
-#endif
- proxy->interface()->Unify(var->interface(), zone(), &ok);
- if (!ok) {
-#ifdef DEBUG
- if (FLAG_print_interfaces) {
- PrintF("SCOPES TYPE ERROR\n");
- PrintF("proxy: ");
- proxy->interface()->Print();
- PrintF("var: ");
- var->interface()->Print();
- }
-#endif
-
- // Inconsistent use of module. Throw a syntax error.
- // TODO(rossberg): generate more helpful error message.
- MessageLocation location(
- info->script(), proxy->position(), proxy->position());
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Handle<JSArray> array = factory->NewJSArray(1);
- USE(JSObject::SetElement(array, 0, var->name(), NONE, kStrictMode));
- Handle<Object> result =
- factory->NewSyntaxError("module_type_error", array);
- isolate->Throw(*result, &location);
- return false;
- }
- }
-
- proxy->BindTo(var);
-
- return true;
-}
-
-
-bool Scope::ResolveVariablesRecursively(
- CompilationInfo* info,
- AstNodeFactory<AstNullVisitor>* factory) {
- ASSERT(info->global_scope()->is_global_scope());
-
- // Resolve unresolved variables for this scope.
- for (int i = 0; i < unresolved_.length(); i++) {
- if (!ResolveVariable(info, unresolved_[i], factory)) return false;
- }
-
- // Resolve unresolved variables for inner scopes.
- for (int i = 0; i < inner_scopes_.length(); i++) {
- if (!inner_scopes_[i]->ResolveVariablesRecursively(info, factory))
- return false;
- }
-
- return true;
-}
-
-
-bool Scope::PropagateScopeInfo(bool outer_scope_calls_non_strict_eval ) {
- if (outer_scope_calls_non_strict_eval) {
- outer_scope_calls_non_strict_eval_ = true;
- }
-
- bool calls_non_strict_eval =
- this->calls_non_strict_eval() || outer_scope_calls_non_strict_eval_;
- for (int i = 0; i < inner_scopes_.length(); i++) {
- Scope* inner_scope = inner_scopes_[i];
- if (inner_scope->PropagateScopeInfo(calls_non_strict_eval)) {
- inner_scope_calls_eval_ = true;
- }
- if (inner_scope->force_eager_compilation_) {
- force_eager_compilation_ = true;
- }
- }
-
- return scope_calls_eval_ || inner_scope_calls_eval_;
-}
-
-
-bool Scope::MustAllocate(Variable* var) {
- // Give var a read/write use if there is a chance it might be accessed
- // via an eval() call. This is only possible if the variable has a
- // visible name.
- if ((var->is_this() || var->name()->length() > 0) &&
- (var->has_forced_context_allocation() ||
- scope_calls_eval_ ||
- inner_scope_calls_eval_ ||
- scope_contains_with_ ||
- is_catch_scope() ||
- is_block_scope() ||
- is_module_scope() ||
- is_global_scope())) {
- var->set_is_used(true);
- }
- // Global variables do not need to be allocated.
- return !var->IsGlobalObjectProperty() && var->is_used();
-}
-
-
-bool Scope::MustAllocateInContext(Variable* var) {
- // If var is accessed from an inner scope, or if there is a possibility
- // that it might be accessed from the current or an inner scope (through
- // an eval() call or a runtime with lookup), it must be allocated in the
- // context.
- //
- // Exceptions: temporary variables are never allocated in a context;
- // catch-bound variables are always allocated in a context.
- if (var->mode() == TEMPORARY) return false;
- if (var->mode() == INTERNAL) return true;
- if (is_catch_scope() || is_block_scope() || is_module_scope()) return true;
- if (is_global_scope() && IsLexicalVariableMode(var->mode())) return true;
- return var->has_forced_context_allocation() ||
- scope_calls_eval_ ||
- inner_scope_calls_eval_ ||
- scope_contains_with_;
-}
-
-
-bool Scope::HasArgumentsParameter() {
- for (int i = 0; i < params_.length(); i++) {
- if (params_[i]->name().is_identical_to(
- isolate_->factory()->arguments_string())) {
- return true;
- }
- }
- return false;
-}
-
-
-void Scope::AllocateStackSlot(Variable* var) {
- var->AllocateTo(Variable::LOCAL, num_stack_slots_++);
-}
-
-
-void Scope::AllocateHeapSlot(Variable* var) {
- var->AllocateTo(Variable::CONTEXT, num_heap_slots_++);
-}
-
-
-void Scope::AllocateParameterLocals() {
- ASSERT(is_function_scope());
- Variable* arguments = LocalLookup(isolate_->factory()->arguments_string());
- ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly
-
- bool uses_nonstrict_arguments = false;
-
- if (MustAllocate(arguments) && !HasArgumentsParameter()) {
- // 'arguments' is used. Unless there is also a parameter called
- // 'arguments', we must be conservative and allocate all parameters to
- // the context assuming they will be captured by the arguments object.
- // If we have a parameter named 'arguments', a (new) value is always
- // assigned to it via the function invocation. Then 'arguments' denotes
- // that specific parameter value and cannot be used to access the
- // parameters, which is why we don't need to allocate an arguments
- // object in that case.
-
- // We are using 'arguments'. Tell the code generator that is needs to
- // allocate the arguments object by setting 'arguments_'.
- arguments_ = arguments;
-
- // In strict mode 'arguments' does not alias formal parameters.
- // Therefore in strict mode we allocate parameters as if 'arguments'
- // were not used.
- uses_nonstrict_arguments = is_classic_mode();
- }
-
- // The same parameter may occur multiple times in the parameters_ list.
- // If it does, and if it is not copied into the context object, it must
- // receive the highest parameter index for that parameter; thus iteration
- // order is relevant!
- for (int i = params_.length() - 1; i >= 0; --i) {
- Variable* var = params_[i];
- ASSERT(var->scope() == this);
- if (uses_nonstrict_arguments) {
- // Force context allocation of the parameter.
- var->ForceContextAllocation();
- }
-
- if (MustAllocate(var)) {
- if (MustAllocateInContext(var)) {
- ASSERT(var->IsUnallocated() || var->IsContextSlot());
- if (var->IsUnallocated()) {
- AllocateHeapSlot(var);
- }
- } else {
- ASSERT(var->IsUnallocated() || var->IsParameter());
- if (var->IsUnallocated()) {
- var->AllocateTo(Variable::PARAMETER, i);
- }
- }
- }
- }
-}
-
-
-void Scope::AllocateNonParameterLocal(Variable* var) {
- ASSERT(var->scope() == this);
- ASSERT(!var->IsVariable(isolate_->factory()->result_string()) ||
- !var->IsStackLocal());
- if (var->IsUnallocated() && MustAllocate(var)) {
- if (MustAllocateInContext(var)) {
- AllocateHeapSlot(var);
- } else {
- AllocateStackSlot(var);
- }
- }
-}
-
-
-void Scope::AllocateNonParameterLocals() {
- // All variables that have no rewrite yet are non-parameter locals.
- for (int i = 0; i < temps_.length(); i++) {
- AllocateNonParameterLocal(temps_[i]);
- }
-
- for (int i = 0; i < internals_.length(); i++) {
- AllocateNonParameterLocal(internals_[i]);
- }
-
- ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
- for (VariableMap::Entry* p = variables_.Start();
- p != NULL;
- p = variables_.Next(p)) {
- Variable* var = reinterpret_cast<Variable*>(p->value);
- vars.Add(VarAndOrder(var, p->order), zone());
- }
- vars.Sort(VarAndOrder::Compare);
- int var_count = vars.length();
- for (int i = 0; i < var_count; i++) {
- AllocateNonParameterLocal(vars[i].var());
- }
-
- // For now, function_ must be allocated at the very end. If it gets
- // allocated in the context, it must be the last slot in the context,
- // because of the current ScopeInfo implementation (see
- // ScopeInfo::ScopeInfo(FunctionScope* scope) constructor).
- if (function_ != NULL) {
- AllocateNonParameterLocal(function_->proxy()->var());
- }
-}
-
-
-void Scope::AllocateVariablesRecursively() {
- // Allocate variables for inner scopes.
- for (int i = 0; i < inner_scopes_.length(); i++) {
- inner_scopes_[i]->AllocateVariablesRecursively();
- }
-
- // If scope is already resolved, we still need to allocate
- // variables in inner scopes which might not had been resolved yet.
- if (already_resolved()) return;
- // The number of slots required for variables.
- num_stack_slots_ = 0;
- num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
-
- // Allocate variables for this scope.
- // Parameters must be allocated first, if any.
- if (is_function_scope()) AllocateParameterLocals();
- AllocateNonParameterLocals();
-
- // Force allocation of a context for this scope if necessary. For a 'with'
- // scope and for a function scope that makes an 'eval' call we need a context,
- // even if no local variables were statically allocated in the scope.
- // Likewise for modules.
- bool must_have_context = is_with_scope() || is_module_scope() ||
- (is_function_scope() && calls_eval());
-
- // If we didn't allocate any locals in the local context, then we only
- // need the minimal number of slots if we must have a context.
- if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS && !must_have_context) {
- num_heap_slots_ = 0;
- }
-
- // Allocation done.
- ASSERT(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
-}
-
-
-void Scope::AllocateModulesRecursively(Scope* host_scope) {
- if (already_resolved()) return;
- if (is_module_scope()) {
- ASSERT(interface_->IsFrozen());
- Handle<String> name = isolate_->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR(".module"));
- ASSERT(module_var_ == NULL);
- module_var_ = host_scope->NewInternal(name);
- ++host_scope->num_modules_;
- }
-
- for (int i = 0; i < inner_scopes_.length(); i++) {
- Scope* inner_scope = inner_scopes_.at(i);
- inner_scope->AllocateModulesRecursively(host_scope);
- }
-}
-
-
-int Scope::StackLocalCount() const {
- return num_stack_slots() -
- (function_ != NULL && function_->proxy()->var()->IsStackLocal() ? 1 : 0);
-}
-
-
-int Scope::ContextLocalCount() const {
- if (num_heap_slots() == 0) return 0;
- return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
- (function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/scopes.h b/src/3rdparty/v8/src/scopes.h
deleted file mode 100644
index e2e7cd1..0000000
--- a/src/3rdparty/v8/src/scopes.h
+++ /dev/null
@@ -1,648 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SCOPES_H_
-#define V8_SCOPES_H_
-
-#include "ast.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-class CompilationInfo;
-
-
-// A hash map to support fast variable declaration and lookup.
-class VariableMap: public ZoneHashMap {
- public:
- explicit VariableMap(Zone* zone);
-
- virtual ~VariableMap();
-
- Variable* Declare(Scope* scope,
- Handle<String> name,
- VariableMode mode,
- bool is_valid_lhs,
- Variable::Kind kind,
- InitializationFlag initialization_flag,
- Interface* interface = Interface::NewValue());
-
- Variable* Lookup(Handle<String> name);
-
- Zone* zone() const { return zone_; }
-
- private:
- Zone* zone_;
-};
-
-
-// The dynamic scope part holds hash maps for the variables that will
-// be looked up dynamically from within eval and with scopes. The objects
-// are allocated on-demand from Scope::NonLocal to avoid wasting memory
-// and setup time for scopes that don't need them.
-class DynamicScopePart : public ZoneObject {
- public:
- explicit DynamicScopePart(Zone* zone) {
- for (int i = 0; i < 3; i++)
- maps_[i] = new(zone->New(sizeof(VariableMap))) VariableMap(zone);
- }
-
- VariableMap* GetMap(VariableMode mode) {
- int index = mode - DYNAMIC;
- ASSERT(index >= 0 && index < 3);
- return maps_[index];
- }
-
- private:
- VariableMap *maps_[3];
-};
-
-
-// Global invariants after AST construction: Each reference (i.e. identifier)
-// to a JavaScript variable (including global properties) is represented by a
-// VariableProxy node. Immediately after AST construction and before variable
-// allocation, most VariableProxy nodes are "unresolved", i.e. not bound to a
-// corresponding variable (though some are bound during parse time). Variable
-// allocation binds each unresolved VariableProxy to one Variable and assigns
-// a location. Note that many VariableProxy nodes may refer to the same Java-
-// Script variable.
-
-class Scope: public ZoneObject {
- public:
- // ---------------------------------------------------------------------------
- // Construction
-
- Scope(Scope* outer_scope, ScopeType type, Zone* zone);
-
- // Compute top scope and allocate variables. For lazy compilation the top
- // scope only contains the single lazily compiled function, so this
- // doesn't re-allocate variables repeatedly.
- static bool Analyze(CompilationInfo* info);
-
- static Scope* DeserializeScopeChain(Context* context, Scope* global_scope,
- Zone* zone);
-
- // The scope name is only used for printing/debugging.
- void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
-
- void Initialize();
-
- // Checks if the block scope is redundant, i.e. it does not contain any
- // block scoped declarations. In that case it is removed from the scope
- // tree and its children are reparented.
- Scope* FinalizeBlockScope();
-
- Zone* zone() const { return zone_; }
-
- // ---------------------------------------------------------------------------
- // Declarations
-
- // Lookup a variable in this scope. Returns the variable or NULL if not found.
- Variable* LocalLookup(Handle<String> name);
-
- // This lookup corresponds to a lookup in the "intermediate" scope sitting
- // between this scope and the outer scope. (ECMA-262, 3rd., requires that
- // the name of named function literal is kept in an intermediate scope
- // in between this scope and the next outer scope.)
- Variable* LookupFunctionVar(Handle<String> name,
- AstNodeFactory<AstNullVisitor>* factory);
-
- // Lookup a variable in this scope or outer scopes.
- // Returns the variable or NULL if not found.
- Variable* Lookup(Handle<String> name);
-
- // Declare the function variable for a function literal. This variable
- // is in an intermediate scope between this function scope and the the
- // outer scope. Only possible for function scopes; at most one variable.
- void DeclareFunctionVar(VariableDeclaration* declaration) {
- ASSERT(is_function_scope());
- function_ = declaration;
- }
-
- // Declare a parameter in this scope. When there are duplicated
- // parameters the rightmost one 'wins'. However, the implementation
- // expects all parameters to be declared and from left to right.
- void DeclareParameter(Handle<String> name, VariableMode mode);
-
- // Declare a local variable in this scope. If the variable has been
- // declared before, the previously declared variable is returned.
- Variable* DeclareLocal(Handle<String> name,
- VariableMode mode,
- InitializationFlag init_flag,
- Interface* interface = Interface::NewValue());
-
- // Declare an implicit global variable in this scope which must be a
- // global scope. The variable was introduced (possibly from an inner
- // scope) by a reference to an unresolved variable with no intervening
- // with statements or eval calls.
- Variable* DeclareDynamicGlobal(Handle<String> name);
-
- // Create a new unresolved variable.
- template<class Visitor>
- VariableProxy* NewUnresolved(AstNodeFactory<Visitor>* factory,
- Handle<String> name,
- Interface* interface = Interface::NewValue(),
- int position = RelocInfo::kNoPosition) {
- // Note that we must not share the unresolved variables with
- // the same name because they may be removed selectively via
- // RemoveUnresolved().
- ASSERT(!already_resolved());
- VariableProxy* proxy =
- factory->NewVariableProxy(name, false, interface, position);
- unresolved_.Add(proxy, zone_);
- return proxy;
- }
-
- // Remove a unresolved variable. During parsing, an unresolved variable
- // may have been added optimistically, but then only the variable name
- // was used (typically for labels). If the variable was not declared, the
- // addition introduced a new unresolved variable which may end up being
- // allocated globally as a "ghost" variable. RemoveUnresolved removes
- // such a variable again if it was added; otherwise this is a no-op.
- void RemoveUnresolved(VariableProxy* var);
-
- // Creates a new internal variable in this scope. The name is only used
- // for printing and cannot be used to find the variable. In particular,
- // the only way to get hold of the temporary is by keeping the Variable*
- // around.
- Variable* NewInternal(Handle<String> name);
-
- // Creates a new temporary variable in this scope. The name is only used
- // for printing and cannot be used to find the variable. In particular,
- // the only way to get hold of the temporary is by keeping the Variable*
- // around. The name should not clash with a legitimate variable names.
- Variable* NewTemporary(Handle<String> name);
-
- // Adds the specific declaration node to the list of declarations in
- // this scope. The declarations are processed as part of entering
- // the scope; see codegen.cc:ProcessDeclarations.
- void AddDeclaration(Declaration* declaration);
-
- // ---------------------------------------------------------------------------
- // Illegal redeclaration support.
-
- // Set an expression node that will be executed when the scope is
- // entered. We only keep track of one illegal redeclaration node per
- // scope - the first one - so if you try to set it multiple times
- // the additional requests will be silently ignored.
- void SetIllegalRedeclaration(Expression* expression);
-
- // Visit the illegal redeclaration expression. Do not call if the
- // scope doesn't have an illegal redeclaration node.
- void VisitIllegalRedeclaration(AstVisitor* visitor);
-
- // Check if the scope has (at least) one illegal redeclaration.
- bool HasIllegalRedeclaration() const { return illegal_redecl_ != NULL; }
-
- // For harmony block scoping mode: Check if the scope has conflicting var
- // declarations, i.e. a var declaration that has been hoisted from a nested
- // scope over a let binding of the same name.
- Declaration* CheckConflictingVarDeclarations();
-
- // ---------------------------------------------------------------------------
- // Scope-specific info.
-
- // Inform the scope that the corresponding code contains a with statement.
- void RecordWithStatement() { scope_contains_with_ = true; }
-
- // Inform the scope that the corresponding code contains an eval call.
- void RecordEvalCall() { if (!is_global_scope()) scope_calls_eval_ = true; }
-
- // Set the strict mode flag (unless disabled by a global flag).
- void SetLanguageMode(LanguageMode language_mode) {
- language_mode_ = language_mode;
- }
-
- // Enable qml mode for this scope
- void EnableQmlModeFlag() {
- qml_mode_flag_ = kQmlMode;
- }
-
- // Position in the source where this scope begins and ends.
- //
- // * For the scope of a with statement
- // with (obj) stmt
- // start position: start position of first token of 'stmt'
- // end position: end position of last token of 'stmt'
- // * For the scope of a block
- // { stmts }
- // start position: start position of '{'
- // end position: end position of '}'
- // * For the scope of a function literal or decalaration
- // function fun(a,b) { stmts }
- // start position: start position of '('
- // end position: end position of '}'
- // * For the scope of a catch block
- // try { stms } catch(e) { stmts }
- // start position: start position of '('
- // end position: end position of ')'
- // * For the scope of a for-statement
- // for (let x ...) stmt
- // start position: start position of '('
- // end position: end position of last token of 'stmt'
- int start_position() const { return start_position_; }
- void set_start_position(int statement_pos) {
- start_position_ = statement_pos;
- }
- int end_position() const { return end_position_; }
- void set_end_position(int statement_pos) {
- end_position_ = statement_pos;
- }
-
- // ---------------------------------------------------------------------------
- // Predicates.
-
- // Specific scope types.
- bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
- bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
- bool is_module_scope() const { return type_ == MODULE_SCOPE; }
- bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
- bool is_catch_scope() const { return type_ == CATCH_SCOPE; }
- bool is_block_scope() const { return type_ == BLOCK_SCOPE; }
- bool is_with_scope() const { return type_ == WITH_SCOPE; }
- bool is_declaration_scope() const {
- return is_eval_scope() || is_function_scope() ||
- is_module_scope() || is_global_scope();
- }
- bool is_classic_mode() const {
- return language_mode() == CLASSIC_MODE;
- }
- bool is_extended_mode() const {
- return language_mode() == EXTENDED_MODE;
- }
- bool is_strict_or_extended_eval_scope() const {
- return is_eval_scope() && !is_classic_mode();
- }
- bool is_qml_mode() const { return qml_mode_flag() == kQmlMode; }
-
- // Information about which scopes calls eval.
- bool calls_eval() const { return scope_calls_eval_; }
- bool calls_non_strict_eval() {
- return scope_calls_eval_ && is_classic_mode();
- }
- bool outer_scope_calls_non_strict_eval() const {
- return outer_scope_calls_non_strict_eval_;
- }
-
- // Is this scope inside a with statement.
- bool inside_with() const { return scope_inside_with_; }
- // Does this scope contain a with statement.
- bool contains_with() const { return scope_contains_with_; }
-
- // ---------------------------------------------------------------------------
- // Accessors.
-
- // The type of this scope.
- ScopeType type() const { return type_; }
-
- // The language mode of this scope.
- LanguageMode language_mode() const { return language_mode_; }
-
- // The strict mode of this scope.
- QmlModeFlag qml_mode_flag() const { return qml_mode_flag_; }
-
- // The variable corresponding the 'this' value.
- Variable* receiver() { return receiver_; }
-
- // The variable holding the function literal for named function
- // literals, or NULL. Only valid for function scopes.
- VariableDeclaration* function() const {
- ASSERT(is_function_scope());
- return function_;
- }
-
- // Parameters. The left-most parameter has index 0.
- // Only valid for function scopes.
- Variable* parameter(int index) const {
- ASSERT(is_function_scope());
- return params_[index];
- }
-
- int num_parameters() const { return params_.length(); }
-
- // The local variable 'arguments' if we need to allocate it; NULL otherwise.
- Variable* arguments() const { return arguments_; }
-
- // Declarations list.
- ZoneList<Declaration*>* declarations() { return &decls_; }
-
- // Inner scope list.
- ZoneList<Scope*>* inner_scopes() { return &inner_scopes_; }
-
- // The scope immediately surrounding this scope, or NULL.
- Scope* outer_scope() const { return outer_scope_; }
-
- // The interface as inferred so far; only for module scopes.
- Interface* interface() const { return interface_; }
-
- // ---------------------------------------------------------------------------
- // Variable allocation.
-
- // Collect stack and context allocated local variables in this scope. Note
- // that the function variable - if present - is not collected and should be
- // handled separately.
- void CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
- ZoneList<Variable*>* context_locals);
-
- // Current number of var or const locals.
- int num_var_or_const() { return num_var_or_const_; }
-
- // Result of variable allocation.
- int num_stack_slots() const { return num_stack_slots_; }
- int num_heap_slots() const { return num_heap_slots_; }
-
- int StackLocalCount() const;
- int ContextLocalCount() const;
-
- // For global scopes, the number of module literals (including nested ones).
- int num_modules() const { return num_modules_; }
-
- // For module scopes, the host scope's internal variable binding this module.
- Variable* module_var() const { return module_var_; }
-
- // Make sure this scope and all outer scopes are eagerly compiled.
- void ForceEagerCompilation() { force_eager_compilation_ = true; }
-
- // Determine if we can use lazy compilation for this scope.
- bool AllowsLazyCompilation() const;
-
- // Determine if we can use lazy compilation for this scope without a context.
- bool AllowsLazyCompilationWithoutContext() const;
-
- // True if the outer context of this scope is always the native context.
- bool HasTrivialOuterContext() const;
-
- // True if the outer context allows lazy compilation of this scope.
- bool HasLazyCompilableOuterContext() const;
-
- // The number of contexts between this and scope; zero if this == scope.
- int ContextChainLength(Scope* scope);
-
- // Find the innermost global scope.
- Scope* GlobalScope();
-
- // Find the first function, global, or eval scope. This is the scope
- // where var declarations will be hoisted to in the implementation.
- Scope* DeclarationScope();
-
- Handle<ScopeInfo> GetScopeInfo();
-
- // Get the chain of nested scopes within this scope for the source statement
- // position. The scopes will be added to the list from the outermost scope to
- // the innermost scope. Only nested block, catch or with scopes are tracked
- // and will be returned, but no inner function scopes.
- void GetNestedScopeChain(List<Handle<ScopeInfo> >* chain,
- int statement_position);
-
- // ---------------------------------------------------------------------------
- // Strict mode support.
- bool IsDeclared(Handle<String> name) {
- // During formal parameter list parsing the scope only contains
- // two variables inserted at initialization: "this" and "arguments".
- // "this" is an invalid parameter name and "arguments" is invalid parameter
- // name in strict mode. Therefore looking up with the map which includes
- // "this" and "arguments" in addition to all formal parameters is safe.
- return variables_.Lookup(name) != NULL;
- }
-
- // ---------------------------------------------------------------------------
- // Debugging.
-
-#ifdef DEBUG
- void Print(int n = 0); // n = indentation; n < 0 => don't print recursively
-#endif
-
- // ---------------------------------------------------------------------------
- // Implementation.
- protected:
- friend class ParserFactory;
-
- Isolate* const isolate_;
-
- // Scope tree.
- Scope* outer_scope_; // the immediately enclosing outer scope, or NULL
- ZoneList<Scope*> inner_scopes_; // the immediately enclosed inner scopes
-
- // The scope type.
- ScopeType type_;
-
- // Debugging support.
- Handle<String> scope_name_;
-
- // The variables declared in this scope:
- //
- // All user-declared variables (incl. parameters). For global scopes
- // variables may be implicitly 'declared' by being used (possibly in
- // an inner scope) with no intervening with statements or eval calls.
- VariableMap variables_;
- // Compiler-allocated (user-invisible) internals.
- ZoneList<Variable*> internals_;
- // Compiler-allocated (user-invisible) temporaries.
- ZoneList<Variable*> temps_;
- // Parameter list in source order.
- ZoneList<Variable*> params_;
- // Variables that must be looked up dynamically.
- DynamicScopePart* dynamics_;
- // Unresolved variables referred to from this scope.
- ZoneList<VariableProxy*> unresolved_;
- // Declarations.
- ZoneList<Declaration*> decls_;
- // Convenience variable.
- Variable* receiver_;
- // Function variable, if any; function scopes only.
- VariableDeclaration* function_;
- // Convenience variable; function scopes only.
- Variable* arguments_;
- // Interface; module scopes only.
- Interface* interface_;
-
- // Illegal redeclaration.
- Expression* illegal_redecl_;
-
- // Scope-specific information computed during parsing.
- //
- // This scope is inside a 'with' of some outer scope.
- bool scope_inside_with_;
- // This scope contains a 'with' statement.
- bool scope_contains_with_;
- // This scope or a nested catch scope or with scope contain an 'eval' call. At
- // the 'eval' call site this scope is the declaration scope.
- bool scope_calls_eval_;
- // The language mode of this scope.
- LanguageMode language_mode_;
- // This scope is a qml mode scope.
- QmlModeFlag qml_mode_flag_;
- // Source positions.
- int start_position_;
- int end_position_;
-
- // Computed via PropagateScopeInfo.
- bool outer_scope_calls_non_strict_eval_;
- bool inner_scope_calls_eval_;
- bool force_eager_compilation_;
-
- // True if it doesn't need scope resolution (e.g., if the scope was
- // constructed based on a serialized scope info or a catch context).
- bool already_resolved_;
-
- // Computed as variables are declared.
- int num_var_or_const_;
-
- // Computed via AllocateVariables; function, block and catch scopes only.
- int num_stack_slots_;
- int num_heap_slots_;
-
- // The number of modules (including nested ones).
- int num_modules_;
-
- // For module scopes, the host scope's internal variable binding this module.
- Variable* module_var_;
-
- // Serialized scope info support.
- Handle<ScopeInfo> scope_info_;
- bool already_resolved() { return already_resolved_; }
-
- // Create a non-local variable with a given name.
- // These variables are looked up dynamically at runtime.
- Variable* NonLocal(Handle<String> name, VariableMode mode);
-
- // Variable resolution.
- // Possible results of a recursive variable lookup telling if and how a
- // variable is bound. These are returned in the output parameter *binding_kind
- // of the LookupRecursive function.
- enum BindingKind {
- // The variable reference could be statically resolved to a variable binding
- // which is returned. There is no 'with' statement between the reference and
- // the binding and no scope between the reference scope (inclusive) and
- // binding scope (exclusive) makes a non-strict 'eval' call.
- BOUND,
-
- // The variable reference could be statically resolved to a variable binding
- // which is returned. There is no 'with' statement between the reference and
- // the binding, but some scope between the reference scope (inclusive) and
- // binding scope (exclusive) makes a non-strict 'eval' call, that might
- // possibly introduce variable bindings shadowing the found one. Thus the
- // found variable binding is just a guess.
- BOUND_EVAL_SHADOWED,
-
- // The variable reference could not be statically resolved to any binding
- // and thus should be considered referencing a global variable. NULL is
- // returned. The variable reference is not inside any 'with' statement and
- // no scope between the reference scope (inclusive) and global scope
- // (exclusive) makes a non-strict 'eval' call.
- UNBOUND,
-
- // The variable reference could not be statically resolved to any binding
- // NULL is returned. The variable reference is not inside any 'with'
- // statement, but some scope between the reference scope (inclusive) and
- // global scope (exclusive) makes a non-strict 'eval' call, that might
- // possibly introduce a variable binding. Thus the reference should be
- // considered referencing a global variable unless it is shadowed by an
- // 'eval' introduced binding.
- UNBOUND_EVAL_SHADOWED,
-
- // The variable could not be statically resolved and needs to be looked up
- // dynamically. NULL is returned. There are two possible reasons:
- // * A 'with' statement has been encountered and there is no variable
- // binding for the name between the variable reference and the 'with'.
- // The variable potentially references a property of the 'with' object.
- // * The code is being executed as part of a call to 'eval' and the calling
- // context chain contains either a variable binding for the name or it
- // contains a 'with' context.
- DYNAMIC_LOOKUP
- };
-
- // Lookup a variable reference given by name recursively starting with this
- // scope. If the code is executed because of a call to 'eval', the context
- // parameter should be set to the calling context of 'eval'.
- Variable* LookupRecursive(Handle<String> name,
- BindingKind* binding_kind,
- AstNodeFactory<AstNullVisitor>* factory);
- MUST_USE_RESULT
- bool ResolveVariable(CompilationInfo* info,
- VariableProxy* proxy,
- AstNodeFactory<AstNullVisitor>* factory);
- MUST_USE_RESULT
- bool ResolveVariablesRecursively(CompilationInfo* info,
- AstNodeFactory<AstNullVisitor>* factory);
-
- // Scope analysis.
- bool PropagateScopeInfo(bool outer_scope_calls_non_strict_eval);
- bool HasTrivialContext() const;
-
- // Predicates.
- bool MustAllocate(Variable* var);
- bool MustAllocateInContext(Variable* var);
- bool HasArgumentsParameter();
-
- // Variable allocation.
- void AllocateStackSlot(Variable* var);
- void AllocateHeapSlot(Variable* var);
- void AllocateParameterLocals();
- void AllocateNonParameterLocal(Variable* var);
- void AllocateNonParameterLocals();
- void AllocateVariablesRecursively();
- void AllocateModulesRecursively(Scope* host_scope);
-
- // Resolve and fill in the allocation information for all variables
- // in this scopes. Must be called *after* all scopes have been
- // processed (parsed) to ensure that unresolved variables can be
- // resolved properly.
- //
- // In the case of code compiled and run using 'eval', the context
- // parameter is the context in which eval was called. In all other
- // cases the context parameter is an empty handle.
- MUST_USE_RESULT
- bool AllocateVariables(CompilationInfo* info,
- AstNodeFactory<AstNullVisitor>* factory);
-
- private:
- // Construct a scope based on the scope info.
- Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info,
- Zone* zone);
-
- // Construct a catch scope with a binding for the name.
- Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone);
-
- void AddInnerScope(Scope* inner_scope) {
- if (inner_scope != NULL) {
- inner_scopes_.Add(inner_scope, zone_);
- inner_scope->outer_scope_ = this;
- }
- }
-
- void SetDefaults(ScopeType type,
- Scope* outer_scope,
- Handle<ScopeInfo> scope_info);
-
- Zone* zone_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SCOPES_H_
diff --git a/src/3rdparty/v8/src/serialize.cc b/src/3rdparty/v8/src/serialize.cc
deleted file mode 100644
index e587dfa..0000000
--- a/src/3rdparty/v8/src/serialize.cc
+++ /dev/null
@@ -1,1661 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "bootstrapper.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "ic-inl.h"
-#include "natives.h"
-#include "platform.h"
-#include "runtime.h"
-#include "serialize.h"
-#include "snapshot.h"
-#include "stub-cache.h"
-#include "v8threads.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// Coding of external references.
-
-// The encoding of an external reference. The type is in the high word.
-// The id is in the low word.
-static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
- return static_cast<uint32_t>(type) << 16 | id;
-}
-
-
-static int* GetInternalPointer(StatsCounter* counter) {
- // All counters refer to dummy_counter, if deserializing happens without
- // setting up counters.
- static int dummy_counter = 0;
- return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter;
-}
-
-
-ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
- ExternalReferenceTable* external_reference_table =
- isolate->external_reference_table();
- if (external_reference_table == NULL) {
- external_reference_table = new ExternalReferenceTable(isolate);
- isolate->set_external_reference_table(external_reference_table);
- }
- return external_reference_table;
-}
-
-
-void ExternalReferenceTable::AddFromId(TypeCode type,
- uint16_t id,
- const char* name,
- Isolate* isolate) {
- Address address;
- switch (type) {
- case C_BUILTIN: {
- ExternalReference ref(static_cast<Builtins::CFunctionId>(id), isolate);
- address = ref.address();
- break;
- }
- case BUILTIN: {
- ExternalReference ref(static_cast<Builtins::Name>(id), isolate);
- address = ref.address();
- break;
- }
- case RUNTIME_FUNCTION: {
- ExternalReference ref(static_cast<Runtime::FunctionId>(id), isolate);
- address = ref.address();
- break;
- }
- case IC_UTILITY: {
- ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)),
- isolate);
- address = ref.address();
- break;
- }
- default:
- UNREACHABLE();
- return;
- }
- Add(address, type, id, name);
-}
-
-
-void ExternalReferenceTable::Add(Address address,
- TypeCode type,
- uint16_t id,
- const char* name) {
- ASSERT_NE(NULL, address);
- ExternalReferenceEntry entry;
- entry.address = address;
- entry.code = EncodeExternal(type, id);
- entry.name = name;
- ASSERT_NE(0, entry.code);
- refs_.Add(entry);
- if (id > max_id_[type]) max_id_[type] = id;
-}
-
-
-void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
- for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
- max_id_[type_code] = 0;
- }
-
- // The following populates all of the different type of external references
- // into the ExternalReferenceTable.
- //
- // NOTE: This function was originally 100k of code. It has since been
- // rewritten to be mostly table driven, as the callback macro style tends to
- // very easily cause code bloat. Please be careful in the future when adding
- // new references.
-
- struct RefTableEntry {
- TypeCode type;
- uint16_t id;
- const char* name;
- };
-
- static const RefTableEntry ref_table[] = {
- // Builtins
-#define DEF_ENTRY_C(name, ignored) \
- { C_BUILTIN, \
- Builtins::c_##name, \
- "Builtins::" #name },
-
- BUILTIN_LIST_C(DEF_ENTRY_C)
-#undef DEF_ENTRY_C
-
-#define DEF_ENTRY_C(name, ignored) \
- { BUILTIN, \
- Builtins::k##name, \
- "Builtins::" #name },
-#define DEF_ENTRY_A(name, kind, state, extra) DEF_ENTRY_C(name, ignored)
-
- BUILTIN_LIST_C(DEF_ENTRY_C)
- BUILTIN_LIST_A(DEF_ENTRY_A)
- BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
-#undef DEF_ENTRY_C
-#undef DEF_ENTRY_A
-
- // Runtime functions
-#define RUNTIME_ENTRY(name, nargs, ressize) \
- { RUNTIME_FUNCTION, \
- Runtime::k##name, \
- "Runtime::" #name },
-
- RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
-#undef RUNTIME_ENTRY
-
- // IC utilities
-#define IC_ENTRY(name) \
- { IC_UTILITY, \
- IC::k##name, \
- "IC::" #name },
-
- IC_UTIL_LIST(IC_ENTRY)
-#undef IC_ENTRY
- }; // end of ref_table[].
-
- for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
- AddFromId(ref_table[i].type,
- ref_table[i].id,
- ref_table[i].name,
- isolate);
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Debug addresses
- Add(Debug_Address(Debug::k_after_break_target_address).address(isolate),
- DEBUG_ADDRESS,
- Debug::k_after_break_target_address << kDebugIdShift,
- "Debug::after_break_target_address()");
- Add(Debug_Address(Debug::k_debug_break_slot_address).address(isolate),
- DEBUG_ADDRESS,
- Debug::k_debug_break_slot_address << kDebugIdShift,
- "Debug::debug_break_slot_address()");
- Add(Debug_Address(Debug::k_debug_break_return_address).address(isolate),
- DEBUG_ADDRESS,
- Debug::k_debug_break_return_address << kDebugIdShift,
- "Debug::debug_break_return_address()");
- Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(isolate),
- DEBUG_ADDRESS,
- Debug::k_restarter_frame_function_pointer << kDebugIdShift,
- "Debug::restarter_frame_function_pointer_address()");
-#endif
-
- // Stat counters
- struct StatsRefTableEntry {
- StatsCounter* (Counters::*counter)();
- uint16_t id;
- const char* name;
- };
-
- const StatsRefTableEntry stats_ref_table[] = {
-#define COUNTER_ENTRY(name, caption) \
- { &Counters::name, \
- Counters::k_##name, \
- "Counters::" #name },
-
- STATS_COUNTER_LIST_1(COUNTER_ENTRY)
- STATS_COUNTER_LIST_2(COUNTER_ENTRY)
-#undef COUNTER_ENTRY
- }; // end of stats_ref_table[].
-
- Counters* counters = isolate->counters();
- for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
- Add(reinterpret_cast<Address>(GetInternalPointer(
- (counters->*(stats_ref_table[i].counter))())),
- STATS_COUNTER,
- stats_ref_table[i].id,
- stats_ref_table[i].name);
- }
-
- // Top addresses
-
- const char* AddressNames[] = {
-#define BUILD_NAME_LITERAL(CamelName, hacker_name) \
- "Isolate::" #hacker_name "_address",
- FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL)
- NULL
-#undef BUILD_NAME_LITERAL
- };
-
- for (uint16_t i = 0; i < Isolate::kIsolateAddressCount; ++i) {
- Add(isolate->get_address_from_id((Isolate::AddressId)i),
- TOP_ADDRESS, i, AddressNames[i]);
- }
-
- // Accessors
-#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
- Add((Address)&Accessors::name, \
- ACCESSOR, \
- Accessors::k##name, \
- "Accessors::" #name);
-
- ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
-#undef ACCESSOR_DESCRIPTOR_DECLARATION
-
- StubCache* stub_cache = isolate->stub_cache();
-
- // Stub cache tables
- Add(stub_cache->key_reference(StubCache::kPrimary).address(),
- STUB_CACHE_TABLE,
- 1,
- "StubCache::primary_->key");
- Add(stub_cache->value_reference(StubCache::kPrimary).address(),
- STUB_CACHE_TABLE,
- 2,
- "StubCache::primary_->value");
- Add(stub_cache->map_reference(StubCache::kPrimary).address(),
- STUB_CACHE_TABLE,
- 3,
- "StubCache::primary_->map");
- Add(stub_cache->key_reference(StubCache::kSecondary).address(),
- STUB_CACHE_TABLE,
- 4,
- "StubCache::secondary_->key");
- Add(stub_cache->value_reference(StubCache::kSecondary).address(),
- STUB_CACHE_TABLE,
- 5,
- "StubCache::secondary_->value");
- Add(stub_cache->map_reference(StubCache::kSecondary).address(),
- STUB_CACHE_TABLE,
- 6,
- "StubCache::secondary_->map");
-
- // Runtime entries
- Add(ExternalReference::perform_gc_function(isolate).address(),
- RUNTIME_ENTRY,
- 1,
- "Runtime::PerformGC");
- Add(ExternalReference::fill_heap_number_with_random_function(
- isolate).address(),
- RUNTIME_ENTRY,
- 2,
- "V8::FillHeapNumberWithRandom");
- Add(ExternalReference::random_uint32_function(isolate).address(),
- RUNTIME_ENTRY,
- 3,
- "V8::Random");
- Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
- RUNTIME_ENTRY,
- 4,
- "HandleScope::DeleteExtensions");
- Add(ExternalReference::
- incremental_marking_record_write_function(isolate).address(),
- RUNTIME_ENTRY,
- 5,
- "IncrementalMarking::RecordWrite");
- Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
- RUNTIME_ENTRY,
- 6,
- "StoreBuffer::StoreBufferOverflow");
- Add(ExternalReference::
- incremental_evacuation_record_write_function(isolate).address(),
- RUNTIME_ENTRY,
- 7,
- "IncrementalMarking::RecordWrite");
-
-
-
- // Miscellaneous
- Add(ExternalReference::roots_array_start(isolate).address(),
- UNCLASSIFIED,
- 3,
- "Heap::roots_array_start()");
- Add(ExternalReference::address_of_stack_limit(isolate).address(),
- UNCLASSIFIED,
- 4,
- "StackGuard::address_of_jslimit()");
- Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
- UNCLASSIFIED,
- 5,
- "StackGuard::address_of_real_jslimit()");
-#ifndef V8_INTERPRETED_REGEXP
- Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
- UNCLASSIFIED,
- 6,
- "RegExpStack::limit_address()");
- Add(ExternalReference::address_of_regexp_stack_memory_address(
- isolate).address(),
- UNCLASSIFIED,
- 7,
- "RegExpStack::memory_address()");
- Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
- UNCLASSIFIED,
- 8,
- "RegExpStack::memory_size()");
- Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
- UNCLASSIFIED,
- 9,
- "OffsetsVector::static_offsets_vector");
-#endif // V8_INTERPRETED_REGEXP
- Add(ExternalReference::new_space_start(isolate).address(),
- UNCLASSIFIED,
- 10,
- "Heap::NewSpaceStart()");
- Add(ExternalReference::new_space_mask(isolate).address(),
- UNCLASSIFIED,
- 11,
- "Heap::NewSpaceMask()");
- Add(ExternalReference::heap_always_allocate_scope_depth(isolate).address(),
- UNCLASSIFIED,
- 12,
- "Heap::always_allocate_scope_depth()");
- Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
- UNCLASSIFIED,
- 14,
- "Heap::NewSpaceAllocationLimitAddress()");
- Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
- UNCLASSIFIED,
- 15,
- "Heap::NewSpaceAllocationTopAddress()");
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Add(ExternalReference::debug_break(isolate).address(),
- UNCLASSIFIED,
- 16,
- "Debug::Break()");
- Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
- UNCLASSIFIED,
- 17,
- "Debug::step_in_fp_addr()");
-#endif
- Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(),
- UNCLASSIFIED,
- 18,
- "add_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(),
- UNCLASSIFIED,
- 19,
- "sub_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(),
- UNCLASSIFIED,
- 20,
- "mul_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(),
- UNCLASSIFIED,
- 21,
- "div_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(),
- UNCLASSIFIED,
- 22,
- "mod_two_doubles");
- Add(ExternalReference::compare_doubles(isolate).address(),
- UNCLASSIFIED,
- 23,
- "compare_doubles");
-#ifndef V8_INTERPRETED_REGEXP
- Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
- UNCLASSIFIED,
- 24,
- "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
- Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
- UNCLASSIFIED,
- 25,
- "RegExpMacroAssembler*::CheckStackGuardState()");
- Add(ExternalReference::re_grow_stack(isolate).address(),
- UNCLASSIFIED,
- 26,
- "NativeRegExpMacroAssembler::GrowStack()");
- Add(ExternalReference::re_word_character_map().address(),
- UNCLASSIFIED,
- 27,
- "NativeRegExpMacroAssembler::word_character_map");
-#endif // V8_INTERPRETED_REGEXP
- // Keyed lookup cache.
- Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
- UNCLASSIFIED,
- 28,
- "KeyedLookupCache::keys()");
- Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
- UNCLASSIFIED,
- 29,
- "KeyedLookupCache::field_offsets()");
- Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
- UNCLASSIFIED,
- 30,
- "TranscendentalCache::caches()");
- Add(ExternalReference::handle_scope_next_address(isolate).address(),
- UNCLASSIFIED,
- 31,
- "HandleScope::next");
- Add(ExternalReference::handle_scope_limit_address(isolate).address(),
- UNCLASSIFIED,
- 32,
- "HandleScope::limit");
- Add(ExternalReference::handle_scope_level_address(isolate).address(),
- UNCLASSIFIED,
- 33,
- "HandleScope::level");
- Add(ExternalReference::new_deoptimizer_function(isolate).address(),
- UNCLASSIFIED,
- 34,
- "Deoptimizer::New()");
- Add(ExternalReference::compute_output_frames_function(isolate).address(),
- UNCLASSIFIED,
- 35,
- "Deoptimizer::ComputeOutputFrames()");
- Add(ExternalReference::address_of_min_int().address(),
- UNCLASSIFIED,
- 36,
- "LDoubleConstant::min_int");
- Add(ExternalReference::address_of_one_half().address(),
- UNCLASSIFIED,
- 37,
- "LDoubleConstant::one_half");
- Add(ExternalReference::isolate_address().address(),
- UNCLASSIFIED,
- 38,
- "isolate");
- Add(ExternalReference::address_of_minus_zero().address(),
- UNCLASSIFIED,
- 39,
- "LDoubleConstant::minus_zero");
- Add(ExternalReference::address_of_negative_infinity().address(),
- UNCLASSIFIED,
- 40,
- "LDoubleConstant::negative_infinity");
- Add(ExternalReference::power_double_double_function(isolate).address(),
- UNCLASSIFIED,
- 41,
- "power_double_double_function");
- Add(ExternalReference::power_double_int_function(isolate).address(),
- UNCLASSIFIED,
- 42,
- "power_double_int_function");
- Add(ExternalReference::store_buffer_top(isolate).address(),
- UNCLASSIFIED,
- 43,
- "store_buffer_top");
- Add(ExternalReference::address_of_canonical_non_hole_nan().address(),
- UNCLASSIFIED,
- 44,
- "canonical_nan");
- Add(ExternalReference::address_of_the_hole_nan().address(),
- UNCLASSIFIED,
- 45,
- "the_hole_nan");
- Add(ExternalReference::get_date_field_function(isolate).address(),
- UNCLASSIFIED,
- 46,
- "JSDate::GetField");
- Add(ExternalReference::date_cache_stamp(isolate).address(),
- UNCLASSIFIED,
- 47,
- "date_cache_stamp");
- Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
- UNCLASSIFIED,
- 48,
- "address_of_pending_message_obj");
- Add(ExternalReference::address_of_has_pending_message(isolate).address(),
- UNCLASSIFIED,
- 49,
- "address_of_has_pending_message");
- Add(ExternalReference::address_of_pending_message_script(isolate).address(),
- UNCLASSIFIED,
- 50,
- "pending_message_script");
- Add(ExternalReference::get_make_code_young_function(isolate).address(),
- UNCLASSIFIED,
- 51,
- "Code::MakeCodeYoung");
- Add(ExternalReference::cpu_features().address(),
- UNCLASSIFIED,
- 52,
- "cpu_features");
- Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
- UNCLASSIFIED,
- 53,
- "Heap::NewSpaceAllocationTopAddress");
- Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
- UNCLASSIFIED,
- 54,
- "Heap::NewSpaceAllocationLimitAddress");
- Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
- UNCLASSIFIED,
- 55,
- "Runtime::AllocateInNewSpace");
-
- // Add a small set of deopt entry addresses to encoder without generating the
- // deopt table code, which isn't possible at deserialization time.
- HandleScope scope(isolate);
- for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
- Address address = Deoptimizer::GetDeoptimizationEntry(
- isolate,
- entry,
- Deoptimizer::LAZY,
- Deoptimizer::CALCULATE_ENTRY_ADDRESS);
- Add(address, LAZY_DEOPTIMIZATION, 56 + entry, "lazy_deopt");
- }
-}
-
-
-ExternalReferenceEncoder::ExternalReferenceEncoder()
- : encodings_(Match),
- isolate_(Isolate::Current()) {
- ExternalReferenceTable* external_references =
- ExternalReferenceTable::instance(isolate_);
- for (int i = 0; i < external_references->size(); ++i) {
- Put(external_references->address(i), i);
- }
-}
-
-
-uint32_t ExternalReferenceEncoder::Encode(Address key) const {
- int index = IndexOf(key);
- ASSERT(key == NULL || index >= 0);
- return index >=0 ?
- ExternalReferenceTable::instance(isolate_)->code(index) : 0;
-}
-
-
-const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
- int index = IndexOf(key);
- return index >= 0 ?
- ExternalReferenceTable::instance(isolate_)->name(index) : NULL;
-}
-
-
-int ExternalReferenceEncoder::IndexOf(Address key) const {
- if (key == NULL) return -1;
- HashMap::Entry* entry =
- const_cast<HashMap&>(encodings_).Lookup(key, Hash(key), false);
- return entry == NULL
- ? -1
- : static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
-}
-
-
-void ExternalReferenceEncoder::Put(Address key, int index) {
- HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
- entry->value = reinterpret_cast<void*>(index);
-}
-
-
-ExternalReferenceDecoder::ExternalReferenceDecoder()
- : encodings_(NewArray<Address*>(kTypeCodeCount)),
- isolate_(Isolate::Current()) {
- ExternalReferenceTable* external_references =
- ExternalReferenceTable::instance(isolate_);
- for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
- int max = external_references->max_id(type) + 1;
- encodings_[type] = NewArray<Address>(max + 1);
- }
- for (int i = 0; i < external_references->size(); ++i) {
- Put(external_references->code(i), external_references->address(i));
- }
-}
-
-
-ExternalReferenceDecoder::~ExternalReferenceDecoder() {
- for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
- DeleteArray(encodings_[type]);
- }
- DeleteArray(encodings_);
-}
-
-
-bool Serializer::serialization_enabled_ = false;
-bool Serializer::too_late_to_enable_now_ = false;
-
-
-Deserializer::Deserializer(SnapshotByteSource* source)
- : isolate_(NULL),
- source_(source),
- external_reference_decoder_(NULL) {
- for (int i = 0; i < LAST_SPACE + 1; i++) {
- reservations_[i] = kUninitializedReservation;
- }
-}
-
-
-void Deserializer::Deserialize() {
- isolate_ = Isolate::Current();
- ASSERT(isolate_ != NULL);
- isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
- // No active threads.
- ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
- // No active handles.
- ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
- ASSERT_EQ(NULL, external_reference_decoder_);
- external_reference_decoder_ = new ExternalReferenceDecoder();
- isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
- isolate_->heap()->RepairFreeListsAfterBoot();
- isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
-
- isolate_->heap()->set_native_contexts_list(
- isolate_->heap()->undefined_value());
-
- // Update data pointers to the external strings containing natives sources.
- for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Object* source = isolate_->heap()->natives_source_cache()->get(i);
- if (!source->IsUndefined()) {
- ExternalAsciiString::cast(source)->update_data_cache();
- }
- }
-
- // Issue code events for newly deserialized code objects.
- LOG_CODE_EVENT(isolate_, LogCodeObjects());
- LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
-}
-
-
-void Deserializer::DeserializePartial(Object** root) {
- isolate_ = Isolate::Current();
- for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
- ASSERT(reservations_[i] != kUninitializedReservation);
- }
- isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
- if (external_reference_decoder_ == NULL) {
- external_reference_decoder_ = new ExternalReferenceDecoder();
- }
-
- // Keep track of the code space start and end pointers in case new
- // code objects were unserialized
- OldSpace* code_space = isolate_->heap()->code_space();
- Address start_address = code_space->top();
- VisitPointer(root);
-
- // There's no code deserialized here. If this assert fires
- // then that's changed and logging should be added to notify
- // the profiler et al of the new code.
- CHECK_EQ(start_address, code_space->top());
-}
-
-
-Deserializer::~Deserializer() {
- ASSERT(source_->AtEOF());
- if (external_reference_decoder_) {
- delete external_reference_decoder_;
- external_reference_decoder_ = NULL;
- }
-}
-
-
-// This is called on the roots. It is the driver of the deserialization
-// process. It is also called on the body of each function.
-void Deserializer::VisitPointers(Object** start, Object** end) {
- // The space must be new space. Any other space would cause ReadChunk to try
- // to update the remembered using NULL as the address.
- ReadChunk(start, end, NEW_SPACE, NULL);
-}
-
-
-// This routine writes the new object into the pointer provided and then
-// returns true if the new object was in young space and false otherwise.
-// The reason for this strange interface is that otherwise the object is
-// written very late, which means the FreeSpace map is not set up by the
-// time we need to use it to mark the space at the end of a page free.
-void Deserializer::ReadObject(int space_number,
- Object** write_back) {
- int size = source_->GetInt() << kObjectAlignmentBits;
- Address address = Allocate(space_number, size);
- *write_back = HeapObject::FromAddress(address);
- Object** current = reinterpret_cast<Object**>(address);
- Object** limit = current + (size >> kPointerSizeLog2);
- if (FLAG_log_snapshot_positions) {
- LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
- }
- ReadChunk(current, limit, space_number, address);
-#ifdef DEBUG
- bool is_codespace = (space_number == CODE_SPACE);
- ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace);
-#endif
-}
-
-void Deserializer::ReadChunk(Object** current,
- Object** limit,
- int source_space,
- Address current_object_address) {
- Isolate* const isolate = isolate_;
- // Write barrier support costs around 1% in startup time. In fact there
- // are no new space objects in current boot snapshots, so it's not needed,
- // but that may change.
- bool write_barrier_needed = (current_object_address != NULL &&
- source_space != NEW_SPACE &&
- source_space != CELL_SPACE &&
- source_space != CODE_SPACE &&
- source_space != OLD_DATA_SPACE);
- while (current < limit) {
- int data = source_->Get();
- switch (data) {
-#define CASE_STATEMENT(where, how, within, space_number) \
- case where + how + within + space_number: \
- ASSERT((where & ~kPointedToMask) == 0); \
- ASSERT((how & ~kHowToCodeMask) == 0); \
- ASSERT((within & ~kWhereToPointMask) == 0); \
- ASSERT((space_number & ~kSpaceMask) == 0);
-
-#define CASE_BODY(where, how, within, space_number_if_any) \
- { \
- bool emit_write_barrier = false; \
- bool current_was_incremented = false; \
- int space_number = space_number_if_any == kAnyOldSpace ? \
- (data & kSpaceMask) : space_number_if_any; \
- if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
- ReadObject(space_number, current); \
- emit_write_barrier = (space_number == NEW_SPACE); \
- } else { \
- Object* new_object = NULL; /* May not be a real Object pointer. */ \
- if (where == kNewObject) { \
- ReadObject(space_number, &new_object); \
- } else if (where == kRootArray) { \
- int root_id = source_->GetInt(); \
- new_object = isolate->heap()->roots_array_start()[root_id]; \
- emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
- } else if (where == kPartialSnapshotCache) { \
- int cache_index = source_->GetInt(); \
- new_object = isolate->serialize_partial_snapshot_cache() \
- [cache_index]; \
- emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
- } else if (where == kExternalReference) { \
- int skip = source_->GetInt(); \
- current = reinterpret_cast<Object**>(reinterpret_cast<Address>( \
- current) + skip); \
- int reference_id = source_->GetInt(); \
- Address address = external_reference_decoder_-> \
- Decode(reference_id); \
- new_object = reinterpret_cast<Object*>(address); \
- } else if (where == kBackref) { \
- emit_write_barrier = (space_number == NEW_SPACE); \
- new_object = GetAddressFromEnd(data & kSpaceMask); \
- } else { \
- ASSERT(where == kBackrefWithSkip); \
- int skip = source_->GetInt(); \
- current = reinterpret_cast<Object**>( \
- reinterpret_cast<Address>(current) + skip); \
- emit_write_barrier = (space_number == NEW_SPACE); \
- new_object = GetAddressFromEnd(data & kSpaceMask); \
- } \
- if (within == kInnerPointer) { \
- if (space_number != CODE_SPACE || new_object->IsCode()) { \
- Code* new_code_object = reinterpret_cast<Code*>(new_object); \
- new_object = reinterpret_cast<Object*>( \
- new_code_object->instruction_start()); \
- } else { \
- ASSERT(space_number == CODE_SPACE); \
- JSGlobalPropertyCell* cell = \
- JSGlobalPropertyCell::cast(new_object); \
- new_object = reinterpret_cast<Object*>( \
- cell->ValueAddress()); \
- } \
- } \
- if (how == kFromCode) { \
- Address location_of_branch_data = \
- reinterpret_cast<Address>(current); \
- Assembler::deserialization_set_special_target_at( \
- location_of_branch_data, \
- reinterpret_cast<Address>(new_object)); \
- location_of_branch_data += Assembler::kSpecialTargetSize; \
- current = reinterpret_cast<Object**>(location_of_branch_data); \
- current_was_incremented = true; \
- } else { \
- *current = new_object; \
- } \
- } \
- if (emit_write_barrier && write_barrier_needed) { \
- Address current_address = reinterpret_cast<Address>(current); \
- isolate->heap()->RecordWrite( \
- current_object_address, \
- static_cast<int>(current_address - current_object_address)); \
- } \
- if (!current_was_incremented) { \
- current++; \
- } \
- break; \
- } \
-
-// This generates a case and a body for the new space (which has to do extra
-// write barrier handling) and handles the other spaces with 8 fall-through
-// cases and one body.
-#define ALL_SPACES(where, how, within) \
- CASE_STATEMENT(where, how, within, NEW_SPACE) \
- CASE_BODY(where, how, within, NEW_SPACE) \
- CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
- CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
- CASE_STATEMENT(where, how, within, CODE_SPACE) \
- CASE_STATEMENT(where, how, within, CELL_SPACE) \
- CASE_STATEMENT(where, how, within, MAP_SPACE) \
- CASE_BODY(where, how, within, kAnyOldSpace)
-
-#define FOUR_CASES(byte_code) \
- case byte_code: \
- case byte_code + 1: \
- case byte_code + 2: \
- case byte_code + 3:
-
-#define SIXTEEN_CASES(byte_code) \
- FOUR_CASES(byte_code) \
- FOUR_CASES(byte_code + 4) \
- FOUR_CASES(byte_code + 8) \
- FOUR_CASES(byte_code + 12)
-
-#define COMMON_RAW_LENGTHS(f) \
- f(1) \
- f(2) \
- f(3) \
- f(4) \
- f(5) \
- f(6) \
- f(7) \
- f(8) \
- f(9) \
- f(10) \
- f(11) \
- f(12) \
- f(13) \
- f(14) \
- f(15) \
- f(16) \
- f(17) \
- f(18) \
- f(19) \
- f(20) \
- f(21) \
- f(22) \
- f(23) \
- f(24) \
- f(25) \
- f(26) \
- f(27) \
- f(28) \
- f(29) \
- f(30) \
- f(31)
-
- // We generate 15 cases and bodies that process special tags that combine
- // the raw data tag and the length into one byte.
-#define RAW_CASE(index) \
- case kRawData + index: { \
- byte* raw_data_out = reinterpret_cast<byte*>(current); \
- source_->CopyRaw(raw_data_out, index * kPointerSize); \
- current = \
- reinterpret_cast<Object**>(raw_data_out + index * kPointerSize); \
- break; \
- }
- COMMON_RAW_LENGTHS(RAW_CASE)
-#undef RAW_CASE
-
- // Deserialize a chunk of raw data that doesn't have one of the popular
- // lengths.
- case kRawData: {
- int size = source_->GetInt();
- byte* raw_data_out = reinterpret_cast<byte*>(current);
- source_->CopyRaw(raw_data_out, size);
- break;
- }
-
- SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance)
- SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance + 16) {
- int root_id = RootArrayConstantFromByteCode(data);
- Object* object = isolate->heap()->roots_array_start()[root_id];
- ASSERT(!isolate->heap()->InNewSpace(object));
- *current++ = object;
- break;
- }
-
- SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance)
- SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance + 16) {
- int root_id = RootArrayConstantFromByteCode(data);
- int skip = source_->GetInt();
- current = reinterpret_cast<Object**>(
- reinterpret_cast<intptr_t>(current) + skip);
- Object* object = isolate->heap()->roots_array_start()[root_id];
- ASSERT(!isolate->heap()->InNewSpace(object));
- *current++ = object;
- break;
- }
-
- case kRepeat: {
- int repeats = source_->GetInt();
- Object* object = current[-1];
- ASSERT(!isolate->heap()->InNewSpace(object));
- for (int i = 0; i < repeats; i++) current[i] = object;
- current += repeats;
- break;
- }
-
- STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
- Heap::kOldSpaceRoots);
- STATIC_ASSERT(kMaxRepeats == 13);
- case kConstantRepeat:
- FOUR_CASES(kConstantRepeat + 1)
- FOUR_CASES(kConstantRepeat + 5)
- FOUR_CASES(kConstantRepeat + 9) {
- int repeats = RepeatsForCode(data);
- Object* object = current[-1];
- ASSERT(!isolate->heap()->InNewSpace(object));
- for (int i = 0; i < repeats; i++) current[i] = object;
- current += repeats;
- break;
- }
-
- // Deserialize a new object and write a pointer to it to the current
- // object.
- ALL_SPACES(kNewObject, kPlain, kStartOfObject)
- // Support for direct instruction pointers in functions. It's an inner
- // pointer because it points at the entry point, not at the start of the
- // code object.
- CASE_STATEMENT(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
- CASE_BODY(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
- // Deserialize a new code object and write a pointer to its first
- // instruction to the current code object.
- ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
- // Find a recently deserialized object using its offset from the current
- // allocation point and write a pointer to it to the current object.
- ALL_SPACES(kBackref, kPlain, kStartOfObject)
- ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
-#if V8_TARGET_ARCH_MIPS
- // Deserialize a new object from pointer found in code and write
- // a pointer to it to the current object. Required only for MIPS, and
- // omitted on the other architectures because it is fully unrolled and
- // would cause bloat.
- ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
- // Find a recently deserialized code object using its offset from the
- // current allocation point and write a pointer to it to the current
- // object. Required only for MIPS.
- ALL_SPACES(kBackref, kFromCode, kStartOfObject)
- ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
-#endif
- // Find a recently deserialized code object using its offset from the
- // current allocation point and write a pointer to its first instruction
- // to the current code object or the instruction pointer in a function
- // object.
- ALL_SPACES(kBackref, kFromCode, kInnerPointer)
- ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
- ALL_SPACES(kBackref, kPlain, kInnerPointer)
- ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
- // Find an object in the roots array and write a pointer to it to the
- // current object.
- CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
- CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
- // Find an object in the partial snapshots cache and write a pointer to it
- // to the current object.
- CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
- CASE_BODY(kPartialSnapshotCache,
- kPlain,
- kStartOfObject,
- 0)
- // Find an code entry in the partial snapshots cache and
- // write a pointer to it to the current object.
- CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
- CASE_BODY(kPartialSnapshotCache,
- kPlain,
- kInnerPointer,
- 0)
- // Find an external reference and write a pointer to it to the current
- // object.
- CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
- CASE_BODY(kExternalReference,
- kPlain,
- kStartOfObject,
- 0)
- // Find an external reference and write a pointer to it in the current
- // code object.
- CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
- CASE_BODY(kExternalReference,
- kFromCode,
- kStartOfObject,
- 0)
-
-#undef CASE_STATEMENT
-#undef CASE_BODY
-#undef ALL_SPACES
-
- case kSkip: {
- int size = source_->GetInt();
- current = reinterpret_cast<Object**>(
- reinterpret_cast<intptr_t>(current) + size);
- break;
- }
-
- case kNativesStringResource: {
- int index = source_->Get();
- Vector<const char> source_vector = Natives::GetRawScriptSource(index);
- NativesExternalStringResource* resource =
- new NativesExternalStringResource(isolate->bootstrapper(),
- source_vector.start(),
- source_vector.length());
- *current++ = reinterpret_cast<Object*>(resource);
- break;
- }
-
- case kSynchronize: {
- // If we get here then that indicates that you have a mismatch between
- // the number of GC roots when serializing and deserializing.
- UNREACHABLE();
- }
-
- default:
- UNREACHABLE();
- }
- }
- ASSERT_EQ(limit, current);
-}
-
-
-void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
- ASSERT(integer < 1 << 22);
- integer <<= 2;
- int bytes = 1;
- if (integer > 0xff) bytes = 2;
- if (integer > 0xffff) bytes = 3;
- integer |= bytes;
- Put(static_cast<int>(integer & 0xff), "IntPart1");
- if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2");
- if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3");
-}
-
-
-Serializer::Serializer(SnapshotByteSink* sink)
- : sink_(sink),
- current_root_index_(0),
- external_reference_encoder_(new ExternalReferenceEncoder),
- root_index_wave_front_(0) {
- isolate_ = Isolate::Current();
- // The serializer is meant to be used only to generate initial heap images
- // from a context in which there is only one isolate.
- ASSERT(isolate_->IsDefaultIsolate());
- for (int i = 0; i <= LAST_SPACE; i++) {
- fullness_[i] = 0;
- }
-}
-
-
-Serializer::~Serializer() {
- delete external_reference_encoder_;
-}
-
-
-void StartupSerializer::SerializeStrongReferences() {
- Isolate* isolate = Isolate::Current();
- // No active threads.
- CHECK_EQ(NULL, Isolate::Current()->thread_manager()->FirstThreadStateInUse());
- // No active or weak handles.
- CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
- CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
- // We don't support serializing installed extensions.
- CHECK(!isolate->has_installed_extensions());
-
- HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
-}
-
-
-void PartialSerializer::Serialize(Object** object) {
- this->VisitPointer(object);
- Pad();
-}
-
-
-void Serializer::VisitPointers(Object** start, Object** end) {
- Isolate* isolate = Isolate::Current();
-
- for (Object** current = start; current < end; current++) {
- if (start == isolate->heap()->roots_array_start()) {
- root_index_wave_front_ =
- Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
- }
- if (reinterpret_cast<Address>(current) ==
- isolate->heap()->store_buffer()->TopAddress()) {
- sink_->Put(kSkip, "Skip");
- sink_->PutInt(kPointerSize, "SkipOneWord");
- } else if ((*current)->IsSmi()) {
- sink_->Put(kRawData + 1, "Smi");
- for (int i = 0; i < kPointerSize; i++) {
- sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
- }
- } else {
- SerializeObject(*current, kPlain, kStartOfObject, 0);
- }
- }
-}
-
-
-// This ensures that the partial snapshot cache keeps things alive during GC and
-// tracks their movement. When it is called during serialization of the startup
-// snapshot nothing happens. When the partial (context) snapshot is created,
-// this array is populated with the pointers that the partial snapshot will
-// need. As that happens we emit serialized objects to the startup snapshot
-// that correspond to the elements of this cache array. On deserialization we
-// therefore need to visit the cache array. This fills it up with pointers to
-// deserialized objects.
-void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
- if (Serializer::enabled()) return;
- Isolate* isolate = Isolate::Current();
- for (int i = 0; ; i++) {
- if (isolate->serialize_partial_snapshot_cache_length() <= i) {
- // Extend the array ready to get a value from the visitor when
- // deserializing.
- isolate->PushToPartialSnapshotCache(Smi::FromInt(0));
- }
- Object** cache = isolate->serialize_partial_snapshot_cache();
- visitor->VisitPointers(&cache[i], &cache[i + 1]);
- // Sentinel is the undefined object, which is a root so it will not normally
- // be found in the cache.
- if (cache[i] == isolate->heap()->undefined_value()) {
- break;
- }
- }
-}
-
-
-int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
- Isolate* isolate = Isolate::Current();
-
- for (int i = 0;
- i < isolate->serialize_partial_snapshot_cache_length();
- i++) {
- Object* entry = isolate->serialize_partial_snapshot_cache()[i];
- if (entry == heap_object) return i;
- }
-
- // We didn't find the object in the cache. So we add it to the cache and
- // then visit the pointer so that it becomes part of the startup snapshot
- // and we can refer to it from the partial snapshot.
- int length = isolate->serialize_partial_snapshot_cache_length();
- isolate->PushToPartialSnapshotCache(heap_object);
- startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
- // We don't recurse from the startup snapshot generator into the partial
- // snapshot generator.
- ASSERT(length == isolate->serialize_partial_snapshot_cache_length() - 1);
- return length;
-}
-
-
-int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
- Heap* heap = HEAP;
- if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
- for (int i = 0; i < root_index_wave_front_; i++) {
- Object* root = heap->roots_array_start()[i];
- if (!root->IsSmi() && root == heap_object) {
-#if V8_TARGET_ARCH_MIPS
- if (from == kFromCode) {
- // In order to avoid code bloat in the deserializer we don't have
- // support for the encoding that specifies a particular root should
- // be written into the lui/ori instructions on MIPS. Therefore we
- // should not generate such serialization data for MIPS.
- return kInvalidRootIndex;
- }
-#endif
- return i;
- }
- }
- return kInvalidRootIndex;
-}
-
-
-// Encode the location of an already deserialized object in order to write its
-// location into a later object. We can encode the location as an offset from
-// the start of the deserialized objects or as an offset backwards from the
-// current allocation pointer.
-void Serializer::SerializeReferenceToPreviousObject(
- int space,
- int address,
- HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip) {
- int offset = CurrentAllocationAddress(space) - address;
- // Shift out the bits that are always 0.
- offset >>= kObjectAlignmentBits;
- if (skip == 0) {
- sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
- } else {
- sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
- "BackRefSerWithSkip");
- sink_->PutInt(skip, "BackRefSkipDistance");
- }
- sink_->PutInt(offset, "offset");
-}
-
-
-void StartupSerializer::SerializeObject(
- Object* o,
- HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip) {
- CHECK(o->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(o);
-
- int root_index;
- if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
- PutRoot(root_index, heap_object, how_to_code, where_to_point, skip);
- return;
- }
-
- if (address_mapper_.IsMapped(heap_object)) {
- int space = SpaceOfObject(heap_object);
- int address = address_mapper_.MappedTo(heap_object);
- SerializeReferenceToPreviousObject(space,
- address,
- how_to_code,
- where_to_point,
- skip);
- } else {
- if (skip != 0) {
- sink_->Put(kSkip, "FlushPendingSkip");
- sink_->PutInt(skip, "SkipDistance");
- }
-
- // Object has not yet been serialized. Serialize it here.
- ObjectSerializer object_serializer(this,
- heap_object,
- sink_,
- how_to_code,
- where_to_point);
- object_serializer.Serialize();
- }
-}
-
-
-void StartupSerializer::SerializeWeakReferences() {
- // This phase comes right after the partial serialization (of the snapshot).
- // After we have done the partial serialization the partial snapshot cache
- // will contain some references needed to decode the partial snapshot. We
- // add one entry with 'undefined' which is the sentinel that the deserializer
- // uses to know it is done deserializing the array.
- Isolate* isolate = Isolate::Current();
- Object* undefined = isolate->heap()->undefined_value();
- VisitPointer(&undefined);
- HEAP->IterateWeakRoots(this, VISIT_ALL);
- Pad();
-}
-
-
-void Serializer::PutRoot(int root_index,
- HeapObject* object,
- SerializerDeserializer::HowToCode how_to_code,
- SerializerDeserializer::WhereToPoint where_to_point,
- int skip) {
- if (how_to_code == kPlain &&
- where_to_point == kStartOfObject &&
- root_index < kRootArrayNumberOfConstantEncodings &&
- !HEAP->InNewSpace(object)) {
- if (skip == 0) {
- sink_->Put(kRootArrayConstants + kNoSkipDistance + root_index,
- "RootConstant");
- } else {
- sink_->Put(kRootArrayConstants + kHasSkipDistance + root_index,
- "RootConstant");
- sink_->PutInt(skip, "SkipInPutRoot");
- }
- } else {
- if (skip != 0) {
- sink_->Put(kSkip, "SkipFromPutRoot");
- sink_->PutInt(skip, "SkipFromPutRootDistance");
- }
- sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
- sink_->PutInt(root_index, "root_index");
- }
-}
-
-
-void PartialSerializer::SerializeObject(
- Object* o,
- HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip) {
- CHECK(o->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(o);
-
- if (heap_object->IsMap()) {
- // The code-caches link to context-specific code objects, which
- // the startup and context serializes cannot currently handle.
- ASSERT(Map::cast(heap_object)->code_cache() ==
- heap_object->GetHeap()->empty_fixed_array());
- }
-
- int root_index;
- if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
- PutRoot(root_index, heap_object, how_to_code, where_to_point, skip);
- return;
- }
-
- if (ShouldBeInThePartialSnapshotCache(heap_object)) {
- if (skip != 0) {
- sink_->Put(kSkip, "SkipFromSerializeObject");
- sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
- }
-
- int cache_index = PartialSnapshotCacheIndex(heap_object);
- sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
- "PartialSnapshotCache");
- sink_->PutInt(cache_index, "partial_snapshot_cache_index");
- return;
- }
-
- // Pointers from the partial snapshot to the objects in the startup snapshot
- // should go through the root array or through the partial snapshot cache.
- // If this is not the case you may have to add something to the root array.
- ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
- // All the internalized strings that the partial snapshot needs should be
- // either in the root table or in the partial snapshot cache.
- ASSERT(!heap_object->IsInternalizedString());
-
- if (address_mapper_.IsMapped(heap_object)) {
- int space = SpaceOfObject(heap_object);
- int address = address_mapper_.MappedTo(heap_object);
- SerializeReferenceToPreviousObject(space,
- address,
- how_to_code,
- where_to_point,
- skip);
- } else {
- if (skip != 0) {
- sink_->Put(kSkip, "SkipFromSerializeObject");
- sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
- }
- // Object has not yet been serialized. Serialize it here.
- ObjectSerializer serializer(this,
- heap_object,
- sink_,
- how_to_code,
- where_to_point);
- serializer.Serialize();
- }
-}
-
-
-void Serializer::ObjectSerializer::Serialize() {
- int space = Serializer::SpaceOfObject(object_);
- int size = object_->Size();
-
- sink_->Put(kNewObject + reference_representation_ + space,
- "ObjectSerialization");
- sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
-
- LOG(i::Isolate::Current(),
- SnapshotPositionEvent(object_->address(), sink_->Position()));
-
- // Mark this object as already serialized.
- int offset = serializer_->Allocate(space, size);
- serializer_->address_mapper()->AddMapping(object_, offset);
-
- // Serialize the map (first word of the object).
- serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject, 0);
-
- // Serialize the rest of the object.
- CHECK_EQ(0, bytes_processed_so_far_);
- bytes_processed_so_far_ = kPointerSize;
- object_->IterateBody(object_->map()->instance_type(), size, this);
- OutputRawData(object_->address() + size);
-}
-
-
-void Serializer::ObjectSerializer::VisitPointers(Object** start,
- Object** end) {
- Object** current = start;
- while (current < end) {
- while (current < end && (*current)->IsSmi()) current++;
- if (current < end) OutputRawData(reinterpret_cast<Address>(current));
-
- while (current < end && !(*current)->IsSmi()) {
- HeapObject* current_contents = HeapObject::cast(*current);
- int root_index = serializer_->RootIndex(current_contents, kPlain);
- // Repeats are not subject to the write barrier so there are only some
- // objects that can be used in a repeat encoding. These are the early
- // ones in the root array that are never in new space.
- if (current != start &&
- root_index != kInvalidRootIndex &&
- root_index < kRootArrayNumberOfConstantEncodings &&
- current_contents == current[-1]) {
- ASSERT(!HEAP->InNewSpace(current_contents));
- int repeat_count = 1;
- while (current < end - 1 && current[repeat_count] == current_contents) {
- repeat_count++;
- }
- current += repeat_count;
- bytes_processed_so_far_ += repeat_count * kPointerSize;
- if (repeat_count > kMaxRepeats) {
- sink_->Put(kRepeat, "SerializeRepeats");
- sink_->PutInt(repeat_count, "SerializeRepeats");
- } else {
- sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
- }
- } else {
- serializer_->SerializeObject(
- current_contents, kPlain, kStartOfObject, 0);
- bytes_processed_so_far_ += kPointerSize;
- current++;
- }
- }
- }
-}
-
-
-void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
- Object** current = rinfo->target_object_address();
-
- int skip = OutputRawData(rinfo->target_address_address(),
- kCanReturnSkipInsteadOfSkipping);
- HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
- serializer_->SerializeObject(*current, representation, kStartOfObject, skip);
- bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
- Address* end) {
- Address references_start = reinterpret_cast<Address>(start);
- int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
-
- for (Address* current = start; current < end; current++) {
- sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
- sink_->PutInt(skip, "SkipB4ExternalRef");
- skip = 0;
- int reference_id = serializer_->EncodeExternalReference(*current);
- sink_->PutInt(reference_id, "reference id");
- }
- bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize);
-}
-
-
-void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
- Address references_start = rinfo->target_address_address();
- int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
-
- Address* current = rinfo->target_reference_address();
- int representation = rinfo->IsCodedSpecially() ?
- kFromCode + kStartOfObject : kPlain + kStartOfObject;
- sink_->Put(kExternalReference + representation, "ExternalRef");
- sink_->PutInt(skip, "SkipB4ExternalRef");
- int reference_id = serializer_->EncodeExternalReference(*current);
- sink_->PutInt(reference_id, "reference id");
- bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
- Address target_start = rinfo->target_address_address();
- int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping);
- Address target = rinfo->target_address();
- uint32_t encoding = serializer_->EncodeExternalReference(target);
- CHECK(target == NULL ? encoding == 0 : encoding != 0);
- int representation;
- // Can't use a ternary operator because of gcc.
- if (rinfo->IsCodedSpecially()) {
- representation = kStartOfObject + kFromCode;
- } else {
- representation = kStartOfObject + kPlain;
- }
- sink_->Put(kExternalReference + representation, "ExternalReference");
- sink_->PutInt(skip, "SkipB4ExternalRef");
- sink_->PutInt(encoding, "reference id");
- bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
- CHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Address target_start = rinfo->target_address_address();
- int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping);
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- serializer_->SerializeObject(target, kFromCode, kInnerPointer, skip);
- bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
- Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
- int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
- serializer_->SerializeObject(target, kPlain, kInnerPointer, skip);
- bytes_processed_so_far_ += kPointerSize;
-}
-
-
-void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(rinfo->target_cell());
- int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
- serializer_->SerializeObject(cell, kPlain, kInnerPointer, skip);
-}
-
-
-void Serializer::ObjectSerializer::VisitExternalAsciiString(
- v8::String::ExternalAsciiStringResource** resource_pointer) {
- Address references_start = reinterpret_cast<Address>(resource_pointer);
- OutputRawData(references_start);
- for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Object* source = HEAP->natives_source_cache()->get(i);
- if (!source->IsUndefined()) {
- ExternalAsciiString* string = ExternalAsciiString::cast(source);
- typedef v8::String::ExternalAsciiStringResource Resource;
- const Resource* resource = string->resource();
- if (resource == *resource_pointer) {
- sink_->Put(kNativesStringResource, "NativesStringResource");
- sink_->PutSection(i, "NativesStringResourceEnd");
- bytes_processed_so_far_ += sizeof(resource);
- return;
- }
- }
- }
- // One of the strings in the natives cache should match the resource. We
- // can't serialize any other kinds of external strings.
- UNREACHABLE();
-}
-
-
-int Serializer::ObjectSerializer::OutputRawData(
- Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
- Address object_start = object_->address();
- Address base = object_start + bytes_processed_so_far_;
- int up_to_offset = static_cast<int>(up_to - object_start);
- int to_skip = up_to_offset - bytes_processed_so_far_;
- int bytes_to_output = to_skip;
- bytes_processed_so_far_ += to_skip;
- // This assert will fail if the reloc info gives us the target_address_address
- // locations in a non-ascending order. Luckily that doesn't happen.
- ASSERT(to_skip >= 0);
- bool outputting_code = false;
- if (to_skip != 0 && code_object_ && !code_has_been_output_) {
- // Output the code all at once and fix later.
- bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
- outputting_code = true;
- code_has_been_output_ = true;
- }
- if (bytes_to_output != 0 &&
- (!code_object_ || outputting_code)) {
-#define RAW_CASE(index) \
- if (!outputting_code && bytes_to_output == index * kPointerSize && \
- index * kPointerSize == to_skip) { \
- sink_->PutSection(kRawData + index, "RawDataFixed"); \
- to_skip = 0; /* This insn already skips. */ \
- } else /* NOLINT */
- COMMON_RAW_LENGTHS(RAW_CASE)
-#undef RAW_CASE
- { /* NOLINT */
- // We always end up here if we are outputting the code of a code object.
- sink_->Put(kRawData, "RawData");
- sink_->PutInt(bytes_to_output, "length");
- }
- for (int i = 0; i < bytes_to_output; i++) {
- unsigned int data = base[i];
- sink_->PutSection(data, "Byte");
- }
- }
- if (to_skip != 0 && return_skip == kIgnoringReturn) {
- sink_->Put(kSkip, "Skip");
- sink_->PutInt(to_skip, "SkipDistance");
- to_skip = 0;
- }
- return to_skip;
-}
-
-
-int Serializer::SpaceOfObject(HeapObject* object) {
- for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
- AllocationSpace s = static_cast<AllocationSpace>(i);
- if (HEAP->InSpace(object, s)) {
- ASSERT(i < kNumberOfSpaces);
- return i;
- }
- }
- UNREACHABLE();
- return 0;
-}
-
-
-int Serializer::Allocate(int space, int size) {
- CHECK(space >= 0 && space < kNumberOfSpaces);
- int allocation_address = fullness_[space];
- fullness_[space] = allocation_address + size;
- return allocation_address;
-}
-
-
-int Serializer::SpaceAreaSize(int space) {
- if (space == CODE_SPACE) {
- return isolate_->memory_allocator()->CodePageAreaSize();
- } else {
- return Page::kPageSize - Page::kObjectStartOffset;
- }
-}
-
-
-void Serializer::Pad() {
- // The non-branching GetInt will read up to 3 bytes too far, so we need
- // to pad the snapshot to make sure we don't read over the end.
- for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
- sink_->Put(kNop, "Padding");
- }
-}
-
-
-bool SnapshotByteSource::AtEOF() {
- if (0u + length_ - position_ > 2 * sizeof(uint32_t)) return false;
- for (int x = position_; x < length_; x++) {
- if (data_[x] != SerializerDeserializer::nop()) return false;
- }
- return true;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/serialize.h b/src/3rdparty/v8/src/serialize.h
deleted file mode 100644
index e0bcf4e..0000000
--- a/src/3rdparty/v8/src/serialize.h
+++ /dev/null
@@ -1,663 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SERIALIZE_H_
-#define V8_SERIALIZE_H_
-
-#include "hashmap.h"
-
-namespace v8 {
-namespace internal {
-
-// A TypeCode is used to distinguish different kinds of external reference.
-// It is a single bit to make testing for types easy.
-enum TypeCode {
- UNCLASSIFIED, // One-of-a-kind references.
- BUILTIN,
- RUNTIME_FUNCTION,
- IC_UTILITY,
- DEBUG_ADDRESS,
- STATS_COUNTER,
- TOP_ADDRESS,
- C_BUILTIN,
- EXTENSION,
- ACCESSOR,
- RUNTIME_ENTRY,
- STUB_CACHE_TABLE,
- LAZY_DEOPTIMIZATION
-};
-
-const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1;
-const int kFirstTypeCode = UNCLASSIFIED;
-
-const int kReferenceIdBits = 16;
-const int kReferenceIdMask = (1 << kReferenceIdBits) - 1;
-const int kReferenceTypeShift = kReferenceIdBits;
-const int kDebugRegisterBits = 4;
-const int kDebugIdShift = kDebugRegisterBits;
-
-const int kDeoptTableSerializeEntryCount = 8;
-
-// ExternalReferenceTable is a helper class that defines the relationship
-// between external references and their encodings. It is used to build
-// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
-class ExternalReferenceTable {
- public:
- static ExternalReferenceTable* instance(Isolate* isolate);
-
- ~ExternalReferenceTable() { }
-
- int size() const { return refs_.length(); }
-
- Address address(int i) { return refs_[i].address; }
-
- uint32_t code(int i) { return refs_[i].code; }
-
- const char* name(int i) { return refs_[i].name; }
-
- int max_id(int code) { return max_id_[code]; }
-
- private:
- explicit ExternalReferenceTable(Isolate* isolate) : refs_(64) {
- PopulateTable(isolate);
- }
-
- struct ExternalReferenceEntry {
- Address address;
- uint32_t code;
- const char* name;
- };
-
- void PopulateTable(Isolate* isolate);
-
- // For a few types of references, we can get their address from their id.
- void AddFromId(TypeCode type,
- uint16_t id,
- const char* name,
- Isolate* isolate);
-
- // For other types of references, the caller will figure out the address.
- void Add(Address address, TypeCode type, uint16_t id, const char* name);
-
- List<ExternalReferenceEntry> refs_;
- int max_id_[kTypeCodeCount];
-};
-
-
-class ExternalReferenceEncoder {
- public:
- ExternalReferenceEncoder();
-
- uint32_t Encode(Address key) const;
-
- const char* NameOfAddress(Address key) const;
-
- private:
- HashMap encodings_;
- static uint32_t Hash(Address key) {
- return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >> 2);
- }
-
- int IndexOf(Address key) const;
-
- static bool Match(void* key1, void* key2) { return key1 == key2; }
-
- void Put(Address key, int index);
-
- Isolate* isolate_;
-};
-
-
-class ExternalReferenceDecoder {
- public:
- ExternalReferenceDecoder();
- ~ExternalReferenceDecoder();
-
- Address Decode(uint32_t key) const {
- if (key == 0) return NULL;
- return *Lookup(key);
- }
-
- private:
- Address** encodings_;
-
- Address* Lookup(uint32_t key) const {
- int type = key >> kReferenceTypeShift;
- ASSERT(kFirstTypeCode <= type && type < kTypeCodeCount);
- int id = key & kReferenceIdMask;
- return &encodings_[type][id];
- }
-
- void Put(uint32_t key, Address value) {
- *Lookup(key) = value;
- }
-
- Isolate* isolate_;
-};
-
-
-class SnapshotByteSource {
- public:
- SnapshotByteSource(const byte* array, int length)
- : data_(array), length_(length), position_(0) { }
-
- bool HasMore() { return position_ < length_; }
-
- int Get() {
- ASSERT(position_ < length_);
- return data_[position_++];
- }
-
- int32_t GetUnalignedInt() {
-#if defined(V8_HOST_CAN_READ_UNALIGNED) && __BYTE_ORDER == __LITTLE_ENDIAN
- int32_t answer;
- ASSERT(position_ + sizeof(answer) <= length_ + 0u);
- answer = *reinterpret_cast<const int32_t*>(data_ + position_);
-#else
- int32_t answer = data_[position_];
- answer |= data_[position_ + 1] << 8;
- answer |= data_[position_ + 2] << 16;
- answer |= data_[position_ + 3] << 24;
-#endif
- return answer;
- }
-
- void Advance(int by) { position_ += by; }
-
- inline void CopyRaw(byte* to, int number_of_bytes);
-
- inline int GetInt();
-
- bool AtEOF();
-
- int position() { return position_; }
-
- private:
- const byte* data_;
- int length_;
- int position_;
-};
-
-
-// The Serializer/Deserializer class is a common superclass for Serializer and
-// Deserializer which is used to store common constants and methods used by
-// both.
-class SerializerDeserializer: public ObjectVisitor {
- public:
- static void Iterate(ObjectVisitor* visitor);
-
- static int nop() { return kNop; }
-
- protected:
- // Where the pointed-to object can be found:
- enum Where {
- kNewObject = 0, // Object is next in snapshot.
- // 1-6 One per space.
- kRootArray = 0x9, // Object is found in root array.
- kPartialSnapshotCache = 0xa, // Object is in the cache.
- kExternalReference = 0xb, // Pointer to an external reference.
- kSkip = 0xc, // Skip n bytes.
- kNop = 0xd, // Does nothing, used to pad.
- // 0xe-0xf Free.
- kBackref = 0x10, // Object is described relative to end.
- // 0x11-0x16 One per space.
- kBackrefWithSkip = 0x18, // Object is described relative to end.
- // 0x19-0x1e One per space.
- // 0x20-0x3f Used by misc. tags below.
- kPointedToMask = 0x3f
- };
-
- // How to code the pointer to the object.
- enum HowToCode {
- kPlain = 0, // Straight pointer.
- // What this means depends on the architecture:
- kFromCode = 0x40, // A pointer inlined in code.
- kHowToCodeMask = 0x40
- };
-
- // For kRootArrayConstants
- enum WithSkip {
- kNoSkipDistance = 0,
- kHasSkipDistance = 0x40,
- kWithSkipMask = 0x40
- };
-
- // Where to point within the object.
- enum WhereToPoint {
- kStartOfObject = 0,
- kInnerPointer = 0x80, // First insn in code object or payload of cell.
- kWhereToPointMask = 0x80
- };
-
- // Misc.
- // Raw data to be copied from the snapshot. This byte code does not advance
- // the current pointer, which is used for code objects, where we write the
- // entire code in one memcpy, then fix up stuff with kSkip and other byte
- // codes that overwrite data.
- static const int kRawData = 0x20;
- // Some common raw lengths: 0x21-0x3f. These autoadvance the current pointer.
- // A tag emitted at strategic points in the snapshot to delineate sections.
- // If the deserializer does not find these at the expected moments then it
- // is an indication that the snapshot and the VM do not fit together.
- // Examine the build process for architecture, version or configuration
- // mismatches.
- static const int kSynchronize = 0x70;
- // Used for the source code of the natives, which is in the executable, but
- // is referred to from external strings in the snapshot.
- static const int kNativesStringResource = 0x71;
- static const int kRepeat = 0x72;
- static const int kConstantRepeat = 0x73;
- // 0x73-0x7f Repeat last word (subtract 0x72 to get the count).
- static const int kMaxRepeats = 0x7f - 0x72;
- static int CodeForRepeats(int repeats) {
- ASSERT(repeats >= 1 && repeats <= kMaxRepeats);
- return 0x72 + repeats;
- }
- static int RepeatsForCode(int byte_code) {
- ASSERT(byte_code >= kConstantRepeat && byte_code <= 0x7f);
- return byte_code - 0x72;
- }
- static const int kRootArrayConstants = 0xa0;
- // 0xa0-0xbf Things from the first 32 elements of the root array.
- static const int kRootArrayNumberOfConstantEncodings = 0x20;
- static int RootArrayConstantFromByteCode(int byte_code) {
- return byte_code & 0x1f;
- }
-
- static const int kNumberOfSpaces = LO_SPACE;
- static const int kAnyOldSpace = -1;
-
- // A bitmask for getting the space out of an instruction.
- static const int kSpaceMask = 7;
-};
-
-
-int SnapshotByteSource::GetInt() {
- // This way of variable-length encoding integers does not suffer from branch
- // mispredictions.
- uint32_t answer = GetUnalignedInt();
- int bytes = answer & 3;
- Advance(bytes);
- uint32_t mask = 0xffffffffu;
- mask >>= 32 - (bytes << 3);
- answer &= mask;
- answer >>= 2;
- return answer;
-}
-
-
-void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
- memcpy(to, data_ + position_, number_of_bytes);
- position_ += number_of_bytes;
-}
-
-
-// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
-class Deserializer: public SerializerDeserializer {
- public:
- // Create a deserializer from a snapshot byte source.
- explicit Deserializer(SnapshotByteSource* source);
-
- virtual ~Deserializer();
-
- // Deserialize the snapshot into an empty heap.
- void Deserialize();
-
- // Deserialize a single object and the objects reachable from it.
- void DeserializePartial(Object** root);
-
- void set_reservation(int space_number, int reservation) {
- ASSERT(space_number >= 0);
- ASSERT(space_number <= LAST_SPACE);
- reservations_[space_number] = reservation;
- }
-
- private:
- virtual void VisitPointers(Object** start, Object** end);
-
- virtual void VisitExternalReferences(Address* start, Address* end) {
- UNREACHABLE();
- }
-
- virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
- UNREACHABLE();
- }
-
- // Fills in some heap data in an area from start to end (non-inclusive). The
- // space id is used for the write barrier. The object_address is the address
- // of the object we are writing into, or NULL if we are not writing into an
- // object, i.e. if we are writing a series of tagged values that are not on
- // the heap.
- void ReadChunk(
- Object** start, Object** end, int space, Address object_address);
- void ReadObject(int space_number, Object** write_back);
-
- // This routine both allocates a new object, and also keeps
- // track of where objects have been allocated so that we can
- // fix back references when deserializing.
- Address Allocate(int space_index, int size) {
- Address address = high_water_[space_index];
- high_water_[space_index] = address + size;
- return address;
- }
-
- // This returns the address of an object that has been described in the
- // snapshot as being offset bytes back in a particular space.
- HeapObject* GetAddressFromEnd(int space) {
- int offset = source_->GetInt();
- offset <<= kObjectAlignmentBits;
- return HeapObject::FromAddress(high_water_[space] - offset);
- }
-
-
- // Cached current isolate.
- Isolate* isolate_;
-
- SnapshotByteSource* source_;
- // This is the address of the next object that will be allocated in each
- // space. It is used to calculate the addresses of back-references.
- Address high_water_[LAST_SPACE + 1];
-
- int reservations_[LAST_SPACE + 1];
- static const intptr_t kUninitializedReservation = -1;
-
- ExternalReferenceDecoder* external_reference_decoder_;
-
- DISALLOW_COPY_AND_ASSIGN(Deserializer);
-};
-
-
-class SnapshotByteSink {
- public:
- virtual ~SnapshotByteSink() { }
- virtual void Put(int byte, const char* description) = 0;
- virtual void PutSection(int byte, const char* description) {
- Put(byte, description);
- }
- void PutInt(uintptr_t integer, const char* description);
- virtual int Position() = 0;
-};
-
-
-// Mapping objects to their location after deserialization.
-// This is used during building, but not at runtime by V8.
-class SerializationAddressMapper {
- public:
- SerializationAddressMapper()
- : serialization_map_(new HashMap(&SerializationMatchFun)),
- no_allocation_(new AssertNoAllocation()) { }
-
- ~SerializationAddressMapper() {
- delete serialization_map_;
- delete no_allocation_;
- }
-
- bool IsMapped(HeapObject* obj) {
- return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
- }
-
- int MappedTo(HeapObject* obj) {
- ASSERT(IsMapped(obj));
- return static_cast<int>(reinterpret_cast<intptr_t>(
- serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
- }
-
- void AddMapping(HeapObject* obj, int to) {
- ASSERT(!IsMapped(obj));
- HashMap::Entry* entry =
- serialization_map_->Lookup(Key(obj), Hash(obj), true);
- entry->value = Value(to);
- }
-
- private:
- static bool SerializationMatchFun(void* key1, void* key2) {
- return key1 == key2;
- }
-
- static uint32_t Hash(HeapObject* obj) {
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
- }
-
- static void* Key(HeapObject* obj) {
- return reinterpret_cast<void*>(obj->address());
- }
-
- static void* Value(int v) {
- return reinterpret_cast<void*>(v);
- }
-
- HashMap* serialization_map_;
- AssertNoAllocation* no_allocation_;
- DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper);
-};
-
-
-// There can be only one serializer per V8 process.
-class Serializer : public SerializerDeserializer {
- public:
- explicit Serializer(SnapshotByteSink* sink);
- ~Serializer();
- void VisitPointers(Object** start, Object** end);
- // You can call this after serialization to find out how much space was used
- // in each space.
- int CurrentAllocationAddress(int space) {
- ASSERT(space < kNumberOfSpaces);
- return fullness_[space];
- }
-
- static void Enable() {
- if (!serialization_enabled_) {
- ASSERT(!too_late_to_enable_now_);
- }
- serialization_enabled_ = true;
- }
-
- static void Disable() { serialization_enabled_ = false; }
- // Call this when you have made use of the fact that there is no serialization
- // going on.
- static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
- static bool enabled() { return serialization_enabled_; }
- SerializationAddressMapper* address_mapper() { return &address_mapper_; }
- void PutRoot(int index,
- HeapObject* object,
- HowToCode how,
- WhereToPoint where,
- int skip);
-
- protected:
- static const int kInvalidRootIndex = -1;
-
- int RootIndex(HeapObject* heap_object, HowToCode from);
- virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
- intptr_t root_index_wave_front() { return root_index_wave_front_; }
- void set_root_index_wave_front(intptr_t value) {
- ASSERT(value >= root_index_wave_front_);
- root_index_wave_front_ = value;
- }
-
- class ObjectSerializer : public ObjectVisitor {
- public:
- ObjectSerializer(Serializer* serializer,
- Object* o,
- SnapshotByteSink* sink,
- HowToCode how_to_code,
- WhereToPoint where_to_point)
- : serializer_(serializer),
- object_(HeapObject::cast(o)),
- sink_(sink),
- reference_representation_(how_to_code + where_to_point),
- bytes_processed_so_far_(0),
- code_object_(o->IsCode()),
- code_has_been_output_(false) { }
- void Serialize();
- void VisitPointers(Object** start, Object** end);
- void VisitEmbeddedPointer(RelocInfo* target);
- void VisitExternalReferences(Address* start, Address* end);
- void VisitExternalReference(RelocInfo* rinfo);
- void VisitCodeTarget(RelocInfo* target);
- void VisitCodeEntry(Address entry_address);
- void VisitGlobalPropertyCell(RelocInfo* rinfo);
- void VisitRuntimeEntry(RelocInfo* reloc);
- // Used for seralizing the external strings that hold the natives source.
- void VisitExternalAsciiString(
- v8::String::ExternalAsciiStringResource** resource);
- // We can't serialize a heap with external two byte strings.
- void VisitExternalTwoByteString(
- v8::String::ExternalStringResource** resource) {
- UNREACHABLE();
- }
-
- private:
- enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
- // This function outputs or skips the raw data between the last pointer and
- // up to the current position. It optionally can just return the number of
- // bytes to skip instead of performing a skip instruction, in case the skip
- // can be merged into the next instruction.
- int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
-
- Serializer* serializer_;
- HeapObject* object_;
- SnapshotByteSink* sink_;
- int reference_representation_;
- int bytes_processed_so_far_;
- bool code_object_;
- bool code_has_been_output_;
- };
-
- virtual void SerializeObject(Object* o,
- HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip) = 0;
- void SerializeReferenceToPreviousObject(
- int space,
- int address,
- HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip);
- void InitializeAllocators();
- // This will return the space for an object.
- static int SpaceOfObject(HeapObject* object);
- int Allocate(int space, int size);
- int EncodeExternalReference(Address addr) {
- return external_reference_encoder_->Encode(addr);
- }
-
- int SpaceAreaSize(int space);
-
- Isolate* isolate_;
- // Keep track of the fullness of each space in order to generate
- // relative addresses for back references.
- int fullness_[LAST_SPACE + 1];
- SnapshotByteSink* sink_;
- int current_root_index_;
- ExternalReferenceEncoder* external_reference_encoder_;
- static bool serialization_enabled_;
- // Did we already make use of the fact that serialization was not enabled?
- static bool too_late_to_enable_now_;
- SerializationAddressMapper address_mapper_;
- intptr_t root_index_wave_front_;
- void Pad();
-
- friend class ObjectSerializer;
- friend class Deserializer;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Serializer);
-};
-
-
-class PartialSerializer : public Serializer {
- public:
- PartialSerializer(Serializer* startup_snapshot_serializer,
- SnapshotByteSink* sink)
- : Serializer(sink),
- startup_serializer_(startup_snapshot_serializer) {
- set_root_index_wave_front(Heap::kStrongRootListLength);
- }
-
- // Serialize the objects reachable from a single object pointer.
- virtual void Serialize(Object** o);
- virtual void SerializeObject(Object* o,
- HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip);
-
- protected:
- virtual int PartialSnapshotCacheIndex(HeapObject* o);
- virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
- // Scripts should be referred only through shared function infos. We can't
- // allow them to be part of the partial snapshot because they contain a
- // unique ID, and deserializing several partial snapshots containing script
- // would cause dupes.
- ASSERT(!o->IsScript());
- return o->IsString() || o->IsSharedFunctionInfo() ||
- o->IsHeapNumber() || o->IsCode() ||
- o->IsScopeInfo() ||
- o->map() == HEAP->fixed_cow_array_map();
- }
-
- private:
- Serializer* startup_serializer_;
- DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
-};
-
-
-class StartupSerializer : public Serializer {
- public:
- explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
- // Clear the cache of objects used by the partial snapshot. After the
- // strong roots have been serialized we can create a partial snapshot
- // which will repopulate the cache with objects needed by that partial
- // snapshot.
- Isolate::Current()->set_serialize_partial_snapshot_cache_length(0);
- }
- // Serialize the current state of the heap. The order is:
- // 1) Strong references.
- // 2) Partial snapshot cache.
- // 3) Weak references (e.g. the string table).
- virtual void SerializeStrongReferences();
- virtual void SerializeObject(Object* o,
- HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip);
- void SerializeWeakReferences();
- void Serialize() {
- SerializeStrongReferences();
- SerializeWeakReferences();
- Pad();
- }
-
- private:
- virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
- return false;
- }
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_SERIALIZE_H_
diff --git a/src/3rdparty/v8/src/simulator.h b/src/3rdparty/v8/src/simulator.h
deleted file mode 100644
index 485e930..0000000
--- a/src/3rdparty/v8/src/simulator.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SIMULATOR_H_
-#define V8_SIMULATOR_H_
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/simulator-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/simulator-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/simulator-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/simulator-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-#endif // V8_SIMULATOR_H_
diff --git a/src/3rdparty/v8/src/small-pointer-list.h b/src/3rdparty/v8/src/small-pointer-list.h
deleted file mode 100644
index 295a06f..0000000
--- a/src/3rdparty/v8/src/small-pointer-list.h
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SMALL_POINTER_LIST_H_
-#define V8_SMALL_POINTER_LIST_H_
-
-#include "checks.h"
-#include "v8globals.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// SmallPointerList is a list optimized for storing no or just a
-// single value. When more values are given it falls back to ZoneList.
-//
-// The interface tries to be as close to List from list.h as possible.
-template <typename T>
-class SmallPointerList {
- public:
- SmallPointerList() : data_(kEmptyTag) {}
-
- SmallPointerList(int capacity, Zone* zone) : data_(kEmptyTag) {
- Reserve(capacity, zone);
- }
-
- void Reserve(int capacity, Zone* zone) {
- if (capacity < 2) return;
- if ((data_ & kTagMask) == kListTag) {
- if (list()->capacity() >= capacity) return;
- int old_length = list()->length();
- list()->AddBlock(NULL, capacity - list()->capacity(), zone);
- list()->Rewind(old_length);
- return;
- }
- PointerList* list = new(zone) PointerList(capacity, zone);
- if ((data_ & kTagMask) == kSingletonTag) {
- list->Add(single_value(), zone);
- }
- ASSERT(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
- data_ = reinterpret_cast<intptr_t>(list) | kListTag;
- }
-
- void Clear() {
- data_ = kEmptyTag;
- }
-
- void Sort() {
- if ((data_ & kTagMask) == kListTag) {
- list()->Sort(compare_value);
- }
- }
-
- bool is_empty() const { return length() == 0; }
-
- int length() const {
- if ((data_ & kTagMask) == kEmptyTag) return 0;
- if ((data_ & kTagMask) == kSingletonTag) return 1;
- return list()->length();
- }
-
- void Add(T* pointer, Zone* zone) {
- ASSERT(IsAligned(reinterpret_cast<intptr_t>(pointer), kPointerAlignment));
- if ((data_ & kTagMask) == kEmptyTag) {
- data_ = reinterpret_cast<intptr_t>(pointer) | kSingletonTag;
- return;
- }
- if ((data_ & kTagMask) == kSingletonTag) {
- PointerList* list = new(zone) PointerList(2, zone);
- list->Add(single_value(), zone);
- list->Add(pointer, zone);
- ASSERT(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
- data_ = reinterpret_cast<intptr_t>(list) | kListTag;
- return;
- }
- list()->Add(pointer, zone);
- }
-
- // Note: returns T* and not T*& (unlike List from list.h).
- // This makes the implementation simpler and more const correct.
- T* at(int i) const {
- ASSERT((data_ & kTagMask) != kEmptyTag);
- if ((data_ & kTagMask) == kSingletonTag) {
- ASSERT(i == 0);
- return single_value();
- }
- return list()->at(i);
- }
-
- // See the note above.
- T* operator[](int i) const { return at(i); }
-
- // Remove the given element from the list (if present).
- void RemoveElement(T* pointer) {
- if ((data_ & kTagMask) == kEmptyTag) return;
- if ((data_ & kTagMask) == kSingletonTag) {
- if (pointer == single_value()) {
- data_ = kEmptyTag;
- }
- return;
- }
- list()->RemoveElement(pointer);
- }
-
- T* RemoveLast() {
- ASSERT((data_ & kTagMask) != kEmptyTag);
- if ((data_ & kTagMask) == kSingletonTag) {
- T* result = single_value();
- data_ = kEmptyTag;
- return result;
- }
- return list()->RemoveLast();
- }
-
- void Rewind(int pos) {
- if ((data_ & kTagMask) == kEmptyTag) {
- ASSERT(pos == 0);
- return;
- }
- if ((data_ & kTagMask) == kSingletonTag) {
- ASSERT(pos == 0 || pos == 1);
- if (pos == 0) {
- data_ = kEmptyTag;
- }
- return;
- }
- list()->Rewind(pos);
- }
-
- int CountOccurrences(T* pointer, int start, int end) const {
- if ((data_ & kTagMask) == kEmptyTag) return 0;
- if ((data_ & kTagMask) == kSingletonTag) {
- if (start == 0 && end >= 0) {
- return (single_value() == pointer) ? 1 : 0;
- }
- return 0;
- }
- return list()->CountOccurrences(pointer, start, end);
- }
-
- private:
- typedef ZoneList<T*> PointerList;
-
- static int compare_value(T* const* a, T* const* b) {
- return Compare<T>(**a, **b);
- }
-
- static const intptr_t kEmptyTag = 1;
- static const intptr_t kSingletonTag = 0;
- static const intptr_t kListTag = 2;
- static const intptr_t kTagMask = 3;
- static const intptr_t kValueMask = ~kTagMask;
-
- STATIC_ASSERT(kTagMask + 1 <= kPointerAlignment);
-
- T* single_value() const {
- ASSERT((data_ & kTagMask) == kSingletonTag);
- STATIC_ASSERT(kSingletonTag == 0);
- return reinterpret_cast<T*>(data_);
- }
-
- PointerList* list() const {
- ASSERT((data_ & kTagMask) == kListTag);
- return reinterpret_cast<PointerList*>(data_ & kValueMask);
- }
-
- intptr_t data_;
-
- DISALLOW_COPY_AND_ASSIGN(SmallPointerList);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SMALL_POINTER_LIST_H_
diff --git a/src/3rdparty/v8/src/smart-pointers.h b/src/3rdparty/v8/src/smart-pointers.h
deleted file mode 100644
index 02025bb..0000000
--- a/src/3rdparty/v8/src/smart-pointers.h
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SMART_POINTERS_H_
-#define V8_SMART_POINTERS_H_
-
-namespace v8 {
-namespace internal {
-
-
-template<typename Deallocator, typename T>
-class SmartPointerBase {
- public:
- // Default constructor. Constructs an empty scoped pointer.
- inline SmartPointerBase() : p_(NULL) {}
-
- // Constructs a scoped pointer from a plain one.
- explicit inline SmartPointerBase(T* ptr) : p_(ptr) {}
-
- // Copy constructor removes the pointer from the original to avoid double
- // freeing.
- inline SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs)
- : p_(rhs.p_) {
- const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
- }
-
- // When the destructor of the scoped pointer is executed the plain pointer
- // is deleted using DeleteArray. This implies that you must allocate with
- // NewArray.
- inline ~SmartPointerBase() { if (p_) Deallocator::Delete(p_); }
-
- inline T* operator->() const { return p_; }
-
- // You can get the underlying pointer out with the * operator.
- inline T* operator*() { return p_; }
-
- // You can use [n] to index as if it was a plain pointer.
- inline T& operator[](size_t i) {
- return p_[i];
- }
-
- // You can use [n] to index as if it was a plain pointer.
- const inline T& operator[](size_t i) const {
- return p_[i];
- }
-
- // We don't have implicit conversion to a T* since that hinders migration:
- // You would not be able to change a method from returning a T* to
- // returning an SmartArrayPointer<T> and then get errors wherever it is used.
-
-
- // If you want to take out the plain pointer and don't want it automatically
- // deleted then call Detach(). Afterwards, the smart pointer is empty
- // (NULL).
- inline T* Detach() {
- T* temp = p_;
- p_ = NULL;
- return temp;
- }
-
- inline void Reset(T* new_value) {
- if (p_) Deallocator::Delete(p_);
- p_ = new_value;
- }
-
- // Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
- // the copy constructor it removes the pointer in the original to avoid
- // double freeing.
- inline SmartPointerBase<Deallocator, T>& operator=(
- const SmartPointerBase<Deallocator, T>& rhs) {
- ASSERT(is_empty());
- T* tmp = rhs.p_; // swap to handle self-assignment
- const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
- p_ = tmp;
- return *this;
- }
-
- inline bool is_empty() { return p_ == NULL; }
-
- private:
- T* p_;
-};
-
-// A 'scoped array pointer' that calls DeleteArray on its pointer when the
-// destructor is called.
-
-template<typename T>
-struct ArrayDeallocator {
- static void Delete(T* array) {
- DeleteArray(array);
- }
-};
-
-
-template<typename T>
-class SmartArrayPointer: public SmartPointerBase<ArrayDeallocator<T>, T> {
- public:
- inline SmartArrayPointer() { }
- explicit inline SmartArrayPointer(T* ptr)
- : SmartPointerBase<ArrayDeallocator<T>, T>(ptr) { }
- inline SmartArrayPointer(const SmartArrayPointer<T>& rhs)
- : SmartPointerBase<ArrayDeallocator<T>, T>(rhs) { }
-};
-
-
-template<typename T>
-struct ObjectDeallocator {
- static void Delete(T* array) {
- Malloced::Delete(array);
- }
-};
-
-template<typename T>
-class SmartPointer: public SmartPointerBase<ObjectDeallocator<T>, T> {
- public:
- inline SmartPointer() { }
- explicit inline SmartPointer(T* ptr)
- : SmartPointerBase<ObjectDeallocator<T>, T>(ptr) { }
- inline SmartPointer(const SmartPointer<T>& rhs)
- : SmartPointerBase<ObjectDeallocator<T>, T>(rhs) { }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SMART_POINTERS_H_
diff --git a/src/3rdparty/v8/src/snapshot-common.cc b/src/3rdparty/v8/src/snapshot-common.cc
deleted file mode 100644
index a8806f0..0000000
--- a/src/3rdparty/v8/src/snapshot-common.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The common functionality when building with or without snapshots.
-
-#include "v8.h"
-
-#include "api.h"
-#include "serialize.h"
-#include "snapshot.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-
-static void ReserveSpaceForSnapshot(Deserializer* deserializer,
- const char* file_name) {
- int file_name_length = StrLength(file_name) + 10;
- Vector<char> name = Vector<char>::New(file_name_length + 1);
- OS::SNPrintF(name, "%s.size", file_name);
- FILE* fp = OS::FOpen(name.start(), "r");
- CHECK_NE(NULL, fp);
- int new_size, pointer_size, data_size, code_size, map_size, cell_size;
-#ifdef _MSC_VER
- // Avoid warning about unsafe fscanf from MSVC.
- // Please note that this is only fine if %c and %s are not being used.
-#define fscanf fscanf_s
-#endif
- CHECK_EQ(1, fscanf(fp, "new %d\n", &new_size));
- CHECK_EQ(1, fscanf(fp, "pointer %d\n", &pointer_size));
- CHECK_EQ(1, fscanf(fp, "data %d\n", &data_size));
- CHECK_EQ(1, fscanf(fp, "code %d\n", &code_size));
- CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
- CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
-#ifdef _MSC_VER
-#undef fscanf
-#endif
- fclose(fp);
- deserializer->set_reservation(NEW_SPACE, new_size);
- deserializer->set_reservation(OLD_POINTER_SPACE, pointer_size);
- deserializer->set_reservation(OLD_DATA_SPACE, data_size);
- deserializer->set_reservation(CODE_SPACE, code_size);
- deserializer->set_reservation(MAP_SPACE, map_size);
- deserializer->set_reservation(CELL_SPACE, cell_size);
- name.Dispose();
-}
-
-
-void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
- deserializer->set_reservation(NEW_SPACE, new_space_used_);
- deserializer->set_reservation(OLD_POINTER_SPACE, pointer_space_used_);
- deserializer->set_reservation(OLD_DATA_SPACE, data_space_used_);
- deserializer->set_reservation(CODE_SPACE, code_space_used_);
- deserializer->set_reservation(MAP_SPACE, map_space_used_);
- deserializer->set_reservation(CELL_SPACE, cell_space_used_);
-}
-
-
-bool Snapshot::Initialize(const char* snapshot_file) {
- if (snapshot_file) {
- int len;
- byte* str = ReadBytes(snapshot_file, &len);
- if (!str) return false;
- bool success;
- {
- SnapshotByteSource source(str, len);
- Deserializer deserializer(&source);
- ReserveSpaceForSnapshot(&deserializer, snapshot_file);
- success = V8::Initialize(&deserializer);
- }
- DeleteArray(str);
- return success;
- } else if (size_ > 0) {
- SnapshotByteSource source(raw_data_, raw_size_);
- Deserializer deserializer(&source);
- ReserveSpaceForLinkedInSnapshot(&deserializer);
- return V8::Initialize(&deserializer);
- }
- return false;
-}
-
-
-bool Snapshot::HaveASnapshotToStartFrom() {
- return size_ != 0;
-}
-
-
-Handle<Context> Snapshot::NewContextFromSnapshot() {
- if (context_size_ == 0) {
- return Handle<Context>();
- }
- SnapshotByteSource source(context_raw_data_,
- context_raw_size_);
- Deserializer deserializer(&source);
- Object* root;
- deserializer.set_reservation(NEW_SPACE, context_new_space_used_);
- deserializer.set_reservation(OLD_POINTER_SPACE, context_pointer_space_used_);
- deserializer.set_reservation(OLD_DATA_SPACE, context_data_space_used_);
- deserializer.set_reservation(CODE_SPACE, context_code_space_used_);
- deserializer.set_reservation(MAP_SPACE, context_map_space_used_);
- deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
- deserializer.DeserializePartial(&root);
- CHECK(root->IsContext());
- return Handle<Context>(Context::cast(root));
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/snapshot-empty.cc b/src/3rdparty/v8/src/snapshot-empty.cc
deleted file mode 100644
index 70e7ab8..0000000
--- a/src/3rdparty/v8/src/snapshot-empty.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Used for building without snapshots.
-
-#include "v8.h"
-
-#include "snapshot.h"
-
-namespace v8 {
-namespace internal {
-
-const byte Snapshot::data_[] = { 0 };
-const byte* Snapshot::raw_data_ = NULL;
-const int Snapshot::size_ = 0;
-const int Snapshot::raw_size_ = 0;
-const byte Snapshot::context_data_[] = { 0 };
-const byte* Snapshot::context_raw_data_ = NULL;
-const int Snapshot::context_size_ = 0;
-const int Snapshot::context_raw_size_ = 0;
-
-const int Snapshot::new_space_used_ = 0;
-const int Snapshot::pointer_space_used_ = 0;
-const int Snapshot::data_space_used_ = 0;
-const int Snapshot::code_space_used_ = 0;
-const int Snapshot::map_space_used_ = 0;
-const int Snapshot::cell_space_used_ = 0;
-
-const int Snapshot::context_new_space_used_ = 0;
-const int Snapshot::context_pointer_space_used_ = 0;
-const int Snapshot::context_data_space_used_ = 0;
-const int Snapshot::context_code_space_used_ = 0;
-const int Snapshot::context_map_space_used_ = 0;
-const int Snapshot::context_cell_space_used_ = 0;
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/snapshot.h b/src/3rdparty/v8/src/snapshot.h
deleted file mode 100644
index c4ae45e..0000000
--- a/src/3rdparty/v8/src/snapshot.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "isolate.h"
-
-#ifndef V8_SNAPSHOT_H_
-#define V8_SNAPSHOT_H_
-
-namespace v8 {
-namespace internal {
-
-class Snapshot {
- public:
- // Initialize the VM from the given snapshot file. If snapshot_file is
- // NULL, use the internal snapshot instead. Returns false if no snapshot
- // could be found.
- static bool Initialize(const char* snapshot_file = NULL);
-
- static bool HaveASnapshotToStartFrom();
-
- // Create a new context using the internal partial snapshot.
- static Handle<Context> NewContextFromSnapshot();
-
- // Returns whether or not the snapshot is enabled.
- static bool IsEnabled() { return size_ != 0; }
-
- // Write snapshot to the given file. Returns true if snapshot was written
- // successfully.
- static bool WriteToFile(const char* snapshot_file);
-
- static const byte* data() { return data_; }
- static int size() { return size_; }
- static int raw_size() { return raw_size_; }
- static void set_raw_data(const byte* raw_data) {
- raw_data_ = raw_data;
- }
- static const byte* context_data() { return context_data_; }
- static int context_size() { return context_size_; }
- static int context_raw_size() { return context_raw_size_; }
- static void set_context_raw_data(
- const byte* context_raw_data) {
- context_raw_data_ = context_raw_data;
- }
-
- private:
- static const byte data_[];
- static const byte* raw_data_;
- static const byte context_data_[];
- static const byte* context_raw_data_;
- static const int new_space_used_;
- static const int pointer_space_used_;
- static const int data_space_used_;
- static const int code_space_used_;
- static const int map_space_used_;
- static const int cell_space_used_;
- static const int context_new_space_used_;
- static const int context_pointer_space_used_;
- static const int context_data_space_used_;
- static const int context_code_space_used_;
- static const int context_map_space_used_;
- static const int context_cell_space_used_;
- static const int size_;
- static const int raw_size_;
- static const int context_size_;
- static const int context_raw_size_;
-
- static void ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SNAPSHOT_H_
diff --git a/src/3rdparty/v8/src/spaces-inl.h b/src/3rdparty/v8/src/spaces-inl.h
deleted file mode 100644
index 8a576a8..0000000
--- a/src/3rdparty/v8/src/spaces-inl.h
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SPACES_INL_H_
-#define V8_SPACES_INL_H_
-
-#include "isolate.h"
-#include "spaces.h"
-#include "v8memory.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// Bitmap
-
-void Bitmap::Clear(MemoryChunk* chunk) {
- Bitmap* bitmap = chunk->markbits();
- for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
- chunk->ResetLiveBytes();
-}
-
-
-// -----------------------------------------------------------------------------
-// PageIterator
-
-
-PageIterator::PageIterator(PagedSpace* space)
- : space_(space),
- prev_page_(&space->anchor_),
- next_page_(prev_page_->next_page()) { }
-
-
-bool PageIterator::has_next() {
- return next_page_ != &space_->anchor_;
-}
-
-
-Page* PageIterator::next() {
- ASSERT(has_next());
- prev_page_ = next_page_;
- next_page_ = next_page_->next_page();
- return prev_page_;
-}
-
-
-// -----------------------------------------------------------------------------
-// NewSpacePageIterator
-
-
-NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
- : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
- next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
- last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
-
-NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
- : prev_page_(space->anchor()),
- next_page_(prev_page_->next_page()),
- last_page_(prev_page_->prev_page()) { }
-
-NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
- : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
- next_page_(NewSpacePage::FromAddress(start)),
- last_page_(NewSpacePage::FromLimit(limit)) {
- SemiSpace::AssertValidRange(start, limit);
-}
-
-
-bool NewSpacePageIterator::has_next() {
- return prev_page_ != last_page_;
-}
-
-
-NewSpacePage* NewSpacePageIterator::next() {
- ASSERT(has_next());
- prev_page_ = next_page_;
- next_page_ = next_page_->next_page();
- return prev_page_;
-}
-
-
-// -----------------------------------------------------------------------------
-// HeapObjectIterator
-HeapObject* HeapObjectIterator::FromCurrentPage() {
- while (cur_addr_ != cur_end_) {
- if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
- cur_addr_ = space_->limit();
- continue;
- }
- HeapObject* obj = HeapObject::FromAddress(cur_addr_);
- int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
- cur_addr_ += obj_size;
- ASSERT(cur_addr_ <= cur_end_);
- if (!obj->IsFiller()) {
- ASSERT_OBJECT_SIZE(obj_size);
- return obj;
- }
- }
- return NULL;
-}
-
-
-// -----------------------------------------------------------------------------
-// MemoryAllocator
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void MemoryAllocator::Protect(Address start, size_t size) {
- OS::Protect(start, size);
-}
-
-
-void MemoryAllocator::Unprotect(Address start,
- size_t size,
- Executability executable) {
- OS::Unprotect(start, size, executable);
-}
-
-
-void MemoryAllocator::ProtectChunkFromPage(Page* page) {
- int id = GetChunkId(page);
- OS::Protect(chunks_[id].address(), chunks_[id].size());
-}
-
-
-void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
- int id = GetChunkId(page);
- OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
- chunks_[id].owner()->executable() == EXECUTABLE);
-}
-
-#endif
-
-
-// --------------------------------------------------------------------------
-// PagedSpace
-Page* Page::Initialize(Heap* heap,
- MemoryChunk* chunk,
- Executability executable,
- PagedSpace* owner) {
- Page* page = reinterpret_cast<Page*>(chunk);
- ASSERT(chunk->size() <= static_cast<size_t>(kPageSize));
- ASSERT(chunk->owner() == owner);
- owner->IncreaseCapacity(page->area_size());
- owner->Free(page->area_start(), page->area_size());
-
- heap->incremental_marking()->SetOldSpacePageFlags(chunk);
-
- return page;
-}
-
-
-bool PagedSpace::Contains(Address addr) {
- Page* p = Page::FromAddress(addr);
- if (!p->is_valid()) return false;
- return p->owner() == this;
-}
-
-
-void MemoryChunk::set_scan_on_scavenge(bool scan) {
- if (scan) {
- if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
- SetFlag(SCAN_ON_SCAVENGE);
- } else {
- if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
- ClearFlag(SCAN_ON_SCAVENGE);
- }
- heap_->incremental_marking()->SetOldSpacePageFlags(this);
-}
-
-
-MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
- MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
- OffsetFrom(addr) & ~Page::kPageAlignmentMask);
- if (maybe->owner() != NULL) return maybe;
- LargeObjectIterator iterator(HEAP->lo_space());
- for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
- // Fixed arrays are the only pointer-containing objects in large object
- // space.
- if (o->IsFixedArray()) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
- if (chunk->Contains(addr)) {
- return chunk;
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-void MemoryChunk::UpdateHighWaterMark(Address mark) {
- if (mark == NULL) return;
- // Need to subtract one from the mark because when a chunk is full the
- // top points to the next address after the chunk, which effectively belongs
- // to another chunk. See the comment to Page::FromAllocationTop.
- MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
- int new_mark = static_cast<int>(mark - chunk->address());
- if (new_mark > chunk->high_water_mark_) {
- chunk->high_water_mark_ = new_mark;
- }
-}
-
-
-PointerChunkIterator::PointerChunkIterator(Heap* heap)
- : state_(kOldPointerState),
- old_pointer_iterator_(heap->old_pointer_space()),
- map_iterator_(heap->map_space()),
- lo_iterator_(heap->lo_space()) { }
-
-
-Page* Page::next_page() {
- ASSERT(next_chunk()->owner() == owner());
- return static_cast<Page*>(next_chunk());
-}
-
-
-Page* Page::prev_page() {
- ASSERT(prev_chunk()->owner() == owner());
- return static_cast<Page*>(prev_chunk());
-}
-
-
-void Page::set_next_page(Page* page) {
- ASSERT(page->owner() == owner());
- set_next_chunk(page);
-}
-
-
-void Page::set_prev_page(Page* page) {
- ASSERT(page->owner() == owner());
- set_prev_chunk(page);
-}
-
-
-// Try linear allocation in the page of alloc_info's allocation top. Does
-// not contain slow case logic (e.g. move to the next page or try free list
-// allocation) so it can be used by all the allocation functions and for all
-// the paged spaces.
-HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
- Address current_top = allocation_info_.top;
- Address new_top = current_top + size_in_bytes;
- if (new_top > allocation_info_.limit) return NULL;
-
- allocation_info_.top = new_top;
- return HeapObject::FromAddress(current_top);
-}
-
-
-// Raw allocation.
-MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
- HeapObject* object = AllocateLinearly(size_in_bytes);
- if (object != NULL) {
- if (identity() == CODE_SPACE) {
- SkipList::Update(object->address(), size_in_bytes);
- }
- return object;
- }
-
- ASSERT(!heap()->linear_allocation() ||
- (anchor_.next_chunk() == &anchor_ &&
- anchor_.prev_chunk() == &anchor_));
-
- object = free_list_.Allocate(size_in_bytes);
- if (object != NULL) {
- if (identity() == CODE_SPACE) {
- SkipList::Update(object->address(), size_in_bytes);
- }
- return object;
- }
-
- object = SlowAllocateRaw(size_in_bytes);
- if (object != NULL) {
- if (identity() == CODE_SPACE) {
- SkipList::Update(object->address(), size_in_bytes);
- }
- return object;
- }
-
- return Failure::RetryAfterGC(identity());
-}
-
-
-// -----------------------------------------------------------------------------
-// NewSpace
-
-
-MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
- Address old_top = allocation_info_.top;
-#ifdef DEBUG
- // If we are stressing compaction we waste some memory in new space
- // in order to get more frequent GCs.
- if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
- if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
- int filler_size = size_in_bytes * 4;
- for (int i = 0; i < filler_size; i += kPointerSize) {
- *(reinterpret_cast<Object**>(old_top + i)) =
- HEAP->one_pointer_filler_map();
- }
- old_top += filler_size;
- allocation_info_.top += filler_size;
- }
- }
-#endif
-
- if (allocation_info_.limit - old_top < size_in_bytes) {
- return SlowAllocateRaw(size_in_bytes);
- }
-
- Object* obj = HeapObject::FromAddress(old_top);
- allocation_info_.top += size_in_bytes;
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- return obj;
-}
-
-
-LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
- heap->incremental_marking()->SetOldSpacePageFlags(chunk);
- return static_cast<LargePage*>(chunk);
-}
-
-
-intptr_t LargeObjectSpace::Available() {
- return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
-}
-
-
-template <typename StringType>
-void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
- ASSERT(length <= string->length());
- ASSERT(string->IsSeqString());
- ASSERT(string->address() + StringType::SizeFor(string->length()) ==
- allocation_info_.top);
- Address old_top = allocation_info_.top;
- allocation_info_.top =
- string->address() + StringType::SizeFor(length);
- string->set_length(length);
- if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
- int delta = static_cast<int>(old_top - allocation_info_.top);
- MemoryChunk::IncrementLiveBytesFromMutator(string->address(), -delta);
- }
-}
-
-
-bool FreeListNode::IsFreeListNode(HeapObject* object) {
- Map* map = object->map();
- Heap* heap = object->GetHeap();
- return map == heap->raw_unchecked_free_space_map()
- || map == heap->raw_unchecked_one_pointer_filler_map()
- || map == heap->raw_unchecked_two_pointer_filler_map();
-}
-
-} } // namespace v8::internal
-
-#endif // V8_SPACES_INL_H_
diff --git a/src/3rdparty/v8/src/spaces.cc b/src/3rdparty/v8/src/spaces.cc
deleted file mode 100644
index 3adb2e3..0000000
--- a/src/3rdparty/v8/src/spaces.cc
+++ /dev/null
@@ -1,3131 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "macro-assembler.h"
-#include "mark-compact.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-
-// ----------------------------------------------------------------------------
-// HeapObjectIterator
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
- // You can't actually iterate over the anchor page. It is not a real page,
- // just an anchor for the double linked page list. Initialize as if we have
- // reached the end of the anchor page, then the first iteration will move on
- // to the first page.
- Initialize(space,
- NULL,
- NULL,
- kAllPagesInSpace,
- NULL);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
- HeapObjectCallback size_func) {
- // You can't actually iterate over the anchor page. It is not a real page,
- // just an anchor for the double linked page list. Initialize the current
- // address and end as NULL, then the first iteration will move on
- // to the first page.
- Initialize(space,
- NULL,
- NULL,
- kAllPagesInSpace,
- size_func);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(Page* page,
- HeapObjectCallback size_func) {
- Space* owner = page->owner();
- ASSERT(owner == page->heap()->old_pointer_space() ||
- owner == page->heap()->old_data_space() ||
- owner == page->heap()->map_space() ||
- owner == page->heap()->cell_space() ||
- owner == page->heap()->code_space());
- Initialize(reinterpret_cast<PagedSpace*>(owner),
- page->area_start(),
- page->area_end(),
- kOnePageOnly,
- size_func);
- ASSERT(page->WasSweptPrecisely());
-}
-
-
-void HeapObjectIterator::Initialize(PagedSpace* space,
- Address cur, Address end,
- HeapObjectIterator::PageMode mode,
- HeapObjectCallback size_f) {
- // Check that we actually can iterate this space.
- ASSERT(!space->was_swept_conservatively());
-
- space_ = space;
- cur_addr_ = cur;
- cur_end_ = end;
- page_mode_ = mode;
- size_func_ = size_f;
-}
-
-
-// We have hit the end of the page and should advance to the next block of
-// objects. This happens at the end of the page.
-bool HeapObjectIterator::AdvanceToNextPage() {
- ASSERT(cur_addr_ == cur_end_);
- if (page_mode_ == kOnePageOnly) return false;
- Page* cur_page;
- if (cur_addr_ == NULL) {
- cur_page = space_->anchor();
- } else {
- cur_page = Page::FromAddress(cur_addr_ - 1);
- ASSERT(cur_addr_ == cur_page->area_end());
- }
- cur_page = cur_page->next_page();
- if (cur_page == space_->anchor()) return false;
- cur_addr_ = cur_page->area_start();
- cur_end_ = cur_page->area_end();
- ASSERT(cur_page->WasSweptPrecisely());
- return true;
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeRange
-
-
-CodeRange::CodeRange(Isolate* isolate)
- : isolate_(isolate),
- code_range_(NULL),
- free_list_(0),
- allocation_list_(0),
- current_allocation_block_index_(0) {
-}
-
-
-bool CodeRange::SetUp(const size_t requested) {
- ASSERT(code_range_ == NULL);
-
- code_range_ = new VirtualMemory(requested);
- CHECK(code_range_ != NULL);
- if (!code_range_->IsReserved()) {
- delete code_range_;
- code_range_ = NULL;
- return false;
- }
-
- // We are sure that we have mapped a block of requested addresses.
- ASSERT(code_range_->size() == requested);
- LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
- Address base = reinterpret_cast<Address>(code_range_->address());
- Address aligned_base =
- RoundUp(reinterpret_cast<Address>(code_range_->address()),
- MemoryChunk::kAlignment);
- size_t size = code_range_->size() - (aligned_base - base);
- allocation_list_.Add(FreeBlock(aligned_base, size));
- current_allocation_block_index_ = 0;
- return true;
-}
-
-
-int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
- const FreeBlock* right) {
- // The entire point of CodeRange is that the difference between two
- // addresses in the range can be represented as a signed 32-bit int,
- // so the cast is semantically correct.
- return static_cast<int>(left->start - right->start);
-}
-
-
-void CodeRange::GetNextAllocationBlock(size_t requested) {
- for (current_allocation_block_index_++;
- current_allocation_block_index_ < allocation_list_.length();
- current_allocation_block_index_++) {
- if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return; // Found a large enough allocation block.
- }
- }
-
- // Sort and merge the free blocks on the free list and the allocation list.
- free_list_.AddAll(allocation_list_);
- allocation_list_.Clear();
- free_list_.Sort(&CompareFreeBlockAddress);
- for (int i = 0; i < free_list_.length();) {
- FreeBlock merged = free_list_[i];
- i++;
- // Add adjacent free blocks to the current merged block.
- while (i < free_list_.length() &&
- free_list_[i].start == merged.start + merged.size) {
- merged.size += free_list_[i].size;
- i++;
- }
- if (merged.size > 0) {
- allocation_list_.Add(merged);
- }
- }
- free_list_.Clear();
-
- for (current_allocation_block_index_ = 0;
- current_allocation_block_index_ < allocation_list_.length();
- current_allocation_block_index_++) {
- if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return; // Found a large enough allocation block.
- }
- }
-
- // Code range is full or too fragmented.
- V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
-}
-
-
-Address CodeRange::AllocateRawMemory(const size_t requested_size,
- const size_t commit_size,
- size_t* allocated) {
- ASSERT(commit_size <= requested_size);
- ASSERT(current_allocation_block_index_ < allocation_list_.length());
- if (requested_size > allocation_list_[current_allocation_block_index_].size) {
- // Find an allocation block large enough. This function call may
- // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
- GetNextAllocationBlock(requested_size);
- }
- // Commit the requested memory at the start of the current allocation block.
- size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
- FreeBlock current = allocation_list_[current_allocation_block_index_];
- if (aligned_requested >= (current.size - Page::kPageSize)) {
- // Don't leave a small free block, useless for a large object or chunk.
- *allocated = current.size;
- } else {
- *allocated = aligned_requested;
- }
- ASSERT(*allocated <= current.size);
- ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
- if (!MemoryAllocator::CommitExecutableMemory(code_range_,
- current.start,
- commit_size,
- *allocated)) {
- *allocated = 0;
- return NULL;
- }
- allocation_list_[current_allocation_block_index_].start += *allocated;
- allocation_list_[current_allocation_block_index_].size -= *allocated;
- if (*allocated == current.size) {
- GetNextAllocationBlock(0); // This block is used up, get the next one.
- }
- return current.start;
-}
-
-
-bool CodeRange::CommitRawMemory(Address start, size_t length) {
- return code_range_->Commit(start, length, true);
-}
-
-
-bool CodeRange::UncommitRawMemory(Address start, size_t length) {
- return code_range_->Uncommit(start, length);
-}
-
-
-void CodeRange::FreeRawMemory(Address address, size_t length) {
- ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
- free_list_.Add(FreeBlock(address, length));
- code_range_->Uncommit(address, length);
-}
-
-
-void CodeRange::TearDown() {
- delete code_range_; // Frees all memory in the virtual memory range.
- code_range_ = NULL;
- free_list_.Free();
- allocation_list_.Free();
-}
-
-
-// -----------------------------------------------------------------------------
-// MemoryAllocator
-//
-
-MemoryAllocator::MemoryAllocator(Isolate* isolate)
- : isolate_(isolate),
- capacity_(0),
- capacity_executable_(0),
- size_(0),
- size_executable_(0) {
-}
-
-
-bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
- capacity_ = RoundUp(capacity, Page::kPageSize);
- capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
- ASSERT_GE(capacity_, capacity_executable_);
-
- size_ = 0;
- size_executable_ = 0;
-
- return true;
-}
-
-
-void MemoryAllocator::TearDown() {
- // Check that spaces were torn down before MemoryAllocator.
- ASSERT(size_ == 0);
- // TODO(gc) this will be true again when we fix FreeMemory.
- // ASSERT(size_executable_ == 0);
- capacity_ = 0;
- capacity_executable_ = 0;
-}
-
-
-void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
- Executability executable) {
- // TODO(gc) make code_range part of memory allocator?
- ASSERT(reservation->IsReserved());
- size_t size = reservation->size();
- ASSERT(size_ >= size);
- size_ -= size;
-
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
- if (executable == EXECUTABLE) {
- ASSERT(size_executable_ >= size);
- size_executable_ -= size;
- }
- // Code which is part of the code-range does not have its own VirtualMemory.
- ASSERT(!isolate_->code_range()->contains(
- static_cast<Address>(reservation->address())));
- ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
- reservation->Release();
-}
-
-
-void MemoryAllocator::FreeMemory(Address base,
- size_t size,
- Executability executable) {
- // TODO(gc) make code_range part of memory allocator?
- ASSERT(size_ >= size);
- size_ -= size;
-
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
- if (executable == EXECUTABLE) {
- ASSERT(size_executable_ >= size);
- size_executable_ -= size;
- }
- if (isolate_->code_range()->contains(static_cast<Address>(base))) {
- ASSERT(executable == EXECUTABLE);
- isolate_->code_range()->FreeRawMemory(base, size);
- } else {
- ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
- bool result = VirtualMemory::ReleaseRegion(base, size);
- USE(result);
- ASSERT(result);
- }
-}
-
-
-Address MemoryAllocator::ReserveAlignedMemory(size_t size,
- size_t alignment,
- VirtualMemory* controller) {
- VirtualMemory reservation(size, alignment);
-
- if (!reservation.IsReserved()) return NULL;
- size_ += reservation.size();
- Address base = RoundUp(static_cast<Address>(reservation.address()),
- alignment);
- controller->TakeControl(&reservation);
- return base;
-}
-
-
-Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
- size_t commit_size,
- size_t alignment,
- Executability executable,
- VirtualMemory* controller) {
- ASSERT(commit_size <= reserve_size);
- VirtualMemory reservation;
- Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
- if (base == NULL) return NULL;
-
- if (executable == EXECUTABLE) {
- if (!CommitExecutableMemory(&reservation,
- base,
- commit_size,
- reserve_size)) {
- base = NULL;
- }
- } else {
- if (!reservation.Commit(base, commit_size, false)) {
- base = NULL;
- }
- }
-
- if (base == NULL) {
- // Failed to commit the body. Release the mapping and any partially
- // commited regions inside it.
- reservation.Release();
- return NULL;
- }
-
- controller->TakeControl(&reservation);
- return base;
-}
-
-
-void Page::InitializeAsAnchor(PagedSpace* owner) {
- set_owner(owner);
- set_prev_page(this);
- set_next_page(this);
-}
-
-
-NewSpacePage* NewSpacePage::Initialize(Heap* heap,
- Address start,
- SemiSpace* semi_space) {
- Address area_start = start + NewSpacePage::kObjectStartOffset;
- Address area_end = start + Page::kPageSize;
-
- MemoryChunk* chunk = MemoryChunk::Initialize(heap,
- start,
- Page::kPageSize,
- area_start,
- area_end,
- NOT_EXECUTABLE,
- semi_space);
- chunk->set_next_chunk(NULL);
- chunk->set_prev_chunk(NULL);
- chunk->initialize_scan_on_scavenge(true);
- bool in_to_space = (semi_space->id() != kFromSpace);
- chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
- : MemoryChunk::IN_FROM_SPACE);
- ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
- : MemoryChunk::IN_TO_SPACE));
- NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
- heap->incremental_marking()->SetNewSpacePageFlags(page);
- return page;
-}
-
-
-void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
- set_owner(semi_space);
- set_next_chunk(this);
- set_prev_chunk(this);
- // Flags marks this invalid page as not being in new-space.
- // All real new-space pages will be in new-space.
- SetFlags(0, ~0);
-}
-
-
-MemoryChunk* MemoryChunk::Initialize(Heap* heap,
- Address base,
- size_t size,
- Address area_start,
- Address area_end,
- Executability executable,
- Space* owner) {
- MemoryChunk* chunk = FromAddress(base);
-
- ASSERT(base == chunk->address());
-
- chunk->heap_ = heap;
- chunk->size_ = size;
- chunk->area_start_ = area_start;
- chunk->area_end_ = area_end;
- chunk->flags_ = 0;
- chunk->set_owner(owner);
- chunk->InitializeReservedMemory();
- chunk->slots_buffer_ = NULL;
- chunk->skip_list_ = NULL;
- chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
- chunk->progress_bar_ = 0;
- chunk->high_water_mark_ = static_cast<int>(area_start - base);
- chunk->parallel_sweeping_ = 0;
- chunk->ResetLiveBytes();
- Bitmap::Clear(chunk);
- chunk->initialize_scan_on_scavenge(false);
- chunk->SetFlag(WAS_SWEPT_PRECISELY);
-
- ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
- ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
-
- if (executable == EXECUTABLE) {
- chunk->SetFlag(IS_EXECUTABLE);
- }
-
- if (owner == heap->old_data_space()) {
- chunk->SetFlag(CONTAINS_ONLY_DATA);
- }
-
- return chunk;
-}
-
-
-// Commit MemoryChunk area to the requested size.
-bool MemoryChunk::CommitArea(size_t requested) {
- size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
- MemoryAllocator::CodePageGuardSize() : 0;
- size_t header_size = area_start() - address() - guard_size;
- size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
- size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
- OS::CommitPageSize());
-
- if (commit_size > committed_size) {
- // Commit size should be less or equal than the reserved size.
- ASSERT(commit_size <= size() - 2 * guard_size);
- // Append the committed area.
- Address start = address() + committed_size + guard_size;
- size_t length = commit_size - committed_size;
- if (reservation_.IsReserved()) {
- if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
- return false;
- }
- } else {
- CodeRange* code_range = heap_->isolate()->code_range();
- ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
- if (!code_range->CommitRawMemory(start, length)) return false;
- }
-
- if (Heap::ShouldZapGarbage()) {
- heap_->isolate()->memory_allocator()->ZapBlock(start, length);
- }
- } else if (commit_size < committed_size) {
- ASSERT(commit_size > 0);
- // Shrink the committed area.
- size_t length = committed_size - commit_size;
- Address start = address() + committed_size + guard_size - length;
- if (reservation_.IsReserved()) {
- if (!reservation_.Uncommit(start, length)) return false;
- } else {
- CodeRange* code_range = heap_->isolate()->code_range();
- ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
- if (!code_range->UncommitRawMemory(start, length)) return false;
- }
- }
-
- area_end_ = area_start_ + requested;
- return true;
-}
-
-
-void MemoryChunk::InsertAfter(MemoryChunk* other) {
- next_chunk_ = other->next_chunk_;
- prev_chunk_ = other;
- other->next_chunk_->prev_chunk_ = this;
- other->next_chunk_ = this;
-}
-
-
-void MemoryChunk::Unlink() {
- if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
- heap_->decrement_scan_on_scavenge_pages();
- ClearFlag(SCAN_ON_SCAVENGE);
- }
- next_chunk_->prev_chunk_ = prev_chunk_;
- prev_chunk_->next_chunk_ = next_chunk_;
- prev_chunk_ = NULL;
- next_chunk_ = NULL;
-}
-
-
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
- intptr_t commit_area_size,
- Executability executable,
- Space* owner) {
- ASSERT(commit_area_size <= reserve_area_size);
-
- size_t chunk_size;
- Heap* heap = isolate_->heap();
- Address base = NULL;
- VirtualMemory reservation;
- Address area_start = NULL;
- Address area_end = NULL;
-
- //
- // MemoryChunk layout:
- //
- // Executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
- // | Header |
- // +----------------------------+<- base + CodePageGuardStartOffset
- // | Guard |
- // +----------------------------+<- area_start_
- // | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
- // | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
- // +----------------------------+<- aligned at OS page boundary
- // | Guard |
- // +----------------------------+<- base + chunk_size
- //
- // Non-executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
- // | Header |
- // +----------------------------+<- area_start_ (base + kObjectStartOffset)
- // | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
- // | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
- // +----------------------------+<- base + chunk_size
- //
-
- if (executable == EXECUTABLE) {
- chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
- OS::CommitPageSize()) + CodePageGuardSize();
-
- // Check executable memory limit.
- if (size_executable_ + chunk_size > capacity_executable_) {
- LOG(isolate_,
- StringEvent("MemoryAllocator::AllocateRawMemory",
- "V8 Executable Allocation capacity exceeded"));
- return NULL;
- }
-
- // Size of header (not executable) plus area (executable).
- size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
- OS::CommitPageSize());
- // Allocate executable memory either from code range or from the
- // OS.
- if (isolate_->code_range()->exists()) {
- base = isolate_->code_range()->AllocateRawMemory(chunk_size,
- commit_size,
- &chunk_size);
- ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
- MemoryChunk::kAlignment));
- if (base == NULL) return NULL;
- size_ += chunk_size;
- // Update executable memory size.
- size_executable_ += chunk_size;
- } else {
- base = AllocateAlignedMemory(chunk_size,
- commit_size,
- MemoryChunk::kAlignment,
- executable,
- &reservation);
- if (base == NULL) return NULL;
- // Update executable memory size.
- size_executable_ += reservation.size();
- }
-
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, CodePageGuardStartOffset());
- ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
- }
-
- area_start = base + CodePageAreaStartOffset();
- area_end = area_start + commit_area_size;
- } else {
- chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
- OS::CommitPageSize());
- size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
- commit_area_size, OS::CommitPageSize());
- base = AllocateAlignedMemory(chunk_size,
- commit_size,
- MemoryChunk::kAlignment,
- executable,
- &reservation);
-
- if (base == NULL) return NULL;
-
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
- }
-
- area_start = base + Page::kObjectStartOffset;
- area_end = area_start + commit_area_size;
- }
-
- // Use chunk_size for statistics and callbacks because we assume that they
- // treat reserved but not-yet committed memory regions of chunks as allocated.
- isolate_->counters()->memory_allocated()->
- Increment(static_cast<int>(chunk_size));
-
- LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
- if (owner != NULL) {
- ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
- PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
- }
-
- MemoryChunk* result = MemoryChunk::Initialize(heap,
- base,
- chunk_size,
- area_start,
- area_end,
- executable,
- owner);
- result->set_reserved_memory(&reservation);
- return result;
-}
-
-
-Page* MemoryAllocator::AllocatePage(intptr_t size,
- PagedSpace* owner,
- Executability executable) {
- MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
-
- if (chunk == NULL) return NULL;
-
- return Page::Initialize(isolate_->heap(), chunk, executable, owner);
-}
-
-
-LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
- Space* owner,
- Executability executable) {
- MemoryChunk* chunk = AllocateChunk(object_size,
- object_size,
- executable,
- owner);
- if (chunk == NULL) return NULL;
- return LargePage::Initialize(isolate_->heap(), chunk);
-}
-
-
-void MemoryAllocator::Free(MemoryChunk* chunk) {
- LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
- if (chunk->owner() != NULL) {
- ObjectSpace space =
- static_cast<ObjectSpace>(1 << chunk->owner()->identity());
- PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
- }
-
- isolate_->heap()->RememberUnmappedPage(
- reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
-
- delete chunk->slots_buffer();
- delete chunk->skip_list();
-
- VirtualMemory* reservation = chunk->reserved_memory();
- if (reservation->IsReserved()) {
- FreeMemory(reservation, chunk->executable());
- } else {
- FreeMemory(chunk->address(),
- chunk->size(),
- chunk->executable());
- }
-}
-
-
-bool MemoryAllocator::CommitBlock(Address start,
- size_t size,
- Executability executable) {
- if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
-
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(start, size);
- }
-
- isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
- return true;
-}
-
-
-bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
- if (!VirtualMemory::UncommitRegion(start, size)) return false;
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
- return true;
-}
-
-
-void MemoryAllocator::ZapBlock(Address start, size_t size) {
- for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
- Memory::Address_at(start + s) = kZapValue;
- }
-}
-
-
-void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
- size_t size) {
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- MemoryAllocationCallbackRegistration registration =
- memory_allocation_callbacks_[i];
- if ((registration.space & space) == space &&
- (registration.action & action) == action)
- registration.callback(space, action, static_cast<int>(size));
- }
-}
-
-
-bool MemoryAllocator::MemoryAllocationCallbackRegistered(
- MemoryAllocationCallback callback) {
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- if (memory_allocation_callbacks_[i].callback == callback) return true;
- }
- return false;
-}
-
-
-void MemoryAllocator::AddMemoryAllocationCallback(
- MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action) {
- ASSERT(callback != NULL);
- MemoryAllocationCallbackRegistration registration(callback, space, action);
- ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
- return memory_allocation_callbacks_.Add(registration);
-}
-
-
-void MemoryAllocator::RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback) {
- ASSERT(callback != NULL);
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- if (memory_allocation_callbacks_[i].callback == callback) {
- memory_allocation_callbacks_.Remove(i);
- return;
- }
- }
- UNREACHABLE();
-}
-
-
-#ifdef DEBUG
-void MemoryAllocator::ReportStatistics() {
- float pct = static_cast<float>(capacity_ - size_) / capacity_;
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
- ", used: %" V8_PTR_PREFIX "d"
- ", available: %%%d\n\n",
- capacity_, size_, static_cast<int>(pct*100));
-}
-#endif
-
-
-int MemoryAllocator::CodePageGuardStartOffset() {
- // We are guarding code pages: the first OS page after the header
- // will be protected as non-writable.
- return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
-}
-
-
-int MemoryAllocator::CodePageGuardSize() {
- return static_cast<int>(OS::CommitPageSize());
-}
-
-
-int MemoryAllocator::CodePageAreaStartOffset() {
- // We are guarding code pages: the first OS page after the header
- // will be protected as non-writable.
- return CodePageGuardStartOffset() + CodePageGuardSize();
-}
-
-
-int MemoryAllocator::CodePageAreaEndOffset() {
- // We are guarding code pages: the last OS page will be protected as
- // non-writable.
- return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
-}
-
-
-bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
- Address start,
- size_t commit_size,
- size_t reserved_size) {
- // Commit page header (not executable).
- if (!vm->Commit(start,
- CodePageGuardStartOffset(),
- false)) {
- return false;
- }
-
- // Create guard page after the header.
- if (!vm->Guard(start + CodePageGuardStartOffset())) {
- return false;
- }
-
- // Commit page body (executable).
- if (!vm->Commit(start + CodePageAreaStartOffset(),
- commit_size - CodePageGuardStartOffset(),
- true)) {
- return false;
- }
-
- // Create guard page before the end.
- if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
- return false;
- }
-
- return true;
-}
-
-
-// -----------------------------------------------------------------------------
-// MemoryChunk implementation
-
-void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(address);
- if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
- static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
- }
- chunk->IncrementLiveBytes(by);
-}
-
-// -----------------------------------------------------------------------------
-// PagedSpace implementation
-
-PagedSpace::PagedSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id,
- Executability executable)
- : Space(heap, id, executable),
- free_list_(this),
- was_swept_conservatively_(false),
- first_unswept_page_(Page::FromAddress(NULL)),
- unswept_free_bytes_(0) {
- if (id == CODE_SPACE) {
- area_size_ = heap->isolate()->memory_allocator()->
- CodePageAreaSize();
- } else {
- area_size_ = Page::kPageSize - Page::kObjectStartOffset;
- }
- max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
- * AreaSize();
- accounting_stats_.Clear();
-
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
-
- anchor_.InitializeAsAnchor(this);
-}
-
-
-bool PagedSpace::SetUp() {
- return true;
-}
-
-
-bool PagedSpace::HasBeenSetUp() {
- return true;
-}
-
-
-void PagedSpace::TearDown() {
- PageIterator iterator(this);
- while (iterator.has_next()) {
- heap()->isolate()->memory_allocator()->Free(iterator.next());
- }
- anchor_.set_next_page(&anchor_);
- anchor_.set_prev_page(&anchor_);
- accounting_stats_.Clear();
-}
-
-
-size_t PagedSpace::CommittedPhysicalMemory() {
- if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- size_t size = 0;
- PageIterator it(this);
- while (it.has_next()) {
- size += it.next()->CommittedPhysicalMemory();
- }
- return size;
-}
-
-
-MaybeObject* PagedSpace::FindObject(Address addr) {
- // Note: this function can only be called on precisely swept spaces.
- ASSERT(!heap()->mark_compact_collector()->in_use());
-
- if (!Contains(addr)) return Failure::Exception();
-
- Page* p = Page::FromAddress(addr);
- HeapObjectIterator it(p, NULL);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- Address cur = obj->address();
- Address next = cur + obj->Size();
- if ((cur <= addr) && (addr < next)) return obj;
- }
-
- UNREACHABLE();
- return Failure::Exception();
-}
-
-bool PagedSpace::CanExpand() {
- ASSERT(max_capacity_ % AreaSize() == 0);
-
- if (Capacity() == max_capacity_) return false;
-
- ASSERT(Capacity() < max_capacity_);
-
- // Are we going to exceed capacity for this space?
- if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
-
- return true;
-}
-
-bool PagedSpace::Expand() {
- if (!CanExpand()) return false;
-
- intptr_t size = AreaSize();
-
- if (anchor_.next_page() == &anchor_) {
- size = SizeOfFirstPage();
- }
-
- Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
- size, this, executable());
- if (p == NULL) return false;
-
- ASSERT(Capacity() <= max_capacity_);
-
- p->InsertAfter(anchor_.prev_page());
-
- return true;
-}
-
-
-intptr_t PagedSpace::SizeOfFirstPage() {
- int size = 0;
- switch (identity()) {
- case OLD_POINTER_SPACE:
- size = 64 * kPointerSize * KB;
- break;
- case OLD_DATA_SPACE:
- size = 192 * KB;
- break;
- case MAP_SPACE:
- size = 16 * kPointerSize * KB;
- break;
- case CELL_SPACE:
- size = 16 * kPointerSize * KB;
- break;
- case CODE_SPACE:
- if (kPointerSize == 8) {
- // On x64 we allocate code pages in a special way (from the reserved
- // 2Byte area). That part of the code is not yet upgraded to handle
- // small pages.
- size = AreaSize();
- } else {
- size = 384 * KB;
- }
- break;
- default:
- UNREACHABLE();
- }
- return Min(size, AreaSize());
-}
-
-
-int PagedSpace::CountTotalPages() {
- PageIterator it(this);
- int count = 0;
- while (it.has_next()) {
- it.next();
- count++;
- }
- return count;
-}
-
-
-void PagedSpace::ReleasePage(Page* page) {
- ASSERT(page->LiveBytes() == 0);
- ASSERT(AreaSize() == page->area_size());
-
- // Adjust list of unswept pages if the page is the head of the list.
- if (first_unswept_page_ == page) {
- first_unswept_page_ = page->next_page();
- if (first_unswept_page_ == anchor()) {
- first_unswept_page_ = Page::FromAddress(NULL);
- }
- }
-
- if (page->WasSwept()) {
- intptr_t size = free_list_.EvictFreeListItems(page);
- accounting_stats_.AllocateBytes(size);
- ASSERT_EQ(AreaSize(), static_cast<int>(size));
- } else {
- DecreaseUnsweptFreeBytes(page);
- }
-
- if (Page::FromAllocationTop(allocation_info_.top) == page) {
- allocation_info_.top = allocation_info_.limit = NULL;
- }
-
- page->Unlink();
- if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
- heap()->isolate()->memory_allocator()->Free(page);
- } else {
- heap()->QueueMemoryChunkForFree(page);
- }
-
- ASSERT(Capacity() > 0);
- accounting_stats_.ShrinkSpace(AreaSize());
-}
-
-
-#ifdef DEBUG
-void PagedSpace::Print() { }
-#endif
-
-#ifdef VERIFY_HEAP
-void PagedSpace::Verify(ObjectVisitor* visitor) {
- // We can only iterate over the pages if they were swept precisely.
- if (was_swept_conservatively_) return;
-
- bool allocation_pointer_found_in_space =
- (allocation_info_.top == allocation_info_.limit);
- PageIterator page_iterator(this);
- while (page_iterator.has_next()) {
- Page* page = page_iterator.next();
- CHECK(page->owner() == this);
- if (page == Page::FromAllocationTop(allocation_info_.top)) {
- allocation_pointer_found_in_space = true;
- }
- CHECK(page->WasSweptPrecisely());
- HeapObjectIterator it(page, NULL);
- Address end_of_previous_object = page->area_start();
- Address top = page->area_end();
- int black_size = 0;
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- CHECK(end_of_previous_object <= object->address());
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space.
- Map* map = object->map();
- CHECK(map->IsMap());
- CHECK(heap()->map_space()->Contains(map));
-
- // Perform space-specific object verification.
- VerifyObject(object);
-
- // The object itself should look OK.
- object->Verify();
-
- // All the interior pointers should be contained in the heap.
- int size = object->Size();
- object->IterateBody(map->instance_type(), size, visitor);
- if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
- black_size += size;
- }
-
- CHECK(object->address() + size <= top);
- end_of_previous_object = object->address() + size;
- }
- CHECK_LE(black_size, page->LiveBytes());
- }
- CHECK(allocation_pointer_found_in_space);
-}
-#endif // VERIFY_HEAP
-
-// -----------------------------------------------------------------------------
-// NewSpace implementation
-
-
-bool NewSpace::SetUp(int reserved_semispace_capacity,
- int maximum_semispace_capacity) {
- // Set up new space based on the preallocated memory block defined by
- // start and size. The provided space is divided into two semi-spaces.
- // To support fast containment testing in the new space, the size of
- // this chunk must be a power of two and it must be aligned to its size.
- int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
-
- size_t size = 2 * reserved_semispace_capacity;
- Address base =
- heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
- size, size, &reservation_);
- if (base == NULL) return false;
-
- chunk_base_ = base;
- chunk_size_ = static_cast<uintptr_t>(size);
- LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
-
- ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
- ASSERT(IsPowerOf2(maximum_semispace_capacity));
-
- // Allocate and set up the histogram arrays if necessary.
- allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
- promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
-
-#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
- promoted_histogram_[name].set_name(#name);
- INSTANCE_TYPE_LIST(SET_NAME)
-#undef SET_NAME
-
- ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
- ASSERT(static_cast<intptr_t>(chunk_size_) >=
- 2 * heap()->ReservedSemiSpaceSize());
- ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
-
- to_space_.SetUp(chunk_base_,
- initial_semispace_capacity,
- maximum_semispace_capacity);
- from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
- initial_semispace_capacity,
- maximum_semispace_capacity);
- if (!to_space_.Commit()) {
- return false;
- }
- ASSERT(!from_space_.is_committed()); // No need to use memory yet.
-
- start_ = chunk_base_;
- address_mask_ = ~(2 * reserved_semispace_capacity - 1);
- object_mask_ = address_mask_ | kHeapObjectTagMask;
- object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
-
- ResetAllocationInfo();
-
- return true;
-}
-
-
-void NewSpace::TearDown() {
- if (allocated_histogram_) {
- DeleteArray(allocated_histogram_);
- allocated_histogram_ = NULL;
- }
- if (promoted_histogram_) {
- DeleteArray(promoted_histogram_);
- promoted_histogram_ = NULL;
- }
-
- start_ = NULL;
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
-
- to_space_.TearDown();
- from_space_.TearDown();
-
- LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
-
- ASSERT(reservation_.IsReserved());
- heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
- NOT_EXECUTABLE);
- chunk_base_ = NULL;
- chunk_size_ = 0;
-}
-
-
-void NewSpace::Flip() {
- SemiSpace::Swap(&from_space_, &to_space_);
-}
-
-
-void NewSpace::Grow() {
- // Double the semispace size but only up to maximum capacity.
- ASSERT(Capacity() < MaximumCapacity());
- int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
- if (to_space_.GrowTo(new_capacity)) {
- // Only grow from space if we managed to grow to-space.
- if (!from_space_.GrowTo(new_capacity)) {
- // If we managed to grow to-space but couldn't grow from-space,
- // attempt to shrink to-space.
- if (!to_space_.ShrinkTo(from_space_.Capacity())) {
- // We are in an inconsistent state because we could not
- // commit/uncommit memory from new space.
- V8::FatalProcessOutOfMemory("Failed to grow new space.");
- }
- }
- }
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-
-void NewSpace::Shrink() {
- int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
- int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
- if (rounded_new_capacity < Capacity() &&
- to_space_.ShrinkTo(rounded_new_capacity)) {
- // Only shrink from-space if we managed to shrink to-space.
- from_space_.Reset();
- if (!from_space_.ShrinkTo(rounded_new_capacity)) {
- // If we managed to shrink to-space but couldn't shrink from
- // space, attempt to grow to-space again.
- if (!to_space_.GrowTo(from_space_.Capacity())) {
- // We are in an inconsistent state because we could not
- // commit/uncommit memory from new space.
- V8::FatalProcessOutOfMemory("Failed to shrink new space.");
- }
- }
- }
- allocation_info_.limit = to_space_.page_high();
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-
-void NewSpace::UpdateAllocationInfo() {
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- allocation_info_.top = to_space_.page_low();
- allocation_info_.limit = to_space_.page_high();
-
- // Lower limit during incremental marking.
- if (heap()->incremental_marking()->IsMarking() &&
- inline_allocation_limit_step() != 0) {
- Address new_limit =
- allocation_info_.top + inline_allocation_limit_step();
- allocation_info_.limit = Min(new_limit, allocation_info_.limit);
- }
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-
-void NewSpace::ResetAllocationInfo() {
- to_space_.Reset();
- UpdateAllocationInfo();
- pages_used_ = 0;
- // Clear all mark-bits in the to-space.
- NewSpacePageIterator it(&to_space_);
- while (it.has_next()) {
- Bitmap::Clear(it.next());
- }
-}
-
-
-bool NewSpace::AddFreshPage() {
- Address top = allocation_info_.top;
- if (NewSpacePage::IsAtStart(top)) {
- // The current page is already empty. Don't try to make another.
-
- // We should only get here if someone asks to allocate more
- // than what can be stored in a single page.
- // TODO(gc): Change the limit on new-space allocation to prevent this
- // from happening (all such allocations should go directly to LOSpace).
- return false;
- }
- if (!to_space_.AdvancePage()) {
- // Failed to get a new page in to-space.
- return false;
- }
-
- // Clear remainder of current page.
- Address limit = NewSpacePage::FromLimit(top)->area_end();
- if (heap()->gc_state() == Heap::SCAVENGE) {
- heap()->promotion_queue()->SetNewLimit(limit);
- heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
- }
-
- int remaining_in_page = static_cast<int>(limit - top);
- heap()->CreateFillerObjectAt(top, remaining_in_page);
- pages_used_++;
- UpdateAllocationInfo();
-
- return true;
-}
-
-
-MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
- Address old_top = allocation_info_.top;
- Address new_top = old_top + size_in_bytes;
- Address high = to_space_.page_high();
- if (allocation_info_.limit < high) {
- // Incremental marking has lowered the limit to get a
- // chance to do a step.
- allocation_info_.limit = Min(
- allocation_info_.limit + inline_allocation_limit_step_,
- high);
- int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(
- bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
- top_on_previous_step_ = new_top;
- return AllocateRaw(size_in_bytes);
- } else if (AddFreshPage()) {
- // Switched to new page. Try allocating again.
- int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(
- bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
- top_on_previous_step_ = to_space_.page_low();
- return AllocateRaw(size_in_bytes);
- } else {
- return Failure::RetryAfterGC();
- }
-}
-
-
-#ifdef VERIFY_HEAP
-// We do not use the SemiSpaceIterator because verification doesn't assume
-// that it works (it depends on the invariants we are checking).
-void NewSpace::Verify() {
- // The allocation pointer should be in the space or at the very end.
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- // There should be objects packed in from the low address up to the
- // allocation pointer.
- Address current = to_space_.first_page()->area_start();
- CHECK_EQ(current, to_space_.space_start());
-
- while (current != top()) {
- if (!NewSpacePage::IsAtEnd(current)) {
- // The allocation pointer should not be in the middle of an object.
- CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
- current < top());
-
- HeapObject* object = HeapObject::FromAddress(current);
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space.
- Map* map = object->map();
- CHECK(map->IsMap());
- CHECK(heap()->map_space()->Contains(map));
-
- // The object should not be code or a map.
- CHECK(!object->IsMap());
- CHECK(!object->IsCode());
-
- // The object itself should look OK.
- object->Verify();
-
- // All the interior pointers should be contained in the heap.
- VerifyPointersVisitor visitor;
- int size = object->Size();
- object->IterateBody(map->instance_type(), size, &visitor);
-
- current += size;
- } else {
- // At end of page, switch to next page.
- NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
- // Next page should be valid.
- CHECK(!page->is_anchor());
- current = page->area_start();
- }
- }
-
- // Check semi-spaces.
- CHECK_EQ(from_space_.id(), kFromSpace);
- CHECK_EQ(to_space_.id(), kToSpace);
- from_space_.Verify();
- to_space_.Verify();
-}
-#endif
-
-// -----------------------------------------------------------------------------
-// SemiSpace implementation
-
-void SemiSpace::SetUp(Address start,
- int initial_capacity,
- int maximum_capacity) {
- // Creates a space in the young generation. The constructor does not
- // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
- // memory of size 'capacity' when set up, and does not grow or shrink
- // otherwise. In the mark-compact collector, the memory region of the from
- // space is used as the marking stack. It requires contiguous memory
- // addresses.
- ASSERT(maximum_capacity >= Page::kPageSize);
- initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
- capacity_ = initial_capacity;
- maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
- committed_ = false;
- start_ = start;
- address_mask_ = ~(maximum_capacity - 1);
- object_mask_ = address_mask_ | kHeapObjectTagMask;
- object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
- age_mark_ = start_;
-}
-
-
-void SemiSpace::TearDown() {
- start_ = NULL;
- capacity_ = 0;
-}
-
-
-bool SemiSpace::Commit() {
- ASSERT(!is_committed());
- int pages = capacity_ / Page::kPageSize;
- Address end = start_ + maximum_capacity_;
- Address start = end - pages * Page::kPageSize;
- if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
- capacity_,
- executable())) {
- return false;
- }
-
- NewSpacePage* page = anchor();
- for (int i = 1; i <= pages; i++) {
- NewSpacePage* new_page =
- NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
- new_page->InsertAfter(page);
- page = new_page;
- }
-
- committed_ = true;
- Reset();
- return true;
-}
-
-
-bool SemiSpace::Uncommit() {
- ASSERT(is_committed());
- Address start = start_ + maximum_capacity_ - capacity_;
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
- return false;
- }
- anchor()->set_next_page(anchor());
- anchor()->set_prev_page(anchor());
-
- committed_ = false;
- return true;
-}
-
-
-size_t SemiSpace::CommittedPhysicalMemory() {
- if (!is_committed()) return 0;
- size_t size = 0;
- NewSpacePageIterator it(this);
- while (it.has_next()) {
- size += it.next()->CommittedPhysicalMemory();
- }
- return size;
-}
-
-
-bool SemiSpace::GrowTo(int new_capacity) {
- if (!is_committed()) {
- if (!Commit()) return false;
- }
- ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
- ASSERT(new_capacity <= maximum_capacity_);
- ASSERT(new_capacity > capacity_);
- int pages_before = capacity_ / Page::kPageSize;
- int pages_after = new_capacity / Page::kPageSize;
-
- Address end = start_ + maximum_capacity_;
- Address start = end - new_capacity;
- size_t delta = new_capacity - capacity_;
-
- ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start, delta, executable())) {
- return false;
- }
- capacity_ = new_capacity;
- NewSpacePage* last_page = anchor()->prev_page();
- ASSERT(last_page != anchor());
- for (int i = pages_before + 1; i <= pages_after; i++) {
- Address page_address = end - i * Page::kPageSize;
- NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
- page_address,
- this);
- new_page->InsertAfter(last_page);
- Bitmap::Clear(new_page);
- // Duplicate the flags that was set on the old page.
- new_page->SetFlags(last_page->GetFlags(),
- NewSpacePage::kCopyOnFlipFlagsMask);
- last_page = new_page;
- }
- return true;
-}
-
-
-bool SemiSpace::ShrinkTo(int new_capacity) {
- ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
- ASSERT(new_capacity >= initial_capacity_);
- ASSERT(new_capacity < capacity_);
- if (is_committed()) {
- // Semispaces grow backwards from the end of their allocated capacity,
- // so we find the before and after start addresses relative to the
- // end of the space.
- Address space_end = start_ + maximum_capacity_;
- Address old_start = space_end - capacity_;
- size_t delta = capacity_ - new_capacity;
- ASSERT(IsAligned(delta, OS::AllocateAlignment()));
-
- MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
- if (!allocator->UncommitBlock(old_start, delta)) {
- return false;
- }
-
- int pages_after = new_capacity / Page::kPageSize;
- NewSpacePage* new_last_page =
- NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
- new_last_page->set_next_page(anchor());
- anchor()->set_prev_page(new_last_page);
- ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
- }
-
- capacity_ = new_capacity;
-
- return true;
-}
-
-
-void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
- anchor_.set_owner(this);
- // Fixup back-pointers to anchor. Address of anchor changes
- // when we swap.
- anchor_.prev_page()->set_next_page(&anchor_);
- anchor_.next_page()->set_prev_page(&anchor_);
-
- bool becomes_to_space = (id_ == kFromSpace);
- id_ = becomes_to_space ? kToSpace : kFromSpace;
- NewSpacePage* page = anchor_.next_page();
- while (page != &anchor_) {
- page->set_owner(this);
- page->SetFlags(flags, mask);
- if (becomes_to_space) {
- page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
- page->SetFlag(MemoryChunk::IN_TO_SPACE);
- page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
- page->ResetLiveBytes();
- } else {
- page->SetFlag(MemoryChunk::IN_FROM_SPACE);
- page->ClearFlag(MemoryChunk::IN_TO_SPACE);
- }
- ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
- ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
- page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
- page = page->next_page();
- }
-}
-
-
-void SemiSpace::Reset() {
- ASSERT(anchor_.next_page() != &anchor_);
- current_page_ = anchor_.next_page();
-}
-
-
-void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
- // We won't be swapping semispaces without data in them.
- ASSERT(from->anchor_.next_page() != &from->anchor_);
- ASSERT(to->anchor_.next_page() != &to->anchor_);
-
- // Swap bits.
- SemiSpace tmp = *from;
- *from = *to;
- *to = tmp;
-
- // Fixup back-pointers to the page list anchor now that its address
- // has changed.
- // Swap to/from-space bits on pages.
- // Copy GC flags from old active space (from-space) to new (to-space).
- intptr_t flags = from->current_page()->GetFlags();
- to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
-
- from->FlipPages(0, 0);
-}
-
-
-void SemiSpace::set_age_mark(Address mark) {
- ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
- age_mark_ = mark;
- // Mark all pages up to the one containing mark.
- NewSpacePageIterator it(space_start(), mark);
- while (it.has_next()) {
- it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
- }
-}
-
-
-#ifdef DEBUG
-void SemiSpace::Print() { }
-#endif
-
-#ifdef VERIFY_HEAP
-void SemiSpace::Verify() {
- bool is_from_space = (id_ == kFromSpace);
- NewSpacePage* page = anchor_.next_page();
- CHECK(anchor_.semi_space() == this);
- while (page != &anchor_) {
- CHECK(page->semi_space() == this);
- CHECK(page->InNewSpace());
- CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
- : MemoryChunk::IN_TO_SPACE));
- CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
- : MemoryChunk::IN_FROM_SPACE));
- CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
- if (!is_from_space) {
- // The pointers-from-here-are-interesting flag isn't updated dynamically
- // on from-space pages, so it might be out of sync with the marking state.
- if (page->heap()->incremental_marking()->IsMarking()) {
- CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
- } else {
- CHECK(!page->IsFlagSet(
- MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
- }
- // TODO(gc): Check that the live_bytes_count_ field matches the
- // black marking on the page (if we make it match in new-space).
- }
- CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
- CHECK(page->prev_page()->next_page() == page);
- page = page->next_page();
- }
-}
-#endif
-
-#ifdef DEBUG
-void SemiSpace::AssertValidRange(Address start, Address end) {
- // Addresses belong to same semi-space
- NewSpacePage* page = NewSpacePage::FromLimit(start);
- NewSpacePage* end_page = NewSpacePage::FromLimit(end);
- SemiSpace* space = page->semi_space();
- CHECK_EQ(space, end_page->semi_space());
- // Start address is before end address, either on same page,
- // or end address is on a later page in the linked list of
- // semi-space pages.
- if (page == end_page) {
- CHECK(start <= end);
- } else {
- while (page != end_page) {
- page = page->next_page();
- CHECK_NE(page, space->anchor());
- }
- }
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// SemiSpaceIterator implementation.
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
- Initialize(space->bottom(), space->top(), NULL);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
- HeapObjectCallback size_func) {
- Initialize(space->bottom(), space->top(), size_func);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
- Initialize(start, space->top(), NULL);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
- Initialize(from, to, NULL);
-}
-
-
-void SemiSpaceIterator::Initialize(Address start,
- Address end,
- HeapObjectCallback size_func) {
- SemiSpace::AssertValidRange(start, end);
- current_ = start;
- limit_ = end;
- size_func_ = size_func;
-}
-
-
-#ifdef DEBUG
-// heap_histograms is shared, always clear it before using it.
-static void ClearHistograms() {
- Isolate* isolate = Isolate::Current();
- // We reset the name each time, though it hasn't changed.
-#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
- INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
-#undef DEF_TYPE_NAME
-
-#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
- INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
-#undef CLEAR_HISTOGRAM
-
- isolate->js_spill_information()->Clear();
-}
-
-
-static void ClearCodeKindStatistics() {
- Isolate* isolate = Isolate::Current();
- for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- isolate->code_kind_statistics()[i] = 0;
- }
-}
-
-
-static void ReportCodeKindStatistics() {
- Isolate* isolate = Isolate::Current();
- const char* table[Code::NUMBER_OF_KINDS] = { NULL };
-
-#define CASE(name) \
- case Code::name: table[Code::name] = #name; \
- break
-
- for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- switch (static_cast<Code::Kind>(i)) {
- CASE(FUNCTION);
- CASE(OPTIMIZED_FUNCTION);
- CASE(STUB);
- CASE(COMPILED_STUB);
- CASE(BUILTIN);
- CASE(LOAD_IC);
- CASE(KEYED_LOAD_IC);
- CASE(STORE_IC);
- CASE(KEYED_STORE_IC);
- CASE(CALL_IC);
- CASE(KEYED_CALL_IC);
- CASE(UNARY_OP_IC);
- CASE(BINARY_OP_IC);
- CASE(COMPARE_IC);
- CASE(TO_BOOLEAN_IC);
- }
- }
-
-#undef CASE
-
- PrintF("\n Code kind histograms: \n");
- for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- if (isolate->code_kind_statistics()[i] > 0) {
- PrintF(" %-20s: %10d bytes\n", table[i],
- isolate->code_kind_statistics()[i]);
- }
- }
- PrintF("\n");
-}
-
-
-static int CollectHistogramInfo(HeapObject* obj) {
- Isolate* isolate = Isolate::Current();
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- ASSERT(isolate->heap_histograms()[type].name() != NULL);
- isolate->heap_histograms()[type].increment_number(1);
- isolate->heap_histograms()[type].increment_bytes(obj->Size());
-
- if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
- JSObject::cast(obj)->IncrementSpillStatistics(
- isolate->js_spill_information());
- }
-
- return obj->Size();
-}
-
-
-static void ReportHistogram(bool print_spill) {
- Isolate* isolate = Isolate::Current();
- PrintF("\n Object Histogram:\n");
- for (int i = 0; i <= LAST_TYPE; i++) {
- if (isolate->heap_histograms()[i].number() > 0) {
- PrintF(" %-34s%10d (%10d bytes)\n",
- isolate->heap_histograms()[i].name(),
- isolate->heap_histograms()[i].number(),
- isolate->heap_histograms()[i].bytes());
- }
- }
- PrintF("\n");
-
- // Summarize string types.
- int string_number = 0;
- int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name) \
- string_number += isolate->heap_histograms()[type].number(); \
- string_bytes += isolate->heap_histograms()[type].bytes();
- STRING_TYPE_LIST(INCREMENT)
-#undef INCREMENT
- if (string_number > 0) {
- PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
- string_bytes);
- }
-
- if (FLAG_collect_heap_spill_statistics && print_spill) {
- isolate->js_spill_information()->Print();
- }
-}
-#endif // DEBUG
-
-
-// Support for statistics gathering for --heap-stats and --log-gc.
-void NewSpace::ClearHistograms() {
- for (int i = 0; i <= LAST_TYPE; i++) {
- allocated_histogram_[i].clear();
- promoted_histogram_[i].clear();
- }
-}
-
-// Because the copying collector does not touch garbage objects, we iterate
-// the new space before a collection to get a histogram of allocated objects.
-// This only happens when --log-gc flag is set.
-void NewSpace::CollectStatistics() {
- ClearHistograms();
- SemiSpaceIterator it(this);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
- RecordAllocation(obj);
-}
-
-
-static void DoReportStatistics(Isolate* isolate,
- HistogramInfo* info, const char* description) {
- LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
- // Lump all the string types together.
- int string_number = 0;
- int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name) \
- string_number += info[type].number(); \
- string_bytes += info[type].bytes();
- STRING_TYPE_LIST(INCREMENT)
-#undef INCREMENT
- if (string_number > 0) {
- LOG(isolate,
- HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
- }
-
- // Then do the other types.
- for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
- if (info[i].number() > 0) {
- LOG(isolate,
- HeapSampleItemEvent(info[i].name(), info[i].number(),
- info[i].bytes()));
- }
- }
- LOG(isolate, HeapSampleEndEvent("NewSpace", description));
-}
-
-
-void NewSpace::ReportStatistics() {
-#ifdef DEBUG
- if (FLAG_heap_stats) {
- float pct = static_cast<float>(Available()) / Capacity();
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
- ", available: %" V8_PTR_PREFIX "d, %%%d\n",
- Capacity(), Available(), static_cast<int>(pct*100));
- PrintF("\n Object Histogram:\n");
- for (int i = 0; i <= LAST_TYPE; i++) {
- if (allocated_histogram_[i].number() > 0) {
- PrintF(" %-34s%10d (%10d bytes)\n",
- allocated_histogram_[i].name(),
- allocated_histogram_[i].number(),
- allocated_histogram_[i].bytes());
- }
- }
- PrintF("\n");
- }
-#endif // DEBUG
-
- if (FLAG_log_gc) {
- Isolate* isolate = ISOLATE;
- DoReportStatistics(isolate, allocated_histogram_, "allocated");
- DoReportStatistics(isolate, promoted_histogram_, "promoted");
- }
-}
-
-
-void NewSpace::RecordAllocation(HeapObject* obj) {
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- allocated_histogram_[type].increment_number(1);
- allocated_histogram_[type].increment_bytes(obj->Size());
-}
-
-
-void NewSpace::RecordPromotion(HeapObject* obj) {
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- promoted_histogram_[type].increment_number(1);
- promoted_histogram_[type].increment_bytes(obj->Size());
-}
-
-
-size_t NewSpace::CommittedPhysicalMemory() {
- if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- size_t size = to_space_.CommittedPhysicalMemory();
- if (from_space_.is_committed()) {
- size += from_space_.CommittedPhysicalMemory();
- }
- return size;
-}
-
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces implementation
-
-void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
- ASSERT(size_in_bytes > 0);
- ASSERT(IsAligned(size_in_bytes, kPointerSize));
-
- // We write a map and possibly size information to the block. If the block
- // is big enough to be a FreeSpace with at least one extra word (the next
- // pointer), we set its map to be the free space map and its size to an
- // appropriate array length for the desired size from HeapObject::Size().
- // If the block is too small (eg, one or two words), to hold both a size
- // field and a next pointer, we give it a filler map that gives it the
- // correct size.
- if (size_in_bytes > FreeSpace::kHeaderSize) {
- set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
- // Can't use FreeSpace::cast because it fails during deserialization.
- FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
- this_as_free_space->set_size(size_in_bytes);
- } else if (size_in_bytes == kPointerSize) {
- set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
- } else if (size_in_bytes == 2 * kPointerSize) {
- set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
- } else {
- UNREACHABLE();
- }
- // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
- // deserialization because the free space map is not done yet.
-}
-
-
-FreeListNode* FreeListNode::next() {
- ASSERT(IsFreeListNode(this));
- if (map() == GetHeap()->raw_unchecked_free_space_map()) {
- ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
- return reinterpret_cast<FreeListNode*>(
- Memory::Address_at(address() + kNextOffset));
- } else {
- return reinterpret_cast<FreeListNode*>(
- Memory::Address_at(address() + kPointerSize));
- }
-}
-
-
-FreeListNode** FreeListNode::next_address() {
- ASSERT(IsFreeListNode(this));
- if (map() == GetHeap()->raw_unchecked_free_space_map()) {
- ASSERT(Size() >= kNextOffset + kPointerSize);
- return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
- } else {
- return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
- }
-}
-
-
-void FreeListNode::set_next(FreeListNode* next) {
- ASSERT(IsFreeListNode(this));
- // While we are booting the VM the free space map will actually be null. So
- // we have to make sure that we don't try to use it for anything at that
- // stage.
- if (map() == GetHeap()->raw_unchecked_free_space_map()) {
- ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
- Memory::Address_at(address() + kNextOffset) =
- reinterpret_cast<Address>(next);
- } else {
- Memory::Address_at(address() + kPointerSize) =
- reinterpret_cast<Address>(next);
- }
-}
-
-
-intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
- intptr_t free_bytes = 0;
- if (category->top_ != NULL) {
- ASSERT(category->end_ != NULL);
- // This is safe (not going to deadlock) since Concatenate operations
- // are never performed on the same free lists at the same time in
- // reverse order.
- ScopedLock lock_target(mutex_);
- ScopedLock lock_source(category->mutex());
- free_bytes = category->available();
- if (end_ == NULL) {
- end_ = category->end();
- } else {
- category->end()->set_next(top_);
- }
- top_ = category->top();
- available_ += category->available();
- category->Reset();
- }
- return free_bytes;
-}
-
-
-void FreeListCategory::Reset() {
- top_ = NULL;
- end_ = NULL;
- available_ = 0;
-}
-
-
-intptr_t FreeListCategory::CountFreeListItemsInList(Page* p) {
- int sum = 0;
- FreeListNode* n = top_;
- while (n != NULL) {
- if (Page::FromAddress(n->address()) == p) {
- FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
- sum += free_space->Size();
- }
- n = n->next();
- }
- return sum;
-}
-
-
-intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
- int sum = 0;
- FreeListNode** n = &top_;
- while (*n != NULL) {
- if (Page::FromAddress((*n)->address()) == p) {
- FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
- sum += free_space->Size();
- *n = (*n)->next();
- } else {
- n = (*n)->next_address();
- }
- }
- if (top_ == NULL) {
- end_ = NULL;
- }
- available_ -= sum;
- return sum;
-}
-
-
-FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
- FreeListNode* node = top_;
-
- if (node == NULL) return NULL;
-
- while (node != NULL &&
- Page::FromAddress(node->address())->IsEvacuationCandidate()) {
- available_ -= node->Size();
- node = node->next();
- }
-
- if (node != NULL) {
- set_top(node->next());
- *node_size = node->Size();
- available_ -= *node_size;
- } else {
- set_top(NULL);
- }
-
- if (top() == NULL) {
- set_end(NULL);
- }
-
- return node;
-}
-
-
-void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
- node->set_next(top_);
- top_ = node;
- if (end_ == NULL) {
- end_ = node;
- }
- available_ += size_in_bytes;
-}
-
-
-void FreeListCategory::RepairFreeList(Heap* heap) {
- FreeListNode* n = top_;
- while (n != NULL) {
- Map** map_location = reinterpret_cast<Map**>(n->address());
- if (*map_location == NULL) {
- *map_location = heap->free_space_map();
- } else {
- ASSERT(*map_location == heap->free_space_map());
- }
- n = n->next();
- }
-}
-
-
-FreeList::FreeList(PagedSpace* owner)
- : owner_(owner), heap_(owner->heap()) {
- Reset();
-}
-
-
-intptr_t FreeList::Concatenate(FreeList* free_list) {
- intptr_t free_bytes = 0;
- free_bytes += small_list_.Concatenate(free_list->small_list());
- free_bytes += medium_list_.Concatenate(free_list->medium_list());
- free_bytes += large_list_.Concatenate(free_list->large_list());
- free_bytes += huge_list_.Concatenate(free_list->huge_list());
- return free_bytes;
-}
-
-
-void FreeList::Reset() {
- small_list_.Reset();
- medium_list_.Reset();
- large_list_.Reset();
- huge_list_.Reset();
-}
-
-
-int FreeList::Free(Address start, int size_in_bytes) {
- if (size_in_bytes == 0) return 0;
-
- FreeListNode* node = FreeListNode::FromAddress(start);
- node->set_size(heap_, size_in_bytes);
-
- // Early return to drop too-small blocks on the floor.
- if (size_in_bytes < kSmallListMin) return size_in_bytes;
-
- // Insert other blocks at the head of a free list of the appropriate
- // magnitude.
- if (size_in_bytes <= kSmallListMax) {
- small_list_.Free(node, size_in_bytes);
- } else if (size_in_bytes <= kMediumListMax) {
- medium_list_.Free(node, size_in_bytes);
- } else if (size_in_bytes <= kLargeListMax) {
- large_list_.Free(node, size_in_bytes);
- } else {
- huge_list_.Free(node, size_in_bytes);
- }
-
- ASSERT(IsVeryLong() || available() == SumFreeLists());
- return 0;
-}
-
-
-FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
- FreeListNode* node = NULL;
-
- if (size_in_bytes <= kSmallAllocationMax) {
- node = small_list_.PickNodeFromList(node_size);
- if (node != NULL) return node;
- }
-
- if (size_in_bytes <= kMediumAllocationMax) {
- node = medium_list_.PickNodeFromList(node_size);
- if (node != NULL) return node;
- }
-
- if (size_in_bytes <= kLargeAllocationMax) {
- node = large_list_.PickNodeFromList(node_size);
- if (node != NULL) return node;
- }
-
- int huge_list_available = huge_list_.available();
- for (FreeListNode** cur = huge_list_.GetTopAddress();
- *cur != NULL;
- cur = (*cur)->next_address()) {
- FreeListNode* cur_node = *cur;
- while (cur_node != NULL &&
- Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
- huge_list_available -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
- cur_node = cur_node->next();
- }
-
- *cur = cur_node;
- if (cur_node == NULL) {
- huge_list_.set_end(NULL);
- break;
- }
-
- ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
- FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
- int size = cur_as_free_space->Size();
- if (size >= size_in_bytes) {
- // Large enough node found. Unlink it from the list.
- node = *cur;
- *cur = node->next();
- *node_size = size;
- huge_list_available -= size;
- break;
- }
- }
-
- if (huge_list_.top() == NULL) {
- huge_list_.set_end(NULL);
- }
-
- huge_list_.set_available(huge_list_available);
- ASSERT(IsVeryLong() || available() == SumFreeLists());
-
- return node;
-}
-
-
-// Allocation on the old space free list. If it succeeds then a new linear
-// allocation space has been set up with the top and limit of the space. If
-// the allocation fails then NULL is returned, and the caller can perform a GC
-// or allocate a new page before retrying.
-HeapObject* FreeList::Allocate(int size_in_bytes) {
- ASSERT(0 < size_in_bytes);
- ASSERT(size_in_bytes <= kMaxBlockSize);
- ASSERT(IsAligned(size_in_bytes, kPointerSize));
- // Don't free list allocate if there is linear space available.
- ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
-
- int new_node_size = 0;
- FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
- if (new_node == NULL) return NULL;
-
-
- int bytes_left = new_node_size - size_in_bytes;
- ASSERT(bytes_left >= 0);
-
- int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap. This also puts it back in the free list
- // if it is big enough.
- owner_->Free(owner_->top(), old_linear_size);
-
- owner_->heap()->incremental_marking()->OldSpaceStep(
- size_in_bytes - old_linear_size);
-
-#ifdef DEBUG
- for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
- reinterpret_cast<Object**>(new_node->address())[i] =
- Smi::FromInt(kCodeZapValue);
- }
-#endif
-
- // The old-space-step might have finished sweeping and restarted marking.
- // Verify that it did not turn the page of the new node into an evacuation
- // candidate.
- ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-
- const int kThreshold = IncrementalMarking::kAllocatedThreshold;
-
- // Memory in the linear allocation area is counted as allocated. We may free
- // a little of this again immediately - see below.
- owner_->Allocate(new_node_size);
-
- if (bytes_left > kThreshold &&
- owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
- FLAG_incremental_marking_steps) {
- int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
- // We don't want to give too large linear areas to the allocator while
- // incremental marking is going on, because we won't check again whether
- // we want to do another increment until the linear area is used up.
- owner_->Free(new_node->address() + size_in_bytes + linear_size,
- new_node_size - size_in_bytes - linear_size);
- owner_->SetTop(new_node->address() + size_in_bytes,
- new_node->address() + size_in_bytes + linear_size);
- } else if (bytes_left > 0) {
- // Normally we give the rest of the node to the allocator as its new
- // linear allocation area.
- owner_->SetTop(new_node->address() + size_in_bytes,
- new_node->address() + new_node_size);
- } else {
- // TODO(gc) Try not freeing linear allocation region when bytes_left
- // are zero.
- owner_->SetTop(NULL, NULL);
- }
-
- return new_node;
-}
-
-
-void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
- sizes->huge_size_ = huge_list_.CountFreeListItemsInList(p);
- if (sizes->huge_size_ < p->area_size()) {
- sizes->small_size_ = small_list_.CountFreeListItemsInList(p);
- sizes->medium_size_ = medium_list_.CountFreeListItemsInList(p);
- sizes->large_size_ = large_list_.CountFreeListItemsInList(p);
- } else {
- sizes->small_size_ = 0;
- sizes->medium_size_ = 0;
- sizes->large_size_ = 0;
- }
-}
-
-
-intptr_t FreeList::EvictFreeListItems(Page* p) {
- intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
-
- if (sum < p->area_size()) {
- sum += small_list_.EvictFreeListItemsInList(p) +
- medium_list_.EvictFreeListItemsInList(p) +
- large_list_.EvictFreeListItemsInList(p);
- }
-
- return sum;
-}
-
-
-void FreeList::RepairLists(Heap* heap) {
- small_list_.RepairFreeList(heap);
- medium_list_.RepairFreeList(heap);
- large_list_.RepairFreeList(heap);
- huge_list_.RepairFreeList(heap);
-}
-
-
-#ifdef DEBUG
-intptr_t FreeListCategory::SumFreeList() {
- intptr_t sum = 0;
- FreeListNode* cur = top_;
- while (cur != NULL) {
- ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
- FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
- sum += cur_as_free_space->Size();
- cur = cur->next();
- }
- return sum;
-}
-
-
-static const int kVeryLongFreeList = 500;
-
-
-int FreeListCategory::FreeListLength() {
- int length = 0;
- FreeListNode* cur = top_;
- while (cur != NULL) {
- length++;
- cur = cur->next();
- if (length == kVeryLongFreeList) return length;
- }
- return length;
-}
-
-
-bool FreeList::IsVeryLong() {
- if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
- return false;
-}
-
-
-// This can take a very long time because it is linear in the number of entries
-// on the free list, so it should not be called if FreeListLength returns
-// kVeryLongFreeList.
-intptr_t FreeList::SumFreeLists() {
- intptr_t sum = small_list_.SumFreeList();
- sum += medium_list_.SumFreeList();
- sum += large_list_.SumFreeList();
- sum += huge_list_.SumFreeList();
- return sum;
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// OldSpace implementation
-
-bool NewSpace::ReserveSpace(int bytes) {
- // We can't reliably unpack a partial snapshot that needs more new space
- // space than the minimum NewSpace size. The limit can be set lower than
- // the end of new space either because there is more space on the next page
- // or because we have lowered the limit in order to get periodic incremental
- // marking. The most reliable way to ensure that there is linear space is
- // to do the allocation, then rewind the limit.
- ASSERT(bytes <= InitialCapacity());
- MaybeObject* maybe = AllocateRaw(bytes);
- Object* object = NULL;
- if (!maybe->ToObject(&object)) return false;
- HeapObject* allocation = HeapObject::cast(object);
- Address top = allocation_info_.top;
- if ((top - bytes) == allocation->address()) {
- allocation_info_.top = allocation->address();
- return true;
- }
- // There may be a borderline case here where the allocation succeeded, but
- // the limit and top have moved on to a new page. In that case we try again.
- return ReserveSpace(bytes);
-}
-
-
-void PagedSpace::PrepareForMarkCompact() {
- // We don't have a linear allocation area while sweeping. It will be restored
- // on the first allocation after the sweep.
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap.
- int old_linear_size = static_cast<int>(limit() - top());
- Free(top(), old_linear_size);
- SetTop(NULL, NULL);
-
- // Stop lazy sweeping and clear marking bits for unswept pages.
- if (first_unswept_page_ != NULL) {
- Page* p = first_unswept_page_;
- do {
- // Do not use ShouldBeSweptLazily predicate here.
- // New evacuation candidates were selected but they still have
- // to be swept before collection starts.
- if (!p->WasSwept()) {
- Bitmap::Clear(p);
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
- reinterpret_cast<intptr_t>(p));
- }
- }
- p = p->next_page();
- } while (p != anchor());
- }
- first_unswept_page_ = Page::FromAddress(NULL);
- unswept_free_bytes_ = 0;
-
- // Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_.Reset();
-}
-
-
-bool PagedSpace::ReserveSpace(int size_in_bytes) {
- ASSERT(size_in_bytes <= AreaSize());
- ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
- Address current_top = allocation_info_.top;
- Address new_top = current_top + size_in_bytes;
- if (new_top <= allocation_info_.limit) return true;
-
- HeapObject* new_area = free_list_.Allocate(size_in_bytes);
- if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
- if (new_area == NULL) return false;
-
- int old_linear_size = static_cast<int>(limit() - top());
- // Mark the old linear allocation area with a free space so it can be
- // skipped when scanning the heap. This also puts it back in the free list
- // if it is big enough.
- Free(top(), old_linear_size);
-
- SetTop(new_area->address(), new_area->address() + size_in_bytes);
- return true;
-}
-
-
-// After we have booted, we have created a map which represents free space
-// on the heap. If there was already a free list then the elements on it
-// were created with the wrong FreeSpaceMap (normally NULL), so we need to
-// fix them.
-void PagedSpace::RepairFreeListsAfterBoot() {
- free_list_.RepairLists(heap());
-}
-
-
-// You have to call this last, since the implementation from PagedSpace
-// doesn't know that memory was 'promised' to large object space.
-bool LargeObjectSpace::ReserveSpace(int bytes) {
- return heap()->OldGenerationCapacityAvailable() >= bytes &&
- (!heap()->incremental_marking()->IsStopped() ||
- heap()->OldGenerationSpaceAvailable() >= bytes);
-}
-
-
-bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
- if (IsLazySweepingComplete()) return true;
-
- intptr_t freed_bytes = 0;
- Page* p = first_unswept_page_;
- do {
- Page* next_page = p->next_page();
- if (ShouldBeSweptLazily(p)) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
- reinterpret_cast<intptr_t>(p));
- }
- DecreaseUnsweptFreeBytes(p);
- freed_bytes +=
- MarkCompactCollector::
- SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
- this, NULL, p);
- }
- p = next_page;
- } while (p != anchor() && freed_bytes < bytes_to_sweep);
-
- if (p == anchor()) {
- first_unswept_page_ = Page::FromAddress(NULL);
- } else {
- first_unswept_page_ = p;
- }
-
- heap()->FreeQueuedChunks();
-
- return IsLazySweepingComplete();
-}
-
-
-void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
- if (allocation_info_.top >= allocation_info_.limit) return;
-
- if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
- // Create filler object to keep page iterable if it was iterable.
- int remaining =
- static_cast<int>(allocation_info_.limit - allocation_info_.top);
- heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
-
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
- }
-}
-
-
-bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->AreSweeperThreadsActivated()) {
- if (FLAG_concurrent_sweeping) {
- if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
- collector->WaitUntilSweepingCompleted();
- collector->FinalizeSweeping();
- return true;
- }
- return false;
- }
- return true;
- } else {
- return AdvanceSweeper(size_in_bytes);
- }
-}
-
-
-HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
- // Allocation in this space has failed.
-
- // If there are unswept pages advance lazy sweeper a bounded number of times
- // until we find a size_in_bytes contiguous piece of memory
- const int kMaxSweepingTries = 5;
- bool sweeping_complete = false;
-
- for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
- sweeping_complete = EnsureSweeperProgress(size_in_bytes);
-
- // Retry the free list allocation.
- HeapObject* object = free_list_.Allocate(size_in_bytes);
- if (object != NULL) return object;
- }
-
- // Free list allocation failed and there is no next page. Fail if we have
- // hit the old generation size limit that should cause a garbage
- // collection.
- if (!heap()->always_allocate() &&
- heap()->OldGenerationAllocationLimitReached()) {
- return NULL;
- }
-
- // Try to expand the space and allocate in the new next page.
- if (Expand()) {
- return free_list_.Allocate(size_in_bytes);
- }
-
- // Last ditch, sweep all the remaining pages to try to find space. This may
- // cause a pause.
- if (!IsLazySweepingComplete()) {
- EnsureSweeperProgress(kMaxInt);
-
- // Retry the free list allocation.
- HeapObject* object = free_list_.Allocate(size_in_bytes);
- if (object != NULL) return object;
- }
-
- // Finally, fail.
- return NULL;
-}
-
-
-#ifdef DEBUG
-void PagedSpace::ReportCodeStatistics() {
- Isolate* isolate = Isolate::Current();
- CommentStatistic* comments_statistics =
- isolate->paged_space_comments_statistics();
- ReportCodeKindStatistics();
- PrintF("Code comment statistics (\" [ comment-txt : size/ "
- "count (average)\"):\n");
- for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
- const CommentStatistic& cs = comments_statistics[i];
- if (cs.size > 0) {
- PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
- cs.size/cs.count);
- }
- }
- PrintF("\n");
-}
-
-
-void PagedSpace::ResetCodeStatistics() {
- Isolate* isolate = Isolate::Current();
- CommentStatistic* comments_statistics =
- isolate->paged_space_comments_statistics();
- ClearCodeKindStatistics();
- for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
- comments_statistics[i].Clear();
- }
- comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
- comments_statistics[CommentStatistic::kMaxComments].size = 0;
- comments_statistics[CommentStatistic::kMaxComments].count = 0;
-}
-
-
-// Adds comment to 'comment_statistics' table. Performance OK as long as
-// 'kMaxComments' is small
-static void EnterComment(Isolate* isolate, const char* comment, int delta) {
- CommentStatistic* comments_statistics =
- isolate->paged_space_comments_statistics();
- // Do not count empty comments
- if (delta <= 0) return;
- CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
- // Search for a free or matching entry in 'comments_statistics': 'cs'
- // points to result.
- for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
- if (comments_statistics[i].comment == NULL) {
- cs = &comments_statistics[i];
- cs->comment = comment;
- break;
- } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
- cs = &comments_statistics[i];
- break;
- }
- }
- // Update entry for 'comment'
- cs->size += delta;
- cs->count += 1;
-}
-
-
-// Call for each nested comment start (start marked with '[ xxx', end marked
-// with ']'. RelocIterator 'it' must point to a comment reloc info.
-static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
- ASSERT(!it->done());
- ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
- const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
- if (tmp[0] != '[') {
- // Not a nested comment; skip
- return;
- }
-
- // Search for end of nested comment or a new nested comment
- const char* const comment_txt =
- reinterpret_cast<const char*>(it->rinfo()->data());
- const byte* prev_pc = it->rinfo()->pc();
- int flat_delta = 0;
- it->next();
- while (true) {
- // All nested comments must be terminated properly, and therefore exit
- // from loop.
- ASSERT(!it->done());
- if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
- const char* const txt =
- reinterpret_cast<const char*>(it->rinfo()->data());
- flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
- if (txt[0] == ']') break; // End of nested comment
- // A new comment
- CollectCommentStatistics(isolate, it);
- // Skip code that was covered with previous comment
- prev_pc = it->rinfo()->pc();
- }
- it->next();
- }
- EnterComment(isolate, comment_txt, flat_delta);
-}
-
-
-// Collects code size statistics:
-// - by code kind
-// - by code comment
-void PagedSpace::CollectCodeStatistics() {
- Isolate* isolate = heap()->isolate();
- HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
- if (obj->IsCode()) {
- Code* code = Code::cast(obj);
- isolate->code_kind_statistics()[code->kind()] += code->Size();
- RelocIterator it(code);
- int delta = 0;
- const byte* prev_pc = code->instruction_start();
- while (!it.done()) {
- if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
- delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
- CollectCommentStatistics(isolate, &it);
- prev_pc = it.rinfo()->pc();
- }
- it.next();
- }
-
- ASSERT(code->instruction_start() <= prev_pc &&
- prev_pc <= code->instruction_end());
- delta += static_cast<int>(code->instruction_end() - prev_pc);
- EnterComment(isolate, "NoComment", delta);
- }
- }
-}
-
-
-void PagedSpace::ReportStatistics() {
- int pct = static_cast<int>(Available() * 100 / Capacity());
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
- ", waste: %" V8_PTR_PREFIX "d"
- ", available: %" V8_PTR_PREFIX "d, %%%d\n",
- Capacity(), Waste(), Available(), pct);
-
- if (was_swept_conservatively_) return;
- ClearHistograms();
- HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
- CollectHistogramInfo(obj);
- ReportHistogram(true);
-}
-#endif
-
-// -----------------------------------------------------------------------------
-// FixedSpace implementation
-
-void FixedSpace::PrepareForMarkCompact() {
- // Call prepare of the super class.
- PagedSpace::PrepareForMarkCompact();
-
- // During a non-compacting collection, everything below the linear
- // allocation pointer except wasted top-of-page blocks is considered
- // allocated and we will rediscover available bytes during the
- // collection.
- accounting_stats_.AllocateBytes(free_list_.available());
-
- // Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_.Reset();
-}
-
-
-// -----------------------------------------------------------------------------
-// MapSpace implementation
-// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
-// there is at least one non-inlined virtual function. I would prefer to hide
-// the VerifyObject definition behind VERIFY_HEAP.
-
-void MapSpace::VerifyObject(HeapObject* object) {
- // The object should be a map or a free-list node.
- CHECK(object->IsMap() || object->IsFreeSpace());
-}
-
-
-// -----------------------------------------------------------------------------
-// GlobalPropertyCellSpace implementation
-// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
-// there is at least one non-inlined virtual function. I would prefer to hide
-// the VerifyObject definition behind VERIFY_HEAP.
-
-void CellSpace::VerifyObject(HeapObject* object) {
- // The object should be a global object property cell or a free-list node.
- CHECK(object->IsJSGlobalPropertyCell() ||
- object->map() == heap()->two_pointer_filler_map());
-}
-
-
-// -----------------------------------------------------------------------------
-// LargeObjectIterator
-
-LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
- current_ = space->first_page_;
- size_func_ = NULL;
-}
-
-
-LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
- HeapObjectCallback size_func) {
- current_ = space->first_page_;
- size_func_ = size_func;
-}
-
-
-HeapObject* LargeObjectIterator::Next() {
- if (current_ == NULL) return NULL;
-
- HeapObject* object = current_->GetObject();
- current_ = current_->next_page();
- return object;
-}
-
-
-// -----------------------------------------------------------------------------
-// LargeObjectSpace
-static bool ComparePointers(void* key1, void* key2) {
- return key1 == key2;
-}
-
-
-LargeObjectSpace::LargeObjectSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id)
- : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
- max_capacity_(max_capacity),
- first_page_(NULL),
- size_(0),
- page_count_(0),
- objects_size_(0),
- chunk_map_(ComparePointers, 1024) {}
-
-
-bool LargeObjectSpace::SetUp() {
- first_page_ = NULL;
- size_ = 0;
- page_count_ = 0;
- objects_size_ = 0;
- chunk_map_.Clear();
- return true;
-}
-
-
-void LargeObjectSpace::TearDown() {
- while (first_page_ != NULL) {
- LargePage* page = first_page_;
- first_page_ = first_page_->next_page();
- LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
-
- ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
- heap()->isolate()->memory_allocator()->PerformAllocationCallback(
- space, kAllocationActionFree, page->size());
- heap()->isolate()->memory_allocator()->Free(page);
- }
- SetUp();
-}
-
-
-MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
- Executability executable) {
- // Check if we want to force a GC before growing the old space further.
- // If so, fail the allocation.
- if (!heap()->always_allocate() &&
- heap()->OldGenerationAllocationLimitReached()) {
- return Failure::RetryAfterGC(identity());
- }
-
- if (Size() + object_size > max_capacity_) {
- return Failure::RetryAfterGC(identity());
- }
-
- LargePage* page = heap()->isolate()->memory_allocator()->
- AllocateLargePage(object_size, this, executable);
- if (page == NULL) return Failure::RetryAfterGC(identity());
- ASSERT(page->area_size() >= object_size);
-
- size_ += static_cast<int>(page->size());
- objects_size_ += object_size;
- page_count_++;
- page->set_next_page(first_page_);
- first_page_ = page;
-
- // Register all MemoryChunk::kAlignment-aligned chunks covered by
- // this large page in the chunk map.
- uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
- uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
- for (uintptr_t key = base; key <= limit; key++) {
- HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
- static_cast<uint32_t>(key),
- true);
- ASSERT(entry != NULL);
- entry->value = page;
- }
-
- HeapObject* object = page->GetObject();
-
- if (Heap::ShouldZapGarbage()) {
- // Make the object consistent so the heap can be verified in OldSpaceStep.
- // We only need to do this in debug builds or if verify_heap is on.
- reinterpret_cast<Object**>(object->address())[0] =
- heap()->fixed_array_map();
- reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
- }
-
- heap()->incremental_marking()->OldSpaceStep(object_size);
- return object;
-}
-
-
-size_t LargeObjectSpace::CommittedPhysicalMemory() {
- if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- size_t size = 0;
- LargePage* current = first_page_;
- while (current != NULL) {
- size += current->CommittedPhysicalMemory();
- current = current->next_page();
- }
- return size;
-}
-
-
-// GC support
-MaybeObject* LargeObjectSpace::FindObject(Address a) {
- LargePage* page = FindPage(a);
- if (page != NULL) {
- return page->GetObject();
- }
- return Failure::Exception();
-}
-
-
-LargePage* LargeObjectSpace::FindPage(Address a) {
- uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
- HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
- static_cast<uint32_t>(key),
- false);
- if (e != NULL) {
- ASSERT(e->value != NULL);
- LargePage* page = reinterpret_cast<LargePage*>(e->value);
- ASSERT(page->is_valid());
- if (page->Contains(a)) {
- return page;
- }
- }
- return NULL;
-}
-
-
-void LargeObjectSpace::FreeUnmarkedObjects() {
- LargePage* previous = NULL;
- LargePage* current = first_page_;
- while (current != NULL) {
- HeapObject* object = current->GetObject();
- // Can this large page contain pointers to non-trivial objects. No other
- // pointer object is this big.
- bool is_pointer_object = object->IsFixedArray();
- MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (mark_bit.Get()) {
- mark_bit.Clear();
- Page::FromAddress(object->address())->ResetProgressBar();
- Page::FromAddress(object->address())->ResetLiveBytes();
- previous = current;
- current = current->next_page();
- } else {
- LargePage* page = current;
- // Cut the chunk out from the chunk list.
- current = current->next_page();
- if (previous == NULL) {
- first_page_ = current;
- } else {
- previous->set_next_page(current);
- }
-
- // Free the chunk.
- heap()->mark_compact_collector()->ReportDeleteIfNeeded(
- object, heap()->isolate());
- size_ -= static_cast<int>(page->size());
- objects_size_ -= object->Size();
- page_count_--;
-
- // Remove entries belonging to this page.
- // Use variable alignment to help pass length check (<= 80 characters)
- // of single line in tools/presubmit.py.
- const intptr_t alignment = MemoryChunk::kAlignment;
- uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
- uintptr_t limit = base + (page->size()-1)/alignment;
- for (uintptr_t key = base; key <= limit; key++) {
- chunk_map_.Remove(reinterpret_cast<void*>(key),
- static_cast<uint32_t>(key));
- }
-
- if (is_pointer_object) {
- heap()->QueueMemoryChunkForFree(page);
- } else {
- heap()->isolate()->memory_allocator()->Free(page);
- }
- }
- }
- heap()->FreeQueuedChunks();
-}
-
-
-bool LargeObjectSpace::Contains(HeapObject* object) {
- Address address = object->address();
- MemoryChunk* chunk = MemoryChunk::FromAddress(address);
-
- bool owned = (chunk->owner() == this);
-
- SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
-
- return owned;
-}
-
-
-#ifdef VERIFY_HEAP
-// We do not assume that the large object iterator works, because it depends
-// on the invariants we are checking during verification.
-void LargeObjectSpace::Verify() {
- for (LargePage* chunk = first_page_;
- chunk != NULL;
- chunk = chunk->next_page()) {
- // Each chunk contains an object that starts at the large object page's
- // object area start.
- HeapObject* object = chunk->GetObject();
- Page* page = Page::FromAddress(object->address());
- CHECK(object->address() == page->area_start());
-
- // The first word should be a map, and we expect all map pointers to be
- // in map space.
- Map* map = object->map();
- CHECK(map->IsMap());
- CHECK(heap()->map_space()->Contains(map));
-
- // We have only code, sequential strings, external strings
- // (sequential strings that have been morphed into external
- // strings), fixed arrays, and byte arrays in large object space.
- CHECK(object->IsCode() || object->IsSeqString() ||
- object->IsExternalString() || object->IsFixedArray() ||
- object->IsFixedDoubleArray() || object->IsByteArray());
-
- // The object itself should look OK.
- object->Verify();
-
- // Byte arrays and strings don't have interior pointers.
- if (object->IsCode()) {
- VerifyPointersVisitor code_visitor;
- object->IterateBody(map->instance_type(),
- object->Size(),
- &code_visitor);
- } else if (object->IsFixedArray()) {
- FixedArray* array = FixedArray::cast(object);
- for (int j = 0; j < array->length(); j++) {
- Object* element = array->get(j);
- if (element->IsHeapObject()) {
- HeapObject* element_object = HeapObject::cast(element);
- CHECK(heap()->Contains(element_object));
- CHECK(element_object->map()->IsMap());
- }
- }
- }
- }
-}
-#endif
-
-
-#ifdef DEBUG
-void LargeObjectSpace::Print() {
- LargeObjectIterator it(this);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- obj->Print();
- }
-}
-
-
-void LargeObjectSpace::ReportStatistics() {
- PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
- int num_objects = 0;
- ClearHistograms();
- LargeObjectIterator it(this);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- num_objects++;
- CollectHistogramInfo(obj);
- }
-
- PrintF(" number of objects %d, "
- "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
- if (num_objects > 0) ReportHistogram(false);
-}
-
-
-void LargeObjectSpace::CollectCodeStatistics() {
- Isolate* isolate = heap()->isolate();
- LargeObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
- if (obj->IsCode()) {
- Code* code = Code::cast(obj);
- isolate->code_kind_statistics()[code->kind()] += code->Size();
- }
- }
-}
-
-
-void Page::Print() {
- // Make a best-effort to print the objects in the page.
- PrintF("Page@%p in %s\n",
- this->address(),
- AllocationSpaceName(this->owner()->identity()));
- printf(" --------------------------------------\n");
- HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
- unsigned mark_size = 0;
- for (HeapObject* object = objects.Next();
- object != NULL;
- object = objects.Next()) {
- bool is_marked = Marking::MarkBitFrom(object).Get();
- PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
- if (is_marked) {
- mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
- }
- object->ShortPrint();
- PrintF("\n");
- }
- printf(" --------------------------------------\n");
- printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
-}
-
-#endif // DEBUG
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/spaces.h b/src/3rdparty/v8/src/spaces.h
deleted file mode 100644
index e7e11db..0000000
--- a/src/3rdparty/v8/src/spaces.h
+++ /dev/null
@@ -1,2836 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SPACES_H_
-#define V8_SPACES_H_
-
-#include "allocation.h"
-#include "hashmap.h"
-#include "list.h"
-#include "log.h"
-
-namespace v8 {
-namespace internal {
-
-class Isolate;
-
-// -----------------------------------------------------------------------------
-// Heap structures:
-//
-// A JS heap consists of a young generation, an old generation, and a large
-// object space. The young generation is divided into two semispaces. A
-// scavenger implements Cheney's copying algorithm. The old generation is
-// separated into a map space and an old object space. The map space contains
-// all (and only) map objects, the rest of old objects go into the old space.
-// The old generation is collected by a mark-sweep-compact collector.
-//
-// The semispaces of the young generation are contiguous. The old and map
-// spaces consists of a list of pages. A page has a page header and an object
-// area.
-//
-// There is a separate large object space for objects larger than
-// Page::kMaxHeapObjectSize, so that they do not have to move during
-// collection. The large object space is paged. Pages in large object space
-// may be larger than the page size.
-//
-// A store-buffer based write barrier is used to keep track of intergenerational
-// references. See store-buffer.h.
-//
-// During scavenges and mark-sweep collections we sometimes (after a store
-// buffer overflow) iterate intergenerational pointers without decoding heap
-// object maps so if the page belongs to old pointer space or large object
-// space it is essential to guarantee that the page does not contain any
-// garbage pointers to new space: every pointer aligned word which satisfies
-// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
-// new space. Thus objects in old pointer and large object spaces should have a
-// special layout (e.g. no bare integer fields). This requirement does not
-// apply to map space which is iterated in a special fashion. However we still
-// require pointer fields of dead maps to be cleaned.
-//
-// To enable lazy cleaning of old space pages we can mark chunks of the page
-// as being garbage. Garbage sections are marked with a special map. These
-// sections are skipped when scanning the page, even if we are otherwise
-// scanning without regard for object boundaries. Garbage sections are chained
-// together to form a free list after a GC. Garbage sections created outside
-// of GCs by object trunctation etc. may not be in the free list chain. Very
-// small free spaces are ignored, they need only be cleaned of bogus pointers
-// into new space.
-//
-// Each page may have up to one special garbage section. The start of this
-// section is denoted by the top field in the space. The end of the section
-// is denoted by the limit field in the space. This special garbage section
-// is not marked with a free space map in the data. The point of this section
-// is to enable linear allocation without having to constantly update the byte
-// array every time the top field is updated and a new object is created. The
-// special garbage section is not in the chain of garbage sections.
-//
-// Since the top and limit fields are in the space, not the page, only one page
-// has a special garbage section, and if the top and limit are equal then there
-// is no special garbage section.
-
-// Some assertion macros used in the debugging mode.
-
-#define ASSERT_PAGE_ALIGNED(address) \
- ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
-
-#define ASSERT_OBJECT_ALIGNED(address) \
- ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
-
-#define ASSERT_OBJECT_SIZE(size) \
- ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
-
-#define ASSERT_PAGE_OFFSET(offset) \
- ASSERT((Page::kObjectStartOffset <= offset) \
- && (offset <= Page::kPageSize))
-
-#define ASSERT_MAP_PAGE_INDEX(index) \
- ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
-
-
-class PagedSpace;
-class MemoryAllocator;
-class AllocationInfo;
-class Space;
-class FreeList;
-class MemoryChunk;
-
-class MarkBit {
- public:
- typedef uint32_t CellType;
-
- inline MarkBit(CellType* cell, CellType mask, bool data_only)
- : cell_(cell), mask_(mask), data_only_(data_only) { }
-
- inline CellType* cell() { return cell_; }
- inline CellType mask() { return mask_; }
-
-#ifdef DEBUG
- bool operator==(const MarkBit& other) {
- return cell_ == other.cell_ && mask_ == other.mask_;
- }
-#endif
-
- inline void Set() { *cell_ |= mask_; }
- inline bool Get() { return (*cell_ & mask_) != 0; }
- inline void Clear() { *cell_ &= ~mask_; }
-
- inline bool data_only() { return data_only_; }
-
- inline MarkBit Next() {
- CellType new_mask = mask_ << 1;
- if (new_mask == 0) {
- return MarkBit(cell_ + 1, 1, data_only_);
- } else {
- return MarkBit(cell_, new_mask, data_only_);
- }
- }
-
- private:
- CellType* cell_;
- CellType mask_;
- // This boolean indicates that the object is in a data-only space with no
- // pointers. This enables some optimizations when marking.
- // It is expected that this field is inlined and turned into control flow
- // at the place where the MarkBit object is created.
- bool data_only_;
-};
-
-
-// Bitmap is a sequence of cells each containing fixed number of bits.
-class Bitmap {
- public:
- static const uint32_t kBitsPerCell = 32;
- static const uint32_t kBitsPerCellLog2 = 5;
- static const uint32_t kBitIndexMask = kBitsPerCell - 1;
- static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
- static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
-
- static const size_t kLength =
- (1 << kPageSizeBits) >> (kPointerSizeLog2);
-
- static const size_t kSize =
- (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
-
-
- static int CellsForLength(int length) {
- return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
- }
-
- int CellsCount() {
- return CellsForLength(kLength);
- }
-
- static int SizeFor(int cells_count) {
- return sizeof(MarkBit::CellType) * cells_count;
- }
-
- INLINE(static uint32_t IndexToCell(uint32_t index)) {
- return index >> kBitsPerCellLog2;
- }
-
- INLINE(static uint32_t CellToIndex(uint32_t index)) {
- return index << kBitsPerCellLog2;
- }
-
- INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
- return (index + kBitIndexMask) & ~kBitIndexMask;
- }
-
- INLINE(MarkBit::CellType* cells()) {
- return reinterpret_cast<MarkBit::CellType*>(this);
- }
-
- INLINE(Address address()) {
- return reinterpret_cast<Address>(this);
- }
-
- INLINE(static Bitmap* FromAddress(Address addr)) {
- return reinterpret_cast<Bitmap*>(addr);
- }
-
- inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
- MarkBit::CellType mask = 1 << (index & kBitIndexMask);
- MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
- return MarkBit(cell, mask, data_only);
- }
-
- static inline void Clear(MemoryChunk* chunk);
-
- static void PrintWord(uint32_t word, uint32_t himask = 0) {
- for (uint32_t mask = 1; mask != 0; mask <<= 1) {
- if ((mask & himask) != 0) PrintF("[");
- PrintF((mask & word) ? "1" : "0");
- if ((mask & himask) != 0) PrintF("]");
- }
- }
-
- class CellPrinter {
- public:
- CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
-
- void Print(uint32_t pos, uint32_t cell) {
- if (cell == seq_type) {
- seq_length++;
- return;
- }
-
- Flush();
-
- if (IsSeq(cell)) {
- seq_start = pos;
- seq_length = 0;
- seq_type = cell;
- return;
- }
-
- PrintF("%d: ", pos);
- PrintWord(cell);
- PrintF("\n");
- }
-
- void Flush() {
- if (seq_length > 0) {
- PrintF("%d: %dx%d\n",
- seq_start,
- seq_type == 0 ? 0 : 1,
- seq_length * kBitsPerCell);
- seq_length = 0;
- }
- }
-
- static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
-
- private:
- uint32_t seq_start;
- uint32_t seq_type;
- uint32_t seq_length;
- };
-
- void Print() {
- CellPrinter printer;
- for (int i = 0; i < CellsCount(); i++) {
- printer.Print(i, cells()[i]);
- }
- printer.Flush();
- PrintF("\n");
- }
-
- bool IsClean() {
- for (int i = 0; i < CellsCount(); i++) {
- if (cells()[i] != 0) {
- return false;
- }
- }
- return true;
- }
-};
-
-
-class SkipList;
-class SlotsBuffer;
-
-// MemoryChunk represents a memory region owned by a specific space.
-// It is divided into the header and the body. Chunk start is always
-// 1MB aligned. Start of the body is aligned so it can accommodate
-// any heap object.
-class MemoryChunk {
- public:
- // Only works if the pointer is in the first kPageSize of the MemoryChunk.
- static MemoryChunk* FromAddress(Address a) {
- return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
- }
-
- // Only works for addresses in pointer spaces, not data or code spaces.
- static inline MemoryChunk* FromAnyPointerAddress(Address addr);
-
- Address address() { return reinterpret_cast<Address>(this); }
-
- bool is_valid() { return address() != NULL; }
-
- MemoryChunk* next_chunk() const { return next_chunk_; }
- MemoryChunk* prev_chunk() const { return prev_chunk_; }
-
- void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
- void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
-
- Space* owner() const {
- if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
- kFailureTag) {
- return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
- kFailureTag);
- } else {
- return NULL;
- }
- }
-
- void set_owner(Space* space) {
- ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
- owner_ = reinterpret_cast<Address>(space) + kFailureTag;
- ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
- kFailureTag);
- }
-
- VirtualMemory* reserved_memory() {
- return &reservation_;
- }
-
- void InitializeReservedMemory() {
- reservation_.Reset();
- }
-
- void set_reserved_memory(VirtualMemory* reservation) {
- ASSERT_NOT_NULL(reservation);
- reservation_.TakeControl(reservation);
- }
-
- bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
- void initialize_scan_on_scavenge(bool scan) {
- if (scan) {
- SetFlag(SCAN_ON_SCAVENGE);
- } else {
- ClearFlag(SCAN_ON_SCAVENGE);
- }
- }
- inline void set_scan_on_scavenge(bool scan);
-
- int store_buffer_counter() { return store_buffer_counter_; }
- void set_store_buffer_counter(int counter) {
- store_buffer_counter_ = counter;
- }
-
- bool Contains(Address addr) {
- return addr >= area_start() && addr < area_end();
- }
-
- // Checks whether addr can be a limit of addresses in this page.
- // It's a limit if it's in the page, or if it's just after the
- // last byte of the page.
- bool ContainsLimit(Address addr) {
- return addr >= area_start() && addr <= area_end();
- }
-
- // Every n write barrier invocations we go to runtime even though
- // we could have handled it in generated code. This lets us check
- // whether we have hit the limit and should do some more marking.
- static const int kWriteBarrierCounterGranularity = 500;
-
- enum MemoryChunkFlags {
- IS_EXECUTABLE,
- ABOUT_TO_BE_FREED,
- POINTERS_TO_HERE_ARE_INTERESTING,
- POINTERS_FROM_HERE_ARE_INTERESTING,
- SCAN_ON_SCAVENGE,
- IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
- IN_TO_SPACE, // All pages in new space has one of these two set.
- NEW_SPACE_BELOW_AGE_MARK,
- CONTAINS_ONLY_DATA,
- EVACUATION_CANDIDATE,
- RESCAN_ON_EVACUATION,
-
- // Pages swept precisely can be iterated, hitting only the live objects.
- // Whereas those swept conservatively cannot be iterated over. Both flags
- // indicate that marking bits have been cleared by the sweeper, otherwise
- // marking bits are still intact.
- WAS_SWEPT_PRECISELY,
- WAS_SWEPT_CONSERVATIVELY,
-
- // Large objects can have a progress bar in their page header. These object
- // are scanned in increments and will be kept black while being scanned.
- // Even if the mutator writes to them they will be kept black and a white
- // to grey transition is performed in the value.
- HAS_PROGRESS_BAR,
-
- // Last flag, keep at bottom.
- NUM_MEMORY_CHUNK_FLAGS
- };
-
-
- static const int kPointersToHereAreInterestingMask =
- 1 << POINTERS_TO_HERE_ARE_INTERESTING;
-
- static const int kPointersFromHereAreInterestingMask =
- 1 << POINTERS_FROM_HERE_ARE_INTERESTING;
-
- static const int kEvacuationCandidateMask =
- 1 << EVACUATION_CANDIDATE;
-
- static const int kSkipEvacuationSlotsRecordingMask =
- (1 << EVACUATION_CANDIDATE) |
- (1 << RESCAN_ON_EVACUATION) |
- (1 << IN_FROM_SPACE) |
- (1 << IN_TO_SPACE);
-
-
- void SetFlag(int flag) {
- flags_ |= static_cast<uintptr_t>(1) << flag;
- }
-
- void ClearFlag(int flag) {
- flags_ &= ~(static_cast<uintptr_t>(1) << flag);
- }
-
- void SetFlagTo(int flag, bool value) {
- if (value) {
- SetFlag(flag);
- } else {
- ClearFlag(flag);
- }
- }
-
- bool IsFlagSet(int flag) {
- return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
- }
-
- // Set or clear multiple flags at a time. The flags in the mask
- // are set to the value in "flags", the rest retain the current value
- // in flags_.
- void SetFlags(intptr_t flags, intptr_t mask) {
- flags_ = (flags_ & ~mask) | (flags & mask);
- }
-
- // Return all current flags.
- intptr_t GetFlags() { return flags_; }
-
- intptr_t parallel_sweeping() const {
- return parallel_sweeping_;
- }
-
- void set_parallel_sweeping(intptr_t state) {
- parallel_sweeping_ = state;
- }
-
- bool TryParallelSweeping() {
- return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
- }
-
- // Manage live byte count (count of bytes known to be live,
- // because they are marked black).
- void ResetLiveBytes() {
- if (FLAG_gc_verbose) {
- PrintF("ResetLiveBytes:%p:%x->0\n",
- static_cast<void*>(this), live_byte_count_);
- }
- live_byte_count_ = 0;
- }
- void IncrementLiveBytes(int by) {
- if (FLAG_gc_verbose) {
- printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
- static_cast<void*>(this), live_byte_count_,
- ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
- live_byte_count_ + by);
- }
- live_byte_count_ += by;
- ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
- }
- int LiveBytes() {
- ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
- return live_byte_count_;
- }
-
- int write_barrier_counter() {
- return static_cast<int>(write_barrier_counter_);
- }
-
- void set_write_barrier_counter(int counter) {
- write_barrier_counter_ = counter;
- }
-
- int progress_bar() {
- ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
- return progress_bar_;
- }
-
- void set_progress_bar(int progress_bar) {
- ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
- progress_bar_ = progress_bar;
- }
-
- void ResetProgressBar() {
- if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- set_progress_bar(0);
- ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
- }
- }
-
- bool IsLeftOfProgressBar(Object** slot) {
- Address slot_address = reinterpret_cast<Address>(slot);
- ASSERT(slot_address > this->address());
- return (slot_address - (this->address() + kObjectStartOffset)) <
- progress_bar();
- }
-
- static void IncrementLiveBytesFromGC(Address address, int by) {
- MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
- }
-
- static void IncrementLiveBytesFromMutator(Address address, int by);
-
- static const intptr_t kAlignment =
- (static_cast<uintptr_t>(1) << kPageSizeBits);
-
- static const intptr_t kAlignmentMask = kAlignment - 1;
-
- static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
-
- static const intptr_t kLiveBytesOffset =
- kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
- kPointerSize + kPointerSize +
- kPointerSize + kPointerSize + kPointerSize + kIntSize;
-
- static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
-
- static const size_t kWriteBarrierCounterOffset =
- kSlotsBufferOffset + kPointerSize + kPointerSize;
-
- static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
- kIntSize + kIntSize + kPointerSize;
-
- static const int kBodyOffset =
- CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
-
- // The start offset of the object area in a page. Aligned to both maps and
- // code alignment to be suitable for both. Also aligned to 32 words because
- // the marking bitmap is arranged in 32 bit chunks.
- static const int kObjectStartAlignment = 32 * kPointerSize;
- static const int kObjectStartOffset = kBodyOffset - 1 +
- (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
-
- size_t size() const { return size_; }
-
- void set_size(size_t size) {
- size_ = size;
- }
-
- void SetArea(Address area_start, Address area_end) {
- area_start_ = area_start;
- area_end_ = area_end;
- }
-
- Executability executable() {
- return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
- }
-
- bool ContainsOnlyData() {
- return IsFlagSet(CONTAINS_ONLY_DATA);
- }
-
- bool InNewSpace() {
- return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
- }
-
- bool InToSpace() {
- return IsFlagSet(IN_TO_SPACE);
- }
-
- bool InFromSpace() {
- return IsFlagSet(IN_FROM_SPACE);
- }
-
- // ---------------------------------------------------------------------
- // Markbits support
-
- inline Bitmap* markbits() {
- return Bitmap::FromAddress(address() + kHeaderSize);
- }
-
- void PrintMarkbits() { markbits()->Print(); }
-
- inline uint32_t AddressToMarkbitIndex(Address addr) {
- return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
- }
-
- inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
- const intptr_t offset =
- reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
-
- return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
- }
-
- inline Address MarkbitIndexToAddress(uint32_t index) {
- return this->address() + (index << kPointerSizeLog2);
- }
-
- void InsertAfter(MemoryChunk* other);
- void Unlink();
-
- inline Heap* heap() { return heap_; }
-
- static const int kFlagsOffset = kPointerSize * 3;
-
- bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
-
- bool ShouldSkipEvacuationSlotRecording() {
- return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
- }
-
- inline SkipList* skip_list() {
- return skip_list_;
- }
-
- inline void set_skip_list(SkipList* skip_list) {
- skip_list_ = skip_list;
- }
-
- inline SlotsBuffer* slots_buffer() {
- return slots_buffer_;
- }
-
- inline SlotsBuffer** slots_buffer_address() {
- return &slots_buffer_;
- }
-
- void MarkEvacuationCandidate() {
- ASSERT(slots_buffer_ == NULL);
- SetFlag(EVACUATION_CANDIDATE);
- }
-
- void ClearEvacuationCandidate() {
- ASSERT(slots_buffer_ == NULL);
- ClearFlag(EVACUATION_CANDIDATE);
- }
-
- Address area_start() { return area_start_; }
- Address area_end() { return area_end_; }
- int area_size() {
- return static_cast<int>(area_end() - area_start());
- }
- bool CommitArea(size_t requested);
-
- // Approximate amount of physical memory committed for this chunk.
- size_t CommittedPhysicalMemory() {
- return high_water_mark_;
- }
-
- static inline void UpdateHighWaterMark(Address mark);
-
- protected:
- MemoryChunk* next_chunk_;
- MemoryChunk* prev_chunk_;
- size_t size_;
- intptr_t flags_;
-
- // Start and end of allocatable memory on this chunk.
- Address area_start_;
- Address area_end_;
-
- // If the chunk needs to remember its memory reservation, it is stored here.
- VirtualMemory reservation_;
- // The identity of the owning space. This is tagged as a failure pointer, but
- // no failure can be in an object, so this can be distinguished from any entry
- // in a fixed array.
- Address owner_;
- Heap* heap_;
- // Used by the store buffer to keep track of which pages to mark scan-on-
- // scavenge.
- int store_buffer_counter_;
- // Count of bytes marked black on page.
- int live_byte_count_;
- SlotsBuffer* slots_buffer_;
- SkipList* skip_list_;
- intptr_t write_barrier_counter_;
- // Used by the incremental marker to keep track of the scanning progress in
- // large objects that have a progress bar and are scanned in increments.
- int progress_bar_;
- // Assuming the initial allocation on a page is sequential,
- // count highest number of bytes ever allocated on the page.
- int high_water_mark_;
-
- intptr_t parallel_sweeping_;
-
- static MemoryChunk* Initialize(Heap* heap,
- Address base,
- size_t size,
- Address area_start,
- Address area_end,
- Executability executable,
- Space* owner);
-
- friend class MemoryAllocator;
-};
-
-
-STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
-
-
-// -----------------------------------------------------------------------------
-// A page is a memory chunk of a size 1MB. Large object pages may be larger.
-//
-// The only way to get a page pointer is by calling factory methods:
-// Page* p = Page::FromAddress(addr); or
-// Page* p = Page::FromAllocationTop(top);
-class Page : public MemoryChunk {
- public:
- // Returns the page containing a given address. The address ranges
- // from [page_addr .. page_addr + kPageSize[
- // This only works if the object is in fact in a page. See also MemoryChunk::
- // FromAddress() and FromAnyAddress().
- INLINE(static Page* FromAddress(Address a)) {
- return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
- }
-
- // Returns the page containing an allocation top. Because an allocation
- // top address can be the upper bound of the page, we need to subtract
- // it with kPointerSize first. The address ranges from
- // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
- INLINE(static Page* FromAllocationTop(Address top)) {
- Page* p = FromAddress(top - kPointerSize);
- return p;
- }
-
- // Returns the next page in the chain of pages owned by a space.
- inline Page* next_page();
- inline Page* prev_page();
- inline void set_next_page(Page* page);
- inline void set_prev_page(Page* page);
-
- // Checks whether an address is page aligned.
- static bool IsAlignedToPageSize(Address a) {
- return 0 == (OffsetFrom(a) & kPageAlignmentMask);
- }
-
- // Returns the offset of a given address to this page.
- INLINE(int Offset(Address a)) {
- int offset = static_cast<int>(a - address());
- return offset;
- }
-
- // Returns the address for a given offset to the this page.
- Address OffsetToAddress(int offset) {
- ASSERT_PAGE_OFFSET(offset);
- return address() + offset;
- }
-
- // ---------------------------------------------------------------------
-
- // Page size in bytes. This must be a multiple of the OS page size.
- static const int kPageSize = 1 << kPageSizeBits;
-
- // Object area size in bytes.
- static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
-
- // Maximum object size that fits in a page.
- static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
-
- // Page size mask.
- static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
-
- inline void ClearGCFields();
-
- static inline Page* Initialize(Heap* heap,
- MemoryChunk* chunk,
- Executability executable,
- PagedSpace* owner);
-
- void InitializeAsAnchor(PagedSpace* owner);
-
- bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
- bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
- bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
-
- void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
- void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
-
- void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
- void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
-
-#ifdef DEBUG
- void Print();
-#endif // DEBUG
-
- friend class MemoryAllocator;
-};
-
-
-STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
-
-
-class LargePage : public MemoryChunk {
- public:
- HeapObject* GetObject() {
- return HeapObject::FromAddress(area_start());
- }
-
- inline LargePage* next_page() const {
- return static_cast<LargePage*>(next_chunk());
- }
-
- inline void set_next_page(LargePage* page) {
- set_next_chunk(page);
- }
- private:
- static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
-
- friend class MemoryAllocator;
-};
-
-STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
-
-// ----------------------------------------------------------------------------
-// Space is the abstract superclass for all allocation spaces.
-class Space : public Malloced {
- public:
- Space(Heap* heap, AllocationSpace id, Executability executable)
- : heap_(heap), id_(id), executable_(executable) {}
-
- virtual ~Space() {}
-
- Heap* heap() const { return heap_; }
-
- // Does the space need executable memory?
- Executability executable() { return executable_; }
-
- // Identity used in error reporting.
- AllocationSpace identity() { return id_; }
-
- // Returns allocated size.
- virtual intptr_t Size() = 0;
-
- // Returns size of objects. Can differ from the allocated size
- // (e.g. see LargeObjectSpace).
- virtual intptr_t SizeOfObjects() { return Size(); }
-
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (id_ == CODE_SPACE) {
- return RoundDown(size, kCodeAlignment);
- } else {
- return RoundDown(size, kPointerSize);
- }
- }
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-
- private:
- Heap* heap_;
- AllocationSpace id_;
- Executability executable_;
-};
-
-
-// ----------------------------------------------------------------------------
-// All heap objects containing executable code (code objects) must be allocated
-// from a 2 GB range of memory, so that they can call each other using 32-bit
-// displacements. This happens automatically on 32-bit platforms, where 32-bit
-// displacements cover the entire 4GB virtual address space. On 64-bit
-// platforms, we support this using the CodeRange object, which reserves and
-// manages a range of virtual memory.
-class CodeRange {
- public:
- explicit CodeRange(Isolate* isolate);
- ~CodeRange() { TearDown(); }
-
- // Reserves a range of virtual memory, but does not commit any of it.
- // Can only be called once, at heap initialization time.
- // Returns false on failure.
- bool SetUp(const size_t requested_size);
-
- // Frees the range of virtual memory, and frees the data structures used to
- // manage it.
- void TearDown();
-
- bool exists() { return this != NULL && code_range_ != NULL; }
- bool contains(Address address) {
- if (this == NULL || code_range_ == NULL) return false;
- Address start = static_cast<Address>(code_range_->address());
- return start <= address && address < start + code_range_->size();
- }
-
- // Allocates a chunk of memory from the large-object portion of
- // the code range. On platforms with no separate code range, should
- // not be called.
- MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
- const size_t commit_size,
- size_t* allocated);
- bool CommitRawMemory(Address start, size_t length);
- bool UncommitRawMemory(Address start, size_t length);
- void FreeRawMemory(Address buf, size_t length);
-
- private:
- Isolate* isolate_;
-
- // The reserved range of virtual memory that all code objects are put in.
- VirtualMemory* code_range_;
- // Plain old data class, just a struct plus a constructor.
- class FreeBlock {
- public:
- FreeBlock(Address start_arg, size_t size_arg)
- : start(start_arg), size(size_arg) {
- ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
- ASSERT(size >= static_cast<size_t>(Page::kPageSize));
- }
- FreeBlock(void* start_arg, size_t size_arg)
- : start(static_cast<Address>(start_arg)), size(size_arg) {
- ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
- ASSERT(size >= static_cast<size_t>(Page::kPageSize));
- }
-
- Address start;
- size_t size;
- };
-
- // Freed blocks of memory are added to the free list. When the allocation
- // list is exhausted, the free list is sorted and merged to make the new
- // allocation list.
- List<FreeBlock> free_list_;
- // Memory is allocated from the free blocks on the allocation list.
- // The block at current_allocation_block_index_ is the current block.
- List<FreeBlock> allocation_list_;
- int current_allocation_block_index_;
-
- // Finds a block on the allocation list that contains at least the
- // requested amount of memory. If none is found, sorts and merges
- // the existing free memory blocks, and searches again.
- // If none can be found, terminates V8 with FatalProcessOutOfMemory.
- void GetNextAllocationBlock(size_t requested);
- // Compares the start addresses of two free blocks.
- static int CompareFreeBlockAddress(const FreeBlock* left,
- const FreeBlock* right);
-
- DISALLOW_COPY_AND_ASSIGN(CodeRange);
-};
-
-
-class SkipList {
- public:
- SkipList() {
- Clear();
- }
-
- void Clear() {
- for (int idx = 0; idx < kSize; idx++) {
- starts_[idx] = reinterpret_cast<Address>(-1);
- }
- }
-
- Address StartFor(Address addr) {
- return starts_[RegionNumber(addr)];
- }
-
- void AddObject(Address addr, int size) {
- int start_region = RegionNumber(addr);
- int end_region = RegionNumber(addr + size - kPointerSize);
- for (int idx = start_region; idx <= end_region; idx++) {
- if (starts_[idx] > addr) starts_[idx] = addr;
- }
- }
-
- static inline int RegionNumber(Address addr) {
- return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
- }
-
- static void Update(Address addr, int size) {
- Page* page = Page::FromAddress(addr);
- SkipList* list = page->skip_list();
- if (list == NULL) {
- list = new SkipList();
- page->set_skip_list(list);
- }
-
- list->AddObject(addr, size);
- }
-
- private:
- static const int kRegionSizeLog2 = 13;
- static const int kRegionSize = 1 << kRegionSizeLog2;
- static const int kSize = Page::kPageSize / kRegionSize;
-
- STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
-
- Address starts_[kSize];
-};
-
-
-// ----------------------------------------------------------------------------
-// A space acquires chunks of memory from the operating system. The memory
-// allocator allocated and deallocates pages for the paged heap spaces and large
-// pages for large object space.
-//
-// Each space has to manage it's own pages.
-//
-class MemoryAllocator {
- public:
- explicit MemoryAllocator(Isolate* isolate);
-
- // Initializes its internal bookkeeping structures.
- // Max capacity of the total space and executable memory limit.
- bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
-
- void TearDown();
-
- Page* AllocatePage(
- intptr_t size, PagedSpace* owner, Executability executable);
-
- LargePage* AllocateLargePage(
- intptr_t object_size, Space* owner, Executability executable);
-
- void Free(MemoryChunk* chunk);
-
- // Returns the maximum available bytes of heaps.
- intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
-
- // Returns allocated spaces in bytes.
- intptr_t Size() { return size_; }
-
- // Returns the maximum available executable bytes of heaps.
- intptr_t AvailableExecutable() {
- if (capacity_executable_ < size_executable_) return 0;
- return capacity_executable_ - size_executable_;
- }
-
- // Returns allocated executable spaces in bytes.
- intptr_t SizeExecutable() { return size_executable_; }
-
- // Returns maximum available bytes that the old space can have.
- intptr_t MaxAvailable() {
- return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
- }
-
-#ifdef DEBUG
- // Reports statistic info of the space.
- void ReportStatistics();
-#endif
-
- // Returns a MemoryChunk in which the memory region from commit_area_size to
- // reserve_area_size of the chunk area is reserved but not committed, it
- // could be committed later by calling MemoryChunk::CommitArea.
- MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
- intptr_t commit_area_size,
- Executability executable,
- Space* space);
-
- Address ReserveAlignedMemory(size_t requested,
- size_t alignment,
- VirtualMemory* controller);
- Address AllocateAlignedMemory(size_t reserve_size,
- size_t commit_size,
- size_t alignment,
- Executability executable,
- VirtualMemory* controller);
-
- void FreeMemory(VirtualMemory* reservation, Executability executable);
- void FreeMemory(Address addr, size_t size, Executability executable);
-
- // Commit a contiguous block of memory from the initial chunk. Assumes that
- // the address is not NULL, the size is greater than zero, and that the
- // block is contained in the initial chunk. Returns true if it succeeded
- // and false otherwise.
- bool CommitBlock(Address start, size_t size, Executability executable);
-
- // Uncommit a contiguous block of memory [start..(start+size)[.
- // start is not NULL, the size is greater than zero, and the
- // block is contained in the initial chunk. Returns true if it succeeded
- // and false otherwise.
- bool UncommitBlock(Address start, size_t size);
-
- // Zaps a contiguous block of memory [start..(start+size)[ thus
- // filling it up with a recognizable non-NULL bit pattern.
- void ZapBlock(Address start, size_t size);
-
- void PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
- size_t size);
-
- void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action);
-
- void RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback);
-
- bool MemoryAllocationCallbackRegistered(
- MemoryAllocationCallback callback);
-
- static int CodePageGuardStartOffset();
-
- static int CodePageGuardSize();
-
- static int CodePageAreaStartOffset();
-
- static int CodePageAreaEndOffset();
-
- static int CodePageAreaSize() {
- return CodePageAreaEndOffset() - CodePageAreaStartOffset();
- }
-
- MUST_USE_RESULT static bool CommitExecutableMemory(VirtualMemory* vm,
- Address start,
- size_t commit_size,
- size_t reserved_size);
-
- private:
- Isolate* isolate_;
-
- // Maximum space size in bytes.
- size_t capacity_;
- // Maximum subset of capacity_ that can be executable
- size_t capacity_executable_;
-
- // Allocated space size in bytes.
- size_t size_;
- // Allocated executable space size in bytes.
- size_t size_executable_;
-
- struct MemoryAllocationCallbackRegistration {
- MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action)
- : callback(callback), space(space), action(action) {
- }
- MemoryAllocationCallback callback;
- ObjectSpace space;
- AllocationAction action;
- };
-
- // A List of callback that are triggered when memory is allocated or free'd
- List<MemoryAllocationCallbackRegistration>
- memory_allocation_callbacks_;
-
- // Initializes pages in a chunk. Returns the first page address.
- // This function and GetChunkId() are provided for the mark-compact
- // collector to rebuild page headers in the from space, which is
- // used as a marking stack and its page headers are destroyed.
- Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
- PagedSpace* owner);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
-};
-
-
-// -----------------------------------------------------------------------------
-// Interface for heap object iterator to be implemented by all object space
-// object iterators.
-//
-// NOTE: The space specific object iterators also implements the own next()
-// method which is used to avoid using virtual functions
-// iterating a specific space.
-
-class ObjectIterator : public Malloced {
- public:
- virtual ~ObjectIterator() { }
-
- virtual HeapObject* next_object() = 0;
-};
-
-
-// -----------------------------------------------------------------------------
-// Heap object iterator in new/old/map spaces.
-//
-// A HeapObjectIterator iterates objects from the bottom of the given space
-// to its top or from the bottom of the given page to its top.
-//
-// If objects are allocated in the page during iteration the iterator may
-// or may not iterate over those objects. The caller must create a new
-// iterator in order to be sure to visit these new objects.
-class HeapObjectIterator: public ObjectIterator {
- public:
- // Creates a new object iterator in a given space.
- // If the size function is not given, the iterator calls the default
- // Object::Size().
- explicit HeapObjectIterator(PagedSpace* space);
- HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
- HeapObjectIterator(Page* page, HeapObjectCallback size_func);
-
- // Advance to the next object, skipping free spaces and other fillers and
- // skipping the special garbage section of which there is one per space.
- // Returns NULL when the iteration has ended.
- inline HeapObject* Next() {
- do {
- HeapObject* next_obj = FromCurrentPage();
- if (next_obj != NULL) return next_obj;
- } while (AdvanceToNextPage());
- return NULL;
- }
-
- virtual HeapObject* next_object() {
- return Next();
- }
-
- private:
- enum PageMode { kOnePageOnly, kAllPagesInSpace };
-
- Address cur_addr_; // Current iteration point.
- Address cur_end_; // End iteration point.
- HeapObjectCallback size_func_; // Size function or NULL.
- PagedSpace* space_;
- PageMode page_mode_;
-
- // Fast (inlined) path of next().
- inline HeapObject* FromCurrentPage();
-
- // Slow path of next(), goes into the next page. Returns false if the
- // iteration has ended.
- bool AdvanceToNextPage();
-
- // Initializes fields.
- inline void Initialize(PagedSpace* owner,
- Address start,
- Address end,
- PageMode mode,
- HeapObjectCallback size_func);
-};
-
-
-// -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a paged space.
-
-class PageIterator BASE_EMBEDDED {
- public:
- explicit inline PageIterator(PagedSpace* space);
-
- inline bool has_next();
- inline Page* next();
-
- private:
- PagedSpace* space_;
- Page* prev_page_; // Previous page returned.
- // Next page that will be returned. Cached here so that we can use this
- // iterator for operations that deallocate pages.
- Page* next_page_;
-};
-
-
-// -----------------------------------------------------------------------------
-// A space has a circular list of pages. The next page can be accessed via
-// Page::next_page() call.
-
-// An abstraction of allocation and relocation pointers in a page-structured
-// space.
-class AllocationInfo {
- public:
- AllocationInfo() : top(NULL), limit(NULL) {
- }
-
- Address top; // Current allocation top.
- Address limit; // Current allocation limit.
-
-#ifdef DEBUG
- bool VerifyPagedAllocation() {
- return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
- && (top <= limit);
- }
-#endif
-};
-
-
-// An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (i.e., not
-// including page bookkeeping structures) currently in the space. The 'size'
-// of a space is the number of allocated bytes, the 'waste' in the space is
-// the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (e.g. small blocks due
-// to internal fragmentation, top of page areas in map space), and the bytes
-// 'available' is the number of unallocated bytes that are not waste. The
-// capacity is the sum of size, waste, and available.
-//
-// The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in
-// conjunction with capacity, or else they always balance increases and
-// decreases to the non-capacity stats.
-class AllocationStats BASE_EMBEDDED {
- public:
- AllocationStats() { Clear(); }
-
- // Zero out all the allocation statistics (i.e., no capacity).
- void Clear() {
- capacity_ = 0;
- size_ = 0;
- waste_ = 0;
- }
-
- void ClearSizeWaste() {
- size_ = capacity_;
- waste_ = 0;
- }
-
- // Reset the allocation statistics (i.e., available = capacity with no
- // wasted or allocated bytes).
- void Reset() {
- size_ = 0;
- waste_ = 0;
- }
-
- // Accessors for the allocation statistics.
- intptr_t Capacity() { return capacity_; }
- intptr_t Size() { return size_; }
- intptr_t Waste() { return waste_; }
-
- // Grow the space by adding available bytes. They are initially marked as
- // being in use (part of the size), but will normally be immediately freed,
- // putting them on the free list and removing them from size_.
- void ExpandSpace(int size_in_bytes) {
- capacity_ += size_in_bytes;
- size_ += size_in_bytes;
- ASSERT(size_ >= 0);
- }
-
- // Shrink the space by removing available bytes. Since shrinking is done
- // during sweeping, bytes have been marked as being in use (part of the size)
- // and are hereby freed.
- void ShrinkSpace(int size_in_bytes) {
- capacity_ -= size_in_bytes;
- size_ -= size_in_bytes;
- ASSERT(size_ >= 0);
- }
-
- // Allocate from available bytes (available -> size).
- void AllocateBytes(intptr_t size_in_bytes) {
- size_ += size_in_bytes;
- ASSERT(size_ >= 0);
- }
-
- // Free allocated bytes, making them available (size -> available).
- void DeallocateBytes(intptr_t size_in_bytes) {
- size_ -= size_in_bytes;
- ASSERT(size_ >= 0);
- }
-
- // Waste free bytes (available -> waste).
- void WasteBytes(int size_in_bytes) {
- size_ -= size_in_bytes;
- waste_ += size_in_bytes;
- ASSERT(size_ >= 0);
- }
-
- private:
- intptr_t capacity_;
- intptr_t size_;
- intptr_t waste_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap. They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object). They have a size and a next pointer. The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode: public HeapObject {
- public:
- // Obtain a free-list node from a raw address. This is not a cast because
- // it does not check nor require that the first word at the address is a map
- // pointer.
- static FreeListNode* FromAddress(Address address) {
- return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
- }
-
- static inline bool IsFreeListNode(HeapObject* object);
-
- // Set the size in bytes, which can be read with HeapObject::Size(). This
- // function also writes a map to the first word of the block so that it
- // looks like a heap object to the garbage collector and heap iteration
- // functions.
- void set_size(Heap* heap, int size_in_bytes);
-
- // Accessors for the next field.
- inline FreeListNode* next();
- inline FreeListNode** next_address();
- inline void set_next(FreeListNode* next);
-
- inline void Zap();
-
- static inline FreeListNode* cast(MaybeObject* maybe) {
- ASSERT(!maybe->IsFailure());
- return reinterpret_cast<FreeListNode*>(maybe);
- }
-
- private:
- static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
-
-// The free list category holds a pointer to the top element and a pointer to
-// the end element of the linked list of free memory blocks.
-class FreeListCategory {
- public:
- FreeListCategory() :
- top_(NULL),
- end_(NULL),
- mutex_(OS::CreateMutex()),
- available_(0) {}
-
- ~FreeListCategory() {
- delete mutex_;
- }
-
- intptr_t Concatenate(FreeListCategory* category);
-
- void Reset();
-
- void Free(FreeListNode* node, int size_in_bytes);
-
- FreeListNode* PickNodeFromList(int *node_size);
-
- intptr_t CountFreeListItemsInList(Page* p);
-
- intptr_t EvictFreeListItemsInList(Page* p);
-
- void RepairFreeList(Heap* heap);
-
- FreeListNode** GetTopAddress() { return &top_; }
- FreeListNode* top() const { return top_; }
- void set_top(FreeListNode* top) { top_ = top; }
-
- FreeListNode** GetEndAddress() { return &end_; }
- FreeListNode* end() const { return end_; }
- void set_end(FreeListNode* end) { end_ = end; }
-
- int* GetAvailableAddress() { return &available_; }
- int available() const { return available_; }
- void set_available(int available) { available_ = available; }
-
- Mutex* mutex() { return mutex_; }
-
-#ifdef DEBUG
- intptr_t SumFreeList();
- int FreeListLength();
-#endif
-
- private:
- FreeListNode* top_;
- FreeListNode* end_;
- Mutex* mutex_;
-
- // Total available bytes in all blocks of this free list category.
- int available_;
-};
-
-
-// The free list for the old space. The free list is organized in such a way
-// as to encourage objects allocated around the same time to be near each
-// other. The normal way to allocate is intended to be by bumping a 'top'
-// pointer until it hits a 'limit' pointer. When the limit is hit we need to
-// find a new space to allocate from. This is done with the free list, which
-// is divided up into rough categories to cut down on waste. Having finer
-// categories would scatter allocation more.
-
-// The old space free list is organized in categories.
-// 1-31 words: Such small free areas are discarded for efficiency reasons.
-// They can be reclaimed by the compactor. However the distance between top
-// and limit may be this small.
-// 32-255 words: There is a list of spaces this large. It is used for top and
-// limit when the object we need to allocate is 1-31 words in size. These
-// spaces are called small.
-// 256-2047 words: There is a list of spaces this large. It is used for top and
-// limit when the object we need to allocate is 32-255 words in size. These
-// spaces are called medium.
-// 1048-16383 words: There is a list of spaces this large. It is used for top
-// and limit when the object we need to allocate is 256-2047 words in size.
-// These spaces are call large.
-// At least 16384 words. This list is for objects of 2048 words or larger.
-// Empty pages are added to this list. These spaces are called huge.
-class FreeList BASE_EMBEDDED {
- public:
- explicit FreeList(PagedSpace* owner);
-
- intptr_t Concatenate(FreeList* free_list);
-
- // Clear the free list.
- void Reset();
-
- // Return the number of bytes available on the free list.
- intptr_t available() {
- return small_list_.available() + medium_list_.available() +
- large_list_.available() + huge_list_.available();
- }
-
- // Place a node on the free list. The block of size 'size_in_bytes'
- // starting at 'start' is placed on the free list. The return value is the
- // number of bytes that have been lost due to internal fragmentation by
- // freeing the block. Bookkeeping information will be written to the block,
- // i.e., its contents will be destroyed. The start address should be word
- // aligned, and the size should be a non-zero multiple of the word size.
- int Free(Address start, int size_in_bytes);
-
- // Allocate a block of size 'size_in_bytes' from the free list. The block
- // is unitialized. A failure is returned if no block is available. The
- // number of bytes lost to fragmentation is returned in the output parameter
- // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
- MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
-
-#ifdef DEBUG
- void Zap();
- intptr_t SumFreeLists();
- bool IsVeryLong();
-#endif
-
- // Used after booting the VM.
- void RepairLists(Heap* heap);
-
- struct SizeStats {
- intptr_t Total() {
- return small_size_ + medium_size_ + large_size_ + huge_size_;
- }
-
- intptr_t small_size_;
- intptr_t medium_size_;
- intptr_t large_size_;
- intptr_t huge_size_;
- };
-
- void CountFreeListItems(Page* p, SizeStats* sizes);
-
- intptr_t EvictFreeListItems(Page* p);
-
- FreeListCategory* small_list() { return &small_list_; }
- FreeListCategory* medium_list() { return &medium_list_; }
- FreeListCategory* large_list() { return &large_list_; }
- FreeListCategory* huge_list() { return &huge_list_; }
-
- private:
- // The size range of blocks, in bytes.
- static const int kMinBlockSize = 3 * kPointerSize;
- static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
-
- FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
-
- PagedSpace* owner_;
- Heap* heap_;
-
- static const int kSmallListMin = 0x20 * kPointerSize;
- static const int kSmallListMax = 0xff * kPointerSize;
- static const int kMediumListMax = 0x7ff * kPointerSize;
- static const int kLargeListMax = 0x3fff * kPointerSize;
- static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
- static const int kMediumAllocationMax = kSmallListMax;
- static const int kLargeAllocationMax = kMediumListMax;
- FreeListCategory small_list_;
- FreeListCategory medium_list_;
- FreeListCategory large_list_;
- FreeListCategory huge_list_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
-};
-
-
-class PagedSpace : public Space {
- public:
- // Creates a space with a maximum capacity, and an id.
- PagedSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id,
- Executability executable);
-
- virtual ~PagedSpace() {}
-
- // Set up the space using the given address range of virtual memory (from
- // the memory allocator's initial chunk) if possible. If the block of
- // addresses is not big enough to contain a single page-aligned page, a
- // fresh chunk will be allocated.
- bool SetUp();
-
- // Returns true if the space has been successfully set up and not
- // subsequently torn down.
- bool HasBeenSetUp();
-
- // Cleans up the space, frees all pages in this space except those belonging
- // to the initial chunk, uncommits addresses in the initial chunk.
- void TearDown();
-
- // Checks whether an object/address is in this space.
- inline bool Contains(Address a);
- bool Contains(HeapObject* o) { return Contains(o->address()); }
-
- // Given an address occupied by a live object, return that object if it is
- // in this space, or Failure::Exception() if it is not. The implementation
- // iterates over objects in the page containing the address, the cost is
- // linear in the number of objects in the page. It may be slow.
- MUST_USE_RESULT MaybeObject* FindObject(Address addr);
-
- // During boot the free_space_map is created, and afterwards we may need
- // to write it into the free list nodes that were already created.
- virtual void RepairFreeListsAfterBoot();
-
- // Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact();
-
- // Current capacity without growing (Size() + Available()).
- intptr_t Capacity() { return accounting_stats_.Capacity(); }
-
- // Total amount of memory committed for this space. For paged
- // spaces this equals the capacity.
- intptr_t CommittedMemory() { return Capacity(); }
-
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory();
-
- // Sets the capacity, the available space and the wasted space to zero.
- // The stats are rebuilt during sweeping by adding each page to the
- // capacity and the size when it is encountered. As free spaces are
- // discovered during the sweeping they are subtracted from the size and added
- // to the available and wasted totals.
- void ClearStats() {
- accounting_stats_.ClearSizeWaste();
- }
-
- // Increases the number of available bytes of that space.
- void AddToAccountingStats(intptr_t bytes) {
- accounting_stats_.DeallocateBytes(bytes);
- }
-
- // Available bytes without growing. These are the bytes on the free list.
- // The bytes in the linear allocation area are not included in this total
- // because updating the stats would slow down allocation. New pages are
- // immediately added to the free list so they show up here.
- intptr_t Available() { return free_list_.available(); }
-
- // Allocated bytes in this space. Garbage bytes that were not found due to
- // lazy sweeping are counted as being allocated! The bytes in the current
- // linear allocation area (between top and limit) are also counted here.
- virtual intptr_t Size() { return accounting_stats_.Size(); }
-
- // As size, but the bytes in lazily swept pages are estimated and the bytes
- // in the current linear allocation area are not included.
- virtual intptr_t SizeOfObjects() {
- // TODO(hpayer): broken when concurrent sweeping turned on
- ASSERT(!IsLazySweepingComplete() || (unswept_free_bytes_ == 0));
- return Size() - unswept_free_bytes_ - (limit() - top());
- }
-
- // Wasted bytes in this space. These are just the bytes that were thrown away
- // due to being too small to use for allocation. They do not include the
- // free bytes that were not found at all due to lazy sweeping.
- virtual intptr_t Waste() { return accounting_stats_.Waste(); }
-
- // Returns the allocation pointer in this space.
- Address top() { return allocation_info_.top; }
- Address limit() { return allocation_info_.limit; }
-
- // Allocate the requested number of bytes in the space if possible, return a
- // failure object if not.
- MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
-
- virtual bool ReserveSpace(int bytes);
-
- // Give a block of memory to the space's free list. It might be added to
- // the free list or accounted as waste.
- // If add_to_freelist is false then just accounting stats are updated and
- // no attempt to add area to free list is made.
- int Free(Address start, int size_in_bytes) {
- int wasted = free_list_.Free(start, size_in_bytes);
- accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
- return size_in_bytes - wasted;
- }
-
- void ResetFreeList() {
- free_list_.Reset();
- }
-
- // Set space allocation info.
- void SetTop(Address top, Address limit) {
- ASSERT(top == limit ||
- Page::FromAddress(top) == Page::FromAddress(limit - 1));
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- allocation_info_.top = top;
- allocation_info_.limit = limit;
- }
-
- void Allocate(int bytes) {
- accounting_stats_.AllocateBytes(bytes);
- }
-
- void IncreaseCapacity(int size) {
- accounting_stats_.ExpandSpace(size);
- }
-
- // Releases an unused page and shrinks the space.
- void ReleasePage(Page* page);
-
- // The dummy page that anchors the linked list of pages.
- Page* anchor() { return &anchor_; }
-
-#ifdef VERIFY_HEAP
- // Verify integrity of this space.
- virtual void Verify(ObjectVisitor* visitor);
-
- // Overridden by subclasses to verify space-specific object
- // properties (e.g., only maps or free-list nodes are in map space).
- virtual void VerifyObject(HeapObject* obj) {}
-#endif
-
-#ifdef DEBUG
- // Print meta info and objects in this space.
- virtual void Print();
-
- // Reports statistics for the space
- void ReportStatistics();
-
- // Report code object related statistics
- void CollectCodeStatistics();
- static void ReportCodeStatistics();
- static void ResetCodeStatistics();
-#endif
-
- bool was_swept_conservatively() { return was_swept_conservatively_; }
- void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
-
- // Evacuation candidates are swept by evacuator. Needs to return a valid
- // result before _and_ after evacuation has finished.
- static bool ShouldBeSweptLazily(Page* p) {
- return !p->IsEvacuationCandidate() &&
- !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
- !p->WasSweptPrecisely();
- }
-
- void SetPagesToSweep(Page* first) {
- ASSERT(unswept_free_bytes_ == 0);
- if (first == &anchor_) first = NULL;
- first_unswept_page_ = first;
- }
-
- void IncrementUnsweptFreeBytes(intptr_t by) {
- unswept_free_bytes_ += by;
- }
-
- void IncreaseUnsweptFreeBytes(Page* p) {
- ASSERT(ShouldBeSweptLazily(p));
- unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
- }
-
- void DecrementUnsweptFreeBytes(intptr_t by) {
- unswept_free_bytes_ -= by;
- }
-
- void DecreaseUnsweptFreeBytes(Page* p) {
- ASSERT(ShouldBeSweptLazily(p));
- unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
- }
-
- void ResetUnsweptFreeBytes() {
- unswept_free_bytes_ = 0;
- }
-
- bool AdvanceSweeper(intptr_t bytes_to_sweep);
-
- // When parallel sweeper threads are active this function waits
- // for them to complete, otherwise AdvanceSweeper with size_in_bytes
- // is called.
- bool EnsureSweeperProgress(intptr_t size_in_bytes);
-
- bool IsLazySweepingComplete() {
- return !first_unswept_page_->is_valid();
- }
-
- Page* FirstPage() { return anchor_.next_page(); }
- Page* LastPage() { return anchor_.prev_page(); }
-
- void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
- free_list_.CountFreeListItems(p, sizes);
- }
-
- void EvictEvacuationCandidatesFromFreeLists();
-
- bool CanExpand();
-
- // Returns the number of total pages in this space.
- int CountTotalPages();
-
- // Return size of allocatable area on a page in this space.
- inline int AreaSize() {
- return area_size_;
- }
-
- protected:
- FreeList* free_list() { return &free_list_; }
-
- int area_size_;
-
- // Maximum capacity of this space.
- intptr_t max_capacity_;
-
- intptr_t SizeOfFirstPage();
-
- // Accounting information for this space.
- AllocationStats accounting_stats_;
-
- // The dummy page that anchors the double linked list of pages.
- Page anchor_;
-
- // The space's free list.
- FreeList free_list_;
-
- // Normal allocation information.
- AllocationInfo allocation_info_;
-
- // Bytes of each page that cannot be allocated. Possibly non-zero
- // for pages in spaces with only fixed-size objects. Always zero
- // for pages in spaces with variable sized objects (those pages are
- // padded with free-list nodes).
- int page_extra_;
-
- bool was_swept_conservatively_;
-
- // The first page to be swept when the lazy sweeper advances. Is set
- // to NULL when all pages have been swept.
- Page* first_unswept_page_;
-
- // The number of free bytes which could be reclaimed by advancing the
- // lazy sweeper. This is only an estimation because lazy sweeping is
- // done conservatively.
- intptr_t unswept_free_bytes_;
-
- // Expands the space by allocating a fixed number of pages. Returns false if
- // it cannot allocate requested number of pages from OS, or if the hard heap
- // size limit has been hit.
- bool Expand();
-
- // Generic fast case allocation function that tries linear allocation at the
- // address denoted by top in allocation_info_.
- inline HeapObject* AllocateLinearly(int size_in_bytes);
-
- // Slow path of AllocateRaw. This function is space-dependent.
- MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
-
- friend class PageIterator;
- friend class SweeperThread;
-};
-
-
-class NumberAndSizeInfo BASE_EMBEDDED {
- public:
- NumberAndSizeInfo() : number_(0), bytes_(0) {}
-
- int number() const { return number_; }
- void increment_number(int num) { number_ += num; }
-
- int bytes() const { return bytes_; }
- void increment_bytes(int size) { bytes_ += size; }
-
- void clear() {
- number_ = 0;
- bytes_ = 0;
- }
-
- private:
- int number_;
- int bytes_;
-};
-
-
-// HistogramInfo class for recording a single "bar" of a histogram. This
-// class is used for collecting statistics to print to the log file.
-class HistogramInfo: public NumberAndSizeInfo {
- public:
- HistogramInfo() : NumberAndSizeInfo() {}
-
- const char* name() { return name_; }
- void set_name(const char* name) { name_ = name; }
-
- private:
- const char* name_;
-};
-
-
-enum SemiSpaceId {
- kFromSpace = 0,
- kToSpace = 1
-};
-
-
-class SemiSpace;
-
-
-class NewSpacePage : public MemoryChunk {
- public:
- // GC related flags copied from from-space to to-space when
- // flipping semispaces.
- static const intptr_t kCopyOnFlipFlagsMask =
- (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::SCAN_ON_SCAVENGE);
-
- static const int kAreaSize = Page::kNonCodeObjectAreaSize;
-
- inline NewSpacePage* next_page() const {
- return static_cast<NewSpacePage*>(next_chunk());
- }
-
- inline void set_next_page(NewSpacePage* page) {
- set_next_chunk(page);
- }
-
- inline NewSpacePage* prev_page() const {
- return static_cast<NewSpacePage*>(prev_chunk());
- }
-
- inline void set_prev_page(NewSpacePage* page) {
- set_prev_chunk(page);
- }
-
- SemiSpace* semi_space() {
- return reinterpret_cast<SemiSpace*>(owner());
- }
-
- bool is_anchor() { return !this->InNewSpace(); }
-
- static bool IsAtStart(Address addr) {
- return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
- == kObjectStartOffset;
- }
-
- static bool IsAtEnd(Address addr) {
- return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
- }
-
- Address address() {
- return reinterpret_cast<Address>(this);
- }
-
- // Finds the NewSpacePage containg the given address.
- static inline NewSpacePage* FromAddress(Address address_in_page) {
- Address page_start =
- reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
- ~Page::kPageAlignmentMask);
- NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
- return page;
- }
-
- // Find the page for a limit address. A limit address is either an address
- // inside a page, or the address right after the last byte of a page.
- static inline NewSpacePage* FromLimit(Address address_limit) {
- return NewSpacePage::FromAddress(address_limit - 1);
- }
-
- private:
- // Create a NewSpacePage object that is only used as anchor
- // for the doubly-linked list of real pages.
- explicit NewSpacePage(SemiSpace* owner) {
- InitializeAsAnchor(owner);
- }
-
- static NewSpacePage* Initialize(Heap* heap,
- Address start,
- SemiSpace* semi_space);
-
- // Intialize a fake NewSpacePage used as sentinel at the ends
- // of a doubly-linked list of real NewSpacePages.
- // Only uses the prev/next links, and sets flags to not be in new-space.
- void InitializeAsAnchor(SemiSpace* owner);
-
- friend class SemiSpace;
- friend class SemiSpaceIterator;
-};
-
-
-// -----------------------------------------------------------------------------
-// SemiSpace in young generation
-//
-// A semispace is a contiguous chunk of memory holding page-like memory
-// chunks. The mark-compact collector uses the memory of the first page in
-// the from space as a marking stack when tracing live objects.
-
-class SemiSpace : public Space {
- public:
- // Constructor.
- SemiSpace(Heap* heap, SemiSpaceId semispace)
- : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- start_(NULL),
- age_mark_(NULL),
- id_(semispace),
- anchor_(this),
- current_page_(NULL) { }
-
- // Sets up the semispace using the given chunk.
- void SetUp(Address start, int initial_capacity, int maximum_capacity);
-
- // Tear down the space. Heap memory was not allocated by the space, so it
- // is not deallocated here.
- void TearDown();
-
- // True if the space has been set up but not torn down.
- bool HasBeenSetUp() { return start_ != NULL; }
-
- // Grow the semispace to the new capacity. The new capacity
- // requested must be larger than the current capacity and less than
- // the maximum capacity.
- bool GrowTo(int new_capacity);
-
- // Shrinks the semispace to the new capacity. The new capacity
- // requested must be more than the amount of used memory in the
- // semispace and less than the current capacity.
- bool ShrinkTo(int new_capacity);
-
- // Returns the start address of the first page of the space.
- Address space_start() {
- ASSERT(anchor_.next_page() != &anchor_);
- return anchor_.next_page()->area_start();
- }
-
- // Returns the start address of the current page of the space.
- Address page_low() {
- return current_page_->area_start();
- }
-
- // Returns one past the end address of the space.
- Address space_end() {
- return anchor_.prev_page()->area_end();
- }
-
- // Returns one past the end address of the current page of the space.
- Address page_high() {
- return current_page_->area_end();
- }
-
- bool AdvancePage() {
- NewSpacePage* next_page = current_page_->next_page();
- if (next_page == anchor()) return false;
- current_page_ = next_page;
- return true;
- }
-
- // Resets the space to using the first page.
- void Reset();
-
- // Age mark accessors.
- Address age_mark() { return age_mark_; }
- void set_age_mark(Address mark);
-
- // True if the address is in the address range of this semispace (not
- // necessarily below the allocation pointer).
- bool Contains(Address a) {
- return (reinterpret_cast<uintptr_t>(a) & address_mask_)
- == reinterpret_cast<uintptr_t>(start_);
- }
-
- // True if the object is a heap object in the address range of this
- // semispace (not necessarily below the allocation pointer).
- bool Contains(Object* o) {
- return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
- }
-
- // If we don't have these here then SemiSpace will be abstract. However
- // they should never be called.
- virtual intptr_t Size() {
- UNREACHABLE();
- return 0;
- }
-
- virtual bool ReserveSpace(int bytes) {
- UNREACHABLE();
- return false;
- }
-
- bool is_committed() { return committed_; }
- bool Commit();
- bool Uncommit();
-
- NewSpacePage* first_page() { return anchor_.next_page(); }
- NewSpacePage* current_page() { return current_page_; }
-
-#ifdef VERIFY_HEAP
- virtual void Verify();
-#endif
-
-#ifdef DEBUG
- virtual void Print();
- // Validate a range of of addresses in a SemiSpace.
- // The "from" address must be on a page prior to the "to" address,
- // in the linked page order, or it must be earlier on the same page.
- static void AssertValidRange(Address from, Address to);
-#else
- // Do nothing.
- inline static void AssertValidRange(Address from, Address to) {}
-#endif
-
- // Returns the current capacity of the semi space.
- int Capacity() { return capacity_; }
-
- // Returns the maximum capacity of the semi space.
- int MaximumCapacity() { return maximum_capacity_; }
-
- // Returns the initial capacity of the semi space.
- int InitialCapacity() { return initial_capacity_; }
-
- SemiSpaceId id() { return id_; }
-
- static void Swap(SemiSpace* from, SemiSpace* to);
-
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory();
-
- private:
- // Flips the semispace between being from-space and to-space.
- // Copies the flags into the masked positions on all pages in the space.
- void FlipPages(intptr_t flags, intptr_t flag_mask);
-
- NewSpacePage* anchor() { return &anchor_; }
-
- // The current and maximum capacity of the space.
- int capacity_;
- int maximum_capacity_;
- int initial_capacity_;
-
- // The start address of the space.
- Address start_;
- // Used to govern object promotion during mark-compact collection.
- Address age_mark_;
-
- // Masks and comparison values to test for containment in this semispace.
- uintptr_t address_mask_;
- uintptr_t object_mask_;
- uintptr_t object_expected_;
-
- bool committed_;
- SemiSpaceId id_;
-
- NewSpacePage anchor_;
- NewSpacePage* current_page_;
-
- friend class SemiSpaceIterator;
- friend class NewSpacePageIterator;
- public:
- TRACK_MEMORY("SemiSpace")
-};
-
-
-// A SemiSpaceIterator is an ObjectIterator that iterates over the active
-// semispace of the heap's new space. It iterates over the objects in the
-// semispace from a given start address (defaulting to the bottom of the
-// semispace) to the top of the semispace. New objects allocated after the
-// iterator is created are not iterated.
-class SemiSpaceIterator : public ObjectIterator {
- public:
- // Create an iterator over the objects in the given space. If no start
- // address is given, the iterator starts from the bottom of the space. If
- // no size function is given, the iterator calls Object::Size().
-
- // Iterate over all of allocated to-space.
- explicit SemiSpaceIterator(NewSpace* space);
- // Iterate over all of allocated to-space, with a custome size function.
- SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
- // Iterate over part of allocated to-space, from start to the end
- // of allocation.
- SemiSpaceIterator(NewSpace* space, Address start);
- // Iterate from one address to another in the same semi-space.
- SemiSpaceIterator(Address from, Address to);
-
- HeapObject* Next() {
- if (current_ == limit_) return NULL;
- if (NewSpacePage::IsAtEnd(current_)) {
- NewSpacePage* page = NewSpacePage::FromLimit(current_);
- page = page->next_page();
- ASSERT(!page->is_anchor());
- current_ = page->area_start();
- if (current_ == limit_) return NULL;
- }
-
- HeapObject* object = HeapObject::FromAddress(current_);
- int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
-
- current_ += size;
- return object;
- }
-
- // Implementation of the ObjectIterator functions.
- virtual HeapObject* next_object() { return Next(); }
-
- private:
- void Initialize(Address start,
- Address end,
- HeapObjectCallback size_func);
-
- // The current iteration point.
- Address current_;
- // The end of iteration.
- Address limit_;
- // The callback function.
- HeapObjectCallback size_func_;
-};
-
-
-// -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a semi-space.
-class NewSpacePageIterator BASE_EMBEDDED {
- public:
- // Make an iterator that runs over all pages in to-space.
- explicit inline NewSpacePageIterator(NewSpace* space);
-
- // Make an iterator that runs over all pages in the given semispace,
- // even those not used in allocation.
- explicit inline NewSpacePageIterator(SemiSpace* space);
-
- // Make iterator that iterates from the page containing start
- // to the page that contains limit in the same semispace.
- inline NewSpacePageIterator(Address start, Address limit);
-
- inline bool has_next();
- inline NewSpacePage* next();
-
- private:
- NewSpacePage* prev_page_; // Previous page returned.
- // Next page that will be returned. Cached here so that we can use this
- // iterator for operations that deallocate pages.
- NewSpacePage* next_page_;
- // Last page returned.
- NewSpacePage* last_page_;
-};
-
-
-// -----------------------------------------------------------------------------
-// The young generation space.
-//
-// The new space consists of a contiguous pair of semispaces. It simply
-// forwards most functions to the appropriate semispace.
-
-class NewSpace : public Space {
- public:
- // Constructor.
- explicit NewSpace(Heap* heap)
- : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- to_space_(heap, kToSpace),
- from_space_(heap, kFromSpace),
- reservation_(),
- inline_allocation_limit_step_(0) {}
-
- // Sets up the new space using the given chunk.
- bool SetUp(int reserved_semispace_size_, int max_semispace_size);
-
- // Tears down the space. Heap memory was not allocated by the space, so it
- // is not deallocated here.
- void TearDown();
-
- // True if the space has been set up but not torn down.
- bool HasBeenSetUp() {
- return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
- }
-
- // Flip the pair of spaces.
- void Flip();
-
- // Grow the capacity of the semispaces. Assumes that they are not at
- // their maximum capacity.
- void Grow();
-
- // Shrink the capacity of the semispaces.
- void Shrink();
-
- // True if the address or object lies in the address range of either
- // semispace (not necessarily below the allocation pointer).
- bool Contains(Address a) {
- return (reinterpret_cast<uintptr_t>(a) & address_mask_)
- == reinterpret_cast<uintptr_t>(start_);
- }
-
- bool Contains(Object* o) {
- Address a = reinterpret_cast<Address>(o);
- return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
- }
-
- // Return the allocated bytes in the active semispace.
- virtual intptr_t Size() {
- return pages_used_ * NewSpacePage::kAreaSize +
- static_cast<int>(top() - to_space_.page_low());
- }
-
- // The same, but returning an int. We have to have the one that returns
- // intptr_t because it is inherited, but if we know we are dealing with the
- // new space, which can't get as big as the other spaces then this is useful:
- int SizeAsInt() { return static_cast<int>(Size()); }
-
- // Return the current capacity of a semispace.
- intptr_t EffectiveCapacity() {
- SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
- return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
- }
-
- // Return the current capacity of a semispace.
- intptr_t Capacity() {
- ASSERT(to_space_.Capacity() == from_space_.Capacity());
- return to_space_.Capacity();
- }
-
- // Return the total amount of memory committed for new space.
- intptr_t CommittedMemory() {
- if (from_space_.is_committed()) return 2 * Capacity();
- return Capacity();
- }
-
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory();
-
- // Return the available bytes without growing.
- intptr_t Available() {
- return Capacity() - Size();
- }
-
- // Return the maximum capacity of a semispace.
- int MaximumCapacity() {
- ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
- return to_space_.MaximumCapacity();
- }
-
- // Returns the initial capacity of a semispace.
- int InitialCapacity() {
- ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
- return to_space_.InitialCapacity();
- }
-
- // Return the address of the allocation pointer in the active semispace.
- Address top() {
- ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
- return allocation_info_.top;
- }
- // Return the address of the first object in the active semispace.
- Address bottom() { return to_space_.space_start(); }
-
- // Get the age mark of the inactive semispace.
- Address age_mark() { return from_space_.age_mark(); }
- // Set the age mark in the active semispace.
- void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
-
- // The start address of the space and a bit mask. Anding an address in the
- // new space with the mask will result in the start address.
- Address start() { return start_; }
- uintptr_t mask() { return address_mask_; }
-
- INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
- ASSERT(Contains(addr));
- ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
- IsAligned(OffsetFrom(addr) - 1, kPointerSize));
- return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
- }
-
- INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
- return reinterpret_cast<Address>(index << kPointerSizeLog2);
- }
-
- // The allocation top and limit addresses.
- Address* allocation_top_address() { return &allocation_info_.top; }
- Address* allocation_limit_address() { return &allocation_info_.limit; }
-
- MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
-
- // Reset the allocation pointer to the beginning of the active semispace.
- void ResetAllocationInfo();
-
- void LowerInlineAllocationLimit(intptr_t step) {
- inline_allocation_limit_step_ = step;
- if (step == 0) {
- allocation_info_.limit = to_space_.page_high();
- } else {
- allocation_info_.limit = Min(
- allocation_info_.top + inline_allocation_limit_step_,
- allocation_info_.limit);
- }
- top_on_previous_step_ = allocation_info_.top;
- }
-
- // Get the extent of the inactive semispace (for use as a marking stack,
- // or to zap it). Notice: space-addresses are not necessarily on the
- // same page, so FromSpaceStart() might be above FromSpaceEnd().
- Address FromSpacePageLow() { return from_space_.page_low(); }
- Address FromSpacePageHigh() { return from_space_.page_high(); }
- Address FromSpaceStart() { return from_space_.space_start(); }
- Address FromSpaceEnd() { return from_space_.space_end(); }
-
- // Get the extent of the active semispace's pages' memory.
- Address ToSpaceStart() { return to_space_.space_start(); }
- Address ToSpaceEnd() { return to_space_.space_end(); }
-
- inline bool ToSpaceContains(Address address) {
- return to_space_.Contains(address);
- }
- inline bool FromSpaceContains(Address address) {
- return from_space_.Contains(address);
- }
-
- // True if the object is a heap object in the address range of the
- // respective semispace (not necessarily below the allocation pointer of the
- // semispace).
- inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
- inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
-
- // Try to switch the active semispace to a new, empty, page.
- // Returns false if this isn't possible or reasonable (i.e., there
- // are no pages, or the current page is already empty), or true
- // if successful.
- bool AddFreshPage();
-
- virtual bool ReserveSpace(int bytes);
-
- // Resizes a sequential string which must be the most recent thing that was
- // allocated in new space.
- template <typename StringType>
- inline void ShrinkStringAtAllocationBoundary(String* string, int len);
-
-#ifdef VERIFY_HEAP
- // Verify the active semispace.
- virtual void Verify();
-#endif
-
-#ifdef DEBUG
- // Print the active semispace.
- virtual void Print() { to_space_.Print(); }
-#endif
-
- // Iterates the active semispace to collect statistics.
- void CollectStatistics();
- // Reports previously collected statistics of the active semispace.
- void ReportStatistics();
- // Clears previously collected statistics.
- void ClearHistograms();
-
- // Record the allocation or promotion of a heap object. Note that we don't
- // record every single allocation, but only those that happen in the
- // to space during a scavenge GC.
- void RecordAllocation(HeapObject* obj);
- void RecordPromotion(HeapObject* obj);
-
- // Return whether the operation succeded.
- bool CommitFromSpaceIfNeeded() {
- if (from_space_.is_committed()) return true;
- return from_space_.Commit();
- }
-
- bool UncommitFromSpace() {
- if (!from_space_.is_committed()) return true;
- return from_space_.Uncommit();
- }
-
- inline intptr_t inline_allocation_limit_step() {
- return inline_allocation_limit_step_;
- }
-
- SemiSpace* active_space() { return &to_space_; }
-
- private:
- // Update allocation info to match the current to-space page.
- void UpdateAllocationInfo();
-
- Address chunk_base_;
- uintptr_t chunk_size_;
-
- // The semispaces.
- SemiSpace to_space_;
- SemiSpace from_space_;
- VirtualMemory reservation_;
- int pages_used_;
-
- // Start address and bit mask for containment testing.
- Address start_;
- uintptr_t address_mask_;
- uintptr_t object_mask_;
- uintptr_t object_expected_;
-
- // Allocation pointer and limit for normal allocation and allocation during
- // mark-compact collection.
- AllocationInfo allocation_info_;
-
- // When incremental marking is active we will set allocation_info_.limit
- // to be lower than actual limit and then will gradually increase it
- // in steps to guarantee that we do incremental marking steps even
- // when all allocation is performed from inlined generated code.
- intptr_t inline_allocation_limit_step_;
-
- Address top_on_previous_step_;
-
- HistogramInfo* allocated_histogram_;
- HistogramInfo* promoted_histogram_;
-
- MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
-
- friend class SemiSpaceIterator;
-
- public:
- TRACK_MEMORY("NewSpace")
-};
-
-
-// -----------------------------------------------------------------------------
-// Old object space (excluding map objects)
-
-class OldSpace : public PagedSpace {
- public:
- // Creates an old space object with a given maximum capacity.
- // The constructor does not allocate pages from OS.
- OldSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id,
- Executability executable)
- : PagedSpace(heap, max_capacity, id, executable) {
- page_extra_ = 0;
- }
-
- // The limit of allocation for a page in this space.
- virtual Address PageAllocationLimit(Page* page) {
- return page->area_end();
- }
-
- public:
- TRACK_MEMORY("OldSpace")
-};
-
-
-// For contiguous spaces, top should be in the space (or at the end) and limit
-// should be the end of the space.
-#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
- SLOW_ASSERT((space).page_low() <= (info).top \
- && (info).top <= (space).page_high() \
- && (info).limit <= (space).page_high())
-
-
-// -----------------------------------------------------------------------------
-// Old space for objects of a fixed size
-
-class FixedSpace : public PagedSpace {
- public:
- FixedSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id,
- int object_size_in_bytes)
- : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
- object_size_in_bytes_(object_size_in_bytes) {
- page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
- }
-
- // The limit of allocation for a page in this space.
- virtual Address PageAllocationLimit(Page* page) {
- return page->area_end() - page_extra_;
- }
-
- int object_size_in_bytes() { return object_size_in_bytes_; }
-
- // Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact();
-
- private:
- // The size of objects in this space.
- int object_size_in_bytes_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Old space for all map objects
-
-class MapSpace : public FixedSpace {
- public:
- // Creates a map space object with a maximum capacity.
- MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, Map::kSize),
- max_map_space_pages_(kMaxMapPageIndex - 1) {
- }
-
- // Given an index, returns the page address.
- // TODO(1600): this limit is artifical just to keep code compilable
- static const int kMaxMapPageIndex = 1 << 16;
-
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (IsPowerOf2(Map::kSize)) {
- return RoundDown(size, Map::kSize);
- } else {
- return (size / Map::kSize) * Map::kSize;
- }
- }
-
- protected:
- virtual void VerifyObject(HeapObject* obj);
-
- private:
- static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
-
- // Do map space compaction if there is a page gap.
- int CompactionThreshold() {
- return kMapsPerPage * (max_map_space_pages_ - 1);
- }
-
- const int max_map_space_pages_;
-
- public:
- TRACK_MEMORY("MapSpace")
-};
-
-
-// -----------------------------------------------------------------------------
-// Old space for all global object property cell objects
-
-class CellSpace : public FixedSpace {
- public:
- // Creates a property cell space object with a maximum capacity.
- CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize)
- {}
-
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
- return RoundDown(size, JSGlobalPropertyCell::kSize);
- } else {
- return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
- }
- }
-
- protected:
- virtual void VerifyObject(HeapObject* obj);
-
- public:
- TRACK_MEMORY("CellSpace")
-};
-
-
-// -----------------------------------------------------------------------------
-// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
-// the large object space. A large object is allocated from OS heap with
-// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
-// A large object always starts at Page::kObjectStartOffset to a page.
-// Large objects do not move during garbage collections.
-
-class LargeObjectSpace : public Space {
- public:
- LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
- virtual ~LargeObjectSpace() {}
-
- // Initializes internal data structures.
- bool SetUp();
-
- // Releases internal resources, frees objects in this space.
- void TearDown();
-
- static intptr_t ObjectSizeFor(intptr_t chunk_size) {
- if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
- return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
- }
-
- // Shared implementation of AllocateRaw, AllocateRawCode and
- // AllocateRawFixedArray.
- MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
- Executability executable);
-
- // Available bytes for objects in this space.
- inline intptr_t Available();
-
- virtual intptr_t Size() {
- return size_;
- }
-
- virtual intptr_t SizeOfObjects() {
- return objects_size_;
- }
-
- intptr_t CommittedMemory() {
- return Size();
- }
-
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory();
-
- int PageCount() {
- return page_count_;
- }
-
- // Finds an object for a given address, returns Failure::Exception()
- // if it is not found. The function iterates through all objects in this
- // space, may be slow.
- MaybeObject* FindObject(Address a);
-
- // Finds a large object page containing the given address, returns NULL
- // if such a page doesn't exist.
- LargePage* FindPage(Address a);
-
- // Frees unmarked objects.
- void FreeUnmarkedObjects();
-
- // Checks whether a heap object is in this space; O(1).
- bool Contains(HeapObject* obj);
-
- // Checks whether the space is empty.
- bool IsEmpty() { return first_page_ == NULL; }
-
- // See the comments for ReserveSpace in the Space class. This has to be
- // called after ReserveSpace has been called on the paged spaces, since they
- // may use some memory, leaving less for large objects.
- virtual bool ReserveSpace(int bytes);
-
- LargePage* first_page() { return first_page_; }
-
-#ifdef VERIFY_HEAP
- virtual void Verify();
-#endif
-
-#ifdef DEBUG
- virtual void Print();
- void ReportStatistics();
- void CollectCodeStatistics();
-#endif
- // Checks whether an address is in the object area in this space. It
- // iterates all objects in the space. May be slow.
- bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
-
- private:
- intptr_t max_capacity_;
- // The head of the linked list of large object chunks.
- LargePage* first_page_;
- intptr_t size_; // allocated bytes
- int page_count_; // number of chunks
- intptr_t objects_size_; // size of objects
- // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
- HashMap chunk_map_;
-
- friend class LargeObjectIterator;
-
- public:
- TRACK_MEMORY("LargeObjectSpace")
-};
-
-
-class LargeObjectIterator: public ObjectIterator {
- public:
- explicit LargeObjectIterator(LargeObjectSpace* space);
- LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
-
- HeapObject* Next();
-
- // implementation of ObjectIterator.
- virtual HeapObject* next_object() { return Next(); }
-
- private:
- LargePage* current_;
- HeapObjectCallback size_func_;
-};
-
-
-// Iterates over the chunks (pages and large object pages) that can contain
-// pointers to new space.
-class PointerChunkIterator BASE_EMBEDDED {
- public:
- inline explicit PointerChunkIterator(Heap* heap);
-
- // Return NULL when the iterator is done.
- MemoryChunk* next() {
- switch (state_) {
- case kOldPointerState: {
- if (old_pointer_iterator_.has_next()) {
- return old_pointer_iterator_.next();
- }
- state_ = kMapState;
- // Fall through.
- }
- case kMapState: {
- if (map_iterator_.has_next()) {
- return map_iterator_.next();
- }
- state_ = kLargeObjectState;
- // Fall through.
- }
- case kLargeObjectState: {
- HeapObject* heap_object;
- do {
- heap_object = lo_iterator_.Next();
- if (heap_object == NULL) {
- state_ = kFinishedState;
- return NULL;
- }
- // Fixed arrays are the only pointer-containing objects in large
- // object space.
- } while (!heap_object->IsFixedArray());
- MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
- return answer;
- }
- case kFinishedState:
- return NULL;
- default:
- break;
- }
- UNREACHABLE();
- return NULL;
- }
-
-
- private:
- enum State {
- kOldPointerState,
- kMapState,
- kLargeObjectState,
- kFinishedState
- };
- State state_;
- PageIterator old_pointer_iterator_;
- PageIterator map_iterator_;
- LargeObjectIterator lo_iterator_;
-};
-
-
-#ifdef DEBUG
-struct CommentStatistic {
- const char* comment;
- int size;
- int count;
- void Clear() {
- comment = NULL;
- size = 0;
- count = 0;
- }
- // Must be small, since an iteration is used for lookup.
- static const int kMaxComments = 64;
-};
-#endif
-
-
-} } // namespace v8::internal
-
-#endif // V8_SPACES_H_
diff --git a/src/3rdparty/v8/src/splay-tree-inl.h b/src/3rdparty/v8/src/splay-tree-inl.h
deleted file mode 100644
index 4eca71d..0000000
--- a/src/3rdparty/v8/src/splay-tree-inl.h
+++ /dev/null
@@ -1,311 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SPLAY_TREE_INL_H_
-#define V8_SPLAY_TREE_INL_H_
-
-#include "splay-tree.h"
-
-namespace v8 {
-namespace internal {
-
-
-template<typename Config, class Allocator>
-SplayTree<Config, Allocator>::~SplayTree() {
- NodeDeleter deleter;
- ForEachNode(&deleter);
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Insert(const Key& key,
- Locator* locator) {
- if (is_empty()) {
- // If the tree is empty, insert the new node.
- root_ = new(allocator_) Node(key, Config::NoValue());
- } else {
- // Splay on the key to move the last node on the search path
- // for the key to the root of the tree.
- Splay(key);
- // Ignore repeated insertions with the same key.
- int cmp = Config::Compare(key, root_->key_);
- if (cmp == 0) {
- locator->bind(root_);
- return false;
- }
- // Insert the new node.
- Node* node = new(allocator_) Node(key, Config::NoValue());
- InsertInternal(cmp, node);
- }
- locator->bind(root_);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-void SplayTree<Config, Allocator>::InsertInternal(int cmp, Node* node) {
- if (cmp > 0) {
- node->left_ = root_;
- node->right_ = root_->right_;
- root_->right_ = NULL;
- } else {
- node->right_ = root_;
- node->left_ = root_->left_;
- root_->left_ = NULL;
- }
- root_ = node;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindInternal(const Key& key) {
- if (is_empty())
- return false;
- Splay(key);
- return Config::Compare(key, root_->key_) == 0;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Find(const Key& key, Locator* locator) {
- if (FindInternal(key)) {
- locator->bind(root_);
- return true;
- } else {
- return false;
- }
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindGreatestLessThan(const Key& key,
- Locator* locator) {
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- Splay(key);
- // Now the result is either the root node or the greatest node in
- // the left subtree.
- int cmp = Config::Compare(root_->key_, key);
- if (cmp <= 0) {
- locator->bind(root_);
- return true;
- } else {
- Node* temp = root_;
- root_ = root_->left_;
- bool result = FindGreatest(locator);
- root_ = temp;
- return result;
- }
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindLeastGreaterThan(const Key& key,
- Locator* locator) {
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- Splay(key);
- // Now the result is either the root node or the least node in
- // the right subtree.
- int cmp = Config::Compare(root_->key_, key);
- if (cmp >= 0) {
- locator->bind(root_);
- return true;
- } else {
- Node* temp = root_;
- root_ = root_->right_;
- bool result = FindLeast(locator);
- root_ = temp;
- return result;
- }
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindGreatest(Locator* locator) {
- if (is_empty())
- return false;
- Node* current = root_;
- while (current->right_ != NULL)
- current = current->right_;
- locator->bind(current);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindLeast(Locator* locator) {
- if (is_empty())
- return false;
- Node* current = root_;
- while (current->left_ != NULL)
- current = current->left_;
- locator->bind(current);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Move(const Key& old_key,
- const Key& new_key) {
- if (!FindInternal(old_key))
- return false;
- Node* node_to_move = root_;
- RemoveRootNode(old_key);
- Splay(new_key);
- int cmp = Config::Compare(new_key, root_->key_);
- if (cmp == 0) {
- // A node with the target key already exists.
- delete node_to_move;
- return false;
- }
- node_to_move->key_ = new_key;
- InsertInternal(cmp, node_to_move);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Remove(const Key& key) {
- if (!FindInternal(key))
- return false;
- Node* node_to_remove = root_;
- RemoveRootNode(key);
- delete node_to_remove;
- return true;
-}
-
-
-template<typename Config, class Allocator>
-void SplayTree<Config, Allocator>::RemoveRootNode(const Key& key) {
- if (root_->left_ == NULL) {
- // No left child, so the new tree is just the right child.
- root_ = root_->right_;
- } else {
- // Left child exists.
- Node* right = root_->right_;
- // Make the original left child the new root.
- root_ = root_->left_;
- // Splay to make sure that the new root has an empty right child.
- Splay(key);
- // Insert the original right child as the right child of the new
- // root.
- root_->right_ = right;
- }
-}
-
-
-template<typename Config, class Allocator>
-void SplayTree<Config, Allocator>::Splay(const Key& key) {
- if (is_empty())
- return;
- Node dummy_node(Config::kNoKey, Config::NoValue());
- // Create a dummy node. The use of the dummy node is a bit
- // counter-intuitive: The right child of the dummy node will hold
- // the L tree of the algorithm. The left child of the dummy node
- // will hold the R tree of the algorithm. Using a dummy node, left
- // and right will always be nodes and we avoid special cases.
- Node* dummy = &dummy_node;
- Node* left = dummy;
- Node* right = dummy;
- Node* current = root_;
- while (true) {
- int cmp = Config::Compare(key, current->key_);
- if (cmp < 0) {
- if (current->left_ == NULL)
- break;
- if (Config::Compare(key, current->left_->key_) < 0) {
- // Rotate right.
- Node* temp = current->left_;
- current->left_ = temp->right_;
- temp->right_ = current;
- current = temp;
- if (current->left_ == NULL)
- break;
- }
- // Link right.
- right->left_ = current;
- right = current;
- current = current->left_;
- } else if (cmp > 0) {
- if (current->right_ == NULL)
- break;
- if (Config::Compare(key, current->right_->key_) > 0) {
- // Rotate left.
- Node* temp = current->right_;
- current->right_ = temp->left_;
- temp->left_ = current;
- current = temp;
- if (current->right_ == NULL)
- break;
- }
- // Link left.
- left->right_ = current;
- left = current;
- current = current->right_;
- } else {
- break;
- }
- }
- // Assemble.
- left->right_ = current->left_;
- right->left_ = current->right_;
- current->left_ = dummy->right_;
- current->right_ = dummy->left_;
- root_ = current;
-}
-
-
-template <typename Config, class Allocator> template <class Callback>
-void SplayTree<Config, Allocator>::ForEach(Callback* callback) {
- NodeToPairAdaptor<Callback> callback_adaptor(callback);
- ForEachNode(&callback_adaptor);
-}
-
-
-template <typename Config, class Allocator> template <class Callback>
-void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
- // Pre-allocate some space for tiny trees.
- List<Node*, Allocator> nodes_to_visit(10, allocator_);
- if (root_ != NULL) nodes_to_visit.Add(root_, allocator_);
- int pos = 0;
- while (pos < nodes_to_visit.length()) {
- Node* node = nodes_to_visit[pos++];
- if (node->left() != NULL) nodes_to_visit.Add(node->left(), allocator_);
- if (node->right() != NULL) nodes_to_visit.Add(node->right(), allocator_);
- callback->Call(node);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_SPLAY_TREE_INL_H_
diff --git a/src/3rdparty/v8/src/splay-tree.h b/src/3rdparty/v8/src/splay-tree.h
deleted file mode 100644
index 8844d8a..0000000
--- a/src/3rdparty/v8/src/splay-tree.h
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SPLAY_TREE_H_
-#define V8_SPLAY_TREE_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-
-// A splay tree. The config type parameter encapsulates the different
-// configurations of a concrete splay tree:
-//
-// typedef Key: the key type
-// typedef Value: the value type
-// static const kNoKey: the dummy key used when no key is set
-// static const kNoValue: the dummy value used to initialize nodes
-// int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
-//
-// The tree is also parameterized by an allocation policy
-// (Allocator). The policy is used for allocating lists in the C free
-// store or the zone; see zone.h.
-
-// Forward defined as
-// template <typename Config, class Allocator = FreeStoreAllocationPolicy>
-// class SplayTree;
-template <typename Config, class AllocationPolicy>
-class SplayTree {
- public:
- typedef typename Config::Key Key;
- typedef typename Config::Value Value;
-
- class Locator;
-
- SplayTree(AllocationPolicy allocator = AllocationPolicy())
- : root_(NULL), allocator_(allocator) { }
- ~SplayTree();
-
- INLINE(void* operator new(size_t size,
- AllocationPolicy allocator = AllocationPolicy())) {
- return allocator.New(static_cast<int>(size));
- }
- INLINE(void operator delete(void* p)) {
- AllocationPolicy::Delete(p);
- }
- // Please the MSVC compiler. We should never have to execute this.
- INLINE(void operator delete(void* p, AllocationPolicy policy)) {
- UNREACHABLE();
- }
-
- // Inserts the given key in this tree with the given value. Returns
- // true if a node was inserted, otherwise false. If found the locator
- // is enabled and provides access to the mapping for the key.
- bool Insert(const Key& key, Locator* locator);
-
- // Looks up the key in this tree and returns true if it was found,
- // otherwise false. If the node is found the locator is enabled and
- // provides access to the mapping for the key.
- bool Find(const Key& key, Locator* locator);
-
- // Finds the mapping with the greatest key less than or equal to the
- // given key.
- bool FindGreatestLessThan(const Key& key, Locator* locator);
-
- // Find the mapping with the greatest key in this tree.
- bool FindGreatest(Locator* locator);
-
- // Finds the mapping with the least key greater than or equal to the
- // given key.
- bool FindLeastGreaterThan(const Key& key, Locator* locator);
-
- // Find the mapping with the least key in this tree.
- bool FindLeast(Locator* locator);
-
- // Move the node from one key to another.
- bool Move(const Key& old_key, const Key& new_key);
-
- // Remove the node with the given key from the tree.
- bool Remove(const Key& key);
-
- bool is_empty() { return root_ == NULL; }
-
- // Perform the splay operation for the given key. Moves the node with
- // the given key to the top of the tree. If no node has the given
- // key, the last node on the search path is moved to the top of the
- // tree.
- void Splay(const Key& key);
-
- class Node {
- public:
- Node(const Key& key, const Value& value)
- : key_(key),
- value_(value),
- left_(NULL),
- right_(NULL) { }
-
- INLINE(void* operator new(size_t size, AllocationPolicy allocator)) {
- return allocator.New(static_cast<int>(size));
- }
- INLINE(void operator delete(void* p)) {
- return AllocationPolicy::Delete(p);
- }
- // Please the MSVC compiler. We should never have to execute
- // this.
- INLINE(void operator delete(void* p, AllocationPolicy allocator)) {
- UNREACHABLE();
- }
-
- Key key() { return key_; }
- Value value() { return value_; }
- Node* left() { return left_; }
- Node* right() { return right_; }
-
- private:
- friend class SplayTree;
- friend class Locator;
- Key key_;
- Value value_;
- Node* left_;
- Node* right_;
- };
-
- // A locator provides access to a node in the tree without actually
- // exposing the node.
- class Locator BASE_EMBEDDED {
- public:
- explicit Locator(Node* node) : node_(node) { }
- Locator() : node_(NULL) { }
- const Key& key() { return node_->key_; }
- Value& value() { return node_->value_; }
- void set_value(const Value& value) { node_->value_ = value; }
- inline void bind(Node* node) { node_ = node; }
-
- private:
- Node* node_;
- };
-
- template <class Callback>
- void ForEach(Callback* callback);
-
- protected:
- // Resets tree root. Existing nodes become unreachable.
- void ResetRoot() { root_ = NULL; }
-
- private:
- // Search for a node with a given key. If found, root_ points
- // to the node.
- bool FindInternal(const Key& key);
-
- // Inserts a node assuming that root_ is already set up.
- void InsertInternal(int cmp, Node* node);
-
- // Removes root_ node.
- void RemoveRootNode(const Key& key);
-
- template<class Callback>
- class NodeToPairAdaptor BASE_EMBEDDED {
- public:
- explicit NodeToPairAdaptor(Callback* callback)
- : callback_(callback) { }
- void Call(Node* node) {
- callback_->Call(node->key(), node->value());
- }
-
- private:
- Callback* callback_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeToPairAdaptor);
- };
-
- class NodeDeleter BASE_EMBEDDED {
- public:
- NodeDeleter() { }
- void Call(Node* node) { AllocationPolicy::Delete(node); }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(NodeDeleter);
- };
-
- template <class Callback>
- void ForEachNode(Callback* callback);
-
- Node* root_;
- AllocationPolicy allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(SplayTree);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_SPLAY_TREE_H_
diff --git a/src/3rdparty/v8/src/store-buffer-inl.h b/src/3rdparty/v8/src/store-buffer-inl.h
deleted file mode 100644
index dd65cbc..0000000
--- a/src/3rdparty/v8/src/store-buffer-inl.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STORE_BUFFER_INL_H_
-#define V8_STORE_BUFFER_INL_H_
-
-#include "store-buffer.h"
-
-namespace v8 {
-namespace internal {
-
-Address StoreBuffer::TopAddress() {
- return reinterpret_cast<Address>(heap_->store_buffer_top_address());
-}
-
-
-void StoreBuffer::Mark(Address addr) {
- ASSERT(!heap_->cell_space()->Contains(addr));
- ASSERT(!heap_->code_space()->Contains(addr));
- Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
- *top++ = addr;
- heap_->public_set_store_buffer_top(top);
- if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
- ASSERT(top == limit_);
- Compact();
- } else {
- ASSERT(top < limit_);
- }
-}
-
-
-void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
- if (store_buffer_rebuilding_enabled_) {
- SLOW_ASSERT(!heap_->cell_space()->Contains(addr) &&
- !heap_->code_space()->Contains(addr) &&
- !heap_->old_data_space()->Contains(addr) &&
- !heap_->new_space()->Contains(addr));
- Address* top = old_top_;
- *top++ = addr;
- old_top_ = top;
- old_buffer_is_sorted_ = false;
- old_buffer_is_filtered_ = false;
- if (top >= old_limit_) {
- ASSERT(callback_ != NULL);
- (*callback_)(heap_,
- MemoryChunk::FromAnyPointerAddress(addr),
- kStoreBufferFullEvent);
- }
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_STORE_BUFFER_INL_H_
diff --git a/src/3rdparty/v8/src/store-buffer.cc b/src/3rdparty/v8/src/store-buffer.cc
deleted file mode 100644
index 8a69164..0000000
--- a/src/3rdparty/v8/src/store-buffer.cc
+++ /dev/null
@@ -1,726 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "store-buffer.h"
-#include "store-buffer-inl.h"
-#include "v8-counters.h"
-
-namespace v8 {
-namespace internal {
-
-StoreBuffer::StoreBuffer(Heap* heap)
- : heap_(heap),
- start_(NULL),
- limit_(NULL),
- old_start_(NULL),
- old_limit_(NULL),
- old_top_(NULL),
- old_reserved_limit_(NULL),
- old_buffer_is_sorted_(false),
- old_buffer_is_filtered_(false),
- during_gc_(false),
- store_buffer_rebuilding_enabled_(false),
- callback_(NULL),
- may_move_store_buffer_entries_(true),
- virtual_memory_(NULL),
- hash_set_1_(NULL),
- hash_set_2_(NULL),
- hash_sets_are_empty_(true) {
-}
-
-
-void StoreBuffer::SetUp() {
- virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
- uintptr_t start_as_int =
- reinterpret_cast<uintptr_t>(virtual_memory_->address());
- start_ =
- reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
- limit_ = start_ + (kStoreBufferSize / kPointerSize);
-
- old_virtual_memory_ =
- new VirtualMemory(kOldStoreBufferLength * kPointerSize);
- old_top_ = old_start_ =
- reinterpret_cast<Address*>(old_virtual_memory_->address());
- // Don't know the alignment requirements of the OS, but it is certainly not
- // less than 0xfff.
- ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
- int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
- ASSERT(initial_length > 0);
- ASSERT(initial_length <= kOldStoreBufferLength);
- old_limit_ = old_start_ + initial_length;
- old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
-
- CHECK(old_virtual_memory_->Commit(
- reinterpret_cast<void*>(old_start_),
- (old_limit_ - old_start_) * kPointerSize,
- false));
-
- ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
- ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
- Address* vm_limit = reinterpret_cast<Address*>(
- reinterpret_cast<char*>(virtual_memory_->address()) +
- virtual_memory_->size());
- ASSERT(start_ <= vm_limit);
- ASSERT(limit_ <= vm_limit);
- USE(vm_limit);
- ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
- ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
- 0);
-
- CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
- kStoreBufferSize,
- false)); // Not executable.
- heap_->public_set_store_buffer_top(start_);
-
- hash_set_1_ = new uintptr_t[kHashSetLength];
- hash_set_2_ = new uintptr_t[kHashSetLength];
- hash_sets_are_empty_ = false;
-
- ClearFilteringHashSets();
-}
-
-
-void StoreBuffer::TearDown() {
- delete virtual_memory_;
- delete old_virtual_memory_;
- delete[] hash_set_1_;
- delete[] hash_set_2_;
- old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
- start_ = limit_ = NULL;
- heap_->public_set_store_buffer_top(start_);
-}
-
-
-void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
- isolate->heap()->store_buffer()->Compact();
-}
-
-
-#if V8_TARGET_ARCH_X64
-static int CompareAddresses(const void* void_a, const void* void_b) {
- intptr_t a =
- reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
- intptr_t b =
- reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
- // Unfortunately if int is smaller than intptr_t there is no branch-free
- // way to return a number with the same sign as the difference between the
- // pointers.
- if (a == b) return 0;
- if (a < b) return -1;
- ASSERT(a > b);
- return 1;
-}
-#else
-static int CompareAddresses(const void* void_a, const void* void_b) {
- intptr_t a =
- reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
- intptr_t b =
- reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
- ASSERT(sizeof(1) == sizeof(a));
- // Shift down to avoid wraparound.
- return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
-}
-#endif
-
-
-void StoreBuffer::Uniq() {
- // Remove adjacent duplicates and cells that do not point at new space.
- Address previous = NULL;
- Address* write = old_start_;
- ASSERT(may_move_store_buffer_entries_);
- for (Address* read = old_start_; read < old_top_; read++) {
- Address current = *read;
- if (current != previous) {
- if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
- *write++ = current;
- }
- }
- previous = current;
- }
- old_top_ = write;
-}
-
-
-void StoreBuffer::EnsureSpace(intptr_t space_needed) {
- while (old_limit_ - old_top_ < space_needed &&
- old_limit_ < old_reserved_limit_) {
- size_t grow = old_limit_ - old_start_; // Double size.
- CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
- grow * kPointerSize,
- false));
- old_limit_ += grow;
- }
-
- if (old_limit_ - old_top_ >= space_needed) return;
-
- if (old_buffer_is_filtered_) return;
- ASSERT(may_move_store_buffer_entries_);
- Compact();
-
- old_buffer_is_filtered_ = true;
- bool page_has_scan_on_scavenge_flag = false;
-
- PointerChunkIterator it(heap_);
- MemoryChunk* chunk;
- while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
- }
-
- if (page_has_scan_on_scavenge_flag) {
- Filter(MemoryChunk::SCAN_ON_SCAVENGE);
- }
-
- // If filtering out the entries from scan_on_scavenge pages got us down to
- // less than half full, then we are satisfied with that.
- if (old_limit_ - old_top_ > old_top_ - old_start_) return;
-
- // Sample 1 entry in 97 and filter out the pages where we estimate that more
- // than 1 in 8 pointers are to new space.
- static const int kSampleFinenesses = 5;
- static const struct Samples {
- int prime_sample_step;
- int threshold;
- } samples[kSampleFinenesses] = {
- { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
- { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
- { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
- { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
- { 1, 0}
- };
- for (int i = kSampleFinenesses - 1; i >= 0; i--) {
- ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
- // As a last resort we mark all pages as being exempt from the store buffer.
- ASSERT(i != 0 || old_top_ == old_start_);
- if (old_limit_ - old_top_ > old_top_ - old_start_) return;
- }
- UNREACHABLE();
-}
-
-
-// Sample the store buffer to see if some pages are taking up a lot of space
-// in the store buffer.
-void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
- PointerChunkIterator it(heap_);
- MemoryChunk* chunk;
- while ((chunk = it.next()) != NULL) {
- chunk->set_store_buffer_counter(0);
- }
- bool created_new_scan_on_scavenge_pages = false;
- MemoryChunk* previous_chunk = NULL;
- for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
- Address addr = *p;
- MemoryChunk* containing_chunk = NULL;
- if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
- containing_chunk = previous_chunk;
- } else {
- containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
- }
- int old_counter = containing_chunk->store_buffer_counter();
- if (old_counter == threshold) {
- containing_chunk->set_scan_on_scavenge(true);
- created_new_scan_on_scavenge_pages = true;
- }
- containing_chunk->set_store_buffer_counter(old_counter + 1);
- previous_chunk = containing_chunk;
- }
- if (created_new_scan_on_scavenge_pages) {
- Filter(MemoryChunk::SCAN_ON_SCAVENGE);
- }
- old_buffer_is_filtered_ = true;
-}
-
-
-void StoreBuffer::Filter(int flag) {
- Address* new_top = old_start_;
- MemoryChunk* previous_chunk = NULL;
- for (Address* p = old_start_; p < old_top_; p++) {
- Address addr = *p;
- MemoryChunk* containing_chunk = NULL;
- if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
- containing_chunk = previous_chunk;
- } else {
- containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
- previous_chunk = containing_chunk;
- }
- if (!containing_chunk->IsFlagSet(flag)) {
- *new_top++ = addr;
- }
- }
- old_top_ = new_top;
-
- // Filtering hash sets are inconsistent with the store buffer after this
- // operation.
- ClearFilteringHashSets();
-}
-
-
-void StoreBuffer::SortUniq() {
- Compact();
- if (old_buffer_is_sorted_) return;
- qsort(reinterpret_cast<void*>(old_start_),
- old_top_ - old_start_,
- sizeof(*old_top_),
- &CompareAddresses);
- Uniq();
-
- old_buffer_is_sorted_ = true;
-
- // Filtering hash sets are inconsistent with the store buffer after this
- // operation.
- ClearFilteringHashSets();
-}
-
-
-bool StoreBuffer::PrepareForIteration() {
- Compact();
- PointerChunkIterator it(heap_);
- MemoryChunk* chunk;
- bool page_has_scan_on_scavenge_flag = false;
- while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
- }
-
- if (page_has_scan_on_scavenge_flag) {
- Filter(MemoryChunk::SCAN_ON_SCAVENGE);
- }
-
- // Filtering hash sets are inconsistent with the store buffer after
- // iteration.
- ClearFilteringHashSets();
-
- return page_has_scan_on_scavenge_flag;
-}
-
-
-#ifdef DEBUG
-void StoreBuffer::Clean() {
- ClearFilteringHashSets();
- Uniq(); // Also removes things that no longer point to new space.
- CheckForFullBuffer();
-}
-
-
-static Address* in_store_buffer_1_element_cache = NULL;
-
-
-bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
- if (!FLAG_enable_slow_asserts) return true;
- if (in_store_buffer_1_element_cache != NULL &&
- *in_store_buffer_1_element_cache == cell_address) {
- return true;
- }
- Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
- for (Address* current = top - 1; current >= start_; current--) {
- if (*current == cell_address) {
- in_store_buffer_1_element_cache = current;
- return true;
- }
- }
- for (Address* current = old_top_ - 1; current >= old_start_; current--) {
- if (*current == cell_address) {
- in_store_buffer_1_element_cache = current;
- return true;
- }
- }
- return false;
-}
-#endif
-
-
-void StoreBuffer::ClearFilteringHashSets() {
- if (!hash_sets_are_empty_) {
- memset(reinterpret_cast<void*>(hash_set_1_),
- 0,
- sizeof(uintptr_t) * kHashSetLength);
- memset(reinterpret_cast<void*>(hash_set_2_),
- 0,
- sizeof(uintptr_t) * kHashSetLength);
- hash_sets_are_empty_ = true;
- }
-}
-
-
-void StoreBuffer::GCPrologue() {
- ClearFilteringHashSets();
- during_gc_ = true;
-}
-
-
-#ifdef VERIFY_HEAP
-static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
- // Do nothing.
-}
-
-
-void StoreBuffer::VerifyPointers(PagedSpace* space,
- RegionCallback region_callback) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* page = it.next();
- FindPointersToNewSpaceOnPage(
- reinterpret_cast<PagedSpace*>(page->owner()),
- page,
- region_callback,
- &DummyScavengePointer);
- }
-}
-
-
-void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
- LargeObjectIterator it(space);
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- if (object->IsFixedArray()) {
- Address slot_address = object->address();
- Address end = object->address() + object->Size();
-
- while (slot_address < end) {
- HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
- // When we are not in GC the Heap::InNewSpace() predicate
- // checks that pointers which satisfy predicate point into
- // the active semispace.
- heap_->InNewSpace(*slot);
- slot_address += kPointerSize;
- }
- }
- }
-}
-#endif
-
-
-void StoreBuffer::Verify() {
-#ifdef VERIFY_HEAP
- VerifyPointers(heap_->old_pointer_space(),
- &StoreBuffer::FindPointersToNewSpaceInRegion);
- VerifyPointers(heap_->map_space(),
- &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
- VerifyPointers(heap_->lo_space());
-#endif
-}
-
-
-void StoreBuffer::GCEpilogue() {
- during_gc_ = false;
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Verify();
- }
-#endif
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInRegion(
- Address start, Address end, ObjectSlotCallback slot_callback) {
- for (Address slot_address = start;
- slot_address < end;
- slot_address += kPointerSize) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- if (heap_->InNewSpace(*slot)) {
- HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
- ASSERT(object->IsHeapObject());
- slot_callback(reinterpret_cast<HeapObject**>(slot), object);
- if (heap_->InNewSpace(*slot)) {
- EnterDirectlyIntoStoreBuffer(slot_address);
- }
- }
- }
-}
-
-
-// Compute start address of the first map following given addr.
-static inline Address MapStartAlign(Address addr) {
- Address page = Page::FromAddress(addr)->area_start();
- return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
-}
-
-
-// Compute end address of the first map preceding given addr.
-static inline Address MapEndAlign(Address addr) {
- Address page = Page::FromAllocationTop(addr)->area_start();
- return page + ((addr - page) / Map::kSize * Map::kSize);
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInMaps(
- Address start,
- Address end,
- ObjectSlotCallback slot_callback) {
- ASSERT(MapStartAlign(start) == start);
- ASSERT(MapEndAlign(end) == end);
-
- Address map_address = start;
- while (map_address < end) {
- ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
- ASSERT(Memory::Object_at(map_address)->IsMap());
-
- Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
- Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
-
- FindPointersToNewSpaceInRegion(pointer_fields_start,
- pointer_fields_end,
- slot_callback);
- map_address += Map::kSize;
- }
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
- Address start,
- Address end,
- ObjectSlotCallback slot_callback) {
- Address map_aligned_start = MapStartAlign(start);
- Address map_aligned_end = MapEndAlign(end);
-
- ASSERT(map_aligned_start == start);
- ASSERT(map_aligned_end == end);
-
- FindPointersToNewSpaceInMaps(map_aligned_start,
- map_aligned_end,
- slot_callback);
-}
-
-
-// This function iterates over all the pointers in a paged space in the heap,
-// looking for pointers into new space. Within the pages there may be dead
-// objects that have not been overwritten by free spaces or fillers because of
-// lazy sweeping. These dead objects may not contain pointers to new space.
-// The garbage areas that have been swept properly (these will normally be the
-// large ones) will be marked with free space and filler map words. In
-// addition any area that has never been used at all for object allocation must
-// be marked with a free space or filler. Because the free space and filler
-// maps do not move we can always recognize these even after a compaction.
-// Normal objects like FixedArrays and JSObjects should not contain references
-// to these maps. The special garbage section (see comment in spaces.h) is
-// skipped since it can contain absolutely anything. Any objects that are
-// allocated during iteration may or may not be visited by the iteration, but
-// they will not be partially visited.
-void StoreBuffer::FindPointersToNewSpaceOnPage(
- PagedSpace* space,
- Page* page,
- RegionCallback region_callback,
- ObjectSlotCallback slot_callback) {
- Address visitable_start = page->area_start();
- Address end_of_page = page->area_end();
-
- Address visitable_end = visitable_start;
-
- Object* free_space_map = heap_->free_space_map();
- Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
-
- while (visitable_end < end_of_page) {
- Object* o = *reinterpret_cast<Object**>(visitable_end);
- // Skip fillers but not things that look like fillers in the special
- // garbage section which can contain anything.
- if (o == free_space_map ||
- o == two_pointer_filler_map ||
- (visitable_end == space->top() && visitable_end != space->limit())) {
- if (visitable_start != visitable_end) {
- // After calling this the special garbage section may have moved.
- (this->*region_callback)(visitable_start,
- visitable_end,
- slot_callback);
- if (visitable_end >= space->top() && visitable_end < space->limit()) {
- visitable_end = space->limit();
- visitable_start = visitable_end;
- continue;
- }
- }
- if (visitable_end == space->top() && visitable_end != space->limit()) {
- visitable_start = visitable_end = space->limit();
- } else {
- // At this point we are either at the start of a filler or we are at
- // the point where the space->top() used to be before the
- // visit_pointer_region call above. Either way we can skip the
- // object at the current spot: We don't promise to visit objects
- // allocated during heap traversal, and if space->top() moved then it
- // must be because an object was allocated at this point.
- visitable_start =
- visitable_end + HeapObject::FromAddress(visitable_end)->Size();
- visitable_end = visitable_start;
- }
- } else {
- ASSERT(o != free_space_map);
- ASSERT(o != two_pointer_filler_map);
- ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
- visitable_end += kPointerSize;
- }
- }
- ASSERT(visitable_end == end_of_page);
- if (visitable_start != visitable_end) {
- (this->*region_callback)(visitable_start,
- visitable_end,
- slot_callback);
- }
-}
-
-
-void StoreBuffer::IteratePointersInStoreBuffer(
- ObjectSlotCallback slot_callback) {
- Address* limit = old_top_;
- old_top_ = old_start_;
- {
- DontMoveStoreBufferEntriesScope scope(this);
- for (Address* current = old_start_; current < limit; current++) {
-#ifdef DEBUG
- Address* saved_top = old_top_;
-#endif
- Object** slot = reinterpret_cast<Object**>(*current);
- Object* object = *slot;
- if (heap_->InFromSpace(object)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
- slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
- if (heap_->InNewSpace(*slot)) {
- EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
- }
- }
- ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
- }
- }
-}
-
-
-void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
- // We do not sort or remove duplicated entries from the store buffer because
- // we expect that callback will rebuild the store buffer thus removing
- // all duplicates and pointers to old space.
- bool some_pages_to_scan = PrepareForIteration();
-
- // TODO(gc): we want to skip slots on evacuation candidates
- // but we can't simply figure that out from slot address
- // because slot can belong to a large object.
- IteratePointersInStoreBuffer(slot_callback);
-
- // We are done scanning all the pointers that were in the store buffer, but
- // there may be some pages marked scan_on_scavenge that have pointers to new
- // space that are not in the store buffer. We must scan them now. As we
- // scan, the surviving pointers to new space will be added to the store
- // buffer. If there are still a lot of pointers to new space then we will
- // keep the scan_on_scavenge flag on the page and discard the pointers that
- // were added to the store buffer. If there are not many pointers to new
- // space left on the page we will keep the pointers in the store buffer and
- // remove the flag from the page.
- if (some_pages_to_scan) {
- if (callback_ != NULL) {
- (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
- }
- PointerChunkIterator it(heap_);
- MemoryChunk* chunk;
- while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) {
- chunk->set_scan_on_scavenge(false);
- if (callback_ != NULL) {
- (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
- }
- if (chunk->owner() == heap_->lo_space()) {
- LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
- HeapObject* array = large_page->GetObject();
- ASSERT(array->IsFixedArray());
- Address start = array->address();
- Address end = start + array->Size();
- FindPointersToNewSpaceInRegion(start, end, slot_callback);
- } else {
- Page* page = reinterpret_cast<Page*>(chunk);
- PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
- FindPointersToNewSpaceOnPage(
- owner,
- page,
- (owner == heap_->map_space() ?
- &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
- &StoreBuffer::FindPointersToNewSpaceInRegion),
- slot_callback);
- }
- }
- }
- if (callback_ != NULL) {
- (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
- }
- }
-}
-
-
-void StoreBuffer::Compact() {
- Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
-
- if (top == start_) return;
-
- // There's no check of the limit in the loop below so we check here for
- // the worst case (compaction doesn't eliminate any pointers).
- ASSERT(top <= limit_);
- heap_->public_set_store_buffer_top(start_);
- EnsureSpace(top - start_);
- ASSERT(may_move_store_buffer_entries_);
- // Goes through the addresses in the store buffer attempting to remove
- // duplicates. In the interest of speed this is a lossy operation. Some
- // duplicates will remain. We have two hash sets with different hash
- // functions to reduce the number of unnecessary clashes.
- hash_sets_are_empty_ = false; // Hash sets are in use.
- for (Address* current = start_; current < top; current++) {
- ASSERT(!heap_->cell_space()->Contains(*current));
- ASSERT(!heap_->code_space()->Contains(*current));
- ASSERT(!heap_->old_data_space()->Contains(*current));
- uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
- // Shift out the last bits including any tags.
- int_addr >>= kPointerSizeLog2;
- // The upper part of an address is basically random because of ASLR and OS
- // non-determinism, so we use only the bits within a page for hashing to
- // make v8's behavior (more) deterministic.
- uintptr_t hash_addr =
- int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
- int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
- (kHashSetLength - 1));
- if (hash_set_1_[hash1] == int_addr) continue;
- uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
- hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
- hash2 &= (kHashSetLength - 1);
- if (hash_set_2_[hash2] == int_addr) continue;
- if (hash_set_1_[hash1] == 0) {
- hash_set_1_[hash1] = int_addr;
- } else if (hash_set_2_[hash2] == 0) {
- hash_set_2_[hash2] = int_addr;
- } else {
- // Rather than slowing down we just throw away some entries. This will
- // cause some duplicates to remain undetected.
- hash_set_1_[hash1] = int_addr;
- hash_set_2_[hash2] = 0;
- }
- old_buffer_is_sorted_ = false;
- old_buffer_is_filtered_ = false;
- *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
- ASSERT(old_top_ <= old_limit_);
- }
- heap_->isolate()->counters()->store_buffer_compactions()->Increment();
- CheckForFullBuffer();
-}
-
-
-void StoreBuffer::CheckForFullBuffer() {
- EnsureSpace(kStoreBufferSize * 2);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/store-buffer.h b/src/3rdparty/v8/src/store-buffer.h
deleted file mode 100644
index 79046d1..0000000
--- a/src/3rdparty/v8/src/store-buffer.h
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STORE_BUFFER_H_
-#define V8_STORE_BUFFER_H_
-
-#include "allocation.h"
-#include "checks.h"
-#include "globals.h"
-#include "platform.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-class StoreBuffer;
-
-typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-
-typedef void (StoreBuffer::*RegionCallback)(
- Address start, Address end, ObjectSlotCallback slot_callback);
-
-// Used to implement the write barrier by collecting addresses of pointers
-// between spaces.
-class StoreBuffer {
- public:
- explicit StoreBuffer(Heap* heap);
-
- static void StoreBufferOverflow(Isolate* isolate);
-
- inline Address TopAddress();
-
- void SetUp();
- void TearDown();
-
- // This is used by the mutator to enter addresses into the store buffer.
- inline void Mark(Address addr);
-
- // This is used by the heap traversal to enter the addresses into the store
- // buffer that should still be in the store buffer after GC. It enters
- // addresses directly into the old buffer because the GC starts by wiping the
- // old buffer and thereafter only visits each cell once so there is no need
- // to attempt to remove any dupes. During the first part of a GC we
- // are using the store buffer to access the old spaces and at the same time
- // we are rebuilding the store buffer using this function. There is, however
- // no issue of overwriting the buffer we are iterating over, because this
- // stage of the scavenge can only reduce the number of addresses in the store
- // buffer (some objects are promoted so pointers to them do not need to be in
- // the store buffer). The later parts of the GC scan the pages that are
- // exempt from the store buffer and process the promotion queue. These steps
- // can overflow this buffer. We check for this and on overflow we call the
- // callback set up with the StoreBufferRebuildScope object.
- inline void EnterDirectlyIntoStoreBuffer(Address addr);
-
- // Iterates over all pointers that go from old space to new space. It will
- // delete the store buffer as it starts so the callback should reenter
- // surviving old-to-new pointers into the store buffer to rebuild it.
- void IteratePointersToNewSpace(ObjectSlotCallback callback);
-
- static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
- static const int kStoreBufferSize = kStoreBufferOverflowBit;
- static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
- static const int kOldStoreBufferLength = kStoreBufferLength * 16;
- static const int kHashSetLengthLog2 = 12;
- static const int kHashSetLength = 1 << kHashSetLengthLog2;
-
- void Compact();
-
- void GCPrologue();
- void GCEpilogue();
-
- Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
- Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
- Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
- void SetTop(Object*** top) {
- ASSERT(top >= Start());
- ASSERT(top <= Limit());
- old_top_ = reinterpret_cast<Address*>(top);
- }
-
- bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
- bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
-
- // Goes through the store buffer removing pointers to things that have
- // been promoted. Rebuilds the store buffer completely if it overflowed.
- void SortUniq();
-
- void EnsureSpace(intptr_t space_needed);
- void Verify();
-
- bool PrepareForIteration();
-
-#ifdef DEBUG
- void Clean();
- // Slow, for asserts only.
- bool CellIsInStoreBuffer(Address cell);
-#endif
-
- void Filter(int flag);
-
- private:
- Heap* heap_;
-
- // The store buffer is divided up into a new buffer that is constantly being
- // filled by mutator activity and an old buffer that is filled with the data
- // from the new buffer after compression.
- Address* start_;
- Address* limit_;
-
- Address* old_start_;
- Address* old_limit_;
- Address* old_top_;
- Address* old_reserved_limit_;
- VirtualMemory* old_virtual_memory_;
-
- bool old_buffer_is_sorted_;
- bool old_buffer_is_filtered_;
- bool during_gc_;
- // The garbage collector iterates over many pointers to new space that are not
- // handled by the store buffer. This flag indicates whether the pointers
- // found by the callbacks should be added to the store buffer or not.
- bool store_buffer_rebuilding_enabled_;
- StoreBufferCallback callback_;
- bool may_move_store_buffer_entries_;
-
- VirtualMemory* virtual_memory_;
-
- // Two hash sets used for filtering.
- // If address is in the hash set then it is guaranteed to be in the
- // old part of the store buffer.
- uintptr_t* hash_set_1_;
- uintptr_t* hash_set_2_;
- bool hash_sets_are_empty_;
-
- void ClearFilteringHashSets();
-
- void CheckForFullBuffer();
- void Uniq();
- void ExemptPopularPages(int prime_sample_step, int threshold);
-
- void FindPointersToNewSpaceInRegion(Address start,
- Address end,
- ObjectSlotCallback slot_callback);
-
- // For each region of pointers on a page in use from an old space call
- // visit_pointer_region callback.
- // If either visit_pointer_region or callback can cause an allocation
- // in old space and changes in allocation watermark then
- // can_preallocate_during_iteration should be set to true.
- void IteratePointersOnPage(
- PagedSpace* space,
- Page* page,
- RegionCallback region_callback,
- ObjectSlotCallback slot_callback);
-
- void FindPointersToNewSpaceInMaps(
- Address start,
- Address end,
- ObjectSlotCallback slot_callback);
-
- void FindPointersToNewSpaceInMapsRegion(
- Address start,
- Address end,
- ObjectSlotCallback slot_callback);
-
- void FindPointersToNewSpaceOnPage(
- PagedSpace* space,
- Page* page,
- RegionCallback region_callback,
- ObjectSlotCallback slot_callback);
-
- void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
-
-#ifdef VERIFY_HEAP
- void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
- void VerifyPointers(LargeObjectSpace* space);
-#endif
-
- friend class StoreBufferRebuildScope;
- friend class DontMoveStoreBufferEntriesScope;
-};
-
-
-class StoreBufferRebuildScope {
- public:
- explicit StoreBufferRebuildScope(Heap* heap,
- StoreBuffer* store_buffer,
- StoreBufferCallback callback)
- : store_buffer_(store_buffer),
- stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
- stored_callback_(store_buffer->callback_) {
- store_buffer_->store_buffer_rebuilding_enabled_ = true;
- store_buffer_->callback_ = callback;
- (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
- }
-
- ~StoreBufferRebuildScope() {
- store_buffer_->callback_ = stored_callback_;
- store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
- store_buffer_->CheckForFullBuffer();
- }
-
- private:
- StoreBuffer* store_buffer_;
- bool stored_state_;
- StoreBufferCallback stored_callback_;
-};
-
-
-class DontMoveStoreBufferEntriesScope {
- public:
- explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
- : store_buffer_(store_buffer),
- stored_state_(store_buffer->may_move_store_buffer_entries_) {
- store_buffer_->may_move_store_buffer_entries_ = false;
- }
-
- ~DontMoveStoreBufferEntriesScope() {
- store_buffer_->may_move_store_buffer_entries_ = stored_state_;
- }
-
- private:
- StoreBuffer* store_buffer_;
- bool stored_state_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_STORE_BUFFER_H_
diff --git a/src/3rdparty/v8/src/string-search.cc b/src/3rdparty/v8/src/string-search.cc
deleted file mode 100644
index 3ae68b5..0000000
--- a/src/3rdparty/v8/src/string-search.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "string-search.h"
-
-namespace v8 {
-namespace internal {
-
-// Storage for constants used by string-search.
-
-// Now in Isolate:
-// bad_char_shift_table()
-// good_suffix_shift_table()
-// suffix_table()
-
-}} // namespace v8::internal
diff --git a/src/3rdparty/v8/src/string-search.h b/src/3rdparty/v8/src/string-search.h
deleted file mode 100644
index 86237f3..0000000
--- a/src/3rdparty/v8/src/string-search.h
+++ /dev/null
@@ -1,588 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STRING_SEARCH_H_
-#define V8_STRING_SEARCH_H_
-
-namespace v8 {
-namespace internal {
-
-
-//---------------------------------------------------------------------
-// String Search object.
-//---------------------------------------------------------------------
-
-// Class holding constants and methods that apply to all string search variants,
-// independently of subject and pattern char size.
-class StringSearchBase {
- protected:
- // Cap on the maximal shift in the Boyer-Moore implementation. By setting a
- // limit, we can fix the size of tables. For a needle longer than this limit,
- // search will not be optimal, since we only build tables for a suffix
- // of the string, but it is a safe approximation.
- static const int kBMMaxShift = Isolate::kBMMaxShift;
-
- // Reduce alphabet to this size.
- // One of the tables used by Boyer-Moore and Boyer-Moore-Horspool has size
- // proportional to the input alphabet. We reduce the alphabet size by
- // equating input characters modulo a smaller alphabet size. This gives
- // a potentially less efficient searching, but is a safe approximation.
- // For needles using only characters in the same Unicode 256-code point page,
- // there is no search speed degradation.
-#ifndef ENABLE_LATIN_1
- static const int kAsciiAlphabetSize = 128;
-#else
- static const int kAsciiAlphabetSize = 256;
-#endif
- static const int kUC16AlphabetSize = Isolate::kUC16AlphabetSize;
-
- // Bad-char shift table stored in the state. It's length is the alphabet size.
- // For patterns below this length, the skip length of Boyer-Moore is too short
- // to compensate for the algorithmic overhead compared to simple brute force.
- static const int kBMMinPatternLength = 7;
-
- static inline bool IsOneByteString(Vector<const uint8_t> string) {
- return true;
- }
-
- static inline bool IsOneByteString(Vector<const uc16> string) {
- return String::IsOneByte(string.start(), string.length());
- }
-
- friend class Isolate;
-};
-
-
-template <typename PatternChar, typename SubjectChar>
-class StringSearch : private StringSearchBase {
- public:
- StringSearch(Isolate* isolate, Vector<const PatternChar> pattern)
- : isolate_(isolate),
- pattern_(pattern),
- start_(Max(0, pattern.length() - kBMMaxShift)) {
- if (sizeof(PatternChar) > sizeof(SubjectChar)) {
- if (!IsOneByteString(pattern_)) {
- strategy_ = &FailSearch;
- return;
- }
- }
- int pattern_length = pattern_.length();
- if (pattern_length < kBMMinPatternLength) {
- if (pattern_length == 1) {
- strategy_ = &SingleCharSearch;
- return;
- }
- strategy_ = &LinearSearch;
- return;
- }
- strategy_ = &InitialSearch;
- }
-
- int Search(Vector<const SubjectChar> subject, int index) {
- return strategy_(this, subject, index);
- }
-
- static inline int AlphabetSize() {
- if (sizeof(PatternChar) == 1) {
- // ASCII needle.
- return kAsciiAlphabetSize;
- } else {
- ASSERT(sizeof(PatternChar) == 2);
- // UC16 needle.
- return kUC16AlphabetSize;
- }
- }
-
- private:
- typedef int (*SearchFunction)( // NOLINT - it's not a cast!
- StringSearch<PatternChar, SubjectChar>*,
- Vector<const SubjectChar>,
- int);
-
- static int FailSearch(StringSearch<PatternChar, SubjectChar>*,
- Vector<const SubjectChar>,
- int) {
- return -1;
- }
-
- static int SingleCharSearch(StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index);
-
- static int LinearSearch(StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index);
-
- static int InitialSearch(StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index);
-
- static int BoyerMooreHorspoolSearch(
- StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index);
-
- static int BoyerMooreSearch(StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index);
-
- void PopulateBoyerMooreHorspoolTable();
-
- void PopulateBoyerMooreTable();
-
- static inline bool exceedsOneByte(uint8_t c) {
-#ifdef ENABLE_LATIN_1
- return false;
-#else
- return c > String::kMaxOneByteCharCodeU;
-#endif
- }
-
- static inline bool exceedsOneByte(uint16_t c) {
- return c > String::kMaxOneByteCharCodeU;
- }
-
- static inline int CharOccurrence(int* bad_char_occurrence,
- SubjectChar char_code) {
- if (sizeof(SubjectChar) == 1) {
- return bad_char_occurrence[static_cast<int>(char_code)];
- }
- if (sizeof(PatternChar) == 1) {
- if (exceedsOneByte(char_code)) {
- return -1;
- }
- return bad_char_occurrence[static_cast<unsigned int>(char_code)];
- }
- // Both pattern and subject are UC16. Reduce character to equivalence class.
- int equiv_class = char_code % kUC16AlphabetSize;
- return bad_char_occurrence[equiv_class];
- }
-
- // The following tables are shared by all searches.
- // TODO(lrn): Introduce a way for a pattern to keep its tables
- // between searches (e.g., for an Atom RegExp).
-
- // Store for the BoyerMoore(Horspool) bad char shift table.
- // Return a table covering the last kBMMaxShift+1 positions of
- // pattern.
- int* bad_char_table() {
- return isolate_->bad_char_shift_table();
- }
-
- // Store for the BoyerMoore good suffix shift table.
- int* good_suffix_shift_table() {
- // Return biased pointer that maps the range [start_..pattern_.length()
- // to the kGoodSuffixShiftTable array.
- return isolate_->good_suffix_shift_table() - start_;
- }
-
- // Table used temporarily while building the BoyerMoore good suffix
- // shift table.
- int* suffix_table() {
- // Return biased pointer that maps the range [start_..pattern_.length()
- // to the kSuffixTable array.
- return isolate_->suffix_table() - start_;
- }
-
- Isolate* isolate_;
- // The pattern to search for.
- Vector<const PatternChar> pattern_;
- // Pointer to implementation of the search.
- SearchFunction strategy_;
- // Cache value of Max(0, pattern_length() - kBMMaxShift)
- int start_;
-};
-
-
-//---------------------------------------------------------------------
-// Single Character Pattern Search Strategy
-//---------------------------------------------------------------------
-
-template <typename PatternChar, typename SubjectChar>
-int StringSearch<PatternChar, SubjectChar>::SingleCharSearch(
- StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int index) {
- ASSERT_EQ(1, search->pattern_.length());
- PatternChar pattern_first_char = search->pattern_[0];
- int i = index;
- if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
- const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + i,
- pattern_first_char,
- subject.length() - i));
- if (pos == NULL) return -1;
- return static_cast<int>(pos - subject.start());
- } else {
- if (sizeof(PatternChar) > sizeof(SubjectChar)) {
- if (exceedsOneByte(pattern_first_char)) {
- return -1;
- }
- }
- SubjectChar search_char = static_cast<SubjectChar>(pattern_first_char);
- int n = subject.length();
- while (i < n) {
- if (subject[i++] == search_char) return i - 1;
- }
- return -1;
- }
-}
-
-//---------------------------------------------------------------------
-// Linear Search Strategy
-//---------------------------------------------------------------------
-
-
-template <typename PatternChar, typename SubjectChar>
-inline bool CharCompare(const PatternChar* pattern,
- const SubjectChar* subject,
- int length) {
- ASSERT(length > 0);
- int pos = 0;
- do {
- if (pattern[pos] != subject[pos]) {
- return false;
- }
- pos++;
- } while (pos < length);
- return true;
-}
-
-
-// Simple linear search for short patterns. Never bails out.
-template <typename PatternChar, typename SubjectChar>
-int StringSearch<PatternChar, SubjectChar>::LinearSearch(
- StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int index) {
- Vector<const PatternChar> pattern = search->pattern_;
- ASSERT(pattern.length() > 1);
- int pattern_length = pattern.length();
- PatternChar pattern_first_char = pattern[0];
- int i = index;
- int n = subject.length() - pattern_length;
- while (i <= n) {
- if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
- const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + i,
- pattern_first_char,
- n - i + 1));
- if (pos == NULL) return -1;
- i = static_cast<int>(pos - subject.start()) + 1;
- } else {
- if (subject[i++] != pattern_first_char) continue;
- }
- // Loop extracted to separate function to allow using return to do
- // a deeper break.
- if (CharCompare(pattern.start() + 1,
- subject.start() + i,
- pattern_length - 1)) {
- return i - 1;
- }
- }
- return -1;
-}
-
-//---------------------------------------------------------------------
-// Boyer-Moore string search
-//---------------------------------------------------------------------
-
-template <typename PatternChar, typename SubjectChar>
-int StringSearch<PatternChar, SubjectChar>::BoyerMooreSearch(
- StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index) {
- Vector<const PatternChar> pattern = search->pattern_;
- int subject_length = subject.length();
- int pattern_length = pattern.length();
- // Only preprocess at most kBMMaxShift last characters of pattern.
- int start = search->start_;
-
- int* bad_char_occurence = search->bad_char_table();
- int* good_suffix_shift = search->good_suffix_shift_table();
-
- PatternChar last_char = pattern[pattern_length - 1];
- int index = start_index;
- // Continue search from i.
- while (index <= subject_length - pattern_length) {
- int j = pattern_length - 1;
- int c;
- while (last_char != (c = subject[index + j])) {
- int shift =
- j - CharOccurrence(bad_char_occurence, c);
- index += shift;
- if (index > subject_length - pattern_length) {
- return -1;
- }
- }
- while (j >= 0 && pattern[j] == (c = subject[index + j])) j--;
- if (j < 0) {
- return index;
- } else if (j < start) {
- // we have matched more than our tables allow us to be smart about.
- // Fall back on BMH shift.
- index += pattern_length - 1
- - CharOccurrence(bad_char_occurence,
- static_cast<SubjectChar>(last_char));
- } else {
- int gs_shift = good_suffix_shift[j + 1];
- int bc_occ =
- CharOccurrence(bad_char_occurence, c);
- int shift = j - bc_occ;
- if (gs_shift > shift) {
- shift = gs_shift;
- }
- index += shift;
- }
- }
-
- return -1;
-}
-
-
-template <typename PatternChar, typename SubjectChar>
-void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreTable() {
- int pattern_length = pattern_.length();
- const PatternChar* pattern = pattern_.start();
- // Only look at the last kBMMaxShift characters of pattern (from start_
- // to pattern_length).
- int start = start_;
- int length = pattern_length - start;
-
- // Biased tables so that we can use pattern indices as table indices,
- // even if we only cover the part of the pattern from offset start.
- int* shift_table = good_suffix_shift_table();
- int* suffix_table = this->suffix_table();
-
- // Initialize table.
- for (int i = start; i < pattern_length; i++) {
- shift_table[i] = length;
- }
- shift_table[pattern_length] = 1;
- suffix_table[pattern_length] = pattern_length + 1;
-
- if (pattern_length <= start) {
- return;
- }
-
- // Find suffixes.
- PatternChar last_char = pattern[pattern_length - 1];
- int suffix = pattern_length + 1;
- {
- int i = pattern_length;
- while (i > start) {
- PatternChar c = pattern[i - 1];
- while (suffix <= pattern_length && c != pattern[suffix - 1]) {
- if (shift_table[suffix] == length) {
- shift_table[suffix] = suffix - i;
- }
- suffix = suffix_table[suffix];
- }
- suffix_table[--i] = --suffix;
- if (suffix == pattern_length) {
- // No suffix to extend, so we check against last_char only.
- while ((i > start) && (pattern[i - 1] != last_char)) {
- if (shift_table[pattern_length] == length) {
- shift_table[pattern_length] = pattern_length - i;
- }
- suffix_table[--i] = pattern_length;
- }
- if (i > start) {
- suffix_table[--i] = --suffix;
- }
- }
- }
- }
- // Build shift table using suffixes.
- if (suffix < pattern_length) {
- for (int i = start; i <= pattern_length; i++) {
- if (shift_table[i] == length) {
- shift_table[i] = suffix - start;
- }
- if (i == suffix) {
- suffix = suffix_table[suffix];
- }
- }
- }
-}
-
-//---------------------------------------------------------------------
-// Boyer-Moore-Horspool string search.
-//---------------------------------------------------------------------
-
-template <typename PatternChar, typename SubjectChar>
-int StringSearch<PatternChar, SubjectChar>::BoyerMooreHorspoolSearch(
- StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index) {
- Vector<const PatternChar> pattern = search->pattern_;
- int subject_length = subject.length();
- int pattern_length = pattern.length();
- int* char_occurrences = search->bad_char_table();
- int badness = -pattern_length;
-
- // How bad we are doing without a good-suffix table.
- PatternChar last_char = pattern[pattern_length - 1];
- int last_char_shift = pattern_length - 1 -
- CharOccurrence(char_occurrences, static_cast<SubjectChar>(last_char));
- // Perform search
- int index = start_index; // No matches found prior to this index.
- while (index <= subject_length - pattern_length) {
- int j = pattern_length - 1;
- int subject_char;
- while (last_char != (subject_char = subject[index + j])) {
- int bc_occ = CharOccurrence(char_occurrences, subject_char);
- int shift = j - bc_occ;
- index += shift;
- badness += 1 - shift; // at most zero, so badness cannot increase.
- if (index > subject_length - pattern_length) {
- return -1;
- }
- }
- j--;
- while (j >= 0 && pattern[j] == (subject[index + j])) j--;
- if (j < 0) {
- return index;
- } else {
- index += last_char_shift;
- // Badness increases by the number of characters we have
- // checked, and decreases by the number of characters we
- // can skip by shifting. It's a measure of how we are doing
- // compared to reading each character exactly once.
- badness += (pattern_length - j) - last_char_shift;
- if (badness > 0) {
- search->PopulateBoyerMooreTable();
- search->strategy_ = &BoyerMooreSearch;
- return BoyerMooreSearch(search, subject, index);
- }
- }
- }
- return -1;
-}
-
-
-template <typename PatternChar, typename SubjectChar>
-void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreHorspoolTable() {
- int pattern_length = pattern_.length();
-
- int* bad_char_occurrence = bad_char_table();
-
- // Only preprocess at most kBMMaxShift last characters of pattern.
- int start = start_;
- // Run forwards to populate bad_char_table, so that *last* instance
- // of character equivalence class is the one registered.
- // Notice: Doesn't include the last character.
- int table_size = AlphabetSize();
- if (start == 0) { // All patterns less than kBMMaxShift in length.
- memset(bad_char_occurrence,
- -1,
- table_size * sizeof(*bad_char_occurrence));
- } else {
- for (int i = 0; i < table_size; i++) {
- bad_char_occurrence[i] = start - 1;
- }
- }
- for (int i = start; i < pattern_length - 1; i++) {
- PatternChar c = pattern_[i];
- int bucket = (sizeof(PatternChar) == 1) ? c : c % AlphabetSize();
- bad_char_occurrence[bucket] = i;
- }
-}
-
-//---------------------------------------------------------------------
-// Linear string search with bailout to BMH.
-//---------------------------------------------------------------------
-
-// Simple linear search for short patterns, which bails out if the string
-// isn't found very early in the subject. Upgrades to BoyerMooreHorspool.
-template <typename PatternChar, typename SubjectChar>
-int StringSearch<PatternChar, SubjectChar>::InitialSearch(
- StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int index) {
- Vector<const PatternChar> pattern = search->pattern_;
- int pattern_length = pattern.length();
- // Badness is a count of how much work we have done. When we have
- // done enough work we decide it's probably worth switching to a better
- // algorithm.
- int badness = -10 - (pattern_length << 2);
-
- // We know our pattern is at least 2 characters, we cache the first so
- // the common case of the first character not matching is faster.
- PatternChar pattern_first_char = pattern[0];
- for (int i = index, n = subject.length() - pattern_length; i <= n; i++) {
- badness++;
- if (badness <= 0) {
- if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
- const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + i,
- pattern_first_char,
- n - i + 1));
- if (pos == NULL) {
- return -1;
- }
- i = static_cast<int>(pos - subject.start());
- } else {
- if (subject[i] != pattern_first_char) continue;
- }
- int j = 1;
- do {
- if (pattern[j] != subject[i + j]) {
- break;
- }
- j++;
- } while (j < pattern_length);
- if (j == pattern_length) {
- return i;
- }
- badness += j;
- } else {
- search->PopulateBoyerMooreHorspoolTable();
- search->strategy_ = &BoyerMooreHorspoolSearch;
- return BoyerMooreHorspoolSearch(search, subject, i);
- }
- }
- return -1;
-}
-
-
-// Perform a a single stand-alone search.
-// If searching multiple times for the same pattern, a search
-// object should be constructed once and the Search function then called
-// for each search.
-template <typename SubjectChar, typename PatternChar>
-int SearchString(Isolate* isolate,
- Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- int start_index) {
- StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
- return search.Search(subject, start_index);
-}
-
-}} // namespace v8::internal
-
-#endif // V8_STRING_SEARCH_H_
diff --git a/src/3rdparty/v8/src/string-stream.cc b/src/3rdparty/v8/src/string-stream.cc
deleted file mode 100644
index 97b4d32..0000000
--- a/src/3rdparty/v8/src/string-stream.cc
+++ /dev/null
@@ -1,594 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "factory.h"
-#include "string-stream.h"
-
-#include "allocation-inl.h"
-
-namespace v8 {
-namespace internal {
-
-static const int kMentionedObjectCacheMaxSize = 256;
-
-char* HeapStringAllocator::allocate(unsigned bytes) {
- space_ = NewArray<char>(bytes);
- return space_;
-}
-
-
-NoAllocationStringAllocator::NoAllocationStringAllocator(char* memory,
- unsigned size) {
- size_ = size;
- space_ = memory;
-}
-
-
-bool StringStream::Put(char c) {
- if (full()) return false;
- ASSERT(length_ < capacity_);
- // Since the trailing '\0' is not accounted for in length_ fullness is
- // indicated by a difference of 1 between length_ and capacity_. Thus when
- // reaching a difference of 2 we need to grow the buffer.
- if (length_ == capacity_ - 2) {
- unsigned new_capacity = capacity_;
- char* new_buffer = allocator_->grow(&new_capacity);
- if (new_capacity > capacity_) {
- capacity_ = new_capacity;
- buffer_ = new_buffer;
- } else {
- // Reached the end of the available buffer.
- ASSERT(capacity_ >= 5);
- length_ = capacity_ - 1; // Indicate fullness of the stream.
- buffer_[length_ - 4] = '.';
- buffer_[length_ - 3] = '.';
- buffer_[length_ - 2] = '.';
- buffer_[length_ - 1] = '\n';
- buffer_[length_] = '\0';
- return false;
- }
- }
- buffer_[length_] = c;
- buffer_[length_ + 1] = '\0';
- length_++;
- return true;
-}
-
-
-// A control character is one that configures a format element. For
-// instance, in %.5s, .5 are control characters.
-static bool IsControlChar(char c) {
- switch (c) {
- case '0': case '1': case '2': case '3': case '4': case '5':
- case '6': case '7': case '8': case '9': case '.': case '-':
- return true;
- default:
- return false;
- }
-}
-
-
-void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
- // If we already ran out of space then return immediately.
- if (full()) return;
- int offset = 0;
- int elm = 0;
- while (offset < format.length()) {
- if (format[offset] != '%' || elm == elms.length()) {
- Put(format[offset]);
- offset++;
- continue;
- }
- // Read this formatting directive into a temporary buffer
- EmbeddedVector<char, 24> temp;
- int format_length = 0;
- // Skip over the whole control character sequence until the
- // format element type
- temp[format_length++] = format[offset++];
- while (offset < format.length() && IsControlChar(format[offset]))
- temp[format_length++] = format[offset++];
- if (offset >= format.length())
- return;
- char type = format[offset];
- temp[format_length++] = type;
- temp[format_length] = '\0';
- offset++;
- FmtElm current = elms[elm++];
- switch (type) {
- case 's': {
- ASSERT_EQ(FmtElm::C_STR, current.type_);
- const char* value = current.data_.u_c_str_;
- Add(value);
- break;
- }
- case 'w': {
- ASSERT_EQ(FmtElm::LC_STR, current.type_);
- Vector<const uc16> value = *current.data_.u_lc_str_;
- for (int i = 0; i < value.length(); i++)
- Put(static_cast<char>(value[i]));
- break;
- }
- case 'o': {
- ASSERT_EQ(FmtElm::OBJ, current.type_);
- Object* obj = current.data_.u_obj_;
- PrintObject(obj);
- break;
- }
- case 'k': {
- ASSERT_EQ(FmtElm::INT, current.type_);
- int value = current.data_.u_int_;
- if (0x20 <= value && value <= 0x7F) {
- Put(value);
- } else if (value <= 0xff) {
- Add("\\x%02x", value);
- } else {
- Add("\\u%04x", value);
- }
- break;
- }
- case 'i': case 'd': case 'u': case 'x': case 'c': case 'X': {
- int value = current.data_.u_int_;
- EmbeddedVector<char, 24> formatted;
- int length = OS::SNPrintF(formatted, temp.start(), value);
- Add(Vector<const char>(formatted.start(), length));
- break;
- }
- case 'f': case 'g': case 'G': case 'e': case 'E': {
- double value = current.data_.u_double_;
- EmbeddedVector<char, 28> formatted;
- OS::SNPrintF(formatted, temp.start(), value);
- Add(formatted.start());
- break;
- }
- case 'p': {
- void* value = current.data_.u_pointer_;
- EmbeddedVector<char, 20> formatted;
- OS::SNPrintF(formatted, temp.start(), value);
- Add(formatted.start());
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- }
-
- // Verify that the buffer is 0-terminated
- ASSERT(buffer_[length_] == '\0');
-}
-
-
-void StringStream::PrintObject(Object* o) {
- o->ShortPrint(this);
- if (o->IsString()) {
- if (String::cast(o)->length() <= String::kMaxShortPrintLength) {
- return;
- }
- } else if (o->IsNumber() || o->IsOddball()) {
- return;
- }
- if (o->IsHeapObject()) {
- DebugObjectCache* debug_object_cache = Isolate::Current()->
- string_stream_debug_object_cache();
- for (int i = 0; i < debug_object_cache->length(); i++) {
- if ((*debug_object_cache)[i] == o) {
- Add("#%d#", i);
- return;
- }
- }
- if (debug_object_cache->length() < kMentionedObjectCacheMaxSize) {
- Add("#%d#", debug_object_cache->length());
- debug_object_cache->Add(HeapObject::cast(o));
- } else {
- Add("@%p", o);
- }
- }
-}
-
-
-void StringStream::Add(const char* format) {
- Add(CStrVector(format));
-}
-
-
-void StringStream::Add(Vector<const char> format) {
- Add(format, Vector<FmtElm>::empty());
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0) {
- const char argc = 1;
- FmtElm argv[argc] = { arg0 };
- Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1) {
- const char argc = 2;
- FmtElm argv[argc] = { arg0, arg1 };
- Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
- FmtElm arg2) {
- const char argc = 3;
- FmtElm argv[argc] = { arg0, arg1, arg2 };
- Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
- FmtElm arg2, FmtElm arg3) {
- const char argc = 4;
- FmtElm argv[argc] = { arg0, arg1, arg2, arg3 };
- Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-SmartArrayPointer<const char> StringStream::ToCString() const {
- char* str = NewArray<char>(length_ + 1);
- memcpy(str, buffer_, length_);
- str[length_] = '\0';
- return SmartArrayPointer<const char>(str);
-}
-
-
-void StringStream::Log() {
- LOG(ISOLATE, StringEvent("StackDump", buffer_));
-}
-
-
-void StringStream::OutputToFile(FILE* out) {
- // Dump the output to stdout, but make sure to break it up into
- // manageable chunks to avoid losing parts of the output in the OS
- // printing code. This is a problem on Windows in particular; see
- // the VPrint() function implementations in platform-win32.cc.
- unsigned position = 0;
- for (unsigned next; (next = position + 2048) < length_; position = next) {
- char save = buffer_[next];
- buffer_[next] = '\0';
- internal::FPrintF(out, "%s", &buffer_[position]);
- buffer_[next] = save;
- }
- internal::FPrintF(out, "%s", &buffer_[position]);
-}
-
-
-Handle<String> StringStream::ToString() {
- return FACTORY->NewStringFromUtf8(Vector<const char>(buffer_, length_));
-}
-
-
-void StringStream::ClearMentionedObjectCache() {
- Isolate* isolate = Isolate::Current();
- isolate->set_string_stream_current_security_token(NULL);
- if (isolate->string_stream_debug_object_cache() == NULL) {
- isolate->set_string_stream_debug_object_cache(
- new List<HeapObject*, PreallocatedStorageAllocationPolicy>(0));
- }
- isolate->string_stream_debug_object_cache()->Clear();
-}
-
-
-#ifdef DEBUG
-bool StringStream::IsMentionedObjectCacheClear() {
- return (
- Isolate::Current()->string_stream_debug_object_cache()->length() == 0);
-}
-#endif
-
-
-bool StringStream::Put(String* str) {
- return Put(str, 0, str->length());
-}
-
-
-bool StringStream::Put(String* str, int start, int end) {
- ConsStringIteratorOp op;
- StringCharacterStream stream(str, &op, start);
- for (int i = start; i < end && stream.HasMore(); i++) {
- uint16_t c = stream.GetNext();
- if (c >= 127 || c < 32) {
- c = '?';
- }
- if (!Put(static_cast<char>(c))) {
- return false; // Output was truncated.
- }
- }
- return true;
-}
-
-
-void StringStream::PrintName(Object* name) {
- if (name->IsString()) {
- String* str = String::cast(name);
- if (str->length() > 0) {
- Put(str);
- } else {
- Add("/* anonymous */");
- }
- } else {
- Add("%o", name);
- }
-}
-
-
-void StringStream::PrintUsingMap(JSObject* js_object) {
- Map* map = js_object->map();
- if (!HEAP->Contains(map) ||
- !map->IsHeapObject() ||
- !map->IsMap()) {
- Add("<Invalid map>\n");
- return;
- }
- int real_size = map->NumberOfOwnDescriptors();
- DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
- if (details.descriptor_index() > real_size) continue;
- if (details.type() == FIELD) {
- Object* key = descs->GetKey(i);
- if (key->IsString() || key->IsNumber()) {
- int len = 3;
- if (key->IsString()) {
- len = String::cast(key)->length();
- }
- for (; len < 18; len++)
- Put(' ');
- if (key->IsString()) {
- Put(String::cast(key));
- } else {
- key->ShortPrint();
- }
- Add(": ");
- Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
- Add("%o\n", value);
- }
- }
- }
-}
-
-
-void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
- Heap* heap = HEAP;
- for (unsigned int i = 0; i < 10 && i < limit; i++) {
- Object* element = array->get(i);
- if (element != heap->the_hole_value()) {
- for (int len = 1; len < 18; len++)
- Put(' ');
- Add("%d: %o\n", i, array->get(i));
- }
- }
- if (limit >= 10) {
- Add(" ...\n");
- }
-}
-
-
-void StringStream::PrintByteArray(ByteArray* byte_array) {
- unsigned int limit = byte_array->length();
- for (unsigned int i = 0; i < 10 && i < limit; i++) {
- byte b = byte_array->get(i);
- Add(" %d: %3d 0x%02x", i, b, b);
- if (b >= ' ' && b <= '~') {
- Add(" '%c'", b);
- } else if (b == '\n') {
- Add(" '\n'");
- } else if (b == '\r') {
- Add(" '\r'");
- } else if (b >= 1 && b <= 26) {
- Add(" ^%c", b + 'A' - 1);
- }
- Add("\n");
- }
- if (limit >= 10) {
- Add(" ...\n");
- }
-}
-
-
-void StringStream::PrintMentionedObjectCache() {
- DebugObjectCache* debug_object_cache =
- Isolate::Current()->string_stream_debug_object_cache();
- Add("==== Key ============================================\n\n");
- for (int i = 0; i < debug_object_cache->length(); i++) {
- HeapObject* printee = (*debug_object_cache)[i];
- Add(" #%d# %p: ", i, printee);
- printee->ShortPrint(this);
- Add("\n");
- if (printee->IsJSObject()) {
- if (printee->IsJSValue()) {
- Add(" value(): %o\n", JSValue::cast(printee)->value());
- }
- PrintUsingMap(JSObject::cast(printee));
- if (printee->IsJSArray()) {
- JSArray* array = JSArray::cast(printee);
- if (array->HasFastObjectElements()) {
- unsigned int limit = FixedArray::cast(array->elements())->length();
- unsigned int length =
- static_cast<uint32_t>(JSArray::cast(array)->length()->Number());
- if (length < limit) limit = length;
- PrintFixedArray(FixedArray::cast(array->elements()), limit);
- }
- }
- } else if (printee->IsByteArray()) {
- PrintByteArray(ByteArray::cast(printee));
- } else if (printee->IsFixedArray()) {
- unsigned int limit = FixedArray::cast(printee)->length();
- PrintFixedArray(FixedArray::cast(printee), limit);
- }
- }
-}
-
-
-void StringStream::PrintSecurityTokenIfChanged(Object* f) {
- Isolate* isolate = Isolate::Current();
- Heap* heap = isolate->heap();
- if (!f->IsHeapObject() || !heap->Contains(HeapObject::cast(f))) {
- return;
- }
- Map* map = HeapObject::cast(f)->map();
- if (!map->IsHeapObject() ||
- !heap->Contains(map) ||
- !map->IsMap() ||
- !f->IsJSFunction()) {
- return;
- }
-
- JSFunction* fun = JSFunction::cast(f);
- Object* perhaps_context = fun->unchecked_context();
- if (perhaps_context->IsHeapObject() &&
- heap->Contains(HeapObject::cast(perhaps_context)) &&
- perhaps_context->IsContext()) {
- Context* context = fun->context();
- if (!heap->Contains(context)) {
- Add("(Function context is outside heap)\n");
- return;
- }
- Object* token = context->native_context()->security_token();
- if (token != isolate->string_stream_current_security_token()) {
- Add("Security context: %o\n", token);
- isolate->set_string_stream_current_security_token(token);
- }
- } else {
- Add("(Function context is corrupt)\n");
- }
-}
-
-
-void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
- if (f->IsHeapObject() &&
- HEAP->Contains(HeapObject::cast(f)) &&
- HEAP->Contains(HeapObject::cast(f)->map()) &&
- HeapObject::cast(f)->map()->IsMap()) {
- if (f->IsJSFunction()) {
- JSFunction* fun = JSFunction::cast(f);
- // Common case: on-stack function present and resolved.
- PrintPrototype(fun, receiver);
- *code = fun->code();
- } else if (f->IsInternalizedString()) {
- // Unresolved and megamorphic calls: Instead of the function
- // we have the function name on the stack.
- PrintName(f);
- Add("/* unresolved */ ");
- } else {
- // Unless this is the frame of a built-in function, we should always have
- // the callee function or name on the stack. If we don't, we have a
- // problem or a change of the stack frame layout.
- Add("%o", f);
- Add("/* warning: no JSFunction object or function name found */ ");
- }
- /* } else if (is_trampoline()) {
- Print("trampoline ");
- */
- } else {
- if (!f->IsHeapObject()) {
- Add("/* warning: 'function' was not a heap object */ ");
- return;
- }
- if (!HEAP->Contains(HeapObject::cast(f))) {
- Add("/* warning: 'function' was not on the heap */ ");
- return;
- }
- if (!HEAP->Contains(HeapObject::cast(f)->map())) {
- Add("/* warning: function's map was not on the heap */ ");
- return;
- }
- if (!HeapObject::cast(f)->map()->IsMap()) {
- Add("/* warning: function's map was not a valid map */ ");
- return;
- }
- Add("/* warning: Invalid JSFunction object found */ ");
- }
-}
-
-
-void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
- Object* name = fun->shared()->name();
- bool print_name = false;
- Isolate* isolate = fun->GetIsolate();
- for (Object* p = receiver;
- p != isolate->heap()->null_value();
- p = p->GetPrototype(isolate)) {
- if (p->IsJSObject()) {
- Object* key = JSObject::cast(p)->SlowReverseLookup(fun);
- if (key != isolate->heap()->undefined_value()) {
- if (!name->IsString() ||
- !key->IsString() ||
- !String::cast(name)->Equals(String::cast(key))) {
- print_name = true;
- }
- if (name->IsString() && String::cast(name)->length() == 0) {
- print_name = false;
- }
- name = key;
- }
- } else {
- print_name = true;
- }
- }
- PrintName(name);
- // Also known as - if the name in the function doesn't match the name under
- // which it was looked up.
- if (print_name) {
- Add("(aka ");
- PrintName(fun->shared()->name());
- Put(')');
- }
-}
-
-
-char* HeapStringAllocator::grow(unsigned* bytes) {
- unsigned new_bytes = *bytes * 2;
- // Check for overflow.
- if (new_bytes <= *bytes) {
- return space_;
- }
- char* new_space = NewArray<char>(new_bytes);
- if (new_space == NULL) {
- return space_;
- }
- memcpy(new_space, space_, *bytes);
- *bytes = new_bytes;
- DeleteArray(space_);
- space_ = new_space;
- return new_space;
-}
-
-
-// Only grow once to the maximum allowable size.
-char* NoAllocationStringAllocator::grow(unsigned* bytes) {
- ASSERT(size_ >= *bytes);
- *bytes = size_;
- return space_;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/string-stream.h b/src/3rdparty/v8/src/string-stream.h
deleted file mode 100644
index 0ba8f52..0000000
--- a/src/3rdparty/v8/src/string-stream.h
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STRING_STREAM_H_
-#define V8_STRING_STREAM_H_
-
-namespace v8 {
-namespace internal {
-
-
-class StringAllocator {
- public:
- virtual ~StringAllocator() {}
- // Allocate a number of bytes.
- virtual char* allocate(unsigned bytes) = 0;
- // Allocate a larger number of bytes and copy the old buffer to the new one.
- // bytes is an input and output parameter passing the old size of the buffer
- // and returning the new size. If allocation fails then we return the old
- // buffer and do not increase the size.
- virtual char* grow(unsigned* bytes) = 0;
-};
-
-
-// Normal allocator uses new[] and delete[].
-class HeapStringAllocator: public StringAllocator {
- public:
- ~HeapStringAllocator() { DeleteArray(space_); }
- char* allocate(unsigned bytes);
- char* grow(unsigned* bytes);
- private:
- char* space_;
-};
-
-
-// Allocator for use when no new c++ heap allocation is allowed.
-// Given a preallocated buffer up front and does no allocation while
-// building message.
-class NoAllocationStringAllocator: public StringAllocator {
- public:
- NoAllocationStringAllocator(char* memory, unsigned size);
- char* allocate(unsigned bytes) { return space_; }
- char* grow(unsigned* bytes);
- private:
- unsigned size_;
- char* space_;
-};
-
-
-class FmtElm {
- public:
- FmtElm(int value) : type_(INT) { // NOLINT
- data_.u_int_ = value;
- }
- explicit FmtElm(double value) : type_(DOUBLE) {
- data_.u_double_ = value;
- }
- FmtElm(const char* value) : type_(C_STR) { // NOLINT
- data_.u_c_str_ = value;
- }
- FmtElm(const Vector<const uc16>& value) : type_(LC_STR) { // NOLINT
- data_.u_lc_str_ = &value;
- }
- FmtElm(Object* value) : type_(OBJ) { // NOLINT
- data_.u_obj_ = value;
- }
- FmtElm(Handle<Object> value) : type_(HANDLE) { // NOLINT
- data_.u_handle_ = value.location();
- }
- FmtElm(void* value) : type_(POINTER) { // NOLINT
- data_.u_pointer_ = value;
- }
-
- private:
- friend class StringStream;
- enum Type { INT, DOUBLE, C_STR, LC_STR, OBJ, HANDLE, POINTER };
- Type type_;
- union {
- int u_int_;
- double u_double_;
- const char* u_c_str_;
- const Vector<const uc16>* u_lc_str_;
- Object* u_obj_;
- Object** u_handle_;
- void* u_pointer_;
- } data_;
-};
-
-
-class StringStream {
- public:
- explicit StringStream(StringAllocator* allocator):
- allocator_(allocator),
- capacity_(kInitialCapacity),
- length_(0),
- buffer_(allocator_->allocate(kInitialCapacity)) {
- buffer_[0] = 0;
- }
-
- ~StringStream() {
- }
-
- bool Put(char c);
- bool Put(String* str);
- bool Put(String* str, int start, int end);
- void Add(Vector<const char> format, Vector<FmtElm> elms);
- void Add(const char* format);
- void Add(Vector<const char> format);
- void Add(const char* format, FmtElm arg0);
- void Add(const char* format, FmtElm arg0, FmtElm arg1);
- void Add(const char* format, FmtElm arg0, FmtElm arg1, FmtElm arg2);
- void Add(const char* format,
- FmtElm arg0,
- FmtElm arg1,
- FmtElm arg2,
- FmtElm arg3);
-
- // Getting the message out.
- void OutputToFile(FILE* out);
- void OutputToStdOut() { OutputToFile(stdout); }
- void Log();
- Handle<String> ToString();
- SmartArrayPointer<const char> ToCString() const;
- int length() const { return length_; }
-
- // Object printing support.
- void PrintName(Object* o);
- void PrintFixedArray(FixedArray* array, unsigned int limit);
- void PrintByteArray(ByteArray* ba);
- void PrintUsingMap(JSObject* js_object);
- void PrintPrototype(JSFunction* fun, Object* receiver);
- void PrintSecurityTokenIfChanged(Object* function);
- // NOTE: Returns the code in the output parameter.
- void PrintFunction(Object* function, Object* receiver, Code** code);
-
- // Reset the stream.
- void Reset() {
- length_ = 0;
- buffer_[0] = 0;
- }
-
- // Mentioned object cache support.
- void PrintMentionedObjectCache();
- static void ClearMentionedObjectCache();
-#ifdef DEBUG
- static bool IsMentionedObjectCacheClear();
-#endif
-
-
- static const int kInitialCapacity = 16;
-
- private:
- void PrintObject(Object* obj);
-
- StringAllocator* allocator_;
- unsigned capacity_;
- unsigned length_; // does not include terminating 0-character
- char* buffer_;
-
- bool full() const { return (capacity_ - length_) == 1; }
- int space() const { return capacity_ - length_; }
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringStream);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_STRING_STREAM_H_
diff --git a/src/3rdparty/v8/src/string.js b/src/3rdparty/v8/src/string.js
deleted file mode 100644
index 2f8043c..0000000
--- a/src/3rdparty/v8/src/string.js
+++ /dev/null
@@ -1,1047 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// This file relies on the fact that the following declaration has been made
-// in runtime.js:
-// var $String = global.String;
-// var $NaN = 0/0;
-
-
-// Set the String function and constructor.
-%SetCode($String, function(x) {
- var value = %_ArgumentsLength() == 0 ? '' : TO_STRING_INLINE(x);
- if (%_IsConstructCall()) {
- %_SetValueOf(this, value);
- } else {
- return value;
- }
-});
-
-%FunctionSetPrototype($String, new $String());
-
-// ECMA-262 section 15.5.4.2
-function StringToString() {
- if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) {
- throw new $TypeError('String.prototype.toString is not generic');
- }
- return %_ValueOf(this);
-}
-
-
-// ECMA-262 section 15.5.4.3
-function StringValueOf() {
- if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) {
- throw new $TypeError('String.prototype.valueOf is not generic');
- }
- return %_ValueOf(this);
-}
-
-
-// ECMA-262, section 15.5.4.4
-function StringCharAt(pos) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.charAt"]);
- }
- var result = %_StringCharAt(this, pos);
- if (%_IsSmi(result)) {
- result = %_StringCharAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
- }
- return result;
-}
-
-
-// ECMA-262 section 15.5.4.5
-function StringCharCodeAt(pos) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.charCodeAt"]);
- }
- var result = %_StringCharCodeAt(this, pos);
- if (!%_IsSmi(result)) {
- result = %_StringCharCodeAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
- }
- return result;
-}
-
-
-// ECMA-262, section 15.5.4.6
-function StringConcat() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.concat"]);
- }
- var len = %_ArgumentsLength();
- var this_as_string = TO_STRING_INLINE(this);
- if (len === 1) {
- return this_as_string + %_Arguments(0);
- }
- var parts = new InternalArray(len + 1);
- parts[0] = this_as_string;
- for (var i = 0; i < len; i++) {
- var part = %_Arguments(i);
- parts[i + 1] = TO_STRING_INLINE(part);
- }
- return %StringBuilderConcat(parts, len + 1, "");
-}
-
-// Match ES3 and Safari
-%FunctionSetLength(StringConcat, 1);
-
-
-// ECMA-262 section 15.5.4.7
-function StringIndexOf(pattern /* position */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.indexOf"]);
- }
- var subject = TO_STRING_INLINE(this);
- pattern = TO_STRING_INLINE(pattern);
- var index = 0;
- if (%_ArgumentsLength() > 1) {
- index = %_Arguments(1); // position
- index = TO_INTEGER(index);
- if (index < 0) index = 0;
- if (index > subject.length) index = subject.length;
- }
- return %StringIndexOf(subject, pattern, index);
-}
-
-
-// ECMA-262 section 15.5.4.8
-function StringLastIndexOf(pat /* position */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.lastIndexOf"]);
- }
- var sub = TO_STRING_INLINE(this);
- var subLength = sub.length;
- var pat = TO_STRING_INLINE(pat);
- var patLength = pat.length;
- var index = subLength - patLength;
- if (%_ArgumentsLength() > 1) {
- var position = ToNumber(%_Arguments(1));
- if (!NUMBER_IS_NAN(position)) {
- position = TO_INTEGER(position);
- if (position < 0) {
- position = 0;
- }
- if (position + patLength < subLength) {
- index = position;
- }
- }
- }
- if (index < 0) {
- return -1;
- }
- return %StringLastIndexOf(sub, pat, index);
-}
-
-
-// ECMA-262 section 15.5.4.9
-//
-// This function is implementation specific. For now, we do not
-// do anything locale specific.
-function StringLocaleCompare(other) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.localeCompare"]);
- }
- if (%_ArgumentsLength() === 0) return 0;
- return %StringLocaleCompare(TO_STRING_INLINE(this),
- TO_STRING_INLINE(other));
-}
-
-
-// ECMA-262 section 15.5.4.10
-function StringMatch(regexp) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.match"]);
- }
- var subject = TO_STRING_INLINE(this);
- if (IS_REGEXP(regexp)) {
- // Emulate RegExp.prototype.exec's side effect in step 5, even though
- // value is discarded.
- ToInteger(regexp.lastIndex);
- if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
- %_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
- // lastMatchInfo is defined in regexp.js.
- var result = %StringMatch(subject, regexp, lastMatchInfo);
- if (result !== null) lastMatchInfoOverride = null;
- regexp.lastIndex = 0;
- return result;
- }
- // Non-regexp argument.
- regexp = new $RegExp(regexp);
- return RegExpExecNoTests(regexp, subject, 0);
-}
-
-
-// This has the same size as the lastMatchInfo array, and can be used for
-// functions that expect that structure to be returned. It is used when the
-// needle is a string rather than a regexp. In this case we can't update
-// lastMatchArray without erroneously affecting the properties on the global
-// RegExp object.
-var reusableMatchInfo = [2, "", "", -1, -1];
-
-
-// ECMA-262, section 15.5.4.11
-function StringReplace(search, replace) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.replace"]);
- }
- var subject = TO_STRING_INLINE(this);
-
- // Decision tree for dispatch
- // .. regexp search
- // .... string replace
- // ...... non-global search
- // ........ empty string replace
- // ........ non-empty string replace (with $-expansion)
- // ...... global search
- // ........ no need to circumvent last match info override
- // ........ need to circument last match info override
- // .... function replace
- // ...... global search
- // ...... non-global search
- // .. string search
- // .... special case that replaces with one single character
- // ...... function replace
- // ...... string replace (with $-expansion)
-
- if (IS_REGEXP(search)) {
- // Emulate RegExp.prototype.exec's side effect in step 5, even if
- // value is discarded.
- ToInteger(search.lastIndex);
- %_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
-
- if (!IS_SPEC_FUNCTION(replace)) {
- replace = TO_STRING_INLINE(replace);
-
- if (!search.global) {
- // Non-global regexp search, string replace.
- var match = DoRegExpExec(search, subject, 0);
- if (match == null) {
- search.lastIndex = 0
- return subject;
- }
- if (replace.length == 0) {
- return %_SubString(subject, 0, match[CAPTURE0]) +
- %_SubString(subject, match[CAPTURE1], subject.length)
- }
- return ExpandReplacement(replace, subject, lastMatchInfo,
- %_SubString(subject, 0, match[CAPTURE0])) +
- %_SubString(subject, match[CAPTURE1], subject.length);
- }
-
- // Global regexp search, string replace.
- search.lastIndex = 0;
- if (lastMatchInfoOverride == null) {
- return %StringReplaceGlobalRegExpWithString(
- subject, search, replace, lastMatchInfo);
- } else {
- // We use this hack to detect whether StringReplaceRegExpWithString
- // found at least one hit. In that case we need to remove any
- // override.
- var saved_subject = lastMatchInfo[LAST_SUBJECT_INDEX];
- lastMatchInfo[LAST_SUBJECT_INDEX] = 0;
- var answer = %StringReplaceGlobalRegExpWithString(
- subject, search, replace, lastMatchInfo);
- if (%_IsSmi(lastMatchInfo[LAST_SUBJECT_INDEX])) {
- lastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject;
- } else {
- lastMatchInfoOverride = null;
- }
- return answer;
- }
- }
-
- if (search.global) {
- // Global regexp search, function replace.
- return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
- }
- // Non-global regexp search, function replace.
- return StringReplaceNonGlobalRegExpWithFunction(subject, search, replace);
- }
-
- search = TO_STRING_INLINE(search);
-
- if (search.length == 1 &&
- subject.length > 0xFF &&
- IS_STRING(replace) &&
- %StringIndexOf(replace, '$', 0) < 0) {
- // Searching by traversing a cons string tree and replace with cons of
- // slices works only when the replaced string is a single character, being
- // replaced by a simple string and only pays off for long strings.
- return %StringReplaceOneCharWithString(subject, search, replace);
- }
- var start = %StringIndexOf(subject, search, 0);
- if (start < 0) return subject;
- var end = start + search.length;
-
- var result = %_SubString(subject, 0, start);
-
- // Compute the string to replace with.
- if (IS_SPEC_FUNCTION(replace)) {
- var receiver = %GetDefaultReceiver(replace);
- result += %_CallFunction(receiver, search, start, subject, replace);
- } else {
- reusableMatchInfo[CAPTURE0] = start;
- reusableMatchInfo[CAPTURE1] = end;
- result = ExpandReplacement(TO_STRING_INLINE(replace),
- subject,
- reusableMatchInfo,
- result);
- }
-
- return result + %_SubString(subject, end, subject.length);
-}
-
-
-// Expand the $-expressions in the string and return a new string with
-// the result.
-function ExpandReplacement(string, subject, matchInfo, result) {
- var length = string.length;
- var next = %StringIndexOf(string, '$', 0);
- if (next < 0) {
- if (length > 0) result += string;
- return result;
- }
-
- if (next > 0) result += %_SubString(string, 0, next);
-
- while (true) {
- var expansion = '$';
- var position = next + 1;
- if (position < length) {
- var peek = %_StringCharCodeAt(string, position);
- if (peek == 36) { // $$
- ++position;
- result += '$';
- } else if (peek == 38) { // $& - match
- ++position;
- result +=
- %_SubString(subject, matchInfo[CAPTURE0], matchInfo[CAPTURE1]);
- } else if (peek == 96) { // $` - prefix
- ++position;
- result += %_SubString(subject, 0, matchInfo[CAPTURE0]);
- } else if (peek == 39) { // $' - suffix
- ++position;
- result += %_SubString(subject, matchInfo[CAPTURE1], subject.length);
- } else if (peek >= 48 && peek <= 57) {
- // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
- var scaled_index = (peek - 48) << 1;
- var advance = 1;
- var number_of_captures = NUMBER_OF_CAPTURES(matchInfo);
- if (position + 1 < string.length) {
- var next = %_StringCharCodeAt(string, position + 1);
- if (next >= 48 && next <= 57) {
- var new_scaled_index = scaled_index * 10 + ((next - 48) << 1);
- if (new_scaled_index < number_of_captures) {
- scaled_index = new_scaled_index;
- advance = 2;
- }
- }
- }
- if (scaled_index != 0 && scaled_index < number_of_captures) {
- var start = matchInfo[CAPTURE(scaled_index)];
- if (start >= 0) {
- result +=
- %_SubString(subject, start, matchInfo[CAPTURE(scaled_index + 1)]);
- }
- position += advance;
- } else {
- result += '$';
- }
- } else {
- result += '$';
- }
- } else {
- result += '$';
- }
-
- // Go the the next $ in the string.
- next = %StringIndexOf(string, '$', position);
-
- // Return if there are no more $ characters in the string. If we
- // haven't reached the end, we need to append the suffix.
- if (next < 0) {
- if (position < length) {
- result += %_SubString(string, position, length);
- }
- return result;
- }
-
- // Append substring between the previous and the next $ character.
- if (next > position) {
- result += %_SubString(string, position, next);
- }
- }
- return result;
-}
-
-
-// Compute the string of a given regular expression capture.
-function CaptureString(string, lastCaptureInfo, index) {
- // Scale the index.
- var scaled = index << 1;
- // Compute start and end.
- var start = lastCaptureInfo[CAPTURE(scaled)];
- // If start isn't valid, return undefined.
- if (start < 0) return;
- var end = lastCaptureInfo[CAPTURE(scaled + 1)];
- return %_SubString(string, start, end);
-}
-
-
-// TODO(lrn): This array will survive indefinitely if replace is never
-// called again. However, it will be empty, since the contents are cleared
-// in the finally block.
-var reusableReplaceArray = new InternalArray(16);
-
-// Helper function for replacing regular expressions with the result of a
-// function application in String.prototype.replace.
-function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
- var resultArray = reusableReplaceArray;
- if (resultArray) {
- reusableReplaceArray = null;
- } else {
- // Inside a nested replace (replace called from the replacement function
- // of another replace) or we have failed to set the reusable array
- // back due to an exception in a replacement function. Create a new
- // array to use in the future, or until the original is written back.
- resultArray = new InternalArray(16);
- }
- var res = %RegExpExecMultiple(regexp,
- subject,
- lastMatchInfo,
- resultArray);
- regexp.lastIndex = 0;
- if (IS_NULL(res)) {
- // No matches at all.
- reusableReplaceArray = resultArray;
- return subject;
- }
- var len = res.length;
- if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
- // If the number of captures is two then there are no explicit captures in
- // the regexp, just the implicit capture that captures the whole match. In
- // this case we can simplify quite a bit and end up with something faster.
- // The builder will consist of some integers that indicate slices of the
- // input string and some replacements that were returned from the replace
- // function.
- var match_start = 0;
- var override = new InternalPackedArray(null, 0, subject);
- var receiver = %GetDefaultReceiver(replace);
- for (var i = 0; i < len; i++) {
- var elem = res[i];
- if (%_IsSmi(elem)) {
- // Integers represent slices of the original string. Use these to
- // get the offsets we need for the override array (so things like
- // RegExp.leftContext work during the callback function.
- if (elem > 0) {
- match_start = (elem >> 11) + (elem & 0x7ff);
- } else {
- match_start = res[++i] - elem;
- }
- } else {
- override[0] = elem;
- override[1] = match_start;
- lastMatchInfoOverride = override;
- var func_result =
- %_CallFunction(receiver, elem, match_start, subject, replace);
- // Overwrite the i'th element in the results with the string we got
- // back from the callback function.
- res[i] = TO_STRING_INLINE(func_result);
- match_start += elem.length;
- }
- }
- } else {
- var receiver = %GetDefaultReceiver(replace);
- for (var i = 0; i < len; i++) {
- var elem = res[i];
- if (!%_IsSmi(elem)) {
- // elem must be an Array.
- // Use the apply argument as backing for global RegExp properties.
- lastMatchInfoOverride = elem;
- var func_result = %Apply(replace, receiver, elem, 0, elem.length);
- // Overwrite the i'th element in the results with the string we got
- // back from the callback function.
- res[i] = TO_STRING_INLINE(func_result);
- }
- }
- }
- var resultBuilder = new ReplaceResultBuilder(subject, res);
- var result = resultBuilder.generate();
- resultArray.length = 0;
- reusableReplaceArray = resultArray;
- return result;
-}
-
-
-function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
- var matchInfo = DoRegExpExec(regexp, subject, 0);
- if (IS_NULL(matchInfo)) {
- regexp.lastIndex = 0;
- return subject;
- }
- var index = matchInfo[CAPTURE0];
- var result = %_SubString(subject, 0, index);
- var endOfMatch = matchInfo[CAPTURE1];
- // Compute the parameter list consisting of the match, captures, index,
- // and subject for the replace function invocation.
- // The number of captures plus one for the match.
- var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
- var replacement;
- var receiver = %GetDefaultReceiver(replace);
- if (m == 1) {
- // No captures, only the match, which is always valid.
- var s = %_SubString(subject, index, endOfMatch);
- // Don't call directly to avoid exposing the built-in global object.
- replacement = %_CallFunction(receiver, s, index, subject, replace);
- } else {
- var parameters = new InternalArray(m + 2);
- for (var j = 0; j < m; j++) {
- parameters[j] = CaptureString(subject, matchInfo, j);
- }
- parameters[j] = index;
- parameters[j + 1] = subject;
-
- replacement = %Apply(replace, receiver, parameters, 0, j + 2);
- }
-
- result += replacement; // The add method converts to string if necessary.
- // Can't use matchInfo any more from here, since the function could
- // overwrite it.
- return result + %_SubString(subject, endOfMatch, subject.length);
-}
-
-
-// ECMA-262 section 15.5.4.12
-function StringSearch(re) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.search"]);
- }
- var regexp;
- if (IS_STRING(re)) {
- regexp = %_GetFromCache(STRING_TO_REGEXP_CACHE_ID, re);
- } else if (IS_REGEXP(re)) {
- regexp = re;
- } else {
- regexp = new $RegExp(re);
- }
- var match = DoRegExpExec(regexp, TO_STRING_INLINE(this), 0);
- if (match) {
- return match[CAPTURE0];
- }
- return -1;
-}
-
-
-// ECMA-262 section 15.5.4.13
-function StringSlice(start, end) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.slice"]);
- }
- var s = TO_STRING_INLINE(this);
- var s_len = s.length;
- var start_i = TO_INTEGER(start);
- var end_i = s_len;
- if (end !== void 0) {
- end_i = TO_INTEGER(end);
- }
-
- if (start_i < 0) {
- start_i += s_len;
- if (start_i < 0) {
- start_i = 0;
- }
- } else {
- if (start_i > s_len) {
- return '';
- }
- }
-
- if (end_i < 0) {
- end_i += s_len;
- if (end_i < 0) {
- return '';
- }
- } else {
- if (end_i > s_len) {
- end_i = s_len;
- }
- }
-
- if (end_i <= start_i) {
- return '';
- }
-
- return %_SubString(s, start_i, end_i);
-}
-
-
-// ECMA-262 section 15.5.4.14
-function StringSplit(separator, limit) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.split"]);
- }
- var subject = TO_STRING_INLINE(this);
- limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
-
- // ECMA-262 says that if separator is undefined, the result should
- // be an array of size 1 containing the entire string.
- if (IS_UNDEFINED(separator)) {
- return [subject];
- }
-
- var length = subject.length;
- if (!IS_REGEXP(separator)) {
- separator = TO_STRING_INLINE(separator);
-
- if (limit === 0) return [];
-
- var separator_length = separator.length;
-
- // If the separator string is empty then return the elements in the subject.
- if (separator_length === 0) return %StringToArray(subject, limit);
-
- var result = %StringSplit(subject, separator, limit);
-
- return result;
- }
-
- if (limit === 0) return [];
-
- // Separator is a regular expression.
- return StringSplitOnRegExp(subject, separator, limit, length);
-}
-
-
-function StringSplitOnRegExp(subject, separator, limit, length) {
- %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
-
- if (length === 0) {
- if (DoRegExpExec(separator, subject, 0, 0) != null) {
- return [];
- }
- return [subject];
- }
-
- var currentIndex = 0;
- var startIndex = 0;
- var startMatch = 0;
- var result = [];
-
- outer_loop:
- while (true) {
-
- if (startIndex === length) {
- result.push(%_SubString(subject, currentIndex, length));
- break;
- }
-
- var matchInfo = DoRegExpExec(separator, subject, startIndex);
- if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) {
- result.push(%_SubString(subject, currentIndex, length));
- break;
- }
- var endIndex = matchInfo[CAPTURE1];
-
- // We ignore a zero-length match at the currentIndex.
- if (startIndex === endIndex && endIndex === currentIndex) {
- startIndex++;
- continue;
- }
-
- result.push(%_SubString(subject, currentIndex, startMatch));
-
- if (result.length === limit) break;
-
- var matchinfo_len = NUMBER_OF_CAPTURES(matchInfo) + REGEXP_FIRST_CAPTURE;
- for (var i = REGEXP_FIRST_CAPTURE + 2; i < matchinfo_len; ) {
- var start = matchInfo[i++];
- var end = matchInfo[i++];
- if (end != -1) {
- result.push(%_SubString(subject, start, end));
- } else {
- result.push(void 0);
- }
- if (result.length === limit) break outer_loop;
- }
-
- startIndex = currentIndex = endIndex;
- }
- return result;
-}
-
-
-// ECMA-262 section 15.5.4.15
-function StringSubstring(start, end) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.subString"]);
- }
- var s = TO_STRING_INLINE(this);
- var s_len = s.length;
-
- var start_i = TO_INTEGER(start);
- if (start_i < 0) {
- start_i = 0;
- } else if (start_i > s_len) {
- start_i = s_len;
- }
-
- var end_i = s_len;
- if (!IS_UNDEFINED(end)) {
- end_i = TO_INTEGER(end);
- if (end_i > s_len) {
- end_i = s_len;
- } else {
- if (end_i < 0) end_i = 0;
- if (start_i > end_i) {
- var tmp = end_i;
- end_i = start_i;
- start_i = tmp;
- }
- }
- }
-
- return %_SubString(s, start_i, end_i);
-}
-
-
-// This is not a part of ECMA-262.
-function StringSubstr(start, n) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.substr"]);
- }
- var s = TO_STRING_INLINE(this);
- var len;
-
- // Correct n: If not given, set to string length; if explicitly
- // set to undefined, zero, or negative, returns empty string.
- if (n === void 0) {
- len = s.length;
- } else {
- len = TO_INTEGER(n);
- if (len <= 0) return '';
- }
-
- // Correct start: If not given (or undefined), set to zero; otherwise
- // convert to integer and handle negative case.
- if (start === void 0) {
- start = 0;
- } else {
- start = TO_INTEGER(start);
- // If positive, and greater than or equal to the string length,
- // return empty string.
- if (start >= s.length) return '';
- // If negative and absolute value is larger than the string length,
- // use zero.
- if (start < 0) {
- start += s.length;
- if (start < 0) start = 0;
- }
- }
-
- var end = start + len;
- if (end > s.length) end = s.length;
-
- return %_SubString(s, start, end);
-}
-
-
-// ECMA-262, 15.5.4.16
-function StringToLowerCase() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.toLowerCase"]);
- }
- return %StringToLowerCase(TO_STRING_INLINE(this));
-}
-
-
-// ECMA-262, 15.5.4.17
-function StringToLocaleLowerCase() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.toLocaleLowerCase"]);
- }
- return %StringToLowerCase(TO_STRING_INLINE(this));
-}
-
-
-// ECMA-262, 15.5.4.18
-function StringToUpperCase() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.toUpperCase"]);
- }
- return %StringToUpperCase(TO_STRING_INLINE(this));
-}
-
-
-// ECMA-262, 15.5.4.19
-function StringToLocaleUpperCase() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.toLocaleUpperCase"]);
- }
- return %StringToUpperCase(TO_STRING_INLINE(this));
-}
-
-// ES5, 15.5.4.20
-function StringTrim() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.trim"]);
- }
- return %StringTrim(TO_STRING_INLINE(this), true, true);
-}
-
-function StringTrimLeft() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.trimLeft"]);
- }
- return %StringTrim(TO_STRING_INLINE(this), true, false);
-}
-
-function StringTrimRight() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.trimRight"]);
- }
- return %StringTrim(TO_STRING_INLINE(this), false, true);
-}
-
-
-// ECMA-262, section 15.5.3.2
-function StringFromCharCode(code) {
- var n = %_ArgumentsLength();
- if (n == 1) {
- if (!%_IsSmi(code)) code = ToNumber(code);
- return %_StringCharFromCode(code & 0xffff);
- }
-
- var one_byte = %NewString(n, NEW_ONE_BYTE_STRING);
- var i;
- for (i = 0; i < n; i++) {
- var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
- if (code < 0) code = code & 0xffff;
- if (code > 0xff) break;
- %_OneByteSeqStringSetChar(one_byte, i, code);
- }
- if (i == n) return one_byte;
- one_byte = %TruncateString(one_byte, i);
-
- var two_byte = %NewString(n - i, NEW_TWO_BYTE_STRING);
- for (var j = 0; i < n; i++, j++) {
- var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
- %_TwoByteSeqStringSetChar(two_byte, j, code);
- }
- return one_byte + two_byte;
-}
-
-
-// Helper function for very basic XSS protection.
-function HtmlEscape(str) {
- return TO_STRING_INLINE(str).replace(/</g, "&lt;")
- .replace(/>/g, "&gt;")
- .replace(/"/g, "&quot;")
- .replace(/'/g, "&#039;");
-}
-
-
-// Compatibility support for KJS.
-// Tested by mozilla/js/tests/js1_5/Regress/regress-276103.js.
-function StringLink(s) {
- return "<a href=\"" + HtmlEscape(s) + "\">" + this + "</a>";
-}
-
-
-function StringAnchor(name) {
- return "<a name=\"" + HtmlEscape(name) + "\">" + this + "</a>";
-}
-
-
-function StringFontcolor(color) {
- return "<font color=\"" + HtmlEscape(color) + "\">" + this + "</font>";
-}
-
-
-function StringFontsize(size) {
- return "<font size=\"" + HtmlEscape(size) + "\">" + this + "</font>";
-}
-
-
-function StringBig() {
- return "<big>" + this + "</big>";
-}
-
-
-function StringBlink() {
- return "<blink>" + this + "</blink>";
-}
-
-
-function StringBold() {
- return "<b>" + this + "</b>";
-}
-
-
-function StringFixed() {
- return "<tt>" + this + "</tt>";
-}
-
-
-function StringItalics() {
- return "<i>" + this + "</i>";
-}
-
-
-function StringSmall() {
- return "<small>" + this + "</small>";
-}
-
-
-function StringStrike() {
- return "<strike>" + this + "</strike>";
-}
-
-
-function StringSub() {
- return "<sub>" + this + "</sub>";
-}
-
-
-function StringSup() {
- return "<sup>" + this + "</sup>";
-}
-
-
-// ReplaceResultBuilder support.
-function ReplaceResultBuilder(str) {
- if (%_ArgumentsLength() > 1) {
- this.elements = %_Arguments(1);
- } else {
- this.elements = new InternalArray();
- }
- this.special_string = str;
-}
-
-SetUpLockedPrototype(ReplaceResultBuilder,
- $Array("elements", "special_string"), $Array(
- "add", function(str) {
- str = TO_STRING_INLINE(str);
- if (str.length > 0) this.elements.push(str);
- },
- "addSpecialSlice", function(start, end) {
- var len = end - start;
- if (start < 0 || len <= 0) return;
- if (start < 0x80000 && len < 0x800) {
- this.elements.push((start << 11) | len);
- } else {
- // 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
- // so -len is a smi.
- var elements = this.elements;
- elements.push(-len);
- elements.push(start);
- }
- },
- "generate", function() {
- var elements = this.elements;
- return %StringBuilderConcat(elements, elements.length, this.special_string);
- }
-));
-
-
-// -------------------------------------------------------------------
-
-function SetUpString() {
- %CheckIsBootstrapping();
- // Set up the constructor property on the String prototype object.
- %SetProperty($String.prototype, "constructor", $String, DONT_ENUM);
-
-
- // Set up the non-enumerable functions on the String object.
- InstallFunctions($String, DONT_ENUM, $Array(
- "fromCharCode", StringFromCharCode
- ));
-
-
- // Set up the non-enumerable functions on the String prototype object.
- InstallFunctions($String.prototype, DONT_ENUM, $Array(
- "valueOf", StringValueOf,
- "toString", StringToString,
- "charAt", StringCharAt,
- "charCodeAt", StringCharCodeAt,
- "concat", StringConcat,
- "indexOf", StringIndexOf,
- "lastIndexOf", StringLastIndexOf,
- "localeCompare", StringLocaleCompare,
- "match", StringMatch,
- "replace", StringReplace,
- "search", StringSearch,
- "slice", StringSlice,
- "split", StringSplit,
- "substring", StringSubstring,
- "substr", StringSubstr,
- "toLowerCase", StringToLowerCase,
- "toLocaleLowerCase", StringToLocaleLowerCase,
- "toUpperCase", StringToUpperCase,
- "toLocaleUpperCase", StringToLocaleUpperCase,
- "trim", StringTrim,
- "trimLeft", StringTrimLeft,
- "trimRight", StringTrimRight,
- "link", StringLink,
- "anchor", StringAnchor,
- "fontcolor", StringFontcolor,
- "fontsize", StringFontsize,
- "big", StringBig,
- "blink", StringBlink,
- "bold", StringBold,
- "fixed", StringFixed,
- "italics", StringItalics,
- "small", StringSmall,
- "strike", StringStrike,
- "sub", StringSub,
- "sup", StringSup
- ));
-}
-
-SetUpString();
diff --git a/src/3rdparty/v8/src/strtod.cc b/src/3rdparty/v8/src/strtod.cc
deleted file mode 100644
index 0dc618a..0000000
--- a/src/3rdparty/v8/src/strtod.cc
+++ /dev/null
@@ -1,442 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-#include <math.h>
-
-#include "globals.h"
-#include "utils.h"
-#include "strtod.h"
-#include "bignum.h"
-#include "cached-powers.h"
-#include "double.h"
-
-namespace v8 {
-namespace internal {
-
-// 2^53 = 9007199254740992.
-// Any integer with at most 15 decimal digits will hence fit into a double
-// (which has a 53bit significand) without loss of precision.
-static const int kMaxExactDoubleIntegerDecimalDigits = 15;
-// 2^64 = 18446744073709551616 > 10^19
-static const int kMaxUint64DecimalDigits = 19;
-
-// Max double: 1.7976931348623157 x 10^308
-// Min non-zero double: 4.9406564584124654 x 10^-324
-// Any x >= 10^309 is interpreted as +infinity.
-// Any x <= 10^-324 is interpreted as 0.
-// Note that 2.5e-324 (despite being smaller than the min double) will be read
-// as non-zero (equal to the min non-zero double).
-static const int kMaxDecimalPower = 309;
-static const int kMinDecimalPower = -324;
-
-// 2^64 = 18446744073709551616
-static const uint64_t kMaxUint64 = V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF);
-
-
-static const double exact_powers_of_ten[] = {
- 1.0, // 10^0
- 10.0,
- 100.0,
- 1000.0,
- 10000.0,
- 100000.0,
- 1000000.0,
- 10000000.0,
- 100000000.0,
- 1000000000.0,
- 10000000000.0, // 10^10
- 100000000000.0,
- 1000000000000.0,
- 10000000000000.0,
- 100000000000000.0,
- 1000000000000000.0,
- 10000000000000000.0,
- 100000000000000000.0,
- 1000000000000000000.0,
- 10000000000000000000.0,
- 100000000000000000000.0, // 10^20
- 1000000000000000000000.0,
- // 10^22 = 0x21e19e0c9bab2400000 = 0x878678326eac9 * 2^22
- 10000000000000000000000.0
-};
-static const int kExactPowersOfTenSize = ARRAY_SIZE(exact_powers_of_ten);
-
-// Maximum number of significant digits in the decimal representation.
-// In fact the value is 772 (see conversions.cc), but to give us some margin
-// we round up to 780.
-static const int kMaxSignificantDecimalDigits = 780;
-
-static Vector<const char> TrimLeadingZeros(Vector<const char> buffer) {
- for (int i = 0; i < buffer.length(); i++) {
- if (buffer[i] != '0') {
- return buffer.SubVector(i, buffer.length());
- }
- }
- return Vector<const char>(buffer.start(), 0);
-}
-
-
-static Vector<const char> TrimTrailingZeros(Vector<const char> buffer) {
- for (int i = buffer.length() - 1; i >= 0; --i) {
- if (buffer[i] != '0') {
- return buffer.SubVector(0, i + 1);
- }
- }
- return Vector<const char>(buffer.start(), 0);
-}
-
-
-static void TrimToMaxSignificantDigits(Vector<const char> buffer,
- int exponent,
- char* significant_buffer,
- int* significant_exponent) {
- for (int i = 0; i < kMaxSignificantDecimalDigits - 1; ++i) {
- significant_buffer[i] = buffer[i];
- }
- // The input buffer has been trimmed. Therefore the last digit must be
- // different from '0'.
- ASSERT(buffer[buffer.length() - 1] != '0');
- // Set the last digit to be non-zero. This is sufficient to guarantee
- // correct rounding.
- significant_buffer[kMaxSignificantDecimalDigits - 1] = '1';
- *significant_exponent =
- exponent + (buffer.length() - kMaxSignificantDecimalDigits);
-}
-
-// Reads digits from the buffer and converts them to a uint64.
-// Reads in as many digits as fit into a uint64.
-// When the string starts with "1844674407370955161" no further digit is read.
-// Since 2^64 = 18446744073709551616 it would still be possible read another
-// digit if it was less or equal than 6, but this would complicate the code.
-static uint64_t ReadUint64(Vector<const char> buffer,
- int* number_of_read_digits) {
- uint64_t result = 0;
- int i = 0;
- while (i < buffer.length() && result <= (kMaxUint64 / 10 - 1)) {
- int digit = buffer[i++] - '0';
- ASSERT(0 <= digit && digit <= 9);
- result = 10 * result + digit;
- }
- *number_of_read_digits = i;
- return result;
-}
-
-
-// Reads a DiyFp from the buffer.
-// The returned DiyFp is not necessarily normalized.
-// If remaining_decimals is zero then the returned DiyFp is accurate.
-// Otherwise it has been rounded and has error of at most 1/2 ulp.
-static void ReadDiyFp(Vector<const char> buffer,
- DiyFp* result,
- int* remaining_decimals) {
- int read_digits;
- uint64_t significand = ReadUint64(buffer, &read_digits);
- if (buffer.length() == read_digits) {
- *result = DiyFp(significand, 0);
- *remaining_decimals = 0;
- } else {
- // Round the significand.
- if (buffer[read_digits] >= '5') {
- significand++;
- }
- // Compute the binary exponent.
- int exponent = 0;
- *result = DiyFp(significand, exponent);
- *remaining_decimals = buffer.length() - read_digits;
- }
-}
-
-
-static bool DoubleStrtod(Vector<const char> trimmed,
- int exponent,
- double* result) {
-#if (defined(V8_TARGET_ARCH_IA32) || defined(USE_SIMULATOR)) \
- && !defined(_MSC_VER)
- // On x86 the floating-point stack can be 64 or 80 bits wide. If it is
- // 80 bits wide (as is the case on Linux) then double-rounding occurs and the
- // result is not accurate.
- // We know that Windows32 with MSVC, unlike with MinGW32, uses 64 bits and is
- // therefore accurate.
- // Note that the ARM and MIPS simulators are compiled for 32bits. They
- // therefore exhibit the same problem.
- return false;
-#endif
- if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
- int read_digits;
- // The trimmed input fits into a double.
- // If the 10^exponent (resp. 10^-exponent) fits into a double too then we
- // can compute the result-double simply by multiplying (resp. dividing) the
- // two numbers.
- // This is possible because IEEE guarantees that floating-point operations
- // return the best possible approximation.
- if (exponent < 0 && -exponent < kExactPowersOfTenSize) {
- // 10^-exponent fits into a double.
- *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
- ASSERT(read_digits == trimmed.length());
- *result /= exact_powers_of_ten[-exponent];
- return true;
- }
- if (0 <= exponent && exponent < kExactPowersOfTenSize) {
- // 10^exponent fits into a double.
- *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
- ASSERT(read_digits == trimmed.length());
- *result *= exact_powers_of_ten[exponent];
- return true;
- }
- int remaining_digits =
- kMaxExactDoubleIntegerDecimalDigits - trimmed.length();
- if ((0 <= exponent) &&
- (exponent - remaining_digits < kExactPowersOfTenSize)) {
- // The trimmed string was short and we can multiply it with
- // 10^remaining_digits. As a result the remaining exponent now fits
- // into a double too.
- *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
- ASSERT(read_digits == trimmed.length());
- *result *= exact_powers_of_ten[remaining_digits];
- *result *= exact_powers_of_ten[exponent - remaining_digits];
- return true;
- }
- }
- return false;
-}
-
-
-// Returns 10^exponent as an exact DiyFp.
-// The given exponent must be in the range [1; kDecimalExponentDistance[.
-static DiyFp AdjustmentPowerOfTen(int exponent) {
- ASSERT(0 < exponent);
- ASSERT(exponent < PowersOfTenCache::kDecimalExponentDistance);
- // Simply hardcode the remaining powers for the given decimal exponent
- // distance.
- ASSERT(PowersOfTenCache::kDecimalExponentDistance == 8);
- switch (exponent) {
- case 1: return DiyFp(V8_2PART_UINT64_C(0xa0000000, 00000000), -60);
- case 2: return DiyFp(V8_2PART_UINT64_C(0xc8000000, 00000000), -57);
- case 3: return DiyFp(V8_2PART_UINT64_C(0xfa000000, 00000000), -54);
- case 4: return DiyFp(V8_2PART_UINT64_C(0x9c400000, 00000000), -50);
- case 5: return DiyFp(V8_2PART_UINT64_C(0xc3500000, 00000000), -47);
- case 6: return DiyFp(V8_2PART_UINT64_C(0xf4240000, 00000000), -44);
- case 7: return DiyFp(V8_2PART_UINT64_C(0x98968000, 00000000), -40);
- default:
- UNREACHABLE();
- return DiyFp(0, 0);
- }
-}
-
-
-// If the function returns true then the result is the correct double.
-// Otherwise it is either the correct double or the double that is just below
-// the correct double.
-static bool DiyFpStrtod(Vector<const char> buffer,
- int exponent,
- double* result) {
- DiyFp input;
- int remaining_decimals;
- ReadDiyFp(buffer, &input, &remaining_decimals);
- // Since we may have dropped some digits the input is not accurate.
- // If remaining_decimals is different than 0 than the error is at most
- // .5 ulp (unit in the last place).
- // We don't want to deal with fractions and therefore keep a common
- // denominator.
- const int kDenominatorLog = 3;
- const int kDenominator = 1 << kDenominatorLog;
- // Move the remaining decimals into the exponent.
- exponent += remaining_decimals;
- int error = (remaining_decimals == 0 ? 0 : kDenominator / 2);
-
- int old_e = input.e();
- input.Normalize();
- error <<= old_e - input.e();
-
- ASSERT(exponent <= PowersOfTenCache::kMaxDecimalExponent);
- if (exponent < PowersOfTenCache::kMinDecimalExponent) {
- *result = 0.0;
- return true;
- }
- DiyFp cached_power;
- int cached_decimal_exponent;
- PowersOfTenCache::GetCachedPowerForDecimalExponent(exponent,
- &cached_power,
- &cached_decimal_exponent);
-
- if (cached_decimal_exponent != exponent) {
- int adjustment_exponent = exponent - cached_decimal_exponent;
- DiyFp adjustment_power = AdjustmentPowerOfTen(adjustment_exponent);
- input.Multiply(adjustment_power);
- if (kMaxUint64DecimalDigits - buffer.length() >= adjustment_exponent) {
- // The product of input with the adjustment power fits into a 64 bit
- // integer.
- ASSERT(DiyFp::kSignificandSize == 64);
- } else {
- // The adjustment power is exact. There is hence only an error of 0.5.
- error += kDenominator / 2;
- }
- }
-
- input.Multiply(cached_power);
- // The error introduced by a multiplication of a*b equals
- // error_a + error_b + error_a*error_b/2^64 + 0.5
- // Substituting a with 'input' and b with 'cached_power' we have
- // error_b = 0.5 (all cached powers have an error of less than 0.5 ulp),
- // error_ab = 0 or 1 / kDenominator > error_a*error_b/ 2^64
- int error_b = kDenominator / 2;
- int error_ab = (error == 0 ? 0 : 1); // We round up to 1.
- int fixed_error = kDenominator / 2;
- error += error_b + error_ab + fixed_error;
-
- old_e = input.e();
- input.Normalize();
- error <<= old_e - input.e();
-
- // See if the double's significand changes if we add/subtract the error.
- int order_of_magnitude = DiyFp::kSignificandSize + input.e();
- int effective_significand_size =
- Double::SignificandSizeForOrderOfMagnitude(order_of_magnitude);
- int precision_digits_count =
- DiyFp::kSignificandSize - effective_significand_size;
- if (precision_digits_count + kDenominatorLog >= DiyFp::kSignificandSize) {
- // This can only happen for very small denormals. In this case the
- // half-way multiplied by the denominator exceeds the range of an uint64.
- // Simply shift everything to the right.
- int shift_amount = (precision_digits_count + kDenominatorLog) -
- DiyFp::kSignificandSize + 1;
- input.set_f(input.f() >> shift_amount);
- input.set_e(input.e() + shift_amount);
- // We add 1 for the lost precision of error, and kDenominator for
- // the lost precision of input.f().
- error = (error >> shift_amount) + 1 + kDenominator;
- precision_digits_count -= shift_amount;
- }
- // We use uint64_ts now. This only works if the DiyFp uses uint64_ts too.
- ASSERT(DiyFp::kSignificandSize == 64);
- ASSERT(precision_digits_count < 64);
- uint64_t one64 = 1;
- uint64_t precision_bits_mask = (one64 << precision_digits_count) - 1;
- uint64_t precision_bits = input.f() & precision_bits_mask;
- uint64_t half_way = one64 << (precision_digits_count - 1);
- precision_bits *= kDenominator;
- half_way *= kDenominator;
- DiyFp rounded_input(input.f() >> precision_digits_count,
- input.e() + precision_digits_count);
- if (precision_bits >= half_way + error) {
- rounded_input.set_f(rounded_input.f() + 1);
- }
- // If the last_bits are too close to the half-way case than we are too
- // inaccurate and round down. In this case we return false so that we can
- // fall back to a more precise algorithm.
-
- *result = Double(rounded_input).value();
- if (half_way - error < precision_bits && precision_bits < half_way + error) {
- // Too imprecise. The caller will have to fall back to a slower version.
- // However the returned number is guaranteed to be either the correct
- // double, or the next-lower double.
- return false;
- } else {
- return true;
- }
-}
-
-
-// Returns the correct double for the buffer*10^exponent.
-// The variable guess should be a close guess that is either the correct double
-// or its lower neighbor (the nearest double less than the correct one).
-// Preconditions:
-// buffer.length() + exponent <= kMaxDecimalPower + 1
-// buffer.length() + exponent > kMinDecimalPower
-// buffer.length() <= kMaxDecimalSignificantDigits
-static double BignumStrtod(Vector<const char> buffer,
- int exponent,
- double guess) {
- if (guess == V8_INFINITY) {
- return guess;
- }
-
- DiyFp upper_boundary = Double(guess).UpperBoundary();
-
- ASSERT(buffer.length() + exponent <= kMaxDecimalPower + 1);
- ASSERT(buffer.length() + exponent > kMinDecimalPower);
- ASSERT(buffer.length() <= kMaxSignificantDecimalDigits);
- // Make sure that the Bignum will be able to hold all our numbers.
- // Our Bignum implementation has a separate field for exponents. Shifts will
- // consume at most one bigit (< 64 bits).
- // ln(10) == 3.3219...
- ASSERT(((kMaxDecimalPower + 1) * 333 / 100) < Bignum::kMaxSignificantBits);
- Bignum input;
- Bignum boundary;
- input.AssignDecimalString(buffer);
- boundary.AssignUInt64(upper_boundary.f());
- if (exponent >= 0) {
- input.MultiplyByPowerOfTen(exponent);
- } else {
- boundary.MultiplyByPowerOfTen(-exponent);
- }
- if (upper_boundary.e() > 0) {
- boundary.ShiftLeft(upper_boundary.e());
- } else {
- input.ShiftLeft(-upper_boundary.e());
- }
- int comparison = Bignum::Compare(input, boundary);
- if (comparison < 0) {
- return guess;
- } else if (comparison > 0) {
- return Double(guess).NextDouble();
- } else if ((Double(guess).Significand() & 1) == 0) {
- // Round towards even.
- return guess;
- } else {
- return Double(guess).NextDouble();
- }
-}
-
-
-double Strtod(Vector<const char> buffer, int exponent) {
- Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
- Vector<const char> trimmed = TrimTrailingZeros(left_trimmed);
- exponent += left_trimmed.length() - trimmed.length();
- if (trimmed.length() == 0) return 0.0;
- if (trimmed.length() > kMaxSignificantDecimalDigits) {
- char significant_buffer[kMaxSignificantDecimalDigits];
- int significant_exponent;
- TrimToMaxSignificantDigits(trimmed, exponent,
- significant_buffer, &significant_exponent);
- return Strtod(Vector<const char>(significant_buffer,
- kMaxSignificantDecimalDigits),
- significant_exponent);
- }
- if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) return V8_INFINITY;
- if (exponent + trimmed.length() <= kMinDecimalPower) return 0.0;
-
- double guess;
- if (DoubleStrtod(trimmed, exponent, &guess) ||
- DiyFpStrtod(trimmed, exponent, &guess)) {
- return guess;
- }
- return BignumStrtod(trimmed, exponent, guess);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/strtod.h b/src/3rdparty/v8/src/strtod.h
deleted file mode 100644
index 1a5a96c..0000000
--- a/src/3rdparty/v8/src/strtod.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STRTOD_H_
-#define V8_STRTOD_H_
-
-namespace v8 {
-namespace internal {
-
-// The buffer must only contain digits in the range [0-9]. It must not
-// contain a dot or a sign. It must not start with '0', and must not be empty.
-double Strtod(Vector<const char> buffer, int exponent);
-
-} } // namespace v8::internal
-
-#endif // V8_STRTOD_H_
diff --git a/src/3rdparty/v8/src/stub-cache.cc b/src/3rdparty/v8/src/stub-cache.cc
deleted file mode 100644
index 2711bbf..0000000
--- a/src/3rdparty/v8/src/stub-cache.cc
+++ /dev/null
@@ -1,1856 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "arguments.h"
-#include "ast.h"
-#include "code-stubs.h"
-#include "gdb-jit.h"
-#include "ic-inl.h"
-#include "stub-cache.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------
-// StubCache implementation.
-
-
-StubCache::StubCache(Isolate* isolate, Zone* zone)
- : isolate_(isolate) {
- ASSERT(isolate == Isolate::Current());
-}
-
-
-void StubCache::Initialize() {
- ASSERT(IsPowerOf2(kPrimaryTableSize));
- ASSERT(IsPowerOf2(kSecondaryTableSize));
- Clear();
-}
-
-
-Code* StubCache::Set(String* name, Map* map, Code* code) {
- // Get the flags from the code.
- Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());
-
- // Validate that the name does not move on scavenge, and that we
- // can use identity checks instead of string equality checks.
- ASSERT(!heap()->InNewSpace(name));
- ASSERT(name->IsInternalizedString());
-
- // The state bits are not important to the hash function because
- // the stub cache only contains monomorphic stubs. Make sure that
- // the bits are the least significant so they will be the ones
- // masked out.
- ASSERT(Code::ExtractICStateFromFlags(flags) == MONOMORPHIC);
- STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1);
-
- // Make sure that the code type is not included in the hash.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Compute the primary entry.
- int primary_offset = PrimaryOffset(name, flags, map);
- Entry* primary = entry(primary_, primary_offset);
- Code* old_code = primary->value;
-
- // If the primary entry has useful data in it, we retire it to the
- // secondary cache before overwriting it.
- if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
- Map* old_map = primary->map;
- Code::Flags old_flags = Code::RemoveTypeFromFlags(old_code->flags());
- int seed = PrimaryOffset(primary->key, old_flags, old_map);
- int secondary_offset = SecondaryOffset(primary->key, old_flags, seed);
- Entry* secondary = entry(secondary_, secondary_offset);
- *secondary = *primary;
- }
-
- // Update primary cache.
- primary->key = name;
- primary->value = code;
- primary->map = map;
- isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
- return code;
-}
-
-
-Handle<JSObject> StubCache::StubHolder(Handle<JSObject> receiver,
- Handle<JSObject> holder) {
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*receiver, *holder);
- return Handle<JSObject>(IC::GetCodeCacheHolder(
- isolate_, *receiver, cache_holder));
-}
-
-
-Handle<Code> StubCache::FindStub(Handle<String> name,
- Handle<JSObject> stub_holder,
- Code::Kind kind,
- Code::StubType type,
- Code::IcFragment fragment) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind, fragment, type);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
- return Handle<Code>::null();
-}
-
-
-Handle<Code> StubCache::FindHandler(Handle<String> name,
- Handle<JSObject> handler_holder,
- Code::Kind kind,
- Code::StubType type) {
- return FindStub(name, handler_holder, kind, type, Code::HANDLER_FRAGMENT);
-}
-
-
-Handle<Code> StubCache::ComputeMonomorphicIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<String> name) {
- Handle<Code> ic = FindStub(name, receiver, Code::LOAD_IC,
- handler->type(), Code::IC_FRAGMENT);
- if (!ic.is_null()) return ic;
-
- LoadStubCompiler ic_compiler(isolate());
- ic = ic_compiler.CompileMonomorphicIC(
- Handle<Map>(receiver->map()), handler, name);
-
- JSObject::UpdateMapCodeCache(receiver, name, ic);
- return ic;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedMonomorphicIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<String> name) {
- Handle<Code> ic = FindStub(name, receiver, Code::KEYED_LOAD_IC,
- handler->type(), Code::IC_FRAGMENT);
- if (!ic.is_null()) return ic;
-
- KeyedLoadStubCompiler ic_compiler(isolate());
- ic = ic_compiler.CompileMonomorphicIC(
- Handle<Map>(receiver->map()), handler, name);
-
- JSObject::UpdateMapCodeCache(receiver, name, ic);
- return ic;
-}
-
-
-Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name,
- Handle<JSObject> receiver) {
- // If no global objects are present in the prototype chain, the load
- // nonexistent IC stub can be shared for all names for a given map
- // and we use the empty string for the map cache in that case. If
- // there are global objects involved, we need to check global
- // property cells in the stub and therefore the stub will be
- // specific to the name.
- Handle<String> cache_name = factory()->empty_string();
- Handle<JSObject> current;
- Handle<Object> next = receiver;
- Handle<GlobalObject> global;
- do {
- current = Handle<JSObject>::cast(next);
- next = Handle<Object>(current->GetPrototype(), isolate_);
- if (current->IsGlobalObject()) {
- global = Handle<GlobalObject>::cast(current);
- cache_name = name;
- } else if (!current->HasFastProperties()) {
- cache_name = name;
- }
- } while (!next->IsNull());
-
- // Compile the stub that is either shared for all names or
- // name specific if there are global objects involved.
- Handle<Code> handler = FindHandler(
- cache_name, receiver, Code::LOAD_IC, Code::NONEXISTENT);
- if (!handler.is_null()) return handler;
-
- LoadStubCompiler compiler(isolate_);
- handler =
- compiler.CompileLoadNonexistent(receiver, current, cache_name, global);
- JSObject::UpdateMapCodeCache(receiver, cache_name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- PropertyIndex field) {
- if (receiver.is_identical_to(holder)) {
- LoadFieldStub stub(LoadStubCompiler::receiver(),
- field.is_inobject(holder),
- field.translate(holder));
- return stub.GetCode(isolate());
- }
-
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
- name, stub_holder, Code::LOAD_IC, Code::FIELD);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadField(receiver, holder, name, field);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback) {
- ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
- name, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadCallback(receiver, holder, name, callback);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadViaGetter(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
- name, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadViaGetter(receiver, holder, name, getter);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> handler = FindHandler(
- name, stub_holder, Code::LOAD_IC, Code::CONSTANT_FUNCTION);
- if (!handler.is_null()) return handler;
-
- LoadStubCompiler compiler(isolate_);
- handler = compiler.CompileLoadConstant(receiver, holder, name, value);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
-
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
- name, stub_holder, Code::LOAD_IC, Code::INTERCEPTOR);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadInterceptor(receiver, holder, name);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadNormal(Handle<String> name,
- Handle<JSObject> receiver) {
- return isolate_->builtins()->LoadIC_Normal();
-}
-
-
-Handle<Code> StubCache::ComputeLoadGlobal(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- bool is_dont_delete) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindStub(
- name, stub_holder, Code::LOAD_IC, Code::NORMAL, Code::IC_FRAGMENT);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> ic =
- compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete);
- JSObject::UpdateMapCodeCache(stub_holder, name, ic);
- return ic;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- PropertyIndex field) {
- if (receiver.is_identical_to(holder)) {
- LoadFieldStub stub(KeyedLoadStubCompiler::receiver(),
- field.is_inobject(holder),
- field.translate(holder));
- return stub.GetCode(isolate());
- }
-
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
- name, stub_holder, Code::KEYED_LOAD_IC, Code::FIELD);
- if (!stub.is_null()) return stub;
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadField(receiver, holder, name, field);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> handler = FindHandler(
- name, stub_holder, Code::KEYED_LOAD_IC, Code::CONSTANT_FUNCTION);
- if (!handler.is_null()) return handler;
-
- KeyedLoadStubCompiler compiler(isolate_);
- handler = compiler.CompileLoadConstant(receiver, holder, name, value);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
- name, stub_holder, Code::KEYED_LOAD_IC, Code::INTERCEPTOR);
- if (!stub.is_null()) return stub;
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadInterceptor(receiver, holder, name);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
- name, stub_holder, Code::KEYED_LOAD_IC, Code::CALLBACKS);
- if (!stub.is_null()) return stub;
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadCallback(receiver, holder, name, callback);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeStoreField(Handle<String> name,
- Handle<JSObject> receiver,
- int field_index,
- Handle<Map> transition,
- StrictModeFlag strict_mode) {
- Code::StubType type =
- (transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, strict_mode, type);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code =
- compiler.CompileStoreField(receiver, field_index, transition, name);
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadElement(Handle<Map> receiver_map) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
- Handle<String> name =
- isolate()->factory()->KeyedLoadElementMonomorphic_string();
-
- Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedLoadStubCompiler compiler(isolate());
- Handle<Code> code = compiler.CompileLoadElement(receiver_map);
-
- Map::UpdateCodeCache(receiver_map, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedStoreElement(
- Handle<Map> receiver_map,
- KeyedStoreIC::StubKind stub_kind,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode) {
- Code::ExtraICState extra_state =
- Code::ComputeExtraICState(grow_mode, strict_mode);
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::KEYED_STORE_IC, extra_state);
-
- ASSERT(stub_kind == KeyedStoreIC::STORE_NO_TRANSITION ||
- stub_kind == KeyedStoreIC::STORE_AND_GROW_NO_TRANSITION);
-
- Handle<String> name = stub_kind == KeyedStoreIC::STORE_NO_TRANSITION
- ? isolate()->factory()->KeyedStoreElementMonomorphic_string()
- : isolate()->factory()->KeyedStoreAndGrowElementMonomorphic_string();
-
- Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedStoreStubCompiler compiler(isolate(), strict_mode, grow_mode);
- Handle<Code> code = compiler.CompileStoreElement(receiver_map);
-
- Map::UpdateCodeCache(receiver_map, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
- return (strict_mode == kStrictMode)
- ? isolate_->builtins()->Builtins::StoreIC_Normal_Strict()
- : isolate_->builtins()->Builtins::StoreIC_Normal();
-}
-
-
-Handle<Code> StubCache::ComputeStoreGlobal(Handle<String> name,
- Handle<GlobalObject> receiver,
- Handle<JSGlobalPropertyCell> cell,
- StrictModeFlag strict_mode) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code = compiler.CompileStoreGlobal(receiver, cell, name);
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeStoreCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback,
- StrictModeFlag strict_mode) {
- ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, strict_mode, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code =
- compiler.CompileStoreCallback(name, receiver, holder, callback);
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeStoreViaSetter(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter,
- StrictModeFlag strict_mode) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, strict_mode, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code =
- compiler.CompileStoreViaSetter(name, receiver, holder, setter);
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeStoreInterceptor(Handle<String> name,
- Handle<JSObject> receiver,
- StrictModeFlag strict_mode) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, strict_mode, Code::INTERCEPTOR);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code = compiler.CompileStoreInterceptor(receiver, name);
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name,
- Handle<JSObject> receiver,
- int field_index,
- Handle<Map> transition,
- StrictModeFlag strict_mode) {
- Code::StubType type =
- (transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::KEYED_STORE_IC, strict_mode, type);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedStoreStubCompiler compiler(isolate(), strict_mode,
- DO_NOT_ALLOW_JSARRAY_GROWTH);
- Handle<Code> code =
- compiler.CompileStoreField(receiver, field_index, transition, name);
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-
-#define CALL_LOGGER_TAG(kind, type) \
- (kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
-
-Handle<Code> StubCache::ComputeCallConstant(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state,
- Handle<String> name,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function) {
- // Compute the check type and the map.
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
- isolate_, *object, cache_holder));
-
- // Compute check type based on receiver/holder.
- CheckType check = RECEIVER_MAP_CHECK;
- if (object->IsString()) {
- check = STRING_CHECK;
- } else if (object->IsSymbol()) {
- check = SYMBOL_CHECK;
- } else if (object->IsNumber()) {
- check = NUMBER_CHECK;
- } else if (object->IsBoolean()) {
- check = BOOLEAN_CHECK;
- }
-
- if (check != RECEIVER_MAP_CHECK &&
- !function->IsBuiltin() &&
- function->shared()->is_classic_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- return Handle<Code>::null();
- }
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::CONSTANT_FUNCTION, argc, cache_holder);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
- Handle<Code> code =
- compiler.CompileCallConstant(object, holder, name, check, function);
- code->set_check_type(check);
- ASSERT_EQ(flags, code->flags());
- PROFILE(isolate_,
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallField(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state,
- Handle<String> name,
- Handle<Object> object,
- Handle<JSObject> holder,
- PropertyIndex index) {
- // Compute the check type and the map.
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
- isolate_, *object, cache_holder));
-
- // TODO(1233596): We cannot do receiver map check for non-JS objects
- // because they may be represented as immediates without a
- // map. Instead, we check against the map in the holder.
- if (object->IsNumber() || object->IsSymbol() ||
- object->IsBoolean() || object->IsString()) {
- object = holder;
- }
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::FIELD, argc, cache_holder);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
- Handle<Code> code =
- compiler.CompileCallField(Handle<JSObject>::cast(object),
- holder, index, name);
- ASSERT_EQ(flags, code->flags());
- PROFILE(isolate_,
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallInterceptor(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state,
- Handle<String> name,
- Handle<Object> object,
- Handle<JSObject> holder) {
- // Compute the check type and the map.
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
- isolate_, *object, cache_holder));
-
- // TODO(1233596): We cannot do receiver map check for non-JS objects
- // because they may be represented as immediates without a
- // map. Instead, we check against the map in the holder.
- if (object->IsNumber() || object->IsSymbol() ||
- object->IsBoolean() || object->IsString()) {
- object = holder;
- }
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::INTERCEPTOR, argc, cache_holder);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
- Handle<Code> code =
- compiler.CompileCallInterceptor(Handle<JSObject>::cast(object),
- holder, name);
- ASSERT_EQ(flags, code->flags());
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallGlobal(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state,
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function) {
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*receiver, *holder);
- Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
- isolate_, *receiver, cache_holder));
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::NORMAL, argc, cache_holder);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
- Handle<Code> code =
- compiler.CompileCallGlobal(receiver, holder, cell, function, name);
- ASSERT_EQ(flags, code->flags());
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
- return code;
-}
-
-
-static void FillCache(Isolate* isolate, Handle<Code> code) {
- Handle<UnseededNumberDictionary> dictionary =
- UnseededNumberDictionary::Set(isolate->factory()->non_monomorphic_cache(),
- code->flags(),
- code);
- isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
-}
-
-
-Code* StubCache::FindCallInitialize(int argc,
- RelocInfo::Mode mode,
- Code::Kind kind) {
- Code::ExtraICState extra_state =
- CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
- CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
- Code::Flags flags =
- Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
- UnseededNumberDictionary* dictionary =
- isolate()->heap()->non_monomorphic_cache();
- int entry = dictionary->FindEntry(isolate(), flags);
- ASSERT(entry != -1);
- Object* code = dictionary->ValueAt(entry);
- // This might be called during the marking phase of the collector
- // hence the unchecked cast.
- return reinterpret_cast<Code*>(code);
-}
-
-
-Handle<Code> StubCache::ComputeCallInitialize(int argc,
- RelocInfo::Mode mode,
- Code::Kind kind) {
- Code::ExtraICState extra_state =
- CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
- CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
- Code::Flags flags =
- Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallInitialize(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallInitialize(int argc, RelocInfo::Mode mode) {
- return ComputeCallInitialize(argc, mode, Code::CALL_IC);
-}
-
-
-Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc) {
- return ComputeCallInitialize(argc, RelocInfo::CODE_TARGET,
- Code::KEYED_CALL_IC);
-}
-
-
-Handle<Code> StubCache::ComputeCallPreMonomorphic(
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- Code::Flags flags =
- Code::ComputeFlags(kind, PREMONOMORPHIC, extra_state, Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallPreMonomorphic(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallNormal(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state,
- bool has_qml_global_receiver) {
- Code::Flags flags =
- Code::ComputeFlags(kind, MONOMORPHIC, extra_state, Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallNormal(flags,
- has_qml_global_receiver);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallArguments(int argc) {
- Code::Flags flags =
- Code::ComputeFlags(Code::KEYED_CALL_IC, MEGAMORPHIC,
- Code::kNoExtraICState, Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallArguments(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallMegamorphic(
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- Code::Flags flags =
- Code::ComputeFlags(kind, MEGAMORPHIC, extra_state,
- Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallMegamorphic(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallMiss(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
- // and monomorphic stubs are not mixed up together in the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(kind, MONOMORPHIC_PROTOTYPE_FAILURE, extra_state,
- Code::NORMAL, argc, OWN_MAP);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallMiss(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeLoadElementPolymorphic(
- MapHandleList* receiver_maps) {
- Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
- Handle<PolymorphicCodeCache> cache =
- isolate_->factory()->polymorphic_code_cache();
- Handle<Object> probe = cache->Lookup(receiver_maps, flags);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CodeHandleList handlers(receiver_maps->length());
- KeyedLoadStubCompiler compiler(isolate_);
- compiler.CompileElementHandlers(receiver_maps, &handlers);
- Handle<Code> code = compiler.CompilePolymorphicIC(
- receiver_maps, &handlers, factory()->empty_string(),
- Code::NORMAL, ELEMENT);
-
- isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
-
- PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- Handle<String> name) {
- LoadStubCompiler ic_compiler(isolate_);
- Handle<Code> ic = ic_compiler.CompilePolymorphicIC(
- receiver_maps, handlers, name, Code::NORMAL, PROPERTY);
- return ic;
-}
-
-
-Handle<Code> StubCache::ComputeStoreElementPolymorphic(
- MapHandleList* receiver_maps,
- KeyedAccessGrowMode grow_mode,
- StrictModeFlag strict_mode) {
- Handle<PolymorphicCodeCache> cache =
- isolate_->factory()->polymorphic_code_cache();
- Code::ExtraICState extra_state = Code::ComputeExtraICState(grow_mode,
- strict_mode);
- Code::Flags flags =
- Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
- Handle<Object> probe = cache->Lookup(receiver_maps, flags);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedStoreStubCompiler compiler(isolate_, strict_mode, grow_mode);
- Handle<Code> code = compiler.CompileStoreElementPolymorphic(receiver_maps);
- PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
- return code;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Code> StubCache::ComputeCallDebugBreak(int argc,
- Code::Kind kind) {
- // Extra IC state is irrelevant for debug break ICs. They jump to
- // the actual call ic to carry out the work.
- Code::Flags flags =
- Code::ComputeFlags(kind, DEBUG_STUB, DEBUG_BREAK,
- Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallDebugBreak(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallDebugPrepareStepIn(int argc,
- Code::Kind kind) {
- // Extra IC state is irrelevant for debug break ICs. They jump to
- // the actual call ic to carry out the work.
- Code::Flags flags =
- Code::ComputeFlags(kind, DEBUG_STUB, DEBUG_PREPARE_STEP_IN,
- Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallDebugPrepareStepIn(flags);
- FillCache(isolate_, code);
- return code;
-}
-#endif
-
-
-void StubCache::Clear() {
- Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
- for (int i = 0; i < kPrimaryTableSize; i++) {
- primary_[i].key = heap()->empty_string();
- primary_[i].value = empty;
- }
- for (int j = 0; j < kSecondaryTableSize; j++) {
- secondary_[j].key = heap()->empty_string();
- secondary_[j].value = empty;
- }
-}
-
-
-void StubCache::CollectMatchingMaps(SmallMapList* types,
- String* name,
- Code::Flags flags,
- Handle<Context> native_context,
- Zone* zone) {
- for (int i = 0; i < kPrimaryTableSize; i++) {
- if (primary_[i].key == name) {
- Map* map = primary_[i].value->FindFirstMap();
- // Map can be NULL, if the stub is constant function call
- // with a primitive receiver.
- if (map == NULL) continue;
-
- int offset = PrimaryOffset(name, flags, map);
- if (entry(primary_, offset) == &primary_[i] &&
- !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
- types->Add(Handle<Map>(map), zone);
- }
- }
- }
-
- for (int i = 0; i < kSecondaryTableSize; i++) {
- if (secondary_[i].key == name) {
- Map* map = secondary_[i].value->FindFirstMap();
- // Map can be NULL, if the stub is constant function call
- // with a primitive receiver.
- if (map == NULL) continue;
-
- // Lookup in primary table and skip duplicates.
- int primary_offset = PrimaryOffset(name, flags, map);
- Entry* primary_entry = entry(primary_, primary_offset);
- if (primary_entry->key == name) {
- Map* primary_map = primary_entry->value->FindFirstMap();
- if (map == primary_map) continue;
- }
-
- // Lookup in secondary table and add matches.
- int offset = SecondaryOffset(name, flags, primary_offset);
- if (entry(secondary_, offset) == &secondary_[i] &&
- !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
- types->Add(Handle<Map>(map), zone);
- }
- }
- }
-}
-
-
-// ------------------------------------------------------------------------
-// StubCompiler implementation.
-
-
-RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
- JSObject* recv = JSObject::cast(args[0]);
- ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[1]);
- Address setter_address = v8::ToCData<Address>(callback->setter());
- v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
- ASSERT(fun != NULL);
- ASSERT(callback->IsCompatibleReceiver(recv));
- Handle<String> name = args.at<String>(2);
- Handle<Object> value = args.at<Object>(3);
- HandleScope scope(isolate);
- LOG(isolate, ApiNamedPropertyAccess("store", recv, *name));
- CustomArguments custom_args(isolate, callback->data(), recv, recv);
- v8::AccessorInfo info(custom_args.end());
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- ExternalCallbackScope call_scope(isolate, setter_address);
- fun(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value;
-}
-
-
-static const int kAccessorInfoOffsetInInterceptorArgs = 2;
-
-
-/**
- * Attempts to load a property with an interceptor (which must be present),
- * but doesn't search the prototype chain.
- *
- * Returns |Heap::no_interceptor_result_sentinel()| if interceptor doesn't
- * provide any value for the given name.
- */
-RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
- Handle<String> name_handle = args.at<String>(0);
- Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
- ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
- ASSERT(args[2]->IsJSObject()); // Receiver.
- ASSERT(args[3]->IsJSObject()); // Holder.
- ASSERT(args[5]->IsSmi()); // Isolate.
- ASSERT(args.length() == 6);
-
- Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetter getter =
- FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
- ASSERT(getter != NULL);
-
- {
- // Use the interceptor getter.
- v8::AccessorInfo info(args.arguments() -
- kAccessorInfoOffsetInInterceptorArgs);
- HandleScope scope(isolate);
- v8::Handle<v8::Value> r;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- r = getter(v8::Utils::ToLocal(name_handle), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!r.IsEmpty()) {
- Handle<Object> result = v8::Utils::OpenHandle(*r);
- result->VerifyApiCallResultType();
- return *v8::Utils::OpenHandle(*r);
- }
- }
-
- return isolate->heap()->no_interceptor_result_sentinel();
-}
-
-
-static MaybeObject* ThrowReferenceError(Isolate* isolate, String* name) {
- // If the load is non-contextual, just return the undefined result.
- // Note that both keyed and non-keyed loads may end up here, so we
- // can't use either LoadIC or KeyedLoadIC constructors.
- IC ic(IC::NO_EXTRA_FRAME, isolate);
- ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
- if (!ic.SlowIsUndeclaredGlobal()) return HEAP->undefined_value();
-
- // Throw a reference error.
- HandleScope scope(isolate);
- Handle<String> name_handle(name);
- Handle<Object> error =
- FACTORY->NewReferenceError("not_defined",
- HandleVector(&name_handle, 1));
- return isolate->Throw(*error);
-}
-
-
-static MaybeObject* LoadWithInterceptor(Arguments* args,
- PropertyAttributes* attrs) {
- Handle<String> name_handle = args->at<String>(0);
- Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(1);
- ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
- Handle<JSObject> receiver_handle = args->at<JSObject>(2);
- Handle<JSObject> holder_handle = args->at<JSObject>(3);
- ASSERT(args->length() == 6);
-
- Isolate* isolate = receiver_handle->GetIsolate();
-
- Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetter getter =
- FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
- ASSERT(getter != NULL);
-
- {
- // Use the interceptor getter.
- v8::AccessorInfo info(args->arguments() -
- kAccessorInfoOffsetInInterceptorArgs);
- HandleScope scope(isolate);
- v8::Handle<v8::Value> r;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- r = getter(v8::Utils::ToLocal(name_handle), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!r.IsEmpty()) {
- *attrs = NONE;
- Handle<Object> result = v8::Utils::OpenHandle(*r);
- result->VerifyApiCallResultType();
- return *result;
- }
- }
-
- MaybeObject* result = holder_handle->GetPropertyPostInterceptor(
- *receiver_handle,
- *name_handle,
- attrs);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return result;
-}
-
-
-/**
- * Loads a property with an interceptor performing post interceptor
- * lookup if interceptor failed.
- */
-RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad) {
- PropertyAttributes attr = NONE;
- Object* result;
- { MaybeObject* maybe_result = LoadWithInterceptor(&args, &attr);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // If the property is present, return it.
- if (attr != ABSENT) return result;
- return ThrowReferenceError(isolate, String::cast(args[0]));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall) {
- PropertyAttributes attr;
- MaybeObject* result = LoadWithInterceptor(&args, &attr);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- // This is call IC. In this case, we simply return the undefined result which
- // will lead to an exception when trying to invoke the result as a
- // function.
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
- ASSERT(args.length() == 4);
- JSObject* recv = JSObject::cast(args[0]);
- String* name = String::cast(args[1]);
- Object* value = args[2];
- ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode);
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
- ASSERT(recv->HasNamedInterceptor());
- PropertyAttributes attr = NONE;
- MaybeObject* result = recv->SetPropertyWithInterceptor(
- name, value, attr, strict_mode);
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) {
- JSObject* receiver = JSObject::cast(args[0]);
- ASSERT(args.smi_at(1) >= 0);
- uint32_t index = args.smi_at(1);
- return receiver->GetElementWithInterceptor(receiver, index);
-}
-
-
-Handle<Code> StubCompiler::CompileCallInitialize(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateInitialize(masm(), argc, extra_state);
- } else {
- KeyedCallIC::GenerateInitialize(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallInitialize");
- isolate()->counters()->call_initialize_stubs()->Increment();
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, *code));
- return code;
-}
-
-
-Handle<Code> StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- // The code of the PreMonomorphic stub is the same as the code
- // of the Initialized stub. They just differ on the code object flags.
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateInitialize(masm(), argc, extra_state);
- } else {
- KeyedCallIC::GenerateInitialize(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
- isolate()->counters()->call_premonomorphic_stubs()->Increment();
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, *code));
- return code;
-}
-
-
-Handle<Code> StubCompiler::CompileCallNormal(Code::Flags flags,
- bool has_qml_global_receiver) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- if (kind == Code::CALL_IC) {
- // Call normal is always with a explict receiver,
- // or with an implicit qml global receiver.
- ASSERT(!CallIC::Contextual::decode(
- Code::ExtractExtraICStateFromFlags(flags)) ||
- has_qml_global_receiver);
- CallIC::GenerateNormal(masm(), argc);
- } else {
- KeyedCallIC::GenerateNormal(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallNormal");
- isolate()->counters()->call_normal_stubs()->Increment();
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, *code));
- return code;
-}
-
-
-Handle<Code> StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateMegamorphic(masm(), argc, extra_state);
- } else {
- KeyedCallIC::GenerateMegamorphic(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMegamorphic");
- isolate()->counters()->call_megamorphic_stubs()->Increment();
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
- return code;
-}
-
-
-Handle<Code> StubCompiler::CompileCallArguments(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- KeyedCallIC::GenerateNonStrictArguments(masm(), argc);
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallArguments");
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
- CALL_MEGAMORPHIC_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
- return code;
-}
-
-
-Handle<Code> StubCompiler::CompileCallMiss(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateMiss(masm(), argc, extra_state);
- } else {
- KeyedCallIC::GenerateMiss(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMiss");
- isolate()->counters()->call_megamorphic_stubs()->Increment();
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MISS, *code));
- return code;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Code> StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
- Debug::GenerateCallICDebugBreak(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugBreak");
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
- CALL_DEBUG_BREAK_TAG),
- *code, code->arguments_count()));
- return code;
-}
-
-
-Handle<Code> StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
- // Use the same code for the the step in preparations as we do for the
- // miss case.
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- if (kind == Code::CALL_IC) {
- // For the debugger extra ic state is irrelevant.
- CallIC::GenerateMiss(masm(), argc, Code::kNoExtraICState);
- } else {
- KeyedCallIC::GenerateMiss(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
- PROFILE(isolate(),
- CodeCreateEvent(
- CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
- *code,
- code->arguments_count()));
- return code;
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-#undef CALL_LOGGER_TAG
-
-
-Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
- const char* name) {
- // Create code object in the heap.
- CodeDesc desc;
- masm_.GetCode(&desc);
- Handle<Code> code = factory()->NewCode(desc, flags, masm_.CodeObject());
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code_stubs) code->Disassemble(name);
-#endif
- return code;
-}
-
-
-Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
- Handle<String> name) {
- return (FLAG_print_code_stubs && !name.is_null())
- ? GetCodeWithFlags(flags, *name->ToCString())
- : GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
-}
-
-
-void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
- Handle<String> name,
- LookupResult* lookup) {
- holder->LocalLookupRealNamedProperty(*name, lookup);
- if (lookup->IsFound()) return;
- if (holder->GetPrototype()->IsNull()) return;
- holder->GetPrototype()->Lookup(*name, lookup);
-}
-
-
-#define __ ACCESS_MASM(masm())
-
-
-Register BaseLoadStubCompiler::HandlerFrontendHeader(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* miss) {
- // Check the prototype chain.
- return CheckPrototypes(object, object_reg, holder,
- scratch1(), scratch2(), scratch3(),
- name, miss, SKIP_RECEIVER);
-}
-
-
-Register BaseLoadStubCompiler::HandlerFrontend(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* success) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
-
- HandlerFrontendFooter(success, &miss);
- return reg;
-}
-
-
-Handle<Code> BaseLoadStubCompiler::CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- PropertyIndex field) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, receiver(), holder, name, &miss);
-
- LoadFieldStub stub(reg, field.is_inobject(holder), field.translate(holder));
- GenerateTailCall(stub.GetCode(isolate()));
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), kind());
-
- // Return the generated code.
- return GetCode(Code::HANDLER_FRAGMENT, Code::FIELD, name);
-}
-
-
-Handle<Code> BaseLoadStubCompiler::CompileLoadConstant(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Handle<JSFunction> value) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
- GenerateLoadConstant(value);
-
- // Return the generated code.
- return GetCode(Code::HANDLER_FRAGMENT, Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> BaseLoadStubCompiler::CompileLoadCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Handle<ExecutableAccessorInfo> callback) {
- Label success;
-
- Register reg = CallbackHandlerFrontend(
- object, receiver(), holder, name, &success, callback);
- __ bind(&success);
- GenerateLoadCallback(reg, callback);
-
- // Return the generated code.
- return GetCode(Code::HANDLER_FRAGMENT, Code::CALLBACKS, name);
-}
-
-
-Handle<Code> BaseLoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- Label success;
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- Register reg = HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
- // TODO(368): Compile in the whole chain: all the interceptors in
- // prototypes and ultimate answer.
- GenerateLoadInterceptor(reg, object, holder, &lookup, name);
-
- // Return the generated code.
- return GetCode(Code::HANDLER_FRAGMENT, Code::INTERCEPTOR, name);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadPostInterceptor(
- Register interceptor_reg,
- Handle<JSObject> interceptor_holder,
- Handle<String> name,
- LookupResult* lookup) {
- Label success;
- Handle<JSObject> holder(lookup->holder());
- if (lookup->IsField()) {
- PropertyIndex field = lookup->GetFieldIndex();
- if (interceptor_holder.is_identical_to(holder)) {
- LoadFieldStub stub(interceptor_reg,
- field.is_inobject(holder),
- field.translate(holder));
- GenerateTailCall(stub.GetCode(isolate()));
- } else {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- Register reg = HandlerFrontend(
- interceptor_holder, interceptor_reg, holder, name, &success);
- __ bind(&success);
- GenerateLoadField(reg, holder, field);
- }
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- Handle<ExecutableAccessorInfo> callback(
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
- ASSERT(callback->getter() != NULL);
-
- Register reg = CallbackHandlerFrontend(
- interceptor_holder, interceptor_reg, holder, name, &success, callback);
- __ bind(&success);
- GenerateLoadCallback(reg, callback);
- }
-}
-
-
-Handle<Code> BaseLoadStubCompiler::CompileMonomorphicIC(
- Handle<Map> receiver_map,
- Handle<Code> handler,
- Handle<String> name) {
- MapHandleList receiver_maps(1);
- receiver_maps.Add(receiver_map);
- CodeHandleList handlers(1);
- handlers.Add(handler);
- Code::StubType type = handler->type();
- return CompilePolymorphicIC(&receiver_maps, &handlers, name, type, PROPERTY);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Handle<JSFunction> getter) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
-
- __ bind(&success);
- GenerateLoadViaGetter(masm(), getter);
-
- // Return the generated code.
- return GetCode(Code::HANDLER_FRAGMENT, Code::CALLBACKS, name);
-}
-
-
-#undef __
-
-
-void LoadStubCompiler::JitEvent(Handle<String> name, Handle<Code> code) {
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-}
-
-
-void KeyedLoadStubCompiler::JitEvent(Handle<String> name, Handle<Code> code) {
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-}
-
-
-Handle<Code> BaseLoadStubCompiler::GetCode(Code::IcFragment fragment,
- Code::StubType type,
- Handle<String> name,
- InlineCacheState state) {
- Code::Flags flags = Code::ComputeFlags(kind(), state, fragment, type);
- Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
- JitEvent(name, code);
- return code;
-}
-
-
-void KeyedLoadStubCompiler::CompileElementHandlers(MapHandleList* receiver_maps,
- CodeHandleList* handlers) {
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map = receiver_maps->at(i);
- Handle<Code> cached_stub;
-
- if ((receiver_map->instance_type() & kNotStringTag) == 0) {
- cached_stub = isolate()->builtins()->KeyedLoadIC_String();
- } else {
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- ElementsKind elements_kind = receiver_map->elements_kind();
-
- if (IsFastElementsKind(elements_kind) ||
- IsExternalArrayElementsKind(elements_kind)) {
- cached_stub =
- KeyedLoadFastElementStub(is_js_array,
- elements_kind).GetCode(isolate());
- } else {
- ASSERT(elements_kind == DICTIONARY_ELEMENTS);
- cached_stub = KeyedLoadDictionaryElementStub().GetCode(isolate());
- }
- }
-
- handlers->Add(cached_stub);
- }
-}
-
-
-Handle<Code> StoreStubCompiler::GetCode(Code::StubType type,
- Handle<String> name) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, strict_mode_, type);
- Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
- return code;
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::GetCode(Code::StubType type,
- Handle<String> name,
- InlineCacheState state) {
- Code::ExtraICState extra_state =
- Code::ComputeExtraICState(grow_mode_, strict_mode_);
- Code::Flags flags =
- Code::ComputeFlags(Code::KEYED_STORE_IC, state, extra_state, type);
- Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
- return code;
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElementPolymorphic(
- MapHandleList* receiver_maps) {
- // Collect MONOMORPHIC stubs for all |receiver_maps|.
- CodeHandleList handlers(receiver_maps->length());
- MapHandleList transitioned_maps(receiver_maps->length());
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map(receiver_maps->at(i));
- Handle<Code> cached_stub;
- Handle<Map> transitioned_map =
- receiver_map->FindTransitionedMap(receiver_maps);
-
- // TODO(mvstanton): The code below is doing pessimistic elements
- // transitions. I would like to stop doing that and rely on Allocation Site
- // Tracking to do a better job of ensuring the data types are what they need
- // to be. Not all the elements are in place yet, pessimistic elements
- // transitions are still important for performance.
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- ElementsKind elements_kind = receiver_map->elements_kind();
- if (!transitioned_map.is_null()) {
- cached_stub = ElementsTransitionAndStoreStub(
- elements_kind,
- transitioned_map->elements_kind(),
- is_js_array,
- strict_mode_,
- grow_mode_).GetCode(isolate());
- } else {
- cached_stub = KeyedStoreElementStub(
- is_js_array,
- elements_kind,
- grow_mode_).GetCode(isolate());
- }
- ASSERT(!cached_stub.is_null());
- handlers.Add(cached_stub);
- transitioned_maps.Add(transitioned_map);
- }
- Handle<Code> code =
- CompileStorePolymorphic(receiver_maps, &handlers, &transitioned_maps);
- isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
- PROFILE(isolate(),
- CodeCreateEvent(Logger::KEYED_STORE_POLYMORPHIC_IC_TAG, *code, 0));
- return code;
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreDictionaryElement(
- MacroAssembler* masm) {
- KeyedStoreIC::GenerateSlow(masm);
-}
-
-
-CallStubCompiler::CallStubCompiler(Isolate* isolate,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state,
- InlineCacheHolderFlag cache_holder)
- : StubCompiler(isolate),
- arguments_(argc),
- kind_(kind),
- extra_state_(extra_state),
- cache_holder_(cache_holder) {
-}
-
-
-bool CallStubCompiler::HasCustomCallGenerator(Handle<JSFunction> function) {
- if (function->shared()->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function->shared()->builtin_function_id();
-#define CALL_GENERATOR_CASE(name) if (id == k##name) return true;
- CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
-#undef CALL_GENERATOR_CASE
- }
-
- CallOptimization optimization(function);
- return optimization.is_simple_api_call();
-}
-
-
-Handle<Code> CallStubCompiler::CompileCustomCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> fname) {
- ASSERT(HasCustomCallGenerator(function));
-
- if (function->shared()->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function->shared()->builtin_function_id();
-#define CALL_GENERATOR_CASE(name) \
- if (id == k##name) { \
- return CallStubCompiler::Compile##name##Call(object, \
- holder, \
- cell, \
- function, \
- fname); \
- }
- CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
-#undef CALL_GENERATOR_CASE
- }
- CallOptimization optimization(function);
-#ifndef _WIN32_WCE
- ASSERT(optimization.is_simple_api_call());
- return CompileFastApiCall(optimization,
- object,
- holder,
- cell,
- function,
- fname);
-#else
- // Disable optimization for wince as the calling convention looks different.
- return Handle<Code>::null();
-#endif // _WIN32_WCE
-}
-
-
-Handle<Code> CallStubCompiler::GetCode(Code::StubType type,
- Handle<String> name) {
- int argc = arguments_.immediate();
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
- extra_state_,
- type,
- argc,
- cache_holder_);
- return GetCodeWithFlags(flags, name);
-}
-
-
-Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
- Handle<String> function_name;
- if (function->shared()->name()->IsString()) {
- function_name = Handle<String>(String::cast(function->shared()->name()));
- }
- return GetCode(Code::CONSTANT_FUNCTION, function_name);
-}
-
-
-Handle<Code> ConstructStubCompiler::GetCode() {
- Code::Flags flags = Code::ComputeFlags(Code::STUB);
- Handle<Code> code = GetCodeWithFlags(flags, "ConstructStub");
- PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, "ConstructStub"));
- GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", *code));
- return code;
-}
-
-
-CallOptimization::CallOptimization(LookupResult* lookup) {
- if (lookup->IsFound() &&
- lookup->IsCacheable() &&
- lookup->type() == CONSTANT_FUNCTION) {
- // We only optimize constant function calls.
- Initialize(Handle<JSFunction>(lookup->GetConstantFunction()));
- } else {
- Initialize(Handle<JSFunction>::null());
- }
-}
-
-CallOptimization::CallOptimization(Handle<JSFunction> function) {
- Initialize(function);
-}
-
-
-int CallOptimization::GetPrototypeDepthOfExpectedType(
- Handle<JSObject> object,
- Handle<JSObject> holder) const {
- ASSERT(is_simple_api_call());
- if (expected_receiver_type_.is_null()) return 0;
- int depth = 0;
- while (!object.is_identical_to(holder)) {
- if (object->IsInstanceOf(*expected_receiver_type_)) return depth;
- object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
- if (!object->map()->is_hidden_prototype()) return kInvalidProtoDepth;
- ++depth;
- }
- if (holder->IsInstanceOf(*expected_receiver_type_)) return depth;
- return kInvalidProtoDepth;
-}
-
-
-void CallOptimization::Initialize(Handle<JSFunction> function) {
- constant_function_ = Handle<JSFunction>::null();
- is_simple_api_call_ = false;
- expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
- api_call_info_ = Handle<CallHandlerInfo>::null();
-
- if (function.is_null() || !function->is_compiled()) return;
-
- constant_function_ = function;
- AnalyzePossibleApiFunction(function);
-}
-
-
-void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
- if (!function->shared()->IsApiFunction()) return;
- Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
-
- // Require a C++ callback.
- if (info->call_code()->IsUndefined()) return;
- api_call_info_ =
- Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
-
- // Accept signatures that either have no restrictions at all or
- // only have restrictions on the receiver.
- if (!info->signature()->IsUndefined()) {
- Handle<SignatureInfo> signature =
- Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
- if (!signature->args()->IsUndefined()) return;
- if (!signature->receiver()->IsUndefined()) {
- expected_receiver_type_ =
- Handle<FunctionTemplateInfo>(
- FunctionTemplateInfo::cast(signature->receiver()));
- }
- }
-
- is_simple_api_call_ = true;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/stub-cache.h b/src/3rdparty/v8/src/stub-cache.h
deleted file mode 100644
index e8eb6cf..0000000
--- a/src/3rdparty/v8/src/stub-cache.h
+++ /dev/null
@@ -1,1014 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STUB_CACHE_H_
-#define V8_STUB_CACHE_H_
-
-#include "allocation.h"
-#include "arguments.h"
-#include "ic-inl.h"
-#include "macro-assembler.h"
-#include "objects.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// The stub cache is used for megamorphic calls and property accesses.
-// It maps (map, name, type)->Code*
-
-// The design of the table uses the inline cache stubs used for
-// mono-morphic calls. The beauty of this, we do not have to
-// invalidate the cache whenever a prototype map is changed. The stub
-// validates the map chain as in the mono-morphic case.
-
-class SmallMapList;
-class StubCache;
-
-
-class SCTableReference {
- public:
- Address address() const { return address_; }
-
- private:
- explicit SCTableReference(Address address) : address_(address) {}
-
- Address address_;
-
- friend class StubCache;
-};
-
-
-class StubCache {
- public:
- struct Entry {
- String* key;
- Code* value;
- Map* map;
- };
-
- void Initialize();
-
- Handle<JSObject> StubHolder(Handle<JSObject> receiver,
- Handle<JSObject> holder);
-
- Handle<Code> FindStub(Handle<String> name,
- Handle<JSObject> stub_holder,
- Code::Kind kind,
- Code::StubType type,
- Code::IcFragment fragment);
-
- Handle<Code> FindHandler(Handle<String> name,
- Handle<JSObject> stub_holder,
- Code::Kind kind,
- Code::StubType type);
-
- Handle<Code> ComputeMonomorphicIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<String> name);
- Handle<Code> ComputeKeyedMonomorphicIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<String> name);
-
- // Computes the right stub matching. Inserts the result in the
- // cache before returning. This might compile a stub if needed.
- Handle<Code> ComputeLoadNonexistent(Handle<String> name,
- Handle<JSObject> object);
-
- Handle<Code> ComputeLoadField(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex field_index);
-
- Handle<Code> ComputeLoadCallback(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback);
-
- Handle<Code> ComputeLoadViaGetter(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> getter);
-
- Handle<Code> ComputeLoadConstant(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value);
-
- Handle<Code> ComputeLoadInterceptor(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder);
-
- Handle<Code> ComputeLoadNormal(Handle<String> name,
- Handle<JSObject> object);
-
- Handle<Code> ComputeLoadGlobal(Handle<String> name,
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- bool is_dont_delete);
-
- // ---
-
- Handle<Code> ComputeKeyedLoadField(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex field_index);
-
- Handle<Code> ComputeKeyedLoadCallback(
- Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback);
-
- Handle<Code> ComputeKeyedLoadConstant(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value);
-
- Handle<Code> ComputeKeyedLoadInterceptor(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder);
-
- // ---
-
- Handle<Code> ComputeStoreField(Handle<String> name,
- Handle<JSObject> object,
- int field_index,
- Handle<Map> transition,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreNormal(StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreGlobal(Handle<String> name,
- Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreCallback(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreViaSetter(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> setter,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreInterceptor(Handle<String> name,
- Handle<JSObject> object,
- StrictModeFlag strict_mode);
-
- // ---
-
- Handle<Code> ComputeKeyedStoreField(Handle<String> name,
- Handle<JSObject> object,
- int field_index,
- Handle<Map> transition,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map);
-
- Handle<Code> ComputeKeyedStoreElement(Handle<Map> receiver_map,
- KeyedStoreIC::StubKind stub_kind,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode);
-
- // ---
-
- Handle<Code> ComputeCallField(int argc,
- Code::Kind,
- Code::ExtraICState extra_state,
- Handle<String> name,
- Handle<Object> object,
- Handle<JSObject> holder,
- PropertyIndex index);
-
- Handle<Code> ComputeCallConstant(int argc,
- Code::Kind,
- Code::ExtraICState extra_state,
- Handle<String> name,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function);
-
- Handle<Code> ComputeCallInterceptor(int argc,
- Code::Kind,
- Code::ExtraICState extra_state,
- Handle<String> name,
- Handle<Object> object,
- Handle<JSObject> holder);
-
- Handle<Code> ComputeCallGlobal(int argc,
- Code::Kind,
- Code::ExtraICState extra_state,
- Handle<String> name,
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function);
-
- // ---
-
- Handle<Code> ComputeCallInitialize(int argc, RelocInfo::Mode mode);
-
- Handle<Code> ComputeKeyedCallInitialize(int argc);
-
- Handle<Code> ComputeCallPreMonomorphic(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state);
-
- Handle<Code> ComputeCallNormal(int argc,
- Code::Kind kind,
- Code::ExtraICState state,
- bool has_qml_global_receiver);
-
- Handle<Code> ComputeCallArguments(int argc);
-
- Handle<Code> ComputeCallMegamorphic(int argc,
- Code::Kind kind,
- Code::ExtraICState state);
-
- Handle<Code> ComputeCallMiss(int argc,
- Code::Kind kind,
- Code::ExtraICState state);
-
- // ---
-
- Handle<Code> ComputeLoadElementPolymorphic(MapHandleList* receiver_maps);
- Handle<Code> ComputeStoreElementPolymorphic(MapHandleList* receiver_maps,
- KeyedAccessGrowMode grow_mode,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- Handle<String> name);
-
- // Finds the Code object stored in the Heap::non_monomorphic_cache().
- Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind);
-
- Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind);
-#endif
-
- // Update cache for entry hash(name, map).
- Code* Set(String* name, Map* map, Code* code);
-
- // Clear the lookup table (@ mark compact collection).
- void Clear();
-
- // Collect all maps that match the name and flags.
- void CollectMatchingMaps(SmallMapList* types,
- String* name,
- Code::Flags flags,
- Handle<Context> native_context,
- Zone* zone);
-
- // Generate code for probing the stub cache table.
- // Arguments extra, extra2 and extra3 may be used to pass additional scratch
- // registers. Set to no_reg if not needed.
- void GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2 = no_reg,
- Register extra3 = no_reg);
-
- enum Table {
- kPrimary,
- kSecondary
- };
-
-
- SCTableReference key_reference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->key));
- }
-
-
- SCTableReference map_reference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->map));
- }
-
-
- SCTableReference value_reference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->value));
- }
-
-
- StubCache::Entry* first_entry(StubCache::Table table) {
- switch (table) {
- case StubCache::kPrimary: return StubCache::primary_;
- case StubCache::kSecondary: return StubCache::secondary_;
- }
- UNREACHABLE();
- return NULL;
- }
-
- Isolate* isolate() { return isolate_; }
- Heap* heap() { return isolate()->heap(); }
- Factory* factory() { return isolate()->factory(); }
-
- private:
- StubCache(Isolate* isolate, Zone* zone);
-
- Handle<Code> ComputeCallInitialize(int argc,
- RelocInfo::Mode mode,
- Code::Kind kind);
-
- // The stub cache has a primary and secondary level. The two levels have
- // different hashing algorithms in order to avoid simultaneous collisions
- // in both caches. Unlike a probing strategy (quadratic or otherwise) the
- // update strategy on updates is fairly clear and simple: Any existing entry
- // in the primary cache is moved to the secondary cache, and secondary cache
- // entries are overwritten.
-
- // Hash algorithm for the primary table. This algorithm is replicated in
- // assembler for every architecture. Returns an index into the table that
- // is scaled by 1 << kHeapObjectTagSize.
- static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
- // This works well because the heap object tag size and the hash
- // shift are equal. Shifting down the length field to get the
- // hash code would effectively throw away two bits of the hash
- // code.
- STATIC_ASSERT(kHeapObjectTagSize == String::kHashShift);
- // Compute the hash of the name (use entire hash field).
- ASSERT(name->HasHashCode());
- uint32_t field = name->hash_field();
- // Using only the low bits in 64-bit mode is unlikely to increase the
- // risk of collision even if the heap is spread over an area larger than
- // 4Gb (and not at all if it isn't).
- uint32_t map_low32bits =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
- // We always set the in_loop bit to zero when generating the lookup code
- // so do it here too so the hash codes match.
- uint32_t iflags =
- (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
- // Base the offset on a simple combination of name, flags, and map.
- uint32_t key = (map_low32bits + field) ^ iflags;
- return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
- }
-
- // Hash algorithm for the secondary table. This algorithm is replicated in
- // assembler for every architecture. Returns an index into the table that
- // is scaled by 1 << kHeapObjectTagSize.
- static int SecondaryOffset(String* name, Code::Flags flags, int seed) {
- // Use the seed from the primary cache in the secondary cache.
- uint32_t string_low32bits =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
- // We always set the in_loop bit to zero when generating the lookup code
- // so do it here too so the hash codes match.
- uint32_t iflags =
- (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
- uint32_t key = (seed - string_low32bits) + iflags;
- return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
- }
-
- // Compute the entry for a given offset in exactly the same way as
- // we do in generated code. We generate an hash code that already
- // ends in String::kHashShift 0s. Then we multiply it so it is a multiple
- // of sizeof(Entry). This makes it easier to avoid making mistakes
- // in the hashed offset computations.
- static Entry* entry(Entry* table, int offset) {
- const int multiplier = sizeof(*table) >> String::kHashShift;
- return reinterpret_cast<Entry*>(
- reinterpret_cast<Address>(table) + offset * multiplier);
- }
-
- static const int kPrimaryTableBits = 11;
- static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
- static const int kSecondaryTableBits = 9;
- static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
-
- Entry primary_[kPrimaryTableSize];
- Entry secondary_[kSecondaryTableSize];
- Isolate* isolate_;
-
- friend class Isolate;
- friend class SCTableReference;
-
- DISALLOW_COPY_AND_ASSIGN(StubCache);
-};
-
-
-// ------------------------------------------------------------------------
-
-
-// Support functions for IC stubs for callbacks.
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty);
-
-
-// Support functions for IC stubs for interceptors.
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, CallInterceptorProperty);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor);
-
-
-enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
-enum IcCheckType { ELEMENT, PROPERTY };
-
-
-// The stub compilers compile stubs for the stub cache.
-class StubCompiler BASE_EMBEDDED {
- public:
- explicit StubCompiler(Isolate* isolate)
- : isolate_(isolate), masm_(isolate, NULL, 256), failure_(NULL) { }
-
- // Functions to compile either CallIC or KeyedCallIC. The specific kind
- // is extracted from the code flags.
- Handle<Code> CompileCallInitialize(Code::Flags flags);
- Handle<Code> CompileCallPreMonomorphic(Code::Flags flags);
- Handle<Code> CompileCallNormal(Code::Flags flags,
- bool has_qml_global_receiver);
- Handle<Code> CompileCallMegamorphic(Code::Flags flags);
- Handle<Code> CompileCallArguments(Code::Flags flags);
- Handle<Code> CompileCallMiss(Code::Flags flags);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Handle<Code> CompileCallDebugBreak(Code::Flags flags);
- Handle<Code> CompileCallDebugPrepareStepIn(Code::Flags flags);
-#endif
-
- // Static functions for generating parts of stubs.
- static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype);
-
- // Generates prototype loading code that uses the objects from the
- // context we were in when this function was called. If the context
- // has changed, a jump to miss is performed. This ties the generated
- // code to a particular context and so must not be used in cases
- // where the generated code is not allowed to have references to
- // objects from a context.
- static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss);
-
- static void GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- Handle<JSObject> holder,
- PropertyIndex index);
- static void DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index);
-
- static void GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label);
-
- static void GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label,
- bool support_wrappers);
-
- static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label);
-
- void GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name,
- Register receiver_reg,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label);
-
- static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
- static void GenerateStoreMiss(MacroAssembler* masm, Code::Kind kind);
-
- static void GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm);
-
- // Generates code that verifies that the property holder has not changed
- // (checking maps of objects in the prototype chain for fast and global
- // objects or doing negative lookup for slow objects, ensures that the
- // property cells for global objects are still empty) and checks that the map
- // of the holder has not changed. If necessary the function also generates
- // code for security check in case of global object holders. Helps to make
- // sure that the current IC is still valid.
- //
- // The scratch and holder registers are always clobbered, but the object
- // register is only clobbered if it the same as the holder register. The
- // function returns a register containing the holder - either object_reg or
- // holder_reg.
- // The function can optionally (when save_at_depth !=
- // kInvalidProtoDepth) save the object at the given depth by moving
- // it to [esp + kPointerSize].
- Register CheckPrototypes(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<String> name,
- Label* miss,
- PrototypeCheckType check = CHECK_ALL_MAPS) {
- return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
- scratch2, name, kInvalidProtoDepth, miss, check);
- }
-
- Register CheckPrototypes(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<String> name,
- int save_at_depth,
- Label* miss,
- PrototypeCheckType check = CHECK_ALL_MAPS);
-
-
- protected:
- Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
- Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<String> name);
-
- MacroAssembler* masm() { return &masm_; }
- void set_failure(Failure* failure) { failure_ = failure; }
-
- static void LookupPostInterceptor(Handle<JSObject> holder,
- Handle<String> name,
- LookupResult* lookup);
-
- Isolate* isolate() { return isolate_; }
- Heap* heap() { return isolate()->heap(); }
- Factory* factory() { return isolate()->factory(); }
-
- void GenerateTailCall(Handle<Code> code);
-
- private:
- Isolate* isolate_;
- MacroAssembler masm_;
- Failure* failure_;
-};
-
-
-enum FrontendCheckType { PERFORM_INITIAL_CHECKS, SKIP_INITIAL_CHECKS };
-
-
-class BaseLoadStubCompiler: public StubCompiler {
- public:
- BaseLoadStubCompiler(Isolate* isolate, Register* registers)
- : StubCompiler(isolate), registers_(registers) { }
- virtual ~BaseLoadStubCompiler() { }
-
- Handle<Code> CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- PropertyIndex index);
-
- Handle<Code> CompileLoadCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Handle<ExecutableAccessorInfo> callback);
-
- Handle<Code> CompileLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Handle<JSFunction> value);
-
- Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name);
-
- Handle<Code> CompileMonomorphicIC(Handle<Map> receiver_map,
- Handle<Code> handler,
- Handle<String> name);
- Handle<Code> CompilePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- Handle<String> name,
- Code::StubType type,
- IcCheckType check);
-
- protected:
- Register HandlerFrontendHeader(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* success);
- void HandlerFrontendFooter(Label* success, Label* miss);
-
- Register HandlerFrontend(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* success);
- Register CallbackHandlerFrontend(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* success,
- Handle<ExecutableAccessorInfo> callback);
- void NonexistentHandlerFrontend(Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Label* success,
- Handle<GlobalObject> global);
-
- void GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex index);
- void GenerateLoadConstant(Handle<JSFunction> value);
- void GenerateLoadCallback(Register reg,
- Handle<ExecutableAccessorInfo> callback);
- void GenerateLoadInterceptor(Register holder_reg,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- LookupResult* lookup,
- Handle<String> name);
- void GenerateLoadPostInterceptor(Register reg,
- Handle<JSObject> interceptor_holder,
- Handle<String> name,
- LookupResult* lookup);
-
- Handle<Code> GetCode(Code::IcFragment fragment,
- Code::StubType type,
- Handle<String> name,
- InlineCacheState state = MONOMORPHIC);
-
- Register receiver() { return registers_[0]; }
- Register name() { return registers_[1]; }
- Register scratch1() { return registers_[2]; }
- Register scratch2() { return registers_[3]; }
- Register scratch3() { return registers_[4]; }
- Register scratch4() { return registers_[5]; }
-
- private:
- virtual Code::Kind kind() = 0;
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) = 0;
- virtual void JitEvent(Handle<String> name, Handle<Code> code) = 0;
- virtual void GenerateNameCheck(Handle<String> name,
- Register name_reg,
- Label* miss) { }
- Register* registers_;
-};
-
-
-class LoadStubCompiler: public BaseLoadStubCompiler {
- public:
- explicit LoadStubCompiler(Isolate* isolate)
- : BaseLoadStubCompiler(isolate, registers()) { }
-
- Handle<Code> CompileLoadNonexistent(Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Handle<GlobalObject> global);
-
- static void GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<JSFunction> getter);
-
- Handle<Code> CompileLoadViaGetter(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Handle<JSFunction> getter);
-
- Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
- bool is_dont_delete);
-
- static Register receiver() { return registers()[0]; }
-
- private:
- static Register* registers();
- virtual Code::Kind kind() { return Code::LOAD_IC; }
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) {
- return code->ic_state() == MONOMORPHIC
- ? Logger::LOAD_IC_TAG : Logger::LOAD_POLYMORPHIC_IC_TAG;
- }
- virtual void JitEvent(Handle<String> name, Handle<Code> code);
-};
-
-
-class KeyedLoadStubCompiler: public BaseLoadStubCompiler {
- public:
- explicit KeyedLoadStubCompiler(Isolate* isolate)
- : BaseLoadStubCompiler(isolate, registers()) { }
-
- Handle<Code> CompileLoadElement(Handle<Map> receiver_map);
-
- void CompileElementHandlers(MapHandleList* receiver_maps,
- CodeHandleList* handlers);
-
- Handle<Code> CompileLoadElementPolymorphic(MapHandleList* receiver_maps);
-
- static void GenerateLoadDictionaryElement(MacroAssembler* masm);
-
- static Register receiver() { return registers()[0]; }
-
- private:
- static Register* registers();
- virtual Code::Kind kind() { return Code::KEYED_LOAD_IC; }
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) {
- return code->ic_state() == MONOMORPHIC
- ? Logger::KEYED_LOAD_IC_TAG : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
- }
- virtual void JitEvent(Handle<String> name, Handle<Code> code);
- virtual void GenerateNameCheck(Handle<String> name,
- Register name_reg,
- Label* miss);
-};
-
-
-class StoreStubCompiler: public StubCompiler {
- public:
- StoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode)
- : StubCompiler(isolate), strict_mode_(strict_mode) { }
-
-
- Handle<Code> CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name);
-
- Handle<Code> CompileStoreCallback(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback);
-
- static void GenerateStoreViaSetter(MacroAssembler* masm,
- Handle<JSFunction> setter);
-
- Handle<Code> CompileStoreViaSetter(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> setter);
-
- Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
- Handle<String> name);
-
- Handle<Code> CompileStoreGlobal(Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> holder,
- Handle<String> name);
-
- private:
- Handle<Code> GetCode(Code::StubType type, Handle<String> name);
-
- StrictModeFlag strict_mode_;
-};
-
-
-class KeyedStoreStubCompiler: public StubCompiler {
- public:
- KeyedStoreStubCompiler(Isolate* isolate,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode)
- : StubCompiler(isolate),
- strict_mode_(strict_mode),
- grow_mode_(grow_mode) { }
-
- Handle<Code> CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name);
-
- Handle<Code> CompileStoreElement(Handle<Map> receiver_map);
-
- Handle<Code> CompileStorePolymorphic(MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps);
-
- Handle<Code> CompileStoreElementPolymorphic(MapHandleList* receiver_maps);
-
- static void GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array,
- ElementsKind element_kind,
- KeyedAccessGrowMode grow_mode);
-
- static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode);
-
- static void GenerateStoreExternalArray(MacroAssembler* masm,
- ElementsKind elements_kind);
-
- static void GenerateStoreDictionaryElement(MacroAssembler* masm);
-
- private:
- Handle<Code> GetCode(Code::StubType type,
- Handle<String> name,
- InlineCacheState state = MONOMORPHIC);
-
- StrictModeFlag strict_mode_;
- KeyedAccessGrowMode grow_mode_;
-};
-
-
-// Subset of FUNCTIONS_WITH_ID_LIST with custom constant/global call
-// IC stubs.
-#define CUSTOM_CALL_IC_GENERATORS(V) \
- V(ArrayPush) \
- V(ArrayPop) \
- V(StringCharCodeAt) \
- V(StringCharAt) \
- V(StringFromCharCode) \
- V(MathFloor) \
- V(MathAbs)
-
-
-class CallOptimization;
-
-class CallStubCompiler: public StubCompiler {
- public:
- CallStubCompiler(Isolate* isolate,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state,
- InlineCacheHolderFlag cache_holder);
-
- Handle<Code> CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<String> name);
-
- void CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Label* success);
-
- void CompileHandlerBackend(Handle<JSFunction> function);
-
- Handle<Code> CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Handle<JSFunction> function);
-
- Handle<Code> CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name);
-
- Handle<Code> CompileCallGlobal(Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name);
-
- static bool HasCustomCallGenerator(Handle<JSFunction> function);
-
- private:
- // Compiles a custom call constant/global IC. For constant calls cell is
- // NULL. Returns an empty handle if there is no custom call code for the
- // given function.
- Handle<Code> CompileCustomCall(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name);
-
-#define DECLARE_CALL_GENERATOR(name) \
- Handle<Code> Compile##name##Call(Handle<Object> object, \
- Handle<JSObject> holder, \
- Handle<JSGlobalPropertyCell> cell, \
- Handle<JSFunction> function, \
- Handle<String> fname);
- CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
-#undef DECLARE_CALL_GENERATOR
-
- Handle<Code> CompileFastApiCall(const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name);
-
- Handle<Code> GetCode(Code::StubType type, Handle<String> name);
- Handle<Code> GetCode(Handle<JSFunction> function);
-
- const ParameterCount& arguments() { return arguments_; }
-
- void GenerateNameCheck(Handle<String> name, Label* miss);
-
- void GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* miss);
-
- // Generates code to load the function from the cell checking that
- // it still contains the same function.
- void GenerateLoadFunctionFromCell(Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Label* miss);
-
- // Generates a jump to CallIC miss stub.
- void GenerateMissBranch();
-
- const ParameterCount arguments_;
- const Code::Kind kind_;
- const Code::ExtraICState extra_state_;
- const InlineCacheHolderFlag cache_holder_;
-};
-
-
-class ConstructStubCompiler: public StubCompiler {
- public:
- explicit ConstructStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
-
- Handle<Code> CompileConstructStub(Handle<JSFunction> function);
-
- private:
- Handle<Code> GetCode();
-};
-
-
-// Holds information about possible function call optimizations.
-class CallOptimization BASE_EMBEDDED {
- public:
- explicit CallOptimization(LookupResult* lookup);
-
- explicit CallOptimization(Handle<JSFunction> function);
-
- bool is_constant_call() const {
- return !constant_function_.is_null();
- }
-
- Handle<JSFunction> constant_function() const {
- ASSERT(is_constant_call());
- return constant_function_;
- }
-
- bool is_simple_api_call() const {
- return is_simple_api_call_;
- }
-
- Handle<FunctionTemplateInfo> expected_receiver_type() const {
- ASSERT(is_simple_api_call());
- return expected_receiver_type_;
- }
-
- Handle<CallHandlerInfo> api_call_info() const {
- ASSERT(is_simple_api_call());
- return api_call_info_;
- }
-
- // Returns the depth of the object having the expected type in the
- // prototype chain between the two arguments.
- int GetPrototypeDepthOfExpectedType(Handle<JSObject> object,
- Handle<JSObject> holder) const;
-
- private:
- void Initialize(Handle<JSFunction> function);
-
- // Determines whether the given function can be called using the
- // fast api call builtin.
- void AnalyzePossibleApiFunction(Handle<JSFunction> function);
-
- Handle<JSFunction> constant_function_;
- bool is_simple_api_call_;
- Handle<FunctionTemplateInfo> expected_receiver_type_;
- Handle<CallHandlerInfo> api_call_info_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_STUB_CACHE_H_
diff --git a/src/3rdparty/v8/src/sweeper-thread.cc b/src/3rdparty/v8/src/sweeper-thread.cc
deleted file mode 100644
index f08fcfb..0000000
--- a/src/3rdparty/v8/src/sweeper-thread.cc
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "sweeper-thread.h"
-
-#include "v8.h"
-
-#include "isolate.h"
-#include "v8threads.h"
-
-namespace v8 {
-namespace internal {
-
-static const int kSweeperThreadStackSize = 64 * KB;
-
-SweeperThread::SweeperThread(Isolate* isolate)
- : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)),
- isolate_(isolate),
- heap_(isolate->heap()),
- collector_(heap_->mark_compact_collector()),
- start_sweeping_semaphore_(OS::CreateSemaphore(0)),
- end_sweeping_semaphore_(OS::CreateSemaphore(0)),
- stop_semaphore_(OS::CreateSemaphore(0)),
- free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
- free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)),
- private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
- private_free_list_old_pointer_space_(
- heap_->paged_space(OLD_POINTER_SPACE)) {
- NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
-}
-
-
-void SweeperThread::Run() {
- Isolate::SetIsolateThreadLocals(isolate_, NULL);
- while (true) {
- start_sweeping_semaphore_->Wait();
-
- if (Acquire_Load(&stop_thread_)) {
- stop_semaphore_->Signal();
- return;
- }
-
- collector_->SweepInParallel(heap_->old_data_space(),
- &private_free_list_old_data_space_,
- &free_list_old_data_space_);
- collector_->SweepInParallel(heap_->old_pointer_space(),
- &private_free_list_old_pointer_space_,
- &free_list_old_pointer_space_);
- end_sweeping_semaphore_->Signal();
- }
-}
-
-
-intptr_t SweeperThread::StealMemory(PagedSpace* space) {
- if (space->identity() == OLD_POINTER_SPACE) {
- return space->free_list()->Concatenate(&free_list_old_pointer_space_);
- } else if (space->identity() == OLD_DATA_SPACE) {
- return space->free_list()->Concatenate(&free_list_old_data_space_);
- }
- return 0;
-}
-
-
-void SweeperThread::Stop() {
- Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
- start_sweeping_semaphore_->Signal();
- stop_semaphore_->Wait();
-}
-
-
-void SweeperThread::StartSweeping() {
- start_sweeping_semaphore_->Signal();
-}
-
-
-void SweeperThread::WaitForSweeperThread() {
- end_sweeping_semaphore_->Wait();
-}
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/sweeper-thread.h b/src/3rdparty/v8/src/sweeper-thread.h
deleted file mode 100644
index a170982..0000000
--- a/src/3rdparty/v8/src/sweeper-thread.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_SWEEPER_THREAD_H_
-#define V8_SWEEPER_THREAD_H_
-
-#include "atomicops.h"
-#include "flags.h"
-#include "platform.h"
-#include "v8utils.h"
-
-#include "spaces.h"
-
-#include "heap.h"
-
-namespace v8 {
-namespace internal {
-
-class SweeperThread : public Thread {
- public:
- explicit SweeperThread(Isolate* isolate);
-
- void Run();
- void Stop();
- void StartSweeping();
- void WaitForSweeperThread();
- intptr_t StealMemory(PagedSpace* space);
-
- ~SweeperThread() {
- delete start_sweeping_semaphore_;
- delete end_sweeping_semaphore_;
- delete stop_semaphore_;
- }
-
- private:
- Isolate* isolate_;
- Heap* heap_;
- MarkCompactCollector* collector_;
- Semaphore* start_sweeping_semaphore_;
- Semaphore* end_sweeping_semaphore_;
- Semaphore* stop_semaphore_;
- FreeList free_list_old_data_space_;
- FreeList free_list_old_pointer_space_;
- FreeList private_free_list_old_data_space_;
- FreeList private_free_list_old_pointer_space_;
- volatile AtomicWord stop_thread_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_SWEEPER_THREAD_H_
diff --git a/src/3rdparty/v8/src/symbol.js b/src/3rdparty/v8/src/symbol.js
deleted file mode 100644
index b7f9dc9..0000000
--- a/src/3rdparty/v8/src/symbol.js
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"use strict";
-
-var $Symbol = function() { return %CreateSymbol() }
-global.Symbol = $Symbol
-
-// Symbols only have a toString method and no prototype.
-var SymbolDelegate = {
- __proto__: null,
- toString: $Object.prototype.toString
-}
-
-$Object.freeze(SymbolDelegate)
diff --git a/src/3rdparty/v8/src/third_party/valgrind/valgrind.h b/src/3rdparty/v8/src/third_party/valgrind/valgrind.h
deleted file mode 100644
index 7a3ee2f..0000000
--- a/src/3rdparty/v8/src/third_party/valgrind/valgrind.h
+++ /dev/null
@@ -1,4033 +0,0 @@
-/* -*- c -*-
- ----------------------------------------------------------------
-
- Notice that the following BSD-style license applies to this one
- file (valgrind.h) only. The rest of Valgrind is licensed under the
- terms of the GNU General Public License, version 2, unless
- otherwise indicated. See the COPYING file in the source
- distribution for details.
-
- ----------------------------------------------------------------
-
- This file is part of Valgrind, a dynamic binary instrumentation
- framework.
-
- Copyright (C) 2000-2010 Julian Seward. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- 2. The origin of this software must not be misrepresented; you must
- not claim that you wrote the original software. If you use this
- software in a product, an acknowledgment in the product
- documentation would be appreciated but is not required.
-
- 3. Altered source versions must be plainly marked as such, and must
- not be misrepresented as being the original software.
-
- 4. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
- GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- ----------------------------------------------------------------
-
- Notice that the above BSD-style license applies to this one file
- (valgrind.h) only. The entire rest of Valgrind is licensed under
- the terms of the GNU General Public License, version 2. See the
- COPYING file in the source distribution for details.
-
- ----------------------------------------------------------------
-*/
-
-
-/* This file is for inclusion into client (your!) code.
-
- You can use these macros to manipulate and query Valgrind's
- execution inside your own programs.
-
- The resulting executables will still run without Valgrind, just a
- little bit more slowly than they otherwise would, but otherwise
- unchanged. When not running on valgrind, each client request
- consumes very few (eg. 7) instructions, so the resulting performance
- loss is negligible unless you plan to execute client requests
- millions of times per second. Nevertheless, if that is still a
- problem, you can compile with the NVALGRIND symbol defined (gcc
- -DNVALGRIND) so that client requests are not even compiled in. */
-
-#ifndef __VALGRIND_H
-#define __VALGRIND_H
-
-
-/* ------------------------------------------------------------------ */
-/* VERSION NUMBER OF VALGRIND */
-/* ------------------------------------------------------------------ */
-
-/* Specify Valgrind's version number, so that user code can
- conditionally compile based on our version number. Note that these
- were introduced at version 3.6 and so do not exist in version 3.5
- or earlier. The recommended way to use them to check for "version
- X.Y or later" is (eg)
-
-#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
- && (__VALGRIND_MAJOR__ > 3 \
- || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
-*/
-#define __VALGRIND_MAJOR__ 3
-#define __VALGRIND_MINOR__ 6
-
-
-#include <stdarg.h>
-#include <stdint.h>
-
-/* Nb: this file might be included in a file compiled with -ansi. So
- we can't use C++ style "//" comments nor the "asm" keyword (instead
- use "__asm__"). */
-
-/* Derive some tags indicating what the target platform is. Note
- that in this file we're using the compiler's CPP symbols for
- identifying architectures, which are different to the ones we use
- within the rest of Valgrind. Note, __powerpc__ is active for both
- 32 and 64-bit PPC, whereas __powerpc64__ is only active for the
- latter (on Linux, that is).
-
- Misc note: how to find out what's predefined in gcc by default:
- gcc -Wp,-dM somefile.c
-*/
-#undef PLAT_x86_darwin
-#undef PLAT_amd64_darwin
-#undef PLAT_x86_win32
-#undef PLAT_x86_linux
-#undef PLAT_amd64_linux
-#undef PLAT_ppc32_linux
-#undef PLAT_ppc64_linux
-#undef PLAT_arm_linux
-#undef PLAT_s390x_linux
-
-
-#if defined(__APPLE__) && defined(__i386__)
-# define PLAT_x86_darwin 1
-#elif defined(__APPLE__) && defined(__x86_64__)
-# define PLAT_amd64_darwin 1
-#elif defined(__MINGW32__) || defined(__CYGWIN32__) \
- || (defined(_WIN32) && defined(_M_IX86))
-# define PLAT_x86_win32 1
-#elif defined(__linux__) && defined(__i386__)
-# define PLAT_x86_linux 1
-#elif defined(__linux__) && defined(__x86_64__)
-# define PLAT_amd64_linux 1
-#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
-# define PLAT_ppc32_linux 1
-#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__)
-# define PLAT_ppc64_linux 1
-#elif defined(__linux__) && defined(__arm__)
-# define PLAT_arm_linux 1
-#elif defined(__linux__) && defined(__s390__) && defined(__s390x__)
-# define PLAT_s390x_linux 1
-#else
-/* If we're not compiling for our target platform, don't generate
- any inline asms. */
-# if !defined(NVALGRIND)
-# define NVALGRIND 1
-# endif
-#endif
-
-
-/* ------------------------------------------------------------------ */
-/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
-/* in here of use to end-users -- skip to the next section. */
-/* ------------------------------------------------------------------ */
-
-/*
- * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client
- * request. Accepts both pointers and integers as arguments.
- *
- * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
- * client request and whose value equals the client request result. Accepts
- * both pointers and integers as arguments.
- */
-
-#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \
- _zzq_request, _zzq_arg1, _zzq_arg2, \
- _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \
- (_zzq_request), (_zzq_arg1), (_zzq_arg2), \
- (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); }
-
-#if defined(NVALGRIND)
-
-/* Define NVALGRIND to completely remove the Valgrind magic sequence
- from the compiled code (analogous to NDEBUG's effects on
- assert()) */
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- (_zzq_default)
-
-#else /* ! NVALGRIND */
-
-/* The following defines the magic code sequences which the JITter
- spots and handles magically. Don't look too closely at them as
- they will rot your brain.
-
- The assembly code sequences for all architectures is in this one
- file. This is because this file must be stand-alone, and we don't
- want to have multiple files.
-
- For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
- value gets put in the return slot, so that everything works when
- this is executed not under Valgrind. Args are passed in a memory
- block, and so there's no intrinsic limit to the number that could
- be passed, but it's currently five.
-
- The macro args are:
- _zzq_rlval result lvalue
- _zzq_default default value (result returned when running on real CPU)
- _zzq_request request code
- _zzq_arg1..5 request params
-
- The other two macros are used to support function wrapping, and are
- a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
- guest's NRADDR pseudo-register and whatever other information is
- needed to safely run the call original from the wrapper: on
- ppc64-linux, the R2 value at the divert point is also needed. This
- information is abstracted into a user-visible type, OrigFn.
-
- VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
- guest, but guarantees that the branch instruction will not be
- redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
- branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
- complete inline asm, since it needs to be combined with more magic
- inline asm stuff to be useful.
-*/
-
-/* ------------------------- x86-{linux,darwin} ---------------- */
-
-#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
- || (defined(PLAT_x86_win32) && defined(__GNUC__))
-
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "roll $3, %%edi ; roll $13, %%edi\n\t" \
- "roll $29, %%edi ; roll $19, %%edi\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- __extension__ \
- ({volatile unsigned int _zzq_args[6]; \
- volatile unsigned int _zzq_result; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %EDX = client_request ( %EAX ) */ \
- "xchgl %%ebx,%%ebx" \
- : "=d" (_zzq_result) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
- : "cc", "memory" \
- ); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %EAX = guest_NRADDR */ \
- "xchgl %%ecx,%%ecx" \
- : "=a" (__addr) \
- : \
- : "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_CALL_NOREDIR_EAX \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* call-noredir *%EAX */ \
- "xchgl %%edx,%%edx\n\t"
-#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */
-
-/* ------------------------- x86-Win32 ------------------------- */
-
-#if defined(PLAT_x86_win32) && !defined(__GNUC__)
-
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
-
-#if defined(_MSC_VER)
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- __asm rol edi, 3 __asm rol edi, 13 \
- __asm rol edi, 29 __asm rol edi, 19
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \
- (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \
- (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \
- (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5))
-
-static __inline uintptr_t
-valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request,
- uintptr_t _zzq_arg1, uintptr_t _zzq_arg2,
- uintptr_t _zzq_arg3, uintptr_t _zzq_arg4,
- uintptr_t _zzq_arg5)
-{
- volatile uintptr_t _zzq_args[6];
- volatile unsigned int _zzq_result;
- _zzq_args[0] = (uintptr_t)(_zzq_request);
- _zzq_args[1] = (uintptr_t)(_zzq_arg1);
- _zzq_args[2] = (uintptr_t)(_zzq_arg2);
- _zzq_args[3] = (uintptr_t)(_zzq_arg3);
- _zzq_args[4] = (uintptr_t)(_zzq_arg4);
- _zzq_args[5] = (uintptr_t)(_zzq_arg5);
- __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default
- __SPECIAL_INSTRUCTION_PREAMBLE
- /* %EDX = client_request ( %EAX ) */
- __asm xchg ebx,ebx
- __asm mov _zzq_result, edx
- }
- return _zzq_result;
-}
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned int __addr; \
- __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %EAX = guest_NRADDR */ \
- __asm xchg ecx,ecx \
- __asm mov __addr, eax \
- } \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_CALL_NOREDIR_EAX ERROR
-
-#else
-#error Unsupported compiler.
-#endif
-
-#endif /* PLAT_x86_win32 */
-
-/* ------------------------ amd64-{linux,darwin} --------------- */
-
-#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
-
-typedef
- struct {
- uint64_t nraddr; /* where's the code? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
- "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- __extension__ \
- ({ volatile uint64_t _zzq_args[6]; \
- volatile uint64_t _zzq_result; \
- _zzq_args[0] = (uint64_t)(_zzq_request); \
- _zzq_args[1] = (uint64_t)(_zzq_arg1); \
- _zzq_args[2] = (uint64_t)(_zzq_arg2); \
- _zzq_args[3] = (uint64_t)(_zzq_arg3); \
- _zzq_args[4] = (uint64_t)(_zzq_arg4); \
- _zzq_args[5] = (uint64_t)(_zzq_arg5); \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %RDX = client_request ( %RAX ) */ \
- "xchgq %%rbx,%%rbx" \
- : "=d" (_zzq_result) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
- : "cc", "memory" \
- ); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile uint64_t __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %RAX = guest_NRADDR */ \
- "xchgq %%rcx,%%rcx" \
- : "=a" (__addr) \
- : \
- : "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_CALL_NOREDIR_RAX \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* call-noredir *%RAX */ \
- "xchgq %%rdx,%%rdx\n\t"
-#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
-
-/* ------------------------ ppc32-linux ------------------------ */
-
-#if defined(PLAT_ppc32_linux)
-
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
- "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- __extension__ \
- ({ unsigned int _zzq_args[6]; \
- unsigned int _zzq_result; \
- unsigned int* _zzq_ptr; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 3,%1\n\t" /*default*/ \
- "mr 4,%2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" /*result*/ \
- : "=b" (_zzq_result) \
- : "b" (_zzq_default), "b" (_zzq_ptr) \
- : "cc", "memory", "r3", "r4"); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "cc", "memory", "r3" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
-#endif /* PLAT_ppc32_linux */
-
-/* ------------------------ ppc64-linux ------------------------ */
-
-#if defined(PLAT_ppc64_linux)
-
-typedef
- struct {
- uint64_t nraddr; /* where's the code? */
- uint64_t r2; /* what tocptr do we need? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
- "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- __extension__ \
- ({ uint64_t _zzq_args[6]; \
- register uint64_t _zzq_result __asm__("r3"); \
- register uint64_t* _zzq_ptr __asm__("r4"); \
- _zzq_args[0] = (uint64_t)(_zzq_request); \
- _zzq_args[1] = (uint64_t)(_zzq_arg1); \
- _zzq_args[2] = (uint64_t)(_zzq_arg2); \
- _zzq_args[3] = (uint64_t)(_zzq_arg3); \
- _zzq_args[4] = (uint64_t)(_zzq_arg4); \
- _zzq_args[5] = (uint64_t)(_zzq_arg5); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1" \
- : "=r" (_zzq_result) \
- : "0" (_zzq_default), "r" (_zzq_ptr) \
- : "cc", "memory"); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register uint64_t __addr __asm__("r3"); \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2" \
- : "=r" (__addr) \
- : \
- : "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR_GPR2 */ \
- "or 4,4,4" \
- : "=r" (__addr) \
- : \
- : "cc", "memory" \
- ); \
- _zzq_orig->r2 = __addr; \
- }
-
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
-
-#endif /* PLAT_ppc64_linux */
-
-/* ------------------------- arm-linux ------------------------- */
-
-#if defined(PLAT_arm_linux)
-
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
- "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- __extension__ \
- ({volatile unsigned int _zzq_args[6]; \
- volatile unsigned int _zzq_result; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- __asm__ volatile("mov r3, %1\n\t" /*default*/ \
- "mov r4, %2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* R3 = client_request ( R4 ) */ \
- "orr r10, r10, r10\n\t" \
- "mov %0, r3" /*result*/ \
- : "=r" (_zzq_result) \
- : "r" (_zzq_default), "r" (&_zzq_args[0]) \
- : "cc","memory", "r3", "r4"); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* R3 = guest_NRADDR */ \
- "orr r11, r11, r11\n\t" \
- "mov %0, r3" \
- : "=r" (__addr) \
- : \
- : "cc", "memory", "r3" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R4 */ \
- "orr r12, r12, r12\n\t"
-
-#endif /* PLAT_arm_linux */
-
-/* ------------------------ s390x-linux ------------------------ */
-
-#if defined(PLAT_s390x_linux)
-
-typedef
- struct {
- uint64_t nraddr; /* where's the code? */
- }
- OrigFn;
-
-/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific
- * code. This detection is implemented in platform specific toIR.c
- * (e.g. VEX/priv/guest_s390_decoder.c).
- */
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "lr 15,15\n\t" \
- "lr 1,1\n\t" \
- "lr 2,2\n\t" \
- "lr 3,3\n\t"
-
-#define __CLIENT_REQUEST_CODE "lr 2,2\n\t"
-#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t"
-#define __CALL_NO_REDIR_CODE "lr 4,4\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- __extension__ \
- ({volatile uint64_t _zzq_args[6]; \
- volatile uint64_t _zzq_result; \
- _zzq_args[0] = (uint64_t)(_zzq_request); \
- _zzq_args[1] = (uint64_t)(_zzq_arg1); \
- _zzq_args[2] = (uint64_t)(_zzq_arg2); \
- _zzq_args[3] = (uint64_t)(_zzq_arg3); \
- _zzq_args[4] = (uint64_t)(_zzq_arg4); \
- _zzq_args[5] = (uint64_t)(_zzq_arg5); \
- __asm__ volatile(/* r2 = args */ \
- "lgr 2,%1\n\t" \
- /* r3 = default */ \
- "lgr 3,%2\n\t" \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- __CLIENT_REQUEST_CODE \
- /* results = r3 */ \
- "lgr %0, 3\n\t" \
- : "=d" (_zzq_result) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
- : "cc", "2", "3", "memory" \
- ); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile uint64_t __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- __GET_NR_CONTEXT_CODE \
- "lgr %0, 3\n\t" \
- : "=a" (__addr) \
- : \
- : "cc", "3", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_CALL_NOREDIR_R1 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- __CALL_NO_REDIR_CODE
-
-#endif /* PLAT_s390x_linux */
-
-/* Insert assembly code for other platforms here... */
-
-#endif /* NVALGRIND */
-
-
-/* ------------------------------------------------------------------ */
-/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
-/* ugly. It's the least-worst tradeoff I can think of. */
-/* ------------------------------------------------------------------ */
-
-/* This section defines magic (a.k.a appalling-hack) macros for doing
- guaranteed-no-redirection macros, so as to get from function
- wrappers to the functions they are wrapping. The whole point is to
- construct standard call sequences, but to do the call itself with a
- special no-redirect call pseudo-instruction that the JIT
- understands and handles specially. This section is long and
- repetitious, and I can't see a way to make it shorter.
-
- The naming scheme is as follows:
-
- CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
-
- 'W' stands for "word" and 'v' for "void". Hence there are
- different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
- and for each, the possibility of returning a word-typed result, or
- no result.
-*/
-
-/* Use these to write the name of your wrapper. NOTE: duplicates
- VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
-
-/* Use an extra level of macroisation so as to ensure the soname/fnname
- args are fully macro-expanded before pasting them together. */
-#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
-
-#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
- VG_CONCAT4(_vgwZU_,soname,_,fnname)
-
-#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
- VG_CONCAT4(_vgwZZ_,soname,_,fnname)
-
-/* Use this macro from within a wrapper function to collect the
- context (address and possibly other info) of the original function.
- Once you have that you can then use it in one of the CALL_FN_
- macros. The type of the argument _lval is OrigFn. */
-#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
-
-/* Derivatives of the main macros below, for calling functions
- returning void. */
-
-#define CALL_FN_v_v(fnptr) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_v(_junk,fnptr); } while (0)
-
-#define CALL_FN_v_W(fnptr, arg1) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
-
-#define CALL_FN_v_WW(fnptr, arg1,arg2) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
-
-#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
-
-#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
-
-#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
-
-#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
-
-#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
-
-/* ------------------------- x86-{linux,darwin} ---------------- */
-
-#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin)
-
-/* These regs are trashed by the hidden call. No need to mention eax
- as gcc can already see that, plus causes gcc to bomb. */
-#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
-
-/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
- long) == 4. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- "subl $12, %%esp\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $16, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- "subl $8, %%esp\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $16, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- "subl $4, %%esp\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $16, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $16, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- "subl $12, %%esp\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $32, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- "subl $8, %%esp\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $32, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- "subl $4, %%esp\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $32, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $32, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- "subl $12, %%esp\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $48, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- "subl $8, %%esp\n\t" \
- "pushl 40(%%eax)\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $48, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- "subl $4, %%esp\n\t" \
- "pushl 44(%%eax)\n\t" \
- "pushl 40(%%eax)\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $48, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- "pushl 48(%%eax)\n\t" \
- "pushl 44(%%eax)\n\t" \
- "pushl 40(%%eax)\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- "addl $48, %%esp\n" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_x86_linux || PLAT_x86_darwin */
-
-/* ------------------------ amd64-{linux,darwin} --------------- */
-
-#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
-
-/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
- "rdi", "r8", "r9", "r10", "r11"
-
-/* This is all pretty complex. It's so as to make stack unwinding
- work reliably. See bug 243270. The basic problem is the sub and
- add of 128 of %rsp in all of the following macros. If gcc believes
- the CFA is in %rsp, then unwinding may fail, because what's at the
- CFA is not what gcc "expected" when it constructs the CFIs for the
- places where the macros are instantiated.
-
- But we can't just add a CFI annotation to increase the CFA offset
- by 128, to match the sub of 128 from %rsp, because we don't know
- whether gcc has chosen %rsp as the CFA at that point, or whether it
- has chosen some other register (eg, %rbp). In the latter case,
- adding a CFI annotation to change the CFA offset is simply wrong.
-
- So the solution is to get hold of the CFA using
- __builtin_dwarf_cfa(), put it in a known register, and add a
- CFI annotation to say what the register is. We choose %rbp for
- this (perhaps perversely), because:
-
- (1) %rbp is already subject to unwinding. If a new register was
- chosen then the unwinder would have to unwind it in all stack
- traces, which is expensive, and
-
- (2) %rbp is already subject to precise exception updates in the
- JIT. If a new register was chosen, we'd have to have precise
- exceptions for it too, which reduces performance of the
- generated code.
-
- However .. one extra complication. We can't just whack the result
- of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
- list of trashed registers at the end of the inline assembly
- fragments; gcc won't allow %rbp to appear in that list. Hence
- instead we need to stash %rbp in %r15 for the duration of the asm,
- and say that %r15 is trashed instead. gcc seems happy to go with
- that.
-
- Oh .. and this all needs to be conditionalised so that it is
- unchanged from before this commit, when compiled with older gccs
- that don't support __builtin_dwarf_cfa. Furthermore, since
- this header file is freestanding, it has to be independent of
- config.h, and so the following conditionalisation cannot depend on
- configure time checks.
-
- Although it's not clear from
- 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
- this expression excludes Darwin.
- .cfi directives in Darwin assembly appear to be completely
- different and I haven't investigated how they work.
-
- For even more entertainment value, note we have to use the
- completely undocumented __builtin_dwarf_cfa(), which appears to
- really compute the CFA, whereas __builtin_frame_address(0) claims
- to but actually doesn't. See
- https://bugs.kde.org/show_bug.cgi?id=243270#c47
-*/
-#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
-# define __FRAME_POINTER \
- ,"r"(__builtin_dwarf_cfa())
-# define VALGRIND_CFI_PROLOGUE \
- "movq %%rbp, %%r15\n\t" \
- "movq %2, %%rbp\n\t" \
- ".cfi_remember_state\n\t" \
- ".cfi_def_cfa rbp, 0\n\t"
-# define VALGRIND_CFI_EPILOGUE \
- "movq %%r15, %%rbp\n\t" \
- ".cfi_restore_state\n\t"
-#else
-# define __FRAME_POINTER
-# define VALGRIND_CFI_PROLOGUE
-# define VALGRIND_CFI_EPILOGUE
-#endif
-
-
-/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
- long) == 8. */
-
-/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
- macros. In order not to trash the stack redzone, we need to drop
- %rsp by 128 before the hidden call, and restore afterwards. The
- nastyness is that it is only by luck that the stack still appears
- to be unwindable during the hidden call - since then the behaviour
- of any routine using this macro does not match what the CFI data
- says. Sigh.
-
- Why is this important? Imagine that a wrapper has a stack
- allocated local, and passes to the hidden call, a pointer to it.
- Because gcc does not know about the hidden call, it may allocate
- that local in the redzone. Unfortunately the hidden call may then
- trash it before it comes to use it. So we must step clear of the
- redzone, for the duration of the hidden call, to make it safe.
-
- Probably the same problem afflicts the other redzone-style ABIs too
- (ppc64-linux); but for those, the stack is
- self describing (none of this CFI nonsense) so at least messing
- with the stack pointer doesn't give a danger of non-unwindable
- stack. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $136,%%rsp\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $8, %%rsp\n" \
- "addq $136,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $16, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $136,%%rsp\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $24, %%rsp\n" \
- "addq $136,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $32, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $136,%%rsp\n\t" \
- "pushq 88(%%rax)\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $40, %%rsp\n" \
- "addq $136,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "pushq 96(%%rax)\n\t" \
- "pushq 88(%%rax)\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $48, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
-
-/* ------------------------ ppc32-linux ------------------------ */
-
-#if defined(PLAT_ppc32_linux)
-
-/* This is useful for finding out about the on-stack stuff:
-
- extern int f9 ( int,int,int,int,int,int,int,int,int );
- extern int f10 ( int,int,int,int,int,int,int,int,int,int );
- extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
- extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
-
- int g9 ( void ) {
- return f9(11,22,33,44,55,66,77,88,99);
- }
- int g10 ( void ) {
- return f10(11,22,33,44,55,66,77,88,99,110);
- }
- int g11 ( void ) {
- return f11(11,22,33,44,55,66,77,88,99,110,121);
- }
- int g12 ( void ) {
- return f12(11,22,33,44,55,66,77,88,99,110,121,132);
- }
-*/
-
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
-
-/* These CALL_FN_ macros assume that on ppc32-linux,
- sizeof(unsigned long) == 4. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "addi 1,1,-16\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,16\n\t" \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "addi 1,1,-16\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,12(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,16\n\t" \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "addi 1,1,-32\n\t" \
- /* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,16(1)\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,12(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,32\n\t" \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- _argvec[12] = (unsigned long)arg12; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "addi 1,1,-32\n\t" \
- /* arg12 */ \
- "lwz 3,48(11)\n\t" \
- "stw 3,20(1)\n\t" \
- /* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,16(1)\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,12(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,32\n\t" \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_ppc32_linux */
-
-/* ------------------------ ppc64-linux ------------------------ */
-
-#if defined(PLAT_ppc64_linux)
-
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
-
-/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
- long) == 8. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+0]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+1]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+2]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+3]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+4]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+5]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+6]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+7]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+8]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+9]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-128\n\t" /* expand stack frame */ \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,128" /* restore frame */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+10]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-128\n\t" /* expand stack frame */ \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,128" /* restore frame */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+11]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-144\n\t" /* expand stack frame */ \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,144" /* restore frame */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+12]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- _argvec[2+12] = (unsigned long)arg12; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-144\n\t" /* expand stack frame */ \
- /* arg12 */ \
- "ld 3,96(11)\n\t" \
- "std 3,136(1)\n\t" \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,144" /* restore frame */ \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_ppc64_linux */
-
-/* ------------------------- arm-linux ------------------------- */
-
-#if defined(PLAT_arm_linux)
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4","r14"
-
-/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
- long) == 4. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "mov %0, r0\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "mov %0, r0\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "mov %0, r0\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "mov %0, r0\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- "ldr r0, [%1, #20] \n\t" \
- "push {r0} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #4 \n\t" \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "push {r0, r1} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #8 \n\t" \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "ldr r2, [%1, #28] \n\t" \
- "push {r0, r1, r2} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #12 \n\t" \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "ldr r2, [%1, #28] \n\t" \
- "ldr r3, [%1, #32] \n\t" \
- "push {r0, r1, r2, r3} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #16 \n\t" \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "ldr r2, [%1, #28] \n\t" \
- "ldr r3, [%1, #32] \n\t" \
- "ldr r4, [%1, #36] \n\t" \
- "push {r0, r1, r2, r3, r4} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #20 \n\t" \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- "ldr r0, [%1, #40] \n\t" \
- "push {r0} \n\t" \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "ldr r2, [%1, #28] \n\t" \
- "ldr r3, [%1, #32] \n\t" \
- "ldr r4, [%1, #36] \n\t" \
- "push {r0, r1, r2, r3, r4} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #24 \n\t" \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- "ldr r0, [%1, #40] \n\t" \
- "ldr r1, [%1, #44] \n\t" \
- "push {r0, r1} \n\t" \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "ldr r2, [%1, #28] \n\t" \
- "ldr r3, [%1, #32] \n\t" \
- "ldr r4, [%1, #36] \n\t" \
- "push {r0, r1, r2, r3, r4} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #28 \n\t" \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory",__CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- "ldr r0, [%1, #40] \n\t" \
- "ldr r1, [%1, #44] \n\t" \
- "ldr r2, [%1, #48] \n\t" \
- "push {r0, r1, r2} \n\t" \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "ldr r2, [%1, #28] \n\t" \
- "ldr r3, [%1, #32] \n\t" \
- "ldr r4, [%1, #36] \n\t" \
- "push {r0, r1, r2, r3, r4} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #32 \n\t" \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_arm_linux */
-
-/* ------------------------- s390x-linux ------------------------- */
-
-#if defined(PLAT_s390x_linux)
-
-/* Similar workaround as amd64 (see above), but we use r11 as frame
- pointer and save the old r11 in r7. r11 might be used for
- argvec, therefore we copy argvec in r1 since r1 is clobbered
- after the call anyway. */
-#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
-# define __FRAME_POINTER \
- ,"d"(__builtin_dwarf_cfa())
-# define VALGRIND_CFI_PROLOGUE \
- ".cfi_remember_state\n\t" \
- "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \
- "lgr 7,11\n\t" \
- "lgr 11,%2\n\t" \
- ".cfi_def_cfa r11, 0\n\t"
-# define VALGRIND_CFI_EPILOGUE \
- "lgr 11, 7\n\t" \
- ".cfi_restore_state\n\t"
-#else
-# define __FRAME_POINTER
-# define VALGRIND_CFI_PROLOGUE \
- "lgr 1,%1\n\t"
-# define VALGRIND_CFI_EPILOGUE
-#endif
-
-
-
-
-/* These regs are trashed by the hidden call. Note that we overwrite
- r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
- function a proper return address. All others are ABI defined call
- clobbers. */
-#define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \
- "f0","f1","f2","f3","f4","f5","f6","f7"
-
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 1, 0(1)\n\t" /* target->r1 */ \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-/* The call abi has the arguments in r2-r6 and stack */
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-168\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,168\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-176\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,176\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-184\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,184\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-192\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,192\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9, arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-200\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "mvc 192(8,15), 80(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,200\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9, arg10, arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-208\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "mvc 192(8,15), 80(1)\n\t" \
- "mvc 200(8,15), 88(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,208\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- _argvec[12] = (unsigned long)arg12; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-216\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "mvc 192(8,15), 80(1)\n\t" \
- "mvc 200(8,15), 88(1)\n\t" \
- "mvc 208(8,15), 96(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,216\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-
-#endif /* PLAT_s390x_linux */
-
-
-/* ------------------------------------------------------------------ */
-/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
-/* */
-/* ------------------------------------------------------------------ */
-
-/* Some request codes. There are many more of these, but most are not
- exposed to end-user view. These are the public ones, all of the
- form 0x1000 + small_number.
-
- Core ones are in the range 0x00000000--0x0000ffff. The non-public
- ones start at 0x2000.
-*/
-
-/* These macros are used by tools -- they must be public, but don't
- embed them into other programs. */
-#define VG_USERREQ_TOOL_BASE(a,b) \
- ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
-#define VG_IS_TOOL_USERREQ(a, b, v) \
- (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
-
-/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
- This enum comprises an ABI exported by Valgrind to programs
- which use client requests. DO NOT CHANGE THE ORDER OF THESE
- ENTRIES, NOR DELETE ANY -- add new ones at the end. */
-typedef
- enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
- VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
-
- /* These allow any function to be called from the simulated
- CPU but run on the real CPU. Nb: the first arg passed to
- the function is always the ThreadId of the running
- thread! So CLIENT_CALL0 actually requires a 1 arg
- function, etc. */
- VG_USERREQ__CLIENT_CALL0 = 0x1101,
- VG_USERREQ__CLIENT_CALL1 = 0x1102,
- VG_USERREQ__CLIENT_CALL2 = 0x1103,
- VG_USERREQ__CLIENT_CALL3 = 0x1104,
-
- /* Can be useful in regression testing suites -- eg. can
- send Valgrind's output to /dev/null and still count
- errors. */
- VG_USERREQ__COUNT_ERRORS = 0x1201,
-
- /* Allows a string (gdb monitor command) to be passed to the tool
- Used for interaction with vgdb/gdb */
- VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202,
-
- /* These are useful and can be interpreted by any tool that
- tracks malloc() et al, by using vg_replace_malloc.c. */
- VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
- VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b,
- VG_USERREQ__FREELIKE_BLOCK = 0x1302,
- /* Memory pool support. */
- VG_USERREQ__CREATE_MEMPOOL = 0x1303,
- VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
- VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
- VG_USERREQ__MEMPOOL_FREE = 0x1306,
- VG_USERREQ__MEMPOOL_TRIM = 0x1307,
- VG_USERREQ__MOVE_MEMPOOL = 0x1308,
- VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
- VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
-
- /* Allow printfs to valgrind log. */
- /* The first two pass the va_list argument by value, which
- assumes it is the same size as or smaller than a UWord,
- which generally isn't the case. Hence are deprecated.
- The second two pass the vargs by reference and so are
- immune to this problem. */
- /* both :: char* fmt, va_list vargs (DEPRECATED) */
- VG_USERREQ__PRINTF = 0x1401,
- VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
- /* both :: char* fmt, va_list* vargs */
- VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
- VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
-
- /* Stack support. */
- VG_USERREQ__STACK_REGISTER = 0x1501,
- VG_USERREQ__STACK_DEREGISTER = 0x1502,
- VG_USERREQ__STACK_CHANGE = 0x1503,
-
- /* Wine support */
- VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
-
- /* Querying of debug info. */
- VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701
- } Vg_ClientRequest;
-
-#if !defined(__GNUC__)
-# define __extension__ /* */
-#endif
-
-
-/* Returns the number of Valgrinds this code is running under. That
- is, 0 if running natively, 1 if running under Valgrind, 2 if
- running under Valgrind which is running under another Valgrind,
- etc. */
-#define RUNNING_ON_VALGRIND \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \
- VG_USERREQ__RUNNING_ON_VALGRIND, \
- 0, 0, 0, 0, 0) \
-
-
-/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
- _qzz_len - 1]. Useful if you are debugging a JITter or some such,
- since it provides a way to make sure valgrind will retranslate the
- invalidated area. Returns no value. */
-#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__DISCARD_TRANSLATIONS, \
- _qzz_addr, _qzz_len, 0, 0, 0)
-
-
-/* These requests are for getting Valgrind itself to print something.
- Possibly with a backtrace. This is a really ugly hack. The return value
- is the number of characters printed, excluding the "**<pid>** " part at the
- start and the backtrace (if present). */
-
-#if defined(__GNUC__) || defined(__INTEL_COMPILER)
-/* Modern GCC will optimize the static routine out if unused,
- and unused attribute will shut down warnings about it. */
-static int VALGRIND_PRINTF(const char *format, ...)
- __attribute__((format(__printf__, 1, 2), __unused__));
-#endif
-static int
-#if defined(_MSC_VER)
-__inline
-#endif
-VALGRIND_PRINTF(const char *format, ...)
-{
-#if defined(NVALGRIND)
- return 0;
-#else /* NVALGRIND */
-#if defined(_MSC_VER)
- uintptr_t _qzz_res;
-#else
- unsigned long _qzz_res;
-#endif
- va_list vargs;
- va_start(vargs, format);
-#if defined(_MSC_VER)
- _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
- VG_USERREQ__PRINTF_VALIST_BY_REF,
- (uintptr_t)format,
- (uintptr_t)&vargs,
- 0, 0, 0);
-#else
- _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
- VG_USERREQ__PRINTF_VALIST_BY_REF,
- (unsigned long)format,
- (unsigned long)&vargs,
- 0, 0, 0);
-#endif
- va_end(vargs);
- return (int)_qzz_res;
-#endif /* NVALGRIND */
-}
-
-#if defined(__GNUC__) || defined(__INTEL_COMPILER)
-static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
- __attribute__((format(__printf__, 1, 2), __unused__));
-#endif
-static int
-#if defined(_MSC_VER)
-__inline
-#endif
-VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
-{
-#if defined(NVALGRIND)
- return 0;
-#else /* NVALGRIND */
-#if defined(_MSC_VER)
- uintptr_t _qzz_res;
-#else
- unsigned long _qzz_res;
-#endif
- va_list vargs;
- va_start(vargs, format);
-#if defined(_MSC_VER)
- _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
- VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
- (uintptr_t)format,
- (uintptr_t)&vargs,
- 0, 0, 0);
-#else
- _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
- VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
- (unsigned long)format,
- (unsigned long)&vargs,
- 0, 0, 0);
-#endif
- va_end(vargs);
- return (int)_qzz_res;
-#endif /* NVALGRIND */
-}
-
-
-/* These requests allow control to move from the simulated CPU to the
- real CPU, calling an arbitary function.
-
- Note that the current ThreadId is inserted as the first argument.
- So this call:
-
- VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
-
- requires f to have this signature:
-
- Word f(Word tid, Word arg1, Word arg2)
-
- where "Word" is a word-sized type.
-
- Note that these client requests are not entirely reliable. For example,
- if you call a function with them that subsequently calls printf(),
- there's a high chance Valgrind will crash. Generally, your prospects of
- these working are made higher if the called function does not refer to
- any global variables, and does not refer to any libc or other functions
- (printf et al). Any kind of entanglement with libc or dynamic linking is
- likely to have a bad outcome, for tricky reasons which we've grappled
- with a lot in the past.
-*/
-#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__CLIENT_CALL0, \
- _qyy_fn, \
- 0, 0, 0, 0)
-
-#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__CLIENT_CALL1, \
- _qyy_fn, \
- _qyy_arg1, 0, 0, 0)
-
-#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__CLIENT_CALL2, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, 0, 0)
-
-#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__CLIENT_CALL3, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, \
- _qyy_arg3, 0)
-
-
-/* Counts the number of errors that have been recorded by a tool. Nb:
- the tool must record the errors with VG_(maybe_record_error)() or
- VG_(unique_error)() for them to be counted. */
-#define VALGRIND_COUNT_ERRORS \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- 0 /* default return */, \
- VG_USERREQ__COUNT_ERRORS, \
- 0, 0, 0, 0, 0)
-
-/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
- when heap blocks are allocated in order to give accurate results. This
- happens automatically for the standard allocator functions such as
- malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
- delete[], etc.
-
- But if your program uses a custom allocator, this doesn't automatically
- happen, and Valgrind will not do as well. For example, if you allocate
- superblocks with mmap() and then allocates chunks of the superblocks, all
- Valgrind's observations will be at the mmap() level and it won't know that
- the chunks should be considered separate entities. In Memcheck's case,
- that means you probably won't get heap block overrun detection (because
- there won't be redzones marked as unaddressable) and you definitely won't
- get any leak detection.
-
- The following client requests allow a custom allocator to be annotated so
- that it can be handled accurately by Valgrind.
-
- VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
- by a malloc()-like function. For Memcheck (an illustrative case), this
- does two things:
-
- - It records that the block has been allocated. This means any addresses
- within the block mentioned in error messages will be
- identified as belonging to the block. It also means that if the block
- isn't freed it will be detected by the leak checker.
-
- - It marks the block as being addressable and undefined (if 'is_zeroed' is
- not set), or addressable and defined (if 'is_zeroed' is set). This
- controls how accesses to the block by the program are handled.
-
- 'addr' is the start of the usable block (ie. after any
- redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
- can apply redzones -- these are blocks of padding at the start and end of
- each block. Adding redzones is recommended as it makes it much more likely
- Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
- zeroed (or filled with another predictable value), as is the case for
- calloc().
-
- VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
- heap block -- that will be used by the client program -- is allocated.
- It's best to put it at the outermost level of the allocator if possible;
- for example, if you have a function my_alloc() which calls
- internal_alloc(), and the client request is put inside internal_alloc(),
- stack traces relating to the heap block will contain entries for both
- my_alloc() and internal_alloc(), which is probably not what you want.
-
- For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
- custom blocks from within a heap block, B, that has been allocated with
- malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
- -- the custom blocks will take precedence.
-
- VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
- Memcheck, it does two things:
-
- - It records that the block has been deallocated. This assumes that the
- block was annotated as having been allocated via
- VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
-
- - It marks the block as being unaddressable.
-
- VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
- heap block is deallocated.
-
- VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For
- Memcheck, it does four things:
-
- - It records that the size of a block has been changed. This assumes that
- the block was annotated as having been allocated via
- VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
-
- - If the block shrunk, it marks the freed memory as being unaddressable.
-
- - If the block grew, it marks the new area as undefined and defines a red
- zone past the end of the new block.
-
- - The V-bits of the overlap between the old and the new block are preserved.
-
- VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block
- and before deallocation of the old block.
-
- In many cases, these three client requests will not be enough to get your
- allocator working well with Memcheck. More specifically, if your allocator
- writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
- will be necessary to mark the memory as addressable just before the zeroing
- occurs, otherwise you'll get a lot of invalid write errors. For example,
- you'll need to do this if your allocator recycles freed blocks, but it
- zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
- Alternatively, if your allocator reuses freed blocks for allocator-internal
- data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
-
- Really, what's happening is a blurring of the lines between the client
- program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
- memory should be considered unaddressable to the client program, but the
- allocator knows more than the rest of the client program and so may be able
- to safely access it. Extra client requests are necessary for Valgrind to
- understand the distinction between the allocator and the rest of the
- program.
-
- Ignored if addr == 0.
-*/
-#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__MALLOCLIKE_BLOCK, \
- addr, sizeB, rzB, is_zeroed, 0)
-
-/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
- Ignored if addr == 0.
-*/
-#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__RESIZEINPLACE_BLOCK, \
- addr, oldSizeB, newSizeB, rzB, 0)
-
-/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
- Ignored if addr == 0.
-*/
-#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__FREELIKE_BLOCK, \
- addr, rzB, 0, 0, 0)
-
-/* Create a memory pool. */
-#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__CREATE_MEMPOOL, \
- pool, rzB, is_zeroed, 0, 0)
-
-/* Destroy a memory pool. */
-#define VALGRIND_DESTROY_MEMPOOL(pool) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__DESTROY_MEMPOOL, \
- pool, 0, 0, 0, 0)
-
-/* Associate a piece of memory with a memory pool. */
-#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__MEMPOOL_ALLOC, \
- pool, addr, size, 0, 0)
-
-/* Disassociate a piece of memory from a memory pool. */
-#define VALGRIND_MEMPOOL_FREE(pool, addr) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__MEMPOOL_FREE, \
- pool, addr, 0, 0, 0)
-
-/* Disassociate any pieces outside a particular range. */
-#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__MEMPOOL_TRIM, \
- pool, addr, size, 0, 0)
-
-/* Resize and/or move a piece associated with a memory pool. */
-#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__MOVE_MEMPOOL, \
- poolA, poolB, 0, 0, 0)
-
-/* Resize and/or move a piece associated with a memory pool. */
-#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__MEMPOOL_CHANGE, \
- pool, addrA, addrB, size, 0)
-
-/* Return 1 if a mempool exists, else 0. */
-#define VALGRIND_MEMPOOL_EXISTS(pool) \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__MEMPOOL_EXISTS, \
- pool, 0, 0, 0, 0)
-
-/* Mark a piece of memory as being a stack. Returns a stack id. */
-#define VALGRIND_STACK_REGISTER(start, end) \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__STACK_REGISTER, \
- start, end, 0, 0, 0)
-
-/* Unmark the piece of memory associated with a stack id as being a
- stack. */
-#define VALGRIND_STACK_DEREGISTER(id) \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__STACK_DEREGISTER, \
- id, 0, 0, 0, 0)
-
-/* Change the start and end address of the stack id. */
-#define VALGRIND_STACK_CHANGE(id, start, end) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__STACK_CHANGE, \
- id, start, end, 0, 0)
-
-/* Load PDB debug info for Wine PE image_map. */
-#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__LOAD_PDB_DEBUGINFO, \
- fd, ptr, total_size, delta, 0)
-
-/* Map a code address to a source file name and line number. buf64
- must point to a 64-byte buffer in the caller's address space. The
- result will be dumped in there and is guaranteed to be zero
- terminated. If no info is found, the first byte is set to zero. */
-#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__MAP_IP_TO_SRCLOC, \
- addr, buf64, 0, 0, 0)
-
-
-#undef PLAT_x86_darwin
-#undef PLAT_amd64_darwin
-#undef PLAT_x86_win32
-#undef PLAT_x86_linux
-#undef PLAT_amd64_linux
-#undef PLAT_ppc32_linux
-#undef PLAT_ppc64_linux
-#undef PLAT_arm_linux
-#undef PLAT_s390x_linux
-
-#endif /* __VALGRIND_H */
diff --git a/src/3rdparty/v8/src/token.cc b/src/3rdparty/v8/src/token.cc
deleted file mode 100644
index 7ba7ed3..0000000
--- a/src/3rdparty/v8/src/token.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "token.h"
-
-namespace v8 {
-namespace internal {
-
-#define T(name, string, precedence) #name,
-const char* const Token::name_[NUM_TOKENS] = {
- TOKEN_LIST(T, T)
-};
-#undef T
-
-
-#define T(name, string, precedence) string,
-const char* const Token::string_[NUM_TOKENS] = {
- TOKEN_LIST(T, T)
-};
-#undef T
-
-
-#define T(name, string, precedence) precedence,
-const int8_t Token::precedence_[NUM_TOKENS] = {
- TOKEN_LIST(T, T)
-};
-#undef T
-
-
-#define KT(a, b, c) 'T',
-#define KK(a, b, c) 'K',
-const char Token::token_type[] = {
- TOKEN_LIST(KT, KK)
-};
-#undef KT
-#undef KK
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/token.h b/src/3rdparty/v8/src/token.h
deleted file mode 100644
index 4078a15..0000000
--- a/src/3rdparty/v8/src/token.h
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_TOKEN_H_
-#define V8_TOKEN_H_
-
-#include "checks.h"
-
-namespace v8 {
-namespace internal {
-
-// TOKEN_LIST takes a list of 3 macros M, all of which satisfy the
-// same signature M(name, string, precedence), where name is the
-// symbolic token name, string is the corresponding syntactic symbol
-// (or NULL, for literals), and precedence is the precedence (or 0).
-// The parameters are invoked for token categories as follows:
-//
-// T: Non-keyword tokens
-// K: Keyword tokens
-
-// IGNORE_TOKEN is a convenience macro that can be supplied as
-// an argument (at any position) for a TOKEN_LIST call. It does
-// nothing with tokens belonging to the respective category.
-
-#define IGNORE_TOKEN(name, string, precedence)
-
-#define TOKEN_LIST(T, K) \
- /* End of source indicator. */ \
- T(EOS, "EOS", 0) \
- \
- /* Punctuators (ECMA-262, section 7.7, page 15). */ \
- T(LPAREN, "(", 0) \
- T(RPAREN, ")", 0) \
- T(LBRACK, "[", 0) \
- T(RBRACK, "]", 0) \
- T(LBRACE, "{", 0) \
- T(RBRACE, "}", 0) \
- T(COLON, ":", 0) \
- T(SEMICOLON, ";", 0) \
- T(PERIOD, ".", 0) \
- T(CONDITIONAL, "?", 3) \
- T(INC, "++", 0) \
- T(DEC, "--", 0) \
- \
- /* Assignment operators. */ \
- /* IsAssignmentOp() and Assignment::is_compound() relies on */ \
- /* this block of enum values being contiguous and sorted in the */ \
- /* same order! */ \
- T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \
- T(INIT_LET, "=init_let", 2) /* AST-use only. */ \
- T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \
- T(INIT_CONST_HARMONY, "=init_const_harmony", 2) /* AST-use only. */ \
- T(ASSIGN, "=", 2) \
- T(ASSIGN_BIT_OR, "|=", 2) \
- T(ASSIGN_BIT_XOR, "^=", 2) \
- T(ASSIGN_BIT_AND, "&=", 2) \
- T(ASSIGN_SHL, "<<=", 2) \
- T(ASSIGN_SAR, ">>=", 2) \
- T(ASSIGN_SHR, ">>>=", 2) \
- T(ASSIGN_ADD, "+=", 2) \
- T(ASSIGN_SUB, "-=", 2) \
- T(ASSIGN_MUL, "*=", 2) \
- T(ASSIGN_DIV, "/=", 2) \
- T(ASSIGN_MOD, "%=", 2) \
- \
- /* Binary operators sorted by precedence. */ \
- /* IsBinaryOp() relies on this block of enum values */ \
- /* being contiguous and sorted in the same order! */ \
- T(COMMA, ",", 1) \
- T(OR, "||", 4) \
- T(AND, "&&", 5) \
- T(BIT_OR, "|", 6) \
- T(BIT_XOR, "^", 7) \
- T(BIT_AND, "&", 8) \
- T(SHL, "<<", 11) \
- T(SAR, ">>", 11) \
- T(SHR, ">>>", 11) \
- T(ROR, "rotate right", 11) /* only used by Crankshaft */ \
- T(ADD, "+", 12) \
- T(SUB, "-", 12) \
- T(MUL, "*", 13) \
- T(DIV, "/", 13) \
- T(MOD, "%", 13) \
- \
- /* Compare operators sorted by precedence. */ \
- /* IsCompareOp() relies on this block of enum values */ \
- /* being contiguous and sorted in the same order! */ \
- T(EQ, "==", 9) \
- T(NE, "!=", 9) \
- T(EQ_STRICT, "===", 9) \
- T(NE_STRICT, "!==", 9) \
- T(LT, "<", 10) \
- T(GT, ">", 10) \
- T(LTE, "<=", 10) \
- T(GTE, ">=", 10) \
- K(INSTANCEOF, "instanceof", 10) \
- K(IN, "in", 10) \
- \
- /* Unary operators. */ \
- /* IsUnaryOp() relies on this block of enum values */ \
- /* being contiguous and sorted in the same order! */ \
- T(NOT, "!", 0) \
- T(BIT_NOT, "~", 0) \
- K(DELETE, "delete", 0) \
- K(TYPEOF, "typeof", 0) \
- K(VOID, "void", 0) \
- \
- /* Keywords (ECMA-262, section 7.5.2, page 13). */ \
- K(BREAK, "break", 0) \
- K(CASE, "case", 0) \
- K(CATCH, "catch", 0) \
- K(CONTINUE, "continue", 0) \
- K(DEBUGGER, "debugger", 0) \
- K(DEFAULT, "default", 0) \
- /* DELETE */ \
- K(DO, "do", 0) \
- K(ELSE, "else", 0) \
- K(FINALLY, "finally", 0) \
- K(FOR, "for", 0) \
- K(FUNCTION, "function", 0) \
- K(IF, "if", 0) \
- /* IN */ \
- /* INSTANCEOF */ \
- K(NEW, "new", 0) \
- K(RETURN, "return", 0) \
- K(SWITCH, "switch", 0) \
- K(THIS, "this", 0) \
- K(THROW, "throw", 0) \
- K(TRY, "try", 0) \
- /* TYPEOF */ \
- K(VAR, "var", 0) \
- /* VOID */ \
- K(WHILE, "while", 0) \
- K(WITH, "with", 0) \
- \
- /* Literals (ECMA-262, section 7.8, page 16). */ \
- K(NULL_LITERAL, "null", 0) \
- K(TRUE_LITERAL, "true", 0) \
- K(FALSE_LITERAL, "false", 0) \
- T(NUMBER, NULL, 0) \
- T(STRING, NULL, 0) \
- \
- /* Identifiers (not keywords or future reserved words). */ \
- T(IDENTIFIER, NULL, 0) \
- \
- /* Future reserved words (ECMA-262, section 7.6.1.2). */ \
- T(FUTURE_RESERVED_WORD, NULL, 0) \
- T(FUTURE_STRICT_RESERVED_WORD, NULL, 0) \
- K(CONST, "const", 0) \
- K(EXPORT, "export", 0) \
- K(IMPORT, "import", 0) \
- K(LET, "let", 0) \
- \
- /* Illegal token - not able to scan. */ \
- T(ILLEGAL, "ILLEGAL", 0) \
- \
- /* Scanner-internal use only. */ \
- T(WHITESPACE, NULL, 0)
-
-
-class Token {
- public:
- // All token values.
-#define T(name, string, precedence) name,
- enum Value {
- TOKEN_LIST(T, T)
- NUM_TOKENS
- };
-#undef T
-
- // Returns a string corresponding to the C++ token name
- // (e.g. "LT" for the token LT).
- static const char* Name(Value tok) {
- ASSERT(tok < NUM_TOKENS); // tok is unsigned
- return name_[tok];
- }
-
- // Predicates
- static bool IsKeyword(Value tok) {
- return token_type[tok] == 'K';
- }
-
- static bool IsAssignmentOp(Value tok) {
- return INIT_VAR <= tok && tok <= ASSIGN_MOD;
- }
-
- static bool IsBinaryOp(Value op) {
- return COMMA <= op && op <= MOD;
- }
-
- static bool IsCompareOp(Value op) {
- return EQ <= op && op <= IN;
- }
-
- static bool IsOrderedRelationalCompareOp(Value op) {
- return op == LT || op == LTE || op == GT || op == GTE;
- }
-
- static bool IsEqualityOp(Value op) {
- return op == EQ || op == EQ_STRICT;
- }
-
- static Value NegateCompareOp(Value op) {
- ASSERT(IsCompareOp(op));
- switch (op) {
- case EQ: return NE;
- case NE: return EQ;
- case EQ_STRICT: return NE_STRICT;
- case NE_STRICT: return EQ_STRICT;
- case LT: return GTE;
- case GT: return LTE;
- case LTE: return GT;
- case GTE: return LT;
- default:
- UNREACHABLE();
- return op;
- }
- }
-
- static Value ReverseCompareOp(Value op) {
- ASSERT(IsCompareOp(op));
- switch (op) {
- case EQ: return EQ;
- case NE: return NE;
- case EQ_STRICT: return EQ_STRICT;
- case NE_STRICT: return NE_STRICT;
- case LT: return GT;
- case GT: return LT;
- case LTE: return GTE;
- case GTE: return LTE;
- default:
- UNREACHABLE();
- return op;
- }
- }
-
- static bool IsBitOp(Value op) {
- return (BIT_OR <= op && op <= SHR) || op == BIT_NOT;
- }
-
- static bool IsUnaryOp(Value op) {
- return (NOT <= op && op <= VOID) || op == ADD || op == SUB;
- }
-
- static bool IsCountOp(Value op) {
- return op == INC || op == DEC;
- }
-
- static bool IsShiftOp(Value op) {
- return (SHL <= op) && (op <= SHR);
- }
-
- // Returns a string corresponding to the JS token string
- // (.e., "<" for the token LT) or NULL if the token doesn't
- // have a (unique) string (e.g. an IDENTIFIER).
- static const char* String(Value tok) {
- ASSERT(tok < NUM_TOKENS); // tok is unsigned.
- return string_[tok];
- }
-
- // Returns the precedence > 0 for binary and compare
- // operators; returns 0 otherwise.
- static int Precedence(Value tok) {
- ASSERT(tok < NUM_TOKENS); // tok is unsigned.
- return precedence_[tok];
- }
-
- private:
- static const char* const name_[NUM_TOKENS];
- static const char* const string_[NUM_TOKENS];
- static const int8_t precedence_[NUM_TOKENS];
- static const char token_type[NUM_TOKENS];
-};
-
-} } // namespace v8::internal
-
-#endif // V8_TOKEN_H_
diff --git a/src/3rdparty/v8/src/transitions-inl.h b/src/3rdparty/v8/src/transitions-inl.h
deleted file mode 100644
index cfaa99d..0000000
--- a/src/3rdparty/v8/src/transitions-inl.h
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_TRANSITIONS_INL_H_
-#define V8_TRANSITIONS_INL_H_
-
-#include "objects-inl.h"
-#include "transitions.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define FIELD_ADDR(p, offset) \
- (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
-
-#define WRITE_FIELD(p, offset, value) \
- (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
-
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
- if (mode == UPDATE_WRITE_BARRIER) { \
- heap->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- if (heap->InNewSpace(value)) { \
- heap->RecordWrite(object->address(), offset); \
- } \
- }
-
-
-TransitionArray* TransitionArray::cast(Object* object) {
- ASSERT(object->IsTransitionArray());
- return reinterpret_cast<TransitionArray*>(object);
-}
-
-
-Map* TransitionArray::elements_transition() {
- Object* transition_map = get(kElementsTransitionIndex);
- return Map::cast(transition_map);
-}
-
-
-void TransitionArray::ClearElementsTransition() {
- WRITE_FIELD(this, kElementsTransitionOffset, Smi::FromInt(0));
-}
-
-
-bool TransitionArray::HasElementsTransition() {
- return IsFullTransitionArray() &&
- get(kElementsTransitionIndex) != Smi::FromInt(0);
-}
-
-
-void TransitionArray::set_elements_transition(Map* transition_map,
- WriteBarrierMode mode) {
- ASSERT(IsFullTransitionArray());
- Heap* heap = GetHeap();
- WRITE_FIELD(this, kElementsTransitionOffset, transition_map);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kElementsTransitionOffset, transition_map, mode);
-}
-
-
-Object* TransitionArray::back_pointer_storage() {
- return get(kBackPointerStorageIndex);
-}
-
-
-void TransitionArray::set_back_pointer_storage(Object* back_pointer,
- WriteBarrierMode mode) {
- Heap* heap = GetHeap();
- WRITE_FIELD(this, kBackPointerStorageOffset, back_pointer);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kBackPointerStorageOffset, back_pointer, mode);
-}
-
-
-bool TransitionArray::HasPrototypeTransitions() {
- return IsFullTransitionArray() &&
- get(kPrototypeTransitionsIndex) != Smi::FromInt(0);
-}
-
-
-FixedArray* TransitionArray::GetPrototypeTransitions() {
- ASSERT(IsFullTransitionArray());
- Object* prototype_transitions = get(kPrototypeTransitionsIndex);
- return FixedArray::cast(prototype_transitions);
-}
-
-
-HeapObject* TransitionArray::UncheckedPrototypeTransitions() {
- ASSERT(HasPrototypeTransitions());
- return reinterpret_cast<HeapObject*>(get(kPrototypeTransitionsIndex));
-}
-
-
-void TransitionArray::SetPrototypeTransitions(FixedArray* transitions,
- WriteBarrierMode mode) {
- ASSERT(IsFullTransitionArray());
- ASSERT(transitions->IsFixedArray());
- Heap* heap = GetHeap();
- WRITE_FIELD(this, kPrototypeTransitionsOffset, transitions);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kPrototypeTransitionsOffset, transitions, mode);
-}
-
-
-Object** TransitionArray::GetPrototypeTransitionsSlot() {
- return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
- kPrototypeTransitionsOffset);
-}
-
-
-Object** TransitionArray::GetKeySlot(int transition_number) {
- ASSERT(!IsSimpleTransition());
- ASSERT(transition_number < number_of_transitions());
- return HeapObject::RawField(
- reinterpret_cast<HeapObject*>(this),
- OffsetOfElementAt(ToKeyIndex(transition_number)));
-}
-
-
-String* TransitionArray::GetKey(int transition_number) {
- if (IsSimpleTransition()) {
- Map* target = GetTarget(kSimpleTransitionIndex);
- int descriptor = target->LastAdded();
- String* key = target->instance_descriptors()->GetKey(descriptor);
- return key;
- }
- ASSERT(transition_number < number_of_transitions());
- return String::cast(get(ToKeyIndex(transition_number)));
-}
-
-
-void TransitionArray::SetKey(int transition_number, String* key) {
- ASSERT(!IsSimpleTransition());
- ASSERT(transition_number < number_of_transitions());
- set(ToKeyIndex(transition_number), key);
-}
-
-
-Map* TransitionArray::GetTarget(int transition_number) {
- if (IsSimpleTransition()) {
- ASSERT(transition_number == kSimpleTransitionIndex);
- return Map::cast(get(kSimpleTransitionTarget));
- }
- ASSERT(transition_number < number_of_transitions());
- return Map::cast(get(ToTargetIndex(transition_number)));
-}
-
-
-void TransitionArray::SetTarget(int transition_number, Map* value) {
- if (IsSimpleTransition()) {
- ASSERT(transition_number == kSimpleTransitionIndex);
- return set(kSimpleTransitionTarget, value);
- }
- ASSERT(transition_number < number_of_transitions());
- set(ToTargetIndex(transition_number), value);
-}
-
-
-PropertyDetails TransitionArray::GetTargetDetails(int transition_number) {
- Map* map = GetTarget(transition_number);
- DescriptorArray* descriptors = map->instance_descriptors();
- int descriptor = map->LastAdded();
- return descriptors->GetDetails(descriptor);
-}
-
-
-int TransitionArray::Search(String* name) {
- if (IsSimpleTransition()) {
- String* key = GetKey(kSimpleTransitionIndex);
- if (key->Equals(name)) return kSimpleTransitionIndex;
- return kNotFound;
- }
- return internal::Search<ALL_ENTRIES>(this, name);
-}
-
-
-void TransitionArray::NoIncrementalWriteBarrierSet(int transition_number,
- String* key,
- Map* target) {
- FixedArray::NoIncrementalWriteBarrierSet(
- this, ToKeyIndex(transition_number), key);
- FixedArray::NoIncrementalWriteBarrierSet(
- this, ToTargetIndex(transition_number), target);
-}
-
-
-#undef FIELD_ADDR
-#undef WRITE_FIELD
-#undef CONDITIONAL_WRITE_BARRIER
-
-
-} } // namespace v8::internal
-
-#endif // V8_TRANSITIONS_INL_H_
diff --git a/src/3rdparty/v8/src/transitions.cc b/src/3rdparty/v8/src/transitions.cc
deleted file mode 100644
index 56b6caf..0000000
--- a/src/3rdparty/v8/src/transitions.cc
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "objects.h"
-#include "transitions-inl.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-
-static MaybeObject* AllocateRaw(int length) {
- Heap* heap = Isolate::Current()->heap();
-
- // Use FixedArray to not use TransitionArray::cast on incomplete object.
- FixedArray* array;
- MaybeObject* maybe_array = heap->AllocateFixedArray(length);
- if (!maybe_array->To(&array)) return maybe_array;
- return array;
-}
-
-
-MaybeObject* TransitionArray::Allocate(int number_of_transitions) {
- FixedArray* array;
- MaybeObject* maybe_array = AllocateRaw(ToKeyIndex(number_of_transitions));
- if (!maybe_array->To(&array)) return maybe_array;
- array->set(kElementsTransitionIndex, Smi::FromInt(0));
- array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
- return array;
-}
-
-
-void TransitionArray::NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
- int origin_transition,
- int target_transition) {
- NoIncrementalWriteBarrierSet(target_transition,
- origin->GetKey(origin_transition),
- origin->GetTarget(origin_transition));
-}
-
-
-static bool InsertionPointFound(String* key1, String* key2) {
- return key1->Hash() > key2->Hash();
-}
-
-
-MaybeObject* TransitionArray::NewWith(SimpleTransitionFlag flag,
- String* key,
- Map* target,
- Object* back_pointer) {
- TransitionArray* result;
- MaybeObject* maybe_result;
-
- if (flag == SIMPLE_TRANSITION) {
- maybe_result = AllocateRaw(kSimpleTransitionSize);
- if (!maybe_result->To(&result)) return maybe_result;
- result->set(kSimpleTransitionTarget, target);
- } else {
- maybe_result = Allocate(1);
- if (!maybe_result->To(&result)) return maybe_result;
- result->NoIncrementalWriteBarrierSet(0, key, target);
- }
- result->set_back_pointer_storage(back_pointer);
- return result;
-}
-
-
-MaybeObject* TransitionArray::ExtendToFullTransitionArray() {
- ASSERT(!IsFullTransitionArray());
- int nof = number_of_transitions();
- TransitionArray* result;
- MaybeObject* maybe_result = Allocate(nof);
- if (!maybe_result->To(&result)) return maybe_result;
-
- if (nof == 1) {
- result->NoIncrementalWriteBarrierCopyFrom(this, kSimpleTransitionIndex, 0);
- }
-
- result->set_back_pointer_storage(back_pointer_storage());
- return result;
-}
-
-
-MaybeObject* TransitionArray::CopyInsert(String* name, Map* target) {
- TransitionArray* result;
-
- int number_of_transitions = this->number_of_transitions();
- int new_size = number_of_transitions;
-
- int insertion_index = this->Search(name);
- if (insertion_index == kNotFound) ++new_size;
-
- MaybeObject* maybe_array;
- maybe_array = TransitionArray::Allocate(new_size);
- if (!maybe_array->To(&result)) return maybe_array;
-
- if (HasElementsTransition()) {
- result->set_elements_transition(elements_transition());
- }
-
- if (HasPrototypeTransitions()) {
- result->SetPrototypeTransitions(GetPrototypeTransitions());
- }
-
- if (insertion_index != kNotFound) {
- for (int i = 0; i < number_of_transitions; ++i) {
- if (i != insertion_index) {
- result->NoIncrementalWriteBarrierCopyFrom(this, i, i);
- }
- }
- result->NoIncrementalWriteBarrierSet(insertion_index, name, target);
- return result;
- }
-
- insertion_index = 0;
- for (; insertion_index < number_of_transitions; ++insertion_index) {
- if (InsertionPointFound(GetKey(insertion_index), name)) break;
- result->NoIncrementalWriteBarrierCopyFrom(
- this, insertion_index, insertion_index);
- }
-
- result->NoIncrementalWriteBarrierSet(insertion_index, name, target);
-
- for (; insertion_index < number_of_transitions; ++insertion_index) {
- result->NoIncrementalWriteBarrierCopyFrom(
- this, insertion_index, insertion_index + 1);
- }
-
- result->set_back_pointer_storage(back_pointer_storage());
- return result;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/transitions.h b/src/3rdparty/v8/src/transitions.h
deleted file mode 100644
index 0a66026..0000000
--- a/src/3rdparty/v8/src/transitions.h
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_TRANSITIONS_H_
-#define V8_TRANSITIONS_H_
-
-#include "elements-kind.h"
-#include "heap.h"
-#include "isolate.h"
-#include "objects.h"
-#include "v8checks.h"
-
-namespace v8 {
-namespace internal {
-
-
-// TransitionArrays are fixed arrays used to hold map transitions for property,
-// constant, and element changes. They can either be simple transition arrays
-// that store a single property transition, or a full transition array that has
-// space for elements transitions, prototype transitions and multiple property
-// transitons. The details related to property transitions are accessed in the
-// descriptor array of the target map. In the case of a simple transition, the
-// key is also read from the descriptor array of the target map.
-//
-// The simple format of the these objects is:
-// [0] Undefined or back pointer map
-// [1] Single transition
-//
-// The full format is:
-// [0] Undefined or back pointer map
-// [1] Smi(0) or elements transition map
-// [2] Smi(0) or fixed array of prototype transitions
-// [3] First transition
-// [length() - kTransitionSize] Last transition
-class TransitionArray: public FixedArray {
- public:
- // Accessors for fetching instance transition at transition number.
- inline String* GetKey(int transition_number);
- inline void SetKey(int transition_number, String* value);
- inline Object** GetKeySlot(int transition_number);
- int GetSortedKeyIndex(int transition_number) { return transition_number; }
-
- String* GetSortedKey(int transition_number) {
- return GetKey(transition_number);
- }
-
- inline Map* GetTarget(int transition_number);
- inline void SetTarget(int transition_number, Map* target);
-
- inline PropertyDetails GetTargetDetails(int transition_number);
-
- inline Map* elements_transition();
- inline void set_elements_transition(
- Map* target,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline bool HasElementsTransition();
- inline void ClearElementsTransition();
-
- inline Object* back_pointer_storage();
- inline void set_back_pointer_storage(
- Object* back_pointer,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- inline FixedArray* GetPrototypeTransitions();
- inline void SetPrototypeTransitions(
- FixedArray* prototype_transitions,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline Object** GetPrototypeTransitionsSlot();
- inline bool HasPrototypeTransitions();
- inline HeapObject* UncheckedPrototypeTransitions();
-
- // Returns the number of transitions in the array.
- int number_of_transitions() {
- if (IsSimpleTransition()) return 1;
- int len = length();
- return len <= kFirstIndex ? 0 : (len - kFirstIndex) / kTransitionSize;
- }
-
- inline int number_of_entries() { return number_of_transitions(); }
-
- // Allocate a new transition array with a single entry.
- static MUST_USE_RESULT MaybeObject* NewWith(
- SimpleTransitionFlag flag,
- String* key,
- Map* target,
- Object* back_pointer);
-
- MUST_USE_RESULT MaybeObject* ExtendToFullTransitionArray();
-
- // Copy the transition array, inserting a new transition.
- // TODO(verwaest): This should not cause an existing transition to be
- // overwritten.
- MUST_USE_RESULT MaybeObject* CopyInsert(String* name, Map* target);
-
- // Copy a single transition from the origin array.
- inline void NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
- int origin_transition,
- int target_transition);
-
- // Search a transition for a given property name.
- inline int Search(String* name);
-
- // Allocates a TransitionArray.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_transitions);
-
- bool IsSimpleTransition() { return length() == kSimpleTransitionSize; }
- bool IsFullTransitionArray() { return length() >= kFirstIndex; }
-
- // Casting.
- static inline TransitionArray* cast(Object* obj);
-
- // Constant for denoting key was not found.
- static const int kNotFound = -1;
-
- static const int kBackPointerStorageIndex = 0;
-
- // Layout for full transition arrays.
- static const int kElementsTransitionIndex = 1;
- static const int kPrototypeTransitionsIndex = 2;
- static const int kFirstIndex = 3;
-
- // Layout for simple transition arrays.
- static const int kSimpleTransitionTarget = 1;
- static const int kSimpleTransitionSize = 2;
- static const int kSimpleTransitionIndex = 0;
- STATIC_ASSERT(kSimpleTransitionIndex != kNotFound);
-
- static const int kBackPointerStorageOffset = FixedArray::kHeaderSize;
-
- // Layout for the full transition array header.
- static const int kElementsTransitionOffset = kBackPointerStorageOffset +
- kPointerSize;
- static const int kPrototypeTransitionsOffset = kElementsTransitionOffset +
- kPointerSize;
-
- // Layout of map transition entries in full transition arrays.
- static const int kTransitionKey = 0;
- static const int kTransitionTarget = 1;
- static const int kTransitionSize = 2;
-
-#ifdef OBJECT_PRINT
- // Print all the transitions.
- inline void PrintTransitions() {
- PrintTransitions(stdout);
- }
- void PrintTransitions(FILE* out);
-#endif
-
-#ifdef DEBUG
- bool IsSortedNoDuplicates(int valid_entries = -1);
- bool IsConsistentWithBackPointers(Map* current_map);
- bool IsEqualTo(TransitionArray* other);
-#endif
-
- // The maximum number of transitions we want in a transition array (should
- // fit in a page).
- static const int kMaxNumberOfTransitions = 1024 + 512;
-
- private:
- // Conversion from transition number to array indices.
- static int ToKeyIndex(int transition_number) {
- return kFirstIndex +
- (transition_number * kTransitionSize) +
- kTransitionKey;
- }
-
- static int ToTargetIndex(int transition_number) {
- return kFirstIndex +
- (transition_number * kTransitionSize) +
- kTransitionTarget;
- }
-
- inline void NoIncrementalWriteBarrierSet(int transition_number,
- String* key,
- Map* target);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionArray);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_TRANSITIONS_H_
diff --git a/src/3rdparty/v8/src/type-info.cc b/src/3rdparty/v8/src/type-info.cc
deleted file mode 100644
index 62ca324..0000000
--- a/src/3rdparty/v8/src/type-info.cc
+++ /dev/null
@@ -1,755 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "code-stubs.h"
-#include "compiler.h"
-#include "ic.h"
-#include "macro-assembler.h"
-#include "stub-cache.h"
-#include "type-info.h"
-
-#include "ic-inl.h"
-#include "objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) {
- TypeInfo info;
- if (value->IsSmi()) {
- info = TypeInfo::Smi();
- } else if (value->IsHeapNumber()) {
- info = TypeInfo::IsInt32Double(HeapNumber::cast(*value)->value())
- ? TypeInfo::Integer32()
- : TypeInfo::Double();
- } else if (value->IsString()) {
- info = TypeInfo::String();
- } else {
- info = TypeInfo::Unknown();
- }
- return info;
-}
-
-
-TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
- Handle<Context> native_context,
- Isolate* isolate,
- Zone* zone) {
- native_context_ = native_context;
- isolate_ = isolate;
- zone_ = zone;
- BuildDictionary(code);
- ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
-}
-
-
-static uint32_t IdToKey(TypeFeedbackId ast_id) {
- return static_cast<uint32_t>(ast_id.ToInt());
-}
-
-
-Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
- int entry = dictionary_->FindEntry(IdToKey(ast_id));
- return entry != UnseededNumberDictionary::kNotFound
- ? Handle<Object>(dictionary_->ValueAt(entry), isolate_)
- : Handle<Object>::cast(isolate_->factory()->undefined_value());
-}
-
-
-bool TypeFeedbackOracle::LoadIsUninitialized(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
- if (map_or_code->IsMap()) return false;
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- return code->is_inline_cache_stub() && code->ic_state() == UNINITIALIZED;
- }
- return false;
-}
-
-
-bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
- if (map_or_code->IsMap()) return true;
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- bool preliminary_checks = code->is_keyed_load_stub() &&
- code->ic_state() == MONOMORPHIC &&
- Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
- if (!preliminary_checks) return false;
- Map* map = code->FindFirstMap();
- return map != NULL && !CanRetainOtherContext(map, *native_context_);
- }
- return false;
-}
-
-
-bool TypeFeedbackOracle::LoadIsPolymorphic(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- return code->is_keyed_load_stub() && code->ic_state() == POLYMORPHIC;
- }
- return false;
-}
-
-
-bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsMap()) return true;
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- bool allow_growth =
- Code::GetKeyedAccessGrowMode(code->extra_ic_state()) ==
- ALLOW_JSARRAY_GROWTH;
- bool preliminary_checks =
- code->is_keyed_store_stub() &&
- !allow_growth &&
- code->ic_state() == MONOMORPHIC &&
- Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
- if (!preliminary_checks) return false;
- Map* map = code->FindFirstMap();
- return map != NULL && !CanRetainOtherContext(map, *native_context_);
- }
- return false;
-}
-
-
-bool TypeFeedbackOracle::StoreIsPolymorphic(TypeFeedbackId ast_id) {
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- bool allow_growth =
- Code::GetKeyedAccessGrowMode(code->extra_ic_state()) ==
- ALLOW_JSARRAY_GROWTH;
- return code->is_keyed_store_stub() && !allow_growth &&
- code->ic_state() == POLYMORPHIC;
- }
- return false;
-}
-
-
-bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
- Handle<Object> value = GetInfo(expr->CallFeedbackId());
- return value->IsMap() || value->IsSmi() || value->IsJSFunction();
-}
-
-
-bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
- Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
- if (info->IsSmi()) {
- ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <=
- LAST_FAST_ELEMENTS_KIND);
- return Isolate::Current()->global_context()->array_function();
- }
- return info->IsJSFunction();
-}
-
-
-bool TypeFeedbackOracle::ObjectLiteralStoreIsMonomorphic(
- ObjectLiteral::Property* prop) {
- Handle<Object> map_or_code = GetInfo(prop->key()->LiteralFeedbackId());
- return map_or_code->IsMap();
-}
-
-
-bool TypeFeedbackOracle::IsForInFastCase(ForInStatement* stmt) {
- Handle<Object> value = GetInfo(stmt->ForInFeedbackId());
- return value->IsSmi() &&
- Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker;
-}
-
-
-Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
- ASSERT(LoadIsMonomorphicNormal(expr));
- Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- Map* first_map = code->FindFirstMap();
- ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *native_context_)
- ? Handle<Map>::null()
- : Handle<Map>(first_map);
- }
- return Handle<Map>::cast(map_or_code);
-}
-
-
-Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(
- TypeFeedbackId ast_id) {
- ASSERT(StoreIsMonomorphicNormal(ast_id));
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- Map* first_map = code->FindFirstMap();
- ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *native_context_)
- ? Handle<Map>::null()
- : Handle<Map>(first_map);
- }
- return Handle<Map>::cast(map_or_code);
-}
-
-
-void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
- Handle<String> name,
- SmallMapList* types) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC);
- CollectReceiverTypes(expr->PropertyFeedbackId(), name, flags, types);
-}
-
-
-void TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
- Handle<String> name,
- SmallMapList* types) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC);
- CollectReceiverTypes(expr->AssignmentFeedbackId(), name, flags, types);
-}
-
-
-void TypeFeedbackOracle::CallReceiverTypes(Call* expr,
- Handle<String> name,
- CallKind call_kind,
- SmallMapList* types) {
- int arity = expr->arguments()->length();
-
- // Note: Currently we do not take string extra ic data into account
- // here.
- Code::ExtraICState extra_ic_state =
- CallIC::Contextual::encode(call_kind == CALL_AS_FUNCTION);
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
- extra_ic_state,
- Code::NORMAL,
- arity,
- OWN_MAP);
- CollectReceiverTypes(expr->CallFeedbackId(), name, flags, types);
-}
-
-
-CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
- Handle<Object> value = GetInfo(expr->CallFeedbackId());
- if (!value->IsSmi()) return RECEIVER_MAP_CHECK;
- CheckType check = static_cast<CheckType>(Smi::cast(*value)->value());
- ASSERT(check != RECEIVER_MAP_CHECK);
- return check;
-}
-
-
-Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
- CheckType check) {
- JSFunction* function = NULL;
- switch (check) {
- case RECEIVER_MAP_CHECK:
- UNREACHABLE();
- break;
- case SYMBOL_CHECK:
- return Handle<JSObject>(native_context_->symbol_delegate());
- case STRING_CHECK:
- function = native_context_->string_function();
- break;
- case NUMBER_CHECK:
- function = native_context_->number_function();
- break;
- case BOOLEAN_CHECK:
- function = native_context_->boolean_function();
- break;
- }
- ASSERT(function != NULL);
- return Handle<JSObject>(JSObject::cast(function->instance_prototype()));
-}
-
-
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
- return Handle<JSFunction>::cast(GetInfo(expr->CallFeedbackId()));
-}
-
-
-Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
- Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
- if (info->IsSmi()) {
- ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <=
- LAST_FAST_ELEMENTS_KIND);
- return Handle<JSFunction>(Isolate::Current()->global_context()->
- array_function());
- } else {
- return Handle<JSFunction>::cast(info);
- }
-}
-
-
-ElementsKind TypeFeedbackOracle::GetCallNewElementsKind(CallNew* expr) {
- Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
- if (info->IsSmi()) {
- return static_cast<ElementsKind>(Smi::cast(*info)->value());
- } else {
- // TODO(mvstanton): avoided calling GetInitialFastElementsKind() for perf
- // reasons. Is there a better fix?
- if (FLAG_packed_arrays) {
- return FAST_SMI_ELEMENTS;
- } else {
- return FAST_HOLEY_SMI_ELEMENTS;
- }
- }
-}
-
-Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap(
- ObjectLiteral::Property* prop) {
- ASSERT(ObjectLiteralStoreIsMonomorphic(prop));
- return Handle<Map>::cast(GetInfo(prop->key()->LiteralFeedbackId()));
-}
-
-
-bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
- return *GetInfo(expr->PropertyFeedbackId()) ==
- isolate_->builtins()->builtin(id);
-}
-
-
-bool TypeFeedbackOracle::LoadIsStub(Property* expr, ICStub* stub) {
- Handle<Object> object = GetInfo(expr->PropertyFeedbackId());
- if (!object->IsCode()) return false;
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_load_stub()) return false;
- if (code->ic_state() != MONOMORPHIC) return false;
- return stub->Describes(*code);
-}
-
-
-static TypeInfo TypeFromCompareType(CompareIC::State state) {
- switch (state) {
- case CompareIC::UNINITIALIZED:
- // Uninitialized means never executed.
- return TypeInfo::Uninitialized();
- case CompareIC::SMI:
- return TypeInfo::Smi();
- case CompareIC::NUMBER:
- return TypeInfo::Number();
- case CompareIC::INTERNALIZED_STRING:
- return TypeInfo::InternalizedString();
- case CompareIC::STRING:
- return TypeInfo::String();
- case CompareIC::OBJECT:
- case CompareIC::KNOWN_OBJECT:
- // TODO(kasperl): We really need a type for JS objects here.
- return TypeInfo::NonPrimitive();
- case CompareIC::GENERIC:
- default:
- return TypeInfo::Unknown();
- }
-}
-
-
-void TypeFeedbackOracle::CompareType(CompareOperation* expr,
- TypeInfo* left_type,
- TypeInfo* right_type,
- TypeInfo* overall_type) {
- Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) {
- *left_type = *right_type = *overall_type = unknown;
- return;
- }
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) {
- *left_type = *right_type = *overall_type = unknown;
- return;
- }
-
- int stub_minor_key = code->stub_info();
- CompareIC::State left_state, right_state, handler_state;
- ICCompareStub::DecodeMinorKey(stub_minor_key, &left_state, &right_state,
- &handler_state, NULL);
- *left_type = TypeFromCompareType(left_state);
- *right_type = TypeFromCompareType(right_state);
- *overall_type = TypeFromCompareType(handler_state);
-}
-
-
-Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- if (!object->IsCode()) return Handle<Map>::null();
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return Handle<Map>::null();
- CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
- if (state != CompareIC::KNOWN_OBJECT) {
- return Handle<Map>::null();
- }
- Map* first_map = code->FindFirstMap();
- ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *native_context_)
- ? Handle<Map>::null()
- : Handle<Map>(first_map);
-}
-
-
-TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
- Handle<Object> object = GetInfo(expr->UnaryOperationFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- ASSERT(code->is_unary_op_stub());
- UnaryOpIC::TypeInfo type = static_cast<UnaryOpIC::TypeInfo>(
- code->unary_op_type());
- switch (type) {
- case UnaryOpIC::SMI:
- return TypeInfo::Smi();
- case UnaryOpIC::NUMBER:
- return TypeInfo::Double();
- default:
- return unknown;
- }
-}
-
-
-static TypeInfo TypeFromBinaryOpType(BinaryOpIC::TypeInfo binary_type) {
- switch (binary_type) {
- // Uninitialized means never executed.
- case BinaryOpIC::UNINITIALIZED: return TypeInfo::Uninitialized();
- case BinaryOpIC::SMI: return TypeInfo::Smi();
- case BinaryOpIC::INT32: return TypeInfo::Integer32();
- case BinaryOpIC::NUMBER: return TypeInfo::Double();
- case BinaryOpIC::ODDBALL: return TypeInfo::Unknown();
- case BinaryOpIC::STRING: return TypeInfo::String();
- case BinaryOpIC::GENERIC: return TypeInfo::Unknown();
- }
- UNREACHABLE();
- return TypeInfo::Unknown();
-}
-
-
-void TypeFeedbackOracle::BinaryType(BinaryOperation* expr,
- TypeInfo* left,
- TypeInfo* right,
- TypeInfo* result) {
- Handle<Object> object = GetInfo(expr->BinaryOperationFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) {
- *left = *right = *result = unknown;
- return;
- }
- Handle<Code> code = Handle<Code>::cast(object);
- if (code->is_binary_op_stub()) {
- BinaryOpIC::TypeInfo left_type, right_type, result_type;
- BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
- &right_type, &result_type);
- *left = TypeFromBinaryOpType(left_type);
- *right = TypeFromBinaryOpType(right_type);
- *result = TypeFromBinaryOpType(result_type);
- return;
- }
- // Not a binary op stub.
- *left = *right = *result = unknown;
-}
-
-
-TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
- Handle<Object> object = GetInfo(clause->CompareId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return unknown;
-
- CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
- return TypeFromCompareType(state);
-}
-
-
-TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
- Handle<Object> object = GetInfo(expr->CountBinOpFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_binary_op_stub()) return unknown;
-
- BinaryOpIC::TypeInfo left_type, right_type, unused_result_type;
- BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
- &right_type, &unused_result_type);
- // CountOperations should always have +1 or -1 as their right input.
- ASSERT(right_type == BinaryOpIC::SMI ||
- right_type == BinaryOpIC::UNINITIALIZED);
-
- switch (left_type) {
- case BinaryOpIC::UNINITIALIZED:
- case BinaryOpIC::SMI:
- return TypeInfo::Smi();
- case BinaryOpIC::INT32:
- return TypeInfo::Integer32();
- case BinaryOpIC::NUMBER:
- return TypeInfo::Double();
- case BinaryOpIC::STRING:
- case BinaryOpIC::GENERIC:
- return unknown;
- default:
- return unknown;
- }
- UNREACHABLE();
- return unknown;
-}
-
-
-static void AddMapIfMissing(Handle<Map> map, SmallMapList* list,
- Zone* zone) {
- for (int i = 0; i < list->length(); ++i) {
- if (list->at(i).is_identical_to(map)) return;
- }
- list->Add(map, zone);
-}
-
-
-void TypeFeedbackOracle::CollectPolymorphicMaps(Handle<Code> code,
- SmallMapList* types) {
- MapHandleList maps;
- code->FindAllMaps(&maps);
- types->Reserve(maps.length(), zone());
- for (int i = 0; i < maps.length(); i++) {
- Handle<Map> map(maps.at(i));
- if (!CanRetainOtherContext(*map, *native_context_)) {
- AddMapIfMissing(map, types, zone());
- }
- }
-}
-
-
-void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
- Handle<String> name,
- Code::Flags flags,
- SmallMapList* types) {
- Handle<Object> object = GetInfo(ast_id);
- if (object->IsUndefined() || object->IsSmi()) return;
-
- if (object.is_identical_to(isolate_->builtins()->StoreIC_GlobalProxy())) {
- // TODO(fschneider): We could collect the maps and signal that
- // we need a generic store (or load) here.
- ASSERT(Handle<Code>::cast(object)->ic_state() == GENERIC);
- } else if (object->IsMap()) {
- types->Add(Handle<Map>::cast(object), zone());
- } else if (Handle<Code>::cast(object)->ic_state() == POLYMORPHIC) {
- CollectPolymorphicMaps(Handle<Code>::cast(object), types);
- } else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
- Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
- types->Reserve(4, zone());
- ASSERT(object->IsCode());
- isolate_->stub_cache()->CollectMatchingMaps(types,
- *name,
- flags,
- native_context_,
- zone());
- }
-}
-
-
-// Check if a map originates from a given native context. We use this
-// information to filter out maps from different context to avoid
-// retaining objects from different tabs in Chrome via optimized code.
-bool TypeFeedbackOracle::CanRetainOtherContext(Map* map,
- Context* native_context) {
- Object* constructor = NULL;
- while (!map->prototype()->IsNull()) {
- constructor = map->constructor();
- if (!constructor->IsNull()) {
- // If the constructor is not null or a JSFunction, we have to
- // conservatively assume that it may retain a native context.
- if (!constructor->IsJSFunction()) return true;
- // Check if the constructor directly references a foreign context.
- if (CanRetainOtherContext(JSFunction::cast(constructor),
- native_context)) {
- return true;
- }
- }
- map = HeapObject::cast(map->prototype())->map();
- }
- constructor = map->constructor();
- if (constructor->IsNull()) return false;
- JSFunction* function = JSFunction::cast(constructor);
- return CanRetainOtherContext(function, native_context);
-}
-
-
-bool TypeFeedbackOracle::CanRetainOtherContext(JSFunction* function,
- Context* native_context) {
- return function->context()->global_object() != native_context->global_object()
- && function->context()->global_object() != native_context->builtins();
-}
-
-
-void TypeFeedbackOracle::CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
- SmallMapList* types) {
- Handle<Object> object = GetInfo(ast_id);
- if (!object->IsCode()) return;
- Handle<Code> code = Handle<Code>::cast(object);
- if (code->kind() == Code::KEYED_LOAD_IC ||
- code->kind() == Code::KEYED_STORE_IC) {
- CollectPolymorphicMaps(code, types);
- }
-}
-
-
-byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId ast_id) {
- Handle<Object> object = GetInfo(ast_id);
- return object->IsCode() ? Handle<Code>::cast(object)->to_boolean_state() : 0;
-}
-
-
-// Things are a bit tricky here: The iterator for the RelocInfos and the infos
-// themselves are not GC-safe, so we first get all infos, then we create the
-// dictionary (possibly triggering GC), and finally we relocate the collected
-// infos before we process them.
-void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
- AssertNoAllocation no_allocation;
- ZoneList<RelocInfo> infos(16, zone());
- HandleScope scope(code->GetIsolate());
- GetRelocInfos(code, &infos);
- CreateDictionary(code, &infos);
- ProcessRelocInfos(&infos);
- ProcessTypeFeedbackCells(code);
- // Allocate handle in the parent scope.
- dictionary_ = scope.CloseAndEscape(dictionary_);
-}
-
-
-void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
- ZoneList<RelocInfo>* infos) {
- int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
- for (RelocIterator it(*code, mask); !it.done(); it.next()) {
- infos->Add(*it.rinfo(), zone());
- }
-}
-
-
-void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
- ZoneList<RelocInfo>* infos) {
- DisableAssertNoAllocation allocation_allowed;
- int cell_count = code->type_feedback_info()->IsTypeFeedbackInfo()
- ? TypeFeedbackInfo::cast(code->type_feedback_info())->
- type_feedback_cells()->CellCount()
- : 0;
- int length = infos->length() + cell_count;
- byte* old_start = code->instruction_start();
- dictionary_ = FACTORY->NewUnseededNumberDictionary(length);
- byte* new_start = code->instruction_start();
- RelocateRelocInfos(infos, old_start, new_start);
-}
-
-
-void TypeFeedbackOracle::RelocateRelocInfos(ZoneList<RelocInfo>* infos,
- byte* old_start,
- byte* new_start) {
- for (int i = 0; i < infos->length(); i++) {
- RelocInfo* info = &(*infos)[i];
- info->set_pc(new_start + (info->pc() - old_start));
- }
-}
-
-
-void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
- for (int i = 0; i < infos->length(); i++) {
- RelocInfo reloc_entry = (*infos)[i];
- Address target_address = reloc_entry.target_address();
- TypeFeedbackId ast_id =
- TypeFeedbackId(static_cast<unsigned>((*infos)[i].data()));
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- switch (target->kind()) {
- case Code::LOAD_IC:
- case Code::STORE_IC:
- case Code::CALL_IC:
- case Code::KEYED_CALL_IC:
- if (target->ic_state() == MONOMORPHIC) {
- if (target->kind() == Code::CALL_IC &&
- target->check_type() != RECEIVER_MAP_CHECK) {
- SetInfo(ast_id, Smi::FromInt(target->check_type()));
- } else {
- Object* map = target->FindFirstMap();
- if (map == NULL) {
- SetInfo(ast_id, static_cast<Object*>(target));
- } else if (!CanRetainOtherContext(Map::cast(map),
- *native_context_)) {
- SetInfo(ast_id, map);
- }
- }
- } else {
- SetInfo(ast_id, target);
- }
- break;
-
- case Code::KEYED_LOAD_IC:
- case Code::KEYED_STORE_IC:
- if (target->ic_state() == MONOMORPHIC ||
- target->ic_state() == POLYMORPHIC) {
- SetInfo(ast_id, target);
- }
- break;
-
- case Code::UNARY_OP_IC:
- case Code::BINARY_OP_IC:
- case Code::COMPARE_IC:
- case Code::TO_BOOLEAN_IC:
- SetInfo(ast_id, target);
- break;
-
- default:
- break;
- }
- }
-}
-
-
-void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
- Object* raw_info = code->type_feedback_info();
- if (!raw_info->IsTypeFeedbackInfo()) return;
- Handle<TypeFeedbackCells> cache(
- TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
- for (int i = 0; i < cache->CellCount(); i++) {
- TypeFeedbackId ast_id = cache->AstId(i);
- Object* value = cache->Cell(i)->value();
- if (value->IsSmi() ||
- (value->IsJSFunction() &&
- !CanRetainOtherContext(JSFunction::cast(value),
- *native_context_))) {
- SetInfo(ast_id, value);
- }
- }
-}
-
-
-void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
- ASSERT(dictionary_->FindEntry(IdToKey(ast_id)) ==
- UnseededNumberDictionary::kNotFound);
- MaybeObject* maybe_result = dictionary_->AtNumberPut(IdToKey(ast_id), target);
- USE(maybe_result);
-#ifdef DEBUG
- Object* result = NULL;
- // Dictionary has been allocated with sufficient size for all elements.
- ASSERT(maybe_result->ToObject(&result));
- ASSERT(*dictionary_ == result);
-#endif
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/type-info.h b/src/3rdparty/v8/src/type-info.h
deleted file mode 100644
index 2b50bf4..0000000
--- a/src/3rdparty/v8/src/type-info.h
+++ /dev/null
@@ -1,345 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_TYPE_INFO_H_
-#define V8_TYPE_INFO_H_
-
-#include "allocation.h"
-#include "ast.h"
-#include "globals.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-const int kMaxKeyedPolymorphism = 4;
-
-// Unknown
-// | \____________
-// | |
-// Primitive Non-primitive
-// | \_______ |
-// | | |
-// Number String |
-// / \ | |
-// Double Integer32 | /
-// | | / /
-// | Smi / /
-// | | / __/
-// Uninitialized.
-
-class TypeInfo {
- public:
- TypeInfo() : type_(kUninitialized) { }
-
- static TypeInfo Unknown() { return TypeInfo(kUnknown); }
- // We know it's a primitive type.
- static TypeInfo Primitive() { return TypeInfo(kPrimitive); }
- // We know it's a number of some sort.
- static TypeInfo Number() { return TypeInfo(kNumber); }
- // We know it's a signed 32 bit integer.
- static TypeInfo Integer32() { return TypeInfo(kInteger32); }
- // We know it's a Smi.
- static TypeInfo Smi() { return TypeInfo(kSmi); }
- // We know it's a heap number.
- static TypeInfo Double() { return TypeInfo(kDouble); }
- // We know it's a string.
- static TypeInfo String() { return TypeInfo(kString); }
- // We know it's an internalized string.
- static TypeInfo InternalizedString() { return TypeInfo(kInternalizedString); }
- // We know it's a non-primitive (object) type.
- static TypeInfo NonPrimitive() { return TypeInfo(kNonPrimitive); }
- // We haven't started collecting info yet.
- static TypeInfo Uninitialized() { return TypeInfo(kUninitialized); }
-
- int ToInt() {
- return type_;
- }
-
- static TypeInfo FromInt(int bit_representation) {
- Type t = static_cast<Type>(bit_representation);
- ASSERT(t == kUnknown ||
- t == kPrimitive ||
- t == kNumber ||
- t == kInteger32 ||
- t == kSmi ||
- t == kDouble ||
- t == kString ||
- t == kNonPrimitive);
- return TypeInfo(t);
- }
-
- // Return the weakest (least precise) common type.
- static TypeInfo Combine(TypeInfo a, TypeInfo b) {
- return TypeInfo(static_cast<Type>(a.type_ & b.type_));
- }
-
-
- // Integer32 is an integer that can be represented as a signed
- // 32-bit integer. It has to be
- // in the range [-2^31, 2^31 - 1]. We also have to check for negative 0
- // as it is not an Integer32.
- static inline bool IsInt32Double(double value) {
- const DoubleRepresentation minus_zero(-0.0);
- DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) return false;
- if (value >= kMinInt && value <= kMaxInt &&
- value == static_cast<int32_t>(value)) {
- return true;
- }
- return false;
- }
-
- static TypeInfo TypeFromValue(Handle<Object> value);
-
- bool Equals(const TypeInfo& other) {
- return type_ == other.type_;
- }
-
- inline bool IsUnknown() {
- ASSERT(type_ != kUninitialized);
- return type_ == kUnknown;
- }
-
- inline bool IsPrimitive() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kPrimitive) == kPrimitive);
- }
-
- inline bool IsNumber() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kNumber) == kNumber);
- }
-
- inline bool IsSmi() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kSmi) == kSmi);
- }
-
- inline bool IsInternalizedString() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kInternalizedString) == kInternalizedString);
- }
-
- inline bool IsNonInternalizedString() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kInternalizedString) == kString);
- }
-
- inline bool IsInteger32() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kInteger32) == kInteger32);
- }
-
- inline bool IsDouble() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kDouble) == kDouble);
- }
-
- inline bool IsString() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kString) == kString);
- }
-
- inline bool IsNonPrimitive() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kNonPrimitive) == kNonPrimitive);
- }
-
- inline bool IsUninitialized() {
- return type_ == kUninitialized;
- }
-
- const char* ToString() {
- switch (type_) {
- case kUnknown: return "Unknown";
- case kPrimitive: return "Primitive";
- case kNumber: return "Number";
- case kInteger32: return "Integer32";
- case kSmi: return "Smi";
- case kInternalizedString: return "InternalizedString";
- case kDouble: return "Double";
- case kString: return "String";
- case kNonPrimitive: return "Object";
- case kUninitialized: return "Uninitialized";
- }
- UNREACHABLE();
- return "Unreachable code";
- }
-
- private:
- enum Type {
- kUnknown = 0, // 0000000
- kPrimitive = 0x10, // 0010000
- kNumber = 0x11, // 0010001
- kInteger32 = 0x13, // 0010011
- kSmi = 0x17, // 0010111
- kDouble = 0x19, // 0011001
- kString = 0x30, // 0110000
- kInternalizedString = 0x32, // 0110010
- kNonPrimitive = 0x40, // 1000000
- kUninitialized = 0x7f // 1111111
- };
-
- explicit inline TypeInfo(Type t) : type_(t) { }
-
- Type type_;
-};
-
-
-enum StringStubFeedback {
- DEFAULT_STRING_STUB = 0,
- STRING_INDEX_OUT_OF_BOUNDS = 1
-};
-
-
-// Forward declarations.
-class Assignment;
-class BinaryOperation;
-class Call;
-class CallNew;
-class CaseClause;
-class CompareOperation;
-class CompilationInfo;
-class CountOperation;
-class Expression;
-class ForInStatement;
-class ICStub;
-class Property;
-class SmallMapList;
-class UnaryOperation;
-
-
-class TypeFeedbackOracle: public ZoneObject {
- public:
- TypeFeedbackOracle(Handle<Code> code,
- Handle<Context> native_context,
- Isolate* isolate,
- Zone* zone);
-
- bool LoadIsMonomorphicNormal(Property* expr);
- bool LoadIsUninitialized(Property* expr);
- bool LoadIsPolymorphic(Property* expr);
- bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id);
- bool StoreIsPolymorphic(TypeFeedbackId ast_id);
- bool CallIsMonomorphic(Call* expr);
- bool CallNewIsMonomorphic(CallNew* expr);
- bool ObjectLiteralStoreIsMonomorphic(ObjectLiteral::Property* prop);
-
- bool IsForInFastCase(ForInStatement* expr);
-
- Handle<Map> LoadMonomorphicReceiverType(Property* expr);
- Handle<Map> StoreMonomorphicReceiverType(TypeFeedbackId ast_id);
-
- void LoadReceiverTypes(Property* expr,
- Handle<String> name,
- SmallMapList* types);
- void StoreReceiverTypes(Assignment* expr,
- Handle<String> name,
- SmallMapList* types);
- void CallReceiverTypes(Call* expr,
- Handle<String> name,
- CallKind call_kind,
- SmallMapList* types);
- void CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
- SmallMapList* types);
-
- static bool CanRetainOtherContext(Map* map, Context* native_context);
- static bool CanRetainOtherContext(JSFunction* function,
- Context* native_context);
-
- void CollectPolymorphicMaps(Handle<Code> code, SmallMapList* types);
-
- CheckType GetCallCheckType(Call* expr);
- Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
-
- Handle<JSFunction> GetCallTarget(Call* expr);
- Handle<JSFunction> GetCallNewTarget(CallNew* expr);
- ElementsKind GetCallNewElementsKind(CallNew* expr);
-
- Handle<Map> GetObjectLiteralStoreMap(ObjectLiteral::Property* prop);
-
- bool LoadIsBuiltin(Property* expr, Builtins::Name id);
- bool LoadIsStub(Property* expr, ICStub* stub);
-
- // TODO(1571) We can't use ToBooleanStub::Types as the return value because
- // of various cylces in our headers. Death to tons of implementations in
- // headers!! :-P
- byte ToBooleanTypes(TypeFeedbackId ast_id);
-
- // Get type information for arithmetic operations and compares.
- TypeInfo UnaryType(UnaryOperation* expr);
- void BinaryType(BinaryOperation* expr,
- TypeInfo* left,
- TypeInfo* right,
- TypeInfo* result);
- void CompareType(CompareOperation* expr,
- TypeInfo* left_type,
- TypeInfo* right_type,
- TypeInfo* overall_type);
- Handle<Map> GetCompareMap(CompareOperation* expr);
- TypeInfo SwitchType(CaseClause* clause);
- TypeInfo IncrementType(CountOperation* expr);
-
- Zone* zone() const { return zone_; }
-
- private:
- void CollectReceiverTypes(TypeFeedbackId ast_id,
- Handle<String> name,
- Code::Flags flags,
- SmallMapList* types);
-
- void SetInfo(TypeFeedbackId ast_id, Object* target);
-
- void BuildDictionary(Handle<Code> code);
- void GetRelocInfos(Handle<Code> code, ZoneList<RelocInfo>* infos);
- void CreateDictionary(Handle<Code> code, ZoneList<RelocInfo>* infos);
- void RelocateRelocInfos(ZoneList<RelocInfo>* infos,
- byte* old_start,
- byte* new_start);
- void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
- void ProcessTypeFeedbackCells(Handle<Code> code);
-
- // Returns an element from the backing store. Returns undefined if
- // there is no information.
- public:
- // TODO(mvstanton): how to get this information without making the method
- // public?
- Handle<Object> GetInfo(TypeFeedbackId ast_id);
-
- private:
- Handle<Context> native_context_;
- Isolate* isolate_;
- Handle<UnseededNumberDictionary> dictionary_;
- Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_TYPE_INFO_H_
diff --git a/src/3rdparty/v8/src/unbound-queue-inl.h b/src/3rdparty/v8/src/unbound-queue-inl.h
deleted file mode 100644
index fffb1db..0000000
--- a/src/3rdparty/v8/src/unbound-queue-inl.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_UNBOUND_QUEUE_INL_H_
-#define V8_UNBOUND_QUEUE_INL_H_
-
-#include "unbound-queue.h"
-
-namespace v8 {
-namespace internal {
-
-template<typename Record>
-struct UnboundQueue<Record>::Node: public Malloced {
- explicit Node(const Record& value)
- : value(value), next(NULL) {
- }
-
- Record value;
- Node* next;
-};
-
-
-template<typename Record>
-UnboundQueue<Record>::UnboundQueue() {
- first_ = new Node(Record());
- divider_ = last_ = reinterpret_cast<AtomicWord>(first_);
-}
-
-
-template<typename Record>
-UnboundQueue<Record>::~UnboundQueue() {
- while (first_ != NULL) DeleteFirst();
-}
-
-
-template<typename Record>
-void UnboundQueue<Record>::DeleteFirst() {
- Node* tmp = first_;
- first_ = tmp->next;
- delete tmp;
-}
-
-
-template<typename Record>
-void UnboundQueue<Record>::Dequeue(Record* rec) {
- ASSERT(divider_ != last_);
- Node* next = reinterpret_cast<Node*>(divider_)->next;
- *rec = next->value;
- OS::ReleaseStore(&divider_, reinterpret_cast<AtomicWord>(next));
-}
-
-
-template<typename Record>
-void UnboundQueue<Record>::Enqueue(const Record& rec) {
- Node*& next = reinterpret_cast<Node*>(last_)->next;
- next = new Node(rec);
- OS::ReleaseStore(&last_, reinterpret_cast<AtomicWord>(next));
- while (first_ != reinterpret_cast<Node*>(divider_)) DeleteFirst();
-}
-
-
-template<typename Record>
-Record* UnboundQueue<Record>::Peek() {
- ASSERT(divider_ != last_);
- Node* next = reinterpret_cast<Node*>(divider_)->next;
- return &next->value;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_UNBOUND_QUEUE_INL_H_
diff --git a/src/3rdparty/v8/src/unbound-queue.h b/src/3rdparty/v8/src/unbound-queue.h
deleted file mode 100644
index 59a426b..0000000
--- a/src/3rdparty/v8/src/unbound-queue.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_UNBOUND_QUEUE_
-#define V8_UNBOUND_QUEUE_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Lock-free unbound queue for small records. Intended for
-// transferring small records between a Single producer and a Single
-// consumer. Doesn't have restrictions on the number of queued
-// elements, so producer never blocks. Implemented after Herb
-// Sutter's article:
-// http://www.ddj.com/high-performance-computing/210604448
-template<typename Record>
-class UnboundQueue BASE_EMBEDDED {
- public:
- inline UnboundQueue();
- inline ~UnboundQueue();
-
- INLINE(void Dequeue(Record* rec));
- INLINE(void Enqueue(const Record& rec));
- INLINE(bool IsEmpty()) { return divider_ == last_; }
- INLINE(Record* Peek());
-
- private:
- INLINE(void DeleteFirst());
-
- struct Node;
-
- Node* first_;
- AtomicWord divider_; // Node*
- AtomicWord last_; // Node*
-
- DISALLOW_COPY_AND_ASSIGN(UnboundQueue);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_UNBOUND_QUEUE_
diff --git a/src/3rdparty/v8/src/unicode-inl.h b/src/3rdparty/v8/src/unicode-inl.h
deleted file mode 100644
index c80c67e..0000000
--- a/src/3rdparty/v8/src/unicode-inl.h
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2007-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_UNICODE_INL_H_
-#define V8_UNICODE_INL_H_
-
-#include "unicode.h"
-#include "checks.h"
-
-namespace unibrow {
-
-template <class T, int s> bool Predicate<T, s>::get(uchar code_point) {
- CacheEntry entry = entries_[code_point & kMask];
- if (entry.code_point_ == code_point) return entry.value_;
- return CalculateValue(code_point);
-}
-
-template <class T, int s> bool Predicate<T, s>::CalculateValue(
- uchar code_point) {
- bool result = T::Is(code_point);
- entries_[code_point & kMask] = CacheEntry(code_point, result);
- return result;
-}
-
-template <class T, int s> int Mapping<T, s>::get(uchar c, uchar n,
- uchar* result) {
- CacheEntry entry = entries_[c & kMask];
- if (entry.code_point_ == c) {
- if (entry.offset_ == 0) {
- return 0;
- } else {
- result[0] = c + entry.offset_;
- return 1;
- }
- } else {
- return CalculateValue(c, n, result);
- }
-}
-
-template <class T, int s> int Mapping<T, s>::CalculateValue(uchar c, uchar n,
- uchar* result) {
- bool allow_caching = true;
- int length = T::Convert(c, n, result, &allow_caching);
- if (allow_caching) {
- if (length == 1) {
- entries_[c & kMask] = CacheEntry(c, result[0] - c);
- return 1;
- } else {
- entries_[c & kMask] = CacheEntry(c, 0);
- return 0;
- }
- } else {
- return length;
- }
-}
-
-
-uint16_t Latin1::ConvertNonLatin1ToLatin1(uint16_t c) {
- ASSERT(c > Latin1::kMaxChar);
- switch (c) {
- // This are equivalent characters in unicode.
- case 0x39c:
- case 0x3bc:
- return 0xb5;
- // This is an uppercase of a Latin-1 character
- // outside of Latin-1.
- case 0x178:
- return 0xff;
- }
- return 0;
-}
-
-
-unsigned Utf8::Encode(char* str, uchar c, int previous) {
- static const int kMask = ~(1 << 6);
- if (c <= kMaxOneByteChar) {
- str[0] = c;
- return 1;
- } else if (c <= kMaxTwoByteChar) {
- str[0] = 0xC0 | (c >> 6);
- str[1] = 0x80 | (c & kMask);
- return 2;
- } else if (c <= kMaxThreeByteChar) {
- if (Utf16::IsTrailSurrogate(c) &&
- Utf16::IsLeadSurrogate(previous)) {
- const int kUnmatchedSize = kSizeOfUnmatchedSurrogate;
- return Encode(str - kUnmatchedSize,
- Utf16::CombineSurrogatePair(previous, c),
- Utf16::kNoPreviousCharacter) - kUnmatchedSize;
- }
- str[0] = 0xE0 | (c >> 12);
- str[1] = 0x80 | ((c >> 6) & kMask);
- str[2] = 0x80 | (c & kMask);
- return 3;
- } else {
- str[0] = 0xF0 | (c >> 18);
- str[1] = 0x80 | ((c >> 12) & kMask);
- str[2] = 0x80 | ((c >> 6) & kMask);
- str[3] = 0x80 | (c & kMask);
- return 4;
- }
-}
-
-
-uchar Utf8::ValueOf(const byte* bytes, unsigned length, unsigned* cursor) {
- if (length <= 0) return kBadChar;
- byte first = bytes[0];
- // Characters between 0000 and 0007F are encoded as a single character
- if (first <= kMaxOneByteChar) {
- *cursor += 1;
- return first;
- }
- return CalculateValue(bytes, length, cursor);
-}
-
-unsigned Utf8::Length(uchar c, int previous) {
- if (c <= kMaxOneByteChar) {
- return 1;
- } else if (c <= kMaxTwoByteChar) {
- return 2;
- } else if (c <= kMaxThreeByteChar) {
- if (Utf16::IsTrailSurrogate(c) &&
- Utf16::IsLeadSurrogate(previous)) {
- return kSizeOfUnmatchedSurrogate - kBytesSavedByCombiningSurrogates;
- }
- return 3;
- } else {
- return 4;
- }
-}
-
-Utf8DecoderBase::Utf8DecoderBase()
- : unbuffered_start_(NULL),
- utf16_length_(0),
- last_byte_of_buffer_unused_(false) {}
-
-Utf8DecoderBase::Utf8DecoderBase(uint16_t* buffer,
- unsigned buffer_length,
- const uint8_t* stream,
- unsigned stream_length) {
- Reset(buffer, buffer_length, stream, stream_length);
-}
-
-template<unsigned kBufferSize>
-Utf8Decoder<kBufferSize>::Utf8Decoder(const char* stream, unsigned length)
- : Utf8DecoderBase(buffer_,
- kBufferSize,
- reinterpret_cast<const uint8_t*>(stream),
- length) {
-}
-
-template<unsigned kBufferSize>
-void Utf8Decoder<kBufferSize>::Reset(const char* stream, unsigned length) {
- Utf8DecoderBase::Reset(buffer_,
- kBufferSize,
- reinterpret_cast<const uint8_t*>(stream),
- length);
-}
-
-template <unsigned kBufferSize>
-unsigned Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data,
- unsigned length) const {
- ASSERT(length > 0);
- if (length > utf16_length_) length = utf16_length_;
- // memcpy everything in buffer.
- unsigned buffer_length =
- last_byte_of_buffer_unused_ ? kBufferSize - 1 : kBufferSize;
- unsigned memcpy_length = length <= buffer_length ? length : buffer_length;
- memcpy(data, buffer_, memcpy_length*sizeof(uint16_t));
- if (length <= buffer_length) return length;
- ASSERT(unbuffered_start_ != NULL);
- // Copy the rest the slow way.
- WriteUtf16Slow(unbuffered_start_,
- data + buffer_length,
- length - buffer_length);
- return length;
-}
-
-} // namespace unibrow
-
-#endif // V8_UNICODE_INL_H_
diff --git a/src/3rdparty/v8/src/unicode.cc b/src/3rdparty/v8/src/unicode.cc
deleted file mode 100644
index 04065b0..0000000
--- a/src/3rdparty/v8/src/unicode.cc
+++ /dev/null
@@ -1,1861 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// This file was generated at 2012-03-06 09:55:58.934483
-
-#include "unicode-inl.h"
-#include <stdlib.h>
-#include <stdio.h>
-
-namespace unibrow {
-
-static const int kStartBit = (1 << 30);
-static const int kChunkBits = (1 << 13);
-static const uchar kSentinel = static_cast<uchar>(-1);
-
-/**
- * \file
- * Implementations of functions for working with unicode.
- */
-
-typedef signed short int16_t; // NOLINT
-typedef unsigned short uint16_t; // NOLINT
-typedef int int32_t; // NOLINT
-
-// All access to the character table should go through this function.
-template <int D>
-static inline uchar TableGet(const int32_t* table, int index) {
- return table[D * index];
-}
-
-static inline uchar GetEntry(int32_t entry) {
- return entry & (kStartBit - 1);
-}
-
-static inline bool IsStart(int32_t entry) {
- return (entry & kStartBit) != 0;
-}
-
-/**
- * Look up a character in the unicode table using a mix of binary and
- * interpolation search. For a uniformly distributed array
- * interpolation search beats binary search by a wide margin. However,
- * in this case interpolation search degenerates because of some very
- * high values in the lower end of the table so this function uses a
- * combination. The average number of steps to look up the information
- * about a character is around 10, slightly higher if there is no
- * information available about the character.
- */
-static bool LookupPredicate(const int32_t* table, uint16_t size, uchar chr) {
- static const int kEntryDist = 1;
- uint16_t value = chr & (kChunkBits - 1);
- unsigned int low = 0;
- unsigned int high = size - 1;
- while (high != low) {
- unsigned int mid = low + ((high - low) >> 1);
- uchar current_value = GetEntry(TableGet<kEntryDist>(table, mid));
- // If we've found an entry less than or equal to this one, and the
- // next one is not also less than this one, we've arrived.
- if ((current_value <= value) &&
- (mid + 1 == size ||
- GetEntry(TableGet<kEntryDist>(table, mid + 1)) > value)) {
- low = mid;
- break;
- } else if (current_value < value) {
- low = mid + 1;
- } else if (current_value > value) {
- // If we've just checked the bottom-most value and it's not
- // the one we're looking for, we're done.
- if (mid == 0) break;
- high = mid - 1;
- }
- }
- int32_t field = TableGet<kEntryDist>(table, low);
- uchar entry = GetEntry(field);
- bool is_start = IsStart(field);
- return (entry == value) || (entry < value && is_start);
-}
-
-template <int kW>
-struct MultiCharacterSpecialCase {
- static const uchar kEndOfEncoding = kSentinel;
- uchar chars[kW];
-};
-
-// Look up the mapping for the given character in the specified table,
-// which is of the specified length and uses the specified special case
-// mapping for multi-char mappings. The next parameter is the character
-// following the one to map. The result will be written in to the result
-// buffer and the number of characters written will be returned. Finally,
-// if the allow_caching_ptr is non-null then false will be stored in
-// it if the result contains multiple characters or depends on the
-// context.
-// If ranges are linear, a match between a start and end point is
-// offset by the distance between the match and the start. Otherwise
-// the result is the same as for the start point on the entire range.
-template <bool ranges_are_linear, int kW>
-static int LookupMapping(const int32_t* table,
- uint16_t size,
- const MultiCharacterSpecialCase<kW>* multi_chars,
- uchar chr,
- uchar next,
- uchar* result,
- bool* allow_caching_ptr) {
- static const int kEntryDist = 2;
- uint16_t key = chr & (kChunkBits - 1);
- uint16_t chunk_start = chr - key;
- unsigned int low = 0;
- unsigned int high = size - 1;
- while (high != low) {
- unsigned int mid = low + ((high - low) >> 1);
- uchar current_value = GetEntry(TableGet<kEntryDist>(table, mid));
- // If we've found an entry less than or equal to this one, and the next one
- // is not also less than this one, we've arrived.
- if ((current_value <= key) &&
- (mid + 1 == size ||
- GetEntry(TableGet<kEntryDist>(table, mid + 1)) > key)) {
- low = mid;
- break;
- } else if (current_value < key) {
- low = mid + 1;
- } else if (current_value > key) {
- // If we've just checked the bottom-most value and it's not
- // the one we're looking for, we're done.
- if (mid == 0) break;
- high = mid - 1;
- }
- }
- int32_t field = TableGet<kEntryDist>(table, low);
- uchar entry = GetEntry(field);
- bool is_start = IsStart(field);
- bool found = (entry == key) || (entry < key && is_start);
- if (found) {
- int32_t value = table[2 * low + 1];
- if (value == 0) {
- // 0 means not present
- return 0;
- } else if ((value & 3) == 0) {
- // Low bits 0 means a constant offset from the given character.
- if (ranges_are_linear) {
- result[0] = chr + (value >> 2);
- } else {
- result[0] = entry + chunk_start + (value >> 2);
- }
- return 1;
- } else if ((value & 3) == 1) {
- // Low bits 1 means a special case mapping
- if (allow_caching_ptr) *allow_caching_ptr = false;
- const MultiCharacterSpecialCase<kW>& mapping = multi_chars[value >> 2];
- int length = 0;
- for (length = 0; length < kW; length++) {
- uchar mapped = mapping.chars[length];
- if (mapped == MultiCharacterSpecialCase<kW>::kEndOfEncoding) break;
- if (ranges_are_linear) {
- result[length] = mapped + (key - entry);
- } else {
- result[length] = mapped;
- }
- }
- return length;
- } else {
- // Low bits 2 means a really really special case
- if (allow_caching_ptr) *allow_caching_ptr = false;
- // The cases of this switch are defined in unicode.py in the
- // really_special_cases mapping.
- switch (value >> 2) {
- case 1:
- // Really special case 1: upper case sigma. This letter
- // converts to two different lower case sigmas depending on
- // whether or not it occurs at the end of a word.
- if (next != 0 && Letter::Is(next)) {
- result[0] = 0x03C3;
- } else {
- result[0] = 0x03C2;
- }
- return 1;
- default:
- return 0;
- }
- return -1;
- }
- } else {
- return 0;
- }
-}
-
-uchar Utf8::CalculateValue(const byte* str,
- unsigned length,
- unsigned* cursor) {
- // We only get called for non-ASCII characters.
- if (length == 1) {
- *cursor += 1;
- return kBadChar;
- }
- byte first = str[0];
- byte second = str[1] ^ 0x80;
- if (second & 0xC0) {
- *cursor += 1;
- return kBadChar;
- }
- if (first < 0xE0) {
- if (first < 0xC0) {
- *cursor += 1;
- return kBadChar;
- }
- uchar code_point = ((first << 6) | second) & kMaxTwoByteChar;
- if (code_point <= kMaxOneByteChar) {
- *cursor += 1;
- return kBadChar;
- }
- *cursor += 2;
- return code_point;
- }
- if (length == 2) {
- *cursor += 1;
- return kBadChar;
- }
- byte third = str[2] ^ 0x80;
- if (third & 0xC0) {
- *cursor += 1;
- return kBadChar;
- }
- if (first < 0xF0) {
- uchar code_point = ((((first << 6) | second) << 6) | third)
- & kMaxThreeByteChar;
- if (code_point <= kMaxTwoByteChar) {
- *cursor += 1;
- return kBadChar;
- }
- *cursor += 3;
- return code_point;
- }
- if (length == 3) {
- *cursor += 1;
- return kBadChar;
- }
- byte fourth = str[3] ^ 0x80;
- if (fourth & 0xC0) {
- *cursor += 1;
- return kBadChar;
- }
- if (first < 0xF8) {
- uchar code_point = (((((first << 6 | second) << 6) | third) << 6) | fourth)
- & kMaxFourByteChar;
- if (code_point <= kMaxThreeByteChar) {
- *cursor += 1;
- return kBadChar;
- }
- *cursor += 4;
- return code_point;
- }
- *cursor += 1;
- return kBadChar;
-}
-
-
-void Utf8DecoderBase::Reset(uint16_t* buffer,
- unsigned buffer_length,
- const uint8_t* stream,
- unsigned stream_length) {
- // Assume everything will fit in the buffer and stream won't be needed.
- last_byte_of_buffer_unused_ = false;
- unbuffered_start_ = NULL;
- bool writing_to_buffer = true;
- // Loop until stream is read, writing to buffer as long as buffer has space.
- unsigned utf16_length = 0;
- while (stream_length != 0) {
- unsigned cursor = 0;
- uint32_t character = Utf8::ValueOf(stream, stream_length, &cursor);
- ASSERT(cursor > 0 && cursor <= stream_length);
- stream += cursor;
- stream_length -= cursor;
- bool is_two_characters = character > Utf16::kMaxNonSurrogateCharCode;
- utf16_length += is_two_characters ? 2 : 1;
- // Don't need to write to the buffer, but still need utf16_length.
- if (!writing_to_buffer) continue;
- // Write out the characters to the buffer.
- // Must check for equality with buffer_length as we've already updated it.
- if (utf16_length <= buffer_length) {
- if (is_two_characters) {
- *buffer++ = Utf16::LeadSurrogate(character);
- *buffer++ = Utf16::TrailSurrogate(character);
- } else {
- *buffer++ = character;
- }
- if (utf16_length == buffer_length) {
- // Just wrote last character of buffer
- writing_to_buffer = false;
- unbuffered_start_ = stream;
- }
- continue;
- }
- // Have gone over buffer.
- // Last char of buffer is unused, set cursor back.
- ASSERT(is_two_characters);
- writing_to_buffer = false;
- last_byte_of_buffer_unused_ = true;
- unbuffered_start_ = stream - cursor;
- }
- utf16_length_ = utf16_length;
-}
-
-
-void Utf8DecoderBase::WriteUtf16Slow(const uint8_t* stream,
- uint16_t* data,
- unsigned data_length) {
- while (data_length != 0) {
- unsigned cursor = 0;
- uint32_t character = Utf8::ValueOf(stream, Utf8::kMaxEncodedSize, &cursor);
- // There's a total lack of bounds checking for stream
- // as it was already done in Reset.
- stream += cursor;
- if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- *data++ = Utf16::LeadSurrogate(character);
- *data++ = Utf16::TrailSurrogate(character);
- ASSERT(data_length > 1);
- data_length -= 2;
- } else {
- *data++ = character;
- data_length -= 1;
- }
- }
-}
-
-
-// Uppercase: point.category == 'Lu'
-
-static const uint16_t kUppercaseTable0Size = 450;
-static const int32_t kUppercaseTable0[450] = {
- 1073741889, 90, 1073742016, 214, 1073742040, 222, 256, 258, // NOLINT
- 260, 262, 264, 266, 268, 270, 272, 274, // NOLINT
- 276, 278, 280, 282, 284, 286, 288, 290, // NOLINT
- 292, 294, 296, 298, 300, 302, 304, 306, // NOLINT
- 308, 310, 313, 315, 317, 319, 321, 323, // NOLINT
- 325, 327, 330, 332, 334, 336, 338, 340, // NOLINT
- 342, 344, 346, 348, 350, 352, 354, 356, // NOLINT
- 358, 360, 362, 364, 366, 368, 370, 372, // NOLINT
- 374, 1073742200, 377, 379, 381, 1073742209, 386, 388, // NOLINT
- 1073742214, 391, 1073742217, 395, 1073742222, 401, 1073742227, 404, // NOLINT
- 1073742230, 408, 1073742236, 413, 1073742239, 416, 418, 420, // NOLINT
- 1073742246, 423, 425, 428, 1073742254, 431, 1073742257, 435, // NOLINT
- 437, 1073742263, 440, 444, 452, 455, 458, 461, // NOLINT
- 463, 465, 467, 469, 471, 473, 475, 478, // NOLINT
- 480, 482, 484, 486, 488, 490, 492, 494, // NOLINT
- 497, 500, 1073742326, 504, 506, 508, 510, 512, // NOLINT
- 514, 516, 518, 520, 522, 524, 526, 528, // NOLINT
- 530, 532, 534, 536, 538, 540, 542, 544, // NOLINT
- 546, 548, 550, 552, 554, 556, 558, 560, // NOLINT
- 562, 1073742394, 571, 1073742397, 574, 577, 1073742403, 582, // NOLINT
- 584, 586, 588, 590, 880, 882, 886, 902, // NOLINT
- 1073742728, 906, 908, 1073742734, 911, 1073742737, 929, 1073742755, // NOLINT
- 939, 975, 1073742802, 980, 984, 986, 988, 990, // NOLINT
- 992, 994, 996, 998, 1000, 1002, 1004, 1006, // NOLINT
- 1012, 1015, 1073742841, 1018, 1073742845, 1071, 1120, 1122, // NOLINT
- 1124, 1126, 1128, 1130, 1132, 1134, 1136, 1138, // NOLINT
- 1140, 1142, 1144, 1146, 1148, 1150, 1152, 1162, // NOLINT
- 1164, 1166, 1168, 1170, 1172, 1174, 1176, 1178, // NOLINT
- 1180, 1182, 1184, 1186, 1188, 1190, 1192, 1194, // NOLINT
- 1196, 1198, 1200, 1202, 1204, 1206, 1208, 1210, // NOLINT
- 1212, 1214, 1073743040, 1217, 1219, 1221, 1223, 1225, // NOLINT
- 1227, 1229, 1232, 1234, 1236, 1238, 1240, 1242, // NOLINT
- 1244, 1246, 1248, 1250, 1252, 1254, 1256, 1258, // NOLINT
- 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, // NOLINT
- 1276, 1278, 1280, 1282, 1284, 1286, 1288, 1290, // NOLINT
- 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, // NOLINT
- 1308, 1310, 1312, 1314, 1316, 1318, 1073743153, 1366, // NOLINT
- 1073746080, 4293, 4295, 4301, 7680, 7682, 7684, 7686, // NOLINT
- 7688, 7690, 7692, 7694, 7696, 7698, 7700, 7702, // NOLINT
- 7704, 7706, 7708, 7710, 7712, 7714, 7716, 7718, // NOLINT
- 7720, 7722, 7724, 7726, 7728, 7730, 7732, 7734, // NOLINT
- 7736, 7738, 7740, 7742, 7744, 7746, 7748, 7750, // NOLINT
- 7752, 7754, 7756, 7758, 7760, 7762, 7764, 7766, // NOLINT
- 7768, 7770, 7772, 7774, 7776, 7778, 7780, 7782, // NOLINT
- 7784, 7786, 7788, 7790, 7792, 7794, 7796, 7798, // NOLINT
- 7800, 7802, 7804, 7806, 7808, 7810, 7812, 7814, // NOLINT
- 7816, 7818, 7820, 7822, 7824, 7826, 7828, 7838, // NOLINT
- 7840, 7842, 7844, 7846, 7848, 7850, 7852, 7854, // NOLINT
- 7856, 7858, 7860, 7862, 7864, 7866, 7868, 7870, // NOLINT
- 7872, 7874, 7876, 7878, 7880, 7882, 7884, 7886, // NOLINT
- 7888, 7890, 7892, 7894, 7896, 7898, 7900, 7902, // NOLINT
- 7904, 7906, 7908, 7910, 7912, 7914, 7916, 7918, // NOLINT
- 7920, 7922, 7924, 7926, 7928, 7930, 7932, 7934, // NOLINT
- 1073749768, 7951, 1073749784, 7965, 1073749800, 7983, 1073749816, 7999, // NOLINT
- 1073749832, 8013, 8025, 8027, 8029, 8031, 1073749864, 8047, // NOLINT
- 1073749944, 8123, 1073749960, 8139, 1073749976, 8155, 1073749992, 8172, // NOLINT
- 1073750008, 8187 }; // NOLINT
-static const uint16_t kUppercaseTable1Size = 86;
-static const int32_t kUppercaseTable1[86] = {
- 258, 263, 1073742091, 269, 1073742096, 274, 277, 1073742105, // NOLINT
- 285, 292, 294, 296, 1073742122, 301, 1073742128, 307, // NOLINT
- 1073742142, 319, 325, 387, 1073744896, 3118, 3168, 1073744994, // NOLINT
- 3172, 3175, 3177, 3179, 1073745005, 3184, 3186, 3189, // NOLINT
- 1073745022, 3200, 3202, 3204, 3206, 3208, 3210, 3212, // NOLINT
- 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, // NOLINT
- 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, // NOLINT
- 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, // NOLINT
- 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, // NOLINT
- 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, // NOLINT
- 3294, 3296, 3298, 3307, 3309, 3314 }; // NOLINT
-static const uint16_t kUppercaseTable5Size = 91;
-static const int32_t kUppercaseTable5[91] = {
- 1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614, // NOLINT
- 1616, 1618, 1620, 1622, 1624, 1626, 1628, 1630, // NOLINT
- 1632, 1634, 1636, 1638, 1640, 1642, 1644, 1664, // NOLINT
- 1666, 1668, 1670, 1672, 1674, 1676, 1678, 1680, // NOLINT
- 1682, 1684, 1686, 1826, 1828, 1830, 1832, 1834, // NOLINT
- 1836, 1838, 1842, 1844, 1846, 1848, 1850, 1852, // NOLINT
- 1854, 1856, 1858, 1860, 1862, 1864, 1866, 1868, // NOLINT
- 1870, 1872, 1874, 1876, 1878, 1880, 1882, 1884, // NOLINT
- 1886, 1888, 1890, 1892, 1894, 1896, 1898, 1900, // NOLINT
- 1902, 1913, 1915, 1073743741, 1918, 1920, 1922, 1924, // NOLINT
- 1926, 1931, 1933, 1936, 1938, 1952, 1954, 1956, // NOLINT
- 1958, 1960, 1962 }; // NOLINT
-static const uint16_t kUppercaseTable7Size = 2;
-static const int32_t kUppercaseTable7[2] = {
- 1073749793, 7994 }; // NOLINT
-bool Uppercase::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kUppercaseTable0,
- kUppercaseTable0Size,
- c);
- case 1: return LookupPredicate(kUppercaseTable1,
- kUppercaseTable1Size,
- c);
- case 5: return LookupPredicate(kUppercaseTable5,
- kUppercaseTable5Size,
- c);
- case 7: return LookupPredicate(kUppercaseTable7,
- kUppercaseTable7Size,
- c);
- default: return false;
- }
-}
-
-// Lowercase: point.category == 'Ll'
-
-static const uint16_t kLowercaseTable0Size = 463;
-static const int32_t kLowercaseTable0[463] = {
- 1073741921, 122, 181, 1073742047, 246, 1073742072, 255, 257, // NOLINT
- 259, 261, 263, 265, 267, 269, 271, 273, // NOLINT
- 275, 277, 279, 281, 283, 285, 287, 289, // NOLINT
- 291, 293, 295, 297, 299, 301, 303, 305, // NOLINT
- 307, 309, 1073742135, 312, 314, 316, 318, 320, // NOLINT
- 322, 324, 326, 1073742152, 329, 331, 333, 335, // NOLINT
- 337, 339, 341, 343, 345, 347, 349, 351, // NOLINT
- 353, 355, 357, 359, 361, 363, 365, 367, // NOLINT
- 369, 371, 373, 375, 378, 380, 1073742206, 384, // NOLINT
- 387, 389, 392, 1073742220, 397, 402, 405, 1073742233, // NOLINT
- 411, 414, 417, 419, 421, 424, 1073742250, 427, // NOLINT
- 429, 432, 436, 438, 1073742265, 442, 1073742269, 447, // NOLINT
- 454, 457, 460, 462, 464, 466, 468, 470, // NOLINT
- 472, 474, 1073742300, 477, 479, 481, 483, 485, // NOLINT
- 487, 489, 491, 493, 1073742319, 496, 499, 501, // NOLINT
- 505, 507, 509, 511, 513, 515, 517, 519, // NOLINT
- 521, 523, 525, 527, 529, 531, 533, 535, // NOLINT
- 537, 539, 541, 543, 545, 547, 549, 551, // NOLINT
- 553, 555, 557, 559, 561, 1073742387, 569, 572, // NOLINT
- 1073742399, 576, 578, 583, 585, 587, 589, 1073742415, // NOLINT
- 659, 1073742485, 687, 881, 883, 887, 1073742715, 893, // NOLINT
- 912, 1073742764, 974, 1073742800, 977, 1073742805, 983, 985, // NOLINT
- 987, 989, 991, 993, 995, 997, 999, 1001, // NOLINT
- 1003, 1005, 1073742831, 1011, 1013, 1016, 1073742843, 1020, // NOLINT
- 1073742896, 1119, 1121, 1123, 1125, 1127, 1129, 1131, // NOLINT
- 1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147, // NOLINT
- 1149, 1151, 1153, 1163, 1165, 1167, 1169, 1171, // NOLINT
- 1173, 1175, 1177, 1179, 1181, 1183, 1185, 1187, // NOLINT
- 1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203, // NOLINT
- 1205, 1207, 1209, 1211, 1213, 1215, 1218, 1220, // NOLINT
- 1222, 1224, 1226, 1228, 1073743054, 1231, 1233, 1235, // NOLINT
- 1237, 1239, 1241, 1243, 1245, 1247, 1249, 1251, // NOLINT
- 1253, 1255, 1257, 1259, 1261, 1263, 1265, 1267, // NOLINT
- 1269, 1271, 1273, 1275, 1277, 1279, 1281, 1283, // NOLINT
- 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, // NOLINT
- 1301, 1303, 1305, 1307, 1309, 1311, 1313, 1315, // NOLINT
- 1317, 1319, 1073743201, 1415, 1073749248, 7467, 1073749355, 7543, // NOLINT
- 1073749369, 7578, 7681, 7683, 7685, 7687, 7689, 7691, // NOLINT
- 7693, 7695, 7697, 7699, 7701, 7703, 7705, 7707, // NOLINT
- 7709, 7711, 7713, 7715, 7717, 7719, 7721, 7723, // NOLINT
- 7725, 7727, 7729, 7731, 7733, 7735, 7737, 7739, // NOLINT
- 7741, 7743, 7745, 7747, 7749, 7751, 7753, 7755, // NOLINT
- 7757, 7759, 7761, 7763, 7765, 7767, 7769, 7771, // NOLINT
- 7773, 7775, 7777, 7779, 7781, 7783, 7785, 7787, // NOLINT
- 7789, 7791, 7793, 7795, 7797, 7799, 7801, 7803, // NOLINT
- 7805, 7807, 7809, 7811, 7813, 7815, 7817, 7819, // NOLINT
- 7821, 7823, 7825, 7827, 1073749653, 7837, 7839, 7841, // NOLINT
- 7843, 7845, 7847, 7849, 7851, 7853, 7855, 7857, // NOLINT
- 7859, 7861, 7863, 7865, 7867, 7869, 7871, 7873, // NOLINT
- 7875, 7877, 7879, 7881, 7883, 7885, 7887, 7889, // NOLINT
- 7891, 7893, 7895, 7897, 7899, 7901, 7903, 7905, // NOLINT
- 7907, 7909, 7911, 7913, 7915, 7917, 7919, 7921, // NOLINT
- 7923, 7925, 7927, 7929, 7931, 7933, 1073749759, 7943, // NOLINT
- 1073749776, 7957, 1073749792, 7975, 1073749808, 7991, 1073749824, 8005, // NOLINT
- 1073749840, 8023, 1073749856, 8039, 1073749872, 8061, 1073749888, 8071, // NOLINT
- 1073749904, 8087, 1073749920, 8103, 1073749936, 8116, 1073749942, 8119, // NOLINT
- 8126, 1073749954, 8132, 1073749958, 8135, 1073749968, 8147, 1073749974, // NOLINT
- 8151, 1073749984, 8167, 1073750002, 8180, 1073750006, 8183 }; // NOLINT
-static const uint16_t kLowercaseTable1Size = 84;
-static const int32_t kLowercaseTable1[84] = {
- 266, 1073742094, 271, 275, 303, 308, 313, 1073742140, // NOLINT
- 317, 1073742150, 329, 334, 388, 1073744944, 3166, 3169, // NOLINT
- 1073744997, 3174, 3176, 3178, 3180, 3185, 1073745011, 3188, // NOLINT
- 1073745014, 3195, 3201, 3203, 3205, 3207, 3209, 3211, // NOLINT
- 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, // NOLINT
- 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, // NOLINT
- 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, // NOLINT
- 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, // NOLINT
- 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, // NOLINT
- 3293, 3295, 3297, 1073745123, 3300, 3308, 3310, 3315, // NOLINT
- 1073745152, 3365, 3367, 3373 }; // NOLINT
-static const uint16_t kLowercaseTable5Size = 93;
-static const int32_t kLowercaseTable5[93] = {
- 1601, 1603, 1605, 1607, 1609, 1611, 1613, 1615, // NOLINT
- 1617, 1619, 1621, 1623, 1625, 1627, 1629, 1631, // NOLINT
- 1633, 1635, 1637, 1639, 1641, 1643, 1645, 1665, // NOLINT
- 1667, 1669, 1671, 1673, 1675, 1677, 1679, 1681, // NOLINT
- 1683, 1685, 1687, 1827, 1829, 1831, 1833, 1835, // NOLINT
- 1837, 1073743663, 1841, 1843, 1845, 1847, 1849, 1851, // NOLINT
- 1853, 1855, 1857, 1859, 1861, 1863, 1865, 1867, // NOLINT
- 1869, 1871, 1873, 1875, 1877, 1879, 1881, 1883, // NOLINT
- 1885, 1887, 1889, 1891, 1893, 1895, 1897, 1899, // NOLINT
- 1901, 1903, 1073743729, 1912, 1914, 1916, 1919, 1921, // NOLINT
- 1923, 1925, 1927, 1932, 1934, 1937, 1939, 1953, // NOLINT
- 1955, 1957, 1959, 1961, 2042 }; // NOLINT
-static const uint16_t kLowercaseTable7Size = 6;
-static const int32_t kLowercaseTable7[6] = {
- 1073748736, 6918, 1073748755, 6935, 1073749825, 8026 }; // NOLINT
-bool Lowercase::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kLowercaseTable0,
- kLowercaseTable0Size,
- c);
- case 1: return LookupPredicate(kLowercaseTable1,
- kLowercaseTable1Size,
- c);
- case 5: return LookupPredicate(kLowercaseTable5,
- kLowercaseTable5Size,
- c);
- case 7: return LookupPredicate(kLowercaseTable7,
- kLowercaseTable7Size,
- c);
- default: return false;
- }
-}
-
-// Letter: point.category in ['Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl' ]
-
-static const uint16_t kLetterTable0Size = 435;
-static const int32_t kLetterTable0[435] = {
- 1073741889, 90, 1073741921, 122, 170, 181, 186, 1073742016, // NOLINT
- 214, 1073742040, 246, 1073742072, 705, 1073742534, 721, 1073742560, // NOLINT
- 740, 748, 750, 1073742704, 884, 1073742710, 887, 1073742714, // NOLINT
- 893, 902, 1073742728, 906, 908, 1073742734, 929, 1073742755, // NOLINT
- 1013, 1073742839, 1153, 1073742986, 1319, 1073743153, 1366, 1369, // NOLINT
- 1073743201, 1415, 1073743312, 1514, 1073743344, 1522, 1073743392, 1610, // NOLINT
- 1073743470, 1647, 1073743473, 1747, 1749, 1073743589, 1766, 1073743598, // NOLINT
- 1775, 1073743610, 1788, 1791, 1808, 1073743634, 1839, 1073743693, // NOLINT
- 1957, 1969, 1073743818, 2026, 1073743860, 2037, 2042, 1073743872, // NOLINT
- 2069, 2074, 2084, 2088, 1073743936, 2136, 2208, 1073744034, // NOLINT
- 2220, 1073744132, 2361, 2365, 2384, 1073744216, 2401, 1073744241, // NOLINT
- 2423, 1073744249, 2431, 1073744261, 2444, 1073744271, 2448, 1073744275, // NOLINT
- 2472, 1073744298, 2480, 2482, 1073744310, 2489, 2493, 2510, // NOLINT
- 1073744348, 2525, 1073744351, 2529, 1073744368, 2545, 1073744389, 2570, // NOLINT
- 1073744399, 2576, 1073744403, 2600, 1073744426, 2608, 1073744434, 2611, // NOLINT
- 1073744437, 2614, 1073744440, 2617, 1073744473, 2652, 2654, 1073744498, // NOLINT
- 2676, 1073744517, 2701, 1073744527, 2705, 1073744531, 2728, 1073744554, // NOLINT
- 2736, 1073744562, 2739, 1073744565, 2745, 2749, 2768, 1073744608, // NOLINT
- 2785, 1073744645, 2828, 1073744655, 2832, 1073744659, 2856, 1073744682, // NOLINT
- 2864, 1073744690, 2867, 1073744693, 2873, 2877, 1073744732, 2909, // NOLINT
- 1073744735, 2913, 2929, 2947, 1073744773, 2954, 1073744782, 2960, // NOLINT
- 1073744786, 2965, 1073744793, 2970, 2972, 1073744798, 2975, 1073744803, // NOLINT
- 2980, 1073744808, 2986, 1073744814, 3001, 3024, 1073744901, 3084, // NOLINT
- 1073744910, 3088, 1073744914, 3112, 1073744938, 3123, 1073744949, 3129, // NOLINT
- 3133, 1073744984, 3161, 1073744992, 3169, 1073745029, 3212, 1073745038, // NOLINT
- 3216, 1073745042, 3240, 1073745066, 3251, 1073745077, 3257, 3261, // NOLINT
- 3294, 1073745120, 3297, 1073745137, 3314, 1073745157, 3340, 1073745166, // NOLINT
- 3344, 1073745170, 3386, 3389, 3406, 1073745248, 3425, 1073745274, // NOLINT
- 3455, 1073745285, 3478, 1073745306, 3505, 1073745331, 3515, 3517, // NOLINT
- 1073745344, 3526, 1073745409, 3632, 1073745458, 3635, 1073745472, 3654, // NOLINT
- 1073745537, 3714, 3716, 1073745543, 3720, 3722, 3725, 1073745556, // NOLINT
- 3735, 1073745561, 3743, 1073745569, 3747, 3749, 3751, 1073745578, // NOLINT
- 3755, 1073745581, 3760, 1073745586, 3763, 3773, 1073745600, 3780, // NOLINT
- 3782, 1073745628, 3807, 3840, 1073745728, 3911, 1073745737, 3948, // NOLINT
- 1073745800, 3980, 1073745920, 4138, 4159, 1073746000, 4181, 1073746010, // NOLINT
- 4189, 4193, 1073746021, 4198, 1073746030, 4208, 1073746037, 4225, // NOLINT
- 4238, 1073746080, 4293, 4295, 4301, 1073746128, 4346, 1073746172, // NOLINT
- 4680, 1073746506, 4685, 1073746512, 4694, 4696, 1073746522, 4701, // NOLINT
- 1073746528, 4744, 1073746570, 4749, 1073746576, 4784, 1073746610, 4789, // NOLINT
- 1073746616, 4798, 4800, 1073746626, 4805, 1073746632, 4822, 1073746648, // NOLINT
- 4880, 1073746706, 4885, 1073746712, 4954, 1073746816, 5007, 1073746848, // NOLINT
- 5108, 1073746945, 5740, 1073747567, 5759, 1073747585, 5786, 1073747616, // NOLINT
- 5866, 1073747694, 5872, 1073747712, 5900, 1073747726, 5905, 1073747744, // NOLINT
- 5937, 1073747776, 5969, 1073747808, 5996, 1073747822, 6000, 1073747840, // NOLINT
- 6067, 6103, 6108, 1073748000, 6263, 1073748096, 6312, 6314, // NOLINT
- 1073748144, 6389, 1073748224, 6428, 1073748304, 6509, 1073748336, 6516, // NOLINT
- 1073748352, 6571, 1073748417, 6599, 1073748480, 6678, 1073748512, 6740, // NOLINT
- 6823, 1073748741, 6963, 1073748805, 6987, 1073748867, 7072, 1073748910, // NOLINT
- 7087, 1073748922, 7141, 1073748992, 7203, 1073749069, 7247, 1073749082, // NOLINT
- 7293, 1073749225, 7404, 1073749230, 7409, 1073749237, 7414, 1073749248, // NOLINT
- 7615, 1073749504, 7957, 1073749784, 7965, 1073749792, 8005, 1073749832, // NOLINT
- 8013, 1073749840, 8023, 8025, 8027, 8029, 1073749855, 8061, // NOLINT
- 1073749888, 8116, 1073749942, 8124, 8126, 1073749954, 8132, 1073749958, // NOLINT
- 8140, 1073749968, 8147, 1073749974, 8155, 1073749984, 8172, 1073750002, // NOLINT
- 8180, 1073750006, 8188 }; // NOLINT
-static const uint16_t kLetterTable1Size = 87;
-static const int32_t kLetterTable1[87] = {
- 113, 127, 1073741968, 156, 258, 263, 1073742090, 275, // NOLINT
- 277, 1073742105, 285, 292, 294, 296, 1073742122, 301, // NOLINT
- 1073742127, 313, 1073742140, 319, 1073742149, 329, 334, 1073742176, // NOLINT
- 392, 1073744896, 3118, 1073744944, 3166, 1073744992, 3300, 1073745131, // NOLINT
- 3310, 1073745138, 3315, 1073745152, 3365, 3367, 3373, 1073745200, // NOLINT
- 3431, 3439, 1073745280, 3478, 1073745312, 3494, 1073745320, 3502, // NOLINT
- 1073745328, 3510, 1073745336, 3518, 1073745344, 3526, 1073745352, 3534, // NOLINT
- 1073745360, 3542, 1073745368, 3550, 3631, 1073745925, 4103, 1073745953, // NOLINT
- 4137, 1073745969, 4149, 1073745976, 4156, 1073745985, 4246, 1073746077, // NOLINT
- 4255, 1073746081, 4346, 1073746172, 4351, 1073746181, 4397, 1073746225, // NOLINT
- 4494, 1073746336, 4538, 1073746416, 4607, 1073746944, 8191 }; // NOLINT
-static const uint16_t kLetterTable2Size = 4;
-static const int32_t kLetterTable2[4] = {
- 1073741824, 3509, 1073745408, 8191 }; // NOLINT
-static const uint16_t kLetterTable3Size = 2;
-static const int32_t kLetterTable3[2] = {
- 1073741824, 8191 }; // NOLINT
-static const uint16_t kLetterTable4Size = 2;
-static const int32_t kLetterTable4[2] = {
- 1073741824, 8140 }; // NOLINT
-static const uint16_t kLetterTable5Size = 88;
-static const int32_t kLetterTable5[88] = {
- 1073741824, 1164, 1073743056, 1277, 1073743104, 1548, 1073743376, 1567, // NOLINT
- 1073743402, 1579, 1073743424, 1646, 1073743487, 1687, 1073743520, 1775, // NOLINT
- 1073743639, 1823, 1073743650, 1928, 1073743755, 1934, 1073743760, 1939, // NOLINT
- 1073743776, 1962, 1073743864, 2049, 1073743875, 2053, 1073743879, 2058, // NOLINT
- 1073743884, 2082, 1073743936, 2163, 1073744002, 2227, 1073744114, 2295, // NOLINT
- 2299, 1073744138, 2341, 1073744176, 2374, 1073744224, 2428, 1073744260, // NOLINT
- 2482, 2511, 1073744384, 2600, 1073744448, 2626, 1073744452, 2635, // NOLINT
- 1073744480, 2678, 2682, 1073744512, 2735, 2737, 1073744565, 2742, // NOLINT
- 1073744569, 2749, 2752, 2754, 1073744603, 2781, 1073744608, 2794, // NOLINT
- 1073744626, 2804, 1073744641, 2822, 1073744649, 2830, 1073744657, 2838, // NOLINT
- 1073744672, 2854, 1073744680, 2862, 1073744832, 3042, 1073744896, 8191 }; // NOLINT
-static const uint16_t kLetterTable6Size = 6;
-static const int32_t kLetterTable6[6] = {
- 1073741824, 6051, 1073747888, 6086, 1073747915, 6139 }; // NOLINT
-static const uint16_t kLetterTable7Size = 48;
-static const int32_t kLetterTable7[48] = {
- 1073748224, 6765, 1073748592, 6873, 1073748736, 6918, 1073748755, 6935, // NOLINT
- 6941, 1073748767, 6952, 1073748778, 6966, 1073748792, 6972, 6974, // NOLINT
- 1073748800, 6977, 1073748803, 6980, 1073748806, 7089, 1073748947, 7485, // NOLINT
- 1073749328, 7567, 1073749394, 7623, 1073749488, 7675, 1073749616, 7796, // NOLINT
- 1073749622, 7932, 1073749793, 7994, 1073749825, 8026, 1073749862, 8126, // NOLINT
- 1073749954, 8135, 1073749962, 8143, 1073749970, 8151, 1073749978, 8156 }; // NOLINT
-bool Letter::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kLetterTable0,
- kLetterTable0Size,
- c);
- case 1: return LookupPredicate(kLetterTable1,
- kLetterTable1Size,
- c);
- case 2: return LookupPredicate(kLetterTable2,
- kLetterTable2Size,
- c);
- case 3: return LookupPredicate(kLetterTable3,
- kLetterTable3Size,
- c);
- case 4: return LookupPredicate(kLetterTable4,
- kLetterTable4Size,
- c);
- case 5: return LookupPredicate(kLetterTable5,
- kLetterTable5Size,
- c);
- case 6: return LookupPredicate(kLetterTable6,
- kLetterTable6Size,
- c);
- case 7: return LookupPredicate(kLetterTable7,
- kLetterTable7Size,
- c);
- default: return false;
- }
-}
-
-// Space: point.category == 'Zs'
-
-static const uint16_t kSpaceTable0Size = 4;
-static const int32_t kSpaceTable0[4] = {
- 32, 160, 5760, 6158 }; // NOLINT
-static const uint16_t kSpaceTable1Size = 5;
-static const int32_t kSpaceTable1[5] = {
- 1073741824, 10, 47, 95, 4096 }; // NOLINT
-bool Space::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kSpaceTable0,
- kSpaceTable0Size,
- c);
- case 1: return LookupPredicate(kSpaceTable1,
- kSpaceTable1Size,
- c);
- default: return false;
- }
-}
-
-// Number: point.category == 'Nd'
-
-static const uint16_t kNumberTable0Size = 56;
-static const int32_t kNumberTable0[56] = {
- 1073741872, 57, 1073743456, 1641, 1073743600, 1785, 1073743808, 1993, // NOLINT
- 1073744230, 2415, 1073744358, 2543, 1073744486, 2671, 1073744614, 2799, // NOLINT
- 1073744742, 2927, 1073744870, 3055, 1073744998, 3183, 1073745126, 3311, // NOLINT
- 1073745254, 3439, 1073745488, 3673, 1073745616, 3801, 1073745696, 3881, // NOLINT
- 1073745984, 4169, 1073746064, 4249, 1073747936, 6121, 1073747984, 6169, // NOLINT
- 1073748294, 6479, 1073748432, 6617, 1073748608, 6793, 1073748624, 6809, // NOLINT
- 1073748816, 7001, 1073748912, 7097, 1073749056, 7241, 1073749072, 7257 }; // NOLINT
-static const uint16_t kNumberTable5Size = 12;
-static const int32_t kNumberTable5[12] = {
- 1073743392, 1577, 1073744080, 2265, 1073744128, 2313, 1073744336, 2521, // NOLINT
- 1073744464, 2649, 1073744880, 3065 }; // NOLINT
-static const uint16_t kNumberTable7Size = 2;
-static const int32_t kNumberTable7[2] = {
- 1073749776, 7961 }; // NOLINT
-bool Number::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kNumberTable0,
- kNumberTable0Size,
- c);
- case 5: return LookupPredicate(kNumberTable5,
- kNumberTable5Size,
- c);
- case 7: return LookupPredicate(kNumberTable7,
- kNumberTable7Size,
- c);
- default: return false;
- }
-}
-
-// WhiteSpace: 'Ws' in point.properties
-
-static const uint16_t kWhiteSpaceTable0Size = 7;
-static const int32_t kWhiteSpaceTable0[7] = {
- 1073741833, 13, 32, 133, 160, 5760, 6158 }; // NOLINT
-static const uint16_t kWhiteSpaceTable1Size = 7;
-static const int32_t kWhiteSpaceTable1[7] = {
- 1073741824, 10, 1073741864, 41, 47, 95, 4096 }; // NOLINT
-bool WhiteSpace::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kWhiteSpaceTable0,
- kWhiteSpaceTable0Size,
- c);
- case 1: return LookupPredicate(kWhiteSpaceTable1,
- kWhiteSpaceTable1Size,
- c);
- default: return false;
- }
-}
-
-// LineTerminator: 'Lt' in point.properties
-
-static const uint16_t kLineTerminatorTable0Size = 2;
-static const int32_t kLineTerminatorTable0[2] = {
- 10, 13 }; // NOLINT
-static const uint16_t kLineTerminatorTable1Size = 2;
-static const int32_t kLineTerminatorTable1[2] = {
- 1073741864, 41 }; // NOLINT
-bool LineTerminator::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kLineTerminatorTable0,
- kLineTerminatorTable0Size,
- c);
- case 1: return LookupPredicate(kLineTerminatorTable1,
- kLineTerminatorTable1Size,
- c);
- default: return false;
- }
-}
-
-// CombiningMark: point.category in ['Mn', 'Mc']
-
-static const uint16_t kCombiningMarkTable0Size = 258;
-static const int32_t kCombiningMarkTable0[258] = {
- 1073742592, 879, 1073742979, 1159, 1073743249, 1469, 1471, 1073743297, // NOLINT
- 1474, 1073743300, 1477, 1479, 1073743376, 1562, 1073743435, 1631, // NOLINT
- 1648, 1073743574, 1756, 1073743583, 1764, 1073743591, 1768, 1073743594, // NOLINT
- 1773, 1809, 1073743664, 1866, 1073743782, 1968, 1073743851, 2035, // NOLINT
- 1073743894, 2073, 1073743899, 2083, 1073743909, 2087, 1073743913, 2093, // NOLINT
- 1073743961, 2139, 1073744100, 2302, 1073744128, 2307, 1073744186, 2364, // NOLINT
- 1073744190, 2383, 1073744209, 2391, 1073744226, 2403, 1073744257, 2435, // NOLINT
- 2492, 1073744318, 2500, 1073744327, 2504, 1073744331, 2509, 2519, // NOLINT
- 1073744354, 2531, 1073744385, 2563, 2620, 1073744446, 2626, 1073744455, // NOLINT
- 2632, 1073744459, 2637, 2641, 1073744496, 2673, 2677, 1073744513, // NOLINT
- 2691, 2748, 1073744574, 2757, 1073744583, 2761, 1073744587, 2765, // NOLINT
- 1073744610, 2787, 1073744641, 2819, 2876, 1073744702, 2884, 1073744711, // NOLINT
- 2888, 1073744715, 2893, 1073744726, 2903, 1073744738, 2915, 2946, // NOLINT
- 1073744830, 3010, 1073744838, 3016, 1073744842, 3021, 3031, 1073744897, // NOLINT
- 3075, 1073744958, 3140, 1073744966, 3144, 1073744970, 3149, 1073744981, // NOLINT
- 3158, 1073744994, 3171, 1073745026, 3203, 3260, 1073745086, 3268, // NOLINT
- 1073745094, 3272, 1073745098, 3277, 1073745109, 3286, 1073745122, 3299, // NOLINT
- 1073745154, 3331, 1073745214, 3396, 1073745222, 3400, 1073745226, 3405, // NOLINT
- 3415, 1073745250, 3427, 1073745282, 3459, 3530, 1073745359, 3540, // NOLINT
- 3542, 1073745368, 3551, 1073745394, 3571, 3633, 1073745460, 3642, // NOLINT
- 1073745479, 3662, 3761, 1073745588, 3769, 1073745595, 3772, 1073745608, // NOLINT
- 3789, 1073745688, 3865, 3893, 3895, 3897, 1073745726, 3903, // NOLINT
- 1073745777, 3972, 1073745798, 3975, 1073745805, 3991, 1073745817, 4028, // NOLINT
- 4038, 1073745963, 4158, 1073746006, 4185, 1073746014, 4192, 1073746018, // NOLINT
- 4196, 1073746023, 4205, 1073746033, 4212, 1073746050, 4237, 4239, // NOLINT
- 1073746074, 4253, 1073746781, 4959, 1073747730, 5908, 1073747762, 5940, // NOLINT
- 1073747794, 5971, 1073747826, 6003, 1073747892, 6099, 6109, 1073747979, // NOLINT
- 6157, 6313, 1073748256, 6443, 1073748272, 6459, 1073748400, 6592, // NOLINT
- 1073748424, 6601, 1073748503, 6683, 1073748565, 6750, 1073748576, 6780, // NOLINT
- 6783, 1073748736, 6916, 1073748788, 6980, 1073748843, 7027, 1073748864, // NOLINT
- 7042, 1073748897, 7085, 1073748966, 7155, 1073749028, 7223, 1073749200, // NOLINT
- 7378, 1073749204, 7400, 7405, 1073749234, 7412, 1073749440, 7654, // NOLINT
- 1073749500, 7679 }; // NOLINT
-static const uint16_t kCombiningMarkTable1Size = 14;
-static const int32_t kCombiningMarkTable1[14] = {
- 1073742032, 220, 225, 1073742053, 240, 1073745135, 3313, 3455, // NOLINT
- 1073745376, 3583, 1073745962, 4143, 1073746073, 4250 }; // NOLINT
-static const uint16_t kCombiningMarkTable5Size = 47;
-static const int32_t kCombiningMarkTable5[47] = {
- 1647, 1073743476, 1661, 1695, 1073743600, 1777, 2050, 2054, // NOLINT
- 2059, 1073743907, 2087, 1073744000, 2177, 1073744052, 2244, 1073744096, // NOLINT
- 2289, 1073744166, 2349, 1073744199, 2387, 1073744256, 2435, 1073744307, // NOLINT
- 2496, 1073744425, 2614, 2627, 1073744460, 2637, 2683, 2736, // NOLINT
- 1073744562, 2740, 1073744567, 2744, 1073744574, 2751, 2753, 1073744619, // NOLINT
- 2799, 1073744629, 2806, 1073744867, 3050, 1073744876, 3053 }; // NOLINT
-static const uint16_t kCombiningMarkTable7Size = 5;
-static const int32_t kCombiningMarkTable7[5] = {
- 6942, 1073749504, 7695, 1073749536, 7718 }; // NOLINT
-bool CombiningMark::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kCombiningMarkTable0,
- kCombiningMarkTable0Size,
- c);
- case 1: return LookupPredicate(kCombiningMarkTable1,
- kCombiningMarkTable1Size,
- c);
- case 5: return LookupPredicate(kCombiningMarkTable5,
- kCombiningMarkTable5Size,
- c);
- case 7: return LookupPredicate(kCombiningMarkTable7,
- kCombiningMarkTable7Size,
- c);
- default: return false;
- }
-}
-
-// ConnectorPunctuation: point.category == 'Pc'
-
-static const uint16_t kConnectorPunctuationTable0Size = 1;
-static const int32_t kConnectorPunctuationTable0[1] = {
- 95 }; // NOLINT
-static const uint16_t kConnectorPunctuationTable1Size = 3;
-static const int32_t kConnectorPunctuationTable1[3] = {
- 1073741887, 64, 84 }; // NOLINT
-static const uint16_t kConnectorPunctuationTable7Size = 5;
-static const int32_t kConnectorPunctuationTable7[5] = {
- 1073749555, 7732, 1073749581, 7759, 7999 }; // NOLINT
-bool ConnectorPunctuation::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kConnectorPunctuationTable0,
- kConnectorPunctuationTable0Size,
- c);
- case 1: return LookupPredicate(kConnectorPunctuationTable1,
- kConnectorPunctuationTable1Size,
- c);
- case 7: return LookupPredicate(kConnectorPunctuationTable7,
- kConnectorPunctuationTable7Size,
- c);
- default: return false;
- }
-}
-
-static const MultiCharacterSpecialCase<2> kToLowercaseMultiStrings0[2] = { // NOLINT
- {{105, 775}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kToLowercaseTable0Size = 483; // NOLINT
-static const int32_t kToLowercaseTable0[966] = {
- 1073741889, 128, 90, 128, 1073742016, 128, 214, 128, 1073742040, 128, 222, 128, 256, 4, 258, 4, // NOLINT
- 260, 4, 262, 4, 264, 4, 266, 4, 268, 4, 270, 4, 272, 4, 274, 4, // NOLINT
- 276, 4, 278, 4, 280, 4, 282, 4, 284, 4, 286, 4, 288, 4, 290, 4, // NOLINT
- 292, 4, 294, 4, 296, 4, 298, 4, 300, 4, 302, 4, 304, 1, 306, 4, // NOLINT
- 308, 4, 310, 4, 313, 4, 315, 4, 317, 4, 319, 4, 321, 4, 323, 4, // NOLINT
- 325, 4, 327, 4, 330, 4, 332, 4, 334, 4, 336, 4, 338, 4, 340, 4, // NOLINT
- 342, 4, 344, 4, 346, 4, 348, 4, 350, 4, 352, 4, 354, 4, 356, 4, // NOLINT
- 358, 4, 360, 4, 362, 4, 364, 4, 366, 4, 368, 4, 370, 4, 372, 4, // NOLINT
- 374, 4, 376, -484, 377, 4, 379, 4, 381, 4, 385, 840, 386, 4, 388, 4, // NOLINT
- 390, 824, 391, 4, 1073742217, 820, 394, 820, 395, 4, 398, 316, 399, 808, 400, 812, // NOLINT
- 401, 4, 403, 820, 404, 828, 406, 844, 407, 836, 408, 4, 412, 844, 413, 852, // NOLINT
- 415, 856, 416, 4, 418, 4, 420, 4, 422, 872, 423, 4, 425, 872, 428, 4, // NOLINT
- 430, 872, 431, 4, 1073742257, 868, 434, 868, 435, 4, 437, 4, 439, 876, 440, 4, // NOLINT
- 444, 4, 452, 8, 453, 4, 455, 8, 456, 4, 458, 8, 459, 4, 461, 4, // NOLINT
- 463, 4, 465, 4, 467, 4, 469, 4, 471, 4, 473, 4, 475, 4, 478, 4, // NOLINT
- 480, 4, 482, 4, 484, 4, 486, 4, 488, 4, 490, 4, 492, 4, 494, 4, // NOLINT
- 497, 8, 498, 4, 500, 4, 502, -388, 503, -224, 504, 4, 506, 4, 508, 4, // NOLINT
- 510, 4, 512, 4, 514, 4, 516, 4, 518, 4, 520, 4, 522, 4, 524, 4, // NOLINT
- 526, 4, 528, 4, 530, 4, 532, 4, 534, 4, 536, 4, 538, 4, 540, 4, // NOLINT
- 542, 4, 544, -520, 546, 4, 548, 4, 550, 4, 552, 4, 554, 4, 556, 4, // NOLINT
- 558, 4, 560, 4, 562, 4, 570, 43180, 571, 4, 573, -652, 574, 43168, 577, 4, // NOLINT
- 579, -780, 580, 276, 581, 284, 582, 4, 584, 4, 586, 4, 588, 4, 590, 4, // NOLINT
- 880, 4, 882, 4, 886, 4, 902, 152, 1073742728, 148, 906, 148, 908, 256, 1073742734, 252, // NOLINT
- 911, 252, 1073742737, 128, 929, 128, 931, 6, 1073742756, 128, 939, 128, 975, 32, 984, 4, // NOLINT
- 986, 4, 988, 4, 990, 4, 992, 4, 994, 4, 996, 4, 998, 4, 1000, 4, // NOLINT
- 1002, 4, 1004, 4, 1006, 4, 1012, -240, 1015, 4, 1017, -28, 1018, 4, 1073742845, -520, // NOLINT
- 1023, -520, 1073742848, 320, 1039, 320, 1073742864, 128, 1071, 128, 1120, 4, 1122, 4, 1124, 4, // NOLINT
- 1126, 4, 1128, 4, 1130, 4, 1132, 4, 1134, 4, 1136, 4, 1138, 4, 1140, 4, // NOLINT
- 1142, 4, 1144, 4, 1146, 4, 1148, 4, 1150, 4, 1152, 4, 1162, 4, 1164, 4, // NOLINT
- 1166, 4, 1168, 4, 1170, 4, 1172, 4, 1174, 4, 1176, 4, 1178, 4, 1180, 4, // NOLINT
- 1182, 4, 1184, 4, 1186, 4, 1188, 4, 1190, 4, 1192, 4, 1194, 4, 1196, 4, // NOLINT
- 1198, 4, 1200, 4, 1202, 4, 1204, 4, 1206, 4, 1208, 4, 1210, 4, 1212, 4, // NOLINT
- 1214, 4, 1216, 60, 1217, 4, 1219, 4, 1221, 4, 1223, 4, 1225, 4, 1227, 4, // NOLINT
- 1229, 4, 1232, 4, 1234, 4, 1236, 4, 1238, 4, 1240, 4, 1242, 4, 1244, 4, // NOLINT
- 1246, 4, 1248, 4, 1250, 4, 1252, 4, 1254, 4, 1256, 4, 1258, 4, 1260, 4, // NOLINT
- 1262, 4, 1264, 4, 1266, 4, 1268, 4, 1270, 4, 1272, 4, 1274, 4, 1276, 4, // NOLINT
- 1278, 4, 1280, 4, 1282, 4, 1284, 4, 1286, 4, 1288, 4, 1290, 4, 1292, 4, // NOLINT
- 1294, 4, 1296, 4, 1298, 4, 1300, 4, 1302, 4, 1304, 4, 1306, 4, 1308, 4, // NOLINT
- 1310, 4, 1312, 4, 1314, 4, 1316, 4, 1318, 4, 1073743153, 192, 1366, 192, 1073746080, 29056, // NOLINT
- 4293, 29056, 4295, 29056, 4301, 29056, 7680, 4, 7682, 4, 7684, 4, 7686, 4, 7688, 4, // NOLINT
- 7690, 4, 7692, 4, 7694, 4, 7696, 4, 7698, 4, 7700, 4, 7702, 4, 7704, 4, // NOLINT
- 7706, 4, 7708, 4, 7710, 4, 7712, 4, 7714, 4, 7716, 4, 7718, 4, 7720, 4, // NOLINT
- 7722, 4, 7724, 4, 7726, 4, 7728, 4, 7730, 4, 7732, 4, 7734, 4, 7736, 4, // NOLINT
- 7738, 4, 7740, 4, 7742, 4, 7744, 4, 7746, 4, 7748, 4, 7750, 4, 7752, 4, // NOLINT
- 7754, 4, 7756, 4, 7758, 4, 7760, 4, 7762, 4, 7764, 4, 7766, 4, 7768, 4, // NOLINT
- 7770, 4, 7772, 4, 7774, 4, 7776, 4, 7778, 4, 7780, 4, 7782, 4, 7784, 4, // NOLINT
- 7786, 4, 7788, 4, 7790, 4, 7792, 4, 7794, 4, 7796, 4, 7798, 4, 7800, 4, // NOLINT
- 7802, 4, 7804, 4, 7806, 4, 7808, 4, 7810, 4, 7812, 4, 7814, 4, 7816, 4, // NOLINT
- 7818, 4, 7820, 4, 7822, 4, 7824, 4, 7826, 4, 7828, 4, 7838, -30460, 7840, 4, // NOLINT
- 7842, 4, 7844, 4, 7846, 4, 7848, 4, 7850, 4, 7852, 4, 7854, 4, 7856, 4, // NOLINT
- 7858, 4, 7860, 4, 7862, 4, 7864, 4, 7866, 4, 7868, 4, 7870, 4, 7872, 4, // NOLINT
- 7874, 4, 7876, 4, 7878, 4, 7880, 4, 7882, 4, 7884, 4, 7886, 4, 7888, 4, // NOLINT
- 7890, 4, 7892, 4, 7894, 4, 7896, 4, 7898, 4, 7900, 4, 7902, 4, 7904, 4, // NOLINT
- 7906, 4, 7908, 4, 7910, 4, 7912, 4, 7914, 4, 7916, 4, 7918, 4, 7920, 4, // NOLINT
- 7922, 4, 7924, 4, 7926, 4, 7928, 4, 7930, 4, 7932, 4, 7934, 4, 1073749768, -32, // NOLINT
- 7951, -32, 1073749784, -32, 7965, -32, 1073749800, -32, 7983, -32, 1073749816, -32, 7999, -32, 1073749832, -32, // NOLINT
- 8013, -32, 8025, -32, 8027, -32, 8029, -32, 8031, -32, 1073749864, -32, 8047, -32, 1073749896, -32, // NOLINT
- 8079, -32, 1073749912, -32, 8095, -32, 1073749928, -32, 8111, -32, 1073749944, -32, 8121, -32, 1073749946, -296, // NOLINT
- 8123, -296, 8124, -36, 1073749960, -344, 8139, -344, 8140, -36, 1073749976, -32, 8153, -32, 1073749978, -400, // NOLINT
- 8155, -400, 1073749992, -32, 8169, -32, 1073749994, -448, 8171, -448, 8172, -28, 1073750008, -512, 8185, -512, // NOLINT
- 1073750010, -504, 8187, -504, 8188, -36 }; // NOLINT
-static const uint16_t kToLowercaseMultiStrings0Size = 2; // NOLINT
-static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings1[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kToLowercaseTable1Size = 79; // NOLINT
-static const int32_t kToLowercaseTable1[158] = {
- 294, -30068, 298, -33532, 299, -33048, 306, 112, 1073742176, 64, 367, 64, 387, 4, 1073743030, 104, // NOLINT
- 1231, 104, 1073744896, 192, 3118, 192, 3168, 4, 3170, -42972, 3171, -15256, 3172, -42908, 3175, 4, // NOLINT
- 3177, 4, 3179, 4, 3181, -43120, 3182, -42996, 3183, -43132, 3184, -43128, 3186, 4, 3189, 4, // NOLINT
- 1073745022, -43260, 3199, -43260, 3200, 4, 3202, 4, 3204, 4, 3206, 4, 3208, 4, 3210, 4, // NOLINT
- 3212, 4, 3214, 4, 3216, 4, 3218, 4, 3220, 4, 3222, 4, 3224, 4, 3226, 4, // NOLINT
- 3228, 4, 3230, 4, 3232, 4, 3234, 4, 3236, 4, 3238, 4, 3240, 4, 3242, 4, // NOLINT
- 3244, 4, 3246, 4, 3248, 4, 3250, 4, 3252, 4, 3254, 4, 3256, 4, 3258, 4, // NOLINT
- 3260, 4, 3262, 4, 3264, 4, 3266, 4, 3268, 4, 3270, 4, 3272, 4, 3274, 4, // NOLINT
- 3276, 4, 3278, 4, 3280, 4, 3282, 4, 3284, 4, 3286, 4, 3288, 4, 3290, 4, // NOLINT
- 3292, 4, 3294, 4, 3296, 4, 3298, 4, 3307, 4, 3309, 4, 3314, 4 }; // NOLINT
-static const uint16_t kToLowercaseMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings5[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kToLowercaseTable5Size = 91; // NOLINT
-static const int32_t kToLowercaseTable5[182] = {
- 1600, 4, 1602, 4, 1604, 4, 1606, 4, 1608, 4, 1610, 4, 1612, 4, 1614, 4, // NOLINT
- 1616, 4, 1618, 4, 1620, 4, 1622, 4, 1624, 4, 1626, 4, 1628, 4, 1630, 4, // NOLINT
- 1632, 4, 1634, 4, 1636, 4, 1638, 4, 1640, 4, 1642, 4, 1644, 4, 1664, 4, // NOLINT
- 1666, 4, 1668, 4, 1670, 4, 1672, 4, 1674, 4, 1676, 4, 1678, 4, 1680, 4, // NOLINT
- 1682, 4, 1684, 4, 1686, 4, 1826, 4, 1828, 4, 1830, 4, 1832, 4, 1834, 4, // NOLINT
- 1836, 4, 1838, 4, 1842, 4, 1844, 4, 1846, 4, 1848, 4, 1850, 4, 1852, 4, // NOLINT
- 1854, 4, 1856, 4, 1858, 4, 1860, 4, 1862, 4, 1864, 4, 1866, 4, 1868, 4, // NOLINT
- 1870, 4, 1872, 4, 1874, 4, 1876, 4, 1878, 4, 1880, 4, 1882, 4, 1884, 4, // NOLINT
- 1886, 4, 1888, 4, 1890, 4, 1892, 4, 1894, 4, 1896, 4, 1898, 4, 1900, 4, // NOLINT
- 1902, 4, 1913, 4, 1915, 4, 1917, -141328, 1918, 4, 1920, 4, 1922, 4, 1924, 4, // NOLINT
- 1926, 4, 1931, 4, 1933, -169120, 1936, 4, 1938, 4, 1952, 4, 1954, 4, 1956, 4, // NOLINT
- 1958, 4, 1960, 4, 1962, -169232 }; // NOLINT
-static const uint16_t kToLowercaseMultiStrings5Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings7[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kToLowercaseTable7Size = 2; // NOLINT
-static const int32_t kToLowercaseTable7[4] = {
- 1073749793, 128, 7994, 128 }; // NOLINT
-static const uint16_t kToLowercaseMultiStrings7Size = 1; // NOLINT
-int ToLowercase::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupMapping<true>(kToLowercaseTable0,
- kToLowercaseTable0Size,
- kToLowercaseMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<true>(kToLowercaseTable1,
- kToLowercaseTable1Size,
- kToLowercaseMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 5: return LookupMapping<true>(kToLowercaseTable5,
- kToLowercaseTable5Size,
- kToLowercaseMultiStrings5,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<true>(kToLowercaseTable7,
- kToLowercaseTable7Size,
- kToLowercaseMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
- }
-}
-
-static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings0[62] = { // NOLINT
- {{83, 83, kSentinel}}, {{700, 78, kSentinel}}, {{74, 780, kSentinel}}, {{921, 776, 769}}, // NOLINT
- {{933, 776, 769}}, {{1333, 1362, kSentinel}}, {{72, 817, kSentinel}}, {{84, 776, kSentinel}}, // NOLINT
- {{87, 778, kSentinel}}, {{89, 778, kSentinel}}, {{65, 702, kSentinel}}, {{933, 787, kSentinel}}, // NOLINT
- {{933, 787, 768}}, {{933, 787, 769}}, {{933, 787, 834}}, {{7944, 921, kSentinel}}, // NOLINT
- {{7945, 921, kSentinel}}, {{7946, 921, kSentinel}}, {{7947, 921, kSentinel}}, {{7948, 921, kSentinel}}, // NOLINT
- {{7949, 921, kSentinel}}, {{7950, 921, kSentinel}}, {{7951, 921, kSentinel}}, {{7976, 921, kSentinel}}, // NOLINT
- {{7977, 921, kSentinel}}, {{7978, 921, kSentinel}}, {{7979, 921, kSentinel}}, {{7980, 921, kSentinel}}, // NOLINT
- {{7981, 921, kSentinel}}, {{7982, 921, kSentinel}}, {{7983, 921, kSentinel}}, {{8040, 921, kSentinel}}, // NOLINT
- {{8041, 921, kSentinel}}, {{8042, 921, kSentinel}}, {{8043, 921, kSentinel}}, {{8044, 921, kSentinel}}, // NOLINT
- {{8045, 921, kSentinel}}, {{8046, 921, kSentinel}}, {{8047, 921, kSentinel}}, {{8122, 921, kSentinel}}, // NOLINT
- {{913, 921, kSentinel}}, {{902, 921, kSentinel}}, {{913, 834, kSentinel}}, {{913, 834, 921}}, // NOLINT
- {{8138, 921, kSentinel}}, {{919, 921, kSentinel}}, {{905, 921, kSentinel}}, {{919, 834, kSentinel}}, // NOLINT
- {{919, 834, 921}}, {{921, 776, 768}}, {{921, 834, kSentinel}}, {{921, 776, 834}}, // NOLINT
- {{933, 776, 768}}, {{929, 787, kSentinel}}, {{933, 834, kSentinel}}, {{933, 776, 834}}, // NOLINT
- {{8186, 921, kSentinel}}, {{937, 921, kSentinel}}, {{911, 921, kSentinel}}, {{937, 834, kSentinel}}, // NOLINT
- {{937, 834, 921}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kToUppercaseTable0Size = 580; // NOLINT
-static const int32_t kToUppercaseTable0[1160] = {
- 1073741921, -128, 122, -128, 181, 2972, 223, 1, 1073742048, -128, 246, -128, 1073742072, -128, 254, -128, // NOLINT
- 255, 484, 257, -4, 259, -4, 261, -4, 263, -4, 265, -4, 267, -4, 269, -4, // NOLINT
- 271, -4, 273, -4, 275, -4, 277, -4, 279, -4, 281, -4, 283, -4, 285, -4, // NOLINT
- 287, -4, 289, -4, 291, -4, 293, -4, 295, -4, 297, -4, 299, -4, 301, -4, // NOLINT
- 303, -4, 305, -928, 307, -4, 309, -4, 311, -4, 314, -4, 316, -4, 318, -4, // NOLINT
- 320, -4, 322, -4, 324, -4, 326, -4, 328, -4, 329, 5, 331, -4, 333, -4, // NOLINT
- 335, -4, 337, -4, 339, -4, 341, -4, 343, -4, 345, -4, 347, -4, 349, -4, // NOLINT
- 351, -4, 353, -4, 355, -4, 357, -4, 359, -4, 361, -4, 363, -4, 365, -4, // NOLINT
- 367, -4, 369, -4, 371, -4, 373, -4, 375, -4, 378, -4, 380, -4, 382, -4, // NOLINT
- 383, -1200, 384, 780, 387, -4, 389, -4, 392, -4, 396, -4, 402, -4, 405, 388, // NOLINT
- 409, -4, 410, 652, 414, 520, 417, -4, 419, -4, 421, -4, 424, -4, 429, -4, // NOLINT
- 432, -4, 436, -4, 438, -4, 441, -4, 445, -4, 447, 224, 453, -4, 454, -8, // NOLINT
- 456, -4, 457, -8, 459, -4, 460, -8, 462, -4, 464, -4, 466, -4, 468, -4, // NOLINT
- 470, -4, 472, -4, 474, -4, 476, -4, 477, -316, 479, -4, 481, -4, 483, -4, // NOLINT
- 485, -4, 487, -4, 489, -4, 491, -4, 493, -4, 495, -4, 496, 9, 498, -4, // NOLINT
- 499, -8, 501, -4, 505, -4, 507, -4, 509, -4, 511, -4, 513, -4, 515, -4, // NOLINT
- 517, -4, 519, -4, 521, -4, 523, -4, 525, -4, 527, -4, 529, -4, 531, -4, // NOLINT
- 533, -4, 535, -4, 537, -4, 539, -4, 541, -4, 543, -4, 547, -4, 549, -4, // NOLINT
- 551, -4, 553, -4, 555, -4, 557, -4, 559, -4, 561, -4, 563, -4, 572, -4, // NOLINT
- 1073742399, 43260, 576, 43260, 578, -4, 583, -4, 585, -4, 587, -4, 589, -4, 591, -4, // NOLINT
- 592, 43132, 593, 43120, 594, 43128, 595, -840, 596, -824, 1073742422, -820, 599, -820, 601, -808, // NOLINT
- 603, -812, 608, -820, 611, -828, 613, 169120, 614, 169232, 616, -836, 617, -844, 619, 42972, // NOLINT
- 623, -844, 625, 42996, 626, -852, 629, -856, 637, 42908, 640, -872, 643, -872, 648, -872, // NOLINT
- 649, -276, 1073742474, -868, 651, -868, 652, -284, 658, -876, 837, 336, 881, -4, 883, -4, // NOLINT
- 887, -4, 1073742715, 520, 893, 520, 912, 13, 940, -152, 1073742765, -148, 943, -148, 944, 17, // NOLINT
- 1073742769, -128, 961, -128, 962, -124, 1073742787, -128, 971, -128, 972, -256, 1073742797, -252, 974, -252, // NOLINT
- 976, -248, 977, -228, 981, -188, 982, -216, 983, -32, 985, -4, 987, -4, 989, -4, // NOLINT
- 991, -4, 993, -4, 995, -4, 997, -4, 999, -4, 1001, -4, 1003, -4, 1005, -4, // NOLINT
- 1007, -4, 1008, -344, 1009, -320, 1010, 28, 1013, -384, 1016, -4, 1019, -4, 1073742896, -128, // NOLINT
- 1103, -128, 1073742928, -320, 1119, -320, 1121, -4, 1123, -4, 1125, -4, 1127, -4, 1129, -4, // NOLINT
- 1131, -4, 1133, -4, 1135, -4, 1137, -4, 1139, -4, 1141, -4, 1143, -4, 1145, -4, // NOLINT
- 1147, -4, 1149, -4, 1151, -4, 1153, -4, 1163, -4, 1165, -4, 1167, -4, 1169, -4, // NOLINT
- 1171, -4, 1173, -4, 1175, -4, 1177, -4, 1179, -4, 1181, -4, 1183, -4, 1185, -4, // NOLINT
- 1187, -4, 1189, -4, 1191, -4, 1193, -4, 1195, -4, 1197, -4, 1199, -4, 1201, -4, // NOLINT
- 1203, -4, 1205, -4, 1207, -4, 1209, -4, 1211, -4, 1213, -4, 1215, -4, 1218, -4, // NOLINT
- 1220, -4, 1222, -4, 1224, -4, 1226, -4, 1228, -4, 1230, -4, 1231, -60, 1233, -4, // NOLINT
- 1235, -4, 1237, -4, 1239, -4, 1241, -4, 1243, -4, 1245, -4, 1247, -4, 1249, -4, // NOLINT
- 1251, -4, 1253, -4, 1255, -4, 1257, -4, 1259, -4, 1261, -4, 1263, -4, 1265, -4, // NOLINT
- 1267, -4, 1269, -4, 1271, -4, 1273, -4, 1275, -4, 1277, -4, 1279, -4, 1281, -4, // NOLINT
- 1283, -4, 1285, -4, 1287, -4, 1289, -4, 1291, -4, 1293, -4, 1295, -4, 1297, -4, // NOLINT
- 1299, -4, 1301, -4, 1303, -4, 1305, -4, 1307, -4, 1309, -4, 1311, -4, 1313, -4, // NOLINT
- 1315, -4, 1317, -4, 1319, -4, 1073743201, -192, 1414, -192, 1415, 21, 7545, 141328, 7549, 15256, // NOLINT
- 7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4, 7695, -4, // NOLINT
- 7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4, 7711, -4, // NOLINT
- 7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4, 7727, -4, // NOLINT
- 7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4, 7743, -4, // NOLINT
- 7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, -4, 7755, -4, 7757, -4, 7759, -4, // NOLINT
- 7761, -4, 7763, -4, 7765, -4, 7767, -4, 7769, -4, 7771, -4, 7773, -4, 7775, -4, // NOLINT
- 7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4, 7791, -4, // NOLINT
- 7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4, 7807, -4, // NOLINT
- 7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4, 7823, -4, // NOLINT
- 7825, -4, 7827, -4, 7829, -4, 7830, 25, 7831, 29, 7832, 33, 7833, 37, 7834, 41, // NOLINT
- 7835, -236, 7841, -4, 7843, -4, 7845, -4, 7847, -4, 7849, -4, 7851, -4, 7853, -4, // NOLINT
- 7855, -4, 7857, -4, 7859, -4, 7861, -4, 7863, -4, 7865, -4, 7867, -4, 7869, -4, // NOLINT
- 7871, -4, 7873, -4, 7875, -4, 7877, -4, 7879, -4, 7881, -4, 7883, -4, 7885, -4, // NOLINT
- 7887, -4, 7889, -4, 7891, -4, 7893, -4, 7895, -4, 7897, -4, 7899, -4, 7901, -4, // NOLINT
- 7903, -4, 7905, -4, 7907, -4, 7909, -4, 7911, -4, 7913, -4, 7915, -4, 7917, -4, // NOLINT
- 7919, -4, 7921, -4, 7923, -4, 7925, -4, 7927, -4, 7929, -4, 7931, -4, 7933, -4, // NOLINT
- 7935, -4, 1073749760, 32, 7943, 32, 1073749776, 32, 7957, 32, 1073749792, 32, 7975, 32, 1073749808, 32, // NOLINT
- 7991, 32, 1073749824, 32, 8005, 32, 8016, 45, 8017, 32, 8018, 49, 8019, 32, 8020, 53, // NOLINT
- 8021, 32, 8022, 57, 8023, 32, 1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344, // NOLINT
- 8053, 344, 1073749878, 400, 8055, 400, 1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504, // NOLINT
- 8061, 504, 8064, 61, 8065, 65, 8066, 69, 8067, 73, 8068, 77, 8069, 81, 8070, 85, // NOLINT
- 8071, 89, 8072, 61, 8073, 65, 8074, 69, 8075, 73, 8076, 77, 8077, 81, 8078, 85, // NOLINT
- 8079, 89, 8080, 93, 8081, 97, 8082, 101, 8083, 105, 8084, 109, 8085, 113, 8086, 117, // NOLINT
- 8087, 121, 8088, 93, 8089, 97, 8090, 101, 8091, 105, 8092, 109, 8093, 113, 8094, 117, // NOLINT
- 8095, 121, 8096, 125, 8097, 129, 8098, 133, 8099, 137, 8100, 141, 8101, 145, 8102, 149, // NOLINT
- 8103, 153, 8104, 125, 8105, 129, 8106, 133, 8107, 137, 8108, 141, 8109, 145, 8110, 149, // NOLINT
- 8111, 153, 1073749936, 32, 8113, 32, 8114, 157, 8115, 161, 8116, 165, 8118, 169, 8119, 173, // NOLINT
- 8124, 161, 8126, -28820, 8130, 177, 8131, 181, 8132, 185, 8134, 189, 8135, 193, 8140, 181, // NOLINT
- 1073749968, 32, 8145, 32, 8146, 197, 8147, 13, 8150, 201, 8151, 205, 1073749984, 32, 8161, 32, // NOLINT
- 8162, 209, 8163, 17, 8164, 213, 8165, 28, 8166, 217, 8167, 221, 8178, 225, 8179, 229, // NOLINT
- 8180, 233, 8182, 237, 8183, 241, 8188, 229 }; // NOLINT
-static const uint16_t kToUppercaseMultiStrings0Size = 62; // NOLINT
-static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings1[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kToUppercaseTable1Size = 73; // NOLINT
-static const int32_t kToUppercaseTable1[146] = {
- 334, -112, 1073742192, -64, 383, -64, 388, -4, 1073743056, -104, 1257, -104, 1073744944, -192, 3166, -192, // NOLINT
- 3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3187, -4, 3190, -4, // NOLINT
- 3201, -4, 3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4, // NOLINT
- 3217, -4, 3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4, // NOLINT
- 3233, -4, 3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4, // NOLINT
- 3249, -4, 3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4, // NOLINT
- 3265, -4, 3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4, // NOLINT
- 3281, -4, 3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4, // NOLINT
- 3297, -4, 3299, -4, 3308, -4, 3310, -4, 3315, -4, 1073745152, -29056, 3365, -29056, 3367, -29056, // NOLINT
- 3373, -29056 }; // NOLINT
-static const uint16_t kToUppercaseMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings5[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kToUppercaseTable5Size = 88; // NOLINT
-static const int32_t kToUppercaseTable5[176] = {
- 1601, -4, 1603, -4, 1605, -4, 1607, -4, 1609, -4, 1611, -4, 1613, -4, 1615, -4, // NOLINT
- 1617, -4, 1619, -4, 1621, -4, 1623, -4, 1625, -4, 1627, -4, 1629, -4, 1631, -4, // NOLINT
- 1633, -4, 1635, -4, 1637, -4, 1639, -4, 1641, -4, 1643, -4, 1645, -4, 1665, -4, // NOLINT
- 1667, -4, 1669, -4, 1671, -4, 1673, -4, 1675, -4, 1677, -4, 1679, -4, 1681, -4, // NOLINT
- 1683, -4, 1685, -4, 1687, -4, 1827, -4, 1829, -4, 1831, -4, 1833, -4, 1835, -4, // NOLINT
- 1837, -4, 1839, -4, 1843, -4, 1845, -4, 1847, -4, 1849, -4, 1851, -4, 1853, -4, // NOLINT
- 1855, -4, 1857, -4, 1859, -4, 1861, -4, 1863, -4, 1865, -4, 1867, -4, 1869, -4, // NOLINT
- 1871, -4, 1873, -4, 1875, -4, 1877, -4, 1879, -4, 1881, -4, 1883, -4, 1885, -4, // NOLINT
- 1887, -4, 1889, -4, 1891, -4, 1893, -4, 1895, -4, 1897, -4, 1899, -4, 1901, -4, // NOLINT
- 1903, -4, 1914, -4, 1916, -4, 1919, -4, 1921, -4, 1923, -4, 1925, -4, 1927, -4, // NOLINT
- 1932, -4, 1937, -4, 1939, -4, 1953, -4, 1955, -4, 1957, -4, 1959, -4, 1961, -4 }; // NOLINT
-static const uint16_t kToUppercaseMultiStrings5Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings7[12] = { // NOLINT
- {{70, 70, kSentinel}}, {{70, 73, kSentinel}}, {{70, 76, kSentinel}}, {{70, 70, 73}}, // NOLINT
- {{70, 70, 76}}, {{83, 84, kSentinel}}, {{1348, 1350, kSentinel}}, {{1348, 1333, kSentinel}}, // NOLINT
- {{1348, 1339, kSentinel}}, {{1358, 1350, kSentinel}}, {{1348, 1341, kSentinel}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kToUppercaseTable7Size = 14; // NOLINT
-static const int32_t kToUppercaseTable7[28] = {
- 6912, 1, 6913, 5, 6914, 9, 6915, 13, 6916, 17, 6917, 21, 6918, 21, 6931, 25, // NOLINT
- 6932, 29, 6933, 33, 6934, 37, 6935, 41, 1073749825, -128, 8026, -128 }; // NOLINT
-static const uint16_t kToUppercaseMultiStrings7Size = 12; // NOLINT
-int ToUppercase::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupMapping<true>(kToUppercaseTable0,
- kToUppercaseTable0Size,
- kToUppercaseMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<true>(kToUppercaseTable1,
- kToUppercaseTable1Size,
- kToUppercaseMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 5: return LookupMapping<true>(kToUppercaseTable5,
- kToUppercaseTable5Size,
- kToUppercaseMultiStrings5,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<true>(kToUppercaseTable7,
- kToUppercaseTable7Size,
- kToUppercaseMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
- }
-}
-
-static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings0[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable0Size = 488; // NOLINT
-static const int32_t kEcma262CanonicalizeTable0[976] = {
- 1073741921, -128, 122, -128, 181, 2972, 1073742048, -128, 246, -128, 1073742072, -128, 254, -128, 255, 484, // NOLINT
- 257, -4, 259, -4, 261, -4, 263, -4, 265, -4, 267, -4, 269, -4, 271, -4, // NOLINT
- 273, -4, 275, -4, 277, -4, 279, -4, 281, -4, 283, -4, 285, -4, 287, -4, // NOLINT
- 289, -4, 291, -4, 293, -4, 295, -4, 297, -4, 299, -4, 301, -4, 303, -4, // NOLINT
- 307, -4, 309, -4, 311, -4, 314, -4, 316, -4, 318, -4, 320, -4, 322, -4, // NOLINT
- 324, -4, 326, -4, 328, -4, 331, -4, 333, -4, 335, -4, 337, -4, 339, -4, // NOLINT
- 341, -4, 343, -4, 345, -4, 347, -4, 349, -4, 351, -4, 353, -4, 355, -4, // NOLINT
- 357, -4, 359, -4, 361, -4, 363, -4, 365, -4, 367, -4, 369, -4, 371, -4, // NOLINT
- 373, -4, 375, -4, 378, -4, 380, -4, 382, -4, 384, 780, 387, -4, 389, -4, // NOLINT
- 392, -4, 396, -4, 402, -4, 405, 388, 409, -4, 410, 652, 414, 520, 417, -4, // NOLINT
- 419, -4, 421, -4, 424, -4, 429, -4, 432, -4, 436, -4, 438, -4, 441, -4, // NOLINT
- 445, -4, 447, 224, 453, -4, 454, -8, 456, -4, 457, -8, 459, -4, 460, -8, // NOLINT
- 462, -4, 464, -4, 466, -4, 468, -4, 470, -4, 472, -4, 474, -4, 476, -4, // NOLINT
- 477, -316, 479, -4, 481, -4, 483, -4, 485, -4, 487, -4, 489, -4, 491, -4, // NOLINT
- 493, -4, 495, -4, 498, -4, 499, -8, 501, -4, 505, -4, 507, -4, 509, -4, // NOLINT
- 511, -4, 513, -4, 515, -4, 517, -4, 519, -4, 521, -4, 523, -4, 525, -4, // NOLINT
- 527, -4, 529, -4, 531, -4, 533, -4, 535, -4, 537, -4, 539, -4, 541, -4, // NOLINT
- 543, -4, 547, -4, 549, -4, 551, -4, 553, -4, 555, -4, 557, -4, 559, -4, // NOLINT
- 561, -4, 563, -4, 572, -4, 1073742399, 43260, 576, 43260, 578, -4, 583, -4, 585, -4, // NOLINT
- 587, -4, 589, -4, 591, -4, 592, 43132, 593, 43120, 594, 43128, 595, -840, 596, -824, // NOLINT
- 1073742422, -820, 599, -820, 601, -808, 603, -812, 608, -820, 611, -828, 613, 169120, 614, 169232, // NOLINT
- 616, -836, 617, -844, 619, 42972, 623, -844, 625, 42996, 626, -852, 629, -856, 637, 42908, // NOLINT
- 640, -872, 643, -872, 648, -872, 649, -276, 1073742474, -868, 651, -868, 652, -284, 658, -876, // NOLINT
- 837, 336, 881, -4, 883, -4, 887, -4, 1073742715, 520, 893, 520, 940, -152, 1073742765, -148, // NOLINT
- 943, -148, 1073742769, -128, 961, -128, 962, -124, 1073742787, -128, 971, -128, 972, -256, 1073742797, -252, // NOLINT
- 974, -252, 976, -248, 977, -228, 981, -188, 982, -216, 983, -32, 985, -4, 987, -4, // NOLINT
- 989, -4, 991, -4, 993, -4, 995, -4, 997, -4, 999, -4, 1001, -4, 1003, -4, // NOLINT
- 1005, -4, 1007, -4, 1008, -344, 1009, -320, 1010, 28, 1013, -384, 1016, -4, 1019, -4, // NOLINT
- 1073742896, -128, 1103, -128, 1073742928, -320, 1119, -320, 1121, -4, 1123, -4, 1125, -4, 1127, -4, // NOLINT
- 1129, -4, 1131, -4, 1133, -4, 1135, -4, 1137, -4, 1139, -4, 1141, -4, 1143, -4, // NOLINT
- 1145, -4, 1147, -4, 1149, -4, 1151, -4, 1153, -4, 1163, -4, 1165, -4, 1167, -4, // NOLINT
- 1169, -4, 1171, -4, 1173, -4, 1175, -4, 1177, -4, 1179, -4, 1181, -4, 1183, -4, // NOLINT
- 1185, -4, 1187, -4, 1189, -4, 1191, -4, 1193, -4, 1195, -4, 1197, -4, 1199, -4, // NOLINT
- 1201, -4, 1203, -4, 1205, -4, 1207, -4, 1209, -4, 1211, -4, 1213, -4, 1215, -4, // NOLINT
- 1218, -4, 1220, -4, 1222, -4, 1224, -4, 1226, -4, 1228, -4, 1230, -4, 1231, -60, // NOLINT
- 1233, -4, 1235, -4, 1237, -4, 1239, -4, 1241, -4, 1243, -4, 1245, -4, 1247, -4, // NOLINT
- 1249, -4, 1251, -4, 1253, -4, 1255, -4, 1257, -4, 1259, -4, 1261, -4, 1263, -4, // NOLINT
- 1265, -4, 1267, -4, 1269, -4, 1271, -4, 1273, -4, 1275, -4, 1277, -4, 1279, -4, // NOLINT
- 1281, -4, 1283, -4, 1285, -4, 1287, -4, 1289, -4, 1291, -4, 1293, -4, 1295, -4, // NOLINT
- 1297, -4, 1299, -4, 1301, -4, 1303, -4, 1305, -4, 1307, -4, 1309, -4, 1311, -4, // NOLINT
- 1313, -4, 1315, -4, 1317, -4, 1319, -4, 1073743201, -192, 1414, -192, 7545, 141328, 7549, 15256, // NOLINT
- 7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4, 7695, -4, // NOLINT
- 7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4, 7711, -4, // NOLINT
- 7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4, 7727, -4, // NOLINT
- 7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4, 7743, -4, // NOLINT
- 7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, -4, 7755, -4, 7757, -4, 7759, -4, // NOLINT
- 7761, -4, 7763, -4, 7765, -4, 7767, -4, 7769, -4, 7771, -4, 7773, -4, 7775, -4, // NOLINT
- 7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4, 7791, -4, // NOLINT
- 7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4, 7807, -4, // NOLINT
- 7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4, 7823, -4, // NOLINT
- 7825, -4, 7827, -4, 7829, -4, 7835, -236, 7841, -4, 7843, -4, 7845, -4, 7847, -4, // NOLINT
- 7849, -4, 7851, -4, 7853, -4, 7855, -4, 7857, -4, 7859, -4, 7861, -4, 7863, -4, // NOLINT
- 7865, -4, 7867, -4, 7869, -4, 7871, -4, 7873, -4, 7875, -4, 7877, -4, 7879, -4, // NOLINT
- 7881, -4, 7883, -4, 7885, -4, 7887, -4, 7889, -4, 7891, -4, 7893, -4, 7895, -4, // NOLINT
- 7897, -4, 7899, -4, 7901, -4, 7903, -4, 7905, -4, 7907, -4, 7909, -4, 7911, -4, // NOLINT
- 7913, -4, 7915, -4, 7917, -4, 7919, -4, 7921, -4, 7923, -4, 7925, -4, 7927, -4, // NOLINT
- 7929, -4, 7931, -4, 7933, -4, 7935, -4, 1073749760, 32, 7943, 32, 1073749776, 32, 7957, 32, // NOLINT
- 1073749792, 32, 7975, 32, 1073749808, 32, 7991, 32, 1073749824, 32, 8005, 32, 8017, 32, 8019, 32, // NOLINT
- 8021, 32, 8023, 32, 1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344, 8053, 344, // NOLINT
- 1073749878, 400, 8055, 400, 1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504, 8061, 504, // NOLINT
- 1073749936, 32, 8113, 32, 8126, -28820, 1073749968, 32, 8145, 32, 1073749984, 32, 8161, 32, 8165, 28 }; // NOLINT
-static const uint16_t kEcma262CanonicalizeMultiStrings0Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings1[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable1Size = 73; // NOLINT
-static const int32_t kEcma262CanonicalizeTable1[146] = {
- 334, -112, 1073742192, -64, 383, -64, 388, -4, 1073743056, -104, 1257, -104, 1073744944, -192, 3166, -192, // NOLINT
- 3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3187, -4, 3190, -4, // NOLINT
- 3201, -4, 3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4, // NOLINT
- 3217, -4, 3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4, // NOLINT
- 3233, -4, 3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4, // NOLINT
- 3249, -4, 3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4, // NOLINT
- 3265, -4, 3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4, // NOLINT
- 3281, -4, 3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4, // NOLINT
- 3297, -4, 3299, -4, 3308, -4, 3310, -4, 3315, -4, 1073745152, -29056, 3365, -29056, 3367, -29056, // NOLINT
- 3373, -29056 }; // NOLINT
-static const uint16_t kEcma262CanonicalizeMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings5[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable5Size = 88; // NOLINT
-static const int32_t kEcma262CanonicalizeTable5[176] = {
- 1601, -4, 1603, -4, 1605, -4, 1607, -4, 1609, -4, 1611, -4, 1613, -4, 1615, -4, // NOLINT
- 1617, -4, 1619, -4, 1621, -4, 1623, -4, 1625, -4, 1627, -4, 1629, -4, 1631, -4, // NOLINT
- 1633, -4, 1635, -4, 1637, -4, 1639, -4, 1641, -4, 1643, -4, 1645, -4, 1665, -4, // NOLINT
- 1667, -4, 1669, -4, 1671, -4, 1673, -4, 1675, -4, 1677, -4, 1679, -4, 1681, -4, // NOLINT
- 1683, -4, 1685, -4, 1687, -4, 1827, -4, 1829, -4, 1831, -4, 1833, -4, 1835, -4, // NOLINT
- 1837, -4, 1839, -4, 1843, -4, 1845, -4, 1847, -4, 1849, -4, 1851, -4, 1853, -4, // NOLINT
- 1855, -4, 1857, -4, 1859, -4, 1861, -4, 1863, -4, 1865, -4, 1867, -4, 1869, -4, // NOLINT
- 1871, -4, 1873, -4, 1875, -4, 1877, -4, 1879, -4, 1881, -4, 1883, -4, 1885, -4, // NOLINT
- 1887, -4, 1889, -4, 1891, -4, 1893, -4, 1895, -4, 1897, -4, 1899, -4, 1901, -4, // NOLINT
- 1903, -4, 1914, -4, 1916, -4, 1919, -4, 1921, -4, 1923, -4, 1925, -4, 1927, -4, // NOLINT
- 1932, -4, 1937, -4, 1939, -4, 1953, -4, 1955, -4, 1957, -4, 1959, -4, 1961, -4 }; // NOLINT
-static const uint16_t kEcma262CanonicalizeMultiStrings5Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings7[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable7Size = 2; // NOLINT
-static const int32_t kEcma262CanonicalizeTable7[4] = {
- 1073749825, -128, 8026, -128 }; // NOLINT
-static const uint16_t kEcma262CanonicalizeMultiStrings7Size = 1; // NOLINT
-int Ecma262Canonicalize::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupMapping<true>(kEcma262CanonicalizeTable0,
- kEcma262CanonicalizeTable0Size,
- kEcma262CanonicalizeMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<true>(kEcma262CanonicalizeTable1,
- kEcma262CanonicalizeTable1Size,
- kEcma262CanonicalizeMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 5: return LookupMapping<true>(kEcma262CanonicalizeTable5,
- kEcma262CanonicalizeTable5Size,
- kEcma262CanonicalizeMultiStrings5,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<true>(kEcma262CanonicalizeTable7,
- kEcma262CanonicalizeTable7Size,
- kEcma262CanonicalizeMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
- }
-}
-
-static const MultiCharacterSpecialCase<4> kEcma262UnCanonicalizeMultiStrings0[497] = { // NOLINT
- {{65, 97, kSentinel}}, {{90, 122, kSentinel}}, {{181, 924, 956, kSentinel}}, {{192, 224, kSentinel}}, // NOLINT
- {{214, 246, kSentinel}}, {{216, 248, kSentinel}}, {{222, 254, kSentinel}}, {{255, 376, kSentinel}}, // NOLINT
- {{256, 257, kSentinel}}, {{258, 259, kSentinel}}, {{260, 261, kSentinel}}, {{262, 263, kSentinel}}, // NOLINT
- {{264, 265, kSentinel}}, {{266, 267, kSentinel}}, {{268, 269, kSentinel}}, {{270, 271, kSentinel}}, // NOLINT
- {{272, 273, kSentinel}}, {{274, 275, kSentinel}}, {{276, 277, kSentinel}}, {{278, 279, kSentinel}}, // NOLINT
- {{280, 281, kSentinel}}, {{282, 283, kSentinel}}, {{284, 285, kSentinel}}, {{286, 287, kSentinel}}, // NOLINT
- {{288, 289, kSentinel}}, {{290, 291, kSentinel}}, {{292, 293, kSentinel}}, {{294, 295, kSentinel}}, // NOLINT
- {{296, 297, kSentinel}}, {{298, 299, kSentinel}}, {{300, 301, kSentinel}}, {{302, 303, kSentinel}}, // NOLINT
- {{306, 307, kSentinel}}, {{308, 309, kSentinel}}, {{310, 311, kSentinel}}, {{313, 314, kSentinel}}, // NOLINT
- {{315, 316, kSentinel}}, {{317, 318, kSentinel}}, {{319, 320, kSentinel}}, {{321, 322, kSentinel}}, // NOLINT
- {{323, 324, kSentinel}}, {{325, 326, kSentinel}}, {{327, 328, kSentinel}}, {{330, 331, kSentinel}}, // NOLINT
- {{332, 333, kSentinel}}, {{334, 335, kSentinel}}, {{336, 337, kSentinel}}, {{338, 339, kSentinel}}, // NOLINT
- {{340, 341, kSentinel}}, {{342, 343, kSentinel}}, {{344, 345, kSentinel}}, {{346, 347, kSentinel}}, // NOLINT
- {{348, 349, kSentinel}}, {{350, 351, kSentinel}}, {{352, 353, kSentinel}}, {{354, 355, kSentinel}}, // NOLINT
- {{356, 357, kSentinel}}, {{358, 359, kSentinel}}, {{360, 361, kSentinel}}, {{362, 363, kSentinel}}, // NOLINT
- {{364, 365, kSentinel}}, {{366, 367, kSentinel}}, {{368, 369, kSentinel}}, {{370, 371, kSentinel}}, // NOLINT
- {{372, 373, kSentinel}}, {{374, 375, kSentinel}}, {{377, 378, kSentinel}}, {{379, 380, kSentinel}}, // NOLINT
- {{381, 382, kSentinel}}, {{384, 579, kSentinel}}, {{385, 595, kSentinel}}, {{386, 387, kSentinel}}, // NOLINT
- {{388, 389, kSentinel}}, {{390, 596, kSentinel}}, {{391, 392, kSentinel}}, {{393, 598, kSentinel}}, // NOLINT
- {{394, 599, kSentinel}}, {{395, 396, kSentinel}}, {{398, 477, kSentinel}}, {{399, 601, kSentinel}}, // NOLINT
- {{400, 603, kSentinel}}, {{401, 402, kSentinel}}, {{403, 608, kSentinel}}, {{404, 611, kSentinel}}, // NOLINT
- {{405, 502, kSentinel}}, {{406, 617, kSentinel}}, {{407, 616, kSentinel}}, {{408, 409, kSentinel}}, // NOLINT
- {{410, 573, kSentinel}}, {{412, 623, kSentinel}}, {{413, 626, kSentinel}}, {{414, 544, kSentinel}}, // NOLINT
- {{415, 629, kSentinel}}, {{416, 417, kSentinel}}, {{418, 419, kSentinel}}, {{420, 421, kSentinel}}, // NOLINT
- {{422, 640, kSentinel}}, {{423, 424, kSentinel}}, {{425, 643, kSentinel}}, {{428, 429, kSentinel}}, // NOLINT
- {{430, 648, kSentinel}}, {{431, 432, kSentinel}}, {{433, 650, kSentinel}}, {{434, 651, kSentinel}}, // NOLINT
- {{435, 436, kSentinel}}, {{437, 438, kSentinel}}, {{439, 658, kSentinel}}, {{440, 441, kSentinel}}, // NOLINT
- {{444, 445, kSentinel}}, {{447, 503, kSentinel}}, {{452, 453, 454, kSentinel}}, {{455, 456, 457, kSentinel}}, // NOLINT
- {{458, 459, 460, kSentinel}}, {{461, 462, kSentinel}}, {{463, 464, kSentinel}}, {{465, 466, kSentinel}}, // NOLINT
- {{467, 468, kSentinel}}, {{469, 470, kSentinel}}, {{471, 472, kSentinel}}, {{473, 474, kSentinel}}, // NOLINT
- {{475, 476, kSentinel}}, {{478, 479, kSentinel}}, {{480, 481, kSentinel}}, {{482, 483, kSentinel}}, // NOLINT
- {{484, 485, kSentinel}}, {{486, 487, kSentinel}}, {{488, 489, kSentinel}}, {{490, 491, kSentinel}}, // NOLINT
- {{492, 493, kSentinel}}, {{494, 495, kSentinel}}, {{497, 498, 499, kSentinel}}, {{500, 501, kSentinel}}, // NOLINT
- {{504, 505, kSentinel}}, {{506, 507, kSentinel}}, {{508, 509, kSentinel}}, {{510, 511, kSentinel}}, // NOLINT
- {{512, 513, kSentinel}}, {{514, 515, kSentinel}}, {{516, 517, kSentinel}}, {{518, 519, kSentinel}}, // NOLINT
- {{520, 521, kSentinel}}, {{522, 523, kSentinel}}, {{524, 525, kSentinel}}, {{526, 527, kSentinel}}, // NOLINT
- {{528, 529, kSentinel}}, {{530, 531, kSentinel}}, {{532, 533, kSentinel}}, {{534, 535, kSentinel}}, // NOLINT
- {{536, 537, kSentinel}}, {{538, 539, kSentinel}}, {{540, 541, kSentinel}}, {{542, 543, kSentinel}}, // NOLINT
- {{546, 547, kSentinel}}, {{548, 549, kSentinel}}, {{550, 551, kSentinel}}, {{552, 553, kSentinel}}, // NOLINT
- {{554, 555, kSentinel}}, {{556, 557, kSentinel}}, {{558, 559, kSentinel}}, {{560, 561, kSentinel}}, // NOLINT
- {{562, 563, kSentinel}}, {{570, 11365, kSentinel}}, {{571, 572, kSentinel}}, {{574, 11366, kSentinel}}, // NOLINT
- {{575, 11390, kSentinel}}, {{576, 11391, kSentinel}}, {{577, 578, kSentinel}}, {{580, 649, kSentinel}}, // NOLINT
- {{581, 652, kSentinel}}, {{582, 583, kSentinel}}, {{584, 585, kSentinel}}, {{586, 587, kSentinel}}, // NOLINT
- {{588, 589, kSentinel}}, {{590, 591, kSentinel}}, {{592, 11375, kSentinel}}, {{593, 11373, kSentinel}}, // NOLINT
- {{594, 11376, kSentinel}}, {{613, 42893, kSentinel}}, {{614, 42922, kSentinel}}, {{619, 11362, kSentinel}}, // NOLINT
- {{625, 11374, kSentinel}}, {{637, 11364, kSentinel}}, {{837, 921, 953, 8126}}, {{880, 881, kSentinel}}, // NOLINT
- {{882, 883, kSentinel}}, {{886, 887, kSentinel}}, {{891, 1021, kSentinel}}, {{893, 1023, kSentinel}}, // NOLINT
- {{902, 940, kSentinel}}, {{904, 941, kSentinel}}, {{906, 943, kSentinel}}, {{908, 972, kSentinel}}, // NOLINT
- {{910, 973, kSentinel}}, {{911, 974, kSentinel}}, {{913, 945, kSentinel}}, {{914, 946, 976, kSentinel}}, // NOLINT
- {{915, 947, kSentinel}}, {{916, 948, kSentinel}}, {{917, 949, 1013, kSentinel}}, {{918, 950, kSentinel}}, // NOLINT
- {{919, 951, kSentinel}}, {{920, 952, 977, kSentinel}}, {{922, 954, 1008, kSentinel}}, {{923, 955, kSentinel}}, // NOLINT
- {{925, 957, kSentinel}}, {{927, 959, kSentinel}}, {{928, 960, 982, kSentinel}}, {{929, 961, 1009, kSentinel}}, // NOLINT
- {{931, 962, 963, kSentinel}}, {{932, 964, kSentinel}}, {{933, 965, kSentinel}}, {{934, 966, 981, kSentinel}}, // NOLINT
- {{935, 967, kSentinel}}, {{939, 971, kSentinel}}, {{975, 983, kSentinel}}, {{984, 985, kSentinel}}, // NOLINT
- {{986, 987, kSentinel}}, {{988, 989, kSentinel}}, {{990, 991, kSentinel}}, {{992, 993, kSentinel}}, // NOLINT
- {{994, 995, kSentinel}}, {{996, 997, kSentinel}}, {{998, 999, kSentinel}}, {{1000, 1001, kSentinel}}, // NOLINT
- {{1002, 1003, kSentinel}}, {{1004, 1005, kSentinel}}, {{1006, 1007, kSentinel}}, {{1010, 1017, kSentinel}}, // NOLINT
- {{1015, 1016, kSentinel}}, {{1018, 1019, kSentinel}}, {{1024, 1104, kSentinel}}, {{1039, 1119, kSentinel}}, // NOLINT
- {{1040, 1072, kSentinel}}, {{1071, 1103, kSentinel}}, {{1120, 1121, kSentinel}}, {{1122, 1123, kSentinel}}, // NOLINT
- {{1124, 1125, kSentinel}}, {{1126, 1127, kSentinel}}, {{1128, 1129, kSentinel}}, {{1130, 1131, kSentinel}}, // NOLINT
- {{1132, 1133, kSentinel}}, {{1134, 1135, kSentinel}}, {{1136, 1137, kSentinel}}, {{1138, 1139, kSentinel}}, // NOLINT
- {{1140, 1141, kSentinel}}, {{1142, 1143, kSentinel}}, {{1144, 1145, kSentinel}}, {{1146, 1147, kSentinel}}, // NOLINT
- {{1148, 1149, kSentinel}}, {{1150, 1151, kSentinel}}, {{1152, 1153, kSentinel}}, {{1162, 1163, kSentinel}}, // NOLINT
- {{1164, 1165, kSentinel}}, {{1166, 1167, kSentinel}}, {{1168, 1169, kSentinel}}, {{1170, 1171, kSentinel}}, // NOLINT
- {{1172, 1173, kSentinel}}, {{1174, 1175, kSentinel}}, {{1176, 1177, kSentinel}}, {{1178, 1179, kSentinel}}, // NOLINT
- {{1180, 1181, kSentinel}}, {{1182, 1183, kSentinel}}, {{1184, 1185, kSentinel}}, {{1186, 1187, kSentinel}}, // NOLINT
- {{1188, 1189, kSentinel}}, {{1190, 1191, kSentinel}}, {{1192, 1193, kSentinel}}, {{1194, 1195, kSentinel}}, // NOLINT
- {{1196, 1197, kSentinel}}, {{1198, 1199, kSentinel}}, {{1200, 1201, kSentinel}}, {{1202, 1203, kSentinel}}, // NOLINT
- {{1204, 1205, kSentinel}}, {{1206, 1207, kSentinel}}, {{1208, 1209, kSentinel}}, {{1210, 1211, kSentinel}}, // NOLINT
- {{1212, 1213, kSentinel}}, {{1214, 1215, kSentinel}}, {{1216, 1231, kSentinel}}, {{1217, 1218, kSentinel}}, // NOLINT
- {{1219, 1220, kSentinel}}, {{1221, 1222, kSentinel}}, {{1223, 1224, kSentinel}}, {{1225, 1226, kSentinel}}, // NOLINT
- {{1227, 1228, kSentinel}}, {{1229, 1230, kSentinel}}, {{1232, 1233, kSentinel}}, {{1234, 1235, kSentinel}}, // NOLINT
- {{1236, 1237, kSentinel}}, {{1238, 1239, kSentinel}}, {{1240, 1241, kSentinel}}, {{1242, 1243, kSentinel}}, // NOLINT
- {{1244, 1245, kSentinel}}, {{1246, 1247, kSentinel}}, {{1248, 1249, kSentinel}}, {{1250, 1251, kSentinel}}, // NOLINT
- {{1252, 1253, kSentinel}}, {{1254, 1255, kSentinel}}, {{1256, 1257, kSentinel}}, {{1258, 1259, kSentinel}}, // NOLINT
- {{1260, 1261, kSentinel}}, {{1262, 1263, kSentinel}}, {{1264, 1265, kSentinel}}, {{1266, 1267, kSentinel}}, // NOLINT
- {{1268, 1269, kSentinel}}, {{1270, 1271, kSentinel}}, {{1272, 1273, kSentinel}}, {{1274, 1275, kSentinel}}, // NOLINT
- {{1276, 1277, kSentinel}}, {{1278, 1279, kSentinel}}, {{1280, 1281, kSentinel}}, {{1282, 1283, kSentinel}}, // NOLINT
- {{1284, 1285, kSentinel}}, {{1286, 1287, kSentinel}}, {{1288, 1289, kSentinel}}, {{1290, 1291, kSentinel}}, // NOLINT
- {{1292, 1293, kSentinel}}, {{1294, 1295, kSentinel}}, {{1296, 1297, kSentinel}}, {{1298, 1299, kSentinel}}, // NOLINT
- {{1300, 1301, kSentinel}}, {{1302, 1303, kSentinel}}, {{1304, 1305, kSentinel}}, {{1306, 1307, kSentinel}}, // NOLINT
- {{1308, 1309, kSentinel}}, {{1310, 1311, kSentinel}}, {{1312, 1313, kSentinel}}, {{1314, 1315, kSentinel}}, // NOLINT
- {{1316, 1317, kSentinel}}, {{1318, 1319, kSentinel}}, {{1329, 1377, kSentinel}}, {{1366, 1414, kSentinel}}, // NOLINT
- {{4256, 11520, kSentinel}}, {{4293, 11557, kSentinel}}, {{4295, 11559, kSentinel}}, {{4301, 11565, kSentinel}}, // NOLINT
- {{7545, 42877, kSentinel}}, {{7549, 11363, kSentinel}}, {{7680, 7681, kSentinel}}, {{7682, 7683, kSentinel}}, // NOLINT
- {{7684, 7685, kSentinel}}, {{7686, 7687, kSentinel}}, {{7688, 7689, kSentinel}}, {{7690, 7691, kSentinel}}, // NOLINT
- {{7692, 7693, kSentinel}}, {{7694, 7695, kSentinel}}, {{7696, 7697, kSentinel}}, {{7698, 7699, kSentinel}}, // NOLINT
- {{7700, 7701, kSentinel}}, {{7702, 7703, kSentinel}}, {{7704, 7705, kSentinel}}, {{7706, 7707, kSentinel}}, // NOLINT
- {{7708, 7709, kSentinel}}, {{7710, 7711, kSentinel}}, {{7712, 7713, kSentinel}}, {{7714, 7715, kSentinel}}, // NOLINT
- {{7716, 7717, kSentinel}}, {{7718, 7719, kSentinel}}, {{7720, 7721, kSentinel}}, {{7722, 7723, kSentinel}}, // NOLINT
- {{7724, 7725, kSentinel}}, {{7726, 7727, kSentinel}}, {{7728, 7729, kSentinel}}, {{7730, 7731, kSentinel}}, // NOLINT
- {{7732, 7733, kSentinel}}, {{7734, 7735, kSentinel}}, {{7736, 7737, kSentinel}}, {{7738, 7739, kSentinel}}, // NOLINT
- {{7740, 7741, kSentinel}}, {{7742, 7743, kSentinel}}, {{7744, 7745, kSentinel}}, {{7746, 7747, kSentinel}}, // NOLINT
- {{7748, 7749, kSentinel}}, {{7750, 7751, kSentinel}}, {{7752, 7753, kSentinel}}, {{7754, 7755, kSentinel}}, // NOLINT
- {{7756, 7757, kSentinel}}, {{7758, 7759, kSentinel}}, {{7760, 7761, kSentinel}}, {{7762, 7763, kSentinel}}, // NOLINT
- {{7764, 7765, kSentinel}}, {{7766, 7767, kSentinel}}, {{7768, 7769, kSentinel}}, {{7770, 7771, kSentinel}}, // NOLINT
- {{7772, 7773, kSentinel}}, {{7774, 7775, kSentinel}}, {{7776, 7777, 7835, kSentinel}}, {{7778, 7779, kSentinel}}, // NOLINT
- {{7780, 7781, kSentinel}}, {{7782, 7783, kSentinel}}, {{7784, 7785, kSentinel}}, {{7786, 7787, kSentinel}}, // NOLINT
- {{7788, 7789, kSentinel}}, {{7790, 7791, kSentinel}}, {{7792, 7793, kSentinel}}, {{7794, 7795, kSentinel}}, // NOLINT
- {{7796, 7797, kSentinel}}, {{7798, 7799, kSentinel}}, {{7800, 7801, kSentinel}}, {{7802, 7803, kSentinel}}, // NOLINT
- {{7804, 7805, kSentinel}}, {{7806, 7807, kSentinel}}, {{7808, 7809, kSentinel}}, {{7810, 7811, kSentinel}}, // NOLINT
- {{7812, 7813, kSentinel}}, {{7814, 7815, kSentinel}}, {{7816, 7817, kSentinel}}, {{7818, 7819, kSentinel}}, // NOLINT
- {{7820, 7821, kSentinel}}, {{7822, 7823, kSentinel}}, {{7824, 7825, kSentinel}}, {{7826, 7827, kSentinel}}, // NOLINT
- {{7828, 7829, kSentinel}}, {{7840, 7841, kSentinel}}, {{7842, 7843, kSentinel}}, {{7844, 7845, kSentinel}}, // NOLINT
- {{7846, 7847, kSentinel}}, {{7848, 7849, kSentinel}}, {{7850, 7851, kSentinel}}, {{7852, 7853, kSentinel}}, // NOLINT
- {{7854, 7855, kSentinel}}, {{7856, 7857, kSentinel}}, {{7858, 7859, kSentinel}}, {{7860, 7861, kSentinel}}, // NOLINT
- {{7862, 7863, kSentinel}}, {{7864, 7865, kSentinel}}, {{7866, 7867, kSentinel}}, {{7868, 7869, kSentinel}}, // NOLINT
- {{7870, 7871, kSentinel}}, {{7872, 7873, kSentinel}}, {{7874, 7875, kSentinel}}, {{7876, 7877, kSentinel}}, // NOLINT
- {{7878, 7879, kSentinel}}, {{7880, 7881, kSentinel}}, {{7882, 7883, kSentinel}}, {{7884, 7885, kSentinel}}, // NOLINT
- {{7886, 7887, kSentinel}}, {{7888, 7889, kSentinel}}, {{7890, 7891, kSentinel}}, {{7892, 7893, kSentinel}}, // NOLINT
- {{7894, 7895, kSentinel}}, {{7896, 7897, kSentinel}}, {{7898, 7899, kSentinel}}, {{7900, 7901, kSentinel}}, // NOLINT
- {{7902, 7903, kSentinel}}, {{7904, 7905, kSentinel}}, {{7906, 7907, kSentinel}}, {{7908, 7909, kSentinel}}, // NOLINT
- {{7910, 7911, kSentinel}}, {{7912, 7913, kSentinel}}, {{7914, 7915, kSentinel}}, {{7916, 7917, kSentinel}}, // NOLINT
- {{7918, 7919, kSentinel}}, {{7920, 7921, kSentinel}}, {{7922, 7923, kSentinel}}, {{7924, 7925, kSentinel}}, // NOLINT
- {{7926, 7927, kSentinel}}, {{7928, 7929, kSentinel}}, {{7930, 7931, kSentinel}}, {{7932, 7933, kSentinel}}, // NOLINT
- {{7934, 7935, kSentinel}}, {{7936, 7944, kSentinel}}, {{7943, 7951, kSentinel}}, {{7952, 7960, kSentinel}}, // NOLINT
- {{7957, 7965, kSentinel}}, {{7968, 7976, kSentinel}}, {{7975, 7983, kSentinel}}, {{7984, 7992, kSentinel}}, // NOLINT
- {{7991, 7999, kSentinel}}, {{8000, 8008, kSentinel}}, {{8005, 8013, kSentinel}}, {{8017, 8025, kSentinel}}, // NOLINT
- {{8019, 8027, kSentinel}}, {{8021, 8029, kSentinel}}, {{8023, 8031, kSentinel}}, {{8032, 8040, kSentinel}}, // NOLINT
- {{8039, 8047, kSentinel}}, {{8048, 8122, kSentinel}}, {{8049, 8123, kSentinel}}, {{8050, 8136, kSentinel}}, // NOLINT
- {{8053, 8139, kSentinel}}, {{8054, 8154, kSentinel}}, {{8055, 8155, kSentinel}}, {{8056, 8184, kSentinel}}, // NOLINT
- {{8057, 8185, kSentinel}}, {{8058, 8170, kSentinel}}, {{8059, 8171, kSentinel}}, {{8060, 8186, kSentinel}}, // NOLINT
- {{8061, 8187, kSentinel}}, {{8112, 8120, kSentinel}}, {{8113, 8121, kSentinel}}, {{8144, 8152, kSentinel}}, // NOLINT
- {{8145, 8153, kSentinel}}, {{8160, 8168, kSentinel}}, {{8161, 8169, kSentinel}}, {{8165, 8172, kSentinel}}, // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable0Size = 990; // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable0[1980] = {
- 1073741889, 1, 90, 5, 1073741921, 1, 122, 5, 181, 9, 1073742016, 13, 214, 17, 1073742040, 21, // NOLINT
- 222, 25, 1073742048, 13, 246, 17, 1073742072, 21, 254, 25, 255, 29, 256, 33, 257, 33, // NOLINT
- 258, 37, 259, 37, 260, 41, 261, 41, 262, 45, 263, 45, 264, 49, 265, 49, // NOLINT
- 266, 53, 267, 53, 268, 57, 269, 57, 270, 61, 271, 61, 272, 65, 273, 65, // NOLINT
- 274, 69, 275, 69, 276, 73, 277, 73, 278, 77, 279, 77, 280, 81, 281, 81, // NOLINT
- 282, 85, 283, 85, 284, 89, 285, 89, 286, 93, 287, 93, 288, 97, 289, 97, // NOLINT
- 290, 101, 291, 101, 292, 105, 293, 105, 294, 109, 295, 109, 296, 113, 297, 113, // NOLINT
- 298, 117, 299, 117, 300, 121, 301, 121, 302, 125, 303, 125, 306, 129, 307, 129, // NOLINT
- 308, 133, 309, 133, 310, 137, 311, 137, 313, 141, 314, 141, 315, 145, 316, 145, // NOLINT
- 317, 149, 318, 149, 319, 153, 320, 153, 321, 157, 322, 157, 323, 161, 324, 161, // NOLINT
- 325, 165, 326, 165, 327, 169, 328, 169, 330, 173, 331, 173, 332, 177, 333, 177, // NOLINT
- 334, 181, 335, 181, 336, 185, 337, 185, 338, 189, 339, 189, 340, 193, 341, 193, // NOLINT
- 342, 197, 343, 197, 344, 201, 345, 201, 346, 205, 347, 205, 348, 209, 349, 209, // NOLINT
- 350, 213, 351, 213, 352, 217, 353, 217, 354, 221, 355, 221, 356, 225, 357, 225, // NOLINT
- 358, 229, 359, 229, 360, 233, 361, 233, 362, 237, 363, 237, 364, 241, 365, 241, // NOLINT
- 366, 245, 367, 245, 368, 249, 369, 249, 370, 253, 371, 253, 372, 257, 373, 257, // NOLINT
- 374, 261, 375, 261, 376, 29, 377, 265, 378, 265, 379, 269, 380, 269, 381, 273, // NOLINT
- 382, 273, 384, 277, 385, 281, 386, 285, 387, 285, 388, 289, 389, 289, 390, 293, // NOLINT
- 391, 297, 392, 297, 1073742217, 301, 394, 305, 395, 309, 396, 309, 398, 313, 399, 317, // NOLINT
- 400, 321, 401, 325, 402, 325, 403, 329, 404, 333, 405, 337, 406, 341, 407, 345, // NOLINT
- 408, 349, 409, 349, 410, 353, 412, 357, 413, 361, 414, 365, 415, 369, 416, 373, // NOLINT
- 417, 373, 418, 377, 419, 377, 420, 381, 421, 381, 422, 385, 423, 389, 424, 389, // NOLINT
- 425, 393, 428, 397, 429, 397, 430, 401, 431, 405, 432, 405, 1073742257, 409, 434, 413, // NOLINT
- 435, 417, 436, 417, 437, 421, 438, 421, 439, 425, 440, 429, 441, 429, 444, 433, // NOLINT
- 445, 433, 447, 437, 452, 441, 453, 441, 454, 441, 455, 445, 456, 445, 457, 445, // NOLINT
- 458, 449, 459, 449, 460, 449, 461, 453, 462, 453, 463, 457, 464, 457, 465, 461, // NOLINT
- 466, 461, 467, 465, 468, 465, 469, 469, 470, 469, 471, 473, 472, 473, 473, 477, // NOLINT
- 474, 477, 475, 481, 476, 481, 477, 313, 478, 485, 479, 485, 480, 489, 481, 489, // NOLINT
- 482, 493, 483, 493, 484, 497, 485, 497, 486, 501, 487, 501, 488, 505, 489, 505, // NOLINT
- 490, 509, 491, 509, 492, 513, 493, 513, 494, 517, 495, 517, 497, 521, 498, 521, // NOLINT
- 499, 521, 500, 525, 501, 525, 502, 337, 503, 437, 504, 529, 505, 529, 506, 533, // NOLINT
- 507, 533, 508, 537, 509, 537, 510, 541, 511, 541, 512, 545, 513, 545, 514, 549, // NOLINT
- 515, 549, 516, 553, 517, 553, 518, 557, 519, 557, 520, 561, 521, 561, 522, 565, // NOLINT
- 523, 565, 524, 569, 525, 569, 526, 573, 527, 573, 528, 577, 529, 577, 530, 581, // NOLINT
- 531, 581, 532, 585, 533, 585, 534, 589, 535, 589, 536, 593, 537, 593, 538, 597, // NOLINT
- 539, 597, 540, 601, 541, 601, 542, 605, 543, 605, 544, 365, 546, 609, 547, 609, // NOLINT
- 548, 613, 549, 613, 550, 617, 551, 617, 552, 621, 553, 621, 554, 625, 555, 625, // NOLINT
- 556, 629, 557, 629, 558, 633, 559, 633, 560, 637, 561, 637, 562, 641, 563, 641, // NOLINT
- 570, 645, 571, 649, 572, 649, 573, 353, 574, 653, 1073742399, 657, 576, 661, 577, 665, // NOLINT
- 578, 665, 579, 277, 580, 669, 581, 673, 582, 677, 583, 677, 584, 681, 585, 681, // NOLINT
- 586, 685, 587, 685, 588, 689, 589, 689, 590, 693, 591, 693, 592, 697, 593, 701, // NOLINT
- 594, 705, 595, 281, 596, 293, 1073742422, 301, 599, 305, 601, 317, 603, 321, 608, 329, // NOLINT
- 611, 333, 613, 709, 614, 713, 616, 345, 617, 341, 619, 717, 623, 357, 625, 721, // NOLINT
- 626, 361, 629, 369, 637, 725, 640, 385, 643, 393, 648, 401, 649, 669, 1073742474, 409, // NOLINT
- 651, 413, 652, 673, 658, 425, 837, 729, 880, 733, 881, 733, 882, 737, 883, 737, // NOLINT
- 886, 741, 887, 741, 1073742715, 745, 893, 749, 902, 753, 1073742728, 757, 906, 761, 908, 765, // NOLINT
- 1073742734, 769, 911, 773, 913, 777, 914, 781, 1073742739, 785, 916, 789, 917, 793, 1073742742, 797, // NOLINT
- 919, 801, 920, 805, 921, 729, 922, 809, 923, 813, 924, 9, 1073742749, 817, 927, 821, // NOLINT
- 928, 825, 929, 829, 931, 833, 1073742756, 837, 933, 841, 934, 845, 1073742759, 849, 939, 853, // NOLINT
- 940, 753, 1073742765, 757, 943, 761, 945, 777, 946, 781, 1073742771, 785, 948, 789, 949, 793, // NOLINT
- 1073742774, 797, 951, 801, 952, 805, 953, 729, 954, 809, 955, 813, 956, 9, 1073742781, 817, // NOLINT
- 959, 821, 960, 825, 961, 829, 962, 833, 963, 833, 1073742788, 837, 965, 841, 966, 845, // NOLINT
- 1073742791, 849, 971, 853, 972, 765, 1073742797, 769, 974, 773, 975, 857, 976, 781, 977, 805, // NOLINT
- 981, 845, 982, 825, 983, 857, 984, 861, 985, 861, 986, 865, 987, 865, 988, 869, // NOLINT
- 989, 869, 990, 873, 991, 873, 992, 877, 993, 877, 994, 881, 995, 881, 996, 885, // NOLINT
- 997, 885, 998, 889, 999, 889, 1000, 893, 1001, 893, 1002, 897, 1003, 897, 1004, 901, // NOLINT
- 1005, 901, 1006, 905, 1007, 905, 1008, 809, 1009, 829, 1010, 909, 1013, 793, 1015, 913, // NOLINT
- 1016, 913, 1017, 909, 1018, 917, 1019, 917, 1073742845, 745, 1023, 749, 1073742848, 921, 1039, 925, // NOLINT
- 1073742864, 929, 1071, 933, 1073742896, 929, 1103, 933, 1073742928, 921, 1119, 925, 1120, 937, 1121, 937, // NOLINT
- 1122, 941, 1123, 941, 1124, 945, 1125, 945, 1126, 949, 1127, 949, 1128, 953, 1129, 953, // NOLINT
- 1130, 957, 1131, 957, 1132, 961, 1133, 961, 1134, 965, 1135, 965, 1136, 969, 1137, 969, // NOLINT
- 1138, 973, 1139, 973, 1140, 977, 1141, 977, 1142, 981, 1143, 981, 1144, 985, 1145, 985, // NOLINT
- 1146, 989, 1147, 989, 1148, 993, 1149, 993, 1150, 997, 1151, 997, 1152, 1001, 1153, 1001, // NOLINT
- 1162, 1005, 1163, 1005, 1164, 1009, 1165, 1009, 1166, 1013, 1167, 1013, 1168, 1017, 1169, 1017, // NOLINT
- 1170, 1021, 1171, 1021, 1172, 1025, 1173, 1025, 1174, 1029, 1175, 1029, 1176, 1033, 1177, 1033, // NOLINT
- 1178, 1037, 1179, 1037, 1180, 1041, 1181, 1041, 1182, 1045, 1183, 1045, 1184, 1049, 1185, 1049, // NOLINT
- 1186, 1053, 1187, 1053, 1188, 1057, 1189, 1057, 1190, 1061, 1191, 1061, 1192, 1065, 1193, 1065, // NOLINT
- 1194, 1069, 1195, 1069, 1196, 1073, 1197, 1073, 1198, 1077, 1199, 1077, 1200, 1081, 1201, 1081, // NOLINT
- 1202, 1085, 1203, 1085, 1204, 1089, 1205, 1089, 1206, 1093, 1207, 1093, 1208, 1097, 1209, 1097, // NOLINT
- 1210, 1101, 1211, 1101, 1212, 1105, 1213, 1105, 1214, 1109, 1215, 1109, 1216, 1113, 1217, 1117, // NOLINT
- 1218, 1117, 1219, 1121, 1220, 1121, 1221, 1125, 1222, 1125, 1223, 1129, 1224, 1129, 1225, 1133, // NOLINT
- 1226, 1133, 1227, 1137, 1228, 1137, 1229, 1141, 1230, 1141, 1231, 1113, 1232, 1145, 1233, 1145, // NOLINT
- 1234, 1149, 1235, 1149, 1236, 1153, 1237, 1153, 1238, 1157, 1239, 1157, 1240, 1161, 1241, 1161, // NOLINT
- 1242, 1165, 1243, 1165, 1244, 1169, 1245, 1169, 1246, 1173, 1247, 1173, 1248, 1177, 1249, 1177, // NOLINT
- 1250, 1181, 1251, 1181, 1252, 1185, 1253, 1185, 1254, 1189, 1255, 1189, 1256, 1193, 1257, 1193, // NOLINT
- 1258, 1197, 1259, 1197, 1260, 1201, 1261, 1201, 1262, 1205, 1263, 1205, 1264, 1209, 1265, 1209, // NOLINT
- 1266, 1213, 1267, 1213, 1268, 1217, 1269, 1217, 1270, 1221, 1271, 1221, 1272, 1225, 1273, 1225, // NOLINT
- 1274, 1229, 1275, 1229, 1276, 1233, 1277, 1233, 1278, 1237, 1279, 1237, 1280, 1241, 1281, 1241, // NOLINT
- 1282, 1245, 1283, 1245, 1284, 1249, 1285, 1249, 1286, 1253, 1287, 1253, 1288, 1257, 1289, 1257, // NOLINT
- 1290, 1261, 1291, 1261, 1292, 1265, 1293, 1265, 1294, 1269, 1295, 1269, 1296, 1273, 1297, 1273, // NOLINT
- 1298, 1277, 1299, 1277, 1300, 1281, 1301, 1281, 1302, 1285, 1303, 1285, 1304, 1289, 1305, 1289, // NOLINT
- 1306, 1293, 1307, 1293, 1308, 1297, 1309, 1297, 1310, 1301, 1311, 1301, 1312, 1305, 1313, 1305, // NOLINT
- 1314, 1309, 1315, 1309, 1316, 1313, 1317, 1313, 1318, 1317, 1319, 1317, 1073743153, 1321, 1366, 1325, // NOLINT
- 1073743201, 1321, 1414, 1325, 1073746080, 1329, 4293, 1333, 4295, 1337, 4301, 1341, 7545, 1345, 7549, 1349, // NOLINT
- 7680, 1353, 7681, 1353, 7682, 1357, 7683, 1357, 7684, 1361, 7685, 1361, 7686, 1365, 7687, 1365, // NOLINT
- 7688, 1369, 7689, 1369, 7690, 1373, 7691, 1373, 7692, 1377, 7693, 1377, 7694, 1381, 7695, 1381, // NOLINT
- 7696, 1385, 7697, 1385, 7698, 1389, 7699, 1389, 7700, 1393, 7701, 1393, 7702, 1397, 7703, 1397, // NOLINT
- 7704, 1401, 7705, 1401, 7706, 1405, 7707, 1405, 7708, 1409, 7709, 1409, 7710, 1413, 7711, 1413, // NOLINT
- 7712, 1417, 7713, 1417, 7714, 1421, 7715, 1421, 7716, 1425, 7717, 1425, 7718, 1429, 7719, 1429, // NOLINT
- 7720, 1433, 7721, 1433, 7722, 1437, 7723, 1437, 7724, 1441, 7725, 1441, 7726, 1445, 7727, 1445, // NOLINT
- 7728, 1449, 7729, 1449, 7730, 1453, 7731, 1453, 7732, 1457, 7733, 1457, 7734, 1461, 7735, 1461, // NOLINT
- 7736, 1465, 7737, 1465, 7738, 1469, 7739, 1469, 7740, 1473, 7741, 1473, 7742, 1477, 7743, 1477, // NOLINT
- 7744, 1481, 7745, 1481, 7746, 1485, 7747, 1485, 7748, 1489, 7749, 1489, 7750, 1493, 7751, 1493, // NOLINT
- 7752, 1497, 7753, 1497, 7754, 1501, 7755, 1501, 7756, 1505, 7757, 1505, 7758, 1509, 7759, 1509, // NOLINT
- 7760, 1513, 7761, 1513, 7762, 1517, 7763, 1517, 7764, 1521, 7765, 1521, 7766, 1525, 7767, 1525, // NOLINT
- 7768, 1529, 7769, 1529, 7770, 1533, 7771, 1533, 7772, 1537, 7773, 1537, 7774, 1541, 7775, 1541, // NOLINT
- 7776, 1545, 7777, 1545, 7778, 1549, 7779, 1549, 7780, 1553, 7781, 1553, 7782, 1557, 7783, 1557, // NOLINT
- 7784, 1561, 7785, 1561, 7786, 1565, 7787, 1565, 7788, 1569, 7789, 1569, 7790, 1573, 7791, 1573, // NOLINT
- 7792, 1577, 7793, 1577, 7794, 1581, 7795, 1581, 7796, 1585, 7797, 1585, 7798, 1589, 7799, 1589, // NOLINT
- 7800, 1593, 7801, 1593, 7802, 1597, 7803, 1597, 7804, 1601, 7805, 1601, 7806, 1605, 7807, 1605, // NOLINT
- 7808, 1609, 7809, 1609, 7810, 1613, 7811, 1613, 7812, 1617, 7813, 1617, 7814, 1621, 7815, 1621, // NOLINT
- 7816, 1625, 7817, 1625, 7818, 1629, 7819, 1629, 7820, 1633, 7821, 1633, 7822, 1637, 7823, 1637, // NOLINT
- 7824, 1641, 7825, 1641, 7826, 1645, 7827, 1645, 7828, 1649, 7829, 1649, 7835, 1545, 7840, 1653, // NOLINT
- 7841, 1653, 7842, 1657, 7843, 1657, 7844, 1661, 7845, 1661, 7846, 1665, 7847, 1665, 7848, 1669, // NOLINT
- 7849, 1669, 7850, 1673, 7851, 1673, 7852, 1677, 7853, 1677, 7854, 1681, 7855, 1681, 7856, 1685, // NOLINT
- 7857, 1685, 7858, 1689, 7859, 1689, 7860, 1693, 7861, 1693, 7862, 1697, 7863, 1697, 7864, 1701, // NOLINT
- 7865, 1701, 7866, 1705, 7867, 1705, 7868, 1709, 7869, 1709, 7870, 1713, 7871, 1713, 7872, 1717, // NOLINT
- 7873, 1717, 7874, 1721, 7875, 1721, 7876, 1725, 7877, 1725, 7878, 1729, 7879, 1729, 7880, 1733, // NOLINT
- 7881, 1733, 7882, 1737, 7883, 1737, 7884, 1741, 7885, 1741, 7886, 1745, 7887, 1745, 7888, 1749, // NOLINT
- 7889, 1749, 7890, 1753, 7891, 1753, 7892, 1757, 7893, 1757, 7894, 1761, 7895, 1761, 7896, 1765, // NOLINT
- 7897, 1765, 7898, 1769, 7899, 1769, 7900, 1773, 7901, 1773, 7902, 1777, 7903, 1777, 7904, 1781, // NOLINT
- 7905, 1781, 7906, 1785, 7907, 1785, 7908, 1789, 7909, 1789, 7910, 1793, 7911, 1793, 7912, 1797, // NOLINT
- 7913, 1797, 7914, 1801, 7915, 1801, 7916, 1805, 7917, 1805, 7918, 1809, 7919, 1809, 7920, 1813, // NOLINT
- 7921, 1813, 7922, 1817, 7923, 1817, 7924, 1821, 7925, 1821, 7926, 1825, 7927, 1825, 7928, 1829, // NOLINT
- 7929, 1829, 7930, 1833, 7931, 1833, 7932, 1837, 7933, 1837, 7934, 1841, 7935, 1841, 1073749760, 1845, // NOLINT
- 7943, 1849, 1073749768, 1845, 7951, 1849, 1073749776, 1853, 7957, 1857, 1073749784, 1853, 7965, 1857, 1073749792, 1861, // NOLINT
- 7975, 1865, 1073749800, 1861, 7983, 1865, 1073749808, 1869, 7991, 1873, 1073749816, 1869, 7999, 1873, 1073749824, 1877, // NOLINT
- 8005, 1881, 1073749832, 1877, 8013, 1881, 8017, 1885, 8019, 1889, 8021, 1893, 8023, 1897, 8025, 1885, // NOLINT
- 8027, 1889, 8029, 1893, 8031, 1897, 1073749856, 1901, 8039, 1905, 1073749864, 1901, 8047, 1905, 1073749872, 1909, // NOLINT
- 8049, 1913, 1073749874, 1917, 8053, 1921, 1073749878, 1925, 8055, 1929, 1073749880, 1933, 8057, 1937, 1073749882, 1941, // NOLINT
- 8059, 1945, 1073749884, 1949, 8061, 1953, 1073749936, 1957, 8113, 1961, 1073749944, 1957, 8121, 1961, 1073749946, 1909, // NOLINT
- 8123, 1913, 8126, 729, 1073749960, 1917, 8139, 1921, 1073749968, 1965, 8145, 1969, 1073749976, 1965, 8153, 1969, // NOLINT
- 1073749978, 1925, 8155, 1929, 1073749984, 1973, 8161, 1977, 8165, 1981, 1073749992, 1973, 8169, 1977, 1073749994, 1941, // NOLINT
- 8171, 1945, 8172, 1981, 1073750008, 1933, 8185, 1937, 1073750010, 1949, 8187, 1953 }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings0Size = 497; // NOLINT
-static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings1[83] = { // NOLINT
- {{8498, 8526}}, {{8544, 8560}}, {{8559, 8575}}, {{8579, 8580}}, // NOLINT
- {{9398, 9424}}, {{9423, 9449}}, {{11264, 11312}}, {{11310, 11358}}, // NOLINT
- {{11360, 11361}}, {{619, 11362}}, {{7549, 11363}}, {{637, 11364}}, // NOLINT
- {{570, 11365}}, {{574, 11366}}, {{11367, 11368}}, {{11369, 11370}}, // NOLINT
- {{11371, 11372}}, {{593, 11373}}, {{625, 11374}}, {{592, 11375}}, // NOLINT
- {{594, 11376}}, {{11378, 11379}}, {{11381, 11382}}, {{575, 11390}}, // NOLINT
- {{576, 11391}}, {{11392, 11393}}, {{11394, 11395}}, {{11396, 11397}}, // NOLINT
- {{11398, 11399}}, {{11400, 11401}}, {{11402, 11403}}, {{11404, 11405}}, // NOLINT
- {{11406, 11407}}, {{11408, 11409}}, {{11410, 11411}}, {{11412, 11413}}, // NOLINT
- {{11414, 11415}}, {{11416, 11417}}, {{11418, 11419}}, {{11420, 11421}}, // NOLINT
- {{11422, 11423}}, {{11424, 11425}}, {{11426, 11427}}, {{11428, 11429}}, // NOLINT
- {{11430, 11431}}, {{11432, 11433}}, {{11434, 11435}}, {{11436, 11437}}, // NOLINT
- {{11438, 11439}}, {{11440, 11441}}, {{11442, 11443}}, {{11444, 11445}}, // NOLINT
- {{11446, 11447}}, {{11448, 11449}}, {{11450, 11451}}, {{11452, 11453}}, // NOLINT
- {{11454, 11455}}, {{11456, 11457}}, {{11458, 11459}}, {{11460, 11461}}, // NOLINT
- {{11462, 11463}}, {{11464, 11465}}, {{11466, 11467}}, {{11468, 11469}}, // NOLINT
- {{11470, 11471}}, {{11472, 11473}}, {{11474, 11475}}, {{11476, 11477}}, // NOLINT
- {{11478, 11479}}, {{11480, 11481}}, {{11482, 11483}}, {{11484, 11485}}, // NOLINT
- {{11486, 11487}}, {{11488, 11489}}, {{11490, 11491}}, {{11499, 11500}}, // NOLINT
- {{11501, 11502}}, {{11506, 11507}}, {{4256, 11520}}, {{4293, 11557}}, // NOLINT
- {{4295, 11559}}, {{4301, 11565}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable1Size = 149; // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable1[298] = {
- 306, 1, 334, 1, 1073742176, 5, 367, 9, 1073742192, 5, 383, 9, 387, 13, 388, 13, // NOLINT
- 1073743030, 17, 1231, 21, 1073743056, 17, 1257, 21, 1073744896, 25, 3118, 29, 1073744944, 25, 3166, 29, // NOLINT
- 3168, 33, 3169, 33, 3170, 37, 3171, 41, 3172, 45, 3173, 49, 3174, 53, 3175, 57, // NOLINT
- 3176, 57, 3177, 61, 3178, 61, 3179, 65, 3180, 65, 3181, 69, 3182, 73, 3183, 77, // NOLINT
- 3184, 81, 3186, 85, 3187, 85, 3189, 89, 3190, 89, 1073745022, 93, 3199, 97, 3200, 101, // NOLINT
- 3201, 101, 3202, 105, 3203, 105, 3204, 109, 3205, 109, 3206, 113, 3207, 113, 3208, 117, // NOLINT
- 3209, 117, 3210, 121, 3211, 121, 3212, 125, 3213, 125, 3214, 129, 3215, 129, 3216, 133, // NOLINT
- 3217, 133, 3218, 137, 3219, 137, 3220, 141, 3221, 141, 3222, 145, 3223, 145, 3224, 149, // NOLINT
- 3225, 149, 3226, 153, 3227, 153, 3228, 157, 3229, 157, 3230, 161, 3231, 161, 3232, 165, // NOLINT
- 3233, 165, 3234, 169, 3235, 169, 3236, 173, 3237, 173, 3238, 177, 3239, 177, 3240, 181, // NOLINT
- 3241, 181, 3242, 185, 3243, 185, 3244, 189, 3245, 189, 3246, 193, 3247, 193, 3248, 197, // NOLINT
- 3249, 197, 3250, 201, 3251, 201, 3252, 205, 3253, 205, 3254, 209, 3255, 209, 3256, 213, // NOLINT
- 3257, 213, 3258, 217, 3259, 217, 3260, 221, 3261, 221, 3262, 225, 3263, 225, 3264, 229, // NOLINT
- 3265, 229, 3266, 233, 3267, 233, 3268, 237, 3269, 237, 3270, 241, 3271, 241, 3272, 245, // NOLINT
- 3273, 245, 3274, 249, 3275, 249, 3276, 253, 3277, 253, 3278, 257, 3279, 257, 3280, 261, // NOLINT
- 3281, 261, 3282, 265, 3283, 265, 3284, 269, 3285, 269, 3286, 273, 3287, 273, 3288, 277, // NOLINT
- 3289, 277, 3290, 281, 3291, 281, 3292, 285, 3293, 285, 3294, 289, 3295, 289, 3296, 293, // NOLINT
- 3297, 293, 3298, 297, 3299, 297, 3307, 301, 3308, 301, 3309, 305, 3310, 305, 3314, 309, // NOLINT
- 3315, 309, 1073745152, 313, 3365, 317, 3367, 321, 3373, 325 }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings1Size = 83; // NOLINT
-static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings5[92] = { // NOLINT
- {{42560, 42561}}, {{42562, 42563}}, {{42564, 42565}}, {{42566, 42567}}, // NOLINT
- {{42568, 42569}}, {{42570, 42571}}, {{42572, 42573}}, {{42574, 42575}}, // NOLINT
- {{42576, 42577}}, {{42578, 42579}}, {{42580, 42581}}, {{42582, 42583}}, // NOLINT
- {{42584, 42585}}, {{42586, 42587}}, {{42588, 42589}}, {{42590, 42591}}, // NOLINT
- {{42592, 42593}}, {{42594, 42595}}, {{42596, 42597}}, {{42598, 42599}}, // NOLINT
- {{42600, 42601}}, {{42602, 42603}}, {{42604, 42605}}, {{42624, 42625}}, // NOLINT
- {{42626, 42627}}, {{42628, 42629}}, {{42630, 42631}}, {{42632, 42633}}, // NOLINT
- {{42634, 42635}}, {{42636, 42637}}, {{42638, 42639}}, {{42640, 42641}}, // NOLINT
- {{42642, 42643}}, {{42644, 42645}}, {{42646, 42647}}, {{42786, 42787}}, // NOLINT
- {{42788, 42789}}, {{42790, 42791}}, {{42792, 42793}}, {{42794, 42795}}, // NOLINT
- {{42796, 42797}}, {{42798, 42799}}, {{42802, 42803}}, {{42804, 42805}}, // NOLINT
- {{42806, 42807}}, {{42808, 42809}}, {{42810, 42811}}, {{42812, 42813}}, // NOLINT
- {{42814, 42815}}, {{42816, 42817}}, {{42818, 42819}}, {{42820, 42821}}, // NOLINT
- {{42822, 42823}}, {{42824, 42825}}, {{42826, 42827}}, {{42828, 42829}}, // NOLINT
- {{42830, 42831}}, {{42832, 42833}}, {{42834, 42835}}, {{42836, 42837}}, // NOLINT
- {{42838, 42839}}, {{42840, 42841}}, {{42842, 42843}}, {{42844, 42845}}, // NOLINT
- {{42846, 42847}}, {{42848, 42849}}, {{42850, 42851}}, {{42852, 42853}}, // NOLINT
- {{42854, 42855}}, {{42856, 42857}}, {{42858, 42859}}, {{42860, 42861}}, // NOLINT
- {{42862, 42863}}, {{42873, 42874}}, {{42875, 42876}}, {{7545, 42877}}, // NOLINT
- {{42878, 42879}}, {{42880, 42881}}, {{42882, 42883}}, {{42884, 42885}}, // NOLINT
- {{42886, 42887}}, {{42891, 42892}}, {{613, 42893}}, {{42896, 42897}}, // NOLINT
- {{42898, 42899}}, {{42912, 42913}}, {{42914, 42915}}, {{42916, 42917}}, // NOLINT
- {{42918, 42919}}, {{42920, 42921}}, {{614, 42922}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable5Size = 179; // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable5[358] = {
- 1600, 1, 1601, 1, 1602, 5, 1603, 5, 1604, 9, 1605, 9, 1606, 13, 1607, 13, // NOLINT
- 1608, 17, 1609, 17, 1610, 21, 1611, 21, 1612, 25, 1613, 25, 1614, 29, 1615, 29, // NOLINT
- 1616, 33, 1617, 33, 1618, 37, 1619, 37, 1620, 41, 1621, 41, 1622, 45, 1623, 45, // NOLINT
- 1624, 49, 1625, 49, 1626, 53, 1627, 53, 1628, 57, 1629, 57, 1630, 61, 1631, 61, // NOLINT
- 1632, 65, 1633, 65, 1634, 69, 1635, 69, 1636, 73, 1637, 73, 1638, 77, 1639, 77, // NOLINT
- 1640, 81, 1641, 81, 1642, 85, 1643, 85, 1644, 89, 1645, 89, 1664, 93, 1665, 93, // NOLINT
- 1666, 97, 1667, 97, 1668, 101, 1669, 101, 1670, 105, 1671, 105, 1672, 109, 1673, 109, // NOLINT
- 1674, 113, 1675, 113, 1676, 117, 1677, 117, 1678, 121, 1679, 121, 1680, 125, 1681, 125, // NOLINT
- 1682, 129, 1683, 129, 1684, 133, 1685, 133, 1686, 137, 1687, 137, 1826, 141, 1827, 141, // NOLINT
- 1828, 145, 1829, 145, 1830, 149, 1831, 149, 1832, 153, 1833, 153, 1834, 157, 1835, 157, // NOLINT
- 1836, 161, 1837, 161, 1838, 165, 1839, 165, 1842, 169, 1843, 169, 1844, 173, 1845, 173, // NOLINT
- 1846, 177, 1847, 177, 1848, 181, 1849, 181, 1850, 185, 1851, 185, 1852, 189, 1853, 189, // NOLINT
- 1854, 193, 1855, 193, 1856, 197, 1857, 197, 1858, 201, 1859, 201, 1860, 205, 1861, 205, // NOLINT
- 1862, 209, 1863, 209, 1864, 213, 1865, 213, 1866, 217, 1867, 217, 1868, 221, 1869, 221, // NOLINT
- 1870, 225, 1871, 225, 1872, 229, 1873, 229, 1874, 233, 1875, 233, 1876, 237, 1877, 237, // NOLINT
- 1878, 241, 1879, 241, 1880, 245, 1881, 245, 1882, 249, 1883, 249, 1884, 253, 1885, 253, // NOLINT
- 1886, 257, 1887, 257, 1888, 261, 1889, 261, 1890, 265, 1891, 265, 1892, 269, 1893, 269, // NOLINT
- 1894, 273, 1895, 273, 1896, 277, 1897, 277, 1898, 281, 1899, 281, 1900, 285, 1901, 285, // NOLINT
- 1902, 289, 1903, 289, 1913, 293, 1914, 293, 1915, 297, 1916, 297, 1917, 301, 1918, 305, // NOLINT
- 1919, 305, 1920, 309, 1921, 309, 1922, 313, 1923, 313, 1924, 317, 1925, 317, 1926, 321, // NOLINT
- 1927, 321, 1931, 325, 1932, 325, 1933, 329, 1936, 333, 1937, 333, 1938, 337, 1939, 337, // NOLINT
- 1952, 341, 1953, 341, 1954, 345, 1955, 345, 1956, 349, 1957, 349, 1958, 353, 1959, 353, // NOLINT
- 1960, 357, 1961, 357, 1962, 361 }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings5Size = 92; // NOLINT
-static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings7[3] = { // NOLINT
- {{65313, 65345}}, {{65338, 65370}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable7Size = 4; // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable7[8] = {
- 1073749793, 1, 7994, 5, 1073749825, 1, 8026, 5 }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings7Size = 3; // NOLINT
-int Ecma262UnCanonicalize::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupMapping<true>(kEcma262UnCanonicalizeTable0,
- kEcma262UnCanonicalizeTable0Size,
- kEcma262UnCanonicalizeMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<true>(kEcma262UnCanonicalizeTable1,
- kEcma262UnCanonicalizeTable1Size,
- kEcma262UnCanonicalizeMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 5: return LookupMapping<true>(kEcma262UnCanonicalizeTable5,
- kEcma262UnCanonicalizeTable5Size,
- kEcma262UnCanonicalizeMultiStrings5,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<true>(kEcma262UnCanonicalizeTable7,
- kEcma262UnCanonicalizeTable7Size,
- kEcma262UnCanonicalizeMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
- }
-}
-
-static const MultiCharacterSpecialCase<1> kCanonicalizationRangeMultiStrings0[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kCanonicalizationRangeTable0Size = 70; // NOLINT
-static const int32_t kCanonicalizationRangeTable0[140] = {
- 1073741889, 100, 90, 0, 1073741921, 100, 122, 0, 1073742016, 88, 214, 0, 1073742040, 24, 222, 0, // NOLINT
- 1073742048, 88, 246, 0, 1073742072, 24, 254, 0, 1073742715, 8, 893, 0, 1073742728, 8, 906, 0, // NOLINT
- 1073742749, 8, 927, 0, 1073742759, 16, 939, 0, 1073742765, 8, 943, 0, 1073742781, 8, 959, 0, // NOLINT
- 1073742791, 16, 971, 0, 1073742845, 8, 1023, 0, 1073742848, 60, 1039, 0, 1073742864, 124, 1071, 0, // NOLINT
- 1073742896, 124, 1103, 0, 1073742928, 60, 1119, 0, 1073743153, 148, 1366, 0, 1073743201, 148, 1414, 0, // NOLINT
- 1073746080, 148, 4293, 0, 1073749760, 28, 7943, 0, 1073749768, 28, 7951, 0, 1073749776, 20, 7957, 0, // NOLINT
- 1073749784, 20, 7965, 0, 1073749792, 28, 7975, 0, 1073749800, 28, 7983, 0, 1073749808, 28, 7991, 0, // NOLINT
- 1073749816, 28, 7999, 0, 1073749824, 20, 8005, 0, 1073749832, 20, 8013, 0, 1073749856, 28, 8039, 0, // NOLINT
- 1073749864, 28, 8047, 0, 1073749874, 12, 8053, 0, 1073749960, 12, 8139, 0 }; // NOLINT
-static const uint16_t kCanonicalizationRangeMultiStrings0Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kCanonicalizationRangeMultiStrings1[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kCanonicalizationRangeTable1Size = 14; // NOLINT
-static const int32_t kCanonicalizationRangeTable1[28] = {
- 1073742176, 60, 367, 0, 1073742192, 60, 383, 0, 1073743030, 100, 1231, 0, 1073743056, 100, 1257, 0, // NOLINT
- 1073744896, 184, 3118, 0, 1073744944, 184, 3166, 0, 1073745152, 148, 3365, 0 }; // NOLINT
-static const uint16_t kCanonicalizationRangeMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kCanonicalizationRangeMultiStrings7[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
-static const uint16_t kCanonicalizationRangeTable7Size = 4; // NOLINT
-static const int32_t kCanonicalizationRangeTable7[8] = {
- 1073749793, 100, 7994, 0, 1073749825, 100, 8026, 0 }; // NOLINT
-static const uint16_t kCanonicalizationRangeMultiStrings7Size = 1; // NOLINT
-int CanonicalizationRange::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupMapping<false>(kCanonicalizationRangeTable0,
- kCanonicalizationRangeTable0Size,
- kCanonicalizationRangeMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<false>(kCanonicalizationRangeTable1,
- kCanonicalizationRangeTable1Size,
- kCanonicalizationRangeMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<false>(kCanonicalizationRangeTable7,
- kCanonicalizationRangeTable7Size,
- kCanonicalizationRangeMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
- }
-}
-
-
-const uchar UnicodeData::kMaxCodePoint = 65533;
-
-int UnicodeData::GetByteCount() {
- return kUppercaseTable0Size * sizeof(int32_t) // NOLINT
- + kUppercaseTable1Size * sizeof(int32_t) // NOLINT
- + kUppercaseTable5Size * sizeof(int32_t) // NOLINT
- + kUppercaseTable7Size * sizeof(int32_t) // NOLINT
- + kLowercaseTable0Size * sizeof(int32_t) // NOLINT
- + kLowercaseTable1Size * sizeof(int32_t) // NOLINT
- + kLowercaseTable5Size * sizeof(int32_t) // NOLINT
- + kLowercaseTable7Size * sizeof(int32_t) // NOLINT
- + kLetterTable0Size * sizeof(int32_t) // NOLINT
- + kLetterTable1Size * sizeof(int32_t) // NOLINT
- + kLetterTable2Size * sizeof(int32_t) // NOLINT
- + kLetterTable3Size * sizeof(int32_t) // NOLINT
- + kLetterTable4Size * sizeof(int32_t) // NOLINT
- + kLetterTable5Size * sizeof(int32_t) // NOLINT
- + kLetterTable6Size * sizeof(int32_t) // NOLINT
- + kLetterTable7Size * sizeof(int32_t) // NOLINT
- + kSpaceTable0Size * sizeof(int32_t) // NOLINT
- + kSpaceTable1Size * sizeof(int32_t) // NOLINT
- + kNumberTable0Size * sizeof(int32_t) // NOLINT
- + kNumberTable5Size * sizeof(int32_t) // NOLINT
- + kNumberTable7Size * sizeof(int32_t) // NOLINT
- + kWhiteSpaceTable0Size * sizeof(int32_t) // NOLINT
- + kWhiteSpaceTable1Size * sizeof(int32_t) // NOLINT
- + kLineTerminatorTable0Size * sizeof(int32_t) // NOLINT
- + kLineTerminatorTable1Size * sizeof(int32_t) // NOLINT
- + kCombiningMarkTable0Size * sizeof(int32_t) // NOLINT
- + kCombiningMarkTable1Size * sizeof(int32_t) // NOLINT
- + kCombiningMarkTable5Size * sizeof(int32_t) // NOLINT
- + kCombiningMarkTable7Size * sizeof(int32_t) // NOLINT
- + kConnectorPunctuationTable0Size * sizeof(int32_t) // NOLINT
- + kConnectorPunctuationTable1Size * sizeof(int32_t) // NOLINT
- + kConnectorPunctuationTable7Size * sizeof(int32_t) // NOLINT
- + kToLowercaseMultiStrings0Size * sizeof(MultiCharacterSpecialCase<2>) // NOLINT
- + kToLowercaseMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kToLowercaseMultiStrings5Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kToLowercaseMultiStrings7Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kToUppercaseMultiStrings0Size * sizeof(MultiCharacterSpecialCase<3>) // NOLINT
- + kToUppercaseMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kToUppercaseMultiStrings5Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kToUppercaseMultiStrings7Size * sizeof(MultiCharacterSpecialCase<3>) // NOLINT
- + kEcma262CanonicalizeMultiStrings0Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kEcma262CanonicalizeMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kEcma262CanonicalizeMultiStrings5Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kEcma262CanonicalizeMultiStrings7Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kEcma262UnCanonicalizeMultiStrings0Size * sizeof(MultiCharacterSpecialCase<4>) // NOLINT
- + kEcma262UnCanonicalizeMultiStrings1Size * sizeof(MultiCharacterSpecialCase<2>) // NOLINT
- + kEcma262UnCanonicalizeMultiStrings5Size * sizeof(MultiCharacterSpecialCase<2>) // NOLINT
- + kEcma262UnCanonicalizeMultiStrings7Size * sizeof(MultiCharacterSpecialCase<2>) // NOLINT
- + kCanonicalizationRangeMultiStrings0Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kCanonicalizationRangeMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kCanonicalizationRangeMultiStrings7Size * sizeof(MultiCharacterSpecialCase<1>); // NOLINT
-}
-
-} // namespace unicode
diff --git a/src/3rdparty/v8/src/unicode.h b/src/3rdparty/v8/src/unicode.h
deleted file mode 100644
index 550b04a..0000000
--- a/src/3rdparty/v8/src/unicode.h
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_UNICODE_H_
-#define V8_UNICODE_H_
-
-#ifndef _WIN32_WCE
-#include <sys/types.h>
-#endif
-#include <globals.h>
-/**
- * \file
- * Definitions and convenience functions for working with unicode.
- */
-
-namespace unibrow {
-
-typedef unsigned int uchar;
-typedef unsigned char byte;
-
-/**
- * The max length of the result of converting the case of a single
- * character.
- */
-const int kMaxMappingSize = 4;
-
-template <class T, int size = 256>
-class Predicate {
- public:
- inline Predicate() { }
- inline bool get(uchar c);
- private:
- friend class Test;
- bool CalculateValue(uchar c);
- struct CacheEntry {
- inline CacheEntry() : code_point_(0), value_(0) { }
- inline CacheEntry(uchar code_point, bool value)
- : code_point_(code_point),
- value_(value) { }
- uchar code_point_ : 21;
- bool value_ : 1;
- };
- static const int kSize = size;
- static const int kMask = kSize - 1;
- CacheEntry entries_[kSize];
-};
-
-// A cache used in case conversion. It caches the value for characters
-// that either have no mapping or map to a single character independent
-// of context. Characters that map to more than one character or that
-// map differently depending on context are always looked up.
-template <class T, int size = 256>
-class Mapping {
- public:
- inline Mapping() { }
- inline int get(uchar c, uchar n, uchar* result);
- private:
- friend class Test;
- int CalculateValue(uchar c, uchar n, uchar* result);
- struct CacheEntry {
- inline CacheEntry() : code_point_(kNoChar), offset_(0) { }
- inline CacheEntry(uchar code_point, signed offset)
- : code_point_(code_point),
- offset_(offset) { }
- uchar code_point_;
- signed offset_;
- static const int kNoChar = (1 << 21) - 1;
- };
- static const int kSize = size;
- static const int kMask = kSize - 1;
- CacheEntry entries_[kSize];
-};
-
-class UnicodeData {
- private:
- friend class Test;
- static int GetByteCount();
- static const uchar kMaxCodePoint;
-};
-
-class Utf16 {
- public:
- static inline bool IsLeadSurrogate(int code) {
- if (code == kNoPreviousCharacter) return false;
- return (code & 0xfc00) == 0xd800;
- }
- static inline bool IsTrailSurrogate(int code) {
- if (code == kNoPreviousCharacter) return false;
- return (code & 0xfc00) == 0xdc00;
- }
-
- static inline int CombineSurrogatePair(uchar lead, uchar trail) {
- return 0x10000 + ((lead & 0x3ff) << 10) + (trail & 0x3ff);
- }
- static const int kNoPreviousCharacter = -1;
- static const uchar kMaxNonSurrogateCharCode = 0xffff;
- // Encoding a single UTF-16 code unit will produce 1, 2 or 3 bytes
- // of UTF-8 data. The special case where the unit is a surrogate
- // trail produces 1 byte net, because the encoding of the pair is
- // 4 bytes and the 3 bytes that were used to encode the lead surrogate
- // can be reclaimed.
- static const int kMaxExtraUtf8BytesForOneUtf16CodeUnit = 3;
- // One UTF-16 surrogate is endoded (illegally) as 3 UTF-8 bytes.
- // The illegality stems from the surrogate not being part of a pair.
- static const int kUtf8BytesToCodeASurrogate = 3;
- static inline uint16_t LeadSurrogate(uint32_t char_code) {
- return 0xd800 + (((char_code - 0x10000) >> 10) & 0x3ff);
- }
- static inline uint16_t TrailSurrogate(uint32_t char_code) {
- return 0xdc00 + (char_code & 0x3ff);
- }
-};
-
-class Latin1 {
- public:
-#ifndef ENABLE_LATIN_1
- static const unsigned kMaxChar = 0x7f;
-#else
- static const unsigned kMaxChar = 0xff;
-#endif
- // Returns 0 if character does not convert to single latin-1 character
- // or if the character doesn't not convert back to latin-1 via inverse
- // operation (upper to lower, etc).
- static inline uint16_t ConvertNonLatin1ToLatin1(uint16_t);
-};
-
-class Utf8 {
- public:
- static inline uchar Length(uchar chr, int previous);
- static inline unsigned Encode(
- char* out, uchar c, int previous);
- static uchar CalculateValue(const byte* str,
- unsigned length,
- unsigned* cursor);
- static const uchar kBadChar = 0xFFFD;
- static const unsigned kMaxEncodedSize = 4;
- static const unsigned kMaxOneByteChar = 0x7f;
- static const unsigned kMaxTwoByteChar = 0x7ff;
- static const unsigned kMaxThreeByteChar = 0xffff;
- static const unsigned kMaxFourByteChar = 0x1fffff;
-
- // A single surrogate is coded as a 3 byte UTF-8 sequence, but two together
- // that match are coded as a 4 byte UTF-8 sequence.
- static const unsigned kBytesSavedByCombiningSurrogates = 2;
- static const unsigned kSizeOfUnmatchedSurrogate = 3;
- static inline uchar ValueOf(const byte* str,
- unsigned length,
- unsigned* cursor);
-};
-
-
-class Utf8DecoderBase {
- public:
- // Initialization done in subclass.
- inline Utf8DecoderBase();
- inline Utf8DecoderBase(uint16_t* buffer,
- unsigned buffer_length,
- const uint8_t* stream,
- unsigned stream_length);
- inline unsigned Utf16Length() const { return utf16_length_; }
- protected:
- // This reads all characters and sets the utf16_length_.
- // The first buffer_length utf16 chars are cached in the buffer.
- void Reset(uint16_t* buffer,
- unsigned buffer_length,
- const uint8_t* stream,
- unsigned stream_length);
- static void WriteUtf16Slow(const uint8_t* stream,
- uint16_t* data,
- unsigned length);
- const uint8_t* unbuffered_start_;
- unsigned utf16_length_;
- bool last_byte_of_buffer_unused_;
- private:
- DISALLOW_COPY_AND_ASSIGN(Utf8DecoderBase);
-};
-
-template <unsigned kBufferSize>
-class Utf8Decoder : public Utf8DecoderBase {
- public:
- inline Utf8Decoder() {}
- inline Utf8Decoder(const char* stream, unsigned length);
- inline void Reset(const char* stream, unsigned length);
- inline unsigned WriteUtf16(uint16_t* data, unsigned length) const;
- private:
- uint16_t buffer_[kBufferSize];
-};
-
-
-struct Uppercase {
- static bool Is(uchar c);
-};
-struct Lowercase {
- static bool Is(uchar c);
-};
-struct Letter {
- static bool Is(uchar c);
-};
-struct Space {
- static bool Is(uchar c);
-};
-struct Number {
- static bool Is(uchar c);
-};
-struct WhiteSpace {
- static bool Is(uchar c);
-};
-struct LineTerminator {
- static bool Is(uchar c);
-};
-struct CombiningMark {
- static bool Is(uchar c);
-};
-struct ConnectorPunctuation {
- static bool Is(uchar c);
-};
-struct ToLowercase {
- static const int kMaxWidth = 3;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
-};
-struct ToUppercase {
- static const int kMaxWidth = 3;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
-};
-struct Ecma262Canonicalize {
- static const int kMaxWidth = 1;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
-};
-struct Ecma262UnCanonicalize {
- static const int kMaxWidth = 4;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
-};
-struct CanonicalizationRange {
- static const int kMaxWidth = 1;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
-};
-
-} // namespace unibrow
-
-#endif // V8_UNICODE_H_
diff --git a/src/3rdparty/v8/src/uri.h b/src/3rdparty/v8/src/uri.h
deleted file mode 100644
index c7a6301..0000000
--- a/src/3rdparty/v8/src/uri.h
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_URI_H_
-#define V8_URI_H_
-
-#include "v8.h"
-
-#include "string-search.h"
-#include "v8utils.h"
-#include "v8conversions.h"
-
-namespace v8 {
-namespace internal {
-
-
-template <typename Char>
-static INLINE(Vector<const Char> GetCharVector(Handle<String> string));
-
-
-template <>
-Vector<const uint8_t> GetCharVector(Handle<String> string) {
- String::FlatContent flat = string->GetFlatContent();
- ASSERT(flat.IsAscii());
- return flat.ToOneByteVector();
-}
-
-
-template <>
-Vector<const uc16> GetCharVector(Handle<String> string) {
- String::FlatContent flat = string->GetFlatContent();
- ASSERT(flat.IsTwoByte());
- return flat.ToUC16Vector();
-}
-
-
-class URIUnescape : public AllStatic {
- public:
- template<typename Char>
- static Handle<String> Unescape(Isolate* isolate, Handle<String> source);
-
- private:
- static const signed char kHexValue['g'];
-
- template<typename Char>
- static Handle<String> UnescapeSlow(
- Isolate* isolate, Handle<String> string, int start_index);
-
- static INLINE(int TwoDigitHex(uint16_t character1, uint16_t character2));
-
- template <typename Char>
- static INLINE(int UnescapeChar(Vector<const Char> vector,
- int i,
- int length,
- int* step));
-};
-
-
-const signed char URIUnescape::kHexValue[] = {
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1,
- -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 10, 11, 12, 13, 14, 15 };
-
-
-template<typename Char>
-Handle<String> URIUnescape::Unescape(Isolate* isolate, Handle<String> source) {
- int index;
- { AssertNoAllocation no_allocation;
- StringSearch<uint8_t, Char> search(isolate, STATIC_ASCII_VECTOR("%"));
- index = search.Search(GetCharVector<Char>(source), 0);
- if (index < 0) return source;
- }
- return UnescapeSlow<Char>(isolate, source, index);
-}
-
-
-template <typename Char>
-Handle<String> URIUnescape::UnescapeSlow(
- Isolate* isolate, Handle<String> string, int start_index) {
- bool one_byte = true;
- int length = string->length();
-
- int unescaped_length = 0;
- { AssertNoAllocation no_allocation;
- Vector<const Char> vector = GetCharVector<Char>(string);
- for (int i = start_index; i < length; unescaped_length++) {
- int step;
- if (UnescapeChar(vector, i, length, &step) >
- String::kMaxOneByteCharCode) {
- one_byte = false;
- }
- i += step;
- }
- }
-
- ASSERT(start_index < length);
- Handle<String> first_part =
- isolate->factory()->NewProperSubString(string, 0, start_index);
-
- int dest_position = 0;
- Handle<String> second_part;
- if (one_byte) {
- Handle<SeqOneByteString> dest =
- isolate->factory()->NewRawOneByteString(unescaped_length);
- AssertNoAllocation no_allocation;
- Vector<const Char> vector = GetCharVector<Char>(string);
- for (int i = start_index; i < length; dest_position++) {
- int step;
- dest->SeqOneByteStringSet(dest_position,
- UnescapeChar(vector, i, length, &step));
- i += step;
- }
- second_part = dest;
- } else {
- Handle<SeqTwoByteString> dest =
- isolate->factory()->NewRawTwoByteString(unescaped_length);
- AssertNoAllocation no_allocation;
- Vector<const Char> vector = GetCharVector<Char>(string);
- for (int i = start_index; i < length; dest_position++) {
- int step;
- dest->SeqTwoByteStringSet(dest_position,
- UnescapeChar(vector, i, length, &step));
- i += step;
- }
- second_part = dest;
- }
- return isolate->factory()->NewConsString(first_part, second_part);
-}
-
-
-int URIUnescape::TwoDigitHex(uint16_t character1, uint16_t character2) {
- if (character1 > 'f') return -1;
- int hi = kHexValue[character1];
- if (hi == -1) return -1;
- if (character2 > 'f') return -1;
- int lo = kHexValue[character2];
- if (lo == -1) return -1;
- return (hi << 4) + lo;
-}
-
-
-template <typename Char>
-int URIUnescape::UnescapeChar(Vector<const Char> vector,
- int i,
- int length,
- int* step) {
- uint16_t character = vector[i];
- int32_t hi = 0;
- int32_t lo = 0;
- if (character == '%' &&
- i <= length - 6 &&
- vector[i + 1] == 'u' &&
- (hi = TwoDigitHex(vector[i + 2],
- vector[i + 3])) != -1 &&
- (lo = TwoDigitHex(vector[i + 4],
- vector[i + 5])) != -1) {
- *step = 6;
- return (hi << 8) + lo;
- } else if (character == '%' &&
- i <= length - 3 &&
- (lo = TwoDigitHex(vector[i + 1],
- vector[i + 2])) != -1) {
- *step = 3;
- return lo;
- } else {
- *step = 1;
- return character;
- }
-}
-
-
-class URIEscape : public AllStatic {
- public:
- template<typename Char>
- static Handle<String> Escape(Isolate* isolate, Handle<String> string);
-
- private:
- static const char kHexChars[17];
- static const char kNotEscaped[256];
-
- static bool IsNotEscaped(uint16_t c) { return kNotEscaped[c] != 0; }
-};
-
-
-const char URIEscape::kHexChars[] = "0123456789ABCDEF";
-
-
-// kNotEscaped is generated by the following:
-//
-// #!/bin/perl
-// for (my $i = 0; $i < 256; $i++) {
-// print "\n" if $i % 16 == 0;
-// my $c = chr($i);
-// my $escaped = 1;
-// $escaped = 0 if $c =~ m#[A-Za-z0-9@*_+./-]#;
-// print $escaped ? "0, " : "1, ";
-// }
-
-const char URIEscape::kNotEscaped[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
-
-template<typename Char>
-Handle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
- ASSERT(string->IsFlat());
- int escaped_length = 0;
- int length = string->length();
-
- { AssertNoAllocation no_allocation;
- Vector<const Char> vector = GetCharVector<Char>(string);
- for (int i = 0; i < length; i++) {
- uint16_t c = vector[i];
- if (c >= 256) {
- escaped_length += 6;
- } else if (IsNotEscaped(c)) {
- escaped_length++;
- } else {
- escaped_length += 3;
- }
-
- // We don't allow strings that are longer than a maximal length.
- ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
- if (escaped_length > String::kMaxLength) {
- isolate->context()->mark_out_of_memory();
- return Handle<String>::null();
- }
- }
- }
-
- // No length change implies no change. Return original string if no change.
- if (escaped_length == length) return string;
-
- Handle<SeqOneByteString> dest =
- isolate->factory()->NewRawOneByteString(escaped_length);
- int dest_position = 0;
-
- { AssertNoAllocation no_allocation;
- Vector<const Char> vector = GetCharVector<Char>(string);
- for (int i = 0; i < length; i++) {
- uint16_t c = vector[i];
- if (c >= 256) {
- dest->SeqOneByteStringSet(dest_position, '%');
- dest->SeqOneByteStringSet(dest_position+1, 'u');
- dest->SeqOneByteStringSet(dest_position+2, kHexChars[c >> 12]);
- dest->SeqOneByteStringSet(dest_position+3, kHexChars[(c >> 8) & 0xf]);
- dest->SeqOneByteStringSet(dest_position+4, kHexChars[(c >> 4) & 0xf]);
- dest->SeqOneByteStringSet(dest_position+5, kHexChars[c & 0xf]);
- dest_position += 6;
- } else if (IsNotEscaped(c)) {
- dest->SeqOneByteStringSet(dest_position, c);
- dest_position++;
- } else {
- dest->SeqOneByteStringSet(dest_position, '%');
- dest->SeqOneByteStringSet(dest_position+1, kHexChars[c >> 4]);
- dest->SeqOneByteStringSet(dest_position+2, kHexChars[c & 0xf]);
- dest_position += 3;
- }
- }
- }
-
- return dest;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_URI_H_
diff --git a/src/3rdparty/v8/src/uri.js b/src/3rdparty/v8/src/uri.js
deleted file mode 100644
index 1de22f8..0000000
--- a/src/3rdparty/v8/src/uri.js
+++ /dev/null
@@ -1,452 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file contains support for URI manipulations written in
-// JavaScript.
-
-// Expect $String = global.String;
-
-// Lazily initialized.
-var hexCharArray = 0;
-var hexCharCodeArray = 0;
-
-
-function URIAddEncodedOctetToBuffer(octet, result, index) {
- result[index++] = 37; // Char code of '%'.
- result[index++] = hexCharCodeArray[octet >> 4];
- result[index++] = hexCharCodeArray[octet & 0x0F];
- return index;
-}
-
-
-function URIEncodeOctets(octets, result, index) {
- if (hexCharCodeArray === 0) {
- hexCharCodeArray = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- 65, 66, 67, 68, 69, 70];
- }
- index = URIAddEncodedOctetToBuffer(octets[0], result, index);
- if (octets[1]) index = URIAddEncodedOctetToBuffer(octets[1], result, index);
- if (octets[2]) index = URIAddEncodedOctetToBuffer(octets[2], result, index);
- if (octets[3]) index = URIAddEncodedOctetToBuffer(octets[3], result, index);
- return index;
-}
-
-
-function URIEncodeSingle(cc, result, index) {
- var x = (cc >> 12) & 0xF;
- var y = (cc >> 6) & 63;
- var z = cc & 63;
- var octets = new $Array(3);
- if (cc <= 0x007F) {
- octets[0] = cc;
- } else if (cc <= 0x07FF) {
- octets[0] = y + 192;
- octets[1] = z + 128;
- } else {
- octets[0] = x + 224;
- octets[1] = y + 128;
- octets[2] = z + 128;
- }
- return URIEncodeOctets(octets, result, index);
-}
-
-
-function URIEncodePair(cc1 , cc2, result, index) {
- var u = ((cc1 >> 6) & 0xF) + 1;
- var w = (cc1 >> 2) & 0xF;
- var x = cc1 & 3;
- var y = (cc2 >> 6) & 0xF;
- var z = cc2 & 63;
- var octets = new $Array(4);
- octets[0] = (u >> 2) + 240;
- octets[1] = (((u & 3) << 4) | w) + 128;
- octets[2] = ((x << 4) | y) + 128;
- octets[3] = z + 128;
- return URIEncodeOctets(octets, result, index);
-}
-
-
-function URIHexCharsToCharCode(highChar, lowChar) {
- var highCode = HexValueOf(highChar);
- var lowCode = HexValueOf(lowChar);
- if (highCode == -1 || lowCode == -1) {
- throw new $URIError("URI malformed");
- }
- return (highCode << 4) | lowCode;
-}
-
-
-function URIDecodeOctets(octets, result, index) {
- var value;
- var o0 = octets[0];
- if (o0 < 0x80) {
- value = o0;
- } else if (o0 < 0xc2) {
- throw new $URIError("URI malformed");
- } else {
- var o1 = octets[1];
- if (o0 < 0xe0) {
- var a = o0 & 0x1f;
- if ((o1 < 0x80) || (o1 > 0xbf)) {
- throw new $URIError("URI malformed");
- }
- var b = o1 & 0x3f;
- value = (a << 6) + b;
- if (value < 0x80 || value > 0x7ff) {
- throw new $URIError("URI malformed");
- }
- } else {
- var o2 = octets[2];
- if (o0 < 0xf0) {
- var a = o0 & 0x0f;
- if ((o1 < 0x80) || (o1 > 0xbf)) {
- throw new $URIError("URI malformed");
- }
- var b = o1 & 0x3f;
- if ((o2 < 0x80) || (o2 > 0xbf)) {
- throw new $URIError("URI malformed");
- }
- var c = o2 & 0x3f;
- value = (a << 12) + (b << 6) + c;
- if ((value < 0x800) || (value > 0xffff)) {
- throw new $URIError("URI malformed");
- }
- } else {
- var o3 = octets[3];
- if (o0 < 0xf8) {
- var a = (o0 & 0x07);
- if ((o1 < 0x80) || (o1 > 0xbf)) {
- throw new $URIError("URI malformed");
- }
- var b = (o1 & 0x3f);
- if ((o2 < 0x80) || (o2 > 0xbf)) {
- throw new $URIError("URI malformed");
- }
- var c = (o2 & 0x3f);
- if ((o3 < 0x80) || (o3 > 0xbf)) {
- throw new $URIError("URI malformed");
- }
- var d = (o3 & 0x3f);
- value = (a << 18) + (b << 12) + (c << 6) + d;
- if ((value < 0x10000) || (value > 0x10ffff)) {
- throw new $URIError("URI malformed");
- }
- } else {
- throw new $URIError("URI malformed");
- }
- }
- }
- }
- if (0xD800 <= value && value <= 0xDFFF) {
- throw new $URIError("URI malformed");
- }
- if (value < 0x10000) {
- %_TwoByteSeqStringSetChar(result, index++, value);
- return index;
- } else {
- %_TwoByteSeqStringSetChar(result, index++, (value >> 10) + 0xd7c0);
- %_TwoByteSeqStringSetChar(result, index++, (value & 0x3ff) + 0xdc00);
- return index;
- }
-}
-
-
-// ECMA-262, section 15.1.3
-function Encode(uri, unescape) {
- var uriLength = uri.length;
- var array = new InternalArray(uriLength);
- var index = 0;
- for (var k = 0; k < uriLength; k++) {
- var cc1 = uri.charCodeAt(k);
- if (unescape(cc1)) {
- array[index++] = cc1;
- } else {
- if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw new $URIError("URI malformed");
- if (cc1 < 0xD800 || cc1 > 0xDBFF) {
- index = URIEncodeSingle(cc1, array, index);
- } else {
- k++;
- if (k == uriLength) throw new $URIError("URI malformed");
- var cc2 = uri.charCodeAt(k);
- if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw new $URIError("URI malformed");
- index = URIEncodePair(cc1, cc2, array, index);
- }
- }
- }
-
- var result = %NewString(array.length, NEW_ONE_BYTE_STRING);
- for (var i = 0; i < array.length; i++) {
- %_OneByteSeqStringSetChar(result, i, array[i]);
- }
- return result;
-}
-
-
-// ECMA-262, section 15.1.3
-function Decode(uri, reserved) {
- var uriLength = uri.length;
- var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING);
- var index = 0;
- var k = 0;
-
- // Optimistically assume ascii string.
- for ( ; k < uriLength; k++) {
- var code = uri.charCodeAt(k);
- if (code == 37) { // '%'
- if (k + 2 >= uriLength) throw new $URIError("URI malformed");
- var cc = URIHexCharsToCharCode(uri.charCodeAt(k+1), uri.charCodeAt(k+2));
- if (cc >> 7) break; // Assumption wrong, two byte string.
- if (reserved(cc)) {
- %_OneByteSeqStringSetChar(one_byte, index++, 37); // '%'.
- %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+1));
- %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+2));
- } else {
- %_OneByteSeqStringSetChar(one_byte, index++, cc);
- }
- k += 2;
- } else {
- if (code > 0x7f) break; // Assumption wrong, two byte string.
- %_OneByteSeqStringSetChar(one_byte, index++, code);
- }
- }
-
- one_byte = %TruncateString(one_byte, index);
- if (k == uriLength) return one_byte;
-
- // Write into two byte string.
- var two_byte = %NewString(uriLength - k, NEW_TWO_BYTE_STRING);
- index = 0;
-
- for ( ; k < uriLength; k++) {
- var code = uri.charCodeAt(k);
- if (code == 37) { // '%'
- if (k + 2 >= uriLength) throw new $URIError("URI malformed");
- var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
- if (cc >> 7) {
- var n = 0;
- while (((cc << ++n) & 0x80) != 0) { }
- if (n == 1 || n > 4) throw new $URIError("URI malformed");
- var octets = new $Array(n);
- octets[0] = cc;
- if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
- for (var i = 1; i < n; i++) {
- if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
- octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
- uri.charCodeAt(++k));
- }
- index = URIDecodeOctets(octets, two_byte, index);
- } else if (reserved(cc)) {
- %_TwoByteSeqStringSetChar(two_byte, index++, 37); // '%'.
- %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k - 1));
- %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k));
- } else {
- %_TwoByteSeqStringSetChar(two_byte, index++, cc);
- }
- } else {
- %_TwoByteSeqStringSetChar(two_byte, index++, code);
- }
- }
-
- two_byte = %TruncateString(two_byte, index);
- return one_byte + two_byte;
-}
-
-
-// ECMA-262 - 15.1.3.1.
-function URIDecode(uri) {
- var reservedPredicate = function(cc) {
- // #$
- if (35 <= cc && cc <= 36) return true;
- // &
- if (cc == 38) return true;
- // +,
- if (43 <= cc && cc <= 44) return true;
- // /
- if (cc == 47) return true;
- // :;
- if (58 <= cc && cc <= 59) return true;
- // =
- if (cc == 61) return true;
- // ?@
- if (63 <= cc && cc <= 64) return true;
-
- return false;
- };
- var string = ToString(uri);
- return Decode(string, reservedPredicate);
-}
-
-
-// ECMA-262 - 15.1.3.2.
-function URIDecodeComponent(component) {
- var reservedPredicate = function(cc) { return false; };
- var string = ToString(component);
- return Decode(string, reservedPredicate);
-}
-
-
-// Does the char code correspond to an alpha-numeric char.
-function isAlphaNumeric(cc) {
- // a - z
- if (97 <= cc && cc <= 122) return true;
- // A - Z
- if (65 <= cc && cc <= 90) return true;
- // 0 - 9
- if (48 <= cc && cc <= 57) return true;
-
- return false;
-}
-
-
-// ECMA-262 - 15.1.3.3.
-function URIEncode(uri) {
- var unescapePredicate = function(cc) {
- if (isAlphaNumeric(cc)) return true;
- // !
- if (cc == 33) return true;
- // #$
- if (35 <= cc && cc <= 36) return true;
- // &'()*+,-./
- if (38 <= cc && cc <= 47) return true;
- // :;
- if (58 <= cc && cc <= 59) return true;
- // =
- if (cc == 61) return true;
- // ?@
- if (63 <= cc && cc <= 64) return true;
- // _
- if (cc == 95) return true;
- // ~
- if (cc == 126) return true;
-
- return false;
- };
-
- var string = ToString(uri);
- return Encode(string, unescapePredicate);
-}
-
-
-// ECMA-262 - 15.1.3.4
-function URIEncodeComponent(component) {
- var unescapePredicate = function(cc) {
- if (isAlphaNumeric(cc)) return true;
- // !
- if (cc == 33) return true;
- // '()*
- if (39 <= cc && cc <= 42) return true;
- // -.
- if (45 <= cc && cc <= 46) return true;
- // _
- if (cc == 95) return true;
- // ~
- if (cc == 126) return true;
-
- return false;
- };
-
- var string = ToString(component);
- return Encode(string, unescapePredicate);
-}
-
-
-function HexValueOf(code) {
- // 0-9
- if (code >= 48 && code <= 57) return code - 48;
- // A-F
- if (code >= 65 && code <= 70) return code - 55;
- // a-f
- if (code >= 97 && code <= 102) return code - 87;
-
- return -1;
-}
-
-
-// Convert a character code to 4-digit hex string representation
-// 64 -> 0040, 62234 -> F31A.
-function CharCodeToHex4Str(cc) {
- var r = "";
- if (hexCharArray === 0) {
- hexCharArray = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
- "A", "B", "C", "D", "E", "F"];
- }
- for (var i = 0; i < 4; ++i) {
- var c = hexCharArray[cc & 0x0F];
- r = c + r;
- cc = cc >>> 4;
- }
- return r;
-}
-
-
-// Returns true if all digits in string s are valid hex numbers
-function IsValidHex(s) {
- for (var i = 0; i < s.length; ++i) {
- var cc = s.charCodeAt(i);
- if ((48 <= cc && cc <= 57) ||
- (65 <= cc && cc <= 70) ||
- (97 <= cc && cc <= 102)) {
- // '0'..'9', 'A'..'F' and 'a' .. 'f'.
- } else {
- return false;
- }
- }
- return true;
-}
-
-
-// ECMA-262 - B.2.1.
-function URIEscape(str) {
- var s = ToString(str);
- return %URIEscape(s);
-}
-
-
-// ECMA-262 - B.2.2.
-function URIUnescape(str) {
- var s = ToString(str);
- return %URIUnescape(s);
-}
-
-
-// -------------------------------------------------------------------
-
-function SetUpUri() {
- %CheckIsBootstrapping();
- // Set up non-enumerable URI functions on the global object and set
- // their names.
- InstallFunctions(global, DONT_ENUM, $Array(
- "escape", URIEscape,
- "unescape", URIUnescape,
- "decodeURI", URIDecode,
- "decodeURIComponent", URIDecodeComponent,
- "encodeURI", URIEncode,
- "encodeURIComponent", URIEncodeComponent
- ));
-}
-
-SetUpUri();
diff --git a/src/3rdparty/v8/src/utils-inl.h b/src/3rdparty/v8/src/utils-inl.h
deleted file mode 100644
index 76a3c10..0000000
--- a/src/3rdparty/v8/src/utils-inl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_UTILS_INL_H_
-#define V8_UTILS_INL_H_
-
-#include "list-inl.h"
-
-namespace v8 {
-namespace internal {
-
-template<typename T, int growth_factor, int max_growth>
-void Collector<T, growth_factor, max_growth>::Reset() {
- for (int i = chunks_.length() - 1; i >= 0; i--) {
- chunks_.at(i).Dispose();
- }
- chunks_.Rewind(0);
- index_ = 0;
- size_ = 0;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_UTILS_INL_H_
diff --git a/src/3rdparty/v8/src/utils.cc b/src/3rdparty/v8/src/utils.cc
deleted file mode 100644
index 7e8c088..0000000
--- a/src/3rdparty/v8/src/utils.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-#include "../include/v8stdint.h"
-#include "checks.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-
-SimpleStringBuilder::SimpleStringBuilder(int size) {
- buffer_ = Vector<char>::New(size);
- position_ = 0;
-}
-
-
-void SimpleStringBuilder::AddString(const char* s) {
- AddSubstring(s, StrLength(s));
-}
-
-
-void SimpleStringBuilder::AddSubstring(const char* s, int n) {
- ASSERT(!is_finalized() && position_ + n < buffer_.length());
- ASSERT(static_cast<size_t>(n) <= strlen(s));
- memcpy(&buffer_[position_], s, n * kCharSize);
- position_ += n;
-}
-
-
-void SimpleStringBuilder::AddPadding(char c, int count) {
- for (int i = 0; i < count; i++) {
- AddCharacter(c);
- }
-}
-
-
-void SimpleStringBuilder::AddDecimalInteger(int32_t value) {
- uint32_t number = static_cast<uint32_t>(value);
- if (value < 0) {
- AddCharacter('-');
- number = static_cast<uint32_t>(-value);
- }
- int digits = 1;
- for (uint32_t factor = 10; digits < 10; digits++, factor *= 10) {
- if (factor > number) break;
- }
- position_ += digits;
- for (int i = 1; i <= digits; i++) {
- buffer_[position_ - i] = '0' + static_cast<char>(number % 10);
- number /= 10;
- }
-}
-
-
-char* SimpleStringBuilder::Finalize() {
- ASSERT(!is_finalized() && position_ < buffer_.length());
- buffer_[position_] = '\0';
- // Make sure nobody managed to add a 0-character to the
- // buffer while building the string.
- ASSERT(strlen(buffer_.start()) == static_cast<size_t>(position_));
- position_ = -1;
- ASSERT(is_finalized());
- return buffer_.start();
-}
-
-
-const DivMagicNumbers DivMagicNumberFor(int32_t divisor) {
- switch (divisor) {
- case 3: return DivMagicNumberFor3;
- case 5: return DivMagicNumberFor5;
- case 7: return DivMagicNumberFor7;
- case 9: return DivMagicNumberFor9;
- case 11: return DivMagicNumberFor11;
- case 25: return DivMagicNumberFor25;
- case 125: return DivMagicNumberFor125;
- case 625: return DivMagicNumberFor625;
- default: return InvalidDivMagicNumber;
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/utils.h b/src/3rdparty/v8/src/utils.h
deleted file mode 100644
index c391b9c..0000000
--- a/src/3rdparty/v8/src/utils.h
+++ /dev/null
@@ -1,1086 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_UTILS_H_
-#define V8_UTILS_H_
-
-#include <stdlib.h>
-#include <string.h>
-#include <climits>
-
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// General helper functions
-
-#define IS_POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
-
-// Returns true iff x is a power of 2 (or zero). Cannot be used with the
-// maximally negative value of the type T (the -1 overflows).
-template <typename T>
-inline bool IsPowerOf2(T x) {
- return IS_POWER_OF_TWO(x);
-}
-
-
-// X must be a power of 2. Returns the number of trailing zeros.
-inline int WhichPowerOf2(uint32_t x) {
- ASSERT(IsPowerOf2(x));
- ASSERT(x != 0);
- int bits = 0;
-#ifdef DEBUG
- int original_x = x;
-#endif
- if (x >= 0x10000) {
- bits += 16;
- x >>= 16;
- }
- if (x >= 0x100) {
- bits += 8;
- x >>= 8;
- }
- if (x >= 0x10) {
- bits += 4;
- x >>= 4;
- }
- switch (x) {
- default: UNREACHABLE();
- case 8: bits++; // Fall through.
- case 4: bits++; // Fall through.
- case 2: bits++; // Fall through.
- case 1: break;
- }
- ASSERT_EQ(1 << bits, original_x);
- return bits;
- return 0;
-}
-
-
-// Magic numbers for integer division.
-// These are kind of 2's complement reciprocal of the divisors.
-// Details and proofs can be found in:
-// - Hacker's Delight, Henry S. Warren, Jr.
-// - The PowerPC Compiler Writer’s Guide
-// and probably many others.
-// See details in the implementation of the algorithm in
-// lithium-codegen-arm.cc : LCodeGen::TryEmitSignedIntegerDivisionByConstant().
-struct DivMagicNumbers {
- unsigned M;
- unsigned s;
-};
-
-const DivMagicNumbers InvalidDivMagicNumber= {0, 0};
-const DivMagicNumbers DivMagicNumberFor3 = {0x55555556, 0};
-const DivMagicNumbers DivMagicNumberFor5 = {0x66666667, 1};
-const DivMagicNumbers DivMagicNumberFor7 = {0x92492493, 2};
-const DivMagicNumbers DivMagicNumberFor9 = {0x38e38e39, 1};
-const DivMagicNumbers DivMagicNumberFor11 = {0x2e8ba2e9, 1};
-const DivMagicNumbers DivMagicNumberFor25 = {0x51eb851f, 3};
-const DivMagicNumbers DivMagicNumberFor125 = {0x10624dd3, 3};
-const DivMagicNumbers DivMagicNumberFor625 = {0x68db8bad, 8};
-
-const DivMagicNumbers DivMagicNumberFor(int32_t divisor);
-
-
-// The C++ standard leaves the semantics of '>>' undefined for
-// negative signed operands. Most implementations do the right thing,
-// though.
-inline int ArithmeticShiftRight(int x, int s) {
- return x >> s;
-}
-
-
-// Compute the 0-relative offset of some absolute value x of type T.
-// This allows conversion of Addresses and integral types into
-// 0-relative int offsets.
-template <typename T>
-inline intptr_t OffsetFrom(T x) {
- return x - static_cast<T>(0);
-}
-
-
-// Compute the absolute value of type T for some 0-relative offset x.
-// This allows conversion of 0-relative int offsets into Addresses and
-// integral types.
-template <typename T>
-inline T AddressFrom(intptr_t x) {
- return static_cast<T>(static_cast<T>(0) + x);
-}
-
-
-// Return the largest multiple of m which is <= x.
-template <typename T>
-inline T RoundDown(T x, intptr_t m) {
- ASSERT(IsPowerOf2(m));
- return AddressFrom<T>(OffsetFrom(x) & -m);
-}
-
-
-// Return the smallest multiple of m which is >= x.
-template <typename T>
-inline T RoundUp(T x, intptr_t m) {
- return RoundDown<T>(static_cast<T>(x + m - 1), m);
-}
-
-
-template <typename T>
-int Compare(const T& a, const T& b) {
- if (a == b)
- return 0;
- else if (a < b)
- return -1;
- else
- return 1;
-}
-
-
-template <typename T>
-int PointerValueCompare(const T* a, const T* b) {
- return Compare<T>(*a, *b);
-}
-
-
-// Compare function to compare the object pointer value of two
-// handlified objects. The handles are passed as pointers to the
-// handles.
-template<typename T> class Handle; // Forward declaration.
-template <typename T>
-int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
- return Compare<T*>(*(*a), *(*b));
-}
-
-
-// Returns the smallest power of two which is >= x. If you pass in a
-// number that is already a power of two, it is returned as is.
-// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
-// figure 3-3, page 48, where the function is called clp2.
-inline uint32_t RoundUpToPowerOf2(uint32_t x) {
- ASSERT(x <= 0x80000000u);
- x = x - 1;
- x = x | (x >> 1);
- x = x | (x >> 2);
- x = x | (x >> 4);
- x = x | (x >> 8);
- x = x | (x >> 16);
- return x + 1;
-}
-
-
-inline uint32_t RoundDownToPowerOf2(uint32_t x) {
- uint32_t rounded_up = RoundUpToPowerOf2(x);
- if (rounded_up > x) return rounded_up >> 1;
- return rounded_up;
-}
-
-
-template <typename T, typename U>
-inline bool IsAligned(T value, U alignment) {
- return (value & (alignment - 1)) == 0;
-}
-
-
-// Returns true if (addr + offset) is aligned.
-inline bool IsAddressAligned(Address addr,
- intptr_t alignment,
- int offset = 0) {
- intptr_t offs = OffsetFrom(addr + offset);
- return IsAligned(offs, alignment);
-}
-
-
-// Returns the maximum of the two parameters.
-template <typename T>
-T Max(T a, T b) {
- return a < b ? b : a;
-}
-
-
-// Returns the minimum of the two parameters.
-template <typename T>
-T Min(T a, T b) {
- return a < b ? a : b;
-}
-
-
-inline int StrLength(const char* string) {
- size_t length = strlen(string);
- ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
- return static_cast<int>(length);
-}
-
-
-// ----------------------------------------------------------------------------
-// BitField is a help template for encoding and decode bitfield with
-// unsigned content.
-template<class T, int shift, int size>
-class BitField {
- public:
- // A uint32_t mask of bit field. To use all bits of a uint32 in a
- // bitfield without compiler warnings we have to compute 2^32 without
- // using a shift count of 32.
- static const uint32_t kMask = ((1U << shift) << size) - (1U << shift);
- static const uint32_t kShift = shift;
- static const uint32_t kSize = size;
-
- // Value for the field with all bits set.
- static const T kMax = static_cast<T>((1U << size) - 1);
-
- // Tells whether the provided value fits into the bit field.
- static bool is_valid(T value) {
- return (static_cast<uint32_t>(value) & ~static_cast<uint32_t>(kMax)) == 0;
- }
-
- // Returns a uint32_t with the bit field value encoded.
- static uint32_t encode(T value) {
- ASSERT(is_valid(value));
- return static_cast<uint32_t>(value) << shift;
- }
-
- // Returns a uint32_t with the bit field value updated.
- static uint32_t update(uint32_t previous, T value) {
- return (previous & ~kMask) | encode(value);
- }
-
- // Extracts the bit field from the value.
- static T decode(uint32_t value) {
- return static_cast<T>((value & kMask) >> shift);
- }
-};
-
-
-// ----------------------------------------------------------------------------
-// Hash function.
-
-static const uint32_t kZeroHashSeed = 0;
-
-// Thomas Wang, Integer Hash Functions.
-// http://www.concentric.net/~Ttwang/tech/inthash.htm
-inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
- uint32_t hash = key;
- hash = hash ^ seed;
- hash = ~hash + (hash << 15); // hash = (hash << 15) - hash - 1;
- hash = hash ^ (hash >> 12);
- hash = hash + (hash << 2);
- hash = hash ^ (hash >> 4);
- hash = hash * 2057; // hash = (hash + (hash << 3)) + (hash << 11);
- hash = hash ^ (hash >> 16);
- return hash;
-}
-
-
-inline uint32_t ComputeLongHash(uint64_t key) {
- uint64_t hash = key;
- hash = ~hash + (hash << 18); // hash = (hash << 18) - hash - 1;
- hash = hash ^ (hash >> 31);
- hash = hash * 21; // hash = (hash + (hash << 2)) + (hash << 4);
- hash = hash ^ (hash >> 11);
- hash = hash + (hash << 6);
- hash = hash ^ (hash >> 22);
- return static_cast<uint32_t>(hash);
-}
-
-
-inline uint32_t ComputePointerHash(void* ptr) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)),
- v8::internal::kZeroHashSeed);
-}
-
-
-// ----------------------------------------------------------------------------
-// Miscellaneous
-
-// A static resource holds a static instance that can be reserved in
-// a local scope using an instance of Access. Attempts to re-reserve
-// the instance will cause an error.
-template <typename T>
-class StaticResource {
- public:
- StaticResource() : is_reserved_(false) {}
-
- private:
- template <typename S> friend class Access;
- T instance_;
- bool is_reserved_;
-};
-
-
-// Locally scoped access to a static resource.
-template <typename T>
-class Access {
- public:
- explicit Access(StaticResource<T>* resource)
- : resource_(resource)
- , instance_(&resource->instance_) {
- ASSERT(!resource->is_reserved_);
- resource->is_reserved_ = true;
- }
-
- ~Access() {
- resource_->is_reserved_ = false;
- resource_ = NULL;
- instance_ = NULL;
- }
-
- T* value() { return instance_; }
- T* operator -> () { return instance_; }
-
- private:
- StaticResource<T>* resource_;
- T* instance_;
-};
-
-
-template <typename T>
-class Vector {
- public:
- Vector() : start_(NULL), length_(0) {}
- Vector(T* data, int length) : start_(data), length_(length) {
- ASSERT(length == 0 || (length > 0 && data != NULL));
- }
-
- static Vector<T> New(int length) {
- return Vector<T>(NewArray<T>(length), length);
- }
-
- // Returns a vector using the same backing storage as this one,
- // spanning from and including 'from', to but not including 'to'.
- Vector<T> SubVector(int from, int to) {
- ASSERT(to <= length_);
- ASSERT(from < to);
- ASSERT(0 <= from);
- return Vector<T>(start() + from, to - from);
- }
-
- // Returns the length of the vector.
- int length() const { return length_; }
-
- // Returns whether or not the vector is empty.
- bool is_empty() const { return length_ == 0; }
-
- // Returns the pointer to the start of the data in the vector.
- T* start() const { return start_; }
-
- // Access individual vector elements - checks bounds in debug mode.
- T& operator[](int index) const {
- ASSERT(0 <= index && index < length_);
- return start_[index];
- }
-
- const T& at(int index) const { return operator[](index); }
-
- T& first() { return start_[0]; }
-
- T& last() { return start_[length_ - 1]; }
-
- // Returns a clone of this vector with a new backing store.
- Vector<T> Clone() const {
- T* result = NewArray<T>(length_);
- for (int i = 0; i < length_; i++) result[i] = start_[i];
- return Vector<T>(result, length_);
- }
-
- void Sort(int (*cmp)(const T*, const T*)) {
- typedef int (*RawComparer)(const void*, const void*);
- qsort(start(),
- length(),
- sizeof(T),
- reinterpret_cast<RawComparer>(cmp));
- }
-
- void Sort() {
- Sort(PointerValueCompare<T>);
- }
-
- void Truncate(int length) {
- ASSERT(length <= length_);
- length_ = length;
- }
-
- // Releases the array underlying this vector. Once disposed the
- // vector is empty.
- void Dispose() {
- DeleteArray(start_);
- start_ = NULL;
- length_ = 0;
- }
-
- inline Vector<T> operator+(int offset) {
- ASSERT(offset < length_);
- return Vector<T>(start_ + offset, length_ - offset);
- }
-
- // Factory method for creating empty vectors.
- static Vector<T> empty() { return Vector<T>(NULL, 0); }
-
- template<typename S>
- static Vector<T> cast(Vector<S> input) {
- return Vector<T>(reinterpret_cast<T*>(input.start()),
- input.length() * sizeof(S) / sizeof(T));
- }
-
- protected:
- void set_start(T* start) { start_ = start; }
-
- private:
- T* start_;
- int length_;
-};
-
-
-// A pointer that can only be set once and doesn't allow NULL values.
-template<typename T>
-class SetOncePointer {
- public:
- SetOncePointer() : pointer_(NULL) { }
-
- bool is_set() const { return pointer_ != NULL; }
-
- T* get() const {
- ASSERT(pointer_ != NULL);
- return pointer_;
- }
-
- void set(T* value) {
- ASSERT(pointer_ == NULL && value != NULL);
- pointer_ = value;
- }
-
- private:
- T* pointer_;
-};
-
-
-template <typename T, int kSize>
-class EmbeddedVector : public Vector<T> {
- public:
- EmbeddedVector() : Vector<T>(buffer_, kSize) { }
-
- explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
- for (int i = 0; i < kSize; ++i) {
- buffer_[i] = initial_value;
- }
- }
-
- // When copying, make underlying Vector to reference our buffer.
- EmbeddedVector(const EmbeddedVector& rhs)
- : Vector<T>(rhs) {
- memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
- set_start(buffer_);
- }
-
- EmbeddedVector& operator=(const EmbeddedVector& rhs) {
- if (this == &rhs) return *this;
- Vector<T>::operator=(rhs);
- memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
- this->set_start(buffer_);
- return *this;
- }
-
- private:
- T buffer_[kSize];
-};
-
-
-template <typename T>
-class ScopedVector : public Vector<T> {
- public:
- explicit ScopedVector(int length) : Vector<T>(NewArray<T>(length), length) { }
- ~ScopedVector() {
- DeleteArray(this->start());
- }
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
-};
-
-#define STATIC_ASCII_VECTOR(x) \
- v8::internal::Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(x), \
- ARRAY_SIZE(x)-1)
-
-inline Vector<const char> CStrVector(const char* data) {
- return Vector<const char>(data, StrLength(data));
-}
-
-inline Vector<const uint8_t> OneByteVector(const char* data, int length) {
- return Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), length);
-}
-
-inline Vector<const uint8_t> OneByteVector(const char* data) {
- return OneByteVector(data, StrLength(data));
-}
-
-inline Vector<char> MutableCStrVector(char* data) {
- return Vector<char>(data, StrLength(data));
-}
-
-inline Vector<char> MutableCStrVector(char* data, int max) {
- int length = StrLength(data);
- return Vector<char>(data, (length < max) ? length : max);
-}
-
-
-/*
- * A class that collects values into a backing store.
- * Specialized versions of the class can allow access to the backing store
- * in different ways.
- * There is no guarantee that the backing store is contiguous (and, as a
- * consequence, no guarantees that consecutively added elements are adjacent
- * in memory). The collector may move elements unless it has guaranteed not
- * to.
- */
-template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
-class Collector {
- public:
- explicit Collector(int initial_capacity = kMinCapacity)
- : index_(0), size_(0) {
- current_chunk_ = Vector<T>::New(initial_capacity);
- }
-
- virtual ~Collector() {
- // Free backing store (in reverse allocation order).
- current_chunk_.Dispose();
- for (int i = chunks_.length() - 1; i >= 0; i--) {
- chunks_.at(i).Dispose();
- }
- }
-
- // Add a single element.
- inline void Add(T value) {
- if (index_ >= current_chunk_.length()) {
- Grow(1);
- }
- current_chunk_[index_] = value;
- index_++;
- size_++;
- }
-
- // Add a block of contiguous elements and return a Vector backed by the
- // memory area.
- // A basic Collector will keep this vector valid as long as the Collector
- // is alive.
- inline Vector<T> AddBlock(int size, T initial_value) {
- ASSERT(size > 0);
- if (size > current_chunk_.length() - index_) {
- Grow(size);
- }
- T* position = current_chunk_.start() + index_;
- index_ += size;
- size_ += size;
- for (int i = 0; i < size; i++) {
- position[i] = initial_value;
- }
- return Vector<T>(position, size);
- }
-
-
- // Add a contiguous block of elements and return a vector backed
- // by the added block.
- // A basic Collector will keep this vector valid as long as the Collector
- // is alive.
- inline Vector<T> AddBlock(Vector<const T> source) {
- if (source.length() > current_chunk_.length() - index_) {
- Grow(source.length());
- }
- T* position = current_chunk_.start() + index_;
- index_ += source.length();
- size_ += source.length();
- for (int i = 0; i < source.length(); i++) {
- position[i] = source[i];
- }
- return Vector<T>(position, source.length());
- }
-
-
- // Write the contents of the collector into the provided vector.
- void WriteTo(Vector<T> destination) {
- ASSERT(size_ <= destination.length());
- int position = 0;
- for (int i = 0; i < chunks_.length(); i++) {
- Vector<T> chunk = chunks_.at(i);
- for (int j = 0; j < chunk.length(); j++) {
- destination[position] = chunk[j];
- position++;
- }
- }
- for (int i = 0; i < index_; i++) {
- destination[position] = current_chunk_[i];
- position++;
- }
- }
-
- // Allocate a single contiguous vector, copy all the collected
- // elements to the vector, and return it.
- // The caller is responsible for freeing the memory of the returned
- // vector (e.g., using Vector::Dispose).
- Vector<T> ToVector() {
- Vector<T> new_store = Vector<T>::New(size_);
- WriteTo(new_store);
- return new_store;
- }
-
- // Resets the collector to be empty.
- virtual void Reset();
-
- // Total number of elements added to collector so far.
- inline int size() { return size_; }
-
- protected:
- static const int kMinCapacity = 16;
- List<Vector<T> > chunks_;
- Vector<T> current_chunk_; // Block of memory currently being written into.
- int index_; // Current index in current chunk.
- int size_; // Total number of elements in collector.
-
- // Creates a new current chunk, and stores the old chunk in the chunks_ list.
- void Grow(int min_capacity) {
- ASSERT(growth_factor > 1);
- int new_capacity;
- int current_length = current_chunk_.length();
- if (current_length < kMinCapacity) {
- // The collector started out as empty.
- new_capacity = min_capacity * growth_factor;
- if (new_capacity < kMinCapacity) new_capacity = kMinCapacity;
- } else {
- int growth = current_length * (growth_factor - 1);
- if (growth > max_growth) {
- growth = max_growth;
- }
- new_capacity = current_length + growth;
- if (new_capacity < min_capacity) {
- new_capacity = min_capacity + growth;
- }
- }
- NewChunk(new_capacity);
- ASSERT(index_ + min_capacity <= current_chunk_.length());
- }
-
- // Before replacing the current chunk, give a subclass the option to move
- // some of the current data into the new chunk. The function may update
- // the current index_ value to represent data no longer in the current chunk.
- // Returns the initial index of the new chunk (after copied data).
- virtual void NewChunk(int new_capacity) {
- Vector<T> new_chunk = Vector<T>::New(new_capacity);
- if (index_ > 0) {
- chunks_.Add(current_chunk_.SubVector(0, index_));
- } else {
- current_chunk_.Dispose();
- }
- current_chunk_ = new_chunk;
- index_ = 0;
- }
-};
-
-
-/*
- * A collector that allows sequences of values to be guaranteed to
- * stay consecutive.
- * If the backing store grows while a sequence is active, the current
- * sequence might be moved, but after the sequence is ended, it will
- * not move again.
- * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
- * as well, if inside an active sequence where another element is added.
- */
-template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
-class SequenceCollector : public Collector<T, growth_factor, max_growth> {
- public:
- explicit SequenceCollector(int initial_capacity)
- : Collector<T, growth_factor, max_growth>(initial_capacity),
- sequence_start_(kNoSequence) { }
-
- virtual ~SequenceCollector() {}
-
- void StartSequence() {
- ASSERT(sequence_start_ == kNoSequence);
- sequence_start_ = this->index_;
- }
-
- Vector<T> EndSequence() {
- ASSERT(sequence_start_ != kNoSequence);
- int sequence_start = sequence_start_;
- sequence_start_ = kNoSequence;
- if (sequence_start == this->index_) return Vector<T>();
- return this->current_chunk_.SubVector(sequence_start, this->index_);
- }
-
- // Drops the currently added sequence, and all collected elements in it.
- void DropSequence() {
- ASSERT(sequence_start_ != kNoSequence);
- int sequence_length = this->index_ - sequence_start_;
- this->index_ = sequence_start_;
- this->size_ -= sequence_length;
- sequence_start_ = kNoSequence;
- }
-
- virtual void Reset() {
- sequence_start_ = kNoSequence;
- this->Collector<T, growth_factor, max_growth>::Reset();
- }
-
- private:
- static const int kNoSequence = -1;
- int sequence_start_;
-
- // Move the currently active sequence to the new chunk.
- virtual void NewChunk(int new_capacity) {
- if (sequence_start_ == kNoSequence) {
- // Fall back on default behavior if no sequence has been started.
- this->Collector<T, growth_factor, max_growth>::NewChunk(new_capacity);
- return;
- }
- int sequence_length = this->index_ - sequence_start_;
- Vector<T> new_chunk = Vector<T>::New(sequence_length + new_capacity);
- ASSERT(sequence_length < new_chunk.length());
- for (int i = 0; i < sequence_length; i++) {
- new_chunk[i] = this->current_chunk_[sequence_start_ + i];
- }
- if (sequence_start_ > 0) {
- this->chunks_.Add(this->current_chunk_.SubVector(0, sequence_start_));
- } else {
- this->current_chunk_.Dispose();
- }
- this->current_chunk_ = new_chunk;
- this->index_ = sequence_length;
- sequence_start_ = 0;
- }
-};
-
-
-// Compare ASCII/16bit chars to ASCII/16bit chars.
-template <typename lchar, typename rchar>
-inline int CompareCharsUnsigned(const lchar* lhs,
- const rchar* rhs,
- int chars) {
- const lchar* limit = lhs + chars;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- if (sizeof(*lhs) == sizeof(*rhs)) {
- // Number of characters in a uintptr_t.
- static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs); // NOLINT
- while (lhs <= limit - kStepSize) {
- if (*reinterpret_cast<const uintptr_t*>(lhs) !=
- *reinterpret_cast<const uintptr_t*>(rhs)) {
- break;
- }
- lhs += kStepSize;
- rhs += kStepSize;
- }
- }
-#endif
- while (lhs < limit) {
- int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
- if (r != 0) return r;
- ++lhs;
- ++rhs;
- }
- return 0;
-}
-
-template<typename lchar, typename rchar>
-inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
- ASSERT(sizeof(lchar) <= 2);
- ASSERT(sizeof(rchar) <= 2);
- if (sizeof(lchar) == 1) {
- if (sizeof(rchar) == 1) {
- return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
- reinterpret_cast<const uint8_t*>(rhs),
- chars);
- } else {
- return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
- reinterpret_cast<const uint16_t*>(rhs),
- chars);
- }
- } else {
- if (sizeof(rchar) == 1) {
- return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
- reinterpret_cast<const uint8_t*>(rhs),
- chars);
- } else {
- return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
- reinterpret_cast<const uint16_t*>(rhs),
- chars);
- }
- }
-}
-
-
-// Calculate 10^exponent.
-inline int TenToThe(int exponent) {
- ASSERT(exponent <= 9);
- ASSERT(exponent >= 1);
- int answer = 10;
- for (int i = 1; i < exponent; i++) answer *= 10;
- return answer;
-}
-
-
-// The type-based aliasing rule allows the compiler to assume that pointers of
-// different types (for some definition of different) never alias each other.
-// Thus the following code does not work:
-//
-// float f = foo();
-// int fbits = *(int*)(&f);
-//
-// The compiler 'knows' that the int pointer can't refer to f since the types
-// don't match, so the compiler may cache f in a register, leaving random data
-// in fbits. Using C++ style casts makes no difference, however a pointer to
-// char data is assumed to alias any other pointer. This is the 'memcpy
-// exception'.
-//
-// Bit_cast uses the memcpy exception to move the bits from a variable of one
-// type of a variable of another type. Of course the end result is likely to
-// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
-// will completely optimize BitCast away.
-//
-// There is an additional use for BitCast.
-// Recent gccs will warn when they see casts that may result in breakage due to
-// the type-based aliasing rule. If you have checked that there is no breakage
-// you can use BitCast to cast one pointer type to another. This confuses gcc
-// enough that it can no longer see that you have cast one pointer type to
-// another thus avoiding the warning.
-
-// We need different implementations of BitCast for pointer and non-pointer
-// values. We use partial specialization of auxiliary struct to work around
-// issues with template functions overloading.
-template <class Dest, class Source>
-struct BitCastHelper {
- STATIC_ASSERT(sizeof(Dest) == sizeof(Source));
-
- INLINE(static Dest cast(const Source& source)) {
- Dest dest;
- memcpy(&dest, &source, sizeof(dest));
- return dest;
- }
-};
-
-template <class Dest, class Source>
-struct BitCastHelper<Dest, Source*> {
- INLINE(static Dest cast(Source* source)) {
- return BitCastHelper<Dest, uintptr_t>::
- cast(reinterpret_cast<uintptr_t>(source));
- }
-};
-
-template <class Dest, class Source>
-INLINE(Dest BitCast(const Source& source));
-
-template <class Dest, class Source>
-inline Dest BitCast(const Source& source) {
- return BitCastHelper<Dest, Source>::cast(source);
-}
-
-
-template<typename ElementType, int NumElements>
-class EmbeddedContainer {
- public:
- EmbeddedContainer() : elems_() { }
-
- int length() const { return NumElements; }
- const ElementType& operator[](int i) const {
- ASSERT(i < length());
- return elems_[i];
- }
- ElementType& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
-
- private:
- ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class EmbeddedContainer<ElementType, 0> {
- public:
- int length() const { return 0; }
- const ElementType& operator[](int i) const {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
- ElementType& operator[](int i) {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
-};
-
-
-// Helper class for building result strings in a character buffer. The
-// purpose of the class is to use safe operations that checks the
-// buffer bounds on all operations in debug mode.
-// This simple base class does not allow formatted output.
-class SimpleStringBuilder {
- public:
- // Create a string builder with a buffer of the given size. The
- // buffer is allocated through NewArray<char> and must be
- // deallocated by the caller of Finalize().
- explicit SimpleStringBuilder(int size);
-
- SimpleStringBuilder(char* buffer, int size)
- : buffer_(buffer, size), position_(0) { }
-
- ~SimpleStringBuilder() { if (!is_finalized()) Finalize(); }
-
- int size() const { return buffer_.length(); }
-
- // Get the current position in the builder.
- int position() const {
- ASSERT(!is_finalized());
- return position_;
- }
-
- // Reset the position.
- void Reset() { position_ = 0; }
-
- // Add a single character to the builder. It is not allowed to add
- // 0-characters; use the Finalize() method to terminate the string
- // instead.
- void AddCharacter(char c) {
- ASSERT(c != '\0');
- ASSERT(!is_finalized() && position_ < buffer_.length());
- buffer_[position_++] = c;
- }
-
- // Add an entire string to the builder. Uses strlen() internally to
- // compute the length of the input string.
- void AddString(const char* s);
-
- // Add the first 'n' characters of the given string 's' to the
- // builder. The input string must have enough characters.
- void AddSubstring(const char* s, int n);
-
- // Add character padding to the builder. If count is non-positive,
- // nothing is added to the builder.
- void AddPadding(char c, int count);
-
- // Add the decimal representation of the value.
- void AddDecimalInteger(int value);
-
- // Finalize the string by 0-terminating it and returning the buffer.
- char* Finalize();
-
- protected:
- Vector<char> buffer_;
- int position_;
-
- bool is_finalized() const { return position_ < 0; }
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SimpleStringBuilder);
-};
-
-
-// A poor man's version of STL's bitset: A bit set of enums E (without explicit
-// values), fitting into an integral type T.
-template <class E, class T = int>
-class EnumSet {
- public:
- explicit EnumSet(T bits = 0) : bits_(bits) {}
- bool IsEmpty() const { return bits_ == 0; }
- bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
- bool ContainsAnyOf(const EnumSet& set) const {
- return (bits_ & set.bits_) != 0;
- }
- void Add(E element) { bits_ |= Mask(element); }
- void Add(const EnumSet& set) { bits_ |= set.bits_; }
- void Remove(E element) { bits_ &= ~Mask(element); }
- void Remove(const EnumSet& set) { bits_ &= ~set.bits_; }
- void RemoveAll() { bits_ = 0; }
- void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
- T ToIntegral() const { return bits_; }
- bool operator==(const EnumSet& set) { return bits_ == set.bits_; }
-
- private:
- T Mask(E element) const {
- // The strange typing in ASSERT is necessary to avoid stupid warnings, see:
- // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43680
- ASSERT(static_cast<int>(element) < static_cast<int>(sizeof(T) * CHAR_BIT));
- return 1 << element;
- }
-
- T bits_;
-};
-
-
-class TypeFeedbackId {
- public:
- explicit TypeFeedbackId(int id) : id_(id) { }
- int ToInt() const { return id_; }
-
- static TypeFeedbackId None() { return TypeFeedbackId(kNoneId); }
- bool IsNone() const { return id_ == kNoneId; }
-
- private:
- static const int kNoneId = -1;
-
- int id_;
-};
-
-
-class BailoutId {
- public:
- explicit BailoutId(int id) : id_(id) { }
- int ToInt() const { return id_; }
-
- static BailoutId None() { return BailoutId(kNoneId); }
- static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
- static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
- static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
- static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
-
- bool IsNone() const { return id_ == kNoneId; }
- bool operator==(const BailoutId& other) const { return id_ == other.id_; }
-
- private:
- static const int kNoneId = -1;
-
- // Using 0 could disguise errors.
- static const int kFunctionEntryId = 2;
-
- // This AST id identifies the point after the declarations have been visited.
- // We need it to capture the environment effects of declarations that emit
- // code (function declarations).
- static const int kDeclarationsId = 3;
-
- // Every FunctionState starts with this id.
- static const int kFirstUsableId = 4;
-
- // Every compiled stub starts with this id.
- static const int kStubEntryId = 5;
-
- int id_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_UTILS_H_
diff --git a/src/3rdparty/v8/src/v8-counters.cc b/src/3rdparty/v8/src/v8-counters.cc
deleted file mode 100644
index 4107dd3..0000000
--- a/src/3rdparty/v8/src/v8-counters.cc
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "v8-counters.h"
-
-namespace v8 {
-namespace internal {
-
-Counters::Counters() {
-#define HT(name, caption) \
- HistogramTimer name = { {#caption, 0, 10000, 50, NULL, false}, 0, 0 }; \
- name##_ = name;
- HISTOGRAM_TIMER_LIST(HT)
-#undef HT
-
-#define HP(name, caption) \
- Histogram name = { #caption, 0, 101, 100, NULL, false }; \
- name##_ = name;
- HISTOGRAM_PERCENTAGE_LIST(HP)
-#undef HP
-
-#define HM(name, caption) \
- Histogram name = { #caption, 1000, 500000, 50, NULL, false }; \
- name##_ = name;
- HISTOGRAM_MEMORY_LIST(HM)
-#undef HM
-
-#define SC(name, caption) \
- StatsCounter name = { "c:" #caption, NULL, false };\
- name##_ = name;
-
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter count_of_##name = { "c:" "V8.CountOf_" #name, NULL, false };\
- count_of_##name##_ = count_of_##name; \
- StatsCounter size_of_##name = { "c:" "V8.SizeOf_" #name, NULL, false };\
- size_of_##name##_ = size_of_##name;
- INSTANCE_TYPE_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter count_of_CODE_TYPE_##name = { \
- "c:" "V8.CountOf_CODE_TYPE-" #name, NULL, false }; \
- count_of_CODE_TYPE_##name##_ = count_of_CODE_TYPE_##name; \
- StatsCounter size_of_CODE_TYPE_##name = { \
- "c:" "V8.SizeOf_CODE_TYPE-" #name, NULL, false }; \
- size_of_CODE_TYPE_##name##_ = size_of_CODE_TYPE_##name;
- CODE_KIND_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter count_of_FIXED_ARRAY_##name = { \
- "c:" "V8.CountOf_FIXED_ARRAY-" #name, NULL, false }; \
- count_of_FIXED_ARRAY_##name##_ = count_of_FIXED_ARRAY_##name; \
- StatsCounter size_of_FIXED_ARRAY_##name = { \
- "c:" "V8.SizeOf_FIXED_ARRAY-" #name, NULL, false }; \
- size_of_FIXED_ARRAY_##name##_ = size_of_FIXED_ARRAY_##name;
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
-#undef SC
-}
-
-void Counters::ResetHistograms() {
-#define HT(name, caption) name##_.Reset();
- HISTOGRAM_TIMER_LIST(HT)
-#undef HT
-
-#define HP(name, caption) name##_.Reset();
- HISTOGRAM_PERCENTAGE_LIST(HP)
-#undef HP
-
-#define HM(name, caption) name##_.Reset();
- HISTOGRAM_MEMORY_LIST(HM)
-#undef HM
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/v8-counters.h b/src/3rdparty/v8/src/v8-counters.h
deleted file mode 100644
index 374ebbc..0000000
--- a/src/3rdparty/v8/src/v8-counters.h
+++ /dev/null
@@ -1,428 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8_COUNTERS_H_
-#define V8_V8_COUNTERS_H_
-
-#include "allocation.h"
-#include "counters.h"
-#include "objects.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-#define HISTOGRAM_TIMER_LIST(HT) \
- /* Garbage collection timers. */ \
- HT(gc_compactor, V8.GCCompactor) \
- HT(gc_scavenger, V8.GCScavenger) \
- HT(gc_context, V8.GCContext) /* GC context cleanup time */ \
- /* Parsing timers. */ \
- HT(parse, V8.Parse) \
- HT(parse_lazy, V8.ParseLazy) \
- HT(pre_parse, V8.PreParse) \
- /* Total compilation times. */ \
- HT(compile, V8.Compile) \
- HT(compile_eval, V8.CompileEval) \
- HT(compile_lazy, V8.CompileLazy)
-
-#define HISTOGRAM_PERCENTAGE_LIST(HP) \
- HP(external_fragmentation_total, \
- V8.MemoryExternalFragmentationTotal) \
- HP(external_fragmentation_old_pointer_space, \
- V8.MemoryExternalFragmentationOldPointerSpace) \
- HP(external_fragmentation_old_data_space, \
- V8.MemoryExternalFragmentationOldDataSpace) \
- HP(external_fragmentation_code_space, \
- V8.MemoryExternalFragmentationCodeSpace) \
- HP(external_fragmentation_map_space, \
- V8.MemoryExternalFragmentationMapSpace) \
- HP(external_fragmentation_cell_space, \
- V8.MemoryExternalFragmentationCellSpace) \
- HP(external_fragmentation_lo_space, \
- V8.MemoryExternalFragmentationLoSpace) \
- HP(heap_fraction_map_space, \
- V8.MemoryHeapFractionMapSpace) \
- HP(heap_fraction_cell_space, \
- V8.MemoryHeapFractionCellSpace) \
-
-
-#define HISTOGRAM_MEMORY_LIST(HM) \
- HM(heap_sample_total_committed, V8.MemoryHeapSampleTotalCommitted) \
- HM(heap_sample_total_used, V8.MemoryHeapSampleTotalUsed) \
- HM(heap_sample_map_space_committed, \
- V8.MemoryHeapSampleMapSpaceCommitted) \
- HM(heap_sample_cell_space_committed, \
- V8.MemoryHeapSampleCellSpaceCommitted)
-
-
-// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
-// Intellisense to crash. It was broken into two macros (each of length 40
-// lines) rather than one macro (of length about 80 lines) to work around
-// this problem. Please avoid using recursive macros of this length when
-// possible.
-#define STATS_COUNTER_LIST_1(SC) \
- /* Global Handle Count*/ \
- SC(global_handles, V8.GlobalHandles) \
- /* Mallocs from PCRE */ \
- SC(pcre_mallocs, V8.PcreMallocCount) \
- /* OS Memory allocated */ \
- SC(memory_allocated, V8.OsMemoryAllocated) \
- SC(normalized_maps, V8.NormalizedMaps) \
- SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
- SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
- SC(alive_after_last_gc, V8.AliveAfterLastGC) \
- SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
- SC(objs_since_last_full, V8.ObjsSinceLastFull) \
- SC(string_table_capacity, V8.StringTableCapacity) \
- SC(number_of_symbols, V8.NumberOfSymbols) \
- SC(script_wrappers, V8.ScriptWrappers) \
- SC(call_initialize_stubs, V8.CallInitializeStubs) \
- SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
- SC(call_normal_stubs, V8.CallNormalStubs) \
- SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \
- SC(arguments_adaptors, V8.ArgumentsAdaptors) \
- SC(compilation_cache_hits, V8.CompilationCacheHits) \
- SC(compilation_cache_misses, V8.CompilationCacheMisses) \
- SC(regexp_cache_hits, V8.RegExpCacheHits) \
- SC(regexp_cache_misses, V8.RegExpCacheMisses) \
- SC(string_ctor_calls, V8.StringConstructorCalls) \
- SC(string_ctor_conversions, V8.StringConstructorConversions) \
- SC(string_ctor_cached_number, V8.StringConstructorCachedNumber) \
- SC(string_ctor_string_value, V8.StringConstructorStringValue) \
- SC(string_ctor_gc_required, V8.StringConstructorGCRequired) \
- /* Amount of evaled source code. */ \
- SC(total_eval_size, V8.TotalEvalSize) \
- /* Amount of loaded source code. */ \
- SC(total_load_size, V8.TotalLoadSize) \
- /* Amount of parsed source code. */ \
- SC(total_parse_size, V8.TotalParseSize) \
- /* Amount of source code skipped over using preparsing. */ \
- SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
- /* Number of symbol lookups skipped using preparsing */ \
- SC(total_preparse_symbols_skipped, V8.TotalPreparseSymbolSkipped) \
- /* Amount of compiled source code. */ \
- SC(total_compile_size, V8.TotalCompileSize) \
- /* Amount of source code compiled with the old codegen. */ \
- SC(total_old_codegen_source_size, V8.TotalOldCodegenSourceSize) \
- /* Amount of source code compiled with the full codegen. */ \
- SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize) \
- /* Number of contexts created from scratch. */ \
- SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \
- /* Number of contexts created by partial snapshot. */ \
- SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
- /* Number of code objects found from pc. */ \
- SC(pc_to_code, V8.PcToCode) \
- SC(pc_to_code_cached, V8.PcToCodeCached) \
- /* The store-buffer implementation of the write barrier. */ \
- SC(store_buffer_compactions, V8.StoreBufferCompactions) \
- SC(store_buffer_overflows, V8.StoreBufferOverflows)
-
-
-#define STATS_COUNTER_LIST_2(SC) \
- /* Number of code stubs. */ \
- SC(code_stubs, V8.CodeStubs) \
- /* Amount of stub code. */ \
- SC(total_stubs_code_size, V8.TotalStubsCodeSize) \
- /* Amount of (JS) compiled code. */ \
- SC(total_compiled_code_size, V8.TotalCompiledCodeSize) \
- SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \
- SC(gc_compactor_caused_by_promoted_data, \
- V8.GCCompactorCausedByPromotedData) \
- SC(gc_compactor_caused_by_oldspace_exhaustion, \
- V8.GCCompactorCausedByOldspaceExhaustion) \
- SC(gc_compactor_caused_by_weak_handles, \
- V8.GCCompactorCausedByWeakHandles) \
- SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
- SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
- /* How is the generic keyed-load stub used? */ \
- SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
- SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
- SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
- SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
- SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs) \
- SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
- /* How is the generic keyed-call stub used? */ \
- SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast) \
- SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict) \
- SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \
- SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict) \
- SC(keyed_call_generic_value_type, V8.KeyedCallGenericValueType) \
- SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow) \
- SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad) \
- /* Count how much the monomorphic keyed-load stubs are hit. */ \
- SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype) \
- SC(keyed_load_string_length, V8.KeyedLoadStringLength) \
- SC(keyed_load_array_length, V8.KeyedLoadArrayLength) \
- SC(keyed_load_constant_function, V8.KeyedLoadConstantFunction) \
- SC(keyed_load_field, V8.KeyedLoadField) \
- SC(keyed_load_callback, V8.KeyedLoadCallback) \
- SC(keyed_load_interceptor, V8.KeyedLoadInterceptor) \
- SC(keyed_load_inline, V8.KeyedLoadInline) \
- SC(keyed_load_inline_miss, V8.KeyedLoadInlineMiss) \
- SC(named_load_inline, V8.NamedLoadInline) \
- SC(named_load_inline_miss, V8.NamedLoadInlineMiss) \
- SC(named_load_global_inline, V8.NamedLoadGlobalInline) \
- SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss) \
- SC(dont_delete_hint_hit, V8.DontDeleteHintHit) \
- SC(dont_delete_hint_miss, V8.DontDeleteHintMiss) \
- SC(named_load_global_stub, V8.NamedLoadGlobalStub) \
- SC(named_load_global_stub_miss, V8.NamedLoadGlobalStubMiss) \
- SC(keyed_store_field, V8.KeyedStoreField) \
- SC(named_store_inline_field, V8.NamedStoreInlineField) \
- SC(keyed_store_inline, V8.KeyedStoreInline) \
- SC(named_load_inline_generic, V8.NamedLoadInlineGeneric) \
- SC(named_load_inline_field, V8.NamedLoadInlineFast) \
- SC(keyed_load_inline_generic, V8.KeyedLoadInlineGeneric) \
- SC(keyed_load_inline_fast, V8.KeyedLoadInlineFast) \
- SC(keyed_store_inline_generic, V8.KeyedStoreInlineGeneric) \
- SC(keyed_store_inline_fast, V8.KeyedStoreInlineFast) \
- SC(named_store_inline_generic, V8.NamedStoreInlineGeneric) \
- SC(named_store_inline_fast, V8.NamedStoreInlineFast) \
- SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
- SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
- SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
- SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs) \
- SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow) \
- SC(store_normal_miss, V8.StoreNormalMiss) \
- SC(store_normal_hit, V8.StoreNormalHit) \
- SC(cow_arrays_created_stub, V8.COWArraysCreatedStub) \
- SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime) \
- SC(cow_arrays_converted, V8.COWArraysConverted) \
- SC(call_miss, V8.CallMiss) \
- SC(keyed_call_miss, V8.KeyedCallMiss) \
- SC(load_miss, V8.LoadMiss) \
- SC(keyed_load_miss, V8.KeyedLoadMiss) \
- SC(call_const, V8.CallConst) \
- SC(call_const_fast_api, V8.CallConstFastApi) \
- SC(call_const_interceptor, V8.CallConstInterceptor) \
- SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi) \
- SC(call_global_inline, V8.CallGlobalInline) \
- SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
- SC(constructed_objects, V8.ConstructedObjects) \
- SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
- SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
- SC(negative_lookups, V8.NegativeLookups) \
- SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
- SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes) \
- SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses) \
- SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates) \
- SC(array_function_runtime, V8.ArrayFunctionRuntime) \
- SC(array_function_native, V8.ArrayFunctionNative) \
- SC(for_in, V8.ForIn) \
- SC(enum_cache_hits, V8.EnumCacheHits) \
- SC(enum_cache_misses, V8.EnumCacheMisses) \
- SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
- SC(compute_entry_frame, V8.ComputeEntryFrame) \
- SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
- SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs) \
- SC(fast_new_closure_total, V8.FastNewClosureTotal) \
- SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
- SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
- SC(string_add_runtime, V8.StringAddRuntime) \
- SC(string_add_native, V8.StringAddNative) \
- SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii) \
- SC(sub_string_runtime, V8.SubStringRuntime) \
- SC(sub_string_native, V8.SubStringNative) \
- SC(string_add_make_two_char, V8.StringAddMakeTwoChar) \
- SC(string_compare_native, V8.StringCompareNative) \
- SC(string_compare_runtime, V8.StringCompareRuntime) \
- SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
- SC(regexp_entry_native, V8.RegExpEntryNative) \
- SC(number_to_string_native, V8.NumberToStringNative) \
- SC(number_to_string_runtime, V8.NumberToStringRuntime) \
- SC(math_acos, V8.MathAcos) \
- SC(math_asin, V8.MathAsin) \
- SC(math_atan, V8.MathAtan) \
- SC(math_atan2, V8.MathAtan2) \
- SC(math_ceil, V8.MathCeil) \
- SC(math_cos, V8.MathCos) \
- SC(math_exp, V8.MathExp) \
- SC(math_floor, V8.MathFloor) \
- SC(math_log, V8.MathLog) \
- SC(math_pow, V8.MathPow) \
- SC(math_round, V8.MathRound) \
- SC(math_sin, V8.MathSin) \
- SC(math_sqrt, V8.MathSqrt) \
- SC(math_tan, V8.MathTan) \
- SC(transcendental_cache_hit, V8.TranscendentalCacheHit) \
- SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) \
- SC(stack_interrupts, V8.StackInterrupts) \
- SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
- SC(smi_checks_removed, V8.SmiChecksRemoved) \
- SC(map_checks_removed, V8.MapChecksRemoved) \
- SC(quote_json_char_count, V8.QuoteJsonCharacterCount) \
- SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount) \
- SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \
- SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \
- SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \
- SC(old_pointer_space_bytes_available, \
- V8.MemoryOldPointerSpaceBytesAvailable) \
- SC(old_pointer_space_bytes_committed, \
- V8.MemoryOldPointerSpaceBytesCommitted) \
- SC(old_pointer_space_bytes_used, V8.MemoryOldPointerSpaceBytesUsed) \
- SC(old_data_space_bytes_available, V8.MemoryOldDataSpaceBytesAvailable) \
- SC(old_data_space_bytes_committed, V8.MemoryOldDataSpaceBytesCommitted) \
- SC(old_data_space_bytes_used, V8.MemoryOldDataSpaceBytesUsed) \
- SC(code_space_bytes_available, V8.MemoryCodeSpaceBytesAvailable) \
- SC(code_space_bytes_committed, V8.MemoryCodeSpaceBytesCommitted) \
- SC(code_space_bytes_used, V8.MemoryCodeSpaceBytesUsed) \
- SC(map_space_bytes_available, V8.MemoryMapSpaceBytesAvailable) \
- SC(map_space_bytes_committed, V8.MemoryMapSpaceBytesCommitted) \
- SC(map_space_bytes_used, V8.MemoryMapSpaceBytesUsed) \
- SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable) \
- SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted) \
- SC(cell_space_bytes_used, V8.MemoryCellSpaceBytesUsed) \
- SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \
- SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \
- SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed)
-
-
-// This file contains all the v8 counters that are in use.
-class Counters {
- public:
-#define HT(name, caption) \
- HistogramTimer* name() { return &name##_; }
- HISTOGRAM_TIMER_LIST(HT)
-#undef HT
-
-#define HP(name, caption) \
- Histogram* name() { return &name##_; }
- HISTOGRAM_PERCENTAGE_LIST(HP)
-#undef HP
-
-#define HM(name, caption) \
- Histogram* name() { return &name##_; }
- HISTOGRAM_MEMORY_LIST(HM)
-#undef HM
-
-#define SC(name, caption) \
- StatsCounter* name() { return &name##_; }
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter* count_of_##name() { return &count_of_##name##_; } \
- StatsCounter* size_of_##name() { return &size_of_##name##_; }
- INSTANCE_TYPE_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter* count_of_CODE_TYPE_##name() \
- { return &count_of_CODE_TYPE_##name##_; } \
- StatsCounter* size_of_CODE_TYPE_##name() \
- { return &size_of_CODE_TYPE_##name##_; }
- CODE_KIND_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter* count_of_FIXED_ARRAY_##name() \
- { return &count_of_FIXED_ARRAY_##name##_; } \
- StatsCounter* size_of_FIXED_ARRAY_##name() \
- { return &size_of_FIXED_ARRAY_##name##_; }
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
-#undef SC
-
- enum Id {
-#define RATE_ID(name, caption) k_##name,
- HISTOGRAM_TIMER_LIST(RATE_ID)
-#undef RATE_ID
-#define PERCENTAGE_ID(name, caption) k_##name,
- HISTOGRAM_PERCENTAGE_LIST(PERCENTAGE_ID)
-#undef PERCENTAGE_ID
-#define MEMORY_ID(name, caption) k_##name,
- HISTOGRAM_MEMORY_LIST(MEMORY_ID)
-#undef MEMORY_ID
-#define COUNTER_ID(name, caption) k_##name,
- STATS_COUNTER_LIST_1(COUNTER_ID)
- STATS_COUNTER_LIST_2(COUNTER_ID)
-#undef COUNTER_ID
-#define COUNTER_ID(name) kCountOf##name, kSizeOf##name,
- INSTANCE_TYPE_LIST(COUNTER_ID)
-#undef COUNTER_ID
-#define COUNTER_ID(name) kCountOfCODE_TYPE_##name, \
- kSizeOfCODE_TYPE_##name,
- CODE_KIND_LIST(COUNTER_ID)
-#undef COUNTER_ID
-#define COUNTER_ID(name) kCountOfFIXED_ARRAY__##name, \
- kSizeOfFIXED_ARRAY__##name,
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID)
-#undef COUNTER_ID
- stats_counter_count
- };
-
- void ResetHistograms();
-
- private:
-#define HT(name, caption) \
- HistogramTimer name##_;
- HISTOGRAM_TIMER_LIST(HT)
-#undef HT
-
-#define HP(name, caption) \
- Histogram name##_;
- HISTOGRAM_PERCENTAGE_LIST(HP)
-#undef HP
-
-#define HM(name, caption) \
- Histogram name##_;
- HISTOGRAM_MEMORY_LIST(HM)
-#undef HM
-
-#define SC(name, caption) \
- StatsCounter name##_;
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter size_of_##name##_; \
- StatsCounter count_of_##name##_;
- INSTANCE_TYPE_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter size_of_CODE_TYPE_##name##_; \
- StatsCounter count_of_CODE_TYPE_##name##_;
- CODE_KIND_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter size_of_FIXED_ARRAY_##name##_; \
- StatsCounter count_of_FIXED_ARRAY_##name##_;
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
-#undef SC
-
- friend class Isolate;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_V8_COUNTERS_H_
diff --git a/src/3rdparty/v8/src/v8.cc b/src/3rdparty/v8/src/v8.cc
deleted file mode 100644
index 1753650..0000000
--- a/src/3rdparty/v8/src/v8.cc
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "assembler.h"
-#include "isolate.h"
-#include "elements.h"
-#include "bootstrapper.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "frames.h"
-#include "heap-profiler.h"
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "log.h"
-#include "objects.h"
-#include "once.h"
-#include "platform.h"
-#include "runtime-profiler.h"
-#include "serialize.h"
-#include "store-buffer.h"
-
-namespace v8 {
-namespace internal {
-
-V8_DECLARE_ONCE(init_once);
-
-bool V8::is_running_ = false;
-bool V8::has_been_set_up_ = false;
-bool V8::has_been_disposed_ = false;
-bool V8::has_fatal_error_ = false;
-bool V8::use_crankshaft_ = true;
-List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
-
-static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
-
-static EntropySource entropy_source;
-
-
-bool V8::Initialize(Deserializer* des) {
- InitializeOncePerProcess();
-
- // The current thread may not yet had entered an isolate to run.
- // Note the Isolate::Current() may be non-null because for various
- // initialization purposes an initializing thread may be assigned an isolate
- // but not actually enter it.
- if (i::Isolate::CurrentPerIsolateThreadData() == NULL) {
- i::Isolate::EnterDefaultIsolate();
- }
-
- ASSERT(i::Isolate::CurrentPerIsolateThreadData() != NULL);
- ASSERT(i::Isolate::CurrentPerIsolateThreadData()->thread_id().Equals(
- i::ThreadId::Current()));
- ASSERT(i::Isolate::CurrentPerIsolateThreadData()->isolate() ==
- i::Isolate::Current());
-
- if (IsDead()) return false;
-
- Isolate* isolate = Isolate::Current();
- if (isolate->IsInitialized()) return true;
-
- is_running_ = true;
- has_been_set_up_ = true;
- has_fatal_error_ = false;
- has_been_disposed_ = false;
-
- return isolate->Init(des);
-}
-
-
-void V8::SetFatalError() {
- is_running_ = false;
- has_fatal_error_ = true;
-}
-
-
-void V8::TearDown() {
- Isolate* isolate = Isolate::Current();
- ASSERT(isolate->IsDefaultIsolate());
-
- if (!has_been_set_up_ || has_been_disposed_) return;
-
- // The isolate has to be torn down before clearing the LOperand
- // caches so that the optimizing compiler thread (if running)
- // doesn't see an inconsistent view of the lithium instructions.
- isolate->TearDown();
- delete isolate;
-
- ElementsAccessor::TearDown();
- LOperand::TearDownCaches();
- ExternalReference::TearDownMathExpData();
- RegisteredExtension::UnregisterAll();
- Isolate::GlobalTearDown();
-
- is_running_ = false;
- has_been_disposed_ = true;
-
- delete call_completed_callbacks_;
- call_completed_callbacks_ = NULL;
-
- OS::TearDown();
-}
-
-
-static void seed_random(uint32_t* state) {
- for (int i = 0; i < 2; ++i) {
- if (FLAG_random_seed != 0) {
- state[i] = FLAG_random_seed;
- } else if (entropy_source != NULL) {
- uint32_t val;
- ScopedLock lock(entropy_mutex.Pointer());
- entropy_source(reinterpret_cast<unsigned char*>(&val), sizeof(uint32_t));
- state[i] = val;
- } else {
- state[i] = random();
- }
- }
-}
-
-
-// Random number generator using George Marsaglia's MWC algorithm.
-static uint32_t random_base(uint32_t* state) {
- // Initialize seed using the system random().
- // No non-zero seed will ever become zero again.
- if (state[0] == 0) seed_random(state);
-
- // Mix the bits. Never replaces state[i] with 0 if it is nonzero.
- state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
- state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
-
- return (state[0] << 14) + (state[1] & 0x3FFFF);
-}
-
-
-void V8::SetEntropySource(EntropySource source) {
- entropy_source = source;
-}
-
-
-void V8::SetReturnAddressLocationResolver(
- ReturnAddressLocationResolver resolver) {
- StackFrame::SetReturnAddressLocationResolver(resolver);
-}
-
-
-// Used by JavaScript APIs
-uint32_t V8::Random(Context* context) {
- ASSERT(context->IsNativeContext());
- ByteArray* seed = context->random_seed();
- return random_base(reinterpret_cast<uint32_t*>(seed->GetDataStartAddress()));
-}
-
-
-// Used internally by the JIT and memory allocator for security
-// purposes. So, we keep a different state to prevent informations
-// leaks that could be used in an exploit.
-uint32_t V8::RandomPrivate(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- return random_base(isolate->private_random_seed());
-}
-
-
-bool V8::IdleNotification(int hint) {
- // Returning true tells the caller that there is no need to call
- // IdleNotification again.
- if (!FLAG_use_idle_notification) return true;
-
- // Tell the heap that it may want to adjust.
- return HEAP->IdleNotification(hint);
-}
-
-
-void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
- if (call_completed_callbacks_ == NULL) { // Lazy init.
- call_completed_callbacks_ = new List<CallCompletedCallback>();
- }
- for (int i = 0; i < call_completed_callbacks_->length(); i++) {
- if (callback == call_completed_callbacks_->at(i)) return;
- }
- call_completed_callbacks_->Add(callback);
-}
-
-
-void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
- if (call_completed_callbacks_ == NULL) return;
- for (int i = 0; i < call_completed_callbacks_->length(); i++) {
- if (callback == call_completed_callbacks_->at(i)) {
- call_completed_callbacks_->Remove(i);
- }
- }
-}
-
-
-void V8::FireCallCompletedCallback(Isolate* isolate) {
- bool has_call_completed_callbacks = call_completed_callbacks_ != NULL;
- bool observer_delivery_pending =
- FLAG_harmony_observation && isolate->observer_delivery_pending();
- if (!has_call_completed_callbacks && !observer_delivery_pending) return;
- HandleScopeImplementer* handle_scope_implementer =
- isolate->handle_scope_implementer();
- if (!handle_scope_implementer->CallDepthIsZero()) return;
- // Fire callbacks. Increase call depth to prevent recursive callbacks.
- handle_scope_implementer->IncrementCallDepth();
- if (observer_delivery_pending) {
- JSObject::DeliverChangeRecords(isolate);
- }
- if (has_call_completed_callbacks) {
- for (int i = 0; i < call_completed_callbacks_->length(); i++) {
- call_completed_callbacks_->at(i)();
- }
- }
- handle_scope_implementer->DecrementCallDepth();
-}
-
-
-// Use a union type to avoid type-aliasing optimizations in GCC.
-typedef union {
- double double_value;
- uint64_t uint64_t_value;
-} double_int_union;
-
-
-Object* V8::FillHeapNumberWithRandom(Object* heap_number,
- Context* context) {
- double_int_union r;
- uint64_t random_bits = Random(context);
- // Convert 32 random bits to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- static const double binary_million = 1048576.0;
- r.double_value = binary_million;
- r.uint64_t_value |= random_bits;
- r.double_value -= binary_million;
-
- HeapNumber::cast(heap_number)->set_value(r.double_value);
- return heap_number;
-}
-
-void V8::InitializeOncePerProcessImpl() {
- FlagList::EnforceFlagImplications();
- if (FLAG_stress_compaction) {
- FLAG_force_marking_deque_overflows = true;
- FLAG_gc_global = true;
- FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
- }
- OS::SetUp();
- CPU::SetUp();
- use_crankshaft_ = FLAG_crankshaft
- && !Serializer::enabled()
- && CPU::SupportsCrankshaft();
- OS::PostSetUp();
- RuntimeProfiler::GlobalSetUp();
- ElementsAccessor::InitializeOncePerProcess();
- LOperand::SetUpCaches();
- SetUpJSCallerSavedCodeData();
- SamplerRegistry::SetUp();
- ExternalReference::SetUp();
-}
-
-void V8::InitializeOncePerProcess() {
- CallOnce(&init_once, &InitializeOncePerProcessImpl);
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/v8.h b/src/3rdparty/v8/src/v8.h
deleted file mode 100644
index e9c3d40..0000000
--- a/src/3rdparty/v8/src/v8.h
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-//
-// Top include for all V8 .cc files.
-//
-
-#ifndef V8_V8_H_
-#define V8_V8_H_
-
-#if defined(GOOGLE3)
-// Google3 special flag handling.
-#if defined(DEBUG) && defined(NDEBUG)
-// V8 only uses DEBUG and whenever it is set we are building a debug
-// version of V8. We do not use NDEBUG and simply undef it here for
-// consistency.
-#undef NDEBUG
-#endif
-#endif // defined(GOOGLE3)
-
-// V8 only uses DEBUG, but included external files
-// may use NDEBUG - make sure they are consistent.
-#if defined(DEBUG) && defined(NDEBUG)
-#error both DEBUG and NDEBUG are set
-#endif
-
-// For Windows CE, Windows headers need to be included first as they define ASSERT
-#ifdef _WIN32_WCE
-# include "win32-headers.h"
-#endif
-
-// Basic includes
-#include "../include/v8.h"
-#include "v8globals.h"
-#include "v8checks.h"
-#include "allocation.h"
-#include "v8utils.h"
-#include "flags.h"
-
-// Objects & heap
-#include "objects-inl.h"
-#include "spaces-inl.h"
-#include "heap-inl.h"
-#include "incremental-marking-inl.h"
-#include "mark-compact-inl.h"
-#include "log-inl.h"
-#include "cpu-profiler-inl.h"
-#include "handles-inl.h"
-#include "heap-snapshot-generator-inl.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-class Deserializer;
-
-class V8 : public AllStatic {
- public:
- // Global actions.
-
- // If Initialize is called with des == NULL, the initial state is
- // created from scratch. If a non-null Deserializer is given, the
- // initial state is created by reading the deserialized data into an
- // empty heap.
- static bool Initialize(Deserializer* des);
- static void TearDown();
- static bool IsRunning() { return is_running_; }
- static bool UseCrankshaft() { return use_crankshaft_; }
- // To be dead you have to have lived
- // TODO(isolates): move IsDead to Isolate.
- static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
- static void SetFatalError();
-
- // Report process out of memory. Implementation found in api.cc.
- static void FatalProcessOutOfMemory(const char* location,
- bool take_snapshot = false);
-
- // Allows an entropy source to be provided for use in random number
- // generation.
- static void SetEntropySource(EntropySource source);
- // Support for return-address rewriting profilers.
- static void SetReturnAddressLocationResolver(
- ReturnAddressLocationResolver resolver);
- // Random number generation support. Not cryptographically safe.
- static uint32_t Random(Context* context);
- // We use random numbers internally in memory allocation and in the
- // compilers for security. In order to prevent information leaks we
- // use a separate random state for internal random number
- // generation.
- static uint32_t RandomPrivate(Isolate* isolate);
- static Object* FillHeapNumberWithRandom(Object* heap_number,
- Context* context);
-
- // Idle notification directly from the API.
- static bool IdleNotification(int hint);
-
- static void AddCallCompletedCallback(CallCompletedCallback callback);
- static void RemoveCallCompletedCallback(CallCompletedCallback callback);
- static void FireCallCompletedCallback(Isolate* isolate);
-
- private:
- static void InitializeOncePerProcessImpl();
- static void InitializeOncePerProcess();
-
- // True if engine is currently running
- static bool is_running_;
- // True if V8 has ever been run
- static bool has_been_set_up_;
- // True if error has been signaled for current engine
- // (reset to false if engine is restarted)
- static bool has_fatal_error_;
- // True if engine has been shut down
- // (reset if engine is restarted)
- static bool has_been_disposed_;
- // True if we are using the crankshaft optimizing compiler.
- static bool use_crankshaft_;
- // List of callbacks when a Call completes.
- static List<CallCompletedCallback>* call_completed_callbacks_;
-};
-
-
-// JavaScript defines two kinds of 'nil'.
-enum NilValue { kNullValue, kUndefinedValue };
-
-
-// JavaScript defines two kinds of equality.
-enum EqualityKind { kStrictEquality, kNonStrictEquality };
-
-
-} } // namespace v8::internal
-
-namespace i = v8::internal;
-
-#endif // V8_V8_H_
diff --git a/src/3rdparty/v8/src/v8checks.h b/src/3rdparty/v8/src/v8checks.h
deleted file mode 100644
index 9857f73..0000000
--- a/src/3rdparty/v8/src/v8checks.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8CHECKS_H_
-#define V8_V8CHECKS_H_
-
-#include "checks.h"
-
-void API_Fatal(const char* location, const char* format, ...);
-
-namespace v8 {
- class Value;
- template <class T> class Handle;
-
-namespace internal {
- intptr_t HeapObjectTagMask();
-
-} } // namespace v8::internal
-
-
-void CheckNonEqualsHelper(const char* file,
- int line,
- const char* unexpected_source,
- v8::Handle<v8::Value> unexpected,
- const char* value_source,
- v8::Handle<v8::Value> value);
-
-void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- v8::Handle<v8::Value> expected,
- const char* value_source,
- v8::Handle<v8::Value> value);
-
-#define ASSERT_TAG_ALIGNED(address) \
- ASSERT((reinterpret_cast<intptr_t>(address) & HeapObjectTagMask()) == 0)
-
-#define ASSERT_SIZE_TAG_ALIGNED(size) ASSERT((size & HeapObjectTagMask()) == 0)
-
-#endif // V8_V8CHECKS_H_
diff --git a/src/3rdparty/v8/src/v8conversions.cc b/src/3rdparty/v8/src/v8conversions.cc
deleted file mode 100644
index 900b62d..0000000
--- a/src/3rdparty/v8/src/v8conversions.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-#include <limits.h>
-
-#include "v8.h"
-
-#include "conversions-inl.h"
-#include "v8conversions.h"
-#include "dtoa.h"
-#include "factory.h"
-#include "strtod.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-// C++-style iterator adaptor for StringCharacterStream
-// (unlike C++ iterators the end-marker has different type).
-class StringCharacterStreamIterator {
- public:
- class EndMarker {};
-
- explicit StringCharacterStreamIterator(StringCharacterStream* stream);
-
- uint16_t operator*() const;
- void operator++();
- bool operator==(EndMarker const&) const { return end_; }
- bool operator!=(EndMarker const& m) const { return !end_; }
-
- private:
- StringCharacterStream* const stream_;
- uint16_t current_;
- bool end_;
-};
-
-
-StringCharacterStreamIterator::StringCharacterStreamIterator(
- StringCharacterStream* stream) : stream_(stream) {
- ++(*this);
-}
-
-uint16_t StringCharacterStreamIterator::operator*() const {
- return current_;
-}
-
-
-void StringCharacterStreamIterator::operator++() {
- end_ = !stream_->HasMore();
- if (!end_) {
- current_ = stream_->GetNext();
- }
-}
-} // End anonymous namespace.
-
-
-double StringToDouble(UnicodeCache* unicode_cache,
- String* str, int flags, double empty_string_val) {
- StringShape shape(str);
- // TODO(dcarney): Use a Visitor here.
- if (shape.IsSequentialAscii()) {
- const uint8_t* begin = SeqOneByteString::cast(str)->GetChars();
- const uint8_t* end = begin + str->length();
- return InternalStringToDouble(unicode_cache, begin, end, flags,
- empty_string_val);
- } else if (shape.IsSequentialTwoByte()) {
- const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
- const uc16* end = begin + str->length();
- return InternalStringToDouble(unicode_cache, begin, end, flags,
- empty_string_val);
- } else {
- ConsStringIteratorOp op;
- StringCharacterStream stream(str, &op);
- return InternalStringToDouble(unicode_cache,
- StringCharacterStreamIterator(&stream),
- StringCharacterStreamIterator::EndMarker(),
- flags,
- empty_string_val);
- }
-}
-
-
-double StringToInt(UnicodeCache* unicode_cache,
- String* str,
- int radix) {
- StringShape shape(str);
- // TODO(dcarney): Use a Visitor here.
- if (shape.IsSequentialAscii()) {
- const uint8_t* begin = SeqOneByteString::cast(str)->GetChars();
- const uint8_t* end = begin + str->length();
- return InternalStringToInt(unicode_cache, begin, end, radix);
- } else if (shape.IsSequentialTwoByte()) {
- const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
- const uc16* end = begin + str->length();
- return InternalStringToInt(unicode_cache, begin, end, radix);
- } else {
- ConsStringIteratorOp op;
- StringCharacterStream stream(str, &op);
- return InternalStringToInt(unicode_cache,
- StringCharacterStreamIterator(&stream),
- StringCharacterStreamIterator::EndMarker(),
- radix);
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/v8conversions.h b/src/3rdparty/v8/src/v8conversions.h
deleted file mode 100644
index 0147d8c..0000000
--- a/src/3rdparty/v8/src/v8conversions.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8CONVERSIONS_H_
-#define V8_V8CONVERSIONS_H_
-
-#include "conversions.h"
-
-namespace v8 {
-namespace internal {
-
-// Convert from Number object to C integer.
-inline int32_t NumberToInt32(Object* number) {
- if (number->IsSmi()) return Smi::cast(number)->value();
- return DoubleToInt32(number->Number());
-}
-
-
-inline uint32_t NumberToUint32(Object* number) {
- if (number->IsSmi()) return Smi::cast(number)->value();
- return DoubleToUint32(number->Number());
-}
-
-
-// Converts a string into a double value according to ECMA-262 9.3.1
-double StringToDouble(UnicodeCache* unicode_cache,
- String* str,
- int flags,
- double empty_string_val = 0);
-
-// Converts a string into an integer.
-double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
-
-} } // namespace v8::internal
-
-#endif // V8_V8CONVERSIONS_H_
diff --git a/src/3rdparty/v8/src/v8dll-main.cc b/src/3rdparty/v8/src/v8dll-main.cc
deleted file mode 100644
index 49d8689..0000000
--- a/src/3rdparty/v8/src/v8dll-main.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The GYP based build ends up defining USING_V8_SHARED when compiling this
-// file.
-#undef USING_V8_SHARED
-#include "../include/v8.h"
-
-#ifdef WIN32
-#include <windows.h> // NOLINT
-
-extern "C" {
-BOOL WINAPI DllMain(HANDLE hinstDLL,
- DWORD dwReason,
- LPVOID lpvReserved) {
- // Do nothing.
- return TRUE;
-}
-}
-#endif
diff --git a/src/3rdparty/v8/src/v8globals.h b/src/3rdparty/v8/src/v8globals.h
deleted file mode 100644
index 072c365..0000000
--- a/src/3rdparty/v8/src/v8globals.h
+++ /dev/null
@@ -1,579 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8GLOBALS_H_
-#define V8_V8GLOBALS_H_
-
-#include "globals.h"
-#include "checks.h"
-
-namespace v8 {
-namespace internal {
-
-// This file contains constants and global declarations related to the
-// V8 system.
-
-// Mask for the sign bit in a smi.
-const intptr_t kSmiSignMask = kIntptrSignBit;
-
-const int kObjectAlignmentBits = kPointerSizeLog2;
-const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
-const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
-
-// Desired alignment for pointers.
-const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
-const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
-
-// Desired alignment for double values.
-const intptr_t kDoubleAlignment = 8;
-const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
-
-// Desired alignment for generated code is 32 bytes (to improve cache line
-// utilization).
-const int kCodeAlignmentBits = 5;
-const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
-const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
-
-// Tag information for Failure.
-const int kFailureTag = 3;
-const int kFailureTagSize = 2;
-const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;
-
-
-// Zap-value: The value used for zapping dead objects.
-// Should be a recognizable hex value tagged as a failure.
-#ifdef V8_HOST_ARCH_64_BIT
-const Address kZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));
-const Address kHandleZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf));
-const Address kGlobalHandleZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf));
-const Address kFromSpaceZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
-const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
-const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
-const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
-#else
-const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
-const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
-const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf);
-const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
-const uint32_t kSlotsZapValue = 0xbeefdeef;
-const uint32_t kDebugZapValue = 0xbadbaddb;
-const uint32_t kFreeListZapValue = 0xfeed1eaf;
-#endif
-
-const int kCodeZapValue = 0xbadc0de;
-
-// Number of bits to represent the page size for paged spaces. The value of 20
-// gives 1Mb bytes per page.
-const int kPageSizeBits = 20;
-
-// On Intel architecture, cache line size is 64 bytes.
-// On ARM it may be less (32 bytes), but as far this constant is
-// used for aligning data, it doesn't hurt to align on a greater value.
-const int kProcessorCacheLineSize = 64;
-
-// Constants relevant to double precision floating point numbers.
-// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
-const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
-
-
-// -----------------------------------------------------------------------------
-// Forward declarations for frequently used classes
-
-class AccessorInfo;
-class Allocation;
-class Arguments;
-class Assembler;
-class AssertNoAllocation;
-class Code;
-class CodeGenerator;
-class CodeStub;
-class Context;
-class Debug;
-class Debugger;
-class DebugInfo;
-class Descriptor;
-class DescriptorArray;
-class TransitionArray;
-class ExternalReference;
-class FixedArray;
-class FunctionTemplateInfo;
-class MemoryChunk;
-class SeededNumberDictionary;
-class UnseededNumberDictionary;
-class StringDictionary;
-template <typename T> class Handle;
-class Heap;
-class HeapObject;
-class IC;
-class InterceptorInfo;
-class JSArray;
-class JSFunction;
-class JSObject;
-class LargeObjectSpace;
-class LookupResult;
-class MacroAssembler;
-class Map;
-class MapSpace;
-class MarkCompactCollector;
-class NewSpace;
-class Object;
-class MaybeObject;
-class OldSpace;
-class Foreign;
-class Scope;
-class ScopeInfo;
-class Script;
-class Smi;
-template <typename Config, class Allocator = FreeStoreAllocationPolicy>
- class SplayTree;
-class String;
-class Struct;
-class Variable;
-class RelocInfo;
-class Deserializer;
-class MessageLocation;
-class ObjectGroup;
-class TickSample;
-class VirtualMemory;
-class Mutex;
-
-typedef bool (*WeakSlotCallback)(Object** pointer);
-
-typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);
-
-// -----------------------------------------------------------------------------
-// Miscellaneous
-
-// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
-// consecutive.
-enum AllocationSpace {
- NEW_SPACE, // Semispaces collected with copying collector.
- OLD_POINTER_SPACE, // May contain pointers to new space.
- OLD_DATA_SPACE, // Must not have pointers to new space.
- CODE_SPACE, // No pointers to new space, marked executable.
- MAP_SPACE, // Only and all map objects.
- CELL_SPACE, // Only and all cell objects.
- LO_SPACE, // Promoted large objects.
-
- FIRST_SPACE = NEW_SPACE,
- LAST_SPACE = LO_SPACE,
- FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
- LAST_PAGED_SPACE = CELL_SPACE
-};
-const int kSpaceTagSize = 3;
-const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
-
-
-// A flag that indicates whether objects should be pretenured when
-// allocated (allocated directly into the old generation) or not
-// (allocated in the young generation if the object size and type
-// allows).
-enum PretenureFlag { NOT_TENURED, TENURED };
-
-enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
-
-enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-
-enum VisitMode {
- VISIT_ALL,
- VISIT_ALL_IN_SCAVENGE,
- VISIT_ALL_IN_SWEEP_NEWSPACE,
- VISIT_ONLY_STRONG
-};
-
-// Flag indicating whether code is built into the VM (one of the natives files).
-enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
-
-
-// A CodeDesc describes a buffer holding instructions and relocation
-// information. The instructions start at the beginning of the buffer
-// and grow forward, the relocation information starts at the end of
-// the buffer and grows backward.
-//
-// |<--------------- buffer_size ---------------->|
-// |<-- instr_size -->| |<-- reloc_size -->|
-// +==================+========+==================+
-// | instructions | free | reloc info |
-// +==================+========+==================+
-// ^
-// |
-// buffer
-
-struct CodeDesc {
- byte* buffer;
- int buffer_size;
- int instr_size;
- int reloc_size;
- Assembler* origin;
-};
-
-
-// Callback function used for iterating objects in heap spaces,
-// for example, scanning heap objects.
-typedef int (*HeapObjectCallback)(HeapObject* obj);
-
-
-// Callback function used for checking constraints when copying/relocating
-// objects. Returns true if an object can be copied/relocated from its
-// old_addr to a new_addr.
-typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);
-
-
-// Callback function on inline caches, used for iterating over inline caches
-// in compiled code.
-typedef void (*InlineCacheCallback)(Code* code, Address ic);
-
-
-// State for inline cache call sites. Aliased as IC::State.
-enum InlineCacheState {
- // Has never been executed.
- UNINITIALIZED,
- // Has been executed but monomorhic state has been delayed.
- PREMONOMORPHIC,
- // Has been executed and only one receiver type has been seen.
- MONOMORPHIC,
- // Like MONOMORPHIC but check failed due to prototype.
- MONOMORPHIC_PROTOTYPE_FAILURE,
- // Multiple receiver types have been seen.
- POLYMORPHIC,
- // Many receiver types have been seen.
- MEGAMORPHIC,
- // A generic handler is installed and no extra typefeedback is recorded.
- GENERIC,
- // Special state for debug break or step in prepare stubs.
- DEBUG_STUB
-};
-
-
-enum CheckType {
- RECEIVER_MAP_CHECK,
- STRING_CHECK,
- SYMBOL_CHECK,
- NUMBER_CHECK,
- BOOLEAN_CHECK
-};
-
-
-enum CallFunctionFlags {
- NO_CALL_FUNCTION_FLAGS = 0,
- // Receiver might implicitly be the global objects. If it is, the
- // hole is passed to the call function stub.
- RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0,
- // The call target is cached in the instruction stream.
- RECORD_CALL_TARGET = 1 << 1
-};
-
-
-enum InlineCacheHolderFlag {
- OWN_MAP, // For fast properties objects.
- DELEGATE_MAP // For slow properties objects (except GlobalObjects).
-};
-
-
-// The Store Buffer (GC).
-typedef enum {
- kStoreBufferFullEvent,
- kStoreBufferStartScanningPagesEvent,
- kStoreBufferScanningPageEvent
-} StoreBufferEvent;
-
-
-typedef void (*StoreBufferCallback)(Heap* heap,
- MemoryChunk* page,
- StoreBufferEvent event);
-
-
-// Union used for fast testing of specific double values.
-union DoubleRepresentation {
- double value;
- int64_t bits;
- DoubleRepresentation(double x) { value = x; }
-};
-
-
-// Union used for customized checking of the IEEE double types
-// inlined within v8 runtime, rather than going to the underlying
-// platform headers and libraries
-union IeeeDoubleLittleEndianArchType {
- double d;
- struct {
- unsigned int man_low :32;
- unsigned int man_high :20;
- unsigned int exp :11;
- unsigned int sign :1;
- } bits;
-};
-
-
-union IeeeDoubleBigEndianArchType {
- double d;
- struct {
- unsigned int sign :1;
- unsigned int exp :11;
- unsigned int man_high :20;
- unsigned int man_low :32;
- } bits;
-};
-
-
-// AccessorCallback
-struct AccessorDescriptor {
- MaybeObject* (*getter)(Object* object, void* data);
- MaybeObject* (*setter)(JSObject* object, Object* value, void* data);
- void* data;
-};
-
-
-// Logging and profiling. A StateTag represents a possible state of
-// the VM. The logger maintains a stack of these. Creating a VMState
-// object enters a state by pushing on the stack, and destroying a
-// VMState object leaves a state by popping the current state from the
-// stack.
-
-enum StateTag {
- JS,
- GC,
- COMPILER,
- PARALLEL_COMPILER,
- OTHER,
- EXTERNAL
-};
-
-
-// -----------------------------------------------------------------------------
-// Macros
-
-// Testers for test.
-
-#define HAS_SMI_TAG(value) \
- ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)
-
-#define HAS_FAILURE_TAG(value) \
- ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
-
-// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
-#define OBJECT_POINTER_ALIGN(value) \
- (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
-
-// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
-#define POINTER_SIZE_ALIGN(value) \
- (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
-
-// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
-#define CODE_POINTER_ALIGN(value) \
- (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
-
-// Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
-// inside a C++ class and new and delete will be overloaded so logging is
-// performed.
-// This file (globals.h) is included before log.h, so we use direct calls to
-// the Logger rather than the LOG macro.
-#ifdef DEBUG
-#define TRACK_MEMORY(name) \
- void* operator new(size_t size) { \
- void* result = ::operator new(size); \
- Logger::NewEventStatic(name, result, size); \
- return result; \
- } \
- void operator delete(void* object) { \
- Logger::DeleteEventStatic(name, object); \
- ::operator delete(object); \
- }
-#else
-#define TRACK_MEMORY(name)
-#endif
-
-
-enum CpuImplementer {
- UNKNOWN_IMPLEMENTER,
- ARM_IMPLEMENTER,
- QUALCOMM_IMPLEMENTER
-};
-
-
-// Feature flags bit positions. They are mostly based on the CPUID spec.
-// (We assign CPUID itself to one of the currently reserved bits --
-// feel free to change this if needed.)
-// On X86/X64, values below 32 are bits in EDX, values above 32 are bits in ECX.
-enum CpuFeature { SSE4_1 = 32 + 19, // x86
- SSE3 = 32 + 0, // x86
- SSE2 = 26, // x86
- CMOV = 15, // x86
- RDTSC = 4, // x86
- CPUID = 10, // x86
- VFP3 = 1, // ARM
- ARMv7 = 2, // ARM
- VFP2 = 3, // ARM
- SUDIV = 4, // ARM
- UNALIGNED_ACCESSES = 5, // ARM
- MOVW_MOVT_IMMEDIATE_LOADS = 6, // ARM
- VFP32DREGS = 7, // ARM
- SAHF = 0, // x86
- FPU = 1}; // MIPS
-
-
-// Used to specify if a macro instruction must perform a smi check on tagged
-// values.
-enum SmiCheckType {
- DONT_DO_SMI_CHECK,
- DO_SMI_CHECK
-};
-
-
-// Used to specify whether a receiver is implicitly or explicitly
-// provided to a call.
-enum CallKind {
- CALL_AS_METHOD,
- CALL_AS_FUNCTION
-};
-
-
-enum ScopeType {
- EVAL_SCOPE, // The top-level scope for an eval source.
- FUNCTION_SCOPE, // The top-level scope for a function.
- MODULE_SCOPE, // The scope introduced by a module literal
- GLOBAL_SCOPE, // The top-level scope for a program or a top-level eval.
- CATCH_SCOPE, // The scope introduced by catch.
- BLOCK_SCOPE, // The scope introduced by a new block.
- WITH_SCOPE // The scope introduced by with.
-};
-
-
-const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
-const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
-const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
-
-const uint64_t kHoleNanInt64 =
- (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
-const uint64_t kLastNonNaNInt64 =
- (static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
-
-
-// The order of this enum has to be kept in sync with the predicates below.
-enum VariableMode {
- // User declared variables:
- VAR, // declared via 'var', and 'function' declarations
-
- CONST, // declared via 'const' declarations
-
- LET, // declared via 'let' declarations (first lexical)
-
- CONST_HARMONY, // declared via 'const' declarations in harmony mode
-
- MODULE, // declared via 'module' declaration (last lexical)
-
- // Variables introduced by the compiler:
- INTERNAL, // like VAR, but not user-visible (may or may not
- // be in a context)
-
- TEMPORARY, // temporary variables (not user-visible), never
- // in a context
-
- DYNAMIC, // always require dynamic lookup (we don't know
- // the declaration)
-
- DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the
- // variable is global unless it has been shadowed
- // by an eval-introduced variable
-
- DYNAMIC_LOCAL // requires dynamic lookup, but we know that the
- // variable is local and where it is unless it
- // has been shadowed by an eval-introduced
- // variable
-};
-
-
-inline bool IsDynamicVariableMode(VariableMode mode) {
- return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL;
-}
-
-
-inline bool IsDeclaredVariableMode(VariableMode mode) {
- return mode >= VAR && mode <= MODULE;
-}
-
-
-inline bool IsLexicalVariableMode(VariableMode mode) {
- return mode >= LET && mode <= MODULE;
-}
-
-
-inline bool IsImmutableVariableMode(VariableMode mode) {
- return mode == CONST || (mode >= CONST_HARMONY && mode <= MODULE);
-}
-
-
-// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
-// and immutable bindings that can be in two states: initialized and
-// uninitialized. In ES5 only immutable bindings have these two states. When
-// accessing a binding, it needs to be checked for initialization. However in
-// the following cases the binding is initialized immediately after creation
-// so the initialization check can always be skipped:
-// 1. Var declared local variables.
-// var foo;
-// 2. A local variable introduced by a function declaration.
-// function foo() {}
-// 3. Parameters
-// function x(foo) {}
-// 4. Catch bound variables.
-// try {} catch (foo) {}
-// 6. Function variables of named function expressions.
-// var x = function foo() {}
-// 7. Implicit binding of 'this'.
-// 8. Implicit binding of 'arguments' in functions.
-//
-// ES5 specified object environment records which are introduced by ES elements
-// such as Program and WithStatement that associate identifier bindings with the
-// properties of some object. In the specification only mutable bindings exist
-// (which may be non-writable) and have no distinct initialization step. However
-// V8 allows const declarations in global code with distinct creation and
-// initialization steps which are represented by non-writable properties in the
-// global object. As a result also these bindings need to be checked for
-// initialization.
-//
-// The following enum specifies a flag that indicates if the binding needs a
-// distinct initialization step (kNeedsInitialization) or if the binding is
-// immediately initialized upon creation (kCreatedInitialized).
-enum InitializationFlag {
- kNeedsInitialization,
- kCreatedInitialized
-};
-
-
-enum ClearExceptionFlag {
- KEEP_EXCEPTION,
- CLEAR_EXCEPTION
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_V8GLOBALS_H_
diff --git a/src/3rdparty/v8/src/v8memory.h b/src/3rdparty/v8/src/v8memory.h
deleted file mode 100644
index f71de82..0000000
--- a/src/3rdparty/v8/src/v8memory.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MEMORY_H_
-#define V8_MEMORY_H_
-
-namespace v8 {
-namespace internal {
-
-// Memory provides an interface to 'raw' memory. It encapsulates the casts
-// that typically are needed when incompatible pointer types are used.
-
-class Memory {
- public:
- static uint8_t& uint8_at(Address addr) {
- return *reinterpret_cast<uint8_t*>(addr);
- }
-
- static uint16_t& uint16_at(Address addr) {
- return *reinterpret_cast<uint16_t*>(addr);
- }
-
- static uint32_t& uint32_at(Address addr) {
- return *reinterpret_cast<uint32_t*>(addr);
- }
-
- static int32_t& int32_at(Address addr) {
- return *reinterpret_cast<int32_t*>(addr);
- }
-
- static uint64_t& uint64_at(Address addr) {
- return *reinterpret_cast<uint64_t*>(addr);
- }
-
- static int& int_at(Address addr) {
- return *reinterpret_cast<int*>(addr);
- }
-
- static unsigned& unsigned_at(Address addr) {
- return *reinterpret_cast<unsigned*>(addr);
- }
-
- static double& double_at(Address addr) {
- return *reinterpret_cast<double*>(addr);
- }
-
- static Address& Address_at(Address addr) {
- return *reinterpret_cast<Address*>(addr);
- }
-
- static Object*& Object_at(Address addr) {
- return *reinterpret_cast<Object**>(addr);
- }
-
- static Handle<Object>& Object_Handle_at(Address addr) {
- return *reinterpret_cast<Handle<Object>*>(addr);
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_MEMORY_H_
diff --git a/src/3rdparty/v8/src/v8natives.js b/src/3rdparty/v8/src/v8natives.js
deleted file mode 100644
index 356ce88..0000000
--- a/src/3rdparty/v8/src/v8natives.js
+++ /dev/null
@@ -1,1732 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file relies on the fact that the following declarations have been made
-//
-// in runtime.js:
-// var $Object = global.Object;
-// var $Boolean = global.Boolean;
-// var $Number = global.Number;
-// var $Function = global.Function;
-// var $Array = global.Array;
-// var $NaN = 0/0;
-//
-// in math.js:
-// var $floor = MathFloor
-
-var $isNaN = GlobalIsNaN;
-var $isFinite = GlobalIsFinite;
-
-// ----------------------------------------------------------------------------
-
-
-// Helper function used to install functions on objects.
-function InstallFunctions(object, attributes, functions) {
- if (functions.length >= 8) {
- %OptimizeObjectForAddingMultipleProperties(object, functions.length >> 1);
- }
- for (var i = 0; i < functions.length; i += 2) {
- var key = functions[i];
- var f = functions[i + 1];
- %FunctionSetName(f, key);
- %FunctionRemovePrototype(f);
- %SetProperty(object, key, f, attributes);
- %SetNativeFlag(f);
- }
- %ToFastProperties(object);
-}
-
-
-// Helper function to install a getter only property.
-function InstallGetter(object, name, getter) {
- %FunctionSetName(getter, name);
- %FunctionRemovePrototype(getter);
- %DefineOrRedefineAccessorProperty(object, name, getter, null, DONT_ENUM);
- %SetNativeFlag(getter);
-}
-
-
-// Prevents changes to the prototype of a built-in function.
-// The "prototype" property of the function object is made non-configurable,
-// and the prototype object is made non-extensible. The latter prevents
-// changing the __proto__ property.
-function SetUpLockedPrototype(constructor, fields, methods) {
- %CheckIsBootstrapping();
- var prototype = constructor.prototype;
- // Install functions first, because this function is used to initialize
- // PropertyDescriptor itself.
- var property_count = (methods.length >> 1) + (fields ? fields.length : 0);
- if (property_count >= 4) {
- %OptimizeObjectForAddingMultipleProperties(prototype, property_count);
- }
- if (fields) {
- for (var i = 0; i < fields.length; i++) {
- %SetProperty(prototype, fields[i], void 0, DONT_ENUM | DONT_DELETE);
- }
- }
- for (var i = 0; i < methods.length; i += 2) {
- var key = methods[i];
- var f = methods[i + 1];
- %SetProperty(prototype, key, f, DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetNativeFlag(f);
- }
- prototype.__proto__ = null;
- %ToFastProperties(prototype);
-}
-
-
-// ----------------------------------------------------------------------------
-
-
-// ECMA 262 - 15.1.4
-function GlobalIsNaN(number) {
- if (!IS_NUMBER(number)) number = NonNumberToNumber(number);
- return NUMBER_IS_NAN(number);
-}
-
-
-// ECMA 262 - 15.1.5
-function GlobalIsFinite(number) {
- if (!IS_NUMBER(number)) number = NonNumberToNumber(number);
- return NUMBER_IS_FINITE(number);
-}
-
-
-// ECMA-262 - 15.1.2.2
-function GlobalParseInt(string, radix) {
- if (IS_UNDEFINED(radix) || radix === 10 || radix === 0) {
- // Some people use parseInt instead of Math.floor. This
- // optimization makes parseInt on a Smi 12 times faster (60ns
- // vs 800ns). The following optimization makes parseInt on a
- // non-Smi number 9 times faster (230ns vs 2070ns). Together
- // they make parseInt on a string 1.4% slower (274ns vs 270ns).
- if (%_IsSmi(string)) return string;
- if (IS_NUMBER(string) &&
- ((0.01 < string && string < 1e9) ||
- (-1e9 < string && string < -0.01))) {
- // Truncate number.
- return string | 0;
- }
- string = TO_STRING_INLINE(string);
- radix = radix | 0;
- } else {
- // The spec says ToString should be evaluated before ToInt32.
- string = TO_STRING_INLINE(string);
- radix = TO_INT32(radix);
- if (!(radix == 0 || (2 <= radix && radix <= 36))) {
- return $NaN;
- }
- }
-
- if (%_HasCachedArrayIndex(string) &&
- (radix == 0 || radix == 10)) {
- return %_GetCachedArrayIndex(string);
- }
- return %StringParseInt(string, radix);
-}
-
-
-// ECMA-262 - 15.1.2.3
-function GlobalParseFloat(string) {
- string = TO_STRING_INLINE(string);
- if (%_HasCachedArrayIndex(string)) return %_GetCachedArrayIndex(string);
- return %StringParseFloat(string);
-}
-
-
-function GlobalEval(x) {
- if (!IS_STRING(x)) return x;
-
- var global_receiver = %GlobalReceiver(global);
- var global_is_detached = (global === global_receiver);
-
- // For consistency with JSC we require the global object passed to
- // eval to be the global object from which 'eval' originated. This
- // is not mandated by the spec.
- // We only throw if the global has been detached, since we need the
- // receiver as this-value for the call.
- if (global_is_detached) {
- throw new $EvalError('The "this" value passed to eval must ' +
- 'be the global object from which eval originated');
- }
-
- var f = %CompileString(x);
- if (!IS_FUNCTION(f)) return f;
-
- return %_CallFunction(global_receiver, f);
-}
-
-
-// ----------------------------------------------------------------------------
-
-// Set up global object.
-function SetUpGlobal() {
- %CheckIsBootstrapping();
- // ECMA 262 - 15.1.1.1.
- %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 - 15.1.1.2.
- %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 - 15.1.1.3.
- %SetProperty(global, "undefined", void 0,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // Set up non-enumerable function on the global object.
- InstallFunctions(global, DONT_ENUM, $Array(
- "isNaN", GlobalIsNaN,
- "isFinite", GlobalIsFinite,
- "parseInt", GlobalParseInt,
- "parseFloat", GlobalParseFloat,
- "eval", GlobalEval
- ));
-}
-
-SetUpGlobal();
-
-// ----------------------------------------------------------------------------
-// Boolean (first part of definition)
-
-
-%SetCode($Boolean, function(x) {
- if (%_IsConstructCall()) {
- %_SetValueOf(this, ToBoolean(x));
- } else {
- return ToBoolean(x);
- }
-});
-
-%FunctionSetPrototype($Boolean, new $Boolean(false));
-
-%SetProperty($Boolean.prototype, "constructor", $Boolean, DONT_ENUM);
-
-// ----------------------------------------------------------------------------
-// Object
-
-$Object.prototype.constructor = $Object;
-
-// ECMA-262 - 15.2.4.2
-function ObjectToString() {
- if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) return "[object Undefined]";
- if (IS_NULL(this)) return "[object Null]";
- if (IS_SYMBOL(this)) return "[object Symbol]";
- return "[object " + %_ClassOf(ToObject(this)) + "]";
-}
-
-
-// ECMA-262 - 15.2.4.3
-function ObjectToLocaleString() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Object.prototype.toLocaleString"]);
- }
- return this.toString();
-}
-
-
-// ECMA-262 - 15.2.4.4
-function ObjectValueOf() {
- return ToObject(this);
-}
-
-
-// ECMA-262 - 15.2.4.5
-function ObjectHasOwnProperty(V) {
- if (%IsJSProxy(this)) {
- var handler = %GetHandler(this);
- return CallTrap1(handler, "hasOwn", DerivedHasOwnTrap, TO_STRING_INLINE(V));
- }
- return %HasLocalProperty(TO_OBJECT_INLINE(this), TO_STRING_INLINE(V));
-}
-
-
-// ECMA-262 - 15.2.4.6
-function ObjectIsPrototypeOf(V) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Object.prototype.isPrototypeOf"]);
- }
- if (!IS_SPEC_OBJECT(V)) return false;
- return %IsInPrototypeChain(this, V);
-}
-
-
-// ECMA-262 - 15.2.4.6
-function ObjectPropertyIsEnumerable(V) {
- var P = ToString(V);
- if (%IsJSProxy(this)) {
- var desc = GetOwnProperty(this, P);
- return IS_UNDEFINED(desc) ? false : desc.isEnumerable();
- }
- return %IsPropertyEnumerable(ToObject(this), P);
-}
-
-
-// Extensions for providing property getters and setters.
-function ObjectDefineGetter(name, fun) {
- var receiver = this;
- if (receiver == null && !IS_UNDETECTABLE(receiver)) {
- receiver = %GlobalReceiver(global);
- }
- if (!IS_SPEC_FUNCTION(fun)) {
- throw new $TypeError(
- 'Object.prototype.__defineGetter__: Expecting function');
- }
- var desc = new PropertyDescriptor();
- desc.setGet(fun);
- desc.setEnumerable(true);
- desc.setConfigurable(true);
- DefineOwnProperty(ToObject(receiver), ToString(name), desc, false);
-}
-
-
-function ObjectLookupGetter(name) {
- var receiver = this;
- if (receiver == null && !IS_UNDETECTABLE(receiver)) {
- receiver = %GlobalReceiver(global);
- }
- return %LookupAccessor(ToObject(receiver), ToString(name), GETTER);
-}
-
-
-function ObjectDefineSetter(name, fun) {
- var receiver = this;
- if (receiver == null && !IS_UNDETECTABLE(receiver)) {
- receiver = %GlobalReceiver(global);
- }
- if (!IS_SPEC_FUNCTION(fun)) {
- throw new $TypeError(
- 'Object.prototype.__defineSetter__: Expecting function');
- }
- var desc = new PropertyDescriptor();
- desc.setSet(fun);
- desc.setEnumerable(true);
- desc.setConfigurable(true);
- DefineOwnProperty(ToObject(receiver), ToString(name), desc, false);
-}
-
-
-function ObjectLookupSetter(name) {
- var receiver = this;
- if (receiver == null && !IS_UNDETECTABLE(receiver)) {
- receiver = %GlobalReceiver(global);
- }
- return %LookupAccessor(ToObject(receiver), ToString(name), SETTER);
-}
-
-
-function ObjectKeys(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object", ["Object.keys"]);
- }
- if (%IsJSProxy(obj)) {
- var handler = %GetHandler(obj);
- var names = CallTrap0(handler, "keys", DerivedKeysTrap);
- return ToStringArray(names, "keys");
- }
- return %LocalKeys(obj);
-}
-
-
-// ES5 8.10.1.
-function IsAccessorDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return false;
- return desc.hasGetter() || desc.hasSetter();
-}
-
-
-// ES5 8.10.2.
-function IsDataDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return false;
- return desc.hasValue() || desc.hasWritable();
-}
-
-
-// ES5 8.10.3.
-function IsGenericDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return false;
- return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
-}
-
-
-function IsInconsistentDescriptor(desc) {
- return IsAccessorDescriptor(desc) && IsDataDescriptor(desc);
-}
-
-
-// ES5 8.10.4
-function FromPropertyDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return desc;
-
- if (IsDataDescriptor(desc)) {
- return { value: desc.getValue(),
- writable: desc.isWritable(),
- enumerable: desc.isEnumerable(),
- configurable: desc.isConfigurable() };
- }
- // Must be an AccessorDescriptor then. We never return a generic descriptor.
- return { get: desc.getGet(),
- set: desc.getSet(),
- enumerable: desc.isEnumerable(),
- configurable: desc.isConfigurable() };
-}
-
-
-// Harmony Proxies
-function FromGenericPropertyDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return desc;
- var obj = new $Object();
-
- if (desc.hasValue()) {
- %IgnoreAttributesAndSetProperty(obj, "value", desc.getValue(), NONE);
- }
- if (desc.hasWritable()) {
- %IgnoreAttributesAndSetProperty(obj, "writable", desc.isWritable(), NONE);
- }
- if (desc.hasGetter()) {
- %IgnoreAttributesAndSetProperty(obj, "get", desc.getGet(), NONE);
- }
- if (desc.hasSetter()) {
- %IgnoreAttributesAndSetProperty(obj, "set", desc.getSet(), NONE);
- }
- if (desc.hasEnumerable()) {
- %IgnoreAttributesAndSetProperty(obj, "enumerable",
- desc.isEnumerable(), NONE);
- }
- if (desc.hasConfigurable()) {
- %IgnoreAttributesAndSetProperty(obj, "configurable",
- desc.isConfigurable(), NONE);
- }
- return obj;
-}
-
-
-// ES5 8.10.5.
-function ToPropertyDescriptor(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("property_desc_object", [obj]);
- }
- var desc = new PropertyDescriptor();
-
- if ("enumerable" in obj) {
- desc.setEnumerable(ToBoolean(obj.enumerable));
- }
-
- if ("configurable" in obj) {
- desc.setConfigurable(ToBoolean(obj.configurable));
- }
-
- if ("value" in obj) {
- desc.setValue(obj.value);
- }
-
- if ("writable" in obj) {
- desc.setWritable(ToBoolean(obj.writable));
- }
-
- if ("get" in obj) {
- var get = obj.get;
- if (!IS_UNDEFINED(get) && !IS_SPEC_FUNCTION(get)) {
- throw MakeTypeError("getter_must_be_callable", [get]);
- }
- desc.setGet(get);
- }
-
- if ("set" in obj) {
- var set = obj.set;
- if (!IS_UNDEFINED(set) && !IS_SPEC_FUNCTION(set)) {
- throw MakeTypeError("setter_must_be_callable", [set]);
- }
- desc.setSet(set);
- }
-
- if (IsInconsistentDescriptor(desc)) {
- throw MakeTypeError("value_and_accessor", [obj]);
- }
- return desc;
-}
-
-
-// For Harmony proxies.
-function ToCompletePropertyDescriptor(obj) {
- var desc = ToPropertyDescriptor(obj);
- if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
- if (!desc.hasValue()) desc.setValue(void 0);
- if (!desc.hasWritable()) desc.setWritable(false);
- } else {
- // Is accessor descriptor.
- if (!desc.hasGetter()) desc.setGet(void 0);
- if (!desc.hasSetter()) desc.setSet(void 0);
- }
- if (!desc.hasEnumerable()) desc.setEnumerable(false);
- if (!desc.hasConfigurable()) desc.setConfigurable(false);
- return desc;
-}
-
-
-function PropertyDescriptor() {
- // Initialize here so they are all in-object and have the same map.
- // Default values from ES5 8.6.1.
- this.value_ = void 0;
- this.hasValue_ = false;
- this.writable_ = false;
- this.hasWritable_ = false;
- this.enumerable_ = false;
- this.hasEnumerable_ = false;
- this.configurable_ = false;
- this.hasConfigurable_ = false;
- this.get_ = void 0;
- this.hasGetter_ = false;
- this.set_ = void 0;
- this.hasSetter_ = false;
-}
-
-SetUpLockedPrototype(PropertyDescriptor, $Array(
- "value_",
- "hasValue_",
- "writable_",
- "hasWritable_",
- "enumerable_",
- "hasEnumerable_",
- "configurable_",
- "hasConfigurable_",
- "get_",
- "hasGetter_",
- "set_",
- "hasSetter_"
- ), $Array(
- "toString", function() {
- return "[object PropertyDescriptor]";
- },
- "setValue", function(value) {
- this.value_ = value;
- this.hasValue_ = true;
- },
- "getValue", function() {
- return this.value_;
- },
- "hasValue", function() {
- return this.hasValue_;
- },
- "setEnumerable", function(enumerable) {
- this.enumerable_ = enumerable;
- this.hasEnumerable_ = true;
- },
- "isEnumerable", function () {
- return this.enumerable_;
- },
- "hasEnumerable", function() {
- return this.hasEnumerable_;
- },
- "setWritable", function(writable) {
- this.writable_ = writable;
- this.hasWritable_ = true;
- },
- "isWritable", function() {
- return this.writable_;
- },
- "hasWritable", function() {
- return this.hasWritable_;
- },
- "setConfigurable", function(configurable) {
- this.configurable_ = configurable;
- this.hasConfigurable_ = true;
- },
- "hasConfigurable", function() {
- return this.hasConfigurable_;
- },
- "isConfigurable", function() {
- return this.configurable_;
- },
- "setGet", function(get) {
- this.get_ = get;
- this.hasGetter_ = true;
- },
- "getGet", function() {
- return this.get_;
- },
- "hasGetter", function() {
- return this.hasGetter_;
- },
- "setSet", function(set) {
- this.set_ = set;
- this.hasSetter_ = true;
- },
- "getSet", function() {
- return this.set_;
- },
- "hasSetter", function() {
- return this.hasSetter_;
- }));
-
-
-// Converts an array returned from Runtime_GetOwnProperty to an actual
-// property descriptor. For a description of the array layout please
-// see the runtime.cc file.
-function ConvertDescriptorArrayToDescriptor(desc_array) {
- if (desc_array === false) {
- throw 'Internal error: invalid desc_array';
- }
-
- if (IS_UNDEFINED(desc_array)) {
- return void 0;
- }
-
- var desc = new PropertyDescriptor();
- // This is an accessor.
- if (desc_array[IS_ACCESSOR_INDEX]) {
- desc.setGet(desc_array[GETTER_INDEX]);
- desc.setSet(desc_array[SETTER_INDEX]);
- } else {
- desc.setValue(desc_array[VALUE_INDEX]);
- desc.setWritable(desc_array[WRITABLE_INDEX]);
- }
- desc.setEnumerable(desc_array[ENUMERABLE_INDEX]);
- desc.setConfigurable(desc_array[CONFIGURABLE_INDEX]);
-
- return desc;
-}
-
-
-// For Harmony proxies.
-function GetTrap(handler, name, defaultTrap) {
- var trap = handler[name];
- if (IS_UNDEFINED(trap)) {
- if (IS_UNDEFINED(defaultTrap)) {
- throw MakeTypeError("handler_trap_missing", [handler, name]);
- }
- trap = defaultTrap;
- } else if (!IS_SPEC_FUNCTION(trap)) {
- throw MakeTypeError("handler_trap_must_be_callable", [handler, name]);
- }
- return trap;
-}
-
-
-function CallTrap0(handler, name, defaultTrap) {
- return %_CallFunction(handler, GetTrap(handler, name, defaultTrap));
-}
-
-
-function CallTrap1(handler, name, defaultTrap, x) {
- return %_CallFunction(handler, x, GetTrap(handler, name, defaultTrap));
-}
-
-
-function CallTrap2(handler, name, defaultTrap, x, y) {
- return %_CallFunction(handler, x, y, GetTrap(handler, name, defaultTrap));
-}
-
-
-// ES5 section 8.12.1.
-function GetOwnProperty(obj, v) {
- var p = ToString(v);
- if (%IsJSProxy(obj)) {
- var handler = %GetHandler(obj);
- var descriptor = CallTrap1(handler, "getOwnPropertyDescriptor", void 0, p);
- if (IS_UNDEFINED(descriptor)) return descriptor;
- var desc = ToCompletePropertyDescriptor(descriptor);
- if (!desc.isConfigurable()) {
- throw MakeTypeError("proxy_prop_not_configurable",
- [handler, "getOwnPropertyDescriptor", p, descriptor]);
- }
- return desc;
- }
-
- // GetOwnProperty returns an array indexed by the constants
- // defined in macros.py.
- // If p is not a property on obj undefined is returned.
- var props = %GetOwnProperty(ToObject(obj), ToString(v));
-
- // A false value here means that access checks failed.
- if (props === false) return void 0;
-
- return ConvertDescriptorArrayToDescriptor(props);
-}
-
-
-// ES5 section 8.12.7.
-function Delete(obj, p, should_throw) {
- var desc = GetOwnProperty(obj, p);
- if (IS_UNDEFINED(desc)) return true;
- if (desc.isConfigurable()) {
- %DeleteProperty(obj, p, 0);
- return true;
- } else if (should_throw) {
- throw MakeTypeError("define_disallowed", [p]);
- } else {
- return;
- }
-}
-
-
-// Harmony proxies.
-function DefineProxyProperty(obj, p, attributes, should_throw) {
- var handler = %GetHandler(obj);
- var result = CallTrap2(handler, "defineProperty", void 0, p, attributes);
- if (!ToBoolean(result)) {
- if (should_throw) {
- throw MakeTypeError("handler_returned_false",
- [handler, "defineProperty"]);
- } else {
- return false;
- }
- }
- return true;
-}
-
-
-// ES5 8.12.9.
-function DefineObjectProperty(obj, p, desc, should_throw) {
- var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
- // A false value here means that access checks failed.
- if (current_or_access === false) return void 0;
-
- var current = ConvertDescriptorArrayToDescriptor(current_or_access);
- var extensible = %IsExtensible(ToObject(obj));
-
- // Error handling according to spec.
- // Step 3
- if (IS_UNDEFINED(current) && !extensible) {
- if (should_throw) {
- throw MakeTypeError("define_disallowed", [p]);
- } else {
- return false;
- }
- }
-
- if (!IS_UNDEFINED(current)) {
- // Step 5 and 6
- if ((IsGenericDescriptor(desc) ||
- IsDataDescriptor(desc) == IsDataDescriptor(current)) &&
- (!desc.hasEnumerable() ||
- SameValue(desc.isEnumerable(), current.isEnumerable())) &&
- (!desc.hasConfigurable() ||
- SameValue(desc.isConfigurable(), current.isConfigurable())) &&
- (!desc.hasWritable() ||
- SameValue(desc.isWritable(), current.isWritable())) &&
- (!desc.hasValue() ||
- SameValue(desc.getValue(), current.getValue())) &&
- (!desc.hasGetter() ||
- SameValue(desc.getGet(), current.getGet())) &&
- (!desc.hasSetter() ||
- SameValue(desc.getSet(), current.getSet()))) {
- return true;
- }
- if (!current.isConfigurable()) {
- // Step 7
- if (desc.isConfigurable() ||
- (desc.hasEnumerable() &&
- desc.isEnumerable() != current.isEnumerable())) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", [p]);
- } else {
- return false;
- }
- }
- // Step 8
- if (!IsGenericDescriptor(desc)) {
- // Step 9a
- if (IsDataDescriptor(current) != IsDataDescriptor(desc)) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", [p]);
- } else {
- return false;
- }
- }
- // Step 10a
- if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
- if (!current.isWritable() && desc.isWritable()) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", [p]);
- } else {
- return false;
- }
- }
- if (!current.isWritable() && desc.hasValue() &&
- !SameValue(desc.getValue(), current.getValue())) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", [p]);
- } else {
- return false;
- }
- }
- }
- // Step 11
- if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
- if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", [p]);
- } else {
- return false;
- }
- }
- if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", [p]);
- } else {
- return false;
- }
- }
- }
- }
- }
- }
-
- // Send flags - enumerable and configurable are common - writable is
- // only send to the data descriptor.
- // Take special care if enumerable and configurable is not defined on
- // desc (we need to preserve the existing values from current).
- var flag = NONE;
- if (desc.hasEnumerable()) {
- flag |= desc.isEnumerable() ? 0 : DONT_ENUM;
- } else if (!IS_UNDEFINED(current)) {
- flag |= current.isEnumerable() ? 0 : DONT_ENUM;
- } else {
- flag |= DONT_ENUM;
- }
-
- if (desc.hasConfigurable()) {
- flag |= desc.isConfigurable() ? 0 : DONT_DELETE;
- } else if (!IS_UNDEFINED(current)) {
- flag |= current.isConfigurable() ? 0 : DONT_DELETE;
- } else
- flag |= DONT_DELETE;
-
- if (IsDataDescriptor(desc) ||
- (IsGenericDescriptor(desc) &&
- (IS_UNDEFINED(current) || IsDataDescriptor(current)))) {
- // There are 3 cases that lead here:
- // Step 4a - defining a new data property.
- // Steps 9b & 12 - replacing an existing accessor property with a data
- // property.
- // Step 12 - updating an existing data property with a data or generic
- // descriptor.
-
- if (desc.hasWritable()) {
- flag |= desc.isWritable() ? 0 : READ_ONLY;
- } else if (!IS_UNDEFINED(current)) {
- flag |= current.isWritable() ? 0 : READ_ONLY;
- } else {
- flag |= READ_ONLY;
- }
-
- var value = void 0; // Default value is undefined.
- if (desc.hasValue()) {
- value = desc.getValue();
- } else if (!IS_UNDEFINED(current) && IsDataDescriptor(current)) {
- value = current.getValue();
- }
-
- %DefineOrRedefineDataProperty(obj, p, value, flag);
- } else {
- // There are 3 cases that lead here:
- // Step 4b - defining a new accessor property.
- // Steps 9c & 12 - replacing an existing data property with an accessor
- // property.
- // Step 12 - updating an existing accessor property with an accessor
- // descriptor.
- var getter = desc.hasGetter() ? desc.getGet() : null;
- var setter = desc.hasSetter() ? desc.getSet() : null;
- %DefineOrRedefineAccessorProperty(obj, p, getter, setter, flag);
- }
- return true;
-}
-
-
-// ES5 section 15.4.5.1.
-function DefineArrayProperty(obj, p, desc, should_throw) {
- // Note that the length of an array is not actually stored as part of the
- // property, hence we use generated code throughout this function instead of
- // DefineObjectProperty() to modify its value.
-
- // Step 3 - Special handling for length property.
- if (p == "length") {
- var length = obj.length;
- if (!desc.hasValue()) {
- return DefineObjectProperty(obj, "length", desc, should_throw);
- }
- var new_length = ToUint32(desc.getValue());
- if (new_length != ToNumber(desc.getValue())) {
- throw new $RangeError('defineProperty() array length out of range');
- }
- var length_desc = GetOwnProperty(obj, "length");
- if (new_length != length && !length_desc.isWritable()) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", [p]);
- } else {
- return false;
- }
- }
- var threw = false;
- while (new_length < length--) {
- if (!Delete(obj, ToString(length), false)) {
- new_length = length + 1;
- threw = true;
- break;
- }
- }
- // Make sure the below call to DefineObjectProperty() doesn't overwrite
- // any magic "length" property by removing the value.
- // TODO(mstarzinger): This hack should be removed once we have addressed the
- // respective TODO in Runtime_DefineOrRedefineDataProperty.
- // For the time being, we need a hack to prevent Object.observe from
- // generating two change records.
- var isObserved = %IsObserved(obj);
- if (isObserved) %SetIsObserved(obj, false);
- obj.length = new_length;
- desc.value_ = void 0;
- desc.hasValue_ = false;
- threw = !DefineObjectProperty(obj, "length", desc, should_throw) || threw;
- if (isObserved) %SetIsObserved(obj, true);
- if (threw) {
- if (should_throw) {
- throw MakeTypeError("redefine_disallowed", [p]);
- } else {
- return false;
- }
- }
- if (isObserved) {
- var new_desc = GetOwnProperty(obj, "length");
- var updated = length_desc.value_ !== new_desc.value_;
- var reconfigured = length_desc.writable_ !== new_desc.writable_ ||
- length_desc.configurable_ !== new_desc.configurable_ ||
- length_desc.enumerable_ !== new_desc.configurable_;
- if (updated || reconfigured) {
- NotifyChange(reconfigured ? "reconfigured" : "updated",
- obj, "length", length_desc.value_);
- }
- }
- return true;
- }
-
- // Step 4 - Special handling for array index.
- var index = ToUint32(p);
- if (index == ToNumber(p) && index != 4294967295) {
- var length = obj.length;
- var length_desc = GetOwnProperty(obj, "length");
- if ((index >= length && !length_desc.isWritable()) ||
- !DefineObjectProperty(obj, p, desc, true)) {
- if (should_throw) {
- throw MakeTypeError("define_disallowed", [p]);
- } else {
- return false;
- }
- }
- if (index >= length) {
- obj.length = index + 1;
- }
- return true;
- }
-
- // Step 5 - Fallback to default implementation.
- return DefineObjectProperty(obj, p, desc, should_throw);
-}
-
-
-// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
-function DefineOwnProperty(obj, p, desc, should_throw) {
- if (%IsJSProxy(obj)) {
- var attributes = FromGenericPropertyDescriptor(desc);
- return DefineProxyProperty(obj, p, attributes, should_throw);
- } else if (IS_ARRAY(obj)) {
- return DefineArrayProperty(obj, p, desc, should_throw);
- } else {
- return DefineObjectProperty(obj, p, desc, should_throw);
- }
-}
-
-
-// ES5 section 15.2.3.2.
-function ObjectGetPrototypeOf(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object", ["Object.getPrototypeOf"]);
- }
- return %GetPrototype(obj);
-}
-
-
-// ES5 section 15.2.3.3
-function ObjectGetOwnPropertyDescriptor(obj, p) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object",
- ["Object.getOwnPropertyDescriptor"]);
- }
- var desc = GetOwnProperty(obj, p);
- return FromPropertyDescriptor(desc);
-}
-
-
-// For Harmony proxies
-function ToStringArray(obj, trap) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("proxy_non_object_prop_names", [obj, trap]);
- }
- var n = ToUint32(obj.length);
- var array = new $Array(n);
- var names = { __proto__: null }; // TODO(rossberg): use sets once ready.
- for (var index = 0; index < n; index++) {
- var s = ToString(obj[index]);
- if (%HasLocalProperty(names, s)) {
- throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s]);
- }
- array[index] = s;
- names[s] = 0;
- }
- return array;
-}
-
-
-// ES5 section 15.2.3.4.
-function ObjectGetOwnPropertyNames(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object", ["Object.getOwnPropertyNames"]);
- }
- // Special handling for proxies.
- if (%IsJSProxy(obj)) {
- var handler = %GetHandler(obj);
- var names = CallTrap0(handler, "getOwnPropertyNames", void 0);
- return ToStringArray(names, "getOwnPropertyNames");
- }
-
- // Find all the indexed properties.
-
- // Get the local element names.
- var propertyNames = %GetLocalElementNames(obj);
- for (var i = 0; i < propertyNames.length; ++i) {
- propertyNames[i] = %_NumberToString(propertyNames[i]);
- }
-
- // Get names for indexed interceptor properties.
- var interceptorInfo = %GetInterceptorInfo(obj);
- if ((interceptorInfo & 1) != 0) {
- var indexedInterceptorNames =
- %GetIndexedInterceptorElementNames(obj);
- if (indexedInterceptorNames) {
- propertyNames = propertyNames.concat(indexedInterceptorNames);
- }
- }
-
- // Find all the named properties.
-
- // Get the local property names.
- propertyNames = propertyNames.concat(%GetLocalPropertyNames(obj));
-
- // Get names for named interceptor properties if any.
- if ((interceptorInfo & 2) != 0) {
- var namedInterceptorNames =
- %GetNamedInterceptorPropertyNames(obj);
- if (namedInterceptorNames) {
- propertyNames = propertyNames.concat(namedInterceptorNames);
- }
- }
-
- // Property names are expected to be unique strings,
- // but interceptors can interfere with that assumption.
- if (interceptorInfo != 0) {
- var propertySet = { __proto__: null };
- var j = 0;
- for (var i = 0; i < propertyNames.length; ++i) {
- var name = ToString(propertyNames[i]);
- // We need to check for the exact property value since for intrinsic
- // properties like toString if(propertySet["toString"]) will always
- // succeed.
- if (propertySet[name] === true) {
- continue;
- }
- propertySet[name] = true;
- propertyNames[j++] = name;
- }
- propertyNames.length = j;
- }
-
- return propertyNames;
-}
-
-
-// ES5 section 15.2.3.5.
-function ObjectCreate(proto, properties) {
- if (!IS_SPEC_OBJECT(proto) && proto !== null) {
- throw MakeTypeError("proto_object_or_null", [proto]);
- }
- var obj = new $Object();
- obj.__proto__ = proto;
- if (!IS_UNDEFINED(properties)) ObjectDefineProperties(obj, properties);
- return obj;
-}
-
-
-// ES5 section 15.2.3.6.
-function ObjectDefineProperty(obj, p, attributes) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object", ["Object.defineProperty"]);
- }
- var name = ToString(p);
- if (%IsJSProxy(obj)) {
- // Clone the attributes object for protection.
- // TODO(rossberg): not spec'ed yet, so not sure if this should involve
- // non-own properties as it does (or non-enumerable ones, as it doesn't?).
- var attributesClone = { __proto__: null };
- for (var a in attributes) {
- attributesClone[a] = attributes[a];
- }
- DefineProxyProperty(obj, name, attributesClone, true);
- // The following would implement the spec as in the current proposal,
- // but after recent comments on es-discuss, is most likely obsolete.
- /*
- var defineObj = FromGenericPropertyDescriptor(desc);
- var names = ObjectGetOwnPropertyNames(attributes);
- var standardNames =
- {value: 0, writable: 0, get: 0, set: 0, enumerable: 0, configurable: 0};
- for (var i = 0; i < names.length; i++) {
- var N = names[i];
- if (!(%HasLocalProperty(standardNames, N))) {
- var attr = GetOwnProperty(attributes, N);
- DefineOwnProperty(descObj, N, attr, true);
- }
- }
- // This is really confusing the types, but it is what the proxies spec
- // currently requires:
- desc = descObj;
- */
- } else {
- var desc = ToPropertyDescriptor(attributes);
- DefineOwnProperty(obj, name, desc, true);
- }
- return obj;
-}
-
-
-function GetOwnEnumerablePropertyNames(properties) {
- var names = new InternalArray();
- for (var key in properties) {
- if (%HasLocalProperty(properties, key)) {
- names.push(key);
- }
- }
- return names;
-}
-
-
-// ES5 section 15.2.3.7.
-function ObjectDefineProperties(obj, properties) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object", ["Object.defineProperties"]);
- }
- var props = ToObject(properties);
- var names = GetOwnEnumerablePropertyNames(props);
- var descriptors = new InternalArray();
- for (var i = 0; i < names.length; i++) {
- descriptors.push(ToPropertyDescriptor(props[names[i]]));
- }
- for (var i = 0; i < names.length; i++) {
- DefineOwnProperty(obj, names[i], descriptors[i], true);
- }
- return obj;
-}
-
-
-// Harmony proxies.
-function ProxyFix(obj) {
- var handler = %GetHandler(obj);
- var props = CallTrap0(handler, "fix", void 0);
- if (IS_UNDEFINED(props)) {
- throw MakeTypeError("handler_returned_undefined", [handler, "fix"]);
- }
-
- if (%IsJSFunctionProxy(obj)) {
- var callTrap = %GetCallTrap(obj);
- var constructTrap = %GetConstructTrap(obj);
- var code = DelegateCallAndConstruct(callTrap, constructTrap);
- %Fix(obj); // becomes a regular function
- %SetCode(obj, code);
- // TODO(rossberg): What about length and other properties? Not specified.
- // We just put in some half-reasonable defaults for now.
- var prototype = new $Object();
- $Object.defineProperty(prototype, "constructor",
- {value: obj, writable: true, enumerable: false, configurable: true});
- // TODO(v8:1530): defineProperty does not handle prototype and length.
- %FunctionSetPrototype(obj, prototype);
- obj.length = 0;
- } else {
- %Fix(obj);
- }
- ObjectDefineProperties(obj, props);
-}
-
-
-// ES5 section 15.2.3.8.
-function ObjectSeal(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object", ["Object.seal"]);
- }
- if (%IsJSProxy(obj)) {
- ProxyFix(obj);
- }
- var names = ObjectGetOwnPropertyNames(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnProperty(obj, name);
- if (desc.isConfigurable()) {
- desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
- }
- }
- %PreventExtensions(obj);
- return obj;
-}
-
-
-// ES5 section 15.2.3.9.
-function ObjectFreeze(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object", ["Object.freeze"]);
- }
- if (%IsJSProxy(obj)) {
- ProxyFix(obj);
- }
- var names = ObjectGetOwnPropertyNames(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnProperty(obj, name);
- if (desc.isWritable() || desc.isConfigurable()) {
- if (IsDataDescriptor(desc)) desc.setWritable(false);
- desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
- }
- }
- %PreventExtensions(obj);
- return obj;
-}
-
-
-// ES5 section 15.2.3.10
-function ObjectPreventExtension(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object", ["Object.preventExtension"]);
- }
- if (%IsJSProxy(obj)) {
- ProxyFix(obj);
- }
- %PreventExtensions(obj);
- return obj;
-}
-
-
-// ES5 section 15.2.3.11
-function ObjectIsSealed(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object", ["Object.isSealed"]);
- }
- if (%IsJSProxy(obj)) {
- return false;
- }
- if (%IsExtensible(obj)) {
- return false;
- }
- var names = ObjectGetOwnPropertyNames(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnProperty(obj, name);
- if (desc.isConfigurable()) return false;
- }
- return true;
-}
-
-
-// ES5 section 15.2.3.12
-function ObjectIsFrozen(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object", ["Object.isFrozen"]);
- }
- if (%IsJSProxy(obj)) {
- return false;
- }
- if (%IsExtensible(obj)) {
- return false;
- }
- var names = ObjectGetOwnPropertyNames(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnProperty(obj, name);
- if (IsDataDescriptor(desc) && desc.isWritable()) return false;
- if (desc.isConfigurable()) return false;
- }
- return true;
-}
-
-
-// ES5 section 15.2.3.13
-function ObjectIsExtensible(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object", ["Object.isExtensible"]);
- }
- if (%IsJSProxy(obj)) {
- return true;
- }
- return %IsExtensible(obj);
-}
-
-
-// Harmony egal.
-function ObjectIs(obj1, obj2) {
- if (obj1 === obj2) {
- return (obj1 !== 0) || (1 / obj1 === 1 / obj2);
- } else {
- return (obj1 !== obj1) && (obj2 !== obj2);
- }
-}
-
-
-%SetCode($Object, function(x) {
- if (%_IsConstructCall()) {
- if (x == null) return this;
- return ToObject(x);
- } else {
- if (x == null) return { };
- return ToObject(x);
- }
-});
-
-%SetExpectedNumberOfProperties($Object, 4);
-
-// ----------------------------------------------------------------------------
-// Object
-
-function SetUpObject() {
- %CheckIsBootstrapping();
- // Set Up non-enumerable functions on the Object.prototype object.
- InstallFunctions($Object.prototype, DONT_ENUM, $Array(
- "toString", ObjectToString,
- "toLocaleString", ObjectToLocaleString,
- "valueOf", ObjectValueOf,
- "hasOwnProperty", ObjectHasOwnProperty,
- "isPrototypeOf", ObjectIsPrototypeOf,
- "propertyIsEnumerable", ObjectPropertyIsEnumerable,
- "__defineGetter__", ObjectDefineGetter,
- "__lookupGetter__", ObjectLookupGetter,
- "__defineSetter__", ObjectDefineSetter,
- "__lookupSetter__", ObjectLookupSetter
- ));
- InstallFunctions($Object, DONT_ENUM, $Array(
- "keys", ObjectKeys,
- "create", ObjectCreate,
- "defineProperty", ObjectDefineProperty,
- "defineProperties", ObjectDefineProperties,
- "freeze", ObjectFreeze,
- "getPrototypeOf", ObjectGetPrototypeOf,
- "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
- "getOwnPropertyNames", ObjectGetOwnPropertyNames,
- "is", ObjectIs,
- "isExtensible", ObjectIsExtensible,
- "isFrozen", ObjectIsFrozen,
- "isSealed", ObjectIsSealed,
- "preventExtensions", ObjectPreventExtension,
- "seal", ObjectSeal
- ));
-}
-
-SetUpObject();
-
-// ----------------------------------------------------------------------------
-// Boolean
-
-function BooleanToString() {
- // NOTE: Both Boolean objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- var b = this;
- if (!IS_BOOLEAN(b)) {
- if (!IS_BOOLEAN_WRAPPER(b)) {
- throw new $TypeError('Boolean.prototype.toString is not generic');
- }
- b = %_ValueOf(b);
- }
- return b ? 'true' : 'false';
-}
-
-
-function BooleanValueOf() {
- // NOTE: Both Boolean objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this)) {
- throw new $TypeError('Boolean.prototype.valueOf is not generic');
- }
- return %_ValueOf(this);
-}
-
-
-// ----------------------------------------------------------------------------
-
-
-function SetUpBoolean () {
- %CheckIsBootstrapping();
- InstallFunctions($Boolean.prototype, DONT_ENUM, $Array(
- "toString", BooleanToString,
- "valueOf", BooleanValueOf
- ));
-}
-
-SetUpBoolean();
-
-
-// ----------------------------------------------------------------------------
-// Number
-
-// Set the Number function and constructor.
-%SetCode($Number, function(x) {
- var value = %_ArgumentsLength() == 0 ? 0 : ToNumber(x);
- if (%_IsConstructCall()) {
- %_SetValueOf(this, value);
- } else {
- return value;
- }
-});
-
-%FunctionSetPrototype($Number, new $Number(0));
-
-// ECMA-262 section 15.7.4.2.
-function NumberToString(radix) {
- // NOTE: Both Number objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- var number = this;
- if (!IS_NUMBER(this)) {
- if (!IS_NUMBER_WRAPPER(this)) {
- throw new $TypeError('Number.prototype.toString is not generic');
- }
- // Get the value of this number in case it's an object.
- number = %_ValueOf(this);
- }
- // Fast case: Convert number in radix 10.
- if (IS_UNDEFINED(radix) || radix === 10) {
- return %_NumberToString(number);
- }
-
- // Convert the radix to an integer and check the range.
- radix = TO_INTEGER(radix);
- if (radix < 2 || radix > 36) {
- throw new $RangeError('toString() radix argument must be between 2 and 36');
- }
- // Convert the number to a string in the given radix.
- return %NumberToRadixString(number, radix);
-}
-
-
-// ECMA-262 section 15.7.4.3
-function NumberToLocaleString() {
- return %_CallFunction(this, NumberToString);
-}
-
-
-// ECMA-262 section 15.7.4.4
-function NumberValueOf() {
- // NOTE: Both Number objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this)) {
- throw new $TypeError('Number.prototype.valueOf is not generic');
- }
- return %_ValueOf(this);
-}
-
-
-// ECMA-262 section 15.7.4.5
-function NumberToFixed(fractionDigits) {
- var x = this;
- if (!IS_NUMBER(this)) {
- if (!IS_NUMBER_WRAPPER(this)) {
- throw MakeTypeError("incompatible_method_receiver",
- ["Number.prototype.toFixed", this]);
- }
- // Get the value of this number in case it's an object.
- x = %_ValueOf(this);
- }
- var f = TO_INTEGER(fractionDigits);
-
- if (f < 0 || f > 20) {
- throw new $RangeError("toFixed() digits argument must be between 0 and 20");
- }
-
- if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == 1/0) return "Infinity";
- if (x == -1/0) return "-Infinity";
-
- return %NumberToFixed(x, f);
-}
-
-
-// ECMA-262 section 15.7.4.6
-function NumberToExponential(fractionDigits) {
- var x = this;
- if (!IS_NUMBER(this)) {
- if (!IS_NUMBER_WRAPPER(this)) {
- throw MakeTypeError("incompatible_method_receiver",
- ["Number.prototype.toExponential", this]);
- }
- // Get the value of this number in case it's an object.
- x = %_ValueOf(this);
- }
- var f = IS_UNDEFINED(fractionDigits) ? void 0 : TO_INTEGER(fractionDigits);
-
- if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == 1/0) return "Infinity";
- if (x == -1/0) return "-Infinity";
-
- if (IS_UNDEFINED(f)) {
- f = -1; // Signal for runtime function that f is not defined.
- } else if (f < 0 || f > 20) {
- throw new $RangeError("toExponential() argument must be between 0 and 20");
- }
- return %NumberToExponential(x, f);
-}
-
-
-// ECMA-262 section 15.7.4.7
-function NumberToPrecision(precision) {
- var x = this;
- if (!IS_NUMBER(this)) {
- if (!IS_NUMBER_WRAPPER(this)) {
- throw MakeTypeError("incompatible_method_receiver",
- ["Number.prototype.toPrecision", this]);
- }
- // Get the value of this number in case it's an object.
- x = %_ValueOf(this);
- }
- if (IS_UNDEFINED(precision)) return ToString(%_ValueOf(this));
- var p = TO_INTEGER(precision);
-
- if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == 1/0) return "Infinity";
- if (x == -1/0) return "-Infinity";
-
- if (p < 1 || p > 21) {
- throw new $RangeError("toPrecision() argument must be between 1 and 21");
- }
- return %NumberToPrecision(x, p);
-}
-
-
-// Harmony isFinite.
-function NumberIsFinite(number) {
- return IS_NUMBER(number) && NUMBER_IS_FINITE(number);
-}
-
-
-// Harmony isNaN.
-function NumberIsNaN(number) {
- return IS_NUMBER(number) && NUMBER_IS_NAN(number);
-}
-
-
-// ----------------------------------------------------------------------------
-
-function SetUpNumber() {
- %CheckIsBootstrapping();
- %OptimizeObjectForAddingMultipleProperties($Number.prototype, 8);
- // Set up the constructor property on the Number prototype object.
- %SetProperty($Number.prototype, "constructor", $Number, DONT_ENUM);
-
- %OptimizeObjectForAddingMultipleProperties($Number, 5);
- // ECMA-262 section 15.7.3.1.
- %SetProperty($Number,
- "MAX_VALUE",
- 1.7976931348623157e+308,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.2.
- %SetProperty($Number, "MIN_VALUE", 5e-324,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.3.
- %SetProperty($Number, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.4.
- %SetProperty($Number,
- "NEGATIVE_INFINITY",
- -1/0,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.5.
- %SetProperty($Number,
- "POSITIVE_INFINITY",
- 1/0,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %ToFastProperties($Number);
-
- // Set up non-enumerable functions on the Number prototype object.
- InstallFunctions($Number.prototype, DONT_ENUM, $Array(
- "toString", NumberToString,
- "toLocaleString", NumberToLocaleString,
- "valueOf", NumberValueOf,
- "toFixed", NumberToFixed,
- "toExponential", NumberToExponential,
- "toPrecision", NumberToPrecision
- ));
- InstallFunctions($Number, DONT_ENUM, $Array(
- "isFinite", NumberIsFinite,
- "isNaN", NumberIsNaN
- ));
-}
-
-SetUpNumber();
-
-
-// ----------------------------------------------------------------------------
-// Function
-
-$Function.prototype.constructor = $Function;
-
-function FunctionSourceString(func) {
- while (%IsJSFunctionProxy(func)) {
- func = %GetCallTrap(func);
- }
-
- if (!IS_FUNCTION(func)) {
- throw new $TypeError('Function.prototype.toString is not generic');
- }
-
- var source = %FunctionGetSourceCode(func);
- if (!IS_STRING(source) || %FunctionIsBuiltin(func)) {
- var name = %FunctionGetName(func);
- if (name) {
- // Mimic what KJS does.
- return 'function ' + name + '() { [native code] }';
- } else {
- return 'function () { [native code] }';
- }
- }
-
- var name = %FunctionNameShouldPrintAsAnonymous(func)
- ? 'anonymous'
- : %FunctionGetName(func);
- return 'function ' + name + source;
-}
-
-
-function FunctionToString() {
- return FunctionSourceString(this);
-}
-
-
-// ES5 15.3.4.5
-function FunctionBind(this_arg) { // Length is 1.
- if (!IS_SPEC_FUNCTION(this)) {
- throw new $TypeError('Bind must be called on a function');
- }
- var boundFunction = function () {
- // Poison .arguments and .caller, but is otherwise not detectable.
- "use strict";
- // This function must not use any object literals (Object, Array, RegExp),
- // since the literals-array is being used to store the bound data.
- if (%_IsConstructCall()) {
- return %NewObjectFromBound(boundFunction);
- }
- var bindings = %BoundFunctionGetBindings(boundFunction);
-
- var argc = %_ArgumentsLength();
- if (argc == 0) {
- return %Apply(bindings[0], bindings[1], bindings, 2, bindings.length - 2);
- }
- if (bindings.length === 2) {
- return %Apply(bindings[0], bindings[1], arguments, 0, argc);
- }
- var bound_argc = bindings.length - 2;
- var argv = new InternalArray(bound_argc + argc);
- for (var i = 0; i < bound_argc; i++) {
- argv[i] = bindings[i + 2];
- }
- for (var j = 0; j < argc; j++) {
- argv[i++] = %_Arguments(j);
- }
- return %Apply(bindings[0], bindings[1], argv, 0, bound_argc + argc);
- };
-
- %FunctionRemovePrototype(boundFunction);
- var new_length = 0;
- if (%_ClassOf(this) == "Function") {
- // Function or FunctionProxy.
- var old_length = this.length;
- // FunctionProxies might provide a non-UInt32 value. If so, ignore it.
- if ((typeof old_length === "number") &&
- ((old_length >>> 0) === old_length)) {
- var argc = %_ArgumentsLength();
- if (argc > 0) argc--; // Don't count the thisArg as parameter.
- new_length = old_length - argc;
- if (new_length < 0) new_length = 0;
- }
- }
- // This runtime function finds any remaining arguments on the stack,
- // so we don't pass the arguments object.
- var result = %FunctionBindArguments(boundFunction, this,
- this_arg, new_length);
-
- // We already have caller and arguments properties on functions,
- // which are non-configurable. It therefore makes no sence to
- // try to redefine these as defined by the spec. The spec says
- // that bind should make these throw a TypeError if get or set
- // is called and make them non-enumerable and non-configurable.
- // To be consistent with our normal functions we leave this as it is.
- // TODO(lrn): Do set these to be thrower.
- return result;
-}
-
-
-function NewFunction(arg1) { // length == 1
- var n = %_ArgumentsLength();
- var p = '';
- if (n > 1) {
- p = new InternalArray(n - 1);
- for (var i = 0; i < n - 1; i++) p[i] = %_Arguments(i);
- p = Join(p, n - 1, ',', NonStringToString);
- // If the formal parameters string include ) - an illegal
- // character - it may make the combined function expression
- // compile. We avoid this problem by checking for this early on.
- if (p.indexOf(')') != -1) throw MakeSyntaxError('unable_to_parse',[]);
- }
- var body = (n > 0) ? ToString(%_Arguments(n - 1)) : '';
- var source = '(function(' + p + ') {\n' + body + '\n})';
-
- // The call to SetNewFunctionAttributes will ensure the prototype
- // property of the resulting function is enumerable (ECMA262, 15.3.5.2).
- var global_receiver = %GlobalReceiver(global);
- var f = %_CallFunction(global_receiver, %CompileString(source));
-
- %FunctionMarkNameShouldPrintAsAnonymous(f);
- return %SetNewFunctionAttributes(f);
-}
-
-%SetCode($Function, NewFunction);
-
-// ----------------------------------------------------------------------------
-
-function SetUpFunction() {
- %CheckIsBootstrapping();
- InstallFunctions($Function.prototype, DONT_ENUM, $Array(
- "bind", FunctionBind,
- "toString", FunctionToString
- ));
-}
-
-SetUpFunction();
diff --git a/src/3rdparty/v8/src/v8preparserdll-main.cc b/src/3rdparty/v8/src/v8preparserdll-main.cc
deleted file mode 100644
index c0344d3..0000000
--- a/src/3rdparty/v8/src/v8preparserdll-main.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <windows.h>
-
-#include "../include/v8-preparser.h"
-
-extern "C" {
-BOOL WINAPI DllMain(HANDLE hinstDLL,
- DWORD dwReason,
- LPVOID lpvReserved) {
- // Do nothing.
- return TRUE;
-}
-}
diff --git a/src/3rdparty/v8/src/v8threads.cc b/src/3rdparty/v8/src/v8threads.cc
deleted file mode 100644
index 925e198..0000000
--- a/src/3rdparty/v8/src/v8threads.cc
+++ /dev/null
@@ -1,493 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "bootstrapper.h"
-#include "debug.h"
-#include "execution.h"
-#include "v8threads.h"
-#include "regexp-stack.h"
-
-namespace v8 {
-
-
-// Track whether this V8 instance has ever called v8::Locker. This allows the
-// API code to verify that the lock is always held when V8 is being entered.
-bool Locker::active_ = false;
-
-
-Locker::Locker() {
- Initialize(i::Isolate::GetDefaultIsolateForLocking());
-}
-
-
-// Once the Locker is initialized, the current thread will be guaranteed to have
-// the lock for a given isolate.
-void Locker::Initialize(v8::Isolate* isolate) {
- ASSERT(isolate != NULL);
- has_lock_= false;
- top_level_ = true;
- isolate_ = reinterpret_cast<i::Isolate*>(isolate);
- // Record that the Locker has been used at least once.
- active_ = true;
- // Get the big lock if necessary.
- if (!isolate_->thread_manager()->IsLockedByCurrentThread()) {
- isolate_->thread_manager()->Lock();
- has_lock_ = true;
-
- // Make sure that V8 is initialized. Archiving of threads interferes
- // with deserialization by adding additional root pointers, so we must
- // initialize here, before anyone can call ~Locker() or Unlocker().
- if (!isolate_->IsInitialized()) {
- isolate_->Enter();
- V8::Initialize();
- isolate_->Exit();
- }
-
- // This may be a locker within an unlocker in which case we have to
- // get the saved state for this thread and restore it.
- if (isolate_->thread_manager()->RestoreThread()) {
- top_level_ = false;
- } else {
- internal::ExecutionAccess access(isolate_);
- isolate_->stack_guard()->ClearThread(access);
- isolate_->stack_guard()->InitThread(access);
- }
- if (isolate_->IsDefaultIsolate()) {
- // This only enters if not yet entered.
- internal::Isolate::EnterDefaultIsolate();
- }
- }
- ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
-}
-
-
-bool Locker::IsLocked(v8::Isolate* isolate) {
- ASSERT(isolate != NULL);
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- return internal_isolate->thread_manager()->IsLockedByCurrentThread();
-}
-
-
-bool Locker::IsActive() {
- return active_;
-}
-
-
-Locker::~Locker() {
- ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
- if (has_lock_) {
- if (isolate_->IsDefaultIsolate()) {
- isolate_->Exit();
- }
- if (top_level_) {
- isolate_->thread_manager()->FreeThreadResources();
- } else {
- isolate_->thread_manager()->ArchiveThread();
- }
- isolate_->thread_manager()->Unlock();
- }
-}
-
-
-Unlocker::Unlocker() {
- Initialize(i::Isolate::GetDefaultIsolateForLocking());
-}
-
-
-void Unlocker::Initialize(v8::Isolate* isolate) {
- ASSERT(isolate != NULL);
- isolate_ = reinterpret_cast<i::Isolate*>(isolate);
- ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
- if (isolate_->IsDefaultIsolate()) {
- isolate_->Exit();
- }
- isolate_->thread_manager()->ArchiveThread();
- isolate_->thread_manager()->Unlock();
-}
-
-
-Unlocker::~Unlocker() {
- ASSERT(!isolate_->thread_manager()->IsLockedByCurrentThread());
- isolate_->thread_manager()->Lock();
- isolate_->thread_manager()->RestoreThread();
- if (isolate_->IsDefaultIsolate()) {
- isolate_->Enter();
- }
-}
-
-
-void Locker::StartPreemption(int every_n_ms) {
- v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
-}
-
-
-void Locker::StopPreemption() {
- v8::internal::ContextSwitcher::StopPreemption();
-}
-
-
-namespace internal {
-
-
-bool ThreadManager::RestoreThread() {
- ASSERT(IsLockedByCurrentThread());
- // First check whether the current thread has been 'lazily archived', i.e.
- // not archived at all. If that is the case we put the state storage we
- // had prepared back in the free list, since we didn't need it after all.
- if (lazily_archived_thread_.Equals(ThreadId::Current())) {
- lazily_archived_thread_ = ThreadId::Invalid();
- Isolate::PerIsolateThreadData* per_thread =
- isolate_->FindPerThreadDataForThisThread();
- ASSERT(per_thread != NULL);
- ASSERT(per_thread->thread_state() == lazily_archived_thread_state_);
- lazily_archived_thread_state_->set_id(ThreadId::Invalid());
- lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
- lazily_archived_thread_state_ = NULL;
- per_thread->set_thread_state(NULL);
- return true;
- }
-
- // Make sure that the preemption thread cannot modify the thread state while
- // it is being archived or restored.
- ExecutionAccess access(isolate_);
-
- // If there is another thread that was lazily archived then we have to really
- // archive it now.
- if (lazily_archived_thread_.IsValid()) {
- EagerlyArchiveThread();
- }
- Isolate::PerIsolateThreadData* per_thread =
- isolate_->FindPerThreadDataForThisThread();
- if (per_thread == NULL || per_thread->thread_state() == NULL) {
- // This is a new thread.
- isolate_->stack_guard()->InitThread(access);
- return false;
- }
- ThreadState* state = per_thread->thread_state();
- char* from = state->data();
- from = isolate_->handle_scope_implementer()->RestoreThread(from);
- from = isolate_->RestoreThread(from);
- from = Relocatable::RestoreState(isolate_, from);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- from = isolate_->debug()->RestoreDebug(from);
-#endif
- from = isolate_->stack_guard()->RestoreStackGuard(from);
- from = isolate_->regexp_stack()->RestoreStack(from);
- from = isolate_->bootstrapper()->RestoreState(from);
- per_thread->set_thread_state(NULL);
- if (state->terminate_on_restore()) {
- isolate_->stack_guard()->TerminateExecution();
- state->set_terminate_on_restore(false);
- }
- state->set_id(ThreadId::Invalid());
- state->Unlink();
- state->LinkInto(ThreadState::FREE_LIST);
- return true;
-}
-
-
-void ThreadManager::Lock() {
- mutex_->Lock();
- mutex_owner_ = ThreadId::Current();
- ASSERT(IsLockedByCurrentThread());
-}
-
-
-void ThreadManager::Unlock() {
- mutex_owner_ = ThreadId::Invalid();
- mutex_->Unlock();
-}
-
-
-static int ArchiveSpacePerThread() {
- return HandleScopeImplementer::ArchiveSpacePerThread() +
- Isolate::ArchiveSpacePerThread() +
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug::ArchiveSpacePerThread() +
-#endif
- StackGuard::ArchiveSpacePerThread() +
- RegExpStack::ArchiveSpacePerThread() +
- Bootstrapper::ArchiveSpacePerThread() +
- Relocatable::ArchiveSpacePerThread();
-}
-
-
-ThreadState::ThreadState(ThreadManager* thread_manager)
- : id_(ThreadId::Invalid()),
- terminate_on_restore_(false),
- data_(NULL),
- next_(this),
- previous_(this),
- thread_manager_(thread_manager) {
-}
-
-
-ThreadState::~ThreadState() {
- DeleteArray<char>(data_);
-}
-
-
-void ThreadState::AllocateSpace() {
- data_ = NewArray<char>(ArchiveSpacePerThread());
-}
-
-
-void ThreadState::Unlink() {
- next_->previous_ = previous_;
- previous_->next_ = next_;
-}
-
-
-void ThreadState::LinkInto(List list) {
- ThreadState* flying_anchor =
- list == FREE_LIST ? thread_manager_->free_anchor_
- : thread_manager_->in_use_anchor_;
- next_ = flying_anchor->next_;
- previous_ = flying_anchor;
- flying_anchor->next_ = this;
- next_->previous_ = this;
-}
-
-
-ThreadState* ThreadManager::GetFreeThreadState() {
- ThreadState* gotten = free_anchor_->next_;
- if (gotten == free_anchor_) {
- ThreadState* new_thread_state = new ThreadState(this);
- new_thread_state->AllocateSpace();
- return new_thread_state;
- }
- return gotten;
-}
-
-
-// Gets the first in the list of archived threads.
-ThreadState* ThreadManager::FirstThreadStateInUse() {
- return in_use_anchor_->Next();
-}
-
-
-ThreadState* ThreadState::Next() {
- if (next_ == thread_manager_->in_use_anchor_) return NULL;
- return next_;
-}
-
-
-// Thread ids must start with 1, because in TLS having thread id 0 can't
-// be distinguished from not having a thread id at all (since NULL is
-// defined as 0.)
-ThreadManager::ThreadManager()
- : mutex_(OS::CreateMutex()),
- mutex_owner_(ThreadId::Invalid()),
- lazily_archived_thread_(ThreadId::Invalid()),
- lazily_archived_thread_state_(NULL),
- free_anchor_(NULL),
- in_use_anchor_(NULL) {
- free_anchor_ = new ThreadState(this);
- in_use_anchor_ = new ThreadState(this);
-}
-
-
-ThreadManager::~ThreadManager() {
- delete mutex_;
- DeleteThreadStateList(free_anchor_);
- DeleteThreadStateList(in_use_anchor_);
-}
-
-
-void ThreadManager::DeleteThreadStateList(ThreadState* anchor) {
- // The list starts and ends with the anchor.
- for (ThreadState* current = anchor->next_; current != anchor;) {
- ThreadState* next = current->next_;
- delete current;
- current = next;
- }
- delete anchor;
-}
-
-
-void ThreadManager::ArchiveThread() {
- ASSERT(lazily_archived_thread_.Equals(ThreadId::Invalid()));
- ASSERT(!IsArchived());
- ASSERT(IsLockedByCurrentThread());
- ThreadState* state = GetFreeThreadState();
- state->Unlink();
- Isolate::PerIsolateThreadData* per_thread =
- isolate_->FindOrAllocatePerThreadDataForThisThread();
- per_thread->set_thread_state(state);
- lazily_archived_thread_ = ThreadId::Current();
- lazily_archived_thread_state_ = state;
- ASSERT(state->id().Equals(ThreadId::Invalid()));
- state->set_id(CurrentId());
- ASSERT(!state->id().Equals(ThreadId::Invalid()));
-}
-
-
-void ThreadManager::EagerlyArchiveThread() {
- ASSERT(IsLockedByCurrentThread());
- ThreadState* state = lazily_archived_thread_state_;
- state->LinkInto(ThreadState::IN_USE_LIST);
- char* to = state->data();
- // Ensure that data containing GC roots are archived first, and handle them
- // in ThreadManager::Iterate(ObjectVisitor*).
- to = isolate_->handle_scope_implementer()->ArchiveThread(to);
- to = isolate_->ArchiveThread(to);
- to = Relocatable::ArchiveState(isolate_, to);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- to = isolate_->debug()->ArchiveDebug(to);
-#endif
- to = isolate_->stack_guard()->ArchiveStackGuard(to);
- to = isolate_->regexp_stack()->ArchiveStack(to);
- to = isolate_->bootstrapper()->ArchiveState(to);
- lazily_archived_thread_ = ThreadId::Invalid();
- lazily_archived_thread_state_ = NULL;
-}
-
-
-void ThreadManager::FreeThreadResources() {
- isolate_->handle_scope_implementer()->FreeThreadResources();
- isolate_->FreeThreadResources();
-#ifdef ENABLE_DEBUGGER_SUPPORT
- isolate_->debug()->FreeThreadResources();
-#endif
- isolate_->stack_guard()->FreeThreadResources();
- isolate_->regexp_stack()->FreeThreadResources();
- isolate_->bootstrapper()->FreeThreadResources();
-}
-
-
-bool ThreadManager::IsArchived() {
- Isolate::PerIsolateThreadData* data =
- isolate_->FindPerThreadDataForThisThread();
- return data != NULL && data->thread_state() != NULL;
-}
-
-void ThreadManager::Iterate(ObjectVisitor* v) {
- // Expecting no threads during serialization/deserialization
- for (ThreadState* state = FirstThreadStateInUse();
- state != NULL;
- state = state->Next()) {
- char* data = state->data();
- data = HandleScopeImplementer::Iterate(v, data);
- data = isolate_->Iterate(v, data);
- data = Relocatable::Iterate(v, data);
- }
-}
-
-
-void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
- for (ThreadState* state = FirstThreadStateInUse();
- state != NULL;
- state = state->Next()) {
- char* data = state->data();
- data += HandleScopeImplementer::ArchiveSpacePerThread();
- isolate_->IterateThread(v, data);
- }
-}
-
-
-ThreadId ThreadManager::CurrentId() {
- return ThreadId::Current();
-}
-
-
-void ThreadManager::TerminateExecution(ThreadId thread_id) {
- for (ThreadState* state = FirstThreadStateInUse();
- state != NULL;
- state = state->Next()) {
- if (thread_id.Equals(state->id())) {
- state->set_terminate_on_restore(true);
- }
- }
-}
-
-
-ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
- : Thread("v8:CtxtSwitcher"),
- keep_going_(true),
- sleep_ms_(every_n_ms),
- isolate_(isolate) {
-}
-
-
-// Set the scheduling interval of V8 threads. This function starts the
-// ContextSwitcher thread if needed.
-void ContextSwitcher::StartPreemption(int every_n_ms) {
- Isolate* isolate = Isolate::Current();
- ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
- if (isolate->context_switcher() == NULL) {
- // If the ContextSwitcher thread is not running at the moment start it now.
- isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
- isolate->context_switcher()->Start();
- } else {
- // ContextSwitcher thread is already running, so we just change the
- // scheduling interval.
- isolate->context_switcher()->sleep_ms_ = every_n_ms;
- }
-}
-
-
-// Disable preemption of V8 threads. If multiple threads want to use V8 they
-// must cooperatively schedule amongst them from this point on.
-void ContextSwitcher::StopPreemption() {
- Isolate* isolate = Isolate::Current();
- ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
- if (isolate->context_switcher() != NULL) {
- // The ContextSwitcher thread is running. We need to stop it and release
- // its resources.
- isolate->context_switcher()->keep_going_ = false;
- // Wait for the ContextSwitcher thread to exit.
- isolate->context_switcher()->Join();
- // Thread has exited, now we can delete it.
- delete(isolate->context_switcher());
- isolate->set_context_switcher(NULL);
- }
-}
-
-
-// Main loop of the ContextSwitcher thread: Preempt the currently running V8
-// thread at regular intervals.
-void ContextSwitcher::Run() {
- while (keep_going_) {
- OS::Sleep(sleep_ms_);
- isolate()->stack_guard()->Preempt();
- }
-}
-
-
-// Acknowledge the preemption by the receiving thread.
-void ContextSwitcher::PreemptionReceived() {
- ASSERT(Locker::IsLocked(i::Isolate::GetDefaultIsolateForLocking()));
- // There is currently no accounting being done for this. But could be in the
- // future, which is why we leave this in.
-}
-
-
-} // namespace internal
-} // namespace v8
diff --git a/src/3rdparty/v8/src/v8threads.h b/src/3rdparty/v8/src/v8threads.h
deleted file mode 100644
index 8dce860..0000000
--- a/src/3rdparty/v8/src/v8threads.h
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8THREADS_H_
-#define V8_V8THREADS_H_
-
-namespace v8 {
-namespace internal {
-
-
-class ThreadState {
- public:
- // Returns NULL after the last one.
- ThreadState* Next();
-
- enum List {FREE_LIST, IN_USE_LIST};
-
- void LinkInto(List list);
- void Unlink();
-
- // Id of thread.
- void set_id(ThreadId id) { id_ = id; }
- ThreadId id() { return id_; }
-
- // Should the thread be terminated when it is restored?
- bool terminate_on_restore() { return terminate_on_restore_; }
- void set_terminate_on_restore(bool terminate_on_restore) {
- terminate_on_restore_ = terminate_on_restore;
- }
-
- // Get data area for archiving a thread.
- char* data() { return data_; }
-
- private:
- explicit ThreadState(ThreadManager* thread_manager);
- ~ThreadState();
-
- void AllocateSpace();
-
- ThreadId id_;
- bool terminate_on_restore_;
- char* data_;
- ThreadState* next_;
- ThreadState* previous_;
-
- ThreadManager* thread_manager_;
-
- friend class ThreadManager;
-};
-
-
-// Defined in isolate.h.
-class ThreadLocalTop;
-
-
-class ThreadVisitor {
- public:
- // ThreadLocalTop may be only available during this call.
- virtual void VisitThread(Isolate* isolate, ThreadLocalTop* top) = 0;
-
- protected:
- virtual ~ThreadVisitor() {}
-};
-
-
-class ThreadManager {
- public:
- void Lock();
- void Unlock();
-
- void ArchiveThread();
- bool RestoreThread();
- void FreeThreadResources();
- bool IsArchived();
-
- void Iterate(ObjectVisitor* v);
- void IterateArchivedThreads(ThreadVisitor* v);
- bool IsLockedByCurrentThread() {
- return mutex_owner_.Equals(ThreadId::Current());
- }
-
- ThreadId CurrentId();
-
- void TerminateExecution(ThreadId thread_id);
-
- // Iterate over in-use states.
- ThreadState* FirstThreadStateInUse();
- ThreadState* GetFreeThreadState();
-
- private:
- ThreadManager();
- ~ThreadManager();
-
- void DeleteThreadStateList(ThreadState* anchor);
-
- void EagerlyArchiveThread();
-
- Mutex* mutex_;
- ThreadId mutex_owner_;
- ThreadId lazily_archived_thread_;
- ThreadState* lazily_archived_thread_state_;
-
- // In the following two lists there is always at least one object on the list.
- // The first object is a flying anchor that is only there to simplify linking
- // and unlinking.
- // Head of linked list of free states.
- ThreadState* free_anchor_;
- // Head of linked list of states in use.
- ThreadState* in_use_anchor_;
-
- Isolate* isolate_;
-
- friend class Isolate;
- friend class ThreadState;
-};
-
-
-// The ContextSwitcher thread is used to schedule regular preemptions to
-// multiple running V8 threads. Generally it is necessary to call
-// StartPreemption if there is more than one thread running. If not, a single
-// JavaScript can take full control of V8 and not allow other threads to run.
-class ContextSwitcher: public Thread {
- public:
- // Set the preemption interval for the ContextSwitcher thread.
- static void StartPreemption(int every_n_ms);
-
- // Stop sending preemption requests to threads.
- static void StopPreemption();
-
- // Preempted thread needs to call back to the ContextSwitcher to acknowledge
- // the handling of a preemption request.
- static void PreemptionReceived();
-
- private:
- ContextSwitcher(Isolate* isolate, int every_n_ms);
-
- Isolate* isolate() const { return isolate_; }
-
- void Run();
-
- bool keep_going_;
- int sleep_ms_;
- Isolate* isolate_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_V8THREADS_H_
diff --git a/src/3rdparty/v8/src/v8utils.cc b/src/3rdparty/v8/src/v8utils.cc
deleted file mode 100644
index 2dfc1ea..0000000
--- a/src/3rdparty/v8/src/v8utils.cc
+++ /dev/null
@@ -1,282 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-
-#include "v8.h"
-
-#include "platform.h"
-
-#ifndef _WIN32_WCE
-#include "sys/stat.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-void PrintF(const char* format, ...) {
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
-}
-
-
-void FPrintF(FILE* out, const char* format, ...) {
- va_list arguments;
- va_start(arguments, format);
- OS::VFPrint(out, format, arguments);
- va_end(arguments);
-}
-
-
-void PrintPID(const char* format, ...) {
- OS::Print("[%d] ", OS::GetCurrentProcessId());
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
-}
-
-
-void Flush(FILE* out) {
- fflush(out);
-}
-
-
-char* ReadLine(const char* prompt) {
- char* result = NULL;
- char line_buf[256];
- int offset = 0;
- bool keep_going = true;
- fprintf(stdout, "%s", prompt);
- fflush(stdout);
- while (keep_going) {
- if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) {
- // fgets got an error. Just give up.
- if (result != NULL) {
- DeleteArray(result);
- }
- return NULL;
- }
- int len = StrLength(line_buf);
- if (len > 1 &&
- line_buf[len - 2] == '\\' &&
- line_buf[len - 1] == '\n') {
- // When we read a line that ends with a "\" we remove the escape and
- // append the remainder.
- line_buf[len - 2] = '\n';
- line_buf[len - 1] = 0;
- len -= 1;
- } else if ((len > 0) && (line_buf[len - 1] == '\n')) {
- // Since we read a new line we are done reading the line. This
- // will exit the loop after copying this buffer into the result.
- keep_going = false;
- }
- if (result == NULL) {
- // Allocate the initial result and make room for the terminating '\0'
- result = NewArray<char>(len + 1);
- } else {
- // Allocate a new result with enough room for the new addition.
- int new_len = offset + len + 1;
- char* new_result = NewArray<char>(new_len);
- // Copy the existing input into the new array and set the new
- // array as the result.
- memcpy(new_result, result, offset * kCharSize);
- DeleteArray(result);
- result = new_result;
- }
- // Copy the newly read line into the result.
- memcpy(result + offset, line_buf, len * kCharSize);
- offset += len;
- }
- ASSERT(result != NULL);
- result[offset] = '\0';
- return result;
-}
-
-
-char* ReadCharsFromFile(FILE* file,
- int* size,
- int extra_space,
- bool verbose,
- const char* filename) {
- if (file == NULL || fseek(file, 0, SEEK_END) != 0) {
- if (verbose) {
- OS::PrintError("Cannot read from file %s.\n", filename);
- }
- return NULL;
- }
-
- // Get the size of the file and rewind it.
- *size = ftell(file);
-#ifdef _WIN32_WCE
- fseek(file, 0, SEEK_SET);
-#else
- rewind(file);
-#endif // _WIN32_WCE
-
- char* result = NewArray<char>(*size + extra_space);
- for (int i = 0; i < *size && feof(file) == 0;) {
- int read = static_cast<int>(fread(&result[i], 1, *size - i, file));
- if (read != (*size - i) && ferror(file) != 0) {
- fclose(file);
- DeleteArray(result);
- return NULL;
- }
- i += read;
- }
- return result;
-}
-
-
-char* ReadCharsFromFile(const char* filename,
- int* size,
- int extra_space,
- bool verbose) {
- FILE* file = OS::FOpen(filename, "rb");
- char* result = ReadCharsFromFile(file, size, extra_space, verbose, filename);
- if (file != NULL) fclose(file);
- return result;
-}
-
-
-byte* ReadBytes(const char* filename, int* size, bool verbose) {
- char* chars = ReadCharsFromFile(filename, size, 0, verbose);
- return reinterpret_cast<byte*>(chars);
-}
-
-
-static Vector<const char> SetVectorContents(char* chars,
- int size,
- bool* exists) {
- if (!chars) {
- *exists = false;
- return Vector<const char>::empty();
- }
- chars[size] = '\0';
- *exists = true;
- return Vector<const char>(chars, size);
-}
-
-
-Vector<const char> ReadFile(const char* filename,
- bool* exists,
- bool verbose) {
- int size;
- char* result = ReadCharsFromFile(filename, &size, 1, verbose);
- return SetVectorContents(result, size, exists);
-}
-
-
-Vector<const char> ReadFile(FILE* file,
- bool* exists,
- bool verbose) {
- int size;
- char* result = ReadCharsFromFile(file, &size, 1, verbose, "");
- return SetVectorContents(result, size, exists);
-}
-
-
-int WriteCharsToFile(const char* str, int size, FILE* f) {
- int total = 0;
- while (total < size) {
- int write = static_cast<int>(fwrite(str, 1, size - total, f));
- if (write == 0) {
- return total;
- }
- total += write;
- str += write;
- }
- return total;
-}
-
-
-int AppendChars(const char* filename,
- const char* str,
- int size,
- bool verbose) {
- FILE* f = OS::FOpen(filename, "ab");
- if (f == NULL) {
- if (verbose) {
- OS::PrintError("Cannot open file %s for writing.\n", filename);
- }
- return 0;
- }
- int written = WriteCharsToFile(str, size, f);
- fclose(f);
- return written;
-}
-
-
-int WriteChars(const char* filename,
- const char* str,
- int size,
- bool verbose) {
- FILE* f = OS::FOpen(filename, "wb");
- if (f == NULL) {
- if (verbose) {
- OS::PrintError("Cannot open file %s for writing.\n", filename);
- }
- return 0;
- }
- int written = WriteCharsToFile(str, size, f);
- fclose(f);
- return written;
-}
-
-
-int WriteBytes(const char* filename,
- const byte* bytes,
- int size,
- bool verbose) {
- const char* str = reinterpret_cast<const char*>(bytes);
- return WriteChars(filename, str, size, verbose);
-}
-
-
-
-void StringBuilder::AddFormatted(const char* format, ...) {
- va_list arguments;
- va_start(arguments, format);
- AddFormattedList(format, arguments);
- va_end(arguments);
-}
-
-
-void StringBuilder::AddFormattedList(const char* format, va_list list) {
- ASSERT(!is_finalized() && position_ < buffer_.length());
- int n = OS::VSNPrintF(buffer_ + position_, format, list);
- if (n < 0 || n >= (buffer_.length() - position_)) {
- position_ = buffer_.length();
- } else {
- position_ += n;
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/v8utils.h b/src/3rdparty/v8/src/v8utils.h
deleted file mode 100644
index 937e93d..0000000
--- a/src/3rdparty/v8/src/v8utils.h
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8UTILS_H_
-#define V8_V8UTILS_H_
-
-#include "utils.h"
-#include "platform.h" // For va_list on Solaris.
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// I/O support.
-
-#if __GNUC__ >= 4
-// On gcc we can ask the compiler to check the types of %d-style format
-// specifiers and their associated arguments. TODO(erikcorry) fix this
-// so it works on MacOSX.
-#if defined(__MACH__) && defined(__APPLE__)
-#define PRINTF_CHECKING
-#define FPRINTF_CHECKING
-#else // MacOsX.
-#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
-#define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3)))
-#endif
-#else
-#define PRINTF_CHECKING
-#define FPRINTF_CHECKING
-#endif
-
-// Our version of printf().
-void PRINTF_CHECKING PrintF(const char* format, ...);
-void FPRINTF_CHECKING FPrintF(FILE* out, const char* format, ...);
-
-// Prepends the current process ID to the output.
-void PRINTF_CHECKING PrintPID(const char* format, ...);
-
-// Our version of fflush.
-void Flush(FILE* out);
-
-inline void Flush() {
- Flush(stdout);
-}
-
-
-// Read a line of characters after printing the prompt to stdout. The resulting
-// char* needs to be disposed off with DeleteArray by the caller.
-char* ReadLine(const char* prompt);
-
-
-// Read and return the raw bytes in a file. the size of the buffer is returned
-// in size.
-// The returned buffer must be freed by the caller.
-byte* ReadBytes(const char* filename, int* size, bool verbose = true);
-
-
-// Append size chars from str to the file given by filename.
-// The file is overwritten. Returns the number of chars written.
-int AppendChars(const char* filename,
- const char* str,
- int size,
- bool verbose = true);
-
-
-// Write size chars from str to the file given by filename.
-// The file is overwritten. Returns the number of chars written.
-int WriteChars(const char* filename,
- const char* str,
- int size,
- bool verbose = true);
-
-
-// Write size bytes to the file given by filename.
-// The file is overwritten. Returns the number of bytes written.
-int WriteBytes(const char* filename,
- const byte* bytes,
- int size,
- bool verbose = true);
-
-
-// Write the C code
-// const char* <varname> = "<str>";
-// const int <varname>_len = <len>;
-// to the file given by filename. Only the first len chars are written.
-int WriteAsCFile(const char* filename, const char* varname,
- const char* str, int size, bool verbose = true);
-
-
-// Data structures
-
-template <typename T>
-inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
- int length) {
- return Vector< Handle<Object> >(
- reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
-}
-
-// Memory
-
-// Copies data from |src| to |dst|. The data spans MUST not overlap.
-template <typename T>
-inline void CopyWords(T* dst, T* src, int num_words) {
- STATIC_ASSERT(sizeof(T) == kPointerSize);
- ASSERT(Min(dst, src) + num_words <= Max(dst, src));
- ASSERT(num_words > 0);
-
- // Use block copying memcpy if the segment we're copying is
- // enough to justify the extra call/setup overhead.
- static const int kBlockCopyLimit = 16;
-
- if (num_words >= kBlockCopyLimit) {
- memcpy(dst, src, num_words * kPointerSize);
- } else {
- int remaining = num_words;
- do {
- remaining--;
- *dst++ = *src++;
- } while (remaining > 0);
- }
-}
-
-
-template <typename T, typename U>
-inline void MemsetPointer(T** dest, U* value, int counter) {
-#ifdef DEBUG
- T* a = NULL;
- U* b = NULL;
- a = b; // Fake assignment to check assignability.
- USE(a);
-#endif // DEBUG
-#if defined(V8_HOST_ARCH_IA32)
-#define STOS "stosl"
-#elif defined(V8_HOST_ARCH_X64)
-#define STOS "stosq"
-#endif
-
-#if defined(__GNUC__) && defined(STOS)
- asm volatile(
- "cld;"
- "rep ; " STOS
- : "+&c" (counter), "+&D" (dest)
- : "a" (value)
- : "memory", "cc");
-#else
- for (int i = 0; i < counter; i++) {
- dest[i] = value;
- }
-#endif
-
-#undef STOS
-}
-
-
-// Simple wrapper that allows an ExternalString to refer to a
-// Vector<const char>. Doesn't assume ownership of the data.
-class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
- public:
- explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}
-
- virtual const char* data() const { return data_.start(); }
-
- virtual size_t length() const { return data_.length(); }
-
- private:
- Vector<const char> data_;
-};
-
-
-// Simple support to read a file into a 0-terminated C-string.
-// The returned buffer must be freed by the caller.
-// On return, *exits tells whether the file existed.
-Vector<const char> ReadFile(const char* filename,
- bool* exists,
- bool verbose = true);
-Vector<const char> ReadFile(FILE* file,
- bool* exists,
- bool verbose = true);
-
-
-template <typename sourcechar, typename sinkchar>
-INLINE(static void CopyCharsUnsigned(sinkchar* dest,
- const sourcechar* src,
- int chars));
-
-// Copy from ASCII/16bit chars to ASCII/16bit chars.
-template <typename sourcechar, typename sinkchar>
-INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));
-
-template<typename sourcechar, typename sinkchar>
-void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
- ASSERT(sizeof(sourcechar) <= 2);
- ASSERT(sizeof(sinkchar) <= 2);
- if (sizeof(sinkchar) == 1) {
- if (sizeof(sourcechar) == 1) {
- CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
- reinterpret_cast<const uint8_t*>(src),
- chars);
- } else {
- CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
- reinterpret_cast<const uint16_t*>(src),
- chars);
- }
- } else {
- if (sizeof(sourcechar) == 1) {
- CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
- reinterpret_cast<const uint8_t*>(src),
- chars);
- } else {
- CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
- reinterpret_cast<const uint16_t*>(src),
- chars);
- }
- }
-}
-
-template <typename sourcechar, typename sinkchar>
-void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
- sinkchar* limit = dest + chars;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- if (sizeof(*dest) == sizeof(*src)) {
- if (chars >= static_cast<int>(OS::kMinComplexMemCopy / sizeof(*dest))) {
- OS::MemCopy(dest, src, chars * sizeof(*dest));
- return;
- }
- // Number of characters in a uintptr_t.
- static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT
- ASSERT(dest + kStepSize > dest); // Check for overflow.
- while (dest + kStepSize <= limit) {
- *reinterpret_cast<uintptr_t*>(dest) =
- *reinterpret_cast<const uintptr_t*>(src);
- dest += kStepSize;
- src += kStepSize;
- }
- }
-#endif
- while (dest < limit) {
- *dest++ = static_cast<sinkchar>(*src++);
- }
-}
-
-
-class StringBuilder : public SimpleStringBuilder {
- public:
- explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
- StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { }
-
- // Add formatted contents to the builder just like printf().
- void AddFormatted(const char* format, ...);
-
- // Add formatted contents like printf based on a va_list.
- void AddFormattedList(const char* format, va_list list);
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_V8UTILS_H_
diff --git a/src/3rdparty/v8/src/variables.cc b/src/3rdparty/v8/src/variables.cc
deleted file mode 100644
index 1333ca1..0000000
--- a/src/3rdparty/v8/src/variables.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "scopes.h"
-#include "variables.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Implementation Variable.
-
-const char* Variable::Mode2String(VariableMode mode) {
- switch (mode) {
- case VAR: return "VAR";
- case CONST: return "CONST";
- case LET: return "LET";
- case CONST_HARMONY: return "CONST_HARMONY";
- case MODULE: return "MODULE";
- case DYNAMIC: return "DYNAMIC";
- case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
- case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
- case INTERNAL: return "INTERNAL";
- case TEMPORARY: return "TEMPORARY";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-Variable::Variable(Scope* scope,
- Handle<String> name,
- VariableMode mode,
- bool is_valid_LHS,
- Kind kind,
- InitializationFlag initialization_flag,
- Interface* interface)
- : scope_(scope),
- name_(name),
- mode_(mode),
- kind_(kind),
- location_(UNALLOCATED),
- index_(-1),
- initializer_position_(RelocInfo::kNoPosition),
- local_if_not_shadowed_(NULL),
- is_valid_LHS_(is_valid_LHS),
- force_context_allocation_(false),
- is_used_(false),
- initialization_flag_(initialization_flag),
- interface_(interface),
- is_qml_global_(false) {
- // Names must be canonicalized for fast equality checks.
- ASSERT(name->IsInternalizedString());
- // Var declared variables never need initialization.
- ASSERT(!(mode == VAR && initialization_flag == kNeedsInitialization));
-}
-
-
-bool Variable::IsGlobalObjectProperty() const {
- // Temporaries are never global, they must always be allocated in the
- // activation frame.
- return (IsDynamicVariableMode(mode_) ||
- (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_)))
- && scope_ != NULL && scope_->is_global_scope();
-}
-
-
-int Variable::CompareIndex(Variable* const* v, Variable* const* w) {
- int x = (*v)->index();
- int y = (*w)->index();
- // Consider sorting them according to type as well?
- return x - y;
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/variables.h b/src/3rdparty/v8/src/variables.h
deleted file mode 100644
index f76da71..0000000
--- a/src/3rdparty/v8/src/variables.h
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VARIABLES_H_
-#define V8_VARIABLES_H_
-
-#include "zone.h"
-#include "interface.h"
-
-namespace v8 {
-namespace internal {
-
-// The AST refers to variables via VariableProxies - placeholders for the actual
-// variables. Variables themselves are never directly referred to from the AST,
-// they are maintained by scopes, and referred to from VariableProxies and Slots
-// after binding and variable allocation.
-
-class Variable: public ZoneObject {
- public:
- enum Kind {
- NORMAL,
- THIS,
- ARGUMENTS
- };
-
- enum Location {
- // Before and during variable allocation, a variable whose location is
- // not yet determined. After allocation, a variable looked up as a
- // property on the global object (and possibly absent). name() is the
- // variable name, index() is invalid.
- UNALLOCATED,
-
- // A slot in the parameter section on the stack. index() is the
- // parameter index, counting left-to-right. The receiver is index -1;
- // the first parameter is index 0.
- PARAMETER,
-
- // A slot in the local section on the stack. index() is the variable
- // index in the stack frame, starting at 0.
- LOCAL,
-
- // An indexed slot in a heap context. index() is the variable index in
- // the context object on the heap, starting at 0. scope() is the
- // corresponding scope.
- CONTEXT,
-
- // A named slot in a heap context. name() is the variable name in the
- // context object on the heap, with lookup starting at the current
- // context. index() is invalid.
- LOOKUP
- };
-
- Variable(Scope* scope,
- Handle<String> name,
- VariableMode mode,
- bool is_valid_lhs,
- Kind kind,
- InitializationFlag initialization_flag,
- Interface* interface = Interface::NewValue());
-
- // Printing support
- static const char* Mode2String(VariableMode mode);
-
- bool IsValidLeftHandSide() { return is_valid_LHS_; }
-
- // The source code for an eval() call may refer to a variable that is
- // in an outer scope about which we don't know anything (it may not
- // be the global scope). scope() is NULL in that case. Currently the
- // scope is only used to follow the context chain length.
- Scope* scope() const { return scope_; }
-
- Handle<String> name() const { return name_; }
- VariableMode mode() const { return mode_; }
- bool has_forced_context_allocation() const {
- return force_context_allocation_;
- }
- void ForceContextAllocation() {
- ASSERT(mode_ != TEMPORARY);
- force_context_allocation_ = true;
- }
- bool is_used() { return is_used_; }
- void set_is_used(bool flag) { is_used_ = flag; }
-
- int initializer_position() { return initializer_position_; }
- void set_initializer_position(int pos) { initializer_position_ = pos; }
-
- bool IsVariable(Handle<String> n) const {
- return !is_this() && name().is_identical_to(n);
- }
-
- bool IsUnallocated() const { return location_ == UNALLOCATED; }
- bool IsParameter() const { return location_ == PARAMETER; }
- bool IsStackLocal() const { return location_ == LOCAL; }
- bool IsStackAllocated() const { return IsParameter() || IsStackLocal(); }
- bool IsContextSlot() const { return location_ == CONTEXT; }
- bool IsLookupSlot() const { return location_ == LOOKUP; }
- bool IsGlobalObjectProperty() const;
-
- bool is_dynamic() const { return IsDynamicVariableMode(mode_); }
- bool is_const_mode() const { return IsImmutableVariableMode(mode_); }
- bool binding_needs_init() const {
- return initialization_flag_ == kNeedsInitialization;
- }
-
- bool is_this() const { return kind_ == THIS; }
- bool is_arguments() const { return kind_ == ARGUMENTS; }
-
- // True if the variable is named eval and not known to be shadowed.
- bool is_possibly_eval(Isolate* isolate) const {
- return IsVariable(isolate->factory()->eval_string());
- }
-
- Variable* local_if_not_shadowed() const {
- ASSERT(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
- return local_if_not_shadowed_;
- }
-
- void set_local_if_not_shadowed(Variable* local) {
- local_if_not_shadowed_ = local;
- }
-
- Location location() const { return location_; }
- int index() const { return index_; }
- InitializationFlag initialization_flag() const {
- return initialization_flag_;
- }
- Interface* interface() const { return interface_; }
-
- void AllocateTo(Location location, int index) {
- location_ = location;
- index_ = index;
- }
-
- static int CompareIndex(Variable* const* v, Variable* const* w);
-
- bool is_qml_global() const { return is_qml_global_; }
- void set_is_qml_global(bool is_qml_global) { is_qml_global_ = is_qml_global; }
-
- private:
- Scope* scope_;
- Handle<String> name_;
- VariableMode mode_;
- Kind kind_;
- Location location_;
- int index_;
- int initializer_position_;
-
- // If this field is set, this variable references the stored locally bound
- // variable, but it might be shadowed by variable bindings introduced by
- // non-strict 'eval' calls between the reference scope (inclusive) and the
- // binding scope (exclusive).
- Variable* local_if_not_shadowed_;
-
- // Valid as a LHS? (const and this are not valid LHS, for example)
- bool is_valid_LHS_;
-
- // Usage info.
- bool force_context_allocation_; // set by variable resolver
- bool is_used_;
- InitializationFlag initialization_flag_;
-
- // Module type info.
- Interface* interface_;
-
- // QML info
- bool is_qml_global_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_VARIABLES_H_
diff --git a/src/3rdparty/v8/src/version.cc b/src/3rdparty/v8/src/version.cc
deleted file mode 100644
index f448e3e..0000000
--- a/src/3rdparty/v8/src/version.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "version.h"
-
-// These macros define the version number for the current version.
-// NOTE these macros are used by the SCons build script so their names
-// cannot be changed without changing the SCons build script.
-#define MAJOR_VERSION 3
-#define MINOR_VERSION 17
-#define BUILD_NUMBER 7
-#define PATCH_LEVEL 0
-// Use 1 for candidates and 0 otherwise.
-// (Boolean macro values are not supported by all preprocessors.)
-#define IS_CANDIDATE_VERSION 0
-
-// Define SONAME to have the SCons build the put a specific SONAME into the
-// shared library instead the generic SONAME generated from the V8 version
-// number. This define is mainly used by the SCons build script.
-#define SONAME ""
-
-#if IS_CANDIDATE_VERSION
-#define CANDIDATE_STRING " (candidate)"
-#else
-#define CANDIDATE_STRING ""
-#endif
-
-#define SX(x) #x
-#define S(x) SX(x)
-
-#if PATCH_LEVEL > 0
-#define VERSION_STRING \
- S(MAJOR_VERSION) "." S(MINOR_VERSION) "." S(BUILD_NUMBER) "." \
- S(PATCH_LEVEL) CANDIDATE_STRING
-#else
-#define VERSION_STRING \
- S(MAJOR_VERSION) "." S(MINOR_VERSION) "." S(BUILD_NUMBER) \
- CANDIDATE_STRING
-#endif
-
-namespace v8 {
-namespace internal {
-
-int Version::major_ = MAJOR_VERSION;
-int Version::minor_ = MINOR_VERSION;
-int Version::build_ = BUILD_NUMBER;
-int Version::patch_ = PATCH_LEVEL;
-bool Version::candidate_ = (IS_CANDIDATE_VERSION != 0);
-const char* Version::soname_ = SONAME;
-const char* Version::version_string_ = VERSION_STRING;
-
-// Calculate the V8 version string.
-void Version::GetString(Vector<char> str) {
- const char* candidate = IsCandidate() ? " (candidate)" : "";
-#ifdef USE_SIMULATOR
- const char* is_simulator = " SIMULATOR";
-#else
- const char* is_simulator = "";
-#endif // USE_SIMULATOR
- if (GetPatch() > 0) {
- OS::SNPrintF(str, "%d.%d.%d.%d%s%s",
- GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate,
- is_simulator);
- } else {
- OS::SNPrintF(str, "%d.%d.%d%s%s",
- GetMajor(), GetMinor(), GetBuild(), candidate,
- is_simulator);
- }
-}
-
-
-// Calculate the SONAME for the V8 shared library.
-void Version::GetSONAME(Vector<char> str) {
- if (soname_ == NULL || *soname_ == '\0') {
- // Generate generic SONAME if no specific SONAME is defined.
- const char* candidate = IsCandidate() ? "-candidate" : "";
- if (GetPatch() > 0) {
- OS::SNPrintF(str, "libv8-%d.%d.%d.%d%s.so",
- GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate);
- } else {
- OS::SNPrintF(str, "libv8-%d.%d.%d%s.so",
- GetMajor(), GetMinor(), GetBuild(), candidate);
- }
- } else {
- // Use specific SONAME.
- OS::SNPrintF(str, "%s", soname_);
- }
-}
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/version.h b/src/3rdparty/v8/src/version.h
deleted file mode 100644
index 4b3e7e2..0000000
--- a/src/3rdparty/v8/src/version.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VERSION_H_
-#define V8_VERSION_H_
-
-namespace v8 {
-namespace internal {
-
-class Version {
- public:
- // Return the various version components.
- static int GetMajor() { return major_; }
- static int GetMinor() { return minor_; }
- static int GetBuild() { return build_; }
- static int GetPatch() { return patch_; }
- static bool IsCandidate() { return candidate_; }
-
- // Calculate the V8 version string.
- static void GetString(Vector<char> str);
-
- // Calculate the SONAME for the V8 shared library.
- static void GetSONAME(Vector<char> str);
-
- static const char* GetVersion() { return version_string_; }
-
- private:
- // NOTE: can't make these really const because of test-version.cc.
- static int major_;
- static int minor_;
- static int build_;
- static int patch_;
- static bool candidate_;
- static const char* soname_;
- static const char* version_string_;
-
- // In test-version.cc.
- friend void SetVersion(int major, int minor, int build, int patch,
- bool candidate, const char* soname);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_VERSION_H_
diff --git a/src/3rdparty/v8/src/vm-state-inl.h b/src/3rdparty/v8/src/vm-state-inl.h
deleted file mode 100644
index fae68eb..0000000
--- a/src/3rdparty/v8/src/vm-state-inl.h
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VM_STATE_INL_H_
-#define V8_VM_STATE_INL_H_
-
-#include "vm-state.h"
-#include "runtime-profiler.h"
-
-namespace v8 {
-namespace internal {
-
-//
-// VMState class implementation. A simple stack of VM states held by the
-// logger and partially threaded through the call stack. States are pushed by
-// VMState construction and popped by destruction.
-//
-inline const char* StateToString(StateTag state) {
- switch (state) {
- case JS:
- return "JS";
- case GC:
- return "GC";
- case COMPILER:
- return "COMPILER";
- case PARALLEL_COMPILER:
- return "PARALLEL_COMPILER";
- case OTHER:
- return "OTHER";
- case EXTERNAL:
- return "EXTERNAL";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-VMState::VMState(Isolate* isolate, StateTag tag)
- : isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
- if (FLAG_log_state_changes) {
- LOG(isolate, UncheckedStringEvent("Entering", StateToString(tag)));
- LOG(isolate, UncheckedStringEvent("From", StateToString(previous_tag_)));
- }
-
- if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && tag == EXTERNAL) {
- LOG(isolate_, EnterExternal());
- }
-
- isolate_->SetCurrentVMState(tag);
-}
-
-
-VMState::~VMState() {
- if (FLAG_log_state_changes) {
- LOG(isolate_,
- UncheckedStringEvent("Leaving",
- StateToString(isolate_->current_vm_state())));
- LOG(isolate_,
- UncheckedStringEvent("To", StateToString(previous_tag_)));
- }
-
- if (FLAG_log_timer_events &&
- previous_tag_ != EXTERNAL && isolate_->current_vm_state() == EXTERNAL) {
- LOG(isolate_, LeaveExternal());
- }
-
- isolate_->SetCurrentVMState(previous_tag_);
-}
-
-
-ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
- : isolate_(isolate), previous_callback_(isolate->external_callback()) {
- isolate_->set_external_callback(callback);
-}
-
-ExternalCallbackScope::~ExternalCallbackScope() {
- isolate_->set_external_callback(previous_callback_);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_VM_STATE_INL_H_
diff --git a/src/3rdparty/v8/src/vm-state.h b/src/3rdparty/v8/src/vm-state.h
deleted file mode 100644
index 831e2d3..0000000
--- a/src/3rdparty/v8/src/vm-state.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VM_STATE_H_
-#define V8_VM_STATE_H_
-
-#include "allocation.h"
-#include "isolate.h"
-
-namespace v8 {
-namespace internal {
-
-class VMState BASE_EMBEDDED {
- public:
- inline VMState(Isolate* isolate, StateTag tag);
- inline ~VMState();
-
- private:
- Isolate* isolate_;
- StateTag previous_tag_;
-};
-
-
-class ExternalCallbackScope BASE_EMBEDDED {
- public:
- inline ExternalCallbackScope(Isolate* isolate, Address callback);
- inline ~ExternalCallbackScope();
- private:
- Isolate* isolate_;
- Address previous_callback_;
-};
-
-} } // namespace v8::internal
-
-
-#endif // V8_VM_STATE_H_
diff --git a/src/3rdparty/v8/src/win32-headers.h b/src/3rdparty/v8/src/win32-headers.h
deleted file mode 100644
index b476efe..0000000
--- a/src/3rdparty/v8/src/win32-headers.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef WIN32_LEAN_AND_MEAN
-// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
-#define WIN32_LEAN_AND_MEAN
-#endif
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-#ifndef NOKERNEL
-#define NOKERNEL
-#endif
-#ifndef NOUSER
-#define NOUSER
-#endif
-#ifndef NOSERVICE
-#define NOSERVICE
-#endif
-#ifndef NOSOUND
-#define NOSOUND
-#endif
-#ifndef NOMCX
-#define NOMCX
-#endif
-// Require Windows XP or higher (this is required for the RtlCaptureContext
-// function to be present).
-#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x501
-#endif
-
-#include <windows.h>
-
-#ifdef V8_WIN32_HEADERS_FULL
-#ifndef _WIN32_WCE
-#include <signal.h> // For raise().
-#endif // _WIN32_WCE
-#include <time.h> // For LocalOffset() implementation.
-#include <mmsystem.h> // For timeGetTime().
-#ifdef __MINGW32__
-// Require Windows XP or higher when compiling with MinGW. This is for MinGW
-// header files to expose getaddrinfo.
-#undef _WIN32_WINNT
-#define _WIN32_WINNT 0x501
-#endif // __MINGW32__
-#if (!defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)) && !defined(_WIN32_WCE)
-#include <dbghelp.h> // For SymLoadModule64 and al.
-#include <errno.h> // For STRUNCATE
-#endif // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR) && !defined(_WIN32_WCE)
-#include <limits.h> // For INT_MAX and al.
-#include <tlhelp32.h> // For Module32First and al.
-
-// These additional WIN32 includes have to be right here as the #undef's below
-// makes it impossible to have them elsewhere.
-#include <winsock2.h>
-#include <ws2tcpip.h>
-#if !defined(__MINGW32__) && !defined(_WIN32_WCE)
-#include <wspiapi.h>
-#endif // __MINGW32__ && !defined(_WIN32_WCE)
-#ifndef _WIN32_WCE
-#include <process.h> // For _beginthreadex().
-#endif
-#include <stdlib.h>
-#endif // V8_WIN32_HEADERS_FULL
-
-#ifdef _WIN32_WCE
-#ifdef DebugBreak
-#undef DebugBreak
-inline void DebugBreak() { __debugbreak(); };
-#endif // DebugBreak
-
-#ifndef _IOFBF
-#define _IOFBF 0x0000
-#endif
-#endif
-
-#undef VOID
-#undef DELETE
-#undef IN
-#undef THIS
-#undef CONST
-#undef NAN
-#undef TRUE
-#undef FALSE
-#undef UNKNOWN
-#undef NONE
-#undef ANY
-#undef IGNORE
-#undef GetObject
-#undef CreateMutex
-#undef CreateSemaphore
-#undef interface
diff --git a/src/3rdparty/v8/src/win32-math.cc b/src/3rdparty/v8/src/win32-math.cc
deleted file mode 100644
index 3410872..0000000
--- a/src/3rdparty/v8/src/win32-math.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
-// refer to The Open Group Base Specification for specification of the correct
-// semantics for these functions.
-// (http://www.opengroup.org/onlinepubs/000095399/)
-#ifdef _MSC_VER
-
-#undef V8_WIN32_LEAN_AND_MEAN
-#define V8_WIN32_HEADERS_FULL
-#include "win32-headers.h"
-#include <limits.h> // Required for INT_MAX etc.
-#include <math.h>
-#include <float.h> // Required for DBL_MAX and on Win32 for finite()
-#include "win32-math.h"
-
-#include "checks.h"
-
-namespace v8 {
-
-// Test for finite value - usually defined in math.h
-int isfinite(double x) {
- return _finite(x);
-}
-
-} // namespace v8
-
-
-// Test for a NaN (not a number) value - usually defined in math.h
-int isnan(double x) {
- return _isnan(x);
-}
-
-
-// Test for infinity - usually defined in math.h
-int isinf(double x) {
- return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0;
-}
-
-
-// Test if x is less than y and both nominal - usually defined in math.h
-int isless(double x, double y) {
- return isnan(x) || isnan(y) ? 0 : x < y;
-}
-
-
-// Test if x is greater than y and both nominal - usually defined in math.h
-int isgreater(double x, double y) {
- return isnan(x) || isnan(y) ? 0 : x > y;
-}
-
-
-// Classify floating point number - usually defined in math.h
-int fpclassify(double x) {
- // Use the MS-specific _fpclass() for classification.
- int flags = _fpclass(x);
-
- // Determine class. We cannot use a switch statement because
- // the _FPCLASS_ constants are defined as flags.
- if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL;
- if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO;
- if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL;
- if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE;
-
- // All cases should be covered by the code above.
- ASSERT(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
- return FP_NAN;
-}
-
-
-// Test sign - usually defined in math.h
-int signbit(double x) {
- // We need to take care of the special case of both positive
- // and negative versions of zero.
- if (x == 0)
- return _fpclass(x) & _FPCLASS_NZ;
- else
- return x < 0;
-}
-
-#endif // _MSC_VER
diff --git a/src/3rdparty/v8/src/win32-math.h b/src/3rdparty/v8/src/win32-math.h
deleted file mode 100644
index 6875999..0000000
--- a/src/3rdparty/v8/src/win32-math.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
-// refer to The Open Group Base Specification for specification of the correct
-// semantics for these functions.
-// (http://www.opengroup.org/onlinepubs/000095399/)
-
-#ifndef V8_WIN32_MATH_H_
-#define V8_WIN32_MATH_H_
-
-#ifndef _MSC_VER
-#error Wrong environment, expected MSVC.
-#endif // _MSC_VER
-
-enum {
- FP_NAN,
- FP_INFINITE,
- FP_ZERO,
- FP_SUBNORMAL,
- FP_NORMAL
-};
-
-namespace v8 {
-
-int isfinite(double x);
-
-} // namespace v8
-
-int isnan(double x);
-int isinf(double x);
-int isless(double x, double y);
-int isgreater(double x, double y);
-int fpclassify(double x);
-int signbit(double x);
-
-#endif // V8_WIN32_MATH_H_
diff --git a/src/3rdparty/v8/src/x64/assembler-x64-inl.h b/src/3rdparty/v8/src/x64/assembler-x64-inl.h
deleted file mode 100644
index 67acbf0..0000000
--- a/src/3rdparty/v8/src/x64/assembler-x64-inl.h
+++ /dev/null
@@ -1,521 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_ASSEMBLER_X64_INL_H_
-#define V8_X64_ASSEMBLER_X64_INL_H_
-
-#include "x64/assembler-x64.h"
-
-#include "cpu.h"
-#include "debug.h"
-#include "v8memory.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler
-
-
-static const byte kCallOpcode = 0xE8;
-
-
-void Assembler::emitl(uint32_t x) {
- Memory::uint32_at(pc_) = x;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
- Memory::uint64_at(pc_) = x;
- if (!RelocInfo::IsNone(rmode)) {
- RecordRelocInfo(rmode, x);
- }
- pc_ += sizeof(uint64_t);
-}
-
-
-void Assembler::emitw(uint16_t x) {
- Memory::uint16_at(pc_) = x;
- pc_ += sizeof(uint16_t);
-}
-
-
-void Assembler::emit_code_target(Handle<Code> target,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
- RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt());
- } else {
- RecordRelocInfo(rmode);
- }
- int current = code_targets_.length();
- if (current > 0 && code_targets_.last().is_identical_to(target)) {
- // Optimization if we keep jumping to the same code target.
- emitl(current - 1);
- } else {
- code_targets_.Add(target);
- emitl(current);
- }
-}
-
-
-void Assembler::emit_rex_64(Register reg, Register rm_reg) {
- emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
-}
-
-
-void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
- emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
-}
-
-
-void Assembler::emit_rex_64(Register reg, XMMRegister rm_reg) {
- emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
-}
-
-
-void Assembler::emit_rex_64(Register reg, const Operand& op) {
- emit(0x48 | reg.high_bit() << 2 | op.rex_);
-}
-
-
-void Assembler::emit_rex_64(XMMRegister reg, const Operand& op) {
- emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
-}
-
-
-void Assembler::emit_rex_64(Register rm_reg) {
- ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
- emit(0x48 | rm_reg.high_bit());
-}
-
-
-void Assembler::emit_rex_64(const Operand& op) {
- emit(0x48 | op.rex_);
-}
-
-
-void Assembler::emit_rex_32(Register reg, Register rm_reg) {
- emit(0x40 | reg.high_bit() << 2 | rm_reg.high_bit());
-}
-
-
-void Assembler::emit_rex_32(Register reg, const Operand& op) {
- emit(0x40 | reg.high_bit() << 2 | op.rex_);
-}
-
-
-void Assembler::emit_rex_32(Register rm_reg) {
- emit(0x40 | rm_reg.high_bit());
-}
-
-
-void Assembler::emit_rex_32(const Operand& op) {
- emit(0x40 | op.rex_);
-}
-
-
-void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
- byte rex_bits = reg.high_bit() << 2 | rm_reg.high_bit();
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
- byte rex_bits = reg.high_bit() << 2 | op.rex_;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(XMMRegister reg, const Operand& op) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(XMMRegister reg, XMMRegister base) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(Register reg, XMMRegister base) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(Register rm_reg) {
- if (rm_reg.high_bit()) emit(0x41);
-}
-
-
-void Assembler::emit_optional_rex_32(const Operand& op) {
- if (op.rex_ != 0) emit(0x40 | op.rex_);
-}
-
-
-Address Assembler::target_address_at(Address pc) {
- return Memory::int32_at(pc) + pc + 4;
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
- CPU::FlushICache(pc, sizeof(int32_t));
-}
-
-
-Address Assembler::target_address_from_return_address(Address pc) {
- return pc - kCallTargetAddressOffset;
-}
-
-
-Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
- return code_targets_[Memory::int32_at(pc)];
-}
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta) {
- if (IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- Memory::Address_at(pc_) += static_cast<int32_t>(delta);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (IsCodeTarget(rmode_)) {
- Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
- CPU::FlushICache(pc_, sizeof(int32_t));
- } else if (rmode_ == CODE_AGE_SEQUENCE) {
- if (*pc_ == kCallOpcode) {
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= static_cast<int32_t>(delta); // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- }
- }
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- if (IsCodeTarget(rmode_)) {
- return Assembler::target_address_at(pc_);
- } else {
- return Memory::Address_at(pc_);
- }
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
- || rmode_ == EMBEDDED_OBJECT
- || rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address>(pc_);
-}
-
-
-int RelocInfo::target_address_size() {
- if (IsCodedSpecially()) {
- return Assembler::kSpecialTargetSize;
- } else {
- return kPointerSize;
- }
-}
-
-
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- if (IsCodeTarget(rmode_)) {
- Assembler::set_target_address_at(pc_, target);
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
- } else {
- Memory::Address_at(pc_) = target;
- CPU::FlushICache(pc_, sizeof(Address));
- }
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_at(pc_);
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- if (rmode_ == EMBEDDED_OBJECT) {
- return Memory::Object_Handle_at(pc_);
- } else {
- return origin->code_target_object_handle_at(pc_);
- }
-}
-
-
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(pc_);
-}
-
-
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
-}
-
-
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Memory::Object_at(pc_) = target;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER &&
- host() != NULL &&
- target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
- }
-}
-
-
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
-}
-
-
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
-}
-
-
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
- Memory::Address_at(pc_) = address;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER &&
- host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
- }
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- // The recognized call sequence is:
- // movq(kScratchRegister, immediate64); call(kScratchRegister);
- // It only needs to be distinguished from a return sequence
- // movq(rsp, rbp); pop(rbp); ret(n); int3 *6
- // The 11th byte is int3 (0xCC) in the return sequence and
- // REX.WB (0x48+register bit) for the call sequence.
-#ifdef ENABLE_DEBUGGER_SUPPORT
- return pc_[10] != 0xCC;
-#else
- return false;
-#endif
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- return !Assembler::IsNop(pc());
-}
-
-
-Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- ASSERT(*pc_ == kCallOpcode);
- return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(*pc_ == kCallOpcode);
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
-}
-
-
-Address RelocInfo::call_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Memory::Address_at(
- pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
- target;
- CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
- sizeof(Address));
- if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(
- pc_ + Assembler::kPatchReturnSequenceAddressOffset);
-}
-
-
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
- visitor->VisitDebugTarget(this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- visitor->VisitRuntimeEntry(this);
- }
-}
-
-
-template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(heap, this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand
-
-void Operand::set_modrm(int mod, Register rm_reg) {
- ASSERT(is_uint2(mod));
- buf_[0] = mod << 6 | rm_reg.low_bits();
- // Set REX.B to the high bit of rm.code().
- rex_ |= rm_reg.high_bit();
-}
-
-
-void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
- ASSERT(len_ == 1);
- ASSERT(is_uint2(scale));
- // Use SIB with no index register only for base rsp or r12. Otherwise we
- // would skip the SIB byte entirely.
- ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
- buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
- rex_ |= index.high_bit() << 1 | base.high_bit();
- len_ = 2;
-}
-
-void Operand::set_disp8(int disp) {
- ASSERT(is_int8(disp));
- ASSERT(len_ == 1 || len_ == 2);
- int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int8_t);
-}
-
-void Operand::set_disp32(int disp) {
- ASSERT(len_ == 1 || len_ == 2);
- int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int32_t);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_X64_ASSEMBLER_X64_INL_H_
diff --git a/src/3rdparty/v8/src/x64/assembler-x64.cc b/src/3rdparty/v8/src/x64/assembler-x64.cc
deleted file mode 100644
index 0ac0862..0000000
--- a/src/3rdparty/v8/src/x64/assembler-x64.cc
+++ /dev/null
@@ -1,3064 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "macro-assembler.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Implementation of CpuFeatures
-
-
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
-uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
-
-
-ExternalReference ExternalReference::cpu_features() {
- ASSERT(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-
-void CpuFeatures::Probe() {
- ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
-#ifdef DEBUG
- initialized_ = true;
-#endif
- supported_ = kDefaultCpuFeatures;
- if (Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- return; // No features if we might serialize.
- }
-
- const int kBufferSize = 4 * KB;
- VirtualMemory* memory = new VirtualMemory(kBufferSize);
- if (!memory->IsReserved()) {
- delete memory;
- return;
- }
- ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
- if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
- delete memory;
- return;
- }
-
- Assembler assm(NULL, memory->address(), kBufferSize);
- Label cpuid, done;
-#define __ assm.
- // Save old rsp, since we are going to modify the stack.
- __ push(rbp);
- __ pushfq();
- __ push(rdi);
- __ push(rcx);
- __ push(rbx);
- __ movq(rbp, rsp);
-
- // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
- __ pushfq();
- __ pop(rax);
- __ movq(rdx, rax);
- __ xor_(rax, Immediate(0x200000)); // Flip bit 21.
- __ push(rax);
- __ popfq();
- __ pushfq();
- __ pop(rax);
- __ xor_(rax, rdx); // Different if CPUID is supported.
- __ j(not_zero, &cpuid);
-
- // CPUID not supported. Clear the supported features in rax.
- __ xor_(rax, rax);
- __ jmp(&done);
-
- // Invoke CPUID with 1 in eax to get feature information in
- // ecx:edx. Temporarily enable CPUID support because we know it's
- // safe here.
- __ bind(&cpuid);
- __ movl(rax, Immediate(1));
- supported_ = kDefaultCpuFeatures | (1 << CPUID);
- { Scope fscope(CPUID);
- __ cpuid();
- // Move the result from ecx:edx to rdi.
- __ movl(rdi, rdx); // Zero-extended to 64 bits.
- __ shl(rcx, Immediate(32));
- __ or_(rdi, rcx);
-
- // Get the sahf supported flag, from CPUID(0x80000001)
- __ movq(rax, 0x80000001, RelocInfo::NONE64);
- __ cpuid();
- }
- supported_ = kDefaultCpuFeatures;
-
- // Put the CPU flags in rax.
- // rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID).
- __ movl(rax, Immediate(1));
- __ and_(rcx, rax); // Bit 0 is set if SAHF instruction supported.
- __ not_(rax);
- __ and_(rax, rdi);
- __ or_(rax, rcx);
- __ or_(rax, Immediate(1 << CPUID));
-
- // Done.
- __ bind(&done);
- __ movq(rsp, rbp);
- __ pop(rbx);
- __ pop(rcx);
- __ pop(rdi);
- __ popfq();
- __ pop(rbp);
- __ ret(0);
-#undef __
-
- typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
- supported_ = probe();
- found_by_runtime_probing_ = supported_;
- found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
- uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
- supported_ |= os_guarantees;
- found_by_runtime_probing_ &= ~os_guarantees;
- // SSE2 and CMOV must be available on an X64 CPU.
- ASSERT(IsSupported(CPUID));
- ASSERT(IsSupported(SSE2));
- ASSERT(IsSupported(CMOV));
-
- delete memory;
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Load register with immediate 64 and call through a register instructions
- // takes up 13 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 13;
- int code_size = kCallCodeSize + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc_, code_size);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->movq(r10, target, RelocInfo::NONE64);
- patcher.masm()->call(r10);
-
- // Check that the size of the code generated is as expected.
- ASSERT_EQ(kCallCodeSize,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-}
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- for (int i = 0; i < instruction_count; i++) {
- *(pc_ + i) = *(instructions + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count);
-}
-
-
-// -----------------------------------------------------------------------------
-// Register constants.
-
-const int
- Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
- // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
- 0, 3, 2, 1, 7, 8, 9, 11, 14, 15
-};
-
-const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
- 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, 8, 9
-};
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand
-
-Operand::Operand(Register base, int32_t disp) : rex_(0) {
- len_ = 1;
- if (base.is(rsp) || base.is(r12)) {
- // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
- set_sib(times_1, rsp, base);
- }
-
- if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
- set_modrm(0, base);
- } else if (is_int8(disp)) {
- set_modrm(1, base);
- set_disp8(disp);
- } else {
- set_modrm(2, base);
- set_disp32(disp);
- }
-}
-
-
-Operand::Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp) : rex_(0) {
- ASSERT(!index.is(rsp));
- len_ = 1;
- set_sib(scale, index, base);
- if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
- // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
- // possibly set by set_sib.
- set_modrm(0, rsp);
- } else if (is_int8(disp)) {
- set_modrm(1, rsp);
- set_disp8(disp);
- } else {
- set_modrm(2, rsp);
- set_disp32(disp);
- }
-}
-
-
-Operand::Operand(Register index,
- ScaleFactor scale,
- int32_t disp) : rex_(0) {
- ASSERT(!index.is(rsp));
- len_ = 1;
- set_modrm(0, rsp);
- set_sib(scale, index, rbp);
- set_disp32(disp);
-}
-
-
-Operand::Operand(const Operand& operand, int32_t offset) {
- ASSERT(operand.len_ >= 1);
- // Operand encodes REX ModR/M [SIB] [Disp].
- byte modrm = operand.buf_[0];
- ASSERT(modrm < 0xC0); // Disallow mode 3 (register target).
- bool has_sib = ((modrm & 0x07) == 0x04);
- byte mode = modrm & 0xC0;
- int disp_offset = has_sib ? 2 : 1;
- int base_reg = (has_sib ? operand.buf_[1] : modrm) & 0x07;
- // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
- // displacement.
- bool is_baseless = (mode == 0) && (base_reg == 0x05); // No base or RIP base.
- int32_t disp_value = 0;
- if (mode == 0x80 || is_baseless) {
- // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
- disp_value = *BitCast<const int32_t*>(&operand.buf_[disp_offset]);
- } else if (mode == 0x40) {
- // Mode 1: Byte displacement.
- disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
- }
-
- // Write new operand with same registers, but with modified displacement.
- ASSERT(offset >= 0 ? disp_value + offset > disp_value
- : disp_value + offset < disp_value); // No overflow.
- disp_value += offset;
- rex_ = operand.rex_;
- if (!is_int8(disp_value) || is_baseless) {
- // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
- buf_[0] = (modrm & 0x3f) | (is_baseless ? 0x00 : 0x80);
- len_ = disp_offset + 4;
- Memory::int32_at(&buf_[disp_offset]) = disp_value;
- } else if (disp_value != 0 || (base_reg == 0x05)) {
- // Need 8 bits of displacement.
- buf_[0] = (modrm & 0x3f) | 0x40; // Mode 1.
- len_ = disp_offset + 1;
- buf_[disp_offset] = static_cast<byte>(disp_value);
- } else {
- // Need no displacement.
- buf_[0] = (modrm & 0x3f); // Mode 0.
- len_ = disp_offset;
- }
- if (has_sib) {
- buf_[1] = operand.buf_[1];
- }
-}
-
-
-bool Operand::AddressUsesRegister(Register reg) const {
- int code = reg.code();
- ASSERT((buf_[0] & 0xC0) != 0xC0); // Always a memory operand.
- // Start with only low three bits of base register. Initial decoding doesn't
- // distinguish on the REX.B bit.
- int base_code = buf_[0] & 0x07;
- if (base_code == rsp.code()) {
- // SIB byte present in buf_[1].
- // Check the index register from the SIB byte + REX.X prefix.
- int index_code = ((buf_[1] >> 3) & 0x07) | ((rex_ & 0x02) << 2);
- // Index code (including REX.X) of 0x04 (rsp) means no index register.
- if (index_code != rsp.code() && index_code == code) return true;
- // Add REX.B to get the full base register code.
- base_code = (buf_[1] & 0x07) | ((rex_ & 0x01) << 3);
- // A base register of 0x05 (rbp) with mod = 0 means no base register.
- if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
- return code == base_code;
- } else {
- // A base register with low bits of 0x05 (rbp or r13) and mod = 0 means
- // no base register.
- if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
- base_code |= ((rex_ & 0x01) << 3);
- return code == base_code;
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-#ifdef GENERATED_CODE_COVERAGE
-static void InitCoverageLog();
-#endif
-
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
- code_targets_(100),
- positions_recorder_(this) {
- // Clear the buffer in debug mode unless it was provided by the
- // caller in which case we can't be sure it's okay to overwrite
- // existing code in it.
-#ifdef DEBUG
- if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size_); // int3
- }
-#endif
-
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-
-
-#ifdef GENERATED_CODE_COVERAGE
- InitCoverageLog();
-#endif
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- // Finalize code (at this point overflow() may be true, but the gap ensures
- // that we are still not overlapping instructions and relocation info).
- ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- ASSERT(desc->instr_size > 0); // Zero-size code objects upset the system.
- desc->reloc_size =
- static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
- desc->origin = this;
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(IsPowerOf2(m));
- int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
- Nop(delta);
-}
-
-
-void Assembler::CodeTargetAlign() {
- Align(16); // Preferred alignment of jump targets on x64.
-}
-
-
-bool Assembler::IsNop(Address addr) {
- Address a = addr;
- while (*a == 0x66) a++;
- if (*a == 0x90) return true;
- if (a[0] == 0xf && a[1] == 0x1f) return true;
- return false;
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- ASSERT(!L->is_bound()); // Label may only be bound once.
- ASSERT(0 <= pos && pos <= pc_offset()); // Position must be valid.
- if (L->is_linked()) {
- int current = L->pos();
- int next = long_at(current);
- while (next != current) {
- // Relative address, relative to point after address.
- int imm32 = pos - (current + sizeof(int32_t));
- long_at_put(current, imm32);
- current = next;
- next = long_at(next);
- }
- // Fix up last fixup on linked list.
- int last_imm32 = pos - (current + sizeof(int32_t));
- long_at_put(current, last_imm32);
- }
- while (L->is_near_linked()) {
- int fixup_pos = L->near_link_pos();
- int offset_to_next =
- static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
- ASSERT(offset_to_next <= 0);
- int disp = pos - (fixup_pos + sizeof(int8_t));
- CHECK(is_int8(disp));
- set_byte_at(fixup_pos, disp);
- if (offset_to_next < 0) {
- L->link_to(fixup_pos + offset_to_next, Label::kNear);
- } else {
- L->UnuseNear();
- }
- }
- L->bind_to(pos);
-}
-
-
-void Assembler::bind(Label* L) {
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::GrowBuffer() {
- ASSERT(buffer_overflow());
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else {
- desc.buffer_size = 2*buffer_size_;
- }
- // Some internal data structures overflow for very large buffers,
- // they must ensure that kMaximalBufferSize is not too large.
- if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > HEAP->MaxOldGenerationSize())) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
- }
-
- // Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.instr_size = pc_offset();
- desc.reloc_size =
- static_cast<int>((buffer_ + buffer_size_) - (reloc_info_writer.pos()));
-
- // Clear the buffer in debug mode. Use 'int3' instructions to make
- // sure to get into problems if we ever run uninitialized code.
-#ifdef DEBUG
- memset(desc.buffer, 0xCC, desc.buffer_size);
-#endif
-
- // Copy the data.
- intptr_t pc_delta = desc.buffer - buffer_;
- intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
- (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
-
- // Switch buffers.
- if (isolate() != NULL &&
- isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- intptr_t* p = reinterpret_cast<intptr_t*>(it.rinfo()->pc());
- if (*p != 0) { // 0 means uninitialized.
- *p += pc_delta;
- }
- }
- }
-
- ASSERT(!buffer_overflow());
-}
-
-
-void Assembler::emit_operand(int code, const Operand& adr) {
- ASSERT(is_uint3(code));
- const unsigned length = adr.len_;
- ASSERT(length > 0);
-
- // Emit updated ModR/M byte containing the given register.
- ASSERT((adr.buf_[0] & 0x38) == 0);
- pc_[0] = adr.buf_[0] | code << 3;
-
- // Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
- pc_ += length;
-}
-
-
-// Assembler Instruction implementations.
-
-void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- emit_rex_64(reg, op);
- emit(opcode);
- emit_operand(reg, op);
-}
-
-
-void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
- EnsureSpace ensure_space(this);
- ASSERT((opcode & 0xC6) == 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
- // Swap reg and rm_reg and change opcode operand order.
- emit_rex_64(rm_reg, reg);
- emit(opcode ^ 0x02);
- emit_modrm(rm_reg, reg);
- } else {
- emit_rex_64(reg, rm_reg);
- emit(opcode);
- emit_modrm(reg, rm_reg);
- }
-}
-
-
-void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
- EnsureSpace ensure_space(this);
- ASSERT((opcode & 0xC6) == 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
- // Swap reg and rm_reg and change opcode operand order.
- emit(0x66);
- emit_optional_rex_32(rm_reg, reg);
- emit(opcode ^ 0x02);
- emit_modrm(rm_reg, reg);
- } else {
- emit(0x66);
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_modrm(reg, rm_reg);
- }
-}
-
-
-void Assembler::arithmetic_op_16(byte opcode,
- Register reg,
- const Operand& rm_reg) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_operand(reg, rm_reg);
-}
-
-
-void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
- EnsureSpace ensure_space(this);
- ASSERT((opcode & 0xC6) == 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
- // Swap reg and rm_reg and change opcode operand order.
- emit_optional_rex_32(rm_reg, reg);
- emit(opcode ^ 0x02); // E.g. 0x03 -> 0x01 for ADD.
- emit_modrm(rm_reg, reg);
- } else {
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_modrm(reg, rm_reg);
- }
-}
-
-
-void Assembler::arithmetic_op_32(byte opcode,
- Register reg,
- const Operand& rm_reg) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_operand(reg, rm_reg);
-}
-
-
-void Assembler::immediate_arithmetic_op(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_modrm(subcode, dst);
- emit(src.value_);
- } else if (dst.is(rax)) {
- emit(0x05 | (subcode << 3));
- emitl(src.value_);
- } else {
- emit(0x81);
- emit_modrm(subcode, dst);
- emitl(src.value_);
- }
-}
-
-void Assembler::immediate_arithmetic_op(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_operand(subcode, dst);
- emit(src.value_);
- } else {
- emit(0x81);
- emit_operand(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_16(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit(0x66); // Operand size override prefix.
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_modrm(subcode, dst);
- emit(src.value_);
- } else if (dst.is(rax)) {
- emit(0x05 | (subcode << 3));
- emitw(src.value_);
- } else {
- emit(0x81);
- emit_modrm(subcode, dst);
- emitw(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_16(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit(0x66); // Operand size override prefix.
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_operand(subcode, dst);
- emit(src.value_);
- } else {
- emit(0x81);
- emit_operand(subcode, dst);
- emitw(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_32(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_modrm(subcode, dst);
- emit(src.value_);
- } else if (dst.is(rax)) {
- emit(0x05 | (subcode << 3));
- emitl(src.value_);
- } else {
- emit(0x81);
- emit_modrm(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_32(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_operand(subcode, dst);
- emit(src.value_);
- } else {
- emit(0x81);
- emit_operand(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_8(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- ASSERT(is_int8(src.value_) || is_uint8(src.value_));
- emit(0x80);
- emit_operand(subcode, dst);
- emit(src.value_);
-}
-
-
-void Assembler::immediate_arithmetic_op_8(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- if (!dst.is_byte_register()) {
- // Use 64-bit mode byte registers.
- emit_rex_64(dst);
- }
- ASSERT(is_int8(src.value_) || is_uint8(src.value_));
- emit(0x80);
- emit_modrm(subcode, dst);
- emit(src.value_);
-}
-
-
-void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint6(shift_amount.value_)); // illegal shift count
- if (shift_amount.value_ == 1) {
- emit_rex_64(dst);
- emit(0xD1);
- emit_modrm(subcode, dst);
- } else {
- emit_rex_64(dst);
- emit(0xC1);
- emit_modrm(subcode, dst);
- emit(shift_amount.value_);
- }
-}
-
-
-void Assembler::shift(Register dst, int subcode) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xD3);
- emit_modrm(subcode, dst);
-}
-
-
-void Assembler::shift_32(Register dst, int subcode) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xD3);
- emit_modrm(subcode, dst);
-}
-
-
-void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(shift_amount.value_)); // illegal shift count
- if (shift_amount.value_ == 1) {
- emit_optional_rex_32(dst);
- emit(0xD1);
- emit_modrm(subcode, dst);
- } else {
- emit_optional_rex_32(dst);
- emit(0xC1);
- emit_modrm(subcode, dst);
- emit(shift_amount.value_);
- }
-}
-
-
-void Assembler::bt(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0xA3);
- emit_operand(src, dst);
-}
-
-
-void Assembler::bts(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0xAB);
- emit_operand(src, dst);
-}
-
-
-void Assembler::call(Label* L) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- // 1110 1000 #32-bit disp.
- emit(0xE8);
- if (L->is_bound()) {
- int offset = L->pos() - pc_offset() - sizeof(int32_t);
- ASSERT(offset <= 0);
- emitl(offset);
- } else if (L->is_linked()) {
- emitl(L->pos());
- L->link_to(pc_offset() - sizeof(int32_t));
- } else {
- ASSERT(L->is_unused());
- int32_t current = pc_offset();
- emitl(current);
- L->link_to(current);
- }
-}
-
-
-void Assembler::call(Handle<Code> target,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- // 1110 1000 #32-bit disp.
- emit(0xE8);
- emit_code_target(target, rmode, ast_id);
-}
-
-
-void Assembler::call(Register adr) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- // Opcode: FF /2 r64.
- emit_optional_rex_32(adr);
- emit(0xFF);
- emit_modrm(0x2, adr);
-}
-
-
-void Assembler::call(const Operand& op) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- // Opcode: FF /2 m64.
- emit_optional_rex_32(op);
- emit(0xFF);
- emit_operand(0x2, op);
-}
-
-
-// Calls directly to the given address using a relative offset.
-// Should only ever be used in Code objects for calls within the
-// same Code object. Should not be used when generating new code (use labels),
-// but only when patching existing code.
-void Assembler::call(Address target) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- // 1110 1000 #32-bit disp.
- emit(0xE8);
- Address source = pc_ + 4;
- intptr_t displacement = target - source;
- ASSERT(is_int32(displacement));
- emitl(static_cast<int32_t>(displacement));
-}
-
-
-void Assembler::clc() {
- EnsureSpace ensure_space(this);
- emit(0xF8);
-}
-
-void Assembler::cld() {
- EnsureSpace ensure_space(this);
- emit(0xFC);
-}
-
-void Assembler::cdq() {
- EnsureSpace ensure_space(this);
- emit(0x99);
-}
-
-
-void Assembler::cmovq(Condition cc, Register dst, Register src) {
- if (cc == always) {
- movq(dst, src);
- } else if (cc == never) {
- return;
- }
- // No need to check CpuInfo for CMOV support, it's a required part of the
- // 64-bit architecture.
- ASSERT(cc >= 0); // Use mov for unconditional moves.
- EnsureSpace ensure_space(this);
- // Opcode: REX.W 0f 40 + cc /r.
- emit_rex_64(dst, src);
- emit(0x0f);
- emit(0x40 + cc);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
- if (cc == always) {
- movq(dst, src);
- } else if (cc == never) {
- return;
- }
- ASSERT(cc >= 0);
- EnsureSpace ensure_space(this);
- // Opcode: REX.W 0f 40 + cc /r.
- emit_rex_64(dst, src);
- emit(0x0f);
- emit(0x40 + cc);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cmovl(Condition cc, Register dst, Register src) {
- if (cc == always) {
- movl(dst, src);
- } else if (cc == never) {
- return;
- }
- ASSERT(cc >= 0);
- EnsureSpace ensure_space(this);
- // Opcode: 0f 40 + cc /r.
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x40 + cc);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
- if (cc == always) {
- movl(dst, src);
- } else if (cc == never) {
- return;
- }
- ASSERT(cc >= 0);
- EnsureSpace ensure_space(this);
- // Opcode: 0f 40 + cc /r.
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x40 + cc);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cmpb_al(Immediate imm8) {
- ASSERT(is_int8(imm8.value_) || is_uint8(imm8.value_));
- EnsureSpace ensure_space(this);
- emit(0x3c);
- emit(imm8.value_);
-}
-
-
-void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CPUID));
- EnsureSpace ensure_space(this);
- emit(0x0F);
- emit(0xA2);
-}
-
-
-void Assembler::cqo() {
- EnsureSpace ensure_space(this);
- emit_rex_64();
- emit(0x99);
-}
-
-
-void Assembler::decq(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decq(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(1, dst);
-}
-
-
-void Assembler::decl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decl(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_operand(1, dst);
-}
-
-
-void Assembler::decb(Register dst) {
- EnsureSpace ensure_space(this);
- if (!dst.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(dst);
- }
- emit(0xFE);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decb(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFE);
- emit_operand(1, dst);
-}
-
-
-void Assembler::enter(Immediate size) {
- EnsureSpace ensure_space(this);
- emit(0xC8);
- emitw(size.value_); // 16 bit operand, always.
- emit(0);
-}
-
-
-void Assembler::hlt() {
- EnsureSpace ensure_space(this);
- emit(0xF4);
-}
-
-
-void Assembler::idivq(Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x7, src);
-}
-
-
-void Assembler::idivl(Register src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(src);
- emit(0xF7);
- emit_modrm(0x7, src);
-}
-
-
-void Assembler::imul(Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x5, src);
-}
-
-
-void Assembler::imul(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::imul(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imul(Register dst, Register src, Immediate imm) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- if (is_int8(imm.value_)) {
- emit(0x6B);
- emit_modrm(dst, src);
- emit(imm.value_);
- } else {
- emit(0x69);
- emit_modrm(dst, src);
- emitl(imm.value_);
- }
-}
-
-
-void Assembler::imull(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::imull(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imull(Register dst, Register src, Immediate imm) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- if (is_int8(imm.value_)) {
- emit(0x6B);
- emit_modrm(dst, src);
- emit(imm.value_);
- } else {
- emit(0x69);
- emit_modrm(dst, src);
- emitl(imm.value_);
- }
-}
-
-
-void Assembler::incq(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_modrm(0x0, dst);
-}
-
-
-void Assembler::incq(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(0, dst);
-}
-
-
-void Assembler::incl(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_operand(0, dst);
-}
-
-
-void Assembler::incl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_modrm(0, dst);
-}
-
-
-void Assembler::int3() {
- EnsureSpace ensure_space(this);
- emit(0xCC);
-}
-
-
-void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
- if (cc == always) {
- jmp(L);
- return;
- } else if (cc == never) {
- return;
- }
- EnsureSpace ensure_space(this);
- ASSERT(is_uint4(cc));
- if (L->is_bound()) {
- const int short_size = 2;
- const int long_size = 6;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- // Determine whether we can use 1-byte offsets for backwards branches,
- // which have a max range of 128 bytes.
-
- // We also need to check predictable_code_size() flag here, because on x64,
- // when the full code generator recompiles code for debugging, some places
- // need to be padded out to a certain size. The debugger is keeping track of
- // how often it did this so that it can adjust return addresses on the
- // stack, but if the size of jump instructions can also change, that's not
- // enough and the calculated offsets would be incorrect.
- if (is_int8(offs - short_size) && !predictable_code_size()) {
- // 0111 tttn #8-bit disp.
- emit(0x70 | cc);
- emit((offs - short_size) & 0xFF);
- } else {
- // 0000 1111 1000 tttn #32-bit disp.
- emit(0x0F);
- emit(0x80 | cc);
- emitl(offs - long_size);
- }
- } else if (distance == Label::kNear) {
- // 0111 tttn #8-bit disp
- emit(0x70 | cc);
- byte disp = 0x00;
- if (L->is_near_linked()) {
- int offset = L->near_link_pos() - pc_offset();
- ASSERT(is_int8(offset));
- disp = static_cast<byte>(offset & 0xFF);
- }
- L->link_to(pc_offset(), Label::kNear);
- emit(disp);
- } else if (L->is_linked()) {
- // 0000 1111 1000 tttn #32-bit disp.
- emit(0x0F);
- emit(0x80 | cc);
- emitl(L->pos());
- L->link_to(pc_offset() - sizeof(int32_t));
- } else {
- ASSERT(L->is_unused());
- emit(0x0F);
- emit(0x80 | cc);
- int32_t current = pc_offset();
- emitl(current);
- L->link_to(current);
- }
-}
-
-
-void Assembler::j(Condition cc,
- Handle<Code> target,
- RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint4(cc));
- // 0000 1111 1000 tttn #32-bit disp.
- emit(0x0F);
- emit(0x80 | cc);
- emit_code_target(target, rmode);
-}
-
-
-void Assembler::jmp(Label* L, Label::Distance distance) {
- EnsureSpace ensure_space(this);
- const int short_size = sizeof(int8_t);
- const int long_size = sizeof(int32_t);
- if (L->is_bound()) {
- int offs = L->pos() - pc_offset() - 1;
- ASSERT(offs <= 0);
- if (is_int8(offs - short_size) && !predictable_code_size()) {
- // 1110 1011 #8-bit disp.
- emit(0xEB);
- emit((offs - short_size) & 0xFF);
- } else {
- // 1110 1001 #32-bit disp.
- emit(0xE9);
- emitl(offs - long_size);
- }
- } else if (distance == Label::kNear) {
- emit(0xEB);
- byte disp = 0x00;
- if (L->is_near_linked()) {
- int offset = L->near_link_pos() - pc_offset();
- ASSERT(is_int8(offset));
- disp = static_cast<byte>(offset & 0xFF);
- }
- L->link_to(pc_offset(), Label::kNear);
- emit(disp);
- } else if (L->is_linked()) {
- // 1110 1001 #32-bit disp.
- emit(0xE9);
- emitl(L->pos());
- L->link_to(pc_offset() - long_size);
- } else {
- // 1110 1001 #32-bit disp.
- ASSERT(L->is_unused());
- emit(0xE9);
- int32_t current = pc_offset();
- emitl(current);
- L->link_to(current);
- }
-}
-
-
-void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- // 1110 1001 #32-bit disp.
- emit(0xE9);
- emit_code_target(target, rmode);
-}
-
-
-void Assembler::jmp(Register target) {
- EnsureSpace ensure_space(this);
- // Opcode FF/4 r64.
- emit_optional_rex_32(target);
- emit(0xFF);
- emit_modrm(0x4, target);
-}
-
-
-void Assembler::jmp(const Operand& src) {
- EnsureSpace ensure_space(this);
- // Opcode FF/4 m64.
- emit_optional_rex_32(src);
- emit(0xFF);
- emit_operand(0x4, src);
-}
-
-
-void Assembler::lea(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::leal(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
- EnsureSpace ensure_space(this);
- emit(0x48); // REX.W
- emit(0xA1);
- emitq(reinterpret_cast<uintptr_t>(value), mode);
-}
-
-
-void Assembler::load_rax(ExternalReference ref) {
- load_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
-}
-
-
-void Assembler::leave() {
- EnsureSpace ensure_space(this);
- emit(0xC9);
-}
-
-
-void Assembler::movb(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- if (!dst.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(dst, src);
- } else {
- emit_optional_rex_32(dst, src);
- }
- emit(0x8A);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movb(Register dst, Immediate imm) {
- EnsureSpace ensure_space(this);
- if (!dst.is_byte_register()) {
- emit_rex_32(dst);
- }
- emit(0xB0 + dst.low_bits());
- emit(imm.value_);
-}
-
-
-void Assembler::movb(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- if (!src.is_byte_register()) {
- emit_rex_32(src, dst);
- } else {
- emit_optional_rex_32(src, dst);
- }
- emit(0x88);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movw(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(src, dst);
- emit(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
- emit(0x89);
- emit_modrm(src, dst);
- } else {
- emit_optional_rex_32(dst, src);
- emit(0x8B);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::movl(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(src, dst);
- emit(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movl(const Operand& dst, Immediate value) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xC7);
- emit_operand(0x0, dst);
- emit(value);
-}
-
-
-void Assembler::movl(Register dst, Immediate value) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xB8 + dst.low_bits());
- emit(value);
-}
-
-
-void Assembler::movq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
- emit(0x89);
- emit_modrm(src, dst);
- } else {
- emit_rex_64(dst, src);
- emit(0x8B);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::movq(Register dst, Immediate value) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xC7);
- emit_modrm(0x0, dst);
- emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
-}
-
-
-void Assembler::movq(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src, dst);
- emit(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
- // This method must not be used with heap object references. The stored
- // address is not GC safe. Use the handle version instead.
- ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitq(reinterpret_cast<uintptr_t>(value), rmode);
-}
-
-
-void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
- // Non-relocatable values might not need a 64-bit representation.
- if (RelocInfo::IsNone(rmode)) {
- if (is_uint32(value)) {
- movl(dst, Immediate(static_cast<int32_t>(value)));
- return;
- } else if (is_int32(value)) {
- movq(dst, Immediate(static_cast<int32_t>(value)));
- return;
- }
- // Value cannot be represented by 32 bits, so do a full 64 bit immediate
- // value.
- }
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitq(value, rmode);
-}
-
-
-void Assembler::movq(Register dst, ExternalReference ref) {
- int64_t value = reinterpret_cast<int64_t>(ref.address());
- movq(dst, value, RelocInfo::EXTERNAL_REFERENCE);
-}
-
-
-void Assembler::movq(const Operand& dst, Immediate value) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xC7);
- emit_operand(0, dst);
- emit(value);
-}
-
-
-// Loads the ip-relative location of the src label into the target location
-// (as a 32-bit offset sign extended to 64-bit).
-void Assembler::movl(const Operand& dst, Label* src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xC7);
- emit_operand(0, dst);
- if (src->is_bound()) {
- int offset = src->pos() - pc_offset() - sizeof(int32_t);
- ASSERT(offset <= 0);
- emitl(offset);
- } else if (src->is_linked()) {
- emitl(src->pos());
- src->link_to(pc_offset() - sizeof(int32_t));
- } else {
- ASSERT(src->is_unused());
- int32_t current = pc_offset();
- emitl(current);
- src->link_to(current);
- }
-}
-
-
-void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
- // If there is no relocation info, emit the value of the handle efficiently
- // (possibly using less that 8 bytes for the value).
- if (RelocInfo::IsNone(mode)) {
- // There is no possible reason to store a heap pointer without relocation
- // info, so it must be a smi.
- ASSERT(value->IsSmi());
- movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE64);
- } else {
- EnsureSpace ensure_space(this);
- ASSERT(value->IsHeapObject());
- ASSERT(!HEAP->InNewSpace(*value));
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
- }
-}
-
-
-void Assembler::movsxbq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xBE);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movsxwq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xBF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movsxlq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x63);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::movsxlq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x63);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxbq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
- // there is no need to make this a 64 bit operation.
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxbl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB7);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::repmovsb() {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit(0xA4);
-}
-
-
-void Assembler::repmovsw() {
- EnsureSpace ensure_space(this);
- emit(0x66); // Operand size override.
- emit(0xF3);
- emit(0xA4);
-}
-
-
-void Assembler::repmovsl() {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit(0xA5);
-}
-
-
-void Assembler::repmovsq() {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_rex_64();
- emit(0xA5);
-}
-
-
-void Assembler::mul(Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x4, src);
-}
-
-
-void Assembler::neg(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0x3, dst);
-}
-
-
-void Assembler::negl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xF7);
- emit_modrm(0x3, dst);
-}
-
-
-void Assembler::neg(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_operand(3, dst);
-}
-
-
-void Assembler::nop() {
- EnsureSpace ensure_space(this);
- emit(0x90);
-}
-
-
-void Assembler::not_(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0x2, dst);
-}
-
-
-void Assembler::not_(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_operand(2, dst);
-}
-
-
-void Assembler::notl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xF7);
- emit_modrm(0x2, dst);
-}
-
-
-void Assembler::Nop(int n) {
- // The recommended muti-byte sequences of NOP instructions from the Intel 64
- // and IA-32 Architectures Software Developer's Manual.
- //
- // Length Assembly Byte Sequence
- // 2 bytes 66 NOP 66 90H
- // 3 bytes NOP DWORD ptr [EAX] 0F 1F 00H
- // 4 bytes NOP DWORD ptr [EAX + 00H] 0F 1F 40 00H
- // 5 bytes NOP DWORD ptr [EAX + EAX*1 + 00H] 0F 1F 44 00 00H
- // 6 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 00H] 66 0F 1F 44 00 00H
- // 7 bytes NOP DWORD ptr [EAX + 00000000H] 0F 1F 80 00 00 00 00H
- // 8 bytes NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H
- // 9 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 66 0F 1F 84 00 00 00 00
- // 00000000H] 00H
-
- EnsureSpace ensure_space(this);
- while (n > 0) {
- switch (n) {
- case 2:
- emit(0x66);
- case 1:
- emit(0x90);
- return;
- case 3:
- emit(0x0f);
- emit(0x1f);
- emit(0x00);
- return;
- case 4:
- emit(0x0f);
- emit(0x1f);
- emit(0x40);
- emit(0x00);
- return;
- case 6:
- emit(0x66);
- case 5:
- emit(0x0f);
- emit(0x1f);
- emit(0x44);
- emit(0x00);
- emit(0x00);
- return;
- case 7:
- emit(0x0f);
- emit(0x1f);
- emit(0x80);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
- default:
- case 11:
- emit(0x66);
- n--;
- case 10:
- emit(0x66);
- n--;
- case 9:
- emit(0x66);
- n--;
- case 8:
- emit(0x0f);
- emit(0x1f);
- emit(0x84);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- n -= 8;
- }
- }
-}
-
-
-void Assembler::pop(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0x58 | dst.low_bits());
-}
-
-
-void Assembler::pop(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0x8F);
- emit_operand(0, dst);
-}
-
-
-void Assembler::popfq() {
- EnsureSpace ensure_space(this);
- emit(0x9D);
-}
-
-
-void Assembler::push(Register src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(src);
- emit(0x50 | src.low_bits());
-}
-
-
-void Assembler::push(const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(src);
- emit(0xFF);
- emit_operand(6, src);
-}
-
-
-void Assembler::push(Immediate value) {
- EnsureSpace ensure_space(this);
- if (is_int8(value.value_)) {
- emit(0x6A);
- emit(value.value_); // Emit low byte of value.
- } else {
- emit(0x68);
- emitl(value.value_);
- }
-}
-
-
-void Assembler::push_imm32(int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit(0x68);
- emitl(imm32);
-}
-
-
-void Assembler::pushfq() {
- EnsureSpace ensure_space(this);
- emit(0x9C);
-}
-
-
-void Assembler::rdtsc() {
- EnsureSpace ensure_space(this);
- emit(0x0F);
- emit(0x31);
-}
-
-
-void Assembler::ret(int imm16) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint16(imm16));
- if (imm16 == 0) {
- emit(0xC3);
- } else {
- emit(0xC2);
- emit(imm16 & 0xFF);
- emit((imm16 >> 8) & 0xFF);
- }
-}
-
-
-void Assembler::setcc(Condition cc, Register reg) {
- if (cc > last_condition) {
- movb(reg, Immediate(cc == always ? 1 : 0));
- return;
- }
- EnsureSpace ensure_space(this);
- ASSERT(is_uint4(cc));
- if (!reg.is_byte_register()) { // Use x64 byte registers, where different.
- emit_rex_32(reg);
- }
- emit(0x0F);
- emit(0x90 | cc);
- emit_modrm(0x0, reg);
-}
-
-
-void Assembler::shld(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0xA5);
- emit_modrm(src, dst);
-}
-
-
-void Assembler::shrd(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0xAD);
- emit_modrm(src, dst);
-}
-
-
-void Assembler::xchg(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
- Register other = src.is(rax) ? dst : src;
- emit_rex_64(other);
- emit(0x90 | other.low_bits());
- } else if (dst.low_bits() == 4) {
- emit_rex_64(dst, src);
- emit(0x87);
- emit_modrm(dst, src);
- } else {
- emit_rex_64(src, dst);
- emit(0x87);
- emit_modrm(src, dst);
- }
-}
-
-
-void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
- EnsureSpace ensure_space(this);
- emit(0x48); // REX.W
- emit(0xA3);
- emitq(reinterpret_cast<uintptr_t>(dst), mode);
-}
-
-
-void Assembler::store_rax(ExternalReference ref) {
- store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
-}
-
-
-void Assembler::testb(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex_32(src, dst);
- emit(0x84);
- emit_modrm(src, dst);
- } else {
- if (!dst.is_byte_register() || !src.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(dst, src);
- }
- emit(0x84);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testb(Register reg, Immediate mask) {
- ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
- EnsureSpace ensure_space(this);
- if (reg.is(rax)) {
- emit(0xA8);
- emit(mask.value_); // Low byte emitted.
- } else {
- if (!reg.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(reg);
- }
- emit(0xF6);
- emit_modrm(0x0, reg);
- emit(mask.value_); // Low byte emitted.
- }
-}
-
-
-void Assembler::testb(const Operand& op, Immediate mask) {
- ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(rax, op);
- emit(0xF6);
- emit_operand(rax, op); // Operation code 0
- emit(mask.value_); // Low byte emitted.
-}
-
-
-void Assembler::testb(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- if (!reg.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(reg, op);
- } else {
- emit_optional_rex_32(reg, op);
- }
- emit(0x84);
- emit_operand(reg, op);
-}
-
-
-void Assembler::testl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
- emit(0x85);
- emit_modrm(src, dst);
- } else {
- emit_optional_rex_32(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testl(Register reg, Immediate mask) {
- // testl with a mask that fits in the low byte is exactly testb.
- if (is_uint8(mask.value_)) {
- testb(reg, mask);
- return;
- }
- EnsureSpace ensure_space(this);
- if (reg.is(rax)) {
- emit(0xA9);
- emit(mask);
- } else {
- emit_optional_rex_32(rax, reg);
- emit(0xF7);
- emit_modrm(0x0, reg);
- emit(mask);
- }
-}
-
-
-void Assembler::testl(const Operand& op, Immediate mask) {
- // testl with a mask that fits in the low byte is exactly testb.
- if (is_uint8(mask.value_)) {
- testb(op, mask);
- return;
- }
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(rax, op);
- emit(0xF7);
- emit_operand(rax, op); // Operation code 0
- emit(mask);
-}
-
-
-void Assembler::testq(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- emit_rex_64(reg, op);
- emit(0x85);
- emit_operand(reg, op);
-}
-
-
-void Assembler::testq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
- emit(0x85);
- emit_modrm(src, dst);
- } else {
- emit_rex_64(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testq(Register dst, Immediate mask) {
- EnsureSpace ensure_space(this);
- if (dst.is(rax)) {
- emit_rex_64();
- emit(0xA9);
- emit(mask);
- } else {
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0, dst);
- emit(mask);
- }
-}
-
-
-// FPU instructions.
-
-
-void Assembler::fld(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD9, 0xC0, i);
-}
-
-
-void Assembler::fld1() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xE8);
-}
-
-
-void Assembler::fldz() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xEE);
-}
-
-
-void Assembler::fldpi() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xEB);
-}
-
-
-void Assembler::fldln2() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xED);
-}
-
-
-void Assembler::fld_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xD9);
- emit_operand(0, adr);
-}
-
-
-void Assembler::fld_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDD);
- emit_operand(0, adr);
-}
-
-
-void Assembler::fstp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xD9);
- emit_operand(3, adr);
-}
-
-
-void Assembler::fstp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDD);
- emit_operand(3, adr);
-}
-
-
-void Assembler::fstp(int index) {
- ASSERT(is_uint3(index));
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xD8, index);
-}
-
-
-void Assembler::fild_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDB);
- emit_operand(0, adr);
-}
-
-
-void Assembler::fild_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDF);
- emit_operand(5, adr);
-}
-
-
-void Assembler::fistp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDB);
- emit_operand(3, adr);
-}
-
-
-void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDB);
- emit_operand(1, adr);
-}
-
-
-void Assembler::fisttp_d(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDD);
- emit_operand(1, adr);
-}
-
-
-void Assembler::fist_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDB);
- emit_operand(2, adr);
-}
-
-
-void Assembler::fistp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDF);
- emit_operand(7, adr);
-}
-
-
-void Assembler::fabs() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xE1);
-}
-
-
-void Assembler::fchs() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xE0);
-}
-
-
-void Assembler::fcos() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xFF);
-}
-
-
-void Assembler::fsin() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xFE);
-}
-
-
-void Assembler::fptan() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xF2);
-}
-
-
-void Assembler::fyl2x() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xF1);
-}
-
-
-void Assembler::f2xm1() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xF0);
-}
-
-
-void Assembler::fscale() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xFD);
-}
-
-
-void Assembler::fninit() {
- EnsureSpace ensure_space(this);
- emit(0xDB);
- emit(0xE3);
-}
-
-
-void Assembler::fadd(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xC0, i);
-}
-
-
-void Assembler::fsub(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xE8, i);
-}
-
-
-void Assembler::fisub_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDA);
- emit_operand(4, adr);
-}
-
-
-void Assembler::fmul(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xC8, i);
-}
-
-
-void Assembler::fdiv(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xF8, i);
-}
-
-
-void Assembler::faddp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xC0, i);
-}
-
-
-void Assembler::fsubp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xE8, i);
-}
-
-
-void Assembler::fsubrp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xE0, i);
-}
-
-
-void Assembler::fmulp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xC8, i);
-}
-
-
-void Assembler::fdivp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xF8, i);
-}
-
-
-void Assembler::fprem() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xF8);
-}
-
-
-void Assembler::fprem1() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xF5);
-}
-
-
-void Assembler::fxch(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD9, 0xC8, i);
-}
-
-
-void Assembler::fincstp() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xF7);
-}
-
-
-void Assembler::ffree(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xC0, i);
-}
-
-
-void Assembler::ftst() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xE4);
-}
-
-
-void Assembler::fucomp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xE8, i);
-}
-
-
-void Assembler::fucompp() {
- EnsureSpace ensure_space(this);
- emit(0xDA);
- emit(0xE9);
-}
-
-
-void Assembler::fucomi(int i) {
- EnsureSpace ensure_space(this);
- emit(0xDB);
- emit(0xE8 + i);
-}
-
-
-void Assembler::fucomip() {
- EnsureSpace ensure_space(this);
- emit(0xDF);
- emit(0xE9);
-}
-
-
-void Assembler::fcompp() {
- EnsureSpace ensure_space(this);
- emit(0xDE);
- emit(0xD9);
-}
-
-
-void Assembler::fnstsw_ax() {
- EnsureSpace ensure_space(this);
- emit(0xDF);
- emit(0xE0);
-}
-
-
-void Assembler::fwait() {
- EnsureSpace ensure_space(this);
- emit(0x9B);
-}
-
-
-void Assembler::frndint() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xFC);
-}
-
-
-void Assembler::fnclex() {
- EnsureSpace ensure_space(this);
- emit(0xDB);
- emit(0xE2);
-}
-
-
-void Assembler::sahf() {
- // TODO(X64): Test for presence. Not all 64-bit intel CPU's have sahf
- // in 64-bit mode. Test CpuID.
- EnsureSpace ensure_space(this);
- emit(0x9E);
-}
-
-
-void Assembler::emit_farith(int b1, int b2, int i) {
- ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode
- ASSERT(is_uint3(i)); // illegal stack offset
- emit(b1);
- emit(b2 + i);
-}
-
-// SSE 2 operations.
-
-void Assembler::movd(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x6E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movd(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(src, dst);
- emit(0x0F);
- emit(0x7E);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movq(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x6E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movq(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0x7E);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movq(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- if (dst.low_bits() == 4) {
- // Avoid unnecessary SIB byte.
- emit(0xf3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x7e);
- emit_sse_operand(dst, src);
- } else {
- emit(0x66);
- emit_optional_rex_32(src, dst);
- emit(0x0F);
- emit(0xD6);
- emit_sse_operand(src, dst);
- }
-}
-
-void Assembler::movdqa(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0x7F);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movdqa(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x6F);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
- ASSERT(CpuFeatures::IsSupported(SSE4_1));
- ASSERT(is_uint8(imm8));
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x3A);
- emit(0x17);
- emit_sse_operand(dst, src);
- emit(imm8);
-}
-
-
-void Assembler::movsd(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2); // double
- emit_optional_rex_32(src, dst);
- emit(0x0F);
- emit(0x11); // store
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2); // double
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movsd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF2); // double
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movaps(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- // Try to avoid an unnecessary SIB byte.
- emit_optional_rex_32(src, dst);
- emit(0x0F);
- emit(0x29);
- emit_sse_operand(src, dst);
- } else {
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x28);
- emit_sse_operand(dst, src);
- }
-}
-
-
-void Assembler::movapd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- // Try to avoid an unnecessary SIB byte.
- emit(0x66);
- emit_optional_rex_32(src, dst);
- emit(0x0F);
- emit(0x29);
- emit_sse_operand(src, dst);
- } else {
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x28);
- emit_sse_operand(dst, src);
- }
-}
-
-
-void Assembler::movss(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF3); // single
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movss(const Operand& src, XMMRegister dst) {
- EnsureSpace ensure_space(this);
- emit(0xF3); // single
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x11); // store
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvttss2si(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cvttss2si(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvttsd2si(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cvttsd2si(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2si(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2D);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x2D);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::addsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x58);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::addsd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x58);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x59);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::mulsd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x59);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::subsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::divsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::andpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x54);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::orpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x56);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x57);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::xorps(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x57);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x51);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::roundsd(XMMRegister dst, XMMRegister src,
- Assembler::RoundingMode mode) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x3a);
- emit(0x0b);
- emit_sse_operand(dst, src);
- // Mask precision exeption.
- emit(static_cast<byte>(mode) | 0x8);
-}
-
-
-void Assembler::movmskpd(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x50);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movmskps(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x50);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
- Register ireg = { reg.code() };
- emit_operand(ireg, adr);
-}
-
-
-void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
- emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
-}
-
-void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
- emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
-}
-
-void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
- emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
-}
-
-
-void Assembler::db(uint8_t data) {
- EnsureSpace ensure_space(this);
- emit(data);
-}
-
-
-void Assembler::dd(uint32_t data) {
- EnsureSpace ensure_space(this);
- emitl(data);
-}
-
-
-// Relocation information implementations.
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- ASSERT(!RelocInfo::IsNone(rmode));
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
- }
- RelocInfo rinfo(pc_, rmode, data, NULL);
- reloc_info_writer.Write(&rinfo);
-}
-
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg, bool force) {
- if (FLAG_code_comments || force) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::CODE_AGE_SEQUENCE;
-
-
-bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on x64 means that it is a relative 32 bit address, as used
- // by branch instructions.
- return (1 << rmode_) & kApplyMask;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/assembler-x64.h b/src/3rdparty/v8/src/x64/assembler-x64.h
deleted file mode 100644
index 69eeb8e..0000000
--- a/src/3rdparty/v8/src/x64/assembler-x64.h
+++ /dev/null
@@ -1,1678 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-// A lightweight X64 Assembler.
-
-#ifndef V8_X64_ASSEMBLER_X64_H_
-#define V8_X64_ASSEMBLER_X64_H_
-
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// Utility functions
-
-// Test whether a 64-bit value is in a specific range.
-inline bool is_uint32(int64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return static_cast<uint64_t>(x) <= kMaxUInt32;
-}
-
-inline bool is_int32(int64_t x) {
- static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
- return is_uint32(x - kMinInt32);
-}
-
-inline bool uint_is_int32(uint64_t x) {
- static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
- return x <= kMaxInt32;
-}
-
-inline bool is_uint32(uint64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return x <= kMaxUInt32;
-}
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-//
-
-struct Register {
- // The non-allocatable registers are:
- // rsp - stack pointer
- // rbp - frame pointer
- // rsi - context register
- // r10 - fixed scratch register
- // r12 - smi constant register
- // r13 - root register
- static const int kMaxNumAllocatableRegisters = 10;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
- static const int kNumRegisters = 16;
-
- static int ToAllocationIndex(Register reg) {
- return kAllocationIndexByRegisterCode[reg.code()];
- }
-
- static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- Register result = { kRegisterCodeByAllocationIndex[index] };
- return result;
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "rax",
- "rbx",
- "rdx",
- "rcx",
- "rdi",
- "r8",
- "r9",
- "r11",
- "r14",
- "r15"
- };
- return names[index];
- }
-
- static Register from_code(int code) {
- Register r = { code };
- return r;
- }
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- // rax, rbx, rcx and rdx are byte registers, the rest are not.
- bool is_byte_register() const { return code_ <= 3; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- return 1 << code_;
- }
-
- // Return the high bit of the register code as a 0 or 1. Used often
- // when constructing the REX prefix byte.
- int high_bit() const {
- return code_ >> 3;
- }
- // Return the 3 low bits of the register code. Used when encoding registers
- // in modR/M, SIB, and opcode bytes.
- int low_bits() const {
- return code_ & 0x7;
- }
-
- // Unfortunately we can't make this private in a struct when initializing
- // by assignment.
- int code_;
-
- private:
- static const int kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters];
- static const int kAllocationIndexByRegisterCode[kNumRegisters];
-};
-
-const int kRegister_rax_Code = 0;
-const int kRegister_rcx_Code = 1;
-const int kRegister_rdx_Code = 2;
-const int kRegister_rbx_Code = 3;
-const int kRegister_rsp_Code = 4;
-const int kRegister_rbp_Code = 5;
-const int kRegister_rsi_Code = 6;
-const int kRegister_rdi_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_r11_Code = 11;
-const int kRegister_r12_Code = 12;
-const int kRegister_r13_Code = 13;
-const int kRegister_r14_Code = 14;
-const int kRegister_r15_Code = 15;
-const int kRegister_no_reg_Code = -1;
-
-const Register rax = { kRegister_rax_Code };
-const Register rcx = { kRegister_rcx_Code };
-const Register rdx = { kRegister_rdx_Code };
-const Register rbx = { kRegister_rbx_Code };
-const Register rsp = { kRegister_rsp_Code };
-const Register rbp = { kRegister_rbp_Code };
-const Register rsi = { kRegister_rsi_Code };
-const Register rdi = { kRegister_rdi_Code };
-const Register r8 = { kRegister_r8_Code };
-const Register r9 = { kRegister_r9_Code };
-const Register r10 = { kRegister_r10_Code };
-const Register r11 = { kRegister_r11_Code };
-const Register r12 = { kRegister_r12_Code };
-const Register r13 = { kRegister_r13_Code };
-const Register r14 = { kRegister_r14_Code };
-const Register r15 = { kRegister_r15_Code };
-const Register no_reg = { kRegister_no_reg_Code };
-
-
-struct XMMRegister {
- static const int kMaxNumRegisters = 16;
- static const int kMaxNumAllocatableRegisters = 15;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
-
- static int ToAllocationIndex(XMMRegister reg) {
- ASSERT(reg.code() != 0);
- return reg.code() - 1;
- }
-
- static XMMRegister FromAllocationIndex(int index) {
- ASSERT(0 <= index && index < kMaxNumAllocatableRegisters);
- XMMRegister result = { index + 1 };
- return result;
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "xmm1",
- "xmm2",
- "xmm3",
- "xmm4",
- "xmm5",
- "xmm6",
- "xmm7",
- "xmm8",
- "xmm9",
- "xmm10",
- "xmm11",
- "xmm12",
- "xmm13",
- "xmm14",
- "xmm15"
- };
- return names[index];
- }
-
- static XMMRegister from_code(int code) {
- ASSERT(code >= 0);
- ASSERT(code < kMaxNumRegisters);
- XMMRegister r = { code };
- return r;
- }
- bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
-
- // Return the high bit of the register code as a 0 or 1. Used often
- // when constructing the REX prefix byte.
- int high_bit() const {
- return code_ >> 3;
- }
- // Return the 3 low bits of the register code. Used when encoding registers
- // in modR/M, SIB, and opcode bytes.
- int low_bits() const {
- return code_ & 0x7;
- }
-
- int code_;
-};
-
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
-const XMMRegister xmm8 = { 8 };
-const XMMRegister xmm9 = { 9 };
-const XMMRegister xmm10 = { 10 };
-const XMMRegister xmm11 = { 11 };
-const XMMRegister xmm12 = { 12 };
-const XMMRegister xmm13 = { 13 };
-const XMMRegister xmm14 = { 14 };
-const XMMRegister xmm15 = { 15 };
-
-
-typedef XMMRegister DoubleRegister;
-
-
-enum Condition {
- // any value < 0 is considered no_condition
- no_condition = -1,
-
- overflow = 0,
- no_overflow = 1,
- below = 2,
- above_equal = 3,
- equal = 4,
- not_equal = 5,
- below_equal = 6,
- above = 7,
- negative = 8,
- positive = 9,
- parity_even = 10,
- parity_odd = 11,
- less = 12,
- greater_equal = 13,
- less_equal = 14,
- greater = 15,
-
- // Fake conditions that are handled by the
- // opcodes using them.
- always = 16,
- never = 17,
- // aliases
- carry = below,
- not_carry = above_equal,
- zero = equal,
- not_zero = not_equal,
- sign = negative,
- not_sign = positive,
- last_condition = greater
-};
-
-
-// Returns the equivalent of !cc.
-// Negation of the default no_condition (-1) results in a non-default
-// no_condition value (-2). As long as tests for no_condition check
-// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc) {
- return static_cast<Condition>(cc ^ 1);
-}
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case below:
- return above;
- case above:
- return below;
- case above_equal:
- return below_equal;
- case below_equal:
- return above_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- };
-}
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Immediates
-
-class Immediate BASE_EMBEDDED {
- public:
- explicit Immediate(int32_t value) : value_(value) {}
-
- private:
- int32_t value_;
-
- friend class Assembler;
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-enum ScaleFactor {
- times_1 = 0,
- times_2 = 1,
- times_4 = 2,
- times_8 = 3,
- times_int_size = times_4,
- times_pointer_size = times_8
-};
-
-
-class Operand BASE_EMBEDDED {
- public:
- // [base + disp/r]
- Operand(Register base, int32_t disp);
-
- // [base + index*scale + disp/r]
- Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp);
-
- // [index*scale + disp/r]
- Operand(Register index,
- ScaleFactor scale,
- int32_t disp);
-
- // Offset from existing memory operand.
- // Offset is added to existing displacement as 32-bit signed values and
- // this must not overflow.
- Operand(const Operand& base, int32_t offset);
-
- // Checks whether either base or index register is the given register.
- // Does not check the "reg" part of the Operand.
- bool AddressUsesRegister(Register reg) const;
-
- // Queries related to the size of the generated instruction.
- // Whether the generated instruction will have a REX prefix.
- bool requires_rex() const { return rex_ != 0; }
- // Size of the ModR/M, SIB and displacement parts of the generated
- // instruction.
- int operand_size() const { return len_; }
-
- private:
- byte rex_;
- byte buf_[6];
- // The number of bytes of buf_ in use.
- byte len_;
-
- // Set the ModR/M byte without an encoded 'reg' register. The
- // register is encoded later as part of the emit_operand operation.
- // set_modrm can be called before or after set_sib and set_disp*.
- inline void set_modrm(int mod, Register rm);
-
- // Set the SIB byte if one is needed. Sets the length to 2 rather than 1.
- inline void set_sib(ScaleFactor scale, Register index, Register base);
-
- // Adds operand displacement fields (offsets added to the memory address).
- // Needs to be called after set_sib, not before it.
- inline void set_disp8(int disp);
- inline void set_disp32(int disp);
-
- friend class Assembler;
-};
-
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-// Example:
-// if (CpuFeatures::IsSupported(SSE3)) {
-// CpuFeatures::Scope fscope(SSE3);
-// // Generate SSE3 floating point code.
-// } else {
-// // Generate standard x87 or SSE2 floating point code.
-// }
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == SSE2 && !FLAG_enable_sse2) return false;
- if (f == SSE3 && !FLAG_enable_sse3) return false;
- if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
- if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == RDTSC && !FLAG_enable_rdtsc) return false;
- if (f == SAHF && !FLAG_enable_sahf) return false;
- return (supported_ & (V8_UINT64_C(1) << f)) != 0;
- }
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- uint64_t enabled = isolate->enabled_cpu_features();
- return (enabled & (V8_UINT64_C(1) << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- uint64_t mask = V8_UINT64_C(1) << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = isolate_->enabled_cpu_features();
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- uint64_t old_enabled_;
-#else
-
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- private:
- // Safe defaults include SSE2 and CMOV for X64. It is always available, if
- // anyone checks, but they shouldn't need to check.
- // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
- // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
- static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
-
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static uint64_t supported_;
- static uint64_t found_by_runtime_probing_;
-
- friend class ExternalReference;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
-class Assembler : public AssemblerBase {
- private:
- // We check before assembling an instruction that there is sufficient
- // space to write an instruction and its relocation information.
- // The relocation writer's position must be kGap bytes above the end of
- // the generated instructions. This leaves enough space for the
- // longest possible x64 instruction, 15 bytes, and the longest possible
- // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
- // (There is a 15 byte limit on x64 instruction length that rules out some
- // otherwise valid instructions.)
- // This allows for a single, fast space check per instruction.
- static const int kGap = 32;
-
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~Assembler() { }
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Read/Modify the code target in the relative branch/call instruction at pc.
- // On the x64 architecture, we use relative jumps with a 32-bit displacement
- // to jump to other Code objects in the Code space in the heap.
- // Jumps to C functions are done indirectly through a 64-bit register holding
- // the absolute address of the target.
- // These functions convert between absolute Addresses of Code objects and
- // the relative displacements stored in the code.
- static inline Address target_address_at(Address pc);
- static inline void set_target_address_at(Address pc, Address target);
-
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- static inline Address target_address_from_return_address(Address pc);
-
- // This sets the branch destination (which is in the instruction on x64).
- // This is for calls and branches within generated code.
- inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
- // This sets the branch destination (which is a load instruction on x64).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- *reinterpret_cast<Address*>(instruction_payload) = target;
- }
-
- inline Handle<Object> code_target_object_handle_at(Address pc);
- // Number of bytes taken up by the branch target in the code.
- static const int kSpecialTargetSize = 4; // Use 32-bit displacement.
- // Distance between the address of the code target in the call instruction
- // and the return address pushed on the stack.
- static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement.
- // Distance between the start of the JS return sequence and where the
- // 32-bit displacement of a near call would be, relative to the pushed
- // return address. TODO: Use return sequence length instead.
- // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
- static const int kPatchReturnSequenceAddressOffset = 13 - 4;
- // Distance between start of patched debug break slot and where the
- // 32-bit displacement of a near call would be, relative to the pushed
- // return address. TODO: Use return sequence length instead.
- // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
- static const int kPatchDebugBreakSlotAddressOffset = 13 - 4;
- // TODO(X64): Rename this, removing the "Real", after changing the above.
- static const int kRealPatchReturnSequenceAddressOffset = 2;
-
- // Some x64 JS code is padded with int3 to make it large
- // enough to hold an instruction when the debugger patches it.
- static const int kJumpInstructionLength = 13;
- static const int kCallInstructionLength = 13;
- static const int kJSReturnSequenceLength = 13;
- static const int kShortCallInstructionLength = 5;
- static const int kPatchDebugBreakSlotReturnOffset = 4;
-
- // The debug break slot must be able to contain a call instruction.
- static const int kDebugBreakSlotLength = kCallInstructionLength;
-
- // One byte opcode for test eax,0xXXXXXXXX.
- static const byte kTestEaxByte = 0xA9;
- // One byte opcode for test al, 0xXX.
- static const byte kTestAlByte = 0xA8;
- // One byte opcode for nop.
- static const byte kNopByte = 0x90;
-
- // One byte prefix for a short conditional jump.
- static const byte kJccShortPrefix = 0x70;
- static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
- static const byte kJcShortOpcode = kJccShortPrefix | carry;
- static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
- static const byte kJzShortOpcode = kJccShortPrefix | zero;
-
-
- // ---------------------------------------------------------------------------
- // Code generation
- //
- // Function names correspond one-to-one to x64 instruction mnemonics.
- // Unless specified otherwise, instructions operate on 64-bit operands.
- //
- // If we need versions of an assembly instruction that operate on different
- // width arguments, we add a single-letter suffix specifying the width.
- // This is done for the following instructions: mov, cmp, inc, dec,
- // add, sub, and test.
- // There are no versions of these instructions without the suffix.
- // - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
- // - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
- // - Instructions on 32-bit (doubleword) operands/registers use 'l'.
- // - Instructions on 64-bit (quadword) operands/registers use 'q'.
- //
- // Some mnemonics, such as "and", are the same as C++ keywords.
- // Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m, where m must be a power of 2.
- void Align(int m);
- void Nop(int bytes = 1);
- // Aligns code to something that's optimal for a jump target for the platform.
- void CodeTargetAlign();
-
- // Stack
- void pushfq();
- void popfq();
-
- void push(Immediate value);
- // Push a 32 bit integer, and guarantee that it is actually pushed as a
- // 32 bit value, the normal push will optimize the 8 bit case.
- void push_imm32(int32_t imm32);
- void push(Register src);
- void push(const Operand& src);
-
- void pop(Register dst);
- void pop(const Operand& dst);
-
- void enter(Immediate size);
- void leave();
-
- // Moves
- void movb(Register dst, const Operand& src);
- void movb(Register dst, Immediate imm);
- void movb(const Operand& dst, Register src);
-
- // Move the low 16 bits of a 64-bit register value to a 16-bit
- // memory location.
- void movw(const Operand& dst, Register src);
-
- void movl(Register dst, Register src);
- void movl(Register dst, const Operand& src);
- void movl(const Operand& dst, Register src);
- void movl(const Operand& dst, Immediate imm);
- // Load a 32-bit immediate value, zero-extended to 64 bits.
- void movl(Register dst, Immediate imm32);
-
- // Move 64 bit register value to 64-bit memory location.
- void movq(const Operand& dst, Register src);
- // Move 64 bit memory location to 64-bit register value.
- void movq(Register dst, const Operand& src);
- void movq(Register dst, Register src);
- // Sign extends immediate 32-bit value to 64 bits.
- void movq(Register dst, Immediate x);
- // Move the offset of the label location relative to the current
- // position (after the move) to the destination.
- void movl(const Operand& dst, Label* src);
-
- // Move sign extended immediate to memory location.
- void movq(const Operand& dst, Immediate value);
- // Instructions to load a 64-bit immediate into a register.
- // All 64-bit immediates must have a relocation mode.
- void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
- void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
- void movq(Register dst, const char* s, RelocInfo::Mode rmode);
- // Moves the address of the external reference into the register.
- void movq(Register dst, ExternalReference ext);
- void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
-
- void movsxbq(Register dst, const Operand& src);
- void movsxwq(Register dst, const Operand& src);
- void movsxlq(Register dst, Register src);
- void movsxlq(Register dst, const Operand& src);
- void movzxbq(Register dst, const Operand& src);
- void movzxbl(Register dst, const Operand& src);
- void movzxwq(Register dst, const Operand& src);
- void movzxwl(Register dst, const Operand& src);
- void movzxwl(Register dst, Register src);
-
- // Repeated moves.
-
- void repmovsb();
- void repmovsw();
- void repmovsl();
- void repmovsq();
-
- // Instruction to load from an immediate 64-bit pointer into RAX.
- void load_rax(void* ptr, RelocInfo::Mode rmode);
- void load_rax(ExternalReference ext);
-
- // Conditional moves.
- void cmovq(Condition cc, Register dst, Register src);
- void cmovq(Condition cc, Register dst, const Operand& src);
- void cmovl(Condition cc, Register dst, Register src);
- void cmovl(Condition cc, Register dst, const Operand& src);
-
- // Exchange two registers
- void xchg(Register dst, Register src);
-
- // Arithmetics
- void addl(Register dst, Register src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(Register dst, const Operand& src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(const Operand& dst, Register src) {
- arithmetic_op_32(0x01, src, dst);
- }
-
- void addq(Register dst, Register src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(Register dst, const Operand& src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(const Operand& dst, Register src) {
- arithmetic_op(0x01, src, dst);
- }
-
- void addq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void addq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void sbbl(Register dst, Register src) {
- arithmetic_op_32(0x1b, dst, src);
- }
-
- void sbbq(Register dst, Register src) {
- arithmetic_op(0x1b, dst, src);
- }
-
- void cmpb(Register dst, Immediate src) {
- immediate_arithmetic_op_8(0x7, dst, src);
- }
-
- void cmpb_al(Immediate src);
-
- void cmpb(Register dst, Register src) {
- arithmetic_op(0x3A, dst, src);
- }
-
- void cmpb(Register dst, const Operand& src) {
- arithmetic_op(0x3A, dst, src);
- }
-
- void cmpb(const Operand& dst, Register src) {
- arithmetic_op(0x38, src, dst);
- }
-
- void cmpb(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_8(0x7, dst, src);
- }
-
- void cmpw(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_16(0x7, dst, src);
- }
-
- void cmpw(Register dst, Immediate src) {
- immediate_arithmetic_op_16(0x7, dst, src);
- }
-
- void cmpw(Register dst, const Operand& src) {
- arithmetic_op_16(0x3B, dst, src);
- }
-
- void cmpw(Register dst, Register src) {
- arithmetic_op_16(0x3B, dst, src);
- }
-
- void cmpw(const Operand& dst, Register src) {
- arithmetic_op_16(0x39, src, dst);
- }
-
- void cmpl(Register dst, Register src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(Register dst, const Operand& src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(const Operand& dst, Register src) {
- arithmetic_op_32(0x39, src, dst);
- }
-
- void cmpl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpq(Register dst, Register src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(Register dst, const Operand& src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(const Operand& dst, Register src) {
- arithmetic_op(0x39, src, dst);
- }
-
- void cmpq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void cmpq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void and_(Register dst, Register src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(Register dst, const Operand& src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(const Operand& dst, Register src) {
- arithmetic_op(0x21, src, dst);
- }
-
- void and_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void and_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void andl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x4, dst, src);
- }
-
- void andl(Register dst, Register src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
- void andl(Register dst, const Operand& src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
- void andb(Register dst, Immediate src) {
- immediate_arithmetic_op_8(0x4, dst, src);
- }
-
- void decq(Register dst);
- void decq(const Operand& dst);
- void decl(Register dst);
- void decl(const Operand& dst);
- void decb(Register dst);
- void decb(const Operand& dst);
-
- // Sign-extends rax into rdx:rax.
- void cqo();
- // Sign-extends eax into edx:eax.
- void cdq();
-
- // Divide rdx:rax by src. Quotient in rax, remainder in rdx.
- void idivq(Register src);
- // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx.
- void idivl(Register src);
-
- // Signed multiply instructions.
- void imul(Register src); // rdx:rax = rax * src.
- void imul(Register dst, Register src); // dst = dst * src.
- void imul(Register dst, const Operand& src); // dst = dst * src.
- void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
- // Signed 32-bit multiply instructions.
- void imull(Register dst, Register src); // dst = dst * src.
- void imull(Register dst, const Operand& src); // dst = dst * src.
- void imull(Register dst, Register src, Immediate imm); // dst = src * imm.
-
- void incq(Register dst);
- void incq(const Operand& dst);
- void incl(Register dst);
- void incl(const Operand& dst);
-
- void lea(Register dst, const Operand& src);
- void leal(Register dst, const Operand& src);
-
- // Multiply rax by src, put the result in rdx:rax.
- void mul(Register src);
-
- void neg(Register dst);
- void neg(const Operand& dst);
- void negl(Register dst);
-
- void not_(Register dst);
- void not_(const Operand& dst);
- void notl(Register dst);
-
- void or_(Register dst, Register src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, Register src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(Register dst, const Operand& src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, const Operand& src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(const Operand& dst, Register src) {
- arithmetic_op(0x09, src, dst);
- }
-
- void or_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
- void or_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
-
- void rcl(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x2);
- }
-
- void rol(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x0);
- }
-
- void rcr(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x3);
- }
-
- void ror(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x1);
- }
-
- void rorl(Register dst, Immediate imm8) {
- shift_32(dst, imm8, 0x1);
- }
-
- void rorl_cl(Register dst) {
- shift_32(dst, 0x1);
- }
-
- // Shifts dst:src left by cl bits, affecting only dst.
- void shld(Register dst, Register src);
-
- // Shifts src:dst right by cl bits, affecting only dst.
- void shrd(Register dst, Register src);
-
- // Shifts dst right, duplicating sign bit, by shift_amount bits.
- // Shifting by 1 is handled efficiently.
- void sar(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by shift_amount bits.
- // Shifting by 1 is handled efficiently.
- void sarl(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sar_cl(Register dst) {
- shift(dst, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sarl_cl(Register dst) {
- shift_32(dst, 0x7);
- }
-
- void shl(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x4);
- }
-
- void shl_cl(Register dst) {
- shift(dst, 0x4);
- }
-
- void shll_cl(Register dst) {
- shift_32(dst, 0x4);
- }
-
- void shll(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x4);
- }
-
- void shr(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x5);
- }
-
- void shr_cl(Register dst) {
- shift(dst, 0x5);
- }
-
- void shrl_cl(Register dst) {
- shift_32(dst, 0x5);
- }
-
- void shrl(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x5);
- }
-
- void store_rax(void* dst, RelocInfo::Mode mode);
- void store_rax(ExternalReference ref);
-
- void subq(Register dst, Register src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(Register dst, const Operand& src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(const Operand& dst, Register src) {
- arithmetic_op(0x29, src, dst);
- }
-
- void subq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subl(Register dst, Register src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(Register dst, const Operand& src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
- void subl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
- void subb(Register dst, Immediate src) {
- immediate_arithmetic_op_8(0x5, dst, src);
- }
-
- void testb(Register dst, Register src);
- void testb(Register reg, Immediate mask);
- void testb(const Operand& op, Immediate mask);
- void testb(const Operand& op, Register reg);
- void testl(Register dst, Register src);
- void testl(Register reg, Immediate mask);
- void testl(const Operand& op, Immediate mask);
- void testq(const Operand& op, Register reg);
- void testq(Register dst, Register src);
- void testq(Register dst, Immediate mask);
-
- void xor_(Register dst, Register src) {
- if (dst.code() == src.code()) {
- arithmetic_op_32(0x33, dst, src);
- } else {
- arithmetic_op(0x33, dst, src);
- }
- }
-
- void xorl(Register dst, Register src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, const Operand& src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xorl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xor_(Register dst, const Operand& src) {
- arithmetic_op(0x33, dst, src);
- }
-
- void xor_(const Operand& dst, Register src) {
- arithmetic_op(0x31, src, dst);
- }
-
- void xor_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
-
- void xor_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
-
- // Bit operations.
- void bt(const Operand& dst, Register src);
- void bts(const Operand& dst, Register src);
-
- // Miscellaneous
- void clc();
- void cld();
- void cpuid();
- void hlt();
- void int3();
- void nop();
- void rdtsc();
- void ret(int imm16);
- void setcc(Condition cc, Register reg);
-
- // Label operations & relative jumps (PPUM Appendix D)
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
-
- void bind(Label* L); // binds an unbound label L to the current code position
-
- // Calls
- // Call near relative 32-bit displacement, relative to next instruction.
- void call(Label* L);
- void call(Handle<Code> target,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // Calls directly to the given address using a relative offset.
- // Should only ever be used in Code objects for calls within the
- // same Code object. Should not be used when generating new code (use labels),
- // but only when patching existing code.
- void call(Address target);
-
- // Call near absolute indirect, address in register
- void call(Register adr);
-
- // Call near indirect
- void call(const Operand& operand);
-
- // Jumps
- // Jump short or near relative.
- // Use a 32-bit signed displacement.
- // Unconditional jump to L
- void jmp(Label* L, Label::Distance distance = Label::kFar);
- void jmp(Handle<Code> target, RelocInfo::Mode rmode);
-
- // Jump near absolute indirect (r64)
- void jmp(Register adr);
-
- // Jump near absolute indirect (m64)
- void jmp(const Operand& src);
-
- // Conditional jumps
- void j(Condition cc,
- Label* L,
- Label::Distance distance = Label::kFar);
- void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
-
- // Floating-point operations
- void fld(int i);
-
- void fld1();
- void fldz();
- void fldpi();
- void fldln2();
-
- void fld_s(const Operand& adr);
- void fld_d(const Operand& adr);
-
- void fstp_s(const Operand& adr);
- void fstp_d(const Operand& adr);
- void fstp(int index);
-
- void fild_s(const Operand& adr);
- void fild_d(const Operand& adr);
-
- void fist_s(const Operand& adr);
-
- void fistp_s(const Operand& adr);
- void fistp_d(const Operand& adr);
-
- void fisttp_s(const Operand& adr);
- void fisttp_d(const Operand& adr);
-
- void fabs();
- void fchs();
-
- void fadd(int i);
- void fsub(int i);
- void fmul(int i);
- void fdiv(int i);
-
- void fisub_s(const Operand& adr);
-
- void faddp(int i = 1);
- void fsubp(int i = 1);
- void fsubrp(int i = 1);
- void fmulp(int i = 1);
- void fdivp(int i = 1);
- void fprem();
- void fprem1();
-
- void fxch(int i = 1);
- void fincstp();
- void ffree(int i = 0);
-
- void ftst();
- void fucomp(int i);
- void fucompp();
- void fucomi(int i);
- void fucomip();
-
- void fcompp();
- void fnstsw_ax();
- void fwait();
- void fnclex();
-
- void fsin();
- void fcos();
- void fptan();
- void fyl2x();
- void f2xm1();
- void fscale();
- void fninit();
-
- void frndint();
-
- void sahf();
-
- // SSE2 instructions
- void movd(XMMRegister dst, Register src);
- void movd(Register dst, XMMRegister src);
- void movq(XMMRegister dst, Register src);
- void movq(Register dst, XMMRegister src);
- void movq(XMMRegister dst, XMMRegister src);
- void extractps(Register dst, XMMRegister src, byte imm8);
-
- // Don't use this unless it's important to keep the
- // top half of the destination register unchanged.
- // Used movaps when moving double values and movq for integer
- // values in xmm registers.
- void movsd(XMMRegister dst, XMMRegister src);
-
- void movsd(const Operand& dst, XMMRegister src);
- void movsd(XMMRegister dst, const Operand& src);
-
- void movdqa(const Operand& dst, XMMRegister src);
- void movdqa(XMMRegister dst, const Operand& src);
-
- void movapd(XMMRegister dst, XMMRegister src);
- void movaps(XMMRegister dst, XMMRegister src);
-
- void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& dst, XMMRegister src);
-
- void cvttss2si(Register dst, const Operand& src);
- void cvttss2si(Register dst, XMMRegister src);
- void cvttsd2si(Register dst, const Operand& src);
- void cvttsd2si(Register dst, XMMRegister src);
- void cvttsd2siq(Register dst, XMMRegister src);
-
- void cvtlsi2sd(XMMRegister dst, const Operand& src);
- void cvtlsi2sd(XMMRegister dst, Register src);
- void cvtqsi2sd(XMMRegister dst, const Operand& src);
- void cvtqsi2sd(XMMRegister dst, Register src);
-
- void cvtlsi2ss(XMMRegister dst, Register src);
-
- void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtss2sd(XMMRegister dst, const Operand& src);
- void cvtsd2ss(XMMRegister dst, XMMRegister src);
-
- void cvtsd2si(Register dst, XMMRegister src);
- void cvtsd2siq(Register dst, XMMRegister src);
-
- void addsd(XMMRegister dst, XMMRegister src);
- void addsd(XMMRegister dst, const Operand& src);
- void subsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, const Operand& src);
- void divsd(XMMRegister dst, XMMRegister src);
-
- void andpd(XMMRegister dst, XMMRegister src);
- void orpd(XMMRegister dst, XMMRegister src);
- void xorpd(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, XMMRegister src);
- void sqrtsd(XMMRegister dst, XMMRegister src);
-
- void ucomisd(XMMRegister dst, XMMRegister src);
- void ucomisd(XMMRegister dst, const Operand& src);
-
- enum RoundingMode {
- kRoundToNearest = 0x0,
- kRoundDown = 0x1,
- kRoundUp = 0x2,
- kRoundToZero = 0x3
- };
-
- void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
-
- void movmskpd(Register dst, XMMRegister src);
- void movmskps(Register dst, XMMRegister src);
-
- // The first argument is the reg field, the second argument is the r/m field.
- void emit_sse_operand(XMMRegister dst, XMMRegister src);
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
- void emit_sse_operand(XMMRegister dst, Register src);
- void emit_sse_operand(Register dst, XMMRegister src);
-
- // Debugging
- void Print();
-
- // Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* label) {
- return pc_offset() - label->pos();
- }
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot();
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg, bool force = false);
-
- // Writes a single word of data in the code stream.
- // Used for inline tables, e.g., jump-tables.
- void db(uint8_t data);
- void dd(uint32_t data);
-
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
-
- // Check if there is less than kGap bytes available in the buffer.
- // If this is the case, we need to grow the buffer before emitting
- // an instruction or relocation information.
- inline bool buffer_overflow() const {
- return pc_ >= reloc_info_writer.pos() - kGap;
- }
-
- // Get the number of bytes available in the buffer.
- inline int available_space() const {
- return static_cast<int>(reloc_info_writer.pos() - pc_);
- }
-
- static bool IsNop(Address addr);
-
- // Avoid overflows for displacements etc.
- static const int kMaximalBufferSize = 512*MB;
-
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
-
- private:
- byte* addr_at(int pos) { return buffer_ + pos; }
- uint32_t long_at(int pos) {
- return *reinterpret_cast<uint32_t*>(addr_at(pos));
- }
- void long_at_put(int pos, uint32_t x) {
- *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
- }
-
- // code emission
- void GrowBuffer();
-
- void emit(byte x) { *pc_++ = x; }
- inline void emitl(uint32_t x);
- inline void emitq(uint64_t x, RelocInfo::Mode rmode);
- inline void emitw(uint16_t x);
- inline void emit_code_target(Handle<Code> target,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
- void emit(Immediate x) { emitl(x.value_); }
-
- // Emits a REX prefix that encodes a 64-bit operand size and
- // the top bit of both register codes.
- // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
- // REX.W is set.
- inline void emit_rex_64(XMMRegister reg, Register rm_reg);
- inline void emit_rex_64(Register reg, XMMRegister rm_reg);
- inline void emit_rex_64(Register reg, Register rm_reg);
-
- // Emits a REX prefix that encodes a 64-bit operand size and
- // the top bit of the destination, index, and base register codes.
- // The high bit of reg is used for REX.R, the high bit of op's base
- // register is used for REX.B, and the high bit of op's index register
- // is used for REX.X. REX.W is set.
- inline void emit_rex_64(Register reg, const Operand& op);
- inline void emit_rex_64(XMMRegister reg, const Operand& op);
-
- // Emits a REX prefix that encodes a 64-bit operand size and
- // the top bit of the register code.
- // The high bit of register is used for REX.B.
- // REX.W is set and REX.R and REX.X are clear.
- inline void emit_rex_64(Register rm_reg);
-
- // Emits a REX prefix that encodes a 64-bit operand size and
- // the top bit of the index and base register codes.
- // The high bit of op's base register is used for REX.B, and the high
- // bit of op's index register is used for REX.X.
- // REX.W is set and REX.R clear.
- inline void emit_rex_64(const Operand& op);
-
- // Emit a REX prefix that only sets REX.W to choose a 64-bit operand size.
- void emit_rex_64() { emit(0x48); }
-
- // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
- // REX.W is clear.
- inline void emit_rex_32(Register reg, Register rm_reg);
-
- // The high bit of reg is used for REX.R, the high bit of op's base
- // register is used for REX.B, and the high bit of op's index register
- // is used for REX.X. REX.W is cleared.
- inline void emit_rex_32(Register reg, const Operand& op);
-
- // High bit of rm_reg goes to REX.B.
- // REX.W, REX.R and REX.X are clear.
- inline void emit_rex_32(Register rm_reg);
-
- // High bit of base goes to REX.B and high bit of index to REX.X.
- // REX.W and REX.R are clear.
- inline void emit_rex_32(const Operand& op);
-
- // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
- // REX.W is cleared. If no REX bits are set, no byte is emitted.
- inline void emit_optional_rex_32(Register reg, Register rm_reg);
-
- // The high bit of reg is used for REX.R, the high bit of op's base
- // register is used for REX.B, and the high bit of op's index register
- // is used for REX.X. REX.W is cleared. If no REX bits are set, nothing
- // is emitted.
- inline void emit_optional_rex_32(Register reg, const Operand& op);
-
- // As for emit_optional_rex_32(Register, Register), except that
- // the registers are XMM registers.
- inline void emit_optional_rex_32(XMMRegister reg, XMMRegister base);
-
- // As for emit_optional_rex_32(Register, Register), except that
- // one of the registers is an XMM registers.
- inline void emit_optional_rex_32(XMMRegister reg, Register base);
-
- // As for emit_optional_rex_32(Register, Register), except that
- // one of the registers is an XMM registers.
- inline void emit_optional_rex_32(Register reg, XMMRegister base);
-
- // As for emit_optional_rex_32(Register, const Operand&), except that
- // the register is an XMM register.
- inline void emit_optional_rex_32(XMMRegister reg, const Operand& op);
-
- // Optionally do as emit_rex_32(Register) if the register number has
- // the high bit set.
- inline void emit_optional_rex_32(Register rm_reg);
-
- // Optionally do as emit_rex_32(const Operand&) if the operand register
- // numbers have a high bit set.
- inline void emit_optional_rex_32(const Operand& op);
-
-
- // Emit the ModR/M byte, and optionally the SIB byte and
- // 1- or 4-byte offset for a memory operand. Also encodes
- // the second operand of the operation, a register or operation
- // subcode, into the reg field of the ModR/M byte.
- void emit_operand(Register reg, const Operand& adr) {
- emit_operand(reg.low_bits(), adr);
- }
-
- // Emit the ModR/M byte, and optionally the SIB byte and
- // 1- or 4-byte offset for a memory operand. Also used to encode
- // a three-bit opcode extension into the ModR/M byte.
- void emit_operand(int rm, const Operand& adr);
-
- // Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
- void emit_modrm(Register reg, Register rm_reg) {
- emit(0xC0 | reg.low_bits() << 3 | rm_reg.low_bits());
- }
-
- // Emit a ModR/M byte with an operation subcode in the reg field and
- // a register in the rm_reg field.
- void emit_modrm(int code, Register rm_reg) {
- ASSERT(is_uint3(code));
- emit(0xC0 | code << 3 | rm_reg.low_bits());
- }
-
- // Emit the code-object-relative offset of the label's position
- inline void emit_code_relative_offset(Label* label);
-
- // Emit machine code for one of the operations ADD, ADC, SUB, SBC,
- // AND, OR, XOR, or CMP. The encodings of these operations are all
- // similar, differing just in the opcode or in the reg field of the
- // ModR/M byte.
- void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op_16(byte opcode, Register reg, const Operand& rm_reg);
- void arithmetic_op_32(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg);
- void arithmetic_op(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg);
- void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
- void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
- // Operate on a byte in memory or register.
- void immediate_arithmetic_op_8(byte subcode,
- Register dst,
- Immediate src);
- void immediate_arithmetic_op_8(byte subcode,
- const Operand& dst,
- Immediate src);
- // Operate on a word in memory or register.
- void immediate_arithmetic_op_16(byte subcode,
- Register dst,
- Immediate src);
- void immediate_arithmetic_op_16(byte subcode,
- const Operand& dst,
- Immediate src);
- // Operate on a 32-bit word in memory or register.
- void immediate_arithmetic_op_32(byte subcode,
- Register dst,
- Immediate src);
- void immediate_arithmetic_op_32(byte subcode,
- const Operand& dst,
- Immediate src);
-
- // Emit machine code for a shift operation.
- void shift(Register dst, Immediate shift_amount, int subcode);
- void shift_32(Register dst, Immediate shift_amount, int subcode);
- // Shift dst by cl % 64 bits.
- void shift(Register dst, int subcode);
- void shift_32(Register dst, int subcode);
-
- void emit_farith(int b1, int b2, int i);
-
- // labels
- // void print(Label* L);
- void bind_to(Label* L, int pos);
-
- // record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
- friend class CodePatcher;
- friend class EnsureSpace;
- friend class RegExpMacroAssemblerX64;
-
- // code generation
- RelocInfoWriter reloc_info_writer;
-
- List< Handle<Code> > code_targets_;
-
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
-};
-
-
-// Helper class that ensures that there is enough space for generating
-// instructions and relocation information. The constructor makes
-// sure that there is enough space and (in debug mode) the destructor
-// checks that we did not generate too much.
-class EnsureSpace BASE_EMBEDDED {
- public:
- explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
-#ifdef DEBUG
- space_before_ = assembler_->available_space();
-#endif
- }
-
-#ifdef DEBUG
- ~EnsureSpace() {
- int bytes_generated = space_before_ - assembler_->available_space();
- ASSERT(bytes_generated < assembler_->kGap);
- }
-#endif
-
- private:
- Assembler* assembler_;
-#ifdef DEBUG
- int space_before_;
-#endif
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_ASSEMBLER_X64_H_
diff --git a/src/3rdparty/v8/src/x64/builtins-x64.cc b/src/3rdparty/v8/src/x64/builtins-x64.cc
deleted file mode 100644
index 144962b..0000000
--- a/src/3rdparty/v8/src/x64/builtins-x64.cc
+++ /dev/null
@@ -1,1884 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
- // ----------- S t a t e -------------
- // -- rax : number of arguments excluding receiver
- // -- rdi : called function (only guaranteed when
- // extra_args requires it)
- // -- rsi : context
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -- ...
- // -- rsp[8 * argc] : first argument (argc == rax)
- // -- rsp[8 * (argc +1)] : receiver
- // -----------------------------------
-
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ pop(kScratchRegister); // Save return address.
- __ push(rdi);
- __ push(kScratchRegister); // Restore return address.
- } else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- }
-
- // JumpToExternalReference expects rax to contain the number of arguments
- // including the receiver and the extra arguments.
- __ addq(rax, Immediate(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
-}
-
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ movq(kScratchRegister,
- FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(kScratchRegister,
- FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
- __ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
- __ jmp(kScratchRegister);
-}
-
-
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
- GenerateTailCallToSharedCode(masm);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
- // ----------- S t a t e -------------
- // -- rax: number of arguments
- // -- rdi: constructor function
- // -----------------------------------
-
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
-
- // Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
-
- // Store a smi-tagged arguments count on the stack.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
-
- // Push the function to invoke on the stack.
- __ push(rdi);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ movq(kScratchRegister, debug_step_in_fp);
- __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
- __ j(not_equal, &rt_call);
-#endif
-
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // rdi: constructor
- __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rax, &rt_call);
- // rdi: constructor
- // rax: initial map (if proven valid below)
- __ CmpObjectType(rax, MAP_TYPE, rbx);
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // rdi: constructor
- // rax: initial map
- __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ decb(FieldOperand(rcx,
- SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
-
- __ push(rax);
- __ push(rdi);
-
- __ push(rdi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(rdi);
- __ pop(rax);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ shl(rdi, Immediate(kPointerSizeLog2));
- // rdi: size of new object
- __ AllocateInNewSpace(rdi,
- rbx,
- rdi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // rax: initial map
- // rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
- __ movq(Operand(rbx, JSObject::kMapOffset), rax);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
- // Set extra fields in the newly allocated object.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- if (count_constructions) {
- __ movzxbq(rsi,
- FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ lea(rsi,
- Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
- // rsi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmpq(rsi, rdi);
- __ Assert(less_equal,
- "Unexpected number of pre-allocated property fields.");
- }
- __ InitializeFieldsWithFiller(rcx, rsi, rdx);
- __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
- }
- __ InitializeFieldsWithFiller(rcx, rdi, rdx);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- __ or_(rbx, Immediate(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- // Calculate total properties described map.
- __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ movzxbq(rcx,
- FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ addq(rdx, rcx);
- // Calculate unused properties past the end of the in-object properties.
- __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
- __ subq(rdx, rcx);
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // rbx: JSObject
- // rdi: start of next object (will be start of FixedArray)
- // rdx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- rdx,
- rdi,
- rax,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // rbx: JSObject
- // rdi: FixedArray
- // rdx: number of elements
- // rax: start of next object
- __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
- __ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
-
- // Initialize the fields to undefined.
- // rbx: JSObject
- // rdi: FixedArray
- // rax: start of next object
- // rdx: number of elements
- { Label loop, entry;
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(below, &loop);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // rbx: JSObject
- // rdi: FixedArray
- __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
- __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
-
-
- // Continue with JSObject being successfully allocated
- // rbx: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // rbx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(rbx);
- }
-
- // Allocate the new receiver object using the runtime call.
- // rdi: function (constructor)
- __ bind(&rt_call);
- // Must restore rdi (constructor) before calling runtime.
- __ movq(rdi, Operand(rsp, 0));
- __ push(rdi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ movq(rbx, rax); // store result in rbx
-
- // New object allocated.
- // rbx: newly allocated object
- __ bind(&allocated);
- // Retrieve the function from the stack.
- __ pop(rdi);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ movq(rax, Operand(rsp, 0));
- __ SmiToInteger32(rax, rax);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(rbx);
- __ push(rbx);
-
- // Set up pointer to last argument.
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(rbx, rcx, times_pointer_size, 0));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop);
-
- // Call the function.
- if (is_api_function) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
- } else {
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
-
- // Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context from the frame.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(rax, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &exit);
-
- // Symbols are "objects".
- __ CmpInstanceType(rcx, SYMBOL_TYPE);
- __ j(equal, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ movq(rax, Operand(rsp, 0));
-
- // Restore the arguments count and leave the construct frame.
- __ bind(&exit);
- __ movq(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
-
- // Leave construct frame.
- }
-
- // Remove caller arguments from the stack and return.
- __ pop(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ push(rcx);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- __ ret(0);
-}
-
-
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
-}
-
-
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- // Expects five C++ function parameters.
- // - Address entry (ignored)
- // - JSFunction* function (
- // - Object* receiver
- // - int argc
- // - Object*** argv
- // (see Handle::Invoke in execution.cc).
-
- // Open a C++ scope for the FrameScope.
- {
- // Platform specific argument handling. After this, the stack contains
- // an internal frame and the pushed function and receiver, and
- // register rax and rbx holds the argument count and argument array,
- // while rdi holds the function pointer and rsi the context.
-
-#ifdef _WIN64
- // MSVC parameters in:
- // rcx : entry (ignored)
- // rdx : function
- // r8 : receiver
- // r9 : argc
- // [rsp+0x20] : argv
-
- // Clear the context before we push it when entering the internal frame.
- __ Set(rsi, 0);
- // Enter an internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load the function context into rsi.
- __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
-
- // Push the function and the receiver onto the stack.
- __ push(rdx);
- __ push(r8);
-
- // Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, r9);
- // Load the previous frame pointer to access C argument on stack
- __ movq(kScratchRegister, Operand(rbp, 0));
- __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
- // Load the function pointer into rdi.
- __ movq(rdi, rdx);
-#else // _WIN64
- // GCC parameters in:
- // rdi : entry (ignored)
- // rsi : function
- // rdx : receiver
- // rcx : argc
- // r8 : argv
-
- __ movq(rdi, rsi);
- // rdi : function
-
- // Clear the context before we push it when entering the internal frame.
- __ Set(rsi, 0);
- // Enter an internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the function and receiver and setup the context.
- __ push(rdi);
- __ push(rdx);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, rcx);
- __ movq(rbx, r8);
-#endif // _WIN64
-
- // Current stack contents:
- // [rsp + 2 * kPointerSize ... ]: Internal frame
- // [rsp + kPointerSize] : function
- // [rsp] : receiver
- // Current register contents:
- // rax : argc
- // rbx : argv
- // rsi : context
- // rdi : function
-
- // Copy arguments to the stack in a loop.
- // Register rbx points to array of pointers to handle locations.
- // Push the values of these handles.
- Label loop, entry;
- __ Set(rcx, 0); // Set loop variable to 0.
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
- __ push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(not_equal, &loop);
-
- // Invoke the code.
- if (is_construct) {
- // No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_sentinel);
- // Expects rdi to hold function pointer.
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(rax);
- // Function must be in rdi.
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
- // Exit the internal frame. Notice that this also removes the empty
- // context and the function left on the stack by the code
- // invocation.
- }
-
- // TODO(X64): Is argument correct? Is there a receiver to remove?
- __ ret(1 * kPointerSize); // Remove receiver.
-}
-
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
-}
-
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
-}
-
-
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
-}
-
-
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore function.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
-}
-
-
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- // Re-execute the code that was patched back to the young age when
- // the stub returns.
- __ subq(Operand(rsp, 0), Immediate(5));
- __ Pushad();
-#ifdef _WIN64
- __ movq(rcx, Operand(rsp, kNumSafepointRegisters * kPointerSize));
-#else
- __ movq(rdi, Operand(rsp, kNumSafepointRegisters * kPointerSize));
-#endif
- { // NOLINT
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
- }
- __ Popad();
- __ ret(0);
-}
-
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
-
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ Pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
- __ Popad();
- // Tear down internal frame.
- }
-
- __ pop(MemOperand(rsp, 0)); // Ignore state offset
- __ ret(0); // Return to IC Miss stub, continuation still on stack.
-}
-
-
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
-
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- // Tear down internal frame.
- }
-
- // Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(r10, Operand(rsp, 1 * kPointerSize));
-
- // Switch on the state.
- Label not_no_registers, not_tos_rax;
- __ cmpq(r10, Immediate(FullCodeGenerator::NO_REGISTERS));
- __ j(not_equal, &not_no_registers, Label::kNear);
- __ ret(1 * kPointerSize); // Remove state.
-
- __ bind(&not_no_registers);
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
- __ j(not_equal, &not_tos_rax, Label::kNear);
- __ ret(2 * kPointerSize); // Remove state, rax.
-
- __ bind(&not_tos_rax);
- __ Abort("no cases left");
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ Pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ Popad();
- __ ret(0);
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- // Stack Layout:
- // rsp[0]: Return address
- // rsp[1]: Argument n
- // rsp[2]: Argument n-1
- // ...
- // rsp[n]: Argument 1
- // rsp[n+1]: Receiver (function to call)
- //
- // rax contains the number of arguments, n, not counting the receiver.
- //
- // 1. Make sure we have at least one argument.
- { Label done;
- __ testq(rax, rax);
- __ j(not_zero, &done);
- __ pop(rbx);
- __ Push(masm->isolate()->factory()->undefined_value());
- __ push(rbx);
- __ incq(rax);
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- Label slow, non_function;
- // The function to call is at position n+1 on the stack.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ JumpIfSmi(rdi, &non_function);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- // 3a. Patch the first argument if necessary when calling a function.
- Label shift_arguments;
- __ Set(rdx, 0); // indicate regular JS_FUNCTION
- { Label convert_to_object, use_global_receiver, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rbx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &shift_arguments);
-
- // Do not transform the receiver for natives.
- // SharedFunctionInfo is already loaded into rbx.
- __ testb(FieldOperand(rbx, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_zero, &shift_arguments);
-
- // Compute the receiver in non-strict mode.
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
- __ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
-
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_receiver);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_receiver);
-
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &shift_arguments);
-
- __ bind(&convert_to_object);
- {
- // Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
-
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
- __ Set(rdx, 0); // indicate regular JS_FUNCTION
-
- __ pop(rax);
- __ SmiToInteger32(rax, rax);
- }
-
- // Restore the function to rdi.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ jmp(&patch_receiver, Label::kNear);
-
- // Use the global receiver object from the called function as the
- // receiver.
- __ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-
- __ bind(&patch_receiver);
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
-
- __ jmp(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ Set(rdx, 1); // indicate function proxy
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
- __ j(equal, &shift_arguments);
- __ bind(&non_function);
- __ Set(rdx, 2); // indicate non-function
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
-
- // 4. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- __ bind(&shift_arguments);
- { Label loop;
- __ movq(rcx, rax);
- __ bind(&loop);
- __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
- __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
- __ decq(rcx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(rbx); // Discard copy of return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
- }
-
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- { Label function, non_proxy;
- __ testq(rdx, rdx);
- __ j(zero, &function);
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ cmpq(rdx, Immediate(1));
- __ j(not_equal, &non_proxy);
-
- __ pop(rdx); // return address
- __ push(rdi); // re-add proxy object as additional argument
- __ push(rdx);
- __ incq(rax);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(rbx,
- FieldOperand(rdx,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ cmpq(rax, rbx);
- __ j(not_equal,
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- ParameterCount expected(0);
- __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- // Stack at entry:
- // rsp: return address
- // rsp+8: arguments
- // rsp+16: receiver ("this")
- // rsp+24: function
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // rbp: Old base pointer
- // rbp[1]: return address
- // rbp[2]: function arguments
- // rbp[3]: receiver
- // rbp[4]: function
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
-
- __ push(Operand(rbp, kFunctionOffset));
- __ push(Operand(rbp, kArgumentsOffset));
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movq(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subq(rcx, kScratchRegister);
- // Make rdx the space we need for the array when it is unrolled onto the
- // stack.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
-
- // Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(rax); // limit
- __ push(Immediate(0)); // index
-
- // Get the receiver.
- __ movq(rbx, Operand(rbp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &push_receiver);
-
- // Do not transform the receiver for natives.
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_receiver);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_receiver);
-
- // If given receiver is already a JavaScript object then there's no
- // reason for converting it.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &push_receiver);
-
- // Convert the receiver to an object.
- __ bind(&call_to_object);
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
- __ jmp(&push_receiver, Label::kNear);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(rbx);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ push(rax);
-
- // Update the index on the stack and in register rax.
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- __ movq(Operand(rbp, kIndexOffset), rax);
-
- __ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
- __ j(not_equal, &loop);
-
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(rax);
- __ SmiToInteger32(rax, rax);
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &call_proxy);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-
- frame_scope.GenerateLeaveFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
-
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(rdi); // add function proxy as last argument
- __ incq(rax);
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- // Leave internal frame.
- }
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
-}
-
-
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
-
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- Factory* factory = masm->isolate()->factory();
- __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
- __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
- factory->empty_fixed_array());
- // Field JSArray::kElementsOffset is initialized later.
- __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
-
- // If no storage is requested for the elements array just set the empty
- // fixed array.
- if (initial_capacity == 0) {
- __ Move(FieldOperand(result, JSArray::kElementsOffset),
- factory->empty_fixed_array());
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ lea(scratch1, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array
- // scratch2: start of next object
- __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
- factory->fixed_array_map());
- __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
- Smi::FromInt(initial_capacity));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
- static const int kLoopUnfoldLimit = 4;
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- if (initial_capacity <= kLoopUnfoldLimit) {
- // Use a scratch register here to have only one reloc info when unfolding
- // the loop.
- for (int i = 0; i < initial_capacity; i++) {
- __ movq(FieldOperand(scratch1,
- FixedArray::kHeaderSize + i * kPointerSize),
- scratch3);
- }
- } else {
- Label loop, entry;
- __ movq(scratch2, Immediate(initial_capacity));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(FieldOperand(scratch1,
- scratch2,
- times_pointer_size,
- FixedArray::kHeaderSize),
- scratch3);
- __ bind(&entry);
- __ decq(scratch2);
- __ j(not_sign, &loop);
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end (see
-// below for when that is not the case). If the parameter fill_with_holes is
-// true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array,
- Register elements_array_end,
- Register scratch,
- bool fill_with_hole,
- Label* gc_required) {
- __ LoadInitialArrayMap(array_function, scratch,
- elements_array, fill_with_hole);
-
- if (FLAG_debug_code) { // Assert that array size is not zero.
- __ testq(array_size, array_size);
- __ Assert(not_zero, "array size is unexpectedly 0");
- }
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested elements.
- SmiIndex index =
- masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
- __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- index.scale,
- index.reg,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array: initial map
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- Factory* factory = masm->isolate()->factory();
- __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ Move(elements_array, factory->empty_fixed_array());
- __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
- // Field JSArray::kElementsOffset is initialized later.
- __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ lea(elements_array, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
- // Initialize the fixed array. FixedArray length is stored as a smi.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
- factory->fixed_array_map());
- // For non-empty JSArrays the length of the FixedArray and the JSArray is the
- // same.
- __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ lea(elements_array, Operand(elements_array,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(elements_array, 0), scratch);
- __ addq(elements_array, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(elements_array, elements_array_end);
- __ j(below, &loop);
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// rdi: constructor (built-in Array function)
-// rax: argc
-// rsp[0]: return address
-// rsp[8]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in rdi needs to be preserved for
-// entering the generic code. In both cases argc in rax needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// a construct call and a normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
- Label* call_generic_code) {
- Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array,
- has_non_smi_element, finish, cant_transition_map, not_double;
-
- // Check for array construction with zero arguments.
- __ testq(rax, rax);
- __ j(not_zero, &argc_one_or_more);
-
- __ bind(&empty_array);
- // Handle construction of an empty array.
- AllocateEmptyJSArray(masm,
- rdi,
- rbx,
- rcx,
- rdx,
- r8,
- call_generic_code);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->array_function_native(), 1);
- __ movq(rax, rbx);
- __ ret(kPointerSize);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmpq(rax, Immediate(1));
- __ j(not_equal, &argc_two_or_more);
- __ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
-
- __ SmiTest(rdx);
- __ j(not_zero, &not_empty_array);
- __ pop(r8); // Adjust stack.
- __ Drop(1);
- __ push(r8);
- __ movq(rax, Immediate(0)); // Treat this as a call with argc of zero.
- __ jmp(&empty_array);
-
- __ bind(&not_empty_array);
- __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is to large to actually allocate an elements array.
- __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
- __ j(greater_equal, call_generic_code);
-
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // esp[0]: return address
- // esp[8]: argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1);
- __ movq(rax, rbx);
- __ ret(2 * kPointerSize);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ movq(rdx, rax);
- __ Integer32ToSmi(rdx, rdx); // Convet argc to a smi.
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // esp[0] : return address
- // esp[8] : last argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1);
-
- // rax: argc
- // rbx: JSArray
- // rcx: elements_array
- // r8: elements_array_end (untagged)
- // esp[0]: return address
- // esp[8]: last argument
-
- // Location of the last argument
- __ lea(r9, Operand(rsp, kPointerSize));
-
- // Location of the first array element (Parameter fill_with_holes to
- // AllocateJSArrayis false, so the FixedArray is returned in rcx).
- __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
-
- // rax: argc
- // rbx: JSArray
- // rdx: location of the first array element
- // r9: location of the last argument
- // esp[0]: return address
- // esp[8]: last argument
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(r8, &has_non_smi_element);
- }
- __ movq(Operand(rdx, 0), r8);
- __ addq(rdx, Immediate(kPointerSize));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop);
-
- // Remove caller arguments from the stack and return.
- // rax: argc
- // rbx: JSArray
- // esp[0]: return address
- // esp[8]: last argument
- __ bind(&finish);
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ push(rcx);
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(r8,
- masm->isolate()->factory()->heap_number_map(),
- &not_double,
- DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- __ UndoAllocationInNewSpace(rbx);
- __ jmp(call_generic_code);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- // rbx: JSArray
- __ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- r11,
- kScratchRegister,
- &cant_transition_map);
-
- __ movq(FieldOperand(rbx, HeapObject::kMapOffset), r11);
- __ RecordWriteField(rbx, HeapObject::kMapOffset, r11, r8,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Finish the array initialization loop.
- Label loop2;
- __ bind(&loop2);
- __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
- __ movq(Operand(rdx, 0), r8);
- __ addq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
- __ j(greater_equal, &loop2);
- __ jmp(&finish);
-}
-
-
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the InternalArray function.
- __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, rdi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin InternalArray functions should be maps.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for InternalArray function");
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for InternalArray function");
- }
-
- // Run the native code for the InternalArray function called as a normal
- // function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the Array function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rdi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for Array function");
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : constructor
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
-
- // Initial map for the builtin Array function should be a map.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
- __ Check(not_smi, "Unexpected initial map for Array function");
- __ CmpObjectType(rcx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
-
- if (FLAG_optimize_constructed_arrays) {
- // We should either have undefined in ebx or a valid jsglobalpropertycell
- Label okay_here;
- Handle<Object> undefined_sentinel(
- masm->isolate()->factory()->undefined_value());
- Handle<Map> global_property_cell_map(
- masm->isolate()->heap()->global_property_cell_map());
- __ Cmp(rbx, undefined_sentinel);
- __ j(equal, &okay_here);
- __ Cmp(FieldOperand(rbx, 0), global_property_cell_map);
- __ Assert(equal, "Expected property cell in register rbx");
- __ bind(&okay_here);
- }
- }
-
- if (FLAG_optimize_constructed_arrays) {
- Label not_zero_case, not_one_case;
- __ testq(rax, rax);
- __ j(not_zero, &not_zero_case);
- ArrayNoArgumentConstructorStub no_argument_stub;
- __ TailCallStub(&no_argument_stub);
-
- __ bind(&not_zero_case);
- __ cmpq(rax, Immediate(1));
- __ j(greater, &not_one_case);
- ArraySingleArgumentConstructorStub single_argument_stub;
- __ TailCallStub(&single_argument_stub);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub n_argument_stub;
- __ TailCallStub(&n_argument_stub);
- } else {
- Label generic_constructor;
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : number of arguments
- // -- rdi : constructor function
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1);
-
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
- __ cmpq(rdi, rcx);
- __ Assert(equal, "Unexpected String function");
- }
-
- // Load the first argument into rax and get rid of the rest
- // (including the receiver).
- Label no_arguments;
- __ testq(rax, rax);
- __ j(zero, &no_arguments);
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
- __ push(rcx);
- __ movq(rax, rbx);
-
- // Lookup the argument in the number to string cache.
- Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- rax, // Input.
- rbx, // Result.
- rcx, // Scratch 1.
- rdx, // Scratch 2.
- false, // Input is known to be smi?
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1);
- __ bind(&argument_is_string);
-
- // ----------- S t a t e -------------
- // -- rbx : argument converted to string
- // -- rdi : constructor function
- // -- rsp[0] : return address
- // -----------------------------------
-
- // Allocate a JSValue and put the tagged pointer into rax.
- Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- rax, // Result.
- rcx, // New allocation top (we ignore it).
- no_reg,
- &gc_required,
- TAG_OBJECT);
-
- // Set the map.
- __ LoadGlobalFunctionInitialMap(rdi, rcx);
- if (FLAG_debug_code) {
- __ cmpb(FieldOperand(rcx, Map::kInstanceSizeOffset),
- Immediate(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(equal, "Unexpected string wrapper instance size");
- __ cmpb(FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset), Immediate(0));
- __ Assert(equal, "Unexpected unused properties of string wrapper");
- }
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
-
- // Set properties and elements.
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
-
- // Set the value.
- __ movq(FieldOperand(rax, JSValue::kValueOffset), rbx);
-
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-
- // We're done. Return.
- __ ret(0);
-
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rax, &convert_argument);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rcx);
- __ j(NegateCondition(is_string), &convert_argument);
- __ movq(rbx, rax);
- __ IncrementCounter(counters->string_ctor_string_value(), 1);
- __ jmp(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into rbx.
- __ bind(&convert_argument);
- __ IncrementCounter(counters->string_ctor_conversions(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdi); // Preserve the function.
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(rdi);
- }
- __ movq(rbx, rax);
- __ jmp(&argument_is_string);
-
- // Load the empty string into rbx, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ LoadRoot(rbx, Heap::kempty_stringRootIndex);
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, kPointerSize));
- __ push(rcx);
- __ jmp(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rbx);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- }
- __ ret(0);
-}
-
-
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(rbp);
- __ movq(rbp, rsp);
-
- // Store the arguments adaptor context sentinel.
- __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-
- // Push the function on the stack.
- __ push(rdi);
-
- // Preserve the number of arguments on the stack. Must preserve rax,
- // rbx and rcx because these registers are used when copying the
- // arguments and the receiver.
- __ Integer32ToSmi(r8, rax);
- __ push(r8);
-}
-
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // Retrieve the number of arguments from the stack. Number is a Smi.
- __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Leave the frame.
- __ movq(rsp, rbp);
- __ pop(rbp);
-
- // Remove caller arguments from the stack.
- __ pop(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ push(rcx);
-}
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : actual number of arguments
- // -- rbx : expected number of arguments
- // -- rcx : call kind information
- // -- rdx : code entry to call
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments;
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->arguments_adaptors(), 1);
-
- Label enough, too_few;
- __ cmpq(rax, rbx);
- __ j(less, &too_few);
- __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- __ j(equal, &dont_adapt_arguments);
-
- { // Enough parameters: Actual >= expected.
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all expected arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
- __ Set(r8, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ incq(r8);
- __ push(Operand(rax, 0));
- __ subq(rax, Immediate(kPointerSize));
- __ cmpq(r8, rbx);
- __ j(less, &copy);
- __ jmp(&invoke);
- }
-
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
- __ Set(r8, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ incq(r8);
- __ push(Operand(rdi, 0));
- __ subq(rdi, Immediate(kPointerSize));
- __ cmpq(r8, rax);
- __ j(less, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ bind(&fill);
- __ incq(r8);
- __ push(kScratchRegister);
- __ cmpq(r8, rbx);
- __ j(less, &fill);
-
- // Restore function pointer.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ call(rdx);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
- // Leave frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ ret(0);
-
- // -------------------------------------------
- // Dont adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ jmp(rdx);
-}
-
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Get the loop depth of the stack guard check. This is recorded in
- // a test(rax, depth) instruction right after the call.
- Label stack_check;
- __ movq(rbx, Operand(rsp, 0)); // return address
- __ movzxbq(rbx, Operand(rbx, 1)); // depth
-
- // Get the loop nesting level at which we allow OSR from the
- // unoptimized code and check if we want to do OSR yet. If not we
- // should perform a stack guard check so we can get interrupts while
- // waiting for on-stack replacement.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
- __ cmpb(rbx, FieldOperand(rcx, Code::kAllowOSRAtLoopNestingLevelOffset));
- __ j(greater, &stack_check);
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- }
-
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
- Label skip;
- __ SmiCompare(rax, Smi::FromInt(-1));
- __ j(not_equal, &skip, Label::kNear);
- __ ret(0);
-
- // If we decide not to perform on-stack replacement we perform a
- // stack guard check to enable interrupts.
- __ bind(&stack_check);
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
-
- StackCheckStub stub;
- __ TailCallStub(&stub);
- if (FLAG_debug_code) {
- __ Abort("Unreachable code: returned from tail call.");
- }
- __ bind(&ok);
- __ ret(0);
-
- __ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiToInteger32(rax, rax);
- __ push(rax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/code-stubs-x64.cc b/src/3rdparty/v8/src/x64/code-stubs-x64.cc
deleted file mode 100644
index c4dd865..0000000
--- a/src/3rdparty/v8/src/x64/code-stubs-x64.cc
+++ /dev/null
@@ -1,6940 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rax, rbx, rcx, rdx };
- descriptor->register_param_count_ = 4;
- descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rdx, rax };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rax, rbx };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-}
-
-
-static void InitializeArrayConstructorDescriptor(Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // register state
- // rdi -- constructor function
- // rbx -- type info cell with elements kind
- // rax -- number of arguments to the constructor function
- static Register registers[] = { rdi, rbx };
- descriptor->register_param_count_ = 2;
- // stack param count needs (constructor pointer, and single argument)
- descriptor->stack_parameter_count_ = &rax;
- descriptor->register_params_ = registers;
- descriptor->extra_expression_stack_count_ = 1;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ArrayConstructor_StubFailure);
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label check_heap_number, call_builtin;
- __ SmiTest(rax);
- __ j(not_zero, &check_heap_number, Label::kNear);
- __ Ret();
-
- __ bind(&check_heap_number);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_builtin, Label::kNear);
- __ Ret();
-
- __ bind(&call_builtin);
- __ pop(rcx); // Pop return address.
- __ push(rax);
- __ push(rcx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in rsi.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1);
-
- // Get the function info from the stack.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
-
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
- __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
- __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
- __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
- __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ movq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ testq(rbx, rbx);
- __ j(not_zero, &check_optimized, Label::kNear);
- }
- __ bind(&install_unoptimized);
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
- rdi); // Initialize with undefined.
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
-
- // rcx holds native context, ebx points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into edx.
- __ movq(rdx, FieldOperand(rbx, FixedArray::kHeaderSize + kPointerSize));
- __ cmpq(rcx, FieldOperand(rbx, FixedArray::kHeaderSize));
- __ j(equal, &install_optimized);
-
- // Iterate through the rest of map backwards. rdx holds an index.
- Label loop;
- Label restore;
- __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ SmiToInteger32(rdx, rdx);
- __ bind(&loop);
- // Do not double check first entry.
- __ cmpq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
- __ j(equal, &restore);
- __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength)); // Skip an entry.
- __ cmpq(rcx, FieldOperand(rbx,
- rdx,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, &loop, Label::kNear);
- // Hit: fetch the optimized code.
- __ movq(rdx, FieldOperand(rbx,
- rdx,
- times_pointer_size,
- FixedArray::kHeaderSize + 1 * kPointerSize));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
-
- // Now link a function into a list of optimized functions.
- __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
- // No need for write barrier as JSFunction (rax) is in the new space.
-
- __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
- // Store JSFunction (rax) into rdx before issuing write barrier as
- // it clobbers all the registers passed.
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- rdx,
- rbx,
- kDontSaveFPRegs);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&restore);
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- __ jmp(&install_unoptimized);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(rcx); // Temporarily remove return address.
- __ pop(rdx);
- __ push(rsi);
- __ push(rdx);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ push(rcx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // Set up the fixed slots.
- __ Set(rbx, 0); // Set to NULL.
- __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
- __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
- __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
-
- // Copy the global object from the previous context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), rbx);
-
- // Copy the qmlglobal object from the previous context.
- __ movq(rbx,
- Operand(rsi, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)),
- rbx);
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + (1 * kPointerSize)]: function
- // [rsp + (2 * kPointerSize)]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
-
- // Get the serialized scope info from the stack.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
- if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ cmpq(rcx, Immediate(0));
- __ Assert(equal, message);
- }
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
- __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots.
- __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
- __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
- __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
-
- // Copy the global object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(ContextOperand(rax, Context::GLOBAL_OBJECT_INDEX), rbx);
-
- // Copy the qmlglobal object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::QML_GLOBAL_OBJECT_INDEX));
- __ movq(ContextOperand(rax, Context::QML_GLOBAL_OBJECT_INDEX), rbx);
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(2 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- AllocationSiteMode allocation_site_mode,
- Label* fail) {
- // Registers on entry:
- //
- // rcx: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- int size = JSArray::kSize;
- int allocation_info_start = size;
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- size += AllocationSiteInfo::kSize;
- }
- size += elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- AllocationFlags flags = TAG_OBJECT;
- if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
- flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
- }
- __ AllocateInNewSpace(size, rax, rbx, rdx, fail, flags);
-
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ LoadRoot(kScratchRegister, Heap::kAllocationSiteInfoMapRootIndex);
- __ movq(FieldOperand(rax, allocation_info_start), kScratchRegister);
- __ movq(FieldOperand(rax, allocation_info_start + kPointerSize), rcx);
- }
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ lea(rdx, Operand(rax, JSArray::kSize + AllocationSiteInfo::kSize));
- } else {
- __ lea(rdx, Operand(rax, JSArray::kSize));
- }
- __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
-
- // Copy the elements array.
- if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
- }
- } else {
- ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
- int i;
- for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
- }
- while (i < elements_size) {
- __ movsd(xmm0, FieldOperand(rcx, i));
- __ movsd(FieldOperand(rdx, i), xmm0);
- i += kDoubleSize;
- }
- ASSERT(i == elements_size);
- }
- }
-}
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + kPointerSize]: constant elements.
- // [rsp + (2 * kPointerSize)]: literal index.
- // [rsp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into rcx and check if we need to create a
- // boilerplate.
- __ movq(rcx, Operand(rsp, 3 * kPointerSize));
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rcx,
- FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- Label slow_case;
- __ j(equal, &slow_case);
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- // rcx is boilerplate object.
- Factory* factory = masm->isolate()->factory();
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- factory->fixed_cow_array_map());
- __ j(not_equal, &check_fast_elements);
- GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&check_fast_elements);
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- factory->fixed_array_map());
- __ j(not_equal, &double_elements);
- GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(rcx);
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
- expected_map_index);
- __ Assert(equal, message);
- __ pop(rcx);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// The stub expects its argument on the stack and returns its result in tos_:
-// zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- Label patch;
- const Register argument = rax;
- const Register map = rdx;
-
- if (!types_.IsEmpty()) {
- __ movq(argument, Operand(rsp, 1 * kPointerSize));
- }
-
- // undefined -> false
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- Label not_smi;
- __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
- // argument contains the correct return value already
- if (!tos_.is(argument)) {
- __ movq(tos_, argument);
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_smi);
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(argument, &patch, Label::kNear);
- }
-
- if (types_.NeedsMap()) {
- __ movq(map, FieldOperand(argument, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- // Undetectable -> false.
- Label not_undetectable;
- __ j(zero, &not_undetectable, Label::kNear);
- __ Set(tos_, 0);
- __ ret(1 * kPointerSize);
- __ bind(&not_undetectable);
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // spec object -> true.
- Label not_js_object;
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, &not_js_object, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, 1);
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_js_object);
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ movq(tos_, FieldOperand(argument, String::kLengthOffset));
- __ ret(1 * kPointerSize); // the string length is OK as the return value
- __ bind(&not_string);
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number, false_result;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(argument, HeapNumber::kValueOffset));
- __ j(zero, &false_result, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, 1);
- }
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ Set(tos_, 0);
- __ ret(1 * kPointerSize);
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- __ PushCallerSaved(save_doubles_);
- const int argument_count = 1;
- __ PrepareCallCFunction(argument_count);
-#ifdef _WIN64
- __ LoadAddress(rcx, ExternalReference::isolate_address());
-#else
- __ LoadAddress(rdi, ExternalReference::isolate_address());
-#endif
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- argument_count);
- __ PopCallerSaved(save_doubles_);
- __ ret(0);
-}
-
-
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- const Register argument = rax;
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- Label different_value;
- __ CompareRoot(argument, value);
- __ j(not_equal, &different_value, Label::kNear);
- if (!result) {
- // If we have to return zero, there is no way around clearing tos_.
- __ Set(tos_, 0);
- } else if (!tos_.is(argument)) {
- // If we have to return non-zero, we can re-use the argument if it is the
- // same register as the result, because we never see Smi-zero here.
- __ Set(tos_, 1);
- }
- __ ret(1 * kPointerSize);
- __ bind(&different_value);
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Get return address, operand is now on top of stack.
- __ Push(Smi::FromInt(tos_.code()));
- __ Push(Smi::FromInt(types_.ToByte()));
- __ push(rcx); // Push return address.
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- enum ConvertUndefined {
- CONVERT_UNDEFINED_TO_ZERO,
- BAILOUT_ON_UNDEFINED
- };
- // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
- // If the operands are not both numbers, jump to not_numbers.
- // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
- // NumberOperands assumes both are smis or heap numbers.
- static void LoadSSE2SmiOperands(MacroAssembler* masm);
- static void LoadSSE2NumberOperands(MacroAssembler* masm);
- static void LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers);
-
- // Takes the operands in rdx and rax and loads them as integers in rax
- // and rcx.
- static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure,
- Register heap_number_map);
- // As above, but we know the operands to be numbers. In that case,
- // conversion can't fail.
- static void LoadNumbersAsIntegers(MacroAssembler* masm);
-
- // Tries to convert two values to smis losslessly.
- // This fails if either argument is not a Smi nor a HeapNumber,
- // or if it's a HeapNumber with a value that can't be converted
- // losslessly to a Smi. In that case, control transitions to the
- // on_not_smis label.
- // On success, either control goes to the on_success label (if one is
- // provided), or it falls through at the end of the code (if on_success
- // is NULL).
- // On success, both first and second holds Smi tagged values.
- // One of first or second must be non-Smi when entering.
- static void NumbersToSmis(MacroAssembler* masm,
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* on_success,
- Label* on_not_smis,
- ConvertUndefined convert_undefined);
-};
-
-
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
- Register result,
- Register source) {
- // Result may be rcx. If result and source are the same register, source will
- // be overwritten.
- ASSERT(!result.is(rdi) && !result.is(rbx));
- // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
- // cvttsd2si (32-bit version) directly.
- Register double_exponent = rbx;
- Register double_value = rdi;
- Label done, exponent_63_plus;
- // Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
- // Clear result preemptively, in case we need to return zero.
- __ xorl(result, result);
- __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
- // Double to remove sign bit, shift exponent down to least significant bits.
- // and subtract bias to get the unshifted, unbiased exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
- __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
- // Check whether the exponent is too big for a 63 bit unsigned integer.
- __ cmpl(double_exponent, Immediate(63));
- __ j(above_equal, &exponent_63_plus, Label::kNear);
- // Handle exponent range 0..62.
- __ cvttsd2siq(result, xmm0);
- __ jmp(&done, Label::kNear);
-
- __ bind(&exponent_63_plus);
- // Exponent negative or 63+.
- __ cmpl(double_exponent, Immediate(83));
- // If exponent negative or above 83, number contains no significant bits in
- // the range 0..2^31, so result is zero, and rcx already holds zero.
- __ j(above, &done, Label::kNear);
-
- // Exponent in rage 63..83.
- // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
- // the least significant exponent-52 bits.
-
- // Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
- __ sbbl(result, result); // And convert carry to -1 in result register.
- // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0.
- __ addl(double_value, result);
- // Do xor in opposite directions depending on where we want the result
- // (depending on whether result is rcx or not).
-
- if (result.is(rcx)) {
- __ xorl(double_value, result);
- // Left shift mantissa by (exponent - mantissabits - 1) to save the
- // bits that have positional values below 2^32 (the extra -1 comes from the
- // doubling done above to move the sign bit into the carry flag).
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(double_value);
- __ movl(result, double_value);
- } else {
- // As the then-branch, but move double-value to result before shifting.
- __ xorl(result, double_value);
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(result);
- }
-
- __ bind(&done);
-}
-
-
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
-
- __ push(rax); // the operand
- __ Push(Smi::FromInt(op_));
- __ Push(Smi::FromInt(mode_));
- __ Push(Smi::FromInt(operand_type_));
-
- __ push(rcx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label slow;
- GenerateSmiCodeSub(masm, &slow, &slow, Label::kNear, Label::kNear);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow,
- Label::Distance non_smi_near,
- Label::Distance slow_near) {
- Label done;
- __ JumpIfNotSmi(rax, non_smi, non_smi_near);
- __ SmiNeg(rax, rax, &done, Label::kNear);
- __ jmp(slow, slow_near);
- __ bind(&done);
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near) {
- __ JumpIfNotSmi(rax, non_smi, non_smi_near);
- __ SmiNot(rax, rax);
- __ ret(0);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
- Label non_smi, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(
- MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- // Check if the operand is a heap number.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, slow);
-
- // Operand is a float, negate its value by flipping the sign bit.
- if (mode_ == UNARY_OVERWRITE) {
- __ Set(kScratchRegister, 0x01);
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
- } else {
- // Allocate a heap number before calculating the answer,
- // so we don't have an untagged double around during GC.
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rcx, rax);
- __ pop(rax);
- }
- __ bind(&heapnumber_allocated);
- // rcx: allocated 'empty' number
-
- // Copy the double value to the new heap number, flipping the sign.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Set(kScratchRegister, 0x01);
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
- }
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
- Label* slow) {
- // Check if the operand is a heap number.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, slow);
-
- // Convert the heap number in rax to an untagged integer in rcx.
- IntegerConvert(masm, rax, rax);
-
- // Do the bitwise operation and smi tag the result.
- __ notl(rax);
- __ Integer32ToSmi(rax, rax);
- __ ret(0);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the JavaScript builtin.
- __ pop(rcx); // pop return address
- __ push(rax);
- __ push(rcx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-void BinaryOpStub::Initialize() {}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
- __ push(rdx);
- __ push(rax);
- // Left and right arguments are now on top.
- __ Push(Smi::FromInt(MinorKey()));
-
- __ push(rcx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
-
- // Arguments to BinaryOpStub are in rdx and rax.
- const Register left = rdx;
- const Register right = rax;
-
- // We only generate heapnumber answers for overflowing calculations
- // for the four basic arithmetic operations and logical right shift by 0.
- bool generate_inline_heapnumber_results =
- (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
- (op == Token::ADD || op == Token::SUB ||
- op == Token::MUL || op == Token::DIV || op == Token::SHR);
-
- // Smi check of both operands. If op is BIT_OR, the check is delayed
- // until after the OR operation.
- Label not_smis;
- Label use_fp_on_smis;
- Label fail;
-
- if (op != Token::BIT_OR) {
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
-
- Label smi_values;
- __ bind(&smi_values);
- // Perform the operation.
- Comment perform_smi(masm, "-- Perform smi operation");
- switch (op) {
- case Token::ADD:
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
-
- case Token::SUB:
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
-
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
- break;
-
- case Token::DIV:
- // SmiDiv will not accept left in rdx or right in rax.
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
- break;
-
- case Token::MOD:
- // SmiMod will not accept left in rdx or right in rax.
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
- break;
-
- case Token::BIT_OR: {
- ASSERT(right.is(rax));
- __ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative.
- break;
- }
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
-
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in rax. Some operations have registers pushed.
- __ ret(0);
-
- if (use_fp_on_smis.is_linked()) {
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- __ bind(&use_fp_on_smis);
- if (op == Token::DIV || op == Token::MOD) {
- // Restore left and right to rdx and rax.
- __ movq(rdx, rcx);
- __ movq(rax, rbx);
- }
-
- if (generate_inline_heapnumber_results) {
- __ AllocateHeapNumber(rcx, rbx, slow);
- Comment perform_float(masm, "-- Perform float operation on smis");
- if (op == Token::SHR) {
- __ SmiToInteger32(left, left);
- __ cvtqsi2sd(xmm0, left);
- } else {
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- }
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else {
- __ jmp(&fail);
- }
- }
-
- // 7. Non-smi operands reach the end of the code generated by
- // GenerateSmiCode, and fall through to subsequent code,
- // with the operands in rdx and rax.
- // But first we check if non-smi values are HeapNumbers holding
- // values that could be smi.
- __ bind(&not_smis);
- Comment done_comment(masm, "-- Enter non-smi code");
- FloatingPointHelper::ConvertUndefined convert_undefined =
- FloatingPointHelper::BAILOUT_ON_UNDEFINED;
- // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
- if (op == Token::BIT_AND ||
- op == Token::BIT_OR ||
- op == Token::BIT_XOR ||
- op == Token::SAR ||
- op == Token::SHL ||
- op == Token::SHR) {
- convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
- }
- FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
- &smi_values, &fail, convert_undefined);
- __ jmp(&smi_values);
- __ bind(&fail);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
-static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure,
- Token::Value op,
- OverwriteMode mode) {
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
-
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, allocation_failure, mode);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- break;
- }
- case Token::MOD: {
- // For MOD we jump to the allocation_failure label, to call runtime.
- __ jmp(allocation_failure);
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
- heap_number_map);
- switch (op) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- __ Ret();
-
- // Logical shift right can produce an unsigned int32 that is not
- // an int32, and so is not in the smi range. Allocate a heap number
- // in that case.
- if (op == Token::SHR) {
- __ bind(&non_smi_shr_result);
- Label allocation_failed;
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ AllocateInNewSpace(HeapNumber::kSize,
- rax,
- rdx,
- no_reg,
- &allocation_failed,
- TAG_OBJECT);
- // Set the map.
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ Ret();
-
- __ bind(&allocation_failed);
- // We need tagged values in rdx and rax for the following code,
- // not int32 in rax and rcx.
- __ Integer32ToSmi(rax, rcx);
- __ Integer32ToSmi(rdx, rbx);
- __ jmp(allocation_failure);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
- // No fall-through from this generated code.
- if (FLAG_debug_code) {
- __ Abort("Unexpected fall-through in "
- "BinaryStub_GenerateFloatingPointCode.");
- }
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &left_not_string, Label::kNear);
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label call_runtime;
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- GenerateTypeTransition(masm);
-
- if (call_runtime.is_linked()) {
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- // The int32 case is identical to the Smi case. We avoid creating this
- // ic state on x64.
- UNREACHABLE();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
-
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(rdx, rdx);
- } else {
- __ LoadRoot(rdx, Heap::kNanValueRootIndex);
- }
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(rax, rax);
- } else {
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
- Register input,
- Label* fail) {
- Label ok;
- __ JumpIfSmi(input, &ok, Label::kNear);
- Register heap_number_map = r8;
- Register scratch1 = r9;
- Register scratch2 = r10;
- // HeapNumbers containing 32bit integer values are also allowed.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, fail);
- __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
- // Convert, convert back, and compare the two doubles' bits.
- __ cvttsd2siq(scratch2, xmm0);
- __ cvtlsi2sd(xmm1, scratch2);
- __ movq(scratch1, xmm0);
- __ movq(scratch2, xmm1);
- __ cmpq(scratch1, scratch2);
- __ j(not_equal, fail);
- __ bind(&ok);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label gc_required, not_number;
-
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
- }
-
- BinaryOpStub_GenerateFloatingPointCode(
- masm, &gc_required, &not_number, op_, mode_);
-
- __ bind(&not_number);
- GenerateTypeTransition(masm);
-
- __ bind(&gc_required);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
- BinaryOpStub_GenerateFloatingPointCode(
- masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
- Label skip_allocation;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in rdx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rdx, &skip_allocation);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rdx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- // Use object in rdx as a result holder
- __ movq(rax, rdx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(rcx);
- __ push(rdx);
- __ push(rax);
- __ push(rcx);
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // TAGGED case:
- // Input:
- // rsp[8]: argument (should be number).
- // rsp[0]: return address.
- // Output:
- // rax: tagged double result.
- // UNTAGGED case:
- // Input::
- // rsp[0]: return address.
- // xmm1: untagged double input argument
- // Output:
- // xmm1: untagged double result.
-
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label skip_cache;
- const bool tagged = (argument_type_ == TAGGED);
- if (tagged) {
- Label input_not_smi, loaded;
- // Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the bits of the double into rbx.
- __ SmiToInteger32(rax, rax);
- __ subq(rsp, Immediate(kDoubleSize));
- __ cvtlsi2sd(xmm1, rax);
- __ movsd(Operand(rsp, 0), xmm1);
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- __ jmp(&loaded, Label::kNear);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // bits into rbx.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
-
- __ bind(&loaded);
- } else { // UNTAGGED.
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- }
-
- // ST[0] == double value, if TAGGED.
- // rbx = bits of double value.
- // rdx = also bits of double value.
- // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
- // h = h0 = bits ^ (bits >> 32);
- // h ^= h >> 16;
- // h ^= h >> 8;
- // h = h & (cacheSize - 1);
- // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
- __ sar(rdx, Immediate(32));
- __ xorl(rdx, rbx);
- __ movl(rcx, rdx);
- __ movl(rax, rdx);
- __ movl(rdi, rdx);
- __ sarl(rdx, Immediate(8));
- __ sarl(rcx, Immediate(16));
- __ sarl(rax, Immediate(24));
- __ xorl(rcx, rdx);
- __ xorl(rax, rdi);
- __ xorl(rcx, rax);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // ST[0] == double value.
- // rbx = bits of double value.
- // rcx = TranscendentalCache::hash(double value).
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(masm->isolate());
- __ movq(rax, cache_array);
- int cache_array_index =
- type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
- __ movq(rax, Operand(rax, cache_array_index));
- // rax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ testq(rax, rax);
- __ j(zero, &runtime_call_clear_stack); // Only clears stack if TAGGED.
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { // NOLINT - doesn't like a single brace on a line.
- TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- // Two uint_32's and a pointer per element.
- CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
- CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
- CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
- CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
- }
-#endif
- // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
- __ addl(rcx, rcx);
- __ lea(rcx, Operand(rax, rcx, times_8, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmpq(rbx, Operand(rcx, 0));
- __ j(not_equal, &cache_miss, Label::kNear);
- // Cache hit!
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->transcendental_cache_hit(), 1);
- __ movq(rax, Operand(rcx, 2 * kIntSize));
- if (tagged) {
- __ fstp(0); // Clear FPU stack.
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
- }
-
- __ bind(&cache_miss);
- __ IncrementCounter(counters->transcendental_cache_miss(), 1);
- // Update cache with new value.
- if (tagged) {
- __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
- } else { // UNTAGGED.
- __ AllocateHeapNumber(rax, rdi, &skip_cache);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- }
- GenerateOperation(masm, type_);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- if (tagged) {
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
-
- // Skip cache and return answer directly, only in untagged case.
- __ bind(&skip_cache);
- __ subq(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), xmm1);
- __ fld_d(Operand(rsp, 0));
- GenerateOperation(masm, type_);
- __ fstp_d(Operand(rsp, 0));
- __ movsd(xmm1, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- // We return the value in xmm1 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Allocate an unused object bigger than a HeapNumber.
- __ Push(Smi::FromInt(2 * kDoubleSize));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-
- // Call runtime, doing whatever allocation and cleanup is necessary.
- if (tagged) {
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(
- ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
- } else { // UNTAGGED.
- __ bind(&runtime_call_clear_stack);
- __ bind(&runtime_call);
- __ AllocateHeapNumber(rax, rdi, &skip_cache);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
- }
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(
- MacroAssembler* masm, TranscendentalCache::Type type) {
- // Registers:
- // rax: Newly allocated HeapNumber, which must be preserved.
- // rbx: Bits of input double. Must be preserved.
- // rcx: Pointer to cache entry. Must be preserved.
- // st(0): Input double
- Label done;
- if (type == TranscendentalCache::SIN ||
- type == TranscendentalCache::COS ||
- type == TranscendentalCache::TAN) {
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ movq(rdi, rbx);
- // Move exponent and sign bits to low bits.
- __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
- // Remove sign bit.
- __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
- int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
- __ cmpl(rdi, Immediate(supported_exponent_limit));
- __ j(below, &in_range);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmpl(rdi, Immediate(0x7ff));
- Label non_nan_result;
- __ j(not_equal, &non_nan_result, Label::kNear);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- // NaN is represented by 0x7ff8000000000000.
- __ subq(rsp, Immediate(kPointerSize));
- __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
- __ movl(Operand(rsp, 0), Immediate(0x00000000));
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&done);
-
- __ bind(&non_nan_result);
-
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ movq(rdi, rax); // Save rax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- // FPU Stack: input % 2*pi, 2*pi,
- __ fstp(0);
- // FPU Stack: input % 2*pi
- __ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber.
- __ bind(&in_range);
- switch (type) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- case TranscendentalCache::TAN:
- // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
- // FP register stack.
- __ fptan();
- __ fstp(0); // Pop FP register stack.
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
- } else {
- ASSERT(type == TranscendentalCache::LOG);
- __ fldln2();
- __ fxch();
- __ fyl2x();
- }
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
- // Check float operands.
- Label done;
- Label rax_is_smi;
- Label rax_is_object;
- Label rdx_is_object;
-
- __ JumpIfNotSmi(rdx, &rdx_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ JumpIfSmi(rax, &rax_is_smi);
-
- __ bind(&rax_is_object);
- IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
- __ jmp(&done);
-
- __ bind(&rdx_is_object);
- IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
- __ JumpIfNotSmi(rax, &rax_is_object);
- __ bind(&rax_is_smi);
- __ SmiToInteger32(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-// Jump to conversion_failure: rdx and rax are unchanged.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure,
- Register heap_number_map) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- __ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(r8, rdx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ Set(r8, 0);
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the rdx heap number in rcx.
- IntegerConvert(masm, r8, rdx);
-
- // Here r8 has the untagged integer, rax has a Smi or a heap number.
- __ bind(&load_arg2);
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rcx, rax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ Set(rcx, 0);
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the rax heap number in rcx.
- IntegerConvert(masm, rcx, rax);
- __ bind(&done);
- __ movl(rax, r8);
-}
-
-
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-}
-
-
-void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
- // Load operand in rdx into xmm0.
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1.
- __ JumpIfSmi(rax, &load_smi_rax);
- __ bind(&load_nonsmi_rax);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
- // Load operand in rdx into xmm0, or branch to not_numbers.
- __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers); // Argument in rdx is not a number.
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1, or branch to not_numbers.
- __ JumpIfSmi(rax, &load_smi_rax);
-
- __ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* on_success,
- Label* on_not_smis,
- ConvertUndefined convert_undefined) {
- Register heap_number_map = scratch3;
- Register smi_result = scratch1;
- Label done, maybe_undefined_first, maybe_undefined_second, first_done;
-
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label first_smi;
- __ JumpIfSmi(first, &first_smi, Label::kNear);
- __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal,
- (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
- ? &maybe_undefined_first
- : on_not_smis);
- // Convert HeapNumber to smi if possible.
- __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
- __ movq(scratch2, xmm0);
- __ cvttsd2siq(smi_result, xmm0);
- // Check if conversion was successful by converting back and
- // comparing to the original double's bits.
- __ cvtlsi2sd(xmm1, smi_result);
- __ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
- __ j(not_equal, on_not_smis);
- __ Integer32ToSmi(first, smi_result);
-
- __ bind(&first_done);
- __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
- __ bind(&first_smi);
- __ AssertNotSmi(second);
- __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal,
- (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
- ? &maybe_undefined_second
- : on_not_smis);
- // Convert second to smi, if possible.
- __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
- __ movq(scratch2, xmm0);
- __ cvttsd2siq(smi_result, xmm0);
- __ cvtlsi2sd(xmm1, smi_result);
- __ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
- __ j(not_equal, on_not_smis);
- __ Integer32ToSmi(second, smi_result);
- if (on_success != NULL) {
- __ jmp(on_success);
- } else {
- __ jmp(&done);
- }
-
- __ bind(&maybe_undefined_first);
- __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, on_not_smis);
- __ xor_(first, first);
- __ jmp(&first_done);
-
- __ bind(&maybe_undefined_second);
- __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, on_not_smis);
- __ xor_(second, second);
- if (on_success != NULL) {
- __ jmp(on_success);
- }
- // Else: fall through.
-
- __ bind(&done);
-}
-
-
-void MathPowStub::Generate(MacroAssembler* masm) {
- // Choose register conforming to calling convention (when bailing out).
-#ifdef _WIN64
- const Register exponent = rdx;
-#else
- const Register exponent = rdi;
-#endif
- const Register base = rax;
- const Register scratch = rcx;
- const XMMRegister double_result = xmm3;
- const XMMRegister double_base = xmm2;
- const XMMRegister double_exponent = xmm1;
- const XMMRegister double_scratch = xmm4;
-
- Label call_runtime, done, exponent_not_smi, int_exponent;
-
- // Save 1 in double_result - we need this several times later on.
- __ movq(scratch, Immediate(1));
- __ cvtlsi2sd(double_result, scratch);
-
- if (exponent_type_ == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack.
- __ movq(base, Operand(rsp, 2 * kPointerSize));
- __ movq(exponent, Operand(rsp, 1 * kPointerSize));
- __ JumpIfSmi(base, &base_is_smi, Label::kNear);
- __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
-
- __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
- __ jmp(&unpack_exponent, Label::kNear);
-
- __ bind(&base_is_smi);
- __ SmiToInteger32(base, base);
- __ cvtlsi2sd(double_base, base);
- __ bind(&unpack_exponent);
-
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiToInteger32(exponent, exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiToInteger32(exponent, exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type_ != INTEGER) {
- Label fast_power;
- // Detect integer exponents stored as double.
- __ cvttsd2si(exponent, double_exponent);
- // Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cmpl(exponent, Immediate(0x80000000u));
- __ j(equal, &call_runtime);
- __ cvtlsi2sd(double_scratch, exponent);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_exponent, double_scratch);
- __ j(equal, &int_exponent);
-
- if (exponent_type_ == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label continue_sqrt, continue_rsqrt, not_plus_half;
- // Test for 0.5.
- // Load double_scratch with 0.5.
- __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE64);
- __ movq(double_scratch, scratch);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &not_plus_half, Label::kNear);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- // According to IEEE-754, double-precision -Infinity has the highest
- // 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
- __ movq(double_scratch, scratch);
- __ ucomisd(double_scratch, double_base);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_sqrt, Label::kNear);
- __ j(carry, &continue_sqrt, Label::kNear);
-
- // Set result to Infinity in the special case.
- __ xorps(double_result, double_result);
- __ subsd(double_result, double_scratch);
- __ jmp(&done);
-
- __ bind(&continue_sqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_scratch, double_scratch);
- __ addsd(double_scratch, double_base); // Convert -0 to 0.
- __ sqrtsd(double_result, double_scratch);
- __ jmp(&done);
-
- // Test for -0.5.
- __ bind(&not_plus_half);
- // Load double_scratch with -0.5 by substracting 1.
- __ subsd(double_scratch, double_result);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &fast_power, Label::kNear);
-
- // Calculates reciprocal of square root of base. Check for the special
- // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- // According to IEEE-754, double-precision -Infinity has the highest
- // 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
- __ movq(double_scratch, scratch);
- __ ucomisd(double_scratch, double_base);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_rsqrt, Label::kNear);
- __ j(carry, &continue_rsqrt, Label::kNear);
-
- // Set result to 0 in the special case.
- __ xorps(double_result, double_result);
- __ jmp(&done);
-
- __ bind(&continue_rsqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_exponent, double_exponent);
- __ addsd(double_exponent, double_base); // Convert -0 to +0.
- __ sqrtsd(double_exponent, double_exponent);
- __ divsd(double_result, double_exponent);
- __ jmp(&done);
- }
-
- // Using FPU instructions to calculate power.
- Label fast_power_failed;
- __ bind(&fast_power);
- __ fnclex(); // Clear flags to catch exceptions later.
- // Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ subq(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), double_exponent);
- __ fld_d(Operand(rsp, 0)); // E
- __ movsd(Operand(rsp, 0), double_base);
- __ fld_d(Operand(rsp, 0)); // B, E
-
- // Exponent is in st(1) and base is in st(0)
- // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
- // FYL2X calculates st(1) * log2(st(0))
- __ fyl2x(); // X
- __ fld(0); // X, X
- __ frndint(); // rnd(X), X
- __ fsub(1); // rnd(X), X-rnd(X)
- __ fxch(1); // X - rnd(X), rnd(X)
- // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
- __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
- __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 2^(X-rnd(X)), rnd(X)
- // FSCALE calculates st(0) * 2^st(1)
- __ fscale(); // 2^X, rnd(X)
- __ fstp(1);
- // Bail out to runtime in case of exceptions in the status word.
- __ fnstsw_ax();
- __ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
- __ j(not_zero, &fast_power_failed, Label::kNear);
- __ fstp_d(Operand(rsp, 0));
- __ movsd(double_result, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- __ jmp(&done);
-
- __ bind(&fast_power_failed);
- __ fninit();
- __ addq(rsp, Immediate(kDoubleSize));
- __ jmp(&call_runtime);
- }
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
- const XMMRegister double_scratch2 = double_exponent;
- // Back up exponent as we need to check if exponent is negative later.
- __ movq(scratch, exponent); // Back up exponent.
- __ movsd(double_scratch, double_base); // Back up base.
- __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
-
- // Get absolute value of exponent.
- Label no_neg, while_true, while_false;
- __ testl(scratch, scratch);
- __ j(positive, &no_neg, Label::kNear);
- __ negl(scratch);
- __ bind(&no_neg);
-
- __ j(zero, &while_false, Label::kNear);
- __ shrl(scratch, Immediate(1));
- // Above condition means CF==0 && ZF==0. This means that the
- // bit that has been shifted out is 0 and the result is not 0.
- __ j(above, &while_true, Label::kNear);
- __ movsd(double_result, double_scratch);
- __ j(zero, &while_false, Label::kNear);
-
- __ bind(&while_true);
- __ shrl(scratch, Immediate(1));
- __ mulsd(double_scratch, double_scratch);
- __ j(above, &while_true, Label::kNear);
- __ mulsd(double_result, double_scratch);
- __ j(not_zero, &while_true);
-
- __ bind(&while_false);
- // If the exponent is negative, return 1/result.
- __ testl(exponent, exponent);
- __ j(greater, &done);
- __ divsd(double_scratch2, double_result);
- __ movsd(double_result, double_scratch2);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ xorps(double_scratch2, double_scratch2);
- __ ucomisd(double_scratch2, double_result);
- // double_exponent aliased as double_scratch2 has already been overwritten
- // and may not have contained the exponent value in the first place when the
- // input was a smi. We reset it with exponent value before bailing out.
- __ j(not_equal, &done);
- __ cvtlsi2sd(double_exponent, exponent);
-
- // Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
- if (exponent_type_ == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in eax.
- __ bind(&done);
- __ AllocateHeapNumber(rax, rcx, &call_runtime);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
- __ IncrementCounter(counters->math_pow(), 1);
- __ ret(2 * kPointerSize);
- } else {
- __ bind(&call_runtime);
- // Move base to the correct argument register. Exponent is already in xmm1.
- __ movsd(xmm0, double_base);
- ASSERT(double_exponent.is(xmm1));
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(2);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()), 2);
- }
- // Return value is in xmm0.
- __ movsd(double_result, xmm0);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- __ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1);
- __ ret(0);
- }
-}
-
-
-void ArrayLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->length_string());
- receiver = rdx;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- receiver = rax;
- }
-
- StubCompiler::GenerateLoadArrayLength(masm, receiver, r8, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->prototype_string());
- receiver = rdx;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- receiver = rax;
- }
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8, r9, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->length_string());
- receiver = rdx;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- receiver = rax;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss,
- support_wrapper_);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = rdx;
- Register value = rax;
- Register scratch = rbx;
- if (kind() == Code::KEYED_STORE_IC) {
- __ Cmp(rcx, masm->isolate()->factory()->length_string());
- }
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
- __ push(receiver);
- __ push(value);
- __ push(scratch); // return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::GenerateStoreMiss(masm, kind());
-}
-
-
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, rax, reg_, inobject_, index_);
- __ ret(0);
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in rdx and the parameter count is in rax.
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(rdx, &slow);
-
- // Check if the calling frame is an arguments adaptor frame. We look at the
- // context offset, and if the frame is not a regular one, then we find a
- // Smi instead of the context. We can't use SmiCompare here, because that
- // only works for comparing two smis.
- Label adaptor;
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register rax. Use unsigned comparison to get negative
- // check for free.
- __ cmpq(rdx, rax);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpq(rdx, rcx);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
- __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(rbx); // Return address.
- __ push(rdx);
- __ push(rbx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
- // Stack layout:
- // rsp[0] : return address
- // rsp[8] : number of parameters (tagged)
- // rsp[16] : receiver displacement
- // rsp[24] : function
- // Registers used over the whole function:
- // rbx: the mapped parameter count (untagged)
- // rax: the allocated object (tagged).
-
- Factory* factory = masm->isolate()->factory();
-
- __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
- // rbx = parameter count (untagged)
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
-
- // No adaptor, parameter count = argument count.
- __ movq(rcx, rbx);
- __ jmp(&try_allocate, Label::kNear);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ SmiToInteger64(rcx,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
-
- // rbx = parameter count (untagged)
- // rcx = argument count (untagged)
- // Compute the mapped parameter count = min(rbx, rcx) in rbx.
- __ cmpq(rbx, rcx);
- __ j(less_equal, &try_allocate, Label::kNear);
- __ movq(rbx, rcx);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- Label no_parameter_map;
- __ xor_(r8, r8);
- __ testq(rbx, rbx);
- __ j(zero, &no_parameter_map, Label::kNear);
- __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
- __ bind(&no_parameter_map);
-
- // 2. Backing store.
- __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ AllocateInNewSpace(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
-
- // rax = address of new object(s) (tagged)
- // rcx = argument count (untagged)
- // Get the arguments boilerplate from the current native context into rdi.
- Label has_mapped_parameters, copy;
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
- __ testq(rbx, rbx);
- __ j(not_zero, &has_mapped_parameters, Label::kNear);
-
- const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
- __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
- __ jmp(&copy, Label::kNear);
-
- const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
- __ bind(&has_mapped_parameters);
- __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
- __ bind(&copy);
-
- // rax = address of new object (tagged)
- // rbx = mapped parameter count (untagged)
- // rcx = argument count (untagged)
- // rdi = address of boilerplate object (tagged)
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(rdx, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), rdx);
- }
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- rdx);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- // Note: rcx is tagged from here on.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ Integer32ToSmi(rcx, rcx);
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- rcx);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, edi will point there, otherwise to the
- // backing store.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
-
- // rax = address of new object (tagged)
- // rbx = mapped parameter count (untagged)
- // rcx = argument count (tagged)
- // rdi = address of parameter map or backing store (tagged)
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ testq(rbx, rbx);
- __ j(zero, &skip_parameter_map);
-
- __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
- // rbx contains the untagged argument count. Add 2 and tag to write.
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ Integer64PlusConstantToSmi(r9, rbx, 2);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
- __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameter thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
-
- // Load tagged parameter count into r9.
- __ Integer32ToSmi(r9, rbx);
- __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, Operand(rsp, 1 * kPointerSize));
- __ subq(r8, r9);
- __ Move(r11, factory->the_hole_value());
- __ movq(rdx, rdi);
- __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
- // r9 = loop variable (tagged)
- // r8 = mapping index (tagged)
- // r11 = the hole value
- // rdx = address of parameter map (tagged)
- // rdi = address of backing store (tagged)
- __ jmp(&parameters_test, Label::kNear);
-
- __ bind(&parameters_loop);
- __ SmiSubConstant(r9, r9, Smi::FromInt(1));
- __ SmiToInteger64(kScratchRegister, r9);
- __ movq(FieldOperand(rdx, kScratchRegister,
- times_pointer_size,
- kParameterMapHeaderSize),
- r8);
- __ movq(FieldOperand(rdi, kScratchRegister,
- times_pointer_size,
- FixedArray::kHeaderSize),
- r11);
- __ SmiAddConstant(r8, r8, Smi::FromInt(1));
- __ bind(&parameters_test);
- __ SmiTest(r9);
- __ j(not_zero, &parameters_loop, Label::kNear);
-
- __ bind(&skip_parameter_map);
-
- // rcx = argument count (tagged)
- // rdi = address of backing store (tagged)
- // Copy arguments header and remaining slots (if there are any).
- __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
- factory->fixed_array_map());
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
-
- Label arguments_loop, arguments_test;
- __ movq(r8, rbx);
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
- // Untag rcx for the loop below.
- __ SmiToInteger64(rcx, rcx);
- __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
- __ subq(rdx, kScratchRegister);
- __ jmp(&arguments_test, Label::kNear);
-
- __ bind(&arguments_loop);
- __ subq(rdx, Immediate(kPointerSize));
- __ movq(r9, Operand(rdx, 0));
- __ movq(FieldOperand(rdi, r8,
- times_pointer_size,
- FixedArray::kHeaderSize),
- r9);
- __ addq(r8, Immediate(1));
-
- __ bind(&arguments_test);
- __ cmpq(r8, rcx);
- __ j(less, &arguments_loop, Label::kNear);
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- // rcx = argument count (untagged)
- __ bind(&runtime);
- __ Integer32ToSmi(rcx, rcx);
- __ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[8] : number of parameters
- // esp[16] : receiver displacement
- // esp[24] : function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &runtime);
-
- // Patch the arguments.length and the parameters pointer.
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
- __ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : number of parameters
- // rsp[16] : receiver displacement
- // rsp[24] : function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
-
- // Get the length from the frame.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
- __ SmiToInteger64(rcx, rcx);
- __ jmp(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
- __ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ testq(rcx, rcx);
- __ j(zero, &add_arguments_object, Label::kNear);
- __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current native context.
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
- const int offset =
- Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
- __ movq(rdi, Operand(rdi, offset));
-
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- rcx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ testq(rcx, rcx);
- __ j(zero, &done);
-
- // Get the parameters pointer from the stack.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
-
-
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
- // Untag the length for the loop below.
- __ SmiToInteger64(rcx, rcx);
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ movq(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
- __ addq(rdi, Immediate(kPointerSize));
- __ subq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time or if regexp entry in generated code is turned off runtime switch or
- // at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: last_match_info (expected JSArray)
- // rsp[16]: previous index
- // rsp[24]: subject string
- // rsp[32]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime;
- // Ensure that a RegExp stack is allocated.
- Isolate* isolate = masm->isolate();
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate);
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
- __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
- __ testq(kScratchRegister, kScratchRegister);
- __ j(zero, &runtime);
-
- // Check that the first argument is a JSRegExp object.
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
-
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- Condition is_smi = masm->CheckSmi(rax);
- __ Check(NegateCondition(is_smi),
- "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // rax: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
- __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
- __ j(not_equal, &runtime);
-
- // rax: RegExp data (FixedArray)
- // Check that the number of captures fit in the static offsets vector buffer.
- __ SmiToInteger32(rdx,
- FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
- // Check (number_of_captures + 1) * 2 <= offsets vector size
- // Or number_of_captures <= offsets vector size / 2 - 1
- STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
- __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1));
- __ j(above, &runtime);
-
- // Reset offset for possibly sliced string.
- __ Set(r14, 0);
- __ movq(rdi, Operand(rsp, kSubjectOffset));
- __ JumpIfSmi(rdi, &runtime);
- __ movq(r15, rdi); // Make a copy of the original subject string.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- // rax: RegExp data (FixedArray)
- // rdi: subject string
- // r15: subject string
- // Handle subject string according to its encoding and representation:
- // (1) Sequential two byte? If yes, go to (9).
- // (2) Sequential one byte? If yes, go to (6).
- // (3) Anything but sequential or cons? If yes, go to (7).
- // (4) Cons string. If the string is flat, replace subject with first string.
- // Otherwise bailout.
- // (5a) Is subject sequential two byte? If yes, go to (9).
- // (5b) Is subject external? If yes, go to (8).
- // (6) One byte sequential. Load regexp code for one byte.
- // (E) Carry on.
- /// [...]
-
- // Deferred code at the end of the stub:
- // (7) Not a long external string? If yes, go to (10).
- // (8) External string. Make it, offset-wise, look like a sequential string.
- // (8a) Is the external string one byte? If yes, go to (6).
- // (9) Two byte sequential. Load regexp code for one byte. Go to (E).
- // (10) Short external string or not a string? If yes, bail out to runtime.
- // (11) Sliced string. Replace subject with parent. Go to (5a).
-
- Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
- external_string /* 8 */, check_underlying /* 5a */,
- not_seq_nor_cons /* 7 */, check_code /* E */,
- not_long_external /* 10 */;
-
- // (1) Sequential two byte? If yes, go to (9).
- __ andb(rbx, Immediate(kIsNotStringMask |
- kStringRepresentationMask |
- kStringEncodingMask |
- kShortExternalStringMask));
- STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string); // Go to (9).
-
- // (2) Sequential one byte? If yes, go to (6).
- // Any other sequential string must be one byte.
- __ andb(rbx, Immediate(kIsNotStringMask |
- kStringRepresentationMask |
- kShortExternalStringMask));
- __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6).
-
- // (3) Anything but sequential or cons? If yes, go to (7).
- // We check whether the subject string is a cons, since sequential strings
- // have already been covered.
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
- STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ cmpq(rbx, Immediate(kExternalStringTag));
- __ j(greater_equal, &not_seq_nor_cons); // Go to (7).
-
- // (4) Cons string. Check that it's flat.
- // Replace subject with first string and reload instance type.
- __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
- Heap::kempty_stringRootIndex);
- __ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
- __ bind(&check_underlying);
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
-
- // (5a) Is subject sequential two byte? If yes, go to (9).
- __ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
- STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string); // Go to (9).
- // (5b) Is subject external? If yes, go to (8).
- __ testb(rbx, Immediate(kStringRepresentationMask));
- // The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
- __ j(not_zero, &external_string); // Go to (8)
-
- // (6) One byte sequential. Load regexp code for one byte.
- __ bind(&seq_one_byte_string);
- // rax: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
- __ Set(rcx, 1); // Type is one byte.
-
- // (E) Carry on. String handling is done.
- __ bind(&check_code);
- // r11: irregexp code
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // smi (code flushing support)
- __ JumpIfSmi(r11, &runtime);
-
- // rdi: sequential subject string (or look-alike, external string)
- // r15: original subject string
- // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r11: code
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- // We have to use r15 instead of rdi to load the length because rdi might
- // have been only made to look like a sequential string when it actually
- // is an external string.
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
- __ JumpIfNotSmi(rbx, &runtime);
- __ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
- __ j(above_equal, &runtime);
- __ SmiToInteger64(rbx, rbx);
-
- // rdi: subject string
- // rbx: previous index
- // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
- // r11: code
- // All checks done. Now push arguments for native regexp code.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->regexp_entry_native(), 1);
-
- // Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 9;
- int argument_slots_on_stack =
- masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
- __ EnterApiExitFrame(argument_slots_on_stack);
-
- // Argument 9: Pass current isolate address.
- // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
- // Immediate(ExternalReference::isolate_address()));
- __ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
- __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
- kScratchRegister);
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
- Immediate(1));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
- __ movq(r9, Operand(kScratchRegister, 0));
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
- __ addq(r9, Operand(kScratchRegister, 0));
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- // Argument 6 is passed in r9 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
- Immediate(0));
-#else
- __ Set(r9, 0);
-#endif
-
- // Argument 5: static offsets vector buffer.
- __ LoadAddress(r8,
- ExternalReference::address_of_static_offsets_vector(isolate));
- // Argument 5 passed in r8 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
-#endif
-
- // First four arguments are passed in registers on both Linux and Windows.
-#ifdef _WIN64
- Register arg4 = r9;
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg4 = rcx;
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
-
- // Keep track on aliasing between argX defined above and the registers used.
- // rdi: subject string
- // rbx: previous index
- // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
- // r11: code
- // r14: slice offset
- // r15: original subject string
-
- // Argument 2: Previous index.
- __ movq(arg2, rbx);
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
- // Prepare start and end index of the input.
- // Load the length from the original sliced string if that is the case.
- __ addq(rbx, r14);
- __ SmiToInteger32(arg3, FieldOperand(r15, String::kLengthOffset));
- __ addq(r14, arg3); // Using arg3 as scratch.
-
- // rbx: start index of the input
- // r14: end index of the input
- // r15: original subject string
- __ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
- __ j(zero, &setup_two_byte, Label::kNear);
- __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
- __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
- __ jmp(&setup_rest, Label::kNear);
- __ bind(&setup_two_byte);
- __ lea(arg4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
- __ bind(&setup_rest);
-
- // Argument 1: Original subject string.
- // The original subject is in the previous stack frame. Therefore we have to
- // use rbp, which points exactly to one pointer size below the previous rsp.
- // (Because creating a new stack frame pushes the previous rbp onto the stack
- // and thereby moves up rsp by one kPointerSize.)
- __ movq(arg1, r15);
-
- // Locate the code entry and call it.
- __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(r11);
-
- __ LeaveApiExitFrame();
-
- // Check the result.
- Label success;
- Label exception;
- __ cmpl(rax, Immediate(1));
- // We expect exactly one result since we force the called regexp to behave
- // as non-global.
- __ j(equal, &success, Label::kNear);
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
- __ j(equal, &exception);
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
- // If none of the above, it can only be retry.
- // Handle that in the runtime system.
- __ j(not_equal, &runtime);
-
- // For failure return null.
- __ LoadRoot(rax, Heap::kNullValueRootIndex);
- __ ret(4 * kPointerSize);
-
- // Load RegExp data.
- __ bind(&success);
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
- __ SmiToInteger32(rax,
- FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rax, rax, times_1, 2));
-
- // rdx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ movq(r15, Operand(rsp, kLastMatchInfoOffset));
- __ JumpIfSmi(r15, &runtime);
- __ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(r15, JSArray::kElementsOffset));
- __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information. Ensure no overflow in add.
- STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ subl(rax, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmpl(rdx, rax);
- __ j(greater, &runtime);
-
- // rbx: last_match_info backing store (FixedArray)
- // rdx: number of capture registers
- // Store the capture count.
- __ Integer32ToSmi(kScratchRegister, rdx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
- kScratchRegister);
- // Store last subject and last input.
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ movq(rcx, rax);
- __ RecordWriteField(rbx,
- RegExpImpl::kLastSubjectOffset,
- rax,
- rdi,
- kDontSaveFPRegs);
- __ movq(rax, rcx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
- __ RecordWriteField(rbx,
- RegExpImpl::kLastInputOffset,
- rax,
- rdi,
- kDontSaveFPRegs);
-
- // Get the static offsets vector filled by the native regexp code.
- __ LoadAddress(rcx,
- ExternalReference::address_of_static_offsets_vector(isolate));
-
- // rbx: last_match_info backing store (FixedArray)
- // rcx: offsets vector
- // rdx: number of capture registers
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
- __ bind(&next_capture);
- __ subq(rdx, Immediate(1));
- __ j(negative, &done, Label::kNear);
- // Read the value from the static offsets vector buffer and make it a smi.
- __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
- __ Integer32ToSmi(rdi, rdi);
- // Store the smi value in the last match info.
- __ movq(FieldOperand(rbx,
- rdx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- rdi);
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ movq(rax, r15);
- __ ret(4 * kPointerSize);
-
- __ bind(&exception);
- // Result must now be exception. If there is no pending exception already a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // haven't created the exception yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate);
- Operand pending_exception_operand =
- masm->ExternalOperand(pending_exception_address, rbx);
- __ movq(rax, pending_exception_operand);
- __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ cmpq(rax, rdx);
- __ j(equal, &runtime);
- __ movq(pending_exception_operand, rdx);
-
- __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- Label termination_exception;
- __ j(equal, &termination_exception, Label::kNear);
- __ Throw(rax);
-
- __ bind(&termination_exception);
- __ ThrowUncatchable(rax);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-
- // Deferred code for string handling.
- // (7) Not a long external string? If yes, go to (10).
- __ bind(&not_seq_nor_cons);
- // Compare flags are still set from (3).
- __ j(greater, &not_long_external, Label::kNear); // Go to (10).
-
- // (8) External string. Short external strings have been ruled out.
- __ bind(&external_string);
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ testb(rbx, Immediate(kIsIndirectStringMask));
- __ Assert(zero, "external string expected, but not found");
- }
- __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kTwoByteStringTag == 0);
- // (8a) Is the external string one byte? If yes, go to (6).
- __ testb(rbx, Immediate(kStringEncodingMask));
- __ j(not_zero, &seq_one_byte_string); // Goto (6).
-
- // rdi: subject string (flat two-byte)
- // rax: RegExp data (FixedArray)
- // (9) Two byte sequential. Load regexp code for one byte. Go to (E).
- __ bind(&seq_two_byte_string);
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
- __ Set(rcx, 0); // Type is two byte.
- __ jmp(&check_code); // Go to (E).
-
- // (10) Not a string or a short external string? If yes, bail out to runtime.
- __ bind(&not_long_external);
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
- __ j(not_zero, &runtime);
-
- // (11) Sliced string. Replace subject with parent. Go to (5a).
- // Load offset into r14 and replace subject string with parent.
- __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
- __ jmp(&check_underlying);
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- __ movq(r8, Operand(rsp, kPointerSize * 3));
- __ JumpIfNotSmi(r8, &slowcase);
- __ SmiToInteger32(rbx, r8);
- __ cmpl(rbx, Immediate(kMaxInlineLength));
- __ j(above, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in rbx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- rbx, // In: Number of elements.
- rax, // Out: Start of allocation (tagged).
- rcx, // Out: End of allocation.
- rdx, // Scratch register
- &slowcase,
- TAG_OBJECT);
- // rax: Start of allocated area, object-tagged.
- // rbx: Number of array elements as int32.
- // r8: Number of array elements as smi.
-
- // Set JSArray map to global.regexp_result_map().
- __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
-
- // Set empty properties FixedArray.
- __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
-
- // Set elements to point to FixedArray allocated right after the JSArray.
- __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
-
- // Set input, index and length fields from arguments.
- __ movq(r8, Operand(rsp, kPointerSize * 1));
- __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 2));
- __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 3));
- __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
-
- // Fill out the elements FixedArray.
- // rax: JSArray.
- // rcx: FixedArray.
- // rbx: Number of elements in array as int32.
-
- // Set map.
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
- // Set length.
- __ Integer32ToSmi(rdx, rbx);
- __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
- // Fill fixed array elements with undefined.
- // rax: JSArray.
- // rbx: Number of elements in array that remains to be filled, as int32.
- // rcx: Start of elements in FixedArray.
- // rdx: undefined.
- Label loop;
- __ testl(rbx, rbx);
- __ bind(&loop);
- __ j(less_equal, &done); // Jump if rcx is negative or zero.
- __ subl(rbx, Immediate(1));
- __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
- __ jmp(&loop);
-
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ SmiToInteger32(
- mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shrl(mask, Immediate(1));
- __ subq(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- Factory* factory = masm->isolate()->factory();
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- factory->heap_number_map(),
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- Register probe = mask;
- __ movq(probe,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
- }
-
- __ bind(&is_smi);
- __ SmiToInteger32(scratch, object);
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmpq(object,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ movq(result,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
-void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask) {
- __ and_(hash, mask);
- // Each entry in string cache consists of two pointer sized fields,
- // but times_twice_pointer_size (multiplication by 16) scale factor
- // is not supported by addrmode on x64 platform.
- // So we have to premultiply entry index before lookup.
- __ shl(hash, Immediate(kPointerSizeLog2 + 1));
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ movq(rbx, Operand(rsp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
-}
-
-
-static void CheckInputType(MacroAssembler* masm,
- Register input,
- CompareIC::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareIC::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL);
- __ j(not_equal, fail);
- }
- // We could be strict about internalized/non-internalized here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
-
-static void BranchIfNotInternalizedString(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
- FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the internalized bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
- STATIC_ASSERT(kInternalizedTag != 0);
- __ testb(scratch, Immediate(kIsInternalizedMask));
- __ j(zero, label);
-}
-
-
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
- Label check_unequal_objects, done;
- Condition cc = GetCondition();
- Factory* factory = masm->isolate()->factory();
-
- Label miss;
- CheckInputType(masm, rdx, left_, &miss);
- CheckInputType(masm, rax, right_, &miss);
-
- // Compare two smis.
- Label non_smi, smi_done;
- __ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
- __ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
- __ bind(&smi_done);
- __ movq(rax, rdx);
- __ ret(0);
- __ bind(&non_smi);
-
- // The compare stub returns a positive, negative, or zero 64-bit integer
- // value in rax, corresponding to result of comparing the two inputs.
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- {
- Label not_user_equal, user_equal;
- __ JumpIfSmi(rax, &not_user_equal);
- __ JumpIfSmi(rdx, &not_user_equal);
-
- __ CmpObjectType(rax, JS_OBJECT_TYPE, rbx);
- __ j(not_equal, &not_user_equal);
-
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &not_user_equal);
-
- __ testb(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &user_equal);
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &user_equal);
-
- __ jmp(&not_user_equal);
-
- __ bind(&user_equal);
-
- __ pop(rbx); // Return address.
- __ push(rax);
- __ push(rdx);
- __ push(rbx);
- __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
- __ bind(&not_user_equal);
- }
-
- // Two identical objects are equal unless they are both NaN or undefined.
- {
- Label not_identical;
- __ cmpq(rax, rdx);
- __ j(not_equal, &not_identical, Label::kNear);
-
- if (cc != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- Label check_for_nan;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(rax, NegativeComparisonResult(cc));
- __ ret(0);
- __ bind(&check_for_nan);
- }
-
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second best thing - test it ourselves.
- Label heap_number;
- // If it's not a heap number, then return equal for (in)equality operator.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
- if (cc != equal) {
- // Call runtime on identical objects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical, Label::kNear);
- }
- __ Set(rax, EQUAL);
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return equal if it's not NaN.
- // For NaN, return 1 for every condition except greater and
- // greater-equal. Return -1 for them, so the comparison yields
- // false for all conditions except not-equal.
- __ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
- __ setcc(parity_even, rax);
- // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
- if (cc == greater_equal || cc == greater) {
- __ neg(rax);
- }
- __ ret(0);
-
- __ bind(&not_identical);
- }
-
- if (cc == equal) { // Both strict and non-strict.
- Label slow; // Fallthrough label.
-
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- if (strict()) {
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- {
- Label not_smis;
- __ SelectNonSmi(rbx, rax, rdx, &not_smis);
-
- // Check if the non-smi operand is a heap number.
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- factory->heap_number_map());
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal. ebx (the lower half of rbx) is not zero.
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&not_smis);
- }
-
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different
- // There is no test for undetectability in strict equality.
-
- // If the first object is a JS object, we have done pointer comparison.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
- Label first_non_object;
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(below, &first_non_object, Label::kNear);
- // Return non-zero (eax (not rax) is not zero)
- Label return_not_equal;
- STATIC_ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- }
- __ bind(&slow);
- }
-
- // Generate the number comparison code.
- Label non_number_comparison;
- Label unordered;
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
- __ xorl(rax, rax);
- __ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ setcc(above, rax);
- __ setcc(below, rcx);
- __ subq(rax, rcx);
- __ ret(0);
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc != not_equal);
- if (cc == less || cc == less_equal) {
- __ Set(rax, 1);
- } else {
- __ Set(rax, -1);
- }
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
-
- // Fast negative check for internalized-to-internalized equality.
- Label check_for_strings;
- if (cc == equal) {
- BranchIfNotInternalizedString(
- masm, &check_for_strings, rax, kScratchRegister);
- BranchIfNotInternalizedString(
- masm, &check_for_strings, rdx, kScratchRegister);
-
- // We've already checked for object identity, so if both operands are
- // internalized strings they aren't equal. Register eax (not rax) already
- // holds a non-zero value, which indicates not equal, so just return.
- __ ret(0);
- }
-
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialAsciiStrings(
- rdx, rax, rcx, rbx, &check_unequal_objects);
-
- // Inline comparison of ASCII strings.
- if (cc == equal) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- rdx,
- rax,
- rcx,
- rbx);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- rdx,
- rax,
- rcx,
- rbx,
- rdi,
- r8);
- }
-
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
-#endif
-
- __ bind(&check_unequal_objects);
- if (cc == equal && !strict()) {
- // Not strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- Label not_both_objects, return_unequal;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(rcx, Operand(rax, rdx, times_1, 0));
- __ testb(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects, Label::kNear);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
- __ j(below, &not_both_objects, Label::kNear);
- __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(below, &not_both_objects, Label::kNear);
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal, Label::kNear);
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal, Label::kNear);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(rax, EQUAL);
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in rax,
- // or return equal if we fell through to here.
- __ ret(0);
- __ bind(&not_both_objects);
- }
-
- // Push arguments below the return address to prepare jump to builtin.
- __ pop(rcx);
- __ push(rdx);
- __ push(rax);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc == equal) {
- builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- builtin = Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
- }
-
- // Restore return address on the stack.
- __ push(rcx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // rbx : cache cell for call target
- // rdi : the function to call
- ASSERT(!FLAG_optimize_constructed_arrays);
- Isolate* isolate = masm->isolate();
- Label initialize, done;
-
- // Load the cache state into rcx.
- __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmpq(rcx, rdi);
- __ j(equal, &done, Label::kNear);
- __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
- __ j(equal, &done, Label::kNear);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
- __ j(equal, &initialize, Label::kNear);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
- __ jmp(&done, Label::kNear);
-
- // An uninitialized cache is patched with the function.
- __ bind(&initialize);
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
-
-
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // rbx : cache cell for call target
- // rdi : the function to call
- ASSERT(FLAG_optimize_constructed_arrays);
- Isolate* isolate = masm->isolate();
- Label initialize, done, miss, megamorphic, not_array_function;
-
- // Load the cache state into rcx.
- __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmpq(rcx, rdi);
- __ j(equal, &done);
- __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
- __ j(equal, &done);
-
- // Special handling of the Array() function, which caches not only the
- // monomorphic Array function but the initial ElementsKind with special
- // sentinels
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(isolate,
- LAST_FAST_ELEMENTS_KIND);
- __ Cmp(rcx, terminal_kind_sentinel);
- __ j(not_equal, &miss);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &megamorphic);
- __ jmp(&done);
-
- __ bind(&miss);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
- __ j(equal, &initialize);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ bind(&megamorphic);
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
- __ jmp(&done, Label::kNear);
-
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
- __ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor, install a sentinel value in
- // the constructor's type info cell that will track the initial ElementsKind
- // that should be used for the array when its constructed.
- Handle<Object> initial_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(isolate,
- GetInitialFastElementsKind());
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- initial_kind_sentinel);
- __ jmp(&done);
-
- __ bind(&not_array_function);
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // rbx : cache cell for call target
- // rdi : the function to call
- Isolate* isolate = masm->isolate();
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- // +1 ~ return address
- __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
- // Call as function is indicated with the hole.
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &call, Label::kNear);
- // Patch the receiver on the stack with the global receiver object.
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
- __ bind(&call);
- }
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &non_function);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
-
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(equal, &call_as_function);
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
- }
- // Check for function proxy.
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function);
- __ pop(rcx);
- __ push(rdi); // put proxy as additional argument under return address
- __ push(rcx);
- __ Set(rax, argc_ + 1);
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(&non_function);
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
- __ Set(rax, argc_);
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor =
- Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-void CallConstructStub::Generate(MacroAssembler* masm) {
- // rax : number of arguments
- // rbx : cache cell for call target
- // rdi : constructor function
- Label slow, non_function_call;
-
- // Check that function is not a smi.
- __ JumpIfSmi(rdi, &non_function_call);
- // Check that function is a JSFunction.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Jump to the function-specific construct stub.
- Register jmp_reg = FLAG_optimize_constructed_arrays ? rcx : rbx;
- __ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(jmp_reg, FieldOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
- __ jmp(jmp_reg);
-
- // rdi: called object
- // rax: number of arguments
- // rcx: object map
- Label do_call;
- __ bind(&slow);
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function_call);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing rax).
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-bool CEntryStub::NeedsImmovableCode() {
- return false;
-}
-
-
-bool CEntryStub::IsPregenerated() {
-#ifdef _WIN64
- return result_size_ == 1;
-#else
- return true;
-#endif
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- // It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
-}
-
-
-void CodeStub::GenerateFPStubs(Isolate* isolate) {
-}
-
-
-void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- CEntryStub save_doubles(1, kSaveFPRegs);
- save_doubles.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- __ movq(scratch, value);
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, Immediate(0xf));
- __ cmpq(scratch, Immediate(0xf));
- __ j(equal, oom_label);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope) {
- // rax: result parameter for PerformGC, if any.
- // rbx: pointer to C function (C callee-saved).
- // rbp: frame pointer (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r15: pointer to the first argument (C callee-saved).
- // This pointer is reused in LeaveExitFrame(), so it is stored in a
- // callee-saved register.
-
- // Simple results returned in rax (both AMD64 and Win64 calling conventions).
- // Complex results must be written to address passed as first argument.
- // AMD64 calling convention: a struct of two pointers in rax+rdx
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack is known to be aligned. This function takes one argument which is
- // passed in register.
-#ifdef _WIN64
- __ movq(rcx, rax);
-#else // _WIN64
- __ movq(rdi, rax);
-#endif
- __ movq(kScratchRegister,
- ExternalReference::perform_gc_function(masm->isolate()));
- __ call(kScratchRegister);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
- if (always_allocate_scope) {
- Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
- __ incl(scope_depth_operand);
- }
-
- // Call C function.
-#ifdef _WIN64
- // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
- // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
- __ movq(StackSpaceOperand(0), r14); // argc.
- __ movq(StackSpaceOperand(1), r15); // argv.
- if (result_size_ < 2) {
- // Pass a pointer to the Arguments object as the first argument.
- // Return result in single register (rax).
- __ lea(rcx, StackSpaceOperand(0));
- __ LoadAddress(rdx, ExternalReference::isolate_address());
- } else {
- ASSERT_EQ(2, result_size_);
- // Pass a pointer to the result location as the first argument.
- __ lea(rcx, StackSpaceOperand(2));
- // Pass a pointer to the Arguments object as the second argument.
- __ lea(rdx, StackSpaceOperand(0));
- __ LoadAddress(r8, ExternalReference::isolate_address());
- }
-
-#else // _WIN64
- // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- __ movq(rdi, r14); // argc.
- __ movq(rsi, r15); // argv.
- __ movq(rdx, ExternalReference::isolate_address());
-#endif
- __ call(rbx);
- // Result is in rax - do not destroy this register!
-
- if (always_allocate_scope) {
- Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
- __ decl(scope_depth_operand);
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
-#ifdef _WIN64
- // If return value is on the stack, pop it to registers.
- if (result_size_ > 1) {
- ASSERT_EQ(2, result_size_);
- // Read result values stored on stack. Result is stored
- // above the four argument mirror slots and the two
- // Arguments object slots.
- __ movq(rax, Operand(rsp, 6 * kPointerSize));
- __ movq(rdx, Operand(rsp, 7 * kPointerSize));
- }
-#endif
- __ lea(rcx, Operand(rax, 1));
- // Lower 2 bits of rcx are 0 iff rax has failure tag.
- __ testl(rcx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned);
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles_);
- __ ret(0);
-
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC continue at retry label
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, Label::kNear);
-
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, masm->isolate());
- Operand pending_exception_operand =
- masm->ExternalOperand(pending_exception_address);
- __ movq(rax, pending_exception_operand);
- __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ movq(pending_exception_operand, rdx);
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- __ j(equal, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // rax: number of arguments including receiver
- // rbx: pointer to C function (C callee-saved)
- // rbp: frame pointer of calling JS frame (restored after C call)
- // rsp: stack pointer (restored after C call)
- // rsi: current context (restored)
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Enter the exit frame that transitions from JavaScript to C++.
-#ifdef _WIN64
- int arg_stack_space = (result_size_ < 2 ? 2 : 4);
-#else
- int arg_stack_space = 0;
-#endif
- __ EnterExitFrame(arg_stack_space, save_doubles_);
-
- // rax: Holds the context at this point, but should not be used.
- // On entry to code generated by GenerateCore, it must hold
- // a failure result if the collect_garbage argument to GenerateCore
- // is true. This failure result can be the result of code
- // generated by a previous call to GenerateCore. The value
- // of rax is then passed to Runtime::PerformGC.
- // rbx: pointer to builtin function (C callee-saved).
- // rbp: frame pointer of exit frame (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r15: argv pointer (C callee-saved).
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ movq(rax, failure, RelocInfo::NONE64);
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ Set(rax, static_cast<int64_t>(false));
- __ Store(external_caught, rax);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- Label already_have_failure;
- JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure);
- __ movq(rax, Failure::OutOfMemoryException(0x1), RelocInfo::NONE64);
- __ bind(&already_have_failure);
- __ Store(pending_exception, rax);
- // Fall through to the next label.
-
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(rax);
-
- __ bind(&throw_normal_exception);
- __ Throw(rax);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, handler_entry, exit;
- Label not_outermost_js, not_outermost_js_2;
- { // NOLINT. Scope block confuses linter.
- MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
- // Set up frame.
- __ push(rbp);
- __ movq(rbp, rsp);
-
- // Push the stack frame type marker twice.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- // Scratch register is neither callee-save, nor an argument register on any
- // platform. It's free to use at this point.
- // Cannot use smi-register for loading yet.
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE64);
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
-#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
-#endif
- __ push(rbx);
- // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low low 64 bits are
- // callee save as well.
-
- // Set up the roots and smi constant registers.
- // Needs to be done before any further smi loads.
- __ InitializeSmiConstantRegister();
- __ InitializeRootRegister();
- }
-
- Isolate* isolate = masm->isolate();
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
- {
- Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ push(c_entry_fp_operand);
- }
-
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
- __ Load(rax, js_entry_sp);
- __ testq(rax, rax);
- __ j(not_zero, &not_outermost_js);
- __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ movq(rax, rbp);
- __ Store(js_entry_sp, rax);
- Label cont;
- __ jmp(&cont);
- __ bind(&not_outermost_js);
- __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
- __ bind(&cont);
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- __ Store(pending_exception, rax);
- __ movq(rax, Failure::Exception(), RelocInfo::NONE64);
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
- __ bind(&invoke);
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
-
- // Clear any pending exceptions.
- __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
- __ Store(pending_exception, rax);
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through JS entry trampoline builtin and
- // pop the faked function when we return. We load the address from an
- // external reference instead of inlining the call target address directly
- // in the code, because the builtin stubs may not have been generated yet
- // at the time this code is generated.
- if (is_construct) {
- ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate);
- __ Load(rax, construct_entry);
- } else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
- __ Load(rax, entry);
- }
- __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
- __ call(kScratchRegister);
-
- // Unlink this frame from the handler chain.
- __ PopTryHandler();
-
- __ bind(&exit);
- // Check if the current stack frame is marked as the outermost JS frame.
- __ pop(rbx);
- __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ j(not_equal, &not_outermost_js_2);
- __ movq(kScratchRegister, js_entry_sp);
- __ movq(Operand(kScratchRegister, 0), Immediate(0));
- __ bind(&not_outermost_js_2);
-
- // Restore the top frame descriptor from the stack.
- { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ pop(c_entry_fp_operand);
- }
-
- // Restore callee-saved registers (X64 conventions).
- __ pop(rbx);
-#ifdef _WIN64
- // Callee save on in Win64 ABI, arguments/volatile in AMD64 ABI.
- __ pop(rsi);
- __ pop(rdi);
-#endif
- __ pop(r15);
- __ pop(r14);
- __ pop(r13);
- __ pop(r12);
- __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(rbp);
- __ ret(0);
-}
-
-
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Implements "value instanceof function" operator.
- // Expected input state with no inline cache:
- // rsp[0] : return address
- // rsp[1] : function pointer
- // rsp[2] : value
- // Expected input state with an inline one-element cache:
- // rsp[0] : return address
- // rsp[1] : offset from return address to location of inline cache
- // rsp[2] : function pointer
- // rsp[3] : value
- // Returns a bitwise zero to indicate that the value
- // is and instance of the function and anything else to
- // indicate that the value is not an instance.
-
- static const int kOffsetToMapCheckValue = 2;
- static const int kOffsetToResultValue = 18;
- // The last 4 bytes of the instruction sequence
- // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
- // Move(kScratchRegister, FACTORY->the_hole_value())
- // in front of the hole value address.
- static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
- // The last 4 bytes of the instruction sequence
- // __ j(not_equal, &cache_miss);
- // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
- // before the offset of the hole value in the root array.
- static const unsigned int kWordBeforeResultValue = 0x458B4909;
- // Only the inline check flag is supported on X64.
- ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
- int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
-
- // Get the object - go slow case if it's a smi.
- Label slow;
-
- __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
- __ JumpIfSmi(rax, &slow);
-
- // Check that the left hand is a JS object. Leave its map in rax.
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
- __ j(below, &slow);
- __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
- // rdx is function, rax is map.
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &miss, Label::kNear);
- __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &miss, Label::kNear);
- __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
- __ bind(&miss);
- }
-
- __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(rbx, &slow);
- __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
- __ j(below, &slow);
- __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Register mapping:
- // rax is object map.
- // rdx is function.
- // rbx is function prototype.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- } else {
- // Get return address and delta to inlined map check.
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
- __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
- __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
- }
- __ movq(kScratchRegister,
- Operand(kScratchRegister, kOffsetToMapCheckValue));
- __ movq(Operand(kScratchRegister, 0), rax);
- }
-
- __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
-
- // Loop through the prototype chain looking for the function prototype.
- Label loop, is_instance, is_not_instance;
- __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
- __ bind(&loop);
- __ cmpq(rcx, rbx);
- __ j(equal, &is_instance, Label::kNear);
- __ cmpq(rcx, kScratchRegister);
- // The code at is_not_instance assumes that kScratchRegister contains a
- // non-zero GCable value (the null object in this case).
- __ j(equal, &is_not_instance, Label::kNear);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ xorl(rax, rax);
- // Store bitwise zero in the cache. This is a Smi in GC terms.
- STATIC_ASSERT(kSmiTag == 0);
- __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Store offset of true in the root array at the inline check site.
- int true_offset = 0x100 +
- (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
- // Assert it is a 1-byte signed value.
- ASSERT(true_offset >= 0 && true_offset < 0x100);
- __ movl(rax, Immediate(true_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
- __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
- if (FLAG_debug_code) {
- __ movl(rax, Immediate(kWordBeforeResultValue));
- __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
- }
- __ Set(rax, 0);
- }
- __ ret(2 * kPointerSize + extra_stack_space);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- // We have to store a non-zero value in the cache.
- __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Store offset of false in the root array at the inline check site.
- int false_offset = 0x100 +
- (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
- // Assert it is a 1-byte signed value.
- ASSERT(false_offset >= 0 && false_offset < 0x100);
- __ movl(rax, Immediate(false_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
- __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
- if (FLAG_debug_code) {
- __ movl(rax, Immediate(kWordBeforeResultValue));
- __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
- }
- }
- __ ret(2 * kPointerSize + extra_stack_space);
-
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- if (HasCallSiteInlineCheck()) {
- // Remove extra value from the stack.
- __ pop(rcx);
- __ pop(rax);
- __ push(rcx);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
-}
-
-
-// Passing arguments in registers is not supported.
-Register InstanceofStub::left() { return no_reg; }
-
-
-Register InstanceofStub::right() { return no_reg; }
-
-
-// -------------------------------------------------------------------------
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
- Label sliced_string;
-
- // If the receiver is a smi trigger the non-string case.
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ testb(result_, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- __ JumpIfNotSmi(index_, &index_not_smi_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
- __ j(above_equal, index_out_of_range_);
-
- __ SmiToInteger32(index_, index_);
-
- StringCharLoadGenerator::Generate(
- masm, object_, index_, result_, &call_runtime_);
-
- __ Integer32ToSmi(result_, result_);
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- Factory* factory = masm->isolate()->factory();
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- factory->heap_number_map(),
- index_not_number_,
- DONT_DO_SMI_CHECK);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- if (!index_.is(rax)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ movq(index_, rax);
- }
- __ pop(object_);
- // Reload the instance type.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(index_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code of getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ Integer32ToSmi(index_, index_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- if (!result_.is(rax)) {
- __ movq(result_, rax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- __ JumpIfNotSmi(code_, &slow_case_);
- __ SmiCompare(code_, Smi::FromInt(String::kMaxOneByteCharCode));
- __ j(above, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
- __ movq(result_, FieldOperand(result_, index.reg, index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result_.is(rax)) {
- __ movq(result_, rax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- // Load the two arguments.
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
- __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
-
- // Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfSmi(rax, &call_runtime);
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &call_runtime);
-
- // First argument is a a string, test second.
- __ JumpIfSmi(rdx, &call_runtime);
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
- }
-
- // Both arguments are strings.
- // rax: first string
- // rdx: second string
- // Check if either of the strings are empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
- __ SmiTest(rcx);
- __ j(not_zero, &second_not_zero_length, Label::kNear);
- // Second string is empty, result is first string which is already in rax.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rbx);
- __ j(not_zero, &both_not_zero_length, Label::kNear);
- // First string is empty, result is second string which is in rdx.
- __ movq(rax, rdx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // rax: first string
- // rbx: length of first string
- // rcx: length of second string
- // rdx: second string
- // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
- // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
-
- // If arguments where known to be strings, maps are not loaded to r8 and r9
- // by the code above.
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
- }
- // Get the instance types of the two strings as they will be needed soon.
- __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
- __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
-
- // Look at the length of the result of adding the two strings.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
- __ SmiAdd(rbx, rbx, rcx);
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return an internalized string here.
- __ SmiCompare(rbx, Smi::FromInt(2));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ movzxbq(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string, make_flat_ascii_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&make_two_character_string);
- __ Set(rdi, 2);
- __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
- // rbx - first byte: first character
- // rbx - second byte: *maybe* second character
- // Make sure that the second byte of rbx contains the second character.
- __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
- __ shll(rcx, Immediate(kBitsPerByte));
- __ orl(rbx, rcx);
- // Write both characters to the new string.
- __ movw(FieldOperand(rax, SeqOneByteString::kHeaderSize), rbx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
- __ j(below, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
- __ j(above, &call_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object. If
- // both strings are ASCII the result is an ASCII cons string.
- // rax: first string
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label non_ascii, allocated, ascii_data;
- __ movl(rcx, r8);
- __ and_(rcx, r9);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testl(rcx, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an ASCII cons string.
- __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
- __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
- __ movq(rax, rcx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
- // rcx: first instance type AND second instance type.
- // r8: first instance type.
- // r9: second instance type.
- __ testb(rcx, Immediate(kAsciiDataHintMask));
- __ j(not_zero, &ascii_data);
- __ xor_(r8, r9);
- STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
- __ andb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag));
- __ cmpb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag));
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // rax: first string
- // rbx: length of resulting flat string as smi
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of first string
- Label first_prepared, second_prepared;
- Label first_is_sequential, second_is_sequential;
- __ bind(&string_add_flat_result);
-
- __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
- // r14: length of first string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(r8, Immediate(kStringRepresentationMask));
- __ j(zero, &first_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ testb(r8, Immediate(kShortExternalStringMask));
- __ j(not_zero, &call_runtime);
- __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
- __ jmp(&first_prepared, Label::kNear);
- __ bind(&first_is_sequential);
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rcx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- __ bind(&first_prepared);
-
- // Check whether both strings have same encoding.
- __ xorl(r8, r9);
- __ testb(r8, Immediate(kStringEncodingMask));
- __ j(not_zero, &call_runtime);
-
- __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
- // r15: length of second string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(r9, Immediate(kStringRepresentationMask));
- __ j(zero, &second_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ testb(r9, Immediate(kShortExternalStringMask));
- __ j(not_zero, &call_runtime);
- __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
- __ jmp(&second_prepared, Label::kNear);
- __ bind(&second_is_sequential);
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rdx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // r9: instance type of second string
- // First string and second string have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ SmiToInteger32(rbx, rbx);
- __ testb(r9, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii_string_add_flat_result);
-
- __ bind(&make_flat_ascii_string);
- // Both strings are ASCII strings. As they are short they are both flat.
- __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
- // rax: result string
- // Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- // rcx: first char of first string
- // rbx: first character of result
- // r14: length of first string
- StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, true);
- // rbx: next character of result
- // rdx: first char of second string
- // r15: length of second string
- StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, true);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&non_ascii_string_add_flat_result);
- // Both strings are ASCII strings. As they are short they are both flat.
- __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
- // rax: result string
- // Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
- // rcx: first char of first string
- // rbx: first character of result
- // r14: length of first string
- StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, false);
- // rbx: next character of result
- // rdx: first char of second string
- // r15: length of second string
- StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, false);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
- __ j(below, &done);
-
- // Check the number to string cache.
- Label not_cached;
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- false,
- &not_cached);
- __ movq(arg, scratch1);
- __ movq(Operand(rsp, stack_offset), arg);
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
- __ j(not_equal, slow);
- __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(zero, slow);
- __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
- __ movq(Operand(rsp, stack_offset), arg);
-
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
- } else {
- __ movzxwl(kScratchRegister, Operand(src, 0));
- __ movw(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(2));
- __ addq(dest, Immediate(2));
- }
- __ decl(count);
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- // Copy characters using rep movs of doublewords. Align destination on 4 byte
- // boundary before starting rep movs. Copy remaining characters after running
- // rep movs.
- // Count is positive int32, dest and src are character pointers.
- ASSERT(dest.is(rdi)); // rep movs destination
- ASSERT(src.is(rsi)); // rep movs source
- ASSERT(count.is(rcx)); // rep movs count
-
- // Nothing to do for zero characters.
- Label done;
- __ testl(count, count);
- __ j(zero, &done, Label::kNear);
-
- // Make count the number of bytes to copy.
- if (!ascii) {
- STATIC_ASSERT(2 == sizeof(uc16));
- __ addl(count, count);
- }
-
- // Don't enter the rep movs if there are less than 4 bytes to copy.
- Label last_bytes;
- __ testl(count, Immediate(~7));
- __ j(zero, &last_bytes, Label::kNear);
-
- // Copy from edi to esi using rep movs instruction.
- __ movl(kScratchRegister, count);
- __ shr(count, Immediate(3)); // Number of doublewords to copy.
- __ repmovsq();
-
- // Find number of bytes left.
- __ movl(count, kScratchRegister);
- __ and_(count, Immediate(7));
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ testl(count, count);
- __ j(zero, &done, Label::kNear);
-
- // Copy remaining characters.
- Label loop;
- __ bind(&loop);
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
- __ decl(count);
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
-
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits as such strings has a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ leal(scratch, Operand(c1, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index, Label::kNear);
- __ leal(scratch, Operand(c2, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, Immediate(kBitsPerByte));
- __ orl(chars, c2);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ SmiToInteger32(mask,
- FieldOperand(string_table, StringTable::kCapacityOffset));
- __ decl(mask);
-
- Register map = scratch4;
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string (32-bit int)
- // string_table: string table
- // mask: capacity mask (32-bit int)
- // map: -
- // scratch: -
-
- // Perform a number of probes in the string table.
- static const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- __ movl(scratch, hash);
- if (i > 0) {
- __ addl(scratch, Immediate(StringTable::GetProbeOffset(i)));
- }
- __ andl(scratch, mask);
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ movq(candidate,
- FieldOperand(string_table,
- scratch,
- times_pointer_size,
- StringTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ CmpObjectType(candidate, ODDBALL_TYPE, map);
- __ j(not_equal, &is_string, Label::kNear);
-
- __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
- __ j(equal, not_found);
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ cmpq(kScratchRegister, candidate);
- __ Assert(equal, "oddball in string table is not undefined or the hole");
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // If length is not 2 the string is not a candidate.
- __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
- Smi::FromInt(2));
- __ j(not_equal, &next_probe[i]);
-
- // We use kScratchRegister as a temporary register in assumption that
- // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly
- Register temp = kScratchRegister;
-
- // Check that the candidate is a non-external ASCII string.
- __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe[i]);
-
- // Check if the two characters match.
- __ movl(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
- __ andl(temp, Immediate(0x0000ffff));
- __ cmpl(chars, temp);
- __ j(equal, &found_in_string_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- if (!result.is(rax)) {
- __ movq(rax, result);
- }
-}
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = (seed + character) + ((seed + character) << 10);
- __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
- __ SmiToInteger32(scratch, scratch);
- __ addl(scratch, character);
- __ movl(hash, scratch);
- __ shll(scratch, Immediate(10));
- __ addl(hash, scratch);
- // hash ^= hash >> 6;
- __ movl(scratch, hash);
- __ shrl(scratch, Immediate(6));
- __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ addl(hash, character);
- // hash += hash << 10;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(10));
- __ addl(hash, scratch);
- // hash ^= hash >> 6;
- __ movl(scratch, hash);
- __ shrl(scratch, Immediate(6));
- __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ leal(hash, Operand(hash, hash, times_8, 0));
- // hash ^= hash >> 11;
- __ movl(scratch, hash);
- __ shrl(scratch, Immediate(11));
- __ xorl(hash, scratch);
- // hash += hash << 15;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(15));
- __ addl(hash, scratch);
-
- __ andl(hash, Immediate(String::kHashBitMask));
-
- // if (hash == 0) hash = 27;
- Label hash_not_zero;
- __ j(not_zero, &hash_not_zero);
- __ Set(hash, StringHasher::kZeroHash);
- __ bind(&hash_not_zero);
-}
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: to
- // rsp[16]: from
- // rsp[24]: string
-
- const int kToOffset = 1 * kPointerSize;
- const int kFromOffset = kToOffset + kPointerSize;
- const int kStringOffset = kFromOffset + kPointerSize;
- const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
-
- // Make sure first argument is a string.
- __ movq(rax, Operand(rsp, kStringOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
-
- // rax: string
- // rbx: instance type
- // Calculate length of sub string using the smi values.
- __ movq(rcx, Operand(rsp, kToOffset));
- __ movq(rdx, Operand(rsp, kFromOffset));
- __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
-
- __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
- __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
- Label not_original_string;
- // Shorter than original string's length: an actual substring.
- __ j(below, &not_original_string, Label::kNear);
- // Longer than original string's length or negative: unsafe arguments.
- __ j(above, &runtime);
- // Return original string.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
- __ bind(&not_original_string);
-
- Label single_char;
- __ SmiCompare(rcx, Smi::FromInt(1));
- __ j(equal, &single_char);
-
- __ SmiToInteger32(rcx, rcx);
-
- // rax: string
- // rbx: instance type
- // rcx: sub string length
- // rdx: from index (smi)
- // Deal with different string types: update the index if necessary
- // and put the underlying string into edi.
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ testb(rbx, Immediate(kIsIndirectStringMask));
- __ j(zero, &seq_or_external_string, Label::kNear);
-
- __ testb(rbx, Immediate(kSlicedNotConsMask));
- __ j(not_zero, &sliced_string, Label::kNear);
- // Cons string. Check whether it is flat, then fetch first part.
- // Flat cons strings have an empty second part.
- __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
- Heap::kempty_stringRootIndex);
- __ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
- // Update instance type.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
- // Update instance type.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the correct register.
- __ movq(rdi, rax);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // rdi: underlying subject string
- // rbx: instance type of underlying subject string
- // rdx: adjusted start index (smi)
- // rcx: length
- // If coming from the make_two_character_string path, the string
- // is too short to be sliced anyways.
- __ cmpq(rcx, Immediate(SlicedString::kMinLength));
- // Short slice. Copy instead of slicing.
- __ j(less, &copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testb(rbx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_slice, Label::kNear);
- __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
- __ jmp(&set_slice_header, Label::kNear);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
- __ bind(&set_slice_header);
- __ Integer32ToSmi(rcx, rcx);
- __ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
- __ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
- __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
-
- __ bind(&copy_routine);
- }
-
- // rdi: underlying subject string
- // rbx: instance type of underlying subject string
- // rdx: adjusted start index (smi)
- // rcx: length
- // The subject string can only be external or sequential string of either
- // encoding at this point.
- Label two_byte_sequential, sequential_string;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(rbx, Immediate(kExternalStringTag));
- __ j(zero, &sequential_string);
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ testb(rbx, Immediate(kShortExternalStringMask));
- __ j(not_zero, &runtime);
- __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&sequential_string);
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ testb(rbx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_sequential);
-
- // Allocate the result.
- __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
-
- // rax: result string
- // rcx: result string length
- __ movq(r14, rsi); // esi used by following code.
- { // Locate character of sub string start.
- SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqOneByteString::kHeaderSize - kHeapObjectTag));
- }
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
-
- // rax: result string
- // rcx: result length
- // rdi: first character of result
- // rsi: character of sub string start
- // r14: original value of rsi
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
- __ movq(rsi, r14); // Restore rsi.
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
-
- __ bind(&two_byte_sequential);
- // Allocate the result.
- __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
-
- // rax: result string
- // rcx: result string length
- __ movq(r14, rsi); // esi used by following code.
- { // Locate character of sub string start.
- SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqOneByteString::kHeaderSize - kHeapObjectTag));
- }
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
-
- // rax: result string
- // rcx: result length
- // rdi: first character of result
- // rsi: character of sub string start
- // r14: original value of rsi
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
- __ movq(rsi, r14); // Restore esi.
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-
- __ bind(&single_char);
- // rax: string
- // rbx: instance type
- // rcx: sub string length (smi)
- // rdx: from index (smi)
- StringCharAtGenerator generator(
- rax, rdx, rcx, rax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- __ ret(kArgumentsSize);
- generator.SkipSlow(masm, &runtime);
-}
-
-
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2) {
- Register length = scratch1;
-
- // Compare lengths.
- Label check_zero_length;
- __ movq(length, FieldOperand(left, String::kLengthOffset));
- __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
- __ j(equal, &check_zero_length, Label::kNear);
- __ Move(rax, Smi::FromInt(NOT_EQUAL));
- __ ret(0);
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiTest(length);
- __ j(not_zero, &compare_chars, Label::kNear);
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- // Compare characters.
- __ bind(&compare_chars);
- Label strings_not_equal;
- GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
- &strings_not_equal, Label::kNear);
-
- // Characters are equal.
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- // Characters are not equal.
- __ bind(&strings_not_equal);
- __ Move(rax, Smi::FromInt(NOT_EQUAL));
- __ ret(0);
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- // Ensure that you can always subtract a string length from a non-negative
- // number (e.g. another length).
- STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
-
- // Find minimum length and length difference.
- __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
- __ movq(scratch4, scratch1);
- __ SmiSub(scratch4,
- scratch4,
- FieldOperand(right, String::kLengthOffset));
- // Register scratch4 now holds left.length - right.length.
- const Register length_difference = scratch4;
- Label left_shorter;
- __ j(less, &left_shorter, Label::kNear);
- // The right string isn't longer that the left one.
- // Get the right string's length by subtracting the (non-negative) difference
- // from the left string's length.
- __ SmiSub(scratch1, scratch1, length_difference);
- __ bind(&left_shorter);
- // Register scratch1 now holds Min(left.length, right.length).
- const Register min_length = scratch1;
-
- Label compare_lengths;
- // If min-length is zero, go directly to comparing lengths.
- __ SmiTest(min_length);
- __ j(zero, &compare_lengths, Label::kNear);
-
- // Compare loop.
- Label result_not_equal;
- GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal, Label::kNear);
-
- // Completed loop without finding different characters.
- // Compare lengths (precomputed).
- __ bind(&compare_lengths);
- __ SmiTest(length_difference);
-#ifndef ENABLE_LATIN_1
- __ j(not_zero, &result_not_equal, Label::kNear);
-#else
- Label length_not_equal;
- __ j(not_zero, &length_not_equal, Label::kNear);
-#endif
-
- // Result is EQUAL.
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- Label result_greater;
-#ifdef ENABLE_LATIN_1
- Label result_less;
- __ bind(&length_not_equal);
- __ j(greater, &result_greater, Label::kNear);
- __ jmp(&result_less, Label::kNear);
-#endif
- __ bind(&result_not_equal);
- // Unequal comparison of left to right, either character or length.
-#ifndef ENABLE_LATIN_1
- __ j(greater, &result_greater, Label::kNear);
-#else
- __ j(above, &result_greater, Label::kNear);
- __ bind(&result_less);
-#endif
-
- // Result is LESS.
- __ Move(rax, Smi::FromInt(LESS));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Move(rax, Smi::FromInt(GREATER));
- __ ret(0);
-}
-
-
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
- Label::Distance near_jump) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiToInteger32(length, length);
- __ lea(left,
- FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
- __ lea(right,
- FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ neg(length);
- Register index = length; // index = -length;
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ movb(scratch, Operand(left, index, times_1, 0));
- __ cmpb(scratch, Operand(right, index, times_1, 0));
- __ j(not_equal, chars_not_equal, near_jump);
- __ incq(index);
- __ j(not_zero, &loop);
-}
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: right string
- // rsp[16]: left string
-
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
-
- // Check for identity.
- Label not_same;
- __ cmpq(rdx, rax);
- __ j(not_equal, &not_same, Label::kNear);
- __ Move(rax, Smi::FromInt(EQUAL));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_compare_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_same);
-
- // Check that both are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
-
- // Inline comparison of ASCII strings.
- __ IncrementCounter(counters->string_compare_native(), 1);
- // Drop arguments from the stack
- __ pop(rcx);
- __ addq(rsp, Immediate(2 * kPointerSize));
- __ push(rcx);
- GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
- Label miss;
- __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
-
- if (GetCondition() == equal) {
- // For equality we do not care about the sign of the result.
- __ subq(rax, rdx);
- } else {
- Label done;
- __ subq(rdx, rax);
- __ j(no_overflow, &done, Label::kNear);
- // Correct sign of result in case of overflow.
- __ not_(rdx);
- __ bind(&done);
- __ movq(rax, rdx);
- }
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::NUMBER);
-
- Label generic_stub;
- Label unordered, maybe_undefined1, maybe_undefined2;
- Label miss;
-
- if (left_ == CompareIC::SMI) {
- __ JumpIfNotSmi(rdx, &miss);
- }
- if (right_ == CompareIC::SMI) {
- __ JumpIfNotSmi(rax, &miss);
- }
-
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(rax, &right_smi, Label::kNear);
- __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&left, Label::kNear);
- __ bind(&right_smi);
- __ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
- __ cvtlsi2sd(xmm1, rcx);
-
- __ bind(&left);
- __ JumpIfSmi(rdx, &left_smi, Label::kNear);
- __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ jmp(&done);
- __ bind(&left_smi);
- __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
- __ cvtlsi2sd(xmm0, rcx);
-
- __ bind(&done);
- // Compare operands
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
-
- // Return a result of -1, 0, or 1, based on EFLAGS.
- // Performing mov, because xor would destroy the flag register.
- __ movl(rax, Immediate(0));
- __ movl(rcx, Immediate(0));
- __ setcc(above, rax); // Add one to zero if carry clear and not equal.
- __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
- __ ret(0);
-
- __ bind(&unordered);
- __ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
- __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
-
- __ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ Cmp(rax, masm->isolate()->factory()->undefined_value());
- __ j(not_equal, &miss);
- __ JumpIfSmi(rdx, &unordered);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- __ jmp(&unordered);
- }
-
- __ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
- __ j(equal, &unordered);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
- ASSERT(GetCondition() == equal);
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
- Register tmp1 = rcx;
- Register tmp2 = rbx;
-
- // Check that both operands are heap objects.
- Label miss;
- Condition cond = masm->CheckEitherSmi(left, right, tmp1);
- __ j(cond, &miss, Label::kNear);
-
- // Check that both operands are internalized strings.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp2);
- __ testb(tmp1, Immediate(kIsInternalizedMask));
- __ j(zero, &miss, Label::kNear);
-
- // Internalized strings are compared by identity.
- Label done;
- __ cmpq(left, right);
- // Make sure rax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(rax));
- __ j(not_equal, &done, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Move(rax, Smi::FromInt(EQUAL));
- __ bind(&done);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::UNIQUE_NAME);
- ASSERT(GetCondition() == equal);
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
- Register tmp1 = rcx;
- Register tmp2 = rbx;
-
- // Check that both operands are heap objects.
- Label miss;
- Condition cond = masm->CheckEitherSmi(left, right, tmp1);
- __ j(cond, &miss, Label::kNear);
-
- // Check that both operands are unique names. This leaves the instance
- // types loaded in tmp1 and tmp2.
- STATIC_ASSERT(kInternalizedTag != 0);
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
-
- Label succeed1;
- __ testb(tmp1, Immediate(kIsInternalizedMask));
- __ j(not_zero, &succeed1, Label::kNear);
- __ cmpb(tmp1, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
- __ j(not_equal, &miss, Label::kNear);
- __ bind(&succeed1);
-
- Label succeed2;
- __ testb(tmp2, Immediate(kIsInternalizedMask));
- __ j(not_zero, &succeed2, Label::kNear);
- __ cmpb(tmp2, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
- __ j(not_equal, &miss, Label::kNear);
- __ bind(&succeed2);
-
- // Unique names are compared by identity.
- Label done;
- __ cmpq(left, right);
- // Make sure rax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(rax));
- __ j(not_equal, &done, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Move(rax, Smi::FromInt(EQUAL));
- __ bind(&done);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
- Label miss;
-
- bool equality = Token::IsEqualityOp(op_);
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
- Register tmp1 = rcx;
- Register tmp2 = rbx;
- Register tmp3 = rdi;
-
- // Check that both operands are heap objects.
- Condition cond = masm->CheckEitherSmi(left, right, tmp1);
- __ j(cond, &miss);
-
- // Check that both operands are strings. This leaves the instance
- // types loaded in tmp1 and tmp2.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- __ movq(tmp3, tmp1);
- STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, tmp2);
- __ testb(tmp3, Immediate(kIsNotStringMask));
- __ j(not_zero, &miss);
-
- // Fast check for identical strings.
- Label not_same;
- __ cmpq(left, right);
- __ j(not_equal, &not_same, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- // Handle not identical strings.
- __ bind(&not_same);
-
- // Check that both strings are internalized strings. If they are, we're done
- // because we already know they are not identical.
- if (equality) {
- Label do_compare;
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp2);
- __ testb(tmp1, Immediate(kIsInternalizedMask));
- __ j(zero, &do_compare, Label::kNear);
- // Make sure rax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(rax));
- __ ret(0);
- __ bind(&do_compare);
- }
-
- // Check that both strings are sequential ASCII.
- Label runtime;
- __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
-
- // Compare flat ASCII strings. Returns when done.
- if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
- }
-
- // Handle more complex cases in runtime.
- __ bind(&runtime);
- __ pop(tmp1); // Return address.
- __ push(left);
- __ push(right);
- __ push(tmp1);
- if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
- } else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
- Label miss;
- Condition either_smi = masm->CheckEitherSmi(rdx, rax);
- __ j(either_smi, &miss, Label::kNear);
-
- __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, Label::kNear);
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &miss, Label::kNear);
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, Label::kNear);
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &miss, Label::kNear);
-
- ASSERT(GetCondition() == equal);
- __ subq(rax, rdx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
- Label miss;
- Condition either_smi = masm->CheckEitherSmi(rdx, rax);
- __ j(either_smi, &miss, Label::kNear);
-
- __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ Cmp(rcx, known_map_);
- __ j(not_equal, &miss, Label::kNear);
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &miss, Label::kNear);
- __ Cmp(rbx, known_map_);
- __ j(not_equal, &miss, Label::kNear);
- __ testb(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &miss, Label::kNear);
-
- __ subq(rax, rdx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- {
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdx);
- __ push(rax);
- __ push(rdx);
- __ push(rax);
- __ Push(Smi::FromInt(op_));
- __ CallExternalReference(miss, 3);
-
- // Compute the entry point of the rewritten stub.
- __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
- __ pop(rax);
- __ pop(rdx);
- }
-
- // Do a tail call to the rewritten stub.
- __ jmp(rdi);
-}
-
-
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<String> name,
- Register r0) {
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // r0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r0;
- // Capacity is smi 2^n.
- __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
- __ decl(index);
- __ and_(index,
- Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
-
- Register entity_name = r0;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- __ movq(entity_name, Operand(properties,
- index,
- times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
- __ j(equal, done);
-
- // Stop if found the property.
- __ Cmp(entity_name, Handle<String>(name));
- __ j(equal, miss);
-
- Label the_hole;
- // Check for the hole and skip.
- __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
- __ j(equal, &the_hole, Label::kNear);
-
- // Check if the entry name is not an internalized string.
- __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- Immediate(kIsInternalizedMask));
- __ j(zero, miss);
-
- __ bind(&the_hole);
- }
-
- StringDictionaryLookupStub stub(properties,
- r0,
- r0,
- StringDictionaryLookupStub::NEGATIVE_LOOKUP);
- __ Push(Handle<Object>(name));
- __ push(Immediate(name->Hash()));
- __ CallStub(&stub);
- __ testq(r0, r0);
- __ j(not_zero, miss);
- __ jmp(done);
-}
-
-
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r1|. Jump to the |miss| label
-// otherwise.
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
- ASSERT(!elements.is(r0));
- ASSERT(!elements.is(r1));
- ASSERT(!name.is(r0));
- ASSERT(!name.is(r1));
-
- __ AssertString(name);
-
- __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
- __ decl(r0);
-
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
- __ shrl(r1, Immediate(String::kHashShift));
- if (i > 0) {
- __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
- }
- __ and_(r1, r0);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
-
- // Check if the key is identical to the name.
- __ cmpq(name, Operand(elements, r1, times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ j(equal, done);
- }
-
- StringDictionaryLookupStub stub(elements,
- r0,
- r1,
- POSITIVE_LOOKUP);
- __ push(name);
- __ movl(r0, FieldOperand(name, String::kHashFieldOffset));
- __ shrl(r0, Immediate(String::kHashShift));
- __ push(r0);
- __ CallStub(&stub);
-
- __ testq(r0, r0);
- __ j(zero, miss);
- __ jmp(done);
-}
-
-
-void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Stack frame on entry:
- // esp[0 * kPointerSize]: return address.
- // esp[1 * kPointerSize]: key's hash.
- // esp[2 * kPointerSize]: key.
- // Registers:
- // dictionary_: StringDictionary to probe.
- // result_: used as scratch.
- // index_: will hold an index of entry if lookup is successful.
- // might alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non zero otherwise.
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- Register scratch = result_;
-
- __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
- __ decl(scratch);
- __ push(scratch);
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ movq(scratch, Operand(rsp, 2 * kPointerSize));
- if (i > 0) {
- __ addl(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
- }
- __ and_(scratch, Operand(rsp, 0));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
-
- // Having undefined at this place means the name is not contained.
- __ movq(scratch, Operand(dictionary_,
- index_,
- times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
-
- __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
- __ j(equal, &not_in_dictionary);
-
- // Stop if found the property.
- __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
- __ j(equal, &in_dictionary);
-
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // If we hit a non internalized string key during negative lookup
- // we have to bailout as this key might be equal to the
- // key we are looking for.
-
- // Check if the entry name is not an internalized string.
- __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
- Immediate(kIsInternalizedMask));
- __ j(zero, &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
- __ movq(scratch, Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
- }
-
- __ bind(&in_dictionary);
- __ movq(scratch, Immediate(1));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_in_dictionary);
- __ movq(scratch, Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-}
-
-
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField and
- // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
- // GenerateStoreField calls the stub with two different permutations of
- // registers. This is the second.
- { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
- // StoreIC::GenerateNormal via GenerateDictionaryStore.
- { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
- // KeyedStoreIC::GenerateGeneric.
- { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET},
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
- { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
- { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
- // StoreArrayLiteralElementStub::Generate
- { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub::Generate
- { REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return true; // Always have SSE2 on x64.
-}
-
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two instructions are generated with labels so as to get the
- // offset fixed up correctly by the bind(Label*) call. We patch it back and
- // forth between a compare instructions (a nop in this position) and the
- // real branch when we start and stop incremental heap marking.
- // See RecordWriteStub::Patch for details.
- __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
- __ jmp(&skip_to_incremental_compacting, Label::kFar);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- masm->set_byte_at(0, kTwoByteNopInstruction);
- masm->set_byte_at(2, kFiveByteNopInstruction);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(),
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- not_zero,
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ ret(0);
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
-#ifdef _WIN64
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
- Register address =
- arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
- ASSERT(!address.is(regs_.object()));
- ASSERT(!address.is(arg1));
- __ Move(address, regs_.address());
- __ Move(arg1, regs_.object());
- // TODO(gc) Can we just set address arg2 in the beginning?
- __ Move(arg2, address);
- __ LoadAddress(arg3, ExternalReference::isolate_address());
- int argument_count = 3;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label on_black;
- Label need_incremental;
- Label need_incremental_pop_object;
-
- __ movq(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
- __ and_(regs_.scratch0(), regs_.object());
- __ movq(regs_.scratch1(),
- Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ subq(regs_.scratch1(), Immediate(1));
- __ movq(Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset),
- regs_.scratch1());
- __ j(negative, &need_incremental);
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(),
- regs_.scratch0(),
- regs_.scratch1(),
- &on_black,
- Label::kNear);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&on_black);
-
- // Get the value from the slot.
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- zero,
- &ensure_not_white,
- Label::kNear);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- zero,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need an extra register for this, so we push the object register
- // temporarily.
- __ push(regs_.object());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object,
- Label::kNear);
- __ pop(regs_.object());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&need_incremental_pop_object);
- __ pop(regs_.object());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : element value to store
- // -- rbx : array literal
- // -- rdi : map of array literal
- // -- rcx : element index as smi
- // -- rdx : array literal index in function
- // -- rsp[0] : return address
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- __ CheckFastElements(rdi, &double_elements);
-
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
- __ JumpIfSmi(rax, &smi_element);
- __ CheckFastSmiElements(rdi, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
-
- __ bind(&slow_elements);
- __ pop(rdi); // Pop return address and remember to put back later for tail
- // call.
- __ push(rbx);
- __ push(rcx);
- __ push(rax);
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ push(rdx);
- __ push(rdi); // Return return address so that tail call returns to right
- // place.
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ SmiToInteger32(kScratchRegister, rcx);
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
- FixedArrayBase::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
- // Update the write barrier for the array store.
- __ RecordWrite(rbx, rcx, rax,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
- // FAST_*_ELEMENTS, and value is Smi.
- __ bind(&smi_element);
- __ SmiToInteger32(kScratchRegister, rcx);
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
- FixedArrayBase::kHeaderSize), rax);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
-
- __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
- __ SmiToInteger32(r11, rcx);
- __ StoreNumberToDoubleElements(rax,
- r9,
- r11,
- xmm0,
- &slow_elements);
- __ ret(0);
-}
-
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- ASSERT(!Serializer::enabled());
- CEntryStub ces(1, kSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ movq(rbx, MemOperand(rbp, parameter_count_offset));
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ pop(rcx);
- __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size,
- extra_expression_stack_count_ * kPointerSize));
- __ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
-}
-
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
- ProfileEntryHookStub stub;
- masm->CallStub(&stub);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // Save volatile registers.
- // Live registers at this point are the same as at the start of any
- // JS function:
- // o rdi: the JS function object being called (i.e. ourselves)
- // o rsi: our context
- // o rbp: our caller's frame pointer
- // o rsp: stack pointer (pointing to return address)
- // o rcx: rcx is zero for method calls and non-zero for function calls.
-#ifdef _WIN64
- const int kNumSavedRegisters = 1;
-
- __ push(rcx);
-#else
- const int kNumSavedRegisters = 3;
-
- __ push(rcx);
- __ push(rdi);
- __ push(rsi);
-#endif
-
- // Calculate the original stack pointer and store it in the second arg.
-#ifdef _WIN64
- __ lea(rdx, Operand(rsp, kNumSavedRegisters * kPointerSize));
-#else
- __ lea(rsi, Operand(rsp, kNumSavedRegisters * kPointerSize));
-#endif
-
- // Calculate the function address to the first arg.
-#ifdef _WIN64
- __ movq(rcx, Operand(rdx, 0));
- __ subq(rcx, Immediate(Assembler::kShortCallInstructionLength));
-#else
- __ movq(rdi, Operand(rsi, 0));
- __ subq(rdi, Immediate(Assembler::kShortCallInstructionLength));
-#endif
-
- // Call the entry hook function.
- __ movq(rax, &entry_hook_, RelocInfo::NONE64);
- __ movq(rax, Operand(rax, 0));
-
- AllowExternalCallThatCantCauseGC scope(masm);
-
- const int kArgumentCount = 2;
- __ PrepareCallCFunction(kArgumentCount);
- __ CallCFunction(rax, kArgumentCount);
-
- // Restore volatile regs.
-#ifdef _WIN64
- __ pop(rcx);
-#else
- __ pop(rsi);
- __ pop(rdi);
- __ pop(rcx);
-#endif
-
- __ Ret();
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/code-stubs-x64.h b/src/3rdparty/v8/src/x64/code-stubs-x64.h
deleted file mode 100644
index 675e95b..0000000
--- a/src/3rdparty/v8/src/x64/code-stubs-x64.h
+++ /dev/null
@@ -1,623 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_CODE_STUBS_X64_H_
-#define V8_X64_CODE_STUBS_X64_H_
-
-#include "ic-inl.h"
-#include "type-info.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- explicit TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) {}
- void Generate(MacroAssembler* masm);
- static void GenerateOperation(MacroAssembler* masm,
- TranscendentalCache::Type type);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- virtual bool IsPregenerated() { return true; }
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
-class UnaryOpStub: public PlatformCodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow,
- Label::Distance non_smi_near = Label::kFar,
- Label::Distance slow_near = Label::kFar);
- void GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near);
-
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateNumberStubSub(MacroAssembler* masm);
- void GenerateNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii);
-
- // Generate code for copying characters using the rep movs instruction.
- // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be rdi.
- Register src, // Must be rsi.
- Register count, // Must be rcx.
- bool ascii);
-
-
- // Probe the string table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in register rax.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow);
-
- const StringAddFlags flags_;
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- StringCompareStub() {}
-
- // Compares two flat ASCII strings and returns result in rax.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compares two flat ASCII strings for equality and returns result
- // in rax.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2);
-
- private:
- virtual Major MajorKey() { return StringCompare; }
- virtual int MinorKey() { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
- Label::Distance near_jump = Label::kFar);
-};
-
-
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found);
-
- private:
- static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask);
-
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- StringDictionaryLookupStub(Register dictionary,
- Register result,
- Register index,
- LookupMode mode)
- : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<String> name,
- Register r0);
-
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- Major MajorKey() { return StringDictionaryLookup; }
-
- int MinorKey() {
- return DictionaryBits::encode(dictionary_.code()) |
- ResultBits::encode(result_.code()) |
- IndexBits::encode(index_.code()) |
- LookupModeBits::encode(mode_);
- }
-
- class DictionaryBits: public BitField<int, 0, 4> {};
- class ResultBits: public BitField<int, 4, 4> {};
- class IndexBits: public BitField<int, 8, 4> {};
- class LookupModeBits: public BitField<LookupMode, 12, 1> {};
-
- Register dictionary_;
- Register result_;
- Register index_;
- LookupMode mode_;
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- RecordWriteStub(Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- }
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
- static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
-
- static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
- static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
-
- static Mode GetMode(Code* stub) {
- byte first_instruction = stub->instruction_start()[0];
- byte second_instruction = stub->instruction_start()[2];
-
- if (first_instruction == kTwoByteJumpInstruction) {
- return INCREMENTAL;
- }
-
- ASSERT(first_instruction == kTwoByteNopInstruction);
-
- if (second_instruction == kFiveByteJumpInstruction) {
- return INCREMENTAL_COMPACTION;
- }
-
- ASSERT(second_instruction == kFiveByteNopInstruction);
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- switch (mode) {
- case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteNopInstruction;
- break;
- case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteJumpInstruction;
- break;
- case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteJumpInstruction;
- break;
- }
- ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 7);
- }
-
- private:
- // This is a helper class for freeing up 3 scratch registers, where the third
- // is always rcx (needed for shift operations). The input is two registers
- // that must be preserved and one scratch register provided by the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
- : object_orig_(object),
- address_orig_(address),
- scratch0_orig_(scratch0),
- object_(object),
- address_(address),
- scratch0_(scratch0) {
- ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_);
- if (scratch0.is(rcx)) {
- scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_);
- }
- if (object.is(rcx)) {
- object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_);
- }
- if (address.is(rcx)) {
- address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_);
- }
- ASSERT(!AreAliased(scratch0_, object_, address_, rcx));
- }
-
- void Save(MacroAssembler* masm) {
- ASSERT(!address_orig_.is(object_));
- ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
- ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
- ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
- ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
- // We don't have to save scratch0_orig_ because it was given to us as
- // a scratch register. But if we had to switch to a different reg then
- // we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
- if (!rcx.is(scratch0_orig_) &&
- !rcx.is(object_orig_) &&
- !rcx.is(address_orig_)) {
- masm->push(rcx);
- }
- masm->push(scratch1_);
- if (!address_.is(address_orig_)) {
- masm->push(address_);
- masm->movq(address_, address_orig_);
- }
- if (!object_.is(object_orig_)) {
- masm->push(object_);
- masm->movq(object_, object_orig_);
- }
- }
-
- void Restore(MacroAssembler* masm) {
- // These will have been preserved the entire time, so we just need to move
- // them back. Only in one case is the orig_ reg different from the plain
- // one, since only one of them can alias with rcx.
- if (!object_.is(object_orig_)) {
- masm->movq(object_orig_, object_);
- masm->pop(object_);
- }
- if (!address_.is(address_orig_)) {
- masm->movq(address_orig_, address_);
- masm->pop(address_);
- }
- masm->pop(scratch1_);
- if (!rcx.is(scratch0_orig_) &&
- !rcx.is(object_orig_) &&
- !rcx.is(address_orig_)) {
- masm->pop(rcx);
- }
- if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved.
-
- // The three scratch registers (incl. rcx) will be restored by other means
- // so we don't bother pushing them here. Rbx, rbp and r12-15 are callee
- // save and don't need to be preserved.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx);
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
- masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx);
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_orig_;
- Register address_orig_;
- Register scratch0_orig_;
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
- // Third scratch register is always rcx.
-
- Register GetRegThatIsNotRcxOr(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(rcx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
- }
-
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
-
- class ObjectBits: public BitField<int, 0, 4> {};
- class ValueBits: public BitField<int, 4, 4> {};
- class AddressBits: public BitField<int, 8, 4> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
-
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
- Label slow_;
- RegisterAllocation regs_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_X64_CODE_STUBS_X64_H_
diff --git a/src/3rdparty/v8/src/x64/codegen-x64.cc b/src/3rdparty/v8/src/x64/codegen-x64.cc
deleted file mode 100644
index fa8b44a..0000000
--- a/src/3rdparty/v8/src/x64/codegen-x64.cc
+++ /dev/null
@@ -1,785 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
- masm->set_has_frame(true);
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
- masm->set_has_frame(false);
-}
-
-
-#define __ masm.
-
-
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) {
- // Fallback to library function if function cannot be created.
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- }
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // xmm0: raw double input.
- // Move double input into registers.
- __ push(rbx);
- __ push(rdi);
- __ movq(rbx, xmm0);
- __ push(rbx);
- __ fld_d(Operand(rsp, 0));
- TranscendentalCacheStub::GenerateOperation(&masm, type);
- // The return value is expected to be in xmm0.
- __ fstp_d(Operand(rsp, 0));
- __ pop(rbx);
- __ movq(xmm0, rbx);
- __ pop(rdi);
- __ pop(rbx);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &exp;
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // xmm0: raw double input.
- XMMRegister input = xmm0;
- XMMRegister result = xmm1;
- __ push(rax);
- __ push(rbx);
-
- MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
-
- __ pop(rbx);
- __ pop(rax);
- __ movsd(xmm0, result);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
-UnaryMathFunction CreateSqrtFunction() {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) return &sqrt;
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // xmm0: raw double input.
- // Move double input into registers.
- __ sqrtsd(xmm0, xmm0);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
-#ifdef _WIN64
-typedef double (*ModuloFunction)(double, double);
-// Define custom fmod implementation.
-ModuloFunction CreateModuloFunction() {
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
- CHECK(buffer);
- Assembler masm(NULL, buffer, static_cast<int>(actual_size));
- // Generated code is put into a fixed, unmovable, buffer, and not into
- // the V8 heap. We can't, and don't, refer to any relocatable addresses
- // (e.g. the JavaScript nan-object).
-
- // Windows 64 ABI passes double arguments in xmm0, xmm1 and
- // returns result in xmm0.
- // Argument backing space is allocated on the stack above
- // the return address.
-
- // Compute x mod y.
- // Load y and x (use argument backing store as temporary storage).
- __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
- __ movsd(Operand(rsp, kPointerSize), xmm0);
- __ fld_d(Operand(rsp, kPointerSize * 2));
- __ fld_d(Operand(rsp, kPointerSize));
-
- // Clear exception flags before operation.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testb(rax, Immediate(5));
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400 /* C2 */));
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
-
- Label valid_result;
- Label return_result;
- // If Invalid Operand or Zero Division exceptions are set,
- // return NaN.
- __ testb(rax, Immediate(5));
- __ j(zero, &valid_result);
- __ fstp(0); // Drop result in st(0).
- int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
- __ movq(rcx, kNaNValue, RelocInfo::NONE64);
- __ movq(Operand(rsp, kPointerSize), rcx);
- __ movsd(xmm0, Operand(rsp, kPointerSize));
- __ jmp(&return_result);
-
- // If result is valid, return that.
- __ bind(&valid_result);
- __ fstp_d(Operand(rsp, kPointerSize));
- __ movsd(xmm0, Operand(rsp, kPointerSize));
-
- // Clean up FPU stack and exceptions and return xmm0
- __ bind(&return_result);
- __ fstp(0); // Unload y.
-
- Label clear_exceptions;
- __ testb(rax, Immediate(0x3f /* Any Exception*/));
- __ j(not_zero, &clear_exceptions);
- __ ret(0);
- __ bind(&clear_exceptions);
- __ fnclex();
- __ ret(0);
-
- CodeDesc desc;
- masm.GetCode(&desc);
- OS::ProtectCode(buffer, actual_size);
- // Call the function from C++ through this pointer.
- return FUNCTION_CAST<ModuloFunction>(buffer);
-}
-
-#endif
-
-#undef __
-
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm, AllocationSiteMode mode,
- Label* allocation_site_info_found) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rbx : target map
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_site_info_found != NULL);
- __ TestJSArrayForAllocationSiteInfo(rdx, rdi);
- __ j(equal, allocation_site_info_found);
- }
-
- // Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
- __ RecordWriteField(rdx,
- HeapObject::kMapOffset,
- rbx,
- rdi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rbx : target map
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- // The fail label is not actually used since we do not allocate.
- Label allocated, new_backing_store, only_change_map, done;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(rdx, rdi);
- __ j(equal, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
- __ j(equal, &only_change_map);
-
- // Check backing store for COW-ness. For COW arrays we have to
- // allocate a new backing store.
- __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
- __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &new_backing_store);
- // Check if the backing store is in new-space. If not, we need to allocate
- // a new one since the old one is in pointer-space.
- // If in new space, we can reuse the old backing store because it is
- // the same size.
- __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
-
- __ movq(r14, r8); // Destination array equals source array.
-
- // r8 : source FixedArray
- // r9 : elements array length
- // r14: destination FixedDoubleArray
- // Set backing store's map
- __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
-
- __ bind(&allocated);
- // Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
- __ RecordWriteField(rdx,
- HeapObject::kMapOffset,
- rbx,
- rdi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Convert smis to doubles and holes to hole NaNs. The Array's length
- // remains unchanged.
- STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
- STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
-
- Label loop, entry, convert_hole;
- __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
- // r15: the-hole NaN
- __ jmp(&entry);
-
- // Allocate new backing store.
- __ bind(&new_backing_store);
- __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
- __ AllocateInNewSpace(rdi, r14, r11, r15, fail, TAG_OBJECT);
- // Set backing store's map
- __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
- // Set receiver's backing store.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
- __ movq(r11, r14);
- __ RecordWriteField(rdx,
- JSObject::kElementsOffset,
- r11,
- r15,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Set backing store's length.
- __ Integer32ToSmi(r11, r9);
- __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
- __ jmp(&allocated);
-
- __ bind(&only_change_map);
- // Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
- __ RecordWriteField(rdx,
- HeapObject::kMapOffset,
- rbx,
- rdi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&done);
-
- // Conversion loop.
- __ bind(&loop);
- __ movq(rbx,
- FieldOperand(r8, r9, times_8, FixedArray::kHeaderSize));
- // r9 : current element's index
- // rbx: current element (smi-tagged)
- __ JumpIfNotSmi(rbx, &convert_hole);
- __ SmiToInteger32(rbx, rbx);
- __ cvtlsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
- xmm0);
- __ jmp(&entry);
- __ bind(&convert_hole);
-
- if (FLAG_debug_code) {
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ Assert(equal, "object found in smi-only array");
- }
-
- __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
- __ bind(&entry);
- __ decq(r9);
- __ j(not_sign, &loop);
-
- __ bind(&done);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rbx : target map
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label loop, entry, convert_hole, gc_required, only_change_map;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(rdx, rdi);
- __ j(equal, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
- __ j(equal, &only_change_map);
-
- __ push(rax);
-
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
- // r8 : source FixedDoubleArray
- // r9 : number of elements
- __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
- __ AllocateInNewSpace(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
- // r11: destination FixedArray
- __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
- __ Integer32ToSmi(r14, r9);
- __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
-
- // Prepare for conversion loop.
- __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
- __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
- // rsi: the-hole NaN
- // rdi: pointer to the-hole
- __ jmp(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ pop(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ jmp(fail);
-
- // Box doubles into heap numbers.
- __ bind(&loop);
- __ movq(r14, FieldOperand(r8,
- r9,
- times_pointer_size,
- FixedDoubleArray::kHeaderSize));
- // r9 : current element's index
- // r14: current element
- __ cmpq(r14, rsi);
- __ j(equal, &convert_hole);
-
- // Non-hole double, copy value into a heap number.
- __ AllocateHeapNumber(rax, r15, &gc_required);
- // rax: new heap number
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
- __ movq(FieldOperand(r11,
- r9,
- times_pointer_size,
- FixedArray::kHeaderSize),
- rax);
- __ movq(r15, r9);
- __ RecordWriteArray(r11,
- rax,
- r15,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&entry, Label::kNear);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ movq(FieldOperand(r11,
- r9,
- times_pointer_size,
- FixedArray::kHeaderSize),
- rdi);
-
- __ bind(&entry);
- __ decq(r9);
- __ j(not_sign, &loop);
-
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
- __ RecordWriteField(rdx,
- JSObject::kElementsOffset,
- r11,
- r15,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- __ bind(&only_change_map);
- // Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
- __ RecordWriteField(rdx,
- HeapObject::kMapOffset,
- rbx,
- rdi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- // Fetch the instance type of the receiver into result register.
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ testb(result, Immediate(kIsIndirectStringMask));
- __ j(zero, &check_sequential, Label::kNear);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ testb(result, Immediate(kSlicedNotConsMask));
- __ j(zero, &cons_string, Label::kNear);
-
- // Handle slices.
- Label indirect_string_loaded;
- __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ addq(index, result);
- __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded, Label::kNear);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
- Heap::kempty_stringRootIndex);
- __ j(not_equal, call_runtime);
- __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label seq_string;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result, Immediate(kStringRepresentationMask));
- __ j(zero, &seq_string, Label::kNear);
-
- // Handle external strings.
- Label ascii_external, done;
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ testb(result, Immediate(kIsIndirectStringMask));
- __ Assert(zero, "external string expected, but not found");
- }
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ testb(result, Immediate(kShortExternalStringTag));
- __ j(not_zero, call_runtime);
- // Check encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ testb(result, Immediate(kStringEncodingMask));
- __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
- __ j(not_equal, &ascii_external, Label::kNear);
- // Two-byte string.
- __ movzxwl(result, Operand(result, index, times_2, 0));
- __ jmp(&done, Label::kNear);
- __ bind(&ascii_external);
- // Ascii string.
- __ movzxbl(result, Operand(result, index, times_1, 0));
- __ jmp(&done, Label::kNear);
-
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii;
- __ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testb(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii, Label::kNear);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movzxwl(result, FieldOperand(string,
- index,
- times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii);
- __ movzxbl(result, FieldOperand(string,
- index,
- times_1,
- SeqOneByteString::kHeaderSize));
- __ bind(&done);
-}
-
-
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ Check(masm->CheckSmi(index), "Non-smi index");
- __ Check(masm->CheckSmi(value), "Non-smi value");
-
- __ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, "Index is too large");
-
- __ SmiCompare(index, Smi::FromInt(0));
- __ Check(greater_equal, "Index is negative");
-
- __ push(value);
- __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, "Unexpected string type");
- __ pop(value);
- }
-
- __ SmiToInteger32(value, value);
- __ SmiToInteger32(index, index);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
- } else {
- __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
- value);
- }
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2) {
- ASSERT(!input.is(result));
- ASSERT(!input.is(double_scratch));
- ASSERT(!result.is(double_scratch));
- ASSERT(!temp1.is(temp2));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
- Label done;
-
- __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
- __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
- __ xorpd(result, result);
- __ ucomisd(double_scratch, input);
- __ j(above_equal, &done);
- __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
- __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
- __ j(above_equal, &done);
- __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
- __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
- __ mulsd(double_scratch, input);
- __ addsd(double_scratch, result);
- __ movq(temp2, double_scratch);
- __ subsd(double_scratch, result);
- __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
- __ lea(temp1, Operand(temp2, 0x1ff800));
- __ and_(temp2, Immediate(0x7ff));
- __ shr(temp1, Immediate(11));
- __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
- __ movq(kScratchRegister, ExternalReference::math_exp_log_table());
- __ shl(temp1, Immediate(52));
- __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
- __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
- __ subsd(double_scratch, input);
- __ movsd(input, double_scratch);
- __ subsd(result, double_scratch);
- __ mulsd(input, double_scratch);
- __ mulsd(result, input);
- __ movq(input, temp1);
- __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
- __ subsd(result, double_scratch);
- __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
- __ mulsd(result, input);
-
- __ bind(&done);
-}
-
-#undef __
-
-
-static const int kNoCodeAgeSequenceLength = 6;
-
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- static bool initialized = false;
- static byte sequence[kNoCodeAgeSequenceLength];
- *length = kNoCodeAgeSequenceLength;
- if (!initialized) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found both in
- // FUNCTION and OPTIMIZED_FUNCTION code:
- CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->push(rbp);
- patcher.masm()->movq(rbp, rsp);
- patcher.masm()->push(rsi);
- patcher.masm()->push(rdi);
- initialized = true;
- }
- return sequence;
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = (!memcmp(sequence, young_sequence, young_length));
- ASSERT(result || *sequence == kCallOpcode);
- return result;
-}
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAge;
- *parity = NO_MARKING_PARITY;
- } else {
- sequence++; // Skip the kCallOpcode byte
- Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
- Assembler::kCallTargetAddressOffset;
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
- memcpy(sequence, young_sequence, young_length);
- CPU::FlushICache(sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(age, parity);
- CodePatcher patcher(sequence, young_length);
- patcher.masm()->call(stub->instruction_start());
- patcher.masm()->nop();
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/codegen-x64.h b/src/3rdparty/v8/src/x64/codegen-x64.h
deleted file mode 100644
index 3a7646b..0000000
--- a/src/3rdparty/v8/src/x64/codegen-x64.h
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_CODEGEN_X64_H_
-#define V8_X64_CODEGEN_X64_H_
-
-#include "ast.h"
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations
-class CompilationInfo;
-
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- CodeGenerator() {
- InitializeAstVisitor();
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Expression* type);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_CODEGEN_X64_H_
diff --git a/src/3rdparty/v8/src/x64/cpu-x64.cc b/src/3rdparty/v8/src/x64/cpu-x64.cc
deleted file mode 100644
index 80e22c6..0000000
--- a/src/3rdparty/v8/src/x64/cpu-x64.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// CPU specific code for x64 independent of OS goes here.
-
-#if defined(__GNUC__) && !defined(__MINGW64__)
-#include "third_party/valgrind/valgrind.h"
-#endif
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "cpu.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return true; // Yay!
-}
-
-
-void CPU::FlushICache(void* start, size_t size) {
- // No need to flush the instruction cache on Intel. On Intel instruction
- // cache flushing is only necessary when multiple cores running the same
- // code simultaneously. V8 (and JavaScript) is single threaded and when code
- // is patched on an intel CPU the core performing the patching will have its
- // own instruction cache updated automatically.
-
- // If flushing of the instruction cache becomes necessary Windows has the
- // API function FlushInstructionCache.
-
- // By default, valgrind only checks the stack for writes that might need to
- // invalidate already cached translated code. This leads to random
- // instability when code patches or moves are sometimes unnoticed. One
- // solution is to run valgrind with --smc-check=all, but this comes at a big
- // performance cost. We can notify valgrind to invalidate its cache.
-#ifdef VALGRIND_DISCARD_TRANSLATIONS
- unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
- USE(res);
-#endif
-}
-
-
-void CPU::DebugBreak() {
-#ifdef _MSC_VER
- // To avoid Visual Studio runtime support the following code can be used
- // instead
- // __asm { int 3 }
- __debugbreak();
-#else
- asm("int $3");
-#endif
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/debug-x64.cc b/src/3rdparty/v8/src/x64/debug-x64.cc
deleted file mode 100644
index 1b29e58..0000000
--- a/src/3rdparty/v8/src/x64/debug-x64.cc
+++ /dev/null
@@ -1,354 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "assembler.h"
-#include "codegen.h"
-#include "debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-// Patch the JS frame exit code with a debug break call. See
-// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x64.cc
-// for the precise return instructions sequence.
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- ASSERT(Assembler::kJSReturnSequenceLength >=
- Assembler::kCallInstructionLength);
- rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_return()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
-}
-
-
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceLength);
-}
-
-
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return !Assembler::IsNop(rinfo()->pc());
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_slot()->entry(),
- Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
-}
-
-const bool Debug::FramePaddingLayout::kIsSupported = true;
-
-
-#define __ ACCESS_MASM(masm)
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs,
- bool convert_call_to_jmp) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
- __ Push(Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue));
- }
- __ Push(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize));
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as as two smis causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- ASSERT(!reg.is(kScratchRegister));
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
- }
- // Store the 64-bit value as two smis.
- if ((non_object_regs & (1 << r)) != 0) {
- __ movq(kScratchRegister, reg);
- __ Integer32ToSmi(reg, reg);
- __ push(reg);
- __ sar(kScratchRegister, Immediate(32));
- __ Integer32ToSmi(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
- }
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ Set(rax, 0); // No arguments (argc == 0).
- __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Set(reg, kDebugZapValue);
- }
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
- }
- // Reconstruct the 64-bit value from two smis.
- if ((non_object_regs & (1 << r)) != 0) {
- __ pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ shl(kScratchRegister, Immediate(32));
- __ pop(reg);
- __ SmiToInteger32(reg, reg);
- __ or_(reg, kScratchRegister);
- }
- }
-
- // Read current padding counter and skip corresponding number of words.
- __ pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
-
- // Get rid of the internal frame.
- }
-
- // If this call did not replace a call but patched other code then there will
- // be an unwanted return address left on the stack. Here we get rid of that.
- if (convert_call_to_jmp) {
- __ addq(rsp, Immediate(kPointerSize));
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
- __ movq(kScratchRegister, after_break_target);
- __ jmp(Operand(kScratchRegister, 0));
-}
-
-
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for IC load call (from ic-x64.cc).
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), 0, false);
-}
-
-
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for IC store call (from ic-x64.cc).
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false);
-}
-
-
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC load call (from ic-x64.cc).
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rdx.bit(), 0, false);
-}
-
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC load call (from ic-x64.cc).
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Register state for IC call call (from ic-x64.cc)
- // ----------- S t a t e -------------
- // -- rcx: function name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rcx.bit(), 0, false);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // Register state just before return from JS function (from codegen-x64.cc).
- // ----------- S t a t e -------------
- // -- rax: return value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit(), 0, true);
-}
-
-
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-x64.cc).
- // ----------- S t a t e -------------
- // -- rdi : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rdi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-x64.cc).
- // ----------- S t a t e -------------
- // -- rdi : function
- // -- rbx: cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-x64.cc).
- // rax is the actual number of arguments not encoded as a smi, see comment
- // above IC call.
- // ----------- S t a t e -------------
- // -- rax: number of arguments
- // -----------------------------------
- // The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rdi.bit(), rax.bit(), false);
-}
-
-
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-x64.cc).
- // rax is the actual number of arguments not encoded as a smi, see comment
- // above IC call.
- // ----------- S t a t e -------------
- // -- rax: number of arguments
- // -- rbx: cache cell for call target
- // -----------------------------------
- // The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), rax.bit(), false);
-}
-
-
-void Debug::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction.
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- __ Nop(Assembler::kDebugBreakSlotLength);
- ASSERT_EQ(Assembler::kDebugBreakSlotLength,
- masm->SizeOfCodeGeneratedSince(&check_codesize));
-}
-
-
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0, true);
-}
-
-
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
- masm->isolate());
- __ movq(rax, restarter_frame_function_slot);
- __ movq(Operand(rax, 0), Immediate(0));
-
- // We do not know our frame height, but set rsp based on rbp.
- __ lea(rsp, Operand(rbp, -1 * kPointerSize));
-
- __ pop(rdi); // Function.
- __ pop(rbp);
-
- // Load context from the function.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Get function code.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
-
- // Re-run JSFunction, rdi is function, rsi is context.
- __ jmp(rdx);
-}
-
-const bool Debug::kFrameDropperSupported = true;
-
-#undef __
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/deoptimizer-x64.cc b/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
deleted file mode 100644
index 8e776f9..0000000
--- a/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
+++ /dev/null
@@ -1,1076 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-
-const int Deoptimizer::table_entry_size_ = 10;
-
-
-int Deoptimizer::patch_size() {
- return Assembler::kCallInstructionLength;
-}
-
-
-void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction* function) {
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
-
- ASSERT(function->IsOptimized());
- ASSERT(function->FunctionsInFunctionListShareSameCode());
-
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
- // Get the optimized code.
- Code* code = function->code();
-
- // Invalidate the relocation information, as it will become invalid by the
- // code patching below, and is not needed any more.
- code->InvalidateRelocation();
-
- // For each LLazyBailout instruction insert a absolute call to the
- // corresponding deoptimization entry, or a short call to an absolute
- // jump if space is short. The absolute jumps are put in a table just
- // before the safepoint table (space was allocated there when the Code
- // object was created, if necessary).
-
- Address instruction_start = function->code()->instruction_start();
-#ifdef DEBUG
- Address prev_call_address = NULL;
-#endif
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- if (deopt_data->Pc(i)->value() == -1) continue;
- // Position where Call will be patched in.
- Address call_address = instruction_start + deopt_data->Pc(i)->value();
- // There is room enough to write a long call instruction because we pad
- // LLazyBailout instructions with nops if necessary.
- CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
- patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY),
- RelocInfo::NONE64);
- ASSERT(prev_call_address == NULL ||
- call_address >= prev_call_address + patch_size());
- ASSERT(call_address + patch_size() <= code->instruction_end());
-#ifdef DEBUG
- prev_call_address = call_address;
-#endif
- }
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
- }
-}
-
-
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x1f;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(check_code->entry(),
- Assembler::target_address_at(call_target_address));
- // The back edge bookkeeping code matches the pattern:
- //
- // add <profiling_counter>, <-delta>
- // jns ok
- // call <stack guard>
- // test rax, <loop nesting depth>
- // ok: ...
- //
- // We will patch away the branch so the code is:
- //
- // add <profiling_counter>, <-delta> ;; Not changed
- // nop
- // nop
- // call <on-stack replacment>
- // test rax, <loop nesting depth>
- // ok:
- //
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT(replacement_code->entry() ==
- Assembler::target_address_at(call_target_address));
- // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
- // restore the conditional branch.
- ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- Assembler::set_target_address_at(call_target_address,
- check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, check_code);
-}
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
- // TODO(kasperl): This should not be the bailout_id_. It should be
- // the ast id. Confusing.
- ASSERT(bailout_id_ == ast_id);
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [rsp + %d] <- 0x%08" V8PRIxPTR " ; [rsp + %d] "
- "(fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
- output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- intptr_t pc = reinterpret_cast<intptr_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation =
- function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
- }
-}
-
-
-void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
- int frame_index) {
- //
- // FROM TO
- // | .... | | .... |
- // +-------------------------+ +-------------------------+
- // | JSFunction continuation | | JSFunction continuation |
- // +-------------------------+ +-------------------------+
- // | | saved frame (rbp) | | saved frame (rbp) |
- // | +=========================+<-rbp +=========================+<-rbp
- // | | JSFunction context | | JSFunction context |
- // v +-------------------------+ +-------------------------|
- // | COMPILED_STUB marker | | STUB_FAILURE marker |
- // +-------------------------+ +-------------------------+
- // | | | caller args.arguments_ |
- // | ... | +-------------------------+
- // | | | caller args.length_ |
- // |-------------------------|<-rsp +-------------------------+
- // | caller args pointer |
- // +-------------------------+
- // | caller stack param 1 |
- // parameters in registers +-------------------------+
- // and spilled to stack | .... |
- // +-------------------------+
- // | caller stack param n |
- // +-------------------------+<-rsp
- // rax = number of parameters
- // rbx = failure handler address
- // rbp = saved frame
- // rsi = JSFunction context
- //
-
- ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
- int major_key = compiled_code_->major_key();
- CodeStubInterfaceDescriptor* descriptor =
- isolate_->code_stub_interface_descriptor(major_key);
-
- // The output frame must have room for all pushed register parameters
- // and the standard stack frame slots. Include space for an argument
- // object to the callee and optionally the space to pass the argument
- // object to the stub failure handler.
- int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
- sizeof(Arguments) + kPointerSize;
- int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
- int input_frame_size = input_->GetFrameSize();
- int output_frame_size = height_in_bytes + fixed_frame_size;
- if (trace_) {
- PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
- CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
- height_in_bytes);
- }
-
- // The stub failure trampoline is a single frame.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, NULL);
- output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ASSERT(frame_index == 0);
- output_[frame_index] = output_frame;
-
- // The top address for the output frame can be computed from the input
- // frame pointer and the output frame's height. Subtract space for the
- // context and function slots.
- intptr_t top_address = input_->GetRegister(rbp.code()) - (2 * kPointerSize) -
- height_in_bytes;
- output_frame->SetTop(top_address);
-
- // Read caller's PC (JSFunction continuation) from the input frame.
- unsigned input_frame_offset = input_frame_size - kPointerSize;
- unsigned output_frame_offset = output_frame_size - kPointerSize;
- intptr_t value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Read caller's FP from the input frame, and set this frame's FP.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- intptr_t frame_ptr = input_->GetRegister(rbp.code());
- output_frame->SetRegister(rbp.code(), frame_ptr);
- output_frame->SetFp(frame_ptr);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // The context can be gotten from the input frame.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetRegister(rsi.code(), value);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; context\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_frame_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; function (stub failure sentinel)\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- intptr_t caller_arg_count = 0;
- if (descriptor->stack_parameter_count_ != NULL) {
- caller_arg_count =
- input_->GetRegister(descriptor->stack_parameter_count_->code());
- }
-
- // Build the Arguments object for the caller's parameters and a pointer to it.
- output_frame_offset -= kPointerSize;
- value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
- (caller_arg_count - 1) * kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; args.arguments\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = caller_arg_count;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; args.length\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = frame_ptr - (output_frame_size - output_frame_offset) -
- StandardFrameConstants::kMarkerOffset + kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; args*\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Copy the register parameters to the failure frame.
- for (int i = 0; i < descriptor->register_param_count_; ++i) {
- output_frame_offset -= kPointerSize;
- DoTranslateCommand(iterator, 0, output_frame_offset);
- }
-
- ASSERT(0 == output_frame_offset);
-
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
- double double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-
- intptr_t handler =
- reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
- output_frame->SetRegister(rax.code(), params);
- output_frame->SetRegister(rbx.code(), handler);
-
- // Compute this frame's PC, state, and continuation.
- Code* trampoline = NULL;
- int extra = descriptor->extra_expression_stack_count_;
- StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
- ASSERT(trampoline != NULL);
- output_frame->SetPc(reinterpret_cast<intptr_t>(
- trampoline->instruction_start()));
- output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
- Code* notify_failure =
- isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(notify_failure->entry()));
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = 7 * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
- // Construct stub can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- intptr_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The output frame reflects a JSConstructStubGeneric frame.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
-
- ASSERT(0 == output_offset);
-
- intptr_t pc = reinterpret_cast<intptr_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- intptr_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(rsi.code(), value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Code* continuation = (bailout_type_ == EAGER)
- ? isolate_->builtins()->builtin(Builtins::kNotifyDeoptimized)
- : isolate_->builtins()->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
- }
-}
-
-
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers rbp and rsp are set to the correct values though.
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
- }
-}
-
-
-#define __ masm()->
-
-void Deoptimizer::EntryGenerator::Generate() {
- GeneratePrologue();
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::NumAllocatableRegisters();
- __ subq(rsp, Immediate(kDoubleRegsSize));
-
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movsd(Operand(rsp, offset), xmm_reg);
- }
-
- // We push all registers onto the stack, even though we do not need
- // to restore all later.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- Register r = Register::from_code(i);
- __ push(r);
- }
-
- const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
- kDoubleRegsSize;
-
- // When calling new_deoptimizer_function we need to pass the last argument
- // on the stack on windows and in r8 on linux. The remaining arguments are
- // all passed in registers (different ones on linux and windows though).
-
-#ifdef _WIN64
- Register arg4 = r9;
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg4 = rcx;
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
-
- // We use this to keep the value of the fifth argument temporarily.
- // Unfortunately we can't store it directly in r8 (used for passing
- // this on linux), since it is another parameter passing register on windows.
- Register arg5 = r11;
-
- // Get the bailout id from the stack.
- __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize));
-
- // Get the address of the location in the code object if possible
- // and compute the fp-to-sp delta in register arg5.
- if (type() == EAGER) {
- __ Set(arg4, 0);
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- } else {
- __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
- }
-
- __ subq(arg5, rbp);
- __ neg(arg5);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6);
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(arg1, rax);
- __ Set(arg2, type());
- // Args 3 and 4 are already in the right registers.
-
- // On windows put the arguments on the stack (PrepareCallCFunction
- // has created space for this). On linux pass the arguments in r8 and r9.
-#ifdef _WIN64
- __ movq(Operand(rsp, 4 * kPointerSize), arg5);
- __ LoadAddress(arg5, ExternalReference::isolate_address());
- __ movq(Operand(rsp, 5 * kPointerSize), arg5);
-#else
- __ movq(r8, arg5);
- __ LoadAddress(r9, ExternalReference::isolate_address());
-#endif
-
- Isolate* isolate = masm()->isolate();
-
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
- }
- // Preserve deoptimizer object in register rax and get the input
- // frame descriptor pointer.
- __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
-
- // Fill in the input registers.
- for (int i = kNumberOfRegisters -1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(rbx, offset));
- }
-
- // Fill in the double input registers.
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- __ pop(Operand(rbx, dst_offset));
- }
-
- // Remove the bailout id from the stack.
- if (type() == EAGER) {
- __ addq(rsp, Immediate(kPointerSize));
- } else {
- __ addq(rsp, Immediate(2 * kPointerSize));
- }
-
- // Compute a pointer to the unwinding limit in register rcx; that is
- // the first stack slot not part of the input frame.
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ addq(rcx, rsp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
- Label pop_loop_header;
- __ jmp(&pop_loop_header);
- Label pop_loop;
- __ bind(&pop_loop);
- __ pop(Operand(rdx, 0));
- __ addq(rdx, Immediate(sizeof(intptr_t)));
- __ bind(&pop_loop_header);
- __ cmpq(rcx, rsp);
- __ j(not_equal, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(rax);
- __ PrepareCallCFunction(2);
- __ movq(arg1, rax);
- __ LoadAddress(arg2, ExternalReference::isolate_address());
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 2);
- }
- __ pop(rax);
-
- // Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
- // Outer loop state: rax = current FrameDescription**, rdx = one past the
- // last FrameDescription**.
- __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
- __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
- __ lea(rdx, Operand(rax, rdx, times_8, 0));
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
- __ movq(rbx, Operand(rax, 0));
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ subq(rcx, Immediate(sizeof(intptr_t)));
- __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
- __ bind(&inner_loop_header);
- __ testq(rcx, rcx);
- __ j(not_zero, &inner_push_loop);
- __ addq(rax, Immediate(kPointerSize));
- __ bind(&outer_loop_header);
- __ cmpq(rax, rdx);
- __ j(below, &outer_push_loop);
-
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(rbx, src_offset));
- }
-
- // Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ push(Operand(rbx, FrameDescription::state_offset()));
- }
- __ push(Operand(rbx, FrameDescription::pc_offset()));
- __ push(Operand(rbx, FrameDescription::continuation_offset()));
-
- // Push the registers from the last output frame.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(rbx, offset));
- }
-
- // Restore the registers from the stack.
- for (int i = kNumberOfRegisters - 1; i >= 0 ; i--) {
- Register r = Register::from_code(i);
- // Do not restore rsp, simply pop the value into the next register
- // and overwrite this afterwards.
- if (r.is(rsp)) {
- ASSERT(i > 0);
- r = Register::from_code(i - 1);
- }
- __ pop(r);
- }
-
- // Set up the roots register.
- __ InitializeRootRegister();
- __ InitializeSmiConstantRegister();
-
- // Return to the continuation point.
- __ ret(0);
-}
-
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ push_imm32(i);
- __ jmp(&done);
- ASSERT(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
-}
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/disasm-x64.cc b/src/3rdparty/v8/src/x64/disasm-x64.cc
deleted file mode 100644
index fb0914d..0000000
--- a/src/3rdparty/v8/src/x64/disasm-x64.cc
+++ /dev/null
@@ -1,1869 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdarg.h>
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "disasm.h"
-#include "lazy-instance.h"
-
-namespace disasm {
-
-enum OperandType {
- UNSET_OP_ORDER = 0,
- // Operand size decides between 16, 32 and 64 bit operands.
- REG_OPER_OP_ORDER = 1, // Register destination, operand source.
- OPER_REG_OP_ORDER = 2, // Operand destination, register source.
- // Fixed 8-bit operands.
- BYTE_SIZE_OPERAND_FLAG = 4,
- BYTE_REG_OPER_OP_ORDER = REG_OPER_OP_ORDER | BYTE_SIZE_OPERAND_FLAG,
- BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG
-};
-
-//------------------------------------------------------------------
-// Tables
-//------------------------------------------------------------------
-struct ByteMnemonic {
- int b; // -1 terminates, otherwise must be in range (0..255)
- OperandType op_order_;
- const char* mnem;
-};
-
-
-static const ByteMnemonic two_operands_instr[] = {
- { 0x00, BYTE_OPER_REG_OP_ORDER, "add" },
- { 0x01, OPER_REG_OP_ORDER, "add" },
- { 0x02, BYTE_REG_OPER_OP_ORDER, "add" },
- { 0x03, REG_OPER_OP_ORDER, "add" },
- { 0x08, BYTE_OPER_REG_OP_ORDER, "or" },
- { 0x09, OPER_REG_OP_ORDER, "or" },
- { 0x0A, BYTE_REG_OPER_OP_ORDER, "or" },
- { 0x0B, REG_OPER_OP_ORDER, "or" },
- { 0x10, BYTE_OPER_REG_OP_ORDER, "adc" },
- { 0x11, OPER_REG_OP_ORDER, "adc" },
- { 0x12, BYTE_REG_OPER_OP_ORDER, "adc" },
- { 0x13, REG_OPER_OP_ORDER, "adc" },
- { 0x18, BYTE_OPER_REG_OP_ORDER, "sbb" },
- { 0x19, OPER_REG_OP_ORDER, "sbb" },
- { 0x1A, BYTE_REG_OPER_OP_ORDER, "sbb" },
- { 0x1B, REG_OPER_OP_ORDER, "sbb" },
- { 0x20, BYTE_OPER_REG_OP_ORDER, "and" },
- { 0x21, OPER_REG_OP_ORDER, "and" },
- { 0x22, BYTE_REG_OPER_OP_ORDER, "and" },
- { 0x23, REG_OPER_OP_ORDER, "and" },
- { 0x28, BYTE_OPER_REG_OP_ORDER, "sub" },
- { 0x29, OPER_REG_OP_ORDER, "sub" },
- { 0x2A, BYTE_REG_OPER_OP_ORDER, "sub" },
- { 0x2B, REG_OPER_OP_ORDER, "sub" },
- { 0x30, BYTE_OPER_REG_OP_ORDER, "xor" },
- { 0x31, OPER_REG_OP_ORDER, "xor" },
- { 0x32, BYTE_REG_OPER_OP_ORDER, "xor" },
- { 0x33, REG_OPER_OP_ORDER, "xor" },
- { 0x38, BYTE_OPER_REG_OP_ORDER, "cmp" },
- { 0x39, OPER_REG_OP_ORDER, "cmp" },
- { 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
- { 0x3B, REG_OPER_OP_ORDER, "cmp" },
- { 0x63, REG_OPER_OP_ORDER, "movsxlq" },
- { 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
- { 0x85, REG_OPER_OP_ORDER, "test" },
- { 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
- { 0x87, REG_OPER_OP_ORDER, "xchg" },
- { 0x88, BYTE_OPER_REG_OP_ORDER, "mov" },
- { 0x89, OPER_REG_OP_ORDER, "mov" },
- { 0x8A, BYTE_REG_OPER_OP_ORDER, "mov" },
- { 0x8B, REG_OPER_OP_ORDER, "mov" },
- { 0x8D, REG_OPER_OP_ORDER, "lea" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
-
-static const ByteMnemonic zero_operands_instr[] = {
- { 0xC3, UNSET_OP_ORDER, "ret" },
- { 0xC9, UNSET_OP_ORDER, "leave" },
- { 0xF4, UNSET_OP_ORDER, "hlt" },
- { 0xFC, UNSET_OP_ORDER, "cld" },
- { 0xCC, UNSET_OP_ORDER, "int3" },
- { 0x60, UNSET_OP_ORDER, "pushad" },
- { 0x61, UNSET_OP_ORDER, "popad" },
- { 0x9C, UNSET_OP_ORDER, "pushfd" },
- { 0x9D, UNSET_OP_ORDER, "popfd" },
- { 0x9E, UNSET_OP_ORDER, "sahf" },
- { 0x99, UNSET_OP_ORDER, "cdq" },
- { 0x9B, UNSET_OP_ORDER, "fwait" },
- { 0xA4, UNSET_OP_ORDER, "movs" },
- { 0xA5, UNSET_OP_ORDER, "movs" },
- { 0xA6, UNSET_OP_ORDER, "cmps" },
- { 0xA7, UNSET_OP_ORDER, "cmps" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
-
-static const ByteMnemonic call_jump_instr[] = {
- { 0xE8, UNSET_OP_ORDER, "call" },
- { 0xE9, UNSET_OP_ORDER, "jmp" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
-
-static const ByteMnemonic short_immediate_instr[] = {
- { 0x05, UNSET_OP_ORDER, "add" },
- { 0x0D, UNSET_OP_ORDER, "or" },
- { 0x15, UNSET_OP_ORDER, "adc" },
- { 0x1D, UNSET_OP_ORDER, "sbb" },
- { 0x25, UNSET_OP_ORDER, "and" },
- { 0x2D, UNSET_OP_ORDER, "sub" },
- { 0x35, UNSET_OP_ORDER, "xor" },
- { 0x3D, UNSET_OP_ORDER, "cmp" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
-
-static const char* const conditional_code_suffix[] = {
- "o", "no", "c", "nc", "z", "nz", "na", "a",
- "s", "ns", "pe", "po", "l", "ge", "le", "g"
-};
-
-
-enum InstructionType {
- NO_INSTR,
- ZERO_OPERANDS_INSTR,
- TWO_OPERANDS_INSTR,
- JUMP_CONDITIONAL_SHORT_INSTR,
- REGISTER_INSTR,
- PUSHPOP_INSTR, // Has implicit 64-bit operand size.
- MOVE_REG_INSTR,
- CALL_JUMP_INSTR,
- SHORT_IMMEDIATE_INSTR
-};
-
-
-enum Prefixes {
- ESCAPE_PREFIX = 0x0F,
- OPERAND_SIZE_OVERRIDE_PREFIX = 0x66,
- ADDRESS_SIZE_OVERRIDE_PREFIX = 0x67,
- REPNE_PREFIX = 0xF2,
- REP_PREFIX = 0xF3,
- REPEQ_PREFIX = REP_PREFIX
-};
-
-
-struct InstructionDesc {
- const char* mnem;
- InstructionType type;
- OperandType op_order_;
- bool byte_size_operation; // Fixed 8-bit operation.
-};
-
-
-class InstructionTable {
- public:
- InstructionTable();
- const InstructionDesc& Get(byte x) const {
- return instructions_[x];
- }
-
- private:
- InstructionDesc instructions_[256];
- void Clear();
- void Init();
- void CopyTable(const ByteMnemonic bm[], InstructionType type);
- void SetTableRange(InstructionType type, byte start, byte end, bool byte_size,
- const char* mnem);
- void AddJumpConditionalShort();
-};
-
-
-InstructionTable::InstructionTable() {
- Clear();
- Init();
-}
-
-
-void InstructionTable::Clear() {
- for (int i = 0; i < 256; i++) {
- instructions_[i].mnem = "(bad)";
- instructions_[i].type = NO_INSTR;
- instructions_[i].op_order_ = UNSET_OP_ORDER;
- instructions_[i].byte_size_operation = false;
- }
-}
-
-
-void InstructionTable::Init() {
- CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
- CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
- CopyTable(call_jump_instr, CALL_JUMP_INSTR);
- CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
- AddJumpConditionalShort();
- SetTableRange(PUSHPOP_INSTR, 0x50, 0x57, false, "push");
- SetTableRange(PUSHPOP_INSTR, 0x58, 0x5F, false, "pop");
- SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, false, "mov");
-}
-
-
-void InstructionTable::CopyTable(const ByteMnemonic bm[],
- InstructionType type) {
- for (int i = 0; bm[i].b >= 0; i++) {
- InstructionDesc* id = &instructions_[bm[i].b];
- id->mnem = bm[i].mnem;
- OperandType op_order = bm[i].op_order_;
- id->op_order_ =
- static_cast<OperandType>(op_order & ~BYTE_SIZE_OPERAND_FLAG);
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
- id->type = type;
- id->byte_size_operation = ((op_order & BYTE_SIZE_OPERAND_FLAG) != 0);
- }
-}
-
-
-void InstructionTable::SetTableRange(InstructionType type,
- byte start,
- byte end,
- bool byte_size,
- const char* mnem) {
- for (byte b = start; b <= end; b++) {
- InstructionDesc* id = &instructions_[b];
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
- id->mnem = mnem;
- id->type = type;
- id->byte_size_operation = byte_size;
- }
-}
-
-
-void InstructionTable::AddJumpConditionalShort() {
- for (byte b = 0x70; b <= 0x7F; b++) {
- InstructionDesc* id = &instructions_[b];
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
- id->mnem = NULL; // Computed depending on condition code.
- id->type = JUMP_CONDITIONAL_SHORT_INSTR;
- }
-}
-
-
-static v8::internal::LazyInstance<InstructionTable>::type instruction_table =
- LAZY_INSTANCE_INITIALIZER;
-
-
-static InstructionDesc cmov_instructions[16] = {
- {"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovno", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovnc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovnz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovna", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmova", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovs", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovns", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovpe", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovpo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovl", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovge", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}
-};
-
-//------------------------------------------------------------------------------
-// DisassemblerX64 implementation.
-
-enum UnimplementedOpcodeAction {
- CONTINUE_ON_UNIMPLEMENTED_OPCODE,
- ABORT_ON_UNIMPLEMENTED_OPCODE
-};
-
-// A new DisassemblerX64 object is created to disassemble each instruction.
-// The object can only disassemble a single instruction.
-class DisassemblerX64 {
- public:
- DisassemblerX64(const NameConverter& converter,
- UnimplementedOpcodeAction unimplemented_action =
- ABORT_ON_UNIMPLEMENTED_OPCODE)
- : converter_(converter),
- tmp_buffer_pos_(0),
- abort_on_unimplemented_(
- unimplemented_action == ABORT_ON_UNIMPLEMENTED_OPCODE),
- rex_(0),
- operand_size_(0),
- group_1_prefix_(0),
- byte_size_operand_(false),
- instruction_table_(instruction_table.Pointer()) {
- tmp_buffer_[0] = '\0';
- }
-
- virtual ~DisassemblerX64() {
- }
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
-
- private:
- enum OperandSize {
- BYTE_SIZE = 0,
- WORD_SIZE = 1,
- DOUBLEWORD_SIZE = 2,
- QUADWORD_SIZE = 3
- };
-
- const NameConverter& converter_;
- v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
- unsigned int tmp_buffer_pos_;
- bool abort_on_unimplemented_;
- // Prefixes parsed
- byte rex_;
- byte operand_size_; // 0x66 or (if no group 3 prefix is present) 0x0.
- byte group_1_prefix_; // 0xF2, 0xF3, or (if no group 1 prefix is present) 0.
- // Byte size operand override.
- bool byte_size_operand_;
- const InstructionTable* const instruction_table_;
-
- void setRex(byte rex) {
- ASSERT_EQ(0x40, rex & 0xF0);
- rex_ = rex;
- }
-
- bool rex() { return rex_ != 0; }
-
- bool rex_b() { return (rex_ & 0x01) != 0; }
-
- // Actual number of base register given the low bits and the rex.b state.
- int base_reg(int low_bits) { return low_bits | ((rex_ & 0x01) << 3); }
-
- bool rex_x() { return (rex_ & 0x02) != 0; }
-
- bool rex_r() { return (rex_ & 0x04) != 0; }
-
- bool rex_w() { return (rex_ & 0x08) != 0; }
-
- OperandSize operand_size() {
- if (byte_size_operand_) return BYTE_SIZE;
- if (rex_w()) return QUADWORD_SIZE;
- if (operand_size_ != 0) return WORD_SIZE;
- return DOUBLEWORD_SIZE;
- }
-
- char operand_size_code() {
- return "bwlq"[operand_size()];
- }
-
- const char* NameOfCPURegister(int reg) const {
- return converter_.NameOfCPURegister(reg);
- }
-
- const char* NameOfByteCPURegister(int reg) const {
- return converter_.NameOfByteCPURegister(reg);
- }
-
- const char* NameOfXMMRegister(int reg) const {
- return converter_.NameOfXMMRegister(reg);
- }
-
- const char* NameOfAddress(byte* addr) const {
- return converter_.NameOfAddress(addr);
- }
-
- // Disassembler helper functions.
- void get_modrm(byte data,
- int* mod,
- int* regop,
- int* rm) {
- *mod = (data >> 6) & 3;
- *regop = ((data & 0x38) >> 3) | (rex_r() ? 8 : 0);
- *rm = (data & 7) | (rex_b() ? 8 : 0);
- }
-
- void get_sib(byte data,
- int* scale,
- int* index,
- int* base) {
- *scale = (data >> 6) & 3;
- *index = ((data >> 3) & 7) | (rex_x() ? 8 : 0);
- *base = (data & 7) | (rex_b() ? 8 : 0);
- }
-
- typedef const char* (DisassemblerX64::*RegisterNameMapping)(int reg) const;
-
- int PrintRightOperandHelper(byte* modrmp,
- RegisterNameMapping register_name);
- int PrintRightOperand(byte* modrmp);
- int PrintRightByteOperand(byte* modrmp);
- int PrintRightXMMOperand(byte* modrmp);
- int PrintOperands(const char* mnem,
- OperandType op_order,
- byte* data);
- int PrintImmediate(byte* data, OperandSize size);
- int PrintImmediateOp(byte* data);
- const char* TwoByteMnemonic(byte opcode);
- int TwoByteOpcodeInstruction(byte* data);
- int F6F7Instruction(byte* data);
- int ShiftInstruction(byte* data);
- int JumpShort(byte* data);
- int JumpConditional(byte* data);
- int JumpConditionalShort(byte* data);
- int SetCC(byte* data);
- int FPUInstruction(byte* data);
- int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
- int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
- void AppendToBuffer(const char* format, ...);
-
- void UnimplementedInstruction() {
- if (abort_on_unimplemented_) {
- CHECK(false);
- } else {
- AppendToBuffer("'Unimplemented Instruction'");
- }
- }
-};
-
-
-void DisassemblerX64::AppendToBuffer(const char* format, ...) {
- v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
- va_list args;
- va_start(args, format);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
- va_end(args);
- tmp_buffer_pos_ += result;
-}
-
-
-int DisassemblerX64::PrintRightOperandHelper(
- byte* modrmp,
- RegisterNameMapping direct_register_name) {
- int mod, regop, rm;
- get_modrm(*modrmp, &mod, &regop, &rm);
- RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
- &DisassemblerX64::NameOfCPURegister;
- switch (mod) {
- case 0:
- if ((rm & 7) == 5) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 1);
- AppendToBuffer("[0x%x]", disp);
- return 5;
- } else if ((rm & 7) == 4) {
- // Codes for SIB byte.
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
- // index == rsp means no index. Only use sib byte with no index for
- // rsp and r12 base.
- AppendToBuffer("[%s]", NameOfCPURegister(base));
- return 2;
- } else if (base == 5) {
- // base == rbp means no base register (when mod == 0).
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
- NameOfCPURegister(index),
- 1 << scale, disp);
- return 6;
- } else if (index != 4 && base != 5) {
- // [base+index*scale]
- AppendToBuffer("[%s+%s*%d]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale);
- return 2;
- } else {
- UnimplementedInstruction();
- return 1;
- }
- } else {
- AppendToBuffer("[%s]", NameOfCPURegister(rm));
- return 1;
- }
- break;
- case 1: // fall through
- case 2:
- if ((rm & 7) == 4) {
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2)
- : *reinterpret_cast<char*>(modrmp + 2);
- if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(base), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(base), disp);
- }
- } else {
- if (-disp > 0) {
- AppendToBuffer("[%s+%s*%d-0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- -disp);
- } else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- disp);
- }
- }
- return mod == 2 ? 6 : 3;
- } else {
- // No sib.
- int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
- : *reinterpret_cast<char*>(modrmp + 1);
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(rm), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
- }
- return (mod == 2) ? 5 : 2;
- }
- break;
- case 3:
- AppendToBuffer("%s", (this->*register_name)(rm));
- return 1;
- default:
- UnimplementedInstruction();
- return 1;
- }
- UNREACHABLE();
-}
-
-
-int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
- int64_t value;
- int count;
- switch (size) {
- case BYTE_SIZE:
- value = *data;
- count = 1;
- break;
- case WORD_SIZE:
- value = *reinterpret_cast<int16_t*>(data);
- count = 2;
- break;
- case DOUBLEWORD_SIZE:
- value = *reinterpret_cast<uint32_t*>(data);
- count = 4;
- break;
- case QUADWORD_SIZE:
- value = *reinterpret_cast<int32_t*>(data);
- count = 4;
- break;
- default:
- UNREACHABLE();
- value = 0; // Initialize variables on all paths to satisfy the compiler.
- count = 0;
- }
- AppendToBuffer("%" V8_PTR_PREFIX "x", value);
- return count;
-}
-
-
-int DisassemblerX64::PrintRightOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerX64::NameOfCPURegister);
-}
-
-
-int DisassemblerX64::PrintRightByteOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerX64::NameOfByteCPURegister);
-}
-
-
-int DisassemblerX64::PrintRightXMMOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerX64::NameOfXMMRegister);
-}
-
-
-// Returns number of bytes used including the current *data.
-// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
-int DisassemblerX64::PrintOperands(const char* mnem,
- OperandType op_order,
- byte* data) {
- byte modrm = *data;
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- int advance = 0;
- const char* register_name =
- byte_size_operand_ ? NameOfByteCPURegister(regop)
- : NameOfCPURegister(regop);
- switch (op_order) {
- case REG_OPER_OP_ORDER: {
- AppendToBuffer("%s%c %s,",
- mnem,
- operand_size_code(),
- register_name);
- advance = byte_size_operand_ ? PrintRightByteOperand(data)
- : PrintRightOperand(data);
- break;
- }
- case OPER_REG_OP_ORDER: {
- AppendToBuffer("%s%c ", mnem, operand_size_code());
- advance = byte_size_operand_ ? PrintRightByteOperand(data)
- : PrintRightOperand(data);
- AppendToBuffer(",%s", register_name);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return advance;
-}
-
-
-// Returns number of bytes used by machine instruction, including *data byte.
-// Writes immediate instructions to 'tmp_buffer_'.
-int DisassemblerX64::PrintImmediateOp(byte* data) {
- bool byte_size_immediate = (*data & 0x02) != 0;
- byte modrm = *(data + 1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- const char* mnem = "Imm???";
- switch (regop) {
- case 0:
- mnem = "add";
- break;
- case 1:
- mnem = "or";
- break;
- case 2:
- mnem = "adc";
- break;
- case 3:
- mnem = "sbb";
- break;
- case 4:
- mnem = "and";
- break;
- case 5:
- mnem = "sub";
- break;
- case 6:
- mnem = "xor";
- break;
- case 7:
- mnem = "cmp";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s%c ", mnem, operand_size_code());
- int count = PrintRightOperand(data + 1);
- AppendToBuffer(",0x");
- OperandSize immediate_size = byte_size_immediate ? BYTE_SIZE : operand_size();
- count += PrintImmediate(data + 1 + count, immediate_size);
- return 1 + count;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::F6F7Instruction(byte* data) {
- ASSERT(*data == 0xF7 || *data == 0xF6);
- byte modrm = *(data + 1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- if (mod == 3 && regop != 0) {
- const char* mnem = NULL;
- switch (regop) {
- case 2:
- mnem = "not";
- break;
- case 3:
- mnem = "neg";
- break;
- case 4:
- mnem = "mul";
- break;
- case 5:
- mnem = "imul";
- break;
- case 7:
- mnem = "idiv";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s%c %s",
- mnem,
- operand_size_code(),
- NameOfCPURegister(rm));
- return 2;
- } else if (regop == 0) {
- AppendToBuffer("test%c ", operand_size_code());
- int count = PrintRightOperand(data + 1); // Use name of 64-bit register.
- AppendToBuffer(",0x");
- count += PrintImmediate(data + 1 + count, operand_size());
- return 1 + count;
- } else {
- UnimplementedInstruction();
- return 2;
- }
-}
-
-
-int DisassemblerX64::ShiftInstruction(byte* data) {
- byte op = *data & (~1);
- if (op != 0xD0 && op != 0xD2 && op != 0xC0) {
- UnimplementedInstruction();
- return 1;
- }
- byte modrm = *(data + 1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- regop &= 0x7; // The REX.R bit does not affect the operation.
- int imm8 = -1;
- int num_bytes = 2;
- if (mod != 3) {
- UnimplementedInstruction();
- return num_bytes;
- }
- const char* mnem = NULL;
- switch (regop) {
- case 0:
- mnem = "rol";
- break;
- case 1:
- mnem = "ror";
- break;
- case 2:
- mnem = "rcl";
- break;
- case 3:
- mnem = "rcr";
- break;
- case 4:
- mnem = "shl";
- break;
- case 5:
- mnem = "shr";
- break;
- case 7:
- mnem = "sar";
- break;
- default:
- UnimplementedInstruction();
- return num_bytes;
- }
- ASSERT_NE(NULL, mnem);
- if (op == 0xD0) {
- imm8 = 1;
- } else if (op == 0xC0) {
- imm8 = *(data + 2);
- num_bytes = 3;
- }
- AppendToBuffer("%s%c %s,",
- mnem,
- operand_size_code(),
- byte_size_operand_ ? NameOfByteCPURegister(rm)
- : NameOfCPURegister(rm));
- if (op == 0xD2) {
- AppendToBuffer("cl");
- } else {
- AppendToBuffer("%d", imm8);
- }
- return num_bytes;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::JumpShort(byte* data) {
- ASSERT_EQ(0xEB, *data);
- byte b = *(data + 1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- AppendToBuffer("jmp %s", NameOfAddress(dest));
- return 2;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::JumpConditional(byte* data) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data + 1) & 0x0F;
- byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
- const char* mnem = conditional_code_suffix[cond];
- AppendToBuffer("j%s %s", mnem, NameOfAddress(dest));
- return 6; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::JumpConditionalShort(byte* data) {
- byte cond = *data & 0x0F;
- byte b = *(data + 1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- const char* mnem = conditional_code_suffix[cond];
- AppendToBuffer("j%s %s", mnem, NameOfAddress(dest));
- return 2;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::SetCC(byte* data) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data + 1) & 0x0F;
- const char* mnem = conditional_code_suffix[cond];
- AppendToBuffer("set%s%c ", mnem, operand_size_code());
- PrintRightByteOperand(data + 2);
- return 3; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::FPUInstruction(byte* data) {
- byte escape_opcode = *data;
- ASSERT_EQ(0xD8, escape_opcode & 0xF8);
- byte modrm_byte = *(data+1);
-
- if (modrm_byte >= 0xC0) {
- return RegisterFPUInstruction(escape_opcode, modrm_byte);
- } else {
- return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
- }
-}
-
-int DisassemblerX64::MemoryFPUInstruction(int escape_opcode,
- int modrm_byte,
- byte* modrm_start) {
- const char* mnem = "?";
- int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
- switch (escape_opcode) {
- case 0xD9: switch (regop) {
- case 0: mnem = "fld_s"; break;
- case 3: mnem = "fstp_s"; break;
- case 7: mnem = "fstcw"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDB: switch (regop) {
- case 0: mnem = "fild_s"; break;
- case 1: mnem = "fisttp_s"; break;
- case 2: mnem = "fist_s"; break;
- case 3: mnem = "fistp_s"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDD: switch (regop) {
- case 0: mnem = "fld_d"; break;
- case 3: mnem = "fstp_d"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDF: switch (regop) {
- case 5: mnem = "fild_d"; break;
- case 7: mnem = "fistp_d"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(modrm_start);
- return count + 1;
-}
-
-int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
- byte modrm_byte) {
- bool has_register = false; // Is the FPU register encoded in modrm_byte?
- const char* mnem = "?";
-
- switch (escape_opcode) {
- case 0xD8:
- UnimplementedInstruction();
- break;
-
- case 0xD9:
- switch (modrm_byte & 0xF8) {
- case 0xC0:
- mnem = "fld";
- has_register = true;
- break;
- case 0xC8:
- mnem = "fxch";
- has_register = true;
- break;
- default:
- switch (modrm_byte) {
- case 0xE0: mnem = "fchs"; break;
- case 0xE1: mnem = "fabs"; break;
- case 0xE3: mnem = "fninit"; break;
- case 0xE4: mnem = "ftst"; break;
- case 0xE8: mnem = "fld1"; break;
- case 0xEB: mnem = "fldpi"; break;
- case 0xED: mnem = "fldln2"; break;
- case 0xEE: mnem = "fldz"; break;
- case 0xF0: mnem = "f2xm1"; break;
- case 0xF1: mnem = "fyl2x"; break;
- case 0xF2: mnem = "fptan"; break;
- case 0xF5: mnem = "fprem1"; break;
- case 0xF7: mnem = "fincstp"; break;
- case 0xF8: mnem = "fprem"; break;
- case 0xFD: mnem = "fscale"; break;
- case 0xFE: mnem = "fsin"; break;
- case 0xFF: mnem = "fcos"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDA:
- if (modrm_byte == 0xE9) {
- mnem = "fucompp";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDB:
- if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomi";
- has_register = true;
- } else if (modrm_byte == 0xE2) {
- mnem = "fclex";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDC:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "fadd"; break;
- case 0xE8: mnem = "fsub"; break;
- case 0xC8: mnem = "fmul"; break;
- case 0xF8: mnem = "fdiv"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDD:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "ffree"; break;
- case 0xD8: mnem = "fstp"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDE:
- if (modrm_byte == 0xD9) {
- mnem = "fcompp";
- } else {
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "faddp"; break;
- case 0xE8: mnem = "fsubp"; break;
- case 0xC8: mnem = "fmulp"; break;
- case 0xF8: mnem = "fdivp"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDF:
- if (modrm_byte == 0xE0) {
- mnem = "fnstsw_ax";
- } else if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomip";
- has_register = true;
- }
- break;
-
- default: UnimplementedInstruction();
- }
-
- if (has_register) {
- AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
- } else {
- AppendToBuffer("%s", mnem);
- }
- return 2;
-}
-
-
-
-// Handle all two-byte opcodes, which start with 0x0F.
-// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
-// We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
-int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
- byte opcode = *(data + 1);
- byte* current = data + 2;
- // At return, "current" points to the start of the next instruction.
- const char* mnemonic = TwoByteMnemonic(opcode);
- if (operand_size_ == 0x66) {
- // 0x66 0x0F prefix.
- int mod, regop, rm;
- if (opcode == 0x3A) {
- byte third_byte = *current;
- current = data + 3;
- if (third_byte == 0x17) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("extractps "); // reg/m32, xmm, imm8
- current += PrintRightOperand(current);
- AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
- current += 1;
- } else if (third_byte == 0x0b) {
- get_modrm(*current, &mod, &regop, &rm);
- // roundsd xmm, xmm/m64, imm8
- AppendToBuffer("roundsd %s, ", NameOfCPURegister(regop));
- current += PrintRightOperand(current);
- AppendToBuffer(", %d", (*current) & 3);
- current += 1;
- } else {
- UnimplementedInstruction();
- }
- } else {
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x1f) {
- current++;
- if (rm == 4) { // SIB byte present.
- current++;
- }
- if (mod == 1) { // Byte displacement.
- current += 1;
- } else if (mod == 2) { // 32-bit displacement.
- current += 4;
- } // else no immediate displacement.
- AppendToBuffer("nop");
- } else if (opcode == 0x28) {
- AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x29) {
- AppendToBuffer("movapd ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
- } else if (opcode == 0x6E) {
- AppendToBuffer("mov%c %s,",
- rex_w() ? 'q' : 'd',
- NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- } else if (opcode == 0x6F) {
- AppendToBuffer("movdqa %s,",
- NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x7E) {
- AppendToBuffer("mov%c ",
- rex_w() ? 'q' : 'd');
- current += PrintRightOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
- } else if (opcode == 0x7F) {
- AppendToBuffer("movdqa ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
- } else if (opcode == 0xD6) {
- AppendToBuffer("movq ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
- } else if (opcode == 0x50) {
- AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- } else {
- const char* mnemonic = "?";
- if (opcode == 0x54) {
- mnemonic = "andpd";
- } else if (opcode == 0x56) {
- mnemonic = "orpd";
- } else if (opcode == 0x57) {
- mnemonic = "xorpd";
- } else if (opcode == 0x2E) {
- mnemonic = "ucomisd";
- } else if (opcode == 0x2F) {
- mnemonic = "comisd";
- } else {
- UnimplementedInstruction();
- }
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- }
- }
- } else if (group_1_prefix_ == 0xF2) {
- // Beginning of instructions with prefix 0xF2.
-
- if (opcode == 0x11 || opcode == 0x10) {
- // MOVSD: Move scalar double-precision fp to/from/between XMM registers.
- AppendToBuffer("movsd ");
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x11) {
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- }
- } else if (opcode == 0x2A) {
- // CVTSI2SD: integer to XMM double conversion.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%sd %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- } else if (opcode == 0x2C) {
- // CVTTSD2SI:
- // Convert with truncation scalar double-precision FP to integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvttsd2si%c %s,",
- operand_size_code(), NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x2D) {
- // CVTSD2SI: Convert scalar double-precision FP to integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvtsd2si%c %s,",
- operand_size_code(), NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- } else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
- // XMM arithmetic. Mnemonic was retrieved at the start of this function.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else {
- UnimplementedInstruction();
- }
- } else if (group_1_prefix_ == 0xF3) {
- // Instructions with prefix 0xF3.
- if (opcode == 0x11 || opcode == 0x10) {
- // MOVSS: Move scalar double-precision fp to/from/between XMM registers.
- AppendToBuffer("movss ");
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x11) {
- current += PrintRightOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- }
- } else if (opcode == 0x2A) {
- // CVTSI2SS: integer to XMM single conversion.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%ss %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- } else if (opcode == 0x2C) {
- // CVTTSS2SI:
- // Convert with truncation scalar single-precision FP to dword integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvttss2si%c %s,",
- operand_size_code(), NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x5A) {
- // CVTSS2SD:
- // Convert scalar single-precision FP to scalar double-precision FP.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x7E) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movq %s, ", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else {
- UnimplementedInstruction();
- }
- } else if (opcode == 0x1F) {
- // NOP
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- current++;
- if (rm == 4) { // SIB byte present.
- current++;
- }
- if (mod == 1) { // Byte displacement.
- current += 1;
- } else if (mod == 2) { // 32-bit displacement.
- current += 4;
- } // else no immediate displacement.
- AppendToBuffer("nop");
-
- } else if (opcode == 0x28) {
- // movaps xmm, xmm/m128
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movaps %s, ", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
-
- } else if (opcode == 0x29) {
- // movaps xmm/m128, xmm
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movaps ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
-
- } else if (opcode == 0xA2 || opcode == 0x31) {
- // RDTSC or CPUID
- AppendToBuffer("%s", mnemonic);
-
- } else if ((opcode & 0xF0) == 0x40) {
- // CMOVcc: conditional move.
- int condition = opcode & 0x0F;
- const InstructionDesc& idesc = cmov_instructions[condition];
- byte_size_operand_ = idesc.byte_size_operation;
- current += PrintOperands(idesc.mnem, idesc.op_order_, current);
-
- } else if (opcode == 0x57) {
- // xorps xmm, xmm/m128
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
-
- } else if (opcode == 0x50) {
- // movmskps reg, xmm
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movmskps %s, ", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
-
- } else if ((opcode & 0xF0) == 0x80) {
- // Jcc: Conditional jump (branch).
- current = data + JumpConditional(data);
-
- } else if (opcode == 0xBE || opcode == 0xBF || opcode == 0xB6 ||
- opcode == 0xB7 || opcode == 0xAF) {
- // Size-extending moves, IMUL.
- current += PrintOperands(mnemonic, REG_OPER_OP_ORDER, current);
-
- } else if ((opcode & 0xF0) == 0x90) {
- // SETcc: Set byte on condition. Needs pointer to beginning of instruction.
- current = data + SetCC(data);
-
- } else if (opcode == 0xAB || opcode == 0xA5 || opcode == 0xAD) {
- // SHLD, SHRD (double-precision shift), BTS (bit set).
- AppendToBuffer("%s ", mnemonic);
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- current += PrintRightOperand(current);
- if (opcode == 0xAB) {
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else {
- AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
- }
- } else {
- UnimplementedInstruction();
- }
- return static_cast<int>(current - data);
-}
-
-
-// Mnemonics for two-byte opcode instructions starting with 0x0F.
-// The argument is the second byte of the two-byte opcode.
-// Returns NULL if the instruction is not handled here.
-const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
- switch (opcode) {
- case 0x1F:
- return "nop";
- case 0x2A: // F2/F3 prefix.
- return "cvtsi2s";
- case 0x31:
- return "rdtsc";
- case 0x51: // F2 prefix.
- return "sqrtsd";
- case 0x58: // F2 prefix.
- return "addsd";
- case 0x59: // F2 prefix.
- return "mulsd";
- case 0x5C: // F2 prefix.
- return "subsd";
- case 0x5E: // F2 prefix.
- return "divsd";
- case 0xA2:
- return "cpuid";
- case 0xA5:
- return "shld";
- case 0xAB:
- return "bts";
- case 0xAD:
- return "shrd";
- case 0xAF:
- return "imul";
- case 0xB6:
- return "movzxb";
- case 0xB7:
- return "movzxw";
- case 0xBE:
- return "movsxb";
- case 0xBF:
- return "movsxw";
- default:
- return NULL;
- }
-}
-
-
-// Disassembles the instruction at instr, and writes it into out_buffer.
-int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
- byte* instr) {
- tmp_buffer_pos_ = 0; // starting to write as position 0
- byte* data = instr;
- bool processed = true; // Will be set to false if the current instruction
- // is not in 'instructions' table.
- byte current;
-
- // Scan for prefixes.
- while (true) {
- current = *data;
- if (current == OPERAND_SIZE_OVERRIDE_PREFIX) { // Group 3 prefix.
- operand_size_ = current;
- } else if ((current & 0xF0) == 0x40) { // REX prefix.
- setRex(current);
- if (rex_w()) AppendToBuffer("REX.W ");
- } else if ((current & 0xFE) == 0xF2) { // Group 1 prefix (0xF2 or 0xF3).
- group_1_prefix_ = current;
- } else { // Not a prefix - an opcode.
- break;
- }
- data++;
- }
-
- const InstructionDesc& idesc = instruction_table_->Get(current);
- byte_size_operand_ = idesc.byte_size_operation;
- switch (idesc.type) {
- case ZERO_OPERANDS_INSTR:
- if (current >= 0xA4 && current <= 0xA7) {
- // String move or compare operations.
- if (group_1_prefix_ == REP_PREFIX) {
- // REP.
- AppendToBuffer("rep ");
- }
- if (rex_w()) AppendToBuffer("REX.W ");
- AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
- } else {
- AppendToBuffer("%s", idesc.mnem, operand_size_code());
- }
- data++;
- break;
-
- case TWO_OPERANDS_INSTR:
- data++;
- data += PrintOperands(idesc.mnem, idesc.op_order_, data);
- break;
-
- case JUMP_CONDITIONAL_SHORT_INSTR:
- data += JumpConditionalShort(data);
- break;
-
- case REGISTER_INSTR:
- AppendToBuffer("%s%c %s",
- idesc.mnem,
- operand_size_code(),
- NameOfCPURegister(base_reg(current & 0x07)));
- data++;
- break;
- case PUSHPOP_INSTR:
- AppendToBuffer("%s %s",
- idesc.mnem,
- NameOfCPURegister(base_reg(current & 0x07)));
- data++;
- break;
- case MOVE_REG_INSTR: {
- byte* addr = NULL;
- switch (operand_size()) {
- case WORD_SIZE:
- addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
- data += 3;
- break;
- case DOUBLEWORD_SIZE:
- addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
- data += 5;
- break;
- case QUADWORD_SIZE:
- addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
- data += 9;
- break;
- default:
- UNREACHABLE();
- }
- AppendToBuffer("mov%c %s,%s",
- operand_size_code(),
- NameOfCPURegister(base_reg(current & 0x07)),
- NameOfAddress(addr));
- break;
- }
-
- case CALL_JUMP_INSTR: {
- byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5;
- AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case SHORT_IMMEDIATE_INSTR: {
- byte* addr =
- reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
- AppendToBuffer("%s rax, %s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case NO_INSTR:
- processed = false;
- break;
-
- default:
- UNIMPLEMENTED(); // This type is not implemented.
- }
-
- // The first byte didn't match any of the simple opcodes, so we
- // need to do special processing on it.
- if (!processed) {
- switch (*data) {
- case 0xC2:
- AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data + 1));
- data += 3;
- break;
-
- case 0x69: // fall through
- case 0x6B: {
- int mod, regop, rm;
- get_modrm(*(data + 1), &mod, &regop, &rm);
- int32_t imm = *data == 0x6B ? *(data + 2)
- : *reinterpret_cast<int32_t*>(data + 2);
- AppendToBuffer("imul%c %s,%s,0x%x",
- operand_size_code(),
- NameOfCPURegister(regop),
- NameOfCPURegister(rm), imm);
- data += 2 + (*data == 0x6B ? 1 : 4);
- break;
- }
-
- case 0x81: // fall through
- case 0x83: // 0x81 with sign extension bit set
- data += PrintImmediateOp(data);
- break;
-
- case 0x0F:
- data += TwoByteOpcodeInstruction(data);
- break;
-
- case 0x8F: {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == 0) {
- AppendToBuffer("pop ");
- data += PrintRightOperand(data);
- }
- }
- break;
-
- case 0xFF: {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
- switch (regop) {
- case 0:
- mnem = "inc";
- break;
- case 1:
- mnem = "dec";
- break;
- case 2:
- mnem = "call";
- break;
- case 4:
- mnem = "jmp";
- break;
- case 6:
- mnem = "push";
- break;
- default:
- mnem = "???";
- }
- AppendToBuffer(((regop <= 1) ? "%s%c " : "%s "),
- mnem,
- operand_size_code());
- data += PrintRightOperand(data);
- }
- break;
-
- case 0xC7: // imm32, fall through
- case 0xC6: // imm8
- {
- bool is_byte = *data == 0xC6;
- data++;
- if (is_byte) {
- AppendToBuffer("movb ");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- } else {
- AppendToBuffer("mov%c ", operand_size_code());
- data += PrintRightOperand(data);
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += 4;
- }
- }
- break;
-
- case 0x80: {
- data++;
- AppendToBuffer("cmpb ");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- }
- break;
-
- case 0x88: // 8bit, fall through
- case 0x89: // 32bit
- {
- bool is_byte = *data == 0x88;
- int mod, regop, rm;
- data++;
- get_modrm(*data, &mod, &regop, &rm);
- if (is_byte) {
- AppendToBuffer("movb ");
- data += PrintRightByteOperand(data);
- AppendToBuffer(",%s", NameOfByteCPURegister(regop));
- } else {
- AppendToBuffer("mov%c ", operand_size_code());
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- }
- }
- break;
-
- case 0x90:
- case 0x91:
- case 0x92:
- case 0x93:
- case 0x94:
- case 0x95:
- case 0x96:
- case 0x97: {
- int reg = (*data & 0x7) | (rex_b() ? 8 : 0);
- if (reg == 0) {
- AppendToBuffer("nop"); // Common name for xchg rax,rax.
- } else {
- AppendToBuffer("xchg%c rax, %s",
- operand_size_code(),
- NameOfCPURegister(reg));
- }
- data++;
- }
- break;
- case 0xB0:
- case 0xB1:
- case 0xB2:
- case 0xB3:
- case 0xB4:
- case 0xB5:
- case 0xB6:
- case 0xB7:
- case 0xB8:
- case 0xB9:
- case 0xBA:
- case 0xBB:
- case 0xBC:
- case 0xBD:
- case 0xBE:
- case 0xBF: {
- // mov reg8,imm8 or mov reg32,imm32
- byte opcode = *data;
- data++;
- bool is_32bit = (opcode >= 0xB8);
- int reg = (opcode & 0x7) | (rex_b() ? 8 : 0);
- if (is_32bit) {
- AppendToBuffer("mov%c %s, ",
- operand_size_code(),
- NameOfCPURegister(reg));
- data += PrintImmediate(data, DOUBLEWORD_SIZE);
- } else {
- AppendToBuffer("movb %s, ",
- NameOfByteCPURegister(reg));
- data += PrintImmediate(data, BYTE_SIZE);
- }
- break;
- }
- case 0xFE: {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == 1) {
- AppendToBuffer("decb ");
- data += PrintRightByteOperand(data);
- } else {
- UnimplementedInstruction();
- }
- break;
- }
- case 0x68:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
- data += 5;
- break;
-
- case 0x6A:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
- data += 2;
- break;
-
- case 0xA1: // Fall through.
- case 0xA3:
- switch (operand_size()) {
- case DOUBLEWORD_SIZE: {
- const char* memory_location = NameOfAddress(
- reinterpret_cast<byte*>(
- *reinterpret_cast<int32_t*>(data + 1)));
- if (*data == 0xA1) { // Opcode 0xA1
- AppendToBuffer("movzxlq rax,(%s)", memory_location);
- } else { // Opcode 0xA3
- AppendToBuffer("movzxlq (%s),rax", memory_location);
- }
- data += 5;
- break;
- }
- case QUADWORD_SIZE: {
- // New x64 instruction mov rax,(imm_64).
- const char* memory_location = NameOfAddress(
- *reinterpret_cast<byte**>(data + 1));
- if (*data == 0xA1) { // Opcode 0xA1
- AppendToBuffer("movq rax,(%s)", memory_location);
- } else { // Opcode 0xA3
- AppendToBuffer("movq (%s),rax", memory_location);
- }
- data += 9;
- break;
- }
- default:
- UnimplementedInstruction();
- data += 2;
- }
- break;
-
- case 0xA8:
- AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
- data += 2;
- break;
-
- case 0xA9: {
- int64_t value = 0;
- switch (operand_size()) {
- case WORD_SIZE:
- value = *reinterpret_cast<uint16_t*>(data + 1);
- data += 3;
- break;
- case DOUBLEWORD_SIZE:
- value = *reinterpret_cast<uint32_t*>(data + 1);
- data += 5;
- break;
- case QUADWORD_SIZE:
- value = *reinterpret_cast<int32_t*>(data + 1);
- data += 5;
- break;
- default:
- UNREACHABLE();
- }
- AppendToBuffer("test%c rax,0x%" V8_PTR_PREFIX "x",
- operand_size_code(),
- value);
- break;
- }
- case 0xD1: // fall through
- case 0xD3: // fall through
- case 0xC1:
- data += ShiftInstruction(data);
- break;
- case 0xD0: // fall through
- case 0xD2: // fall through
- case 0xC0:
- byte_size_operand_ = true;
- data += ShiftInstruction(data);
- break;
-
- case 0xD9: // fall through
- case 0xDA: // fall through
- case 0xDB: // fall through
- case 0xDC: // fall through
- case 0xDD: // fall through
- case 0xDE: // fall through
- case 0xDF:
- data += FPUInstruction(data);
- break;
-
- case 0xEB:
- data += JumpShort(data);
- break;
-
- case 0xF6:
- byte_size_operand_ = true; // fall through
- case 0xF7:
- data += F6F7Instruction(data);
- break;
-
- case 0x3C:
- AppendToBuffer("cmp al, 0x%x", *reinterpret_cast<int8_t*>(data + 1));
- data +=2;
- break;
-
- default:
- UnimplementedInstruction();
- data += 1;
- }
- } // !processed
-
- if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
- tmp_buffer_[tmp_buffer_pos_] = '\0';
- }
-
- int instr_len = static_cast<int>(data - instr);
- ASSERT(instr_len > 0); // Ensure progress.
-
- int outp = 0;
- // Instruction bytes.
- for (byte* bp = instr; bp < data; bp++) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, "%02x", *bp);
- }
- for (int i = 6 - instr_len; i >= 0; i--) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, " ");
- }
-
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, " %s",
- tmp_buffer_.start());
- return instr_len;
-}
-
-//------------------------------------------------------------------------------
-
-
-static const char* cpu_regs[16] = {
- "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
-};
-
-
-static const char* byte_cpu_regs[16] = {
- "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
- "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l"
-};
-
-
-static const char* xmm_regs[16] = {
- "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
- "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
-};
-
-
-const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfConstant(byte* addr) const {
- return NameOfAddress(addr);
-}
-
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
- if (0 <= reg && reg < 16)
- return cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
- if (0 <= reg && reg < 16)
- return byte_cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
- if (0 <= reg && reg < 16)
- return xmm_regs[reg];
- return "noxmmreg";
-}
-
-
-const char* NameConverter::NameInCode(byte* addr) const {
- // X64 does not embed debug strings at the moment.
- UNREACHABLE();
- return "";
-}
-
-//------------------------------------------------------------------------------
-
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) { }
-
-Disassembler::~Disassembler() { }
-
-
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte* instruction) {
- DisassemblerX64 d(converter_, CONTINUE_ON_UNIMPLEMENTED_OPCODE);
- return d.InstructionDecode(buffer, instruction);
-}
-
-
-// The X64 assembler does not use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte* instruction) {
- return -1;
-}
-
-
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
- NameConverter converter;
- Disassembler d(converter);
- for (byte* pc = begin; pc < end;) {
- v8::internal::EmbeddedVector<char, 128> buffer;
- buffer[0] = '\0';
- byte* prev_pc = pc;
- pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p", prev_pc);
- fprintf(f, " ");
-
- for (byte* bp = prev_pc; bp < pc; bp++) {
- fprintf(f, "%02x", *bp);
- }
- for (int i = 6 - static_cast<int>(pc - prev_pc); i >= 0; i--) {
- fprintf(f, " ");
- }
- fprintf(f, " %s\n", buffer.start());
- }
-}
-
-} // namespace disasm
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/frames-x64.cc b/src/3rdparty/v8/src/x64/frames-x64.cc
deleted file mode 100644
index 6c58bc9..0000000
--- a/src/3rdparty/v8/src/x64/frames-x64.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/frames-x64.h b/src/3rdparty/v8/src/x64/frames-x64.h
deleted file mode 100644
index c9092af..0000000
--- a/src/3rdparty/v8/src/x64/frames-x64.h
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_FRAMES_X64_H_
-#define V8_X64_FRAMES_X64_H_
-
-namespace v8 {
-namespace internal {
-
-const int kNumRegs = 16;
-const RegList kJSCallerSaved =
- 1 << 0 | // rax
- 1 << 1 | // rcx
- 1 << 2 | // rdx
- 1 << 3 | // rbx - used as a caller-saved register in JavaScript code
- 1 << 7; // rdi - callee function
-
-const int kNumJSCallerSaved = 5;
-
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
-// Number of registers for which space is reserved in safepoints.
-const int kNumSafepointRegisters = 16;
-
-// ----------------------------------------------------
-
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
-class EntryFrameConstants : public AllStatic {
- public:
-#ifdef _WIN64
- static const int kCallerFPOffset = -10 * kPointerSize;
-#else
- static const int kCallerFPOffset = -8 * kPointerSize;
-#endif
- static const int kArgvOffset = 6 * kPointerSize;
-};
-
-
-class ExitFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
-
- static const int kCallerFPOffset = +0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
-
- // FP-relative displacement of the caller's SP. It points just
- // below the saved PC.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
-};
-
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
-
- // Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
-};
-
-
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_X64_FRAMES_X64_H_
diff --git a/src/3rdparty/v8/src/x64/full-codegen-x64.cc b/src/3rdparty/v8/src/x64/full-codegen-x64.cc
deleted file mode 100644
index e236ce1..0000000
--- a/src/3rdparty/v8/src/x64/full-codegen-x64.cc
+++ /dev/null
@@ -1,4594 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- ASSERT(patch_site_.is_bound() == info_emitted_);
- }
-
- void EmitJumpIfNotSmi(Register reg,
- Label* target,
- Label::Distance near_jump = Label::kFar) {
- __ testb(reg, Immediate(kSmiTagMask));
- EmitJump(not_carry, target, near_jump); // Always taken before patched.
- }
-
- void EmitJumpIfSmi(Register reg,
- Label* target,
- Label::Distance near_jump = Label::kFar) {
- __ testb(reg, Immediate(kSmiTagMask));
- EmitJump(carry, target, near_jump); // Never taken before patched.
- }
-
- void EmitPatchInfo() {
- if (patch_site_.is_bound()) {
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
- __ testl(rax, Immediate(delta_to_patch_site));
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- } else {
- __ nop(); // Signals no inlined code.
- }
- }
-
- private:
- // jc will be patched with jz, jnc will become jnz.
- void EmitJump(Condition cc, Label* target, Label::Distance near_jump) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- ASSERT(cc == carry || cc == not_carry);
- __ bind(&patch_site_);
- __ j(cc, target, near_jump);
- }
-
- MacroAssembler* masm_;
- Label patch_site_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
-
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right, with the
-// return address on top of them. The actual argument count matches the
-// formal parameter count expected by the function.
-//
-// The live registers are:
-// o rdi: the JS function object being called (i.e. ourselves)
-// o rsi: our context
-// o rbp: our caller's frame pointer
-// o rsp: stack pointer (pointing to return address)
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-x64.h for its layout.
-void FullCodeGenerator::Generate() {
- CompilationInfo* info = info_;
- handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
- Comment cmnt(masm_, "[ function compiled by full code generator");
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). rcx is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
- Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, receiver_offset), kScratchRegister);
- __ bind(&ok);
- }
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- info->set_prologue_offset(masm_->pc_offset());
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = info->scope()->num_stack_slots();
- if (locals_count == 1) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- } else if (locals_count > 1) {
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(rdx);
- }
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment cmnt(masm_, "[ Allocate context");
- // Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
- if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
- __ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- function_in_register = false;
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
-
- // Copy any necessary parameters into the context.
- int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(var->index());
- __ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers rax and rbx.
- __ RecordWriteContextSlot(
- rsi, context_offset, rax, rbx, kDontSaveFPRegs);
- }
- }
- }
-
- // Possibly allocate an arguments object.
- Variable* arguments = scope()->arguments();
- if (arguments != NULL) {
- // Arguments object must be allocated after the context object, in
- // case the "arguments" or ".arguments" variables are in the context.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(rdi);
- } else {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
- // The receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ lea(rdx,
- Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(rdx);
- __ Push(Smi::FromInt(num_parameters));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
- }
- ArgumentsAccessStub stub(type);
- __ CallStub(&stub);
-
- SetVar(arguments, rax, rbx, rdx);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ Declarations");
- scope()->VisitIllegalRedeclaration(this);
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
- ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
- VisitVariableDeclaration(function);
- }
- VisitDeclarations(scope()->declarations());
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
- }
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- EmitReturnSequence();
- }
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- __ Set(rax, 0);
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
- __ SmiAddConstant(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- Smi::FromInt(-delta));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
- int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing; if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
- RelocInfo::NONE64);
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- kScratchRegister);
-}
-
-
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Back edge bookkeeping");
- Label ok;
-
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
-
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
-
- // Loop stack checks can be patched to perform on-stack replacement. In
- // order to decide whether or not to perform OSR we embed the loop depth
- // in a test instruction after the call so we can extract it from the OSR
- // builtin.
- ASSERT(loop_depth() > 0);
- __ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
-
- EmitProfilingCounterReset();
-
- __ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- } else {
- __ bind(&return_label_);
- if (FLAG_trace) {
- __ push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ j(positive, &ok, Label::kNear);
- __ push(rax);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- InterruptStub stub;
- __ CallStub(&stub);
- }
- __ pop(rax);
- EmitProfilingCounterReset();
- __ bind(&ok);
- }
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
- __ RecordJSReturn();
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ movq(rsp, rbp);
- __ pop(rbp);
-
- int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
- __ Ret(arguments_bytes, rcx);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint. We
- // have just generated at least 7 bytes: "movq rsp, rbp; pop rbp; ret k"
- // (3 + 1 + 3).
- const int kPadding = Assembler::kJSReturnSequenceLength - 7;
- for (int i = 0; i < kPadding; ++i) {
- masm_->int3();
- }
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- MemOperand operand = codegen()->VarOperand(var, result_register());
- __ push(operand);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- __ PushRoot(index);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (index == Heap::kUndefinedValueRootIndex ||
- index == Heap::kNullValueRootIndex ||
- index == Heap::kFalseValueRootIndex) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else if (index == Heap::kTrueValueRootIndex) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else {
- __ LoadRoot(result_register(), index);
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- if (lit->IsSmi()) {
- __ SafeMove(result_register(), Smi::cast(*lit));
- } else {
- __ Move(result_register(), lit);
- }
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- if (lit->IsSmi()) {
- __ SafePush(Smi::cast(*lit));
- } else {
- __ Push(lit);
- }
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ Move(result_register(), lit);
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- if (count > 1) __ Drop(count - 1);
- __ movq(Operand(rsp, 0), reg);
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == materialize_false);
- __ bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ Move(result_register(), isolate()->factory()->true_value());
- __ jmp(&done, Label::kNear);
- __ bind(materialize_false);
- __ Move(result_register(), isolate()->factory()->false_value());
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ Push(isolate()->factory()->true_value());
- __ jmp(&done, Label::kNear);
- __ bind(materialize_false);
- __ Push(isolate()->factory()->false_value());
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == true_label_);
- ASSERT(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(result_register(), value_root_index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ PushRoot(value_root_index);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- }
-}
-
-
-void FullCodeGenerator::DoTest(Expression* condition,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- ToBooleanStub stub(result_register());
- __ push(result_register());
- __ CallStub(&stub, condition->test_id());
- __ testq(result_register(), result_register());
- // The stub returns nonzero for true.
- Split(not_zero, if_true, if_false, fall_through);
-}
-
-
-void FullCodeGenerator::Split(Condition cc,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ j(cc, if_true);
- } else if (if_true == fall_through) {
- __ j(NegateCondition(cc), if_false);
- } else {
- __ j(cc, if_true);
- __ jmp(if_false);
- }
-}
-
-
-MemOperand FullCodeGenerator::StackOperand(Variable* var) {
- ASSERT(var->IsStackAllocated());
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -var->index() * kPointerSize;
- // Adjust by a (parameter or local) base offset.
- if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
- } else {
- offset += JavaScriptFrameConstants::kLocal0Offset;
- }
- return Operand(rbp, offset);
-}
-
-
-MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- if (var->IsContextSlot()) {
- int context_chain_length = scope()->ContextChainLength(var->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
- } else {
- return StackOperand(var);
- }
-}
-
-
-void FullCodeGenerator::GetVar(Register dest, Variable* var) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- MemOperand location = VarOperand(var, dest);
- __ movq(dest, location);
-}
-
-
-void FullCodeGenerator::SetVar(Variable* var,
- Register src,
- Register scratch0,
- Register scratch1) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- ASSERT(!scratch0.is(src));
- ASSERT(!scratch0.is(scratch1));
- ASSERT(!scratch1.is(src));
- MemOperand location = VarOperand(var, scratch0);
- __ movq(location, src);
-
- // Emit the write barrier code if the location is in the heap.
- if (var->IsContextSlot()) {
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
-
- Label skip;
- if (should_normalize) __ jmp(&skip, Label::kNear);
- PrepareForBailout(expr, TOS_REG);
- if (should_normalize) {
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- Split(equal, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
-void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current context.
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
- // Check that we're not inside a with or catch context.
- __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
- __ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
- __ Check(not_equal, "Declaration in with context.");
- __ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
- __ Check(not_equal, "Declaration in catch context.");
- }
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(
- VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
- VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
- Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- break;
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(StackOperand(variable), kScratchRegister);
- }
- break;
-
- case Variable::CONTEXT:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
- // No write barrier since the hole value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- }
- break;
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ push(rsi);
- __ Push(variable->name());
- // Declaration nodes are always introduced in one of four modes.
- ASSERT(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ Push(Smi::FromInt(attr));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- } else {
- __ Push(Smi::FromInt(0)); // Indicates no initial value.
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitFunctionDeclaration(
- FunctionDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script());
- // Check for stack-overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals_->Add(function, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- VisitForAccumulatorValue(declaration->fun());
- __ movq(StackOperand(variable), result_register());
- break;
- }
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- VisitForAccumulatorValue(declaration->fun());
- __ movq(ContextOperand(rsi, variable->index()), result_register());
- int offset = Context::SlotOffset(variable->index());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(rsi,
- offset,
- result_register(),
- rcx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- break;
- }
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(rsi);
- __ Push(variable->name());
- __ Push(Smi::FromInt(NONE));
- VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
-
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
-
- // Load instance object.
- __ LoadContext(rax, scope_->ContextChainLength(scope_->GlobalScope()));
- __ movq(rax, ContextOperand(rax, variable->interface()->Index()));
- __ movq(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
-
- // Assign it.
- __ movq(ContextOperand(rsi, variable->index()), rax);
- // We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(rsi,
- Context::SlotOffset(variable->index()),
- rax,
- rcx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
-
- // Traverse into body.
- Visit(declaration->module());
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- __ push(rsi); // The context is the first argument.
- __ Push(pairs);
- __ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- Comment cmnt(masm_, "[ SwitchStatement");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- // Keep the switch value on the stack until a case matches.
- VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- CaseClause* default_clause = NULL; // Can occur anywhere in the list.
-
- Label next_test; // Recycled for each test.
- // Compile all the tests with branches to their bodies.
- for (int i = 0; i < clauses->length(); i++) {
- CaseClause* clause = clauses->at(i);
- clause->body_target()->Unuse();
-
- // The default is not a test, but remember it as final fall through.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- __ bind(&next_test);
- next_test.Unuse();
-
- // Compile the label expression.
- VisitForAccumulatorValue(clause->label());
-
- // Perform the comparison as if via '==='.
- __ movq(rdx, Operand(rsp, 0)); // Switch value.
- bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ movq(rcx, rdx);
- __ or_(rcx, rax);
- patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
-
- __ cmpq(rdx, rax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target());
- __ bind(&slow_case);
- }
-
- // Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
- patch_site.EmitPatchInfo();
-
- __ testq(rax, rax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target());
- }
-
- // Discard the test value and jump to the default if present, otherwise to
- // the end of the statement.
- __ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
- if (default_clause == NULL) {
- __ jmp(nested_statement.break_label());
- } else {
- __ jmp(default_clause->body_target());
- }
-
- // Compile all the case bodies.
- for (int i = 0; i < clauses->length(); i++) {
- Comment cmnt(masm_, "[ Case body");
- CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
- VisitStatements(clause->statements());
- }
-
- __ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- Comment cmnt(masm_, "[ ForInStatement");
- SetStatementPosition(stmt);
-
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
- VisitForAccumulatorValue(stmt->enumerable());
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(equal, &exit);
- Register null_value = rdi;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
- __ j(equal, &exit);
-
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
- // Convert the object to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(rax, &convert);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &done_convert);
- __ bind(&convert);
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ bind(&done_convert);
- __ push(rax);
-
- // Check for proxies.
- Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
- __ j(below_equal, &call_runtime);
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- __ CheckEnumCache(null_value, &call_runtime);
-
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- Label use_cache;
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- __ jmp(&use_cache, Label::kNear);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(rax); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- Label fixed_array;
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kMetaMapRootIndex);
- __ j(not_equal, &fixed_array);
-
- // We got a map in register rax. Get the enumeration cache from it.
- __ bind(&use_cache);
-
- Label no_descriptors;
-
- __ EnumLength(rdx, rax);
- __ Cmp(rdx, Smi::FromInt(0));
- __ j(equal, &no_descriptors);
-
- __ LoadInstanceDescriptors(rax, rcx);
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- // Set up the four remaining stack slots.
- __ push(rax); // Map.
- __ push(rcx); // Enumeration cache.
- __ push(rdx); // Number of valid entries for the map in the enum cache.
- __ Push(Smi::FromInt(0)); // Initial index.
- __ jmp(&loop);
-
- __ bind(&no_descriptors);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&exit);
-
- // We got a fixed array in register rax. Iterate through that.
- Label non_proxy;
- __ bind(&fixed_array);
-
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(rbx, cell);
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
-
- __ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
- __ movq(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
- __ j(above, &non_proxy);
- __ Move(rbx, Smi::FromInt(0)); // Zero indicates proxy
- __ bind(&non_proxy);
- __ push(rbx); // Smi
- __ push(rax); // Array
- __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- __ push(rax); // Fixed array length (as smi).
- __ Push(Smi::FromInt(0)); // Initial index.
-
- // Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- __ bind(&loop);
- __ movq(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
- __ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
- __ j(above_equal, loop_statement.break_label());
-
- // Get the current entry of the array into register rbx.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
- SmiIndex index = masm()->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rbx,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
-
- // Get the expected map from the stack or a smi in the
- // permanent slow case into register rdx.
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
-
- // Check if the expected map still matches that of the enumerable.
- // If not, we may have to filter the key.
- Label update_each;
- __ movq(rcx, Operand(rsp, 4 * kPointerSize));
- __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ j(equal, &update_each, Label::kNear);
-
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- __ Cmp(rdx, Smi::FromInt(0));
- __ j(equal, &update_each, Label::kNear);
-
- // Convert the entry to a string or null if it isn't a property
- // anymore. If the property has been removed while iterating, we
- // just skip it.
- __ push(rcx); // Enumerable.
- __ push(rbx); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ Cmp(rax, Smi::FromInt(0));
- __ j(equal, loop_statement.continue_label());
- __ movq(rbx, rax);
-
- // Update the 'each' property or variable from the possibly filtered
- // entry in register rbx.
- __ bind(&update_each);
- __ movq(result_register(), rbx);
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitAssignment(stmt->each());
- }
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Generate code for going to the next element by incrementing the
- // index (smi) stored on top of the stack.
- __ bind(loop_statement.continue_label());
- __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
-
- EmitBackEdgeBookkeeping(stmt, &loop);
- __ jmp(&loop);
-
- // Remove the pointers stored on the stack.
- __ bind(loop_statement.break_label());
- __ addq(rsp, Immediate(5 * kPointerSize));
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(&exit);
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
- __ Push(info);
- __ CallStub(&stub);
- } else {
- __ push(rsi);
- __ Push(info);
- __ Push(pretenure
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value());
- __ CallRuntime(Runtime::kNewClosure, 3);
- }
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
- TypeofState typeof_state,
- Label* slow) {
- Register context = rsi;
- Register temp = rdx;
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
- }
- // Load next context in chain.
- __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering rsi.
- context = temp;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s != NULL && s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(temp)) {
- __ movq(temp, context);
- }
- // Load map for comparison into register, outside loop.
- __ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
- __ bind(&next);
- // Terminate at native context.
- __ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
- __ j(equal, &fast, Label::kNear);
- // Check that extension is NULL.
- __ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
- // Load next context in chain.
- __ movq(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
- __ jmp(&next);
- __ bind(&fast);
- }
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- __ movq(rax, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ Move(rcx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- CallIC(ic, mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- ASSERT(var->IsContextSlot());
- Register context = rsi;
- Register temp = rbx;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
- }
- __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering rsi.
- context = temp;
- }
- }
- // Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return an rsi-based operand (the write barrier cannot be allowed to
- // destroy the rsi register).
- return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
- __ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, done);
- if (local->mode() == CONST) {
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- } else { // LET || CONST_HARMONY
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- }
- }
- __ jmp(done);
- }
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
- // Record position before possible IC call.
- SetSourcePosition(proxy->position());
- Variable* var = proxy->var();
-
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
- switch (var->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in rcx and the global
- // object on the stack.
- __ Move(rcx, var->name());
- __ movq(rax, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- context()->Plug(rax);
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP.
- // always holds.
- ASSERT(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else {
- // Check that we always have valid source position.
- ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
- ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- Label done;
- GetVar(rax, var);
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else {
- // Uninitalized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- }
- __ bind(&done);
- context()->Plug(rax);
- break;
- }
- }
- context()->Plug(var);
- break;
- }
-
- case Variable::LOOKUP: {
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
- __ bind(&slow);
- Comment cmnt(masm_, "Lookup slot");
- __ push(rsi); // Context.
- __ Push(var->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ bind(&done);
- context()->Plug(rax);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // rdi = JS function.
- // rcx = literals array.
- // rbx = regexp literal.
- // rax = regexp literal clone.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ movq(rbx, FieldOperand(rcx, literal_offset));
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in rax.
- __ push(rcx);
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->pattern());
- __ Push(expr->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ movq(rbx, rax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(rbx);
- __ Push(Smi::FromInt(size));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(rbx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
- }
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
- if (expression == NULL) {
- __ PushRoot(Heap::kNullValueRootIndex);
- } else {
- VisitForStackValue(expression);
- }
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
- int flags = expr->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= expr->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_properties);
- __ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_properties);
- __ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- } else {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ Move(rbx, Smi::FromInt(expr->literal_index()));
- __ Move(rcx, constant_properties);
- __ Move(rdx, Smi::FromInt(flags));
- FastCloneShallowObjectStub stub(properties_count);
- __ CallStub(&stub);
- }
-
- // If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in rax.
- bool result_saved = false;
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
- AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(rax); // Save result on the stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsInternalizedString()) {
- if (property->emit_store()) {
- VisitForAccumulatorValue(value);
- __ Move(rcx, key->handle());
- __ movq(rdx, Operand(rsp, 0));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
- } else {
- VisitForEffect(value);
- }
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- __ push(Operand(rsp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- VisitForStackValue(value);
- if (property->emit_store()) {
- __ Push(Smi::FromInt(NONE)); // PropertyAttributes
- __ CallRuntime(Runtime::kSetProperty, 4);
- } else {
- __ Drop(3);
- }
- break;
- case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
- break;
- case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
- break;
- }
- }
-
- // Emit code to define accessors, using only a single call to the runtime for
- // each pair of corresponding getters and setters.
- for (AccessorTable::Iterator it = accessor_table.begin();
- it != accessor_table.end();
- ++it) {
- __ push(Operand(rsp, 0)); // Duplicate receiver.
- VisitForStackValue(it->first);
- EmitAccessor(it->second->getter);
- EmitAccessor(it->second->setter);
- __ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
- }
-
- if (expr->has_function()) {
- ASSERT(result_saved);
- __ push(Operand(rsp, 0));
- __ CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(rax);
- }
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
- Handle<FixedArray> constant_elements = expr->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_constant_fast_elements =
- IsFastObjectElementsKind(constant_elements_kind);
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(constant_elements->get(1)));
-
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_elements);
- Heap* heap = isolate()->heap();
- if (has_constant_fast_elements &&
- constant_elements_values->map() == heap->fixed_cow_array_map()) {
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
- length);
- __ CallStub(&stub);
- } else if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
-
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- if (has_constant_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
-
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- __ CallStub(&stub);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(rax);
- result_saved = true;
- }
- VisitForAccumulatorValue(subexpr);
-
- if (IsFastObjectElementsKind(constant_elements_kind)) {
- // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
- // cannot transition and don't need to call the runtime stub.
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- // Store the subexpression value in the array's elements.
- __ movq(FieldOperand(rbx, offset), result_register());
- // Update the write barrier for the array store.
- __ RecordWriteField(rbx, offset, result_register(), rcx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- } else {
- // Store the subexpression value in the array's elements.
- __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
- __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
- __ Move(rcx, Smi::FromInt(i));
- __ Move(rdx, Smi::FromInt(expr->literal_index()));
- StoreArrayLiteralElementStub stub;
- __ CallStub(&stub);
- }
-
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(rax);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* property = expr->target()->AsProperty();
- if (property != NULL) {
- assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in the accumulator.
- VisitForAccumulatorValue(property->obj());
- __ push(result_register());
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case KEYED_PROPERTY: {
- if (expr->is_compound()) {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- __ movq(rdx, Operand(rsp, 0));
- __ push(rax);
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- break;
- }
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- { AccumulatorValueContext context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- __ push(rax); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
- SetSourcePosition(expr->position() + 1);
- AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- mode,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
- }
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(rax);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- __ Move(rcx, key->handle());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left,
- Expression* right) {
- // Do combined smi check of the operands. Left operand is on the
- // stack (popped into rdx). Right operand is in rax but moved into
- // rcx to make the shifts easier.
- Label done, stub_call, smi_case;
- __ pop(rdx);
- __ movq(rcx, rax);
- __ or_(rax, rdx);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
-
- __ bind(&stub_call);
- __ movq(rax, rcx);
- BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done, Label::kNear);
-
- __ bind(&smi_case);
- switch (op) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(rax, rdx, rcx);
- break;
- case Token::SHL:
- __ SmiShiftLeft(rax, rdx, rcx);
- break;
- case Token::SHR:
- __ SmiShiftLogicalRight(rax, rdx, rcx, &stub_call);
- break;
- case Token::ADD:
- __ SmiAdd(rax, rdx, rcx, &stub_call);
- break;
- case Token::SUB:
- __ SmiSub(rax, rdx, rcx, &stub_call);
- break;
- case Token::MUL:
- __ SmiMul(rax, rdx, rcx, &stub_call);
- break;
- case Token::BIT_OR:
- __ SmiOr(rax, rdx, rcx);
- break;
- case Token::BIT_AND:
- __ SmiAnd(rax, rdx, rcx);
- break;
- case Token::BIT_XOR:
- __ SmiXor(rax, rdx, rcx);
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
- __ pop(rdx);
- BinaryOpStub stub(op, mode);
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
- break;
- }
- case NAMED_PROPERTY: {
- __ push(rax); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- __ movq(rdx, rax);
- __ pop(rax); // Restore value.
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- case KEYED_PROPERTY: {
- __ push(rax); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ movq(rcx, rax);
- __ pop(rdx);
- __ pop(rax); // Restore value.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- }
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- if (var->IsUnallocated()) {
- // Global var, const, or let.
- __ Move(rcx, var->name());
- __ movq(rdx, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else if (op == Token::INIT_CONST) {
- // Const initializers need a write barrier.
- ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ movq(rdx, StackOperand(var));
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &skip);
- __ movq(StackOperand(var), rax);
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(rax);
- __ push(rsi);
- __ Push(var->name());
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- }
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
- // Non-initializing assignment to let variable needs a write barrier.
- if (var->IsLookupSlot()) {
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, rcx);
- __ movq(rdx, location);
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &assign, Label::kNear);
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&assign);
- __ movq(location, rax);
- if (var->IsContextSlot()) {
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
- }
-
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
- // Assignment to var or initializing assignment to let/const
- // in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
- MemOperand location = VarOperand(var, rcx);
- if (generate_debug_code_ && op == Token::INIT_LET) {
- // Check for an uninitialized let binding.
- __ movq(rdx, location);
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ Check(equal, "Let binding re-initialization.");
- }
- // Perform the assignment.
- __ movq(location, rax);
- if (var->IsContextSlot()) {
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- }
- // Non-initializing assignments to consts are ignored.
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
-
- __ pop(rcx);
- __ pop(rdx);
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- EmitNamedPropertyLoad(expr);
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(rax);
- } else {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ pop(rdx);
- EmitKeyedPropertyLoad(expr);
- context()->Plug(rax);
- }
-}
-
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- __ call(code, rmode, ast_id);
-}
-
-
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- __ Move(rcx, name);
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(rcx);
- __ push(rax);
- __ push(rcx);
-
- // Load the arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, rax); // Drop the key still on the stack.
-}
-
-
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
-
- // Record call targets in unoptimized code.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ Move(rbx, cell);
-
- CallFunctionStub stub(arg_count, flags);
- __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- context()->DropAndPlug(1, rax);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ push(Operand(rsp, arg_count * kPointerSize));
- } else {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- }
-
- // Push the receiver of the enclosing function and do runtime call.
- __ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
-
- // Push the language mode.
- __ Push(Smi::FromInt(language_mode()));
-
- // Push the start position of the scope the calls resides in.
- __ Push(Smi::FromInt(scope()->start_position()));
-
- // Push the qml mode flag
- __ Push(Smi::FromInt(is_qml_mode()));
-
- // Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
-
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- __ PushRoot(Heap::kUndefinedValueRootIndex); // Reserved receiver slot.
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and resolve
- // eval.
- __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
-
- // The runtime call returns a pair of values in rax (function) and
- // rdx (receiver). Touch up the stack with the right values.
- __ movq(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
- __ movq(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
- __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, rax);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Call to a global variable. Push global object as receiver for the
- // call IC lookup.
- __ push(proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- // Call to a lookup slot (dynamically introduced variable).
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
- }
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in rax) and
- // the object holding it (returned in rdx).
- __ push(context_register());
- __ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ push(rax); // Function.
- __ push(rdx); // Receiver.
-
- // If fast case code has been generated, emit code to push the function
- // and receiver and have the slow path jump around this code.
- if (done.is_linked()) {
- Label call;
- __ jmp(&call, Label::kNear);
- __ bind(&done);
- // Push function.
- __ push(rax);
- // The receiver is implicitly the global receiver. Indicate this by
- // passing the hole to the call function stub.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found by
- // LoadContextSlot. That object could be the hole if the receiver is
- // implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
- RelocInfo::CODE_TARGET);
- } else {
- EmitKeyedCallWithIC(expr, property->key());
- }
- } else {
- // Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- }
- // Load global receiver object.
- __ movq(rbx, GlobalObjectOperand());
- __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
- // Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
- }
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- ASSERT(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function and argument count into rdi and rax.
- __ Set(rax, arg_count);
- __ movq(rdi, Operand(rsp, arg_count * kPointerSize));
-
- // Record call targets in unoptimized code, but not in the snapshot.
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ Move(rbx, cell);
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ JumpIfSmi(rax, if_true);
- __ jmp(if_false);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Condition non_negative_smi = masm()->CheckNonNegativeSmi(rax);
- Split(non_negative_smi, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- __ j(equal, if_true);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- __ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(below, if_false);
- __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(below_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(above_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(not_zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(rax);
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(not_zero, if_true);
-
- // Check for fast case object. Generate false result for slow case object.
- __ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
- __ j(equal, if_false);
-
- // Look for valueOf string in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(rcx, rbx);
- __ cmpq(rcx, Immediate(0));
- __ j(equal, &done);
-
- __ LoadInstanceDescriptors(rbx, rbx);
- // rbx: descriptor array.
- // rcx: valid entries in the descriptor array.
- // Calculate the end of the descriptor array.
- __ imul(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
- SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
- __ lea(rcx,
- Operand(
- rbx, index.reg, index.scale, DescriptorArray::kFirstOffset));
- // Calculate location of the first key name.
- __ addq(rbx, Immediate(DescriptorArray::kFirstOffset));
- // Loop through all the keys in the descriptor array. If one of these is the
- // internalized string "valueOf" the result is false.
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(rdx, FieldOperand(rbx, 0));
- __ Cmp(rdx, FACTORY->value_of_string());
- __ j(equal, if_false);
- __ addq(rbx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ cmpq(rbx, rcx);
- __ j(not_equal, &loop);
-
- __ bind(&done);
- // Reload map as register rbx was used as temporary above.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
-
- // If a valueOf property is not found on the object check that its
- // prototype is the un-modified String prototype. If not result is false.
- __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ testq(rcx, Immediate(kSmiTagMask));
- __ j(zero, if_false);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ cmpq(rcx,
- ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, if_false);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ jmp(if_true);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, SYMBOL_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movq(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(rbx);
- __ cmpq(rax, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in rdx and the formal
- // parameter count in rax.
- VisitForAccumulatorValue(args->at(0));
- __ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label exit;
- // Get the number of formal parameters.
- __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &exit, Label::kNear);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- __ AssertSmi(rax);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is a smi, we return null.
- __ JumpIfSmi(rax, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
- // Map is now in rax.
- __ j(below, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ j(equal, &function);
-
- __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ j(equal, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
- // Check if the constructor in the map is a JS function.
- __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &non_function_constructor);
-
- // rax now contains the constructor function. Grab the
- // instance class name from there.
- __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
- __ jmp(&done);
-
- // Functions have class 'Function'.
- __ bind(&function);
- __ Move(rax, isolate()->factory()->function_class_string());
- __ jmp(&done);
-
- // Objects with a non-function constructor have class 'Object'.
- __ bind(&non_function_constructor);
- __ Move(rax, isolate()->factory()->Object_string());
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ LoadRoot(rax, Heap::kNullValueRootIndex);
-
- // All done.
- __ bind(&done);
-
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rbx, rax);
-
- __ bind(&heapnumber_allocated);
-
- // Return a random uint32 number in rax.
- // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
- __ PrepareCallCFunction(1);
-#ifdef _WIN64
- __ movq(rcx,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
-
-#else
- __ movq(rdi,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
-#endif
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, rcx);
- __ movd(xmm0, rax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorps(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-
- __ movq(rax, rbx);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(rax, &done);
- // If the object is not a value type, return the object.
- __ CmpObjectType(rax, JS_VALUE_TYPE, rbx);
- __ j(not_equal, &done);
- __ movq(rax, FieldOperand(rax, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label runtime, done, not_date_object;
- Register object = rax;
- Register result = rax;
- Register scratch = rcx;
-
- __ JumpIfSmi(object, &not_date_object);
- __ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ j(not_equal, &not_date_object);
-
- if (index->value() == 0) {
- __ movq(result, FieldOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ movq(scratch, stamp);
- __ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ movq(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2);
-#ifdef _WIN64
- __ movq(rcx, object);
- __ movq(rdx, index, RelocInfo::NONE64);
-#else
- __ movq(rdi, object);
- __ movq(rsi, index, RelocInfo::NONE64);
-#endif
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
- }
-
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(rcx);
- __ pop(rbx);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(rcx);
- __ pop(rbx);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(rbx); // rax = value. rbx = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(rbx, &done);
-
- // If the object is not a value type, return the value.
- __ CmpObjectType(rbx, JS_VALUE_TYPE, rcx);
- __ j(not_equal, &done);
-
- // Store the value.
- __ movq(FieldOperand(rbx, JSValue::kValueOffset), rax);
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ movq(rdx, rax);
- __ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
-
- NumberToStringStub stub;
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(rax, rbx);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(rbx);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = rbx;
- Register index = rax;
- Register result = rdx;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result, Heap::kNanValueRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = rbx;
- Register index = rax;
- Register scratch = rdx;
- Register result = rax;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Move(result, Smi::FromInt(0));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &runtime);
-
- // InvokeFunction requires the function in rdi. Move it in there.
- __ movq(rdi, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(rdi, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(rax);
- __ CallRuntime(Runtime::kCall, args->length());
- __ bind(&done);
-
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- context()->Plug(rax);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = rax;
- Register cache = rbx;
- Register tmp = rcx;
- __ movq(cache, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(cache,
- FieldOperand(cache, GlobalObject::kNativeContextOffset));
- __ movq(cache,
- ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ movq(cache,
- FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- Label done, not_found;
- // tmp now holds finger offset as a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
- SmiIndex index =
- __ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
- __ cmpq(key, FieldOperand(cache,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ j(not_equal, &not_found, Label::kNear);
- __ movq(rax, FieldOperand(cache,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize + kPointerSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ push(cache);
- __ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = rax;
- Register left = rbx;
- Register tmp = rcx;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmpq(left, right);
- __ j(equal, &ok, Label::kNear);
- // Fail if either is a non-HeapObject.
- Condition either_smi = masm()->CheckEitherSmi(left, right, tmp);
- __ j(either_smi, &fail, Label::kNear);
- __ j(zero, &fail, Label::kNear);
- __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
- Immediate(JS_REGEXP_TYPE));
- __ j(not_equal, &fail, Label::kNear);
- __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
- __ j(not_equal, &fail, Label::kNear);
- __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- __ j(equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Move(rax, isolate()->factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&ok);
- __ Move(rax, isolate()->factory()->true_value());
- __ bind(&done);
-
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ testl(FieldOperand(rax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ j(zero, if_true);
- __ jmp(if_false);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(rax);
-
- __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ IndexFromHash(rax, rax);
-
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- Label bailout, return_result, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- // We will leave the separator on the stack until the end of the function.
- VisitForStackValue(args->at(1));
- // Load this to rax (= array)
- VisitForAccumulatorValue(args->at(0));
- // All aliases of the same register have disjoint lifetimes.
- Register array = rax;
- Register elements = no_reg; // Will be rax.
-
- Register index = rdx;
-
- Register string_length = rcx;
-
- Register string = rsi;
-
- Register scratch = rbx;
-
- Register array_length = rdi;
- Register result_pos = no_reg; // Will be rdi.
-
- Operand separator_operand = Operand(rsp, 2 * kPointerSize);
- Operand result_operand = Operand(rsp, 1 * kPointerSize);
- Operand array_length_operand = Operand(rsp, 0 * kPointerSize);
- // Separator operand is already pushed. Make room for the two
- // other stack fields, and clear the direction flag in anticipation
- // of calling CopyBytes.
- __ subq(rsp, Immediate(2 * kPointerSize));
- __ cld();
- // Check that the array is a JSArray
- __ JumpIfSmi(array, &bailout);
- __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch, &bailout);
-
- // Array has fast elements, so its length must be a smi.
- // If the array has length zero, return the empty string.
- __ movq(array_length, FieldOperand(array, JSArray::kLengthOffset));
- __ SmiCompare(array_length, Smi::FromInt(0));
- __ j(not_zero, &non_trivial_array);
- __ LoadRoot(rax, Heap::kempty_stringRootIndex);
- __ jmp(&return_result);
-
- // Save the array length on the stack.
- __ bind(&non_trivial_array);
- __ SmiToInteger32(array_length, array_length);
- __ movl(array_length_operand, array_length);
-
- // Save the FixedArray containing array's elements.
- // End of array's live range.
- elements = array;
- __ movq(elements, FieldOperand(array, JSArray::kElementsOffset));
- array = no_reg;
-
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ Set(index, 0);
- __ Set(string_length, 0);
- // Loop condition: while (index < array_length).
- // Live loop registers: index(int32), array_length(int32), string(String*),
- // scratch, string_length(int32), elements(FixedArray*).
- if (generate_debug_code_) {
- __ cmpq(index, array_length);
- __ Assert(below, "No empty arrays here in EmitFastAsciiArrayJoin");
- }
- __ bind(&loop);
- __ movq(string, FieldOperand(elements,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ andb(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
- __ j(not_equal, &bailout);
- __ AddSmiField(string_length,
- FieldOperand(string, SeqOneByteString::kLengthOffset));
- __ j(overflow, &bailout);
- __ incl(index);
- __ cmpl(index, array_length);
- __ j(less, &loop);
-
- // Live registers:
- // string_length: Sum of string lengths.
- // elements: FixedArray of strings.
- // index: Array length.
- // array_length: Array length.
-
- // If array_length is 1, return elements[0], a string.
- __ cmpl(array_length, Immediate(1));
- __ j(not_equal, &not_size_one_array);
- __ movq(rax, FieldOperand(elements, FixedArray::kHeaderSize));
- __ jmp(&return_result);
-
- __ bind(&not_size_one_array);
-
- // End of array_length live range.
- result_pos = array_length;
- array_length = no_reg;
-
- // Live registers:
- // string_length: Sum of string lengths.
- // elements: FixedArray of strings.
- // index: Array length.
-
- // Check that the separator is a sequential ASCII string.
- __ movq(string, separator_operand);
- __ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ andb(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
- __ j(not_equal, &bailout);
-
- // Live registers:
- // string_length: Sum of string lengths.
- // elements: FixedArray of strings.
- // index: Array length.
- // string: Separator string.
-
- // Add (separator length times (array_length - 1)) to string_length.
- __ SmiToInteger32(scratch,
- FieldOperand(string, SeqOneByteString::kLengthOffset));
- __ decl(index);
- __ imull(scratch, index);
- __ j(overflow, &bailout);
- __ addl(string_length, scratch);
- __ j(overflow, &bailout);
-
- // Live registers and stack values:
- // string_length: Total length of result string.
- // elements: FixedArray of strings.
- __ AllocateAsciiString(result_pos, string_length, scratch,
- index, string, &bailout);
- __ movq(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
-
- __ movq(string, separator_operand);
- __ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
- Smi::FromInt(1));
- __ j(equal, &one_char_separator);
- __ j(greater, &long_separator);
-
-
- // Empty separator case:
- __ Set(index, 0);
- __ movl(scratch, array_length_operand);
- __ jmp(&loop_1_condition);
- // Loop condition: while (index < array_length).
- __ bind(&loop_1);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // elements: the FixedArray of strings we are joining.
- // scratch: array length.
-
- // Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ SmiToInteger32(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(result_pos, string, string_length);
- __ incl(index);
- __ bind(&loop_1_condition);
- __ cmpl(index, scratch);
- __ j(less, &loop_1); // Loop while (index < array_length).
- __ jmp(&done);
-
- // Generic bailout code used from several places.
- __ bind(&bailout);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ jmp(&return_result);
-
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Get the separator ASCII character value.
- // Register "string" holds the separator.
- __ movzxbl(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ Set(index, 0);
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_2_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_2);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // elements: The FixedArray of strings we are joining.
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // scratch: Separator character.
-
- // Copy the separator character to the result.
- __ movb(Operand(result_pos, 0), scratch);
- __ incq(result_pos);
-
- __ bind(&loop_2_entry);
- // Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ SmiToInteger32(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(result_pos, string, string_length);
- __ incl(index);
- __ cmpl(index, array_length_operand);
- __ j(less, &loop_2); // End while (index < length).
- __ jmp(&done);
-
-
- // Long separator case (separator is more than one character).
- __ bind(&long_separator);
-
- // Make elements point to end of elements array, and index
- // count from -array_length to zero, so we don't need to maintain
- // a loop limit.
- __ movl(index, array_length_operand);
- __ lea(elements, FieldOperand(elements, index, times_pointer_size,
- FixedArray::kHeaderSize));
- __ neg(index);
-
- // Replace separator string with pointer to its first character, and
- // make scratch be its length.
- __ movq(string, separator_operand);
- __ SmiToInteger32(scratch,
- FieldOperand(string, String::kLengthOffset));
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ movq(separator_operand, string);
-
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_3_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_3);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // scratch: Separator length.
- // separator_operand (rsp[0x10]): Address of first char of separator.
-
- // Copy the separator to the result.
- __ movq(string, separator_operand);
- __ movl(string_length, scratch);
- __ CopyBytes(result_pos, string, string_length, 2);
-
- __ bind(&loop_3_entry);
- // Get string = array[index].
- __ movq(string, Operand(elements, index, times_pointer_size, 0));
- __ SmiToInteger32(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(result_pos, string, string_length);
- __ incq(index);
- __ j(not_equal, &loop_3); // Loop while (index < 0).
-
- __ bind(&done);
- __ movq(rax, result_operand);
-
- __ bind(&return_result);
- // Drop temp values from the stack, and restore context register.
- __ addq(rsp, Immediate(3 * kPointerSize));
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ movq(rax, GlobalObjectOperand());
- __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function using a call IC.
- __ Move(rcx, expr->name());
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- } else {
- __ CallRuntime(expr->function(), arg_count);
- }
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* property = expr->expression()->AsProperty();
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
-
- if (property != NULL) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ Push(Smi::FromInt(strict_mode_flag));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(rax);
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
- if (var->IsUnallocated()) {
- __ push(var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ Push(var->name());
- __ Push(Smi::FromInt(kNonStrictMode));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(rax);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
- // Result of deleting non-global variables is false. 'this' is
- // not really a variable, though we implement it as one. The
- // subexpression does not have side effects.
- context()->Plug(var->is_this());
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(rax);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
- break;
- }
-
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(Heap::kUndefinedValueRootIndex);
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else if (context()->IsTest()) {
- const TestContext* test = TestContext::cast(context());
- // The labels are swapped for the recursive call.
- VisitForControl(expr->expression(),
- test->false_label(),
- test->true_label(),
- test->fall_through());
- context()->Plug(test->true_label(), test->false_label());
- } else {
- // We handle value contexts explicitly rather than simply visiting
- // for control and plugging the control flow into the context,
- // because we need to prepare a pair of extra administrative AST ids
- // for the optimizing compiler.
- ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
- Label materialize_true, materialize_false, done;
- VisitForControl(expr->expression(),
- &materialize_false,
- &materialize_true,
- &materialize_true);
- __ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
- if (context()->IsAccumulatorValue()) {
- __ LoadRoot(rax, Heap::kTrueValueRootIndex);
- } else {
- __ PushRoot(Heap::kTrueValueRootIndex);
- }
- __ jmp(&done, Label::kNear);
- __ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
- if (context()->IsAccumulatorValue()) {
- __ LoadRoot(rax, Heap::kFalseValueRootIndex);
- } else {
- __ PushRoot(Heap::kFalseValueRootIndex);
- }
- __ bind(&done);
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- { StackValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ CallRuntime(Runtime::kTypeof, 1);
- context()->Plug(rax);
- break;
- }
-
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ JumpIfSmi(result_register(), &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- // TODO(svenpanne): Allowing format strings in Comment would be nice here...
- Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
- // UnaryOpStub expects the argument to be in the
- // accumulator register rax.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
-
- // Invalid left-hand-sides are rewritten to have a 'throw
- // ReferenceError' as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
- // Expression can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- __ Push(Smi::FromInt(0));
- }
- if (assign_type == NAMED_PROPERTY) {
- VisitForAccumulatorValue(prop->obj());
- __ push(rax); // Copy of receiver, needed for later store.
- EmitNamedPropertyLoad(prop);
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack
- __ push(rax); // Copy of key, needed for later store.
- EmitKeyedPropertyLoad(prop);
- }
- }
-
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load my have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
- } else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
- }
-
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
- __ JumpIfSmi(rax, &no_conversion, Label::kNear);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(rax);
- break;
- case NAMED_PROPERTY:
- __ movq(Operand(rsp, kPointerSize), rax);
- break;
- case KEYED_PROPERTY:
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
- break;
- }
- }
- }
-
- // Inline smi case if we are in a loop.
- Label done, stub_call;
- JumpPatchSite patch_site(masm_);
-
- if (ShouldInlineSmiCase(expr->op())) {
- if (expr->op() == Token::INC) {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- } else {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1));
- }
- __ j(overflow, &stub_call, Label::kNear);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(rax, &done, Label::kNear);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1));
- } else {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- }
- }
-
- // Record position before stub call.
- SetSourcePosition(expr->position());
-
- // Call stub for +1/-1.
- __ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(1));
- BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
-
- // Store the value returned in rax.
- switch (assign_type) {
- case VARIABLE:
- if (expr->is_postfix()) {
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(rax);
- }
- // For all contexts except kEffect: We have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- // Perform the assignment as if via '='.
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(rax);
- }
- break;
- case NAMED_PROPERTY: {
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(rax);
- }
- break;
- }
- case KEYED_PROPERTY: {
- __ pop(rcx);
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(rax);
- }
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- VariableProxy* proxy = expr->AsVariableProxy();
- ASSERT(!context()->IsEffect());
- ASSERT(!context()->IsTest());
-
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
- __ Move(rcx, proxy->name());
- __ movq(rax, proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallIC(ic);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(rax);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ push(rsi);
- __ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(rax);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Expression* sub_expr,
- Handle<String> check) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(sub_expr);
- }
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
- if (check->Equals(isolate()->heap()->number_string())) {
- __ JumpIfSmi(rax, if_true);
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
- __ JumpIfSmi(rax, if_false);
- // Check for undetectable objects => false.
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
- __ j(above_equal, if_false);
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- __ j(equal, if_true);
- __ CompareRoot(rax, Heap::kFalseValueRootIndex);
- Split(equal, if_true, if_false, fall_through);
- } else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(equal, if_true);
- __ JumpIfSmi(rax, if_false);
- // Check for undetectable objects => true.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_string())) {
- __ JumpIfSmi(rax, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
- __ j(equal, if_true);
- __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
- __ JumpIfSmi(rax, if_false);
- if (!FLAG_harmony_typeof) {
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- __ j(equal, if_true);
- }
- if (FLAG_harmony_symbols) {
- __ CmpObjectType(rax, SYMBOL_TYPE, rdx);
- __ j(equal, if_true);
- }
- __ CmpObjectType(rax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, rdx);
- __ j(below, if_false);
- __ CmpInstanceType(rdx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, if_false);
- // Check for undetectable objects => false.
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(zero, if_true, if_false, fall_through);
- } else {
- if (if_false != fall_through) __ jmp(if_false);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
-
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr)) return;
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Token::Value op = expr->op();
- VisitForStackValue(expr->left());
- switch (op) {
- case Token::IN:
- VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- Split(equal, if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
- // The stub returns 0 for true.
- Split(zero, if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- Condition cc = CompareIC::ComputeCondition(op);
- __ pop(rdx);
-
- bool inline_smi_code = ShouldInlineSmiCase(op);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ movq(rcx, rdx);
- __ or_(rcx, rax);
- patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
- Split(cc, if_true, if_false, NULL);
- __ bind(&slow_case);
- }
-
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
- patch_site.EmitPatchInfo();
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
- Split(cc, if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ CompareRoot(rax, nil_value);
- if (expr->op() == Token::EQ_STRICT) {
- Split(equal, if_true, if_false, fall_through);
- } else {
- Heap::RootListIndex other_nil_value = nil == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- __ j(equal, if_true);
- __ CompareRoot(rax, other_nil_value);
- __ j(equal, if_true);
- __ JumpIfSmi(rax, if_false);
- // It can be an undetectable object.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(rax);
-}
-
-
-Register FullCodeGenerator::result_register() {
- return rax;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return rsi;
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT(IsAligned(frame_offset, kPointerSize));
- __ movq(Operand(rbp, frame_offset), value);
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ movq(dst, ContextOperand(rsi, context_index));
-}
-
-
-void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_global_scope() ||
- declaration_scope->is_module_scope()) {
- // Contexts nested in the native context have a canonical empty function
- // as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ Push(Smi::FromInt(0));
- } else if (declaration_scope->is_eval_scope()) {
- // Contexts created by a call to eval have the same closure as the
- // context calling eval, not the anonymous closure containing the eval
- // code. Fetch it from the context.
- __ push(ContextOperand(rsi, Context::CLOSURE_INDEX));
- } else {
- ASSERT(declaration_scope->is_function_scope());
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-
-void FullCodeGenerator::EnterFinallyBlock() {
- ASSERT(!result_register().is(rdx));
- ASSERT(!result_register().is(rcx));
- // Cook return address on top of stack (smi encoded Code* delta)
- __ pop(rdx);
- __ Move(rcx, masm_->CodeObject());
- __ subq(rdx, rcx);
- __ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
-
- // Store result register while executing finally block.
- __ push(result_register());
-
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ Load(rdx, pending_message_obj);
- __ push(rdx);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ Load(rdx, has_pending_message);
- __ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ Load(rdx, pending_message_script);
- __ push(rdx);
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(rdx));
- ASSERT(!result_register().is(rcx));
- // Restore pending message from stack.
- __ pop(rdx);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ Store(pending_message_script, rdx);
-
- __ pop(rdx);
- __ SmiToInteger32(rdx, rdx);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ Store(has_pending_message, rdx);
-
- __ pop(rdx);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ Store(pending_message_obj, rdx);
-
- // Restore result register from stack.
- __ pop(result_register());
-
- // Uncook return address.
- __ pop(rdx);
- __ SmiToInteger32(rdx, rdx);
- __ Move(rcx, masm_->CodeObject());
- __ addq(rdx, rcx);
- __ jmp(rdx);
-}
-
-
-#undef __
-
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ movq(rsi, Operand(rsp, StackHandlerConstants::kContextOffset));
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
- }
- __ PopTryHandler();
- __ call(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/ic-x64.cc b/src/3rdparty/v8/src/x64/ic-x64.cc
deleted file mode 100644
index 15423e4..0000000
--- a/src/3rdparty/v8/src/x64/ic-x64.cc
+++ /dev/null
@@ -1,1690 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
- __ j(equal, global_object);
- __ cmpb(type, Immediate(JS_BUILTINS_OBJECT_TYPE));
- __ j(equal, global_object);
- __ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
- __ j(equal, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register r0,
- Register r1,
- Label* miss) {
- // Register usage:
- // receiver: holds the receiver on entry and is unchanged.
- // r0: used to hold receiver instance type.
- // Holds the property dictionary on fall through.
- // r1: used to hold receivers map.
-
- __ JumpIfSmi(receiver, miss);
-
- // Check that the receiver is a valid JS object.
- __ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
- __ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE));
- __ j(below, miss);
-
- // If this assert fails, we have to check upper bound too.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
- GenerateGlobalInstanceTypeCheck(masm, r0, miss);
-
- // Check for non-global object that requires access check.
- __ testb(FieldOperand(r1, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasNamedInterceptor)));
- __ j(not_zero, miss);
-
- __ movq(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, miss);
-}
-
-
-
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if name is not an internalized string,
-// and will jump to the miss_label in that case.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register r0,
- Register r1,
- Register result) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is unchanged.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // r0 - used to hold the capacity of the property dictionary.
- //
- // r1 - used to hold the index into the property dictionary.
- //
- // result - holds the result on exit if the load succeeded.
-
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
-
- // If probing finds an entry in the dictionary, r1 contains the
- // index into the dictionary. Check that the value is a normal
- // property.
- __ bind(&done);
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ Test(Operand(elements, r1, times_pointer_size,
- kDetailsOffset - kHeapObjectTag),
- Smi::FromInt(PropertyDetails::TypeField::kMask));
- __ j(not_zero, miss_label);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(result,
- Operand(elements, r1, times_pointer_size,
- kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not an internalized string, and will jump to the miss_label
-// in that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register value,
- Register scratch0,
- Register scratch1) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is clobbered.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // value - holds the value to store and is unchanged.
- //
- // scratch0 - used during the positive dictionary lookup and is clobbered.
- //
- // scratch1 - used for index into the property dictionary and is clobbered.
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- scratch0,
- scratch1);
-
- // If probing finds an entry in the dictionary, scratch0 contains the
- // index into the dictionary. Check that the value is a normal
- // property that is not read only.
- __ bind(&done);
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ Test(Operand(elements,
- scratch1,
- times_pointer_size,
- kDetailsOffset - kHeapObjectTag),
- Smi::FromInt(kTypeAndReadOnlyMask));
- __ j(not_zero, miss_label);
-
- // Store the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(scratch1, Operand(elements,
- scratch1,
- times_pointer_size,
- kValueOffset - kHeapObjectTag));
- __ movq(Operand(scratch1, 0), value);
-
- // Update write barrier. Make sure not to clobber the value.
- __ movq(scratch0, value);
- __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- int interceptor_bit,
- Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
-
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing
- // into string objects work as intended.
- ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
- __ j(below, slow);
-
- // Check bit field.
- __ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << interceptor_bit)));
- __ j(not_zero, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // elements - holds the elements of the receiver on exit.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // scratch - used to hold elements of the receiver and the loaded value.
-
- __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
- // Check that the key (index) is within bounds.
- __ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
- // Unsigned comparison rejects negative indices.
- __ j(above_equal, out_of_range);
- // Fast case: Do the load.
- SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
- __ movq(scratch, FieldOperand(elements,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, out_of_range);
- if (!result.is(scratch)) {
- __ movq(result, scratch);
- }
-}
-
-
-// Checks whether a key is an array index string or an internalized string.
-// Falls through if the key is an internalized string.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_internalized) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
- __ j(above_equal, not_internalized);
- // Is the string an array index, with cached numeric value?
- __ movl(hash, FieldOperand(key, String::kHashFieldOffset));
- __ testl(hash, Immediate(String::kContainsCachedArrayIndexMask));
- __ j(zero, index_string); // The value in hash is used at jump target.
-
- // Is the string internalized?
- STATIC_ASSERT(kInternalizedTag != 0);
- __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(kIsInternalizedMask));
- __ j(zero, not_internalized);
-}
-
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rax, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(rcx, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm,
- rdx,
- rax,
- rcx,
- rbx,
- rax,
- NULL,
- &slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // rdx: receiver
- // rax: key
- // rbx: key as untagged int32
- // rcx: elements
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &slow);
- __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax);
- __ ret(0);
-
- __ bind(&slow);
- // Slow case: Jump to runtime.
- // rdx: receiver
- // rax: key
- __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, rax, rcx, rbx, &index_string, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rcx, Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary leaving result in rcx.
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &probe_dictionary);
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ movl(rcx, rbx);
- __ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
- __ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
- __ shr(rdi, Immediate(String::kHashShift));
- __ xor_(rcx, rdi);
- int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
- __ and_(rcx, Immediate(mask));
-
- // Load the key (consisting of map and internalized string) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys
- = ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- __ movq(rdi, rcx);
- __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
- __ LoadAddress(kScratchRegister, cache_keys);
- int off = kPointerSize * i * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
- __ j(not_equal, &try_next_entry);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
- __ j(equal, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
- __ j(not_equal, &slow);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
- __ j(not_equal, &slow);
-
- // Get field offset, which is a 32-bit integer.
- ExternalReference cache_field_offsets
- = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- if (i != 0) {
- __ addl(rcx, Immediate(i));
- }
- __ LoadAddress(kScratchRegister, cache_field_offsets);
- __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ subq(rdi, rcx);
- __ j(above_equal, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ addq(rcx, rdi);
- __ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Load property array property.
- __ bind(&property_array_property);
- __ movq(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ movq(rax, FieldOperand(rax, rdi, times_pointer_size,
- FixedArray::kHeaderSize));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // rdx: receiver
- // rax: key
- // rbx: elements
-
- __ movq(rcx, FieldOperand(rdx, JSObject::kMapOffset));
- __ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
-
- GenerateDictionaryLoad(masm, &slow, rbx, rax, rcx, rdi, rax);
- __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_string);
- __ IndexFromHash(rbx, rax);
- __ jmp(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Register receiver = rdx;
- Register index = rax;
- Register scratch = rcx;
- Register result = rax;
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ ret(0);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &slow);
-
- // Check that the key is an array index, that is Uint32.
- STATIC_ASSERT(kSmiValueSize <= 32);
- __ JumpUnlessNonNegativeSmi(rax, &slow);
-
- // Get the map of the receiver.
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ movb(rcx, FieldOperand(rcx, Map::kBitFieldOffset));
- __ andb(rcx, Immediate(kSlowCaseBitFieldMask));
- __ cmpb(rcx, Immediate(1 << Map::kHasIndexedInterceptor));
- __ j(not_zero, &slow);
-
- // Everything is fine, call runtime.
- __ pop(rcx);
- __ push(rdx); // receiver
- __ push(rax); // key
- __ push(rcx); // return address
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
- masm->isolate()),
- 2,
- 1);
-
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- // rax: value
- // rbx: receiver's elements array (a FixedArray)
- // rcx: index
- // rdx: receiver (a JSArray)
- // r9: map of receiver
- if (check_map == kCheckMap) {
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, fast_double);
- }
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(rax, &non_smi_value);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ leal(rdi, Operand(rcx, 1));
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- }
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ ret(0);
-
- __ bind(&non_smi_value);
- // Writing a non-smi, check whether array allows non-smi elements.
- // r9: receiver's map
- __ CheckFastObjectElements(r9, &transition_smi_elements);
-
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ leal(rdi, Operand(rcx, 1));
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- }
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ movq(rdx, rax); // Preserve the value which is returned.
- __ RecordWriteArray(
- rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ ret(0);
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- // rdi: elements array's map
- __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ j(not_equal, slow);
- }
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ leal(rdi, Operand(rcx, 1));
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- }
- __ ret(0);
-
- __ bind(&transition_smi_elements);
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-
- // Transition the array appropriately depending on the value type.
- __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- rbx,
- rdi,
- slow);
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- slow);
- mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
- slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
- // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- slow);
- mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow_with_tagged_index);
- // Get the map from the receiver.
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ testb(FieldOperand(r9, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow_with_tagged_index);
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
- __ SmiToInteger32(rcx, rcx);
-
- __ CmpInstanceType(r9, JS_ARRAY_TYPE);
- __ j(equal, &array);
- // Check that the object is some kind of JSObject.
- __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow);
-
- // Object case: Check key against length in the elements array.
- // rax: value
- // rdx: JSObject
- // rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check array bounds.
- __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
- // rax: value
- // rbx: FixedArray
- // rcx: index
- __ j(above, &fast_object);
-
- // Slow case: call runtime.
- __ bind(&slow);
- __ Integer32ToSmi(rcx, rcx);
- __ bind(&slow_with_tagged_index);
- GenerateRuntimeSetProperty(masm, strict_mode);
- // Never returns to here.
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // rax: value
- // rdx: receiver (a JSArray)
- // rbx: receiver's elements array (a FixedArray)
- // rcx: index
- // flags: smicompare (rdx.length(), rbx)
- __ j(not_equal, &slow); // do not leave holes in the array
- __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
- __ j(below_equal, &slow);
- // Increment index to get new length.
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &check_if_double_array);
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- // rdi: elements array's map
- __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ j(not_equal, &slow);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is the length is always a smi.
- __ bind(&array);
- // rax: value
- // rdx: receiver (a JSArray)
- // rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-
- // Check the key against the length in the array, compute the
- // address to store into and fall through to fast case.
- __ SmiCompareInteger32(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
- __ j(below_equal, &extra);
-
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength);
-}
-
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rdx : receiver
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- rax);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(rdx, &number);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
- __ j(not_equal, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, rdx);
- __ jmp(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ CmpInstanceType(rbx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, rdx);
- __ jmp(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &boolean);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, rdx);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rdi : function
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- __ JumpIfSmi(rdi, miss);
- // Check that the value is a JavaScript function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rdx);
- __ j(not_equal, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-// The generated code falls through if the call should be handled by runtime.
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- GenerateStringDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
-
- // rax: elements
- // Search the dictionary placing the result in rdi.
- GenerateDictionaryLoad(masm, &miss, rax, rcx, rbx, rdi, rdi);
-
- GenerateFunctionTailCall(masm, argc, &miss);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(counters->call_miss(), 1);
- } else {
- __ IncrementCounter(counters->keyed_call_miss(), 1);
- }
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ push(rdx);
- __ push(rcx);
-
- // Call the entry.
- CEntryStub stub(1);
- __ Set(rax, 2);
- __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
- __ CallStub(&stub);
-
- // Move result to rdi and exit the internal frame.
- __ movq(rdi, rax);
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
- __ JumpIfSmi(rdx, &invoke);
- __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
- __ j(equal, &global);
- __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- Label do_call, slow_call, slow_load;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &check_string);
-
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rax, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
-
- __ bind(&do_call);
- // receiver in rdx is not used after this point.
- // rcx: key
- // rdi: function
- GenerateFunctionTailCall(masm, argc, &slow_call);
-
- __ bind(&check_number_dictionary);
- // rax: elements
- // rcx: smi key
- // Check whether the elements is a number dictionary.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &slow_load);
- __ SmiToInteger32(rbx, rcx);
- // ebx: untagged index
- __ LoadFromNumberDictionary(&slow_load, rax, rcx, rbx, r9, rdi, rdi);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rcx); // save the key
- __ push(rdx); // pass the receiver
- __ push(rcx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(rcx); // restore the key
- }
- __ movq(rdi, rax);
- __ jmp(&do_call);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, rcx, rax, rbx, &index_string, &slow_call);
-
- // The key is known to be an internalized string.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &lookup_monomorphic_cache);
-
- GenerateDictionaryLoad(masm, &slow_load, rbx, rcx, rax, rdi, rdi);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor internalized string,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
- GenerateMiss(masm, argc);
-
- __ bind(&index_string);
- __ IndexFromHash(rbx, rcx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // Check if the name is a string.
- Label miss;
- __ JumpIfSmi(rcx, &miss);
- Condition cond = masm->IsObjectStringType(rcx, rax, rax);
- __ j(NegateCondition(cond), &miss);
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
-static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the elements
- // map check later, we do not need to check for interceptors or
- // whether it requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
- __ j(below, slow_case);
-
- // Check that the key is a positive smi.
- Condition check = masm->CheckNonNegativeSmi(key);
- __ j(NegateCondition(check), slow_case);
-
- // Load the elements into scratch1 and check its map. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
- __ movq(scratch1, FieldOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments.
- __ movq(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
- __ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
- __ cmpq(key, scratch2);
- __ j(greater_equal, unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
- __ SmiToInteger64(scratch3, key);
- __ movq(scratch2, FieldOperand(scratch1,
- scratch3,
- times_pointer_size,
- kHeaderSize));
- __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
- __ j(equal, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- __ movq(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
- __ SmiToInteger64(scratch3, scratch2);
- return FieldOperand(scratch1,
- scratch3,
- times_pointer_size,
- Context::kHeaderSize);
-}
-
-
-static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map. The parameter_map register
- // must be loaded with the parameter map of the arguments object and is
- // overwritten.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ movq(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ movq(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmpq(key, scratch);
- __ j(greater_equal, slow_case);
- __ SmiToInteger64(scratch, key);
- return FieldOperand(backing_store,
- scratch,
- times_pointer_size,
- FixedArray::kHeaderSize);
-}
-
-
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, notin;
- Operand mapped_location =
- GenerateMappedArgumentsLookup(
- masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
- __ movq(rax, mapped_location);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in rbx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
- __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
- __ j(equal, &slow);
- __ movq(rax, unmapped_location);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, notin;
- Operand mapped_location = GenerateMappedArgumentsLookup(
- masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
- __ movq(mapped_location, rax);
- __ lea(r9, mapped_location);
- __ movq(r8, rax);
- __ RecordWrite(rbx,
- r9,
- r8,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in rbx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
- __ movq(unmapped_location, rax);
- __ lea(r9, unmapped_location);
- __ movq(r8, rax);
- __ RecordWrite(rbx,
- r9,
- r8,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label slow, notin;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- Operand mapped_location = GenerateMappedArgumentsLookup(
- masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
- __ movq(rdi, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in rbx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rax, &slow);
- __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
- __ j(equal, &slow);
- __ movq(rdi, unmapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::LOAD_IC, MONOMORPHIC, Code::HANDLER_FRAGMENT);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, rax, rcx, rbx, rdx);
-
- // Cache miss: Jump to runtime.
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);
-
- // rdx: elements
- // Search the dictionary placing the result in rax.
- GenerateDictionaryLoad(masm, &miss, rdx, rcx, rbx, rdi, rax);
- __ ret(0);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->load_miss(), 1);
-
- __ pop(rbx);
- __ push(rax); // receiver
- __ push(rcx); // name
- __ push(rbx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_miss(), 1);
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
- __ push(rbx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
- __ push(rbx); // return address
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // name
- __ push(rax); // value
- __ push(rbx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
-
- GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1);
- __ ret(0);
-
- __ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ pop(rbx);
- __ push(rdx);
- __ push(rcx);
- __ push(rax);
- __ Push(Smi::FromInt(NONE)); // PropertyAttributes
- __ Push(Smi::FromInt(strict_mode));
- __ push(rbx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
- __ Push(Smi::FromInt(NONE)); // PropertyAttributes
- __ Push(Smi::FromInt(strict_mode)); // Strict mode.
- __ push(rbx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
- __ push(rbx); // return address
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
- __ push(rbx); // return address
-
- // Do tail-call to runtime routine.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rbx : target map
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- // Must return the modified receiver in eax.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
- __ movq(rax, rdx);
- __ Ret();
- __ bind(&fail);
- }
-
- __ pop(rbx);
- __ push(rdx);
- __ push(rbx); // return address
- __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rbx : target map
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- // Must return the modified receiver in eax.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
- __ movq(rax, rdx);
- __ Ret();
- __ bind(&fail);
- }
-
- __ pop(rbx);
- __ push(rdx);
- __ push(rbx); // return address
- __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- return *test_instruction_address == Assembler::kTestAlByte;
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestAlByte) {
- ASSERT(*test_instruction_address == Assembler::kNopByte);
- return;
- }
-
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction and the
- // condition code uses at the patched jump.
- int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- address, test_instruction_address, delta);
- }
-
- // Patch with a short conditional jump. Enabling means switching from a short
- // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
- // reverse operation of that.
- Address jmp_address = test_instruction_address - delta;
- ASSERT((check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode)
- : (*jmp_address == Assembler::kJnzShortOpcode ||
- *jmp_address == Assembler::kJzShortOpcode));
- Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
- : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
- *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc b/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
deleted file mode 100644
index f1fe452..0000000
--- a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
+++ /dev/null
@@ -1,5846 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "x64/lithium-codegen-x64.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-// When invoking builtins, we need to record the safepoint in the middle of
-// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
-
- virtual void BeforeCall(int call_size) const {
- codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
- }
-
- virtual void AfterCall() const {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
- ASSERT(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateJumpTable() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
- code->set_stack_slots(GetStackSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
- PopulateDeoptimizationData(code);
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
-}
-
-
-void LChunkBuilder::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- int length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). rcx is zero for method calls and non-zero for function
- // calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, receiver_offset), kScratchRegister);
- __ bind(&ok);
- }
- }
-
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- ASSERT(!frame_is_built_);
- frame_is_built_ = true;
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- if (info()->IsStub()) {
- __ Push(Smi::FromInt(StackFrame::STUB));
- } else {
- __ push(rdi); // Callee's JS function.
- }
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- if (FLAG_debug_code) {
- __ subq(rsp, Immediate(slots * kPointerSize));
- __ push(rax);
- __ Set(rax, slots);
- __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
- Label loop;
- __ bind(&loop);
- __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
- kScratchRegister);
- __ decl(rax);
- __ j(not_zero, &loop);
- __ pop(rax);
- } else {
- __ subq(rsp, Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- // On windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write to each page in turn (the value is irrelevant).
- const int kPageSize = 4 * KB;
- for (int offset = slots * kPointerSize - kPageSize;
- offset > 0;
- offset -= kPageSize) {
- __ movq(Operand(rsp, offset), rax);
- }
-#endif
- }
-
- if (info()->saves_caller_doubles()) {
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ movsd(MemOperand(rsp, count * kDoubleSize),
- XMMRegister::FromAllocationIndex(save_iterator.Current()));
- save_iterator.Advance();
- count++;
- }
- }
- }
-
- // Possibly allocate a local context.
- int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope() != NULL && scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment(";;; Allocate local context");
- // Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
-
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(var->index());
- __ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers rax and rbx.
- __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
- }
- }
- Comment(";;; End allocate local context");
- }
-
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
-
- if (emit_instructions) {
- if (FLAG_code_comments) {
- HValue* hydrogen = instr->hydrogen_value();
- if (hydrogen != NULL) {
- if (hydrogen->IsChange()) {
- HValue* changed_value = HChange::cast(hydrogen)->value();
- int use_id = 0;
- const char* use_mnemo = "dead";
- if (hydrogen->UseCount() >= 1) {
- HValue* use_value = hydrogen->uses().value();
- use_id = use_value->id();
- use_mnemo = use_value->Mnemonic();
- }
- Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
- current_instruction_, instr->Mnemonic(),
- changed_value->id(), changed_value->Mnemonic(),
- use_id, use_mnemo);
- } else {
- Comment(";;; @%d: %s. <#%d>", current_instruction_,
- instr->Mnemonic(), hydrogen->id());
- }
- } else {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- }
- }
- instr->CompileToNative(this);
- }
- }
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateJumpTable() {
- Label needs_frame_not_call;
- Label needs_frame_is_call;
- for (int i = 0; i < jump_table_.length(); i++) {
- __ bind(&jump_table_[i].label);
- Address entry = jump_table_[i].address;
- bool is_lazy_deopt = jump_table_[i].is_lazy_deopt;
- Deoptimizer::BailoutType type =
- is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- int id = Deoptimizer::GetDeoptimizationId(entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- if (jump_table_[i].needs_frame) {
- __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
- if (is_lazy_deopt) {
- if (needs_frame_is_call.is_bound()) {
- __ jmp(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ push(rsi);
- __ movq(rsi, MemOperand(rsp, kPointerSize));
- __ call(kScratchRegister);
- }
- } else {
- if (needs_frame_not_call.is_bound()) {
- __ jmp(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ push(rsi);
- __ movq(rsi, MemOperand(rsp, kPointerSize));
- __ jmp(kScratchRegister);
- }
- }
- } else {
- if (is_lazy_deopt) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- }
- }
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred build frame",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(!frame_is_built_);
- ASSERT(info()->IsStub());
- frame_is_built_ = true;
- // Build the frame in such a way that esi isn't trashed.
- __ push(rbp); // Caller's frame pointer.
- __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
- __ Push(Smi::FromInt(StackFrame::STUB));
- __ lea(rbp, Operand(rsp, 2 * kPointerSize));
- }
- Comment(";;; Deferred code @%d: %s.",
- code->instruction_index(),
- code->instr()->Mnemonic());
- code->Generate();
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred destroy frame",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(frame_is_built_);
- frame_is_built_ = false;
- __ movq(rsp, rbp);
- __ pop(rbp);
- }
- __ jmp(code->exit());
- }
- }
-
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
- safepoints_.Emit(masm(), GetStackSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(int index) const {
- return XMMRegister::FromAllocationIndex(index);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- ASSERT(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsInteger32();
-}
-
-
-bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsTagged();
-}
-
-
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return constant->handle();
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) const {
- // Does not handle registers. In X64 assembler, plain registers are not
- // representable as an Operand.
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return Operand(rbp, StackSlotOffset(op->index()));
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* pushed_arguments_index,
- int* pushed_arguments_count) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
-
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *pushed_arguments_index = -environment->parameter_count();
- *pushed_arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- pushed_arguments_index,
- pushed_arguments_count);
- bool has_closure_id = !info()->closure().is_null() &&
- *info()->closure() != *environment->closure();
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
-
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- ASSERT(translation_size == 1);
- ASSERT(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- ASSERT(translation_size == 2);
- ASSERT(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- }
-
- // Inlined frames which push their arguments cause the index to be
- // bumped and another stack area to be used for materialization,
- // otherwise actual argument values are unknown for inlined frames.
- bool arguments_known = true;
- int arguments_index = *pushed_arguments_index;
- int arguments_count = *pushed_arguments_count;
- if (environment->entry() != NULL) {
- arguments_known = environment->entry()->arguments_pushed();
- arguments_index = arguments_index < 0
- ? GetStackSlotCount() : arguments_index + arguments_count;
- arguments_count = environment->entry()->arguments_count() + 1;
- if (environment->entry()->arguments_pushed()) {
- *pushed_arguments_index = arguments_index;
- *pushed_arguments_count = arguments_count;
- }
- }
-
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- arguments_known,
- arguments_index,
- arguments_count);
- }
- }
-
- AddToTranslation(translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- }
-}
-
-
-void LCodeGen::AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(
- arguments_known, arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
- if (is_tagged) {
- translation->StoreStackSlot(op->index());
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
- } else {
- translation->StoreInt32StackSlot(op->index());
- }
- } else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- XMMRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode,
- int argc) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
-
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- if (code->kind() == Code::BINARY_OP_IC ||
- code->kind() == Code::COMPARE_IC) {
- __ nop();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr) {
- ASSERT(instr != NULL);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- __ CallRuntime(function, num_arguments);
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
-}
-
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr) {
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode) {
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, environment->zone());
- }
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- ASSERT(info()->IsOptimizing() || info()->IsStub());
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort("bailout was not prepared");
- return;
- }
-
- ASSERT(FLAG_deopt_every_n_times == 0); // Not yet implemented on x64.
-
- if (FLAG_trap_on_deopt) {
- Label done;
- if (cc != no_condition) {
- __ j(NegateCondition(cc), &done, Label::kNear);
- }
- __ int3();
- __ bind(&done);
- }
-
- ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
- if (cc == no_condition && frame_is_built_) {
- if (needs_lazy_deopt) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- }
- } else {
- // We often have several deopts to the same entry, reuse the last
- // jump entry if this is the case.
- if (jump_table_.is_empty() ||
- jump_table_.last().address != entry ||
- jump_table_.last().needs_frame != !frame_is_built_ ||
- jump_table_.last().is_lazy_deopt != needs_lazy_deopt) {
- JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
- jump_table_.Add(table_entry, zone());
- }
- if (cc == no_condition) {
- __ jmp(&jump_table_.last().label);
- } else {
- __ j(cc, &jump_table_.last().label);
- }
- }
-}
-
-
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
- maps.Add(map, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
-}
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
-
- Handle<ByteArray> translations = translations_.CreateByteArray();
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
- }
-
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
- LInstruction* instr, SafepointMode safepoint_mode, int argc) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- ASSERT(kind == expected_safepoint_kind_);
-
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
-
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
- if (kind & Safepoint::kWithRegisters) {
- // Register rsi always contains a pointer to the context.
- safepoint.DefinePointerRegister(rsi, zone());
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
- RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-
-void LCodeGen::RecordPosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
-
- if (divisor < 0) divisor = -divisor;
-
- Label positive_dividend, done;
- __ testl(dividend, dividend);
- __ j(not_sign, &positive_dividend, Label::kNear);
- __ negl(dividend);
- __ andl(dividend, Immediate(divisor - 1));
- __ negl(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ j(not_zero, &done, Label::kNear);
- DeoptimizeIf(no_condition, instr->environment());
- } else {
- __ jmp(&done, Label::kNear);
- }
- __ bind(&positive_dividend);
- __ andl(dividend, Immediate(divisor - 1));
- __ bind(&done);
- } else {
- Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
- Register left_reg = ToRegister(instr->left());
- Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
-
- ASSERT(left_reg.is(rax));
- ASSERT(result_reg.is(rdx));
- ASSERT(!right_reg.is(rax));
- ASSERT(!right_reg.is(rdx));
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
- }
-
- __ testl(left_reg, left_reg);
- __ j(zero, &remainder_eq_dividend, Label::kNear);
- __ j(sign, &slow, Label::kNear);
-
- __ testl(right_reg, right_reg);
- __ j(not_sign, &both_positive, Label::kNear);
- // The sign of the divisor doesn't matter.
- __ neg(right_reg);
-
- __ bind(&both_positive);
- // If the dividend is smaller than the nonnegative
- // divisor, the dividend is the result.
- __ cmpl(left_reg, right_reg);
- __ j(less, &remainder_eq_dividend, Label::kNear);
-
- // Check if the divisor is a PowerOfTwo integer.
- Register scratch = ToRegister(instr->temp());
- __ movl(scratch, right_reg);
- __ subl(scratch, Immediate(1));
- __ testl(scratch, right_reg);
- __ j(not_zero, &do_subtraction, Label::kNear);
- __ andl(left_reg, scratch);
- __ jmp(&remainder_eq_dividend, Label::kNear);
-
- __ bind(&do_subtraction);
- const int kUnfolds = 3;
- // Try a few subtractions of the dividend.
- __ movl(scratch, left_reg);
- for (int i = 0; i < kUnfolds; i++) {
- // Reduce the dividend by the divisor.
- __ subl(left_reg, right_reg);
- // Check if the dividend is less than the divisor.
- __ cmpl(left_reg, right_reg);
- __ j(less, &remainder_eq_dividend, Label::kNear);
- }
- __ movl(left_reg, scratch);
-
- // Slow case, using idiv instruction.
- __ bind(&slow);
-
- // Check for (kMinInt % -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
- DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // Sign extend eax to edx.
- // (We are using only the low 32 bits of the values.)
- __ cdq();
-
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- Label done;
- __ testl(left_reg, left_reg);
- __ j(not_sign, &positive_left, Label::kNear);
- __ idivl(right_reg);
-
- // Test the remainder for 0, because then the result would be -0.
- __ testl(result_reg, result_reg);
- __ j(not_zero, &done, Label::kNear);
-
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&positive_left);
- __ idivl(right_reg);
- __ bind(&done);
- } else {
- __ idivl(right_reg);
- }
- __ jmp(&done, Label::kNear);
-
- __ bind(&remainder_eq_dividend);
- __ movl(result_reg, left_reg);
-
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- ASSERT(instr->right()->IsConstantOperand());
-
- const Register dividend = ToRegister(instr->left());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- const Register result = ToRegister(instr->result());
-
- switch (divisor) {
- case 0:
- DeoptimizeIf(no_condition, instr->environment());
- return;
-
- case 1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- return;
-
- case -1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ negl(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
- return;
- }
-
- uint32_t divisor_abs = abs(divisor);
- if (IsPowerOf2(divisor_abs)) {
- int32_t power = WhichPowerOf2(divisor_abs);
- if (divisor < 0) {
- __ movsxlq(result, dividend);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ sar(result, Immediate(power));
- } else {
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ sarl(result, Immediate(power));
- }
- } else {
- Register reg1 = ToRegister(instr->temp());
- Register reg2 = ToRegister(instr->result());
-
- // Find b which: 2^b < divisor_abs < 2^(b+1).
- unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
- unsigned shift = 32 + b; // Precision +1bit (effectively).
- double multiplier_f =
- static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
- int64_t multiplier;
- if (multiplier_f - floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(floor(multiplier_f));
- } else {
- multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
- }
- // The multiplier is a uint32.
- ASSERT(multiplier > 0 &&
- multiplier < (static_cast<int64_t>(1) << 32));
- // The multiply is int64, so sign-extend to r64.
- __ movsxlq(reg1, dividend);
- if (divisor < 0 &&
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ neg(reg1);
- DeoptimizeIf(zero, instr->environment());
- }
- __ movq(reg2, multiplier, RelocInfo::NONE64);
- // Result just fit in r64, because it's int32 * uint32.
- __ imul(reg2, reg1);
-
- __ addq(reg2, Immediate(1 << 30));
- __ sar(reg2, Immediate(shift));
- }
-}
-
-
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
-
- if (test_value != 0) {
- // Deoptimize if remainder is not 0.
- __ testl(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sarl(dividend, Immediate(power));
- }
-
- if (divisor < 0) __ negl(dividend);
-
- return;
- }
-
- LOperand* right = instr->right();
- ASSERT(ToRegister(instr->result()).is(rax));
- ASSERT(ToRegister(instr->left()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rdx));
-
- Register left_reg = rax;
-
- // Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ testl(left_reg, left_reg);
- __ j(not_zero, &left_not_zero, Label::kNear);
- __ testl(right_reg, right_reg);
- DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
- DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // Sign extend to rdx.
- __ cdq();
- __ idivl(right_reg);
-
- if (!instr->is_flooring()) {
- // Deoptimize if remainder is not 0.
- __ testl(rdx, rdx);
- DeoptimizeIf(not_zero, instr->environment());
- } else {
- Label done;
- __ testl(rdx, rdx);
- __ j(zero, &done, Label::kNear);
- __ xorl(rdx, right_reg);
- __ sarl(rdx, Immediate(31));
- __ addl(rax, rdx);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->left());
- LOperand* right = instr->right();
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ movl(kScratchRegister, left);
- }
-
- bool can_overflow =
- instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- if (right->IsConstantOperand()) {
- int right_value = ToInteger32(LConstantOperand::cast(right));
- if (right_value == -1) {
- __ negl(left);
- } else if (right_value == 0) {
- __ xorl(left, left);
- } else if (right_value == 2) {
- __ addl(left, left);
- } else if (!can_overflow) {
- // If the multiplication is known to not overflow, we
- // can use operations that don't set the overflow flag
- // correctly.
- switch (right_value) {
- case 1:
- // Do nothing.
- break;
- case 3:
- __ leal(left, Operand(left, left, times_2, 0));
- break;
- case 4:
- __ shll(left, Immediate(2));
- break;
- case 5:
- __ leal(left, Operand(left, left, times_4, 0));
- break;
- case 8:
- __ shll(left, Immediate(3));
- break;
- case 9:
- __ leal(left, Operand(left, left, times_8, 0));
- break;
- case 16:
- __ shll(left, Immediate(4));
- break;
- default:
- __ imull(left, left, Immediate(right_value));
- break;
- }
- } else {
- __ imull(left, left, Immediate(right_value));
- }
- } else if (right->IsStackSlot()) {
- __ imull(left, ToOperand(right));
- } else {
- __ imull(left, ToRegister(right));
- }
-
- if (can_overflow) {
- DeoptimizeIf(overflow, instr->environment());
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- __ testl(left, left);
- __ j(not_zero, &done, Label::kNear);
- if (right->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr->environment());
- } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
- __ cmpl(kScratchRegister, Immediate(0));
- DeoptimizeIf(less, instr->environment());
- }
- } else if (right->IsStackSlot()) {
- __ orl(kScratchRegister, ToOperand(right));
- DeoptimizeIf(sign, instr->environment());
- } else {
- // Test the non-zero operand for negative sign.
- __ orl(kScratchRegister, ToRegister(right));
- DeoptimizeIf(sign, instr->environment());
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
-
- if (right->IsConstantOperand()) {
- int right_operand = ToInteger32(LConstantOperand::cast(right));
- switch (instr->op()) {
- case Token::BIT_AND:
- __ andl(ToRegister(left), Immediate(right_operand));
- break;
- case Token::BIT_OR:
- __ orl(ToRegister(left), Immediate(right_operand));
- break;
- case Token::BIT_XOR:
- __ xorl(ToRegister(left), Immediate(right_operand));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else if (right->IsStackSlot()) {
- switch (instr->op()) {
- case Token::BIT_AND:
- __ andl(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_OR:
- __ orl(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_XOR:
- __ xorl(ToRegister(left), ToOperand(right));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- ASSERT(right->IsRegister());
- switch (instr->op()) {
- case Token::BIT_AND:
- __ andl(ToRegister(left), ToRegister(right));
- break;
- case Token::BIT_OR:
- __ orl(ToRegister(left), ToRegister(right));
- break;
- case Token::BIT_XOR:
- __ xorl(ToRegister(left), ToRegister(right));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- if (right->IsRegister()) {
- ASSERT(ToRegister(right).is(rcx));
-
- switch (instr->op()) {
- case Token::ROR:
- __ rorl_cl(ToRegister(left));
- break;
- case Token::SAR:
- __ sarl_cl(ToRegister(left));
- break;
- case Token::SHR:
- __ shrl_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr->environment());
- }
- break;
- case Token::SHL:
- __ shll_cl(ToRegister(left));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- int value = ToInteger32(LConstantOperand::cast(right));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ rorl(ToRegister(left), Immediate(shift_count));
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ sarl(ToRegister(left), Immediate(shift_count));
- }
- break;
- case Token::SHR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr->environment());
- } else {
- __ shrl(ToRegister(left), Immediate(shift_count));
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- __ shll(ToRegister(left), Immediate(shift_count));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ subl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
- } else if (right->IsRegister()) {
- __ subl(ToRegister(left), ToRegister(right));
- } else {
- __ subl(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
- __ Set(ToRegister(instr->result()), instr->value());
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
- XMMRegister res = ToDoubleRegister(instr->result());
- double v = instr->value();
- uint64_t int_val = BitCast<uint64_t, double>(v);
- // Use xor to produce +0.0 in a fast and compact way, but avoid to
- // do so if the constant is -0.0.
- if (int_val == 0) {
- __ xorps(res, res);
- } else {
- Register tmp = ToRegister(instr->temp());
- __ Set(tmp, int_val);
- __ movq(res, tmp);
- }
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
- if (value->IsSmi()) {
- __ Move(ToRegister(instr->result()), value);
- } else {
- __ LoadHeapObject(ToRegister(instr->result()),
- Handle<HeapObject>::cast(value));
- }
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ movq(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
-}
-
-
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte.
- __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(result, Immediate(Map::kElementsKindMask));
- __ shr(result, Immediate(Map::kElementsKindShift));
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(input, &done, Label::kNear);
-
- // If the object is not a value type, return the object.
- __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
- __ j(not_equal, &done, Label::kNear);
- __ movq(result, FieldOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Smi* index = instr->index();
- Label runtime, done, not_date_object;
- ASSERT(object.is(result));
- ASSERT(object.is(rax));
-
- Condition cc = masm()->CheckSmi(object);
- DeoptimizeIf(cc, instr->environment());
- __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
- DeoptimizeIf(not_equal, instr->environment());
-
- if (index->value() == 0) {
- __ movq(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ movq(kScratchRegister, stamp);
- __ cmpq(kScratchRegister, FieldOperand(object,
- JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ movq(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2);
-#ifdef _WIN64
- __ movq(rcx, object);
- __ movq(rdx, index, RelocInfo::NONE64);
-#else
- __ movq(rdi, object);
- __ movq(rsi, index, RelocInfo::NONE64);
-#endif
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
-}
-
-
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->Equals(instr->result()));
- __ not_(ToRegister(input));
-}
-
-
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToRegister(instr->value()));
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- Comment("Unreachable code.");
- __ int3();
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ addl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
- } else if (right->IsRegister()) {
- __ addl(ToRegister(left), ToRegister(right));
- } else {
- __ addl(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsInteger32()) {
- Label return_left;
- Condition condition = (operation == HMathMinMax::kMathMin)
- ? less_equal
- : greater_equal;
- Register left_reg = ToRegister(left);
- if (right->IsConstantOperand()) {
- Immediate right_imm =
- Immediate(ToInteger32(LConstantOperand::cast(right)));
- __ cmpl(left_reg, right_imm);
- __ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_imm);
- } else if (right->IsRegister()) {
- Register right_reg = ToRegister(right);
- __ cmpl(left_reg, right_reg);
- __ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_reg);
- } else {
- Operand right_op = ToOperand(right);
- __ cmpl(left_reg, right_op);
- __ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_op);
- }
- __ bind(&return_left);
- } else {
- ASSERT(instr->hydrogen()->representation().IsDouble());
- Label check_nan_left, check_zero, return_left, return_right;
- Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
- XMMRegister left_reg = ToDoubleRegister(left);
- XMMRegister right_reg = ToDoubleRegister(right);
- __ ucomisd(left_reg, right_reg);
- __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
-
- __ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(left_reg, xmm_scratch);
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
- // At this point, both left and right are either 0 or -0.
- if (operation == HMathMinMax::kMathMin) {
- __ orpd(left_reg, right_reg);
- } else {
- // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
- __ addsd(left_reg, right_reg);
- }
- __ jmp(&return_left, Label::kNear);
-
- __ bind(&check_nan_left);
- __ ucomisd(left_reg, left_reg); // NaN check.
- __ j(parity_even, &return_left, Label::kNear);
- __ bind(&return_right);
- __ movsd(left_reg, right_reg);
-
- __ bind(&return_left);
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- XMMRegister left = ToDoubleRegister(instr->left());
- XMMRegister right = ToDoubleRegister(instr->right());
- XMMRegister result = ToDoubleRegister(instr->result());
- // All operations except MOD are computed in-place.
- ASSERT(instr->op() == Token::MOD || left.is(result));
- switch (instr->op()) {
- case Token::ADD:
- __ addsd(left, right);
- break;
- case Token::SUB:
- __ subsd(left, right);
- break;
- case Token::MUL:
- __ mulsd(left, right);
- break;
- case Token::DIV:
- __ divsd(left, right);
- __ movaps(left, left);
- break;
- case Token::MOD:
- __ PrepareCallCFunction(2);
- __ movaps(xmm0, left);
- ASSERT(right.is(xmm1));
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movaps(result, xmm0);
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->left()).is(rdx));
- ASSERT(ToRegister(instr->right()).is(rax));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ nop(); // Signals no inlined code.
-}
-
-
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
-
-
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
-
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- if (cc != always) {
- __ jmp(chunk_->GetAssemblyLabel(right_block));
- }
- }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
- Register reg = ToRegister(instr->value());
- __ testl(reg, reg);
- EmitBranch(true_block, false_block, not_zero);
- } else if (r.IsDouble()) {
- XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
- EmitBranch(true_block, false_block, not_equal);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(true_block, false_block, equal);
- } else if (type.IsSmi()) {
- __ SmiCompare(reg, Smi::FromInt(0));
- EmitBranch(true_block, false_block, not_equal);
- } else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
-
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
- // undefined -> false.
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ j(equal, false_label);
- }
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
- // true -> true.
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ j(equal, true_label);
- // false -> false.
- __ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ j(equal, false_label);
- }
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
- // 'null' -> false.
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- __ j(equal, false_label);
- }
-
- if (expected.Contains(ToBooleanStub::SMI)) {
- // Smis: 0 -> false, all other -> true.
- __ Cmp(reg, Smi::FromInt(0));
- __ j(equal, false_label);
- __ JumpIfSmi(reg, true_label);
- } else if (expected.NeedsMap()) {
- // If we need a map later and have a Smi -> deopt.
- __ testb(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
- }
-
- const Register map = kScratchRegister;
- if (expected.NeedsMap()) {
- __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));
-
- if (expected.CanBeUndetectable()) {
- // Undetectable -> false.
- __ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, false_label);
- }
- }
-
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
- // spec object -> true.
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(above_equal, true_label);
- }
-
- if (expected.Contains(ToBooleanStub::STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- __ j(not_zero, true_label);
- __ jmp(false_label);
- __ bind(&not_string);
- }
-
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
- __ j(zero, false_label);
- __ jmp(true_label);
- __ bind(&not_heap_number);
- }
-
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(no_condition, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = no_condition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = equal;
- break;
- case Token::LT:
- cond = is_unsigned ? below : less;
- break;
- case Token::GT:
- cond = is_unsigned ? above : greater;
- break;
- case Token::LTE:
- cond = is_unsigned ? below_equal : less_equal;
- break;
- case Token::GTE:
- cond = is_unsigned ? above_equal : greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
- } else {
- int32_t value;
- if (right->IsConstantOperand()) {
- value = ToInteger32(LConstantOperand::cast(right));
- __ cmpl(ToRegister(left), Immediate(value));
- } else if (left->IsConstantOperand()) {
- value = ToInteger32(LConstantOperand::cast(left));
- if (right->IsRegister()) {
- __ cmpl(ToRegister(right), Immediate(value));
- } else {
- __ cmpl(ToOperand(right), Immediate(value));
- }
- // We transposed the operands. Reverse the condition.
- cc = ReverseCondition(cc);
- } else {
- if (right->IsRegister()) {
- __ cmpl(ToRegister(left), ToRegister(right));
- } else {
- __ cmpl(ToRegister(left), ToOperand(right));
- }
- }
- }
- EmitBranch(true_block, false_block, cc);
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmpq(left, right);
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ cmpq(left, Immediate(instr->hydrogen()->right()));
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
- return;
- }
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ CompareRoot(reg, nil_value);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, equal);
- } else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ j(equal, true_label);
- __ CompareRoot(reg, other_nil_value);
- __ j(equal, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->temp());
- __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
- }
-}
-
-
-Condition LCodeGen::EmitIsObject(Register input,
- Label* is_not_object,
- Label* is_object) {
- ASSERT(!input.is(kScratchRegister));
-
- __ JumpIfSmi(input, is_not_object);
-
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ j(equal, is_object);
-
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, is_not_object);
-
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(below, is_not_object);
- __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- return below_equal;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond = EmitIsObject(reg, false_label, true_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
- Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
-
- return cond;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond = EmitIsString(reg, temp, false_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Condition is_smi;
- if (instr->value()->IsRegister()) {
- Register input = ToRegister(instr->value());
- is_smi = masm()->CheckSmi(input);
- } else {
- Operand input = ToOperand(instr->value());
- is_smi = masm()->CheckSmi(input);
- }
- EmitBranch(true_block, false_block, is_smi);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
- __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ testb(FieldOperand(temp, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- Condition condition = TokenToCondition(op, false);
- __ testq(rax, rax);
-
- EmitBranch(true_block, false_block, condition);
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- ASSERT(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return equal;
- if (to == LAST_TYPE) return above_equal;
- if (from == FIRST_TYPE) return below_equal;
- UNREACHABLE();
- return equal;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
-
- __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
-}
-
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ movl(result, FieldOperand(input, String::kHashFieldOffset));
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ testl(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, equal);
-}
-
-
-// Branches to a label or falls through with the answer in the z flag.
-// Trashes the temp register.
-void LCodeGen::EmitClassOfTest(Label* is_true,
- Label* is_false,
- Handle<String> class_name,
- Register input,
- Register temp,
- Register temp2) {
- ASSERT(!input.is(temp));
- ASSERT(!input.is(temp2));
- ASSERT(!temp.is(temp2));
-
- __ JumpIfSmi(input, is_false);
-
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- __ j(equal, is_true);
- __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
- __ j(equal, is_true);
- } else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(above, is_false);
- }
-
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
- // Check if the constructor in the map is a function.
- __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
-
- // Objects with a non-function constructor have class 'Object'.
- __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
- __ j(not_equal, is_true);
- } else {
- __ j(not_equal, is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ movq(temp, FieldOperand(temp,
- SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- ASSERT(class_name->IsInternalizedString());
- __ Cmp(temp, class_name);
- // End with the answer in the z flag.
-}
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
-
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- Label true_value, done;
- __ testq(rax, rax);
- __ j(zero, &true_value, Label::kNear);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
- }
- virtual LInstruction* instr() { return instr_; }
- Label* map_check() { return &map_check_; }
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->value());
-
- // A Smi is not an instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- // Use a temp register to avoid memory operands with variable lengths.
- Register map = ToRegister(instr->temp());
- __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
- __ bind(deferred->map_check()); // Label for calculating code patching.
- Handle<JSGlobalPropertyCell> cache_cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ movq(kScratchRegister, cache_cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ cmpq(map, Operand(kScratchRegister, 0));
- __ j(not_equal, &cache_miss, Label::kNear);
- // Patched to load either true or false.
- __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
-#ifdef DEBUG
- // Check that the code size between patch label and patch sites is invariant.
- Label end_of_patched_code;
- __ bind(&end_of_patched_code);
- ASSERT(true);
-#endif
- __ jmp(&done);
-
- // The inlined call site cache did not match. Check for null and string
- // before calling the deferred code.
- __ bind(&cache_miss); // Null is not an instance of anything.
- __ CompareRoot(object, Heap::kNullValueRootIndex);
- __ j(equal, &false_result, Label::kNear);
-
- // String values are not instances of anything.
- __ JumpIfNotString(object, kScratchRegister, deferred->entry());
-
- __ bind(&false_result);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
-
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- {
- PushSafepointRegistersScope scope(this);
- InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
- InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
- InstanceofStub stub(flags);
-
- __ push(ToRegister(instr->value()));
- __ PushHeapObject(instr->function());
-
- static const int kAdditionalDelta = 10;
- int delta =
- masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
- ASSERT(delta >= 0);
- __ push_imm32(delta);
-
- // We are pushing three values on the stack but recording a
- // safepoint with two arguments because stub is going to
- // remove the third argument from the stack before jumping
- // to instanceof builtin on the slow path.
- CallCodeGeneric(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS,
- 2);
- ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Move result to a register that survives the end of the
- // PushSafepointRegisterScope.
- __ movq(kScratchRegister, rax);
- }
- __ testq(kScratchRegister, kScratchRegister);
- Label load_false;
- Label done;
- __ j(not_zero, &load_false);
- __ LoadRoot(rax, Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&load_false);
- __ LoadRoot(rax, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(result, FieldOperand(result, Map::kInstanceSizeOffset));
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- Condition condition = TokenToCondition(op, false);
- Label true_value, done;
- __ testq(rax, rax);
- __ j(condition, &true_value, Label::kNear);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Preserve the return value on the stack and rely on the runtime
- // call to return the value in the same register.
- __ push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (info()->saves_caller_doubles()) {
- ASSERT(NeedsEagerFrame());
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(rsp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
- }
- if (NeedsEagerFrame()) {
- __ movq(rsp, rbp);
- __ pop(rbp);
- }
- if (info()->IsStub()) {
- __ Ret(0, r10);
- } else {
- __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
- }
-}
-
-
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ LoadGlobalCell(result, instr->hydrogen()->cell());
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(rax));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- __ Move(rcx, instr->name());
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
- RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted. We deoptimize in that case.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- // We have a temp because CompareRoot might clobber kScratchRegister.
- Register cell = ToRegister(instr->temp());
- ASSERT(!value.is(cell));
- __ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
- // Store the value.
- __ movq(Operand(cell, 0), value);
- } else {
- // Store the value.
- __ movq(kScratchRegister, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ movq(Operand(kScratchRegister, 0), value);
- }
- // Cells are always rescanned, so no write barrier here.
-}
-
-
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(rdx));
- ASSERT(ToRegister(instr->value()).is(rax));
-
- __ Move(rcx, instr->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ movq(result, ContextOperand(context, instr->slot_index()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- Label is_not_hole;
- __ j(not_equal, &is_not_hole, Label::kNear);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ bind(&is_not_hole);
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
-
- Operand target = ContextOperand(context, instr->slot_index());
-
- Label skip_assignment;
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ j(not_equal, &skip_assignment);
- }
- }
- __ movq(target, value);
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- int offset = Context::SlotOffset(instr->slot_index());
- Register scratch = ToRegister(instr->temp());
- __ RecordWriteContextSlot(context,
- offset,
- value,
- scratch,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
- } else {
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ movq(result, FieldOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
- Handle<Map>(current->map()));
- DeoptimizeIf(not_equal, env);
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- }
-}
-
-
-// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
-// prototype chain, which causes unbounded code generation.
-static bool CompactEmit(SmallMapList* list,
- Handle<String> name,
- int i,
- Isolate* isolate) {
- Handle<Map> map = list->at(i);
- // If the map has ElementsKind transitions, we will generate map checks
- // for each kind in __ CompareMap(..., ALLOW_ELEMENTS_TRANSITION_MAPS).
- if (map->HasElementsTransition()) return false;
- LookupResult lookup(isolate);
- map->LookupDescriptor(NULL, *name, &lookup);
- return lookup.IsField() || lookup.IsConstantFunction();
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- bool all_are_compact = true;
- for (int i = 0; i < map_count; ++i) {
- if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
- all_are_compact = false;
- break;
- }
- }
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
- if (last && !need_generic) {
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- bool compact = all_are_compact ? true :
- CompactEmit(instr->hydrogen()->types(), name, i, isolate());
- __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
- __ bind(&next);
- }
- }
- if (need_generic) {
- __ Move(rcx, name);
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(rax));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- __ Move(rcx, instr->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // Check that the function really is a function.
- __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
- DeoptimizeIf(not_equal, instr->environment());
-
- // Check whether the function has an instance prototype.
- Label non_instance;
- __ testb(FieldOperand(result, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- __ j(not_zero, &non_instance, Label::kNear);
-
- // Get the prototype or initial map from the function.
- __ movq(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
- __ j(not_equal, &done, Label::kNear);
-
- // Get the prototype from the initial map.
- __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
- __ jmp(&done, Label::kNear);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in the function's map.
- __ bind(&non_instance);
- __ movq(result, FieldOperand(result, Map::kConstructorOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, ok, fail;
- __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(equal, &done, Label::kNear);
- __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &done, Label::kNear);
- Register temp((result.is(rax)) ? rbx : rax);
- __ push(temp);
- __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
- __ and_(temp, Immediate(Map::kElementsKindMask));
- __ shr(temp, Immediate(Map::kElementsKindShift));
- __ cmpl(temp, Immediate(GetInitialFastElementsKind()));
- __ j(less, &fail, Label::kNear);
- __ cmpl(temp, Immediate(TERMINAL_FAST_ELEMENTS_KIND));
- __ j(less_equal, &ok, Label::kNear);
- __ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ j(less, &fail, Label::kNear);
- __ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ j(less_equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed");
- __ bind(&ok);
- __ pop(temp);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ movq(result, FieldOperand(input,
- ExternalPixelArray::kExternalPointerOffset));
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- if (instr->index()->IsRegister()) {
- __ subl(length, ToRegister(instr->index()));
- } else {
- __ subl(length, ToOperand(instr->index()));
- }
- __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed (in this case) instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index argument
- // to the bounds check, which can be tagged, so that case must be
- // handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- elements_kind,
- 0,
- instr->additional_index()));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(ToDoubleRegister(instr->result()), operand);
- } else {
- Register result(ToRegister(instr->result()));
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsxbq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- __ movzxbq(result, operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsxwq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzxwq(result, operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ movsxlq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(result, operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ testl(result, result);
- DeoptimizeIf(negative, instr->environment());
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(),
- key,
- FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
- __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
- }
-
- Operand double_load_operand = BuildFastArrayOperand(
- instr->elements(),
- key,
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- __ movsd(result, double_load_operand);
-}
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register result = ToRegister(instr->result());
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that
- // case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
- // Load the result.
- __ movq(result,
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- Condition smi = __ CheckSmi(result);
- DeoptimizeIf(NegateCondition(smi), instr->environment());
- } else {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
-Operand LCodeGen::BuildFastArrayOperand(
- LOperand* elements_pointer,
- LOperand* key,
- ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index) {
- Register elements_pointer_reg = ToRegister(elements_pointer);
- int shift_size = ElementsKindToShiftSize(elements_kind);
- if (key->IsConstantOperand()) {
- int constant_value = ToInteger32(LConstantOperand::cast(key));
- if (constant_value & 0xF0000000) {
- Abort("array index constant value too big");
- }
- return Operand(elements_pointer_reg,
- ((constant_value + additional_index) << shift_size)
- + offset);
- } else {
- ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(elements_pointer_reg,
- ToRegister(key),
- scale_factor,
- offset + (additional_index << shift_size));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(rdx));
- ASSERT(ToRegister(instr->key()).is(rax));
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(rsp, -2 * kPointerSize));
- } else {
- // Check for arguments adapter frame.
- Label done, adapted;
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted, Label::kNear);
-
- // No arguments adaptor frame.
- __ movq(result, rbp);
- __ jmp(&done, Label::kNear);
-
- // Arguments adaptor frame present.
- __ bind(&adapted);
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register result = ToRegister(instr->result());
-
- Label done;
-
- // If no arguments adaptor frame the number of arguments is fixed.
- if (instr->elements()->IsRegister()) {
- __ cmpq(rbp, ToRegister(instr->elements()));
- } else {
- __ cmpq(rbp, ToOperand(instr->elements()));
- }
- __ movl(result, Immediate(scope()->num_parameters()));
- __ j(equal, &done, Label::kNear);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiToInteger32(result,
- Operand(result,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
-
- // Do not transform the receiver to object for strict mode
- // functions.
- __ movq(kScratchRegister,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &receiver_ok, Label::kNear);
-
- // Do not transform the receiver to object for builtins.
- __ testb(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &receiver_ok, Label::kNear);
-
- // Normal function. Replace undefined or null with global receiver.
- __ CompareRoot(receiver, Heap::kNullValueRootIndex);
- __ j(equal, &global_object, Label::kNear);
- __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
- __ j(equal, &global_object, Label::kNear);
-
- // The receiver should be a JS object.
- Condition is_smi = __ CheckSmi(receiver);
- DeoptimizeIf(is_smi, instr->environment());
- __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
- DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok, Label::kNear);
-
- __ bind(&global_object);
- // TODO(kmillikin): We have a hydrogen value for the global object. See
- // if it's better to use it than to explicitly fetch it from the context
- // here.
- __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
- __ bind(&receiver_ok);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- ASSERT(receiver.is(rax)); // Used for parameter count.
- ASSERT(function.is(rdi)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(rax));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ cmpq(length, Immediate(kArgumentsLimit));
- DeoptimizeIf(above, instr->environment());
-
- __ push(receiver);
- __ movq(receiver, length);
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ testl(length, length);
- __ j(zero, &invoke, Label::kNear);
- __ bind(&loop);
- __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
- __ decl(length);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(rax);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- EmitPushTaggedOperand(argument);
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- Register result = ToRegister(instr->result());
- __ movq(result, rsi);
-}
-
-
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ movq(result,
- Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- __ push(rsi); // The context is the first argument.
- __ PushHeapObject(instr->hydrogen()->pairs());
- __ Push(Smi::FromInt(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register result = ToRegister(instr->result());
- __ movq(result, instr->qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr,
- CallKind call_kind,
- RDIState rdi_state) {
- bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
- function->shared()->formal_parameter_count() == arity;
-
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- if (can_invoke_directly) {
- if (rdi_state == RDI_UNINITIALIZED) {
- __ LoadHeapObject(rdi, function);
- }
-
- // Change context.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Set rax to arguments count if adaption is not needed. Assumes that rax
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ Set(rax, arity);
- }
-
- // Invoke function.
- __ SetCallKind(rcx, call_kind);
- if (*function == *info()->closure()) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- }
-
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
- } else {
- // We need to adapt arguments.
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
- __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
- }
-
- // Restore context.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- CallKnownFunction(instr->function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- RDI_UNINITIALIZED);
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->value());
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
-
- Label done;
- Register tmp = input_reg.is(rax) ? rcx : rax;
- Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- Label negative;
- __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it. We do not need to patch the stack since |input| and
- // |result| are the same register and |input| will be restored
- // unchanged by popping safepoint registers.
- __ testl(tmp, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative);
- __ jmp(&done);
-
- __ bind(&negative);
-
- Label allocated, slow;
- __ AllocateHeapNumber(tmp, tmp2, &slow);
- __ jmp(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- // Set the pointer to the new heap number in tmp.
- if (!tmp.is(rax)) {
- __ movq(tmp, rax);
- }
-
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
-
- __ bind(&allocated);
- __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ shl(tmp2, Immediate(1));
- __ shr(tmp2, Immediate(1));
- __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
- __ StoreToSafepointRegisterSlot(input_reg, tmp);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->value());
- __ testl(input_reg, input_reg);
- Label is_positive;
- __ j(not_sign, &is_positive);
- __ negl(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr->environment());
- __ bind(&is_positive);
-}
-
-
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LUnaryMathOperation* instr_;
- };
-
- ASSERT(instr->value()->Equals(instr->result()));
- Representation r = instr->hydrogen()->value()->representation();
-
- if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ xorps(scratch, scratch);
- __ subsd(scratch, input_reg);
- __ andpd(input_reg, scratch);
- } else if (r.IsInteger32()) {
- EmitIntegerMathAbs(instr);
- } else { // Tagged case.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input_reg = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiToInteger32(input_reg, input_reg);
- EmitIntegerMathAbs(instr);
- __ Integer32ToSmi(input_reg, input_reg);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
-
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope scope(SSE4_1);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Deoptimize if minus zero.
- __ movq(output_reg, input_reg);
- __ subq(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
- }
- __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
- __ cvttsd2si(output_reg, xmm_scratch);
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
- } else {
- Label negative_sign, done;
- // Deoptimize on unordered.
- __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr->environment());
- __ j(below, &negative_sign, Label::kNear);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Check for negative zero.
- Label positive_sign;
- __ j(above, &positive_sign, Label::kNear);
- __ movmskpd(output_reg, input_reg);
- __ testq(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ Set(output_reg, 0);
- __ jmp(&done);
- __ bind(&positive_sign);
- }
-
- // Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, input_reg);
- // Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
- __ jmp(&done, Label::kNear);
-
- // Non-zero negative reaches here.
- __ bind(&negative_sign);
- // Truncate, then compare and compensate.
- __ cvttsd2si(output_reg, input_reg);
- __ cvtlsi2sd(xmm_scratch, output_reg);
- __ ucomisd(input_reg, xmm_scratch);
- __ j(equal, &done, Label::kNear);
- __ subl(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
-
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- const XMMRegister xmm_scratch = xmm0;
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
- static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
-
- Label done, round_to_zero, below_one_half, do_not_compensate, restore;
- __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(xmm_scratch, input_reg);
- __ j(above, &below_one_half);
-
- // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
- __ addsd(xmm_scratch, input_reg);
- __ cvttsd2si(output_reg, xmm_scratch);
- // Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
- __ jmp(&done);
-
- __ bind(&below_one_half);
- __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(xmm_scratch, input_reg);
- __ j(below_equal, &round_to_zero);
-
- // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
- // compare and compensate.
- __ movq(kScratchRegister, input_reg); // Back up input_reg.
- __ subsd(input_reg, xmm_scratch);
- __ cvttsd2si(output_reg, input_reg);
- // Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmpl(output_reg, Immediate(0x80000000));
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
-
- __ cvtlsi2sd(xmm_scratch, output_reg);
- __ ucomisd(input_reg, xmm_scratch);
- __ j(equal, &restore, Label::kNear);
- __ subl(output_reg, Immediate(1));
- // No overflow because we already ruled out minint.
- __ bind(&restore);
- __ movq(input_reg, kScratchRegister); // Restore input_reg.
- __ jmp(&done);
-
- __ bind(&round_to_zero);
- // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
- // we can ignore the difference between a result of -0 and +0.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ movq(output_reg, input_reg);
- __ testq(output_reg, output_reg);
- __ RecordComment("Minus zero");
- DeoptimizeIf(negative, instr->environment());
- }
- __ Set(output_reg, 0);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ sqrtsd(input_reg, input_reg);
-}
-
-
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
-
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label done, sqrt;
- // Check base for -Infinity. According to IEEE-754, double-precision
- // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
- __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(xmm_scratch, input_reg);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &sqrt, Label::kNear);
- __ j(carry, &sqrt, Label::kNear);
- // If input is -Infinity, return Infinity.
- __ xorps(input_reg, input_reg);
- __ subsd(input_reg, xmm_scratch);
- __ jmp(&done, Label::kNear);
-
- // Square root.
- __ bind(&sqrt);
- __ xorps(xmm_scratch, xmm_scratch);
- __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
- __ sqrtsd(input_reg, input_reg);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
-
- // Choose register conforming to calling convention (when bailing out).
-#ifdef _WIN64
- Register exponent = rdx;
-#else
- Register exponent = rdi;
-#endif
- ASSERT(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(exponent));
- ASSERT(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(xmm1));
- ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
- ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
-
- if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(exponent, &no_deopt);
- __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-
-void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-
- // Choose the right register for the first argument depending on
- // calling convention.
-#ifdef _WIN64
- ASSERT(ToRegister(instr->global_object()).is(rcx));
- Register global_object = rcx;
-#else
- ASSERT(ToRegister(instr->global_object()).is(rdi));
- Register global_object = rdi;
-#endif
-
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
-
- __ movq(global_object,
- FieldOperand(global_object, GlobalObject::kNativeContextOffset));
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
- // rbx: FixedArray of the native context's random seeds
-
- // Load state[0].
- __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
- // If state[0] == 0, call runtime to initialize seeds.
- __ testl(rax, rax);
- __ j(zero, deferred->entry());
- // Load state[1].
- __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- // Only operate on the lower 32 bit of rax.
- __ movzxwl(rdx, rax);
- __ imull(rdx, rdx, Immediate(18273));
- __ shrl(rax, Immediate(16));
- __ addl(rax, rdx);
- // Save state[0].
- __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzxwl(rdx, rcx);
- __ imull(rdx, rdx, Immediate(36969));
- __ shrl(rcx, Immediate(16));
- __ addl(rcx, rdx);
- // Save state[1].
- __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ shll(rax, Immediate(14));
- __ andl(rcx, Immediate(0x3FFFF));
- __ addl(rax, rcx);
-
- __ bind(deferred->exit());
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ movq(rcx, V8_INT64_C(0x4130000000000000),
- RelocInfo::NONE64); // 1.0 x 2^20 as double
- __ movq(xmm2, rcx);
- __ movd(xmm1, rax);
- __ xorps(xmm1, xmm2);
- __ subsd(xmm1, xmm2);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Return value is in rax.
-}
-
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- XMMRegister input = ToDoubleRegister(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
-}
-
-
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- ASSERT(ToRegister(instr->function()).is(rdi));
- ASSERT(instr->HasPointerMap());
-
- if (instr->known_function().is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- } else {
- CallKnownFunction(instr->known_function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- RDI_CONTAINS_TARGET);
- }
-}
-
-
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->key()).is(rcx));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ Move(rcx, instr->name());
- CallCode(ic, mode, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->function()).is(rdi));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ Move(rcx, instr->name());
- CallCode(ic, mode, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- CallKnownFunction(instr->target(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- RDI_UNINITIALIZED);
-}
-
-
-void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->constructor()).is(rdi));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- __ Set(rax, instr->arity());
- if (FLAG_optimize_constructed_arrays) {
- // No cell in ebx for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_value);
- }
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- ASSERT(ToRegister(instr->constructor()).is(rdi));
- ASSERT(ToRegister(instr->result()).is(rax));
- ASSERT(FLAG_optimize_constructed_arrays);
-
- __ Set(rax, instr->arity());
- __ Move(rbx, instr->hydrogen()->property_cell());
- Handle<Code> array_construct_code =
- isolate()->builtins()->ArrayConstructCode();
- CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
- int offset = instr->offset();
-
- if (!instr->transition().is_null()) {
- if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
- __ Move(FieldOperand(object, HeapObject::kMapOffset),
- instr->transition());
- } else {
- Register temp = ToRegister(instr->temp());
- __ Move(kScratchRegister, instr->transition());
- __ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
- // Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- kScratchRegister,
- temp,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
- }
-
- // Do the store.
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
- __ movq(FieldOperand(object, offset), value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- Register temp = ToRegister(instr->temp());
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(object,
- offset,
- value,
- temp,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
- } else {
- Register temp = ToRegister(instr->temp());
- __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(temp, offset), value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWriteField(temp,
- offset,
- value,
- object,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
- }
-}
-
-
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(rdx));
- ASSERT(ToRegister(instr->value()).is(rax));
-
- __ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check()) return;
-
- if (instr->length()->IsRegister()) {
- Register reg = ToRegister(instr->length());
- if (!instr->hydrogen()->length()->representation().IsTagged()) {
- __ AssertZeroExtended(reg);
- }
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
- __ Cmp(reg, Smi::FromInt(constant_index));
- } else {
- __ cmpq(reg, Immediate(constant_index));
- }
- } else {
- Register reg2 = ToRegister(instr->index());
- if (!instr->hydrogen()->index()->representation().IsTagged()) {
- __ AssertZeroExtended(reg2);
- }
- __ cmpq(reg, reg2);
- }
- } else {
- Operand length = ToOperand(instr->length());
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
- __ Cmp(length, Smi::FromInt(constant_index));
- } else {
- __ cmpq(length, Immediate(constant_index));
- }
- } else {
- __ cmpq(length, ToRegister(instr->index()));
- }
- }
- DeoptimizeIf(below_equal, instr->environment());
-}
-
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- elements_kind,
- 0,
- instr->additional_index()));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister value(ToDoubleRegister(instr->value()));
- __ cvtsd2ss(value, value);
- __ movss(operand, value);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(operand, ToDoubleRegister(instr->value()));
- } else {
- Register value(ToRegister(instr->value()));
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(operand, value);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(operand, value);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(operand, value);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- XMMRegister value = ToDoubleRegister(instr->value());
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
- if (instr->NeedsCanonicalization()) {
- Label have_value;
-
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
-
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- __ movq(value, kScratchRegister);
-
- __ bind(&have_value);
- }
-
- Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- key,
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
-
- __ movsd(double_store_operand, value);
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
- Operand operand =
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- ASSERT(!instr->key()->IsConstantOperand());
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- Register key_reg(ToRegister(key));
- __ lea(key_reg, operand);
- __ movq(Operand(key_reg, 0), value);
- __ RecordWrite(elements,
- key_reg,
- value,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- } else {
- __ movq(operand, value);
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- if (instr->is_external()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(rdx));
- ASSERT(ToRegister(instr->key()).is(rcx));
- ASSERT(ToRegister(instr->value()).is(rax));
-
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
- __ j(not_equal, &not_applicable);
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
- __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
- // Write barrier.
- ASSERT_NE(instr->temp(), NULL);
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- ToRegister(instr->temp()), kDontSaveFPRegs);
- } else if (FLAG_compiled_transitions) {
- PushSafepointRegistersScope scope(this);
- if (!object_reg.is(rax)) {
- __ movq(rax, object_reg);
- }
- __ Move(rbx, to_map);
- TransitionElementsKindStub stub(from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(rdx));
- Register new_map_reg = ToRegister(instr->new_map_temp());
- ASSERT(new_map_reg.is(rbx));
- __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
- __ movq(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
- RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(rdx));
- Register new_map_reg = ToRegister(instr->new_map_temp());
- ASSERT(new_map_reg.is(rbx));
- __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
- __ movq(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
- RelocInfo::CODE_TARGET, instr);
- } else {
- UNREACHABLE();
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationSiteInfo(object, temp);
- DeoptimizeIf(equal, instr->environment());
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- EmitPushTaggedOperand(instr->left());
- EmitPushTaggedOperand(instr->right());
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
-
- StringCharLoadGenerator::Generate(masm(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ Push(Smi::FromInt(const_index));
- } else {
- Register index = ToRegister(instr->index());
- __ Integer32ToSmi(index, index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
- __ AssertSmi(rax);
- __ SmiToInteger32(rax, rax);
- __ StoreToSafepointRegisterSlot(result, rax);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- ASSERT(!char_code.is(result));
-
- __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
- __ j(above, deferred->entry());
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ movq(result, FieldOperand(result,
- char_code, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
- __ j(equal, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ Integer32ToSmi(char_code, char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
- __ StoreToSafepointRegisterSlot(result, rax);
-}
-
-
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(string, String::kLengthOffset));
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
- if (input->IsRegister()) {
- __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
- } else {
- __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
- }
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- LOperand* temp = instr->temp();
-
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- __ Integer32ToSmi(reg, reg);
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagU(instr_);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagU* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ cmpl(reg, Immediate(Smi::kMaxValue));
- __ j(above, deferred->entry());
- __ Integer32ToSmi(reg, reg);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
- Label slow;
- Register reg = ToRegister(instr->value());
- Register tmp = reg.is(rax) ? rcx : rax;
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- Label done;
- // Load value into xmm1 which will be preserved across potential call to
- // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
- // XMM registers on x64).
- __ LoadUint32(xmm1, reg, xmm0);
-
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(reg, tmp, &slow);
- __ jmp(&done, Label::kNear);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- // Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(reg, Immediate(0));
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- if (!reg.is(rax)) __ movq(reg, rax);
-
- // Done. Put the value in xmm1 into the value of the allocated heap
- // number.
- __ bind(&done);
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm1);
- __ StoreToSafepointRegisterSlot(reg, reg);
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagD* instr_;
- };
-
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- Register reg = ToRegister(instr->result());
- Register tmp = ToRegister(instr->temp());
-
- bool convert_hole = false;
- HValue* change_input = instr->hydrogen()->value();
- if (change_input->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(change_input);
- convert_hole = load->UsesMustHandleHole();
- }
-
- Label no_special_nan_handling;
- Label done;
- if (convert_hole) {
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ ucomisd(input_reg, input_reg);
- __ j(parity_odd, &no_special_nan_handling);
- __ subq(rsp, Immediate(kDoubleSize));
- __ movsd(MemOperand(rsp, 0), input_reg);
- __ cmpl(MemOperand(rsp, sizeof(kHoleNanLower32)),
- Immediate(kHoleNanUpper32));
- Label canonicalize;
- __ j(not_equal, &canonicalize);
- __ addq(rsp, Immediate(kDoubleSize));
- __ Move(reg, factory()->the_hole_value());
- __ jmp(&done);
- __ bind(&canonicalize);
- __ addq(rsp, Immediate(kDoubleSize));
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- __ movq(input_reg, kScratchRegister);
- }
-
- __ bind(&no_special_nan_handling);
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(reg, tmp, deferred->entry());
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ Move(reg, Smi::FromInt(0));
-
- {
- PushSafepointRegistersScope scope(this);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- // Ensure that value in rax survives popping registers.
- __ movq(kScratchRegister, rax);
- }
- __ movq(reg, kScratchRegister);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(instr->value()->Equals(instr->result()));
- Register input = ToRegister(instr->value());
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ Integer32ToSmi(input, input);
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- ASSERT(instr->value()->Equals(instr->result()));
- Register input = ToRegister(instr->value());
- if (instr->needs_check()) {
- Condition is_smi = __ CheckSmi(input);
- DeoptimizeIf(NegateCondition(is_smi), instr->environment());
- } else {
- __ AssertSmi(input);
- }
- __ SmiToInteger32(input, input);
-}
-
-
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- XMMRegister result_reg,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode) {
- Label load_smi, done;
-
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
-
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- if (deoptimize_on_undefined) {
- DeoptimizeIf(not_equal, env);
- } else {
- Label heap_number;
- __ j(equal, &heap_number, Label::kNear);
-
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, env);
-
- // Convert undefined to NaN. Compute NaN as 0/0.
- __ xorps(result_reg, result_reg);
- __ divsd(result_reg, result_reg);
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
- }
- // Heap number to XMM conversion.
- __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, result_reg);
- __ j(not_equal, &done, Label::kNear);
- __ movmskpd(kScratchRegister, result_reg);
- __ testq(kScratchRegister, Immediate(1));
- DeoptimizeIf(not_zero, env);
- }
- __ jmp(&done, Label::kNear);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
- __ testq(input_reg, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, env);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
- __ testq(input_reg, Immediate(kSmiTagMask));
- __ j(zero, &load_smi);
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::hole_nan_as_double()));
- __ movq(result_reg, kScratchRegister);
- __ jmp(&done, Label::kNear);
- } else {
- ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
- }
-
- // Smi to XMM conversion
- __ bind(&load_smi);
- __ SmiToInteger32(kScratchRegister, input_reg);
- __ cvtlsi2sd(result_reg, kScratchRegister);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Label done, heap_number;
- Register input_reg = ToRegister(instr->value());
-
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
-
- if (instr->truncating()) {
- __ j(equal, &heap_number, Label::kNear);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
- __ Set(input_reg, 0);
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
-
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2siq(input_reg, xmm0);
- __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpq(input_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
- } else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(not_equal, instr->environment());
-
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, xmm0);
- __ cvtlsi2sd(xmm_temp, input_reg);
- __ ucomisd(xmm0, xmm_temp);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(input_reg, input_reg);
- __ j(not_zero, &done);
- __ movmskpd(input_reg, xmm0);
- __ andl(input_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiToInteger32(input_reg, input_reg);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- XMMRegister result_reg = ToDoubleRegister(result);
-
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
- HValue* value = instr->hydrogen()->value();
- if (value->type().IsSmi()) {
- if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
- }
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
- }
- }
- }
-
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsDoubleRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsRegister());
-
- XMMRegister input_reg = ToDoubleRegister(input);
- Register result_reg = ToRegister(result);
-
- if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- __ cvttsd2siq(result_reg, input_reg);
- __ movq(kScratchRegister,
- V8_INT64_C(0x8000000000000000),
- RelocInfo::NONE64);
- __ cmpq(result_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ cvttsd2si(result_reg, input_reg);
- __ cvtlsi2sd(xmm0, result_reg);
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ testl(result_reg, result_reg);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ andl(result_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&done);
- }
- }
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(NegateCondition(cc), instr->environment());
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(cc, instr->environment());
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
-
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(first)));
-
- // If there is only one type in the interval check for equality.
- if (first == last) {
- DeoptimizeIf(not_equal, instr->environment());
- } else {
- DeoptimizeIf(below, instr->environment());
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(last)));
- DeoptimizeIf(above, instr->environment());
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (IsPowerOf2(mask)) {
- ASSERT(tag == 0 || IsPowerOf2(tag));
- __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(mask));
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
- } else {
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ andb(kScratchRegister, Immediate(mask));
- __ cmpb(kScratchRegister, Immediate(tag));
- DeoptimizeIf(not_equal, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (isolate()->heap()->InNewSpace(*target)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
- __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ cmpq(reg, Operand(kScratchRegister, 0));
- } else {
- __ Cmp(reg, target);
- }
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMapCommon(Register reg,
- Handle<Map> map,
- CompareMapMode mode,
- LInstruction* instr) {
- Label success;
- __ CompareMap(reg, map, &success, mode);
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&success);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- Register reg = ToRegister(input);
-
- Label success;
- SmallMapList* map_set = instr->hydrogen()->map_set();
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
- __ j(equal, &success);
- }
- Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- ASSERT(instr->unclamped()->Equals(instr->result()));
- Register value_reg = ToRegister(instr->result());
- __ ClampUint8(value_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- ASSERT(instr->unclamped()->Equals(instr->result()));
- Register input_reg = ToRegister(instr->unclamped());
- XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
- Label is_smi, done, heap_number;
-
- __ JumpIfSmi(input_reg, &is_smi);
-
- // Check for heap number
- __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ Cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
- __ movq(input_reg, Immediate(0));
- __ jmp(&done, Label::kNear);
-
- // Heap number
- __ bind(&heap_number);
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg);
- __ jmp(&done, Label::kNear);
-
- // smi
- __ bind(&is_smi);
- __ SmiToInteger32(input_reg, input_reg);
- __ ClampUint8(input_reg);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
- Register reg = ToRegister(instr->temp());
-
- ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
- ZoneList<Handle<Map> >* maps = instr->maps();
-
- ASSERT(prototypes->length() == maps->length());
-
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- __ LoadHeapObject(reg, prototypes->at(prototypes->length() - 1));
- } else {
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(reg, prototypes->at(i));
- DoCheckMapCommon(reg, maps->at(i), ALLOW_ELEMENT_TRANSITION_MAPS, instr);
- }
- }
-}
-
-
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- // Allocate memory for the object. The initial map might change when
- // the constructor's prototype changes, but instance size and property
- // counts remain unchanged (if slack tracking finished).
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- no_reg,
- scratch,
- deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(scratch, constructor);
- __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(map);
- __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
- Immediate(instance_size >> kPointerSizeLog2));
- __ Assert(equal, "Unexpected instance size");
- __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
- Immediate(initial_map->pre_allocated_property_fields()));
- __ Assert(equal, "Unexpected pre-allocated property fields count");
- __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
- Immediate(initial_map->unused_property_fields()));
- __ Assert(equal, "Unexpected unused property fields count");
- __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
- Immediate(initial_map->inobject_properties()));
- __ Assert(equal, "Unexpected in-object property fields count");
- }
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ movq(FieldOperand(result, JSObject::kMapOffset), map);
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
- __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ movq(FieldOperand(result, property_offset), scratch);
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ Push(Smi::FromInt(instance_size));
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(result, rax);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
-
- // Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
- } else {
- Register size = ToRegister(instr->size());
- __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
- }
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ Integer32ToSmi(size, size);
- __ push(size);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(result, rax);
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
- AllocationSiteMode allocation_site_mode =
- instr->hydrogen()->allocation_site_mode();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ movb(rbx, FieldOperand(rbx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(rbx, Immediate(Map::kElementsKindMask));
- __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
- Map::kElementsKindShift));
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Set up the parameters to the stub/runtime call.
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ Push(isolate()->factory()->empty_fixed_array());
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode) {
- ASSERT(!source.is(rcx));
- ASSERT(!result.is(rcx));
-
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- object->map()->CanTrackAllocationSite();
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_size = has_elements ? elements->Size() : 0;
- int elements_offset = *offset + object_size;
- if (create_allocation_site_info) {
- elements_offset += AllocationSiteInfo::kSize;
- *offset += AllocationSiteInfo::kSize;
- }
-
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ lea(rcx, Operand(result, elements_offset));
- } else {
- __ movq(rcx, FieldOperand(source, i));
- }
- __ movq(FieldOperand(result, object_offset + i), rcx);
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
- isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(rcx, Operand(result, *offset));
- __ movq(FieldOperand(result, total_offset), rcx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
- __ movq(FieldOperand(result, total_offset), rcx);
- } else {
- __ movq(rcx, value, RelocInfo::NONE64);
- __ movq(FieldOperand(result, total_offset), rcx);
- }
- }
-
- // Build Allocation Site Info if desired
- if (create_allocation_site_info) {
- __ LoadRoot(kScratchRegister, Heap::kAllocationSiteInfoMapRootIndex);
- __ movq(FieldOperand(result, object_size), kScratchRegister);
- __ movq(FieldOperand(result, object_size + kPointerSize), source);
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ movq(rcx, FieldOperand(source, i));
- __ movq(FieldOperand(result, elements_offset + i), rcx);
- }
-
- // Copy elements backing store content.
- int elements_length = elements->length();
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ movq(rcx, value, RelocInfo::NONE64);
- __ movq(FieldOperand(result, total_offset), rcx);
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i), isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(rcx, Operand(result, *offset));
- __ movq(FieldOperand(result, total_offset), rcx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
- __ movq(FieldOperand(result, total_offset), rcx);
- } else {
- __ movq(rcx, value, RelocInfo::NONE64);
- __ movq(FieldOperand(result, total_offset), rcx);
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
- __ movq(rcx, FieldOperand(rbx, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ movb(rcx, FieldOperand(rcx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(rcx, Immediate(Map::kElementsKindMask));
- __ cmpb(rcx, Immediate(boilerplate_elements_kind <<
- Map::kElementsKindShift));
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset,
- instr->hydrogen()->allocation_site_mode());
- ASSERT_EQ(size, offset);
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= instr->hydrogen()->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
-
- // Set up the parameters to the stub/runtime call and pick the right
- // runtime function or stub to call.
- int properties_count = constant_properties->length() / 2;
- if (instr->hydrogen()->depth() > 1) {
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(constant_properties);
- __ Push(Smi::FromInt(flags));
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(constant_properties);
- __ Push(Smi::FromInt(flags));
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- } else {
- __ LoadHeapObject(rax, literals);
- __ Move(rbx, Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Move(rcx, constant_properties);
- __ Move(rdx, Smi::FromInt(flags));
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->value()).is(rax));
- __ push(rax);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- Label materialized;
- // Registers will be used as follows:
- // rcx = literals array.
- // rbx = regexp literal.
- // rax = regexp literal clone.
- int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(rcx, instr->hydrogen()->literals());
- __ movq(rbx, FieldOperand(rcx, literal_offset));
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in rax.
- __ push(rcx);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(instr->hydrogen()->pattern());
- __ Push(instr->hydrogen()->flags());
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ movq(rbx, rax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(rbx);
- __ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(rbx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
- }
-}
-
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
- __ Push(shared_info);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- __ push(rsi);
- __ Push(shared_info);
- __ PushRoot(pretenure ?
- Heap::kTrueValueRootIndex :
- Heap::kFalseValueRootIndex);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- LOperand* input = instr->value();
- EmitPushTaggedOperand(input);
- CallRuntime(Runtime::kTypeof, 1, instr);
-}
-
-
-void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
- ASSERT(!operand->IsDoubleRegister());
- if (operand->IsConstantOperand()) {
- Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
- if (object->IsSmi()) {
- __ Push(Handle<Smi>::cast(object));
- } else {
- __ PushHeapObject(Handle<HeapObject>::cast(object));
- }
- } else if (operand->IsRegister()) {
- __ push(ToRegister(operand));
- } else {
- __ push(ToOperand(operand));
- }
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition final_branch_condition =
- EmitTypeofIs(true_label, false_label, input, instr->type_literal());
- if (final_branch_condition != no_condition) {
- EmitBranch(true_block, false_block, final_branch_condition);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
- Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_string())) {
- __ JumpIfSmi(input, true_label);
- __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
-
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->string_string())) {
- __ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label);
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = zero;
-
- } else if (type_name->Equals(heap()->boolean_string())) {
- __ CompareRoot(input, Heap::kTrueValueRootIndex);
- __ j(equal, true_label);
- __ CompareRoot(input, Heap::kFalseValueRootIndex);
- final_branch_condition = equal;
-
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->undefined_string())) {
- __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
- __ j(equal, true_label);
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = not_zero;
-
- } else if (type_name->Equals(heap()->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
- __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->object_string())) {
- __ JumpIfSmi(input, false_label);
- if (!FLAG_harmony_typeof) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ j(equal, true_label);
- }
- if (FLAG_harmony_symbols) {
- __ CmpObjectType(input, SYMBOL_TYPE, input);
- __ j(equal, true_label);
- __ CmpInstanceType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- } else {
- __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
- }
- __ j(below, false_label);
- __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, false_label);
- // Check for undetectable objects => false.
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = zero;
-
- } else {
- __ jmp(false_label);
- }
-
- return final_branch_condition;
-}
-
-
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- EmitIsConstructCall(temp);
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker, Label::kNear);
- __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->IsStub()) return;
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
- }
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(no_condition, instr->environment());
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- EmitPushTaggedOperand(obj);
- EmitPushTaggedOperand(key);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- // Create safepoint generator that will also ensure enough space in the
- // reloc info for patching in deoptimization (since this is invoking a
- // builtin)
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ Push(Smi::FromInt(strict_mode_flag()));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoIn(LIn* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- EmitPushTaggedOperand(key);
- EmitPushTaggedOperand(obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStackCheck* instr_;
- };
-
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &done, Label::kNear);
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
- __ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- } else {
- ASSERT(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(below, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
-
- Register null_value = rdi;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
- DeoptimizeIf(equal, instr->environment());
-
- Condition cc = masm()->CheckSmi(rax);
- DeoptimizeIf(cc, instr->environment());
-
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
- DeoptimizeIf(below_equal, instr->environment());
-
- Label use_cache, call_runtime;
- __ CheckEnumCache(null_value, &call_runtime);
-
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- __ jmp(&use_cache, Label::kNear);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(rax);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
-
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kMetaMapRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ Cmp(result, Smi::FromInt(0));
- __ j(not_equal, &load_cache);
- __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
- __ jmp(&done);
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ movq(result,
- FieldOperand(result, DescriptorArray::kEnumCacheOffset));
- __ movq(result,
- FieldOperand(result, FixedArray::SizeFor(instr->idx())));
- __ bind(&done);
- Condition cc = masm()->CheckSmi(result);
- DeoptimizeIf(cc, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- __ cmpq(ToRegister(instr->map()),
- FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
-
- Label out_of_object, done;
- __ SmiToInteger32(index, index);
- __ cmpl(index, Immediate(0));
- __ j(less, &out_of_object);
- __ movq(object, FieldOperand(object,
- index,
- times_pointer_size,
- JSObject::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&out_of_object);
- __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
- __ negl(index);
- // Index is now equal to out of object property index plus 1.
- __ movq(object, FieldOperand(object,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize - kPointerSize));
- __ bind(&done);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/lithium-codegen-x64.h b/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
deleted file mode 100644
index 66880aa..0000000
--- a/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
+++ /dev/null
@@ -1,450 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_LITHIUM_CODEGEN_X64_H_
-#define V8_X64_LITHIUM_CODEGEN_X64_H_
-
-#include "x64/lithium-x64.h"
-
-#include "checks.h"
-#include "deoptimizer.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "x64/lithium-gap-resolver-x64.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-class LCodeGen BASE_EMBEDDED {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
- deoptimizations_(4, info->zone()),
- jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
- inlined_function_count_(0),
- scope_(info->scope()),
- status_(UNUSED),
- translations_(info->zone()),
- deferred_(8, info->zone()),
- osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
- bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- // Support for converting LOperands to assembler types.
- Register ToRegister(LOperand* op) const;
- XMMRegister ToDoubleRegister(LOperand* op) const;
- bool IsInteger32Constant(LConstantOperand* op) const;
- int ToInteger32(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
- bool IsTaggedConstant(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagU(LNumberTagU* instr);
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
-
- void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LInstruction* instr);
-
-// Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
-
- LPlatformChunk* chunk() const { return chunk_; }
- Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
-
- int GetNextEmittedBlock(int block);
-
- void EmitClassOfTest(Label* if_true,
- Label* if_false,
- Handle<String> class_name,
- Register input,
- Register temporary,
- Register scratch);
-
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return info()->num_parameters(); }
-
- void Abort(const char* reason);
- void Comment(const char* format, ...);
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- // Code generation passes. Returns true if code generation should
- // continue.
- bool GeneratePrologue();
- bool GenerateBody();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS
- };
-
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode,
- int argc);
-
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
-
- void CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr);
-
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr);
-
- enum RDIState {
- RDI_UNINITIALIZED,
- RDI_CONTAINS_TARGET
- };
-
- // Generate a direct call to a known function. Expects the function
- // to be in rdi.
- void CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr,
- CallKind call_kind,
- RDIState rdi_state);
-
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode,
- int argc);
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
-
- void AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
- void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
- Register ToRegister(int index) const;
- XMMRegister ToDoubleRegister(int index) const;
- Operand BuildFastArrayOperand(
- LOperand* elements_pointer,
- LOperand* key,
- ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index = 0);
-
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
-
- // Support for recording safepoint and position information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordPosition(int position);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
- void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitNumberUntagD(
- Register input,
- XMMRegister result,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name);
-
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Label* is_not_object,
- Label* is_object);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string);
-
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp);
-
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
- // Emits code for pushing either a tagged constant, a (non-double)
- // register, or a stack slot operand.
- void EmitPushTaggedOperand(LOperand* operand);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
-
- struct JumpTableEntry {
- inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
- : label(),
- address(entry),
- needs_frame(frame),
- is_lazy_deopt(is_lazy) { }
- Label label;
- Address address;
- bool needs_frame;
- bool is_lazy_deopt;
- };
-
- void EnsureSpaceForLazyDeopt(int space_needed);
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
- ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
- int inlined_function_count_;
- Scope* const scope_;
- Status status_;
- TranslationBuffer translations_;
- ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
- int last_lazy_deopt_pc_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiler from a set of parallel moves to a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope BASE_EMBEDDED {
- public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen)
- : codegen_(codegen) {
- ASSERT(codegen_->info()->is_calling());
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->masm_->PushSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
- }
-
- ~PushSafepointRegistersScope() {
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- codegen_->masm_->PopSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() { }
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_LITHIUM_CODEGEN_X64_H_
diff --git a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc b/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc
deleted file mode 100644
index 22183a2..0000000
--- a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "x64/lithium-gap-resolver-x64.h"
-#include "x64/lithium-codegen-x64.h"
-
-namespace v8 {
-namespace internal {
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32, owner->zone()) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(moves_.is_empty());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- PerformMove(i);
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- ASSERT(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph. We use operand swaps to resolve cycles,
- // which means that a call to PerformMove could change any source operand
- // in the move graph.
-
- ASSERT(!moves_[index].IsPending());
- ASSERT(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack-allocated local. Recursion may allow
- // multiple moves to be pending.
- ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- // Though PerformMove can change any source operand in the move graph,
- // this call cannot create a blocking move via a swap (this loop does
- // not miss any). Assume there is a non-blocking move with source A
- // and this move is blocked on source B and there is a swap of A and
- // B. Then A and B must be involved in the same cycle (or they would
- // not be swapped). Since this move's destination is B and there is
- // only a single incoming edge to an operand, this move must also be
- // involved in the same cycle. In that case, the blocking move will
- // be created but will be "pending" when we return from PerformMove.
- PerformMove(i);
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // This move's source may have changed due to swaps to resolve cycles and
- // so it may now be the last move in the cycle. If so remove it.
- if (moves_[index].source()->Equals(destination)) {
- moves_[index].Eliminate();
- return;
- }
-
- // The move may be blocked on a (at most one) pending move, in which case
- // we have a cycle. Search for such a blocking move and perform a swap to
- // resolve it.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
- EmitSwap(index);
- return;
- }
- }
-
- // This move is not blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- Register src = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
- } else {
- ASSERT(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movq(dst, src);
- }
-
- } else if (source->IsStackSlot()) {
- Operand src = cgen_->ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
- } else {
- ASSERT(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movq(kScratchRegister, src);
- __ movq(dst, kScratchRegister);
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32Constant(constant_source)) {
- __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
- } else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
- }
- } else {
- ASSERT(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- if (cgen_->IsInteger32Constant(constant_source)) {
- // Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged
- // value.
- __ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
- } else {
- __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source));
- __ movq(dst, kScratchRegister);
- }
- }
-
- } else if (source->IsDoubleRegister()) {
- XMMRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ movaps(cgen_->ToDoubleRegister(destination), src);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- __ movsd(cgen_->ToOperand(destination), src);
- }
- } else if (source->IsDoubleStackSlot()) {
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- __ movsd(cgen_->ToDoubleRegister(destination), src);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- __ movsd(xmm0, src);
- __ movsd(cgen_->ToOperand(destination), xmm0);
- }
- } else {
- UNREACHABLE();
- }
-
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::EmitSwap(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Swap two general-purpose registers.
- Register src = cgen_->ToRegister(source);
- Register dst = cgen_->ToRegister(destination);
- __ xchg(dst, src);
-
- } else if ((source->IsRegister() && destination->IsStackSlot()) ||
- (source->IsStackSlot() && destination->IsRegister())) {
- // Swap a general-purpose register and a stack slot.
- Register reg =
- cgen_->ToRegister(source->IsRegister() ? source : destination);
- Operand mem =
- cgen_->ToOperand(source->IsRegister() ? destination : source);
- __ movq(kScratchRegister, mem);
- __ movq(mem, reg);
- __ movq(reg, kScratchRegister);
-
- } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
- (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
- // Swap two stack slots or two double stack slots.
- Operand src = cgen_->ToOperand(source);
- Operand dst = cgen_->ToOperand(destination);
- __ movsd(xmm0, src);
- __ movq(kScratchRegister, dst);
- __ movsd(dst, xmm0);
- __ movq(src, kScratchRegister);
-
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
- // Swap two double registers.
- XMMRegister source_reg = cgen_->ToDoubleRegister(source);
- XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
- __ movaps(xmm0, source_reg);
- __ movaps(source_reg, destination_reg);
- __ movaps(destination_reg, xmm0);
-
- } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
- // Swap a double register and a double stack slot.
- ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
- (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
- XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
- ? source
- : destination);
- LOperand* other = source->IsDoubleRegister() ? destination : source;
- ASSERT(other->IsDoubleStackSlot());
- Operand other_operand = cgen_->ToOperand(other);
- __ movsd(xmm0, other_operand);
- __ movsd(other_operand, reg);
- __ movsd(reg, xmm0);
-
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
-
- // The swap of source and destination has executed a move from source to
- // destination.
- moves_[index].Eliminate();
-
- // Any unperformed (including pending) move with a source of either
- // this move's source or destination needs to have their source
- // changed to reflect the state of affairs after the swap.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(source)) {
- moves_[i].set_source(destination);
- } else if (other_move.Blocks(destination)) {
- moves_[i].set_source(source);
- }
- }
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.h b/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.h
deleted file mode 100644
index d828455..0000000
--- a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
-#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
-
-#include "v8.h"
-
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Execute a move by emitting a swap of two operands. The move from
- // source to destination is removed from the move graph.
- void EmitSwap(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
diff --git a/src/3rdparty/v8/src/x64/lithium-x64.cc b/src/3rdparty/v8/src/x64/lithium-x64.cc
deleted file mode 100644
index f591437..0000000
--- a/src/3rdparty/v8/src/x64/lithium-x64.cc
+++ /dev/null
@@ -1,2438 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "lithium-allocator-inl.h"
-#include "x64/lithium-x64.h"
-#include "x64/lithium-codegen-x64.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Inputs operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- ASSERT(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "sal-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d",
- *hydrogen()->class_name(),
- true_block_id(),
- false_block_id());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
- true_block_id(), false_block_id());
-}
-
-
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
-}
-
-
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[rcx] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ASSERT(hydrogen()->property_cell()->value()->IsSmi());
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(hydrogen()->property_cell()->value())->value());
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
-
- stream->Add(" length ");
- length()->PrintTo(stream);
-
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
- return spill_slot_count_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- // All stack slots are Double stack slots on x64.
- // Alternatively, at some point, start using half-size
- // stack slots for int32 values.
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
- } else {
- stream->Add("] <- ");
- }
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- ASSERT(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-void LCodeGen::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- XMMRegister::ToAllocationIndex(reg));
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr,
- int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- if (hinstr->HasObservableSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_.IsNone());
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = sim->ast_id();
- }
-
- // If instruction does not have side-effects lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach environment to this call even if
- // call sequence can not deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- operand->set_virtual_register(allocator_->GetVirtualRegister());
- if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseFixed(right_value, rcx);
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
- }
- }
-
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
- HValue* left = instr->left();
- HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- LOperand* left_operand = UseFixed(left, rdx);
- LOperand* right_operand = UseFixed(right, rax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- ASSERT(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- ASSERT(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- ASSERT(last_environment != NULL);
- // Only copy the environment, if it is later used again.
- if (pred->end()->SecondSuccessor() == NULL) {
- ASSERT(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- ASSERT(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment, it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
-
- if (instr != NULL) {
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- instr->set_hydrogen_value(current);
- chunk_->AddInstruction(instr, current_block_);
- }
- current_instruction_ = old_current;
-}
-
-
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
- }
-
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- ASSERT(value->IsConstant());
- ASSERT(!value->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
-
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment.
- Representation rep = value->representation();
- HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
- return AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpMapAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left = UseFixed(instr->left(), rax);
- LOperand* right = UseFixed(instr->right(), rdx);
- LInstanceOf* result = new(zone()) LInstanceOf(left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), rax),
- FixedTemp(rdi));
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegister(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
- LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), rdi);
- LOperand* receiver = UseFixed(instr->receiver(), rax);
- LOperand* length = UseFixed(instr->length(), rbx);
- LOperand* elements = UseFixed(instr->elements(), rcx);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
- LOperand* argument = UseOrConstant(instr->argument());
- return new(zone()) LPushArgument(argument);
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses()
- ? NULL
- : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- // If there is a non-return use, the context must be allocated in a register.
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- return DefineAsRegister(new(zone()) LContext);
- }
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- return DefineAsRegister(new(zone()) LGlobalObject(instr->qml_global()));
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
-}
-
-
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* function = UseFixed(instr->function(), rdi);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
- return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* value = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
- return DefineAsRegister(result);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathSqrt:
- return DefineSameAsFirst(result);
- case kMathPowHalf:
- return DefineSameAsFirst(result);
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* key = UseFixed(instr->key(), rcx);
- argument_count_ -= instr->argument_count();
- LCallKeyed* result = new(zone()) LCallKeyed(key);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- LCallGlobal* result = new(zone()) LCallGlobal(instr->qml_global());
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* constructor = UseFixed(instr->constructor(), rdi);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(constructor);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- ASSERT(FLAG_optimize_constructed_arrays);
- LOperand* constructor = UseFixed(instr->constructor(), rdi);
- argument_count_ -= instr->argument_count();
- LCallNewArray* result = new(zone()) LCallNewArray(constructor);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* function = UseFixed(instr->function(), rdi);
- argument_count_ -= instr->argument_count();
- LCallFunction* result = new(zone()) LCallFunction(function);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new(zone()) LBitI(left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* input = UseRegisterAtStart(instr->value());
- LBitNotI* result = new(zone()) LBitNotI(input);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
- }
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, rax));
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
- }
- return NULL;
-}
-
-
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
- }
- // A value with an integer representation does not need to be transformed.
- if (divisor->representation().IsInteger32()) {
- return divisor;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (divisor->IsChange() &&
- HChange::cast(divisor)->from().IsInteger32()) {
- return HChange::cast(divisor)->value();
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- if (!right->IsConstant()) {
- ASSERT(right->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(flooring_div, rax));
- }
-
- ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
- LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
- int32_t divisor_si = HConstant::cast(right)->Integer32Value();
- if (divisor_si == 0) {
- LOperand* dividend = UseRegister(instr->left());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
- } else if (IsPowerOf2(abs(divisor_si))) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
- } else {
- // use two r64
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LOperand* temp = TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, temp));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LInstruction* result;
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LModI* mod =
- new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
- result = DefineSameAsFirst(mod);
- } else {
- // The temporary operand is necessary to ensure that right is not
- // allocated into edx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* value = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new(zone()) LModI(value, divisor, temp);
- result = DefineFixed(mod, rdx);
- }
-
- return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero))
- ? AssignEnvironment(result)
- : result;
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
- } else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LMulI* mul = new(zone()) LMulI(left, right);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- AssignEnvironment(mul);
- }
- return DefineSameAsFirst(mul);
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new(zone()) LSubI(left, right);
- LInstruction* result = DefineSameAsFirst(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::ADD, instr);
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
- } else {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
- return DefineSameAsFirst(minmax);
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), xmm1) :
-#ifdef _WIN64
- UseFixed(instr->right(), rdx);
-#else
- UseFixed(instr->right(), rdi);
-#endif
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
-#ifdef _WIN64
- LOperand* global_object = UseFixed(instr->global_object(), rcx);
-#else
- LOperand* global_object = UseFixed(instr->global_object(), rdi);
-#endif
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LCmpT* result = new(zone()) LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
- } else {
- ASSERT(r.IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- LOperand* left;
- LOperand* right;
- if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
- left = UseRegisterOrConstantAtStart(instr->left());
- right = UseRegisterOrConstantAtStart(instr->right());
- } else {
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return new(zone()) LCmpIDAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpConstantEqAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsUndetectableAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
-
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
-
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasInstanceTypeAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasCachedArrayIndexAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- LOperand* value = UseRegister(instr->value());
- return new(zone()) LClassOfTestAndBranch(value,
- TempRegister(),
- TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), rax);
- LDateField* result = new(zone()) LDateField(object, instr->index());
- return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- ASSERT(rcx.is_byte_register());
- LOperand* value = UseFixed(instr->value(), rcx);
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
- HInductionVariableAnnotation* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = Use(instr->length());
- return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* value = UseFixed(instr->value(), rax);
- return MarkAsCall(new(zone()) LThrow(value), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- // Only mark conversions that might need to allocate as calling rather than
- // all changes. This makes simple, non-allocating conversion not have to force
- // building a stack frame.
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- if (instr->value()->type().IsSmi()) {
- return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
- } else {
- bool truncating = instr->CanTruncateToInt32();
- LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
- LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
- LOperand* temp = TempRegister();
-
- // Make sure that temp and result_temp are different registers.
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
- return AssignPointerMap(Define(result, result_temp));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegister(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new(zone()) LSmiTag(value));
- } else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- }
- } else {
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp));
- } else {
- ASSERT(to.IsDouble());
- LOperand* value = Use(instr->value());
- return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LCheckInstanceType* result = new(zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- return AssignEnvironment(Define(result, temp));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LCheckMaps* result = new(zone()) LCheckMaps(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg));
- } else if (input_rep.IsInteger32()) {
- return DefineSameAsFirst(new(zone()) LClampIToUint8(reg));
- } else {
- ASSERT(input_rep.IsTagged());
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve xmm1 explicitly.
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg,
- FixedTemp(xmm1));
- return AssignEnvironment(DefineSameAsFirst(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new(zone()) LReturn(UseFixed(instr->value(), rax));
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- LOperand* temp = TempRegister();
- return DefineAsRegister(new(zone()) LConstantD(temp));
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), rax);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* value = UseRegister(instr->value());
- // Use a temp to avoid reloading the cell value address in the case where
- // we perform a hole check.
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
- : new(zone()) LStoreGlobalCell(value, NULL);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), rdx);
- LOperand* value = UseFixed(instr->value(), rax);
- LStoreGlobalGeneric* result = new(zone()) LStoreGlobalGeneric(global_object,
- value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- LOperand* temp;
- if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
- value = UseTempRegister(instr->value());
- temp = TempRegister();
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- temp = NULL;
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- ASSERT(instr->representation().IsTagged());
- LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), rax);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, rax), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), rax);
- LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = instr->key()->representation().IsTagged();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
-
- if (!instr->is_external()) {
- LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
- } else {
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
- }
-
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), rdx);
- LOperand* key = UseFixed(instr->key(), rax);
-
- LLoadKeyedGeneric* result = new(zone()) LLoadKeyedGeneric(object, key);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = instr->key()->representation().IsTagged();
-
- if (!instr->is_external()) {
- ASSERT(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* key = NULL;
- LOperand* val = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
- key = clobbers_key ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- } else {
- ASSERT(instr->value()->representation().IsTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = (clobbers_key || needs_write_barrier)
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- }
-
- return new(zone()) LStoreKeyed(object, key, val);
- }
-
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
- LOperand* key = clobbers_key ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(external_pointer, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), rdx);
- LOperand* key = UseFixed(instr->key(), rcx);
- LOperand* value = UseFixed(instr->value(), rax);
-
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsTagged());
-
- LStoreKeyedGeneric* result =
- new(zone()) LStoreKeyedGeneric(object, key, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* object = UseRegister(instr->object());
- LOperand* new_map_reg = TempRegister();
- LOperand* temp_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
- return result;
- } else if (FLAG_compiled_transitions) {
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL, NULL);
- return AssignPointerMap(result);
- } else {
- LOperand* object = UseFixed(instr->object(), rax);
- LOperand* fixed_object_reg = FixedTemp(rdx);
- LOperand* new_map_reg = FixedTemp(rbx);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object,
- new_map_reg,
- fixed_object_reg);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
- instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = instr->is_in_object()
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else {
- obj = needs_write_barrier_for_map
- ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
-
- // We only need a scratch register if we have a write barrier or we
- // have a store into the properties array (not in-object-property).
- LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
- needs_write_barrier_for_map) ? TempRegister() : NULL;
-
- return new(zone()) LStoreNamedField(obj, val, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), rdx);
- LOperand* value = UseFixed(instr->value(), rax);
-
- LStoreNamedGeneric* result = new(zone()) LStoreNamedGeneric(object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* left = UseOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), rax),
- instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LAllocateObject* result = new(zone()) LAllocateObject(TempRegister());
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* size = UseTempRegister(instr->size());
- LOperand* temp = TempRegister();
- LAllocate* result = new(zone()) LAllocate(size, temp);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseAtStart(instr->object());
- LOperand* key = UseOrConstantAtStart(instr->key());
- LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- ASSERT(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- ASSERT(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
- Register reg = descriptor->register_params_[instr->index()];
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = Use(instr->index());
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), rax);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseAtStart(instr->value()));
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
-
- // If there is an instruction pending deoptimization environment create a
- // lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
- LInstruction* result = AssignEnvironment(lazy_bailout);
- // Store the lazy deopt environment with the instruction if needed. Right
- // now it is only used for LInstanceOfKnownGlobal.
- instruction_pending_deoptimization_environment_->
- SetDeferredLazyDeoptimizationEnvironment(result->environment());
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = BailoutId::None();
- return result;
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- info()->MarkAsDeferredCalling();
- if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
- } else {
- ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
- }
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
- }
-
- HEnvironment* outer = current_block_->last_environment()->
- DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
-
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* key = UseOrConstantAtStart(instr->key());
- LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new(zone()) LIn(key, object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* object = UseFixed(instr->enumerable(), rax);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
- return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseTempRegister(instr->index());
- return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/lithium-x64.h b/src/3rdparty/v8/src/x64/lithium-x64.h
deleted file mode 100644
index 0133578..0000000
--- a/src/3rdparty/v8/src/x64/lithium-x64.h
+++ /dev/null
@@ -1,2641 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_LITHIUM_X64_H_
-#define V8_X64_LITHIUM_X64_H_
-
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(Allocate) \
- V(AllocateObject) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(ArrayLiteral) \
- V(BitI) \
- V(BitNotI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallConstantFunction) \
- V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
- V(CallNew) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CallStub) \
- V(CheckFunction) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CmpConstantEqAndBranch) \
- V(CmpIDAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantI) \
- V(ConstantT) \
- V(Context) \
- V(DeclareGlobals) \
- V(DeleteProperty) \
- V(Deoptimize) \
- V(DivI) \
- V(DoubleToI) \
- V(DummyUse) \
- V(ElementsKind) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
- V(MapEnumLength) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
- V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(In) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(Uint32ToDouble) \
- V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
- V(IsObjectAndBranch) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(JSArrayLength) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadElements) \
- V(LoadExternalArrayPointer) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyed) \
- V(LoadKeyedGeneric) \
- V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
- V(LoadNamedGeneric) \
- V(MathExp) \
- V(MathFloorOfDiv) \
- V(MathMinMax) \
- V(ModI) \
- V(MulI) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(ObjectLiteral) \
- V(OsrEntry) \
- V(OuterContext) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(Random) \
- V(RegExpLiteral) \
- V(Return) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
- V(StoreKeyed) \
- V(StoreKeyedGeneric) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(StringLength) \
- V(SubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(Throw) \
- V(ToFastProperties) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop)
-
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction: public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false) { }
-
- virtual ~LInstruction() { }
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void MarkAsCall() { is_call_ = true; }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return is_call_; }
- bool ClobbersRegisters() const { return is_call_; }
- bool ClobbersDoubleRegisters() const { return is_call_; }
-
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- private:
- // Iterator support.
- friend class InputIterator;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- bool is_call_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
-
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
-};
-
-
-class LGap: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
- static LGap* cast(LInstruction* instr) {
- ASSERT(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos,
- Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap: public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
-
- int block_id() const { return block_id_; }
-
- private:
- int block_id_;
-};
-
-
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-};
-
-
-class LLabel: public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- virtual void PrintDataTo(StringStream* stream);
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
-};
-
-
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
- virtual bool IsControl() const { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-};
-
-
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
- public:
- LWrapReceiver(LOperand* receiver, LOperand* function) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- }
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-};
-
-
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-};
-
-
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LModI: public LTemplateInstruction<1, 2, 1> {
- public:
- LModI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivI: public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-};
-
-
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
- public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulI: public LTemplateInstruction<1, 2, 0> {
- public:
- LMulI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUnaryMathOperation(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
-};
-
-
-class LMathExp: public LTemplateInstruction<1, 1, 2> {
- public:
- LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- ExternalReference::InitializeMathExpData();
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
-};
-
-
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
- }
-
- LOperand* left() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
-class LIsNilAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsNilAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
-
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsObjectAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
- public:
- explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
- public:
- explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
- public:
- explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasInstanceTypeAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
- "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LIn: public LTemplateInstruction<1, 2, 0> {
- public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
- inputs_[1] = object;
- }
-
- LOperand* key() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
- public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
- public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
- lazy_deopt_env_ = env;
- }
-
- private:
- LEnvironment* lazy_deopt_env_;
-};
-
-
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI: public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI: public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantD: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LConstantD(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value() const { return hydrogen()->handle(); }
-};
-
-
-class LBranch: public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCmpMapAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
-};
-
-
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LValueOf(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
-class LDateField: public LTemplateInstruction<1, 1, 0> {
- public:
- LDateField(LOperand* date, Smi* index) : index_(index) {
- inputs_[0] = date;
- }
-
- LOperand* date() { return inputs_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-
- private:
- Smi* index_;
-};
-
-
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
- public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
- inputs_[0] = string;
- inputs_[1] = index;
- inputs_[2] = value;
- }
-
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
-class LThrow: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower: public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LRandom: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRandom(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LReturn(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-
- LOperand* object() { return inputs_[0]; }
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- LOperand* object() { return inputs_[0]; }
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadFunctionPrototype(LOperand* function) {
- inputs_[0] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-
- LOperand* function() { return inputs_[0]; }
-};
-
-
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- bool is_external() const {
- return hydrogen()->is_external();
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream);
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
-};
-
-
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- LOperand* global_object() { return inputs_[0]; }
- Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
-};
-
-
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
- public:
- explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
- }
-
- LOperand* global_object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
- public:
- explicit LGlobalObject(bool qml_global) : qml_global_(qml_global) {}
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
-};
-
-
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
-
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> function() { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
-};
-
-
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- LOperand* key() { return inputs_[0]; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- LOperand* function() { return inputs_[0]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- explicit LCallGlobal(bool qml_global) : qml_global_(qml_global) {}
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
-};
-
-
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> target() const { return hydrogen()->target(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
- }
-
- LOperand* constructor() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNewArray(LOperand* constructor) {
- inputs_[0] = constructor;
- }
-
- LOperand* constructor() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
-};
-
-
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LNumberTagU(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LNumberTagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
- public:
- LTaggedToI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
-};
-
-
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change);
-};
-
-
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
- bool needs_check() const { return needs_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
-};
-
-
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- bool is_external() const { return hydrogen()->is_external(); }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* new_map_temp,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = new_map_temp;
- temps_[1] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
- LOperand* temp() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
- public:
- LTrapAllocationMemento(LOperand* object,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
- "trap-allocation-memento")
-};
-
-
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
- }
-
- LOperand* char_code() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
- }
-
- LOperand* string() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckFunction(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
-};
-
-
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMaps(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LCheckPrototypeMaps(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- ZoneList<Handle<JSObject> >* prototypes() const {
- return hydrogen()->prototypes();
- }
- ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampDToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* unclamped,
- LOperand* temp_xmm) {
- inputs_[0] = unclamped;
- temps_[0] = temp_xmm;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp_xmm() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-};
-
-
-class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LAllocateObject(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
-class LAllocate: public LTemplateInstruction<1, 1, 1> {
- public:
- LAllocate(LOperand* size, LOperand* temp) {
- inputs_[0] = size;
- temps_[0] = temp;
- }
-
- LOperand* size() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
-};
-
-
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCallAndBranch)
-};
-
-
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
- public:
- LDeleteProperty(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry();
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
- LOperand* double_register_spills_[
- DoubleRegister::kMaxNumAllocatableRegisters];
-};
-
-
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk: public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
-
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
-};
-
-
-class LChunkBuilder BASE_EMBEDDED {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
- info_(info),
- graph_(graph),
- zone_(graph->zone()),
- status_(UNUSED),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
-
- private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(const char* reason);
-
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- XMMRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // Operand created by UseRegister is guaranteed to be live until the end of
- // instruction. This means that register allocator will not reuse it's
- // register for any other operand inside instruction.
- // Operand created by UseRegisterAtStart is guaranteed to be live only at
- // instruction start. Register allocator is free to assign the same register
- // to some other operand used inside instruction (i.e. temporary or
- // output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg);
- // Assigns an environment to an instruction. An instruction which can
- // deoptimize must have an environment.
- LInstruction* AssignEnvironment(LInstruction* instr);
- // Assigns a pointer map to an instruction. An instruction which can
- // trigger a GC or a lazy deoptimization must have a pointer map.
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // Marks a call for the register allocator. Assigns a pointer map to
- // support GC and lazy deoptimization. Assigns an environment to support
- // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
-
- void VisitInstruction(HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
-
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Zone* zone_;
- Status status_;
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- int argument_count_;
- LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} } // namespace v8::int
-
-#endif // V8_X64_LITHIUM_X64_H_
diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc b/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
deleted file mode 100644
index 5f467e3..0000000
--- a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
+++ /dev/null
@@ -1,4637 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "assembler-x64.h"
-#include "macro-assembler-x64.h"
-#include "serialize.h"
-#include "debug.h"
-#include "heap.h"
-
-namespace v8 {
-namespace internal {
-
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
- : Assembler(arg_isolate, buffer, size),
- generating_stub_(false),
- allow_stub_calls_(true),
- has_frame_(false),
- root_array_available_(true) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
- }
-}
-
-
-static const int kInvalidRootRegisterDelta = -1;
-
-
-intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
- if (predictable_code_size() &&
- (other.address() < reinterpret_cast<Address>(isolate()) ||
- other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
- return kInvalidRootRegisterDelta;
- }
- Address roots_register_value = kRootRegisterBias +
- reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
- intptr_t delta = other.address() - roots_register_value;
- return delta;
-}
-
-
-Operand MacroAssembler::ExternalOperand(ExternalReference target,
- Register scratch) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(target);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- return Operand(kRootRegister, static_cast<int32_t>(delta));
- }
- }
- movq(scratch, target);
- return Operand(scratch, 0);
-}
-
-
-void MacroAssembler::Load(Register destination, ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
- return;
- }
- }
- // Safe code.
- if (destination.is(rax)) {
- load_rax(source);
- } else {
- movq(kScratchRegister, source);
- movq(destination, Operand(kScratchRegister, 0));
- }
-}
-
-
-void MacroAssembler::Store(ExternalReference destination, Register source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(destination);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
- return;
- }
- }
- // Safe code.
- if (source.is(rax)) {
- store_rax(destination);
- } else {
- movq(kScratchRegister, destination);
- movq(Operand(kScratchRegister, 0), source);
- }
-}
-
-
-void MacroAssembler::LoadAddress(Register destination,
- ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
- return;
- }
- }
- // Safe code.
- movq(destination, source);
-}
-
-
-int MacroAssembler::LoadAddressSize(ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- // This calculation depends on the internals of LoadAddress.
- // It's correctness is ensured by the asserts in the Call
- // instruction below.
- intptr_t delta = RootRegisterDelta(source);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- // Operand is lea(scratch, Operand(kRootRegister, delta));
- // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
- int size = 4;
- if (!is_int8(static_cast<int32_t>(delta))) {
- size += 3; // Need full four-byte displacement in lea.
- }
- return size;
- }
- }
- // Size of movq(destination, src);
- return 10;
-}
-
-
-void MacroAssembler::PushAddress(ExternalReference source) {
- int64_t address = reinterpret_cast<int64_t>(source.address());
- if (is_int32(address) && !Serializer::enabled()) {
- if (emit_debug_code()) {
- movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- }
- push(Immediate(static_cast<int32_t>(address)));
- return;
- }
- LoadAddress(kScratchRegister, source);
- push(kScratchRegister);
-}
-
-
-void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- movq(destination, Operand(kRootRegister,
- (index << kPointerSizeLog2) - kRootRegisterBias));
-}
-
-
-void MacroAssembler::LoadRootIndexed(Register destination,
- Register variable_offset,
- int fixed_offset) {
- ASSERT(root_array_available_);
- movq(destination,
- Operand(kRootRegister,
- variable_offset, times_pointer_size,
- (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
-}
-
-
-void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
- source);
-}
-
-
-void MacroAssembler::PushRoot(Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
-}
-
-
-void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- cmpq(with, Operand(kRootRegister,
- (index << kPointerSizeLog2) - kRootRegisterBias));
-}
-
-
-void MacroAssembler::CompareRoot(const Operand& with,
- Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- ASSERT(!with.AddressUsesRegister(kScratchRegister));
- LoadRoot(kScratchRegister, index);
- cmpq(with, kScratchRegister);
-}
-
-
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then) {
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
- // Load store buffer top.
- LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
- // Store pointer to buffer.
- movq(Operand(scratch, 0), addr);
- // Increment buffer top.
- addq(scratch, Immediate(kPointerSize));
- // Write back new top of buffer.
- StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
- // Call stub on end of buffer.
- Label done;
- // Check for end of buffer.
- testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
- if (and_then == kReturnAtEnd) {
- Label buffer_overflowed;
- j(not_equal, &buffer_overflowed, Label::kNear);
- ret(0);
- bind(&buffer_overflowed);
- } else {
- ASSERT(and_then == kFallThroughAtEnd);
- j(equal, &done, Label::kNear);
- }
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(save_fp);
- CallStub(&store_buffer_overflow);
- if (and_then == kReturnAtEnd) {
- ret(0);
- } else {
- ASSERT(and_then == kFallThroughAtEnd);
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch,
- Label::Distance distance) {
- if (Serializer::enabled()) {
- // Can't do arithmetic on external references if it might get serialized.
- // The mask isn't really an address. We load it as an external reference in
- // case the size of the new space is different between the snapshot maker
- // and the running system.
- if (scratch.is(object)) {
- movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
- and_(scratch, kScratchRegister);
- } else {
- movq(scratch, ExternalReference::new_space_mask(isolate()));
- and_(scratch, object);
- }
- movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
- cmpq(scratch, kScratchRegister);
- j(cc, branch, distance);
- } else {
- ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
- intptr_t new_space_start =
- reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
- if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
- } else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
- }
- and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
- j(cc, branch, distance);
- }
-}
-
-
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!value.is(rsi) && !dst.is(rsi));
-
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done);
- }
-
- // Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
-
- lea(dst, FieldOperand(object, offset));
- if (emit_debug_code()) {
- Label ok;
- testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- }
-}
-
-
-void MacroAssembler::RecordWriteArray(Register object,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done);
- }
-
- // Array access: calculate the destination address. Index is not a smi.
- Register dst = index;
- lea(dst, Operand(object, index, times_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
-
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- }
-}
-
-
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!value.is(rsi) && !address.is(rsi));
-
- ASSERT(!object.is(value));
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
- AssertNotSmi(object);
-
- if (remembered_set_action == OMIT_REMEMBERED_SET &&
- !FLAG_incremental_marking) {
- return;
- }
-
- if (emit_debug_code()) {
- Label ok;
- cmpq(value, Operand(address, 0));
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- // First, check if a write barrier is even needed. The tests below
- // catch stores of smis and stores into the young generation.
- Label done;
-
- if (smi_check == INLINE_SMI_CHECK) {
- // Skip barrier if writing a smi.
- JumpIfSmi(value, &done);
- }
-
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
-
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
-
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
- CallStub(&stub);
-
- bind(&done);
-
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- }
-}
-
-
-void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (emit_debug_code()) Check(cc, msg);
-}
-
-
-void MacroAssembler::AssertFastElements(Register elements) {
- if (emit_debug_code()) {
- Label ok;
- CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- j(equal, &ok, Label::kNear);
- CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedDoubleArrayMapRootIndex);
- j(equal, &ok, Label::kNear);
- CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- j(equal, &ok, Label::kNear);
- Abort("JSObject with fast elements map has slow elements");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::Check(Condition cc, const char* msg) {
- Label L;
- j(cc, &L, Label::kNear);
- Abort(msg);
- // Control will not return here.
- bind(&L);
-}
-
-
-void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = OS::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
- Label alignment_as_expected;
- testq(rsp, Immediate(frame_alignment_mask));
- j(zero, &alignment_as_expected, Label::kNear);
- // Abort if stack is not aligned.
- int3();
- bind(&alignment_as_expected);
- }
-}
-
-
-void MacroAssembler::NegativeZeroTest(Register result,
- Register op,
- Label* then_label) {
- Label ok;
- testl(result, result);
- j(not_zero, &ok, Label::kNear);
- testl(op, op);
- j(sign, then_label);
- bind(&ok);
-}
-
-
-void MacroAssembler::Abort(const char* msg) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
-#ifdef DEBUG
- if (msg != NULL) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
-#endif
- push(rax);
- movq(kScratchRegister, p0, RelocInfo::NONE64);
- push(kScratchRegister);
- movq(kScratchRegister,
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
- RelocInfo::NONE64);
- push(kScratchRegister);
-
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
- } else {
- CallRuntime(Runtime::kAbort, 2);
- }
- // Control will not return here.
- int3();
-}
-
-
-void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::StubReturn(int argc) {
- ASSERT(argc >= 1 && generating_stub());
- ret((argc - 1) * kPointerSize);
-}
-
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addq(rsp, Immediate(num_arguments * kPointerSize));
- }
- LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it does not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. Even if we subsequently go to
- // the slow case, converting the key to a smi is always valid.
- // key: string key
- // hash: key's hash field, including its array index value.
- and_(hash, Immediate(String::kArrayIndexValueMask));
- shr(hash, Immediate(String::kHashShift));
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- Integer32ToSmi(index, hash);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(rax, function->nargs);
- LoadAddress(rbx, ExternalReference(function, isolate()));
- CEntryStub ces(1, kSaveFPRegs);
- CallStub(&ces);
-}
-
-
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
- // If the expected number of arguments of the runtime function is
- // constant, we check that the actual number of arguments match the
- // expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size);
- CallStub(&ces);
-}
-
-
-void MacroAssembler::CallExternalReference(const ExternalReference& ext,
- int num_arguments) {
- Set(rax, num_arguments);
- LoadAddress(rbx, ext);
-
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : argument num_arguments - 1
- // ...
- // -- rsp[8 * num_arguments] : argument 0 (receiver)
- // -----------------------------------
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- JumpToExternalReference(ext, result_size);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
-static int Offset(ExternalReference ref0, ExternalReference ref1) {
- int64_t offset = (ref0.address() - ref1.address());
- // Check that fits into int.
- ASSERT(static_cast<int>(offset) == offset);
- return static_cast<int>(offset);
-}
-
-
-void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
-#if defined(_WIN64) && !defined(__MINGW64__)
- // We need to prepare a slot for result handle on stack and put
- // a pointer to it into 1st arg register.
- EnterApiExitFrame(arg_stack_space + 1);
-
- // rcx must be used to pass the pointer to the return value slot.
- lea(rcx, StackSpaceOperand(arg_stack_space));
-#else
- EnterApiExitFrame(arg_stack_space);
-#endif
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- int stack_space) {
- Label empty_result;
- Label prologue;
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label write_back;
-
- Factory* factory = isolate()->factory();
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- const int kNextOffset = 0;
- const int kLimitOffset = Offset(
- ExternalReference::handle_scope_limit_address(isolate()),
- next_address);
- const int kLevelOffset = Offset(
- ExternalReference::handle_scope_level_address(isolate()),
- next_address);
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address(isolate());
-
- // Allocate HandleScope in callee-save registers.
- Register prev_next_address_reg = r14;
- Register prev_limit_reg = rbx;
- Register base_reg = r15;
- movq(base_reg, next_address);
- movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
- movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
- addl(Operand(base_reg, kLevelOffset), Immediate(1));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0);
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
- // Call the api function!
- movq(rax, reinterpret_cast<int64_t>(function_address),
- RelocInfo::EXTERNAL_REFERENCE);
- call(rax);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0);
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
-#if defined(_WIN64) && !defined(__MINGW64__)
- // rax keeps a pointer to v8::Handle, unpack it.
- movq(rax, Operand(rax, 0));
-#endif
- // Check if the result handle holds 0.
- testq(rax, rax);
- j(zero, &empty_result);
- // It was non-zero. Dereference to get the result value.
- movq(rax, Operand(rax, 0));
- bind(&prologue);
-
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- subl(Operand(base_reg, kLevelOffset), Immediate(1));
- movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
- cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
- j(not_equal, &delete_allocated_handles);
- bind(&leave_exit_frame);
-
- // Check if the function scheduled an exception.
- movq(rsi, scheduled_exception_address);
- Cmp(Operand(rsi, 0), factory->the_hole_value());
- j(not_equal, &promote_scheduled_exception);
-
-#if ENABLE_EXTRA_CHECKS
- // Check if the function returned a valid JavaScript value.
- Label ok;
- Register return_value = rax;
- Register map = rcx;
-
- JumpIfSmi(return_value, &ok, Label::kNear);
- movq(map, FieldOperand(return_value, HeapObject::kMapOffset));
-
- CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- j(below, &ok, Label::kNear);
-
- CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- j(above_equal, &ok, Label::kNear);
-
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- j(equal, &ok, Label::kNear);
-
- CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
- j(equal, &ok, Label::kNear);
-
- CompareRoot(return_value, Heap::kTrueValueRootIndex);
- j(equal, &ok, Label::kNear);
-
- CompareRoot(return_value, Heap::kFalseValueRootIndex);
- j(equal, &ok, Label::kNear);
-
- CompareRoot(return_value, Heap::kNullValueRootIndex);
- j(equal, &ok, Label::kNear);
-
- Abort("API call returned invalid object");
-
- bind(&ok);
-#endif
-
- LeaveApiExitFrame();
- ret(stack_space * kPointerSize);
-
- bind(&empty_result);
- // It was zero; the result is undefined.
- LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- jmp(&prologue);
-
- bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
-
- // HandleScope limit has changed. Delete allocated extensions.
- bind(&delete_allocated_handles);
- movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
- movq(prev_limit_reg, rax);
-#ifdef _WIN64
- LoadAddress(rcx, ExternalReference::isolate_address());
-#else
- LoadAddress(rdi, ExternalReference::isolate_address());
-#endif
- LoadAddress(rax,
- ExternalReference::delete_handle_scope_extensions(isolate()));
- call(rax);
- movq(rax, prev_limit_reg);
- jmp(&leave_exit_frame);
-}
-
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
- int result_size) {
- // Set the entry point and jump to the C entry runtime stub.
- LoadAddress(rbx, ext);
- CEntryStub ces(result_size);
- jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Rely on the assertion to check that the number of provided
- // arguments match the expected number of arguments. Fake a
- // parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- GetBuiltinEntry(rdx, id);
- InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
- // Load the builtins object into target register.
- movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- movq(target, FieldOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(rdi));
- // Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(rdi, id);
- movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-}
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const Register saved_regs[] = {
- REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
- REG(r9), REG(r10), REG(r11)
-};
-
-#undef REG
-
-static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
-
-
-void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1,
- Register exclusion2,
- Register exclusion3) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- for (int i = 0; i < kNumberOfSavedRegs; i++) {
- Register reg = saved_regs[i];
- if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- push(reg);
- }
- }
- // R12 to r15 are callee save on all platforms.
- if (fp_mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movsd(Operand(rsp, i * kDoubleSize), reg);
- }
- }
-}
-
-
-void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1,
- Register exclusion2,
- Register exclusion3) {
- if (fp_mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movsd(reg, Operand(rsp, i * kDoubleSize));
- }
- addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
- }
- for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
- Register reg = saved_regs[i];
- if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- pop(reg);
- }
- }
-}
-
-
-void MacroAssembler::Set(Register dst, int64_t x) {
- if (x == 0) {
- xorl(dst, dst);
- } else if (is_uint32(x)) {
- movl(dst, Immediate(static_cast<uint32_t>(x)));
- } else if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
- } else {
- movq(dst, x, RelocInfo::NONE64);
- }
-}
-
-void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
- } else {
- Set(kScratchRegister, x);
- movq(dst, kScratchRegister);
- }
-}
-
-
-bool MacroAssembler::IsUnsafeInt(const int x) {
- static const int kMaxBits = 17;
- return !is_intn(x, kMaxBits);
-}
-
-
-void MacroAssembler::SafeMove(Register dst, Smi* src) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
- if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
- Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
- Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(dst, kScratchRegister);
- } else {
- Move(dst, src);
- }
-}
-
-
-void MacroAssembler::SafePush(Smi* src) {
- ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
- if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
- Push(Smi::FromInt(src->value() ^ jit_cookie()));
- Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(Operand(rsp, 0), kScratchRegister);
- } else {
- Push(src);
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// Smi tagging, untagging and tag detection.
-
-Register MacroAssembler::GetSmiConstant(Smi* source) {
- int value = source->value();
- if (value == 0) {
- xorl(kScratchRegister, kScratchRegister);
- return kScratchRegister;
- }
- if (value == 1) {
- return kSmiConstantRegister;
- }
- LoadSmiConstant(kScratchRegister, source);
- return kScratchRegister;
-}
-
-void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
- if (emit_debug_code()) {
- movq(dst,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE64);
- cmpq(dst, kSmiConstantRegister);
- if (allow_stub_calls()) {
- Assert(equal, "Uninitialized kSmiConstantRegister");
- } else {
- Label ok;
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
- }
- int value = source->value();
- if (value == 0) {
- xorl(dst, dst);
- return;
- }
- bool negative = value < 0;
- unsigned int uvalue = negative ? -value : value;
-
- switch (uvalue) {
- case 9:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
- break;
- case 8:
- xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
- break;
- case 4:
- xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
- break;
- case 5:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
- break;
- case 3:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
- break;
- case 2:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
- break;
- case 1:
- movq(dst, kSmiConstantRegister);
- break;
- case 0:
- UNREACHABLE();
- return;
- default:
- movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
- return;
- }
- if (negative) {
- neg(dst);
- }
-}
-
-
-void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (!dst.is(src)) {
- movl(dst, src);
- }
- shl(dst, Immediate(kSmiShift));
-}
-
-
-void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
- if (emit_debug_code()) {
- testb(dst, Immediate(0x01));
- Label ok;
- j(zero, &ok, Label::kNear);
- if (allow_stub_calls()) {
- Abort("Integer32ToSmiField writing to non-smi location");
- } else {
- int3();
- }
- bind(&ok);
- }
- ASSERT(kSmiShift % kBitsPerByte == 0);
- movl(Operand(dst, kSmiShift / kBitsPerByte), src);
-}
-
-
-void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
- Register src,
- int constant) {
- if (dst.is(src)) {
- addl(dst, Immediate(constant));
- } else {
- leal(dst, Operand(src, constant));
- }
- shl(dst, Immediate(kSmiShift));
-}
-
-
-void MacroAssembler::SmiToInteger32(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (!dst.is(src)) {
- movq(dst, src);
- }
- shr(dst, Immediate(kSmiShift));
-}
-
-
-void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
- movl(dst, Operand(src, kSmiShift / kBitsPerByte));
-}
-
-
-void MacroAssembler::SmiToInteger64(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (!dst.is(src)) {
- movq(dst, src);
- }
- sar(dst, Immediate(kSmiShift));
-}
-
-
-void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
- movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
-}
-
-
-void MacroAssembler::SmiTest(Register src) {
- testq(src, src);
-}
-
-
-void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
- AssertSmi(smi1);
- AssertSmi(smi2);
- cmpq(smi1, smi2);
-}
-
-
-void MacroAssembler::SmiCompare(Register dst, Smi* src) {
- AssertSmi(dst);
- Cmp(dst, src);
-}
-
-
-void MacroAssembler::Cmp(Register dst, Smi* src) {
- ASSERT(!dst.is(kScratchRegister));
- if (src->value() == 0) {
- testq(dst, dst);
- } else {
- Register constant_reg = GetSmiConstant(src);
- cmpq(dst, constant_reg);
- }
-}
-
-
-void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
- AssertSmi(dst);
- AssertSmi(src);
- cmpq(dst, src);
-}
-
-
-void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
- AssertSmi(dst);
- AssertSmi(src);
- cmpq(dst, src);
-}
-
-
-void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
- AssertSmi(dst);
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
-}
-
-
-void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
- // The Operand cannot use the smi register.
- Register smi_reg = GetSmiConstant(src);
- ASSERT(!dst.AddressUsesRegister(smi_reg));
- cmpq(dst, smi_reg);
-}
-
-
-void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
-}
-
-
-void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
- Register src,
- int power) {
- ASSERT(power >= 0);
- ASSERT(power < 64);
- if (power == 0) {
- SmiToInteger64(dst, src);
- return;
- }
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (power < kSmiShift) {
- sar(dst, Immediate(kSmiShift - power));
- } else if (power > kSmiShift) {
- shl(dst, Immediate(power - kSmiShift));
- }
-}
-
-
-void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
- Register src,
- int power) {
- ASSERT((0 <= power) && (power < 32));
- if (dst.is(src)) {
- shr(dst, Immediate(power + kSmiShift));
- } else {
- UNIMPLEMENTED(); // Not used.
- }
-}
-
-
-void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
- Label* on_not_smis,
- Label::Distance near_jump) {
- if (dst.is(src1) || dst.is(src2)) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- movq(kScratchRegister, src1);
- or_(kScratchRegister, src2);
- JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- or_(dst, src2);
- JumpIfNotSmi(dst, on_not_smis, near_jump);
- }
-}
-
-
-Condition MacroAssembler::CheckSmi(Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- testb(src, Immediate(kSmiTagMask));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckSmi(const Operand& src) {
- STATIC_ASSERT(kSmiTag == 0);
- testb(src, Immediate(kSmiTagMask));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- // Test that both bits of the mask 0x8000000000000001 are zero.
- movq(kScratchRegister, src);
- rol(kScratchRegister, Immediate(1));
- testb(kScratchRegister, Immediate(3));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
- if (first.is(second)) {
- return CheckSmi(first);
- }
- STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
- leal(kScratchRegister, Operand(first, second, times_1, 0));
- testb(kScratchRegister, Immediate(0x03));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
- Register second) {
- if (first.is(second)) {
- return CheckNonNegativeSmi(first);
- }
- movq(kScratchRegister, first);
- or_(kScratchRegister, second);
- rol(kScratchRegister, Immediate(1));
- testl(kScratchRegister, Immediate(3));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckEitherSmi(Register first,
- Register second,
- Register scratch) {
- if (first.is(second)) {
- return CheckSmi(first);
- }
- if (scratch.is(second)) {
- andl(scratch, first);
- } else {
- if (!scratch.is(first)) {
- movl(scratch, first);
- }
- andl(scratch, second);
- }
- testb(scratch, Immediate(kSmiTagMask));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckIsMinSmi(Register src) {
- ASSERT(!src.is(kScratchRegister));
- // If we overflow by subtracting one, it's the minimal smi value.
- cmpq(src, kSmiConstantRegister);
- return overflow;
-}
-
-
-Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
- // A 32-bit integer value can always be converted to a smi.
- return always;
-}
-
-
-Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
- // An unsigned 32-bit integer value is valid as long as the high bit
- // is not set.
- testl(src, src);
- return positive;
-}
-
-
-void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
- if (dst.is(src)) {
- andl(dst, Immediate(kSmiTagMask));
- } else {
- movl(dst, Immediate(kSmiTagMask));
- andl(dst, src);
- }
-}
-
-
-void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
- if (!(src.AddressUsesRegister(dst))) {
- movl(dst, Immediate(kSmiTagMask));
- andl(dst, src);
- } else {
- movl(dst, src);
- andl(dst, Immediate(kSmiTagMask));
- }
-}
-
-
-void MacroAssembler::JumpIfNotValidSmiValue(Register src,
- Label* on_invalid,
- Label::Distance near_jump) {
- Condition is_valid = CheckInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid, near_jump);
-}
-
-
-void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
- Label* on_invalid,
- Label::Distance near_jump) {
- Condition is_valid = CheckUInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid, near_jump);
-}
-
-
-void MacroAssembler::JumpIfSmi(Register src,
- Label* on_smi,
- Label::Distance near_jump) {
- Condition smi = CheckSmi(src);
- j(smi, on_smi, near_jump);
-}
-
-
-void MacroAssembler::JumpIfNotSmi(Register src,
- Label* on_not_smi,
- Label::Distance near_jump) {
- Condition smi = CheckSmi(src);
- j(NegateCondition(smi), on_not_smi, near_jump);
-}
-
-
-void MacroAssembler::JumpUnlessNonNegativeSmi(
- Register src, Label* on_not_smi_or_negative,
- Label::Distance near_jump) {
- Condition non_negative_smi = CheckNonNegativeSmi(src);
- j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
-}
-
-
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
- Smi* constant,
- Label* on_equals,
- Label::Distance near_jump) {
- SmiCompare(src, constant);
- j(equal, on_equals, near_jump);
-}
-
-
-void MacroAssembler::JumpIfNotBothSmi(Register src1,
- Register src2,
- Label* on_not_both_smi,
- Label::Distance near_jump) {
- Condition both_smi = CheckBothSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi, near_jump);
-}
-
-
-void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
- Register src2,
- Label* on_not_both_smi,
- Label::Distance near_jump) {
- Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi, near_jump);
-}
-
-
-void MacroAssembler::SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- // Does not assume that src is a smi.
- ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
-
- JumpIfNotSmi(src, on_not_smi_result, near_jump);
- Register tmp = (dst.is(src) ? kScratchRegister : dst);
- LoadSmiConstant(tmp, constant);
- addq(tmp, src);
- j(overflow, on_not_smi_result, near_jump);
- if (dst.is(src)) {
- movq(dst, tmp);
- }
-}
-
-
-void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- return;
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- switch (constant->value()) {
- case 1:
- addq(dst, kSmiConstantRegister);
- return;
- case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
- return;
- case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
- return;
- case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
- return;
- default:
- Register constant_reg = GetSmiConstant(constant);
- addq(dst, constant_reg);
- return;
- }
- } else {
- switch (constant->value()) {
- case 1:
- lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
- return;
- case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
- return;
- case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
- return;
- case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
- return;
- default:
- LoadSmiConstant(dst, constant);
- addq(dst, src);
- return;
- }
- }
-}
-
-
-void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
- if (constant->value() != 0) {
- addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
- }
-}
-
-
-void MacroAssembler::SmiAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
-
- LoadSmiConstant(kScratchRegister, constant);
- addq(kScratchRegister, src);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- LoadSmiConstant(dst, constant);
- addq(dst, src);
- j(overflow, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- Register constant_reg = GetSmiConstant(constant);
- subq(dst, constant_reg);
- } else {
- if (constant->value() == Smi::kMinValue) {
- LoadSmiConstant(dst, constant);
- // Adding and subtracting the min-value gives the same result, it only
- // differs on the overflow bit, which we don't check here.
- addq(dst, src);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
- addq(dst, src);
- }
- }
-}
-
-
-void MacroAssembler::SmiSubConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result, near_jump);
- LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
- addq(kScratchRegister, dst);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- }
- } else {
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result, near_jump);
- LoadSmiConstant(dst, constant);
- // Adding and subtracting the min-value gives the same result, it only
- // differs on the overflow bit, which we don't check here.
- addq(dst, src);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
- j(overflow, on_not_smi_result, near_jump);
- }
- }
-}
-
-
-void MacroAssembler::SmiNeg(Register dst,
- Register src,
- Label* on_smi_result,
- Label::Distance near_jump) {
- if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- movq(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
- // Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
- j(not_equal, on_smi_result, near_jump);
- movq(src, kScratchRegister);
- } else {
- movq(dst, src);
- neg(dst);
- cmpq(dst, src);
- // If the result is zero or Smi::kMinValue, negation failed to create a smi.
- j(not_equal, on_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- ASSERT(!src2.AddressUsesRegister(dst));
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- Register src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible.
- if (!dst.is(src1)) {
- if (emit_debug_code()) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- Check(no_overflow, "Smi addition overflow");
- }
- lea(dst, Operand(src1, src2, times_1, 0));
- } else {
- addq(dst, src2);
- Assert(no_overflow, "Smi addition overflow");
- }
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- cmpq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- subq(dst, src2);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- subq(dst, src2);
- Assert(no_overflow, "Smi subtraction overflow");
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src2);
- cmpq(src1, kScratchRegister);
- j(overflow, on_not_smi_result, near_jump);
- subq(src1, kScratchRegister);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- subq(dst, src2);
- Assert(no_overflow, "Smi subtraction overflow");
-}
-
-
-void MacroAssembler::SmiMul(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT(!dst.is(src2));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
-
- if (dst.is(src1)) {
- Label failure, zero_correct_result;
- movq(kScratchRegister, src1); // Create backup for later testing.
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, &failure, Label::kNear);
-
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- Label correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result, Label::kNear);
-
- movq(dst, kScratchRegister);
- xor_(dst, src2);
- // Result was positive zero.
- j(positive, &zero_correct_result, Label::kNear);
-
- bind(&failure); // Reused failure exit, restores src1.
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result, near_jump);
-
- bind(&zero_correct_result);
- Set(dst, 0);
-
- bind(&correct_result);
- } else {
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- Label correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result, Label::kNear);
- // One of src1 and src2 is zero, the check whether the other is
- // negative.
- movq(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
- j(negative, on_not_smi_result, near_jump);
- bind(&correct_result);
- }
-}
-
-
-void MacroAssembler::SmiDiv(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
-
- // Check for 0 divisor (result is +/-Infinity).
- testq(src2, src2);
- j(zero, on_not_smi_result, near_jump);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- // We need to rule out dividing Smi::kMinValue by -1, since that would
- // overflow in idiv and raise an exception.
- // We combine this with negative zero test (negative zero only happens
- // when dividing zero by a negative number).
-
- // We overshoot a little and go to slow case if we divide min-value
- // by any negative value, not just -1.
- Label safe_div;
- testl(rax, Immediate(0x7fffffff));
- j(not_zero, &safe_div, Label::kNear);
- testq(src2, src2);
- if (src1.is(rax)) {
- j(positive, &safe_div, Label::kNear);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result, near_jump);
- } else {
- j(negative, on_not_smi_result, near_jump);
- }
- bind(&safe_div);
-
- SmiToInteger32(src2, src2);
- // Sign extend src1 into edx:eax.
- cdq();
- idivl(src2);
- Integer32ToSmi(src2, src2);
- // Check that the remainder is zero.
- testl(rdx, rdx);
- if (src1.is(rax)) {
- Label smi_result;
- j(zero, &smi_result, Label::kNear);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result, near_jump);
- bind(&smi_result);
- } else {
- j(not_zero, on_not_smi_result, near_jump);
- }
- if (!dst.is(src1) && src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- Integer32ToSmi(dst, rax);
-}
-
-
-void MacroAssembler::SmiMod(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
- ASSERT(!src1.is(src2));
-
- testq(src2, src2);
- j(zero, on_not_smi_result, near_jump);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- SmiToInteger32(src2, src2);
-
- // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
- Label safe_div;
- cmpl(rax, Immediate(Smi::kMinValue));
- j(not_equal, &safe_div, Label::kNear);
- cmpl(src2, Immediate(-1));
- j(not_equal, &safe_div, Label::kNear);
- // Retag inputs and go slow case.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- jmp(on_not_smi_result, near_jump);
- bind(&safe_div);
-
- // Sign extend eax into edx:eax.
- cdq();
- idivl(src2);
- // Restore smi tags on inputs.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- // Check for a negative zero result. If the result is zero, and the
- // dividend is negative, go slow to return a floating point negative zero.
- Label smi_result;
- testl(rdx, rdx);
- j(not_zero, &smi_result, Label::kNear);
- testq(src1, src1);
- j(negative, on_not_smi_result, near_jump);
- bind(&smi_result);
- Integer32ToSmi(dst, rdx);
-}
-
-
-void MacroAssembler::SmiNot(Register dst, Register src) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
- // Set tag and padding bits before negating, so that they are zero afterwards.
- movl(kScratchRegister, Immediate(~0));
- if (dst.is(src)) {
- xor_(dst, kScratchRegister);
- } else {
- lea(dst, Operand(src, kScratchRegister, times_1, 0));
- }
- not_(dst);
-}
-
-
-void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
- ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- and_(dst, src2);
-}
-
-
-void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
- if (constant->value() == 0) {
- Set(dst, 0);
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- Register constant_reg = GetSmiConstant(constant);
- and_(dst, constant_reg);
- } else {
- LoadSmiConstant(dst, constant);
- and_(dst, src);
- }
-}
-
-
-void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
- if (!dst.is(src1)) {
- ASSERT(!src1.is(src2));
- movq(dst, src1);
- }
- or_(dst, src2);
-}
-
-
-void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
- if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- Register constant_reg = GetSmiConstant(constant);
- or_(dst, constant_reg);
- } else {
- LoadSmiConstant(dst, constant);
- or_(dst, src);
- }
-}
-
-
-void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
- if (!dst.is(src1)) {
- ASSERT(!src1.is(src2));
- movq(dst, src1);
- }
- xor_(dst, src2);
-}
-
-
-void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
- if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- Register constant_reg = GetSmiConstant(constant);
- xor_(dst, constant_reg);
- } else {
- LoadSmiConstant(dst, constant);
- xor_(dst, src);
- }
-}
-
-
-void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
- Register src,
- int shift_value) {
- ASSERT(is_uint5(shift_value));
- if (shift_value > 0) {
- if (dst.is(src)) {
- sar(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
- } else {
- UNIMPLEMENTED(); // Not used.
- }
- }
-}
-
-
-void MacroAssembler::SmiShiftLeftConstant(Register dst,
- Register src,
- int shift_value) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift_value > 0) {
- shl(dst, Immediate(shift_value));
- }
-}
-
-
-void MacroAssembler::SmiShiftLogicalRightConstant(
- Register dst, Register src, int shift_value,
- Label* on_not_smi_result, Label::Distance near_jump) {
- // Logic right shift interprets its result as an *unsigned* number.
- if (dst.is(src)) {
- UNIMPLEMENTED(); // Not used.
- } else {
- movq(dst, src);
- if (shift_value == 0) {
- testq(dst, dst);
- j(negative, on_not_smi_result, near_jump);
- }
- shr(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
- }
-}
-
-
-void MacroAssembler::SmiShiftLeft(Register dst,
- Register src1,
- Register src2) {
- ASSERT(!dst.is(rcx));
- // Untag shift amount.
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- // Shift amount specified by lower 5 bits, not six as the shl opcode.
- and_(rcx, Immediate(0x1f));
- shl_cl(dst);
-}
-
-
-void MacroAssembler::SmiShiftLogicalRight(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- // dst and src1 can be the same, because the one case that bails out
- // is a shift by 0, which leaves dst, and therefore src1, unchanged.
- if (src1.is(rcx) || src2.is(rcx)) {
- movq(kScratchRegister, rcx);
- }
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
- shl(dst, Immediate(kSmiShift));
- testq(dst, dst);
- if (src1.is(rcx) || src2.is(rcx)) {
- Label positive_result;
- j(positive, &positive_result, Label::kNear);
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else {
- movq(src2, kScratchRegister);
- }
- jmp(on_not_smi_result, near_jump);
- bind(&positive_result);
- } else {
- // src2 was zero and src1 negative.
- j(negative, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiShiftArithmeticRight(Register dst,
- Register src1,
- Register src2) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- if (src1.is(rcx)) {
- movq(kScratchRegister, src1);
- } else if (src2.is(rcx)) {
- movq(kScratchRegister, src2);
- }
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- sar_cl(dst); // Shift 32 + original rcx & 0x1f.
- shl(dst, Immediate(kSmiShift));
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else if (src2.is(rcx)) {
- movq(src2, kScratchRegister);
- }
-}
-
-
-void MacroAssembler::SelectNonSmi(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smis,
- Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(src1));
- ASSERT(!dst.is(src2));
- // Both operands must not be smis.
-#ifdef DEBUG
- if (allow_stub_calls()) { // Check contains a stub call.
- Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
- }
-#endif
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
- testl(kScratchRegister, src2);
- // If non-zero then both are smis.
- j(not_zero, on_not_smis, near_jump);
-
- // Exactly one operand is a smi.
- ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
- // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
- // If src1 is a smi, then scratch register all 1s, else it is all 0s.
- movq(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
- // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
- // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
-}
-
-
-SmiIndex MacroAssembler::SmiToIndex(Register dst,
- Register src,
- int shift) {
- ASSERT(is_uint6(shift));
- // There is a possible optimization if shift is in the range 60-63, but that
- // will (and must) never happen.
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
- } else {
- shl(dst, Immediate(shift - kSmiShift));
- }
- return SmiIndex(dst, times_1);
-}
-
-SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
- Register src,
- int shift) {
- // Register src holds a positive smi.
- ASSERT(is_uint6(shift));
- if (!dst.is(src)) {
- movq(dst, src);
- }
- neg(dst);
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
- } else {
- shl(dst, Immediate(shift - kSmiShift));
- }
- return SmiIndex(dst, times_1);
-}
-
-
-void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
- ASSERT_EQ(0, kSmiShift % kBitsPerByte);
- addl(dst, Operand(src, kSmiShift / kBitsPerByte));
-}
-
-
-void MacroAssembler::JumpIfNotString(Register object,
- Register object_map,
- Label* not_string,
- Label::Distance near_jump) {
- Condition is_smi = CheckSmi(object);
- j(is_smi, not_string, near_jump);
- CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
- j(above_equal, not_string, near_jump);
-}
-
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
- Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- Label* on_fail,
- Label::Distance near_jump) {
- // Check that both objects are not smis.
- Condition either_smi = CheckEitherSmi(first_object, second_object);
- j(either_smi, on_fail, near_jump);
-
- // Load instance type for both strings.
- movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
- movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
- movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat ASCII strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail, near_jump);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- Label* failure,
- Label::Distance near_jump) {
- if (!scratch.is(instance_type)) {
- movl(scratch, instance_type);
- }
-
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-
- andl(scratch, Immediate(kFlatAsciiStringMask));
- cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
- j(not_equal, failure, near_jump);
-}
-
-
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* on_fail,
- Label::Distance near_jump) {
- // Load instance type for both strings.
- movq(scratch1, first_object_instance_type);
- movq(scratch2, second_object_instance_type);
-
- // Check that both are flat ASCII strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail, near_jump);
-}
-
-
-
-void MacroAssembler::Move(Register dst, Register src) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
-}
-
-
-void MacroAssembler::Move(Register dst, Handle<Object> source) {
- ASSERT(!source->IsFailure());
- if (source->IsSmi()) {
- Move(dst, Smi::cast(*source));
- } else {
- movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
- }
-}
-
-
-void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
- ASSERT(!source->IsFailure());
- if (source->IsSmi()) {
- Move(dst, Smi::cast(*source));
- } else {
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
- movq(dst, kScratchRegister);
- }
-}
-
-
-void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
- if (source->IsSmi()) {
- Cmp(dst, Smi::cast(*source));
- } else {
- Move(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
- }
-}
-
-
-void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
- if (source->IsSmi()) {
- Cmp(dst, Smi::cast(*source));
- } else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
- cmpq(dst, kScratchRegister);
- }
-}
-
-
-void MacroAssembler::Push(Handle<Object> source) {
- if (source->IsSmi()) {
- Push(Smi::cast(*source));
- } else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
- }
-}
-
-
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- movq(result, Operand(result, 0));
- } else {
- Move(result, object);
- }
-}
-
-
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- movq(kScratchRegister, Operand(kScratchRegister, 0));
- push(kScratchRegister);
- } else {
- Push(object);
- }
-}
-
-
-void MacroAssembler::LoadGlobalCell(Register dst,
- Handle<JSGlobalPropertyCell> cell) {
- if (dst.is(rax)) {
- load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
- } else {
- movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- movq(dst, Operand(dst, 0));
- }
-}
-
-
-void MacroAssembler::Push(Smi* source) {
- intptr_t smi = reinterpret_cast<intptr_t>(source);
- if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
- } else {
- Register constant = GetSmiConstant(source);
- push(constant);
- }
-}
-
-
-void MacroAssembler::Drop(int stack_elements) {
- if (stack_elements > 0) {
- addq(rsp, Immediate(stack_elements * kPointerSize));
- }
-}
-
-
-void MacroAssembler::Test(const Operand& src, Smi* source) {
- testl(Operand(src, kIntSize), Immediate(source->value()));
-}
-
-
-void MacroAssembler::TestBit(const Operand& src, int bits) {
- int byte_offset = bits / kBitsPerByte;
- int bit_in_byte = bits & (kBitsPerByte - 1);
- testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
-}
-
-
-void MacroAssembler::Jump(ExternalReference ext) {
- LoadAddress(kScratchRegister, ext);
- jmp(kScratchRegister);
-}
-
-
-void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
- movq(kScratchRegister, destination, rmode);
- jmp(kScratchRegister);
-}
-
-
-void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
- // TODO(X64): Inline this
- jmp(code_object, rmode);
-}
-
-
-int MacroAssembler::CallSize(ExternalReference ext) {
- // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
- const int kCallInstructionSize = 3;
- return LoadAddressSize(ext) + kCallInstructionSize;
-}
-
-
-void MacroAssembler::Call(ExternalReference ext) {
-#ifdef DEBUG
- int end_position = pc_offset() + CallSize(ext);
-#endif
- LoadAddress(kScratchRegister, ext);
- call(kScratchRegister);
-#ifdef DEBUG
- CHECK_EQ(end_position, pc_offset());
-#endif
-}
-
-
-void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
-#ifdef DEBUG
- int end_position = pc_offset() + CallSize(destination, rmode);
-#endif
- movq(kScratchRegister, destination, rmode);
- call(kScratchRegister);
-#ifdef DEBUG
- CHECK_EQ(pc_offset(), end_position);
-#endif
-}
-
-
-void MacroAssembler::Call(Handle<Code> code_object,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
-#ifdef DEBUG
- int end_position = pc_offset() + CallSize(code_object);
-#endif
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- call(code_object, rmode, ast_id);
-#ifdef DEBUG
- CHECK_EQ(end_position, pc_offset());
-#endif
-}
-
-
-void MacroAssembler::Pushad() {
- push(rax);
- push(rcx);
- push(rdx);
- push(rbx);
- // Not pushing rsp or rbp.
- push(rsi);
- push(rdi);
- push(r8);
- push(r9);
- // r10 is kScratchRegister.
- push(r11);
- // r12 is kSmiConstantRegister.
- // r13 is kRootRegister.
- push(r14);
- push(r15);
- STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
- // Use lea for symmetry with Popad.
- int sp_delta =
- (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, -sp_delta));
-}
-
-
-void MacroAssembler::Popad() {
- // Popad must not change the flags, so use lea instead of addq.
- int sp_delta =
- (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, sp_delta));
- pop(r15);
- pop(r14);
- pop(r11);
- pop(r9);
- pop(r8);
- pop(rdi);
- pop(rsi);
- pop(rbx);
- pop(rdx);
- pop(rcx);
- pop(rax);
-}
-
-
-void MacroAssembler::Dropad() {
- addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
-}
-
-
-// Order general registers are pushed by Pushad:
-// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
-const int
-MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
- 0,
- 1,
- 2,
- 3,
- -1,
- -1,
- 4,
- 5,
- 6,
- 7,
- -1,
- 8,
- -1,
- -1,
- 9,
- 10
-};
-
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
- const Immediate& imm) {
- movq(SafepointRegisterSlot(dst), imm);
-}
-
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
- movq(SafepointRegisterSlot(dst), src);
-}
-
-
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- movq(dst, SafepointRegisterSlot(src));
-}
-
-
-Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
- return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
-}
-
-
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // We will build up the handler from the bottom by pushing on the stack.
- // First push the frame pointer and context.
- if (kind == StackHandler::JS_ENTRY) {
- // The frame pointer does not point to a JS frame so we save NULL for
- // rbp. We expect the code throwing an exception to check rbp before
- // dereferencing it to restore the context.
- push(Immediate(0)); // NULL frame pointer.
- Push(Smi::FromInt(0)); // No context.
- } else {
- push(rbp);
- push(rsi);
- }
-
- // Push the state and the code object.
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
- push(Immediate(state));
- Push(CodeObject());
-
- // Link the current handler as the next handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- push(ExternalOperand(handler_address));
- // Set this new handler as the current one.
- movq(ExternalOperand(handler_address), rsp);
-}
-
-
-void MacroAssembler::PopTryHandler() {
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- pop(ExternalOperand(handler_address));
- addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
-}
-
-
-void MacroAssembler::JumpToHandlerEntry() {
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- // rax = exception, rdi = code object, rdx = state.
- movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
- shr(rdx, Immediate(StackHandler::kKindWidth));
- movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
- SmiToInteger64(rdx, rdx);
- lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
- jmp(rdi);
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in rax.
- if (!value.is(rax)) {
- movq(rax, value);
- }
- // Drop the stack pointer to the top of the top handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- movq(rsp, ExternalOperand(handler_address));
- // Restore the next handler.
- pop(ExternalOperand(handler_address));
-
- // Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
-
- // Restore the context and frame pointer.
- pop(rsi); // Context.
- pop(rbp); // Frame pointer.
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
- // rbp or rsi.
- Label skip;
- testq(rsi, rsi);
- j(zero, &skip, Label::kNear);
- movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
- bind(&skip);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in rax.
- if (!value.is(rax)) {
- movq(rax, value);
- }
- // Drop the stack pointer to the top of the top stack handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- Load(rsp, handler_address);
-
- // Unwind the handlers until the top ENTRY handler is found.
- Label fetch_next, check_kind;
- jmp(&check_kind, Label::kNear);
- bind(&fetch_next);
- movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
-
- bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- testl(Operand(rsp, StackHandlerConstants::kStateOffset),
- Immediate(StackHandler::KindField::kMask));
- j(not_zero, &fetch_next);
-
- // Set the top handler address to next handler past the top ENTRY handler.
- pop(ExternalOperand(handler_address));
-
- // Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
-
- // Clear the context pointer and frame pointer (0 was saved in the handler).
- pop(rsi);
- pop(rbp);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::Ret() {
- ret(0);
-}
-
-
-void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
- if (is_uint16(bytes_dropped)) {
- ret(bytes_dropped);
- } else {
- pop(scratch);
- addq(rsp, Immediate(bytes_dropped));
- push(scratch);
- ret(0);
- }
-}
-
-
-void MacroAssembler::FCmp() {
- fucomip();
- fstp(0);
-}
-
-
-void MacroAssembler::CmpObjectType(Register heap_object,
- InstanceType type,
- Register map) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- CmpInstanceType(map, type);
-}
-
-
-void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
- cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(type)));
-}
-
-
-void MacroAssembler::CheckFastElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleyElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
- j(below_equal, fail, distance);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleyElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
- Register maybe_number,
- Register elements,
- Register index,
- XMMRegister xmm_scratch,
- Label* fail,
- int elements_offset) {
- Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
-
- JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
- CheckMap(maybe_number,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
- // Double value, canonicalize NaN.
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
- cmpl(FieldOperand(maybe_number, offset),
- Immediate(kNaNOrInfinityLowerBoundUpper32));
- j(greater_equal, &maybe_nan, Label::kNear);
-
- bind(&not_nan);
- movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- movsd(FieldOperand(elements, index, times_8,
- FixedDoubleArray::kHeaderSize - elements_offset),
- xmm_scratch);
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- j(greater, &is_nan, Label::kNear);
- cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
- j(zero, &not_nan);
- bind(&is_nan);
- // Convert all NaNs to the same canonical NaN value when they are stored in
- // the double array.
- Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- movq(xmm_scratch, kScratchRegister);
- jmp(&have_double_value, Label::kNear);
-
- bind(&smi_value);
- // Value is a smi. convert to a double and store.
- // Preserve original value.
- SmiToInteger32(kScratchRegister, maybe_number);
- cvtlsi2sd(xmm_scratch, kScratchRegister);
- movsd(FieldOperand(elements, index, times_8,
- FixedDoubleArray::kHeaderSize - elements_offset),
- xmm_scratch);
- bind(&done);
-}
-
-
-void MacroAssembler::CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
- Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- j(equal, early_success, Label::kNear);
- Cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(current_map));
- }
- }
- }
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- Label success;
- CompareMap(obj, map, &success, mode);
- j(not_equal, fail);
- bind(&success);
-}
-
-
-void MacroAssembler::ClampUint8(Register reg) {
- Label done;
- testl(reg, Immediate(0xFFFFFF00));
- j(zero, &done, Label::kNear);
- setcc(negative, reg); // 1 if negative, 0 if positive.
- decb(reg); // 0 if negative, 255 if positive.
- bind(&done);
-}
-
-
-void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
- XMMRegister temp_xmm_reg,
- Register result_reg) {
- Label done;
- Label conv_failure;
- xorps(temp_xmm_reg, temp_xmm_reg);
- cvtsd2si(result_reg, input_reg);
- testl(result_reg, Immediate(0xFFFFFF00));
- j(zero, &done, Label::kNear);
- cmpl(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
- movl(result_reg, Immediate(0));
- setcc(above, result_reg);
- subl(result_reg, Immediate(1));
- andl(result_reg, Immediate(255));
- jmp(&done, Label::kNear);
- bind(&conv_failure);
- Set(result_reg, 0);
- ucomisd(input_reg, temp_xmm_reg);
- j(below, &done, Label::kNear);
- Set(result_reg, 255);
- bind(&done);
-}
-
-
-void MacroAssembler::LoadUint32(XMMRegister dst,
- Register src,
- XMMRegister scratch) {
- if (FLAG_debug_code) {
- cmpq(src, Immediate(0xffffffff));
- Assert(below_equal, "input GPR is expected to have upper32 cleared");
- }
- cvtqsi2sd(dst, src);
-}
-
-
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
-}
-
-
-void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- movq(dst, FieldOperand(map, Map::kBitField3Offset));
- DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
-}
-
-
-void MacroAssembler::EnumLength(Register dst, Register map) {
- STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- movq(dst, FieldOperand(map, Map::kBitField3Offset));
- Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
- and_(dst, kScratchRegister);
-}
-
-
-void MacroAssembler::DispatchMap(Register obj,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type) {
- Label fail;
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, &fail);
- }
- Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
- j(equal, success, RelocInfo::CODE_TARGET);
-
- bind(&fail);
-}
-
-
-void MacroAssembler::AssertNumber(Register object) {
- if (emit_debug_code()) {
- Label ok;
- Condition is_smi = CheckSmi(object);
- j(is_smi, &ok, Label::kNear);
- Cmp(FieldOperand(object, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Check(equal, "Operand is not a number");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
- Condition is_smi = CheckSmi(object);
- Check(NegateCondition(is_smi), "Operand is a smi");
- }
-}
-
-
-void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
- Condition is_smi = CheckSmi(object);
- Check(is_smi, "Operand is not a smi");
- }
-}
-
-
-void MacroAssembler::AssertSmi(const Operand& object) {
- if (emit_debug_code()) {
- Condition is_smi = CheckSmi(object);
- Check(is_smi, "Operand is not a smi");
- }
-}
-
-
-void MacroAssembler::AssertZeroExtended(Register int32_register) {
- if (emit_debug_code()) {
- ASSERT(!int32_register.is(kScratchRegister));
- movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
- cmpq(kScratchRegister, int32_register);
- Check(above_equal, "32 bit value in register is not zero-extended");
- }
-}
-
-
-void MacroAssembler::AssertString(Register object) {
- if (emit_debug_code()) {
- testb(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi and not a string");
- push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
- CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
- Check(below, "Operand is not a string");
- }
-}
-
-
-void MacroAssembler::AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- if (emit_debug_code()) {
- ASSERT(!src.is(kScratchRegister));
- LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
- Check(equal, message);
- }
-}
-
-
-
-Condition MacroAssembler::IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- testb(instance_type, Immediate(kIsNotStringMask));
- return zero;
-}
-
-
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Label* miss,
- bool miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- testl(function, Immediate(kSmiTagMask));
- j(zero, miss);
-
- // Check that the function really is a function.
- CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss);
-
- if (miss_on_bound_function) {
- movq(kScratchRegister,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
- // field).
- TestBit(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kCompilerHintsOffset),
- SharedFunctionInfo::kBoundFunction);
- j(not_zero, miss);
- }
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- testb(FieldOperand(result, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance, Label::kNear);
-
- // Get the prototype or initial map from the function.
- movq(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- CompareRoot(result, Heap::kTheHoleValueRootIndex);
- j(equal, miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CmpObjectType(result, MAP_TYPE, kScratchRegister);
- j(not_equal, &done, Label::kNear);
-
- // Get the prototype from the initial map.
- movq(result, FieldOperand(result, Map::kPrototypeOffset));
- jmp(&done, Label::kNear);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- movq(result, FieldOperand(result, Map::kConstructorOffset));
-
- // All done.
- bind(&done);
-}
-
-
-void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand counter_operand = ExternalOperand(ExternalReference(counter));
- movl(counter_operand, Immediate(value));
- }
-}
-
-
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand counter_operand = ExternalOperand(ExternalReference(counter));
- if (value == 1) {
- incl(counter_operand);
- } else {
- addl(counter_operand, Immediate(value));
- }
- }
-}
-
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand counter_operand = ExternalOperand(ExternalReference(counter));
- if (value == 1) {
- decl(counter_operand);
- } else {
- subl(counter_operand, Immediate(value));
- }
- }
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::DebugBreak() {
- Set(rax, 0); // No arguments.
- LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
- CEntryStub ces(1);
- ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be rcx to
- // follow the calling convention which requires the call type to be
- // in rcx.
- ASSERT(dst.is(rcx));
- if (call_kind == CALL_AS_FUNCTION) {
- LoadSmiConstant(dst, Smi::FromInt(1));
- } else {
- LoadSmiConstant(dst, Smi::FromInt(0));
- }
-}
-
-
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- InvokePrologue(expected,
- actual,
- Handle<Code>::null(),
- code,
- &done,
- &definitely_mismatches,
- flag,
- Label::kNear,
- call_wrapper,
- call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
- call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
- jmp(code);
- }
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- Register dummy = rax;
- InvokePrologue(expected,
- actual,
- code,
- dummy,
- &done,
- &definitely_mismatches,
- flag,
- Label::kNear,
- call_wrapper,
- call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
- Call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
- Jump(code, rmode);
- }
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- ASSERT(function.is(rdi));
- movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
- movsxlq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
- // Advances rdx to the end of the Code object header, to the start of
- // the executable code.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-
- ParameterCount expected(rbx);
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Get the function and setup the context.
- LoadHeapObject(rdi, function);
- movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- Label::Distance near_jump,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- bool definitely_matches = false;
- *definitely_mismatches = false;
- Label invoke;
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- Set(rax, actual.immediate());
- if (expected.immediate() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
- // Don't worry about adapting arguments for built-ins that
- // don't want that done. Skip adaption code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- *definitely_mismatches = true;
- Set(rbx, expected.immediate());
- }
- }
- } else {
- if (actual.is_immediate()) {
- // Expected is in register, actual is immediate. This is the
- // case when we invoke function values without going through the
- // IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
- j(equal, &invoke, Label::kNear);
- ASSERT(expected.reg().is(rbx));
- Set(rax, actual.immediate());
- } else if (!expected.reg().is(actual.reg())) {
- // Both expected and actual are in (different) registers. This
- // is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
- j(equal, &invoke, Label::kNear);
- ASSERT(actual.reg().is(rax));
- ASSERT(expected.reg().is(rbx));
- }
- }
-
- if (!definitely_matches) {
- Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (!code_constant.is_null()) {
- movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_register.is(rdx)) {
- movq(rdx, code_register);
- }
-
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(rcx, call_kind);
- Call(adaptor, RelocInfo::CODE_TARGET);
- call_wrapper.AfterCall();
- if (!*definitely_mismatches) {
- jmp(done, near_jump);
- }
- } else {
- SetCallKind(rcx, call_kind);
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&invoke);
- }
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(rbp);
- movq(rbp, rsp);
- push(rsi); // Context.
- Push(Smi::FromInt(type));
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
- if (emit_debug_code()) {
- movq(kScratchRegister,
- isolate()->factory()->undefined_value(),
- RelocInfo::EMBEDDED_OBJECT);
- cmpq(Operand(rsp, 0), kScratchRegister);
- Check(not_equal, "code object not properly patched");
- }
-}
-
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- if (emit_debug_code()) {
- Move(kScratchRegister, Smi::FromInt(type));
- cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
- Check(equal, "stack frame types must match");
- }
- movq(rsp, rbp);
- pop(rbp);
-}
-
-
-void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
- // Set up the frame structure on the stack.
- // All constants are relative to the frame pointer of the exit frame.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
- push(rbp);
- movq(rbp, rsp);
-
- // Reserve room for entry stack pointer and push the code object.
- ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // Saved entry sp, patched before call.
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister); // Accessed from EditFrame::code_slot.
-
- // Save the frame pointer and the context in top.
- if (save_rax) {
- movq(r14, rax); // Backup rax in callee-save register.
- }
-
- Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
- Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
-}
-
-
-void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
- bool save_doubles) {
-#ifdef _WIN64
- const int kShadowSpace = 4;
- arg_stack_space += kShadowSpace;
-#endif
- // Optionally save all XMM registers.
- if (save_doubles) {
- int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
- arg_stack_space * kPointerSize;
- subq(rsp, Immediate(space));
- int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
- XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
- }
- } else if (arg_stack_space > 0) {
- subq(rsp, Immediate(arg_stack_space * kPointerSize));
- }
-
- // Get the required frame alignment for the OS.
- const int kFrameAlignment = OS::ActivationFrameAlignment();
- if (kFrameAlignment > 0) {
- ASSERT(IsPowerOf2(kFrameAlignment));
- ASSERT(is_int8(kFrameAlignment));
- and_(rsp, Immediate(-kFrameAlignment));
- }
-
- // Patch the saved entry sp.
- movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
-}
-
-
-void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
- EnterExitFramePrologue(true);
-
- // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
- // so it must be retained across the C-call.
- int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, r14, times_pointer_size, offset));
-
- EnterExitFrameEpilogue(arg_stack_space, save_doubles);
-}
-
-
-void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
- EnterExitFramePrologue(false);
- EnterExitFrameEpilogue(arg_stack_space, false);
-}
-
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
- // Registers:
- // r15 : argv
- if (save_doubles) {
- int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
- XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
- }
- }
- // Get the return address from the stack and restore the frame pointer.
- movq(rcx, Operand(rbp, 1 * kPointerSize));
- movq(rbp, Operand(rbp, 0 * kPointerSize));
-
- // Drop everything up to and including the arguments and the receiver
- // from the caller stack.
- lea(rsp, Operand(r15, 1 * kPointerSize));
-
- // Push the return address to get ready to return.
- push(rcx);
-
- LeaveExitFrameEpilogue();
-}
-
-
-void MacroAssembler::LeaveApiExitFrame() {
- movq(rsp, rbp);
- pop(rbp);
-
- LeaveExitFrameEpilogue();
-}
-
-
-void MacroAssembler::LeaveExitFrameEpilogue() {
- // Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Isolate::kContextAddress, isolate());
- Operand context_operand = ExternalOperand(context_address);
- movq(rsi, context_operand);
-#ifdef DEBUG
- movq(context_operand, Immediate(0));
-#endif
-
- // Clear the top frame.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
- isolate());
- Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
- movq(c_entry_fp_operand, Immediate(0));
-}
-
-
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
-
- ASSERT(!holder_reg.is(scratch));
- ASSERT(!scratch.is(kScratchRegister));
- // Load current lexical context from the stack frame.
- movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- // When generating debug code, make sure the lexical context is set.
- if (emit_debug_code()) {
- cmpq(scratch, Immediate(0));
- Check(not_equal, "we should not have an empty lexical context");
- }
- // Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, offset));
- movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
- isolate()->factory()->native_context_map());
- Check(equal, "JSGlobalObject::native_context should be a native context.");
- }
-
- // Check if both contexts are the same.
- cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- j(equal, &same_contexts);
-
- // Compare security tokens.
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Preserve original value of holder_reg.
- push(holder_reg);
- movq(holder_reg,
- FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- CompareRoot(holder_reg, Heap::kNullValueRootIndex);
- Check(not_equal, "JSGlobalProxy::context() should not be null.");
-
- // Read the first word and compare to native_context_map(),
- movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
- Check(equal, "JSGlobalObject::native_context should be a native context.");
- pop(holder_reg);
- }
-
- movq(kScratchRegister,
- FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- int token_offset =
- Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, token_offset));
- cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
- j(not_equal, miss);
-
- bind(&same_contexts);
-}
-
-
-void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
- // First of all we assign the hash seed to scratch.
- LoadRoot(scratch, Heap::kHashSeedRootIndex);
- SmiToInteger32(scratch, scratch);
-
- // Xor original key with a seed.
- xorl(r0, scratch);
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- movl(scratch, r0);
- notl(r0);
- shll(scratch, Immediate(15));
- addl(r0, scratch);
- // hash = hash ^ (hash >> 12);
- movl(scratch, r0);
- shrl(scratch, Immediate(12));
- xorl(r0, scratch);
- // hash = hash + (hash << 2);
- leal(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- movl(scratch, r0);
- shrl(scratch, Immediate(4));
- xorl(r0, scratch);
- // hash = hash * 2057;
- imull(r0, r0, Immediate(2057));
- // hash = hash ^ (hash >> 16);
- movl(scratch, r0);
- shrl(scratch, Immediate(16));
- xorl(r0, scratch);
-}
-
-
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
-
- Label done;
-
- GetNumberHash(r0, r1);
-
- // Compute capacity mask.
- SmiToInteger32(r1, FieldOperand(elements,
- SeededNumberDictionary::kCapacityOffset));
- decl(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- movq(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(r2, r1);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
- lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- cmpq(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- SeededNumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
- j(equal, &done);
- } else {
- j(not_equal, miss);
- }
- }
-
- bind(&done);
- // Check that the value is a normal propety.
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
- Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Smi::FromInt(PropertyDetails::TypeField::kMask));
- j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
-void MacroAssembler::LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Just return if allocation top is already known.
- if ((flags & RESULT_CONTAINS_TOP) != 0) {
- // No use of scratch if allocation top is provided.
- ASSERT(!scratch.is_valid());
-#ifdef DEBUG
- // Assert that result actually contains top on entry.
- Operand top_operand = ExternalOperand(new_space_allocation_top);
- cmpq(result, top_operand);
- Check(equal, "Unexpected allocation top");
-#endif
- return;
- }
-
- // Move address of new object to result. Use scratch register if available,
- // and keep address in scratch until call to UpdateAllocationTopHelper.
- if (scratch.is_valid()) {
- LoadAddress(scratch, new_space_allocation_top);
- movq(result, Operand(scratch, 0));
- } else {
- Load(result, new_space_allocation_top);
- }
-}
-
-
-void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch) {
- if (emit_debug_code()) {
- testq(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, "Unaligned allocation in new space");
- }
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Update new top.
- if (scratch.is_valid()) {
- // Scratch already contains address of allocation top.
- movq(Operand(scratch, 0), result_end);
- } else {
- Store(new_space_allocation_top, result_end);
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- movl(result, Immediate(0x7091));
- if (result_end.is_valid()) {
- movl(result_end, Immediate(0x7191));
- }
- if (scratch.is_valid()) {
- movl(scratch, Immediate(0x7291));
- }
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, "Allocation is not double aligned");
- }
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- Register top_reg = result_end.is_valid() ? result_end : result;
-
- if (!top_reg.is(result)) {
- movq(top_reg, result);
- }
- addq(top_reg, Immediate(object_size));
- j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
- cmpq(top_reg, limit_operand);
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch);
-
- bool tag_result = (flags & TAG_OBJECT) != 0;
- if (top_reg.is(result)) {
- if (tag_result) {
- subq(result, Immediate(object_size - kHeapObjectTag));
- } else {
- subq(result, Immediate(object_size));
- }
- } else if (tag_result) {
- // Tag the result if requested.
- ASSERT(kHeapObjectTag == 1);
- incq(result);
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- ASSERT((flags & SIZE_IN_WORDS) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- movl(result, Immediate(0x7091));
- movl(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- movl(scratch, Immediate(0x7291));
- }
- // Register element_count is not modified by the function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, "Allocation is not double aligned");
- }
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- // We assume that element_count*element_size + header_size does not
- // overflow.
- lea(result_end, Operand(element_count, element_size, header_size));
- addq(result_end, result);
- j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
- cmpq(result_end, limit_operand);
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
- // Tag the result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- ASSERT(kHeapObjectTag == 1);
- incq(result);
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- movl(result, Immediate(0x7091));
- movl(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- movl(scratch, Immediate(0x7291));
- }
- // object_size is left unchanged by this function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- if (!object_size.is(result_end)) {
- movq(result_end, object_size);
- }
- addq(result_end, result);
- j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
- cmpq(result_end, limit_operand);
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, "Allocation is not double aligned");
- }
-
- // Tag the result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::UndoAllocationInNewSpace(Register object) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
- Operand top_operand = ExternalOperand(new_space_allocation_top);
-#ifdef DEBUG
- cmpq(object, top_operand);
- Check(below, "Undo allocation of non allocated memory");
-#endif
- movq(top_operand, object);
-}
-
-
-void MacroAssembler::AllocateHeapNumber(Register result,
- Register scratch,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch,
- no_reg,
- gc_required,
- TAG_OBJECT);
-
- // Set the map.
- LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
- kObjectAlignmentMask;
- ASSERT(kShortSize == 2);
- // scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
- kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
- if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
- }
-
- // Allocate two byte string in new space.
- AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
- kObjectAlignmentMask;
- movl(scratch1, length);
- ASSERT(kCharSize == 1);
- addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
- if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
- }
-
- // Allocate ASCII string in new space.
- AllocateInNewSpace(SeqOneByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-// Copy memory, byte-by-byte, from source to destination. Not optimized for
-// long or aligned copies. The contents of scratch and length are destroyed.
-// Destination is incremented by length, source, length and scratch are
-// clobbered.
-// A simpler loop is faster on small copies, but slower on large ones.
-// The cld() instruction must have been emitted, to set the direction flag(),
-// before calling this function.
-void MacroAssembler::CopyBytes(Register destination,
- Register source,
- Register length,
- int min_length,
- Register scratch) {
- ASSERT(min_length >= 0);
- if (emit_debug_code()) {
- cmpl(length, Immediate(min_length));
- Assert(greater_equal, "Invalid min_length");
- }
- Label loop, done, short_string, short_loop;
-
- const int kLongStringLimit = 20;
- if (min_length <= kLongStringLimit) {
- cmpl(length, Immediate(kLongStringLimit));
- j(less_equal, &short_string);
- }
-
- ASSERT(source.is(rsi));
- ASSERT(destination.is(rdi));
- ASSERT(length.is(rcx));
-
- // Because source is 8-byte aligned in our uses of this function,
- // we keep source aligned for the rep movs operation by copying the odd bytes
- // at the end of the ranges.
- movq(scratch, length);
- shrl(length, Immediate(3));
- repmovsq();
- // Move remaining bytes of length.
- andl(scratch, Immediate(0x7));
- movq(length, Operand(source, scratch, times_1, -8));
- movq(Operand(destination, scratch, times_1, -8), length);
- addq(destination, scratch);
-
- if (min_length <= kLongStringLimit) {
- jmp(&done);
-
- bind(&short_string);
- if (min_length == 0) {
- testl(length, length);
- j(zero, &done);
- }
- lea(scratch, Operand(destination, length, times_1, 0));
-
- bind(&short_loop);
- movb(length, Operand(source, 0));
- movb(Operand(destination, 0), length);
- incq(source);
- incq(destination);
- cmpq(destination, scratch);
- j(not_equal, &short_loop);
-
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler) {
- Label loop, entry;
- jmp(&entry);
- bind(&loop);
- movq(Operand(start_offset, 0), filler);
- addq(start_offset, Immediate(kPointerSize));
- bind(&entry);
- cmpq(start_offset, end_offset);
- j(less, &loop);
-}
-
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- for (int i = 1; i < context_chain_length; i++) {
- movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- }
- } else {
- // Slot is in the current function context. Move it into the
- // destination register in case we store into it (the write barrier
- // cannot be allowed to destroy the context in rsi).
- movq(dst, rsi);
- }
-
- // We should not have found a with context by walking the context
- // chain (i.e., the static scope chain and runtime context chain do
- // not agree). A variable occurring in such a scope should have
- // slot type LOOKUP and not CONTEXT.
- if (emit_debug_code()) {
- CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
- Heap::kWithContextMapRootIndex);
- Check(not_equal, "Variable resolved to with context.");
- }
-}
-
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- // Load the global or builtins object from the current context.
- movq(scratch,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check that the function's map is the same as the expected cached map.
- movq(scratch, Operand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
- int offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmpq(map_in_out, FieldOperand(scratch, offset));
- j(not_equal, no_map_match);
-
- // Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- movq(map_in_out, FieldOperand(scratch, offset));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- movq(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-#ifdef _WIN64
-static const int kRegisterPassedArguments = 4;
-#else
-static const int kRegisterPassedArguments = 6;
-#endif
-
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- movq(function,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- movq(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- movq(function, Operand(function, Context::SlotOffset(index)));
-}
-
-
-void MacroAssembler::LoadArrayFunction(Register function) {
- movq(function,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- movq(function,
- Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map) {
- // Load the initial map. The global functions all have initial maps.
- movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
- jmp(&ok);
- bind(&fail);
- Abort("Global functions must have initial map");
- bind(&ok);
- }
-}
-
-
-int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
- // On Windows 64 stack slots are reserved by the caller for all arguments
- // including the ones passed in registers, and space is always allocated for
- // the four register arguments even if the function takes fewer than four
- // arguments.
- // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
- // and the caller does not reserve stack slots for them.
- ASSERT(num_arguments >= 0);
-#ifdef _WIN64
- const int kMinimumStackSlots = kRegisterPassedArguments;
- if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
- return num_arguments;
-#else
- if (num_arguments < kRegisterPassedArguments) return 0;
- return num_arguments - kRegisterPassedArguments;
-#endif
-}
-
-
-void MacroAssembler::PrepareCallCFunction(int num_arguments) {
- int frame_alignment = OS::ActivationFrameAlignment();
- ASSERT(frame_alignment != 0);
- ASSERT(num_arguments >= 0);
-
- // Make stack end at alignment and allocate space for arguments and old rsp.
- movq(kScratchRegister, rsp);
- ASSERT(IsPowerOf2(frame_alignment));
- int argument_slots_on_stack =
- ArgumentStackSlotsForCFunctionCall(num_arguments);
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
- and_(rsp, Immediate(-frame_alignment));
- movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- LoadAddress(rax, function);
- CallCFunction(rax, num_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(Register function, int num_arguments) {
- ASSERT(has_frame());
- // Check stack alignment.
- if (emit_debug_code()) {
- CheckStackAlignment();
- }
-
- call(function);
- ASSERT(OS::ActivationFrameAlignment() != 0);
- ASSERT(num_arguments >= 0);
- int argument_slots_on_stack =
- ArgumentStackSlotsForCFunctionCall(num_arguments);
- movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
-}
-
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
-}
-
-
-CodePatcher::CodePatcher(byte* address, int size)
- : address_(address),
- size_(size),
- masm_(NULL, address, size + Assembler::kGap) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- CPU::FlushICache(address_, size_);
-
- // Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- ASSERT(cc == zero || cc == not_zero);
- if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
- } else {
- movq(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
- }
- if (mask < (1 << kBitsPerByte)) {
- testb(Operand(scratch, MemoryChunk::kFlagsOffset),
- Immediate(static_cast<uint8_t>(mask)));
- } else {
- testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
- }
- j(cc, condition_met, condition_met_distance);
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* on_black,
- Label::Distance on_black_distance) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- // The mask_scratch register contains a 1 at the position of the first bit
- // and a 0 at all other positions, including the position of the second bit.
- movq(rcx, mask_scratch);
- // Make rcx into a mask that covers both marking bits using the operation
- // rcx = mask | (mask << 1).
- lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
- // Note that we are using a 4-byte aligned 8-byte load.
- and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- cmpq(mask_scratch, rcx);
- j(equal, on_black, on_black_distance);
-}
-
-
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(
- Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance) {
- Label is_data_object;
- movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- j(equal, &is_data_object, Label::kNear);
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
- Immediate(kIsIndirectStringMask | kIsNotStringMask));
- j(not_zero, not_data_object, not_data_object_distance);
- bind(&is_data_object);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
- movq(bitmap_reg, addr_reg);
- // Sign extended 32 bit immediate.
- and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- movq(rcx, addr_reg);
- int shift =
- Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
- shrl(rcx, Immediate(shift));
- and_(rcx,
- Immediate((Page::kPageAlignmentMask >> shift) &
- ~(Bitmap::kBytesPerCell - 1)));
-
- addq(bitmap_reg, rcx);
- movq(rcx, addr_reg);
- shrl(rcx, Immediate(kPointerSizeLog2));
- and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
- movl(mask_reg, Immediate(1));
- shl_cl(mask_reg);
-}
-
-
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* value_is_white_and_not_data,
- Label::Distance distance) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- Label done;
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- j(not_zero, &done, Label::kNear);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- push(mask_scratch);
- // shl. May overflow making the check conservative.
- addq(mask_scratch, mask_scratch);
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- pop(mask_scratch);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = rcx; // Holds map while checking type.
- Register length = rcx; // Holds length of object after checking type.
- Label not_heap_number;
- Label is_data_object;
-
- // Check for heap-number
- movq(map, FieldOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- j(not_equal, &not_heap_number, Label::kNear);
- movq(length, Immediate(HeapNumber::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_heap_number);
- // Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = rcx;
- movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
- j(not_zero, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- Label not_external;
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- testb(instance_type, Immediate(kExternalStringTag));
- j(zero, &not_external, Label::kNear);
- movq(length, Immediate(ExternalString::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_external);
- // Sequential string, either ASCII or UC16.
- ASSERT(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- addq(length, Immediate(0x04));
- // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
- addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
-
- bind(&done);
-}
-
-
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Label next, start;
- Register empty_fixed_array_value = r8;
- LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- movq(rcx, rax);
-
- // Check if the enum length field is properly initialized, indicating that
- // there is an enum cache.
- movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
-
- EnumLength(rdx, rbx);
- Cmp(rdx, Smi::FromInt(Map::kInvalidEnumCache));
- j(equal, call_runtime);
-
- jmp(&start);
-
- bind(&next);
-
- movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
-
- // For all objects but the receiver, check that the cache is empty.
- EnumLength(rdx, rbx);
- Cmp(rdx, Smi::FromInt(0));
- j(not_equal, call_runtime);
-
- bind(&start);
-
- // Check that there are no elements. Register rcx contains the current JS
- // object we've reached through the prototype chain.
- cmpq(empty_fixed_array_value,
- FieldOperand(rcx, JSObject::kElementsOffset));
- j(not_equal, call_runtime);
-
- movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- cmpq(rcx, null_value);
- j(not_equal, &next);
-}
-
-void MacroAssembler::TestJSArrayForAllocationSiteInfo(
- Register receiver_reg,
- Register scratch_reg) {
- Label no_info_available;
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- lea(scratch_reg, Operand(receiver_reg,
- JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
- movq(kScratchRegister, new_space_start);
- cmpq(scratch_reg, kScratchRegister);
- j(less, &no_info_available);
- cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
- j(greater, &no_info_available);
- CompareRoot(MemOperand(scratch_reg, -AllocationSiteInfo::kSize),
- Heap::kAllocationSiteInfoMapRootIndex);
- bind(&no_info_available);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.h b/src/3rdparty/v8/src/x64/macro-assembler-x64.h
deleted file mode 100644
index 43b6bfb..0000000
--- a/src/3rdparty/v8/src/x64/macro-assembler-x64.h
+++ /dev/null
@@ -1,1508 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
-#define V8_X64_MACRO_ASSEMBLER_X64_H_
-
-#include "assembler.h"
-#include "frames.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-// Default scratch register used by MacroAssembler (and other code that needs
-// a spare register). The register isn't callee save, and not used by the
-// function calling convention.
-const Register kScratchRegister = { 10 }; // r10.
-const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
-const Register kRootRegister = { 13 }; // r13 (callee save).
-// Value of smi in kSmiConstantRegister.
-const int kSmiConstantRegisterValue = 1;
-// Actual value of root register is offset from the root array's start
-// to take advantage of negitive 8-bit displacement values.
-const int kRootRegisterBias = 128;
-
-// Convenience for platform-independent signatures.
-typedef Operand MemOperand;
-
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
-
-// Forward declaration.
-class JumpTarget;
-
-struct SmiIndex {
- SmiIndex(Register index_register, ScaleFactor scale)
- : reg(index_register),
- scale(scale) {}
- Register reg;
- ScaleFactor scale;
-};
-
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
- public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
-
- // Prevent the use of the RootArray during the lifetime of this
- // scope object.
- class NoRootArrayScope BASE_EMBEDDED {
- public:
- explicit NoRootArrayScope(MacroAssembler* assembler)
- : variable_(&assembler->root_array_available_),
- old_value_(assembler->root_array_available_) {
- assembler->root_array_available_ = false;
- }
- ~NoRootArrayScope() {
- *variable_ = old_value_;
- }
- private:
- bool* variable_;
- bool old_value_;
- };
-
- // Operand pointing to an external reference.
- // May emit code to set up the scratch register. The operand is
- // only guaranteed to be correct as long as the scratch register
- // isn't changed.
- // If the operand is used more than once, use a scratch register
- // that is guaranteed not to be clobbered.
- Operand ExternalOperand(ExternalReference reference,
- Register scratch = kScratchRegister);
- // Loads and stores the value of an external reference.
- // Special case code for load and store to take advantage of
- // load_rax/store_rax if possible/necessary.
- // For other operations, just use:
- // Operand operand = ExternalOperand(extref);
- // operation(operand, ..);
- void Load(Register destination, ExternalReference source);
- void Store(ExternalReference destination, Register source);
- // Loads the address of the external reference into the destination
- // register.
- void LoadAddress(Register destination, ExternalReference source);
- // Returns the size of the code generated by LoadAddress.
- // Used by CallSize(ExternalReference) to find the size of a call.
- int LoadAddressSize(ExternalReference source);
- // Pushes the address of the external reference onto the stack.
- void PushAddress(ExternalReference source);
-
- // Operations on roots in the root-array.
- void LoadRoot(Register destination, Heap::RootListIndex index);
- void StoreRoot(Register source, Heap::RootListIndex index);
- // Load a root value where the index (or part of it) is variable.
- // The variable_offset register is added to the fixed_offset value
- // to get the index into the root-array.
- void LoadRootIndexed(Register destination,
- Register variable_offset,
- int fixed_offset);
- void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
- void PushRoot(Heap::RootListIndex index);
-
- // These functions do not arrange the registers in any particular order so
- // they are not useful for calls that can cause a GC. The caller can
- // exclude up to 3 registers that do not need to be saved and restored.
- void PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
- void PopCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
-
-// ---------------------------------------------------------------------------
-// GC Support
-
-
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, not_equal, branch, distance);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, equal, branch, distance);
- }
-
- // Check if an object has the black incremental marking color. Also uses rcx!
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_distance = Label::kFar);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Label* object_is_white_and_not_data,
- Label::Distance distance);
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
- void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // As above, but the offset has the tag presubtracted. For use with
- // Operand(reg, off).
- void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- save_fp,
- remembered_set_action,
- smi_check);
- }
-
- // Notify the garbage collector that we wrote a pointer into a fixed array.
- // |array| is the array being stored into, |value| is the
- // object being stored. |index| is the array index represented as a non-smi.
- // All registers are clobbered by the operation RecordWriteArray
- // filters out smis so it does not update the write barrier if the
- // value is a smi.
- void RecordWriteArray(
- Register array,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // For page containing |object| mark region covering |address|
- // dirty. |object| is the object being stored into, |value| is the
- // object being stored. The address and value registers are clobbered by the
- // operation. RecordWrite filters out smis so it does not update
- // the write barrier if the value is a smi.
- void RecordWrite(
- Register object,
- Register address,
- Register value,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
-#endif
-
- // Enter specific kind of exit frame; either in normal or
- // debug mode. Expects the number of arguments in register rax and
- // sets up the number of arguments in register rdi and the pointer
- // to the first argument in register rsi.
- //
- // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
- // accessible via StackSpaceOperand.
- void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);
-
- // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
- // memory (not GCed) on the stack accessible via StackSpaceOperand.
- void EnterApiExitFrame(int arg_stack_space);
-
- // Leave the current exit frame. Expects/provides the return value in
- // register rax:rdx (untouched) and the pointer to the first
- // argument in register rsi.
- void LeaveExitFrame(bool save_doubles = false);
-
- // Leave the current exit frame. Expects/provides the return value in
- // register rax (untouched).
- void LeaveApiExitFrame();
-
- // Push and pop the registers that can hold pointers.
- void PushSafepointRegisters() { Pushad(); }
- void PopSafepointRegisters() { Popad(); }
- // Store the value in register src in the safepoint register stack
- // slot for register dst.
- void StoreToSafepointRegisterSlot(Register dst, const Immediate& imm);
- void StoreToSafepointRegisterSlot(Register dst, Register src);
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
-
- void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- movq(kRootRegister, roots_array_start);
- addq(kRootRegister, Immediate(kRootRegisterBias));
- }
-
- // ---------------------------------------------------------------------------
- // JavaScript invokes
-
- // Set up call kind marking in rcx. The method takes rcx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
- // Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
-
-
- // ---------------------------------------------------------------------------
- // Smi tagging, untagging and operations on tagged smis.
-
- void InitializeSmiConstantRegister() {
- movq(kSmiConstantRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE64);
- }
-
- // Conversions between tagged smi values and non-tagged integer values.
-
- // Tag an integer value. The result must be known to be a valid smi value.
- // Only uses the low 32 bits of the src register. Sets the N and Z flags
- // based on the value of the resulting smi.
- void Integer32ToSmi(Register dst, Register src);
-
- // Stores an integer32 value into a memory field that already holds a smi.
- void Integer32ToSmiField(const Operand& dst, Register src);
-
- // Adds constant to src and tags the result as a smi.
- // Result must be a valid smi.
- void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
-
- // Convert smi to 32-bit integer. I.e., not sign extended into
- // high 32 bits of destination.
- void SmiToInteger32(Register dst, Register src);
- void SmiToInteger32(Register dst, const Operand& src);
-
- // Convert smi to 64-bit integer (sign extended if necessary).
- void SmiToInteger64(Register dst, Register src);
- void SmiToInteger64(Register dst, const Operand& src);
-
- // Multiply a positive smi's integer value by a power of two.
- // Provides result as 64-bit integer value.
- void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
- Register src,
- int power);
-
- // Divide a positive smi's integer value by a power of two.
- // Provides result as 32-bit integer value.
- void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
- Register src,
- int power);
-
- // Perform the logical or of two smi values and return a smi value.
- // If either argument is not a smi, jump to on_not_smis and retain
- // the original values of source registers. The destination register
- // may be changed if it's not one of the source registers.
- void SmiOrIfSmis(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smis,
- Label::Distance near_jump = Label::kFar);
-
-
- // Simple comparison of smis. Both sides must be known smis to use these,
- // otherwise use Cmp.
- void SmiCompare(Register smi1, Register smi2);
- void SmiCompare(Register dst, Smi* src);
- void SmiCompare(Register dst, const Operand& src);
- void SmiCompare(const Operand& dst, Register src);
- void SmiCompare(const Operand& dst, Smi* src);
- // Compare the int32 in src register to the value of the smi stored at dst.
- void SmiCompareInteger32(const Operand& dst, Register src);
- // Sets sign and zero flags depending on value of smi in register.
- void SmiTest(Register src);
-
- // Functions performing a check on a known or potential smi. Returns
- // a condition that is satisfied if the check is successful.
-
- // Is the value a tagged smi.
- Condition CheckSmi(Register src);
- Condition CheckSmi(const Operand& src);
-
- // Is the value a non-negative tagged smi.
- Condition CheckNonNegativeSmi(Register src);
-
- // Are both values tagged smis.
- Condition CheckBothSmi(Register first, Register second);
-
- // Are both values non-negative tagged smis.
- Condition CheckBothNonNegativeSmi(Register first, Register second);
-
- // Are either value a tagged smi.
- Condition CheckEitherSmi(Register first,
- Register second,
- Register scratch = kScratchRegister);
-
- // Is the value the minimum smi value (since we are using
- // two's complement numbers, negating the value is known to yield
- // a non-smi value).
- Condition CheckIsMinSmi(Register src);
-
- // Checks whether an 32-bit integer value is a valid for conversion
- // to a smi.
- Condition CheckInteger32ValidSmiValue(Register src);
-
- // Checks whether an 32-bit unsigned integer value is a valid for
- // conversion to a smi.
- Condition CheckUInteger32ValidSmiValue(Register src);
-
- // Check whether src is a Smi, and set dst to zero if it is a smi,
- // and to one if it isn't.
- void CheckSmiToIndicator(Register dst, Register src);
- void CheckSmiToIndicator(Register dst, const Operand& src);
-
- // Test-and-jump functions. Typically combines a check function
- // above with a conditional jump.
-
- // Jump if the value cannot be represented by a smi.
- void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
- Label::Distance near_jump = Label::kFar);
-
- // Jump if the unsigned integer value cannot be represented by a smi.
- void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
- Label::Distance near_jump = Label::kFar);
-
- // Jump to label if the value is a tagged smi.
- void JumpIfSmi(Register src,
- Label* on_smi,
- Label::Distance near_jump = Label::kFar);
-
- // Jump to label if the value is not a tagged smi.
- void JumpIfNotSmi(Register src,
- Label* on_not_smi,
- Label::Distance near_jump = Label::kFar);
-
- // Jump to label if the value is not a non-negative tagged smi.
- void JumpUnlessNonNegativeSmi(Register src,
- Label* on_not_smi,
- Label::Distance near_jump = Label::kFar);
-
- // Jump to label if the value, which must be a tagged smi, has value equal
- // to the constant.
- void JumpIfSmiEqualsConstant(Register src,
- Smi* constant,
- Label* on_equals,
- Label::Distance near_jump = Label::kFar);
-
- // Jump if either or both register are not smi values.
- void JumpIfNotBothSmi(Register src1,
- Register src2,
- Label* on_not_both_smi,
- Label::Distance near_jump = Label::kFar);
-
- // Jump if either or both register are not non-negative smi values.
- void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
- Label* on_not_both_smi,
- Label::Distance near_jump = Label::kFar);
-
- // Operations on tagged smi values.
-
- // Smis represent a subset of integers. The subset is always equivalent to
- // a two's complement interpretation of a fixed number of bits.
-
- // Optimistically adds an integer constant to a supposed smi.
- // If the src is not a smi, or the result is not a smi, jump to
- // the label.
- void SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- // Add an integer constant to a tagged smi, giving a tagged smi as result.
- // No overflow testing on the result is done.
- void SmiAddConstant(Register dst, Register src, Smi* constant);
-
- // Add an integer constant to a tagged smi, giving a tagged smi as result.
- // No overflow testing on the result is done.
- void SmiAddConstant(const Operand& dst, Smi* constant);
-
- // Add an integer constant to a tagged smi, giving a tagged smi as result,
- // or jumping to a label if the result cannot be represented by a smi.
- void SmiAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- // Subtract an integer constant from a tagged smi, giving a tagged smi as
- // result. No testing on the result is done. Sets the N and Z flags
- // based on the value of the resulting integer.
- void SmiSubConstant(Register dst, Register src, Smi* constant);
-
- // Subtract an integer constant from a tagged smi, giving a tagged smi as
- // result, or jumping to a label if the result cannot be represented by a smi.
- void SmiSubConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- // Negating a smi can give a negative zero or too large positive value.
- // NOTICE: This operation jumps on success, not failure!
- void SmiNeg(Register dst,
- Register src,
- Label* on_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- // Adds smi values and return the result as a smi.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
- void SmiAdd(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
- void SmiAdd(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- void SmiAdd(Register dst,
- Register src1,
- Register src2);
-
- // Subtracts smi values and return the result as a smi.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
- void SmiSub(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- void SmiSub(Register dst,
- Register src1,
- Register src2);
-
- void SmiSub(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- void SmiSub(Register dst,
- Register src1,
- const Operand& src2);
-
- // Multiplies smi values and return the result as a smi,
- // if possible.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
- void SmiMul(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- // Divides one smi by another and returns the quotient.
- // Clobbers rax and rdx registers.
- void SmiDiv(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- // Divides one smi by another and returns the remainder.
- // Clobbers rax and rdx registers.
- void SmiMod(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- // Bitwise operations.
- void SmiNot(Register dst, Register src);
- void SmiAnd(Register dst, Register src1, Register src2);
- void SmiOr(Register dst, Register src1, Register src2);
- void SmiXor(Register dst, Register src1, Register src2);
- void SmiAndConstant(Register dst, Register src1, Smi* constant);
- void SmiOrConstant(Register dst, Register src1, Smi* constant);
- void SmiXorConstant(Register dst, Register src1, Smi* constant);
-
- void SmiShiftLeftConstant(Register dst,
- Register src,
- int shift_value);
- void SmiShiftLogicalRightConstant(Register dst,
- Register src,
- int shift_value,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
- void SmiShiftArithmeticRightConstant(Register dst,
- Register src,
- int shift_value);
-
- // Shifts a smi value to the left, and returns the result if that is a smi.
- // Uses and clobbers rcx, so dst may not be rcx.
- void SmiShiftLeft(Register dst,
- Register src1,
- Register src2);
- // Shifts a smi value to the right, shifting in zero bits at the top, and
- // returns the unsigned intepretation of the result if that is a smi.
- // Uses and clobbers rcx, so dst may not be rcx.
- void SmiShiftLogicalRight(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
- // Shifts a smi value to the right, sign extending the top, and
- // returns the signed intepretation of the result. That will always
- // be a valid smi value, since it's numerically smaller than the
- // original.
- // Uses and clobbers rcx, so dst may not be rcx.
- void SmiShiftArithmeticRight(Register dst,
- Register src1,
- Register src2);
-
- // Specialized operations
-
- // Select the non-smi register of two registers where exactly one is a
- // smi. If neither are smis, jump to the failure label.
- void SelectNonSmi(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smis,
- Label::Distance near_jump = Label::kFar);
-
- // Converts, if necessary, a smi to a combination of number and
- // multiplier to be used as a scaled index.
- // The src register contains a *positive* smi value. The shift is the
- // power of two to multiply the index value by (e.g.
- // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
- // The returned index register may be either src or dst, depending
- // on what is most efficient. If src and dst are different registers,
- // src is always unchanged.
- SmiIndex SmiToIndex(Register dst, Register src, int shift);
-
- // Converts a positive smi to a negative index.
- SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
-
- // Add the value of a smi in memory to an int32 register.
- // Sets flags as a normal add.
- void AddSmiField(Register dst, const Operand& src);
-
- // Basic Smi operations.
- void Move(Register dst, Smi* source) {
- LoadSmiConstant(dst, source);
- }
-
- void Move(const Operand& dst, Smi* source) {
- Register constant = GetSmiConstant(source);
- movq(dst, constant);
- }
-
- void Push(Smi* smi);
- void Test(const Operand& dst, Smi* source);
-
-
- // ---------------------------------------------------------------------------
- // String macros.
-
- // If object is a string, its map is loaded into object_map.
- void JumpIfNotString(Register object,
- Register object_map,
- Label* not_string,
- Label::Distance near_jump = Label::kFar);
-
-
- void JumpIfNotBothSequentialAsciiStrings(
- Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- Label* on_not_both_flat_ascii,
- Label::Distance near_jump = Label::kFar);
-
- // Check whether the instance type represents a flat ASCII string. Jump to the
- // label if not. If the instance type can be scratched specify same register
- // for both instance type and scratch.
- void JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- Label*on_not_flat_ascii_string,
- Label::Distance near_jump = Label::kFar);
-
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* on_fail,
- Label::Distance near_jump = Label::kFar);
-
- // ---------------------------------------------------------------------------
- // Macro instructions.
-
- // Load a register with a long value as efficiently as possible.
- void Set(Register dst, int64_t x);
- void Set(const Operand& dst, int64_t x);
-
- // Move if the registers are not identical.
- void Move(Register target, Register source);
-
- // Support for constant splitting.
- bool IsUnsafeInt(const int x);
- void SafeMove(Register dst, Smi* src);
- void SafePush(Smi* src);
-
- // Bit-field support.
- void TestBit(const Operand& dst, int bit_index);
-
- // Handle support
- void Move(Register dst, Handle<Object> source);
- void Move(const Operand& dst, Handle<Object> source);
- void Cmp(Register dst, Handle<Object> source);
- void Cmp(const Operand& dst, Handle<Object> source);
- void Cmp(Register dst, Smi* src);
- void Cmp(const Operand& dst, Smi* src);
- void Push(Handle<Object> source);
-
- // Load a heap object and handle the case of new-space objects by
- // indirecting via a global cell.
- void LoadHeapObject(Register result, Handle<HeapObject> object);
- void PushHeapObject(Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Move(result, object);
- }
- }
-
- // Load a global cell into a register.
- void LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell);
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the rsp register.
- void Drop(int stack_elements);
-
- void Call(Label* target) { call(target); }
-
- // Control Flow
- void Jump(Address destination, RelocInfo::Mode rmode);
- void Jump(ExternalReference ext);
- void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
-
- void Call(Address destination, RelocInfo::Mode rmode);
- void Call(ExternalReference ext);
- void Call(Handle<Code> code_object,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // The size of the code generated for different call instructions.
- int CallSize(Address destination, RelocInfo::Mode rmode) {
- return kCallInstructionLength;
- }
- int CallSize(ExternalReference ext);
- int CallSize(Handle<Code> code_object) {
- // Code calls use 32-bit relative addressing.
- return kShortCallInstructionLength;
- }
- int CallSize(Register target) {
- // Opcode: REX_opt FF /2 m64
- return (target.high_bit() != 0) ? 3 : 2;
- }
- int CallSize(const Operand& target) {
- // Opcode: REX_opt FF /2 m64
- return (target.requires_rex() ? 2 : 1) + target.operand_size();
- }
-
- // Emit call to the code we are currently generating.
- void CallSelf() {
- Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
- Call(self, RelocInfo::CODE_TARGET);
- }
-
- // Non-x64 instructions.
- // Push/pop all general purpose registers.
- // Does not push rsp/rbp nor any of the assembler's special purpose registers
- // (kScratchRegister, kSmiConstantRegister, kRootRegister).
- void Pushad();
- void Popad();
- // Sets the stack as after performing Popad, without actually loading the
- // registers.
- void Dropad();
-
- // Compare object type for heap object.
- // Always use unsigned comparisons: above and below, not less and greater.
- // Incoming register is heap_object and outgoing register is map.
- // They may be the same register, and may be kScratchRegister.
- void CmpObjectType(Register heap_object, InstanceType type, Register map);
-
- // Compare instance type for map.
- // Always use unsigned comparisons: above and below, not less and greater.
- void CmpInstanceType(Register map, InstanceType type);
-
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by index in
- // the FastDoubleElements array elements, otherwise jump to fail. Note that
- // index must not be smi-tagged.
- void StoreNumberToDoubleElements(Register maybe_number,
- Register elements,
- Register index,
- XMMRegister xmm_scratch,
- Label* fail,
- int elements_offset = 0);
-
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
- // result of map compare. If multiple map compares are required, the compare
- // sequences branches to early_success.
- void CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // Check if the map of an object is equal to a specified map and branch to a
- // specified target if equal. Skip the smi check if not required (object is
- // known to be a heap object)
- void DispatchMap(Register obj,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type);
-
- // Check if the object in register heap_object is a string. Afterwards the
- // register map contains the object map and the register instance_type
- // contains the instance_type. The registers map and instance_type can be the
- // same in which case it contains the instance type afterwards. Either of the
- // registers map and instance_type can be the same as heap_object.
- Condition IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type);
-
- // FCmp compares and pops the two values on top of the FPU stack.
- // The flag results are similar to integer cmp, but requires unsigned
- // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
- void FCmp();
-
- void ClampUint8(Register reg);
-
- void ClampDoubleToUint8(XMMRegister input_reg,
- XMMRegister temp_xmm_reg,
- Register result_reg);
-
- void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
-
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void EnumLength(Register dst, Register map);
- void NumberOfOwnDescriptors(Register dst, Register map);
-
- template<typename Field>
- void DecodeField(Register reg) {
- static const int shift = Field::kShift + kSmiShift;
- static const int mask = Field::kMask >> Field::kShift;
- shr(reg, Immediate(shift));
- and_(reg, Immediate(mask));
- shl(reg, Immediate(kSmiShift));
- }
-
- // Abort execution if argument is not a number, enabled via --debug-code.
- void AssertNumber(Register object);
-
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
-
- // Abort execution if argument is not a smi, enabled via --debug-code.
- void AssertSmi(Register object);
- void AssertSmi(const Operand& object);
-
- // Abort execution if a 64 bit register containing a 32 bit payload does not
- // have zeros in the top 32 bits, enabled via --debug-code.
- void AssertZeroExtended(Register reg);
-
- // Abort execution if argument is not a string, enabled via --debug-code.
- void AssertString(Register object);
-
- // Abort execution if argument is not the root value with the given index,
- // enabled via --debug-code.
- void AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
-
- // ---------------------------------------------------------------------------
- // Exception handling
-
- // Push a new try handler and link it into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
-
- // Unlink the stack handler on top of the stack from the try handler chain.
- void PopTryHandler();
-
- // Activate the top handler in the try hander chain and pass the
- // thrown value.
- void Throw(Register value);
-
- // Propagate an uncatchable exception out of the current JS stack.
- void ThrowUncatchable(Register value);
-
- // ---------------------------------------------------------------------------
- // Inline caching support
-
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, but the scratch register and kScratchRegister,
- // which must be different, are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
- void GetNumberHash(Register r0, Register scratch);
-
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result);
-
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space. If the new space is exhausted control
- // continues at the gc_required label. The allocated object is returned in
- // result and end of the new object is returned in result_end. The register
- // scratch can be passed as no_reg in which case an additional object
- // reference will be added to the reloc info. The returned pointers in result
- // and result_end have not yet been tagged as heap objects. If
- // result_contains_top_on_entry is true the content of result is known to be
- // the allocation top on entry (could be result_end from a previous call to
- // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
- // should be no_reg as it is never used.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. Make sure that no pointers are left to the
- // object(s) no longer allocated as they would be invalid when allocation is
- // un-done.
- void UndoAllocationInNewSpace(Register object);
-
- // Allocate a heap number in new space with undefined value. Returns
- // tagged pointer in result register, or jumps to gc_required if new
- // space is full.
- void AllocateHeapNumber(Register result,
- Register scratch,
- Label* gc_required);
-
- // Allocate a sequential string. All the header fields of the string object
- // are initialized.
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
-
- // Allocate a raw cons string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocate a raw sliced string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // ---------------------------------------------------------------------------
- // Support functions.
-
- // Check if result is zero and op is negative.
- void NegativeZeroTest(Register result, Register op, Label* then_label);
-
- // Check if result is zero and op is negative in code using jump targets.
- void NegativeZeroTest(CodeGenerator* cgen,
- Register result,
- Register op,
- JumpTarget* then_target);
-
- // Check if result is zero and any of op1 and op2 are negative.
- // Register scratch is destroyed, and it must be different from op2.
- void NegativeZeroTest(Register result, Register op1, Register op2,
- Register scratch, Label* then_label);
-
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other register may be
- // clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Label* miss,
- bool miss_on_bound_function = false);
-
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
- // Find the function context up the context chain.
- void LoadContext(Register dst, int context_chain_length);
-
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
- // Load the global function with the given index.
- void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
-
- // Load the initial map from the global function. The registers
- // function and map can be the same.
- void LoadGlobalFunctionInitialMap(Register function, Register map);
-
- // ---------------------------------------------------------------------------
- // Runtime calls
-
- // Call a code stub.
- void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // Tail call a code stub (jump).
- void TailCallStub(CodeStub* stub);
-
- // Return from a code stub after popping its arguments.
- void StubReturn(int argc);
-
- // Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
-
- // Call a runtime function and save the value of XMM registers.
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
-
- // Convenience function: call an external reference.
- void CallExternalReference(const ExternalReference& ext,
- int num_arguments);
-
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& ext, int result_size);
-
- // Prepares stack to put arguments (aligns and so on). WIN64 calling
- // convention requires to put the pointer to the return value slot into
- // rcx (rcx must be preserverd until CallApiFunctionAndReturn). Saves
- // context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
- // inside the exit frame (not GCed) accessible via StackSpaceOperand.
- void PrepareCallApiFunction(int arg_stack_space);
-
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Clobbers r14, r15, rbx and
- // caller-save registers. Restores context. On return removes
- // stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Address function_address, int stack_space);
-
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, arguments must be stored in esp[0], esp[4],
- // etc., not pushed. The argument count assumes all arguments are word sized.
- // The number of slots reserved for arguments depends on platform. On Windows
- // stack slots are reserved for the arguments passed in registers. On other
- // platforms stack slots are only reserved for the arguments actually passed
- // on the stack.
- void PrepareCallCFunction(int num_arguments);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
-
- // Calculate the number of stack slots to reserve for arguments when calling a
- // C function.
- int ArgumentStackSlotsForCFunctionCall(int num_arguments);
-
- // ---------------------------------------------------------------------------
- // Utilities
-
- void Ret();
-
- // Return and drop arguments from stack, where the number of arguments
- // may be bigger than 2^16 - 1. Requires a scratch register.
- void Ret(int bytes_dropped, Register scratch);
-
- Handle<Object> CodeObject() {
- ASSERT(!code_object_.is_null());
- return code_object_;
- }
-
- // Copy length bytes from source to destination.
- // Uses scratch register internally (if you have a low-eight register
- // free, do use it, otherwise kScratchRegister will be used).
- // The min_length is a minimum limit on the value that length will have.
- // The algorithm has some special cases that might be omitted if the string
- // is known to always be long.
- void CopyBytes(Register destination,
- Register source,
- Register length,
- int min_length = 0,
- Register scratch = kScratchRegister);
-
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
-
-
- // ---------------------------------------------------------------------------
- // StatsCounter support
-
- void SetCounter(StatsCounter* counter, int value);
- void IncrementCounter(StatsCounter* counter, int value);
- void DecrementCounter(StatsCounter* counter, int value);
-
-
- // ---------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, const char* msg);
-
- void AssertFastElements(Register elements);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg);
-
- // Print a message to stdout and abort execution.
- void Abort(const char* msg);
-
- // Check that the stack is aligned.
- void CheckStackAlignment();
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
- static int SafepointRegisterStackIndex(Register reg) {
- return SafepointRegisterStackIndex(reg.code());
- }
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
- // Expects object in rax and returns map with validated enum cache
- // in rax. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Register null_value,
- Label* call_runtime);
-
- // AllocationSiteInfo support. Arrays may have an associated
- // AllocationSiteInfo object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver_reg should point to the array object.
- // scratch_reg gets clobbered.
- // If allocation info is present, condition flags are set to equal
- void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
- Register scratch_reg);
-
- private:
- // Order general registers are pushed by Pushad.
- // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
- static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
- static const int kNumSafepointSavedRegisters = 11;
- static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
- bool generating_stub_;
- bool allow_stub_calls_;
- bool has_frame_;
- bool root_array_available_;
-
- // Returns a register holding the smi value. The register MUST NOT be
- // modified. It may be the "smi 1 constant" register.
- Register GetSmiConstant(Smi* value);
-
- intptr_t RootRegisterDelta(ExternalReference other);
-
- // Moves the smi value to the destination register.
- void LoadSmiConstant(Register dst, Smi* value);
-
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
- // Helper functions for generating invokes.
- void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- Label::Distance near_jump = Label::kFar,
- const CallWrapper& call_wrapper = NullCallWrapper(),
- CallKind call_kind = CALL_AS_METHOD);
-
- void EnterExitFramePrologue(bool save_rax);
-
- // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
- // accessible via StackSpaceOperand.
- void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
-
- void LeaveExitFrameEpilogue();
-
- // Allocation support helpers.
- // Loads the top of new-space into the result register.
- // Otherwise the address of the new-space top is loaded into scratch (if
- // scratch is valid), and the new-space top is loaded into result.
- void LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags);
- // Update allocation top with value in result_end register.
- // If scratch is valid, it contains the address of the allocation top.
- void UpdateAllocationTopHelper(Register result_end, Register scratch);
-
- // Helper for PopHandleScope. Allowed to perform a GC and returns
- // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
- // possibly returns a failure object indicating an allocation failure.
- Object* PopHandleScopeHelper(Register saved,
- Register scratch,
- bool gc_allowed);
-
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch,
- Label::Distance distance = Label::kFar);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Uses rcx as scratch and leaves addr_reg
- // unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
-
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
- // Compute memory operands for safepoint stack slots.
- Operand SafepointRegisterSlot(Register reg);
- static int SafepointRegisterStackIndex(int reg_code) {
- return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
- }
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class StandardFrame;
-};
-
-
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. Is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion.
-class CodePatcher {
- public:
- CodePatcher(byte* address, int size);
- virtual ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
-
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-// Generate an Operand for loading a field from an object.
-inline Operand FieldOperand(Register object, int offset) {
- return Operand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate an Operand for loading an indexed field from an object.
-inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
- int offset) {
- return Operand(object, index, scale, offset - kHeapObjectTag);
-}
-
-
-inline Operand ContextOperand(Register context, int index) {
- return Operand(context, Context::SlotOffset(index));
-}
-
-
-inline Operand GlobalObjectOperand() {
- return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX);
-}
-
-
-static inline Operand QmlGlobalObjectOperand() {
- return ContextOperand(rsi, Context::QML_GLOBAL_OBJECT_INDEX);
-}
-
-
-// Provides access to exit frame stack space (not GCed).
-inline Operand StackSpaceOperand(int index) {
-#ifdef _WIN64
- const int kShaddowSpace = 4;
- return Operand(rsp, (index + kShaddowSpace) * kPointerSize);
-#else
- return Operand(rsp, index * kPointerSize);
-#endif
-}
-
-
-
-#ifdef GENERATED_CODE_COVERAGE
-extern void LogGeneratedCodeCoverage(const char* file_line);
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) { \
- Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
- masm->pushfq(); \
- masm->Pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
- masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE); \
- masm->pop(rax); \
- masm->Popad(); \
- masm->popfq(); \
- } \
- masm->
-#else
-#define ACCESS_MASM(masm) masm->
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_X64_MACRO_ASSEMBLER_X64_H_
diff --git a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc b/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc
deleted file mode 100644
index f5b5e95..0000000
--- a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc
+++ /dev/null
@@ -1,1553 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "serialize.h"
-#include "unicode.h"
-#include "log.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "x64/regexp-macro-assembler-x64.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-
-/*
- * This assembler uses the following register assignment convention
- * - rdx : Currently loaded character(s) as ASCII or UC16. Must be loaded
- * using LoadCurrentCharacter before using any of the dispatch methods.
- * Temporarily stores the index of capture start after a matching pass
- * for a global regexp.
- * - rdi : Current position in input, as negative offset from end of string.
- * Please notice that this is the byte offset, not the character
- * offset! Is always a 32-bit signed (negative) offset, but must be
- * maintained sign-extended to 64 bits, since it is used as index.
- * - rsi : End of input (points to byte after last character in input),
- * so that rsi+rdi points to the current character.
- * - rbp : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - rsp : Points to tip of C stack.
- * - rcx : Points to tip of backtrack stack. The backtrack stack contains
- * only 32-bit values. Most are offsets from some base (e.g., character
- * positions from end of string or code location from Code* pointer).
- * - r8 : Code object pointer. Used to convert between absolute and
- * code-object-relative addresses.
- *
- * The registers rax, rbx, r9 and r11 are free to use for computations.
- * If changed to use r12+, they should be saved as callee-save registers.
- * The macro assembler special registers r12 and r13 (kSmiConstantRegister,
- * kRootRegister) aren't special during execution of RegExp code (they don't
- * hold the values assumed when creating JS code), so no Smi or Root related
- * macro operations can be used.
- *
- * Each call to a C++ method should retain these registers.
- *
- * The stack will have the following content, in some order, indexable from the
- * frame pointer (see, e.g., kStackHighEnd):
- * - Isolate* isolate (address of the current isolate)
- * - direct_call (if 1, direct call from JavaScript code, if 0 call
- * through the runtime system)
- * - stack_area_base (high end of the memory area to use as
- * backtracking stack)
- * - capture array size (may fit multiple sets of matches)
- * - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (address of end of string)
- * - start of input (address of first character in string)
- * - start index (character index of start)
- * - String* input_string (input string)
- * - return address
- * - backup of callee save registers (rbx, possibly rsi and rdi).
- * - success counter (only useful for global regexp to count matches)
- * - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
- * - At start of string (if 1, we are starting at the start of the
- * string, otherwise 0)
- * - register 0 rbp[-n] (Only positions must be stored in the first
- * - register 1 rbp[-n-8] num_saved_registers_ registers)
- * - ...
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers starts out uninitialized.
- *
- * The first seven values must be provided by the calling code by
- * calling the code's entry address cast to a function pointer with the
- * following signature:
- * int (*match)(String* input_string,
- * int start_index,
- * Address start,
- * Address end,
- * int* capture_output_array,
- * bool at_start,
- * byte* stack_area_base,
- * bool direct_call)
- */
-
-#define __ ACCESS_MASM((&masm_))
-
-RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(Isolate::Current(), NULL, kRegExpCodeSize),
- no_root_array_scope_(&masm_),
- code_relative_fixup_positions_(4, zone),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- ASSERT_EQ(0, registers_to_save % 2);
- __ jmp(&entry_label_); // We'll write the entry code when we know more.
- __ bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerX64::~RegExpMacroAssemblerX64() {
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerX64::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerX64::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ addq(rdi, Immediate(by * char_size()));
- }
-}
-
-
-void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
- ASSERT(reg >= 0);
- ASSERT(reg < num_registers_);
- if (by != 0) {
- __ addq(register_location(reg), Immediate(by));
- }
-}
-
-
-void RegExpMacroAssemblerX64::Backtrack() {
- CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
- Pop(rbx);
- __ addq(rbx, code_object_pointer());
- __ jmp(rbx);
-}
-
-
-void RegExpMacroAssemblerX64::Bind(Label* label) {
- __ bind(label);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacter(uint32_t c, Label* on_equal) {
- __ cmpl(current_character(), Immediate(c));
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) {
- __ cmpl(current_character(), Immediate(limit));
- BranchOrBacktrack(greater, on_greater);
-}
-
-
-void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, &not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
- BranchOrBacktrack(equal, on_at_start);
- __ bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
- BranchOrBacktrack(not_equal, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacterLT(uc16 limit, Label* on_less) {
- __ cmpl(current_character(), Immediate(limit));
- BranchOrBacktrack(less, on_less);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
-#ifdef DEBUG
- // If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ASCII character.
- if (mode_ == ASCII) {
- ASSERT(String::IsOneByte(str.start(), str.length()));
- }
-#endif
- int byte_length = str.length() * char_size();
- int byte_offset = cp_offset * char_size();
- if (check_end_of_string) {
- // Check that there are at least str.length() characters left in the input.
- __ cmpl(rdi, Immediate(-(byte_offset + byte_length)));
- BranchOrBacktrack(greater, on_failure);
- }
-
- if (on_failure == NULL) {
- // Instead of inlining a backtrack, (re)use the global backtrack target.
- on_failure = &backtrack_label_;
- }
-
- // Do one character test first to minimize loading for the case that
- // we don't match at all (loading more than one character introduces that
- // chance of reading unaligned and reading across cache boundaries).
- // If the first character matches, expect a larger chance of matching the
- // string, and start loading more characters at a time.
- if (mode_ == ASCII) {
- __ cmpb(Operand(rsi, rdi, times_1, byte_offset),
- Immediate(static_cast<int8_t>(str[0])));
- } else {
- // Don't use 16-bit immediate. The size changing prefix throws off
- // pre-decoding.
- __ movzxwl(rax,
- Operand(rsi, rdi, times_1, byte_offset));
- __ cmpl(rax, Immediate(static_cast<int32_t>(str[0])));
- }
- BranchOrBacktrack(not_equal, on_failure);
-
- __ lea(rbx, Operand(rsi, rdi, times_1, 0));
- for (int i = 1, n = str.length(); i < n; ) {
- if (mode_ == ASCII) {
- if (i + 8 <= n) {
- uint64_t combined_chars =
- (static_cast<uint64_t>(str[i + 0]) << 0) ||
- (static_cast<uint64_t>(str[i + 1]) << 8) ||
- (static_cast<uint64_t>(str[i + 2]) << 16) ||
- (static_cast<uint64_t>(str[i + 3]) << 24) ||
- (static_cast<uint64_t>(str[i + 4]) << 32) ||
- (static_cast<uint64_t>(str[i + 5]) << 40) ||
- (static_cast<uint64_t>(str[i + 6]) << 48) ||
- (static_cast<uint64_t>(str[i + 7]) << 56);
- __ movq(rax, combined_chars, RelocInfo::NONE64);
- __ cmpq(rax, Operand(rbx, byte_offset + i));
- i += 8;
- } else if (i + 4 <= n) {
- uint32_t combined_chars =
- (static_cast<uint32_t>(str[i + 0]) << 0) ||
- (static_cast<uint32_t>(str[i + 1]) << 8) ||
- (static_cast<uint32_t>(str[i + 2]) << 16) ||
- (static_cast<uint32_t>(str[i + 3]) << 24);
- __ cmpl(Operand(rbx, byte_offset + i), Immediate(combined_chars));
- i += 4;
- } else {
- __ cmpb(Operand(rbx, byte_offset + i),
- Immediate(static_cast<int8_t>(str[i])));
- i++;
- }
- } else {
- ASSERT(mode_ == UC16);
- if (i + 4 <= n) {
- uint64_t combined_chars = *reinterpret_cast<const uint64_t*>(&str[i]);
- __ movq(rax, combined_chars, RelocInfo::NONE64);
- __ cmpq(rax,
- Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
- i += 4;
- } else if (i + 2 <= n) {
- uint32_t combined_chars = *reinterpret_cast<const uint32_t*>(&str[i]);
- __ cmpl(Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)),
- Immediate(combined_chars));
- i += 2;
- } else {
- __ movzxwl(rax,
- Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
- __ cmpl(rax, Immediate(str[i]));
- i++;
- }
- }
- BranchOrBacktrack(not_equal, on_failure);
- }
-}
-
-
-void RegExpMacroAssemblerX64::CheckGreedyLoop(Label* on_equal) {
- Label fallthrough;
- __ cmpl(rdi, Operand(backtrack_stackpointer(), 0));
- __ j(not_equal, &fallthrough);
- Drop();
- BranchOrBacktrack(no_condition, on_equal);
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- __ movq(rdx, register_location(start_reg)); // Offset of start of capture
- __ movq(rbx, register_location(start_reg + 1)); // Offset of end of capture
- __ subq(rbx, rdx); // Length of capture.
-
- // -----------------------
- // rdx = Start offset of capture.
- // rbx = Length of capture
-
- // If length is negative, this code will fail (it's a symptom of a partial or
- // illegal capture where start of capture after end of capture).
- // This must not happen (no back-reference can reference a capture that wasn't
- // closed before in the reg-exp, and we must not generate code that can cause
- // this condition).
-
- // If length is zero, either the capture is empty or it is nonparticipating.
- // In either case succeed immediately.
- __ j(equal, &fallthrough);
-
- // -----------------------
- // rdx - Start of capture
- // rbx - length of capture
- // Check that there are sufficient characters left in the input.
- __ movl(rax, rdi);
- __ addl(rax, rbx);
- BranchOrBacktrack(greater, on_no_match);
-
- if (mode_ == ASCII) {
- Label loop_increment;
- if (on_no_match == NULL) {
- on_no_match = &backtrack_label_;
- }
-
- __ lea(r9, Operand(rsi, rdx, times_1, 0));
- __ lea(r11, Operand(rsi, rdi, times_1, 0));
- __ addq(rbx, r9); // End of capture
- // ---------------------
- // r11 - current input character address
- // r9 - current capture character address
- // rbx - end of capture
-
- Label loop;
- __ bind(&loop);
- __ movzxbl(rdx, Operand(r9, 0));
- __ movzxbl(rax, Operand(r11, 0));
- // al - input character
- // dl - capture character
- __ cmpb(rax, rdx);
- __ j(equal, &loop_increment);
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- // I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
- // a match.
- __ or_(rax, Immediate(0x20)); // Convert match character to lower-case.
- __ or_(rdx, Immediate(0x20)); // Convert capture character to lower-case.
- __ cmpb(rax, rdx);
- __ j(not_equal, on_no_match); // Definitely not equal.
- __ subb(rax, Immediate('a'));
- __ cmpb(rax, Immediate('z' - 'a'));
-#ifndef ENABLE_LATIN_1
- __ j(above, on_no_match); // Weren't letters anyway.
-#else
- __ j(below_equal, &loop_increment); // In range 'a'-'z'.
- // Latin-1: Check for values in range [224,254] but not 247.
- __ subb(rax, Immediate(224 - 'a'));
- __ cmpb(rax, Immediate(254 - 224));
- __ j(above, on_no_match); // Weren't Latin-1 letters.
- __ cmpb(rax, Immediate(247 - 224)); // Check for 247.
- __ j(equal, on_no_match);
-#endif
- __ bind(&loop_increment);
- // Increment pointers into match and capture strings.
- __ addq(r11, Immediate(1));
- __ addq(r9, Immediate(1));
- // Compare to end of capture, and loop if not done.
- __ cmpq(r9, rbx);
- __ j(below, &loop);
-
- // Compute new value of character position after the matched part.
- __ movq(rdi, r11);
- __ subq(rdi, rsi);
- } else {
- ASSERT(mode_ == UC16);
- // Save important/volatile registers before calling C function.
-#ifndef _WIN64
- // Caller save on Linux and callee save in Windows.
- __ push(rsi);
- __ push(rdi);
-#endif
- __ push(backtrack_stackpointer());
-
- static const int num_arguments = 4;
- __ PrepareCallCFunction(num_arguments);
-
- // Put arguments into parameter registers. Parameters are
- // Address byte_offset1 - Address captured substring's start.
- // Address byte_offset2 - Address of current character position.
- // size_t byte_length - length of capture in bytes(!)
- // Isolate* isolate
-#ifdef _WIN64
- // Compute and set byte_offset1 (start of capture).
- __ lea(rcx, Operand(rsi, rdx, times_1, 0));
- // Set byte_offset2.
- __ lea(rdx, Operand(rsi, rdi, times_1, 0));
- // Set byte_length.
- __ movq(r8, rbx);
- // Isolate.
- __ LoadAddress(r9, ExternalReference::isolate_address());
-#else // AMD64 calling convention
- // Compute byte_offset2 (current position = rsi+rdi).
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- // Compute and set byte_offset1 (start of capture).
- __ lea(rdi, Operand(rsi, rdx, times_1, 0));
- // Set byte_offset2.
- __ movq(rsi, rax);
- // Set byte_length.
- __ movq(rdx, rbx);
- // Isolate.
- __ LoadAddress(rcx, ExternalReference::isolate_address());
-#endif
-
- { // NOLINT: Can't find a way to open this scope without confusing the
- // linter.
- AllowExternalCallThatCantCauseGC scope(&masm_);
- ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
- __ CallCFunction(compare, num_arguments);
- }
-
- // Restore original values before reacting on result value.
- __ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(backtrack_stackpointer());
-#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
-#endif
-
- // Check if function returned non-zero for success or zero for failure.
- __ testq(rax, rax);
- BranchOrBacktrack(zero, on_no_match);
- // On success, increment position by length of capture.
- // Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
- __ addq(rdi, rbx);
- }
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
-
- // Find length of back-referenced capture.
- __ movq(rdx, register_location(start_reg));
- __ movq(rax, register_location(start_reg + 1));
- __ subq(rax, rdx); // Length to check.
-
- // Fail on partial or illegal capture (start of capture after end of capture).
- // This must not happen (no back-reference can reference a capture that wasn't
- // closed before in the reg-exp).
- __ Check(greater_equal, "Invalid capture referenced");
-
- // Succeed on empty capture (including non-participating capture)
- __ j(equal, &fallthrough);
-
- // -----------------------
- // rdx - Start of capture
- // rax - length of capture
-
- // Check that there are sufficient characters left in the input.
- __ movl(rbx, rdi);
- __ addl(rbx, rax);
- BranchOrBacktrack(greater, on_no_match);
-
- // Compute pointers to match string and capture string
- __ lea(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
- __ addq(rdx, rsi); // Start of capture.
- __ lea(r9, Operand(rdx, rax, times_1, 0)); // End of capture
-
- // -----------------------
- // rbx - current capture character address.
- // rbx - current input character address .
- // r9 - end of input to match (capture length after rbx).
-
- Label loop;
- __ bind(&loop);
- if (mode_ == ASCII) {
- __ movzxbl(rax, Operand(rdx, 0));
- __ cmpb(rax, Operand(rbx, 0));
- } else {
- ASSERT(mode_ == UC16);
- __ movzxwl(rax, Operand(rdx, 0));
- __ cmpw(rax, Operand(rbx, 0));
- }
- BranchOrBacktrack(not_equal, on_no_match);
- // Increment pointers into capture and match string.
- __ addq(rbx, Immediate(char_size()));
- __ addq(rdx, Immediate(char_size()));
- // Check if we have reached end of match area.
- __ cmpq(rdx, r9);
- __ j(below, &loop);
-
- // Success.
- // Set current character position to position after match.
- __ movq(rdi, rbx);
- __ subq(rdi, rsi);
-
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotCharacter(uint32_t c,
- Label* on_not_equal) {
- __ cmpl(current_character(), Immediate(c));
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- if (c == 0) {
- __ testl(current_character(), Immediate(mask));
- } else {
- __ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
- __ cmpl(rax, Immediate(c));
- }
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal) {
- if (c == 0) {
- __ testl(current_character(), Immediate(mask));
- } else {
- __ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
- __ cmpl(rax, Immediate(c));
- }
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- ASSERT(minus < String::kMaxUtf16CodeUnit);
- __ lea(rax, Operand(current_character(), -minus));
- __ and_(rax, Immediate(mask));
- __ cmpl(rax, Immediate(c));
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- __ leal(rax, Operand(current_character(), -from));
- __ cmpl(rax, Immediate(to - from));
- BranchOrBacktrack(below_equal, on_in_range);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- __ leal(rax, Operand(current_character(), -from));
- __ cmpl(rax, Immediate(to - from));
- BranchOrBacktrack(above, on_not_in_range);
-}
-
-
-void RegExpMacroAssemblerX64::CheckBitInTable(
- Handle<ByteArray> table,
- Label* on_bit_set) {
- __ Move(rax, table);
- Register index = current_character();
- if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
- __ movq(rbx, current_character());
- __ and_(rbx, Immediate(kTableMask));
- index = rbx;
- }
- __ cmpb(FieldOperand(rax, index, times_1, ByteArray::kHeaderSize),
- Immediate(0));
- BranchOrBacktrack(not_equal, on_bit_set);
-}
-
-
-bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check, using the sequence:
- // lea(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
- // cmp(rax, Immediate(max - min))
- switch (type) {
- case 's':
- // Match space-characters
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- Label success;
- __ cmpl(current_character(), Immediate(' '));
- __ j(equal, &success);
- // Check range 0x09..0x0d
- __ lea(rax, Operand(current_character(), -'\t'));
- __ cmpl(rax, Immediate('\r' - '\t'));
- BranchOrBacktrack(above, on_no_match);
- __ bind(&success);
- return true;
- }
- return false;
- case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- __ cmpl(current_character(), Immediate(' '));
- BranchOrBacktrack(equal, on_no_match);
- __ lea(rax, Operand(current_character(), -'\t'));
- __ cmpl(rax, Immediate('\r' - '\t'));
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- }
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9')
- __ lea(rax, Operand(current_character(), -'0'));
- __ cmpl(rax, Immediate('9' - '0'));
- BranchOrBacktrack(above, on_no_match);
- return true;
- case 'D':
- // Match non ASCII-digits
- __ lea(rax, Operand(current_character(), -'0'));
- __ cmpl(rax, Immediate('9' - '0'));
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subl(rax, Immediate(0x0b));
- __ cmpl(rax, Immediate(0x0c - 0x0b));
- BranchOrBacktrack(below_equal, on_no_match);
- if (mode_ == UC16) {
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subl(rax, Immediate(0x2028 - 0x0b));
- __ cmpl(rax, Immediate(0x2029 - 0x2028));
- BranchOrBacktrack(below_equal, on_no_match);
- }
- return true;
- }
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subl(rax, Immediate(0x0b));
- __ cmpl(rax, Immediate(0x0c - 0x0b));
- if (mode_ == ASCII) {
- BranchOrBacktrack(above, on_no_match);
- } else {
- Label done;
- BranchOrBacktrack(below_equal, &done);
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subl(rax, Immediate(0x2028 - 0x0b));
- __ cmpl(rax, Immediate(0x2029 - 0x2028));
- BranchOrBacktrack(above, on_no_match);
- __ bind(&done);
- }
- return true;
- }
- case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmpl(current_character(), Immediate('z'));
- BranchOrBacktrack(above, on_no_match);
- }
- __ movq(rbx, ExternalReference::re_word_character_map());
- ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- __ testb(Operand(rbx, current_character(), times_1, 0),
- current_character());
- BranchOrBacktrack(zero, on_no_match);
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmpl(current_character(), Immediate('z'));
- __ j(above, &done);
- }
- __ movq(rbx, ExternalReference::re_word_character_map());
- ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- __ testb(Operand(rbx, current_character(), times_1, 0),
- current_character());
- BranchOrBacktrack(not_zero, on_no_match);
- if (mode_ != ASCII) {
- __ bind(&done);
- }
- return true;
- }
-
- case '*':
- // Match any character.
- return true;
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
-
-
-void RegExpMacroAssemblerX64::Fail() {
- STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
- if (!global()) {
- __ Set(rax, FAILURE);
- }
- __ jmp(&exit_label_);
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
- Label return_rax;
- // Finalize code - write the entry point code now we know how many
- // registers we need.
- // Entry code:
- __ bind(&entry_label_);
-
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // is generated.
- FrameScope scope(&masm_, StackFrame::MANUAL);
-
- // Actually emit code to start a new stack frame.
- __ push(rbp);
- __ movq(rbp, rsp);
- // Save parameters and callee-save registers. Order here should correspond
- // to order of kBackup_ebx etc.
-#ifdef _WIN64
- // MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots.
- // Store register parameters in pre-allocated stack slots,
- __ movq(Operand(rbp, kInputString), rcx);
- __ movq(Operand(rbp, kStartIndex), rdx); // Passed as int32 in edx.
- __ movq(Operand(rbp, kInputStart), r8);
- __ movq(Operand(rbp, kInputEnd), r9);
- // Callee-save on Win64.
- __ push(rsi);
- __ push(rdi);
- __ push(rbx);
-#else
- // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
- // Push register parameters on stack for reference.
- ASSERT_EQ(kInputString, -1 * kPointerSize);
- ASSERT_EQ(kStartIndex, -2 * kPointerSize);
- ASSERT_EQ(kInputStart, -3 * kPointerSize);
- ASSERT_EQ(kInputEnd, -4 * kPointerSize);
- ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
- ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize);
- __ push(rdi);
- __ push(rsi);
- __ push(rdx);
- __ push(rcx);
- __ push(r8);
- __ push(r9);
-
- __ push(rbx); // Callee-save
-#endif
-
- __ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
-
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_.isolate());
- __ movq(rcx, rsp);
- __ movq(kScratchRegister, stack_limit);
- __ subq(rcx, Operand(kScratchRegister, 0));
- // Handle it if the stack pointer is already below the stack limit.
- __ j(below_equal, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmpq(rcx, Immediate(num_registers_ * kPointerSize));
- __ j(above_equal, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ Set(rax, EXCEPTION);
- __ jmp(&return_rax);
-
- __ bind(&stack_limit_hit);
- __ Move(code_object_pointer(), masm_.CodeObject());
- CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
- __ testq(rax, rax);
- // If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &return_rax);
-
- __ bind(&stack_ok);
-
- // Allocate space on stack for registers.
- __ subq(rsp, Immediate(num_registers_ * kPointerSize));
- // Load string length.
- __ movq(rsi, Operand(rbp, kInputEnd));
- // Load input position.
- __ movq(rdi, Operand(rbp, kInputStart));
- // Set up rdi to be negative offset from string end.
- __ subq(rdi, rsi);
- // Set rax to address of char before start of the string
- // (effectively string position -1).
- __ movq(rbx, Operand(rbp, kStartIndex));
- __ neg(rbx);
- if (mode_ == UC16) {
- __ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
- } else {
- __ lea(rax, Operand(rdi, rbx, times_1, -char_size()));
- }
- // Store this value in a local variable, for use when clearing
- // position registers.
- __ movq(Operand(rbp, kInputStartMinusOne), rax);
-
-#ifdef WIN32
- // Ensure that we have written to each stack page, in order. Skipping a page
- // on Windows can cause segmentation faults. Assuming page size is 4k.
- const int kPageSize = 4096;
- const int kRegistersPerPage = kPageSize / kPointerSize;
- for (int i = num_saved_registers_ + kRegistersPerPage - 1;
- i < num_registers_;
- i += kRegistersPerPage) {
- __ movq(register_location(i), rax); // One write every page.
- }
-#endif // WIN32
-
- // Initialize code object pointer.
- __ Move(code_object_pointer(), masm_.CodeObject());
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
- __ j(not_equal, &load_char_start_regexp, Label::kNear);
- __ Set(current_character(), '\n');
- __ jmp(&start_regexp, Label::kNear);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
-
- // Initialize on-stack registers.
- if (num_saved_registers_ > 0) {
- // Fill saved registers with initial value = start offset - 1
- // Fill in stack push order, to avoid accessing across an unwritten
- // page (a problem on Windows).
- if (num_saved_registers_ > 8) {
- __ Set(rcx, kRegisterZero);
- Label init_loop;
- __ bind(&init_loop);
- __ movq(Operand(rbp, rcx, times_1, 0), rax);
- __ subq(rcx, Immediate(kPointerSize));
- __ cmpq(rcx,
- Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
- __ j(greater, &init_loop);
- } else { // Unroll the loop.
- for (int i = 0; i < num_saved_registers_; i++) {
- __ movq(register_location(i), rax);
- }
- }
- }
-
- // Initialize backtrack stack pointer.
- __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
-
- __ jmp(&start_label_);
-
- // Exit code:
- if (success_label_.is_linked()) {
- // Save captures when successful.
- __ bind(&success_label_);
- if (num_saved_registers_ > 0) {
- // copy captures to output
- __ movq(rdx, Operand(rbp, kStartIndex));
- __ movq(rbx, Operand(rbp, kRegisterOutput));
- __ movq(rcx, Operand(rbp, kInputEnd));
- __ subq(rcx, Operand(rbp, kInputStart));
- if (mode_ == UC16) {
- __ lea(rcx, Operand(rcx, rdx, times_2, 0));
- } else {
- __ addq(rcx, rdx);
- }
- for (int i = 0; i < num_saved_registers_; i++) {
- __ movq(rax, register_location(i));
- if (i == 0 && global_with_zero_length_check()) {
- // Keep capture start in rdx for the zero-length check later.
- __ movq(rdx, rax);
- }
- __ addq(rax, rcx); // Convert to index from start, not end.
- if (mode_ == UC16) {
- __ sar(rax, Immediate(1)); // Convert byte index to character index.
- }
- __ movl(Operand(rbx, i * kIntSize), rax);
- }
- }
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- // Increment success counter.
- __ incq(Operand(rbp, kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
- __ subq(rcx, Immediate(num_saved_registers_));
- // Check whether we have enough room for another set of capture results.
- __ cmpq(rcx, Immediate(num_saved_registers_));
- __ j(less, &exit_label_);
-
- __ movq(Operand(rbp, kNumOutputRegisters), rcx);
- // Advance the location for output.
- __ addq(Operand(rbp, kRegisterOutput),
- Immediate(num_saved_registers_ * kIntSize));
-
- // Prepare rax to initialize registers with its value in the next run.
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
-
- if (global_with_zero_length_check()) {
- // Special case for zero-length matches.
- // rdx: capture start index
- __ cmpq(rdi, rdx);
- // Not a zero-length match, restart.
- __ j(not_equal, &load_char_start_regexp);
- // rdi (offset from the end) is zero if we already reached the end.
- __ testq(rdi, rdi);
- __ j(zero, &exit_label_, Label::kNear);
- // Advance current position after a zero-length match.
- if (mode_ == UC16) {
- __ addq(rdi, Immediate(2));
- } else {
- __ incq(rdi);
- }
- }
-
- __ jmp(&load_char_start_regexp);
- } else {
- __ movq(rax, Immediate(SUCCESS));
- }
- }
-
- __ bind(&exit_label_);
- if (global()) {
- // Return the number of successful captures.
- __ movq(rax, Operand(rbp, kSuccessfulCaptures));
- }
-
- __ bind(&return_rax);
-#ifdef _WIN64
- // Restore callee save registers.
- __ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
- __ pop(rbx);
- __ pop(rdi);
- __ pop(rsi);
- // Stack now at rbp.
-#else
- // Restore callee save register.
- __ movq(rbx, Operand(rbp, kBackup_rbx));
- // Skip rsp to rbp.
- __ movq(rsp, rbp);
-#endif
- // Exit function frame, restore previous one.
- __ pop(rbp);
- __ ret(0);
-
- // Backtrack code (branch target for conditional backtracks).
- if (backtrack_label_.is_linked()) {
- __ bind(&backtrack_label_);
- Backtrack();
- }
-
- Label exit_with_exception;
-
- // Preempt-code
- if (check_preempt_label_.is_linked()) {
- SafeCallTarget(&check_preempt_label_);
-
- __ push(backtrack_stackpointer());
- __ push(rdi);
-
- CallCheckStackGuardState();
- __ testq(rax, rax);
- // If returning non-zero, we should end execution with the given
- // result as return value.
- __ j(not_zero, &return_rax);
-
- // Restore registers.
- __ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(rdi);
- __ pop(backtrack_stackpointer());
- // String might have moved: Reload esi from frame.
- __ movq(rsi, Operand(rbp, kInputEnd));
- SafeReturn();
- }
-
- // Backtrack stack overflow code.
- if (stack_overflow_label_.is_linked()) {
- SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
-
- Label grow_failed;
- // Save registers before calling C function
-#ifndef _WIN64
- // Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
- __ push(rsi);
- __ push(rdi);
-#endif
-
- // Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments);
-#ifdef _WIN64
- // Microsoft passes parameters in rcx, rdx, r8.
- // First argument, backtrack stackpointer, is already in rcx.
- __ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
- __ LoadAddress(r8, ExternalReference::isolate_address());
-#else
- // AMD64 ABI passes parameters in rdi, rsi, rdx.
- __ movq(rdi, backtrack_stackpointer()); // First argument.
- __ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
- __ LoadAddress(rdx, ExternalReference::isolate_address());
-#endif
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_.isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return NULL, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
- __ testq(rax, rax);
- __ j(equal, &exit_with_exception);
- // Otherwise use return value as new stack pointer.
- __ movq(backtrack_stackpointer(), rax);
- // Restore saved registers and continue.
- __ Move(code_object_pointer(), masm_.CodeObject());
-#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
-#endif
- SafeReturn();
- }
-
- if (exit_with_exception.is_linked()) {
- // If any of the code above needed to exit with an exception.
- __ bind(&exit_with_exception);
- // Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ Set(rax, EXCEPTION);
- __ jmp(&return_rax);
- }
-
- FixupCodeRelativePositions();
-
- CodeDesc code_desc;
- masm_.GetCode(&code_desc);
- Isolate* isolate = ISOLATE;
- Handle<Code> code = isolate->factory()->NewCode(
- code_desc, Code::ComputeFlags(Code::REGEXP),
- masm_.CodeObject());
- PROFILE(isolate, RegExpCodeCreateEvent(*code, *source));
- return Handle<HeapObject>::cast(code);
-}
-
-
-void RegExpMacroAssemblerX64::GoTo(Label* to) {
- BranchOrBacktrack(no_condition, to);
-}
-
-
-void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- __ cmpq(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(greater_equal, if_ge);
-}
-
-
-void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- __ cmpq(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(less, if_lt);
-}
-
-
-void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
- Label* if_eq) {
- __ cmpq(rdi, register_location(reg));
- BranchOrBacktrack(equal, if_eq);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerX64::Implementation() {
- return kX64Implementation;
-}
-
-
-void RegExpMacroAssemblerX64::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
- ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerX64::PopCurrentPosition() {
- Pop(rdi);
-}
-
-
-void RegExpMacroAssemblerX64::PopRegister(int register_index) {
- Pop(rax);
- __ movq(register_location(register_index), rax);
-}
-
-
-void RegExpMacroAssemblerX64::PushBacktrack(Label* label) {
- Push(label);
- CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerX64::PushCurrentPosition() {
- Push(rdi);
-}
-
-
-void RegExpMacroAssemblerX64::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- __ movq(rax, register_location(register_index));
- Push(rax);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
- __ movq(rdi, register_location(reg));
-}
-
-
-void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
- __ movq(backtrack_stackpointer(), register_location(reg));
- __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
-}
-
-
-void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
- Label after_position;
- __ cmpq(rdi, Immediate(-by * char_size()));
- __ j(greater_equal, &after_position, Label::kNear);
- __ movq(rdi, Immediate(-by * char_size()));
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&after_position);
-}
-
-
-void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
- ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ movq(register_location(register_index), Immediate(to));
-}
-
-
-bool RegExpMacroAssemblerX64::Succeed() {
- __ jmp(&success_label_);
- return global();
-}
-
-
-void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- if (cp_offset == 0) {
- __ movq(register_location(reg), rdi);
- } else {
- __ lea(rax, Operand(rdi, cp_offset * char_size()));
- __ movq(register_location(reg), rax);
- }
-}
-
-
-void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
- for (int reg = reg_from; reg <= reg_to; reg++) {
- __ movq(register_location(reg), rax);
- }
-}
-
-
-void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
- __ movq(rax, backtrack_stackpointer());
- __ subq(rax, Operand(rbp, kStackHighEnd));
- __ movq(register_location(reg), rax);
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
- // This function call preserves no register values. Caller should
- // store anything volatile in a C call or overwritten by this function.
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments);
-#ifdef _WIN64
- // Second argument: Code* of self. (Do this before overwriting r8).
- __ movq(rdx, code_object_pointer());
- // Third argument: RegExp code frame pointer.
- __ movq(r8, rbp);
- // First argument: Next address on the stack (will be address of
- // return address).
- __ lea(rcx, Operand(rsp, -kPointerSize));
-#else
- // Third argument: RegExp code frame pointer.
- __ movq(rdx, rbp);
- // Second argument: Code* of self.
- __ movq(rsi, code_object_pointer());
- // First argument: Next address on the stack (will be address of
- // return address).
- __ lea(rdi, Operand(rsp, -kPointerSize));
-#endif
- ExternalReference stack_check =
- ExternalReference::re_check_stack_guard_state(masm_.isolate());
- __ CallCFunction(stack_check, num_arguments);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
-
-
-int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame) {
- Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
- if (isolate->stack_guard()->IsStackOverflow()) {
- isolate->StackOverflow();
- return EXCEPTION;
- }
-
- // If not real stack overflow the stack guard was used to interrupt
- // execution for another purpose.
-
- // If this is a direct call from JavaScript retry the RegExp forcing the call
- // through the runtime system. Currently the direct call cannot handle a GC.
- if (frame_entry<int>(re_frame, kDirectCall) == 1) {
- return RETRY;
- }
-
- // Prepare for possible GC.
- HandleScope handles(isolate);
- Handle<Code> code_handle(re_code);
-
- Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
-
- // Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
-
- ASSERT(re_code->instruction_start() <= *return_address);
- ASSERT(*return_address <=
- re_code->instruction_start() + re_code->instruction_size());
-
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
-
- if (*code_handle != re_code) { // Return address no longer valid
- intptr_t delta = code_handle->address() - re_code->address();
- // Overwrite the return address on the stack.
- *return_address += delta;
- }
-
- if (result->IsException()) {
- return EXCEPTION;
- }
-
- Handle<String> subject_tmp = subject;
- int slice_offset = 0;
-
- // Extract the underlying string and the slice offset.
- if (StringShape(*subject_tmp).IsCons()) {
- subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
- } else if (StringShape(*subject_tmp).IsSliced()) {
- SlicedString* slice = SlicedString::cast(*subject_tmp);
- subject_tmp = Handle<String>(slice->parent());
- slice_offset = slice->offset();
- }
-
- // String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
- // code cannot be used, and we need to restart regexp matching from
- // scratch (including, potentially, compiling a new version of the code).
- return RETRY;
- }
-
- // Otherwise, the content of the string might have moved. It must still
- // be a sequential or external string with the same content.
- // Update the start and end pointers in the stack frame to the current
- // location (whether it has actually moved or not).
- ASSERT(StringShape(*subject_tmp).IsSequential() ||
- StringShape(*subject_tmp).IsExternal());
-
- // The original start address of the characters to match.
- const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
-
- // Find the current start address of the same character at the current string
- // position.
- int start_index = frame_entry<int>(re_frame, kStartIndex);
- const byte* new_address = StringCharacterPosition(*subject_tmp,
- start_index + slice_offset);
-
- if (start_address != new_address) {
- // If there is a difference, update the object pointer and start and end
- // addresses in the RegExp stack frame to match the new value.
- const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
- int byte_length = static_cast<int>(end_address - start_address);
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- frame_entry<const byte*>(re_frame, kInputStart) = new_address;
- frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
- } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
- // Subject string might have been a ConsString that underwent
- // short-circuiting during GC. That will not change start_address but
- // will change pointer inside the subject handle.
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- }
-
- return 0;
-}
-
-
-Operand RegExpMacroAssemblerX64::register_location(int register_index) {
- ASSERT(register_index < (1<<30));
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- return Operand(rbp, kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerX64::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- __ cmpl(rdi, Immediate(-cp_offset * char_size()));
- BranchOrBacktrack(greater_equal, on_outside_input);
-}
-
-
-void RegExpMacroAssemblerX64::BranchOrBacktrack(Condition condition,
- Label* to) {
- if (condition < 0) { // No condition
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ jmp(to);
- return;
- }
- if (to == NULL) {
- __ j(condition, &backtrack_label_);
- return;
- }
- __ j(condition, to);
-}
-
-
-void RegExpMacroAssemblerX64::SafeCall(Label* to) {
- __ call(to);
-}
-
-
-void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) {
- __ bind(label);
- __ subq(Operand(rsp, 0), code_object_pointer());
-}
-
-
-void RegExpMacroAssemblerX64::SafeReturn() {
- __ addq(Operand(rsp, 0), code_object_pointer());
- __ ret(0);
-}
-
-
-void RegExpMacroAssemblerX64::Push(Register source) {
- ASSERT(!source.is(backtrack_stackpointer()));
- // Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
- __ movl(Operand(backtrack_stackpointer(), 0), source);
-}
-
-
-void RegExpMacroAssemblerX64::Push(Immediate value) {
- // Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
- __ movl(Operand(backtrack_stackpointer(), 0), value);
-}
-
-
-void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
- for (int i = 0, n = code_relative_fixup_positions_.length(); i < n; i++) {
- int position = code_relative_fixup_positions_[i];
- // The position succeeds a relative label offset from position.
- // Patch the relative offset to be relative to the Code object pointer
- // instead.
- int patch_position = position - kIntSize;
- int offset = masm_.long_at(patch_position);
- masm_.long_at_put(patch_position,
- offset
- + position
- + Code::kHeaderSize
- - kHeapObjectTag);
- }
- code_relative_fixup_positions_.Clear();
-}
-
-
-void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
- __ movl(Operand(backtrack_stackpointer(), 0), backtrack_target);
- MarkPositionForCodeRelativeFixup();
-}
-
-
-void RegExpMacroAssemblerX64::Pop(Register target) {
- ASSERT(!target.is(backtrack_stackpointer()));
- __ movsxlq(target, Operand(backtrack_stackpointer(), 0));
- // Notice: This updates flags, unlike normal Pop.
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
-}
-
-
-void RegExpMacroAssemblerX64::Drop() {
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
-}
-
-
-void RegExpMacroAssemblerX64::CheckPreemption() {
- // Check for preemption.
- Label no_preempt;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_.isolate());
- __ load_rax(stack_limit);
- __ cmpq(rsp, rax);
- __ j(above, &no_preempt);
-
- SafeCall(&check_preempt_label_);
-
- __ bind(&no_preempt);
-}
-
-
-void RegExpMacroAssemblerX64::CheckStackLimit() {
- Label no_stack_overflow;
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_.isolate());
- __ load_rax(stack_limit);
- __ cmpq(backtrack_stackpointer(), rax);
- __ j(above, &no_stack_overflow);
-
- SafeCall(&stack_overflow_label_);
-
- __ bind(&no_stack_overflow);
-}
-
-
-void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- if (mode_ == ASCII) {
- if (characters == 4) {
- __ movl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
- } else if (characters == 2) {
- __ movzxwl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
- } else {
- ASSERT(characters == 1);
- __ movzxbl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
- }
- } else {
- ASSERT(mode_ == UC16);
- if (characters == 2) {
- __ movl(current_character(),
- Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
- } else {
- ASSERT(characters == 1);
- __ movzxwl(current_character(),
- Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
- }
- }
-}
-
-#undef __
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h b/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h
deleted file mode 100644
index a082cf2..0000000
--- a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
-#define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
-
-#include "x64/assembler-x64.h"
-#include "x64/assembler-x64-inl.h"
-#include "macro-assembler.h"
-#include "code.h"
-#include "x64/macro-assembler-x64.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-
-class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerX64(Mode mode, int registers_to_save, Zone* zone);
- virtual ~RegExpMacroAssemblerX64();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(uint32_t c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
-
- static Result Match(Handle<Code> regexp,
- Handle<String> subject,
- int* offsets_vector,
- int offsets_vector_length,
- int previous_index,
- Isolate* isolate);
-
- static Result Execute(Code* code,
- String* input,
- int start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- bool at_start);
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
-
- private:
- // Offsets from rbp of function parameters and stored registers.
- static const int kFramePointer = 0;
- // Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kPointerSize;
- static const int kFrameAlign = kReturn_eip + kPointerSize;
-
-#ifdef _WIN64
- // Parameters (first four passed as registers, but with room on stack).
- // In Microsoft 64-bit Calling Convention, there is room on the callers
- // stack (before the return address) to spill parameter registers. We
- // use this space to store the register passed parameters.
- static const int kInputString = kFrameAlign;
- // StartIndex is passed as 32 bit int.
- static const int kStartIndex = kInputString + kPointerSize;
- static const int kInputStart = kStartIndex + kPointerSize;
- static const int kInputEnd = kInputStart + kPointerSize;
- static const int kRegisterOutput = kInputEnd + kPointerSize;
- // For the case of global regular expression, we have room to store at least
- // one set of capture results. For the case of non-global regexp, we ignore
- // this value. NumOutputRegisters is passed as 32-bit value. The upper
- // 32 bit of this 64-bit stack slot may contain garbage.
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- // DirectCall is passed as 32 bit int (values 0 or 1).
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
-#else
- // In AMD64 ABI Calling Convention, the first six integer parameters
- // are passed as registers, and caller must allocate space on the stack
- // if it wants them stored. We push the parameters after the frame pointer.
- static const int kInputString = kFramePointer - kPointerSize;
- static const int kStartIndex = kInputString - kPointerSize;
- static const int kInputStart = kStartIndex - kPointerSize;
- static const int kInputEnd = kInputStart - kPointerSize;
- static const int kRegisterOutput = kInputEnd - kPointerSize;
- // For the case of global regular expression, we have room to store at least
- // one set of capture results. For the case of non-global regexp, we ignore
- // this value.
- static const int kNumOutputRegisters = kRegisterOutput - kPointerSize;
- static const int kStackHighEnd = kFrameAlign;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
-#endif
-
-#ifdef _WIN64
- // Microsoft calling convention has three callee-saved registers
- // (that we are using). We push these after the frame pointer.
- static const int kBackup_rsi = kFramePointer - kPointerSize;
- static const int kBackup_rdi = kBackup_rsi - kPointerSize;
- static const int kBackup_rbx = kBackup_rdi - kPointerSize;
- static const int kLastCalleeSaveRegister = kBackup_rbx;
-#else
- // AMD64 Calling Convention has only one callee-save register that
- // we use. We push this after the frame pointer (and after the
- // parameters).
- static const int kBackup_rbx = kNumOutputRegisters - kPointerSize;
- static const int kLastCalleeSaveRegister = kBackup_rbx;
-#endif
-
- static const int kSuccessfulCaptures = kLastCalleeSaveRegister - kPointerSize;
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
-
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState();
-
- // The rbp-relative location of a regexp register.
- Operand register_location(int register_index);
-
- // The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return rdx; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- inline Register backtrack_stackpointer() { return rcx; }
-
- // The registers containing a self pointer to this code's Code object.
- inline Register code_object_pointer() { return r8; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to);
-
- void MarkPositionForCodeRelativeFixup() {
- code_relative_fixup_positions_.Add(masm_.pc_offset(), zone());
- }
-
- void FixupCodeRelativePositions();
-
- // Call and return internally in the generated code in a way that
- // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
- inline void SafeCall(Label* to);
- inline void SafeCallTarget(Label* label);
- inline void SafeReturn();
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer (rcx) by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pushes a value on the backtrack stack. Decrements the stack pointer (rcx)
- // by a word size and stores the value there.
- inline void Push(Immediate value);
-
- // Pushes the Code object relative offset of a label on the backtrack stack
- // (i.e., a backtrack target). Decrements the stack pointer (rcx)
- // by a word size and stores the value there.
- inline void Push(Label* label);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // (rcx) and increments it by a word size.
- inline void Pop(Register target);
-
- // Drops the top value from the backtrack stack without reading it.
- // Increments the stack pointer (rcx) by a word size.
- inline void Drop();
-
- MacroAssembler masm_;
- MacroAssembler::NoRootArrayScope no_root_array_scope_;
-
- ZoneList<int> code_relative_fixup_positions_;
-
- // Which mode to generate code for (ASCII or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
diff --git a/src/3rdparty/v8/src/x64/simulator-x64.cc b/src/3rdparty/v8/src/x64/simulator-x64.cc
deleted file mode 100644
index 209aa2d..0000000
--- a/src/3rdparty/v8/src/x64/simulator-x64.cc
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/src/3rdparty/v8/src/x64/simulator-x64.h b/src/3rdparty/v8/src/x64/simulator-x64.h
deleted file mode 100644
index 8aba701..0000000
--- a/src/3rdparty/v8/src/x64/simulator-x64.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_SIMULATOR_X64_H_
-#define V8_X64_SIMULATOR_X64_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// Since there is no simulator for the x64 architecture the only thing we can
-// do is to call the entry directly.
-// TODO(X64): Don't pass p0, since it isn't used?
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address should
-// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- (reinterpret_cast<TryCatch*>(try_catch_address))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on x64 uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch() { }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_SIMULATOR_X64_H_
diff --git a/src/3rdparty/v8/src/x64/stub-cache-x64.cc b/src/3rdparty/v8/src/x64/stub-cache-x64.cc
deleted file mode 100644
index c471569..0000000
--- a/src/3rdparty/v8/src/x64/stub-cache-x64.cc
+++ /dev/null
@@ -1,3613 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- // The offset is scaled by 4, based on
- // kHeapObjectTagSize, which is two bits
- Register offset) {
- // We need to scale up the pointer by 2 because the offset is scaled by less
- // than the pointer size.
- ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
- ScaleFactor scale_factor = times_2;
-
- ASSERT_EQ(24, sizeof(StubCache::Entry));
- // The offset register holds the entry offset times four (due to masking
- // and shifting optimizations).
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- Label miss;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
-
- __ LoadAddress(kScratchRegister, key_offset);
-
- // Check that the key in the entry matches the name.
- // Multiply entry offset by 16 to get the entry address. Since the
- // offset register already holds the entry offset times four, multiply
- // by a further four.
- __ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0));
- __ j(not_equal, &miss);
-
- // Get the map entry from the cache.
- // Use key_offset + kPointerSize * 2, rather than loading map_offset.
- __ movq(kScratchRegister,
- Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
- __ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Get the code entry from the cache.
- __ LoadAddress(kScratchRegister, value_offset);
- __ movq(kScratchRegister,
- Operand(kScratchRegister, offset, scale_factor, 0));
-
- // Check that the flags match what we're looking for.
- __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
- __ cmpl(offset, Immediate(flags));
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(kScratchRegister);
-
- __ bind(&miss);
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be an internalized string and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<String> name,
- Register r0,
- Register r1) {
- ASSERT(name->IsInternalizedString());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- __ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ testb(FieldOperand(r0, Map::kBitFieldOffset),
- Immediate(kInterceptorOrAccessCheckNeededMask));
- __ j(not_zero, miss_label);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, miss_label);
-
- // Load properties array.
- Register properties = r0;
- __ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, miss_label);
-
- Label done;
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- properties,
- name,
- r1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
- USE(extra); // The register extra is not used on the X64 platform.
- USE(extra2); // The register extra2 is not used on the X64 platform.
- USE(extra3); // The register extra2 is not used on the X64 platform.
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 24.
- ASSERT(sizeof(Entry) == 24);
-
- // Make sure the flags do not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- ASSERT(!scratch.is(receiver));
- ASSERT(!scratch.is(name));
-
- // Check scratch register is valid, extra and extra2 are unused.
- ASSERT(!scratch.is(no_reg));
- ASSERT(extra2.is(no_reg));
- ASSERT(extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
- // Use only the low 32 bits of the map pointer.
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- // We mask out the last two bits because they are not part of the hash and
- // they are always 01 for maps. Also in the two 'and' instructions below.
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
-
- // Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
-
- // Primary miss: Compute hash for secondary probe.
- __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
- __ subl(scratch, name);
- __ addl(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
-
- // Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- // Load the global or builtins object from the current context.
- __ movq(prototype,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- __ movq(prototype,
- FieldOperand(prototype, GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
- // Load the initial map. The global functions all have initial maps.
- __ movq(prototype,
- FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss) {
- Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ Move(prototype, isolate->global_object());
- __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
- prototype);
- __ j(not_equal, miss);
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
- // Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Handle<Map>(function->initial_map()));
- // Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss_label);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, miss_label);
-
- // Load length directly from the JS array.
- __ movq(rax, FieldOperand(receiver, JSArray::kLengthOffset));
- __ ret(0);
-}
-
-
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ testl(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss,
- bool support_wrappers) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
-
- // Load length directly from the string.
- __ movq(rax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register result,
- Register scratch,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, result, miss_label);
- if (!result.is(rax)) __ movq(rax, result);
- __ ret(0);
-}
-
-
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- Handle<JSObject> holder,
- PropertyIndex index) {
- DoGenerateFastPropertyLoad(
- masm, dst, src, index.is_inobject(holder), index.translate(holder));
-}
-
-
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
- int offset = index * kPointerSize;
- if (!inobject) {
- // Calculate the offset into the properties array.
- offset = offset + FixedArray::kHeaderSize;
- __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- src = dst;
- }
- __ movq(dst, FieldOperand(src, offset));
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
- __ Move(kScratchRegister, interceptor);
- __ push(kScratchRegister);
- __ push(receiver);
- __ push(holder);
- __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
- __ PushAddress(ExternalReference::isolate_address());
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ Set(rax, 6);
- __ LoadAddress(rbx, ref);
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-}
-
-
-// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = 4;
-
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : last argument in the internal frame of the caller
- // -----------------------------------
- __ movq(scratch, Operand(rsp, 0));
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
- __ movq(Operand(rsp, 0), scratch);
- __ Move(scratch, Smi::FromInt(0));
- for (int i = 1; i <= kFastApiCallArguments; i++) {
- __ movq(Operand(rsp, i * kPointerSize), scratch);
- }
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address.
- // -- rsp[8] : last fast api call extra argument.
- // -- ...
- // -- rsp[kFastApiCallArguments * 8] : first fast api call extra argument.
- // -- rsp[kFastApiCallArguments * 8 + 8] : last argument in the internal
- // frame.
- // -----------------------------------
- __ movq(scratch, Operand(rsp, 0));
- __ movq(Operand(rsp, kFastApiCallArguments * kPointerSize), scratch);
- __ addq(rsp, Immediate(kPointerSize * kFastApiCallArguments));
-}
-
-
-// Generates call to API function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- rsp[16] : api function
- // (first fast api call extra argument)
- // -- rsp[24] : api call data
- // -- rsp[32] : isolate
- // -- rsp[40] : last argument
- // -- ...
- // -- rsp[(argc + 4) * 8] : first argument
- // -- rsp[(argc + 5) * 8] : receiver
- // -----------------------------------
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(rdi, function);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Pass the additional arguments.
- __ movq(Operand(rsp, 2 * kPointerSize), rdi);
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ Move(rcx, api_call_info);
- __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(Operand(rsp, 3 * kPointerSize), rbx);
- } else {
- __ Move(Operand(rsp, 3 * kPointerSize), call_data);
- }
- __ movq(kScratchRegister, ExternalReference::isolate_address());
- __ movq(Operand(rsp, 4 * kPointerSize), kScratchRegister);
-
- // Prepare arguments.
- __ lea(rbx, Operand(rsp, 4 * kPointerSize));
-
-#if defined(__MINGW64__)
- Register arguments_arg = rcx;
-#elif defined(_WIN64)
- // Win64 uses first register--rcx--for returned value.
- Register arguments_arg = rdx;
-#else
- Register arguments_arg = rdi;
-#endif
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- __ PrepareCallApiFunction(kApiStackSpace);
-
- __ movq(StackSpaceOperand(0), rbx); // v8::Arguments::implicit_args_.
- __ addq(rbx, Immediate(argc * kPointerSize));
- __ movq(StackSpaceOperand(1), rbx); // v8::Arguments::values_.
- __ Set(StackSpaceOperand(2), argc); // v8::Arguments::length_.
- // v8::Arguments::is_construct_call_.
- __ Set(StackSpaceOperand(3), 0);
-
- // v8::InvocationCallback's argument.
- __ lea(arguments_arg, StackSpaceOperand(0));
-
- // Function address is a foreign pointer outside V8's heap.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ CallApiFunctionAndReturn(function_address,
- argc + kFastApiCallArguments + 1);
-}
-
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- Code::ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
-
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->call_const_interceptor(), 1);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
- } else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm, scratch1);
- __ jmp(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm, scratch1);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- 6);
-
- // Restore the name_ register.
- __ pop(name_);
-
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- // Leave the internal frame.
- }
-
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(not_equal, interceptor_succeeded);
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- Code::ExtraICState extra_ic_state_;
-};
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Handle<Code> code = (kind == Code::LOAD_IC)
- ? masm->isolate()->builtins()->LoadIC_Miss()
- : masm->isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateStoreMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
- Handle<Code> code = (kind == Code::STORE_IC)
- ? masm->isolate()->builtins()->StoreIC_Miss()
- : masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
- Handle<Code> code =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name,
- Register receiver_reg,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- LookupResult lookup(masm->isolate());
- object->Lookup(*name, &lookup);
- if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
- // In sloppy mode, we could just return the value and be done. However, we
- // might be in strict mode, where we have to throw. Since we cannot tell,
- // go into slow case unconditionally.
- __ jmp(miss_label);
- return;
- }
-
- // Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, mode);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
- // Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- if (lookup.IsFound()) {
- holder = lookup.holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- // We need an extra register, push
- __ push(name_reg);
- Label miss_pop, done_check;
- CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, &miss_pop);
- __ jmp(&done_check);
- __ bind(&miss_pop);
- __ pop(name_reg);
- __ jmp(miss_label);
- __ bind(&done_check);
- __ pop(name_reg);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ pop(scratch1); // Return address.
- __ push(receiver_reg);
- __ Push(transition);
- __ push(rax);
- __ push(scratch1);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- masm->isolate()),
- 3,
- 1);
- return;
- }
-
- if (!transition.is_null()) {
- // Update the map of the object.
- __ Move(scratch1, transition);
- __ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
-
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- name_reg,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
-
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(FieldOperand(receiver_reg, offset), rax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, rax);
- __ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array (optimistically).
- __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch1, offset), rax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, rax);
- __ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
- }
-
- // Return the value (register rax).
- __ ret(0);
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- __ Move(scratch, cell);
- __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, miss);
-}
-
-
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-#undef __
-#define __ ACCESS_MASM((masm()))
-
-
-void StubCompiler::GenerateTailCall(Handle<Code> code) {
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<String> name,
- int save_at_depth,
- Label* miss,
- PrototypeCheckType check) {
- Handle<JSObject> first = object;
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg. On the first
- // iteration, reg is an alias for object_reg, on later iterations,
- // it is an alias for holder_reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), object_reg);
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsInternalizedString()) {
- name = factory()->InternalizeString(name);
- }
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
- StringDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- bool in_new_space = heap()->InNewSpace(*prototype);
- Handle<Map> current_map(current->map());
- if (in_new_space) {
- // Save the map in scratch1 for later.
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
- }
-
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (in_new_space) {
- // The prototype is in new space; we cannot store a reference to it
- // in the code. Load it from the map.
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // The prototype is in old space; load it directly.
- __ Move(reg, prototype);
- }
- }
-
- if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), reg);
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
- ASSERT(current.is_identical_to(holder));
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- // Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()),
- miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
- }
-
- // Perform security check for access to the global object.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
- Label* miss) {
- if (!miss->is_unused()) {
- __ jmp(success);
- __ bind(miss);
- GenerateLoadMiss(masm(), kind());
- }
-}
-
-
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* success,
- Handle<ExecutableAccessorInfo> callback) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
-
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- ASSERT(!reg.is(scratch2()));
- ASSERT(!reg.is(scratch3()));
- ASSERT(!reg.is(scratch4()));
-
- // Load the properties dictionary.
- Register dictionary = scratch4();
- __ movq(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- &miss,
- &probe_done,
- dictionary,
- this->name(),
- scratch2(),
- scratch3());
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // index into the dictionary. Check that the value is the callback.
- Register index = scratch3();
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(scratch2(),
- Operand(dictionary, index, times_pointer_size,
- kValueOffset - kHeapObjectTag));
- __ movq(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(scratch2(), scratch3());
- __ j(not_equal, &miss);
- }
-
- HandlerFrontendFooter(success, &miss);
- return reg;
-}
-
-
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Label* success,
- Handle<GlobalObject> global) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (!global.is_null()) {
- GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
- }
-
- if (!last->HasFastProperties()) {
- __ movq(scratch2(), FieldOperand(reg, HeapObject::kMapOffset));
- __ movq(scratch2(), FieldOperand(scratch2(), Map::kPrototypeOffset));
- __ Cmp(scratch2(), isolate()->factory()->null_value());
- __ j(not_equal, &miss);
- }
-
- HandlerFrontendFooter(success, &miss);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex index) {
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
- __ ret(0);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadCallback(
- Register reg,
- Handle<ExecutableAccessorInfo> callback) {
- // Insert additional parameters into the stack frame above return address.
- ASSERT(!scratch2().is(reg));
- __ pop(scratch2()); // Get return address to place it below.
-
- __ push(receiver()); // receiver
- __ push(reg); // holder
- if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch1(), callback);
- __ push(FieldOperand(scratch1(),
- ExecutableAccessorInfo::kDataOffset)); // data
- } else {
- __ Push(Handle<Object>(callback->data(), isolate()));
- }
- __ PushAddress(ExternalReference::isolate_address()); // isolate
- __ push(name()); // name
- // Save a pointer to where we pushed the arguments pointer. This will be
- // passed as the const ExecutableAccessorInfo& to the C++ callback.
-
-#if defined(__MINGW64__)
- Register accessor_info_arg = rdx;
- Register name_arg = rcx;
-#elif defined(_WIN64)
- // Win64 uses first register--rcx--for returned value.
- Register accessor_info_arg = r8;
- Register name_arg = rdx;
-#else
- Register accessor_info_arg = rsi;
- Register name_arg = rdi;
-#endif
-
- ASSERT(!name_arg.is(scratch2()));
- __ movq(name_arg, rsp);
- __ push(scratch2()); // Restore return address.
-
- // 4 elements array for v8::Arguments::values_ and handler for name.
- const int kStackSpace = 5;
-
- // Allocate v8::AccessorInfo in non-GCed stack space.
- const int kArgStackSpace = 1;
-
- __ PrepareCallApiFunction(kArgStackSpace);
- __ lea(rax, Operand(name_arg, 4 * kPointerSize));
-
- // v8::AccessorInfo::args_.
- __ movq(StackSpaceOperand(0), rax);
-
- // The context register (rsi) has been saved in PrepareCallApiFunction and
- // could be used to pass arguments.
- __ lea(accessor_info_arg, StackSpaceOperand(0));
-
- Address getter_address = v8::ToCData<Address>(callback->getter());
- __ CallApiFunctionAndReturn(getter_address, kStackSpace);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
- // Return the constant value.
- __ LoadHeapObject(rax, value);
- __ ret(0);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
- Register holder_reg,
- Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name) {
- ASSERT(interceptor_holder->HasNamedInterceptor());
- ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- ExecutableAccessorInfo* callback =
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
- compile_followup_inline = callback->getter() != NULL &&
- callback->IsCompatibleReceiver(*object);
- }
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code,
- // the FIELD case might cause a miss during the prototype check.
- bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perfrom_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
- if (must_preserve_receiver_reg) {
- __ push(receiver());
- }
- __ push(holder_reg);
- __ push(this->name());
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(equal, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ ret(0);
-
- __ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
-
- // Leave the internal frame.
- }
-
- GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- __ pop(scratch2()); // save old return address
- PushInterceptorArguments(masm(), receiver(), holder_reg,
- this->name(), interceptor_holder);
- __ push(scratch2()); // restore old return address
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
-}
-
-
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ Cmp(rcx, name);
- __ j(not_equal, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(rdx, miss);
- CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ Move(rdi, cell);
- __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(rdi, miss);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
- __ j(not_equal, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
- __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
- } else {
- __ Cmp(rdi, function);
- }
- __ j(not_equal, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, rdi,
- name, &miss);
-
- GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
-
- // Check that the function really is a function.
- __ JumpIfSmi(rdi, &miss);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rcx : name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
-
- if (argc == 0) {
- // Noop, return the length.
- __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- __ ret((argc + 1) * kPointerSize);
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- // Get the elements array of the object.
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- factory()->fixed_array_map());
- __ j(not_equal, &check_double);
-
- // Get the array's length into rax and calculate new length.
- __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ addl(rax, Immediate(argc));
-
- // Get the elements' length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmpl(rax, rcx);
- __ j(greater, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
- __ JumpIfNotSmi(rcx, &with_write_barrier);
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- // Store the value.
- __ movq(FieldOperand(rdi,
- rax,
- times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize),
- rcx);
-
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&check_double);
-
- // Check that the elements are in double mode.
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- factory()->fixed_double_array_map());
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into rax and calculate new length.
- __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ addl(rax, Immediate(argc));
-
- // Get the elements' length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmpl(rax, rcx);
- __ j(greater, &call_builtin);
-
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
- __ StoreNumberToDoubleElements(
- rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&with_write_barrier);
-
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(rbx, &not_fast_object, Label::kNear);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(rbx, &call_builtin);
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &call_builtin);
- // rdx: receiver
- // rbx: map
-
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- &try_holey_map);
-
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- // Restore edi.
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- rbx,
- rdi,
- &call_builtin);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(rbx, &call_builtin);
- }
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- // Store the value.
- __ lea(rdx, FieldOperand(rdi,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ movq(Operand(rdx, 0), rcx);
-
- __ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&attempt_to_grow_elements);
- if (!FLAG_inline_new) {
- __ jmp(&call_builtin);
- }
-
- __ movq(rbx, Operand(rsp, argc * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(rbx, &no_fast_elements_check);
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
- __ bind(&no_fast_elements_check);
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
- // Load top.
- __ Load(rcx, new_space_allocation_top);
-
- // Check if it's the end of elements.
- __ lea(rdx, FieldOperand(rdi,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmpq(rdx, rcx);
- __ j(not_equal, &call_builtin);
- __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
- Operand limit_operand =
- masm()->ExternalOperand(new_space_allocation_limit);
- __ cmpq(rcx, limit_operand);
- __ j(above, &call_builtin);
-
- // We fit and could grow elements.
- __ Store(new_space_allocation_top, rcx);
-
- // Push the argument...
- __ movq(Operand(rdx, 0), rbx);
- // ... and fill the rest with holes.
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
- }
-
- // We know the elements array is in new space so we don't need the
- // remembered set, but we just pushed a value onto it so we may have to
- // tell the incremental marker to rescan the object that we just grew. We
- // don't need to worry about the holes because they are in old space and
- // already marked black.
- __ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
-
- // Restore receiver to rdx as finish sequence assumes it's here.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Increment element's and array's sizes.
- __ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
- Smi::FromInt(kAllocationDelta));
-
- // Make new length a smi before returning it.
- __ Integer32ToSmi(rax, rax);
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- isolate()),
- argc + 1,
- 1);
- }
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rcx : name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss, return_undefined, call_builtin;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
-
- // Get the elements array of the object.
- __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into rcx and calculate new length.
- __ SmiToInteger32(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ subl(rcx, Immediate(1));
- __ j(negative, &return_undefined);
-
- // Get the last element.
- __ LoadRoot(r9, Heap::kTheHoleValueRootIndex);
- __ movq(rax, FieldOperand(rbx,
- rcx, times_pointer_size,
- FixedArray::kHeaderSize));
- // Check if element is already the hole.
- __ cmpq(rax, r9);
- // If so, call slow-case to also check prototypes for value.
- __ j(equal, &call_builtin);
-
- // Set the array's length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
-
- // Fill with the hole and return original value.
- __ movq(FieldOperand(rbx,
- rcx, times_pointer_size,
- FixedArray::kHeaderSize),
- r9);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&return_undefined);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()),
- argc + 1,
- 1);
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
-
- Register receiver = rbx;
- Register index = rdi;
- Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in rcx.
- __ Move(rcx, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
-
- Register receiver = rax;
- Register index = rdi;
- Register scratch = rdx;
- Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kempty_stringRootIndex);
- __ ret((argc + 1) * kPointerSize);
- }
- __ bind(&miss);
- // Restore function name in rcx.
- __ Move(rcx, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- const int argc = arguments().immediate();
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = rbx;
- __ movq(code, Operand(rsp, 1 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ SmiAndConstant(code, code, Smi::FromInt(0xffff));
-
- StringCharFromCodeGenerator generator(code, rax);
- generator.GenerateFast(masm());
- __ ret(2 * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- __ bind(&miss);
- // rcx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // TODO(872): implement this.
- return Handle<Code>::null();
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- const int argc = arguments().immediate();
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
- // Load the (only) argument into rax.
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(rax, &not_smi);
- __ SmiToInteger32(rax, rax);
-
- // Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
- // otherwise.
- __ movl(rbx, rax);
- __ sarl(rbx, Immediate(kBitsPerInt - 1));
-
- // Do bitwise not or do nothing depending on ebx.
- __ xorl(rax, rbx);
-
- // Add 1 or do nothing depending on ebx.
- __ subl(rax, rbx);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ j(negative, &slow);
-
- // Smi case done.
- __ Integer32ToSmi(rax, rax);
- __ ret(2 * kPointerSize);
-
- // Check if the argument is a heap number and load its value.
- __ bind(&not_smi);
- __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- const int sign_mask_shift =
- (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
- __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
- RelocInfo::NONE64);
- __ testq(rbx, rdi);
- __ j(not_zero, &negative_sign);
- __ ret(2 * kPointerSize);
-
- // If the argument is negative, clear the sign, and return a new
- // number. We still have the sign mask in rdi.
- __ bind(&negative_sign);
- __ xor_(rbx, rdi);
- __ AllocateHeapNumber(rax, rdx, &slow);
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
- __ ret(2 * kPointerSize);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- __ bind(&miss);
- // rcx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss_before_stack_reserved);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_const(), 1);
- __ IncrementCounter(counters->call_const_fast_api(), 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before calling any runtime function.
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, depth, &miss);
-
- // Move the return address on top of the stack.
- __ movq(rax, Operand(rsp, 4 * kPointerSize));
- __ movq(Operand(rsp, 0 * kPointerSize), rax);
-
- GenerateFastApiCall(masm(), optimization, argc);
-
- __ bind(&miss);
- __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
-
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(rdx, &miss);
- }
-
- // Make sure that it's okay not to patch the on stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- Counters* counters = isolate()->counters();
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(counters->call_const(), 1);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax,
- rdi, name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
- break;
-
- case STRING_CHECK:
- // Check that the object is a string.
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
- __ j(above_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
- break;
-
- case SYMBOL_CHECK:
- // Check that the object is a symbol.
- __ CmpObjectType(rdx, SYMBOL_TYPE, rax);
- __ j(not_equal, &miss);
- break;
-
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(rdx, &fast);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
- break;
- }
- case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &fast);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
- break;
- }
- }
-
- __ jmp(success);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-}
-
-
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Handle<JSFunction> function) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label success;
-
- CompileHandlerFrontend(object, holder, name, check, &success);
- __ bind(&success);
- CompileHandlerBackend(function);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
- compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
- &miss);
-
- // Restore receiver.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the function really is a function.
- __ JumpIfSmi(rax, &miss);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
-
- // Invoke the function.
- __ movq(rdi, rax);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle load cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
-
- // Set up the context (function already in rdi).
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- rdx, rcx, rbx, rdi,
- &miss);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
- // Check that the maps haven't changed.
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(receiver, rdx, holder, rbx, r8, rdi, name, &miss);
-
- // Stub never generated for non-global objects that require access checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-
- __ pop(rbx); // remove the return address
- __ push(rdx); // receiver
- __ Push(callback); // callback info
- __ push(rcx); // name
- __ push(rax); // value
- __ push(rbx); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(rax);
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- __ push(rdx);
- __ push(rax);
- ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(rax);
-
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(receiver, rdx, holder, rbx, r8, rdi, name, &miss);
-
- GenerateStoreViaSetter(masm(), setter);
-
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> receiver,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(rdx, rbx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-
- __ pop(rbx); // remove the return address
- __ push(rdx); // receiver
- __ push(rcx); // name
- __ push(rax); // value
- __ Push(Smi::FromInt(strict_mode_));
- __ push(rbx); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, &miss);
-
- // Compute the cell operand to use.
- __ Move(rbx, cell);
- Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
-
- // Store the value in the cell.
- __ movq(cell_operand, rax);
- // Cells are always rescanned, so no write barrier here.
-
- // Return the value (register rax).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1);
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rcx, name);
- __ j(not_equal, &miss);
-
- // Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- rdx, rcx, rbx, rdi,
- &miss);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_store_field(), 1);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub =
- KeyedStoreElementStub(is_js_array,
- elements_kind,
- grow_mode_).GetCode(isolate());
-
- __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
- __ JumpIfSmi(rdx, &miss, Label::kNear);
-
- __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
- for (int i = 0; i < receiver_count; ++i) {
- // Check map and tail call if there's a match
- __ Cmp(rdi, receiver_maps->at(i));
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- __ movq(rbx, transitioned_maps->at(i), RelocInfo::EMBEDDED_OBJECT);
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Handle<GlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
-
- __ bind(&success);
- // Return undefined if maps of the full prototype chain are still the
- // same and no global property with this name contains a value.
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ ret(0);
-
- // Return the generated code.
- return GetCode(Code::HANDLER_FRAGMENT, Code::NONEXISTENT, name);
-}
-
-
-Register* LoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { rax, rcx, rdx, rbx, rdi, r8 };
- return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { rdx, rax, rbx, rcx, rdi, r8 };
- return registers;
-}
-
-
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<String> name,
- Register name_reg,
- Label* miss) {
- __ Cmp(name_reg, name);
- __ j(not_equal, miss);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- __ push(rax);
- ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> global,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
- bool is_dont_delete) {
- Label success, miss;
- // TODO(verwaest): Directly store to rax. Currently we cannot do this, since
- // rax is used as receiver(), which we would otherwise clobber before a
- // potential miss.
-
- __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
-
- // Get the value from the cell.
- __ Move(rbx, cell);
- __ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
-
- // Check for deleted property if property can actually be deleted.
- if (!is_dont_delete) {
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
- } else if (FLAG_debug_code) {
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ Check(not_equal, "DontDelete cells can't contain the hole");
- }
-
- HandlerFrontendFooter(&success, &miss);
- __ bind(&success);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1);
- __ movq(rax, rbx);
- __ ret(0);
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements()) {
- Handle<Code> stub = KeyedLoadFastElementStub(
- receiver_map->instance_type() == JS_ARRAY_TYPE,
- elements_kind).GetCode(isolate());
- __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
- } else {
- Handle<Code> stub =
- KeyedLoadDictionaryElementStub().GetCode(isolate());
- __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
- }
-
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- Handle<String> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
- }
-
- __ JumpIfSmi(receiver(), &miss);
- Register map_reg = scratch1();
- __ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
- for (int current = 0; current < receiver_count; ++current) {
- // Check map and tail call if there's a match
- __ Cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
- }
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), kind());
-
- // Return the generated code.
- InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(Code::IC_FRAGMENT, type, name, state);
-}
-
-
-// Specialized stub for constructing objects from functions which only have only
-// simple assignments of the form this.x = ...; in their body.
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : constructor
- // -- rsp[0] : return address
- // -- rsp[4] : last argument
- // -----------------------------------
- Label generic_stub_call;
-
- // Use r8 for holding undefined which is used in several places below.
- __ Move(r8, factory()->undefined_value());
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are jump to the generic constructor stub which calls the actual
- // code for the function thereby hitting the break points.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kDebugInfoOffset));
- __ cmpq(rbx, r8);
- __ j(not_equal, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // rdi: constructor
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rbx, &generic_stub_call);
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ j(not_equal, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // rbx: initial map
- __ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
- __ Check(not_equal, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject in new space.
- // rbx: initial map
- ASSERT(function->has_initial_map());
- int instance_size = function->initial_map()->instance_size();
-#ifdef DEBUG
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ shl(rcx, Immediate(kPointerSizeLog2));
- __ cmpq(rcx, Immediate(instance_size));
- __ Check(equal, "Instance size of initial map changed.");
-#endif
- __ AllocateInNewSpace(instance_size, rdx, rcx, no_reg,
- &generic_stub_call, NO_ALLOCATION_FLAGS);
-
- // Allocated the JSObject, now initialize the fields and add the heap tag.
- // rbx: initial map
- // rdx: JSObject (untagged)
- __ movq(Operand(rdx, JSObject::kMapOffset), rbx);
- __ Move(rbx, factory()->empty_fixed_array());
- __ movq(Operand(rdx, JSObject::kPropertiesOffset), rbx);
- __ movq(Operand(rdx, JSObject::kElementsOffset), rbx);
-
- // rax: argc
- // rdx: JSObject (untagged)
- // Load the address of the first in-object property into r9.
- __ lea(r9, Operand(rdx, JSObject::kHeaderSize));
- // Calculate the location of the first argument. The stack contains only the
- // return address on top of the argc arguments.
- __ lea(rcx, Operand(rsp, rax, times_pointer_size, 0));
-
- // rax: argc
- // rcx: first argument
- // rdx: JSObject (untagged)
- // r8: undefined
- // r9: first in-object property of the JSObject
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- // Check if the argument assigned to the property is actually passed.
- // If argument is not passed the property is set to undefined,
- // otherwise find it on the stack.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ movq(rbx, r8);
- __ cmpq(rax, Immediate(arg_number));
- __ cmovq(above, rbx, Operand(rcx, arg_number * -kPointerSize));
- // Store value in the property.
- __ movq(Operand(r9, i * kPointerSize), rbx);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
- isolate());
- __ Move(Operand(r9, i * kPointerSize), constant);
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ movq(Operand(r9, i * kPointerSize), r8);
- }
-
- // rax: argc
- // rdx: JSObject (untagged)
- // Move argc to rbx and the JSObject to return to rax and tag it.
- __ movq(rbx, rax);
- __ movq(rax, rdx);
- __ or_(rax, Immediate(kHeapObjectTag));
-
- // rax: JSObject
- // rbx: argc
- // Remove caller arguments and receiver from the stack and return.
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rbx, times_pointer_size, 1 * kPointerSize));
- __ push(rcx);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- __ IncrementCounter(counters->constructed_objects_stub(), 1);
- __ ret(0);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(code, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- __ JumpIfNotSmi(rax, &miss_force_generic);
- __ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // rdx: receiver
- // rax: key
- // rbx: key as untagged int32
- // rcx: elements
- __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax);
- __ ret(0);
-
- __ bind(&slow);
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch,
- XMMRegister xmm_scratch0,
- XMMRegister xmm_scratch1,
- Label* fail) {
- // Check that key is a smi or a heap number containing a smi and branch
- // if the check fails.
- Label key_ok;
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- masm->isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
- __ movsd(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
- __ cvttsd2si(scratch, xmm_scratch0);
- __ cvtlsi2sd(xmm_scratch1, scratch);
- __ ucomisd(xmm_scratch1, xmm_scratch0);
- __ j(not_equal, fail);
- __ j(parity_even, fail); // NaN.
- __ Integer32ToSmi(key, scratch);
- __ bind(&key_ok);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(rdi, rcx); // Untag the index.
- __ cmpq(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- Label check_heap_number;
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Float to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(rax, &slow);
- } else {
- __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
- }
- // No more branches to slow case on this path. Key and receiver not needed.
- __ SmiToInteger32(rdx, rax);
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- { // Clamp the value to [0..255].
- Label done;
- __ testl(rdx, Immediate(0xFFFFFF00));
- __ j(zero, &done, Label::kNear);
- __ setcc(negative, rdx); // 1 if negative, 0 if positive.
- __ decb(rdx); // 0 if negative, 255 if positive.
- __ bind(&done);
- }
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ cvtlsi2ss(xmm0, rdx);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ cvtlsi2sd(xmm0, rdx);
- __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- __ ret(0);
-
- // TODO(danno): handle heap number -> pixel array conversion
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- __ bind(&check_heap_number);
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
- __ j(not_equal, &slow);
- // No more branches to slow case on this path.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rdi: untagged index
- // rbx: base pointer of external storage
- // top of FPU stack: value
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvtsd2ss(xmm0, xmm0);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- __ ret(0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
- // Fast path: use machine instruction to convert to int64. If that
- // fails (out-of-range), go into the runtime.
- __ cvttsd2siq(r8, xmm0);
- __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpq(r8, kScratchRegister);
- __ j(equal, &slow);
-
- // rdx: value (converted to an untagged integer)
- // rdi: untagged index
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(Operand(rbx, rdi, times_1, 0), r8);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(Operand(rbx, rdi, times_2, 0), r8);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(Operand(rbx, rdi, times_4, 0), r8);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- __ ret(0);
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
-
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Miss case: call runtime.
- __ bind(&miss_force_generic);
-
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, finish_store, grow;
- Label check_capacity, slow;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(rax, &transition_elements_kind);
- }
-
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check that the key is within bounds.
- if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
- }
-
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &miss_force_generic);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- __ SmiToInteger32(rcx, rcx);
- __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- } else {
- // Do the store and update the write barrier.
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ SmiToInteger32(rcx, rcx);
- __ lea(rcx,
- FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
- // Make sure to preserve the value in register rax.
- __ movq(rbx, rax);
- __ RecordWrite(rdi, rcx, rbx, kDontSaveFPRegs);
- }
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss_force_generic);
- Handle<Code> ic_force_generic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags are already set by previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ Move(FieldOperand(rdi, JSObject::kMapOffset),
- masm->isolate()->factory()->fixed_array_map());
- __ Move(FieldOperand(rdi, FixedArray::kLengthOffset),
- Smi::FromInt(JSArray::kPreallocatedArrayElements));
- __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ movq(FieldOperand(rdi, FixedArray::SizeFor(i)), rbx);
- }
-
- // Store the element at index zero.
- __ movq(FieldOperand(rdi, FixedArray::SizeFor(0)), rax);
-
- // Install the new backing store in the JSArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
- __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
- __ ret(0);
-
- __ bind(&check_capacity);
- // Check for cow elements, in general they are not handled by this stub.
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &miss_force_generic);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmpq(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
- Smi::FromInt(1));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, finish_store;
- Label grow, slow, check_capacity, restore_key_transition_elements_kind;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rdi);
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
- }
-
- // Handle smi values specially
- __ bind(&finish_store);
- __ SmiToInteger32(rcx, rcx);
- __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &restore_key_transition_elements_kind);
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic_force_generic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- __ bind(&restore_key_transition_elements_kind);
- // Restore smi-tagging of rcx.
- __ Integer32ToSmi(rcx, rcx);
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags are already set by previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(rax, &value_is_smi);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Initialize the new FixedDoubleArray. Leave elements unitialized for
- // efficiency, they are guaranteed to be initialized before use.
- __ Move(FieldOperand(rdi, JSObject::kMapOffset),
- masm->isolate()->factory()->fixed_double_array_map());
- __ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
- Smi::FromInt(JSArray::kPreallocatedArrayElements));
-
- // Increment the length of the array.
- __ SmiToInteger32(rcx, rcx);
- __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &restore_key_transition_elements_kind);
-
- __ movq(r8, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- __ movq(FieldOperand(rdi, FixedDoubleArray::OffsetOfElementAt(i)), r8);
- }
-
- // Install the new backing store in the JSArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
- __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ ret(0);
-
- __ bind(&check_capacity);
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmpq(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
- Smi::FromInt(1));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/zone-inl.h b/src/3rdparty/v8/src/zone-inl.h
deleted file mode 100644
index e312b20..0000000
--- a/src/3rdparty/v8/src/zone-inl.h
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ZONE_INL_H_
-#define V8_ZONE_INL_H_
-
-#include "zone.h"
-
-#include "counters.h"
-#include "isolate.h"
-#include "utils.h"
-#include "v8-counters.h"
-
-namespace v8 {
-namespace internal {
-
-
-inline void* Zone::New(int size) {
- ASSERT(scope_nesting_ > 0);
- // Round up the requested size to fit the alignment.
- size = RoundUp(size, kAlignment);
-
- // If the allocation size is divisible by 8 then we return an 8-byte aligned
- // address.
- if (kPointerSize == 4 && kAlignment == 4) {
- position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
- } else {
- ASSERT(kAlignment >= kPointerSize);
- }
-
- // Check if the requested size is available without expanding.
- Address result = position_;
-
- if (size > limit_ - position_) {
- result = NewExpand(size);
- } else {
- position_ += size;
- }
-
- // Check that the result has the proper alignment and return it.
- ASSERT(IsAddressAligned(result, kAlignment, 0));
- allocation_size_ += size;
- return reinterpret_cast<void*>(result);
-}
-
-
-template <typename T>
-T* Zone::NewArray(int length) {
- return static_cast<T*>(New(length * sizeof(T)));
-}
-
-
-bool Zone::excess_allocation() {
- return segment_bytes_allocated_ > zone_excess_limit_;
-}
-
-
-void Zone::adjust_segment_bytes_allocated(int delta) {
- segment_bytes_allocated_ += delta;
- isolate_->counters()->zone_segment_bytes()->Set(segment_bytes_allocated_);
-}
-
-
-template <typename Config>
-ZoneSplayTree<Config>::~ZoneSplayTree() {
- // Reset the root to avoid unneeded iteration over all tree nodes
- // in the destructor. For a zone-allocated tree, nodes will be
- // freed by the Zone.
- SplayTree<Config, ZoneAllocationPolicy>::ResetRoot();
-}
-
-
-void* ZoneObject::operator new(size_t size, Zone* zone) {
- return zone->New(static_cast<int>(size));
-}
-
-inline void* ZoneAllocationPolicy::New(size_t size) {
- ASSERT(zone_);
- return zone_->New(static_cast<int>(size));
-}
-
-
-template <typename T>
-void* ZoneList<T>::operator new(size_t size, Zone* zone) {
- return zone->New(static_cast<int>(size));
-}
-
-
-ZoneScope::ZoneScope(Zone* zone, ZoneScopeMode mode)
- : zone_(zone), mode_(mode) {
- zone_->scope_nesting_++;
-}
-
-
-bool ZoneScope::ShouldDeleteOnExit() {
- return zone_->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ZONE_INL_H_
diff --git a/src/3rdparty/v8/src/zone.cc b/src/3rdparty/v8/src/zone.cc
deleted file mode 100644
index 51b8113..0000000
--- a/src/3rdparty/v8/src/zone.cc
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <string.h>
-
-#include "v8.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Segments represent chunks of memory: They have starting address
-// (encoded in the this pointer) and a size in bytes. Segments are
-// chained together forming a LIFO structure with the newest segment
-// available as segment_head_. Segments are allocated using malloc()
-// and de-allocated using free().
-
-class Segment {
- public:
- void Initialize(Segment* next, int size) {
- next_ = next;
- size_ = size;
- }
-
- Segment* next() const { return next_; }
- void clear_next() { next_ = NULL; }
-
- int size() const { return size_; }
- int capacity() const { return size_ - sizeof(Segment); }
-
- Address start() const { return address(sizeof(Segment)); }
- Address end() const { return address(size_); }
-
- private:
- // Computes the address of the nth byte in this segment.
- Address address(int n) const {
- return Address(this) + n;
- }
-
- Segment* next_;
- int size_;
-};
-
-
-Zone::Zone(Isolate* isolate)
- : zone_excess_limit_(256 * MB),
- segment_bytes_allocated_(0),
- position_(0),
- limit_(0),
- scope_nesting_(0),
- segment_head_(NULL),
- isolate_(isolate) {
-}
-unsigned Zone::allocation_size_ = 0;
-
-ZoneScope::~ZoneScope() {
- if (ShouldDeleteOnExit()) zone_->DeleteAll();
- zone_->scope_nesting_--;
-}
-
-
-// Creates a new segment, sets it size, and pushes it to the front
-// of the segment chain. Returns the new segment.
-Segment* Zone::NewSegment(int size) {
- Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
- adjust_segment_bytes_allocated(size);
- if (result != NULL) {
- result->Initialize(segment_head_, size);
- segment_head_ = result;
- }
- return result;
-}
-
-
-// Deletes the given segment. Does not touch the segment chain.
-void Zone::DeleteSegment(Segment* segment, int size) {
- adjust_segment_bytes_allocated(-size);
- Malloced::Delete(segment);
-}
-
-
-void Zone::DeleteAll() {
-#ifdef DEBUG
- // Constant byte value used for zapping dead memory in debug mode.
- static const unsigned char kZapDeadByte = 0xcd;
-#endif
-
- // Find a segment with a suitable size to keep around.
- Segment* keep = segment_head_;
- while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
- keep = keep->next();
- }
-
- // Traverse the chained list of segments, zapping (in debug mode)
- // and freeing every segment except the one we wish to keep.
- Segment* current = segment_head_;
- while (current != NULL) {
- Segment* next = current->next();
- if (current == keep) {
- // Unlink the segment we wish to keep from the list.
- current->clear_next();
- } else {
- int size = current->size();
-#ifdef DEBUG
- // Zap the entire current segment (including the header).
- memset(current, kZapDeadByte, size);
-#endif
- DeleteSegment(current, size);
- }
- current = next;
- }
-
- // If we have found a segment we want to keep, we must recompute the
- // variables 'position' and 'limit' to prepare for future allocate
- // attempts. Otherwise, we must clear the position and limit to
- // force a new segment to be allocated on demand.
- if (keep != NULL) {
- Address start = keep->start();
- position_ = RoundUp(start, kAlignment);
- limit_ = keep->end();
-#ifdef DEBUG
- // Zap the contents of the kept segment (but not the header).
- memset(start, kZapDeadByte, keep->capacity());
-#endif
- } else {
- position_ = limit_ = 0;
- }
-
- // Update the head segment to be the kept segment (if any).
- segment_head_ = keep;
-}
-
-
-void Zone::DeleteKeptSegment() {
- if (segment_head_ != NULL) {
- DeleteSegment(segment_head_, segment_head_->size());
- segment_head_ = NULL;
- }
-}
-
-
-Address Zone::NewExpand(int size) {
- // Make sure the requested size is already properly aligned and that
- // there isn't enough room in the Zone to satisfy the request.
- ASSERT(size == RoundDown(size, kAlignment));
- ASSERT(size > limit_ - position_);
-
- // Compute the new segment size. We use a 'high water mark'
- // strategy, where we increase the segment size every time we expand
- // except that we employ a maximum segment size when we delete. This
- // is to avoid excessive malloc() and free() overhead.
- Segment* head = segment_head_;
- int old_size = (head == NULL) ? 0 : head->size();
- static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
- int new_size_no_overhead = size + (old_size << 1);
- int new_size = kSegmentOverhead + new_size_no_overhead;
- // Guard against integer overflow.
- if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
- V8::FatalProcessOutOfMemory("Zone");
- return NULL;
- }
- if (new_size < kMinimumSegmentSize) {
- new_size = kMinimumSegmentSize;
- } else if (new_size > kMaximumSegmentSize) {
- // Limit the size of new segments to avoid growing the segment size
- // exponentially, thus putting pressure on contiguous virtual address space.
- // All the while making sure to allocate a segment large enough to hold the
- // requested size.
- new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
- }
- Segment* segment = NewSegment(new_size);
- if (segment == NULL) {
- V8::FatalProcessOutOfMemory("Zone");
- return NULL;
- }
-
- // Recompute 'top' and 'limit' based on the new segment.
- Address result = RoundUp(segment->start(), kAlignment);
- position_ = result + size;
- // Check for address overflow.
- if (position_ < result) {
- V8::FatalProcessOutOfMemory("Zone");
- return NULL;
- }
- limit_ = segment->end();
- ASSERT(position_ <= limit_);
- return result;
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/zone.h b/src/3rdparty/v8/src/zone.h
deleted file mode 100644
index 01e887e..0000000
--- a/src/3rdparty/v8/src/zone.h
+++ /dev/null
@@ -1,273 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ZONE_H_
-#define V8_ZONE_H_
-
-#include "allocation.h"
-#include "checks.h"
-#include "hashmap.h"
-#include "globals.h"
-#include "list.h"
-#include "splay-tree.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Zone scopes are in one of two modes. Either they delete the zone
-// on exit or they do not.
-enum ZoneScopeMode {
- DELETE_ON_EXIT,
- DONT_DELETE_ON_EXIT
-};
-
-class Segment;
-class Isolate;
-
-// The Zone supports very fast allocation of small chunks of
-// memory. The chunks cannot be deallocated individually, but instead
-// the Zone supports deallocating all chunks in one fast
-// operation. The Zone is used to hold temporary data structures like
-// the abstract syntax tree, which is deallocated after compilation.
-
-// Note: There is no need to initialize the Zone; the first time an
-// allocation is attempted, a segment of memory will be requested
-// through a call to malloc().
-
-// Note: The implementation is inherently not thread safe. Do not use
-// from multi-threaded code.
-
-class Zone {
- public:
- explicit Zone(Isolate* isolate);
- ~Zone() { DeleteKeptSegment(); }
- // Allocate 'size' bytes of memory in the Zone; expands the Zone by
- // allocating new segments of memory on demand using malloc().
- inline void* New(int size);
-
- template <typename T>
- inline T* NewArray(int length);
-
- // Deletes all objects and free all memory allocated in the Zone. Keeps one
- // small (size <= kMaximumKeptSegmentSize) segment around if it finds one.
- void DeleteAll();
-
- // Deletes the last small segment kept around by DeleteAll().
- void DeleteKeptSegment();
-
- // Returns true if more memory has been allocated in zones than
- // the limit allows.
- inline bool excess_allocation();
-
- inline void adjust_segment_bytes_allocated(int delta);
-
- inline Isolate* isolate() { return isolate_; }
-
- static unsigned allocation_size_;
-
- private:
- friend class Isolate;
- friend class ZoneScope;
-
- // All pointers returned from New() have this alignment. In addition, if the
- // object being allocated has a size that is divisible by 8 then its alignment
- // will be 8.
- static const int kAlignment = kPointerSize;
-
- // Never allocate segments smaller than this size in bytes.
- static const int kMinimumSegmentSize = 8 * KB;
-
- // Never allocate segments larger than this size in bytes.
- static const int kMaximumSegmentSize = 1 * MB;
-
- // Never keep segments larger than this size in bytes around.
- static const int kMaximumKeptSegmentSize = 64 * KB;
-
- // Report zone excess when allocation exceeds this limit.
- int zone_excess_limit_;
-
- // The number of bytes allocated in segments. Note that this number
- // includes memory allocated from the OS but not yet allocated from
- // the zone.
- int segment_bytes_allocated_;
-
- // Expand the Zone to hold at least 'size' more bytes and allocate
- // the bytes. Returns the address of the newly allocated chunk of
- // memory in the Zone. Should only be called if there isn't enough
- // room in the Zone already.
- Address NewExpand(int size);
-
- // Creates a new segment, sets it size, and pushes it to the front
- // of the segment chain. Returns the new segment.
- Segment* NewSegment(int size);
-
- // Deletes the given segment. Does not touch the segment chain.
- void DeleteSegment(Segment* segment, int size);
-
- // The free region in the current (front) segment is represented as
- // the half-open interval [position, limit). The 'position' variable
- // is guaranteed to be aligned as dictated by kAlignment.
- Address position_;
- Address limit_;
-
- int scope_nesting_;
-
- Segment* segment_head_;
- Isolate* isolate_;
-};
-
-
-// ZoneObject is an abstraction that helps define classes of objects
-// allocated in the Zone. Use it as a base class; see ast.h.
-class ZoneObject {
- public:
- // Allocate a new ZoneObject of 'size' bytes in the Zone.
- INLINE(void* operator new(size_t size, Zone* zone));
-
- // Ideally, the delete operator should be private instead of
- // public, but unfortunately the compiler sometimes synthesizes
- // (unused) destructors for classes derived from ZoneObject, which
- // require the operator to be visible. MSVC requires the delete
- // operator to be public.
-
- // ZoneObjects should never be deleted individually; use
- // Zone::DeleteAll() to delete all zone objects in one go.
- void operator delete(void*, size_t) { UNREACHABLE(); }
- void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
-};
-
-
-// The ZoneAllocationPolicy is used to specialize generic data
-// structures to allocate themselves and their elements in the Zone.
-struct ZoneAllocationPolicy {
- public:
- explicit ZoneAllocationPolicy(Zone* zone) : zone_(zone) { }
- INLINE(void* New(size_t size));
- INLINE(static void Delete(void *pointer)) { }
-
- private:
- Zone* zone_;
-};
-
-
-// ZoneLists are growable lists with constant-time access to the
-// elements. The list itself and all its elements are allocated in the
-// Zone. ZoneLists cannot be deleted individually; you can delete all
-// objects in the Zone by calling Zone::DeleteAll().
-template<typename T>
-class ZoneList: public List<T, ZoneAllocationPolicy> {
- public:
- // Construct a new ZoneList with the given capacity; the length is
- // always zero. The capacity must be non-negative.
- ZoneList(int capacity, Zone* zone)
- : List<T, ZoneAllocationPolicy>(capacity, ZoneAllocationPolicy(zone)) { }
-
- INLINE(void* operator new(size_t size, Zone* zone));
-
- // Construct a new ZoneList by copying the elements of the given ZoneList.
- ZoneList(const ZoneList<T>& other, Zone* zone)
- : List<T, ZoneAllocationPolicy>(other.length(),
- ZoneAllocationPolicy(zone)) {
- AddAll(other, ZoneAllocationPolicy(zone));
- }
-
- // We add some convenience wrappers so that we can pass in a Zone
- // instead of a (less convenient) ZoneAllocationPolicy.
- INLINE(void Add(const T& element, Zone* zone)) {
- List<T, ZoneAllocationPolicy>::Add(element, ZoneAllocationPolicy(zone));
- }
- INLINE(void AddAll(const List<T, ZoneAllocationPolicy>& other,
- Zone* zone)) {
- List<T, ZoneAllocationPolicy>::AddAll(other, ZoneAllocationPolicy(zone));
- }
- INLINE(void AddAll(const Vector<T>& other, Zone* zone)) {
- List<T, ZoneAllocationPolicy>::AddAll(other, ZoneAllocationPolicy(zone));
- }
- INLINE(void InsertAt(int index, const T& element, Zone* zone)) {
- List<T, ZoneAllocationPolicy>::InsertAt(index, element,
- ZoneAllocationPolicy(zone));
- }
- INLINE(Vector<T> AddBlock(T value, int count, Zone* zone)) {
- return List<T, ZoneAllocationPolicy>::AddBlock(value, count,
- ZoneAllocationPolicy(zone));
- }
- INLINE(void Allocate(int length, Zone* zone)) {
- List<T, ZoneAllocationPolicy>::Allocate(length, ZoneAllocationPolicy(zone));
- }
- INLINE(void Initialize(int capacity, Zone* zone)) {
- List<T, ZoneAllocationPolicy>::Initialize(capacity,
- ZoneAllocationPolicy(zone));
- }
-
- void operator delete(void* pointer) { UNREACHABLE(); }
- void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
-};
-
-
-// ZoneScopes keep track of the current parsing and compilation
-// nesting and cleans up generated ASTs in the Zone when exiting the
-// outer-most scope.
-class ZoneScope BASE_EMBEDDED {
- public:
- INLINE(ZoneScope(Zone* zone, ZoneScopeMode mode));
-
- virtual ~ZoneScope();
-
- inline bool ShouldDeleteOnExit();
-
- // For ZoneScopes that do not delete on exit by default, call this
- // method to request deletion on exit.
- void DeleteOnExit() {
- mode_ = DELETE_ON_EXIT;
- }
-
- inline static int nesting();
-
- private:
- Zone* zone_;
- ZoneScopeMode mode_;
-};
-
-
-// A zone splay tree. The config type parameter encapsulates the
-// different configurations of a concrete splay tree (see splay-tree.h).
-// The tree itself and all its elements are allocated in the Zone.
-template <typename Config>
-class ZoneSplayTree: public SplayTree<Config, ZoneAllocationPolicy> {
- public:
- explicit ZoneSplayTree(Zone* zone)
- : SplayTree<Config, ZoneAllocationPolicy>(ZoneAllocationPolicy(zone)) {}
- ~ZoneSplayTree();
-};
-
-
-typedef TemplateHashMapImpl<ZoneAllocationPolicy> ZoneHashMap;
-
-} } // namespace v8::internal
-
-#endif // V8_ZONE_H_